// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileSlice`, along with types that produce it which implement
//! `VolatileMemory`, allows us to sidestep those rules by wrapping pointers that absolutely have
//! to be accessed volatile. Some systems really do need to operate on shared memory and can't have
//! the compiler reordering or eliding access because it has no visibility into what other systems
//! are doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
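//!
//! As a brief usage sketch (using only the types defined in this module), a plain buffer can be
//! wrapped and then accessed exclusively through the volatile API:
//!
//! ```
//! # use base::VolatileSlice;
//! let mut mem = [0u8; 4];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // All access goes through volatile reads/writes rather than `&`/`&mut` references.
//! vslice.write_bytes(0xff);
//! let mut out = [0u8; 4];
//! vslice.copy_to(&mut out[..]);
//! assert_eq!(out, [0xff; 4]);
//! ```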

use std::cmp::min;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::read_volatile;
use std::ptr::write_bytes;
use std::ptr::write_volatile;
use std::result;
use std::slice;
use std::usize;

use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::Ref;

use crate::IoBufMut;

#[sorted]
#[derive(Error, Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    #[error("address 0x{addr:x} is out of bounds")]
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
    Overflow { base: usize, offset: usize },
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
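    ///
    /// # Examples
    ///
    /// For example, slicing into a `VolatileSlice`, which implements this trait below:
    ///
    /// ```
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 8];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.get_slice(2, 4).unwrap();
    /// assert_eq!(sub.size(), 4);
    /// ```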
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
}

/// A slice of raw memory that supports volatile access. Like `std::io::IoSliceMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoSliceMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len` bytes
    /// and should live for the entire duration of lifetime `'a`.
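    ///
    /// # Examples
    ///
    /// A minimal sketch of upholding that contract with a local buffer:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// // SAFETY: `mem` is valid for reads and writes of 32 bytes and outlives the slice.
    /// let vslice = unsafe { VolatileSlice::from_raw_parts(mem.as_mut_ptr(), mem.len()) };
    /// assert_eq!(vslice.size(), 32);
    /// ```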
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Advance the starting position of this slice.
    ///
    /// Panics if `count > self.size()`.
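    ///
    /// # Examples
    ///
    /// For example, advancing a 32-byte slice by 8 bytes leaves a 24-byte slice:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.advance(8);
    /// assert_eq!(vslice.size(), 24);
    /// ```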
    pub fn advance(&mut self, count: usize) {
        self.0.advance(count)
    }

    /// Shorten the length of the slice.
    ///
    /// Has no effect if `len > self.size()`.
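    ///
    /// # Examples
    ///
    /// For example, truncating a 32-byte slice down to 16 bytes:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.truncate(16);
    /// assert_eq!(vslice.size(), 16);
    /// ```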
    pub fn truncate(&mut self, len: usize) {
        self.0.truncate(len)
    }

    /// Returns this `VolatileSlice` as an `IoBufMut`.
    pub fn as_iobuf(&self) -> &IoBufMut {
        &self.0
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `IoBufMut`s
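    ///
    /// # Examples
    ///
    /// A small sketch of the conversion; the resulting `IoBufMut`s point at the same memory:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let iovs = [vslice];
    /// let iobufs = VolatileSlice::as_iobufs(&iovs);
    /// assert_eq!(iobufs.len(), 1);
    /// ```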
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'mem, 'slice>(
        iovs: &'slice [VolatileSlice<'mem>],
    ) -> &'slice [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
    }

    /// Converts a mutable slice of `VolatileSlice`s into a mutable slice of `IoBufMut`s
    #[inline]
    pub fn as_iobufs_mut<'mem, 'slice>(
        iovs: &'slice mut [VolatileSlice<'mem>],
    ) -> &'slice mut [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBufMut, iovs.len()) }
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
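    ///
    /// # Examples
    ///
    /// For example, offsetting a 32-byte slice by 16 bytes leaves the upper half:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let second_half = vslice.offset(16).unwrap();
    /// assert_eq!(second_half.size(), 16);
    /// ```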
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // SAFETY:
        // Safe because the memory has the same lifetime and points to a subset of the memory of the
        // original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
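    ///
    /// # Examples
    ///
    /// For example, taking a 16-byte sub-slice starting 8 bytes in:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(8, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// ```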
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // SAFETY:
        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // SAFETY:
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: FromBytes + AsBytes + Copy,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is initialized.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // SAFETY: Safe because slice is valid and is byte aligned.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// let mut copy_buf = [0u32; 4];
    /// vslice.copy_to(&mut copy_buf);
    /// for i in 0..4 {
    ///     assert_eq!(copy_buf[i], 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: FromBytes + AsBytes,
    {
        let mut addr = self.as_mut_ptr();
        for v in buf.iter().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is mutable.
            unsafe {
                write_volatile(
                    addr as *mut T,
                    Ref::<_, T>::new(v.as_bytes()).unwrap().read(),
                );
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Returns whether all bytes in this slice are zero or not.
    ///
    /// This is optimized for [VolatileSlice] aligned to 16 bytes.
    ///
    /// TODO(b/274840085): Use SIMD for better performance.
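    ///
    /// # Examples
    ///
    /// For example, a freshly zeroed buffer reports all-zero until any byte is written:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert!(vslice.is_all_zero());
    /// vslice.write_bytes(1);
    /// assert!(!vslice.is_all_zero());
    /// ```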
    pub fn is_all_zero(&self) -> bool {
        const MASK_4BIT: usize = 0x0f;
        let head_addr = self.as_ptr() as usize;
        // Round up to a 16-byte boundary
        let aligned_head_addr = (head_addr + MASK_4BIT) & !MASK_4BIT;
        let tail_addr = head_addr + self.size();
        // Round down to a 16-byte boundary
        let aligned_tail_addr = tail_addr & !MASK_4BIT;

        // Check 16 bytes at once. The addresses should be 16-byte aligned for better performance.
        if (aligned_head_addr..aligned_tail_addr).step_by(16).any(
            |aligned_addr|
                // SAFETY: Each aligned_addr is within VolatileSlice
                unsafe { *(aligned_addr as *const u128) } != 0,
        ) {
            return false;
        }

        if head_addr == aligned_head_addr && tail_addr == aligned_tail_addr {
            // If head_addr and tail_addr are aligned, we can skip the unaligned parts, which
            // contain at least 2 conditional branches.
            true
        } else {
            // Check the unaligned parts.
            // SAFETY: The ranges [head_addr, aligned_head_addr) and [aligned_tail_addr, tail_addr)
            // are within VolatileSlice.
            unsafe {
                is_all_zero_naive(head_addr, aligned_head_addr)
                    && is_all_zero_naive(aligned_tail_addr, tail_addr)
            }
        }
    }
}

/// Check whether every byte is zero.
///
/// This checks byte by byte.
///
/// # Safety
///
/// * `head_addr` <= `tail_addr`
/// * Bytes between `head_addr` and `tail_addr` are valid to access.
unsafe fn is_all_zero_naive(head_addr: usize, tail_addr: usize) -> bool {
    (head_addr..tail_addr).all(|addr| *(addr as *const u8) == 0)
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

impl PartialEq<VolatileSlice<'_>> for VolatileSlice<'_> {
    fn eq(&self, other: &VolatileSlice) -> bool {
        let size = self.size();
        if size != other.size() {
            return false;
        }

        // SAFETY: We pass pointers into valid VolatileSlice regions, and size is checked above.
        let cmp = unsafe { libc::memcmp(self.as_ptr() as _, other.as_ptr() as _, size) };

        cmp == 0
    }
}

/// The `PartialEq` implementation for `VolatileSlice` is reflexive, symmetric, and transitive.
impl Eq for VolatileSlice<'_> {}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread::spawn;

    use super::*;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            VecMem {
                mem: Arc::new(vec![0u8; size]),
            }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = offset
                .checked_add(count)
                .ok_or(VolatileMemoryError::Overflow {
                    base: offset,
                    offset: count,
                })?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            Ok(
                // SAFETY: trivially safe
                unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) },
            )
        }
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        a.get_slice(0, 1).unwrap().write_bytes(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            a_clone.get_slice(0, 1).unwrap().write_bytes(0);
            thread_end_barrier.wait();
        });

        let mut byte = [0u8; 1];
        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 99);

        start_barrier.wait();
        end_barrier.wait();

        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn is_all_zero_16bytes_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1024).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_head_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(0);
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_tail_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(1023, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(0);
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_no_aligned_16bytes() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 16).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        for i in 1..17 {
            a.get_slice(i, 1).unwrap().write_bytes(1);
            assert!(!slice.is_all_zero());
            a.get_slice(i, 1).unwrap().write_bytes(0);
        }
        a.get_slice(17, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
    }
}