// Copyright 2017 The Chromium OS Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules of safe Rust are no data races and no aliased mutable references.
//! `VolatileRef` and `VolatileSlice`, along with types that produce those which implement
//! `VolatileMemory`, allow us to sidestep those rules by wrapping pointers that absolutely have to
//! be accessed volatile. Some systems really do need to operate on shared memory and can't have
//! the compiler reordering or eliding access because it has no visibility into what other systems
//! are doing with that hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
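//!
//! # Examples
//!
//! A minimal sketch of typical usage, using only the types defined in this module:
//!
//! ```
//! # use data_model::{VolatileMemory, VolatileSlice};
//! let mut mem = [0u8; 32];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // Fill the memory with a byte pattern using volatile writes...
//! vslice.write_bytes(0xaa);
//! // ...and read part of it back through a typed volatile reference.
//! assert_eq!(vslice.get_ref::<u8>(7).unwrap().load(), 0xaa);
//! ```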

use std::cmp::min;
use std::fmt::{self, Debug, Display};
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::{copy, read_volatile, write_bytes, write_volatile};
use std::result;
use std::slice;
use std::usize;

use libc::iovec;

use crate::{sys::IoBufMut, DataInit};

#[derive(Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    Overflow { base: usize, offset: usize },
}

impl Display for VolatileMemoryError {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        use self::VolatileMemoryError::*;

        match self {
            OutOfBounds { addr } => write!(f, "address 0x{:x} is out of bounds", addr),
            Overflow { base, offset } => write!(
                f,
                "address 0x{:x} offset by 0x{:x} would overflow",
                base, offset
            ),
        }
    }
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Convenience function for computing `base + offset` which returns
/// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `usize::MAX`.
///
/// # Examples
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: usize, count: usize) -> VolatileMemoryResult<()> {
/// let mem_end = calc_offset(offset, count)?;
/// if mem_end > 100 {
///     return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
/// }
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
    match base.checked_add(offset) {
        None => Err(Error::Overflow { base, offset }),
        Some(m) => Ok(m),
    }
}

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
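    ///
    /// # Examples
    ///
    /// A small sketch of typical usage via `VolatileSlice`, which implements this trait further
    /// below; the offsets and sizes are only illustrative.
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// // A 16-byte slice starting at offset 4 is in bounds...
    /// let sub = vslice.get_slice(4, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// // ...while one extending past the end is rejected.
    /// assert!(vslice.get_slice(24, 16).is_err());
    /// ```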
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;

    /// Gets a `VolatileRef` at `offset`.
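    ///
    /// # Examples
    ///
    /// A brief sketch of the default implementation in use, mirroring the `copy_from` example
    /// below; the offset and value are only illustrative.
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let v_ref = vslice.get_ref::<u32>(4).unwrap();
    /// v_ref.store(0xabcd_ef01u32);
    /// assert_eq!(v_ref.load(), 0xabcd_ef01);
    /// ```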
    fn get_ref<T: DataInit>(&self, offset: usize) -> Result<VolatileRef<T>> {
        let slice = self.get_slice(offset, size_of::<T>())?;
        Ok(VolatileRef {
            addr: slice.as_mut_ptr() as *mut T,
            phantom: PhantomData,
        })
    }
}

/// A slice of raw memory that supports volatile access. Like `IoBufMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoBufMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len`
    /// bytes and must live for the entire duration of lifetime `'a`.
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Returns this `VolatileSlice` as an `iovec`.
    pub fn as_iobuf(&self) -> &iovec {
        self.0.as_ref()
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `iovec`s.
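    ///
    /// # Examples
    ///
    /// A minimal sketch of gathering several slices, as one would before a vectored I/O call;
    /// only the conversion itself is shown, and the buffer sizes are illustrative.
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut a = [0u8; 8];
    /// let mut b = [0u8; 8];
    /// let vslices = [VolatileSlice::new(&mut a[..]), VolatileSlice::new(&mut b[..])];
    /// let iovs = VolatileSlice::as_iobufs(&vslices);
    /// assert_eq!(iovs.len(), 2);
    /// ```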
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'slice>(iovs: &'slice [VolatileSlice<'_>]) -> &'slice [iovec] {
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        IoBufMut::as_iobufs(unsafe {
            slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len())
        })
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
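    ///
    /// # Examples
    ///
    /// A short sketch with illustrative sizes:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// // Skipping the first 16 bytes of a 32-byte slice leaves 16 bytes.
    /// let shifted = vslice.offset(16).unwrap();
    /// assert_eq!(shifted.size(), 16);
    /// ```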
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // Safe because the memory has the same lifetime and points to a subset of the memory of
        // the original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
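    ///
    /// # Examples
    ///
    /// A sketch of the in-bounds and out-of-bounds cases; the sizes are only illustrative.
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(8, 16).unwrap();
    /// assert_eq!(sub.size(), 16);
    /// // 24 + 16 runs past the end of a 32-byte slice.
    /// assert!(vslice.sub_slice(24, 16).is_err());
    /// ```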
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile
    /// reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for &v in &buf[..] {
    ///     assert_eq!(v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: DataInit,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // Safe because the pointer stays within this slice's memory, which is valid for
            // volatile reads of `T` sized chunks for the lifetime of `self`.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // Safe because the copy is bounded by the smaller of the two slices, both of which are
        // valid for the duration of this call.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile
    /// writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// for i in 0..4 {
    ///     assert_eq!(vslice.get_ref::<u32>(i * 4).map_err(|_| ())?.load(), 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: DataInit,
    {
        let mut addr = self.as_mut_ptr();
        for &v in buf.iter().take(self.size() / size_of::<T>()) {
            // Safe because the pointer stays within this slice's memory, which is valid for
            // volatile writes of `T` sized chunks for the lifetime of `self`.
            unsafe {
                write_volatile(addr as *mut T, v);
                addr = addr.add(size_of::<T>());
            }
        }
    }
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use data_model::VolatileRef;
/// let mut v = 5u32;
/// assert_eq!(v, 5);
/// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
/// assert_eq!(v_ref.load(), 5);
/// v_ref.store(500);
/// assert_eq!(v, 500);
/// ```
#[derive(Debug)]
pub struct VolatileRef<'a, T: DataInit>
where
    T: 'a,
{
    addr: *mut T,
    phantom: PhantomData<&'a T>,
}

impl<'a, T: DataInit> VolatileRef<'a, T> {
    /// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
    ///
    /// # Safety
    ///
    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for
    /// a `T` and is available for the duration of the lifetime of the new `VolatileRef`. The
    /// caller must also guarantee that all other users of the given chunk of memory are using
    /// volatile accesses.
    pub unsafe fn new(addr: *mut T) -> VolatileRef<'a, T> {
        VolatileRef {
            addr,
            phantom: PhantomData,
        }
    }

    /// Gets the address of this ref's memory.
    pub fn as_mut_ptr(&self) -> *mut T {
        self.addr
    }

    /// Gets the size of the referenced type `T`.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileRef;
    /// let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
    /// assert_eq!(v_ref.size(), size_of::<u32>());
    /// ```
    pub fn size(&self) -> usize {
        size_of::<T>()
    }

    /// Does a volatile write of the value `v` to the address of this ref.
    #[inline(always)]
    pub fn store(&self, v: T) {
        // Safe because the caller of `new` guaranteed that our address is valid for a `T`.
        unsafe { write_volatile(self.addr, v) };
    }

    /// Does a volatile read of the value at the address of this ref.
    #[inline(always)]
    pub fn load(&self) -> T {
        // For the purposes of demonstrating why read_volatile is necessary, try replacing the
        // code in this function with the commented code below and running `cargo test --release`.
        // unsafe { *(self.addr as *const T) }
        unsafe { read_volatile(self.addr) }
    }

    /// Converts this `T` reference to a raw slice with the same size and address.
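    ///
    /// # Examples
    ///
    /// A small sketch of the conversion; the value and type are only illustrative.
    ///
    /// ```
    /// # use data_model::VolatileRef;
    /// let mut v = 5u32;
    /// let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
    /// let slice = v_ref.to_slice();
    /// // The slice covers exactly the bytes of the referenced `u32`.
    /// assert_eq!(slice.size(), 4);
    /// assert_eq!(slice.as_mut_ptr() as usize, v_ref.as_mut_ptr() as usize);
    /// ```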
    pub fn to_slice(&self) -> VolatileSlice<'a> {
        // Safe because the lifetime, address, and size of the slice match those of this ref.
        unsafe { VolatileSlice::from_raw_parts(self.as_mut_ptr() as *mut u8, self.size()) }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    use std::sync::{Arc, Barrier};
    use std::thread::spawn;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            let mut mem = Vec::new();
            mem.resize(size, 0);
            VecMem { mem: Arc::new(mem) }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = calc_offset(offset, count)?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
        }
    }

    #[test]
    fn ref_store() {
        let mut a = [0u8; 1];
        let a_ref = VolatileSlice::new(&mut a[..]);
        let v_ref = a_ref.get_ref(0).unwrap();
        v_ref.store(2u8);
        assert_eq!(a[0], 2);
    }

    #[test]
    fn ref_load() {
        let mut a = [5u8; 1];
        {
            let a_ref = VolatileSlice::new(&mut a[..]);
            let c = {
                let v_ref = a_ref.get_ref::<u8>(0).unwrap();
                assert_eq!(v_ref.load(), 5u8);
                v_ref
            };
            // To make sure we can take a v_ref out of the scope we made it in:
            c.load();
            // but not too far:
            // c
        } //.load()
        ;
    }

    #[test]
    fn ref_to_slice() {
        let mut a = [1u8; 5];
        let a_ref = VolatileSlice::new(&mut a[..]);
        let v_ref = a_ref.get_ref(1).unwrap();
        v_ref.store(0x12345678u32);
        let ref_slice = v_ref.to_slice();
        assert_eq!(v_ref.as_mut_ptr() as usize, ref_slice.as_mut_ptr() as usize);
        assert_eq!(v_ref.size(), ref_slice.size());
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        let v_ref = a.get_ref::<u8>(0).unwrap();
        v_ref.store(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            let clone_v_ref = a_clone.get_ref::<u8>(0).unwrap();
            clone_v_ref.store(0);
            thread_end_barrier.wait();
        });

        assert_eq!(v_ref.load(), 99);

        start_barrier.wait();
        end_barrier.wait();

        assert_eq!(v_ref.load(), 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn ref_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_ref::<u8>(MAX).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn ref_oob_error() {
        let a = VecMem::new(100);
        a.get_ref::<u8>(99).unwrap();
        let res = a.get_ref::<u16>(99).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 101 });
    }

    #[test]
    fn ref_oob_too_large() {
        let a = VecMem::new(3);
        let res = a.get_ref::<u32>(0).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 4 });
    }
}