// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileSlice`, along with types that produce it which implement `VolatileMemory`, allows us
//! to sidestep those rules by wrapping pointers that absolutely have to be accessed volatile.
//! Some systems really do need to operate on shared memory and can't have the compiler reordering
//! or eliding accesses because it has no visibility into what other systems are doing with that
//! hunk of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
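//!
//! As an illustrative sketch of typical usage (assuming, as in the method examples below, that
//! these types are re-exported from the `base` crate):
//!
//! ```
//! # use base::VolatileMemory;
//! # use base::VolatileSlice;
//! let mut mem = [0u8; 32];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // All access goes through volatile reads and writes rather than references.
//! vslice.write_bytes(0xab);
//! let sub = vslice.get_slice(8, 8).expect("offset and count are in bounds");
//! assert_eq!(sub.size(), 8);
//! ```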

use std::cmp::min;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::read_volatile;
use std::ptr::write_bytes;
use std::ptr::write_volatile;
use std::result;
use std::slice;

use remain::sorted;
use thiserror::Error;
use zerocopy::FromBytes;
use zerocopy::IntoBytes;

use crate::IoBufMut;

#[sorted]
#[derive(Error, Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    #[error("address 0x{addr:x} is out of bounds")]
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
    Overflow { base: usize, offset: usize },
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
}

/// A slice of raw memory that supports volatile access. Like `std::io::IoSliceMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoSliceMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len`
    /// bytes and must remain valid for the entire duration of lifetime `'a`.
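    ///
    /// An illustrative sketch, where the raw parts come from a plain byte array so the safety
    /// requirements are trivially met:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 4];
    /// // SAFETY: `mem` is valid for reads and writes of 4 bytes and outlives the slice.
    /// let vslice = unsafe { VolatileSlice::from_raw_parts(mem.as_mut_ptr(), mem.len()) };
    /// assert_eq!(vslice.size(), 4);
    /// ```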
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Advance the starting position of this slice.
    ///
    /// Panics if `count > self.size()`.
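    ///
    /// A small illustrative sketch of the expected behavior:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 8];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.advance(3);
    /// assert_eq!(vslice.size(), 5);
    /// ```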
    pub fn advance(&mut self, count: usize) {
        self.0.advance(count)
    }

    /// Shorten the length of the slice.
    ///
    /// Has no effect if `len > self.size()`.
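    ///
    /// A small illustrative sketch of the expected behavior:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 8];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.truncate(2);
    /// assert_eq!(vslice.size(), 2);
    /// // Truncating to a longer length leaves the slice unchanged.
    /// vslice.truncate(100);
    /// assert_eq!(vslice.size(), 2);
    /// ```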
    pub fn truncate(&mut self, len: usize) {
        self.0.truncate(len)
    }

    /// Returns this `VolatileSlice` as an `IoBufMut`.
    pub fn as_iobuf(&self) -> &IoBufMut {
        &self.0
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `IoBufMut`s.
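    ///
    /// An illustrative sketch, assuming `IoBufMut` is exported from the same crate as
    /// `VolatileSlice`:
    ///
    /// ```
    /// # use base::IoBufMut;
    /// # use base::VolatileSlice;
    /// let mut a = [0u8; 4];
    /// let mut b = [0u8; 4];
    /// let vslices = [VolatileSlice::new(&mut a[..]), VolatileSlice::new(&mut b[..])];
    /// let iobufs = VolatileSlice::as_iobufs(&vslices);
    /// assert_eq!(iobufs.len(), 2);
    /// ```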
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'mem, 'slice>(
        iovs: &'slice [VolatileSlice<'mem>],
    ) -> &'slice [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
    }

    /// Converts a mutable slice of `VolatileSlice`s into a mutable slice of `IoBufMut`s.
    #[inline]
    pub fn as_iobufs_mut<'mem, 'slice>(
        iovs: &'slice mut [VolatileSlice<'mem>],
    ) -> &'slice mut [IoBufMut<'mem>] {
        // SAFETY:
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts_mut(iovs.as_mut_ptr() as *mut IoBufMut, iovs.len()) }
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
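    ///
    /// A small illustrative sketch of the expected behavior:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 16];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let offset_slice = vslice.offset(4).expect("count is within the slice");
    /// assert_eq!(offset_slice.size(), 12);
    /// ```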
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // SAFETY:
        // Safe because the memory has the same lifetime and points to a subset of the memory of
        // the original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
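    ///
    /// A small illustrative sketch of the expected behavior:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 16];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(4, 8).expect("range is within the slice");
    /// assert_eq!(sub.size(), 8);
    /// // A range extending past the end of the slice is rejected.
    /// assert!(vslice.sub_slice(12, 8).is_err());
    /// ```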
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = offset
            .checked_add(count)
            .ok_or(VolatileMemoryError::Overflow {
                base: offset,
                offset: count,
            })?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // SAFETY:
        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // SAFETY:
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: FromBytes + IntoBytes + Copy,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is initialized.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // SAFETY: Safe because slice is valid and is byte aligned.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::fs::File;
    /// # use std::path::Path;
    /// # use base::VolatileMemory;
    /// # use base::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// let mut copy_buf = [0u32; 4];
    /// vslice.copy_to(&mut copy_buf);
    /// for i in 0..4 {
    ///     assert_eq!(copy_buf[i], 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: IntoBytes + Copy,
    {
        let mut addr = self.as_mut_ptr();
        for v in buf.iter().take(self.size() / size_of::<T>()) {
            // SAFETY: Safe because buf is valid, aligned to type `T` and is mutable.
            unsafe {
                write_volatile(addr as *mut T, *v);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Returns whether all bytes in this slice are zero or not.
    ///
    /// This is optimized for a [VolatileSlice] that is aligned to 16 bytes.
    ///
    /// TODO(b/274840085): Use SIMD for better performance.
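    ///
    /// A small illustrative sketch of the expected behavior:
    ///
    /// ```
    /// # use base::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert!(vslice.is_all_zero());
    /// vslice.write_bytes(1);
    /// assert!(!vslice.is_all_zero());
    /// ```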
    pub fn is_all_zero(&self) -> bool {
        const MASK_4BIT: usize = 0x0f;
        let head_addr = self.as_ptr() as usize;
        // Round up to the next multiple of 16.
        let aligned_head_addr = (head_addr + MASK_4BIT) & !MASK_4BIT;
        let tail_addr = head_addr + self.size();
        // Round down to a multiple of 16.
        let aligned_tail_addr = tail_addr & !MASK_4BIT;

        // Check 16 bytes at once. The addresses should be 16-byte aligned for better performance.
        if (aligned_head_addr..aligned_tail_addr).step_by(16).any(
            |aligned_addr|
                // SAFETY: Each aligned_addr is 16-byte aligned and within the VolatileSlice.
                unsafe { *(aligned_addr as *const u128) } != 0,
        ) {
            return false;
        }

        if head_addr == aligned_head_addr && tail_addr == aligned_tail_addr {
            // If head_addr and tail_addr are aligned, we can skip the unaligned part which contains
            // at least 2 conditional branches.
            true
        } else {
            // Check unaligned part.
            // SAFETY: The ranges [head_addr, aligned_head_addr) and [aligned_tail_addr, tail_addr)
            // are within the VolatileSlice.
            unsafe {
                is_all_zero_naive(head_addr, aligned_head_addr)
                    && is_all_zero_naive(aligned_tail_addr, tail_addr)
            }
        }
    }
}

/// Check whether every byte is zero.
///
/// This checks byte by byte.
///
/// # Safety
///
/// * `head_addr` <= `tail_addr`
/// * Bytes between `head_addr` and `tail_addr` are valid to access.
unsafe fn is_all_zero_naive(head_addr: usize, tail_addr: usize) -> bool {
    (head_addr..tail_addr).all(|addr| *(addr as *const u8) == 0)
}

impl VolatileMemory for VolatileSlice<'_> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

impl PartialEq<VolatileSlice<'_>> for VolatileSlice<'_> {
    fn eq(&self, other: &VolatileSlice) -> bool {
        let size = self.size();
        if size != other.size() {
            return false;
        }

        // SAFETY: We pass pointers into valid VolatileSlice regions, and size is checked above.
        let cmp = unsafe { libc::memcmp(self.as_ptr() as _, other.as_ptr() as _, size) };

        cmp == 0
    }
}

/// The `PartialEq` implementation for `VolatileSlice` is reflexive, symmetric, and transitive.
impl Eq for VolatileSlice<'_> {}

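/// Writing through `std::io::Write` copies bytes into the slice using volatile writes and then
/// advances the slice past the written region. A small illustrative sketch:
///
/// ```
/// # use std::io::Write;
/// # use base::VolatileSlice;
/// let mut mem = [0u8; 8];
/// let mut vslice = VolatileSlice::new(&mut mem[..]);
/// assert_eq!(vslice.write(&[1, 2, 3]).unwrap(), 3);
/// assert_eq!(vslice.size(), 5);
/// ```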
impl std::io::Write for VolatileSlice<'_> {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        let len = buf.len().min(self.size());
        self.copy_from(&buf[..len]);
        self.advance(len);
        Ok(len)
    }

    fn flush(&mut self) -> std::io::Result<()> {
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use std::io::Write;
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread::spawn;

    use super::*;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            VecMem {
                mem: Arc::new(vec![0u8; size]),
            }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = offset
                .checked_add(count)
                .ok_or(VolatileMemoryError::Overflow {
                    base: offset,
                    offset: count,
                })?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            Ok(
                // SAFETY: trivially safe
                unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) },
            )
        }
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        a.get_slice(0, 1).unwrap().write_bytes(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            a_clone.get_slice(0, 1).unwrap().write_bytes(0);
            thread_end_barrier.wait();
        });

        let mut byte = [0u8; 1];
        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 99);

        start_barrier.wait();
        end_barrier.wait();

        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        let a = VecMem::new(1);
        let res = a.get_slice(usize::MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: usize::MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn is_all_zero_16bytes_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1024).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_head_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(0);
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_tail_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(1023, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(0);
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_no_aligned_16bytes() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 16).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        for i in 1..17 {
            a.get_slice(i, 1).unwrap().write_bytes(1);
            assert!(!slice.is_all_zero());
            a.get_slice(i, 1).unwrap().write_bytes(0);
        }
        a.get_slice(17, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
    }

    #[test]
    fn write_partial() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 16).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and advance the slice by 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 16 - 4);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[5], 0xCC);
    }

    #[test]
    fn write_multiple() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 16).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and advance the slice by 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 16 - 4);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[5], 0xCC);

        // Writing another 4 bytes should succeed and advance the slice again.
        let write2_len = slice.write(&[5, 6, 7, 8]).unwrap();
        assert_eq!(write2_len, 4);
        assert_eq!(slice.size(), 16 - 4 - 4);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=8], [1, 2, 3, 4, 5, 6, 7, 8]);

        // The next byte of the slice should be unmodified.
        assert_eq!(mem.mem[9], 0xCC);
    }

    #[test]
    fn write_exact_slice_size() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 4).unwrap();
        slice.write_bytes(0xCC);

        // Writing 4 bytes should succeed and consume the entire slice.
        let write_len = slice.write(&[1, 2, 3, 4]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 0);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The byte after the slice should be unmodified.
        assert_eq!(mem.mem[5], 0);
    }

    #[test]
    fn write_more_than_slice_size() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 4).unwrap();
        slice.write_bytes(0xCC);

        // Attempting to write 5 bytes should succeed but only write 4 bytes.
        let write_len = slice.write(&[1, 2, 3, 4, 5]).unwrap();
        assert_eq!(write_len, 4);
        assert_eq!(slice.size(), 0);

        // The written data should appear in the memory at offset 1.
        assert_eq!(mem.mem[1..=4], [1, 2, 3, 4]);

        // The byte after the slice should be unmodified.
        assert_eq!(mem.mem[5], 0);
    }

    #[test]
    fn write_empty_slice() {
        let mem = VecMem::new(1024);
        let mut slice = mem.get_slice(1, 0).unwrap();

        // Writing to an empty slice should always return 0.
        assert_eq!(slice.write(&[1, 2, 3, 4]).unwrap(), 0);
        assert_eq!(slice.write(&[5, 6, 7, 8]).unwrap(), 0);
        assert_eq!(slice.write(&[]).unwrap(), 0);
    }
}