// Copyright 2017 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//! Types for volatile access to memory.
//!
//! Two of the core rules for safe Rust are no data races and no aliased mutable references.
//! `VolatileSlice`, along with types that produce it which implement `VolatileMemory`, allows us
//! to sidestep those rules by wrapping pointers that absolutely have to be accessed volatile. Some
//! systems really do need to operate on shared memory and can't have the compiler reordering or
//! eliding accesses because it has no visibility into what other systems are doing with that hunk
//! of memory.
//!
//! For the purposes of maintaining safety, volatile memory has some rules of its own:
//! 1. No references or slices to volatile memory (`&` or `&mut`).
//! 2. Access should always be done with a volatile read or write.
//!
//! The first rule is because having references of any kind to memory considered volatile would
//! violate pointer aliasing. The second is because non-volatile accesses are inherently undefined
//! if done concurrently without synchronization. With volatile access we know that the compiler
//! has not reordered or elided the access.
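//!
//! A minimal sketch of the intended usage, here over a plain local byte buffer standing in for the
//! shared memory:
//!
//! ```
//! use data_model::VolatileSlice;
//!
//! let mut mem = [0u8; 16];
//! let vslice = VolatileSlice::new(&mut mem[..]);
//! // All access goes through volatile writes/reads rather than `&`/`&mut` to the bytes.
//! vslice.write_bytes(0xaa);
//! let mut copy = [0u8; 16];
//! vslice.copy_to(&mut copy[..]);
//! assert!(copy.iter().all(|&b| b == 0xaa));
//! ```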
#![allow(deprecated)]

use std::cmp::min;
use std::marker::PhantomData;
use std::mem::size_of;
use std::ptr::copy;
use std::ptr::read_volatile;
use std::ptr::write_bytes;
use std::ptr::write_volatile;
use std::result;
use std::slice;
use std::usize;

use remain::sorted;
use thiserror::Error;
use zerocopy::AsBytes;
use zerocopy::FromBytes;
use zerocopy::LayoutVerified;

use crate::sys::IoBufMut;
use crate::DataInit;

#[sorted]
#[derive(Error, Eq, PartialEq, Debug)]
pub enum VolatileMemoryError {
    /// `addr` is out of bounds of the volatile memory slice.
    #[error("address 0x{addr:x} is out of bounds")]
    OutOfBounds { addr: usize },
    /// Taking a slice at `base` with `offset` would overflow `usize`.
    #[error("address 0x{base:x} offset by 0x{offset:x} would overflow")]
    Overflow { base: usize, offset: usize },
}

pub type VolatileMemoryResult<T> = result::Result<T, VolatileMemoryError>;

use crate::VolatileMemoryError as Error;
type Result<T> = VolatileMemoryResult<T>;

/// Convenience function for computing `base + offset` which returns
/// `Err(VolatileMemoryError::Overflow)` instead of panicking in the case `base + offset` exceeds
/// `usize::MAX`.
///
/// # Examples
///
/// ```
/// # use data_model::*;
/// # fn get_slice(offset: usize, count: usize) -> VolatileMemoryResult<()> {
///   let mem_end = calc_offset(offset, count)?;
///   if mem_end > 100 {
///       return Err(VolatileMemoryError::OutOfBounds{addr: mem_end});
///   }
/// # Ok(())
/// # }
/// ```
pub fn calc_offset(base: usize, offset: usize) -> Result<usize> {
    match base.checked_add(offset) {
        None => Err(Error::Overflow { base, offset }),
        Some(m) => Ok(m),
    }
}

/// Trait for types that support raw volatile access to their data.
pub trait VolatileMemory {
    /// Gets a slice of memory at `offset` that is `count` bytes in length and supports volatile
    /// access.
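    ///
    /// A minimal sketch using the `VolatileSlice` implementation of this trait:
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn example() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.get_slice(4, 16).map_err(|_| ())?;
    /// assert_eq!(sub.size(), 16);
    /// # Ok(())
    /// # }
    /// ```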
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice>;
}

/// A slice of raw memory that supports volatile access. Like `std::io::IoSliceMut`, this type is
/// guaranteed to be ABI-compatible with `libc::iovec` but unlike `IoSliceMut`, it doesn't
/// automatically deref to `&mut [u8]`.
#[derive(Copy, Clone, Debug)]
#[repr(transparent)]
pub struct VolatileSlice<'a>(IoBufMut<'a>);

impl<'a> VolatileSlice<'a> {
    /// Creates a slice of raw memory that must support volatile access.
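    ///
    /// A minimal example, assuming an ordinary byte array as the backing storage:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert_eq!(vslice.size(), 32);
    /// ```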
    pub fn new(buf: &mut [u8]) -> VolatileSlice {
        VolatileSlice(IoBufMut::new(buf))
    }

    /// Creates a `VolatileSlice` from a pointer and a length.
    ///
    /// # Safety
    ///
    /// In order to use this method safely, `addr` must be valid for reads and writes of `len` bytes
    /// and should live for the entire duration of lifetime `'a`.
    pub unsafe fn from_raw_parts(addr: *mut u8, len: usize) -> VolatileSlice<'a> {
        VolatileSlice(IoBufMut::from_raw_parts(addr, len))
    }

    /// Gets a const pointer to this slice's memory.
    pub fn as_ptr(&self) -> *const u8 {
        self.0.as_ptr()
    }

    /// Gets a mutable pointer to this slice's memory.
    pub fn as_mut_ptr(&self) -> *mut u8 {
        self.0.as_mut_ptr()
    }

    /// Gets the size of this slice.
    pub fn size(&self) -> usize {
        self.0.len()
    }

    /// Advance the starting position of this slice.
    ///
    /// Panics if `count > self.size()`.
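    ///
    /// A short sketch of the effect on `size()`:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.advance(10);
    /// assert_eq!(vslice.size(), 22);
    /// ```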
    pub fn advance(&mut self, count: usize) {
        self.0.advance(count)
    }

    /// Shorten the length of the slice.
    ///
    /// Has no effect if `len > self.size()`.
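    ///
    /// For example, truncating a 32-byte slice down to 5 bytes:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let mut vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.truncate(5);
    /// assert_eq!(vslice.size(), 5);
    /// ```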
    pub fn truncate(&mut self, len: usize) {
        self.0.truncate(len)
    }

    /// Returns this `VolatileSlice` as an `IoBufMut`.
    pub fn as_iobuf(&self) -> &IoBufMut {
        &self.0
    }

    /// Converts a slice of `VolatileSlice`s into a slice of `IoBufMut`s.
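    ///
    /// A minimal sketch; the conversion is only a pointer cast, so the element count is preserved:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut a = [0u8; 4];
    /// let mut b = [0u8; 8];
    /// let slices = [VolatileSlice::new(&mut a[..]), VolatileSlice::new(&mut b[..])];
    /// let iobufs = VolatileSlice::as_iobufs(&slices);
    /// assert_eq!(iobufs.len(), 2);
    /// ```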
    #[allow(clippy::wrong_self_convention)]
    pub fn as_iobufs<'mem, 'slice>(
        iovs: &'slice [VolatileSlice<'mem>],
    ) -> &'slice [IoBufMut<'mem>] {
        // Safe because `VolatileSlice` is ABI-compatible with `IoBufMut`.
        unsafe { slice::from_raw_parts(iovs.as_ptr() as *const IoBufMut, iovs.len()) }
    }

    /// Creates a copy of this slice with the address increased by `count` bytes, and the size
    /// reduced by `count` bytes.
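    ///
    /// A minimal sketch of offsetting into a 32-byte slice:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn example() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let offset_slice = vslice.offset(16).map_err(|_| ())?;
    /// assert_eq!(offset_slice.size(), 16);
    /// # Ok(())
    /// # }
    /// ```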
    pub fn offset(self, count: usize) -> Result<VolatileSlice<'a>> {
        let new_addr = (self.as_mut_ptr() as usize).checked_add(count).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset: count,
            },
        )?;
        let new_size = self
            .size()
            .checked_sub(count)
            .ok_or(VolatileMemoryError::OutOfBounds { addr: new_addr })?;

        // Safe because the memory has the same lifetime and points to a subset of the memory of the
        // original slice.
        unsafe { Ok(VolatileSlice::from_raw_parts(new_addr as *mut u8, new_size)) }
    }

    /// Similar to `get_slice` but the returned slice outlives this slice.
    ///
    /// The returned slice's lifetime is still limited by the underlying data's lifetime.
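    ///
    /// A short sketch, including the out-of-bounds error case:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn example() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let sub = vslice.sub_slice(8, 16).map_err(|_| ())?;
    /// assert_eq!(sub.size(), 16);
    /// assert!(vslice.sub_slice(30, 10).is_err());
    /// # Ok(())
    /// # }
    /// ```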
    pub fn sub_slice(self, offset: usize, count: usize) -> Result<VolatileSlice<'a>> {
        let mem_end = calc_offset(offset, count)?;
        if mem_end > self.size() {
            return Err(Error::OutOfBounds { addr: mem_end });
        }
        let new_addr = (self.as_mut_ptr() as usize).checked_add(offset).ok_or(
            VolatileMemoryError::Overflow {
                base: self.as_mut_ptr() as usize,
                offset,
            },
        )?;

        // Safe because we have verified that the new memory is a subset of the original slice.
        Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
    }

    /// Sets each byte of this slice with the given byte, similar to `memset`.
    ///
    /// The bytes of this slice are accessed in an arbitrary order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_45() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.write_bytes(45);
    /// for &v in &mem[..] {
    ///     assert_eq!(v, 45);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn write_bytes(&self, value: u8) {
        // Safe because the memory is valid and needs only byte alignment.
        unsafe {
            write_bytes(self.as_mut_ptr(), value, self.size());
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// `buf`.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile reads.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let mut buf = [5u8; 16];
    /// vslice.copy_to(&mut buf[..]);
    /// for v in &buf[..] {
    ///     assert_eq!(*v, 0);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to<T>(&self, buf: &mut [T])
    where
        T: DataInit,
    {
        let mut addr = self.as_mut_ptr() as *const u8;
        for v in buf.iter_mut().take(self.size() / size_of::<T>()) {
            // Safe because `take()` limits the iteration so `addr` stays within this slice, and
            // `T: DataInit` allows it to be constructed from arbitrary bytes.
            unsafe {
                *v = read_volatile(addr as *const T);
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Copies `self.size()` or `slice.size()` bytes, whichever is smaller, to `slice`.
    ///
    /// The copies happen in an undefined order.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::{VolatileMemory, VolatileSlice};
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// vslice.copy_to_volatile_slice(vslice.get_slice(16, 16).map_err(|_| ())?);
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_to_volatile_slice(&self, slice: VolatileSlice) {
        // Safe because both pointers point into valid `VolatileSlice` memory and the length is
        // capped at the smaller of the two sizes.
        unsafe {
            copy(
                self.as_mut_ptr() as *const u8,
                slice.as_mut_ptr(),
                min(self.size(), slice.size()),
            );
        }
    }

    /// Copies `self.size()` or `buf.len()` times the size of `T` bytes, whichever is smaller, to
    /// this slice's memory.
    ///
    /// The copy happens from smallest to largest address in `T` sized chunks using volatile writes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// # fn test_write_null() -> Result<(), ()> {
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// let buf = [5u8; 64];
    /// vslice.copy_from(&buf[..]);
    /// let mut copy_buf = [0u32; 4];
    /// vslice.copy_to(&mut copy_buf);
    /// for i in 0..4 {
    ///     assert_eq!(copy_buf[i], 0x05050505);
    /// }
    /// # Ok(())
    /// # }
    /// ```
    pub fn copy_from<T>(&self, buf: &[T])
    where
        T: FromBytes + AsBytes,
    {
        let mut addr = self.as_mut_ptr();
        for v in buf.iter().take(self.size() / size_of::<T>()) {
            // Safe because `take()` limits the iteration so `addr` stays within this slice, and the
            // value written is read out of the byte representation of `v`.
            unsafe {
                write_volatile(
                    addr as *mut T,
                    LayoutVerified::<_, T>::new(v.as_bytes()).unwrap().read(),
                );
                addr = addr.add(size_of::<T>());
            }
        }
    }

    /// Returns whether all bytes in this slice are zero or not.
    ///
    /// This is optimized for a [VolatileSlice] whose memory is 16-byte aligned.
    ///
    /// TODO(b/274840085): Use SIMD for better performance.
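    ///
    /// A minimal illustration over a plain local buffer:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut mem = [0u8; 32];
    /// let vslice = VolatileSlice::new(&mut mem[..]);
    /// assert!(vslice.is_all_zero());
    /// vslice.write_bytes(1);
    /// assert!(!vslice.is_all_zero());
    /// ```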
    pub fn is_all_zero(&self) -> bool {
        const MASK_4BIT: usize = 0x0f;
        let head_addr = self.as_ptr() as usize;
        // Round up to the next multiple of 16.
        let aligned_head_addr = (head_addr + MASK_4BIT) & !MASK_4BIT;
        let tail_addr = head_addr + self.size();
        // Round down to a multiple of 16.
        let aligned_tail_addr = tail_addr & !MASK_4BIT;

        // Check 16 bytes at once. The addresses should be 16-byte aligned for better performance.
        // SAFETY: Each aligned_addr is within the VolatileSlice.
        if (aligned_head_addr..aligned_tail_addr)
            .step_by(16)
            .any(|aligned_addr| unsafe { *(aligned_addr as *const u128) } != 0)
        {
            return false;
        }

        if head_addr == aligned_head_addr && tail_addr == aligned_tail_addr {
            // If head_addr and tail_addr are aligned, we can skip the unaligned part which contains
            // at least 2 conditional branches.
            true
        } else {
            // Check the unaligned parts.
            // SAFETY: The ranges [head_addr, aligned_head_addr) and [aligned_tail_addr, tail_addr)
            // are within the VolatileSlice.
            unsafe {
                is_all_zero_naive(head_addr, aligned_head_addr)
                    && is_all_zero_naive(aligned_tail_addr, tail_addr)
            }
        }
    }
}

/// Check whether every byte is zero.
///
/// This checks byte by byte.
///
/// ## Safety
///
/// * `head_addr` <= `tail_addr`
/// * Bytes between `head_addr` and `tail_addr` must be valid to access.
unsafe fn is_all_zero_naive(head_addr: usize, tail_addr: usize) -> bool {
    (head_addr..tail_addr).all(|addr| *(addr as *const u8) == 0)
}

impl<'a> VolatileMemory for VolatileSlice<'a> {
    fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
        self.sub_slice(offset, count)
    }
}

impl PartialEq<VolatileSlice<'_>> for VolatileSlice<'_> {
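    /// Two slices compare equal when their sizes match and their contents are byte-for-byte
    /// identical. A minimal sketch:
    ///
    /// ```
    /// # use data_model::VolatileSlice;
    /// let mut a = [1u8; 8];
    /// let mut b = [1u8; 8];
    /// assert_eq!(VolatileSlice::new(&mut a[..]), VolatileSlice::new(&mut b[..]));
    /// ```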
    fn eq(&self, other: &VolatileSlice) -> bool {
        let size = self.size();
        if size != other.size() {
            return false;
        }

        // SAFETY: We pass pointers into valid VolatileSlice regions, and size is checked above.
        let cmp = unsafe { libc::memcmp(self.as_ptr() as _, other.as_ptr() as _, size) };

        cmp == 0
    }
}

/// The `PartialEq` implementation for `VolatileSlice` is reflexive, symmetric, and transitive.
impl Eq for VolatileSlice<'_> {}

/// A memory location that supports volatile access of a `T`.
///
/// # Examples
///
/// ```
/// # use data_model::VolatileRef;
///   let mut v = 5u32;
///   assert_eq!(v, 5);
///   let v_ref = unsafe { VolatileRef::new(&mut v as *mut u32) };
///   assert_eq!(v_ref.load(), 5);
///   v_ref.store(500);
///   assert_eq!(v, 500);
/// ```
#[deprecated(
    note = "This is an unsafe abstraction. Users should use alternatives such as read_obj() and
    write_obj() that do not create a long-lived mutable reference that could easily alias other
    slices"
)]
#[derive(Debug)]
pub struct VolatileRef<'a, T: DataInit>
where
    T: 'a,
{
    addr: *mut T,
    phantom: PhantomData<&'a T>,
}

impl<'a, T: DataInit> VolatileRef<'a, T> {
    /// Creates a reference to raw memory that must support volatile access of `T` sized chunks.
    ///
    /// # Safety
    /// To use this safely, the caller must guarantee that the memory at `addr` is big enough for a
    /// `T` and is available for the duration of the lifetime of the new `VolatileRef`. The caller
    /// must also guarantee that all other users of the given chunk of memory are using volatile
    /// accesses.
    pub unsafe fn new(addr: *mut T) -> VolatileRef<'a, T> {
        VolatileRef {
            addr,
            phantom: PhantomData,
        }
    }

    /// Gets the size of the referenced `T` in bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # use std::mem::size_of;
    /// # use data_model::VolatileRef;
    ///   let v_ref = unsafe { VolatileRef::new(0 as *mut u32) };
    ///   assert_eq!(v_ref.size(), size_of::<u32>());
    /// ```
    pub fn size(&self) -> usize {
        size_of::<T>()
    }

    /// Does a volatile write of the value `v` to the address of this ref.
    #[inline(always)]
    pub fn store(&self, v: T) {
        unsafe { write_volatile(self.addr, v) };
    }

    /// Does a volatile read of the value at the address of this ref.
    #[inline(always)]
    pub fn load(&self) -> T {
        // For the purposes of demonstrating why read_volatile is necessary, try replacing the code
        // in this function with the commented code below and running `cargo test --release`.
        // unsafe { *(self.addr as *const T) }
        unsafe { read_volatile(self.addr) }
    }
}

#[cfg(test)]
mod tests {
    use std::sync::Arc;
    use std::sync::Barrier;
    use std::thread::spawn;

    use super::*;

    #[derive(Clone)]
    struct VecMem {
        mem: Arc<Vec<u8>>,
    }

    impl VecMem {
        fn new(size: usize) -> VecMem {
            let mut mem = Vec::new();
            mem.resize(size, 0);
            VecMem { mem: Arc::new(mem) }
        }
    }

    impl VolatileMemory for VecMem {
        fn get_slice(&self, offset: usize, count: usize) -> Result<VolatileSlice> {
            let mem_end = calc_offset(offset, count)?;
            if mem_end > self.mem.len() {
                return Err(Error::OutOfBounds { addr: mem_end });
            }

            let new_addr = (self.mem.as_ptr() as usize).checked_add(offset).ok_or(
                VolatileMemoryError::Overflow {
                    base: self.mem.as_ptr() as usize,
                    offset,
                },
            )?;

            Ok(unsafe { VolatileSlice::from_raw_parts(new_addr as *mut u8, count) })
        }
    }

    #[test]
    fn observe_mutate() {
        let a = VecMem::new(1);
        let a_clone = a.clone();
        a.get_slice(0, 1).unwrap().write_bytes(99);

        let start_barrier = Arc::new(Barrier::new(2));
        let thread_start_barrier = start_barrier.clone();
        let end_barrier = Arc::new(Barrier::new(2));
        let thread_end_barrier = end_barrier.clone();
        spawn(move || {
            thread_start_barrier.wait();
            a_clone.get_slice(0, 1).unwrap().write_bytes(0);
            thread_end_barrier.wait();
        });

        let mut byte = [0u8; 1];
        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 99);

        start_barrier.wait();
        end_barrier.wait();

        a.get_slice(0, 1).unwrap().copy_to(&mut byte);
        assert_eq!(byte[0], 0);
    }

    #[test]
    fn slice_size() {
        let a = VecMem::new(100);
        let s = a.get_slice(0, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = a.get_slice(34, 27).unwrap();
        assert_eq!(s.size(), 27);

        let s = s.get_slice(20, 5).unwrap();
        assert_eq!(s.size(), 5);
    }

    #[test]
    fn slice_overflow_error() {
        use std::usize::MAX;
        let a = VecMem::new(1);
        let res = a.get_slice(MAX, 1).unwrap_err();
        assert_eq!(
            res,
            Error::Overflow {
                base: MAX,
                offset: 1,
            }
        );
    }

    #[test]
    fn slice_oob_error() {
        let a = VecMem::new(100);
        a.get_slice(50, 50).unwrap();
        let res = a.get_slice(55, 50).unwrap_err();
        assert_eq!(res, Error::OutOfBounds { addr: 105 });
    }

    #[test]
    fn is_all_zero_16bytes_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1024).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_head_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1, 1).unwrap().write_bytes(0);
        a.get_slice(129, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_tail_not_aligned() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(0, 1023).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(1023, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
        a.get_slice(1022, 1).unwrap().write_bytes(0);
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(!slice.is_all_zero());
    }

    #[test]
    fn is_all_zero_no_aligned_16bytes() {
        let a = VecMem::new(1024);
        let slice = a.get_slice(1, 16).unwrap();

        assert!(slice.is_all_zero());
        a.get_slice(0, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
        for i in 1..17 {
            a.get_slice(i, 1).unwrap().write_bytes(1);
            assert!(!slice.is_all_zero());
            a.get_slice(i, 1).unwrap().write_bytes(0);
        }
        a.get_slice(17, 1).unwrap().write_bytes(1);
        assert!(slice.is_all_zero());
    }
}