1 // Copyright 2018 The Fuchsia Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //! Utilities for safe zero-copy parsing and serialization.
6 //!
7 //! This crate provides utilities which make it easy to perform zero-copy
8 //! parsing and serialization by allowing zero-copy conversion to/from byte
9 //! slices.
10 //!
11 //! This is enabled by three core marker traits, each of which can be derived
12 //! (e.g., `#[derive(FromBytes)]`):
13 //! - [`FromBytes`] indicates that a type may safely be converted from an
14 //! arbitrary byte sequence
15 //! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
16 //! sequence
17 //! - [`Unaligned`] indicates that a type's alignment requirement is 1
18 //!
19 //! Types which implement a subset of these traits can then be converted to/from
20 //! byte sequences with little to no runtime overhead.
21 //!
22 //! Note that these traits are ignorant of byte order. For byte order-aware
23 //! types, see the [`byteorder`] module.
24 //!
25 //! # Features
26 //!
27 //! `alloc`: By default, `zerocopy` is `no_std`. When the `alloc` feature is
28 //! enabled, the `alloc` crate is added as a dependency, and some
29 //! allocation-related functionality is added.
30 //!
31 //! `simd`: When the `simd` feature is enabled, `FromBytes` and `AsBytes` impls
32 //! are emitted for all stable SIMD types which exist on the target platform.
33 //! Note that the layout of SIMD types is not yet stabilized, so these impls may
34 //! be removed in the future if layout changes make them invalid. For more
35 //! information, see the Unsafe Code Guidelines Reference page on the [Layout of
36 //! packed SIMD vectors][simd-layout].
37 //!
38 //! `simd-nightly`: Enables the `simd` feature and adds support for SIMD types
39 //! which are only available on nightly. Since these types are unstable, support
40 //! for any type may be removed at any point in the future.
41 //!
42 //! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
43
44 #![deny(missing_docs)]
45 #![cfg_attr(not(test), no_std)]
46 #![recursion_limit = "2048"]
47
48 pub mod byteorder;
49
50 pub use crate::byteorder::*;
51 pub use zerocopy_derive::*;
52
53 use core::cell::{Ref, RefMut};
54 use core::cmp::Ordering;
55 use core::fmt::{self, Debug, Display, Formatter};
56 use core::marker::PhantomData;
57 use core::mem;
58 use core::ops::{Deref, DerefMut};
59 use core::ptr;
60 use core::slice;

// These imports are needed by the `alloc`-gated constructors below
// (`new_box_zeroed` and `new_box_slice_zeroed`).
#[cfg(any(test, feature = "alloc"))]
extern crate alloc;
#[cfg(any(test, feature = "alloc"))]
use alloc::boxed::Box;
#[cfg(any(test, feature = "alloc"))]
use core::alloc::Layout;
#[cfg(any(test, feature = "alloc"))]
use core::mem::{align_of, size_of};
#[cfg(any(test, feature = "alloc"))]
use core::ptr::NonNull;
61
62 // This is a hack to allow derives of FromBytes, AsBytes, and Unaligned to work
63 // in this crate. They assume that zerocopy is linked as an extern crate, so
64 // they access items from it as `zerocopy::Xxx`. This makes that still work.
65 mod zerocopy {
66 pub use crate::*;
67 }
68
69 // implement an unsafe trait for a range of container types
70 macro_rules! impl_for_composite_types {
71 ($trait:ident) => {
72 unsafe impl<T> $trait for PhantomData<T> {
73 fn only_derive_is_allowed_to_implement_this_trait()
74 where
75 Self: Sized,
76 {
77 }
78 }
79 unsafe impl<T: $trait> $trait for [T] {
80 fn only_derive_is_allowed_to_implement_this_trait()
81 where
82 Self: Sized,
83 {
84 }
85 }
86 unsafe impl $trait for () {
87 fn only_derive_is_allowed_to_implement_this_trait()
88 where
89 Self: Sized,
90 {
91 }
92 }
93 unsafe impl<T: $trait, const N: usize> $trait for [T; N] {
94 fn only_derive_is_allowed_to_implement_this_trait()
95 where
96 Self: Sized,
97 {
98 }
99 }
100 };
101 }
102
103 /// Implements `$trait` for one or more `$type`s.
104 macro_rules! impl_for_types {
105 ($trait:ident, $type:ty) => (
106 unsafe impl $trait for $type {
107 fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
108 }
109 );
110 ($trait:ident, $type:ty, $($types:ty),*) => (
111 unsafe impl $trait for $type {
112 fn only_derive_is_allowed_to_implement_this_trait() where Self: Sized {}
113 }
114 impl_for_types!($trait, $($types),*);
115 );
116 }
117
118 /// Implements `$trait` for all signed and unsigned primitive types.
119 macro_rules! impl_for_primitives {
120 ($trait:ident) => {
121 impl_for_types!(
122 $trait, u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64
123 );
124 };
125 }
126
127 /// Types for which any byte pattern is valid.
128 ///
129 /// WARNING: Do not implement this trait yourself! Instead, use
130 /// `#[derive(FromBytes)]`.
131 ///
132 /// `FromBytes` types can safely be deserialized from an untrusted sequence of
133 /// bytes because any byte sequence corresponds to a valid instance of the type.
134 ///
135 /// `FromBytes` is ignorant of byte order. For byte order-aware types, see the
136 /// [`byteorder`] module.
137 ///
138 /// # Safety
139 ///
140 /// If `T: FromBytes`, then unsafe code may assume that it is sound to treat any
141 /// initialized sequence of bytes of length `size_of::<T>()` as a `T`. If a type
142 /// is marked as `FromBytes` which violates this contract, it may cause
143 /// undefined behavior.
144 ///
145 /// If a type has the following properties, then it is safe to implement
146 /// `FromBytes` for that type:
147 /// - If the type is a struct:
148 /// - All of its fields must implement `FromBytes`
149 /// - If the type is an enum:
150 /// - It must be a C-like enum (meaning that all variants have no fields)
151 /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
152 /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
153 /// - The maximum number of discriminants must be used (so that every possible
154 /// bit pattern is a valid one). Be very careful when using the `C`,
155 /// `usize`, or `isize` representations, as their size is
156 /// platform-dependent.
157 ///
158 /// # Rationale
159 ///
160 /// ## Why isn't an explicit representation required for structs?
161 ///
162 /// Per the [Rust reference][reference]:
163 /// > The representation of a type can change the padding between fields, but
164 /// > does not change the layout of the fields themselves.
165 ///
166 /// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
167 ///
168 /// Since the layout of structs only consists of padding bytes and field bytes,
169 /// a struct is soundly `FromBytes` if:
170 /// 1. its padding is soundly `FromBytes`, and
171 /// 2. its fields are soundly `FromBytes`.
172 ///
173 /// The first condition always holds: padding bytes do not have any validity
174 /// constraints. A [discussion] of this point in the Unsafe Code
175 /// Guidelines Working Group concluded that it would be virtually unimaginable
176 /// for future versions of rustc to add validity constraints to padding bytes.
177 ///
178 /// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
179 ///
180 /// Whether a struct is soundly `FromBytes` therefore solely depends on whether
181 /// its fields are `FromBytes`.
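///
/// # Example
///
/// A minimal sketch of deriving `FromBytes` and reading a value from raw
/// bytes (the `PacketHeader` type here is purely illustrative):
///
/// ```rust
/// use zerocopy::FromBytes;
///
/// #[derive(FromBytes)]
/// #[repr(C)]
/// struct PacketHeader {
///     version: u8,
///     flags: u8,
///     length: [u8; 2],
/// }
///
/// // Any initialized sequence of 4 bytes is a valid `PacketHeader`.
/// let bytes = [1u8, 0, 0, 16];
/// let header = PacketHeader::read_from(&bytes[..]).unwrap();
/// assert_eq!(header.version, 1);
/// assert_eq!(header.length, [0, 16]);
/// ```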
182 pub unsafe trait FromBytes {
183 // NOTE: The Self: Sized bound makes it so that FromBytes is still object
184 // safe.
185 #[doc(hidden)]
186 fn only_derive_is_allowed_to_implement_this_trait()
187 where
188 Self: Sized;
189
190 /// Reads a copy of `Self` from `bytes`.
191 ///
192 /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
193 fn read_from<B: ByteSlice>(bytes: B) -> Option<Self>
194 where
195 Self: Sized,
196 {
197 let lv = LayoutVerified::<_, Unalign<Self>>::new_unaligned(bytes)?;
198 Some(lv.read().into_inner())
199 }
200
201 /// Reads a copy of `Self` from the prefix of `bytes`.
202 ///
203 /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
204 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
205 /// `None`.
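///
/// For example, a fixed-size value can be read from the front of a larger
/// buffer, leaving the rest untouched (a rough sketch; the buffer contents
/// are illustrative):
///
/// ```rust
/// use zerocopy::FromBytes;
///
/// // A 10-byte buffer; only the first 4 bytes are read.
/// let bytes = [0xff_u8, 0xff, 0xff, 0xff, 0, 1, 2, 3, 4, 5];
/// let value = u32::read_from_prefix(&bytes[..]).unwrap();
/// assert_eq!(value, u32::MAX);
/// ```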
206 fn read_from_prefix<B: ByteSlice>(bytes: B) -> Option<Self>
207 where
208 Self: Sized,
209 {
210 let (lv, _suffix) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)?;
211 Some(lv.read().into_inner())
212 }
213
214 /// Reads a copy of `Self` from the suffix of `bytes`.
215 ///
216 /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
217 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
218 /// `None`.
219 fn read_from_suffix<B: ByteSlice>(bytes: B) -> Option<Self>
220 where
221 Self: Sized,
222 {
223 let (_prefix, lv) = LayoutVerified::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)?;
224 Some(lv.read().into_inner())
225 }
226
227 /// Creates an instance of `Self` from zeroed bytes.
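///
/// For example (a trivial sketch):
///
/// ```rust
/// use zerocopy::FromBytes;
///
/// // Equivalent to `0u64`, but works for any `FromBytes` type.
/// let x: u64 = u64::new_zeroed();
/// assert_eq!(x, 0);
/// ```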
228 fn new_zeroed() -> Self
229 where
230 Self: Sized,
231 {
232 unsafe {
233 // Safe because FromBytes says all bit patterns (including zeroes)
234 // are legal.
235 core::mem::zeroed()
236 }
237 }
238
239 /// Creates a `Box<Self>` from zeroed bytes.
240 ///
241 /// This function is useful for allocating large values on the heap and
242 /// zero-initializing them, without ever creating a temporary instance of
243 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
244 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
245 /// storing `[u8; 1048576]` in a temporary variable on the stack.
246 ///
247 /// On systems that use a heap implementation that supports allocating from
248 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
249 /// have performance benefits.
250 ///
251 /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
252 /// container types without reallocation.
253 ///
254 /// # Panics
255 ///
256 /// Panics if allocation of `size_of::<Self>()` bytes fails.
257 #[cfg(any(test, feature = "alloc"))]
258 fn new_box_zeroed() -> Box<Self>
259 where
260 Self: Sized,
261 {
262 // If T is a ZST, then return a proper boxed instance of it. There is no
263 // allocation, but Box does require a correct dangling pointer.
264 let layout = Layout::new::<Self>();
265 if layout.size() == 0 {
266 return Box::new(Self::new_zeroed());
267 }
268
269 unsafe {
270 let ptr = alloc::alloc::alloc_zeroed(layout) as *mut Self;
271 if ptr.is_null() {
272 alloc::alloc::handle_alloc_error(layout);
273 }
274 Box::from_raw(ptr)
275 }
276 }
277
278 /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
279 ///
280 /// This function is useful for allocating large values of `[Self]` on the
281 /// heap and zero-initializing them, without ever creating a temporary
282 /// instance of `[Self; _]` on the stack. For example,
283 /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
284 /// the heap; it does not require storing the slice on the stack.
285 ///
286 /// On systems that use a heap implementation that supports allocating from
287 /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
288 /// benefits.
289 ///
290 /// If `Self` is a zero-sized type, then this function will return a
291 /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
292 /// actual information, but its `len()` property will report the correct
293 /// value.
294 ///
295 /// # Panics
296 ///
297 /// * Panics if `size_of::<Self>() * len` overflows.
298 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
299 #[cfg(any(test, feature = "alloc"))]
300 fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
301 where
302 Self: Sized,
303 {
304 // TODO(https://fxbug.dev/80757): Use Layout::repeat() when `alloc_layout_extra` is stabilized
305 // This will intentionally panic if it overflows.
306 unsafe {
307 // `from_size_align_unchecked()` requires a valid alignment, which
308 // `align_of::<Self>()` always is, and the size is computed with
309 // `checked_mul(len).unwrap()`, which panics (rather than wrapping) on overflow.
310 let layout = Layout::from_size_align_unchecked(
311 size_of::<Self>().checked_mul(len).unwrap(),
312 align_of::<Self>(),
313 );
314 if layout.size() != 0 {
315 let ptr = alloc::alloc::alloc_zeroed(layout) as *mut Self;
316 if ptr.is_null() {
317 alloc::alloc::handle_alloc_error(layout);
318 }
319 Box::from_raw(core::slice::from_raw_parts_mut(ptr, len))
320 } else {
321 // Box<[T]> does not allocate when T is zero-sized or when len
322 // is zero, but it does require a non-null dangling pointer for
323 // its allocation.
324 Box::from_raw(core::slice::from_raw_parts_mut(
325 NonNull::<Self>::dangling().as_ptr(),
326 len,
327 ))
328 }
329 }
330 }
331 }
332
333 /// Types which are safe to treat as an immutable byte slice.
334 ///
335 /// WARNING: Do not implement this trait yourself! Instead, use
336 /// `#[derive(AsBytes)]`.
337 ///
338 /// `AsBytes` types can be safely viewed as a slice of bytes. In particular,
339 /// this means that, in any valid instance of the type, none of the bytes of the
340 /// instance are uninitialized. This precludes the following types:
341 /// - Structs with internal padding
342 /// - Unions whose fields do not all have the same size
343 ///
344 /// `AsBytes` is ignorant of byte order. For byte order-aware types, see the
345 /// [`byteorder`] module.
346 ///
347 /// # Custom Derive Errors
348 ///
349 /// Due to the way that the custom derive for `AsBytes` is implemented, you may
350 /// get an error like this:
351 ///
352 /// ```text
353 /// error[E0080]: evaluation of constant value failed
354 /// --> lib.rs:1:10
355 /// |
356 /// 1 | #[derive(AsBytes)]
357 /// | ^^^^^^^ attempt to divide by zero
358 /// ```
359 ///
360 /// This error means that the type being annotated has padding bytes, which is
361 /// illegal for `AsBytes` types. Consider either adding explicit struct fields
362 /// where those padding bytes would be or using `#[repr(packed)]`.
363 ///
364 /// # Safety
365 ///
366 /// If `T: AsBytes`, then unsafe code may assume that it is sound to treat any
367 /// instance of the type as an immutable `[u8]` of length `size_of::<T>()`. If a
368 /// type is marked as `AsBytes` which violates this contract, it may cause
369 /// undefined behavior.
370 ///
371 /// If a type has the following properties, then it is safe to implement
372 /// `AsBytes` for that type:
373 /// - If the type is a struct:
374 /// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
375 /// or `repr(packed)`).
376 /// - All of its fields must be `AsBytes`
377 /// - Its layout must have no padding. This is always true for
378 /// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
379 /// algorithm described in the [Rust Reference].
380 /// - If the type is an enum:
381 /// - It must be a C-like enum (meaning that all variants have no fields)
382 /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
383 /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
384 ///
385 /// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
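///
/// # Example
///
/// A minimal sketch of deriving `AsBytes` and viewing a value as raw bytes
/// (the `Point` type here is purely illustrative; note that it has no
/// padding):
///
/// ```rust
/// use zerocopy::AsBytes;
///
/// #[derive(AsBytes)]
/// #[repr(C)]
/// struct Point {
///     x: u16,
///     y: u16,
/// }
///
/// let p = Point { x: 1, y: 256 };
/// // Four bytes, in the target's native byte order.
/// assert_eq!(p.as_bytes().len(), 4);
/// ```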
386 pub unsafe trait AsBytes {
387 #[doc(hidden)]
388 fn only_derive_is_allowed_to_implement_this_trait()
389 where
390 Self: Sized;
391
392 /// Gets the bytes of this value.
393 ///
394 /// `as_bytes` provides access to the bytes of this value as an immutable
395 /// byte slice.
396 fn as_bytes(&self) -> &[u8] {
397 unsafe {
398 // NOTE: This function does not have a Self: Sized bound.
399 // size_of_val works for unsized values too.
400 let len = mem::size_of_val(self);
401 slice::from_raw_parts(self as *const Self as *const u8, len)
402 }
403 }
404
405 /// Gets the bytes of this value mutably.
406 ///
407 /// `as_bytes_mut` provides access to the bytes of this value as a mutable
408 /// byte slice.
409 fn as_bytes_mut(&mut self) -> &mut [u8]
410 where
411 Self: FromBytes,
412 {
413 unsafe {
414 // NOTE: This function does not have a Self: Sized bound.
415 // size_of_val works for unsized values too.
416 let len = mem::size_of_val(self);
417 slice::from_raw_parts_mut(self as *mut Self as *mut u8, len)
418 }
419 }
420
421 /// Writes a copy of `self` to `bytes`.
422 ///
423 /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
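///
/// For example, a value can be serialized into an exactly-sized buffer like
/// so (a rough sketch):
///
/// ```rust
/// use zerocopy::AsBytes;
///
/// let mut buf = [0u8; 8];
/// // Succeeds because `buf.len() == size_of::<u64>()`.
/// u64::MAX.write_to(&mut buf[..]).unwrap();
/// assert_eq!(buf, [0xff; 8]);
/// ```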
424 fn write_to<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
425 if bytes.len() != mem::size_of_val(self) {
426 return None;
427 }
428
429 bytes.copy_from_slice(self.as_bytes());
430 Some(())
431 }
432
433 /// Writes a copy of `self` to the prefix of `bytes`.
434 ///
435 /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
436 /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
437 fn write_to_prefix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
438 let size = mem::size_of_val(self);
439 if bytes.len() < size {
440 return None;
441 }
442
443 bytes[..size].copy_from_slice(self.as_bytes());
444 Some(())
445 }
446
447 /// Writes a copy of `self` to the suffix of `bytes`.
448 ///
449 /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes
450 /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
451 fn write_to_suffix<B: ByteSliceMut>(&self, mut bytes: B) -> Option<()> {
452 let start = bytes.len().checked_sub(mem::size_of_val(self))?;
453 bytes[start..].copy_from_slice(self.as_bytes());
454 Some(())
455 }
456 }
457
458 // Special case for bool (it is not included in `impl_for_primitives!`).
459 impl_for_types!(AsBytes, bool);
460
461 impl_for_primitives!(FromBytes);
462 impl_for_primitives!(AsBytes);
463 impl_for_composite_types!(FromBytes);
464 impl_for_composite_types!(AsBytes);
465
466 /// Types with no alignment requirement.
467 ///
468 /// WARNING: Do not implement this trait yourself! Instead, use
469 /// `#[derive(Unaligned)]`.
470 ///
471 /// If `T: Unaligned`, then `align_of::<T>() == 1`.
472 ///
473 /// # Safety
474 ///
475 /// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
476 /// reference to `T` at any memory location regardless of alignment. If a type
477 /// is marked as `Unaligned` which violates this contract, it may cause
478 /// undefined behavior.
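///
/// # Example
///
/// A rough sketch of a type with no alignment requirement (the `Flags` type
/// is purely illustrative):
///
/// ```rust
/// use zerocopy::Unaligned;
///
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct Flags {
///     bits: u8,
/// }
///
/// let f = Flags { bits: 0b0000_0001 };
/// assert_eq!(f.bits, 1);
/// assert_eq!(core::mem::align_of::<Flags>(), 1);
/// ```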
479 pub unsafe trait Unaligned {
480 // NOTE: The Self: Sized bound makes it so that Unaligned is still object
481 // safe.
482 #[doc(hidden)]
483 fn only_derive_is_allowed_to_implement_this_trait()
484 where
485 Self: Sized;
486 }
487
488 impl_for_types!(Unaligned, u8, i8);
489 impl_for_composite_types!(Unaligned);
490
491 // SIMD support
492 //
493 // Per the Unsafe Code Guidelines Reference [1]:
494 //
495 // Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs
496 // containing `N` elements of type `T` where `N` is a power-of-two and the
497 // size and alignment requirements of `T` are equal:
498 //
499 // ```rust
500 // #[repr(simd)]
501 // struct Vector<T, N>(T_0, ..., T_(N - 1));
502 // ```
503 //
504 // ...
505 //
506 // The size of `Vector` is `N * size_of::<T>()` and its alignment is an
507 // implementation-defined function of `T` and `N` greater than or equal to
508 // `align_of::<T>()`.
509 //
510 // ...
511 //
512 // Vector elements are laid out in source field order, enabling random access
513 // to vector elements by reinterpreting the vector as an array:
514 //
515 // ```rust
516 // union U {
517 // vec: Vector<T, N>,
518 // arr: [T; N]
519 // }
520 //
521 // assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>());
522 // assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>());
523 //
524 // unsafe {
525 // let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) };
526 //
527 // assert_eq!(u.vec.0, u.arr[0]);
528 // // ...
529 // assert_eq!(u.vec.(N - 1), u.arr[N - 1]);
530 // }
531 // ```
532 //
533 // Given this background, we can observe that:
534 // - The size and bit pattern requirements of a SIMD type are equivalent to the
535 // equivalent array type. Thus, for any SIMD type whose primitive `T` is
536 // `FromBytes`, that SIMD type is also `FromBytes`. The same holds for
537 // `AsBytes`.
538 // - Since no upper bound is placed on the alignment, no SIMD type can be
539 // guaranteed to be `Unaligned`.
540 //
541 // Also per [1]:
542 //
543 // This chapter represents the consensus from issue #38. The statements in
544 // here are not (yet) "guaranteed" not to change until an RFC ratifies them.
545 //
546 // See issue #38 [2]. While this behavior is not technically guaranteed, the
547 // likelihood that the behavior will change such that SIMD types are no longer
548 // `FromBytes` or `AsBytes` is next to zero, as that would defeat the entire
549 // purpose of SIMD types. Nonetheless, we put this behavior behind the `simd`
550 // Cargo feature, which requires consumers to opt into this stability hazard.
551 //
552 // [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
553 // [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
554 #[cfg(feature = "simd")]
555 mod simd {
556 /// Defines a module which implements `FromBytes` and `AsBytes` for a set of
557 /// types from a module in `core::arch`.
558 ///
559 /// `$arch` is both the name of the defined module and the name of the
560 /// module in `core::arch`, and `$typ` is the list of items from that module
561 /// to implement `FromBytes` and `AsBytes` for.
562 macro_rules! simd_arch_mod {
563 ($arch:ident, $($typ:ident),*) => {
564 mod $arch {
565 use core::arch::$arch::{$($typ),*};
566
567 use crate::*;
568
569 impl_for_types!(FromBytes, $($typ),*);
570 impl_for_types!(AsBytes, $($typ),*);
571 }
572 };
573 }
574
575 #[cfg(target_arch = "x86")]
576 simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
577 #[cfg(target_arch = "x86_64")]
578 simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
579 #[cfg(target_arch = "wasm32")]
580 simd_arch_mod!(wasm32, v128);
581 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
582 simd_arch_mod!(
583 powerpc,
584 vector_bool_long,
585 vector_double,
586 vector_signed_long,
587 vector_unsigned_long
588 );
589 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
590 simd_arch_mod!(
591 powerpc64,
592 vector_bool_long,
593 vector_double,
594 vector_signed_long,
595 vector_unsigned_long
596 );
597 #[cfg(all(feature = "simd-nightly", target_arch = "aarch64"))]
598 #[rustfmt::skip]
599 simd_arch_mod!(
600 aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
601 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
602 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
603 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
604 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
605 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
606 uint64x1_t, uint64x2_t
607 );
608 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
609 #[rustfmt::skip]
610 simd_arch_mod!(
611 arm, float32x2_t, float32x4_t, int8x4_t, int8x8_t, int8x8x2_t, int8x8x3_t, int8x8x4_t,
612 int8x16_t, int16x2_t, int16x4_t, int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t,
613 poly8x8_t, poly8x8x2_t, poly8x8x3_t, poly8x8x4_t, poly8x16_t, poly16x4_t, poly16x8_t,
614 poly64x1_t, poly64x2_t, uint8x4_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t,
615 uint8x16_t, uint16x2_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t, uint64x1_t,
616 uint64x2_t
617 );
618 }
619
620 /// A type with no alignment requirement.
621 ///
622 /// A `Unalign` wraps a `T`, removing any alignment requirement. `Unalign<T>`
623 /// has the same size and ABI as `T`, but not necessarily the same alignment.
624 /// This is useful if a type with an alignment requirement needs to be read from
625 /// a chunk of memory which provides no alignment guarantees.
626 ///
627 /// Since `Unalign` has no alignment requirement, the inner `T` may not be
628 /// properly aligned in memory, and so `Unalign` provides no way of getting a
629 /// reference to the inner `T`. Instead, the `T` may only be obtained by value
630 /// (see [`get`] and [`into_inner`]).
631 ///
632 /// [`get`]: Unalign::get
633 /// [`into_inner`]: Unalign::into_inner
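///
/// # Example
///
/// A short sketch of reading a `u32` from an arbitrarily-aligned position in
/// a byte buffer by going through `Unalign` (the buffer contents are
/// illustrative):
///
/// ```rust
/// use zerocopy::{FromBytes, Unalign};
///
/// let bytes = [0u8, 1, 2, 3, 4];
/// // `&bytes[1..]` has no particular alignment, but `Unalign<u32>` does not
/// // require any.
/// let unaligned = Unalign::<u32>::read_from(&bytes[1..]).unwrap();
/// let value: u32 = unaligned.get();
/// assert_eq!(value.to_ne_bytes(), [1, 2, 3, 4]);
/// ```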
634 #[derive(FromBytes, Unaligned, Copy)]
635 #[repr(C, packed)]
636 pub struct Unalign<T>(T);
637
638 // Note that `Unalign: Clone` only if `T: Copy`. Since the inner `T` may not be
639 // aligned, there's no way to safely call `T::clone`, and so a `T: Clone` bound
640 // is not sufficient to implement `Clone` for `Unalign`.
641 impl<T: Copy> Clone for Unalign<T> {
642 fn clone(&self) -> Unalign<T> {
643 *self
644 }
645 }
646
647 impl<T> Unalign<T> {
648 /// Constructs a new `Unalign`.
649 pub fn new(val: T) -> Unalign<T> {
650 Unalign(val)
651 }
652
653 /// Consumes `self`, returning the inner `T`.
654 pub fn into_inner(self) -> T {
655 let Unalign(val) = self;
656 val
657 }
658
659 /// Gets an unaligned raw pointer to the inner `T`.
660 ///
661 /// # Safety
662 ///
663 /// The returned raw pointer is not necessarily aligned to
664 /// `align_of::<T>()`. Most functions which operate on raw pointers require
665 /// those pointers to be aligned, so calling those functions with the result
666 /// of `get_ptr` will be undefined behavior if alignment is not guaranteed
667 /// using some out-of-band mechanism. In general, the only functions which
668 /// are safe to call with this pointer are those which are explicitly
669 /// documented as being sound to use with an unaligned pointer, such as
670 /// [`read_unaligned`].
671 ///
672 /// [`read_unaligned`]: core::ptr::read_unaligned
673 pub fn get_ptr(&self) -> *const T {
674 ptr::addr_of!(self.0)
675 }
676
677 /// Gets an unaligned mutable raw pointer to the inner `T`.
678 ///
679 /// # Safety
680 ///
681 /// The returned raw pointer is not necessarily aligned to
682 /// `align_of::<T>()`. Most functions which operate on raw pointers require
683 /// those pointers to be aligned, so calling those functions with the result
684 /// of `get_mut_ptr` will be undefined behavior if alignment is not guaranteed
685 /// using some out-of-band mechanism. In general, the only functions which
686 /// are safe to call with this pointer are those which are explicitly
687 /// documented as being sound to use with an unaligned pointer, such as
688 /// [`read_unaligned`].
689 ///
690 /// [`read_unaligned`]: core::ptr::read_unaligned
691 pub fn get_mut_ptr(&mut self) -> *mut T {
692 ptr::addr_of_mut!(self.0)
693 }
694 }
695
696 impl<T: Copy> Unalign<T> {
697 /// Gets a copy of the inner `T`.
698 pub fn get(&self) -> T {
699 let Unalign(val) = *self;
700 val
701 }
702 }
703
704 // SAFETY: Since `T: AsBytes`, we know that it's safe to construct a `&[u8]`
705 // from an aligned `&T`. Since `&[u8]` itself has no alignment requirements, it
706 // must also be safe to construct a `&[u8]` from a `&T` at any address. Since
707 // `Unalign<T>` is `#[repr(packed)]`, everything about its layout except for its
708 // alignment is the same as `T`'s layout.
709 unsafe impl<T: AsBytes> AsBytes for Unalign<T> {
710 fn only_derive_is_allowed_to_implement_this_trait()
711 where
712 Self: Sized,
713 {
714 }
715 }
716
717 // Used in `transmute!` below.
718 #[doc(hidden)]
719 pub use core::mem::transmute as __real_transmute;
720
721 /// Safely transmutes a value of one type to a value of another type of the same
722 /// size.
723 ///
724 /// The expression `$e` must have a concrete type, `T`, which implements
725 /// `AsBytes`. The `transmute!` expression must also have a concrete type, `U`
726 /// (`U` is inferred from the calling context), and `U` must implement
727 /// `FromBytes`.
728 ///
729 /// Note that the `T` produced by the expression `$e` will *not* be dropped.
730 /// Semantically, its bits will be copied into a new value of type `U`, the
731 /// original `T` will be forgotten, and the value of type `U` will be returned.
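///
/// # Example
///
/// A small sketch of reinterpreting one fixed-size value as another of the
/// same size:
///
/// ```rust
/// let one_u32: u32 = 0x0102_0304;
/// // `[u8; 4]` has the same size as `u32`, and `u32: AsBytes` and
/// // `[u8; 4]: FromBytes`, so this compiles; the bytes are in native order.
/// let bytes: [u8; 4] = zerocopy::transmute!(one_u32);
/// assert_eq!(u32::from_ne_bytes(bytes), one_u32);
/// ```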
732 #[macro_export]
733 macro_rules! transmute {
734 ($e:expr) => {{
735 // NOTE: This must be a macro (rather than a function with trait bounds)
736 // because there's no way, in a generic context, to enforce that two
737 // types have the same size. `core::mem::transmute` uses compiler magic
738 // to enforce this so long as the types are concrete.
739
740 let e = $e;
741 if false {
742 // This branch, though never taken, ensures that the type of `e` is
743 // `AsBytes` and that the type of this macro invocation expression
744 // is `FromBytes`.
745 fn transmute<T: $crate::AsBytes, U: $crate::FromBytes>(_t: T) -> U {
746 unreachable!()
747 }
748 transmute(e)
749 } else {
750 // `core::mem::transmute` ensures that the type of `e` and the type
751 // of this macro invocation expression have the same size. We know
752 // this transmute is safe thanks to the `AsBytes` and `FromBytes`
753 // bounds enforced by the `false` branch.
754 //
755 // We use `$crate::__real_transmute` because we know it will always
756 // be available for crates which are using the 2015 edition of Rust.
757 // By contrast, if we were to use `std::mem::transmute`, this macro
758 // would not work for such crates in `no_std` contexts, and if we
759 // were to use `core::mem::transmute`, this macro would not work in
760 // `std` contexts in which `core` was not manually imported. This is
761 // not a problem for 2018 edition crates.
762 unsafe { $crate::__real_transmute(e) }
763 }
764 }}
765 }
766
767 /// A length- and alignment-checked reference to a byte slice which can safely
768 /// be reinterpreted as another type.
769 ///
770 /// `LayoutVerified` is a byte slice reference (`&[u8]`, `&mut [u8]`,
771 /// `Ref<[u8]>`, `RefMut<[u8]>`, etc.) with the invariant that the slice's length
772 /// and alignment are each greater than or equal to the length and alignment of
773 /// `T`. Using this invariant, it implements `Deref` for `T` so long as `T:
774 /// FromBytes` and `DerefMut` so long as `T: FromBytes + AsBytes`.
775 ///
776 /// # Examples
777 ///
778 /// `LayoutVerified` can be used to treat a sequence of bytes as a structured
779 /// type, and to read and write the fields of that type as if the byte slice
780 /// reference were simply a reference to that type.
781 ///
782 /// ```rust
783 /// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, LayoutVerified, Unaligned};
784 ///
785 /// #[derive(FromBytes, AsBytes, Unaligned)]
786 /// #[repr(C)]
787 /// struct UdpHeader {
788 /// src_port: [u8; 2],
789 /// dst_port: [u8; 2],
790 /// length: [u8; 2],
791 /// checksum: [u8; 2],
792 /// }
793 ///
794 /// struct UdpPacket<B> {
795 /// header: LayoutVerified<B, UdpHeader>,
796 /// body: B,
797 /// }
798 ///
799 /// impl<B: ByteSlice> UdpPacket<B> {
800 /// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
801 /// let (header, body) = LayoutVerified::new_unaligned_from_prefix(bytes)?;
802 /// Some(UdpPacket { header, body })
803 /// }
804 ///
805 /// pub fn get_src_port(&self) -> [u8; 2] {
806 /// self.header.src_port
807 /// }
808 /// }
809 ///
810 /// impl<B: ByteSliceMut> UdpPacket<B> {
811 /// pub fn set_src_port(&mut self, src_port: [u8; 2]) {
812 /// self.header.src_port = src_port;
813 /// }
814 /// }
815 /// ```
816 pub struct LayoutVerified<B, T: ?Sized>(B, PhantomData<T>);
817
818 impl<B, T> LayoutVerified<B, T>
819 where
820 B: ByteSlice,
821 {
822 /// Constructs a new `LayoutVerified`.
823 ///
824 /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
825 /// aligned to `align_of::<T>()`, and constructs a new `LayoutVerified`. If
826 /// either of these checks fail, it returns `None`.
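///
/// For example, a rough sketch using a target type with alignment 1, so that
/// the alignment check trivially passes:
///
/// ```rust
/// use zerocopy::LayoutVerified;
///
/// let bytes = [1u8, 2, 3, 4];
/// // `[u8; 4]` has size 4 and alignment 1, so both checks pass.
/// let lv = LayoutVerified::<_, [u8; 4]>::new(&bytes[..]).unwrap();
/// assert_eq!(*lv, [1, 2, 3, 4]);
/// ```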
827 #[inline]
828 pub fn new(bytes: B) -> Option<LayoutVerified<B, T>> {
829 if bytes.len() != mem::size_of::<T>() || !aligned_to(bytes.deref(), mem::align_of::<T>()) {
830 return None;
831 }
832 Some(LayoutVerified(bytes, PhantomData))
833 }
834
835 /// Constructs a new `LayoutVerified` from the prefix of a byte slice.
836 ///
837 /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
838 /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
839 /// `size_of::<T>()` bytes from `bytes` to construct a `LayoutVerified`, and
840 /// returns the remaining bytes to the caller. If either the length or
841 /// alignment checks fail, it returns `None`.
842 #[inline]
843 pub fn new_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
844 if bytes.len() < mem::size_of::<T>() || !aligned_to(bytes.deref(), mem::align_of::<T>()) {
845 return None;
846 }
847 let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
848 Some((LayoutVerified(bytes, PhantomData), suffix))
849 }
850
851 /// Constructs a new `LayoutVerified` from the suffix of a byte slice.
852 ///
853 /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
854 /// the last `size_of::<T>()` bytes of `bytes` are aligned to
855 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
856 /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
857 /// to the caller. If either the length or alignment checks fail, it returns
858 /// `None`.
859 #[inline]
860 pub fn new_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
861 let bytes_len = bytes.len();
862 if bytes_len < mem::size_of::<T>() {
863 return None;
864 }
865 let (prefix, bytes) = bytes.split_at(bytes_len - mem::size_of::<T>());
866 if !aligned_to(bytes.deref(), mem::align_of::<T>()) {
867 return None;
868 }
869 Some((prefix, LayoutVerified(bytes, PhantomData)))
870 }
871 }
872
873 impl<B, T> LayoutVerified<B, [T]>
874 where
875 B: ByteSlice,
876 {
877 /// Constructs a new `LayoutVerified` of a slice type.
878 ///
879 /// `new_slice` verifies that `bytes.len()` is a multiple of
880 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
881 /// constructs a new `LayoutVerified`. If either of these checks fail, it
882 /// returns `None`.
883 ///
884 /// # Panics
885 ///
886 /// `new_slice` panics if `T` is a zero-sized type.
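///
/// # Example
///
/// A rough sketch of viewing a byte buffer as a slice of two-byte elements;
/// `[u8; 2]` is used so that the alignment check trivially passes:
///
/// ```rust
/// use zerocopy::LayoutVerified;
///
/// let bytes = [0u8, 1, 2, 3, 4, 5];
/// // Six bytes are viewed as three `[u8; 2]` elements.
/// let lv = LayoutVerified::<_, [[u8; 2]]>::new_slice(&bytes[..]).unwrap();
/// assert_eq!(lv.len(), 3);
/// assert_eq!(lv[1], [2, 3]);
/// ```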
887 #[inline]
888 pub fn new_slice(bytes: B) -> Option<LayoutVerified<B, [T]>> {
889 assert_ne!(mem::size_of::<T>(), 0);
890 if bytes.len() % mem::size_of::<T>() != 0
891 || !aligned_to(bytes.deref(), mem::align_of::<T>())
892 {
893 return None;
894 }
895 Some(LayoutVerified(bytes, PhantomData))
896 }
897
898 /// Constructs a new `LayoutVerified` of a slice type from the prefix of a
899 /// byte slice.
900 ///
901 /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
902 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
903 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
904 /// `LayoutVerified`, and returns the remaining bytes to the caller. It also
905 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
906 /// of the length, alignment, or overflow checks fail, it returns `None`.
907 ///
908 /// # Panics
909 ///
910 /// `new_slice_from_prefix` panics if `T` is a zero-sized type.
911 #[inline]
912 pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(LayoutVerified<B, [T]>, B)> {
913 let expected_len = match mem::size_of::<T>().checked_mul(count) {
914 Some(len) => len,
915 None => return None,
916 };
917 if bytes.len() < expected_len {
918 return None;
919 }
920 let (prefix, bytes) = bytes.split_at(expected_len);
921 Self::new_slice(prefix).map(move |l| (l, bytes))
922 }
923
924 /// Constructs a new `LayoutVerified` of a slice type from the suffix of a
925 /// byte slice.
926 ///
927 /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
928 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
929 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
930 /// `LayoutVerified`, and returns the preceding bytes to the caller. It also
931 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
932 /// of the length, alignment, or overflow checks fail, it returns `None`.
933 ///
934 /// # Panics
935 ///
936 /// `new_slice_from_suffix` panics if `T` is a zero-sized type.
937 #[inline]
938 pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, LayoutVerified<B, [T]>)> {
939 let expected_len = match mem::size_of::<T>().checked_mul(count) {
940 Some(len) => len,
941 None => return None,
942 };
943 if bytes.len() < expected_len {
944 return None;
945 }
946 let (bytes, suffix) = bytes.split_at(expected_len);
947 Self::new_slice(suffix).map(move |l| (bytes, l))
948 }
949 }
950
951 fn map_zeroed<B: ByteSliceMut, T: ?Sized>(
952 opt: Option<LayoutVerified<B, T>>,
953 ) -> Option<LayoutVerified<B, T>> {
954 match opt {
955 Some(mut lv) => {
956 for b in lv.0.iter_mut() {
957 *b = 0;
958 }
959 Some(lv)
960 }
961 None => None,
962 }
963 }
964
965 fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
966 opt: Option<(LayoutVerified<B, T>, B)>,
967 ) -> Option<(LayoutVerified<B, T>, B)> {
968 match opt {
969 Some((mut lv, rest)) => {
970 for b in lv.0.iter_mut() {
971 *b = 0;
972 }
973 Some((lv, rest))
974 }
975 None => None,
976 }
977 }
978
979 fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
980 opt: Option<(B, LayoutVerified<B, T>)>,
981 ) -> Option<(B, LayoutVerified<B, T>)> {
982 map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
983 }
984
985 impl<B, T> LayoutVerified<B, T>
986 where
987 B: ByteSliceMut,
988 {
989 /// Constructs a new `LayoutVerified` after zeroing the bytes.
990 ///
991 /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
992 /// `bytes` is aligned to `align_of::<T>()`, and constructs a new
993 /// `LayoutVerified`. If either of these checks fail, it returns `None`.
994 ///
995 /// If the checks succeed, then `bytes` will be initialized to zero. This
996 /// can be useful when re-using buffers to ensure that sensitive data
997 /// previously stored in the buffer is not leaked.
998 #[inline]
999 pub fn new_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
1000 map_zeroed(Self::new(bytes))
1001 }
1002
1003 /// Constructs a new `LayoutVerified` from the prefix of a byte slice,
1004 /// zeroing the prefix.
1005 ///
1006 /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
1007 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
1008 /// `size_of::<T>()` bytes from `bytes` to construct a `LayoutVerified`, and
1009 /// returns the remaining bytes to the caller. If either the length or
1010 /// alignment checks fail, it returns `None`.
1011 ///
1012 /// If the checks succeed, then the prefix which is consumed will be
1013 /// initialized to zero. This can be useful when re-using buffers to ensure
1014 /// that sensitive data previously stored in the buffer is not leaked.
1015 #[inline]
1016 pub fn new_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
1017 map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
1018 }
1019
1020 /// Constructs a new `LayoutVerified` from the suffix of a byte slice,
1021 /// zeroing the suffix.
1022 ///
1023 /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
1024 /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
1025 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
1026 /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
1027 /// to the caller. If either the length or alignment checks fail, it returns
1028 /// `None`.
1029 ///
1030 /// If the checks succeed, then the suffix which is consumed will be
1031 /// initialized to zero. This can be useful when re-using buffers to ensure
1032 /// that sensitive data previously stored in the buffer is not leaked.
1033 #[inline]
1034 pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
1035 map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
1036 }
1037 }
1038
1039 impl<B, T> LayoutVerified<B, [T]>
1040 where
1041 B: ByteSliceMut,
1042 {
1043 /// Constructs a new `LayoutVerified` of a slice type after zeroing the
1044 /// bytes.
1045 ///
1046 /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
1047 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
1048 /// constructs a new `LayoutVerified`. If either of these checks fail, it
1049 /// returns `None`.
1050 ///
1051 /// If the checks succeed, then `bytes` will be initialized to zero. This
1052 /// can be useful when re-using buffers to ensure that sensitive data
1053 /// previously stored in the buffer is not leaked.
1054 ///
1055 /// # Panics
1056 ///
1057 /// `new_slice_zeroed` panics if `T` is a zero-sized type.
1058 #[inline]
1059 pub fn new_slice_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
1060 map_zeroed(Self::new_slice(bytes))
1061 }
1062
1063 /// Constructs a new `LayoutVerified` of a slice type from the prefix of a
1064 /// byte slice, after zeroing the bytes.
1065 ///
1066 /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
1067 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
1068 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
1069 /// `LayoutVerified`, and returns the remaining bytes to the caller. It also
1070 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
1071 /// of the length, alignment, or overflow checks fail, it returns `None`.
1072 ///
1073 /// If the checks succeed, then the prefix which is consumed will be
1074 /// initialized to zero. This can be useful when re-using buffers to ensure
1075 /// that sensitive data previously stored in the buffer is not leaked.
1076 ///
1077 /// # Panics
1078 ///
1079 /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
1080 #[inline]
1081 pub fn new_slice_from_prefix_zeroed(
1082 bytes: B,
1083 count: usize,
1084 ) -> Option<(LayoutVerified<B, [T]>, B)> {
1085 map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
1086 }
1087
1088 /// Constructs a new `LayoutVerified` of a slice type from the suffix of a
1089 /// byte slice, after zeroing the bytes.
1090 ///
1091 /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
1092 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
1093 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
1094 /// `LayoutVerified`, and returns the preceding bytes to the caller. It also
1095 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
1096 /// of the length, alignment, or overflow checks fail, it returns `None`.
1097 ///
1098 /// If the checks succeed, then the consumed suffix will be initialized to
1099 /// zero. This can be useful when re-using buffers to ensure that sensitive
1100 /// data previously stored in the buffer is not leaked.
1101 ///
1102 /// # Panics
1103 ///
1104 /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
1105 #[inline]
1106 pub fn new_slice_from_suffix_zeroed(
1107 bytes: B,
1108 count: usize,
1109 ) -> Option<(B, LayoutVerified<B, [T]>)> {
1110 map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
1111 }
1112 }
1113
1114 impl<B, T> LayoutVerified<B, T>
1115 where
1116 B: ByteSlice,
1117 T: Unaligned,
1118 {
1119 /// Constructs a new `LayoutVerified` for a type with no alignment
1120 /// requirement.
1121 ///
1122 /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
1123 /// constructs a new `LayoutVerified`. If the check fails, it returns
1124 /// `None`.
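///
/// For example (a rough sketch; the `PortPair` type is purely illustrative):
///
/// ```rust
/// use zerocopy::{FromBytes, LayoutVerified, Unaligned};
///
/// #[derive(FromBytes, Unaligned)]
/// #[repr(C)]
/// struct PortPair {
///     src: [u8; 2],
///     dst: [u8; 2],
/// }
///
/// let bytes = [0u8, 80, 31, 144];
/// // No alignment check is needed because `PortPair: Unaligned`.
/// let lv = LayoutVerified::<_, PortPair>::new_unaligned(&bytes[..]).unwrap();
/// assert_eq!(lv.src, [0, 80]);
/// assert_eq!(lv.dst, [31, 144]);
/// ```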
1125 #[inline]
1126 pub fn new_unaligned(bytes: B) -> Option<LayoutVerified<B, T>> {
1127 if bytes.len() != mem::size_of::<T>() {
1128 return None;
1129 }
1130 Some(LayoutVerified(bytes, PhantomData))
1131 }
1132
1133 /// Constructs a new `LayoutVerified` from the prefix of a byte slice for a
1134 /// type with no alignment requirement.
1135 ///
1136 /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
1137 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
1138 /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
1139 /// to the caller. If the length check fails, it returns `None`.
1140 #[inline]
1141 pub fn new_unaligned_from_prefix(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
1142 if bytes.len() < mem::size_of::<T>() {
1143 return None;
1144 }
1145 let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
1146 Some((LayoutVerified(bytes, PhantomData), suffix))
1147 }
1148
1149 /// Constructs a new `LayoutVerified` from the suffix of a byte slice for a
1150 /// type with no alignment requirement.
1151 ///
1152 /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
1153 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
1154 /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
1155 /// to the caller. If the length check fails, it returns `None`.
1156 #[inline]
1157 pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
1158 let bytes_len = bytes.len();
1159 if bytes_len < mem::size_of::<T>() {
1160 return None;
1161 }
1162 let (prefix, bytes) = bytes.split_at(bytes_len - mem::size_of::<T>());
1163 Some((prefix, LayoutVerified(bytes, PhantomData)))
1164 }
1165 }
1166
1167 impl<B, T> LayoutVerified<B, [T]>
1168 where
1169 B: ByteSlice,
1170 T: Unaligned,
1171 {
1172 /// Constructs a new `LayoutVerified` of a slice type with no alignment
1173 /// requirement.
1174 ///
1175 /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
1176 /// `size_of::<T>()` and constructs a new `LayoutVerified`. If the check
1177 /// fails, it returns `None`.
1178 ///
1179 /// # Panics
1180 ///
1181 /// `new_slice_unaligned` panics if `T` is a zero-sized type.
1182 #[inline]
1183 pub fn new_slice_unaligned(bytes: B) -> Option<LayoutVerified<B, [T]>> {
1184 assert_ne!(mem::size_of::<T>(), 0);
1185 if bytes.len() % mem::size_of::<T>() != 0 {
1186 return None;
1187 }
1188 Some(LayoutVerified(bytes, PhantomData))
1189 }
1190
1191 /// Constructs a new `LayoutVerified` of a slice type with no alignment
1192 /// requirement from the prefix of a byte slice.
1193 ///
1194 /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
1195 /// count`. It consumes the first `size_of::<T>() * count` bytes from
1196 /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
1197 /// to the caller. It also ensures that `size_of::<T>() * count` does not
1198 /// overflow a `usize`. If either the length or overflow check fails, it
1199 /// returns `None`.
1200 ///
1201 /// # Panics
1202 ///
1203 /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
1204 #[inline]
1205 pub fn new_slice_unaligned_from_prefix(
1206 bytes: B,
1207 count: usize,
1208 ) -> Option<(LayoutVerified<B, [T]>, B)> {
1209 let expected_len = match mem::size_of::<T>().checked_mul(count) {
1210 Some(len) => len,
1211 None => return None,
1212 };
1213 if bytes.len() < expected_len {
1214 return None;
1215 }
1216 let (prefix, bytes) = bytes.split_at(expected_len);
1217 Self::new_slice_unaligned(prefix).map(move |l| (l, bytes))
1218 }
1219
1220 /// Constructs a new `LayoutVerified` of a slice type with no alignment
1221 /// requirement from the suffix of a byte slice.
1222 ///
1223 /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
1224 /// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
1225 /// to construct a `LayoutVerified`, and returns the preceding bytes to the
1226 /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
1227 /// `usize`. If either the length or overflow check fails, it returns
1228 /// `None`.
1229 ///
1230 /// # Panics
1231 ///
1232 /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
1233 #[inline]
1234 pub fn new_slice_unaligned_from_suffix(
1235 bytes: B,
1236 count: usize,
1237 ) -> Option<(B, LayoutVerified<B, [T]>)> {
1238 let expected_len = match mem::size_of::<T>().checked_mul(count) {
1239 Some(len) => len,
1240 None => return None,
1241 };
1242 if bytes.len() < expected_len {
1243 return None;
1244 }
1245 let (bytes, suffix) = bytes.split_at(expected_len);
1246 Self::new_slice_unaligned(suffix).map(move |l| (bytes, l))
1247 }
1248 }
1249
1250 impl<B, T> LayoutVerified<B, T>
1251 where
1252 B: ByteSliceMut,
1253 T: Unaligned,
1254 {
1255 /// Constructs a new `LayoutVerified` for a type with no alignment
1256 /// requirement, zeroing the bytes.
1257 ///
1258 /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
1259 /// constructs a new `LayoutVerified`. If the check fails, it returns
1260 /// `None`.
1261 ///
1262 /// If the check succeeds, then `bytes` will be initialized to zero. This
1263 /// can be useful when re-using buffers to ensure that sensitive data
1264 /// previously stored in the buffer is not leaked.
1265 #[inline]
1266 pub fn new_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, T>> {
1267 map_zeroed(Self::new_unaligned(bytes))
1268 }
1269
1270 /// Constructs a new `LayoutVerified` from the prefix of a byte slice for a
1271 /// type with no alignment requirement, zeroing the prefix.
1272 ///
1273 /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
1274 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
1275 /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
1276 /// to the caller. If the length check fails, it returns `None`.
1277 ///
1278 /// If the check succeeds, then the prefix which is consumed will be
1279 /// initialized to zero. This can be useful when re-using buffers to ensure
1280 /// that sensitive data previously stored in the buffer is not leaked.
1281 #[inline]
1282 pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(LayoutVerified<B, T>, B)> {
1283 map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
1284 }
1285
1286 /// Constructs a new `LayoutVerified` from the suffix of a byte slice for a
1287 /// type with no alignment requirement, zeroing the suffix.
1288 ///
1289 /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
1290 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
1291 /// `bytes` to construct a `LayoutVerified`, and returns the preceding bytes
1292 /// to the caller. If the length check fails, it returns `None`.
1293 ///
1294 /// If the check succeeds, then the suffix which is consumed will be
1295 /// initialized to zero. This can be useful when re-using buffers to ensure
1296 /// that sensitive data previously stored in the buffer is not leaked.
1297 #[inline]
1298 pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, LayoutVerified<B, T>)> {
1299 map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
1300 }
1301 }
1302
1303 impl<B, T> LayoutVerified<B, [T]>
1304 where
1305 B: ByteSliceMut,
1306 T: Unaligned,
1307 {
1308 /// Constructs a new `LayoutVerified` for a slice type with no alignment
1309 /// requirement, zeroing the bytes.
1310 ///
1311 /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
1312 /// of `size_of::<T>()` and constructs a new `LayoutVerified`. If the check
1313 /// fails, it returns `None`.
1314 ///
1315 /// If the check succeeds, then `bytes` will be initialized to zero. This
1316 /// can be useful when re-using buffers to ensure that sensitive data
1317 /// previously stored in the buffer is not leaked.
1318 ///
1319 /// # Panics
1320 ///
1321 /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
1322 #[inline]
1323 pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<LayoutVerified<B, [T]>> {
1324 map_zeroed(Self::new_slice_unaligned(bytes))
1325 }
1326
1327 /// Constructs a new `LayoutVerified` of a slice type with no alignment
1328 /// requirement from the prefix of a byte slice, after zeroing the bytes.
1329 ///
1330 /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
1331 /// count`. It consumes the first `size_of::<T>() * count` bytes from
1332 /// `bytes` to construct a `LayoutVerified`, and returns the remaining bytes
1333 /// to the caller. It also ensures that `size_of::<T>() * count` does not
1334 /// overflow a `usize`. If either the length or overflow check fails, it
1335 /// returns `None`.
1336 ///
1337 /// If the checks succeed, then the prefix will be initialized to zero. This
1338 /// can be useful when re-using buffers to ensure that sensitive data
1339 /// previously stored in the buffer is not leaked.
1340 ///
1341 /// # Panics
1342 ///
1343 /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
1344 /// type.
1345 #[inline]
1346 pub fn new_slice_unaligned_from_prefix_zeroed(
1347 bytes: B,
1348 count: usize,
1349 ) -> Option<(LayoutVerified<B, [T]>, B)> {
1350 map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
1351 }
1352
1353 /// Constructs a new `LayoutVerified` of a slice type with no alignment
1354 /// requirement from the suffix of a byte slice, after zeroing the bytes.
1355 ///
1356 /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>() *
1357 /// count`. It consumes the last `size_of::<T>() * count` bytes from `bytes`
1358 /// to construct a `LayoutVerified`, and returns the preceding bytes to the
1359 /// caller. It also ensures that `size_of::<T>() * count` does not overflow a
1360 /// `usize`. If either the length or overflow check fails, it returns
1361 /// `None`.
1362 ///
1363 /// If the checks succeed, then the suffix will be initialized to zero. This
1364 /// can be useful when re-using buffers to ensure that sensitive data
1365 /// previously stored in the buffer is not leaked.
1366 ///
1367 /// # Panics
1368 ///
1369 /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
1370 /// type.
1371 #[inline]
1372 pub fn new_slice_unaligned_from_suffix_zeroed(
1373 bytes: B,
1374 count: usize,
1375 ) -> Option<(B, LayoutVerified<B, [T]>)> {
1376 map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
1377 }
1378 }
1379
1380 impl<'a, B, T> LayoutVerified<B, T>
1381 where
1382 B: 'a + ByteSlice,
1383 T: FromBytes,
1384 {
1385 /// Converts this `LayoutVerified` into a reference.
1386 ///
1387 /// `into_ref` consumes the `LayoutVerified`, and returns a reference to
1388 /// `T`.
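    ///
    /// # Example
    ///
    /// A minimal usage sketch, using `[u8; 4]` (which is `FromBytes` and
    /// `Unaligned`) so that `new_unaligned` always succeeds for a 4-byte
    /// buffer:
    ///
    /// ```
    /// # use zerocopy::LayoutVerified;
    /// let bytes = [1u8, 2, 3, 4];
    /// let lv = LayoutVerified::<_, [u8; 4]>::new_unaligned(&bytes[..]).unwrap();
    /// let r: &[u8; 4] = lv.into_ref();
    /// assert_eq!(r, &[1u8, 2, 3, 4]);
    /// ```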
    pub fn into_ref(self) -> &'a T {
1390 // NOTE: This is safe because `B` is guaranteed to live for the lifetime
1391 // `'a`, meaning that a) the returned reference cannot outlive the `B`
1392 // from which `self` was constructed and, b) no mutable methods on that
1393 // `B` can be called during the lifetime of the returned reference. See
1394 // the documentation on `deref_helper` for what invariants we are
1395 // required to uphold.
1396 unsafe { self.deref_helper() }
1397 }
1398 }
1399
1400 impl<'a, B, T> LayoutVerified<B, T>
1401 where
1402 B: 'a + ByteSliceMut,
1403 T: FromBytes + AsBytes,
1404 {
1405 /// Converts this `LayoutVerified` into a mutable reference.
1406 ///
1407 /// `into_mut` consumes the `LayoutVerified`, and returns a mutable
1408 /// reference to `T`.
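    ///
    /// # Example
    ///
    /// A minimal usage sketch: writes through the returned reference are
    /// visible in the original buffer once the borrow ends.
    ///
    /// ```
    /// # use zerocopy::LayoutVerified;
    /// let mut bytes = [0u8, 1, 2, 3];
    /// let lv = LayoutVerified::<_, [u8; 4]>::new_unaligned(&mut bytes[..]).unwrap();
    /// let r: &mut [u8; 4] = lv.into_mut();
    /// r[0] = 0xFF;
    /// assert_eq!(bytes, [0xFFu8, 1, 2, 3]);
    /// ```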
    pub fn into_mut(mut self) -> &'a mut T {
1410 // NOTE: This is safe because `B` is guaranteed to live for the lifetime
1411 // `'a`, meaning that a) the returned reference cannot outlive the `B`
1412 // from which `self` was constructed and, b) no other methods - mutable
1413 // or immutable - on that `B` can be called during the lifetime of the
1414 // returned reference. See the documentation on `deref_mut_helper` for
1415 // what invariants we are required to uphold.
1416 unsafe { self.deref_mut_helper() }
1417 }
1418 }
1419
1420 impl<'a, B, T> LayoutVerified<B, [T]>
1421 where
1422 B: 'a + ByteSlice,
1423 T: FromBytes,
1424 {
1425 /// Converts this `LayoutVerified` into a slice reference.
1426 ///
1427 /// `into_slice` consumes the `LayoutVerified`, and returns a reference to
1428 /// `[T]`.
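    ///
    /// # Example
    ///
    /// A minimal usage sketch, using `u8` so that no alignment requirement
    /// applies:
    ///
    /// ```
    /// # use zerocopy::LayoutVerified;
    /// let bytes = [1u8, 2, 3, 4];
    /// let lv = LayoutVerified::<_, [u8]>::new_slice_unaligned(&bytes[..]).unwrap();
    /// let s: &[u8] = lv.into_slice();
    /// assert_eq!(s, [1u8, 2, 3, 4]);
    /// ```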
    pub fn into_slice(self) -> &'a [T] {
1430 // NOTE: This is safe because `B` is guaranteed to live for the lifetime
1431 // `'a`, meaning that a) the returned reference cannot outlive the `B`
1432 // from which `self` was constructed and, b) no mutable methods on that
1433 // `B` can be called during the lifetime of the returned reference. See
1434 // the documentation on `deref_slice_helper` for what invariants we are
1435 // required to uphold.
1436 unsafe { self.deref_slice_helper() }
1437 }
1438 }
1439
1440 impl<'a, B, T> LayoutVerified<B, [T]>
1441 where
1442 B: 'a + ByteSliceMut,
1443 T: FromBytes + AsBytes,
1444 {
1445 /// Converts this `LayoutVerified` into a mutable slice reference.
1446 ///
1447 /// `into_mut_slice` consumes the `LayoutVerified`, and returns a mutable
1448 /// reference to `[T]`.
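    ///
    /// # Example
    ///
    /// A minimal usage sketch: the returned slice aliases the original
    /// buffer, so writes to it are visible there afterwards.
    ///
    /// ```
    /// # use zerocopy::LayoutVerified;
    /// let mut bytes = [1u8, 2, 3, 4];
    /// let lv = LayoutVerified::<_, [u8]>::new_slice_unaligned(&mut bytes[..]).unwrap();
    /// let s: &mut [u8] = lv.into_mut_slice();
    /// s[0] = 0xFF;
    /// assert_eq!(bytes, [0xFFu8, 2, 3, 4]);
    /// ```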
    pub fn into_mut_slice(mut self) -> &'a mut [T] {
1450 // NOTE: This is safe because `B` is guaranteed to live for the lifetime
1451 // `'a`, meaning that a) the returned reference cannot outlive the `B`
1452 // from which `self` was constructed and, b) no other methods - mutable
1453 // or immutable - on that `B` can be called during the lifetime of the
1454 // returned reference. See the documentation on `deref_mut_slice_helper`
1455 // for what invariants we are required to uphold.
1456 unsafe { self.deref_mut_slice_helper() }
1457 }
1458 }
1459
1460 impl<B, T> LayoutVerified<B, T>
1461 where
1462 B: ByteSlice,
1463 T: FromBytes,
1464 {
1465 /// Creates an immutable reference to `T` with a specific lifetime.
1466 ///
1467 /// # Safety
1468 ///
1469 /// The type bounds on this method guarantee that it is safe to create an
1470 /// immutable reference to `T` from `self`. However, since the lifetime `'a`
1471 /// is not required to be shorter than the lifetime of the reference to
1472 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
1473 /// this reference. In particular, the referent must exist for all of `'a`,
1474 /// and no mutable references to the same memory may be constructed during
1475 /// `'a`.
    unsafe fn deref_helper<'a>(&self) -> &'a T {
1477 &*(self.0.as_ptr() as *const T)
1478 }
1479 }
1480
1481 impl<B, T> LayoutVerified<B, T>
1482 where
1483 B: ByteSliceMut,
1484 T: FromBytes + AsBytes,
1485 {
1486 /// Creates a mutable reference to `T` with a specific lifetime.
1487 ///
1488 /// # Safety
1489 ///
1490 /// The type bounds on this method guarantee that it is safe to create a
1491 /// mutable reference to `T` from `self`. However, since the lifetime `'a`
1492 /// is not required to be shorter than the lifetime of the reference to
1493 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
1494 /// this reference. In particular, the referent must exist for all of `'a`,
1495 /// and no other references - mutable or immutable - to the same memory may
1496 /// be constructed during `'a`.
    unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
1498 &mut *(self.0.as_mut_ptr() as *mut T)
1499 }
1500 }
1501
1502 impl<B, T> LayoutVerified<B, [T]>
1503 where
1504 B: ByteSlice,
1505 T: FromBytes,
1506 {
1507 /// Creates an immutable reference to `[T]` with a specific lifetime.
1508 ///
1509 /// # Safety
1510 ///
1511 /// `deref_slice_helper` has the same safety requirements as `deref_helper`.
    unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
1513 let len = self.0.len();
1514 let elem_size = mem::size_of::<T>();
1515 debug_assert_ne!(elem_size, 0);
1516 debug_assert_eq!(len % elem_size, 0);
1517 let elems = len / elem_size;
1518 slice::from_raw_parts(self.0.as_ptr() as *const T, elems)
1519 }
1520 }
1521
1522 impl<B, T> LayoutVerified<B, [T]>
1523 where
1524 B: ByteSliceMut,
1525 T: FromBytes + AsBytes,
1526 {
1527 /// Creates a mutable reference to `[T]` with a specific lifetime.
1528 ///
1529 /// # Safety
1530 ///
1531 /// `deref_mut_slice_helper` has the same safety requirements as
1532 /// `deref_mut_helper`.
    unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
1534 let len = self.0.len();
1535 let elem_size = mem::size_of::<T>();
1536 debug_assert_ne!(elem_size, 0);
1537 debug_assert_eq!(len % elem_size, 0);
1538 let elems = len / elem_size;
1539 slice::from_raw_parts_mut(self.0.as_mut_ptr() as *mut T, elems)
1540 }
1541 }
1542
fn aligned_to(bytes: &[u8], align: usize) -> bool {
1544 (bytes as *const _ as *const () as usize) % align == 0
1545 }
1546
1547 impl<B, T> LayoutVerified<B, T>
1548 where
1549 B: ByteSlice,
1550 T: ?Sized,
1551 {
1552 /// Gets the underlying bytes.
1553 #[inline]
    pub fn bytes(&self) -> &[u8] {
1555 &self.0
1556 }
1557 }
1558
1559 impl<B, T> LayoutVerified<B, T>
1560 where
1561 B: ByteSliceMut,
1562 T: ?Sized,
1563 {
1564 /// Gets the underlying bytes mutably.
1565 #[inline]
    pub fn bytes_mut(&mut self) -> &mut [u8] {
1567 &mut self.0
1568 }
1569 }
1570
1571 impl<B, T> LayoutVerified<B, T>
1572 where
1573 B: ByteSlice,
1574 T: FromBytes,
1575 {
1576 /// Reads a copy of `T`.
1577 #[inline]
    pub fn read(&self) -> T {
1579 // SAFETY: Because of the invariants on `LayoutVerified`, we know that
1580 // `self.0` is at least `size_of::<T>()` bytes long, and that it is at
1581 // least as aligned as `align_of::<T>()`. Because `T: FromBytes`, it is
1582 // sound to interpret these bytes as a `T`.
1583 unsafe { ptr::read(self.0.as_ptr() as *const T) }
1584 }
1585 }
1586
1587 impl<B, T> LayoutVerified<B, T>
1588 where
1589 B: ByteSliceMut,
1590 T: AsBytes,
1591 {
1592 /// Writes the bytes of `t` and then forgets `t`.
1593 #[inline]
    pub fn write(&mut self, t: T) {
1595 // SAFETY: Because of the invariants on `LayoutVerified`, we know that
1596 // `self.0` is at least `size_of::<T>()` bytes long, and that it is at
1597 // least as aligned as `align_of::<T>()`. Writing `t` to the buffer will
1598 // allow all of the bytes of `t` to be accessed as a `[u8]`, but because
1599 // `T: AsBytes`, we know this is sound.
1600 unsafe { ptr::write(self.0.as_mut_ptr() as *mut T, t) }
1601 }
1602 }
1603
1604 impl<B, T> Deref for LayoutVerified<B, T>
1605 where
1606 B: ByteSlice,
1607 T: FromBytes,
1608 {
1609 type Target = T;
1610 #[inline]
    fn deref(&self) -> &T {
1612 // SAFETY: This is safe because the lifetime of `self` is the same as
1613 // the lifetime of the return value, meaning that a) the returned
1614 // reference cannot outlive `self` and, b) no mutable methods on `self`
1615 // can be called during the lifetime of the returned reference. See the
1616 // documentation on `deref_helper` for what invariants we are required
1617 // to uphold.
1618 unsafe { self.deref_helper() }
1619 }
1620 }
1621
1622 impl<B, T> DerefMut for LayoutVerified<B, T>
1623 where
1624 B: ByteSliceMut,
1625 T: FromBytes + AsBytes,
1626 {
1627 #[inline]
    fn deref_mut(&mut self) -> &mut T {
1629 // SAFETY: This is safe because the lifetime of `self` is the same as
1630 // the lifetime of the return value, meaning that a) the returned
1631 // reference cannot outlive `self` and, b) no other methods on `self`
1632 // can be called during the lifetime of the returned reference. See the
1633 // documentation on `deref_mut_helper` for what invariants we are
1634 // required to uphold.
1635 unsafe { self.deref_mut_helper() }
1636 }
1637 }
1638
1639 impl<B, T> Deref for LayoutVerified<B, [T]>
1640 where
1641 B: ByteSlice,
1642 T: FromBytes,
1643 {
1644 type Target = [T];
1645 #[inline]
    fn deref(&self) -> &[T] {
1647 // SAFETY: This is safe because the lifetime of `self` is the same as
1648 // the lifetime of the return value, meaning that a) the returned
1649 // reference cannot outlive `self` and, b) no mutable methods on `self`
1650 // can be called during the lifetime of the returned reference. See the
1651 // documentation on `deref_slice_helper` for what invariants we are
1652 // required to uphold.
1653 unsafe { self.deref_slice_helper() }
1654 }
1655 }
1656
1657 impl<B, T> DerefMut for LayoutVerified<B, [T]>
1658 where
1659 B: ByteSliceMut,
1660 T: FromBytes + AsBytes,
1661 {
1662 #[inline]
    fn deref_mut(&mut self) -> &mut [T] {
1664 // SAFETY: This is safe because the lifetime of `self` is the same as
1665 // the lifetime of the return value, meaning that a) the returned
1666 // reference cannot outlive `self` and, b) no other methods on `self`
1667 // can be called during the lifetime of the returned reference. See the
1668 // documentation on `deref_mut_slice_helper` for what invariants we are
1669 // required to uphold.
1670 unsafe { self.deref_mut_slice_helper() }
1671 }
1672 }
1673
1674 impl<T, B> Display for LayoutVerified<B, T>
1675 where
1676 B: ByteSlice,
1677 T: FromBytes + Display,
1678 {
1679 #[inline]
    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
1681 let inner: &T = self;
1682 inner.fmt(fmt)
1683 }
1684 }
1685
1686 impl<T, B> Display for LayoutVerified<B, [T]>
1687 where
1688 B: ByteSlice,
1689 T: FromBytes,
1690 [T]: Display,
1691 {
1692 #[inline]
    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
1694 let inner: &[T] = self;
1695 inner.fmt(fmt)
1696 }
1697 }
1698
1699 impl<T, B> Debug for LayoutVerified<B, T>
1700 where
1701 B: ByteSlice,
1702 T: FromBytes + Debug,
1703 {
1704 #[inline]
    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
1706 let inner: &T = self;
1707 fmt.debug_tuple("LayoutVerified").field(&inner).finish()
1708 }
1709 }
1710
1711 impl<T, B> Debug for LayoutVerified<B, [T]>
1712 where
1713 B: ByteSlice,
1714 T: FromBytes + Debug,
1715 {
1716 #[inline]
    fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
1718 let inner: &[T] = self;
1719 fmt.debug_tuple("LayoutVerified").field(&inner).finish()
1720 }
1721 }
1722
1723 impl<T, B> Eq for LayoutVerified<B, T>
1724 where
1725 B: ByteSlice,
1726 T: FromBytes + Eq,
1727 {
1728 }
1729
1730 impl<T, B> Eq for LayoutVerified<B, [T]>
1731 where
1732 B: ByteSlice,
1733 T: FromBytes + Eq,
1734 {
1735 }
1736
1737 impl<T, B> PartialEq for LayoutVerified<B, T>
1738 where
1739 B: ByteSlice,
1740 T: FromBytes + PartialEq,
1741 {
1742 #[inline]
    fn eq(&self, other: &Self) -> bool {
1744 self.deref().eq(other.deref())
1745 }
1746 }
1747
1748 impl<T, B> PartialEq for LayoutVerified<B, [T]>
1749 where
1750 B: ByteSlice,
1751 T: FromBytes + PartialEq,
1752 {
1753 #[inline]
    fn eq(&self, other: &Self) -> bool {
1755 self.deref().eq(other.deref())
1756 }
1757 }
1758
1759 impl<T, B> Ord for LayoutVerified<B, T>
1760 where
1761 B: ByteSlice,
1762 T: FromBytes + Ord,
1763 {
1764 #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
1766 let inner: &T = self;
1767 let other_inner: &T = other;
1768 inner.cmp(other_inner)
1769 }
1770 }
1771
1772 impl<T, B> Ord for LayoutVerified<B, [T]>
1773 where
1774 B: ByteSlice,
1775 T: FromBytes + Ord,
1776 {
1777 #[inline]
    fn cmp(&self, other: &Self) -> Ordering {
1779 let inner: &[T] = self;
1780 let other_inner: &[T] = other;
1781 inner.cmp(other_inner)
1782 }
1783 }
1784
1785 impl<T, B> PartialOrd for LayoutVerified<B, T>
1786 where
1787 B: ByteSlice,
1788 T: FromBytes + PartialOrd,
1789 {
1790 #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1792 let inner: &T = self;
1793 let other_inner: &T = other;
1794 inner.partial_cmp(other_inner)
1795 }
1796 }
1797
1798 impl<T, B> PartialOrd for LayoutVerified<B, [T]>
1799 where
1800 B: ByteSlice,
1801 T: FromBytes + PartialOrd,
1802 {
1803 #[inline]
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
1805 let inner: &[T] = self;
1806 let other_inner: &[T] = other;
1807 inner.partial_cmp(other_inner)
1808 }
1809 }
1810
1811 mod sealed {
1812 use core::cell::{Ref, RefMut};
1813
1814 pub trait Sealed {}
1815 impl<'a> Sealed for &'a [u8] {}
1816 impl<'a> Sealed for &'a mut [u8] {}
1817 impl<'a> Sealed for Ref<'a, [u8]> {}
1818 impl<'a> Sealed for RefMut<'a, [u8]> {}
1819 }
1820
1821 // ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
1822 // Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
// references, such as the fact that a given reference never changes its length
// between calls to deref() or deref_mut(), and that split_at() works as
// expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
// implement them in a way that violated these assumptions and broke our
// unsafe code. Thus, we seal them and implement them only for known-good
// reference types. For the same reason, they're unsafe traits.
1829
1830 /// A mutable or immutable reference to a byte slice.
1831 ///
1832 /// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
1833 /// implemented for various special reference types such as `Ref<[u8]>` and
1834 /// `RefMut<[u8]>`.
1835 ///
1836 /// Note that, while it would be technically possible, `ByteSlice` is not
1837 /// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`]
1838 /// method would involve reallocation, and `split_at` must be a very cheap
1839 /// operation in order for the utilities in this crate to perform as designed.
1840 ///
1841 /// [`Vec<u8>`]: std::vec::Vec
1842 /// [`split_at`]: crate::ByteSlice::split_at
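///
/// # Example
///
/// A minimal sketch of why this abstraction is useful: the same constructor
/// works whether the bytes are borrowed directly or through a `Ref<[u8]>`
/// obtained from a `RefCell`.
///
/// ```
/// use core::cell::{Ref, RefCell};
/// # use zerocopy::LayoutVerified;
///
/// // Backed by a plain byte slice.
/// let bytes = [0u8; 4];
/// let lv = LayoutVerified::<_, [u8]>::new_slice_unaligned(&bytes[..]).unwrap();
/// assert_eq!(&*lv, &[0u8; 4][..]);
///
/// // Backed by a `Ref<[u8]>` borrowed from a `RefCell`.
/// let cell = RefCell::new([0u8; 4]);
/// let guard = Ref::map(cell.borrow(), |arr| &arr[..]);
/// let lv = LayoutVerified::<_, [u8]>::new_slice_unaligned(guard).unwrap();
/// assert_eq!(&*lv, &[0u8; 4][..]);
/// ```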
1843 pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + self::sealed::Sealed {
1844 /// Gets a raw pointer to the first byte in the slice.
    fn as_ptr(&self) -> *const u8;
1846
1847 /// Splits the slice at the midpoint.
1848 ///
1849 /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`.
1850 ///
1851 /// # Panics
1852 ///
1853 /// `x.split_at(mid)` panics if `mid > x.len()`.
    fn split_at(self, mid: usize) -> (Self, Self);
1855 }
1856
1857 /// A mutable reference to a byte slice.
1858 ///
1859 /// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
1860 /// a byte slice, and is implemented for various special reference types such as
1861 /// `RefMut<[u8]>`.
1862 pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
1863 /// Gets a mutable raw pointer to the first byte in the slice.
    fn as_mut_ptr(&mut self) -> *mut u8;
1865 }
1866
unsafe impl<'a> ByteSlice for &'a [u8] {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        <[u8]>::split_at(self, mid)
    }
}
unsafe impl<'a> ByteSlice for &'a mut [u8] {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        <[u8]>::split_at_mut(self, mid)
    }
}
unsafe impl<'a> ByteSlice for Ref<'a, [u8]> {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
    }
}
unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
    fn as_ptr(&self) -> *const u8 {
        <[u8]>::as_ptr(self)
    }
    fn split_at(self, mid: usize) -> (Self, Self) {
        RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
    }
}

unsafe impl<'a> ByteSliceMut for &'a mut [u8] {
    fn as_mut_ptr(&mut self) -> *mut u8 {
        <[u8]>::as_mut_ptr(self)
    }
}
unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {
    fn as_mut_ptr(&mut self) -> *mut u8 {
        <[u8]>::as_mut_ptr(self)
    }
}
1910
1911 #[cfg(any(test, feature = "alloc"))]
1912 mod alloc_support {
1913 pub(crate) extern crate alloc;
1914 pub(crate) use super::*;
1915 pub(crate) use alloc::alloc::Layout;
1916 pub(crate) use alloc::boxed::Box;
1917 pub(crate) use alloc::vec::Vec;
1918 pub(crate) use core::mem::{align_of, size_of};
1919 pub(crate) use core::ptr::NonNull;
1920
1921 /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
1922 /// vector. The new items are initialized with zeroes.
1923 ///
1924 /// # Panics
1925 ///
1926 /// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
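    ///
    /// # Example
    ///
    /// A minimal usage sketch (requires the `alloc` feature; values are
    /// arbitrary):
    ///
    /// ```
    /// # use zerocopy::extend_vec_zeroed;
    /// let mut v = vec![100u64, 200, 300];
    /// extend_vec_zeroed(&mut v, 3);
    /// assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
    /// ```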
    pub fn extend_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, additional: usize) {
1928 insert_vec_zeroed(v, v.len(), additional);
1929 }
1930
1931 /// Inserts `additional` new items into `Vec<T>` at `position`.
1932 /// The new items are initialized with zeroes.
1933 ///
1934 /// # Panics
1935 ///
1936 /// * Panics if `position > v.len()`.
1937 /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
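    ///
    /// # Example
    ///
    /// A minimal usage sketch (requires the `alloc` feature; values are
    /// arbitrary):
    ///
    /// ```
    /// # use zerocopy::insert_vec_zeroed;
    /// let mut v = vec![100u64, 200, 300];
    /// insert_vec_zeroed(&mut v, 1, 2);
    /// assert_eq!(&*v, &[100, 0, 0, 200, 300]);
    /// ```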
    pub fn insert_vec_zeroed<T: FromBytes>(v: &mut Vec<T>, position: usize, additional: usize) {
1939 assert!(position <= v.len());
1940 v.reserve(additional);
1941 // The reserve() call guarantees that these cannot overflow:
1942 // * `ptr.add(position)`
1943 // * `position + additional`
1944 // * `v.len() + additional`
1945 //
1946 // `v.len() - position` cannot overflow because we asserted that
1947 // position <= v.len().
1948 unsafe {
1949 // This is a potentially overlapping copy.
1950 let ptr = v.as_mut_ptr();
1951 ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
1952 ptr.add(position).write_bytes(0, additional);
1953 v.set_len(v.len() + additional);
1954 }
1955 }
1956 }
1957
1958 #[cfg(any(test, feature = "alloc"))]
1959 #[doc(inline)]
1960 pub use alloc_support::*;
1961
1962 #[cfg(test)]
1963 mod tests {
1964 #![allow(clippy::unreadable_literal)]
1965
1966 use core::ops::Deref;
1967
1968 use super::*;
1969
1970 // B should be [u8; N]. T will require that the entire structure is aligned
1971 // to the alignment of T.
1972 #[derive(Default)]
1973 struct AlignedBuffer<T, B> {
1974 buf: B,
1975 _t: T,
1976 }
1977
1978 impl<T, B: Default> AlignedBuffer<T, B> {
        fn clear_buf(&mut self) {
1980 self.buf = B::default();
1981 }
1982 }
1983
1984 // convert a u64 to bytes using this platform's endianness
    fn u64_to_bytes(u: u64) -> [u8; 8] {
1986 unsafe { ptr::read(&u as *const u64 as *const [u8; 8]) }
1987 }
1988
1989 #[test]
    fn test_read_write() {
1991 const VAL: u64 = 0x12345678;
1992 #[cfg(target_endian = "big")]
1993 const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
1994 #[cfg(target_endian = "little")]
1995 const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();
1996
1997 // Test FromBytes::{read_from, read_from_prefix, read_from_suffix}
1998
1999 assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
2000 // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
2001 // zeroes.
2002 let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
2003 assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
2004 assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
2005 // The first 8 bytes are all zeroes and the second 8 bytes are from
2006 // `VAL_BYTES`
2007 let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
2008 assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
2009 assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));
2010
2011 // Test AsBytes::{write_to, write_to_prefix, write_to_suffix}
2012
2013 let mut bytes = [0u8; 8];
2014 assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
2015 assert_eq!(bytes, VAL_BYTES);
2016 let mut bytes = [0u8; 16];
2017 assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
2018 let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
2019 assert_eq!(bytes, want);
2020 let mut bytes = [0u8; 16];
2021 assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
2022 let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
2023 assert_eq!(bytes, want);
2024 }
2025
2026 #[test]
    fn test_transmute() {
2028 // Test that memory is transmuted as expected.
2029 let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
2030 let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
2031 let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
2032 assert_eq!(x, array_of_arrays);
2033 let x: [u8; 8] = transmute!(array_of_arrays);
2034 assert_eq!(x, array_of_u8s);
2035
2036 // Test that the source expression's value is forgotten rather than
2037 // dropped.
2038 #[derive(AsBytes)]
2039 #[repr(transparent)]
2040 struct PanicOnDrop(());
2041 impl Drop for PanicOnDrop {
2042 fn drop(&mut self) {
2043 panic!("PanicOnDrop::drop");
2044 }
2045 }
2046 let _: () = transmute!(PanicOnDrop(()));
2047 }
2048
2049 #[test]
    fn test_address() {
2051 // test that the Deref and DerefMut implementations return a reference
2052 // which points to the right region of memory
2053
2054 let buf = [0];
2055 let lv = LayoutVerified::<_, u8>::new(&buf[..]).unwrap();
2056 let buf_ptr = buf.as_ptr();
2057 let deref_ptr = lv.deref() as *const u8;
2058 assert_eq!(buf_ptr, deref_ptr);
2059
2060 let buf = [0];
2061 let lv = LayoutVerified::<_, [u8]>::new_slice(&buf[..]).unwrap();
2062 let buf_ptr = buf.as_ptr();
2063 let deref_ptr = lv.deref().as_ptr();
2064 assert_eq!(buf_ptr, deref_ptr);
2065 }
2066
2067 // verify that values written to a LayoutVerified are properly shared
2068 // between the typed and untyped representations, that reads via `deref` and
2069 // `read` behave the same, and that writes via `deref_mut` and `write`
2070 // behave the same
    fn test_new_helper<'a>(mut lv: LayoutVerified<&'a mut [u8], u64>) {
2072 // assert that the value starts at 0
2073 assert_eq!(*lv, 0);
2074 assert_eq!(lv.read(), 0);
2075
2076 // assert that values written to the typed value are reflected in the
2077 // byte slice
2078 const VAL1: u64 = 0xFF00FF00FF00FF00;
2079 *lv = VAL1;
2080 assert_eq!(lv.bytes(), &u64_to_bytes(VAL1));
2081 *lv = 0;
2082 lv.write(VAL1);
2083 assert_eq!(lv.bytes(), &u64_to_bytes(VAL1));
2084
2085 // assert that values written to the byte slice are reflected in the
2086 // typed value
2087 const VAL2: u64 = !VAL1; // different from VAL1
2088 lv.bytes_mut().copy_from_slice(&u64_to_bytes(VAL2)[..]);
2089 assert_eq!(*lv, VAL2);
2090 assert_eq!(lv.read(), VAL2);
2091 }
2092
2093 // verify that values written to a LayoutVerified are properly shared
2094 // between the typed and untyped representations; pass a value with
2095 // `typed_len` `u64`s backed by an array of `typed_len * 8` bytes.
    fn test_new_helper_slice<'a>(mut lv: LayoutVerified<&'a mut [u8], [u64]>, typed_len: usize) {
2097 // assert that the value starts out zeroed
2098 assert_eq!(&*lv, vec![0; typed_len].as_slice());
2099
2100 // check the backing storage is the exact same slice
2101 let untyped_len = typed_len * 8;
2102 assert_eq!(lv.bytes().len(), untyped_len);
2103 assert_eq!(lv.bytes().as_ptr(), lv.as_ptr() as *const u8);
2104
2105 // assert that values written to the typed value are reflected in the
2106 // byte slice
2107 const VAL1: u64 = 0xFF00FF00FF00FF00;
2108 for typed in &mut *lv {
2109 *typed = VAL1;
2110 }
2111 assert_eq!(lv.bytes(), VAL1.to_ne_bytes().repeat(typed_len).as_slice());
2112
2113 // assert that values written to the byte slice are reflected in the
2114 // typed value
2115 const VAL2: u64 = !VAL1; // different from VAL1
2116 lv.bytes_mut().copy_from_slice(&VAL2.to_ne_bytes().repeat(typed_len));
2117 assert!(lv.iter().copied().all(|x| x == VAL2));
2118 }
2119
2120 // verify that values written to a LayoutVerified are properly shared
2121 // between the typed and untyped representations, that reads via `deref` and
2122 // `read` behave the same, and that writes via `deref_mut` and `write`
2123 // behave the same
    fn test_new_helper_unaligned<'a>(mut lv: LayoutVerified<&'a mut [u8], [u8; 8]>) {
2125 // assert that the value starts at 0
2126 assert_eq!(*lv, [0; 8]);
2127 assert_eq!(lv.read(), [0; 8]);
2128
2129 // assert that values written to the typed value are reflected in the
2130 // byte slice
2131 const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
2132 *lv = VAL1;
2133 assert_eq!(lv.bytes(), &VAL1);
2134 *lv = [0; 8];
2135 lv.write(VAL1);
2136 assert_eq!(lv.bytes(), &VAL1);
2137
2138 // assert that values written to the byte slice are reflected in the
2139 // typed value
2140 const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
2141 lv.bytes_mut().copy_from_slice(&VAL2[..]);
2142 assert_eq!(*lv, VAL2);
2143 assert_eq!(lv.read(), VAL2);
2144 }
2145
2146 // verify that values written to a LayoutVerified are properly shared
2147 // between the typed and untyped representations; pass a value with
2148 // `len` `u8`s backed by an array of `len` bytes.
    fn test_new_helper_slice_unaligned<'a>(mut lv: LayoutVerified<&'a mut [u8], [u8]>, len: usize) {
2150 // assert that the value starts out zeroed
2151 assert_eq!(&*lv, vec![0u8; len].as_slice());
2152
2153 // check the backing storage is the exact same slice
2154 assert_eq!(lv.bytes().len(), len);
2155 assert_eq!(lv.bytes().as_ptr(), lv.as_ptr());
2156
2157 // assert that values written to the typed value are reflected in the
2158 // byte slice
2159 let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
2160 lv.copy_from_slice(&expected_bytes);
2161 assert_eq!(lv.bytes(), expected_bytes.as_slice());
2162
2163 // assert that values written to the byte slice are reflected in the
2164 // typed value
2165 for byte in &mut expected_bytes {
            *byte = !*byte; // different from the original `expected_bytes`
2167 }
2168 lv.bytes_mut().copy_from_slice(&expected_bytes);
2169 assert_eq!(&*lv, expected_bytes.as_slice());
2170 }
2171
2172 #[test]
    fn test_new_aligned_sized() {
2174 // Test that a properly-aligned, properly-sized buffer works for new,
        // new_from_prefix, and new_from_suffix, and that new_from_prefix and
2176 // new_from_suffix return empty slices. Test that a properly-aligned
2177 // buffer whose length is a multiple of the element size works for
2178 // new_slice. Test that xxx_zeroed behaves the same, and zeroes the
2179 // memory.
2180
2181 // a buffer with an alignment of 8
2182 let mut buf = AlignedBuffer::<u64, [u8; 8]>::default();
2183 // buf.buf should be aligned to 8, so this should always succeed
2184 test_new_helper(LayoutVerified::<_, u64>::new(&mut buf.buf[..]).unwrap());
2185 buf.buf = [0xFFu8; 8];
2186 test_new_helper(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).unwrap());
2187 {
2188 // in a block so that lv and suffix don't live too long
2189 buf.clear_buf();
2190 let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
2191 assert!(suffix.is_empty());
2192 test_new_helper(lv);
2193 }
2194 {
2195 buf.buf = [0xFFu8; 8];
2196 let (lv, suffix) =
2197 LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
2198 assert!(suffix.is_empty());
2199 test_new_helper(lv);
2200 }
2201 {
2202 buf.clear_buf();
2203 let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
2204 assert!(prefix.is_empty());
2205 test_new_helper(lv);
2206 }
2207 {
2208 buf.buf = [0xFFu8; 8];
2209 let (prefix, lv) =
2210 LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
2211 assert!(prefix.is_empty());
2212 test_new_helper(lv);
2213 }
2214
2215 // a buffer with alignment 8 and length 16
2216 let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
2217 // buf.buf should be aligned to 8 and have a length which is a multiple
2218 // of size_of::<u64>(), so this should always succeed
2219 test_new_helper_slice(LayoutVerified::<_, [u64]>::new_slice(&mut buf.buf[..]).unwrap(), 2);
2220 buf.buf = [0xFFu8; 16];
2221 test_new_helper_slice(
2222 LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[..]).unwrap(),
2223 2,
2224 );
2225
2226 {
2227 buf.clear_buf();
2228 let (lv, suffix) =
2229 LayoutVerified::<_, [u64]>::new_slice_from_prefix(&mut buf.buf[..], 1).unwrap();
2230 assert_eq!(suffix, [0; 8]);
2231 test_new_helper_slice(lv, 1);
2232 }
2233 {
2234 buf.buf = [0xFFu8; 16];
2235 let (lv, suffix) =
2236 LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[..], 1)
2237 .unwrap();
2238 assert_eq!(suffix, [0xFF; 8]);
2239 test_new_helper_slice(lv, 1);
2240 }
2241 {
2242 buf.clear_buf();
2243 let (prefix, lv) =
2244 LayoutVerified::<_, [u64]>::new_slice_from_suffix(&mut buf.buf[..], 1).unwrap();
2245 assert_eq!(prefix, [0; 8]);
2246 test_new_helper_slice(lv, 1);
2247 }
2248 {
2249 buf.buf = [0xFFu8; 16];
2250 let (prefix, lv) =
2251 LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[..], 1)
2252 .unwrap();
2253 assert_eq!(prefix, [0xFF; 8]);
2254 test_new_helper_slice(lv, 1);
2255 }
2256 }
2257
2258 #[test]
    fn test_new_unaligned_sized() {
2260 // Test that an unaligned, properly-sized buffer works for
2261 // new_unaligned, new_unaligned_from_prefix, and
        // new_unaligned_from_suffix, and that new_unaligned_from_prefix and
        // new_unaligned_from_suffix return empty slices. Test that an unaligned
        // buffer whose length is a multiple of the element size works for
        // new_slice_unaligned. Test that xxx_zeroed behaves the same, and zeroes the
2266 // memory.
2267
2268 let mut buf = [0u8; 8];
2269 test_new_helper_unaligned(
2270 LayoutVerified::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap(),
2271 );
2272 buf = [0xFFu8; 8];
2273 test_new_helper_unaligned(
2274 LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap(),
2275 );
2276 {
2277 // in a block so that lv and suffix don't live too long
2278 buf = [0u8; 8];
2279 let (lv, suffix) =
2280 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
2281 assert!(suffix.is_empty());
2282 test_new_helper_unaligned(lv);
2283 }
2284 {
2285 buf = [0xFFu8; 8];
2286 let (lv, suffix) =
2287 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
2288 .unwrap();
2289 assert!(suffix.is_empty());
2290 test_new_helper_unaligned(lv);
2291 }
2292 {
2293 buf = [0u8; 8];
2294 let (prefix, lv) =
2295 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
2296 assert!(prefix.is_empty());
2297 test_new_helper_unaligned(lv);
2298 }
2299 {
2300 buf = [0xFFu8; 8];
2301 let (prefix, lv) =
2302 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
2303 .unwrap();
2304 assert!(prefix.is_empty());
2305 test_new_helper_unaligned(lv);
2306 }
2307
2308 let mut buf = [0u8; 16];
        // `buf` has no alignment requirement, and every length is a multiple
        // of size_of::<u8>(), so this should always succeed
2311 test_new_helper_slice_unaligned(
2312 LayoutVerified::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
2313 16,
2314 );
2315 buf = [0xFFu8; 16];
2316 test_new_helper_slice_unaligned(
2317 LayoutVerified::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
2318 16,
2319 );
2320
2321 {
2322 buf = [0u8; 16];
2323 let (lv, suffix) =
2324 LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8)
2325 .unwrap();
2326 assert_eq!(suffix, [0; 8]);
2327 test_new_helper_slice_unaligned(lv, 8);
2328 }
2329 {
2330 buf = [0xFFu8; 16];
2331 let (lv, suffix) =
2332 LayoutVerified::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8)
2333 .unwrap();
2334 assert_eq!(suffix, [0xFF; 8]);
2335 test_new_helper_slice_unaligned(lv, 8);
2336 }
2337 {
2338 buf = [0u8; 16];
2339 let (prefix, lv) =
2340 LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8)
2341 .unwrap();
2342 assert_eq!(prefix, [0; 8]);
2343 test_new_helper_slice_unaligned(lv, 8);
2344 }
2345 {
2346 buf = [0xFFu8; 16];
2347 let (prefix, lv) =
2348 LayoutVerified::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8)
2349 .unwrap();
2350 assert_eq!(prefix, [0xFF; 8]);
2351 test_new_helper_slice_unaligned(lv, 8);
2352 }
2353 }
2354
2355 #[test]
    fn test_new_oversized() {
2357 // Test that a properly-aligned, overly-sized buffer works for
2358 // new_from_prefix and new_from_suffix, and that they return the
2359 // remainder and prefix of the slice respectively. Test that xxx_zeroed
2360 // behaves the same, and zeroes the memory.
2361
2362 let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
2363 {
2364 // in a block so that lv and suffix don't live too long
2365 // buf.buf should be aligned to 8, so this should always succeed
2366 let (lv, suffix) = LayoutVerified::<_, u64>::new_from_prefix(&mut buf.buf[..]).unwrap();
2367 assert_eq!(suffix.len(), 8);
2368 test_new_helper(lv);
2369 }
2370 {
2371 buf.buf = [0xFFu8; 16];
2372 // buf.buf should be aligned to 8, so this should always succeed
2373 let (lv, suffix) =
2374 LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).unwrap();
2375 // assert that the suffix wasn't zeroed
2376 assert_eq!(suffix, &[0xFFu8; 8]);
2377 test_new_helper(lv);
2378 }
2379 {
2380 buf.clear_buf();
2381 // buf.buf should be aligned to 8, so this should always succeed
2382 let (prefix, lv) = LayoutVerified::<_, u64>::new_from_suffix(&mut buf.buf[..]).unwrap();
2383 assert_eq!(prefix.len(), 8);
2384 test_new_helper(lv);
2385 }
2386 {
2387 buf.buf = [0xFFu8; 16];
2388 // buf.buf should be aligned to 8, so this should always succeed
2389 let (prefix, lv) =
2390 LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).unwrap();
2391 // assert that the prefix wasn't zeroed
2392 assert_eq!(prefix, &[0xFFu8; 8]);
2393 test_new_helper(lv);
2394 }
2395 }
2396
2397 #[test]
    fn test_new_unaligned_oversized() {
        // Test that an unaligned, overly-sized buffer works for
2400 // new_unaligned_from_prefix and new_unaligned_from_suffix, and that
2401 // they return the remainder and prefix of the slice respectively. Test
2402 // that xxx_zeroed behaves the same, and zeroes the memory.
2403
2404 let mut buf = [0u8; 16];
2405 {
2406 // in a block so that lv and suffix don't live too long
2407 let (lv, suffix) =
2408 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
2409 assert_eq!(suffix.len(), 8);
2410 test_new_helper_unaligned(lv);
2411 }
2412 {
2413 buf = [0xFFu8; 16];
2414 let (lv, suffix) =
2415 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..])
2416 .unwrap();
2417 // assert that the suffix wasn't zeroed
2418 assert_eq!(suffix, &[0xFF; 8]);
2419 test_new_helper_unaligned(lv);
2420 }
2421 {
2422 buf = [0u8; 16];
2423 let (prefix, lv) =
2424 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
2425 assert_eq!(prefix.len(), 8);
2426 test_new_helper_unaligned(lv);
2427 }
2428 {
2429 buf = [0xFFu8; 16];
2430 let (prefix, lv) =
2431 LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..])
2432 .unwrap();
2433 // assert that the prefix wasn't zeroed
2434 assert_eq!(prefix, &[0xFF; 8]);
2435 test_new_helper_unaligned(lv);
2436 }
2437 }
2438
2439 #[test]
2440 #[allow(clippy::cognitive_complexity)]
    fn test_new_error() {
2442 // fail because the buffer is too large
2443
2444 // a buffer with an alignment of 8
2445 let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
2446 // buf.buf should be aligned to 8, so only the length check should fail
2447 assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
2448 assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
2449 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
2450 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
2451
2452 // fail because the buffer is too small
2453
2454 // a buffer with an alignment of 8
2455 let mut buf = AlignedBuffer::<u64, [u8; 4]>::default();
2456 // buf.buf should be aligned to 8, so only the length check should fail
2457 assert!(LayoutVerified::<_, u64>::new(&buf.buf[..]).is_none());
2458 assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[..]).is_none());
2459 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned(&buf.buf[..]).is_none());
2460 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.buf[..]).is_none());
2461 assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[..]).is_none());
2462 assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[..]).is_none());
2463 assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
2464 assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
2465 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.buf[..]).is_none());
2466 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.buf[..])
2467 .is_none());
2468 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.buf[..]).is_none());
2469 assert!(LayoutVerified::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.buf[..])
2470 .is_none());
2471
2472 // fail because the length is not a multiple of the element size
2473
2474 let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
2475 // buf.buf has length 12, but element size is 8
2476 assert!(LayoutVerified::<_, [u64]>::new_slice(&buf.buf[..]).is_none());
2477 assert!(LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[..]).is_none());
2478 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned(&buf.buf[..]).is_none());
2479 assert!(
2480 LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.buf[..]).is_none()
2481 );
2482
        // fail because the buffer is too short.
2484 let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
2485 // buf.buf has length 12, but the element size is 8 (and we're expecting two of them).
2486 assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[..], 2).is_none());
2487 assert!(
2488 LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[..], 2).is_none()
2489 );
2490 assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[..], 2).is_none());
2491 assert!(
2492 LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[..], 2).is_none()
2493 );
2494 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.buf[..], 2)
2495 .is_none());
2496 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
2497 &mut buf.buf[..],
2498 2
2499 )
2500 .is_none());
2501 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.buf[..], 2)
2502 .is_none());
2503 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
2504 &mut buf.buf[..],
2505 2
2506 )
2507 .is_none());
2508
2509 // fail because the alignment is insufficient
2510
2511 // a buffer with an alignment of 8
2512 let mut buf = AlignedBuffer::<u64, [u8; 12]>::default();
2513 // slicing from 4, we get a buffer with size 8 (so the length check
2514 // should succeed) but an alignment of only 4, which is insufficient
2515 assert!(LayoutVerified::<_, u64>::new(&buf.buf[4..]).is_none());
2516 assert!(LayoutVerified::<_, u64>::new_zeroed(&mut buf.buf[4..]).is_none());
2517 assert!(LayoutVerified::<_, u64>::new_from_prefix(&buf.buf[4..]).is_none());
2518 assert!(LayoutVerified::<_, u64>::new_from_prefix_zeroed(&mut buf.buf[4..]).is_none());
2519 assert!(LayoutVerified::<_, [u64]>::new_slice(&buf.buf[4..]).is_none());
2520 assert!(LayoutVerified::<_, [u64]>::new_slice_zeroed(&mut buf.buf[4..]).is_none());
2521 assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[4..], 1).is_none());
2522 assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(&mut buf.buf[4..], 1)
2523 .is_none());
2524 assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[4..], 1).is_none());
2525 assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(&mut buf.buf[4..], 1)
2526 .is_none());
2527 // slicing from 4 should be unnecessary because new_from_suffix[_zeroed]
2528 // use the suffix of the slice
2529 assert!(LayoutVerified::<_, u64>::new_from_suffix(&buf.buf[..]).is_none());
2530 assert!(LayoutVerified::<_, u64>::new_from_suffix_zeroed(&mut buf.buf[..]).is_none());
2531
2532 // fail due to arithmetic overflow
2533
2534 let mut buf = AlignedBuffer::<u64, [u8; 16]>::default();
2535 let unreasonable_len = std::usize::MAX / mem::size_of::<u64>() + 1;
2536 assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix(&buf.buf[..], unreasonable_len)
2537 .is_none());
2538 assert!(LayoutVerified::<_, [u64]>::new_slice_from_prefix_zeroed(
2539 &mut buf.buf[..],
2540 unreasonable_len
2541 )
2542 .is_none());
2543 assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix(&buf.buf[..], unreasonable_len)
2544 .is_none());
2545 assert!(LayoutVerified::<_, [u64]>::new_slice_from_suffix_zeroed(
2546 &mut buf.buf[..],
2547 unreasonable_len
2548 )
2549 .is_none());
2550 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(
2551 &buf.buf[..],
2552 unreasonable_len
2553 )
2554 .is_none());
2555 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
2556 &mut buf.buf[..],
2557 unreasonable_len
2558 )
2559 .is_none());
2560 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(
2561 &buf.buf[..],
2562 unreasonable_len
2563 )
2564 .is_none());
2565 assert!(LayoutVerified::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
2566 &mut buf.buf[..],
2567 unreasonable_len
2568 )
2569 .is_none());
2570 }
2571
2572 // Tests for ensuring that, if a ZST is passed into a slice-like function, we always
2573 // panic. Since these tests need to be separate per-function, and they tend to take
2574 // up a lot of space, we generate them using a macro in a submodule instead. The
2575 // submodule ensures that we can just re-use the name of the function under test for
2576 // the name of the test itself.
2577 mod test_zst_panics {
2578 macro_rules! zst_test {
2579 ($name:ident($($tt:tt)*)) => {
2580 #[test]
2581 #[should_panic = "assertion failed"]
2582 fn $name() {
2583 let mut buffer = [0u8];
2584 let lv = $crate::LayoutVerified::<_, [()]>::$name(&mut buffer[..], $($tt)*);
2585 unreachable!("should have panicked, got {:?}", lv);
2586 }
2587 }
2588 }
2589 zst_test!(new_slice());
2590 zst_test!(new_slice_zeroed());
2591 zst_test!(new_slice_from_prefix(1));
2592 zst_test!(new_slice_from_prefix_zeroed(1));
2593 zst_test!(new_slice_from_suffix(1));
2594 zst_test!(new_slice_from_suffix_zeroed(1));
2595 zst_test!(new_slice_unaligned());
2596 zst_test!(new_slice_unaligned_zeroed());
2597 zst_test!(new_slice_unaligned_from_prefix(1));
2598 zst_test!(new_slice_unaligned_from_prefix_zeroed(1));
2599 zst_test!(new_slice_unaligned_from_suffix(1));
2600 zst_test!(new_slice_unaligned_from_suffix_zeroed(1));
2601 }
2602
2603 #[test]
    fn test_as_bytes_methods() {
2605 #[derive(Debug, Eq, PartialEq, FromBytes, AsBytes)]
2606 #[repr(C)]
2607 struct Foo {
2608 a: u32,
2609 b: u32,
2610 }
2611
2612 let mut foo = Foo { a: 1, b: 2 };
2613 // Test that we can access the underlying bytes, and that we get the
2614 // right bytes and the right number of bytes.
2615 assert_eq!(foo.as_bytes(), [1, 0, 0, 0, 2, 0, 0, 0]);
2616 // Test that changes to the underlying byte slices are reflected in the
2617 // original object.
2618 foo.as_bytes_mut()[0] = 3;
2619 assert_eq!(foo, Foo { a: 3, b: 2 });
2620
2621 // Do the same tests for a slice, which ensures that this logic works
2622 // for unsized types as well.
2623 let foo = &mut [Foo { a: 1, b: 2 }, Foo { a: 3, b: 4 }];
2624 assert_eq!(foo.as_bytes(), [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0, 4, 0, 0, 0]);
2625 foo.as_bytes_mut()[8] = 5;
2626 assert_eq!(foo, &mut [Foo { a: 1, b: 2 }, Foo { a: 5, b: 4 }]);
2627 }
2628
2629 #[test]
    fn test_array() {
2631 // This is a hack, as per above in `test_as_bytes_methods`.
2632 mod zerocopy {
2633 pub use crate::*;
2634 }
2635 #[derive(FromBytes, AsBytes)]
2636 #[repr(C)]
2637 struct Foo {
2638 a: [u16; 33],
2639 }
2640
2641 let foo = Foo { a: [0xFFFF; 33] };
2642 let expected = [0xFFu8; 66];
2643 assert_eq!(foo.as_bytes(), &expected[..]);
2644 }
2645
2646 #[test]
    fn test_display_debug() {
2648 let buf = AlignedBuffer::<u64, [u8; 8]>::default();
2649 let lv = LayoutVerified::<_, u64>::new(&buf.buf[..]).unwrap();
2650 assert_eq!(format!("{}", lv), "0");
2651 assert_eq!(format!("{:?}", lv), "LayoutVerified(0)");
2652
2653 let buf = AlignedBuffer::<u64, [u8; 8]>::default();
2654 let lv = LayoutVerified::<_, [u64]>::new_slice(&buf.buf[..]).unwrap();
2655 assert_eq!(format!("{:?}", lv), "LayoutVerified([0])");
2656 }
2657
2658 #[test]
    fn test_eq() {
2660 let buf = [0u8; 8];
2661 let lv1 = LayoutVerified::<_, u64>::new(&buf[..]).unwrap();
2662 let lv2 = LayoutVerified::<_, u64>::new(&buf[..]).unwrap();
2663 assert_eq!(lv1, lv2);
2664 }
2665
2666 #[test]
    fn test_ne() {
2668 let buf1 = [0u8; 8];
2669 let lv1 = LayoutVerified::<_, u64>::new(&buf1[..]).unwrap();
2670 let buf2 = [1u8; 8];
2671 let lv2 = LayoutVerified::<_, u64>::new(&buf2[..]).unwrap();
2672 assert_ne!(lv1, lv2);
2673 }
2674
2675 #[test]
    fn test_ord() {
2677 let buf1 = [0u8; 8];
2678 let lv1 = LayoutVerified::<_, u64>::new(&buf1[..]).unwrap();
2679 let buf2 = [1u8; 8];
2680 let lv2 = LayoutVerified::<_, u64>::new(&buf2[..]).unwrap();
2681 assert!(lv1 < lv2);
2682 }
2683
2684 #[test]
    fn test_new_zeroed() {
2686 assert_eq!(u64::new_zeroed(), 0);
2687 assert_eq!(<()>::new_zeroed(), ());
2688 }
2689
2690 #[test]
    fn test_new_box_zeroed() {
2692 assert_eq!(*u64::new_box_zeroed(), 0);
2693 }
2694
2695 #[test]
    fn test_new_box_zeroed_array() {
2697 drop(<[u32; 0x1000]>::new_box_zeroed());
2698 }
2699
2700 #[test]
    fn test_new_box_zeroed_zst() {
2702 assert_eq!(*<()>::new_box_zeroed(), ());
2703 }
2704
2705 #[test]
    fn test_new_box_slice_zeroed() {
2707 let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
2708 assert_eq!(s.len(), 3);
2709 assert_eq!(&*s, &[0, 0, 0]);
2710 s[1] = 3;
2711 assert_eq!(&*s, &[0, 3, 0]);
2712 }
2713
2714 #[test]
    fn test_new_box_slice_zeroed_empty() {
2716 let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
2717 assert_eq!(s.len(), 0);
2718 }
2719
2720 #[test]
    fn test_new_box_slice_zeroed_zst() {
2722 let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
2723 assert_eq!(s.len(), 3);
2724 assert!(s.get(10).is_none());
2725 assert_eq!(s[1], ());
2726 s[2] = ();
2727 }
2728
2729 #[test]
    fn test_new_box_slice_zeroed_zst_empty() {
2731 let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
2732 assert_eq!(s.len(), 0);
2733 }
2734
2735 #[test]
    fn test_extend_vec_zeroed() {
2737 // test extending when there is an existing allocation
2738 let mut v: Vec<u64> = Vec::with_capacity(3);
2739 v.push(100);
2740 v.push(200);
2741 v.push(300);
2742 extend_vec_zeroed(&mut v, 3);
2743 assert_eq!(v.len(), 6);
2744 assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
2745 drop(v);
2746
2747 // test extending when there is no existing allocation
2748 let mut v: Vec<u64> = Vec::new();
2749 extend_vec_zeroed(&mut v, 3);
2750 assert_eq!(v.len(), 3);
2751 assert_eq!(&*v, &[0, 0, 0]);
2752 drop(v);
2753 }
2754
2755 #[test]
    fn test_extend_vec_zeroed_zst() {
2757 // test extending when there is an existing (fake) allocation
2758 let mut v: Vec<()> = Vec::with_capacity(3);
2759 v.push(());
2760 v.push(());
2761 v.push(());
2762 extend_vec_zeroed(&mut v, 3);
2763 assert_eq!(v.len(), 6);
2764 assert_eq!(&*v, &[(), (), (), (), (), ()]);
2765 drop(v);
2766
2767 // test extending when there is no existing (fake) allocation
2768 let mut v: Vec<()> = Vec::new();
2769 extend_vec_zeroed(&mut v, 3);
2770 assert_eq!(&*v, &[(), (), ()]);
2771 drop(v);
2772 }
2773
2774 #[test]
    fn test_insert_vec_zeroed() {
2776 // insert at start (no existing allocation)
2777 let mut v: Vec<u64> = Vec::new();
2778 insert_vec_zeroed(&mut v, 0, 2);
2779 assert_eq!(v.len(), 2);
2780 assert_eq!(&*v, &[0, 0]);
2781 drop(v);
2782
2783 // insert at start
2784 let mut v: Vec<u64> = Vec::with_capacity(3);
2785 v.push(100);
2786 v.push(200);
2787 v.push(300);
2788 insert_vec_zeroed(&mut v, 0, 2);
2789 assert_eq!(v.len(), 5);
2790 assert_eq!(&*v, &[0, 0, 100, 200, 300]);
2791 drop(v);
2792
2793 // insert at middle
2794 let mut v: Vec<u64> = Vec::with_capacity(3);
2795 v.push(100);
2796 v.push(200);
2797 v.push(300);
2798 insert_vec_zeroed(&mut v, 1, 1);
2799 assert_eq!(v.len(), 4);
2800 assert_eq!(&*v, &[100, 0, 200, 300]);
2801 drop(v);
2802
2803 // insert at end
2804 let mut v: Vec<u64> = Vec::with_capacity(3);
2805 v.push(100);
2806 v.push(200);
2807 v.push(300);
2808 insert_vec_zeroed(&mut v, 3, 1);
2809 assert_eq!(v.len(), 4);
2810 assert_eq!(&*v, &[100, 200, 300, 0]);
2811 drop(v);
2812 }
2813
2814 #[test]
    fn test_insert_vec_zeroed_zst() {
2816 // insert at start (no existing fake allocation)
2817 let mut v: Vec<()> = Vec::new();
2818 insert_vec_zeroed(&mut v, 0, 2);
2819 assert_eq!(v.len(), 2);
2820 assert_eq!(&*v, &[(), ()]);
2821 drop(v);
2822
2823 // insert at start
2824 let mut v: Vec<()> = Vec::with_capacity(3);
2825 v.push(());
2826 v.push(());
2827 v.push(());
2828 insert_vec_zeroed(&mut v, 0, 2);
2829 assert_eq!(v.len(), 5);
2830 assert_eq!(&*v, &[(), (), (), (), ()]);
2831 drop(v);
2832
2833 // insert at middle
2834 let mut v: Vec<()> = Vec::with_capacity(3);
2835 v.push(());
2836 v.push(());
2837 v.push(());
2838 insert_vec_zeroed(&mut v, 1, 1);
2839 assert_eq!(v.len(), 4);
2840 assert_eq!(&*v, &[(), (), (), ()]);
2841 drop(v);
2842
2843 // insert at end
2844 let mut v: Vec<()> = Vec::with_capacity(3);
2845 v.push(());
2846 v.push(());
2847 v.push(());
2848 insert_vec_zeroed(&mut v, 3, 1);
2849 assert_eq!(v.len(), 4);
2850 assert_eq!(&*v, &[(), (), (), ()]);
2851 drop(v);
2852 }
2853 }
2854