// Copyright 2018 The Fuchsia Authors
//
// Licensed under the 2-Clause BSD License <LICENSE-BSD or
// https://opensource.org/license/bsd-2-clause>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

// After updating the following doc comment, make sure to run the following
// command to update `README.md` based on its contents:
//
//   ./generate-readme.sh > README.md

//! *<span style="font-size: 100%; color:grey;">Want to help improve zerocopy?
//! Fill out our [user survey][user-survey]!</span>*
//!
//! ***<span style="font-size: 140%">Fast, safe, <span
//! style="color:red;">compile error</span>. Pick two.</span>***
//!
//! Zerocopy makes zero-cost memory manipulation effortless. We write `unsafe`
//! so you don't have to.
//!
//! # Overview
//!
//! Zerocopy provides four core marker traits, each of which can be derived
//! (e.g., `#[derive(FromZeroes)]`):
//! - [`FromZeroes`] indicates that a sequence of zero bytes represents a valid
//!   instance of a type
//! - [`FromBytes`] indicates that a type may safely be converted from an
//!   arbitrary byte sequence
//! - [`AsBytes`] indicates that a type may safely be converted *to* a byte
//!   sequence
//! - [`Unaligned`] indicates that a type's alignment requirement is 1
//!
//! Types which implement a subset of these traits can then be converted to/from
//! byte sequences with little to no runtime overhead.
//!
//! Zerocopy also provides byte-order aware integer types that support these
//! conversions; see the [`byteorder`] module. These types are especially useful
//! for network parsing.
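//!
//! For example, with the `derive` feature enabled, a fixed-layout packet
//! header can be parsed from raw bytes and viewed back as raw bytes without
//! copying. This is a minimal sketch; `UdpHeader` is a hypothetical type, not
//! an item provided by zerocopy:
//!
//! ```
//! use zerocopy::{AsBytes, FromBytes, FromZeroes};
//!
//! #[derive(FromZeroes, FromBytes, AsBytes)]
//! #[repr(C)]
//! struct UdpHeader {
//!     src_port: [u8; 2],
//!     dst_port: [u8; 2],
//!     length: [u8; 2],
//!     checksum: [u8; 2],
//! }
//!
//! let bytes = [0, 1, 2, 3, 4, 5, 6, 7];
//! let header = UdpHeader::read_from(&bytes[..]).unwrap();
//! assert_eq!(header.src_port, [0, 1]);
//! assert_eq!(header.as_bytes(), &bytes[..]);
//! ```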
//!
//! [user-survey]: https://docs.google.com/forms/d/e/1FAIpQLSdzBNTN9tzwsmtyZxRFNL02K36IWCdHWW2ZBckyQS2xiO3i8Q/viewform?usp=published_options
//!
//! # Cargo Features
//!
//! - **`alloc`**
//!   By default, `zerocopy` is `no_std`. When the `alloc` feature is enabled,
//!   the `alloc` crate is added as a dependency, and some allocation-related
//!   functionality is added.
//!
//! - **`byteorder`** (enabled by default)
//!   Adds the [`byteorder`] module and a dependency on the `byteorder` crate.
//!   The `byteorder` module provides byte order-aware equivalents of the
//!   multi-byte primitive numerical types. Unlike their primitive equivalents,
//!   the types in this module have no alignment requirement and support byte
//!   order conversions. This can be useful in handling file formats, network
//!   packet layouts, etc. which don't provide alignment guarantees and which
//!   may use a byte order different from that of the execution platform. A
//!   short example follows this feature list.
//!
//! - **`derive`**
//!   Provides derives for the core marker traits via the `zerocopy-derive`
//!   crate. These derives are re-exported from `zerocopy`, so it is not
//!   necessary to depend on `zerocopy-derive` directly.
//!
//!   However, you may experience better compile times if you instead directly
//!   depend on both `zerocopy` and `zerocopy-derive` in your `Cargo.toml`,
//!   since doing so will allow Rust to compile these crates in parallel. To do
//!   so, do *not* enable the `derive` feature, and list both dependencies in
//!   your `Cargo.toml` with the same leading non-zero version number; e.g.:
//!
//!   ```toml
//!   [dependencies]
//!   zerocopy = "0.X"
//!   zerocopy-derive = "0.X"
//!   ```
//!
//! - **`simd`**
//!   When the `simd` feature is enabled, `FromZeroes`, `FromBytes`, and
//!   `AsBytes` impls are emitted for all stable SIMD types which exist on the
//!   target platform. Note that the layout of SIMD types is not yet
//!   stabilized, so these impls may be removed in the future if layout changes
//!   make them invalid. For more information, see the Unsafe Code Guidelines
//!   Reference page on the [layout of packed SIMD vectors][simd-layout].
//!
//! - **`simd-nightly`**
//!   Enables the `simd` feature and adds support for SIMD types which are only
//!   available on nightly. Since these types are unstable, support for any
//!   type may be removed at any point in the future.
//!
//! [simd-layout]: https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
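//!
//! A minimal sketch of the byte order-aware types (assuming the default
//! `byteorder` feature is enabled):
//!
//! ```
//! use zerocopy::byteorder::big_endian::U32;
//! use zerocopy::AsBytes;
//!
//! // This `U32` alias is big-endian and has no alignment requirement.
//! let n = U32::new(42);
//! assert_eq!(n.get(), 42);
//! assert_eq!(n.as_bytes(), &[0, 0, 0, 42][..]);
//! ```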
//!
//! # Security Ethos
//!
//! Zerocopy is expressly designed for use in security-critical contexts. We
//! strive to ensure that zerocopy code is sound under Rust's current memory
//! model, and *any future memory model*. We ensure this by:
//! - **...not 'guessing' about Rust's semantics.**
//!   We annotate `unsafe` code with a precise rationale for its soundness that
//!   cites a relevant section of Rust's official documentation. When Rust's
//!   documented semantics are unclear, we work with the Rust Operational
//!   Semantics Team to clarify Rust's documentation.
//! - **...rigorously testing our implementation.**
//!   We run tests using [Miri], ensuring that zerocopy is sound across a wide
//!   array of supported target platforms of varying endianness and pointer
//!   width, and across both current and experimental memory models of Rust.
//! - **...formally proving the correctness of our implementation.**
//!   We apply formal verification tools like [Kani][kani] to prove zerocopy's
//!   correctness.
//!
//! For more information, see our full [soundness policy].
//!
//! [Miri]: https://github.com/rust-lang/miri
//! [Kani]: https://github.com/model-checking/kani
//! [soundness policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#soundness
//!
//! # Relationship to Project Safe Transmute
//!
//! [Project Safe Transmute] is an official initiative of the Rust Project to
//! develop language-level support for safer transmutation. The Project consults
//! with crates like zerocopy to identify aspects of safer transmutation that
//! would benefit from compiler support, and has developed an [experimental,
//! compiler-supported analysis][mcp-transmutability] which determines whether,
//! for a given type, any value of that type may be soundly transmuted into
//! another type. Once this functionality is sufficiently mature, zerocopy
//! intends to replace its internal transmutability analysis (implemented by our
//! custom derives) with the compiler-supported one. This change will likely be
//! an implementation detail that is invisible to zerocopy's users.
//!
//! Project Safe Transmute will not replace the need for most of zerocopy's
//! higher-level abstractions. The experimental compiler analysis is a tool for
//! checking the soundness of `unsafe` code, not a tool to avoid writing
//! `unsafe` code altogether. For the foreseeable future, crates like zerocopy
//! will still be required in order to provide higher-level abstractions on top
//! of the building block provided by Project Safe Transmute.
//!
//! [Project Safe Transmute]: https://rust-lang.github.io/rfcs/2835-project-safe-transmute.html
//! [mcp-transmutability]: https://github.com/rust-lang/compiler-team/issues/411
//!
//! # MSRV
//!
//! See our [MSRV policy].
//!
//! [MSRV policy]: https://github.com/google/zerocopy/blob/main/POLICIES.md#msrv
//!
//! # Changelog
//!
//! Zerocopy uses [GitHub Releases].
//!
//! [GitHub Releases]: https://github.com/google/zerocopy/releases

// Sometimes we want to use lints which were added after our MSRV.
// `unknown_lints` is `warn` by default and we deny warnings in CI, so without
// this attribute, any unknown lint would cause a CI failure when testing with
// our MSRV.
//
// TODO(#1201): Remove `unexpected_cfgs`
#![allow(unknown_lints, non_local_definitions, unexpected_cfgs)]
#![deny(renamed_and_removed_lints)]
#![deny(
    anonymous_parameters,
    deprecated_in_future,
    late_bound_lifetime_arguments,
    missing_copy_implementations,
    missing_debug_implementations,
    missing_docs,
    path_statements,
    patterns_in_fns_without_body,
    rust_2018_idioms,
    trivial_numeric_casts,
    unreachable_pub,
    unsafe_op_in_unsafe_fn,
    unused_extern_crates,
    unused_qualifications,
    variant_size_differences
)]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    deny(fuzzy_provenance_casts, lossy_provenance_casts)
)]
#![deny(
    clippy::all,
    clippy::alloc_instead_of_core,
    clippy::arithmetic_side_effects,
    clippy::as_underscore,
    clippy::assertions_on_result_states,
    clippy::as_conversions,
    clippy::correctness,
    clippy::dbg_macro,
    clippy::decimal_literal_representation,
    clippy::get_unwrap,
    clippy::indexing_slicing,
    clippy::missing_inline_in_public_items,
    clippy::missing_safety_doc,
    clippy::obfuscated_if_else,
    clippy::perf,
    clippy::print_stdout,
    clippy::std_instead_of_core,
    clippy::style,
    clippy::suspicious,
    clippy::todo,
    clippy::undocumented_unsafe_blocks,
    clippy::unimplemented,
    clippy::unnested_or_patterns,
    clippy::unwrap_used,
    clippy::use_debug
)]
#![deny(
    rustdoc::bare_urls,
    rustdoc::broken_intra_doc_links,
    rustdoc::invalid_codeblock_attributes,
    rustdoc::invalid_html_tags,
    rustdoc::invalid_rust_codeblocks,
    rustdoc::missing_crate_level_docs,
    rustdoc::private_intra_doc_links
)]
// In test code, it makes sense to weight more heavily towards concise, readable
// code over correct or debuggable code.
#![cfg_attr(any(test, kani), allow(
    // In tests, you get line numbers and have access to source code, so panic
    // messages are less important. You also often unwrap a lot, which would
    // make expect'ing instead very verbose.
    clippy::unwrap_used,
    // In tests, there's no harm in "panic risks" - the worst that can happen
    // is that your test will fail, and you'll fix it. By contrast, panic risks
    // in production code introduce the possibility of code panicking
    // unexpectedly "in the field".
    clippy::arithmetic_side_effects,
    clippy::indexing_slicing,
))]
#![cfg_attr(not(test), no_std)]
#![cfg_attr(
    all(feature = "simd-nightly", any(target_arch = "x86", target_arch = "x86_64")),
    feature(stdarch_x86_avx512)
)]
#![cfg_attr(
    all(feature = "simd-nightly", target_arch = "arm"),
    feature(stdarch_arm_dsp, stdarch_arm_neon_intrinsics)
)]
#![cfg_attr(
    all(feature = "simd-nightly", any(target_arch = "powerpc", target_arch = "powerpc64")),
    feature(stdarch_powerpc)
)]
#![cfg_attr(doc_cfg, feature(doc_cfg))]
#![cfg_attr(
    __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS,
    feature(layout_for_ptr, strict_provenance)
)]

// This is a hack to allow zerocopy-derive derives to work in this crate. They
// assume that zerocopy is linked as an extern crate, so they access items from
// it as `zerocopy::Xxx`. This makes that still work.
#[cfg(any(feature = "derive", test))]
extern crate self as zerocopy;

#[macro_use]
mod macros;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub mod byteorder;
#[doc(hidden)]
pub mod macro_util;
mod post_monomorphization_compile_fail_tests;
mod util;
// TODO(#252): If we make this pub, come up with a better name.
mod wrappers;

#[cfg(feature = "byteorder")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "byteorder")))]
pub use crate::byteorder::*;
pub use crate::wrappers::*;

#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::Unaligned;

// `pub use` separately here so that we can mark it `#[doc(hidden)]`.
//
// TODO(#29): Remove this or add a doc comment.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
#[doc(hidden)]
pub use zerocopy_derive::KnownLayout;

use core::{
    cell::{self, RefMut},
    cmp::Ordering,
    fmt::{self, Debug, Display, Formatter},
    hash::Hasher,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::{
        NonZeroI128, NonZeroI16, NonZeroI32, NonZeroI64, NonZeroI8, NonZeroIsize, NonZeroU128,
        NonZeroU16, NonZeroU32, NonZeroU64, NonZeroU8, NonZeroUsize, Wrapping,
    },
    ops::{Deref, DerefMut},
    ptr::{self, NonNull},
    slice,
};

#[cfg(feature = "alloc")]
extern crate alloc;
#[cfg(feature = "alloc")]
use alloc::{boxed::Box, vec::Vec};

#[cfg(any(feature = "alloc", kani))]
use core::alloc::Layout;

// Used by `TryFromBytes::is_bit_valid`.
#[doc(hidden)]
pub use crate::util::ptr::Ptr;

// For each polyfill, as soon as the corresponding feature is stable, the
// polyfill import will be unused because method/function resolution will prefer
// the inherent method/function over a trait method/function. Thus, we suppress
// the `unused_imports` warning.
//
// See the documentation on `util::polyfills` for more information.
#[allow(unused_imports)]
use crate::util::polyfills::NonNullExt as _;

#[rustversion::nightly]
#[cfg(all(test, not(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)))]
const _: () = {
    #[deprecated = "some tests may be skipped due to missing RUSTFLAGS=\"--cfg __INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS\""]
    const _WARNING: () = ();
    #[warn(deprecated)]
    _WARNING
};

/// The target pointer width, counted in bits.
const POINTER_WIDTH_BITS: usize = mem::size_of::<usize>() * 8;

/// The layout of a type which might be dynamically-sized.
///
/// `DstLayout` describes the layout of sized types, slice types, and "slice
/// DSTs" - i.e., those that are known by the type system to have a trailing
/// slice (as distinguished from `dyn Trait` types - such types *might* have a
/// trailing slice type, but the type system isn't aware of it).
///
/// # Safety
///
/// Unlike [`core::alloc::Layout`], `DstLayout` is only used to describe full
/// Rust types - i.e., those that satisfy the layout requirements outlined by
/// [the reference]. Callers may assume that an instance of `DstLayout`
/// satisfies any conditions imposed on Rust types by the reference.
///
/// If `layout: DstLayout` describes a type, `T`, then it is guaranteed that:
/// - `layout.align` is equal to `T`'s alignment
/// - If `layout.size_info` is `SizeInfo::Sized { size }`, then `T: Sized` and
///   `size_of::<T>() == size`
/// - If `layout.size_info` is `SizeInfo::SliceDst(slice_layout)`, then
///   - `T` is a slice DST
///   - The `size` of an instance of `T` with `elems` trailing slice elements
///     is equal to `slice_layout.offset + slice_layout.elem_size * elems`
///     rounded up to the nearest multiple of `layout.align`. Any bytes in the
///     range `[slice_layout.offset + slice_layout.elem_size * elems, size)`
///     are padding and must not be assumed to be initialized.
///
/// [the reference]: https://doc.rust-lang.org/reference/type-layout.html
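///
/// For example (a sketch, using a hypothetical `repr(C)` slice DST
/// `struct Foo { a: u16, b: u8, c: [u8] }`, for which `align == 2`,
/// `offset == 3`, and `elem_size == 1`), the size formula above works out as
/// follows:
///
/// ```
/// // `round_up(offset + elem_size * elems, align)`, with the rounding
/// // written out explicitly.
/// fn foo_size(elems: usize) -> usize {
///     let (offset, elem_size, align) = (3, 1, 2);
///     let unpadded = offset + elem_size * elems;
///     (unpadded + align - 1) / align * align
/// }
///
/// assert_eq!(foo_size(0), 4); // trailing padding byte after `b`
/// assert_eq!(foo_size(1), 4);
/// assert_eq!(foo_size(2), 6);
/// ```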
#[doc(hidden)]
#[allow(missing_debug_implementations, missing_copy_implementations)]
#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
pub struct DstLayout {
    align: NonZeroUsize,
    size_info: SizeInfo,
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
enum SizeInfo<E = usize> {
    Sized { _size: usize },
    SliceDst(TrailingSliceLayout<E>),
}

#[cfg_attr(any(kani, test), derive(Copy, Clone, Debug, PartialEq, Eq))]
struct TrailingSliceLayout<E = usize> {
    // The offset of the first byte of the trailing slice field. Note that this
    // is NOT the same as the minimum size of the type. For example, consider
    // the following type:
    //
    //   struct Foo {
    //       a: u16,
    //       b: u8,
    //       c: [u8],
    //   }
    //
    // In `Foo`, `c` is at byte offset 3. When `c.len() == 0`, `c` is followed
    // by a padding byte.
    _offset: usize,
    // The size of the element type of the trailing slice field.
    _elem_size: E,
}

impl SizeInfo {
    /// Attempts to create a `SizeInfo` from `Self` in which `elem_size` is a
    /// `NonZeroUsize`. If `elem_size` is 0, returns `None`.
    #[allow(unused)]
    const fn try_to_nonzero_elem_size(&self) -> Option<SizeInfo<NonZeroUsize>> {
        Some(match *self {
            SizeInfo::Sized { _size } => SizeInfo::Sized { _size },
            SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
                if let Some(_elem_size) = NonZeroUsize::new(_elem_size) {
                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
                } else {
                    return None;
                }
            }
        })
    }
}

#[doc(hidden)]
#[derive(Copy, Clone)]
#[cfg_attr(test, derive(Debug))]
#[allow(missing_debug_implementations)]
pub enum _CastType {
    _Prefix,
    _Suffix,
}

impl DstLayout {
    /// The minimum possible alignment of a type.
    const MIN_ALIGN: NonZeroUsize = match NonZeroUsize::new(1) {
        Some(min_align) => min_align,
        None => unreachable!(),
    };

    /// The maximum theoretically possible alignment of a type.
    ///
    /// For compatibility with future Rust versions, this is defined as the
    /// maximum power-of-two that fits into a `usize`. See also
    /// [`DstLayout::CURRENT_MAX_ALIGN`].
    const THEORETICAL_MAX_ALIGN: NonZeroUsize =
        match NonZeroUsize::new(1 << (POINTER_WIDTH_BITS - 1)) {
            Some(max_align) => max_align,
            None => unreachable!(),
        };

    /// The current, documented max alignment of a type \[1\].
    ///
    /// \[1\] Per <https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers>:
    ///
    ///   The alignment value must be a power of two from 1 up to
    ///   2<sup>29</sup>.
    #[cfg(not(kani))]
    const CURRENT_MAX_ALIGN: NonZeroUsize = match NonZeroUsize::new(1 << 28) {
        Some(max_align) => max_align,
        None => unreachable!(),
    };

    /// Constructs a `DstLayout` for a zero-sized type with `repr_align`
    /// alignment (or 1). If `repr_align` is provided, then it must be a power
    /// of two.
    ///
    /// # Panics
    ///
    /// This function panics if the supplied `repr_align` is not a power of two.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that the contract of this function is satisfied.
    #[doc(hidden)]
    #[inline]
    pub const fn new_zst(repr_align: Option<NonZeroUsize>) -> DstLayout {
        let align = match repr_align {
            Some(align) => align,
            None => Self::MIN_ALIGN,
        };

        assert!(align.is_power_of_two());

        DstLayout { align, size_info: SizeInfo::Sized { _size: 0 } }
    }

    /// Constructs a `DstLayout` which describes `T`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `T`.
    #[doc(hidden)]
    #[inline]
    pub const fn for_type<T>() -> DstLayout {
        // SAFETY: `align` is correct by construction. `T: Sized`, and so it is
        // sound to initialize `size_info` to `SizeInfo::Sized { size }`; the
        // `size` field is also correct by construction.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::Sized { _size: mem::size_of::<T>() },
        }
    }

    /// Constructs a `DstLayout` which describes `[T]`.
    ///
    /// # Safety
    ///
    /// Unsafe code may assume that `DstLayout` is the correct layout for `[T]`.
    const fn for_slice<T>() -> DstLayout {
        // SAFETY: The alignment of a slice is equal to the alignment of its
        // element type, and so `align` is initialized correctly.
        //
        // Since this is just a slice type, there is no offset between the
        // beginning of the type and the beginning of the slice, so it is
        // correct to set `offset: 0`. The `elem_size` is correct by
        // construction. Since `[T]` is a (degenerate case of a) slice DST, it
        // is correct to initialize `size_info` to `SizeInfo::SliceDst`.
        DstLayout {
            align: match NonZeroUsize::new(mem::align_of::<T>()) {
                Some(align) => align,
                None => unreachable!(),
            },
            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                _offset: 0,
                _elem_size: mem::size_of::<T>(),
            }),
        }
    }

    /// Like `Layout::extend`, this creates a layout that describes a record
    /// whose layout consists of `self` followed by `field`, including the
    /// necessary inter-field padding but not any trailing padding.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked for each field in declaration order. To add trailing
    /// padding, call `DstLayout::pad_to_align` after extending the layout for
    /// all fields. If `self` corresponds to a type marked with
    /// `repr(packed(N))`, then `repr_packed` should be set to `Some(N)`,
    /// otherwise `None`. A sketch of this sequence appears below.
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` Rust type begins with
    /// fields whose layout are `self`, and those fields are immediately
    /// followed by a field whose layout is `field`, then unsafe code may rely
    /// on `self.extend(field, repr_packed)` producing a layout that correctly
    /// encompasses those two components.
    ///
    /// We make no guarantees about the behavior of this method if these
    /// fragments cannot appear in a valid Rust type (e.g., the concatenation
    /// of the layouts would lead to a size larger than `isize::MAX`).
    #[doc(hidden)]
    #[inline]
    pub const fn extend(self, field: DstLayout, repr_packed: Option<NonZeroUsize>) -> Self {
        use util::{core_layout::padding_needed_for, max, min};

        // If `repr_packed` is `None`, there are no alignment constraints, and
        // the value can be defaulted to `THEORETICAL_MAX_ALIGN`.
        let max_align = match repr_packed {
            Some(max_align) => max_align,
            None => Self::THEORETICAL_MAX_ALIGN,
        };

        assert!(max_align.is_power_of_two());

        // We use Kani to prove that this method is robust to future increases
        // in Rust's maximum allowed alignment. However, if such a change ever
        // actually occurs, we'd like to be notified via assertion failures.
        #[cfg(not(kani))]
        {
            debug_assert!(self.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            debug_assert!(field.align.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            if let Some(repr_packed) = repr_packed {
                debug_assert!(repr_packed.get() <= DstLayout::CURRENT_MAX_ALIGN.get());
            }
        }

        // The field's alignment is clamped by `repr_packed` (i.e., the
        // `repr(packed(N))` attribute, if any) [1].
        //
        // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
        //
        //   The alignments of each field, for the purpose of positioning
        //   fields, is the smaller of the specified alignment and the
        //   alignment of the field's type.
        let field_align = min(field.align, max_align);

        // The struct's alignment is the maximum of its previous alignment and
        // `field_align`.
        let align = max(self.align, field_align);

        let size_info = match self.size_info {
            // If the layout is already a DST, we panic; DSTs cannot be
            // extended with additional fields.
            SizeInfo::SliceDst(..) => panic!("Cannot extend a DST with additional fields."),

            SizeInfo::Sized { _size: preceding_size } => {
                // Compute the minimum amount of inter-field padding needed to
                // satisfy the field's alignment, and the offset of the
                // trailing field. [1]
                //
                // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
                //
                //   Inter-field padding is guaranteed to be the minimum
                //   required in order to satisfy each field's (possibly
                //   altered) alignment.
                let padding = padding_needed_for(preceding_size, field_align);

                // This will not panic (and is proven to not panic, with Kani)
                // if the layout components can correspond to a leading layout
                // fragment of a valid Rust type, but may panic otherwise
                // (e.g., combining or aligning the components would create a
                // size exceeding `isize::MAX`).
                let offset = match preceding_size.checked_add(padding) {
                    Some(offset) => offset,
                    None => panic!("Adding padding to `self`'s size overflows `usize`."),
                };

                match field.size_info {
                    SizeInfo::Sized { _size: field_size } => {
                        // If the trailing field is sized, the resulting layout
                        // will be sized. Its size will be the sum of the
                        // preceding layout, the size of the new field, and the
                        // size of inter-field padding between the two.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but
                        // may panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let size = match offset.checked_add(field_size) {
                            Some(size) => size,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::Sized { _size: size }
                    }
                    SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: trailing_offset,
                        _elem_size,
                    }) => {
                        // If the trailing field is dynamically sized, so too
                        // will be the resulting layout. The offset of the
                        // trailing slice component is the sum of the offset of
                        // the trailing field and the trailing slice offset
                        // within that field.
                        //
                        // This will not panic (and is proven with Kani to not
                        // panic) if the layout components can correspond to a
                        // leading layout fragment of a valid Rust type, but
                        // may panic otherwise (e.g., combining or aligning the
                        // components would create a size exceeding
                        // `usize::MAX`).
                        let offset = match offset.checked_add(trailing_offset) {
                            Some(offset) => offset,
                            None => panic!("`field` cannot be appended without the total size overflowing `usize`"),
                        };
                        SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size })
                    }
                }
            }
        };

        DstLayout { align, size_info }
    }

    /// Like `Layout::pad_to_align`, this routine rounds the size of this
    /// layout up to the nearest multiple of this type's alignment (for a
    /// `repr(packed(N))` type, that alignment has already been clamped by the
    /// preceding calls to [`DstLayout::extend`]). This method leaves DST
    /// layouts unchanged, since the trailing padding of DSTs is computed at
    /// runtime.
    ///
    /// In order to match the layout of a `#[repr(C)]` struct, this method
    /// should be invoked after the invocations of [`DstLayout::extend`].
    ///
    /// This method cannot be used to match the layout of a record with the
    /// default representation, as that representation is mostly unspecified.
    ///
    /// # Safety
    ///
    /// If a (potentially hypothetical) valid `repr(C)` type begins with fields
    /// whose layout are `self` followed only by zero or more bytes of trailing
    /// padding (not included in `self`), then unsafe code may rely on
    /// `self.pad_to_align()` producing a layout that correctly encapsulates
    /// the layout of that type.
    ///
    /// We make no guarantees about the behavior of this method if `self`
    /// cannot appear in a valid Rust type (e.g., because the addition of
    /// trailing padding would lead to a size larger than `isize::MAX`).
    #[doc(hidden)]
    #[inline]
    pub const fn pad_to_align(self) -> Self {
        use util::core_layout::padding_needed_for;

        let size_info = match self.size_info {
            // For sized layouts, we add the minimum amount of trailing padding
            // needed to satisfy alignment.
            SizeInfo::Sized { _size: unpadded_size } => {
                let padding = padding_needed_for(unpadded_size, self.align);
                let size = match unpadded_size.checked_add(padding) {
                    Some(size) => size,
                    None => panic!("Adding padding caused size to overflow `usize`."),
                };
                SizeInfo::Sized { _size: size }
            }
            // For DST layouts, trailing padding depends on the length of the
            // trailing DST and is computed at runtime. This does not alter the
            // offset or element size of the layout, so we leave `size_info`
            // unchanged.
            size_info @ SizeInfo::SliceDst(_) => size_info,
        };

        DstLayout { align: self.align, size_info }
    }

    /// Validates that a cast is sound from a layout perspective.
    ///
    /// Validates that the size and alignment requirements of a type with the
    /// layout described in `self` would not be violated by performing a
    /// `cast_type` cast from a pointer with address `addr` which refers to a
    /// memory region of size `bytes_len`.
    ///
    /// If the cast is valid, `validate_cast_and_convert_metadata` returns
    /// `(elems, split_at)`. If `self` describes a dynamically-sized type, then
    /// `elems` is the maximum number of trailing slice elements for which a
    /// cast would be valid (for sized types, `elems` is meaningless and should
    /// be ignored). `split_at` is the index at which to split the memory
    /// region in order for the prefix (suffix) to contain the result of the
    /// cast, and in order for the remaining suffix (prefix) to contain the
    /// leftover bytes.
    ///
    /// There are three conditions under which a cast can fail:
    /// - The smallest possible value for the type is larger than the provided
    ///   memory region
    /// - A prefix cast is requested, and `addr` does not satisfy `self`'s
    ///   alignment requirement
    /// - A suffix cast is requested, and `addr + bytes_len` does not satisfy
    ///   `self`'s alignment requirement (as a consequence, since all instances
    ///   of the type are a multiple of its alignment, no size for the type
    ///   will result in a starting address which is properly aligned)
    ///
    /// # Safety
    ///
    /// The caller may assume that this implementation is correct, and may rely
    /// on that assumption for the soundness of their code. In particular, the
    /// caller may assume that, if `validate_cast_and_convert_metadata` returns
    /// `Some((elems, split_at))`, then:
    /// - A pointer to the type (for dynamically sized types, this includes
    ///   `elems` as its pointer metadata) describes an object of size `size <=
    ///   bytes_len`
    /// - If this is a prefix cast:
    ///   - `addr` satisfies `self`'s alignment
    ///   - `size == split_at`
    /// - If this is a suffix cast:
    ///   - `split_at == bytes_len - size`
    ///   - `addr + split_at` satisfies `self`'s alignment
    ///
    /// Note that this method does *not* ensure that a pointer constructed from
    /// its return values will be a valid pointer. In particular, this method
    /// does not reason about `isize` overflow, which is a requirement of many
    /// Rust pointer APIs, and may at some point be determined to be a validity
    /// invariant of pointer types themselves. This should never be a problem
    /// so long as the arguments to this method are derived from a known-valid
    /// pointer (e.g., one derived from a safe Rust reference), but it is
    /// nonetheless the caller's responsibility to justify that pointer
    /// arithmetic will not overflow based on a safety argument *other than*
    /// the mere fact that this method returned successfully.
    ///
    /// # Panics
    ///
    /// `validate_cast_and_convert_metadata` will panic if `self` describes a
    /// DST whose trailing slice element is zero-sized.
    ///
    /// If `addr + bytes_len` overflows `usize`,
    /// `validate_cast_and_convert_metadata` may panic, or it may return
    /// incorrect results. No guarantees are made about when
    /// `validate_cast_and_convert_metadata` will panic. The caller should not
    /// rely on `validate_cast_and_convert_metadata` panicking in any
    /// particular condition, even if `debug_assertions` are enabled.
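    ///
    /// A worked example of the slice-DST arithmetic implemented below,
    /// assuming a prefix cast from an aligned `addr`. The constants here are
    /// hypothetical, chosen only for illustration:
    ///
    /// ```
    /// let (offset, elem_size, align, bytes_len) = (3usize, 2usize, 4usize, 17usize);
    /// // Round `bytes_len` down to a multiple of `align`.
    /// let max_total_bytes = bytes_len / align * align;            // 16
    /// // Bytes available to the trailing slice and trailing padding.
    /// let max_slice_and_padding_bytes = max_total_bytes - offset; // 13
    /// // Whole elements that fit in that space.
    /// let elems = max_slice_and_padding_bytes / elem_size;        // 6
    /// let without_padding = offset + elems * elem_size;           // 15
    /// // Round up to `align` to get the bytes consumed by the cast.
    /// let self_bytes = (without_padding + align - 1) / align * align;
    /// assert_eq!((elems, self_bytes), (6, 16));
    /// ```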
    #[allow(unused)]
    const fn validate_cast_and_convert_metadata(
        &self,
        addr: usize,
        bytes_len: usize,
        cast_type: _CastType,
    ) -> Option<(usize, usize)> {
        // `debug_assert!`, but with `#[allow(clippy::arithmetic_side_effects)]`.
        macro_rules! __debug_assert {
            ($e:expr $(, $msg:expr)?) => {
                debug_assert!({
                    #[allow(clippy::arithmetic_side_effects)]
                    let e = $e;
                    e
                } $(, $msg)?);
            };
        }

        // Note that, in practice, `self` is always a compile-time constant. We
        // do this check earlier than needed to ensure that we always panic as
        // a result of bugs in the program (such as calling this function on an
        // invalid type) instead of allowing this panic to be hidden if the
        // cast would have failed anyway for runtime reasons (such as a
        // too-small memory region).
        //
        // TODO(#67): Once our MSRV is 1.65, use let-else:
        // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
        let size_info = match self.size_info.try_to_nonzero_elem_size() {
            Some(size_info) => size_info,
            None => panic!("attempted to cast to slice type with zero-sized element"),
        };

        // Precondition
        __debug_assert!(addr.checked_add(bytes_len).is_some(), "`addr` + `bytes_len` > usize::MAX");

        // Alignment checks go in their own block to avoid introducing
        // variables into the top-level scope.
        {
            // We check alignment for `addr` (for prefix casts) or `addr +
            // bytes_len` (for suffix casts). For a prefix cast, the
            // correctness of this check is trivial - `addr` is the address
            // the object will live at.
            //
            // For a suffix cast, we know that all valid sizes for the type
            // are a multiple of the alignment (and by safety precondition, we
            // know `DstLayout` may only describe valid Rust types). Thus, a
            // validly-sized instance which lives at a validly-aligned address
            // must also end at a validly-aligned address. Thus, if the end
            // address for a suffix cast (`addr + bytes_len`) is not aligned,
            // then no valid start address will be aligned either.
            let offset = match cast_type {
                _CastType::_Prefix => 0,
                _CastType::_Suffix => bytes_len,
            };

            // Addition is guaranteed not to overflow because `offset <=
            // bytes_len`, and `addr + bytes_len <= usize::MAX` is a
            // precondition of this method. Modulus is guaranteed not to
            // divide by 0 because `align` is non-zero.
            #[allow(clippy::arithmetic_side_effects)]
            if (addr + offset) % self.align.get() != 0 {
                return None;
            }
        }

        let (elems, self_bytes) = match size_info {
            SizeInfo::Sized { _size: size } => {
                if size > bytes_len {
                    return None;
                }
                (0, size)
            }
            SizeInfo::SliceDst(TrailingSliceLayout { _offset: offset, _elem_size: elem_size }) => {
                // Calculate the maximum number of bytes that could be
                // consumed - any number of bytes larger than this will either
                // not be a multiple of the alignment, or will be larger than
                // `bytes_len`.
                let max_total_bytes =
                    util::round_down_to_next_multiple_of_alignment(bytes_len, self.align);
                // Calculate the maximum number of bytes that could be
                // consumed by the trailing slice.
                //
                // TODO(#67): Once our MSRV is 1.65, use let-else:
                // https://blog.rust-lang.org/2022/11/03/Rust-1.65.0.html#let-else-statements
                let max_slice_and_padding_bytes = match max_total_bytes.checked_sub(offset) {
                    Some(max) => max,
                    // `bytes_len` too small even for 0 trailing slice elements.
                    None => return None,
                };

                // Calculate the number of elements that fit in
                // `max_slice_and_padding_bytes`; any remaining bytes will be
                // considered padding.
                //
                // Guaranteed not to divide by zero: `elem_size` is non-zero.
                #[allow(clippy::arithmetic_side_effects)]
                let elems = max_slice_and_padding_bytes / elem_size.get();
                // Guaranteed not to overflow on multiplication: `usize::MAX
                // >= max_slice_and_padding_bytes >=
                // (max_slice_and_padding_bytes / elem_size) * elem_size`.
                //
                // Guaranteed not to overflow on addition:
                // - max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size <= max_slice_and_padding_bytes == max_total_bytes - offset
                // - elems * elem_size + offset <= max_total_bytes <= usize::MAX
                #[allow(clippy::arithmetic_side_effects)]
                let without_padding = offset + elems * elem_size.get();
                // `self_bytes` is equal to the offset bytes plus the bytes
                // consumed by the trailing slice plus any padding bytes
                // required to satisfy the alignment. Note that we have
                // computed the maximum number of trailing slice elements that
                // could fit in `self_bytes`, so any padding is guaranteed to
                // be less than the size of an extra element.
                //
                // Guaranteed not to overflow:
                // - By previous comment: without_padding == elems * elem_size
                //   + offset <= max_total_bytes
                // - By construction, `max_total_bytes` is a multiple of
                //   `self.align`.
                // - At most, adding padding needed to round `without_padding`
                //   up to the next multiple of the alignment will bring
                //   `self_bytes` up to `max_total_bytes`.
                #[allow(clippy::arithmetic_side_effects)]
                let self_bytes = without_padding
                    + util::core_layout::padding_needed_for(without_padding, self.align);
                (elems, self_bytes)
            }
        };

        __debug_assert!(self_bytes <= bytes_len);

        let split_at = match cast_type {
            _CastType::_Prefix => self_bytes,
            // Guaranteed not to underflow:
            // - In the `Sized` branch, only returns `size` if `size <=
            //   bytes_len`.
            // - In the `SliceDst` branch, calculates `self_bytes <=
            //   max_total_bytes`, which is upper-bounded by `bytes_len`.
            #[allow(clippy::arithmetic_side_effects)]
            _CastType::_Suffix => bytes_len - self_bytes,
        };

        Some((elems, split_at))
    }
}

/// A trait which carries information about a type's layout that is used by the
/// internals of this crate.
///
/// This trait is not meant for consumption by code outside of this crate. While
/// the normal semver stability guarantees apply with respect to which types
/// implement this trait and which trait implementations are implied by this
/// trait, no semver stability guarantees are made regarding its internals; they
/// may change at any time, and code which makes use of them may break.
///
/// # Safety
///
/// This trait does not convey any safety guarantees to code outside this crate.
#[doc(hidden)] // TODO: Remove this once KnownLayout is used by other APIs
pub unsafe trait KnownLayout {
    // The `Self: Sized` bound makes it so that `KnownLayout` can still be
    // object safe. It's not currently object safe thanks to `const LAYOUT`, and
    // it likely won't be in the future, but there's no reason not to be
    // forwards-compatible with object safety.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;

    #[doc(hidden)]
    const LAYOUT: DstLayout;

    /// SAFETY: The returned pointer has the same address and provenance as
    /// `bytes`. If `Self` is a DST, the returned pointer's referent has `elems`
    /// elements in its trailing slice. If `Self` is sized, `elems` is ignored.
    #[doc(hidden)]
    fn raw_from_ptr_len(bytes: NonNull<u8>, elems: usize) -> NonNull<Self>;
}

// SAFETY: Delegates safety to `DstLayout::for_slice`.
unsafe impl<T: KnownLayout> KnownLayout for [T] {
    #[allow(clippy::missing_inline_in_public_items)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized,
    {
    }
    const LAYOUT: DstLayout = DstLayout::for_slice::<T>();

    // SAFETY: `.cast` preserves address and provenance. The returned pointer
    // refers to an object with `elems` elements by construction.
    #[inline(always)]
    fn raw_from_ptr_len(data: NonNull<u8>, elems: usize) -> NonNull<Self> {
        // TODO(#67): Remove this allow. See NonNullExt for more details.
        #[allow(unstable_name_collisions)]
        NonNull::slice_from_raw_parts(data.cast::<T>(), elems)
    }
}

#[rustfmt::skip]
impl_known_layout!(
    (),
    u8, i8, u16, i16, u32, i32, u64, i64, u128, i128, usize, isize, f32, f64,
    bool, char,
    NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32, NonZeroI32,
    NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128, NonZeroUsize, NonZeroIsize
);
#[rustfmt::skip]
impl_known_layout!(
    T => Option<T>,
    T: ?Sized => PhantomData<T>,
    T => Wrapping<T>,
    T => MaybeUninit<T>,
    T: ?Sized => *const T,
    T: ?Sized => *mut T,
);
impl_known_layout!(const N: usize, T => [T; N]);

safety_comment! {
    /// SAFETY:
    /// `str` and `ManuallyDrop<[T]>` [1] have the same representations as
    /// `[u8]` and `[T]` respectively. `str` has different bit validity than
    /// `[u8]`, but that doesn't affect the soundness of this impl.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///   validity as `T`
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
    unsafe_impl_known_layout!(#[repr([u8])] str);
    unsafe_impl_known_layout!(T: ?Sized + KnownLayout => #[repr(T)] ManuallyDrop<T>);
}

/// Analyzes whether a type is [`FromZeroes`].
///
/// This derive analyzes, at compile time, whether the annotated type satisfies
/// the [safety conditions] of `FromZeroes` and implements `FromZeroes` if it is
/// sound to do so. This derive can be applied to structs, enums, and unions;
/// e.g.:
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct MyStruct {
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// #[repr(u8)]
/// enum MyEnum {
/// #     Variant0,
/// # /*
///     ...
/// # */
/// }
///
/// #[derive(FromZeroes)]
/// union MyUnion {
/// #     variant: u8,
/// # /*
///     ...
/// # */
/// }
/// ```
///
/// [safety conditions]: trait@FromZeroes#safety
///
/// # Analysis
///
/// *This section describes, roughly, the analysis performed by this derive to
/// determine whether it is sound to implement `FromZeroes` for a given type.
/// Unless you are modifying the implementation of this derive, or attempting to
/// manually implement `FromZeroes` for a type yourself, you don't need to read
/// this section.*
///
/// If a type has the following properties, then this derive can implement
/// `FromZeroes` for that type:
///
/// - If the type is a struct, all of its fields must be `FromZeroes`.
/// - If the type is an enum, it must be C-like (meaning that all variants have
///   no fields) and it must have a variant with a discriminant of `0`. See [the
///   reference] for a description of how discriminant values are chosen.
/// - The type must not contain any [`UnsafeCell`]s (this is required in order
///   for it to be sound to construct a `&[u8]` and a `&T` to the same region of
///   memory). The type may contain references or pointers to `UnsafeCell`s so
///   long as those values can themselves be initialized from zeroes
///   (`FromZeroes` is not currently implemented for, e.g.,
///   `Option<&UnsafeCell<_>>`, but it could be one day).
///
/// This analysis is subject to change. Unsafe code may *only* rely on the
/// documented [safety conditions] of `FromZeroes`, and must *not* rely on the
/// implementation details of this derive.
///
/// [the reference]: https://doc.rust-lang.org/reference/items/enumerations.html#custom-discriminant-values-for-fieldless-enumerations
/// [`UnsafeCell`]: core::cell::UnsafeCell
///
/// ## Why isn't an explicit representation required for structs?
///
/// Neither this derive, nor the [safety conditions] of `FromZeroes`, requires
/// that structs are marked with `#[repr(C)]`.
///
/// Per the [Rust reference][reference],
///
/// > The representation of a type can change the padding between fields, but
/// > does not change the layout of the fields themselves.
///
/// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
///
/// Since the layout of structs only consists of padding bytes and field bytes,
/// a struct is soundly `FromZeroes` if:
/// 1. its padding is soundly `FromZeroes`, and
/// 2. its fields are soundly `FromZeroes`.
///
/// The first condition is always satisfied: padding bytes do not have any
/// validity constraints. A [discussion] of this question in the Unsafe Code
/// Guidelines Working Group concluded that it would be virtually unimaginable
/// for future versions of rustc to add validity constraints to padding bytes.
///
/// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
///
/// Whether a struct is soundly `FromZeroes` therefore solely depends on whether
/// its fields are `FromZeroes`.
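///
/// For example, this derive is sound for a struct with Rust's default
/// representation (a minimal sketch; `Point` is a hypothetical type):
///
/// ```
/// # use zerocopy_derive::FromZeroes;
/// #[derive(FromZeroes)]
/// struct Point {
///     x: i32,
///     y: i32,
/// }
/// ```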
// TODO(#146): Document why we don't require an enum to have an explicit `repr`
// attribute.
#[cfg(any(feature = "derive", test))]
#[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
pub use zerocopy_derive::FromZeroes;

/// Types whose validity can be checked at runtime, allowing them to be
/// conditionally converted from byte slices.
///
/// WARNING: Do not implement this trait yourself! Instead, use
/// `#[derive(TryFromBytes)]`.
///
/// `TryFromBytes` types can safely be deserialized from an untrusted sequence
/// of bytes by performing a runtime check that the byte sequence contains a
/// valid instance of `Self`.
///
/// `TryFromBytes` is ignorant of byte order. For byte order-aware types, see
/// the [`byteorder`] module.
///
/// # What is a "valid instance"?
///
/// In Rust, each type has *bit validity*, which refers to the set of bit
/// patterns which may appear in an instance of that type. It is impossible for
/// safe Rust code to produce values which violate bit validity (i.e., values
/// outside of the "valid" set of bit patterns). If `unsafe` code produces an
/// invalid value, this is considered [undefined behavior].
///
/// Rust's bit validity rules are currently being decided, which means that some
/// types have three classes of bit patterns: those which are definitely valid,
/// and whose validity is documented in the language; those which may or may not
/// be considered valid at some point in the future; and those which are
/// definitely invalid.
///
/// Zerocopy takes a conservative approach, and only considers a bit pattern to
/// be valid if its validity is a documented guarantee provided by the
/// language.
///
/// For most use cases, Rust's current guarantees align with programmers'
/// intuitions about what ought to be valid. As a result, zerocopy's
/// conservatism should not affect most users. One notable exception is unions,
/// whose bit validity is very much up in the air; zerocopy does not permit
/// implementing `TryFromBytes` for any union type.
///
/// If you are negatively affected by lack of support for a particular type,
/// we encourage you to let us know by [filing an issue][github-repo].
///
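/// For a concrete sense of bit validity, consider `NonZeroU8`: its documented
/// bit validity excludes the all-zeroes byte. The checked constructor below
/// performs, loosely, the kind of runtime validation that `TryFromBytes`
/// generalizes to arbitrary types (a sketch using only core APIs):
///
/// ```
/// use core::num::NonZeroU8;
///
/// assert!(NonZeroU8::new(0).is_none()); // `0x00` is not a valid `NonZeroU8`
/// assert_eq!(NonZeroU8::new(1).map(NonZeroU8::get), Some(1));
/// ```
///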
/// # Safety
///
/// On its own, `T: TryFromBytes` does not make any guarantees about the layout
/// or representation of `T`. It merely provides the ability to perform a
/// validity check at runtime via methods like [`try_from_ref`].
///
/// Currently, it is not possible to stably implement `TryFromBytes` other than
/// by using `#[derive(TryFromBytes)]`. While there are `#[doc(hidden)]` items
/// on this trait that provide well-defined safety invariants, no stability
/// guarantees are made with respect to these items. In particular, future
/// releases of zerocopy may make backwards-breaking changes to these items,
/// including changes that only affect soundness, which may cause code which
/// uses those items to silently become unsound.
///
/// [undefined behavior]: https://raphlinus.github.io/programming/rust/2018/08/17/undefined-behavior.html
/// [github-repo]: https://github.com/google/zerocopy
/// [`try_from_ref`]: TryFromBytes::try_from_ref
// TODO(#5): Update `try_from_ref` doc link once it exists
#[doc(hidden)]
pub unsafe trait TryFromBytes {
    /// Does a given memory range contain a valid instance of `Self`?
    ///
    /// # Safety
    ///
    /// ## Preconditions
    ///
    /// The memory referenced by `candidate` may only be accessed via reads for
    /// the duration of this method call. This prohibits writes through mutable
    /// references and through [`UnsafeCell`]s. There may exist immutable
    /// references to the same memory which contain `UnsafeCell`s so long as:
    /// - Those `UnsafeCell`s exist at the same byte ranges as `UnsafeCell`s in
    ///   `Self`. This is a bidirectional property: `Self` may not contain
    ///   `UnsafeCell`s where other references to the same memory do not, and
    ///   vice-versa.
    /// - Those `UnsafeCell`s are never used to perform mutation for the
    ///   duration of this method call.
    ///
    /// The memory referenced by `candidate` may not be referenced by any
    /// mutable references even if these references are not used to perform
    /// mutation.
    ///
    /// `candidate` is not required to refer to a valid `Self`. However, it must
    /// satisfy the requirement that uninitialized bytes may only be present
    /// where it is possible for them to be present in `Self`. This is a dynamic
    /// property: if, at a particular byte offset, a valid enum discriminant is
    /// set, the subsequent bytes may only have uninitialized bytes as
    /// specified by the corresponding enum.
    ///
    /// Formally, given `len = size_of_val_raw(candidate)`, at every byte
    /// offset, `b`, in the range `[0, len)`:
    /// - If, in all instances `s: Self` of length `len`, the byte at offset `b`
    ///   in `s` is initialized, then the byte at offset `b` within `*candidate`
    ///   must be initialized.
    /// - Let `c` be the contents of the byte range `[0, b)` in `*candidate`.
    ///   Let `S` be the subset of valid instances of `Self` of length `len`
    ///   which contain `c` in the offset range `[0, b)`. If, for all instances
    ///   of `s: Self` in `S`, the byte at offset `b` in `s` is initialized,
    ///   then the byte at offset `b` in `*candidate` must be initialized.
    ///
    /// Pragmatically, this means that if `*candidate` is guaranteed to
    /// contain an enum type at a particular offset, and the enum discriminant
    /// stored in `*candidate` corresponds to a valid variant of that enum
    /// type, then it is guaranteed that the appropriate bytes of `*candidate`
    /// are initialized as defined by that variant's bit validity (although
    /// note that the variant may contain another enum type, in which case the
    /// same rules apply depending on the state of its discriminant, and so on
    /// recursively).
    ///
    /// ## Postconditions
    ///
    /// Unsafe code may assume that, if `is_bit_valid(candidate)` returns true,
    /// `*candidate` contains a valid `Self`.
    ///
    /// # Panics
    ///
    /// `is_bit_valid` may panic. Callers are responsible for ensuring that any
    /// `unsafe` code remains sound even in the face of `is_bit_valid`
    /// panicking. (We support user-defined validation routines; so long as
    /// these routines are not required to be `unsafe`, there is no way to
    /// ensure that these do not generate panics.)
    ///
    /// [`UnsafeCell`]: core::cell::UnsafeCell
    #[doc(hidden)]
    unsafe fn is_bit_valid(candidate: Ptr<'_, Self>) -> bool;

    /// Attempts to interpret a byte slice as a `Self`.
    ///
    /// `try_from_ref` validates that `bytes` contains a valid `Self`, and that
    /// it satisfies `Self`'s alignment requirement. If it does, then `bytes` is
    /// reinterpreted as a `Self`.
    ///
    /// Note that Rust's bit validity rules are still being decided. As such,
    /// there exist types whose bit validity is ambiguous. See the
    /// `TryFromBytes` docs for a discussion of how these cases are handled.
    // TODO(#251): In a future in which we distinguish between `FromBytes` and
    // `RefFromBytes`, this requires `where Self: RefFromBytes` to disallow
    // interior mutability.
    #[inline]
    #[doc(hidden)] // TODO(#5): Finalize name before removing this attribute.
    fn try_from_ref(bytes: &[u8]) -> Option<&Self>
    where
        Self: KnownLayout,
    {
        let maybe_self = Ptr::from(bytes).try_cast_into_no_leftover::<Self>()?;

        // SAFETY:
        // - Since `bytes` is an immutable reference, we know that no mutable
        //   references exist to this memory region.
        // - Since `[u8]` contains no `UnsafeCell`s, we know there are no
        //   `&UnsafeCell` references to this memory region.
        // - Since we don't permit implementing `TryFromBytes` for types which
        //   contain `UnsafeCell`s, there are no `UnsafeCell`s in `Self`, and so
        //   the requirement that all references contain `UnsafeCell`s at the
        //   same offsets is trivially satisfied.
        // - All bytes of `bytes` are initialized.
        //
        // This call may panic. If that happens, it doesn't cause any soundness
        // issues, as we have not generated any invalid state which we need to
        // fix before returning.
        if unsafe { !Self::is_bit_valid(maybe_self) } {
            return None;
        }

        // SAFETY:
        // - Preconditions for `as_ref`:
        //   - `is_bit_valid` guarantees that `*maybe_self` contains a valid
        //     `Self`. Since `&[u8]` does not permit interior mutation, this
        //     cannot be invalidated after this method returns.
        //   - Since the argument and return types are immutable references,
        //     Rust will prevent the caller from producing any mutable
        //     references to the same memory region.
        //   - Since `Self` is not allowed to contain any `UnsafeCell`s and the
        //     same is true of `[u8]`, interior mutation is not possible. Thus,
        //     no mutation is possible. For the same reason, there is no
        //     mismatch between the two types in terms of which byte ranges are
        //     referenced as `UnsafeCell`s.
        //   - Since interior mutation isn't possible within `Self`, there's no
        //     way for the returned reference to be used to modify the byte
        //     range, and thus there's no way for the returned reference to be
        //     used to write an invalid `[u8]` which would be observable via
        //     the original `&[u8]`.
        Some(unsafe { maybe_self.as_ref() })
    }
}
1295
1296 /// Types for which a sequence of bytes all set to zero represents a valid
1297 /// instance of the type.
1298 ///
1299 /// Any memory region of the appropriate length which is guaranteed to contain
1300 /// only zero bytes can be viewed as any `FromZeroes` type with no runtime
1301 /// overhead. This is useful whenever memory is known to be in a zeroed state,
1302 /// such as memory returned from some allocation routines.
1303 ///
1304 /// # Implementation
1305 ///
1306 /// **Do not implement this trait yourself!** Instead, use
1307 /// [`#[derive(FromZeroes)]`][derive] (requires the `derive` Cargo feature);
1308 /// e.g.:
1309 ///
1310 /// ```
1311 /// # use zerocopy_derive::FromZeroes;
1312 /// #[derive(FromZeroes)]
1313 /// struct MyStruct {
1314 /// # /*
1315 /// ...
1316 /// # */
1317 /// }
1318 ///
1319 /// #[derive(FromZeroes)]
1320 /// #[repr(u8)]
1321 /// enum MyEnum {
1322 /// # Variant0,
1323 /// # /*
1324 /// ...
1325 /// # */
1326 /// }
1327 ///
1328 /// #[derive(FromZeroes)]
1329 /// union MyUnion {
1330 /// # variant: u8,
1331 /// # /*
1332 /// ...
1333 /// # */
1334 /// }
1335 /// ```
1336 ///
1337 /// This derive performs a sophisticated, compile-time safety analysis to
1338 /// determine whether a type is `FromZeroes`.
1339 ///
1340 /// # Safety
1341 ///
1342 /// *This section describes what is required in order for `T: FromZeroes`, and
1343 /// what unsafe code may assume of such types. If you don't plan on implementing
1344 /// `FromZeroes` manually, and you don't plan on writing unsafe code that
1345 /// operates on `FromZeroes` types, then you don't need to read this section.*
1346 ///
1347 /// If `T: FromZeroes`, then unsafe code may assume that:
1348 /// - It is sound to treat any initialized sequence of zero bytes of length
1349 /// `size_of::<T>()` as a `T`.
1350 /// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
1351 /// `align_of::<T>()`, and `b` contains only zero bytes, it is sound to
1352 /// construct a `t: &T` at the same address as `b`, and it is sound for both
1353 /// `b` and `t` to be live at the same time.
1354 ///
1355 /// If a type is marked as `FromZeroes` which violates this contract, it may
1356 /// cause undefined behavior.
1357 ///
1358 /// `#[derive(FromZeroes)]` only permits [types which satisfy these
1359 /// requirements][derive-analysis].
1360 ///
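/// For illustration, here is a minimal sketch of the kind of code this
/// contract licenses. The helper `make_zeroed` is hypothetical; real code
/// should simply call [`FromZeroes::new_zeroed`], which does exactly this:
///
/// ```
/// # use zerocopy::FromZeroes;
/// fn make_zeroed<T: FromZeroes>() -> T {
///     // SAFETY: `T: FromZeroes` guarantees that the all-zeroes bit
///     // pattern is a valid `T`.
///     unsafe { core::mem::zeroed() }
/// }
///
/// let x: u64 = make_zeroed();
/// assert_eq!(x, 0);
/// ```
///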
1361 #[cfg_attr(
1362 feature = "derive",
1363 doc = "[derive]: zerocopy_derive::FromZeroes",
1364 doc = "[derive-analysis]: zerocopy_derive::FromZeroes#analysis"
1365 )]
1366 #[cfg_attr(
1367 not(feature = "derive"),
1368 doc = concat!("[derive]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.FromZeroes.html"),
1369 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.FromZeroes.html#analysis"),
1370 )]
1371 pub unsafe trait FromZeroes {
1372 // The `Self: Sized` bound makes it so that `FromZeroes` is still object
1373 // safe.
1374 #[doc(hidden)]
1375 fn only_derive_is_allowed_to_implement_this_trait()
1376 where
1377 Self: Sized;
1378
1379 /// Overwrites `self` with zeroes.
1380 ///
1381 /// Sets every byte in `self` to 0. While this is similar to doing `*self =
1382 /// Self::new_zeroed()`, it differs in that `zero` does not semantically
1383 /// drop the current value and replace it with a new one - it simply
1384 /// modifies the bytes of the existing value.
1385 ///
1386 /// # Examples
1387 ///
1388 /// ```
1389 /// # use zerocopy::FromZeroes;
1390 /// # use zerocopy_derive::*;
1391 /// #
1392 /// #[derive(FromZeroes)]
1393 /// #[repr(C)]
1394 /// struct PacketHeader {
1395 /// src_port: [u8; 2],
1396 /// dst_port: [u8; 2],
1397 /// length: [u8; 2],
1398 /// checksum: [u8; 2],
1399 /// }
1400 ///
1401 /// let mut header = PacketHeader {
1402 /// src_port: 100u16.to_be_bytes(),
1403 /// dst_port: 200u16.to_be_bytes(),
1404 /// length: 300u16.to_be_bytes(),
1405 /// checksum: 400u16.to_be_bytes(),
1406 /// };
1407 ///
1408 /// header.zero();
1409 ///
1410 /// assert_eq!(header.src_port, [0, 0]);
1411 /// assert_eq!(header.dst_port, [0, 0]);
1412 /// assert_eq!(header.length, [0, 0]);
1413 /// assert_eq!(header.checksum, [0, 0]);
1414 /// ```
1415 #[inline(always)]
1416 fn zero(&mut self) {
1417 let slf: *mut Self = self;
1418 let len = mem::size_of_val(self);
1419 // SAFETY:
1420 // - `self` is guaranteed by the type system to be valid for writes of
1421 // size `size_of_val(self)`.
1422 // - `u8`'s alignment is 1, and thus `self` is guaranteed to be aligned
1423 // as required by `u8`.
1424 // - Since `Self: FromZeroes`, the all-zeroes instance is a valid
1425 // instance of `Self.`
1426 //
1427 // TODO(#429): Add references to docs and quotes.
1428 unsafe { ptr::write_bytes(slf.cast::<u8>(), 0, len) };
1429 }
1430
1431 /// Creates an instance of `Self` from zeroed bytes.
1432 ///
1433 /// # Examples
1434 ///
1435 /// ```
1436 /// # use zerocopy::FromZeroes;
1437 /// # use zerocopy_derive::*;
1438 /// #
1439 /// #[derive(FromZeroes)]
1440 /// #[repr(C)]
1441 /// struct PacketHeader {
1442 /// src_port: [u8; 2],
1443 /// dst_port: [u8; 2],
1444 /// length: [u8; 2],
1445 /// checksum: [u8; 2],
1446 /// }
1447 ///
1448 /// let header: PacketHeader = FromZeroes::new_zeroed();
1449 ///
1450 /// assert_eq!(header.src_port, [0, 0]);
1451 /// assert_eq!(header.dst_port, [0, 0]);
1452 /// assert_eq!(header.length, [0, 0]);
1453 /// assert_eq!(header.checksum, [0, 0]);
1454 /// ```
1455 #[inline(always)]
1456 fn new_zeroed() -> Self
1457 where
1458 Self: Sized,
1459 {
1460 // SAFETY: `FromZeroes` says that the all-zeroes bit pattern is legal.
1461 unsafe { mem::zeroed() }
1462 }
1463
1464 /// Creates a `Box<Self>` from zeroed bytes.
1465 ///
1466 /// This function is useful for allocating large values on the heap and
1467 /// zero-initializing them, without ever creating a temporary instance of
1468 /// `Self` on the stack. For example, `<[u8; 1048576]>::new_box_zeroed()`
1469 /// will allocate `[u8; 1048576]` directly on the heap; it does not require
1470 /// storing `[u8; 1048576]` in a temporary variable on the stack.
1471 ///
1472 /// On systems that use a heap implementation that supports allocating from
1473 /// pre-zeroed memory, using `new_box_zeroed` (or related functions) may
1474 /// have performance benefits.
1475 ///
1476 /// Note that `Box<Self>` can be converted to `Arc<Self>` and other
1477 /// container types without reallocation.
1478 ///
1479 /// # Panics
1480 ///
1481 /// Panics if allocation of `size_of::<Self>()` bytes fails.
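///
/// # Examples
///
/// A minimal sketch (the array size here is arbitrary; this example assumes
/// the `alloc` feature is enabled):
///
/// ```
/// # use zerocopy::FromZeroes;
/// let buf: Box<[u8; 1024]> = <[u8; 1024]>::new_box_zeroed();
/// assert!(buf.iter().all(|&b| b == 0));
/// ```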
1482 #[cfg(feature = "alloc")]
1483 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1484 #[inline]
1485 fn new_box_zeroed() -> Box<Self>
1486 where
1487 Self: Sized,
1488 {
1489 // If `T` is a ZST, then return a proper boxed instance of it. There is
1490 // no allocation, but `Box` does require a correct dangling pointer.
1491 let layout = Layout::new::<Self>();
1492 if layout.size() == 0 {
1493 return Box::new(Self::new_zeroed());
1494 }
1495
1496 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1497 #[allow(clippy::undocumented_unsafe_blocks)]
1498 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1499 if ptr.is_null() {
1500 alloc::alloc::handle_alloc_error(layout);
1501 }
1502 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1503 #[allow(clippy::undocumented_unsafe_blocks)]
1504 unsafe {
1505 Box::from_raw(ptr)
1506 }
1507 }
1508
1509 /// Creates a `Box<[Self]>` (a boxed slice) from zeroed bytes.
1510 ///
1511 /// This function is useful for allocating large values of `[Self]` on the
1512 /// heap and zero-initializing them, without ever creating a temporary
1513 /// instance of `[Self; _]` on the stack. For example,
1514 /// `u8::new_box_slice_zeroed(1048576)` will allocate the slice directly on
1515 /// the heap; it does not require storing the slice on the stack.
1516 ///
1517 /// On systems that use a heap implementation that supports allocating from
1518 /// pre-zeroed memory, using `new_box_slice_zeroed` may have performance
1519 /// benefits.
1520 ///
1521 /// If `Self` is a zero-sized type, then this function will return a
1522 /// `Box<[Self]>` that has the correct `len`. Such a box cannot contain any
1523 /// actual information, but its `len()` property will report the correct
1524 /// value.
1525 ///
1526 /// # Panics
1527 ///
1528 /// * Panics if `size_of::<Self>() * len` overflows.
1529 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
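///
/// # Examples
///
/// A minimal sketch (the length is arbitrary; this example assumes the
/// `alloc` feature is enabled):
///
/// ```
/// # use zerocopy::FromZeroes;
/// let slice: Box<[u16]> = u16::new_box_slice_zeroed(8);
/// assert_eq!(slice.len(), 8);
/// assert!(slice.iter().all(|&x| x == 0));
/// ```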
1530 #[cfg(feature = "alloc")]
1531 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1532 #[inline]
1533 fn new_box_slice_zeroed(len: usize) -> Box<[Self]>
1534 where
1535 Self: Sized,
1536 {
1537 let size = mem::size_of::<Self>()
1538 .checked_mul(len)
1539 .expect("mem::size_of::<Self>() * len overflows `usize`");
1540 let align = mem::align_of::<Self>();
1541 // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a
1542 // bug in which sufficiently-large allocations (those which, when
1543 // rounded up to the alignment, overflow `isize`) are not rejected,
1544 // which can cause undefined behavior. See #64 for details.
1545 //
1546 // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
1547 #[allow(clippy::as_conversions)]
1548 let max_alloc = (isize::MAX as usize).saturating_sub(align);
1549 assert!(size <= max_alloc);
1550 // TODO(https://github.com/rust-lang/rust/issues/55724): Use
1551 // `Layout::repeat` once it's stabilized.
1552 let layout =
1553 Layout::from_size_align(size, align).expect("total allocation size overflows `isize`");
1554
1555 let ptr = if layout.size() != 0 {
1556 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1557 #[allow(clippy::undocumented_unsafe_blocks)]
1558 let ptr = unsafe { alloc::alloc::alloc_zeroed(layout).cast::<Self>() };
1559 if ptr.is_null() {
1560 alloc::alloc::handle_alloc_error(layout);
1561 }
1562 ptr
1563 } else {
1564 // `Box<[T]>` does not allocate when `T` is zero-sized or when `len`
1565 // is zero, but it does require a non-null dangling pointer for its
1566 // allocation.
1567 NonNull::<Self>::dangling().as_ptr()
1568 };
1569
1570 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
1571 #[allow(clippy::undocumented_unsafe_blocks)]
1572 unsafe {
1573 Box::from_raw(slice::from_raw_parts_mut(ptr, len))
1574 }
1575 }
1576
1577 /// Creates a `Vec<Self>` from zeroed bytes.
1578 ///
1579 /// This function is useful for allocating large `Vec`s and
1580 /// zero-initializing them, without ever creating a temporary instance of
1581 /// `[Self; _]` (or many temporary instances of `Self`) on the stack. For
1582 /// example, `u8::new_vec_zeroed(1048576)` will allocate directly on the
1583 /// heap; it does not require storing intermediate values on the stack.
1584 ///
1585 /// On systems that use a heap implementation that supports allocating from
1586 /// pre-zeroed memory, using `new_vec_zeroed` may have performance benefits.
1587 ///
1588 /// If `Self` is a zero-sized type, then this function will return a
1589 /// `Vec<Self>` that has the correct `len`. Such a `Vec` cannot contain any
1590 /// actual information, but its `len()` property will report the correct
1591 /// value.
1592 ///
1593 /// # Panics
1594 ///
1595 /// * Panics if `size_of::<Self>() * len` overflows.
1596 /// * Panics if allocation of `size_of::<Self>() * len` bytes fails.
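///
/// # Examples
///
/// A minimal sketch (the length is arbitrary; this example assumes the
/// `alloc` feature is enabled):
///
/// ```
/// # use zerocopy::FromZeroes;
/// let v: Vec<u8> = u8::new_vec_zeroed(16);
/// assert_eq!(v, vec![0u8; 16]);
/// ```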
1597 #[cfg(feature = "alloc")]
1598 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
1599 #[inline(always)]
1600 fn new_vec_zeroed(len: usize) -> Vec<Self>
1601 where
1602 Self: Sized,
1603 {
1604 Self::new_box_slice_zeroed(len).into()
1605 }
1606 }
1607
1608 /// Analyzes whether a type is [`FromBytes`].
1609 ///
1610 /// This derive analyzes, at compile time, whether the annotated type satisfies
1611 /// the [safety conditions] of `FromBytes` and implements `FromBytes` if it is
1612 /// sound to do so. This derive can be applied to structs, enums, and unions;
1613 /// e.g.:
1614 ///
1615 /// ```
1616 /// # use zerocopy_derive::{FromBytes, FromZeroes};
1617 /// #[derive(FromZeroes, FromBytes)]
1618 /// struct MyStruct {
1619 /// # /*
1620 /// ...
1621 /// # */
1622 /// }
1623 ///
1624 /// #[derive(FromZeroes, FromBytes)]
1625 /// #[repr(u8)]
1626 /// enum MyEnum {
1627 /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1628 /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1629 /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1630 /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1631 /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1632 /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1633 /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1634 /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1635 /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1636 /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1637 /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1638 /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1639 /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1640 /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1641 /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1642 /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1643 /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1644 /// # VFF,
1645 /// # /*
1646 /// ...
1647 /// # */
1648 /// }
1649 ///
1650 /// #[derive(FromZeroes, FromBytes)]
1651 /// union MyUnion {
1652 /// # variant: u8,
1653 /// # /*
1654 /// ...
1655 /// # */
1656 /// }
1657 /// ```
1658 ///
1659 /// [safety conditions]: trait@FromBytes#safety
1660 ///
1661 /// # Analysis
1662 ///
1663 /// *This section describes, roughly, the analysis performed by this derive to
1664 /// determine whether it is sound to implement `FromBytes` for a given type.
1665 /// Unless you are modifying the implementation of this derive, or attempting to
1666 /// manually implement `FromBytes` for a type yourself, you don't need to read
1667 /// this section.*
1668 ///
1669 /// If a type has the following properties, then this derive can implement
1670 /// `FromBytes` for that type:
1671 ///
1672 /// - If the type is a struct, all of its fields must be `FromBytes`.
1673 /// - If the type is an enum:
1674 /// - It must be a C-like enum (meaning that all variants have no fields).
1675 /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
1676 /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
1677 /// - Every possible discriminant value must be used (so that every possible
1678 /// bit pattern is a valid one); see the sketch after this list. Be very
1679 /// careful when using the `C`, `usize`, or `isize` representations, as
1680 /// their size is platform-dependent.
1681 /// - The type must not contain any [`UnsafeCell`]s (this is required in order
1682 /// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
1683 /// memory). The type may contain references or pointers to `UnsafeCell`s so
1684 /// long as those values can themselves be initialized from zeroes
1685 /// (`FromBytes` is not currently implemented for, e.g., `Option<*const
1686 /// UnsafeCell<_>>`, but it could be one day).
1687 ///
1688 /// [`UnsafeCell`]: core::cell::UnsafeCell
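///
/// For illustration, this hypothetical enum is rejected: with `repr(u8)` it
/// would need all 256 possible discriminant values, but it defines only two,
/// so most bit patterns would not be valid instances:
///
/// ```compile_fail
/// # use zerocopy_derive::{FromBytes, FromZeroes};
/// #[derive(FromZeroes, FromBytes)]
/// #[repr(u8)]
/// enum TwoVariants {
///     A,
///     B,
/// }
/// ```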
1689 ///
1690 /// This analysis is subject to change. Unsafe code may *only* rely on the
1691 /// documented [safety conditions] of `FromBytes`, and must *not* rely on the
1692 /// implementation details of this derive.
1693 ///
1694 /// ## Why isn't an explicit representation required for structs?
1695 ///
1696 /// Neither this derive, nor the [safety conditions] of `FromBytes`, requires
1697 /// that structs are marked with `#[repr(C)]`.
1698 ///
1699 /// Per the [Rust reference][reference],
1700 ///
1701 /// > The representation of a type can change the padding between fields, but
1702 /// > does not change the layout of the fields themselves.
1703 ///
1704 /// [reference]: https://doc.rust-lang.org/reference/type-layout.html#representations
1705 ///
1706 /// Since the layout of structs only consists of padding bytes and field bytes,
1707 /// a struct is soundly `FromBytes` if:
1708 /// 1. its padding is soundly `FromBytes`, and
1709 /// 2. its fields are soundly `FromBytes`.
1710 ///
1711 /// The first condition is always satisfied: padding bytes do not have
1712 /// any validity constraints. A [discussion] of this question in the Unsafe Code
1713 /// Guidelines Working Group concluded that it would be virtually unimaginable
1714 /// for future versions of rustc to add validity constraints to padding bytes.
1715 ///
1716 /// [discussion]: https://github.com/rust-lang/unsafe-code-guidelines/issues/174
1717 ///
1718 /// Whether a struct is soundly `FromBytes` therefore solely depends on whether
1719 /// its fields are `FromBytes`.
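///
/// For example, this hypothetical struct derives `FromBytes` successfully
/// despite having no `repr` attribute at all:
///
/// ```
/// # use zerocopy_derive::{FromBytes, FromZeroes};
/// #[derive(FromZeroes, FromBytes)]
/// struct NoRepr {
///     x: u32,
///     y: [u8; 4],
/// }
/// ```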
1720 // TODO(#146): Document why we don't require an enum to have an explicit `repr`
1721 // attribute.
1722 #[cfg(any(feature = "derive", test))]
1723 #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
1724 pub use zerocopy_derive::FromBytes;
1725
1726 /// Types for which any bit pattern is valid.
1727 ///
1728 /// Any memory region of the appropriate length which contains initialized bytes
1729 /// can be viewed as any `FromBytes` type with no runtime overhead. This is
1730 /// useful for efficiently parsing bytes as structured data.
1731 ///
1732 /// # Implementation
1733 ///
1734 /// **Do not implement this trait yourself!** Instead, use
1735 /// [`#[derive(FromBytes)]`][derive] (requires the `derive` Cargo feature);
1736 /// e.g.:
1737 ///
1738 /// ```
1739 /// # use zerocopy_derive::{FromBytes, FromZeroes};
1740 /// #[derive(FromZeroes, FromBytes)]
1741 /// struct MyStruct {
1742 /// # /*
1743 /// ...
1744 /// # */
1745 /// }
1746 ///
1747 /// #[derive(FromZeroes, FromBytes)]
1748 /// #[repr(u8)]
1749 /// enum MyEnum {
1750 /// # V00, V01, V02, V03, V04, V05, V06, V07, V08, V09, V0A, V0B, V0C, V0D, V0E,
1751 /// # V0F, V10, V11, V12, V13, V14, V15, V16, V17, V18, V19, V1A, V1B, V1C, V1D,
1752 /// # V1E, V1F, V20, V21, V22, V23, V24, V25, V26, V27, V28, V29, V2A, V2B, V2C,
1753 /// # V2D, V2E, V2F, V30, V31, V32, V33, V34, V35, V36, V37, V38, V39, V3A, V3B,
1754 /// # V3C, V3D, V3E, V3F, V40, V41, V42, V43, V44, V45, V46, V47, V48, V49, V4A,
1755 /// # V4B, V4C, V4D, V4E, V4F, V50, V51, V52, V53, V54, V55, V56, V57, V58, V59,
1756 /// # V5A, V5B, V5C, V5D, V5E, V5F, V60, V61, V62, V63, V64, V65, V66, V67, V68,
1757 /// # V69, V6A, V6B, V6C, V6D, V6E, V6F, V70, V71, V72, V73, V74, V75, V76, V77,
1758 /// # V78, V79, V7A, V7B, V7C, V7D, V7E, V7F, V80, V81, V82, V83, V84, V85, V86,
1759 /// # V87, V88, V89, V8A, V8B, V8C, V8D, V8E, V8F, V90, V91, V92, V93, V94, V95,
1760 /// # V96, V97, V98, V99, V9A, V9B, V9C, V9D, V9E, V9F, VA0, VA1, VA2, VA3, VA4,
1761 /// # VA5, VA6, VA7, VA8, VA9, VAA, VAB, VAC, VAD, VAE, VAF, VB0, VB1, VB2, VB3,
1762 /// # VB4, VB5, VB6, VB7, VB8, VB9, VBA, VBB, VBC, VBD, VBE, VBF, VC0, VC1, VC2,
1763 /// # VC3, VC4, VC5, VC6, VC7, VC8, VC9, VCA, VCB, VCC, VCD, VCE, VCF, VD0, VD1,
1764 /// # VD2, VD3, VD4, VD5, VD6, VD7, VD8, VD9, VDA, VDB, VDC, VDD, VDE, VDF, VE0,
1765 /// # VE1, VE2, VE3, VE4, VE5, VE6, VE7, VE8, VE9, VEA, VEB, VEC, VED, VEE, VEF,
1766 /// # VF0, VF1, VF2, VF3, VF4, VF5, VF6, VF7, VF8, VF9, VFA, VFB, VFC, VFD, VFE,
1767 /// # VFF,
1768 /// # /*
1769 /// ...
1770 /// # */
1771 /// }
1772 ///
1773 /// #[derive(FromZeroes, FromBytes)]
1774 /// union MyUnion {
1775 /// # variant: u8,
1776 /// # /*
1777 /// ...
1778 /// # */
1779 /// }
1780 /// ```
1781 ///
1782 /// This derive performs a sophisticated, compile-time safety analysis to
1783 /// determine whether a type is `FromBytes`.
1784 ///
1785 /// # Safety
1786 ///
1787 /// *This section describes what is required in order for `T: FromBytes`, and
1788 /// what unsafe code may assume of such types. If you don't plan on implementing
1789 /// `FromBytes` manually, and you don't plan on writing unsafe code that
1790 /// operates on `FromBytes` types, then you don't need to read this section.*
1791 ///
1792 /// If `T: FromBytes`, then unsafe code may assume that:
1793 /// - It is sound to treat any initialized sequence of bytes of length
1794 /// `size_of::<T>()` as a `T`.
1795 /// - Given `b: &[u8]` where `b.len() == size_of::<T>()`, `b` is aligned to
1796 /// `align_of::<T>()` it is sound to construct a `t: &T` at the same address
1797 /// as `b`, and it is sound for both `b` and `t` to be live at the same time.
1798 ///
1799 /// If a type is marked as `FromBytes` which violates this contract, it may
1800 /// cause undefined behavior.
1801 ///
1802 /// `#[derive(FromBytes)]` only permits [types which satisfy these
1803 /// requirements][derive-analysis].
1804 ///
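/// For illustration, here is a minimal sketch of the kind of unsafe code
/// this contract licenses. The helper `read_one` is hypothetical; real code
/// should use [`FromBytes::read_from`] instead:
///
/// ```
/// # use zerocopy::FromBytes;
/// fn read_one<T: FromBytes>(bytes: &[u8]) -> Option<T> {
///     if bytes.len() != core::mem::size_of::<T>() {
///         return None;
///     }
///     // SAFETY: `bytes` contains exactly `size_of::<T>()` initialized
///     // bytes, and `T: FromBytes` means that any initialized sequence of
///     // that many bytes is a valid `T`. `read_unaligned` imposes no
///     // alignment requirement.
///     Some(unsafe { bytes.as_ptr().cast::<T>().read_unaligned() })
/// }
///
/// let n: u32 = read_one(&42u32.to_ne_bytes()).unwrap();
/// assert_eq!(n, 42);
/// ```
///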
1805 #[cfg_attr(
1806 feature = "derive",
1807 doc = "[derive]: zerocopy_derive::FromBytes",
1808 doc = "[derive-analysis]: zerocopy_derive::FromBytes#analysis"
1809 )]
1810 #[cfg_attr(
1811 not(feature = "derive"),
1812 doc = concat!("[derive]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.FromBytes.html"),
1813 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.FromBytes.html#analysis"),
1814 )]
1815 pub unsafe trait FromBytes: FromZeroes {
1816 // The `Self: Sized` bound makes it so that `FromBytes` is still object
1817 // safe.
1818 #[doc(hidden)]
1819 fn only_derive_is_allowed_to_implement_this_trait()
1820 where
1821 Self: Sized;
1822
1823 /// Interprets the given `bytes` as a `&Self` without copying.
1824 ///
1825 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1826 /// `align_of::<Self>()`, this returns `None`.
1827 ///
1828 /// # Examples
1829 ///
1830 /// ```
1831 /// use zerocopy::FromBytes;
1832 /// # use zerocopy_derive::*;
1833 ///
1834 /// #[derive(FromZeroes, FromBytes)]
1835 /// #[repr(C)]
1836 /// struct PacketHeader {
1837 /// src_port: [u8; 2],
1838 /// dst_port: [u8; 2],
1839 /// length: [u8; 2],
1840 /// checksum: [u8; 2],
1841 /// }
1842 ///
1843 /// // These bytes encode a `PacketHeader`.
1844 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
1845 ///
1846 /// let header = PacketHeader::ref_from(bytes).unwrap();
1847 ///
1848 /// assert_eq!(header.src_port, [0, 1]);
1849 /// assert_eq!(header.dst_port, [2, 3]);
1850 /// assert_eq!(header.length, [4, 5]);
1851 /// assert_eq!(header.checksum, [6, 7]);
1852 /// ```
1853 #[inline]
1854 fn ref_from(bytes: &[u8]) -> Option<&Self>
1855 where
1856 Self: Sized,
1857 {
1858 Ref::<&[u8], Self>::new(bytes).map(Ref::into_ref)
1859 }
1860
1861 /// Interprets the prefix of the given `bytes` as a `&Self` without copying.
1862 ///
1863 /// `ref_from_prefix` returns a reference to the first `size_of::<Self>()`
1864 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1865 /// aligned to `align_of::<Self>()`, this returns `None`.
1866 ///
1867 /// To also access the remaining bytes, use [`Ref::new_from_prefix`]. Then, use
1868 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1869 ///
1870 /// # Examples
1871 ///
1872 /// ```
1873 /// use zerocopy::FromBytes;
1874 /// # use zerocopy_derive::*;
1875 ///
1876 /// #[derive(FromZeroes, FromBytes)]
1877 /// #[repr(C)]
1878 /// struct PacketHeader {
1879 /// src_port: [u8; 2],
1880 /// dst_port: [u8; 2],
1881 /// length: [u8; 2],
1882 /// checksum: [u8; 2],
1883 /// }
1884 ///
1885 /// // These are more bytes than are needed to encode a `PacketHeader`.
1886 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1887 ///
1888 /// let header = PacketHeader::ref_from_prefix(bytes).unwrap();
1889 ///
1890 /// assert_eq!(header.src_port, [0, 1]);
1891 /// assert_eq!(header.dst_port, [2, 3]);
1892 /// assert_eq!(header.length, [4, 5]);
1893 /// assert_eq!(header.checksum, [6, 7]);
1894 /// ```
1895 #[inline]
1896 fn ref_from_prefix(bytes: &[u8]) -> Option<&Self>
1897 where
1898 Self: Sized,
1899 {
1900 Ref::<&[u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_ref())
1901 }
1902
1903 /// Interprets the suffix of the given `bytes` as a `&Self` without copying.
1904 ///
1905 /// `ref_from_suffix` returns a reference to the last `size_of::<Self>()`
1906 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
1907 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
1908 ///
1909 /// To also access the preceding bytes, use [`Ref::new_from_suffix`]. Then, use
1910 /// [`Ref::into_ref`] to get a `&Self` with the same lifetime.
1911 ///
1912 /// # Examples
1913 ///
1914 /// ```
1915 /// use zerocopy::FromBytes;
1916 /// # use zerocopy_derive::*;
1917 ///
1918 /// #[derive(FromZeroes, FromBytes)]
1919 /// #[repr(C)]
1920 /// struct PacketTrailer {
1921 /// frame_check_sequence: [u8; 4],
1922 /// }
1923 ///
1924 /// // These are more bytes than are needed to encode a `PacketTrailer`.
1925 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
1926 ///
1927 /// let trailer = PacketTrailer::ref_from_suffix(bytes).unwrap();
1928 ///
1929 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
1930 /// ```
1931 #[inline]
1932 fn ref_from_suffix(bytes: &[u8]) -> Option<&Self>
1933 where
1934 Self: Sized,
1935 {
1936 Ref::<&[u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_ref())
1937 }
1938
1939 /// Interprets the given `bytes` as a `&mut Self` without copying.
1940 ///
1941 /// If `bytes.len() != size_of::<Self>()` or `bytes` is not aligned to
1942 /// `align_of::<Self>()`, this returns `None`.
1943 ///
1944 /// # Examples
1945 ///
1946 /// ```
1947 /// use zerocopy::FromBytes;
1948 /// # use zerocopy_derive::*;
1949 ///
1950 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1951 /// #[repr(C)]
1952 /// struct PacketHeader {
1953 /// src_port: [u8; 2],
1954 /// dst_port: [u8; 2],
1955 /// length: [u8; 2],
1956 /// checksum: [u8; 2],
1957 /// }
1958 ///
1959 /// // These bytes encode a `PacketHeader`.
1960 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
1961 ///
1962 /// let header = PacketHeader::mut_from(bytes).unwrap();
1963 ///
1964 /// assert_eq!(header.src_port, [0, 1]);
1965 /// assert_eq!(header.dst_port, [2, 3]);
1966 /// assert_eq!(header.length, [4, 5]);
1967 /// assert_eq!(header.checksum, [6, 7]);
1968 ///
1969 /// header.checksum = [0, 0];
1970 ///
1971 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0]);
1972 /// ```
1973 #[inline]
1974 fn mut_from(bytes: &mut [u8]) -> Option<&mut Self>
1975 where
1976 Self: Sized + AsBytes,
1977 {
1978 Ref::<&mut [u8], Self>::new(bytes).map(Ref::into_mut)
1979 }
1980
1981 /// Interprets the prefix of the given `bytes` as a `&mut Self` without
1982 /// copying.
1983 ///
1984 /// `mut_from_prefix` returns a reference to the first `size_of::<Self>()`
1985 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or `bytes` is not
1986 /// aligned to `align_of::<Self>()`, this returns `None`.
1987 ///
1988 /// To also access the remaining bytes, use [`Ref::new_from_prefix`]. Then, use
1989 /// [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
1990 ///
1991 /// # Examples
1992 ///
1993 /// ```
1994 /// use zerocopy::FromBytes;
1995 /// # use zerocopy_derive::*;
1996 ///
1997 /// #[derive(AsBytes, FromZeroes, FromBytes)]
1998 /// #[repr(C)]
1999 /// struct PacketHeader {
2000 /// src_port: [u8; 2],
2001 /// dst_port: [u8; 2],
2002 /// length: [u8; 2],
2003 /// checksum: [u8; 2],
2004 /// }
2005 ///
2006 /// // These are more bytes than are needed to encode a `PacketHeader`.
2007 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2008 ///
2009 /// let header = PacketHeader::mut_from_prefix(bytes).unwrap();
2010 ///
2011 /// assert_eq!(header.src_port, [0, 1]);
2012 /// assert_eq!(header.dst_port, [2, 3]);
2013 /// assert_eq!(header.length, [4, 5]);
2014 /// assert_eq!(header.checksum, [6, 7]);
2015 ///
2016 /// header.checksum = [0, 0];
2017 ///
2018 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 8, 9]);
2019 /// ```
2020 #[inline]
2021 fn mut_from_prefix(bytes: &mut [u8]) -> Option<&mut Self>
2022 where
2023 Self: Sized + AsBytes,
2024 {
2025 Ref::<&mut [u8], Self>::new_from_prefix(bytes).map(|(r, _)| r.into_mut())
2026 }
2027
2028 /// Interprets the suffix of the given `bytes` as a `&mut Self` without copying.
2029 ///
2030 /// `mut_from_suffix` returns a reference to the last `size_of::<Self>()`
2031 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()` or the suffix of
2032 /// `bytes` is not aligned to `align_of::<Self>()`, this returns `None`.
2033 ///
2034 /// To also access the preceding bytes, use [`Ref::new_from_suffix`]. Then,
2035 /// use [`Ref::into_mut`] to get a `&mut Self` with the same lifetime.
2036 ///
2037 /// # Examples
2038 ///
2039 /// ```
2040 /// use zerocopy::FromBytes;
2041 /// # use zerocopy_derive::*;
2042 ///
2043 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2044 /// #[repr(C)]
2045 /// struct PacketTrailer {
2046 /// frame_check_sequence: [u8; 4],
2047 /// }
2048 ///
2049 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2050 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2051 ///
2052 /// let trailer = PacketTrailer::mut_from_suffix(bytes).unwrap();
2053 ///
2054 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2055 ///
2056 /// trailer.frame_check_sequence = [0, 0, 0, 0];
2057 ///
2058 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2059 /// ```
2060 #[inline]
2061 fn mut_from_suffix(bytes: &mut [u8]) -> Option<&mut Self>
2062 where
2063 Self: Sized + AsBytes,
2064 {
2065 Ref::<&mut [u8], Self>::new_from_suffix(bytes).map(|(_, r)| r.into_mut())
2066 }
2067
2068 /// Interprets the given `bytes` as a `&[Self]` without copying.
2069 ///
2070 /// If `bytes.len() % size_of::<Self>() != 0` or `bytes` is not aligned to
2071 /// `align_of::<Self>()`, this returns `None`.
2072 ///
2073 /// If you need to convert a specific number of slice elements, see
2074 /// [`slice_from_prefix`](FromBytes::slice_from_prefix) or
2075 /// [`slice_from_suffix`](FromBytes::slice_from_suffix).
2076 ///
2077 /// # Panics
2078 ///
2079 /// If `Self` is a zero-sized type.
2080 ///
2081 /// # Examples
2082 ///
2083 /// ```
2084 /// use zerocopy::FromBytes;
2085 /// # use zerocopy_derive::*;
2086 ///
2087 /// # #[derive(Debug, PartialEq, Eq)]
2088 /// #[derive(FromZeroes, FromBytes)]
2089 /// #[repr(C)]
2090 /// struct Pixel {
2091 /// r: u8,
2092 /// g: u8,
2093 /// b: u8,
2094 /// a: u8,
2095 /// }
2096 ///
2097 /// // These bytes encode two `Pixel`s.
2098 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2099 ///
2100 /// let pixels = Pixel::slice_from(bytes).unwrap();
2101 ///
2102 /// assert_eq!(pixels, &[
2103 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2104 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2105 /// ]);
2106 /// ```
2107 #[inline]
2108 fn slice_from(bytes: &[u8]) -> Option<&[Self]>
2109 where
2110 Self: Sized,
2111 {
2112 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_slice())
2113 }
2114
2115 /// Interprets the prefix of the given `bytes` as a `&[Self]` with length
2116 /// equal to `count` without copying.
2117 ///
2118 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2119 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2120 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2121 /// `&[Self]`, and returns the remaining bytes to the caller. It also
2122 /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2123 /// If any of the length, alignment, or overflow checks fail, it returns
2124 /// `None`.
2125 ///
2126 /// # Panics
2127 ///
2128 /// If `T` is a zero-sized type.
2129 ///
2130 /// # Examples
2131 ///
2132 /// ```
2133 /// use zerocopy::FromBytes;
2134 /// # use zerocopy_derive::*;
2135 ///
2136 /// # #[derive(Debug, PartialEq, Eq)]
2137 /// #[derive(FromZeroes, FromBytes)]
2138 /// #[repr(C)]
2139 /// struct Pixel {
2140 /// r: u8,
2141 /// g: u8,
2142 /// b: u8,
2143 /// a: u8,
2144 /// }
2145 ///
2146 /// // These are more bytes than are needed to encode two `Pixel`s.
2147 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2148 ///
2149 /// let (pixels, rest) = Pixel::slice_from_prefix(bytes, 2).unwrap();
2150 ///
2151 /// assert_eq!(pixels, &[
2152 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2153 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2154 /// ]);
2155 ///
2156 /// assert_eq!(rest, &[8, 9]);
2157 /// ```
2158 #[inline]
2159 fn slice_from_prefix(bytes: &[u8], count: usize) -> Option<(&[Self], &[u8])>
2160 where
2161 Self: Sized,
2162 {
2163 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_slice(), b))
2164 }
2165
2166 /// Interprets the suffix of the given `bytes` as a `&[Self]` with length
2167 /// equal to `count` without copying.
2168 ///
2169 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2170 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2171 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2172 /// `&[Self]`, and returns the preceding bytes to the caller. It also
2173 /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2174 /// If any of the length, alignment, or overflow checks fail, it returns
2175 /// `None`.
2176 ///
2177 /// # Panics
2178 ///
2179 /// If `T` is a zero-sized type.
2180 ///
2181 /// # Examples
2182 ///
2183 /// ```
2184 /// use zerocopy::FromBytes;
2185 /// # use zerocopy_derive::*;
2186 ///
2187 /// # #[derive(Debug, PartialEq, Eq)]
2188 /// #[derive(FromZeroes, FromBytes)]
2189 /// #[repr(C)]
2190 /// struct Pixel {
2191 /// r: u8,
2192 /// g: u8,
2193 /// b: u8,
2194 /// a: u8,
2195 /// }
2196 ///
2197 /// // These are more bytes than are needed to encode two `Pixel`s.
2198 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2199 ///
2200 /// let (rest, pixels) = Pixel::slice_from_suffix(bytes, 2).unwrap();
2201 ///
2202 /// assert_eq!(rest, &[0, 1]);
2203 ///
2204 /// assert_eq!(pixels, &[
2205 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2206 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2207 /// ]);
2208 /// ```
2209 #[inline]
2210 fn slice_from_suffix(bytes: &[u8], count: usize) -> Option<(&[u8], &[Self])>
2211 where
2212 Self: Sized,
2213 {
2214 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_slice()))
2215 }
2216
2217 /// Interprets the given `bytes` as a `&mut [Self]` without copying.
2218 ///
2219 /// If `bytes.len() % size_of::<T>() != 0` or `bytes` is not aligned to
2220 /// `align_of::<T>()`, this returns `None`.
2221 ///
2222 /// If you need to convert a specific number of slice elements, see
2223 /// [`mut_slice_from_prefix`](FromBytes::mut_slice_from_prefix) or
2224 /// [`mut_slice_from_suffix`](FromBytes::mut_slice_from_suffix).
2225 ///
2226 /// # Panics
2227 ///
2228 /// If `T` is a zero-sized type.
2229 ///
2230 /// # Examples
2231 ///
2232 /// ```
2233 /// use zerocopy::FromBytes;
2234 /// # use zerocopy_derive::*;
2235 ///
2236 /// # #[derive(Debug, PartialEq, Eq)]
2237 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2238 /// #[repr(C)]
2239 /// struct Pixel {
2240 /// r: u8,
2241 /// g: u8,
2242 /// b: u8,
2243 /// a: u8,
2244 /// }
2245 ///
2246 /// // These bytes encode two `Pixel`s.
2247 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7][..];
2248 ///
2249 /// let pixels = Pixel::mut_slice_from(bytes).unwrap();
2250 ///
2251 /// assert_eq!(pixels, &[
2252 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2253 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2254 /// ]);
2255 ///
2256 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2257 ///
2258 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0]);
2259 /// ```
2260 #[inline]
2261 fn mut_slice_from(bytes: &mut [u8]) -> Option<&mut [Self]>
2262 where
2263 Self: Sized + AsBytes,
2264 {
2265 Ref::<_, [Self]>::new_slice(bytes).map(|r| r.into_mut_slice())
2266 }
2267
2268 /// Interprets the prefix of the given `bytes` as a `&mut [Self]` with length
2269 /// equal to `count` without copying.
2270 ///
2271 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2272 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2273 /// first `size_of::<T>() * count` bytes from `bytes` to construct a
2274 /// `&mut [Self]`, and returns the remaining bytes to the caller. It also
2275 /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2276 /// If any of the length, alignment, or overflow checks fail, it returns
2277 /// `None`.
2278 ///
2279 /// # Panics
2280 ///
2281 /// If `T` is a zero-sized type.
2282 ///
2283 /// # Examples
2284 ///
2285 /// ```
2286 /// use zerocopy::FromBytes;
2287 /// # use zerocopy_derive::*;
2288 ///
2289 /// # #[derive(Debug, PartialEq, Eq)]
2290 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2291 /// #[repr(C)]
2292 /// struct Pixel {
2293 /// r: u8,
2294 /// g: u8,
2295 /// b: u8,
2296 /// a: u8,
2297 /// }
2298 ///
2299 /// // These are more bytes than are needed to encode two `Pixel`s.
2300 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2301 ///
2302 /// let (pixels, rest) = Pixel::mut_slice_from_prefix(bytes, 2).unwrap();
2303 ///
2304 /// assert_eq!(pixels, &[
2305 /// Pixel { r: 0, g: 1, b: 2, a: 3 },
2306 /// Pixel { r: 4, g: 5, b: 6, a: 7 },
2307 /// ]);
2308 ///
2309 /// assert_eq!(rest, &[8, 9]);
2310 ///
2311 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2312 ///
2313 /// assert_eq!(bytes, [0, 1, 2, 3, 0, 0, 0, 0, 8, 9]);
2314 /// ```
2315 #[inline]
2316 fn mut_slice_from_prefix(bytes: &mut [u8], count: usize) -> Option<(&mut [Self], &mut [u8])>
2317 where
2318 Self: Sized + AsBytes,
2319 {
2320 Ref::<_, [Self]>::new_slice_from_prefix(bytes, count).map(|(r, b)| (r.into_mut_slice(), b))
2321 }
2322
2323 /// Interprets the suffix of the given `bytes` as a `&mut [Self]` with length
2324 /// equal to `count` without copying.
2325 ///
2326 /// This method verifies that `bytes.len() >= size_of::<T>() * count`
2327 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the
2328 /// last `size_of::<T>() * count` bytes from `bytes` to construct a
2329 /// `&mut [Self]`, and returns the preceding bytes to the caller. It also
2330 /// ensures that `size_of::<T>() * count` does not overflow a `usize`.
2331 /// If any of the length, alignment, or overflow checks fail, it returns
2332 /// `None`.
2333 ///
2334 /// # Panics
2335 ///
2336 /// If `T` is a zero-sized type.
2337 ///
2338 /// # Examples
2339 ///
2340 /// ```
2341 /// use zerocopy::FromBytes;
2342 /// # use zerocopy_derive::*;
2343 ///
2344 /// # #[derive(Debug, PartialEq, Eq)]
2345 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2346 /// #[repr(C)]
2347 /// struct Pixel {
2348 /// r: u8,
2349 /// g: u8,
2350 /// b: u8,
2351 /// a: u8,
2352 /// }
2353 ///
2354 /// // These are more bytes than are needed to encode two `Pixel`s.
2355 /// let bytes = &mut [0, 1, 2, 3, 4, 5, 6, 7, 8, 9][..];
2356 ///
2357 /// let (rest, pixels) = Pixel::mut_slice_from_suffix(bytes, 2).unwrap();
2358 ///
2359 /// assert_eq!(rest, &[0, 1]);
2360 ///
2361 /// assert_eq!(pixels, &[
2362 /// Pixel { r: 2, g: 3, b: 4, a: 5 },
2363 /// Pixel { r: 6, g: 7, b: 8, a: 9 },
2364 /// ]);
2365 ///
2366 /// pixels[1] = Pixel { r: 0, g: 0, b: 0, a: 0 };
2367 ///
2368 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 0, 0, 0, 0]);
2369 /// ```
2370 #[inline]
2371 fn mut_slice_from_suffix(bytes: &mut [u8], count: usize) -> Option<(&mut [u8], &mut [Self])>
2372 where
2373 Self: Sized + AsBytes,
2374 {
2375 Ref::<_, [Self]>::new_slice_from_suffix(bytes, count).map(|(b, r)| (b, r.into_mut_slice()))
2376 }
2377
2378 /// Reads a copy of `Self` from `bytes`.
2379 ///
2380 /// If `bytes.len() != size_of::<Self>()`, `read_from` returns `None`.
2381 ///
2382 /// # Examples
2383 ///
2384 /// ```
2385 /// use zerocopy::FromBytes;
2386 /// # use zerocopy_derive::*;
2387 ///
2388 /// #[derive(FromZeroes, FromBytes)]
2389 /// #[repr(C)]
2390 /// struct PacketHeader {
2391 /// src_port: [u8; 2],
2392 /// dst_port: [u8; 2],
2393 /// length: [u8; 2],
2394 /// checksum: [u8; 2],
2395 /// }
2396 ///
2397 /// // These bytes encode a `PacketHeader`.
2398 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7].as_slice();
2399 ///
2400 /// let header = PacketHeader::read_from(bytes).unwrap();
2401 ///
2402 /// assert_eq!(header.src_port, [0, 1]);
2403 /// assert_eq!(header.dst_port, [2, 3]);
2404 /// assert_eq!(header.length, [4, 5]);
2405 /// assert_eq!(header.checksum, [6, 7]);
2406 /// ```
2407 #[inline]
2408 fn read_from(bytes: &[u8]) -> Option<Self>
2409 where
2410 Self: Sized,
2411 {
2412 Ref::<_, Unalign<Self>>::new_unaligned(bytes).map(|r| r.read().into_inner())
2413 }
2414
2415 /// Reads a copy of `Self` from the prefix of `bytes`.
2416 ///
2417 /// `read_from_prefix` reads a `Self` from the first `size_of::<Self>()`
2418 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2419 /// `None`.
2420 ///
2421 /// # Examples
2422 ///
2423 /// ```
2424 /// use zerocopy::FromBytes;
2425 /// # use zerocopy_derive::*;
2426 ///
2427 /// #[derive(FromZeroes, FromBytes)]
2428 /// #[repr(C)]
2429 /// struct PacketHeader {
2430 /// src_port: [u8; 2],
2431 /// dst_port: [u8; 2],
2432 /// length: [u8; 2],
2433 /// checksum: [u8; 2],
2434 /// }
2435 ///
2436 /// // These are more bytes than are needed to encode a `PacketHeader`.
2437 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2438 ///
2439 /// let header = PacketHeader::read_from_prefix(bytes).unwrap();
2440 ///
2441 /// assert_eq!(header.src_port, [0, 1]);
2442 /// assert_eq!(header.dst_port, [2, 3]);
2443 /// assert_eq!(header.length, [4, 5]);
2444 /// assert_eq!(header.checksum, [6, 7]);
2445 /// ```
2446 #[inline]
2447 fn read_from_prefix(bytes: &[u8]) -> Option<Self>
2448 where
2449 Self: Sized,
2450 {
2451 Ref::<_, Unalign<Self>>::new_unaligned_from_prefix(bytes)
2452 .map(|(r, _)| r.read().into_inner())
2453 }
2454
2455 /// Reads a copy of `Self` from the suffix of `bytes`.
2456 ///
2457 /// `read_from_suffix` reads a `Self` from the last `size_of::<Self>()`
2458 /// bytes of `bytes`. If `bytes.len() < size_of::<Self>()`, it returns
2459 /// `None`.
2460 ///
2461 /// # Examples
2462 ///
2463 /// ```
2464 /// use zerocopy::FromBytes;
2465 /// # use zerocopy_derive::*;
2466 ///
2467 /// #[derive(FromZeroes, FromBytes)]
2468 /// #[repr(C)]
2469 /// struct PacketTrailer {
2470 /// frame_check_sequence: [u8; 4],
2471 /// }
2472 ///
2473 /// // These are more bytes than are needed to encode a `PacketTrailer`.
2474 /// let bytes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9].as_slice();
2475 ///
2476 /// let trailer = PacketTrailer::read_from_suffix(bytes).unwrap();
2477 ///
2478 /// assert_eq!(trailer.frame_check_sequence, [6, 7, 8, 9]);
2479 /// ```
2480 #[inline]
2481 fn read_from_suffix(bytes: &[u8]) -> Option<Self>
2482 where
2483 Self: Sized,
2484 {
2485 Ref::<_, Unalign<Self>>::new_unaligned_from_suffix(bytes)
2486 .map(|(_, r)| r.read().into_inner())
2487 }
2488 }
2489
2490 /// Analyzes whether a type is [`AsBytes`].
2491 ///
2492 /// This derive analyzes, at compile time, whether the annotated type satisfies
2493 /// the [safety conditions] of `AsBytes` and implements `AsBytes` if it is
2494 /// sound to do so. This derive can be applied to structs, enums, and unions;
2495 /// e.g.:
2496 ///
2497 /// ```
2498 /// # use zerocopy_derive::{AsBytes};
2499 /// #[derive(AsBytes)]
2500 /// #[repr(C)]
2501 /// struct MyStruct {
2502 /// # /*
2503 /// ...
2504 /// # */
2505 /// }
2506 ///
2507 /// #[derive(AsBytes)]
2508 /// #[repr(u8)]
2509 /// enum MyEnum {
2510 /// # Variant,
2511 /// # /*
2512 /// ...
2513 /// # */
2514 /// }
2515 ///
2516 /// #[derive(AsBytes)]
2517 /// #[repr(C)]
2518 /// union MyUnion {
2519 /// # variant: u8,
2520 /// # /*
2521 /// ...
2522 /// # */
2523 /// }
2524 /// ```
2525 ///
2526 /// [safety conditions]: trait@AsBytes#safety
2527 ///
2528 /// # Error Messages
2529 ///
2530 /// Due to the way that the custom derive for `AsBytes` is implemented, you may
2531 /// get an error like this:
2532 ///
2533 /// ```text
2534 /// error[E0277]: the trait bound `HasPadding<Foo, true>: ShouldBe<false>` is not satisfied
2535 /// --> lib.rs:23:10
2536 /// |
2537 /// 1 | #[derive(AsBytes)]
2538 /// | ^^^^^^^ the trait `ShouldBe<false>` is not implemented for `HasPadding<Foo, true>`
2539 /// |
2540 /// = help: the trait `ShouldBe<VALUE>` is implemented for `HasPadding<T, VALUE>`
2541 /// ```
2542 ///
2543 /// This error indicates that the type being annotated has padding bytes, which
2544 /// is illegal for `AsBytes` types. Consider reducing the alignment of some
2545 /// fields by using types in the [`byteorder`] module, adding explicit struct
2546 /// fields where those padding bytes would be, or using `#[repr(packed)]`. See
2547 /// the Rust Reference's page on [type layout] for more information
2548 /// about type layout and padding.
2549 ///
2550 /// [type layout]: https://doc.rust-lang.org/reference/type-layout.html
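///
/// For illustration, this hypothetical type is rejected because a padding
/// byte is inserted after `a` to align `b`:
///
/// ```compile_fail
/// # use zerocopy_derive::AsBytes;
/// #[derive(AsBytes)]
/// #[repr(C)]
/// struct Padded {
///     a: u8,
///     b: u16, // `u16` is 2-aligned, so a padding byte follows `a`
/// }
/// ```
///
/// Making the padding an explicit field (or using a 1-aligned type such as
/// those in the [`byteorder`] module) resolves the error:
///
/// ```
/// # use zerocopy_derive::AsBytes;
/// #[derive(AsBytes)]
/// #[repr(C)]
/// struct Unpadded {
///     a: u8,
///     _pad: u8,
///     b: [u8; 2],
/// }
/// ```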
2551 ///
2552 /// # Analysis
2553 ///
2554 /// *This section describes, roughly, the analysis performed by this derive to
2555 /// determine whether it is sound to implement `AsBytes` for a given type.
2556 /// Unless you are modifying the implementation of this derive, or attempting to
2557 /// manually implement `AsBytes` for a type yourself, you don't need to read
2558 /// this section.*
2559 ///
2560 /// If a type has the following properties, then this derive can implement
2561 /// `AsBytes` for that type:
2562 ///
2563 /// - If the type is a struct:
2564 /// - It must have a defined representation (`repr(C)`, `repr(transparent)`,
2565 /// or `repr(packed)`).
2566 /// - All of its fields must be `AsBytes`.
2567 /// - Its layout must have no padding. This is always true for
2568 /// `repr(transparent)` and `repr(packed)`. For `repr(C)`, see the layout
2569 /// algorithm described in the [Rust Reference].
2570 /// - If the type is an enum:
2571 /// - It must be a C-like enum (meaning that all variants have no fields).
2572 /// - It must have a defined representation (`repr`s `C`, `u8`, `u16`, `u32`,
2573 /// `u64`, `usize`, `i8`, `i16`, `i32`, `i64`, or `isize`).
2574 /// - The type must not contain any [`UnsafeCell`]s (this is required in order
2575 /// for it to be sound to construct a `&[u8]` and a `&T` to the same region of
2576 /// memory). The type may contain references or pointers to `UnsafeCell`s so
2577 /// long as those values can themselves be initialized from zeroes (`AsBytes`
2578 /// is not currently implemented for, e.g., `Option<&UnsafeCell<_>>`, but it
2579 /// could be one day).
2580 ///
2581 /// [`UnsafeCell`]: core::cell::UnsafeCell
2582 ///
2583 /// This analysis is subject to change. Unsafe code may *only* rely on the
2584 /// documented [safety conditions] of `AsBytes`, and must *not* rely on the
2585 /// implementation details of this derive.
2586 ///
2587 /// [Rust Reference]: https://doc.rust-lang.org/reference/type-layout.html
2588 #[cfg(any(feature = "derive", test))]
2589 #[cfg_attr(doc_cfg, doc(cfg(feature = "derive")))]
2590 pub use zerocopy_derive::AsBytes;
2591
2592 /// Types that can be viewed as an immutable slice of initialized bytes.
2593 ///
2594 /// Any `AsBytes` type can be viewed as a slice of initialized bytes of the same
2595 /// size. This is useful for efficiently serializing structured data as raw
2596 /// bytes.
2597 ///
2598 /// # Implementation
2599 ///
2600 /// **Do not implement this trait yourself!** Instead, use
2601 /// [`#[derive(AsBytes)]`][derive] (requires the `derive` Cargo feature); e.g.:
2602 ///
2603 /// ```
2604 /// # use zerocopy_derive::AsBytes;
2605 /// #[derive(AsBytes)]
2606 /// #[repr(C)]
2607 /// struct MyStruct {
2608 /// # /*
2609 /// ...
2610 /// # */
2611 /// }
2612 ///
2613 /// #[derive(AsBytes)]
2614 /// #[repr(u8)]
2615 /// enum MyEnum {
2616 /// # Variant0,
2617 /// # /*
2618 /// ...
2619 /// # */
2620 /// }
2621 ///
2622 /// #[derive(AsBytes)]
2623 /// #[repr(C)]
2624 /// union MyUnion {
2625 /// # variant: u8,
2626 /// # /*
2627 /// ...
2628 /// # */
2629 /// }
2630 /// ```
2631 ///
2632 /// This derive performs a sophisticated, compile-time safety analysis to
2633 /// determine whether a type is `AsBytes`. See the [derive
2634 /// documentation][derive] for guidance on how to interpret error messages
2635 /// produced by the derive's analysis.
2636 ///
2637 /// # Safety
2638 ///
2639 /// *This section describes what is required in order for `T: AsBytes`, and
2640 /// what unsafe code may assume of such types. If you don't plan on implementing
2641 /// `AsBytes` manually, and you don't plan on writing unsafe code that
2642 /// operates on `AsBytes` types, then you don't need to read this section.*
2643 ///
2644 /// If `T: AsBytes`, then unsafe code may assume that:
2645 /// - It is sound to treat any `t: T` as an immutable `[u8]` of length
2646 /// `size_of_val(t)`.
2647 /// - Given `t: &T`, it is sound to construct a `b: &[u8]` where `b.len() ==
2648 /// size_of_val(t)` at the same address as `t`, and it is sound for both `b`
2649 /// and `t` to be live at the same time.
2650 ///
2651 /// If a type is marked as `AsBytes` which violates this contract, it may cause
2652 /// undefined behavior.
2653 ///
2654 /// `#[derive(AsBytes)]` only permits [types which satisfy these
2655 /// requirements][derive-analysis].
2656 ///
2657 #[cfg_attr(
2658 feature = "derive",
2659 doc = "[derive]: zerocopy_derive::AsBytes",
2660 doc = "[derive-analysis]: zerocopy_derive::AsBytes#analysis"
2661 )]
2662 #[cfg_attr(
2663 not(feature = "derive"),
2664 doc = concat!("[derive]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.AsBytes.html"),
2665 doc = concat!("[derive-analysis]: https://docs.rs/zerocopy/", "0.7.34", "/zerocopy/derive.AsBytes.html#analysis"),
2666 )]
2667 pub unsafe trait AsBytes {
2668 // The `Self: Sized` bound makes it so that this function doesn't prevent
2669 // `AsBytes` from being object safe. Note that other `AsBytes` methods
2670 // prevent object safety, but those provide a benefit in exchange for object
2671 // safety. If at some point we remove those methods, change their type
2672 // signatures, or move them out of this trait so that `AsBytes` is object
2673 // safe again, it's important that this function not prevent object safety.
2674 #[doc(hidden)]
2675 fn only_derive_is_allowed_to_implement_this_trait()
2676 where
2677 Self: Sized;
2678
2679 /// Gets the bytes of this value.
2680 ///
2681 /// `as_bytes` provides access to the bytes of this value as an immutable
2682 /// byte slice.
2683 ///
2684 /// # Examples
2685 ///
2686 /// ```
2687 /// use zerocopy::AsBytes;
2688 /// # use zerocopy_derive::*;
2689 ///
2690 /// #[derive(AsBytes)]
2691 /// #[repr(C)]
2692 /// struct PacketHeader {
2693 /// src_port: [u8; 2],
2694 /// dst_port: [u8; 2],
2695 /// length: [u8; 2],
2696 /// checksum: [u8; 2],
2697 /// }
2698 ///
2699 /// let header = PacketHeader {
2700 /// src_port: [0, 1],
2701 /// dst_port: [2, 3],
2702 /// length: [4, 5],
2703 /// checksum: [6, 7],
2704 /// };
2705 ///
2706 /// let bytes = header.as_bytes();
2707 ///
2708 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2709 /// ```
2710 #[inline(always)]
2711 fn as_bytes(&self) -> &[u8] {
2712 // Note that this method does not have a `Self: Sized` bound;
2713 // `size_of_val` works for unsized values too.
2714 let len = mem::size_of_val(self);
2715 let slf: *const Self = self;
2716
2717 // SAFETY:
2718 // - `slf.cast::<u8>()` is valid for reads for `len *
2719 // mem::size_of::<u8>()` many bytes because...
2720 // - `slf` is the same pointer as `self`, and `self` is a reference
2721 // which points to an object whose size is `len`. Thus...
2722 // - The entire region of `len` bytes starting at `slf` is contained
2723 // within a single allocation.
2724 // - `slf` is non-null.
2725 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2726 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2727 // initialized.
2728 // - Since `slf` is derived from `self`, and `self` is an immutable
2729 // reference, the only other references to this memory region that
2730 // could exist are other immutable references, and those don't allow
2731 // mutation. `AsBytes` prohibits types which contain `UnsafeCell`s,
2732 // which are the only types for which this rule wouldn't be sufficient.
2733 // - The total size of the resulting slice is no larger than
2734 // `isize::MAX` because no allocation produced by safe code can be
2735 // larger than `isize::MAX`.
2736 //
2737 // TODO(#429): Add references to docs and quotes.
2738 unsafe { slice::from_raw_parts(slf.cast::<u8>(), len) }
2739 }
2740
2741 /// Gets the bytes of this value mutably.
2742 ///
2743 /// `as_bytes_mut` provides access to the bytes of this value as a mutable
2744 /// byte slice.
2745 ///
2746 /// # Examples
2747 ///
2748 /// ```
2749 /// use zerocopy::AsBytes;
2750 /// # use zerocopy_derive::*;
2751 ///
2752 /// # #[derive(Eq, PartialEq, Debug)]
2753 /// #[derive(AsBytes, FromZeroes, FromBytes)]
2754 /// #[repr(C)]
2755 /// struct PacketHeader {
2756 /// src_port: [u8; 2],
2757 /// dst_port: [u8; 2],
2758 /// length: [u8; 2],
2759 /// checksum: [u8; 2],
2760 /// }
2761 ///
2762 /// let mut header = PacketHeader {
2763 /// src_port: [0, 1],
2764 /// dst_port: [2, 3],
2765 /// length: [4, 5],
2766 /// checksum: [6, 7],
2767 /// };
2768 ///
2769 /// let bytes = header.as_bytes_mut();
2770 ///
2771 /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
2772 ///
2773 /// bytes.reverse();
2774 ///
2775 /// assert_eq!(header, PacketHeader {
2776 /// src_port: [7, 6],
2777 /// dst_port: [5, 4],
2778 /// length: [3, 2],
2779 /// checksum: [1, 0],
2780 /// });
2781 /// ```
2782 #[inline(always)]
2783 fn as_bytes_mut(&mut self) -> &mut [u8]
2784 where
2785 Self: FromBytes,
2786 {
2787 // Note that this method does not have a `Self: Sized` bound;
2788 // `size_of_val` works for unsized values too.
2789 let len = mem::size_of_val(self);
2790 let slf: *mut Self = self;
2791
2792 // SAFETY:
2793 // - `slf.cast::<u8>()` is valid for reads and writes for `len *
2794 // mem::size_of::<u8>()` many bytes because...
2795 // - `slf` is the same pointer as `self`, and `self` is a reference
2796 // which points to an object whose size is `len`. Thus...
2797 // - The entire region of `len` bytes starting at `slf` is contained
2798 // within a single allocation.
2799 // - `slf` is non-null.
2800 // - `slf` is trivially aligned to `align_of::<u8>() == 1`.
2801 // - `Self: AsBytes` ensures that all of the bytes of `slf` are
2802 // initialized.
2803 // - `Self: FromBytes` ensures that no write to this memory region
2804 // could result in it containing an invalid `Self`.
2805 // - Since `slf` is derived from `self`, and `self` is a mutable
2806 // reference, no other references to this memory region can exist.
2807 // - The total size of the resulting slice is no larger than
2808 // `isize::MAX` because no allocation produced by safe code can be
2809 // larger than `isize::MAX`.
2810 //
2811 // TODO(#429): Add references to docs and quotes.
2812 unsafe { slice::from_raw_parts_mut(slf.cast::<u8>(), len) }
2813 }

    /// Writes a copy of `self` to `bytes`.
    ///
    /// If `bytes.len() != size_of_val(self)`, `write_to` returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::AsBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(AsBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    ///
    /// If too many or too few target bytes are provided, `write_to` returns
    /// `None` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::AsBytes;
    /// # let header = u128::MAX;
    /// let mut excessive_bytes = &mut [0u8; 128][..];
    ///
    /// let write_result = header.write_to(excessive_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(excessive_bytes, [0u8; 128]);
    /// ```
    #[inline]
    fn write_to(&self, bytes: &mut [u8]) -> Option<()> {
        if bytes.len() != mem::size_of_val(self) {
            return None;
        }

        bytes.copy_from_slice(self.as_bytes());
        Some(())
    }

    /// Writes a copy of `self` to the prefix of `bytes`.
    ///
    /// `write_to_prefix` writes `self` to the first `size_of_val(self)` bytes
    /// of `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::AsBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(AsBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_prefix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 1, 2, 3, 4, 5, 6, 7, 0, 0]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_prefix` returns
    /// `None` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::AsBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_prefix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[inline]
    fn write_to_prefix(&self, bytes: &mut [u8]) -> Option<()> {
        let size = mem::size_of_val(self);
        bytes.get_mut(..size)?.copy_from_slice(self.as_bytes());
        Some(())
    }

    /// Writes a copy of `self` to the suffix of `bytes`.
    ///
    /// `write_to_suffix` writes `self` to the last `size_of_val(self)` bytes of
    /// `bytes`. If `bytes.len() < size_of_val(self)`, it returns `None`.
    ///
    /// # Examples
    ///
    /// ```
    /// use zerocopy::AsBytes;
    /// # use zerocopy_derive::*;
    ///
    /// #[derive(AsBytes)]
    /// #[repr(C)]
    /// struct PacketHeader {
    ///     src_port: [u8; 2],
    ///     dst_port: [u8; 2],
    ///     length: [u8; 2],
    ///     checksum: [u8; 2],
    /// }
    ///
    /// let header = PacketHeader {
    ///     src_port: [0, 1],
    ///     dst_port: [2, 3],
    ///     length: [4, 5],
    ///     checksum: [6, 7],
    /// };
    ///
    /// let mut bytes = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0];
    ///
    /// header.write_to_suffix(&mut bytes[..]);
    ///
    /// assert_eq!(bytes, [0, 0, 0, 1, 2, 3, 4, 5, 6, 7]);
    /// ```
    ///
    /// If insufficient target bytes are provided, `write_to_suffix` returns
    /// `None` and leaves the target bytes unmodified:
    ///
    /// ```
    /// # use zerocopy::AsBytes;
    /// # let header = u128::MAX;
    /// let mut insufficient_bytes = &mut [0, 0][..];
    ///
    /// let write_result = header.write_to_suffix(insufficient_bytes);
    ///
    /// assert!(write_result.is_none());
    /// assert_eq!(insufficient_bytes, [0, 0]);
    /// ```
    #[inline]
    fn write_to_suffix(&self, bytes: &mut [u8]) -> Option<()> {
        let start = bytes.len().checked_sub(mem::size_of_val(self))?;
        bytes
            .get_mut(start..)
            .expect("`start` should be in-bounds of `bytes`")
            .copy_from_slice(self.as_bytes());
        Some(())
    }
}

/// Types with no alignment requirement.
///
/// WARNING: Do not implement this trait yourself! Instead, use
/// `#[derive(Unaligned)]` (requires the `derive` Cargo feature).
///
/// If `T: Unaligned`, then `align_of::<T>() == 1`.
///
/// # Safety
///
/// *This section describes what is required in order for `T: Unaligned`, and
/// what unsafe code may assume of such types. `#[derive(Unaligned)]` only
/// permits types which satisfy these requirements. If you don't plan on
/// implementing `Unaligned` manually, and you don't plan on writing unsafe code
/// that operates on `Unaligned` types, then you don't need to read this
/// section.*
///
/// If `T: Unaligned`, then unsafe code may assume that it is sound to produce a
/// reference to `T` at any memory location regardless of alignment. If a type
/// is marked as `Unaligned` which violates this contract, it may cause
/// undefined behavior.
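///
/// # Examples
///
/// A minimal sketch (assuming the `derive` Cargo feature; the `Flags` type and
/// its fields are hypothetical) of a type which is `Unaligned` because every
/// one of its fields has alignment 1:
///
/// ```
/// use zerocopy::Unaligned;
/// # use zerocopy_derive::*;
///
/// #[derive(Unaligned)]
/// #[repr(C)]
/// struct Flags {
///     lo: u8,
///     hi: u8,
/// }
///
/// // Every field has alignment 1, so the struct itself has alignment 1.
/// assert_eq!(core::mem::align_of::<Flags>(), 1);
/// ```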
pub unsafe trait Unaligned {
    // The `Self: Sized` bound makes it so that `Unaligned` is still object
    // safe.
    #[doc(hidden)]
    fn only_derive_is_allowed_to_implement_this_trait()
    where
        Self: Sized;
}

safety_comment! {
    /// SAFETY:
    /// Per the reference [1], "the unit tuple (`()`) ... is guaranteed as a
    /// zero-sized type to have a size of 0 and an alignment of 1."
    /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
    ///   is only one possible sequence of 0 bytes, and `()` is inhabited.
    /// - `AsBytes`: Since `()` has size 0, it contains no padding bytes.
    /// - `Unaligned`: `()` has alignment 1.
    ///
    /// [1] https://doc.rust-lang.org/reference/type-layout.html#tuple-layout
    unsafe_impl!((): TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
    assert_unaligned!(());
}

safety_comment! {
    /// SAFETY:
    /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: all bit
    ///   patterns are valid for numeric types [1]
    /// - `AsBytes`: numeric types have no padding bytes [1]
    /// - `Unaligned` (`u8` and `i8` only): The reference [2] specifies the size
    ///   of `u8` and `i8` as 1 byte. We also know that:
    ///   - Alignment is >= 1 [3]
    ///   - Size is an integer multiple of alignment [4]
    ///   - The only value >= 1 for which 1 is an integer multiple is 1
    ///   Therefore, the only possible alignment for `u8` and `i8` is 1.
    ///
    /// [1] Per https://doc.rust-lang.org/beta/reference/types/numeric.html#bit-validity:
    ///
    ///     For every numeric type, `T`, the bit validity of `T` is equivalent
    ///     to the bit validity of `[u8; size_of::<T>()]`. An uninitialized
    ///     byte is not a valid `u8`.
    ///
    /// TODO(https://github.com/rust-lang/reference/pull/1392): Once this text
    /// is available on the Stable docs, cite those instead.
    ///
    /// [2] https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout
    ///
    /// [3] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
    ///
    ///     Alignment is measured in bytes, and must be at least 1.
    ///
    /// [4] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
    ///
    ///     The size of a value is always a multiple of its alignment.
    ///
    /// TODO(#278): Once we've updated the trait docs to refer to `u8`s rather
    /// than bits or bytes, update this comment, especially the reference to
    /// [1].
    unsafe_impl!(u8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
    unsafe_impl!(i8: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
    assert_unaligned!(u8, i8);
    unsafe_impl!(u16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(i16: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(u32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(i32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(u64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(i64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(u128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(i128: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(usize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(isize: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(f32: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(f64: TryFromBytes, FromZeroes, FromBytes, AsBytes);
}

safety_comment! {
    /// SAFETY:
    /// - `FromZeroes`: Valid since "[t]he value false has the bit pattern
    ///   0x00" [1].
    /// - `AsBytes`: Since "the boolean type has a size and alignment of 1
    ///   each" and "[t]he value false has the bit pattern 0x00 and the value
    ///   true has the bit pattern 0x01" [1], the only byte of the bool is
    ///   always initialized.
    /// - `Unaligned`: Per the reference [1], "[a]n object with the boolean
    ///   type has a size and alignment of 1 each."
    ///
    /// [1] https://doc.rust-lang.org/reference/types/boolean.html
    unsafe_impl!(bool: FromZeroes, AsBytes, Unaligned);
    assert_unaligned!(bool);
    /// SAFETY:
    /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
    ///   closure:
    ///   - Given `t: *mut bool` and `let r = t as *mut u8`, `r` refers to an
    ///     object of the same size as that referred to by `t`. This is true
    ///     because `bool` and `u8` have the same size (1 byte) [1].
    ///   - Since the closure takes a `&u8` argument, given a `Ptr<'a, bool>`
    ///     which satisfies the preconditions of
    ///     `TryFromBytes::<bool>::is_bit_valid`, it must be guaranteed that
    ///     the memory referenced by that `Ptr` always contains a valid `u8`.
    ///     Since `bool`'s single byte is always initialized, `is_bit_valid`'s
    ///     precondition requires that the same is true of its argument. Since
    ///     `u8`'s only bit validity invariant is that its single byte must be
    ///     initialized, this memory is guaranteed to contain a valid `u8`.
    ///   - The alignment of `bool` is equal to the alignment of `u8`. [1] [2]
    ///   - The impl must only return `true` for its argument if the original
    ///     `Ptr<bool>` refers to a valid `bool`. We only return true if the
    ///     `u8` value is 0 or 1, and both of these are valid values for
    ///     `bool`. [3]
    ///
    /// [1] Per https://doc.rust-lang.org/reference/type-layout.html#primitive-data-layout:
    ///
    ///     The size of most primitives is given in this table.
    ///
    ///     | Type      | `size_of::<Type>()` |
    ///     |-----------|---------------------|
    ///     | `bool`    | 1                   |
    ///     | `u8`/`i8` | 1                   |
    ///
    /// [2] Per https://doc.rust-lang.org/reference/type-layout.html#size-and-alignment:
    ///
    ///     The size of a value is always a multiple of its alignment.
    ///
    /// [3] Per https://doc.rust-lang.org/reference/types/boolean.html:
    ///
    ///     The value false has the bit pattern 0x00 and the value true has the
    ///     bit pattern 0x01.
    unsafe_impl!(bool: TryFromBytes; |byte: &u8| *byte < 2);
}
safety_comment! {
    /// SAFETY:
    /// - `FromZeroes`: Per the reference [1], "[a] value of type char is a
    ///   Unicode scalar value (i.e. a code point that is not a surrogate),
    ///   represented as a 32-bit unsigned word in the 0x0000 to 0xD7FF or
    ///   0xE000 to 0x10FFFF range", which contains 0x0000.
    /// - `AsBytes`: `char` is, per the reference [1], "represented as a 32-bit
    ///   unsigned word" (`u32`), which is `AsBytes`. Note that unlike `u32`,
    ///   not all bit patterns are valid for `char`.
    ///
    /// [1] https://doc.rust-lang.org/reference/types/textual.html
    unsafe_impl!(char: FromZeroes, AsBytes);
    /// SAFETY:
    /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
    ///   closure:
    ///   - Given `t: *mut char` and `let r = t as *mut u32`, `r` refers to an
    ///     object of the same size as that referred to by `t`. This is true
    ///     because `char` and `u32` have the same size [1].
    ///   - Since the closure takes a `&u32` argument, given a `Ptr<'a, char>`
    ///     which satisfies the preconditions of
    ///     `TryFromBytes::<char>::is_bit_valid`, it must be guaranteed that
    ///     the memory referenced by that `Ptr` always contains a valid `u32`.
    ///     Since `char`'s bytes are always initialized [2], `is_bit_valid`'s
    ///     precondition requires that the same is true of its argument. Since
    ///     `u32`'s only bit validity invariant is that its bytes must be
    ///     initialized, this memory is guaranteed to contain a valid `u32`.
    ///   - The alignment of `char` is equal to the alignment of `u32`. [1]
    ///   - The impl must only return `true` for its argument if the original
    ///     `Ptr<char>` refers to a valid `char`. `char::from_u32` guarantees
    ///     that it returns `None` if its input is not a valid `char`. [3]
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/reference/types/textual.html#layout-and-bit-validity:
    ///
    ///     `char` is guaranteed to have the same size and alignment as `u32`
    ///     on all platforms.
    ///
    /// [2] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
    ///
    ///     Every byte of a `char` is guaranteed to be initialized.
    ///
    /// [3] Per https://doc.rust-lang.org/core/primitive.char.html#method.from_u32:
    ///
    ///     `from_u32()` will return `None` if the input is not a valid value
    ///     for a `char`.
    unsafe_impl!(char: TryFromBytes; |candidate: &u32| char::from_u32(*candidate).is_some());
}
safety_comment! {
    /// SAFETY:
    /// - `FromZeroes`, `AsBytes`, `Unaligned`: Per the reference [1], `str`
    ///   has the same layout as `[u8]`, and `[u8]` is `FromZeroes`, `AsBytes`,
    ///   and `Unaligned`.
    ///
    /// Note that we don't `assert_unaligned!(str)` because `assert_unaligned!`
    /// uses `align_of`, which only works for `Sized` types.
    ///
    /// TODO(#429): Add quotes from documentation.
    ///
    /// [1] https://doc.rust-lang.org/reference/type-layout.html#str-layout
    unsafe_impl!(str: FromZeroes, AsBytes, Unaligned);
    /// SAFETY:
    /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
    ///   closure:
    ///   - Given `t: *mut str` and `let r = t as *mut [u8]`, `r` refers to an
    ///     object of the same size as that referred to by `t`. This is true
    ///     because `str` and `[u8]` have the same representation. [1]
    ///   - Since the closure takes a `&[u8]` argument, given a `Ptr<'a, str>`
    ///     which satisfies the preconditions of
    ///     `TryFromBytes::<str>::is_bit_valid`, it must be guaranteed that the
    ///     memory referenced by that `Ptr` always contains a valid `[u8]`.
    ///     Since `str`'s bytes are always initialized [1], `is_bit_valid`'s
    ///     precondition requires that the same is true of its argument. Since
    ///     `[u8]`'s only bit validity invariant is that its bytes must be
    ///     initialized, this memory is guaranteed to contain a valid `[u8]`.
    ///   - The alignment of `str` is equal to the alignment of `[u8]`. [1]
    ///   - The impl must only return `true` for its argument if the original
    ///     `Ptr<str>` refers to a valid `str`. `str::from_utf8` guarantees
    ///     that it returns `Err` if its input is not a valid `str`. [2]
    ///
    /// [1] Per https://doc.rust-lang.org/reference/types/textual.html:
    ///
    ///     A value of type `str` is represented the same way as `[u8]`.
    ///
    /// [2] Per https://doc.rust-lang.org/core/str/fn.from_utf8.html#errors:
    ///
    ///     Returns `Err` if the slice is not UTF-8.
    unsafe_impl!(str: TryFromBytes; |candidate: &[u8]| core::str::from_utf8(candidate).is_ok());
}

safety_comment! {
    // `NonZeroXxx` is `AsBytes`, but not `FromZeroes` or `FromBytes`.
    //
    /// SAFETY:
    /// - `AsBytes`: `NonZeroXxx` has the same layout as its associated
    ///   primitive. Since it is the same size, this guarantees it has no
    ///   padding - integers have no padding, and there's no room for padding
    ///   if it can represent all of the same values except 0.
    /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
    ///   `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
    ///   This is worded in a way that makes it unclear whether it's meant as a
    ///   guarantee, but given the purpose of those types, it's virtually
    ///   unthinkable that that would ever change. `Option` cannot be smaller
    ///   than its contained type, which implies that `NonZeroU8` and
    ///   `NonZeroI8` are each of size 1 or 0. `NonZeroX8` can represent
    ///   multiple states, so they cannot be 0 bytes, which means that they
    ///   must be 1 byte. The only valid alignment for a 1-byte type is 1.
    ///
    /// TODO(#429): Add quotes from documentation.
    ///
    /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
    /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
    /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
    /// that layout is the same as primitive layout.
    unsafe_impl!(NonZeroU8: AsBytes, Unaligned);
    unsafe_impl!(NonZeroI8: AsBytes, Unaligned);
    assert_unaligned!(NonZeroU8, NonZeroI8);
    unsafe_impl!(NonZeroU16: AsBytes);
    unsafe_impl!(NonZeroI16: AsBytes);
    unsafe_impl!(NonZeroU32: AsBytes);
    unsafe_impl!(NonZeroI32: AsBytes);
    unsafe_impl!(NonZeroU64: AsBytes);
    unsafe_impl!(NonZeroI64: AsBytes);
    unsafe_impl!(NonZeroU128: AsBytes);
    unsafe_impl!(NonZeroI128: AsBytes);
    unsafe_impl!(NonZeroUsize: AsBytes);
    unsafe_impl!(NonZeroIsize: AsBytes);
    /// SAFETY:
    /// - The safety requirements for `unsafe_impl!` with an `is_bit_valid`
    ///   closure:
    ///   - Given `t: *mut NonZeroXxx` and `let r = t as *mut xxx`, `r` refers
    ///     to an object of the same size as that referred to by `t`. This is
    ///     true because `NonZeroXxx` and `xxx` have the same size. [1]
    ///   - Since the closure takes a `&xxx` argument, given a `Ptr<'a,
    ///     NonZeroXxx>` which satisfies the preconditions of
    ///     `TryFromBytes::<NonZeroXxx>::is_bit_valid`, it must be guaranteed
    ///     that the memory referenced by that `Ptr` always contains a valid
    ///     `xxx`. Since `NonZeroXxx`'s bytes are always initialized [1],
    ///     `is_bit_valid`'s precondition requires that the same is true of its
    ///     argument. Since `xxx`'s only bit validity invariant is that its
    ///     bytes must be initialized, this memory is guaranteed to contain a
    ///     valid `xxx`.
    ///   - The alignment of `NonZeroXxx` is equal to the alignment of `xxx`.
    ///     [1]
    ///   - The impl must only return `true` for its argument if the original
    ///     `Ptr<NonZeroXxx>` refers to a valid `NonZeroXxx`. The only `xxx`
    ///     which is not also a valid `NonZeroXxx` is 0. [1]
    ///
    /// [1] Per https://doc.rust-lang.org/core/num/struct.NonZeroU16.html:
    ///
    ///     `NonZeroU16` is guaranteed to have the same layout and bit validity
    ///     as `u16` with the exception that `0` is not a valid instance.
    unsafe_impl!(NonZeroU8: TryFromBytes; |n: &u8| *n != 0);
    unsafe_impl!(NonZeroI8: TryFromBytes; |n: &i8| *n != 0);
    unsafe_impl!(NonZeroU16: TryFromBytes; |n: &u16| *n != 0);
    unsafe_impl!(NonZeroI16: TryFromBytes; |n: &i16| *n != 0);
    unsafe_impl!(NonZeroU32: TryFromBytes; |n: &u32| *n != 0);
    unsafe_impl!(NonZeroI32: TryFromBytes; |n: &i32| *n != 0);
    unsafe_impl!(NonZeroU64: TryFromBytes; |n: &u64| *n != 0);
    unsafe_impl!(NonZeroI64: TryFromBytes; |n: &i64| *n != 0);
    unsafe_impl!(NonZeroU128: TryFromBytes; |n: &u128| *n != 0);
    unsafe_impl!(NonZeroI128: TryFromBytes; |n: &i128| *n != 0);
    unsafe_impl!(NonZeroUsize: TryFromBytes; |n: &usize| *n != 0);
    unsafe_impl!(NonZeroIsize: TryFromBytes; |n: &isize| *n != 0);
}
safety_comment! {
    /// SAFETY:
    /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`,
    ///   `AsBytes`: The Rust compiler reuses the `0` value to represent
    ///   `None`, so `size_of::<Option<NonZeroXxx>>() == size_of::<xxx>()`; see
    ///   the `NonZeroXxx` documentation.
    /// - `Unaligned`: `NonZeroU8` and `NonZeroI8` document that
    ///   `Option<NonZeroU8>` and `Option<NonZeroI8>` both have size 1. [1] [2]
    ///   This is worded in a way that makes it unclear whether it's meant as a
    ///   guarantee, but given the purpose of those types, it's virtually
    ///   unthinkable that that would ever change. The only valid alignment for
    ///   a 1-byte type is 1.
    ///
    /// TODO(#429): Add quotes from documentation.
    ///
    /// [1] https://doc.rust-lang.org/stable/std/num/struct.NonZeroU8.html
    /// [2] https://doc.rust-lang.org/stable/std/num/struct.NonZeroI8.html
    ///
    /// TODO(https://github.com/rust-lang/rust/pull/104082): Cite documentation
    /// for layout guarantees.
    unsafe_impl!(Option<NonZeroU8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
    unsafe_impl!(Option<NonZeroI8>: TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
    assert_unaligned!(Option<NonZeroU8>, Option<NonZeroI8>);
    unsafe_impl!(Option<NonZeroU16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroI16>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroU32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroI32>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroU64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroI64>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroU128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroI128>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroUsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
    unsafe_impl!(Option<NonZeroIsize>: TryFromBytes, FromZeroes, FromBytes, AsBytes);
}

safety_comment! {
    /// SAFETY:
    /// The following types can be transmuted from `[0u8; size_of::<T>()]`. [1]
    /// None of them contain `UnsafeCell`s, and so they all soundly implement
    /// `FromZeroes`.
    ///
    /// [1] Per
    /// https://doc.rust-lang.org/nightly/core/option/index.html#representation:
    ///
    ///     Rust guarantees to optimize the following types `T` such that
    ///     [`Option<T>`] has the same size and alignment as `T`. In some of
    ///     these cases, Rust further guarantees that `transmute::<_,
    ///     Option<T>>([0u8; size_of::<T>()])` is sound and produces
    ///     `Option::<T>::None`. These cases are identified by the second
    ///     column:
    ///
    ///     | `T`                   | `transmute::<_, Option<T>>([0u8; size_of::<T>()])` sound? |
    ///     |-----------------------|------------------------------------------------------------|
    ///     | [`Box<U>`]            | when `U: Sized`                                            |
    ///     | `&U`                  | when `U: Sized`                                            |
    ///     | `&mut U`              | when `U: Sized`                                            |
    ///     | [`ptr::NonNull<U>`]   | when `U: Sized`                                            |
    ///     | `fn`, `extern "C" fn` | always                                                     |
    ///
    /// TODO(#429), TODO(https://github.com/rust-lang/rust/pull/115333): Cite
    /// the Stable docs once they're available.
    #[cfg(feature = "alloc")]
    unsafe_impl!(
        #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
        T => FromZeroes for Option<Box<T>>
    );
    unsafe_impl!(T => FromZeroes for Option<&'_ T>);
    unsafe_impl!(T => FromZeroes for Option<&'_ mut T>);
    unsafe_impl!(T => FromZeroes for Option<NonNull<T>>);
    unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_fn!(...));
    unsafe_impl_for_power_set!(A, B, C, D, E, F, G, H, I, J, K, L -> M => FromZeroes for opt_extern_c_fn!(...));
}

safety_comment! {
    /// SAFETY:
    /// Per the reference [1]:
    ///
    ///     For all T, the following are guaranteed:
    ///     size_of::<PhantomData<T>>() == 0
    ///     align_of::<PhantomData<T>>() == 1
    ///
    /// This gives:
    /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`: There
    ///   is only one possible sequence of 0 bytes, and `PhantomData` is
    ///   inhabited.
    /// - `AsBytes`: Since `PhantomData` has size 0, it contains no padding
    ///   bytes.
    /// - `Unaligned`: Per the preceding reference, `PhantomData` has alignment
    ///   1.
    ///
    /// [1] https://doc.rust-lang.org/std/marker/struct.PhantomData.html#layout-1
    unsafe_impl!(T: ?Sized => TryFromBytes for PhantomData<T>);
    unsafe_impl!(T: ?Sized => FromZeroes for PhantomData<T>);
    unsafe_impl!(T: ?Sized => FromBytes for PhantomData<T>);
    unsafe_impl!(T: ?Sized => AsBytes for PhantomData<T>);
    unsafe_impl!(T: ?Sized => Unaligned for PhantomData<T>);
    assert_unaligned!(PhantomData<()>, PhantomData<u8>, PhantomData<u64>);
}
safety_comment! {
    /// SAFETY:
    /// `Wrapping<T>` is guaranteed by its docs [1] to have the same layout and
    /// bit validity as `T`. Also, `Wrapping<T>` is `#[repr(transparent)]`, and
    /// has a single field, which is `pub`. Per the reference [2], this means
    /// that the `#[repr(transparent)]` attribute is "considered part of the
    /// public ABI".
    ///
    /// - `TryFromBytes`: The safety requirements for `unsafe_impl!` with an
    ///   `is_bit_valid` closure:
    ///   - Given `t: *mut Wrapping<T>` and `let r = t as *mut T`, `r` refers
    ///     to an object of the same size as that referred to by `t`. This is
    ///     true because `Wrapping<T>` and `T` have the same layout.
    ///   - The alignment of `Wrapping<T>` is equal to the alignment of `T`.
    ///   - The impl must only return `true` for its argument if the original
    ///     `Ptr<Wrapping<T>>` refers to a valid `Wrapping<T>`. Since
    ///     `Wrapping<T>` has the same bit validity as `T`, and since our impl
    ///     just calls `T::is_bit_valid`, our impl returns `true` exactly when
    ///     its argument contains a valid `Wrapping<T>`.
    /// - `FromBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
    ///   `T: FromBytes`, then all initialized byte sequences are valid
    ///   instances of `Wrapping<T>`. Similarly, if `T: FromBytes`, then
    ///   `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl FromBytes
    ///   for Wrapping<T> where T: FromBytes` is a sound impl.
    /// - `AsBytes`: Since `Wrapping<T>` has the same bit validity as `T`, if
    ///   `T: AsBytes`, then all valid instances of `Wrapping<T>` have all of
    ///   their bytes initialized. Similarly, if `T: AsBytes`, then
    ///   `Wrapping<T>` doesn't contain any `UnsafeCell`s. Thus, `impl AsBytes
    ///   for Wrapping<T> where T: AsBytes` is a valid impl.
    /// - `Unaligned`: Since `Wrapping<T>` has the same layout as `T`,
    ///   `Wrapping<T>` has alignment 1 exactly when `T` does.
    ///
    /// TODO(#429): Add quotes from documentation.
    ///
    /// [1] TODO(https://doc.rust-lang.org/nightly/core/num/struct.Wrapping.html#layout-1):
    /// Reference this documentation once it's available on stable.
    ///
    /// [2] https://doc.rust-lang.org/nomicon/other-reprs.html#reprtransparent
    unsafe_impl!(T: TryFromBytes => TryFromBytes for Wrapping<T>; |candidate: Ptr<T>| {
        // SAFETY:
        // - Since `T` and `Wrapping<T>` have the same layout and bit validity
        //   and contain the same fields, `T` contains `UnsafeCell`s exactly
        //   where `Wrapping<T>` does. Thus, all memory and `UnsafeCell`
        //   preconditions of `T::is_bit_valid` hold exactly when the same
        //   preconditions for `Wrapping<T>::is_bit_valid` hold.
        // - By the same token, since `candidate` is guaranteed to have its
        //   bytes initialized where there are always initialized bytes in
        //   `Wrapping<T>`, the same is true for `T`.
        unsafe { T::is_bit_valid(candidate) }
    });
    unsafe_impl!(T: FromZeroes => FromZeroes for Wrapping<T>);
    unsafe_impl!(T: FromBytes => FromBytes for Wrapping<T>);
    unsafe_impl!(T: AsBytes => AsBytes for Wrapping<T>);
    unsafe_impl!(T: Unaligned => Unaligned for Wrapping<T>);
    assert_unaligned!(Wrapping<()>, Wrapping<u8>);
}
safety_comment! {
    // `MaybeUninit<T>` is `FromZeroes` and `FromBytes`, but never `AsBytes`
    // since it may contain uninitialized bytes.
    //
    /// SAFETY:
    /// - `TryFromBytes` (with no validator), `FromZeroes`, `FromBytes`:
    ///   `MaybeUninit<T>` has no restrictions on its contents. Unfortunately,
    ///   in addition to bit validity, `TryFromBytes`, `FromZeroes` and
    ///   `FromBytes` also require that implementers contain no `UnsafeCell`s.
    ///   Thus, we require `T: Trait` in order to ensure that `T` - and thus
    ///   `MaybeUninit<T>` - contains no `UnsafeCell`s. Thus, requiring that
    ///   `T` implement each of these traits is sufficient.
    /// - `Unaligned`: "MaybeUninit<T> is guaranteed to have the same size,
    ///   alignment, and ABI as T" [1]
    ///
    /// [1] https://doc.rust-lang.org/stable/core/mem/union.MaybeUninit.html#layout-1
    ///
    /// TODO(https://github.com/google/zerocopy/issues/251): If we split
    /// `FromBytes` and `RefFromBytes`, or if we introduce a separate
    /// `NoCell`/`Freeze` trait, we can relax the trait bounds for `FromZeroes`
    /// and `FromBytes`.
    unsafe_impl!(T: TryFromBytes => TryFromBytes for MaybeUninit<T>);
    unsafe_impl!(T: FromZeroes => FromZeroes for MaybeUninit<T>);
    unsafe_impl!(T: FromBytes => FromBytes for MaybeUninit<T>);
    unsafe_impl!(T: Unaligned => Unaligned for MaybeUninit<T>);
    assert_unaligned!(MaybeUninit<()>, MaybeUninit<u8>);
}
safety_comment! {
    /// SAFETY:
    /// `ManuallyDrop` has the same layout and bit validity as `T` [1], and
    /// accessing the inner value is safe (meaning that it's unsound to leave
    /// the inner value uninitialized while exposing the `ManuallyDrop` to safe
    /// code).
    /// - `FromZeroes`, `FromBytes`: Since it has the same layout as `T`, any
    ///   valid `T` is a valid `ManuallyDrop<T>`. If `T: FromZeroes`, a sequence
    ///   of zero bytes is a valid `T`, and thus a valid `ManuallyDrop<T>`. If
    ///   `T: FromBytes`, any sequence of bytes is a valid `T`, and thus a valid
    ///   `ManuallyDrop<T>`.
    /// - `AsBytes`: Since it has the same layout as `T`, and since it's unsound
    ///   to let safe code access a `ManuallyDrop` whose inner value is
    ///   uninitialized, safe code can only ever access a `ManuallyDrop` whose
    ///   contents are a valid `T`. Since `T: AsBytes`, this means that safe
    ///   code can only ever access a `ManuallyDrop` with all initialized bytes.
    /// - `Unaligned`: `ManuallyDrop` has the same layout (and thus alignment)
    ///   as `T`, and `T: Unaligned` guarantees that that alignment is 1.
    ///
    /// [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    ///
    ///     `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    ///     validity as `T`
    ///
    /// TODO(#429):
    /// - Add quotes from docs.
    /// - Once [1] (added in
    ///   https://github.com/rust-lang/rust/pull/115522) is available on stable,
    ///   quote the stable docs instead of the nightly docs.
    unsafe_impl!(T: ?Sized + FromZeroes => FromZeroes for ManuallyDrop<T>);
    unsafe_impl!(T: ?Sized + FromBytes => FromBytes for ManuallyDrop<T>);
    unsafe_impl!(T: ?Sized + AsBytes => AsBytes for ManuallyDrop<T>);
    unsafe_impl!(T: ?Sized + Unaligned => Unaligned for ManuallyDrop<T>);
    assert_unaligned!(ManuallyDrop<()>, ManuallyDrop<u8>);
}
safety_comment! {
    /// SAFETY:
    /// Per the reference [1]:
    ///
    ///     An array of `[T; N]` has a size of `size_of::<T>() * N` and the
    ///     same alignment of `T`. Arrays are laid out so that the zero-based
    ///     `nth` element of the array is offset from the start of the array by
    ///     `n * size_of::<T>()` bytes.
    ///
    ///     ...
    ///
    ///     Slices have the same layout as the section of the array they slice.
    ///
    /// In other words, the layout of a `[T]` or `[T; N]` is a sequence of `T`s
    /// laid out back-to-back with no bytes in between. Therefore, `[T]` or `[T;
    /// N]` are `TryFromBytes`, `FromZeroes`, `FromBytes`, and `AsBytes` if `T`
    /// is (respectively). Furthermore, since an array/slice has "the same
    /// alignment of `T`", `[T]` and `[T; N]` are `Unaligned` if `T` is.
    ///
    /// Note that we don't `assert_unaligned!` for slice types because
    /// `assert_unaligned!` uses `align_of`, which only works for `Sized` types.
    ///
    /// [1] https://doc.rust-lang.org/reference/type-layout.html#array-layout
    unsafe_impl!(const N: usize, T: FromZeroes => FromZeroes for [T; N]);
    unsafe_impl!(const N: usize, T: FromBytes => FromBytes for [T; N]);
    unsafe_impl!(const N: usize, T: AsBytes => AsBytes for [T; N]);
    unsafe_impl!(const N: usize, T: Unaligned => Unaligned for [T; N]);
    assert_unaligned!([(); 0], [(); 1], [u8; 0], [u8; 1]);
    unsafe_impl!(T: TryFromBytes => TryFromBytes for [T]; |c: Ptr<[T]>| {
        // SAFETY: Assuming the preconditions of `is_bit_valid` are satisfied,
        // so too will the postcondition: that, if `is_bit_valid(candidate)`
        // returns true, `*candidate` contains a valid `Self`. Per the
        // reference [1]:
        //
        //   An array of `[T; N]` has a size of `size_of::<T>() * N` and the
        //   same alignment of `T`. Arrays are laid out so that the zero-based
        //   `nth` element of the array is offset from the start of the array
        //   by `n * size_of::<T>()` bytes.
        //
        //   ...
        //
        //   Slices have the same layout as the section of the array they
        //   slice.
        //
        // In other words, the layout of a `[T]` is a sequence of `T`s laid out
        // back-to-back with no bytes in between. If all elements in `candidate`
        // are `is_bit_valid`, so too is `candidate`.
        //
        // Note that any of the below calls may panic, but it would still be
        // sound even if it did. `is_bit_valid` does not promise that it will
        // not panic (in fact, it explicitly warns that it's a possibility), and
        // we have not violated any safety invariants that we must fix before
        // returning.
        c.iter().all(|elem|
            // SAFETY: We uphold the safety contract of `is_bit_valid(elem)`, by
            // precondition on the surrounding call to `is_bit_valid`. The
            // memory referenced by `elem` is contained entirely within `c`, and
            // satisfies the preconditions satisfied by `c`. By axiom, we assume
            // that `Iterator::all` does not invalidate these preconditions
            // (e.g., by writing to `elem`.) Since `elem` is derived from `c`,
            // it is only possible for uninitialized bytes to occur in `elem` at
            // the same bytes they occur within `c`.
            unsafe { <T as TryFromBytes>::is_bit_valid(elem) }
        )
    });
    unsafe_impl!(T: FromZeroes => FromZeroes for [T]);
    unsafe_impl!(T: FromBytes => FromBytes for [T]);
    unsafe_impl!(T: AsBytes => AsBytes for [T]);
    unsafe_impl!(T: Unaligned => Unaligned for [T]);
}
safety_comment! {
    /// SAFETY:
    /// - `FromZeroes`: For thin pointers (note that `T: Sized`), the zero
    ///   pointer is considered "null". [1] No operations which require
    ///   provenance are legal on null pointers, so this is not a footgun.
    ///
    /// NOTE(#170): Implementing `FromBytes` and `AsBytes` for raw pointers
    /// would be sound, but carries provenance footguns. We want to support
    /// `FromBytes` and `AsBytes` for raw pointers eventually, but we are
    /// holding off until we can figure out how to address those footguns.
    ///
    /// [1] TODO(https://github.com/rust-lang/rust/pull/116988): Cite the
    /// documentation once this PR lands.
    unsafe_impl!(T => FromZeroes for *const T);
    unsafe_impl!(T => FromZeroes for *mut T);
}

// SIMD support
//
// Per the Unsafe Code Guidelines Reference [1]:
//
//   Packed SIMD vector types are `repr(simd)` homogeneous tuple-structs
//   containing `N` elements of type `T` where `N` is a power-of-two and the
//   size and alignment requirements of `T` are equal:
//
//   ```rust
//   #[repr(simd)]
//   struct Vector<T, N>(T_0, ..., T_(N - 1));
//   ```
//
//   ...
//
//   The size of `Vector` is `N * size_of::<T>()` and its alignment is an
//   implementation-defined function of `T` and `N` greater than or equal to
//   `align_of::<T>()`.
//
//   ...
//
//   Vector elements are laid out in source field order, enabling random access
//   to vector elements by reinterpreting the vector as an array:
//
//   ```rust
//   union U {
//       vec: Vector<T, N>,
//       arr: [T; N]
//   }
//
//   assert_eq!(size_of::<Vector<T, N>>(), size_of::<[T; N]>());
//   assert!(align_of::<Vector<T, N>>() >= align_of::<[T; N]>());
//
//   unsafe {
//       let u = U { vec: Vector<T, N>(t_0, ..., t_(N - 1)) };
//
//       assert_eq!(u.vec.0, u.arr[0]);
//       // ...
//       assert_eq!(u.vec.(N - 1), u.arr[N - 1]);
//   }
//   ```
//
// Given this background, we can observe that:
// - The size and bit pattern requirements of a SIMD type are equivalent to the
//   equivalent array type. Thus, for any SIMD type whose primitive `T` is
//   `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes`, that SIMD type is
//   also `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` respectively.
// - Since no upper bound is placed on the alignment, no SIMD type can be
//   guaranteed to be `Unaligned`.
//
// Also per [1]:
//
//   This chapter represents the consensus from issue #38. The statements in
//   here are not (yet) "guaranteed" not to change until an RFC ratifies them.
//
// See issue #38 [2]. While this behavior is not technically guaranteed, the
// likelihood that the behavior will change such that SIMD types are no longer
// `TryFromBytes`, `FromZeroes`, `FromBytes`, or `AsBytes` is next to zero, as
// that would defeat the entire purpose of SIMD types. Nonetheless, we put this
// behavior behind the `simd` Cargo feature, which requires consumers to opt
// into this stability hazard.
//
// [1] https://rust-lang.github.io/unsafe-code-guidelines/layout/packed-simd-vectors.html
// [2] https://github.com/rust-lang/unsafe-code-guidelines/issues/38
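//
// As an illustrative sketch (not part of this crate's test suite): with the
// `simd` feature enabled on an `x86_64` target, the impls defined below make
// the following sound, because every byte of an initialized `__m128` is
// initialized:
//
// ```rust,ignore
// use core::arch::x86_64::{__m128, _mm_setzero_ps};
// use zerocopy::AsBytes;
//
// // SAFETY: SSE is part of the `x86_64` baseline target features.
// let v: __m128 = unsafe { _mm_setzero_ps() };
// assert_eq!(v.as_bytes(), &[0u8; 16][..]);
// ```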
#[cfg(feature = "simd")]
#[cfg_attr(doc_cfg, doc(cfg(feature = "simd")))]
mod simd {
    /// Defines a module which implements `TryFromBytes`, `FromZeroes`,
    /// `FromBytes`, and `AsBytes` for a set of types from a module in
    /// `core::arch`.
    ///
    /// `$arch` is both the name of the defined module and the name of the
    /// module in `core::arch`, and `$typ` is the list of items from that
    /// module to implement `FromZeroes`, `FromBytes`, and `AsBytes` for.
    #[allow(unused_macros)] // `allow(unused_macros)` is needed because some
                            // target/feature combinations don't emit any impls
                            // and thus don't use this macro.
    macro_rules! simd_arch_mod {
        (#[cfg $cfg:tt] $arch:ident, $mod:ident, $($typ:ident),*) => {
            #[cfg $cfg]
            #[cfg_attr(doc_cfg, doc(cfg $cfg))]
            mod $mod {
                use core::arch::$arch::{$($typ),*};

                use crate::*;
                impl_known_layout!($($typ),*);
                safety_comment! {
                    /// SAFETY:
                    /// See comment on module definition for justification.
                    $( unsafe_impl!($typ: TryFromBytes, FromZeroes, FromBytes, AsBytes); )*
                }
            }
        };
    }

    #[rustfmt::skip]
    const _: () = {
        simd_arch_mod!(
            #[cfg(target_arch = "x86")]
            x86, x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i
        );
        simd_arch_mod!(
            #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
            x86, x86_nightly, __m512bh, __m512, __m512d, __m512i
        );
        simd_arch_mod!(
            #[cfg(target_arch = "x86_64")]
            x86_64, x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i
        );
        simd_arch_mod!(
            #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
            x86_64, x86_64_nightly, __m512bh, __m512, __m512d, __m512i
        );
        simd_arch_mod!(
            #[cfg(target_arch = "wasm32")]
            wasm32, wasm32, v128
        );
        simd_arch_mod!(
            #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
            powerpc, powerpc, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
        );
        simd_arch_mod!(
            #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
            powerpc64, powerpc64, vector_bool_long, vector_double, vector_signed_long, vector_unsigned_long
        );
        simd_arch_mod!(
            #[cfg(target_arch = "aarch64")]
            aarch64, aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
            int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
            int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
            poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
            poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
            uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
            uint64x1_t, uint64x2_t
        );
        simd_arch_mod!(
            #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
            arm, arm, int8x4_t, uint8x4_t
        );
    };
}

/// Safely transmutes a value of one type to a value of another type of the same
/// size.
///
/// The expression `$e` must have a concrete type, `T`, which implements
/// `AsBytes`. The `transmute!` expression must also have a concrete type, `U`
/// (`U` is inferred from the calling context), and `U` must implement
/// `FromBytes`.
///
/// Note that the `T` produced by the expression `$e` will *not* be dropped.
/// Semantically, its bits will be copied into a new value of type `U`, the
/// original `T` will be forgotten, and the value of type `U` will be returned.
///
/// # Examples
///
/// ```
/// # use zerocopy::transmute;
/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
///
/// let two_dimensional: [[u8; 4]; 2] = transmute!(one_dimensional);
///
/// assert_eq!(two_dimensional, [[0, 1, 2, 3], [4, 5, 6, 7]]);
/// ```
#[macro_export]
macro_rules! transmute {
    ($e:expr) => {{
        // NOTE: This must be a macro (rather than a function with trait bounds)
        // because there's no way, in a generic context, to enforce that two
        // types have the same size. `core::mem::transmute` uses compiler magic
        // to enforce this so long as the types are concrete.

        let e = $e;
        if false {
            // This branch, though never taken, ensures that the type of `e` is
            // `AsBytes` and that the type of this macro invocation expression
            // is `FromBytes`.

            struct AssertIsAsBytes<T: $crate::AsBytes>(T);
            let _ = AssertIsAsBytes(e);

            struct AssertIsFromBytes<U: $crate::FromBytes>(U);
            #[allow(unused, unreachable_code)]
            let u = AssertIsFromBytes(loop {});
            u.0
        } else {
            // SAFETY: `core::mem::transmute` ensures that the type of `e` and
            // the type of this macro invocation expression have the same size.
            // We know this transmute is safe thanks to the `AsBytes` and
            // `FromBytes` bounds enforced by the `false` branch.
            //
            // We use this reexport of `core::mem::transmute` because we know it
            // will always be available for crates which are using the 2015
            // edition of Rust. By contrast, if we were to use
            // `std::mem::transmute`, this macro would not work for such crates
            // in `no_std` contexts, and if we were to use
            // `core::mem::transmute`, this macro would not work in `std`
            // contexts in which `core` was not manually imported. This is not a
            // problem for 2018 edition crates.
            unsafe {
                // Clippy: It's okay to transmute a type to itself.
                #[allow(clippy::useless_transmute, clippy::missing_transmute_annotations)]
                $crate::macro_util::core_reexport::mem::transmute(e)
            }
        }
    }}
}

/// Safely transmutes a mutable or immutable reference of one type to an
/// immutable reference of another type of the same size.
///
/// The expression `$e` must have a concrete type, `&T` or `&mut T`, where `T:
/// Sized + AsBytes`. The `transmute_ref!` expression must also have a concrete
/// type, `&U` (`U` is inferred from the calling context), where `U: Sized +
/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
///
/// The lifetime of the input type, `&T` or `&mut T`, must be the same as or
/// outlive the lifetime of the output type, `&U`.
///
/// # Examples
///
/// ```
/// # use zerocopy::transmute_ref;
/// let one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
///
/// let two_dimensional: &[[u8; 4]; 2] = transmute_ref!(&one_dimensional);
///
/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
/// ```
///
/// # Alignment increase error message
///
/// Because of limitations on macros, the error message generated when
/// `transmute_ref!` is used to transmute from a type of lower alignment to a
/// type of higher alignment is somewhat confusing. For example, the following
/// code:
///
/// ```compile_fail
/// const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
/// ```
///
/// ...generates the following error:
///
/// ```text
/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
///  --> src/lib.rs:1524:34
///   |
/// 5 | const INCREASE_ALIGNMENT: &u16 = zerocopy::transmute_ref!(&[0u8; 2]);
///   |                                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
///   |
///   = note: source type: `AlignOf<[u8; 2]>` (8 bits)
///   = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
///   = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_ref` (in Nightly builds, run with -Z macro-backtrace for more info)
/// ```
///
/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
/// align_of::<U>()`.
#[macro_export]
macro_rules! transmute_ref {
    ($e:expr) => {{
        // NOTE: This must be a macro (rather than a function with trait bounds)
        // because there's no way, in a generic context, to enforce that two
        // types have the same size or alignment.

        // Ensure that the source type is a reference or a mutable reference
        // (note that mutable references are implicitly reborrowed here).
        let e: &_ = $e;

        #[allow(unused, clippy::diverging_sub_expression)]
        if false {
            // This branch, though never taken, ensures that the type of `e` is
            // `&T` where `T: 't + Sized + AsBytes`, that the type of this macro
            // expression is `&U` where `U: 'u + Sized + FromBytes`, and that
            // `'t` outlives `'u`.

            struct AssertIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
            let _ = AssertIsAsBytes(e);

            struct AssertIsFromBytes<'a, U: ::core::marker::Sized + $crate::FromBytes>(&'a U);
            #[allow(unused, unreachable_code)]
            let u = AssertIsFromBytes(loop {});
            u.0
        } else if false {
            // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >= align_of::<U>()`.

            // `t` is inferred to have type `T` because it's assigned to `e` (of
            // type `&T`) as `&t`.
            let mut t = unreachable!();
            e = &t;

            // `u` is inferred to have type `U` because it's used as `&u` as the
            // value returned from this branch.
            let u;

            $crate::assert_size_eq!(t, u);
            $crate::assert_align_gt_eq!(t, u);

            &u
        } else {
            // SAFETY: For source type `Src` and destination type `Dst`:
            // - We know that `Src: AsBytes` and `Dst: FromBytes` thanks to the
            //   uses of `AssertIsAsBytes` and `AssertIsFromBytes` above.
            // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
            //   the use of `assert_size_eq!` above.
            // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
            //   the use of `assert_align_gt_eq!` above.
            unsafe { $crate::macro_util::transmute_ref(e) }
        }
    }}
}

/// Safely transmutes a mutable reference of one type to a mutable reference of
/// another type of the same size.
///
/// The expression `$e` must have a concrete type, `&mut T`, where `T: Sized +
/// AsBytes`. The `transmute_mut!` expression must also have a concrete type,
/// `&mut U` (`U` is inferred from the calling context), where `U: Sized +
/// FromBytes`. It must be the case that `align_of::<T>() >= align_of::<U>()`.
///
/// The lifetime of the input type, `&mut T`, must be the same as or outlive the
/// lifetime of the output type, `&mut U`.
///
/// # Examples
///
/// ```
/// # use zerocopy::transmute_mut;
/// let mut one_dimensional: [u8; 8] = [0, 1, 2, 3, 4, 5, 6, 7];
///
/// let two_dimensional: &mut [[u8; 4]; 2] = transmute_mut!(&mut one_dimensional);
///
/// assert_eq!(two_dimensional, &[[0, 1, 2, 3], [4, 5, 6, 7]]);
///
/// two_dimensional.reverse();
///
/// assert_eq!(one_dimensional, [4, 5, 6, 7, 0, 1, 2, 3]);
/// ```
///
/// # Alignment increase error message
///
/// Because of limitations on macros, the error message generated when
/// `transmute_mut!` is used to transmute from a type of lower alignment to a
/// type of higher alignment is somewhat confusing. For example, the following
/// code:
///
/// ```compile_fail
/// const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
/// ```
///
/// ...generates the following error:
///
/// ```text
/// error[E0512]: cannot transmute between types of different sizes, or dependently-sized types
///  --> src/lib.rs:1524:34
///   |
/// 5 | const INCREASE_ALIGNMENT: &mut u16 = zerocopy::transmute_mut!(&mut [0u8; 2]);
///   |                                      ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
///   |
///   = note: source type: `AlignOf<[u8; 2]>` (8 bits)
///   = note: target type: `MaxAlignsOf<[u8; 2], u16>` (16 bits)
///   = note: this error originates in the macro `$crate::assert_align_gt_eq` which comes from the expansion of the macro `transmute_mut` (in Nightly builds, run with -Z macro-backtrace for more info)
/// ```
///
/// This is saying that `max(align_of::<T>(), align_of::<U>()) !=
/// align_of::<T>()`, which is equivalent to `align_of::<T>() <
/// align_of::<U>()`.
#[macro_export]
macro_rules! transmute_mut {
    ($e:expr) => {{
        // NOTE: This must be a macro (rather than a function with trait bounds)
        // because there's no way, in a generic context, to enforce that two
        // types have the same size or alignment.

        // Ensure that the source type is a mutable reference.
        let e: &mut _ = $e;

        #[allow(unused, clippy::diverging_sub_expression)]
        if false {
            // This branch, though never taken, ensures that the type of `e` is
            // `&mut T` where `T: 't + Sized + FromBytes + AsBytes`, and that
            // the type of this macro expression is `&mut U` where `U: 'u +
            // Sized + FromBytes + AsBytes`.

            // We use immutable references here rather than mutable so that, if
            // this macro is used in a const context (in which, as of this
            // writing, mutable references are banned), the error message
            // appears to originate in the user's code rather than in the
            // internals of this macro.
            struct AssertSrcIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
            struct AssertSrcIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);
            struct AssertDstIsFromBytes<'a, T: ::core::marker::Sized + $crate::FromBytes>(&'a T);
            struct AssertDstIsAsBytes<'a, T: ::core::marker::Sized + $crate::AsBytes>(&'a T);

            if true {
                let _ = AssertSrcIsFromBytes(&*e);
            } else {
                let _ = AssertSrcIsAsBytes(&*e);
            }

            if true {
                #[allow(unused, unreachable_code)]
                let u = AssertDstIsFromBytes(loop {});
                &mut *u.0
            } else {
                #[allow(unused, unreachable_code)]
                let u = AssertDstIsAsBytes(loop {});
                &mut *u.0
            }
        } else if false {
            // This branch, though never taken, ensures that `size_of::<T>() ==
            // size_of::<U>()` and that `align_of::<T>() >= align_of::<U>()`.

            // `t` is inferred to have type `T` because it's assigned to `e` (of
            // type `&mut T`) as `&mut t`.
            let mut t = unreachable!();
            e = &mut t;

            // `u` is inferred to have type `U` because it's used as `&mut u` as
            // the value returned from this branch.
            let u;

            $crate::assert_size_eq!(t, u);
            $crate::assert_align_gt_eq!(t, u);

            &mut u
        } else {
            // SAFETY: For source type `Src` and destination type `Dst`:
            // - We know that `Src: FromBytes + AsBytes` and `Dst: FromBytes +
            //   AsBytes` thanks to the uses of `AssertSrcIsFromBytes`,
            //   `AssertSrcIsAsBytes`, `AssertDstIsFromBytes`, and
            //   `AssertDstIsAsBytes` above.
            // - We know that `size_of::<Src>() == size_of::<Dst>()` thanks to
            //   the use of `assert_size_eq!` above.
            // - We know that `align_of::<Src>() >= align_of::<Dst>()` thanks to
            //   the use of `assert_align_gt_eq!` above.
            unsafe { $crate::macro_util::transmute_mut(e) }
        }
    }}
}

/// Includes a file and safely transmutes it to a value of an arbitrary type.
///
/// The file will be included as a byte array, `[u8; N]`, which will be
/// transmuted to another type, `T`. `T` is inferred from the calling context,
/// and must implement [`FromBytes`].
///
/// The file is located relative to the current file (similarly to how modules
/// are found). The provided path is interpreted in a platform-specific way at
/// compile time. So, for instance, an invocation with a Windows path containing
/// backslashes `\` would not compile correctly on Unix.
///
/// `include_value!` is ignorant of byte order. For byte order-aware types, see
/// the [`byteorder`] module.
///
/// # Examples
///
/// Assume there are two files in the same directory with the following
/// contents:
///
/// File `data` (no trailing newline):
///
/// ```text
/// abcd
/// ```
///
/// File `main.rs`:
///
/// ```rust
/// use zerocopy::include_value;
/// # macro_rules! include_value {
/// #     ($file:expr) => { zerocopy::include_value!(concat!("../testdata/include_value/", $file)) };
/// # }
///
/// fn main() {
///     let as_u32: u32 = include_value!("data");
///     assert_eq!(as_u32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
///     let as_i32: i32 = include_value!("data");
///     assert_eq!(as_i32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
/// }
/// ```
4087 #[doc(alias("include_bytes", "include_data", "include_type"))]
4088 #[macro_export]
4089 macro_rules! include_value {
4090 ($file:expr $(,)?) => {
4091 $crate::transmute!(*::core::include_bytes!($file))
4092 };
4093 }
4094
4095 /// A typed reference derived from a byte slice.
4096 ///
4097 /// A `Ref<B, T>` is a reference to a `T` which is stored in a byte slice, `B`.
4098 /// Unlike a native reference (`&T` or `&mut T`), `Ref<B, T>` has the same
4099 /// mutability as the byte slice it was constructed from (`B`).
4100 ///
4101 /// # Examples
4102 ///
4103 /// `Ref` can be used to treat a sequence of bytes as a structured type, and to
4104 /// read and write the fields of that type as if the byte slice reference were
4105 /// simply a reference to that type.
4106 ///
4107 /// ```rust
4108 /// # #[cfg(feature = "derive")] { // This example uses derives, and won't compile without them
4109 /// use zerocopy::{AsBytes, ByteSlice, ByteSliceMut, FromBytes, FromZeroes, Ref, Unaligned};
4110 ///
4111 /// #[derive(FromZeroes, FromBytes, AsBytes, Unaligned)]
4112 /// #[repr(C)]
4113 /// struct UdpHeader {
4114 /// src_port: [u8; 2],
4115 /// dst_port: [u8; 2],
4116 /// length: [u8; 2],
4117 /// checksum: [u8; 2],
4118 /// }
4119 ///
4120 /// struct UdpPacket<B> {
4121 /// header: Ref<B, UdpHeader>,
4122 /// body: B,
4123 /// }
4124 ///
4125 /// impl<B: ByteSlice> UdpPacket<B> {
4126 /// pub fn parse(bytes: B) -> Option<UdpPacket<B>> {
4127 /// let (header, body) = Ref::new_unaligned_from_prefix(bytes)?;
4128 /// Some(UdpPacket { header, body })
4129 /// }
4130 ///
4131 /// pub fn get_src_port(&self) -> [u8; 2] {
4132 /// self.header.src_port
4133 /// }
4134 /// }
4135 ///
4136 /// impl<B: ByteSliceMut> UdpPacket<B> {
4137 /// pub fn set_src_port(&mut self, src_port: [u8; 2]) {
4138 /// self.header.src_port = src_port;
4139 /// }
4140 /// }
4141 /// # }
4142 /// ```
4143 pub struct Ref<B, T: ?Sized>(B, PhantomData<T>);
4144
4145 /// Deprecated: prefer [`Ref`] instead.
4146 #[deprecated(since = "0.7.0", note = "LayoutVerified has been renamed to Ref")]
4147 #[doc(hidden)]
4148 pub type LayoutVerified<B, T> = Ref<B, T>;
4149
4150 impl<B, T> Ref<B, T>
4151 where
4152 B: ByteSlice,
4153 {
4154 /// Constructs a new `Ref`.
4155 ///
4156 /// `new` verifies that `bytes.len() == size_of::<T>()` and that `bytes` is
4157 /// aligned to `align_of::<T>()`, and constructs a new `Ref`. If either of
4158 /// these checks fail, it returns `None`.
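///
/// A minimal sketch of both checks, using `AsBytes::as_bytes` to obtain a
/// slice that is guaranteed to be aligned:
///
/// ```rust
/// use zerocopy::{AsBytes, Ref};
///
/// let n: u32 = 42;
/// // `n.as_bytes()` is 4 bytes long and aligned to `align_of::<u32>()`.
/// let r = Ref::<_, u32>::new(n.as_bytes()).unwrap();
/// assert_eq!(*r, 42);
/// // A slice of the wrong length is rejected.
/// assert!(Ref::<_, u32>::new(&[0u8; 3][..]).is_none());
/// ```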
4159 #[inline]
4160 pub fn new(bytes: B) -> Option<Ref<B, T>> {
4161 if bytes.len() != mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4162 return None;
4163 }
4164 Some(Ref(bytes, PhantomData))
4165 }
4166
4167 /// Constructs a new `Ref` from the prefix of a byte slice.
4168 ///
4169 /// `new_from_prefix` verifies that `bytes.len() >= size_of::<T>()` and that
4170 /// `bytes` is aligned to `align_of::<T>()`. It consumes the first
4171 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4172 /// the remaining bytes to the caller. If either the length or alignment
4173 /// checks fail, it returns `None`.
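///
/// A minimal sketch, using `[u8; 4]` so that the alignment check is
/// trivially satisfied:
///
/// ```rust
/// use zerocopy::Ref;
///
/// let bytes = &b"abcdef"[..];
/// let (r, rest) = Ref::<_, [u8; 4]>::new_from_prefix(bytes).unwrap();
/// assert_eq!(*r, *b"abcd");
/// assert_eq!(rest, &b"ef"[..]);
/// ```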
4174 #[inline]
4175 pub fn new_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4176 if bytes.len() < mem::size_of::<T>() || !util::aligned_to::<_, T>(bytes.deref()) {
4177 return None;
4178 }
4179 let (bytes, suffix) = bytes.split_at(mem::size_of::<T>());
4180 Some((Ref(bytes, PhantomData), suffix))
4181 }
4182
4183 /// Constructs a new `Ref` from the suffix of a byte slice.
4184 ///
4185 /// `new_from_suffix` verifies that `bytes.len() >= size_of::<T>()` and that
4186 /// the last `size_of::<T>()` bytes of `bytes` are aligned to
4187 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4188 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4189 /// caller. If either the length or alignment checks fail, it returns
4190 /// `None`.
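///
/// A minimal sketch:
///
/// ```rust
/// use zerocopy::Ref;
///
/// let bytes = &b"abcdef"[..];
/// let (prefix, r) = Ref::<_, [u8; 2]>::new_from_suffix(bytes).unwrap();
/// assert_eq!(prefix, &b"abcd"[..]);
/// assert_eq!(*r, *b"ef");
/// ```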
4191 #[inline]
4192 pub fn new_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4193 let bytes_len = bytes.len();
4194 let split_at = bytes_len.checked_sub(mem::size_of::<T>())?;
4195 let (prefix, bytes) = bytes.split_at(split_at);
4196 if !util::aligned_to::<_, T>(bytes.deref()) {
4197 return None;
4198 }
4199 Some((prefix, Ref(bytes, PhantomData)))
4200 }
4201 }
4202
4203 impl<B, T> Ref<B, [T]>
4204 where
4205 B: ByteSlice,
4206 {
4207 /// Constructs a new `Ref` of a slice type.
4208 ///
4209 /// `new_slice` verifies that `bytes.len()` is a multiple of
4210 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
4211 /// constructs a new `Ref`. If either of these checks fail, it returns
4212 /// `None`.
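///
/// A minimal sketch, using `u16` (which is `FromBytes + AsBytes`, so the
/// source bytes can be borrowed from a `u16` array and are thus aligned):
///
/// ```rust
/// use zerocopy::{AsBytes, Ref};
///
/// let src = [1u16, 2, 3];
/// let r = Ref::<_, [u16]>::new_slice(src.as_bytes()).unwrap();
/// assert_eq!(&*r, &[1, 2, 3]);
/// ```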
4213 ///
4214 /// # Panics
4215 ///
4216 /// `new_slice` panics if `T` is a zero-sized type.
4217 #[inline]
4218 pub fn new_slice(bytes: B) -> Option<Ref<B, [T]>> {
4219 let remainder = bytes
4220 .len()
4221 .checked_rem(mem::size_of::<T>())
4222 .expect("Ref::new_slice called on a zero-sized type");
4223 if remainder != 0 || !util::aligned_to::<_, T>(bytes.deref()) {
4224 return None;
4225 }
4226 Some(Ref(bytes, PhantomData))
4227 }
4228
4229 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice.
4230 ///
4231 /// `new_slice_from_prefix` verifies that `bytes.len() >= size_of::<T>() *
4232 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
4233 /// first `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
4234 /// and returns the remaining bytes to the caller. It also ensures that
4235 /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4236 /// length, alignment, or overflow checks fail, it returns `None`.
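///
/// A minimal sketch:
///
/// ```rust
/// use zerocopy::{AsBytes, Ref};
///
/// let src = [1u16, 2, 3];
/// let (r, rest) = Ref::<_, [u16]>::new_slice_from_prefix(src.as_bytes(), 2).unwrap();
/// assert_eq!(&*r, &[1, 2]);
/// assert_eq!(rest.len(), 2);
/// ```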
4237 ///
4238 /// # Panics
4239 ///
4240 /// `new_slice_from_prefix` panics if `T` is a zero-sized type.
4241 #[inline]
4242 pub fn new_slice_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4243 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4244 Some(len) => len,
4245 None => return None,
4246 };
4247 if bytes.len() < expected_len {
4248 return None;
4249 }
4250 let (prefix, bytes) = bytes.split_at(expected_len);
4251 Self::new_slice(prefix).map(move |l| (l, bytes))
4252 }
4253
4254 /// Constructs a new `Ref` of a slice type from the suffix of a byte slice.
4255 ///
4256 /// `new_slice_from_suffix` verifies that `bytes.len() >= size_of::<T>() *
4257 /// count` and that `bytes` is aligned to `align_of::<T>()`. It consumes the
4258 /// last `size_of::<T>() * count` bytes from `bytes` to construct a `Ref`,
4259 /// and returns the preceding bytes to the caller. It also ensures that
4260 /// `size_of::<T>() * count` does not overflow a `usize`. If any of the
4261 /// length, alignment, or overflow checks fail, it returns `None`.
4262 ///
4263 /// # Panics
4264 ///
4265 /// `new_slice_from_suffix` panics if `T` is a zero-sized type.
4266 #[inline]
4267 pub fn new_slice_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4268 let expected_len = match mem::size_of::<T>().checked_mul(count) {
4269 Some(len) => len,
4270 None => return None,
4271 };
4272 let split_at = bytes.len().checked_sub(expected_len)?;
4273 let (bytes, suffix) = bytes.split_at(split_at);
4274 Self::new_slice(suffix).map(move |l| (bytes, l))
4275 }
4276 }
4277
4278 fn map_zeroed<B: ByteSliceMut, T: ?Sized>(opt: Option<Ref<B, T>>) -> Option<Ref<B, T>> {
4279 match opt {
4280 Some(mut r) => {
4281 r.0.fill(0);
4282 Some(r)
4283 }
4284 None => None,
4285 }
4286 }
4287
4288 fn map_prefix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4289 opt: Option<(Ref<B, T>, B)>,
4290 ) -> Option<(Ref<B, T>, B)> {
4291 match opt {
4292 Some((mut r, rest)) => {
4293 r.0.fill(0);
4294 Some((r, rest))
4295 }
4296 None => None,
4297 }
4298 }
4299
4300 fn map_suffix_tuple_zeroed<B: ByteSliceMut, T: ?Sized>(
4301 opt: Option<(B, Ref<B, T>)>,
4302 ) -> Option<(B, Ref<B, T>)> {
4303 map_prefix_tuple_zeroed(opt.map(|(a, b)| (b, a))).map(|(a, b)| (b, a))
4304 }
4305
4306 impl<B, T> Ref<B, T>
4307 where
4308 B: ByteSliceMut,
4309 {
4310 /// Constructs a new `Ref` after zeroing the bytes.
4311 ///
4312 /// `new_zeroed` verifies that `bytes.len() == size_of::<T>()` and that
4313 /// `bytes` is aligned to `align_of::<T>()`, and constructs a new `Ref`. If
4314 /// either of these checks fail, it returns `None`.
4315 ///
4316 /// If the checks succeed, then `bytes` will be initialized to zero. This
4317 /// can be useful when re-using buffers to ensure that sensitive data
4318 /// previously stored in the buffer is not leaked.
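///
/// A minimal sketch of the zeroing behavior:
///
/// ```rust
/// use zerocopy::Ref;
///
/// let mut buf = [0xffu8; 4];
/// let r = Ref::<_, [u8; 4]>::new_zeroed(&mut buf[..]).unwrap();
/// assert_eq!(*r, [0u8; 4]);
/// ```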
4319 #[inline(always)]
4320 pub fn new_zeroed(bytes: B) -> Option<Ref<B, T>> {
4321 map_zeroed(Self::new(bytes))
4322 }
4323
4324 /// Constructs a new `Ref` from the prefix of a byte slice, zeroing the
4325 /// prefix.
4326 ///
4327 /// `new_from_prefix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4328 /// and that `bytes` is aligned to `align_of::<T>()`. It consumes the first
4329 /// `size_of::<T>()` bytes from `bytes` to construct a `Ref`, and returns
4330 /// the remaining bytes to the caller. If either the length or alignment
4331 /// checks fail, it returns `None`.
4332 ///
4333 /// If the checks succeed, then the prefix which is consumed will be
4334 /// initialized to zero. This can be useful when re-using buffers to ensure
4335 /// that sensitive data previously stored in the buffer is not leaked.
4336 #[inline(always)]
4337 pub fn new_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4338 map_prefix_tuple_zeroed(Self::new_from_prefix(bytes))
4339 }
4340
4341 /// Constructs a new `Ref` from the suffix of a byte slice, zeroing the
4342 /// suffix.
4343 ///
4344 /// `new_from_suffix_zeroed` verifies that `bytes.len() >= size_of::<T>()`
4345 /// and that the last `size_of::<T>()` bytes of `bytes` are aligned to
4346 /// `align_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4347 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4348 /// caller. If either the length or alignment checks fail, it returns
4349 /// `None`.
4350 ///
4351 /// If the checks succeed, then the suffix which is consumed will be
4352 /// initialized to zero. This can be useful when re-using buffers to ensure
4353 /// that sensitive data previously stored in the buffer is not leaked.
4354 #[inline(always)]
4355 pub fn new_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4356 map_suffix_tuple_zeroed(Self::new_from_suffix(bytes))
4357 }
4358 }
4359
4360 impl<B, T> Ref<B, [T]>
4361 where
4362 B: ByteSliceMut,
4363 {
4364 /// Constructs a new `Ref` of a slice type after zeroing the bytes.
4365 ///
4366 /// `new_slice_zeroed` verifies that `bytes.len()` is a multiple of
4367 /// `size_of::<T>()` and that `bytes` is aligned to `align_of::<T>()`, and
4368 /// constructs a new `Ref`. If either of these checks fail, it returns
4369 /// `None`.
4370 ///
4371 /// If the checks succeed, then `bytes` will be initialized to zero. This
4372 /// can be useful when re-using buffers to ensure that sensitive data
4373 /// previously stored in the buffer is not leaked.
4374 ///
4375 /// # Panics
4376 ///
4377 /// `new_slice_zeroed` panics if `T` is a zero-sized type.
4378 #[inline(always)]
4379 pub fn new_slice_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4380 map_zeroed(Self::new_slice(bytes))
4381 }
4382
4383 /// Constructs a new `Ref` of a slice type from the prefix of a byte slice,
4384 /// after zeroing the bytes.
4385 ///
4386 /// `new_slice_from_prefix_zeroed` verifies that `bytes.len() >=
4387 /// size_of::<T>() * count` and that `bytes` is aligned to `align_of::<T>()`.
4388 /// It consumes the first `size_of::<T>() * count` bytes from `bytes` to
4389 /// construct a `Ref`, and returns the remaining bytes to the caller. It also
4390 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
4391 /// of the length, alignment, or overflow checks fail, it returns `None`.
4392 ///
4393 /// If the checks succeed, then the prefix which is consumed will be
4394 /// initialized to zero. This can be useful when re-using buffers to ensure
4395 /// that sensitive data previously stored in the buffer is not leaked.
4396 ///
4397 /// # Panics
4398 ///
4399 /// `new_slice_from_prefix_zeroed` panics if `T` is a zero-sized type.
4400 #[inline(always)]
4401 pub fn new_slice_from_prefix_zeroed(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4402 map_prefix_tuple_zeroed(Self::new_slice_from_prefix(bytes, count))
4403 }
4404
4405 /// Constructs a new `Ref` of a slice type from the suffix of a byte slice,
4406 /// after zeroing the bytes.
4407 ///
4408 /// `new_slice_from_suffix_zeroed` verifies that `bytes.len() >=
4409 /// size_of::<T>() * count` and that `bytes` is aligned to `align_of::<T>()`.
4410 /// It consumes the last `size_of::<T>() * count` bytes from `bytes` to
4411 /// construct a `Ref`, and returns the preceding bytes to the caller. It also
4412 /// ensures that `size_of::<T>() * count` does not overflow a `usize`. If any
4413 /// of the length, alignment, or overflow checks fail, it returns `None`.
4414 ///
4415 /// If the checks succeed, then the consumed suffix will be initialized to
4416 /// zero. This can be useful when re-using buffers to ensure that sensitive
4417 /// data previously stored in the buffer is not leaked.
4418 ///
4419 /// # Panics
4420 ///
4421 /// `new_slice_from_suffix_zeroed` panics if `T` is a zero-sized type.
4422 #[inline(always)]
4423 pub fn new_slice_from_suffix_zeroed(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4424 map_suffix_tuple_zeroed(Self::new_slice_from_suffix(bytes, count))
4425 }
4426 }
4427
4428 impl<B, T> Ref<B, T>
4429 where
4430 B: ByteSlice,
4431 T: Unaligned,
4432 {
4433 /// Constructs a new `Ref` for a type with no alignment requirement.
4434 ///
4435 /// `new_unaligned` verifies that `bytes.len() == size_of::<T>()` and
4436 /// constructs a new `Ref`. If the check fails, it returns `None`.
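///
/// A minimal sketch; `[u8; 2]` implements `Unaligned`, so only the length is
/// checked:
///
/// ```rust
/// use zerocopy::Ref;
///
/// let r = Ref::<_, [u8; 2]>::new_unaligned(&b"hi"[..]).unwrap();
/// assert_eq!(*r, *b"hi");
/// ```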
4437 #[inline(always)]
4438 pub fn new_unaligned(bytes: B) -> Option<Ref<B, T>> {
4439 Ref::new(bytes)
4440 }
4441
4442 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4443 /// no alignment requirement.
4444 ///
4445 /// `new_unaligned_from_prefix` verifies that `bytes.len() >=
4446 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4447 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4448 /// caller. If the length check fails, it returns `None`.
4449 #[inline(always)]
4450 pub fn new_unaligned_from_prefix(bytes: B) -> Option<(Ref<B, T>, B)> {
4451 Ref::new_from_prefix(bytes)
4452 }
4453
4454 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4455 /// no alignment requirement.
4456 ///
4457 /// `new_unaligned_from_suffix` verifies that `bytes.len() >=
4458 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4459 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4460 /// caller. If the length check fails, it returns `None`.
4461 #[inline(always)]
4462 pub fn new_unaligned_from_suffix(bytes: B) -> Option<(B, Ref<B, T>)> {
4463 Ref::new_from_suffix(bytes)
4464 }
4465 }
4466
4467 impl<B, T> Ref<B, [T]>
4468 where
4469 B: ByteSlice,
4470 T: Unaligned,
4471 {
4472 /// Constructs a new `Ref` of a slice type with no alignment requirement.
4473 ///
4474 /// `new_slice_unaligned` verifies that `bytes.len()` is a multiple of
4475 /// `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4476 /// returns `None`.
4477 ///
4478 /// # Panics
4479 ///
4480 /// `new_slice_unaligned` panics if `T` is a zero-sized type.
4481 #[inline(always)]
4482 pub fn new_slice_unaligned(bytes: B) -> Option<Ref<B, [T]>> {
4483 Ref::new_slice(bytes)
4484 }
4485
4486 /// Constructs a new `Ref` of a slice type with no alignment requirement
4487 /// from the prefix of a byte slice.
4488 ///
4489 /// `new_slice_unaligned_from_prefix` verifies that `bytes.len() >=
4490 /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
4491 /// bytes from `bytes` to construct a `Ref`, and returns the remaining bytes
4492 /// to the caller. It also ensures that `size_of::<T>() * count` does not
4493 /// overflow a `usize`. If either the length or overflow check fails, it
4494 /// returns `None`.
4495 ///
4496 /// # Panics
4497 ///
4498 /// `new_slice_unaligned_from_prefix` panics if `T` is a zero-sized type.
4499 #[inline(always)]
4500 pub fn new_slice_unaligned_from_prefix(bytes: B, count: usize) -> Option<(Ref<B, [T]>, B)> {
4501 Ref::new_slice_from_prefix(bytes, count)
4502 }
4503
4504 /// Constructs a new `Ref` of a slice type with no alignment requirement
4505 /// from the suffix of a byte slice.
4506 ///
4507 /// `new_slice_unaligned_from_suffix` verifies that `bytes.len() >=
4508 /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
4509 /// bytes from `bytes` to construct a `Ref`, and returns the preceding bytes
4510 /// to the caller. It also ensures that `size_of::<T>() * count` does not
4511 /// overflow a `usize`. If either the length or overflow check fails, it returns `None`.
4512 ///
4513 /// # Panics
4514 ///
4515 /// `new_slice_unaligned_from_suffix` panics if `T` is a zero-sized type.
4516 #[inline(always)]
4517 pub fn new_slice_unaligned_from_suffix(bytes: B, count: usize) -> Option<(B, Ref<B, [T]>)> {
4518 Ref::new_slice_from_suffix(bytes, count)
4519 }
4520 }
4521
4522 impl<B, T> Ref<B, T>
4523 where
4524 B: ByteSliceMut,
4525 T: Unaligned,
4526 {
4527 /// Constructs a new `Ref` for a type with no alignment requirement, zeroing
4528 /// the bytes.
4529 ///
4530 /// `new_unaligned_zeroed` verifies that `bytes.len() == size_of::<T>()` and
4531 /// constructs a new `Ref`. If the check fails, it returns `None`.
4532 ///
4533 /// If the check succeeds, then `bytes` will be initialized to zero. This
4534 /// can be useful when re-using buffers to ensure that sensitive data
4535 /// previously stored in the buffer is not leaked.
4536 #[inline(always)]
4537 pub fn new_unaligned_zeroed(bytes: B) -> Option<Ref<B, T>> {
4538 map_zeroed(Self::new_unaligned(bytes))
4539 }
4540
4541 /// Constructs a new `Ref` from the prefix of a byte slice for a type with
4542 /// no alignment requirement, zeroing the prefix.
4543 ///
4544 /// `new_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
4545 /// size_of::<T>()`. It consumes the first `size_of::<T>()` bytes from
4546 /// `bytes` to construct a `Ref`, and returns the remaining bytes to the
4547 /// caller. If the length check fails, it returns `None`.
4548 ///
4549 /// If the check succeeds, then the prefix which is consumed will be
4550 /// initialized to zero. This can be useful when re-using buffers to ensure
4551 /// that sensitive data previously stored in the buffer is not leaked.
4552 #[inline(always)]
4553 pub fn new_unaligned_from_prefix_zeroed(bytes: B) -> Option<(Ref<B, T>, B)> {
4554 map_prefix_tuple_zeroed(Self::new_unaligned_from_prefix(bytes))
4555 }
4556
4557 /// Constructs a new `Ref` from the suffix of a byte slice for a type with
4558 /// no alignment requirement, zeroing the suffix.
4559 ///
4560 /// `new_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
4561 /// size_of::<T>()`. It consumes the last `size_of::<T>()` bytes from
4562 /// `bytes` to construct a `Ref`, and returns the preceding bytes to the
4563 /// caller. If the length check fails, it returns `None`.
4564 ///
4565 /// If the check succeeds, then the suffix which is consumed will be
4566 /// initialized to zero. This can be useful when re-using buffers to ensure
4567 /// that sensitive data previously stored in the buffer is not leaked.
4568 #[inline(always)]
4569 pub fn new_unaligned_from_suffix_zeroed(bytes: B) -> Option<(B, Ref<B, T>)> {
4570 map_suffix_tuple_zeroed(Self::new_unaligned_from_suffix(bytes))
4571 }
4572 }
4573
4574 impl<B, T> Ref<B, [T]>
4575 where
4576 B: ByteSliceMut,
4577 T: Unaligned,
4578 {
4579 /// Constructs a new `Ref` for a slice type with no alignment requirement,
4580 /// zeroing the bytes.
4581 ///
4582 /// `new_slice_unaligned_zeroed` verifies that `bytes.len()` is a multiple
4583 /// of `size_of::<T>()` and constructs a new `Ref`. If the check fails, it
4584 /// returns `None`.
4585 ///
4586 /// If the check succeeds, then `bytes` will be initialized to zero. This
4587 /// can be useful when re-using buffers to ensure that sensitive data
4588 /// previously stored in the buffer is not leaked.
4589 ///
4590 /// # Panics
4591 ///
4592 /// `new_slice_unaligned_zeroed` panics if `T` is a zero-sized type.
4593 #[inline(always)]
4594 pub fn new_slice_unaligned_zeroed(bytes: B) -> Option<Ref<B, [T]>> {
4595 map_zeroed(Self::new_slice_unaligned(bytes))
4596 }
4597
4598 /// Constructs a new `Ref` of a slice type with no alignment requirement
4599 /// from the prefix of a byte slice, after zeroing the bytes.
4600 ///
4601 /// `new_slice_unaligned_from_prefix_zeroed` verifies that `bytes.len() >=
4602 /// size_of::<T>() * count`. It consumes the first `size_of::<T>() * count`
4603 /// bytes from `bytes` to construct a `Ref`, and returns the remaining bytes
4604 /// to the caller. It also ensures that `size_of::<T>() * count` does not
4605 /// overflow a `usize`. If either the length or overflow check fails, it
4606 /// returns `None`.
4607 ///
4608 /// If the checks succeed, then the prefix will be initialized to zero. This
4609 /// can be useful when re-using buffers to ensure that sensitive data
4610 /// previously stored in the buffer is not leaked.
4611 ///
4612 /// # Panics
4613 ///
4614 /// `new_slice_unaligned_from_prefix_zeroed` panics if `T` is a zero-sized
4615 /// type.
4616 #[inline(always)]
4617 pub fn new_slice_unaligned_from_prefix_zeroed(
4618 bytes: B,
4619 count: usize,
4620 ) -> Option<(Ref<B, [T]>, B)> {
4621 map_prefix_tuple_zeroed(Self::new_slice_unaligned_from_prefix(bytes, count))
4622 }
4623
4624 /// Constructs a new `Ref` of a slice type with no alignment requirement
4625 /// from the suffix of a byte slice, after zeroing the bytes.
4626 ///
4627 /// `new_slice_unaligned_from_suffix_zeroed` verifies that `bytes.len() >=
4628 /// size_of::<T>() * count`. It consumes the last `size_of::<T>() * count`
4629 /// bytes from `bytes` to construct a `Ref`, and returns the preceding bytes
4630 /// to the caller. It also ensures that `size_of::<T>() * count` does not
4631 /// overflow a `usize`. If either the length or overflow check fails, it returns `None`.
4632 ///
4633 /// If the checks succeed, then the suffix will be initialized to zero. This
4634 /// can be useful when re-using buffers to ensure that sensitive data
4635 /// previously stored in the buffer is not leaked.
4636 ///
4637 /// # Panics
4638 ///
4639 /// `new_slice_unaligned_from_suffix_zeroed` panics if `T` is a zero-sized
4640 /// type.
4641 #[inline(always)]
4642 pub fn new_slice_unaligned_from_suffix_zeroed(
4643 bytes: B,
4644 count: usize,
4645 ) -> Option<(B, Ref<B, [T]>)> {
4646 map_suffix_tuple_zeroed(Self::new_slice_unaligned_from_suffix(bytes, count))
4647 }
4648 }
4649
4650 impl<'a, B, T> Ref<B, T>
4651 where
4652 B: 'a + ByteSlice,
4653 T: FromBytes,
4654 {
4655 /// Converts this `Ref` into a reference.
4656 ///
4657 /// `into_ref` consumes the `Ref`, and returns a reference to `T`.
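///
/// A minimal sketch; the returned reference borrows from the underlying byte
/// slice rather than from the (consumed) `Ref`:
///
/// ```rust
/// use zerocopy::{AsBytes, Ref};
///
/// let n: u32 = 7;
/// let r = Ref::<_, u32>::new(n.as_bytes()).unwrap();
/// let borrowed: &u32 = r.into_ref();
/// assert_eq!(*borrowed, 7);
/// ```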
4658 #[inline(always)]
4659 pub fn into_ref(self) -> &'a T {
4660 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4661
4662 // SAFETY: According to the safety preconditions on
4663 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4664 // ensures that, given `B: 'a`, it is sound to drop `self` and still
4665 // access the underlying memory using reads for `'a`.
4666 unsafe { self.deref_helper() }
4667 }
4668 }
4669
4670 impl<'a, B, T> Ref<B, T>
4671 where
4672 B: 'a + ByteSliceMut,
4673 T: FromBytes + AsBytes,
4674 {
4675 /// Converts this `Ref` into a mutable reference.
4676 ///
4677 /// `into_mut` consumes the `Ref`, and returns a mutable reference to `T`.
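///
/// A minimal sketch:
///
/// ```rust
/// use zerocopy::{AsBytes, Ref};
///
/// let mut n: u32 = 7;
/// let r = Ref::<_, u32>::new(n.as_bytes_mut()).unwrap();
/// let borrowed: &mut u32 = r.into_mut();
/// *borrowed = 8;
/// assert_eq!(n, 8);
/// ```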
4678 #[inline(always)]
4679 pub fn into_mut(mut self) -> &'a mut T {
4680 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4681
4682 // SAFETY: According to the safety preconditions on
4683 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4684 // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
4685 // `self` and still access the underlying memory using both reads and
4686 // writes for `'a`.
4687 unsafe { self.deref_mut_helper() }
4688 }
4689 }
4690
4691 impl<'a, B, T> Ref<B, [T]>
4692 where
4693 B: 'a + ByteSlice,
4694 T: FromBytes,
4695 {
4696 /// Converts this `Ref` into a slice reference.
4697 ///
4698 /// `into_slice` consumes the `Ref`, and returns a reference to `[T]`.
4699 #[inline(always)]
4700 pub fn into_slice(self) -> &'a [T] {
4701 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4702
4703 // SAFETY: According to the safety preconditions on
4704 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4705 // ensures that, given `B: 'a`, it is sound to drop `self` and still
4706 // access the underlying memory using reads for `'a`.
4707 unsafe { self.deref_slice_helper() }
4708 }
4709 }
4710
4711 impl<'a, B, T> Ref<B, [T]>
4712 where
4713 B: 'a + ByteSliceMut,
4714 T: FromBytes + AsBytes,
4715 {
4716 /// Converts this `Ref` into a mutable slice reference.
4717 ///
4718 /// `into_mut_slice` consumes the `Ref`, and returns a mutable reference to
4719 /// `[T]`.
4720 #[inline(always)]
4721 pub fn into_mut_slice(mut self) -> &'a mut [T] {
4722 assert!(B::INTO_REF_INTO_MUT_ARE_SOUND);
4723
4724 // SAFETY: According to the safety preconditions on
4725 // `ByteSlice::INTO_REF_INTO_MUT_ARE_SOUND`, the preceding assert
4726 // ensures that, given `B: 'a + ByteSliceMut`, it is sound to drop
4727 // `self` and still access the underlying memory using both reads and
4728 // writes for `'a`.
4729 unsafe { self.deref_mut_slice_helper() }
4730 }
4731 }
4732
4733 impl<B, T> Ref<B, T>
4734 where
4735 B: ByteSlice,
4736 T: FromBytes,
4737 {
4738 /// Creates an immutable reference to `T` with a specific lifetime.
4739 ///
4740 /// # Safety
4741 ///
4742 /// The type bounds on this method guarantee that it is safe to create an
4743 /// immutable reference to `T` from `self`. However, since the lifetime `'a`
4744 /// is not required to be shorter than the lifetime of the reference to
4745 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4746 /// this reference. In particular, the referent must exist for all of `'a`,
4747 /// and no mutable references to the same memory may be constructed during
4748 /// `'a`.
4749 unsafe fn deref_helper<'a>(&self) -> &'a T {
4750 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4751 #[allow(clippy::undocumented_unsafe_blocks)]
4752 unsafe {
4753 &*self.0.as_ptr().cast::<T>()
4754 }
4755 }
4756 }
4757
4758 impl<B, T> Ref<B, T>
4759 where
4760 B: ByteSliceMut,
4761 T: FromBytes + AsBytes,
4762 {
4763 /// Creates a mutable reference to `T` with a specific lifetime.
4764 ///
4765 /// # Safety
4766 ///
4767 /// The type bounds on this method guarantee that it is safe to create a
4768 /// mutable reference to `T` from `self`. However, since the lifetime `'a`
4769 /// is not required to be shorter than the lifetime of the reference to
4770 /// `self`, the caller must guarantee that the lifetime `'a` is valid for
4771 /// this reference. In particular, the referent must exist for all of `'a`,
4772 /// and no other references - mutable or immutable - to the same memory may
4773 /// be constructed during `'a`.
4774 unsafe fn deref_mut_helper<'a>(&mut self) -> &'a mut T {
4775 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4776 #[allow(clippy::undocumented_unsafe_blocks)]
4777 unsafe {
4778 &mut *self.0.as_mut_ptr().cast::<T>()
4779 }
4780 }
4781 }
4782
4783 impl<B, T> Ref<B, [T]>
4784 where
4785 B: ByteSlice,
4786 T: FromBytes,
4787 {
4788 /// Creates an immutable reference to `[T]` with a specific lifetime.
4789 ///
4790 /// # Safety
4791 ///
4792 /// `deref_slice_helper` has the same safety requirements as `deref_helper`.
4793 unsafe fn deref_slice_helper<'a>(&self) -> &'a [T] {
4794 let len = self.0.len();
4795 let elem_size = mem::size_of::<T>();
4796 debug_assert_ne!(elem_size, 0);
4797 // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
4798 // Thus, neither the mod nor division operations here can panic.
4799 #[allow(clippy::arithmetic_side_effects)]
4800 let elems = {
4801 debug_assert_eq!(len % elem_size, 0);
4802 len / elem_size
4803 };
4804 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4805 #[allow(clippy::undocumented_unsafe_blocks)]
4806 unsafe {
4807 slice::from_raw_parts(self.0.as_ptr().cast::<T>(), elems)
4808 }
4809 }
4810 }
4811
4812 impl<B, T> Ref<B, [T]>
4813 where
4814 B: ByteSliceMut,
4815 T: FromBytes + AsBytes,
4816 {
4817 /// Creates a mutable reference to `[T]` with a specific lifetime.
4818 ///
4819 /// # Safety
4820 ///
4821 /// `deref_mut_slice_helper` has the same safety requirements as
4822 /// `deref_mut_helper`.
4823 unsafe fn deref_mut_slice_helper<'a>(&mut self) -> &'a mut [T] {
4824 let len = self.0.len();
4825 let elem_size = mem::size_of::<T>();
4826 debug_assert_ne!(elem_size, 0);
4827 // `Ref<_, [T]>` maintains the invariant that `size_of::<T>() > 0`.
4828 // Thus, neither the mod nor division operations here can panic.
4829 #[allow(clippy::arithmetic_side_effects)]
4830 let elems = {
4831 debug_assert_eq!(len % elem_size, 0);
4832 len / elem_size
4833 };
4834 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
4835 #[allow(clippy::undocumented_unsafe_blocks)]
4836 unsafe {
4837 slice::from_raw_parts_mut(self.0.as_mut_ptr().cast::<T>(), elems)
4838 }
4839 }
4840 }
4841
4842 impl<B, T> Ref<B, T>
4843 where
4844 B: ByteSlice,
4845 T: ?Sized,
4846 {
4847 /// Gets the underlying bytes.
4848 #[inline]
4849 pub fn bytes(&self) -> &[u8] {
4850 &self.0
4851 }
4852 }
4853
4854 impl<B, T> Ref<B, T>
4855 where
4856 B: ByteSliceMut,
4857 T: ?Sized,
4858 {
4859 /// Gets the underlying bytes mutably.
4860 #[inline]
4861 pub fn bytes_mut(&mut self) -> &mut [u8] {
4862 &mut self.0
4863 }
4864 }
4865
4866 impl<B, T> Ref<B, T>
4867 where
4868 B: ByteSlice,
4869 T: FromBytes,
4870 {
4871 /// Reads a copy of `T`.
4872 #[inline]
4873 pub fn read(&self) -> T {
4874 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4875 // at least `size_of::<T>()` bytes long, and that it is at least as
4876 // aligned as `align_of::<T>()`. Because `T: FromBytes`, it is sound to
4877 // interpret these bytes as a `T`.
4878 unsafe { ptr::read(self.0.as_ptr().cast::<T>()) }
4879 }
4880 }
4881
4882 impl<B, T> Ref<B, T>
4883 where
4884 B: ByteSliceMut,
4885 T: AsBytes,
4886 {
4887 /// Writes the bytes of `t` and then forgets `t`.
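///
/// A minimal sketch of a `write` followed by a `read`:
///
/// ```rust
/// use zerocopy::Ref;
///
/// let mut buf = [0u8; 4];
/// let mut r = Ref::<_, [u8; 4]>::new(&mut buf[..]).unwrap();
/// r.write(*b"abcd");
/// assert_eq!(r.read(), *b"abcd");
/// ```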
4888 #[inline]
4889 pub fn write(&mut self, t: T) {
4890 // SAFETY: Because of the invariants on `Ref`, we know that `self.0` is
4891 // at least `size_of::<T>()` bytes long, and that it is at least as
4892 // aligned as `align_of::<T>()`. Writing `t` to the buffer will allow
4893 // all of the bytes of `t` to be accessed as a `[u8]`, but because `T:
4894 // AsBytes`, we know this is sound.
4895 unsafe { ptr::write(self.0.as_mut_ptr().cast::<T>(), t) }
4896 }
4897 }
4898
4899 impl<B, T> Deref for Ref<B, T>
4900 where
4901 B: ByteSlice,
4902 T: FromBytes,
4903 {
4904 type Target = T;
4905 #[inline]
4906 fn deref(&self) -> &T {
4907 // SAFETY: This is sound because the lifetime of `self` is the same as
4908 // the lifetime of the return value, meaning that a) the returned
4909 // reference cannot outlive `self` and, b) no mutable methods on `self`
4910 // can be called during the lifetime of the returned reference. See the
4911 // documentation on `deref_helper` for what invariants we are required
4912 // to uphold.
4913 unsafe { self.deref_helper() }
4914 }
4915 }
4916
4917 impl<B, T> DerefMut for Ref<B, T>
4918 where
4919 B: ByteSliceMut,
4920 T: FromBytes + AsBytes,
4921 {
4922 #[inline]
4923 fn deref_mut(&mut self) -> &mut T {
4924 // SAFETY: This is sound because the lifetime of `self` is the same as
4925 // the lifetime of the return value, meaning that a) the returned
4926 // reference cannot outlive `self` and, b) no other methods on `self`
4927 // can be called during the lifetime of the returned reference. See the
4928 // documentation on `deref_mut_helper` for what invariants we are
4929 // required to uphold.
4930 unsafe { self.deref_mut_helper() }
4931 }
4932 }
4933
4934 impl<B, T> Deref for Ref<B, [T]>
4935 where
4936 B: ByteSlice,
4937 T: FromBytes,
4938 {
4939 type Target = [T];
4940 #[inline]
4941 fn deref(&self) -> &[T] {
4942 // SAFETY: This is sound because the lifetime of `self` is the same as
4943 // the lifetime of the return value, meaning that a) the returned
4944 // reference cannot outlive `self` and, b) no mutable methods on `self`
4945 // can be called during the lifetime of the returned reference. See the
4946 // documentation on `deref_slice_helper` for what invariants we are
4947 // required to uphold.
4948 unsafe { self.deref_slice_helper() }
4949 }
4950 }
4951
4952 impl<B, T> DerefMut for Ref<B, [T]>
4953 where
4954 B: ByteSliceMut,
4955 T: FromBytes + AsBytes,
4956 {
4957 #[inline]
4958 fn deref_mut(&mut self) -> &mut [T] {
4959 // SAFETY: This is sound because the lifetime of `self` is the same as
4960 // the lifetime of the return value, meaning that a) the returned
4961 // reference cannot outlive `self` and, b) no other methods on `self`
4962 // can be called during the lifetime of the returned reference. See the
4963 // documentation on `deref_mut_slice_helper` for what invariants we are
4964 // required to uphold.
4965 unsafe { self.deref_mut_slice_helper() }
4966 }
4967 }
4968
4969 impl<T, B> Display for Ref<B, T>
4970 where
4971 B: ByteSlice,
4972 T: FromBytes + Display,
4973 {
4974 #[inline]
4975 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4976 let inner: &T = self;
4977 inner.fmt(fmt)
4978 }
4979 }
4980
4981 impl<T, B> Display for Ref<B, [T]>
4982 where
4983 B: ByteSlice,
4984 T: FromBytes,
4985 [T]: Display,
4986 {
4987 #[inline]
4988 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
4989 let inner: &[T] = self;
4990 inner.fmt(fmt)
4991 }
4992 }
4993
4994 impl<T, B> Debug for Ref<B, T>
4995 where
4996 B: ByteSlice,
4997 T: FromBytes + Debug,
4998 {
4999 #[inline]
5000 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
5001 let inner: &T = self;
5002 fmt.debug_tuple("Ref").field(&inner).finish()
5003 }
5004 }
5005
5006 impl<T, B> Debug for Ref<B, [T]>
5007 where
5008 B: ByteSlice,
5009 T: FromBytes + Debug,
5010 {
5011 #[inline]
5012 fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
5013 let inner: &[T] = self;
5014 fmt.debug_tuple("Ref").field(&inner).finish()
5015 }
5016 }
5017
5018 impl<T, B> Eq for Ref<B, T>
5019 where
5020 B: ByteSlice,
5021 T: FromBytes + Eq,
5022 {
5023 }
5024
5025 impl<T, B> Eq for Ref<B, [T]>
5026 where
5027 B: ByteSlice,
5028 T: FromBytes + Eq,
5029 {
5030 }
5031
5032 impl<T, B> PartialEq for Ref<B, T>
5033 where
5034 B: ByteSlice,
5035 T: FromBytes + PartialEq,
5036 {
5037 #[inline]
5038 fn eq(&self, other: &Self) -> bool {
5039 self.deref().eq(other.deref())
5040 }
5041 }
5042
5043 impl<T, B> PartialEq for Ref<B, [T]>
5044 where
5045 B: ByteSlice,
5046 T: FromBytes + PartialEq,
5047 {
5048 #[inline]
5049 fn eq(&self, other: &Self) -> bool {
5050 self.deref().eq(other.deref())
5051 }
5052 }
5053
5054 impl<T, B> Ord for Ref<B, T>
5055 where
5056 B: ByteSlice,
5057 T: FromBytes + Ord,
5058 {
5059 #[inline]
5060 fn cmp(&self, other: &Self) -> Ordering {
5061 let inner: &T = self;
5062 let other_inner: &T = other;
5063 inner.cmp(other_inner)
5064 }
5065 }
5066
5067 impl<T, B> Ord for Ref<B, [T]>
5068 where
5069 B: ByteSlice,
5070 T: FromBytes + Ord,
5071 {
5072 #[inline]
5073 fn cmp(&self, other: &Self) -> Ordering {
5074 let inner: &[T] = self;
5075 let other_inner: &[T] = other;
5076 inner.cmp(other_inner)
5077 }
5078 }
5079
5080 impl<T, B> PartialOrd for Ref<B, T>
5081 where
5082 B: ByteSlice,
5083 T: FromBytes + PartialOrd,
5084 {
5085 #[inline]
5086 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5087 let inner: &T = self;
5088 let other_inner: &T = other;
5089 inner.partial_cmp(other_inner)
5090 }
5091 }
5092
5093 impl<T, B> PartialOrd for Ref<B, [T]>
5094 where
5095 B: ByteSlice,
5096 T: FromBytes + PartialOrd,
5097 {
5098 #[inline]
5099 fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
5100 let inner: &[T] = self;
5101 let other_inner: &[T] = other;
5102 inner.partial_cmp(other_inner)
5103 }
5104 }
5105
5106 mod sealed {
5107 pub trait ByteSliceSealed {}
5108 }
5109
5110 // ByteSlice and ByteSliceMut abstract over [u8] references (&[u8], &mut [u8],
5111 // Ref<[u8]>, RefMut<[u8]>, etc). We rely on various behaviors of these
5112 // references such as that a given reference will never change its length
5113 // between calls to deref() or deref_mut(), and that split_at() works as
5114 // expected. If ByteSlice or ByteSliceMut were not sealed, consumers could
5115 // implement them in a way that violated these behaviors, and would break our
5116 // unsafe code. Thus, we seal them and implement them only for known-good
5117 // reference types. For the same reason, they're unsafe traits.
5118
5119 #[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5120 /// A mutable or immutable reference to a byte slice.
5121 ///
5122 /// `ByteSlice` abstracts over the mutability of a byte slice reference, and is
5123 /// implemented for various special reference types such as `Ref<[u8]>` and
5124 /// `RefMut<[u8]>`.
5125 ///
5126 /// Note that, while it would be technically possible, `ByteSlice` is not
5127 /// implemented for [`Vec<u8>`], as the only way to implement the [`split_at`]
5128 /// method would involve reallocation, and `split_at` must be a very cheap
5129 /// operation in order for the utilities in this crate to perform as designed.
5130 ///
5131 /// [`split_at`]: crate::ByteSlice::split_at
5132 // It may seem overkill to go to this length to ensure that this doc link never
5133 // breaks. We do this because it simplifies CI - it means that generating docs
5134 // always succeeds, so we don't need special logic to only generate docs under
5135 // certain features.
5136 #[cfg_attr(feature = "alloc", doc = "[`Vec<u8>`]: alloc::vec::Vec")]
5137 #[cfg_attr(
5138 not(feature = "alloc"),
5139 doc = "[`Vec<u8>`]: https://doc.rust-lang.org/std/vec/struct.Vec.html"
5140 )]
5141 pub unsafe trait ByteSlice: Deref<Target = [u8]> + Sized + sealed::ByteSliceSealed {
5142 /// Are the [`Ref::into_ref`] and [`Ref::into_mut`] methods sound when used
5143 /// with `Self`? If not, evaluating this constant must panic at compile
5144 /// time.
5145 ///
5146 /// This exists to work around #716 on versions of zerocopy prior to 0.8.
5147 ///
5148 /// # Safety
5149 ///
5150 /// This may only be set to `true` if the following holds. Given:
5152 /// - `Self: 'a`
5153 /// - `bytes: Self`
5154 /// - `let ptr = bytes.as_ptr()`
5155 ///
5156 /// ...then:
5157 /// - Using `ptr` to read the memory previously addressed by `bytes` is
5158 /// sound for `'a` even after `bytes` has been dropped.
5159 /// - If `Self: ByteSliceMut`, using `ptr` to write the memory previously
5160 /// addressed by `bytes` is sound for `'a` even after `bytes` has been
5161 /// dropped.
5162 #[doc(hidden)]
5163 const INTO_REF_INTO_MUT_ARE_SOUND: bool;
5164
5165 /// Gets a raw pointer to the first byte in the slice.
5166 #[inline]
5167 fn as_ptr(&self) -> *const u8 {
5168 <[u8]>::as_ptr(self)
5169 }
5170
5171 /// Splits the slice at the midpoint.
5172 ///
5173 /// `x.split_at(mid)` returns `x[..mid]` and `x[mid..]`.
5174 ///
5175 /// # Panics
5176 ///
5177 /// `x.split_at(mid)` panics if `mid > x.len()`.
5178 fn split_at(self, mid: usize) -> (Self, Self);
5179 }
5180
5181 #[allow(clippy::missing_safety_doc)] // TODO(fxbug.dev/99068)
5182 /// A mutable reference to a byte slice.
5183 ///
5184 /// `ByteSliceMut` abstracts over various ways of storing a mutable reference to
5185 /// a byte slice, and is implemented for various special reference types such as
5186 /// `RefMut<[u8]>`.
5187 pub unsafe trait ByteSliceMut: ByteSlice + DerefMut {
5188 /// Gets a mutable raw pointer to the first byte in the slice.
5189 #[inline]
5190 fn as_mut_ptr(&mut self) -> *mut u8 {
5191 <[u8]>::as_mut_ptr(self)
5192 }
5193 }
5194
5195 impl<'a> sealed::ByteSliceSealed for &'a [u8] {}
5196 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5197 #[allow(clippy::undocumented_unsafe_blocks)]
5198 unsafe impl<'a> ByteSlice for &'a [u8] {
5199 // SAFETY: If `&'b [u8]: 'a`, then the underlying memory is treated as
5200 // borrowed immutably for `'a` even if the slice itself is dropped.
5201 const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
5202
5203 #[inline]
5204 fn split_at(self, mid: usize) -> (Self, Self) {
5205 <[u8]>::split_at(self, mid)
5206 }
5207 }
5208
5209 impl<'a> sealed::ByteSliceSealed for &'a mut [u8] {}
5210 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5211 #[allow(clippy::undocumented_unsafe_blocks)]
5212 unsafe impl<'a> ByteSlice for &'a mut [u8] {
5213 // SAFETY: If `&'b mut [u8]: 'a`, then the underlying memory is treated as
5214 // borrowed mutably for `'a` even if the slice itself is dropped.
5215 const INTO_REF_INTO_MUT_ARE_SOUND: bool = true;
5216
5217 #[inline]
5218 fn split_at(self, mid: usize) -> (Self, Self) {
5219 <[u8]>::split_at_mut(self, mid)
5220 }
5221 }
5222
5223 impl<'a> sealed::ByteSliceSealed for cell::Ref<'a, [u8]> {}
5224 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5225 #[allow(clippy::undocumented_unsafe_blocks)]
5226 unsafe impl<'a> ByteSlice for cell::Ref<'a, [u8]> {
5227 const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
5228 panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::Ref; see https://github.com/google/zerocopy/issues/716")
5229 } else {
5230 // When compiling documentation, allow the evaluation of this constant
5231 // to succeed. This doesn't represent a soundness hole - it just delays
5232 // any error to runtime. The reason we need this is that, otherwise,
5233 // `rustdoc` will fail when trying to document this item.
5234 false
5235 };
5236
5237 #[inline]
5238 fn split_at(self, mid: usize) -> (Self, Self) {
5239 cell::Ref::map_split(self, |slice| <[u8]>::split_at(slice, mid))
5240 }
5241 }
5242
5243 impl<'a> sealed::ByteSliceSealed for RefMut<'a, [u8]> {}
5244 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5245 #[allow(clippy::undocumented_unsafe_blocks)]
5246 unsafe impl<'a> ByteSlice for RefMut<'a, [u8]> {
5247 const INTO_REF_INTO_MUT_ARE_SOUND: bool = if !cfg!(doc) {
5248 panic!("Ref::into_ref and Ref::into_mut are unsound when used with core::cell::RefMut; see https://github.com/google/zerocopy/issues/716")
5249 } else {
5250 // When compiling documentation, allow the evaluation of this constant
5251 // to succeed. This doesn't represent a soundness hole - it just delays
5252 // any error to runtime. The reason we need this is that, otherwise,
5253 // `rustdoc` will fail when trying to document this item.
5254 false
5255 };
5256
5257 #[inline]
5258 fn split_at(self, mid: usize) -> (Self, Self) {
5259 RefMut::map_split(self, |slice| <[u8]>::split_at_mut(slice, mid))
5260 }
5261 }
5262
5263 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5264 #[allow(clippy::undocumented_unsafe_blocks)]
5265 unsafe impl<'a> ByteSliceMut for &'a mut [u8] {}
5266
5267 // TODO(#429): Add a "SAFETY" comment and remove this `allow`.
5268 #[allow(clippy::undocumented_unsafe_blocks)]
5269 unsafe impl<'a> ByteSliceMut for RefMut<'a, [u8]> {}
5270
5271 #[cfg(feature = "alloc")]
5272 #[cfg_attr(doc_cfg, doc(cfg(feature = "alloc")))]
5273 mod alloc_support {
5274 use alloc::vec::Vec;
5275
5276 use super::*;
5277
5278 /// Extends a `Vec<T>` by pushing `additional` new items onto the end of the
5279 /// vector. The new items are initialized with zeroes.
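///
/// A minimal sketch (requires the `alloc` feature):
///
/// ```rust
/// use zerocopy::extend_vec_zeroed;
///
/// let mut v = vec![0x55u8, 0xaa];
/// extend_vec_zeroed(&mut v, 2);
/// assert_eq!(v, [0x55, 0xaa, 0, 0]);
/// ```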
5280 ///
5281 /// # Panics
5282 ///
5283 /// Panics if `Vec::reserve(additional)` fails to reserve enough memory.
5284 #[inline(always)]
5285 pub fn extend_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, additional: usize) {
5286 insert_vec_zeroed(v, v.len(), additional);
5287 }
5288
5289 /// Inserts `additional` new items into `Vec<T>` at `position`.
5290 /// The new items are initialized with zeroes.
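///
/// A minimal sketch (requires the `alloc` feature); the new zeroes are
/// inserted at `position`, shifting the original tail right:
///
/// ```rust
/// use zerocopy::insert_vec_zeroed;
///
/// let mut v = vec![1u8, 2, 3];
/// insert_vec_zeroed(&mut v, 1, 2);
/// assert_eq!(v, [1, 0, 0, 2, 3]);
/// ```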
5291 ///
5292 /// # Panics
5293 ///
5294 /// * Panics if `position > v.len()`.
5295 /// * Panics if `Vec::reserve(additional)` fails to reserve enough memory.
5296 #[inline]
5297 pub fn insert_vec_zeroed<T: FromZeroes>(v: &mut Vec<T>, position: usize, additional: usize) {
5298 assert!(position <= v.len());
5299 v.reserve(additional);
5300 // SAFETY: The `reserve` call guarantees that these cannot overflow:
5301 // * `ptr.add(position)`
5302 // * `position + additional`
5303 // * `v.len() + additional`
5304 //
5305 // `v.len() - position` cannot overflow because we asserted that
5306 // `position <= v.len()`.
5307 unsafe {
5308 // This is a potentially overlapping copy.
5309 let ptr = v.as_mut_ptr();
5310 #[allow(clippy::arithmetic_side_effects)]
5311 ptr.add(position).copy_to(ptr.add(position + additional), v.len() - position);
5312 ptr.add(position).write_bytes(0, additional);
5313 #[allow(clippy::arithmetic_side_effects)]
5314 v.set_len(v.len() + additional);
5315 }
5316 }
5317
5318 #[cfg(test)]
5319 mod tests {
5320 use core::convert::TryFrom as _;
5321
5322 use super::*;
5323
5324 #[test]
5325 fn test_extend_vec_zeroed() {
5326 // Test extending when there is an existing allocation.
5327 let mut v = vec![100u64, 200, 300];
5328 extend_vec_zeroed(&mut v, 3);
5329 assert_eq!(v.len(), 6);
5330 assert_eq!(&*v, &[100, 200, 300, 0, 0, 0]);
5331 drop(v);
5332
5333 // Test extending when there is no existing allocation.
5334 let mut v: Vec<u64> = Vec::new();
5335 extend_vec_zeroed(&mut v, 3);
5336 assert_eq!(v.len(), 3);
5337 assert_eq!(&*v, &[0, 0, 0]);
5338 drop(v);
5339 }
5340
5341 #[test]
5342 fn test_extend_vec_zeroed_zst() {
5343 // Test extending when there is an existing (fake) allocation.
5344 let mut v = vec![(), (), ()];
5345 extend_vec_zeroed(&mut v, 3);
5346 assert_eq!(v.len(), 6);
5347 assert_eq!(&*v, &[(), (), (), (), (), ()]);
5348 drop(v);
5349
5350 // Test extending when there is no existing (fake) allocation.
5351 let mut v: Vec<()> = Vec::new();
5352 extend_vec_zeroed(&mut v, 3);
5353 assert_eq!(&*v, &[(), (), ()]);
5354 drop(v);
5355 }
5356
5357 #[test]
5358 fn test_insert_vec_zeroed() {
5359 // Insert at start (no existing allocation).
5360 let mut v: Vec<u64> = Vec::new();
5361 insert_vec_zeroed(&mut v, 0, 2);
5362 assert_eq!(v.len(), 2);
5363 assert_eq!(&*v, &[0, 0]);
5364 drop(v);
5365
5366 // Insert at start.
5367 let mut v = vec![100u64, 200, 300];
5368 insert_vec_zeroed(&mut v, 0, 2);
5369 assert_eq!(v.len(), 5);
5370 assert_eq!(&*v, &[0, 0, 100, 200, 300]);
5371 drop(v);
5372
5373 // Insert at middle.
5374 let mut v = vec![100u64, 200, 300];
5375 insert_vec_zeroed(&mut v, 1, 1);
5376 assert_eq!(v.len(), 4);
5377 assert_eq!(&*v, &[100, 0, 200, 300]);
5378 drop(v);
5379
5380 // Insert at end.
5381 let mut v = vec![100u64, 200, 300];
5382 insert_vec_zeroed(&mut v, 3, 1);
5383 assert_eq!(v.len(), 4);
5384 assert_eq!(&*v, &[100, 200, 300, 0]);
5385 drop(v);
5386 }
5387
5388 #[test]
5389 fn test_insert_vec_zeroed_zst() {
5390 // Insert at start (no existing fake allocation).
5391 let mut v: Vec<()> = Vec::new();
5392 insert_vec_zeroed(&mut v, 0, 2);
5393 assert_eq!(v.len(), 2);
5394 assert_eq!(&*v, &[(), ()]);
5395 drop(v);
5396
5397 // Insert at start.
5398 let mut v = vec![(), (), ()];
5399 insert_vec_zeroed(&mut v, 0, 2);
5400 assert_eq!(v.len(), 5);
5401 assert_eq!(&*v, &[(), (), (), (), ()]);
5402 drop(v);
5403
5404 // Insert at middle.
5405 let mut v = vec![(), (), ()];
5406 insert_vec_zeroed(&mut v, 1, 1);
5407 assert_eq!(v.len(), 4);
5408 assert_eq!(&*v, &[(), (), (), ()]);
5409 drop(v);
5410
5411 // Insert at end.
5412 let mut v = vec![(), (), ()];
5413 insert_vec_zeroed(&mut v, 3, 1);
5414 assert_eq!(v.len(), 4);
5415 assert_eq!(&*v, &[(), (), (), ()]);
5416 drop(v);
5417 }
5418
5419 #[test]
5420 fn test_new_box_zeroed() {
5421 assert_eq!(*u64::new_box_zeroed(), 0);
5422 }
5423
5424 #[test]
5425 fn test_new_box_zeroed_array() {
5426 drop(<[u32; 0x1000]>::new_box_zeroed());
5427 }
5428
5429 #[test]
5430 fn test_new_box_zeroed_zst() {
5431 // This test exists in order to exercise unsafe code, especially
5432 // when running under Miri.
5433 #[allow(clippy::unit_cmp)]
5434 {
5435 assert_eq!(*<()>::new_box_zeroed(), ());
5436 }
5437 }
5438
5439 #[test]
5440 fn test_new_box_slice_zeroed() {
5441 let mut s: Box<[u64]> = u64::new_box_slice_zeroed(3);
5442 assert_eq!(s.len(), 3);
5443 assert_eq!(&*s, &[0, 0, 0]);
5444 s[1] = 3;
5445 assert_eq!(&*s, &[0, 3, 0]);
5446 }
5447
5448 #[test]
5449 fn test_new_box_slice_zeroed_empty() {
5450 let s: Box<[u64]> = u64::new_box_slice_zeroed(0);
5451 assert_eq!(s.len(), 0);
5452 }
5453
5454 #[test]
5455 fn test_new_box_slice_zeroed_zst() {
5456 let mut s: Box<[()]> = <()>::new_box_slice_zeroed(3);
5457 assert_eq!(s.len(), 3);
5458 assert!(s.get(10).is_none());
5459 // This test exists in order to exercise unsafe code, especially
5460 // when running under Miri.
5461 #[allow(clippy::unit_cmp)]
5462 {
5463 assert_eq!(s[1], ());
5464 }
5465 s[2] = ();
5466 }
5467
5468 #[test]
5469 fn test_new_box_slice_zeroed_zst_empty() {
5470 let s: Box<[()]> = <()>::new_box_slice_zeroed(0);
5471 assert_eq!(s.len(), 0);
5472 }
5473
5474 #[test]
5475 #[should_panic(expected = "mem::size_of::<Self>() * len overflows `usize`")]
5476 fn test_new_box_slice_zeroed_panics_mul_overflow() {
5477 let _ = u16::new_box_slice_zeroed(usize::MAX);
5478 }
5479
5480 #[test]
5481 #[should_panic(expected = "assertion failed: size <= max_alloc")]
5482 fn test_new_box_slice_zeroed_panics_isize_overflow() {
5483 let max = usize::try_from(isize::MAX).unwrap();
5484 let _ = u16::new_box_slice_zeroed((max / mem::size_of::<u16>()) + 1);
5485 }
5486 }
5487 }
5488
5489 #[cfg(feature = "alloc")]
5490 #[doc(inline)]
5491 pub use alloc_support::*;
5492
5493 #[cfg(test)]
5494 mod tests {
5495 #![allow(clippy::unreadable_literal)]
5496
5497 use core::{cell::UnsafeCell, convert::TryInto as _, ops::Deref};
5498
5499 use static_assertions::assert_impl_all;
5500
5501 use super::*;
5502 use crate::util::testutil::*;
5503
5504 // An unsized type.
5505 //
5506 // This is used to test the custom derives of our traits. The `[u8]` type
5507 // gets a hand-rolled impl, so it doesn't exercise our custom derives.
5508 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes, Unaligned)]
5509 #[repr(transparent)]
5510 struct Unsized([u8]);
5511
5512 impl Unsized {
5513 fn from_mut_slice(slc: &mut [u8]) -> &mut Unsized {
5514 // SAFETY: This is *probably* sound - since the layouts of `[u8]` and
5515 // `Unsized` are the same, so are the layouts of `&mut [u8]` and
5516 // `&mut Unsized`. [1] Even if it turns out that this isn't actually
5517 // guaranteed by the language spec, we can just change this since
5518 // it's in test code.
5519 //
5520 // [1] https://github.com/rust-lang/unsafe-code-guidelines/issues/375
5521 unsafe { mem::transmute(slc) }
5522 }
5523 }
5524
5525 /// Tests of when a sized `DstLayout` is extended with a sized field.
5526 #[allow(clippy::decimal_literal_representation)]
5527 #[test]
5528 fn test_dst_layout_extend_sized_with_sized() {
5529 // This macro constructs a layout corresponding to a `u8` and extends it
5530 // with a zero-sized trailing field of given alignment `n`. The macro
5531 // tests that the resulting layout has both size and alignment `min(n,
5532 // P)` for all valid values of `repr(packed(P))`.
5533 macro_rules! test_align_is_size {
5534 ($n:expr) => {
5535 let base = DstLayout::for_type::<u8>();
5536 let trailing_field = DstLayout::for_type::<elain::Align<$n>>();
5537
5538 let packs =
5539 core::iter::once(None).chain((0..29).map(|p| NonZeroUsize::new(2usize.pow(p))));
5540
5541 for pack in packs {
5542 let composite = base.extend(trailing_field, pack);
5543 let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN);
5544 let align = $n.min(max_align.get());
5545 assert_eq!(
5546 composite,
5547 DstLayout {
5548 align: NonZeroUsize::new(align).unwrap(),
5549 size_info: SizeInfo::Sized { _size: align }
5550 }
5551 )
5552 }
5553 };
5554 }
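
        // Worked example: `test_align_is_size!(16)` with `pack = Some(4)`
        // (i.e., `repr(packed(4))`) computes `align = min(16, 4) = 4`, so the
        // composite layout must be `{ align: 4, size: 4 }`; with `pack = None`
        // the cap is `CURRENT_MAX_ALIGN` instead.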

        test_align_is_size!(1);
        test_align_is_size!(2);
        test_align_is_size!(4);
        test_align_is_size!(8);
        test_align_is_size!(16);
        test_align_is_size!(32);
        test_align_is_size!(64);
        test_align_is_size!(128);
        test_align_is_size!(256);
        test_align_is_size!(512);
        test_align_is_size!(1024);
        test_align_is_size!(2048);
        test_align_is_size!(4096);
        test_align_is_size!(8192);
        test_align_is_size!(16384);
        test_align_is_size!(32768);
        test_align_is_size!(65536);
        test_align_is_size!(131072);
        test_align_is_size!(262144);
        test_align_is_size!(524288);
        test_align_is_size!(1048576);
        test_align_is_size!(2097152);
        test_align_is_size!(4194304);
        test_align_is_size!(8388608);
        test_align_is_size!(16777216);
        test_align_is_size!(33554432);
        test_align_is_size!(67108864);
        test_align_is_size!(134217728);
        test_align_is_size!(268435456);
    }

    /// Tests of when a sized `DstLayout` is extended with a DST field.
    #[test]
    fn test_dst_layout_extend_sized_with_dst() {
        // Test that, for all combinations of real-world alignments and
        // `repr_packed` values, the extension of a sized `DstLayout` with a
        // DST field correctly computes the trailing offset in the composite
        // layout.

        let aligns = (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap());
        let packs = core::iter::once(None).chain(aligns.clone().map(Some));

        for align in aligns {
            for pack in packs.clone() {
                let base = DstLayout::for_type::<u8>();
                let elem_size = 42;
                let trailing_field_offset = 11;

                let trailing_field = DstLayout {
                    align,
                    size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                        _elem_size: elem_size,
                        _offset: trailing_field_offset,
                    }),
                };

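                // Because the base layout is a single `u8`, the trailing
                // field is placed at `round_up(1, align)`, which is exactly
                // `align`; the composite's trailing-slice offset asserted
                // below is therefore `align + trailing_field_offset`.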
                let composite = base.extend(trailing_field, pack);

                let max_align = pack.unwrap_or(DstLayout::CURRENT_MAX_ALIGN).get();

                let align = align.get().min(max_align);

                assert_eq!(
                    composite,
                    DstLayout {
                        align: NonZeroUsize::new(align).unwrap(),
                        size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                            _elem_size: elem_size,
                            _offset: align + trailing_field_offset,
                        }),
                    }
                )
            }
        }
    }

    /// Tests that calling `pad_to_align` on a sized `DstLayout` adds the
    /// expected amount of trailing padding.
    #[test]
    fn test_dst_layout_pad_to_align_with_sized() {
        // For all valid alignments `align`, construct a one-byte layout aligned
        // to `align`, call `pad_to_align`, and assert that the size of the
        // resulting layout is equal to `align`.
        for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
            let layout = DstLayout { align, size_info: SizeInfo::Sized { _size: 1 } };

            assert_eq!(
                layout.pad_to_align(),
                DstLayout { align, size_info: SizeInfo::Sized { _size: align.get() } }
            );
        }

        // Test explicitly-provided combinations of unpadded and padded
        // counterparts.

        macro_rules! test {
            (unpadded { size: $unpadded_size:expr, align: $unpadded_align:expr }
                => padded { size: $padded_size:expr, align: $padded_align:expr }) => {
                let unpadded = DstLayout {
                    align: NonZeroUsize::new($unpadded_align).unwrap(),
                    size_info: SizeInfo::Sized { _size: $unpadded_size },
                };
                let padded = unpadded.pad_to_align();

                assert_eq!(
                    padded,
                    DstLayout {
                        align: NonZeroUsize::new($padded_align).unwrap(),
                        size_info: SizeInfo::Sized { _size: $padded_size },
                    }
                );
            };
        }

        test!(unpadded { size: 0, align: 4 } => padded { size: 0, align: 4 });
        test!(unpadded { size: 1, align: 4 } => padded { size: 4, align: 4 });
        test!(unpadded { size: 2, align: 4 } => padded { size: 4, align: 4 });
        test!(unpadded { size: 3, align: 4 } => padded { size: 4, align: 4 });
        test!(unpadded { size: 4, align: 4 } => padded { size: 4, align: 4 });
        test!(unpadded { size: 5, align: 4 } => padded { size: 8, align: 4 });
        test!(unpadded { size: 6, align: 4 } => padded { size: 8, align: 4 });
        test!(unpadded { size: 7, align: 4 } => padded { size: 8, align: 4 });
        test!(unpadded { size: 8, align: 4 } => padded { size: 8, align: 4 });

        let current_max_align = DstLayout::CURRENT_MAX_ALIGN.get();

        test!(unpadded { size: 1, align: current_max_align }
            => padded { size: current_max_align, align: current_max_align });

        test!(unpadded { size: current_max_align + 1, align: current_max_align }
            => padded { size: current_max_align * 2, align: current_max_align });
    }

    /// Tests that calling `pad_to_align` on a DST `DstLayout` is a no-op.
    #[test]
    fn test_dst_layout_pad_to_align_with_dst() {
        for align in (0..29).map(|p| NonZeroUsize::new(2usize.pow(p)).unwrap()) {
            for offset in 0..10 {
                for elem_size in 0..10 {
                    let layout = DstLayout {
                        align,
                        size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                            _offset: offset,
                            _elem_size: elem_size,
                        }),
                    };
                    assert_eq!(layout.pad_to_align(), layout);
                }
            }
        }
    }

    // This test takes a long time when running under Miri, so we skip it in
    // that case. This is acceptable because this is a logic test that doesn't
    // attempt to expose UB.
    #[test]
    #[cfg_attr(miri, ignore)]
    fn test_validate_cast_and_convert_metadata() {
        impl From<usize> for SizeInfo {
            fn from(_size: usize) -> SizeInfo {
                SizeInfo::Sized { _size }
            }
        }

        impl From<(usize, usize)> for SizeInfo {
            fn from((_offset, _elem_size): (usize, usize)) -> SizeInfo {
                SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size })
            }
        }

        fn layout<S: Into<SizeInfo>>(s: S, align: usize) -> DstLayout {
            DstLayout { size_info: s.into(), align: NonZeroUsize::new(align).unwrap() }
        }
        /// This macro accepts arguments in the form of:
        ///
        ///     layout(_, _, _).validate(_, _, _), Ok(Some((_, _)))
        ///            |  |  |           |  |  |            |  |
        /// base_size -+  |  |           |  |  |            |  |
        /// align --------+  |           |  |  |            |  |
        /// trailing_size ---+           |  |  |            |  |
        /// addr ------------------------+  |  |            |  |
        /// bytes_len ----------------------+  |            |  |
        /// cast_type -------------------------+            |  |
        /// elems ------------------------------------------+  |
        /// split_at ------------------------------------------+
        ///
        /// `.validate` is shorthand for `.validate_cast_and_convert_metadata`
        /// for brevity.
        ///
        /// Each argument can either be an iterator or a wildcard. Each
        /// wildcarded variable is implicitly replaced by an iterator over a
        /// representative sample of values for that variable. Each `test!`
        /// invocation iterates over every combination of values provided by
        /// each variable's iterator (ie, the cartesian product) and validates
        /// that the results are expected.
        ///
        /// The final argument uses the same syntax, but it has a different
        /// meaning:
        /// - If it is `Ok(pat)`, then the pattern `pat` is supplied to
        ///   `assert_matches!` to validate the computed result for each
        ///   combination of input values.
        /// - If it is `Err(msg)`, then `test!` validates that the call to
        ///   `validate_cast_and_convert_metadata` panics with the given panic
        ///   message.
        ///
        /// Note that the meta-variables that match these variables have the
        /// `tt` type, and some valid expressions are not valid `tt`s (such as
        /// `a..b`). In this case, wrap the expression in parentheses, and it
        /// will become valid `tt`.
        macro_rules! test {
            ($(:$sizes:expr =>)?
                layout($size:tt, $align:tt)
                .validate($addr:tt, $bytes_len:tt, $cast_type:tt), $expect:pat $(,)?
            ) => {
                itertools::iproduct!(
                    test!(@generate_size $size),
                    test!(@generate_align $align),
                    test!(@generate_usize $addr),
                    test!(@generate_usize $bytes_len),
                    test!(@generate_cast_type $cast_type)
                ).for_each(|(size_info, align, addr, bytes_len, cast_type)| {
                    // Temporarily disable the panic hook installed by the test
                    // harness. If we don't do this, all panic messages will be
                    // kept in an internal log. On its own, this isn't a
                    // problem, but if a non-caught panic ever happens (ie, in
                    // code later in this test not in this macro), all of the
                    // previously-buffered messages will be dumped, hiding the
                    // real culprit.
                    let previous_hook = std::panic::take_hook();
                    // I don't understand why, but this seems to be required in
                    // addition to the previous line.
                    std::panic::set_hook(Box::new(|_| {}));
                    let actual = std::panic::catch_unwind(|| {
                        layout(size_info, align).validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
                    }).map_err(|d| {
                        *d.downcast::<&'static str>().expect("expected string panic message").as_ref()
                    });
                    std::panic::set_hook(previous_hook);

                    assert_matches::assert_matches!(
                        actual, $expect,
                        "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?})",
                    );
                });
            };
            (@generate_usize _) => { 0..8 };
            // Generate sizes for both Sized and !Sized types.
            (@generate_size _) => {
                test!(@generate_size (_)).chain(test!(@generate_size (_, _)))
            };
            // Generate sizes for both Sized and !Sized types by chaining
            // specified iterators for each.
            (@generate_size ($sized_sizes:tt | $unsized_sizes:tt)) => {
                test!(@generate_size ($sized_sizes)).chain(test!(@generate_size $unsized_sizes))
            };
            // Generate sizes for Sized types.
            (@generate_size (_)) => { test!(@generate_size (0..8)) };
            (@generate_size ($sizes:expr)) => { $sizes.into_iter().map(Into::<SizeInfo>::into) };
            // Generate sizes for !Sized types.
            (@generate_size ($min_sizes:tt, $elem_sizes:tt)) => {
                itertools::iproduct!(
                    test!(@generate_min_size $min_sizes),
                    test!(@generate_elem_size $elem_sizes)
                ).map(Into::<SizeInfo>::into)
            };
            (@generate_fixed_size _) => { (0..8).into_iter().map(Into::<SizeInfo>::into) };
            (@generate_min_size _) => { 0..8 };
            (@generate_elem_size _) => { 1..8 };
            (@generate_align _) => { [1, 2, 4, 8, 16] };
            (@generate_opt_usize _) => { [None].into_iter().chain((0..8).map(Some).into_iter()) };
            (@generate_cast_type _) => { [_CastType::_Prefix, _CastType::_Suffix] };
            (@generate_cast_type $variant:ident) => { [_CastType::$variant] };
            // Some expressions need to be wrapped in parentheses in order to be
            // valid `tt`s (required by the top match pattern). See the comment
            // below for more details. This arm removes these parentheses to
            // avoid generating an `unused_parens` warning.
            (@$_:ident ($vals:expr)) => { $vals };
            (@$_:ident $vals:expr) => { $vals };
        }
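
        // For example, `test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None))`
        // iterates over every generated `SizeInfo` (both sized and slice-DST),
        // fixes the alignment at 2, pairs it with each odd `addr` in `ODDS`
        // and each `bytes_len` in `0..8`, and asserts that every prefix cast
        // produces `Ok(None)`.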

        const EVENS: [usize; 8] = [0, 2, 4, 6, 8, 10, 12, 14];
        const ODDS: [usize; 8] = [1, 3, 5, 7, 9, 11, 13, 15];

        // base_size is too big for the memory region.
        test!(layout(((1..8) | ((1..8), (1..8))), _).validate(_, [0], _), Ok(None));
        test!(layout(((2..8) | ((2..8), (2..8))), _).validate(_, [1], _), Ok(None));

        // addr is unaligned for prefix cast
        test!(layout(_, [2]).validate(ODDS, _, _Prefix), Ok(None));

        // addr is aligned, but end of buffer is unaligned for suffix cast
        test!(layout(_, [2]).validate(EVENS, ODDS, _Suffix), Ok(None));

        // Unfortunately, these constants cannot easily be used in the
        // implementation of `validate_cast_and_convert_metadata`, since
        // `panic!` consumes a string literal, not an expression.
        //
        // It's important that these messages be in a separate module. If they
        // were at the function's top level, we'd pass them to `test!` as, e.g.,
        // `Err(TRAILING)`, which would run into a subtle Rust footgun - the
        // `TRAILING` identifier would be treated as a pattern to match rather
        // than a value to check for equality.
        mod msgs {
            pub(super) const TRAILING: &str =
                "attempted to cast to slice type with zero-sized element";
            pub(super) const OVERFLOW: &str = "`addr` + `bytes_len` > usize::MAX";
        }

        // casts with ZST trailing element types are unsupported
        test!(layout((_, [0]), _).validate(_, _, _), Err(msgs::TRAILING),);

        // addr + bytes_len must not overflow usize
        test!(layout(_, _).validate([usize::MAX], (1..100), _), Err(msgs::OVERFLOW));
        test!(layout(_, _).validate((1..100), [usize::MAX], _), Err(msgs::OVERFLOW));
        test!(
            layout(_, _).validate(
                [usize::MAX / 2 + 1, usize::MAX],
                [usize::MAX / 2 + 1, usize::MAX],
                _
            ),
            Err(msgs::OVERFLOW)
        );

        // Validates that `validate_cast_and_convert_metadata` satisfies its own
        // documented safety postconditions, and also a few other properties
        // that aren't documented but we want to guarantee anyway.
        fn validate_behavior(
            (layout, addr, bytes_len, cast_type): (DstLayout, usize, usize, _CastType),
        ) {
            if let Some((elems, split_at)) =
                layout.validate_cast_and_convert_metadata(addr, bytes_len, cast_type)
            {
                let (size_info, align) = (layout.size_info, layout.align);
                let debug_str = format!(
                    "layout({size_info:?}, {align}).validate_cast_and_convert_metadata({addr}, {bytes_len}, {cast_type:?}) => ({elems}, {split_at})",
                );

                // If this is a sized type (no trailing slice), then `elems` is
                // meaningless, but in practice we set it to 0. Callers are not
                // allowed to rely on this, but a lot of math is nicer if
                // they're able to, and some callers might accidentally do that.
                let sized = matches!(layout.size_info, SizeInfo::Sized { .. });
                assert!(!(sized && elems != 0), "{}", debug_str);

                let resulting_size = match layout.size_info {
                    SizeInfo::Sized { _size } => _size,
                    SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: offset,
                        _elem_size: elem_size,
                    }) => {
                        let padded_size = |elems| {
                            let without_padding = offset + elems * elem_size;
                            without_padding
                                + util::core_layout::padding_needed_for(without_padding, align)
                        };

                        let resulting_size = padded_size(elems);
                        // Test that `validate_cast_and_convert_metadata`
                        // computed the largest possible value that fits in the
                        // given range.
                        assert!(padded_size(elems + 1) > bytes_len, "{}", debug_str);
                        resulting_size
                    }
                };

                // Test safety postconditions guaranteed by
                // `validate_cast_and_convert_metadata`.
                assert!(resulting_size <= bytes_len, "{}", debug_str);
                match cast_type {
                    _CastType::_Prefix => {
                        assert_eq!(addr % align, 0, "{}", debug_str);
                        assert_eq!(resulting_size, split_at, "{}", debug_str);
                    }
                    _CastType::_Suffix => {
                        assert_eq!(split_at, bytes_len - resulting_size, "{}", debug_str);
                        assert_eq!((addr + split_at) % align, 0, "{}", debug_str);
                    }
                }
            } else {
                let min_size = match layout.size_info {
                    SizeInfo::Sized { _size } => _size,
                    SizeInfo::SliceDst(TrailingSliceLayout { _offset, .. }) => {
                        _offset + util::core_layout::padding_needed_for(_offset, layout.align)
                    }
                };

                // If a cast is invalid, it is either because...
                // 1. there are insufficient bytes at the given region for type:
                let insufficient_bytes = bytes_len < min_size;
                // 2. performing the cast would misalign type:
                let base = match cast_type {
                    _CastType::_Prefix => 0,
                    _CastType::_Suffix => bytes_len,
                };
                let misaligned = (base + addr) % layout.align != 0;

                assert!(insufficient_bytes || misaligned);
            }
        }

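        // Exhaustively drive `validate_behavior` over small layouts: sized
        // sizes in `0..8`, slice-DST element sizes in `1..8`, a handful of
        // power-of-two alignments, crossed with every (addr, bytes_len,
        // cast_type) triple.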
        let sizes = 0..8;
        let elem_sizes = 1..8;
        let size_infos = sizes
            .clone()
            .map(Into::<SizeInfo>::into)
            .chain(itertools::iproduct!(sizes, elem_sizes).map(Into::<SizeInfo>::into));
        let layouts = itertools::iproduct!(size_infos, [1, 2, 4, 8, 16, 32])
            .filter(|(size_info, align)| !matches!(size_info, SizeInfo::Sized { _size } if _size % align != 0))
            .map(|(size_info, align)| layout(size_info, align));
        itertools::iproduct!(layouts, 0..8, 0..8, [_CastType::_Prefix, _CastType::_Suffix])
            .for_each(validate_behavior);
    }

    #[test]
    #[cfg(__INTERNAL_USE_ONLY_NIGHLTY_FEATURES_IN_TESTS)]
    fn test_validate_rust_layout() {
        use core::ptr::NonNull;

        // This test synthesizes pointers with various metadata and uses Rust's
        // built-in APIs to confirm that Rust makes decisions about type layout
        // which are consistent with what we believe is guaranteed by the
        // language. If this test fails, it doesn't just mean our code is wrong
        // - it means we're misunderstanding the language's guarantees.

        #[derive(Debug)]
        struct MacroArgs {
            offset: usize,
            align: NonZeroUsize,
            elem_size: Option<usize>,
        }

        /// # Safety
        ///
        /// `test` promises to only call `addr_of_slice_field` on a `NonNull<T>`
        /// which points to a valid `T`.
        ///
        /// `with_elems` must produce a pointer which points to a valid `T`.
        fn test<T: ?Sized, W: Fn(usize) -> NonNull<T>>(
            args: MacroArgs,
            with_elems: W,
            addr_of_slice_field: Option<fn(NonNull<T>) -> NonNull<u8>>,
        ) {
            let dst = args.elem_size.is_some();
            let layout = {
                let size_info = match args.elem_size {
                    Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
                        _offset: args.offset,
                        _elem_size: elem_size,
                    }),
                    None => SizeInfo::Sized {
                        // Rust only supports types whose sizes are a multiple
                        // of their alignment. If the macro created a type like
                        // this:
                        //
                        //   #[repr(C, align(2))]
                        //   struct Foo([u8; 1]);
                        //
                        // ...then Rust will automatically round the type's size
                        // up to 2.
                        _size: args.offset
                            + util::core_layout::padding_needed_for(args.offset, args.align),
                    },
                };
                DstLayout { size_info, align: args.align }
            };

            for elems in 0..128 {
                let ptr = with_elems(elems);

                if let Some(addr_of_slice_field) = addr_of_slice_field {
                    let slc_field_ptr = addr_of_slice_field(ptr).as_ptr();
                    // SAFETY: Both `slc_field_ptr` and `ptr` are pointers to
                    // the same valid Rust object.
                    let offset: usize =
                        unsafe { slc_field_ptr.byte_offset_from(ptr.as_ptr()).try_into().unwrap() };
                    assert_eq!(offset, args.offset);
                }

                // SAFETY: `ptr` points to a valid `T`.
                let (size, align) = unsafe {
                    (mem::size_of_val_raw(ptr.as_ptr()), mem::align_of_val_raw(ptr.as_ptr()))
                };

                // Avoid expensive allocation when running under Miri.
                let assert_msg = if !cfg!(miri) {
                    format!("\n{args:?}\nsize:{size}, align:{align}")
                } else {
                    String::new()
                };

                let without_padding =
                    args.offset + args.elem_size.map(|elem_size| elems * elem_size).unwrap_or(0);
                assert!(size >= without_padding, "{}", assert_msg);
                assert_eq!(align, args.align.get(), "{}", assert_msg);

                // This encodes the most important part of the test: our
                // understanding of how Rust determines the layout of repr(C)
                // types. Sized repr(C) types are trivial, but DST types have
                // some subtlety. Note that:
                // - For sized types, `without_padding` is just the size of the
                //   type that we constructed for `Foo`. Since we may have
                //   requested a larger alignment, `Foo` may actually be larger
                //   than this, hence `padding_needed_for`.
                // - For unsized types, `without_padding` is dynamically
                //   computed from the offset, the element size, and element
                //   count. We expect that the size of the object should be
                //   `offset + elem_size * elems` rounded up to the next
                //   alignment.
                let expected_size = without_padding
                    + util::core_layout::padding_needed_for(without_padding, args.align);
                assert_eq!(expected_size, size, "{}", assert_msg);

                // For zero-sized element types,
                // `validate_cast_and_convert_metadata` just panics, so we skip
                // testing those types.
                if args.elem_size.map(|elem_size| elem_size > 0).unwrap_or(true) {
                    let addr = ptr.addr().get();
                    let (got_elems, got_split_at) = layout
                        .validate_cast_and_convert_metadata(addr, size, _CastType::_Prefix)
                        .unwrap();
                    // Avoid expensive allocation when running under Miri.
                    let assert_msg = if !cfg!(miri) {
                        format!(
                            "{}\nvalidate_cast_and_convert_metadata({addr}, {size})",
                            assert_msg
                        )
                    } else {
                        String::new()
                    };
                    assert_eq!(got_split_at, size, "{}", assert_msg);
                    if dst {
                        assert!(got_elems >= elems, "{}", assert_msg);
                        if got_elems != elems {
                            // If `validate_cast_and_convert_metadata`
                            // returned more elements than `elems`, that
                            // means that `elems` is not the maximum number
                            // of elements that can fit in `size` - in other
                            // words, there is enough padding at the end of
                            // the value to fit at least one more element.
                            // If we use this metadata to synthesize a
                            // pointer, despite having a different element
                            // count, we still expect it to have the same
                            // size.
                            let got_ptr = with_elems(got_elems);
                            // SAFETY: `got_ptr` is a pointer to a valid `T`.
                            let size_of_got_ptr = unsafe { mem::size_of_val_raw(got_ptr.as_ptr()) };
                            assert_eq!(size_of_got_ptr, size, "{}", assert_msg);
                        }
                    } else {
                        // For sized casts, the returned element value is
                        // technically meaningless, and we don't guarantee any
                        // particular value. In practice, it's always zero.
                        assert_eq!(got_elems, 0, "{}", assert_msg)
                    }
                }
            }
        }

        macro_rules! validate_against_rust {
            ($offset:literal, $align:literal $(, $elem_size:literal)?) => {{
                #[repr(C, align($align))]
                struct Foo([u8; $offset]$(, [[u8; $elem_size]])?);

                let args = MacroArgs {
                    offset: $offset,
                    align: $align.try_into().unwrap(),
                    elem_size: {
                        #[allow(unused)]
                        let ret = None::<usize>;
                        $(let ret = Some($elem_size);)?
                        ret
                    }
                };

                #[repr(C, align($align))]
                struct FooAlign;
                // Create an aligned buffer to use in order to synthesize
                // pointers to `Foo`. We don't ever load values from these
                // pointers - we just do arithmetic on them - so having a "real"
                // block of memory as opposed to a validly-aligned-but-dangling
                // pointer is only necessary to make Miri happy since we run it
                // with "strict provenance" checking enabled.
                let aligned_buf = Align::<_, FooAlign>::new([0u8; 1024]);
                let with_elems = |elems| {
                    let slc = NonNull::slice_from_raw_parts(NonNull::from(&aligned_buf.t), elems);
                    #[allow(clippy::as_conversions)]
                    NonNull::new(slc.as_ptr() as *mut Foo).unwrap()
                };
                let addr_of_slice_field = {
                    #[allow(unused)]
                    let f = None::<fn(NonNull<Foo>) -> NonNull<u8>>;
                    $(
                        // SAFETY: `test` promises to only call `f` with a `ptr`
                        // to a valid `Foo`.
                        let f: Option<fn(NonNull<Foo>) -> NonNull<u8>> = Some(|ptr: NonNull<Foo>| unsafe {
                            NonNull::new(ptr::addr_of_mut!((*ptr.as_ptr()).1)).unwrap().cast::<u8>()
                        });
                        let _ = $elem_size;
                    )?
                    f
                };

                test::<Foo, _>(args, with_elems, addr_of_slice_field);
            }};
        }
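
        // For example, `validate_against_rust!(1, 2, 3)` expands (roughly) to:
        //
        //   #[repr(C, align(2))]
        //   struct Foo([u8; 1], [[u8; 3]]);
        //
        // with `args = MacroArgs { offset: 1, align: 2, elem_size: Some(3) }`,
        // while the two-argument form `validate_against_rust!(1, 2)` produces
        // the sized variant `struct Foo([u8; 1]);` with `elem_size: None`.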

        // Every permutation of:
        // - offset in [0, 4]
        // - align in [1, 16]
        // - elem_size in [0, 4] (plus no elem_size)
        validate_against_rust!(0, 1);
        validate_against_rust!(0, 1, 0);
        validate_against_rust!(0, 1, 1);
        validate_against_rust!(0, 1, 2);
        validate_against_rust!(0, 1, 3);
        validate_against_rust!(0, 1, 4);
        validate_against_rust!(0, 2);
        validate_against_rust!(0, 2, 0);
        validate_against_rust!(0, 2, 1);
        validate_against_rust!(0, 2, 2);
        validate_against_rust!(0, 2, 3);
        validate_against_rust!(0, 2, 4);
        validate_against_rust!(0, 4);
        validate_against_rust!(0, 4, 0);
        validate_against_rust!(0, 4, 1);
        validate_against_rust!(0, 4, 2);
        validate_against_rust!(0, 4, 3);
        validate_against_rust!(0, 4, 4);
        validate_against_rust!(0, 8);
        validate_against_rust!(0, 8, 0);
        validate_against_rust!(0, 8, 1);
        validate_against_rust!(0, 8, 2);
        validate_against_rust!(0, 8, 3);
        validate_against_rust!(0, 8, 4);
        validate_against_rust!(0, 16);
        validate_against_rust!(0, 16, 0);
        validate_against_rust!(0, 16, 1);
        validate_against_rust!(0, 16, 2);
        validate_against_rust!(0, 16, 3);
        validate_against_rust!(0, 16, 4);
        validate_against_rust!(1, 1);
        validate_against_rust!(1, 1, 0);
        validate_against_rust!(1, 1, 1);
        validate_against_rust!(1, 1, 2);
        validate_against_rust!(1, 1, 3);
        validate_against_rust!(1, 1, 4);
        validate_against_rust!(1, 2);
        validate_against_rust!(1, 2, 0);
        validate_against_rust!(1, 2, 1);
        validate_against_rust!(1, 2, 2);
        validate_against_rust!(1, 2, 3);
        validate_against_rust!(1, 2, 4);
        validate_against_rust!(1, 4);
        validate_against_rust!(1, 4, 0);
        validate_against_rust!(1, 4, 1);
        validate_against_rust!(1, 4, 2);
        validate_against_rust!(1, 4, 3);
        validate_against_rust!(1, 4, 4);
        validate_against_rust!(1, 8);
        validate_against_rust!(1, 8, 0);
        validate_against_rust!(1, 8, 1);
        validate_against_rust!(1, 8, 2);
        validate_against_rust!(1, 8, 3);
        validate_against_rust!(1, 8, 4);
        validate_against_rust!(1, 16);
        validate_against_rust!(1, 16, 0);
        validate_against_rust!(1, 16, 1);
        validate_against_rust!(1, 16, 2);
        validate_against_rust!(1, 16, 3);
        validate_against_rust!(1, 16, 4);
        validate_against_rust!(2, 1);
        validate_against_rust!(2, 1, 0);
        validate_against_rust!(2, 1, 1);
        validate_against_rust!(2, 1, 2);
        validate_against_rust!(2, 1, 3);
        validate_against_rust!(2, 1, 4);
        validate_against_rust!(2, 2);
        validate_against_rust!(2, 2, 0);
        validate_against_rust!(2, 2, 1);
        validate_against_rust!(2, 2, 2);
        validate_against_rust!(2, 2, 3);
        validate_against_rust!(2, 2, 4);
        validate_against_rust!(2, 4);
        validate_against_rust!(2, 4, 0);
        validate_against_rust!(2, 4, 1);
        validate_against_rust!(2, 4, 2);
        validate_against_rust!(2, 4, 3);
        validate_against_rust!(2, 4, 4);
        validate_against_rust!(2, 8);
        validate_against_rust!(2, 8, 0);
        validate_against_rust!(2, 8, 1);
        validate_against_rust!(2, 8, 2);
        validate_against_rust!(2, 8, 3);
        validate_against_rust!(2, 8, 4);
        validate_against_rust!(2, 16);
        validate_against_rust!(2, 16, 0);
        validate_against_rust!(2, 16, 1);
        validate_against_rust!(2, 16, 2);
        validate_against_rust!(2, 16, 3);
        validate_against_rust!(2, 16, 4);
        validate_against_rust!(3, 1);
        validate_against_rust!(3, 1, 0);
        validate_against_rust!(3, 1, 1);
        validate_against_rust!(3, 1, 2);
        validate_against_rust!(3, 1, 3);
        validate_against_rust!(3, 1, 4);
        validate_against_rust!(3, 2);
        validate_against_rust!(3, 2, 0);
        validate_against_rust!(3, 2, 1);
        validate_against_rust!(3, 2, 2);
        validate_against_rust!(3, 2, 3);
        validate_against_rust!(3, 2, 4);
        validate_against_rust!(3, 4);
        validate_against_rust!(3, 4, 0);
        validate_against_rust!(3, 4, 1);
        validate_against_rust!(3, 4, 2);
        validate_against_rust!(3, 4, 3);
        validate_against_rust!(3, 4, 4);
        validate_against_rust!(3, 8);
        validate_against_rust!(3, 8, 0);
        validate_against_rust!(3, 8, 1);
        validate_against_rust!(3, 8, 2);
        validate_against_rust!(3, 8, 3);
        validate_against_rust!(3, 8, 4);
        validate_against_rust!(3, 16);
        validate_against_rust!(3, 16, 0);
        validate_against_rust!(3, 16, 1);
        validate_against_rust!(3, 16, 2);
        validate_against_rust!(3, 16, 3);
        validate_against_rust!(3, 16, 4);
        validate_against_rust!(4, 1);
        validate_against_rust!(4, 1, 0);
        validate_against_rust!(4, 1, 1);
        validate_against_rust!(4, 1, 2);
        validate_against_rust!(4, 1, 3);
        validate_against_rust!(4, 1, 4);
        validate_against_rust!(4, 2);
        validate_against_rust!(4, 2, 0);
        validate_against_rust!(4, 2, 1);
        validate_against_rust!(4, 2, 2);
        validate_against_rust!(4, 2, 3);
        validate_against_rust!(4, 2, 4);
        validate_against_rust!(4, 4);
        validate_against_rust!(4, 4, 0);
        validate_against_rust!(4, 4, 1);
        validate_against_rust!(4, 4, 2);
        validate_against_rust!(4, 4, 3);
        validate_against_rust!(4, 4, 4);
        validate_against_rust!(4, 8);
        validate_against_rust!(4, 8, 0);
        validate_against_rust!(4, 8, 1);
        validate_against_rust!(4, 8, 2);
        validate_against_rust!(4, 8, 3);
        validate_against_rust!(4, 8, 4);
        validate_against_rust!(4, 16);
        validate_against_rust!(4, 16, 0);
        validate_against_rust!(4, 16, 1);
        validate_against_rust!(4, 16, 2);
        validate_against_rust!(4, 16, 3);
        validate_against_rust!(4, 16, 4);
    }

    #[test]
    fn test_known_layout() {
        // Test that `$ty` and `ManuallyDrop<$ty>` have the expected layout.
        // Test that `PhantomData<$ty>` has the same layout as `()` regardless
        // of `$ty`.
        macro_rules! test {
            ($ty:ty, $expect:expr) => {
                let expect = $expect;
                assert_eq!(<$ty as KnownLayout>::LAYOUT, expect);
                assert_eq!(<ManuallyDrop<$ty> as KnownLayout>::LAYOUT, expect);
                assert_eq!(<PhantomData<$ty> as KnownLayout>::LAYOUT, <() as KnownLayout>::LAYOUT);
            };
        }
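
        // These equalities lean on documented language guarantees:
        // `ManuallyDrop<T>` is guaranteed to have the same size and alignment
        // as `T`, and `PhantomData<T>` is a zero-sized type with alignment 1,
        // i.e., the same layout as `()`.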

        let layout = |offset, align, _trailing_slice_elem_size| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: match _trailing_slice_elem_size {
                None => SizeInfo::Sized { _size: offset },
                Some(elem_size) => SizeInfo::SliceDst(TrailingSliceLayout {
                    _offset: offset,
                    _elem_size: elem_size,
                }),
            },
        };

        test!((), layout(0, 1, None));
        test!(u8, layout(1, 1, None));
        // Use `align_of` because `u64` alignment may be smaller than 8 on some
        // platforms.
        test!(u64, layout(8, mem::align_of::<u64>(), None));
        test!(AU64, layout(8, 8, None));

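        // `Option<&'static ()>` relies on the guaranteed null-pointer
        // optimization: it has the same size and alignment as a bare
        // reference, which is what makes its layout match `usize`'s here.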
        test!(Option<&'static ()>, usize::LAYOUT);

        test!([()], layout(0, 1, Some(0)));
        test!([u8], layout(0, 1, Some(1)));
        test!(str, layout(0, 1, Some(1)));
    }

    #[cfg(feature = "derive")]
    #[test]
    fn test_known_layout_derive() {
        // In this and other files (`late_compile_pass.rs`,
        // `mid_compile_pass.rs`, and `struct.rs`), we test success and failure
        // modes of `derive(KnownLayout)` for the following combination of
        // properties:
        //
        // +------------+--------------------------------------+-----------+
        // |            |      trailing field properties       |           |
        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |------------+----------+----------------+----------+-----------|
        // |          N |        N |              N |        N |      KL00 |
        // |          N |        N |              N |        Y |      KL01 |
        // |          N |        N |              Y |        N |      KL02 |
        // |          N |        N |              Y |        Y |      KL03 |
        // |          N |        Y |              N |        N |      KL04 |
        // |          N |        Y |              N |        Y |      KL05 |
        // |          N |        Y |              Y |        N |      KL06 |
        // |          N |        Y |              Y |        Y |      KL07 |
        // |          Y |        N |              N |        N |      KL08 |
        // |          Y |        N |              N |        Y |      KL09 |
        // |          Y |        N |              Y |        N |      KL10 |
        // |          Y |        N |              Y |        Y |      KL11 |
        // |          Y |        Y |              N |        N |      KL12 |
        // |          Y |        Y |              N |        Y |      KL13 |
        // |          Y |        Y |              Y |        N |      KL14 |
        // |          Y |        Y |              Y |        Y |      KL15 |
        // +------------+----------+----------------+----------+-----------+

        struct NotKnownLayout<T = ()> {
            _t: T,
        }

        #[derive(KnownLayout)]
        #[repr(C)]
        struct AlignSize<const ALIGN: usize, const SIZE: usize>
        where
            elain::Align<ALIGN>: elain::Alignment,
        {
            _align: elain::Align<ALIGN>,
            _size: [u8; SIZE],
        }

        type AU16 = AlignSize<2, 2>;
        type AU32 = AlignSize<4, 4>;

        fn _assert_kl<T: ?Sized + KnownLayout>(_: &T) {}

        let sized_layout = |align, size| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::Sized { _size: size },
        };

        let unsized_layout = |align, elem_size, offset| DstLayout {
            align: NonZeroUsize::new(align).unwrap(),
            size_info: SizeInfo::SliceDst(TrailingSliceLayout {
                _offset: offset,
                _elem_size: elem_size,
            }),
        };

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              N |        Y |      KL01 |
        #[derive(KnownLayout)]
        #[allow(dead_code)] // fields are never read
        struct KL01(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01>();

        assert_eq!(<KL01 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01 as KnownLayout>::LAYOUT, sized_layout(4, 8));

        // ...with `align(N)`:
        #[derive(KnownLayout)]
        #[repr(align(64))]
        #[allow(dead_code)] // fields are never read
        struct KL01Align(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Align>();

        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[derive(KnownLayout)]
        #[repr(packed)]
        #[allow(dead_code)] // fields are never read
        struct KL01Packed(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        let expected = DstLayout::for_type::<KL01Packed>();

        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01Packed as KnownLayout>::LAYOUT, sized_layout(1, 6));

        // ...with `packed(N)`:
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        #[allow(dead_code)] // fields are never read
        struct KL01PackedN(NotKnownLayout<AU32>, NotKnownLayout<AU16>);

        assert_impl_all!(KL01PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL01PackedN>();

        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL01PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        N |              Y |        Y |      KL03 |
        #[derive(KnownLayout)]
        #[allow(dead_code)] // fields are never read
        struct KL03(NotKnownLayout, u8);

        let expected = DstLayout::for_type::<KL03>();

        assert_eq!(<KL03 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03 as KnownLayout>::LAYOUT, sized_layout(1, 1));

        // ... with `align(N)`
        #[derive(KnownLayout)]
        #[repr(align(64))]
        #[allow(dead_code)] // fields are never read
        struct KL03Align(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Align>();

        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ... with `packed`:
        #[derive(KnownLayout)]
        #[repr(packed)]
        #[allow(dead_code)] // fields are never read
        struct KL03Packed(NotKnownLayout<AU32>, u8);

        let expected = DstLayout::for_type::<KL03Packed>();

        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03Packed as KnownLayout>::LAYOUT, sized_layout(1, 5));

        // ... with `packed(N)`
        #[derive(KnownLayout)]
        #[repr(packed(2))]
        #[allow(dead_code)] // fields are never read
        struct KL03PackedN(NotKnownLayout<AU32>, u8);

        assert_impl_all!(KL03PackedN: KnownLayout);

        let expected = DstLayout::for_type::<KL03PackedN>();

        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL03PackedN as KnownLayout>::LAYOUT, sized_layout(2, 6));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              N |        Y |      KL05 |
        #[derive(KnownLayout)]
        #[allow(dead_code)] // fields are never read
        struct KL05<T>(u8, T);

        fn _test_kl05<T>(t: T) -> impl KnownLayout {
            KL05(0u8, t)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          N |        Y |              Y |        Y |      KL07 |
        #[derive(KnownLayout)]
        #[allow(dead_code)] // fields are never read
        struct KL07<T: KnownLayout>(u8, T);

        fn _test_kl07<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL07(0u8, t);
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        N |      KL10 |
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL10(NotKnownLayout<AU32>, [u8]);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 4));

        // ...with `align(N)`:
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL10Align(NotKnownLayout<AU32>, [u8]);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), None)
            .extend(<[u8] as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Align as KnownLayout>::LAYOUT, unsized_layout(64, 1, 4));

        // ...with `packed`:
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL10Packed(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10Packed as KnownLayout>::LAYOUT, unsized_layout(1, 1, 4));

        // ...with `packed(N)`:
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL10PackedN(NotKnownLayout<AU32>, [u8]);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU32>>(), repr_packed)
            .extend(<[u8] as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL10PackedN as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        N |              Y |        Y |      KL11 |
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL11(NotKnownLayout<AU64>, u8);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11 as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11 as KnownLayout>::LAYOUT, sized_layout(8, 16));

        // ...with `align(N)`:
        #[derive(KnownLayout)]
        #[repr(C, align(64))]
        struct KL11Align(NotKnownLayout<AU64>, u8);

        let repr_align = NonZeroUsize::new(64);

        let expected = DstLayout::new_zst(repr_align)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), None)
            .extend(<u8 as KnownLayout>::LAYOUT, None)
            .pad_to_align();

        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Align as KnownLayout>::LAYOUT, sized_layout(64, 64));

        // ...with `packed`:
        #[derive(KnownLayout)]
        #[repr(C, packed)]
        struct KL11Packed(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(1);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11Packed as KnownLayout>::LAYOUT, sized_layout(1, 9));

        // ...with `packed(N)`:
        #[derive(KnownLayout)]
        #[repr(C, packed(2))]
        struct KL11PackedN(NotKnownLayout<AU64>, u8);

        let repr_packed = NonZeroUsize::new(2);

        let expected = DstLayout::new_zst(None)
            .extend(DstLayout::for_type::<NotKnownLayout<AU64>>(), repr_packed)
            .extend(<u8 as KnownLayout>::LAYOUT, repr_packed)
            .pad_to_align();

        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, expected);
        assert_eq!(<KL11PackedN as KnownLayout>::LAYOUT, sized_layout(2, 10));

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        N |      KL14 |
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL14<T: ?Sized + KnownLayout>(u8, T);

        fn _test_kl14<T: ?Sized + KnownLayout>(kl: &KL14<T>) {
            _assert_kl(kl)
        }

        // | `repr(C)`? | generic? | `KnownLayout`? | `Sized`? | Type Name |
        // |          Y |        Y |              Y |        Y |      KL15 |
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KL15<T: KnownLayout>(u8, T);

        fn _test_kl15<T: KnownLayout>(t: T) -> impl KnownLayout {
            let _ = KL15(0u8, t);
        }

        // Test a variety of combinations of field types:
        // - ()
        // - u8
        // - AU16
        // - [()]
        // - [u8]
        // - [AU16]

        #[allow(clippy::upper_case_acronyms)]
        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLTU<T, U: ?Sized>(T, U);

        assert_eq!(<KLTU<(), ()> as KnownLayout>::LAYOUT, sized_layout(1, 0));

        assert_eq!(<KLTU<(), u8> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<(), AU16> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<(), [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 0));

        assert_eq!(<KLTU<(), [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));

        assert_eq!(<KLTU<(), [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 0));

        assert_eq!(<KLTU<u8, ()> as KnownLayout>::LAYOUT, sized_layout(1, 1));

        assert_eq!(<KLTU<u8, u8> as KnownLayout>::LAYOUT, sized_layout(1, 2));

        assert_eq!(<KLTU<u8, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<u8, [()]> as KnownLayout>::LAYOUT, unsized_layout(1, 0, 1));

        assert_eq!(<KLTU<u8, [u8]> as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));

        assert_eq!(<KLTU<u8, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));

        assert_eq!(<KLTU<AU16, ()> as KnownLayout>::LAYOUT, sized_layout(2, 2));

        assert_eq!(<KLTU<AU16, u8> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, AU16> as KnownLayout>::LAYOUT, sized_layout(2, 4));

        assert_eq!(<KLTU<AU16, [()]> as KnownLayout>::LAYOUT, unsized_layout(2, 0, 2));

        assert_eq!(<KLTU<AU16, [u8]> as KnownLayout>::LAYOUT, unsized_layout(2, 1, 2));

        assert_eq!(<KLTU<AU16, [AU16]> as KnownLayout>::LAYOUT, unsized_layout(2, 2, 2));

        // Test a variety of field counts.

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF0;

        assert_eq!(<KLF0 as KnownLayout>::LAYOUT, sized_layout(1, 0));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF1([u8]);

        assert_eq!(<KLF1 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 0));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF2(NotKnownLayout<u8>, [u8]);

        assert_eq!(<KLF2 as KnownLayout>::LAYOUT, unsized_layout(1, 1, 1));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF3(NotKnownLayout<u8>, NotKnownLayout<AU16>, [u8]);

        assert_eq!(<KLF3 as KnownLayout>::LAYOUT, unsized_layout(2, 1, 4));

        #[derive(KnownLayout)]
        #[repr(C)]
        struct KLF4(NotKnownLayout<u8>, NotKnownLayout<AU16>, NotKnownLayout<AU32>, [u8]);

        assert_eq!(<KLF4 as KnownLayout>::LAYOUT, unsized_layout(4, 1, 8));
    }

    #[test]
    fn test_object_safety() {
        fn _takes_from_zeroes(_: &dyn FromZeroes) {}
        fn _takes_from_bytes(_: &dyn FromBytes) {}
        fn _takes_unaligned(_: &dyn Unaligned) {}
    }

    #[test]
    fn test_from_zeroes_only() {
        // Test types that implement `FromZeroes` but not `FromBytes`.

        assert!(!bool::new_zeroed());
        assert_eq!(char::new_zeroed(), '\0');

        #[cfg(feature = "alloc")]
        {
            assert_eq!(bool::new_box_zeroed(), Box::new(false));
            assert_eq!(char::new_box_zeroed(), Box::new('\0'));

            assert_eq!(bool::new_box_slice_zeroed(3).as_ref(), [false, false, false]);
            assert_eq!(char::new_box_slice_zeroed(3).as_ref(), ['\0', '\0', '\0']);

            assert_eq!(bool::new_vec_zeroed(3).as_ref(), [false, false, false]);
            assert_eq!(char::new_vec_zeroed(3).as_ref(), ['\0', '\0', '\0']);
        }

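        // `str` implements `FromZeroes` (but not `FromBytes`): an all-zeroes
        // byte sequence is valid UTF-8 (a run of NUL characters), so a
        // `&mut str` can soundly be zeroed in place.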
        let mut string = "hello".to_string();
        let s: &mut str = string.as_mut();
        assert_eq!(s, "hello");
        s.zero();
        assert_eq!(s, "\0\0\0\0\0");
    }

    #[test]
    fn test_read_write() {
        const VAL: u64 = 0x12345678;
        #[cfg(target_endian = "big")]
        const VAL_BYTES: [u8; 8] = VAL.to_be_bytes();
        #[cfg(target_endian = "little")]
        const VAL_BYTES: [u8; 8] = VAL.to_le_bytes();

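        // `read_from` and `write_to` reinterpret memory in native byte order,
        // which is why `VAL_BYTES` is computed with `to_be_bytes`/`to_le_bytes`
        // to match the platform's endianness.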
        // Test `FromBytes::{read_from, read_from_prefix, read_from_suffix}`.

        assert_eq!(u64::read_from(&VAL_BYTES[..]), Some(VAL));
        // The first 8 bytes are from `VAL_BYTES` and the second 8 bytes are all
        // zeroes.
        let bytes_with_prefix: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        assert_eq!(u64::read_from_prefix(&bytes_with_prefix[..]), Some(VAL));
        assert_eq!(u64::read_from_suffix(&bytes_with_prefix[..]), Some(0));
        // The first 8 bytes are all zeroes and the second 8 bytes are from
        // `VAL_BYTES`
        let bytes_with_suffix: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(u64::read_from_prefix(&bytes_with_suffix[..]), Some(0));
        assert_eq!(u64::read_from_suffix(&bytes_with_suffix[..]), Some(VAL));

        // Test `AsBytes::{write_to, write_to_prefix, write_to_suffix}`.

        let mut bytes = [0u8; 8];
        assert_eq!(VAL.write_to(&mut bytes[..]), Some(()));
        assert_eq!(bytes, VAL_BYTES);
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_prefix(&mut bytes[..]), Some(()));
        let want: [u8; 16] = transmute!([VAL_BYTES, [0; 8]]);
        assert_eq!(bytes, want);
        let mut bytes = [0u8; 16];
        assert_eq!(VAL.write_to_suffix(&mut bytes[..]), Some(()));
        let want: [u8; 16] = transmute!([[0; 8], VAL_BYTES]);
        assert_eq!(bytes, want);
    }

    #[test]
    fn test_transmute() {
        // Test that memory is transmuted as expected.
        let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
        let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
        let x: [[u8; 2]; 4] = transmute!(array_of_u8s);
        assert_eq!(x, array_of_arrays);
        let x: [u8; 8] = transmute!(array_of_arrays);
        assert_eq!(x, array_of_u8s);

        // Test that the source expression's value is forgotten rather than
        // dropped.
        #[derive(AsBytes)]
        #[repr(transparent)]
        struct PanicOnDrop(());
        impl Drop for PanicOnDrop {
            fn drop(&mut self) {
                panic!("PanicOnDrop::drop");
            }
        }
        #[allow(clippy::let_unit_value)]
        let _: () = transmute!(PanicOnDrop(()));

        // Test that `transmute!` is legal in a const context.
        const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
        const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
        const X: [[u8; 2]; 4] = transmute!(ARRAY_OF_U8S);
        assert_eq!(X, ARRAY_OF_ARRAYS);
    }

    #[test]
    fn test_transmute_ref() {
        // Test that memory is transmuted as expected.
        let array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
        let array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
        let x: &[[u8; 2]; 4] = transmute_ref!(&array_of_u8s);
        assert_eq!(*x, array_of_arrays);
        let x: &[u8; 8] = transmute_ref!(&array_of_arrays);
        assert_eq!(*x, array_of_u8s);

        // Test that `transmute_ref!` is legal in a const context.
        const ARRAY_OF_U8S: [u8; 8] = [0u8, 1, 2, 3, 4, 5, 6, 7];
        const ARRAY_OF_ARRAYS: [[u8; 2]; 4] = [[0, 1], [2, 3], [4, 5], [6, 7]];
        #[allow(clippy::redundant_static_lifetimes)]
        const X: &'static [[u8; 2]; 4] = transmute_ref!(&ARRAY_OF_U8S);
        assert_eq!(*X, ARRAY_OF_ARRAYS);

        // Test that it's legal to transmute a reference while shrinking the
        // lifetime (note that `X` has the lifetime `'static`).
        let x: &[u8; 8] = transmute_ref!(X);
        assert_eq!(*x, ARRAY_OF_U8S);

        // Test that `transmute_ref!` supports decreasing alignment.
        let u = AU64(0);
        let array = [0, 0, 0, 0, 0, 0, 0, 0];
        let x: &[u8; 8] = transmute_ref!(&u);
        assert_eq!(*x, array);

        // Test that a mutable reference can be turned into an immutable one.
        let mut x = 0u8;
        #[allow(clippy::useless_transmute)]
        let y: &u8 = transmute_ref!(&mut x);
        assert_eq!(*y, 0);
    }

    #[test]
    fn test_transmute_mut() {
        // Test that memory is transmuted as expected.
        let mut array_of_u8s = [0u8, 1, 2, 3, 4, 5, 6, 7];
        let mut array_of_arrays = [[0, 1], [2, 3], [4, 5], [6, 7]];
        let x: &mut [[u8; 2]; 4] = transmute_mut!(&mut array_of_u8s);
        assert_eq!(*x, array_of_arrays);
        let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
        assert_eq!(*x, array_of_u8s);

        {
            // Test that it's legal to transmute a reference while shrinking the
            // lifetime.
            let x: &mut [u8; 8] = transmute_mut!(&mut array_of_arrays);
            assert_eq!(*x, array_of_u8s);
        }
        // Test that `transmute_mut!` supports decreasing alignment.
        let mut u = AU64(0);
        let array = [0, 0, 0, 0, 0, 0, 0, 0];
        let x: &[u8; 8] = transmute_mut!(&mut u);
        assert_eq!(*x, array);

        // Test that a mutable reference can be turned into an immutable one.
        let mut x = 0u8;
        #[allow(clippy::useless_transmute)]
        let y: &u8 = transmute_mut!(&mut x);
        assert_eq!(*y, 0);
    }

    #[test]
    fn test_macros_evaluate_args_once() {
        let mut ctr = 0;
        let _: usize = transmute!({
            ctr += 1;
            0usize
        });
        assert_eq!(ctr, 1);

        let mut ctr = 0;
        let _: &usize = transmute_ref!({
            ctr += 1;
            &0usize
        });
        assert_eq!(ctr, 1);
    }

    #[test]
    fn test_include_value() {
        const AS_U32: u32 = include_value!("../testdata/include_value/data");
        assert_eq!(AS_U32, u32::from_ne_bytes([b'a', b'b', b'c', b'd']));
        const AS_I32: i32 = include_value!("../testdata/include_value/data");
        assert_eq!(AS_I32, i32::from_ne_bytes([b'a', b'b', b'c', b'd']));
    }

    #[test]
    fn test_address() {
        // Test that the `Deref` and `DerefMut` implementations return a
        // reference which points to the right region of memory.

        let buf = [0];
        let r = Ref::<_, u8>::new(&buf[..]).unwrap();
        let buf_ptr = buf.as_ptr();
        let deref_ptr: *const u8 = r.deref();
        assert_eq!(buf_ptr, deref_ptr);

        let buf = [0];
        let r = Ref::<_, [u8]>::new_slice(&buf[..]).unwrap();
        let buf_ptr = buf.as_ptr();
        let deref_ptr = r.deref().as_ptr();
        assert_eq!(buf_ptr, deref_ptr);
    }

    // Verify that values written to a `Ref` are properly shared between the
    // typed and untyped representations, that reads via `deref` and `read`
    // behave the same, and that writes via `deref_mut` and `write` behave the
    // same.
    fn test_new_helper(mut r: Ref<&mut [u8], AU64>) {
        // assert that the value starts at 0
        assert_eq!(*r, AU64(0));
        assert_eq!(r.read(), AU64(0));

        // Assert that values written to the typed value are reflected in the
        // byte slice.
        const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
        *r = VAL1;
        assert_eq!(r.bytes(), &VAL1.to_bytes());
        *r = AU64(0);
        r.write(VAL1);
        assert_eq!(r.bytes(), &VAL1.to_bytes());

        // Assert that values written to the byte slice are reflected in the
        // typed value.
        const VAL2: AU64 = AU64(!VAL1.0); // different from `VAL1`
        r.bytes_mut().copy_from_slice(&VAL2.to_bytes()[..]);
        assert_eq!(*r, VAL2);
        assert_eq!(r.read(), VAL2);
    }

    // Verify that values written to a `Ref` are properly shared between the
    // typed and untyped representations; pass a value with `typed_len` `AU64`s
    // backed by an array of `typed_len * 8` bytes.
    fn test_new_helper_slice(mut r: Ref<&mut [u8], [AU64]>, typed_len: usize) {
        // Assert that the value starts out zeroed.
        assert_eq!(&*r, vec![AU64(0); typed_len].as_slice());

        // Check the backing storage is the exact same slice.
        let untyped_len = typed_len * 8;
        assert_eq!(r.bytes().len(), untyped_len);
        assert_eq!(r.bytes().as_ptr(), r.as_ptr().cast::<u8>());

        // Assert that values written to the typed value are reflected in the
        // byte slice.
        const VAL1: AU64 = AU64(0xFF00FF00FF00FF00);
        for typed in &mut *r {
            *typed = VAL1;
        }
        assert_eq!(r.bytes(), VAL1.0.to_ne_bytes().repeat(typed_len).as_slice());

        // Assert that values written to the byte slice are reflected in the
        // typed value.
        const VAL2: AU64 = AU64(!VAL1.0); // different from VAL1
        r.bytes_mut().copy_from_slice(&VAL2.0.to_ne_bytes().repeat(typed_len));
        assert!(r.iter().copied().all(|x| x == VAL2));
    }
7014
7015 // Verify that values written to a `Ref` are properly shared between the
7016 // typed and untyped representations, that reads via `deref` and `read`
7017 // behave the same, and that writes via `deref_mut` and `write` behave the
7018 // same.
test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>)7019 fn test_new_helper_unaligned(mut r: Ref<&mut [u8], [u8; 8]>) {
7020 // assert that the value starts at 0
7021 assert_eq!(*r, [0; 8]);
7022 assert_eq!(r.read(), [0; 8]);
7023
7024 // Assert that values written to the typed value are reflected in the
7025 // byte slice.
7026 const VAL1: [u8; 8] = [0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00];
7027 *r = VAL1;
7028 assert_eq!(r.bytes(), &VAL1);
7029 *r = [0; 8];
7030 r.write(VAL1);
7031 assert_eq!(r.bytes(), &VAL1);
7032
7033 // Assert that values written to the byte slice are reflected in the
7034 // typed value.
7035 const VAL2: [u8; 8] = [0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF, 0x00, 0xFF]; // different from VAL1
7036 r.bytes_mut().copy_from_slice(&VAL2[..]);
7037 assert_eq!(*r, VAL2);
7038 assert_eq!(r.read(), VAL2);
7039 }
7040
7041 // Verify that values written to a `Ref` are properly shared between the
7042 // typed and untyped representations; pass a value with `len` `u8`s backed
7043 // by an array of `len` bytes.
test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize)7044 fn test_new_helper_slice_unaligned(mut r: Ref<&mut [u8], [u8]>, len: usize) {
7045 // Assert that the value starts out zeroed.
7046 assert_eq!(&*r, vec![0u8; len].as_slice());
7047
7048 // Check the backing storage is the exact same slice.
7049 assert_eq!(r.bytes().len(), len);
7050 assert_eq!(r.bytes().as_ptr(), r.as_ptr());
7051
7052 // Assert that values written to the typed value are reflected in the
7053 // byte slice.
7054 let mut expected_bytes = [0xFF, 0x00].iter().copied().cycle().take(len).collect::<Vec<_>>();
7055 r.copy_from_slice(&expected_bytes);
7056 assert_eq!(r.bytes(), expected_bytes.as_slice());
7057
7058 // Assert that values written to the byte slice are reflected in the
7059 // typed value.
7060 for byte in &mut expected_bytes {
7061             *byte = !*byte; // different from the old `expected_bytes`
7062 }
7063 r.bytes_mut().copy_from_slice(&expected_bytes);
7064 assert_eq!(&*r, expected_bytes.as_slice());
7065 }
7066
7067 #[test]
7068     fn test_new_aligned_sized() {
7069 // Test that a properly-aligned, properly-sized buffer works for new,
7070 // new_from_prefix, and new_from_suffix, and that new_from_prefix and
7071 // new_from_suffix return empty slices. Test that a properly-aligned
7072 // buffer whose length is a multiple of the element size works for
7073 // new_slice. Test that xxx_zeroed behaves the same, and zeroes the
7074 // memory.
7075
7076 // A buffer with an alignment of 8.
7077 let mut buf = Align::<[u8; 8], AU64>::default();
7078 // `buf.t` should be aligned to 8, so this should always succeed.
7079 test_new_helper(Ref::<_, AU64>::new(&mut buf.t[..]).unwrap());
7080 let ascending: [u8; 8] = (0..8).collect::<Vec<_>>().try_into().unwrap();
7081 buf.t = ascending;
7082 test_new_helper(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).unwrap());
7083 {
7084 // In a block so that `r` and `suffix` don't live too long.
7085 buf.set_default();
7086 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
7087 assert!(suffix.is_empty());
7088 test_new_helper(r);
7089 }
7090 {
7091 buf.t = ascending;
7092 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
7093 assert!(suffix.is_empty());
7094 test_new_helper(r);
7095 }
7096 {
7097 buf.set_default();
7098 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
7099 assert!(prefix.is_empty());
7100 test_new_helper(r);
7101 }
7102 {
7103 buf.t = ascending;
7104 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
7105 assert!(prefix.is_empty());
7106 test_new_helper(r);
7107 }
7108
7109 // A buffer with alignment 8 and length 24. We choose this length very
7110 // intentionally: if we instead used length 16, then the prefix and
7111 // suffix lengths would be identical. In the past, we used length 16,
7112 // which resulted in this test failing to discover the bug uncovered in
7113 // #506.
7114 let mut buf = Align::<[u8; 24], AU64>::default();
7115 // `buf.t` should be aligned to 8 and have a length which is a multiple
7116 // of `size_of::<AU64>()`, so this should always succeed.
7117 test_new_helper_slice(Ref::<_, [AU64]>::new_slice(&mut buf.t[..]).unwrap(), 3);
7118 let ascending: [u8; 24] = (0..24).collect::<Vec<_>>().try_into().unwrap();
7119 // 16 ascending bytes followed by 8 zeros.
7120 let mut ascending_prefix = ascending;
7121 ascending_prefix[16..].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7122 // 8 zeros followed by 16 ascending bytes.
7123 let mut ascending_suffix = ascending;
7124 ascending_suffix[..8].copy_from_slice(&[0, 0, 0, 0, 0, 0, 0, 0]);
7125 test_new_helper_slice(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).unwrap(), 3);
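        // A minimal illustrative sketch (an addition, not part of the
        // original test suite): with the distinct bytes of `ascending`, a
        // `new_slice_from_prefix` that was mistakenly implemented in terms of
        // the suffix would observe the tail of the buffer rather than its
        // head, which is exactly the class of bug referenced above (#506).
        {
            buf.t = ascending;
            let (r, _) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
            assert_eq!(r.bytes(), &ascending[..8]);
        }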
7126
7127 {
7128 buf.t = ascending_suffix;
7129 let (r, suffix) = Ref::<_, [AU64]>::new_slice_from_prefix(&mut buf.t[..], 1).unwrap();
7130 assert_eq!(suffix, &ascending[8..]);
7131 test_new_helper_slice(r, 1);
7132 }
7133 {
7134 buf.t = ascending_suffix;
7135 let (r, suffix) =
7136 Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 1).unwrap();
7137 assert_eq!(suffix, &ascending[8..]);
7138 test_new_helper_slice(r, 1);
7139 }
7140 {
7141 buf.t = ascending_prefix;
7142 let (prefix, r) = Ref::<_, [AU64]>::new_slice_from_suffix(&mut buf.t[..], 1).unwrap();
7143 assert_eq!(prefix, &ascending[..16]);
7144 test_new_helper_slice(r, 1);
7145 }
7146 {
7147 buf.t = ascending_prefix;
7148 let (prefix, r) =
7149 Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 1).unwrap();
7150 assert_eq!(prefix, &ascending[..16]);
7151 test_new_helper_slice(r, 1);
7152 }
7153 }
7154
7155 #[test]
7156     fn test_new_unaligned_sized() {
7157 // Test that an unaligned, properly-sized buffer works for
7158 // `new_unaligned`, `new_unaligned_from_prefix`, and
7159         // `new_unaligned_from_suffix`, and that `new_unaligned_from_prefix`
7160         // and `new_unaligned_from_suffix` return empty slices. Test that an
7161         // unaligned buffer whose length is a multiple of the element size
7162         // works for `new_slice_unaligned`. Test that `xxx_zeroed` behaves the
7163         // same, and zeroes the memory.
7164
7165 let mut buf = [0u8; 8];
7166 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned(&mut buf[..]).unwrap());
7167 buf = [0xFFu8; 8];
7168 test_new_helper_unaligned(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf[..]).unwrap());
7169 {
7170 // In a block so that `r` and `suffix` don't live too long.
7171 buf = [0u8; 8];
7172 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7173 assert!(suffix.is_empty());
7174 test_new_helper_unaligned(r);
7175 }
7176 {
7177 buf = [0xFFu8; 8];
7178 let (r, suffix) =
7179 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7180 assert!(suffix.is_empty());
7181 test_new_helper_unaligned(r);
7182 }
7183 {
7184 buf = [0u8; 8];
7185 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7186 assert!(prefix.is_empty());
7187 test_new_helper_unaligned(r);
7188 }
7189 {
7190 buf = [0xFFu8; 8];
7191 let (prefix, r) =
7192 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7193 assert!(prefix.is_empty());
7194 test_new_helper_unaligned(r);
7195 }
7196
7197 let mut buf = [0u8; 16];
7198         // `new_slice_unaligned` imposes no alignment requirement, and any
7199         // length is a multiple of `size_of::<u8>()`, so this should always succeed.
7200 test_new_helper_slice_unaligned(
7201 Ref::<_, [u8]>::new_slice_unaligned(&mut buf[..]).unwrap(),
7202 16,
7203 );
7204 buf = [0xFFu8; 16];
7205 test_new_helper_slice_unaligned(
7206 Ref::<_, [u8]>::new_slice_unaligned_zeroed(&mut buf[..]).unwrap(),
7207 16,
7208 );
7209
7210 {
7211 buf = [0u8; 16];
7212 let (r, suffix) =
7213 Ref::<_, [u8]>::new_slice_unaligned_from_prefix(&mut buf[..], 8).unwrap();
7214 assert_eq!(suffix, [0; 8]);
7215 test_new_helper_slice_unaligned(r, 8);
7216 }
7217 {
7218 buf = [0xFFu8; 16];
7219 let (r, suffix) =
7220 Ref::<_, [u8]>::new_slice_unaligned_from_prefix_zeroed(&mut buf[..], 8).unwrap();
7221 assert_eq!(suffix, [0xFF; 8]);
7222 test_new_helper_slice_unaligned(r, 8);
7223 }
7224 {
7225 buf = [0u8; 16];
7226 let (prefix, r) =
7227 Ref::<_, [u8]>::new_slice_unaligned_from_suffix(&mut buf[..], 8).unwrap();
7228 assert_eq!(prefix, [0; 8]);
7229 test_new_helper_slice_unaligned(r, 8);
7230 }
7231 {
7232 buf = [0xFFu8; 16];
7233 let (prefix, r) =
7234 Ref::<_, [u8]>::new_slice_unaligned_from_suffix_zeroed(&mut buf[..], 8).unwrap();
7235 assert_eq!(prefix, [0xFF; 8]);
7236 test_new_helper_slice_unaligned(r, 8);
7237 }
7238 }
7239
7240 #[test]
7241     fn test_new_oversized() {
7242 // Test that a properly-aligned, overly-sized buffer works for
7243 // `new_from_prefix` and `new_from_suffix`, and that they return the
7244 // remainder and prefix of the slice respectively. Test that
7245 // `xxx_zeroed` behaves the same, and zeroes the memory.
7246
7247 let mut buf = Align::<[u8; 16], AU64>::default();
7248 {
7249 // In a block so that `r` and `suffix` don't live too long. `buf.t`
7250 // should be aligned to 8, so this should always succeed.
7251 let (r, suffix) = Ref::<_, AU64>::new_from_prefix(&mut buf.t[..]).unwrap();
7252 assert_eq!(suffix.len(), 8);
7253 test_new_helper(r);
7254 }
7255 {
7256 buf.t = [0xFFu8; 16];
7257 // `buf.t` should be aligned to 8, so this should always succeed.
7258 let (r, suffix) = Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).unwrap();
7259 // Assert that the suffix wasn't zeroed.
7260 assert_eq!(suffix, &[0xFFu8; 8]);
7261 test_new_helper(r);
7262 }
7263 {
7264 buf.set_default();
7265 // `buf.t` should be aligned to 8, so this should always succeed.
7266 let (prefix, r) = Ref::<_, AU64>::new_from_suffix(&mut buf.t[..]).unwrap();
7267 assert_eq!(prefix.len(), 8);
7268 test_new_helper(r);
7269 }
7270 {
7271 buf.t = [0xFFu8; 16];
7272 // `buf.t` should be aligned to 8, so this should always succeed.
7273 let (prefix, r) = Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).unwrap();
7274 // Assert that the prefix wasn't zeroed.
7275 assert_eq!(prefix, &[0xFFu8; 8]);
7276 test_new_helper(r);
7277 }
7278 }
7279
7280 #[test]
7281     fn test_new_unaligned_oversized() {
7282         // Test that an unaligned, overly-sized buffer works for
7283 // `new_unaligned_from_prefix` and `new_unaligned_from_suffix`, and that
7284 // they return the remainder and prefix of the slice respectively. Test
7285 // that `xxx_zeroed` behaves the same, and zeroes the memory.
7286
7287 let mut buf = [0u8; 16];
7288 {
7289 // In a block so that `r` and `suffix` don't live too long.
7290 let (r, suffix) = Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&mut buf[..]).unwrap();
7291 assert_eq!(suffix.len(), 8);
7292 test_new_helper_unaligned(r);
7293 }
7294 {
7295 buf = [0xFFu8; 16];
7296 let (r, suffix) =
7297 Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf[..]).unwrap();
7298 // Assert that the suffix wasn't zeroed.
7299 assert_eq!(suffix, &[0xFF; 8]);
7300 test_new_helper_unaligned(r);
7301 }
7302 {
7303 buf = [0u8; 16];
7304 let (prefix, r) = Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&mut buf[..]).unwrap();
7305 assert_eq!(prefix.len(), 8);
7306 test_new_helper_unaligned(r);
7307 }
7308 {
7309 buf = [0xFFu8; 16];
7310 let (prefix, r) =
7311 Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf[..]).unwrap();
7312 // Assert that the prefix wasn't zeroed.
7313 assert_eq!(prefix, &[0xFF; 8]);
7314 test_new_helper_unaligned(r);
7315 }
7316 }
7317
7318 #[test]
7319     fn test_ref_from_mut_from() {
7320         // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` success cases.
7321         // Exhaustive coverage for these methods is provided by the `Ref` tests above,
7322 // which these helper methods defer to.
7323
7324 let mut buf =
7325 Align::<[u8; 16], AU64>::new([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);
7326
7327 assert_eq!(
7328 AU64::ref_from(&buf.t[8..]).unwrap().0.to_ne_bytes(),
7329 [8, 9, 10, 11, 12, 13, 14, 15]
7330 );
7331 let suffix = AU64::mut_from(&mut buf.t[8..]).unwrap();
7332 suffix.0 = 0x0101010101010101;
7333         // The `[u8; 9]` is not half the size of the full buffer, which would catch
7334 // `from_prefix` having the same implementation as `from_suffix` (issues #506, #511).
7335 assert_eq!(<[u8; 9]>::ref_from_suffix(&buf.t[..]).unwrap(), &[7u8, 1, 1, 1, 1, 1, 1, 1, 1]);
7336 let suffix = AU64::mut_from_suffix(&mut buf.t[1..]).unwrap();
7337 suffix.0 = 0x0202020202020202;
7338 <[u8; 10]>::mut_from_suffix(&mut buf.t[..]).unwrap()[0] = 42;
7339 assert_eq!(<[u8; 9]>::ref_from_prefix(&buf.t[..]).unwrap(), &[0, 1, 2, 3, 4, 5, 42, 7, 2]);
7340 <[u8; 2]>::mut_from_prefix(&mut buf.t[..]).unwrap()[1] = 30;
7341 assert_eq!(buf.t, [0, 30, 2, 3, 4, 5, 42, 7, 2, 2, 2, 2, 2, 2, 2, 2]);
7342 }
7343
7344 #[test]
7345     fn test_ref_from_mut_from_error() {
7346 // Test `FromBytes::{ref_from, mut_from}{,_prefix,_suffix}` error cases.
7347
7348 // Fail because the buffer is too large.
7349 let mut buf = Align::<[u8; 16], AU64>::default();
7350 // `buf.t` should be aligned to 8, so only the length check should fail.
7351 assert!(AU64::ref_from(&buf.t[..]).is_none());
7352 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7353 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7354 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7355
7356 // Fail because the buffer is too small.
7357 let mut buf = Align::<[u8; 4], AU64>::default();
7358 assert!(AU64::ref_from(&buf.t[..]).is_none());
7359 assert!(AU64::mut_from(&mut buf.t[..]).is_none());
7360 assert!(<[u8; 8]>::ref_from(&buf.t[..]).is_none());
7361 assert!(<[u8; 8]>::mut_from(&mut buf.t[..]).is_none());
7362 assert!(AU64::ref_from_prefix(&buf.t[..]).is_none());
7363 assert!(AU64::mut_from_prefix(&mut buf.t[..]).is_none());
7364 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7365 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7366 assert!(<[u8; 8]>::ref_from_prefix(&buf.t[..]).is_none());
7367 assert!(<[u8; 8]>::mut_from_prefix(&mut buf.t[..]).is_none());
7368 assert!(<[u8; 8]>::ref_from_suffix(&buf.t[..]).is_none());
7369 assert!(<[u8; 8]>::mut_from_suffix(&mut buf.t[..]).is_none());
7370
7371 // Fail because the alignment is insufficient.
7372 let mut buf = Align::<[u8; 13], AU64>::default();
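        // (A sketch of the arithmetic, stated as an assumption: `buf.t` is
        // 8-aligned, so `&buf.t[1..]` is only 1-aligned; for the prefix and
        // suffix variants below, the length checks can pass while the
        // alignment checks cannot.)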
7373 assert!(AU64::ref_from(&buf.t[1..]).is_none());
7374 assert!(AU64::mut_from(&mut buf.t[1..]).is_none());
7377 assert!(AU64::ref_from_prefix(&buf.t[1..]).is_none());
7378 assert!(AU64::mut_from_prefix(&mut buf.t[1..]).is_none());
7379 assert!(AU64::ref_from_suffix(&buf.t[..]).is_none());
7380 assert!(AU64::mut_from_suffix(&mut buf.t[..]).is_none());
7381 }
7382
7383 #[test]
7384 #[allow(clippy::cognitive_complexity)]
7385     fn test_new_error() {
7386 // Fail because the buffer is too large.
7387
7388 // A buffer with an alignment of 8.
7389 let mut buf = Align::<[u8; 16], AU64>::default();
7390 // `buf.t` should be aligned to 8, so only the length check should fail.
7391 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7392 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7393 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7394 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7395
7396 // Fail because the buffer is too small.
7397
7398 // A buffer with an alignment of 8.
7399 let mut buf = Align::<[u8; 4], AU64>::default();
7400 // `buf.t` should be aligned to 8, so only the length check should fail.
7401 assert!(Ref::<_, AU64>::new(&buf.t[..]).is_none());
7402 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[..]).is_none());
7403 assert!(Ref::<_, [u8; 8]>::new_unaligned(&buf.t[..]).is_none());
7404 assert!(Ref::<_, [u8; 8]>::new_unaligned_zeroed(&mut buf.t[..]).is_none());
7405 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[..]).is_none());
7406 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[..]).is_none());
7407 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7408 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7409 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix(&buf.t[..]).is_none());
7410 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_prefix_zeroed(&mut buf.t[..]).is_none());
7411 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix(&buf.t[..]).is_none());
7412 assert!(Ref::<_, [u8; 8]>::new_unaligned_from_suffix_zeroed(&mut buf.t[..]).is_none());
7413
7414 // Fail because the length is not a multiple of the element size.
7415
7416 let mut buf = Align::<[u8; 12], AU64>::default();
7417 // `buf.t` has length 12, but element size is 8.
7418 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[..]).is_none());
7419 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[..]).is_none());
7420 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned(&buf.t[..]).is_none());
7421 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_zeroed(&mut buf.t[..]).is_none());
7422
7423 // Fail because the buffer is too short.
7424 let mut buf = Align::<[u8; 12], AU64>::default();
7425 // `buf.t` has length 12, but the element size is 8 (and we're expecting
7426 // two of them).
7427 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], 2).is_none());
7428 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], 2).is_none());
7429 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], 2).is_none());
7430 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], 2).is_none());
7431 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], 2).is_none());
7432 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(&mut buf.t[..], 2)
7433 .is_none());
7434 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], 2).is_none());
7435 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(&mut buf.t[..], 2)
7436 .is_none());
7437
7438 // Fail because the alignment is insufficient.
7439
7440 // A buffer with an alignment of 8. An odd buffer size is chosen so that
7441 // the last byte of the buffer has odd alignment.
7442 let mut buf = Align::<[u8; 13], AU64>::default();
7443 // Slicing from 1, we get a buffer with size 12 (so the length check
7444 // should succeed) but an alignment of only 1, which is insufficient.
7445 assert!(Ref::<_, AU64>::new(&buf.t[1..]).is_none());
7446 assert!(Ref::<_, AU64>::new_zeroed(&mut buf.t[1..]).is_none());
7447 assert!(Ref::<_, AU64>::new_from_prefix(&buf.t[1..]).is_none());
7448 assert!(Ref::<_, AU64>::new_from_prefix_zeroed(&mut buf.t[1..]).is_none());
7449 assert!(Ref::<_, [AU64]>::new_slice(&buf.t[1..]).is_none());
7450 assert!(Ref::<_, [AU64]>::new_slice_zeroed(&mut buf.t[1..]).is_none());
7451 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[1..], 1).is_none());
7452 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[1..], 1).is_none());
7453 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[1..], 1).is_none());
7454 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[1..], 1).is_none());
7455 // Slicing is unnecessary here because `new_from_suffix[_zeroed]` use
7456 // the suffix of the slice, which has odd alignment.
7457 assert!(Ref::<_, AU64>::new_from_suffix(&buf.t[..]).is_none());
7458 assert!(Ref::<_, AU64>::new_from_suffix_zeroed(&mut buf.t[..]).is_none());
7459
7460 // Fail due to arithmetic overflow.
7461
7462 let mut buf = Align::<[u8; 16], AU64>::default();
7463 let unreasonable_len = usize::MAX / mem::size_of::<AU64>() + 1;
7464 assert!(Ref::<_, [AU64]>::new_slice_from_prefix(&buf.t[..], unreasonable_len).is_none());
7465 assert!(Ref::<_, [AU64]>::new_slice_from_prefix_zeroed(&mut buf.t[..], unreasonable_len)
7466 .is_none());
7467 assert!(Ref::<_, [AU64]>::new_slice_from_suffix(&buf.t[..], unreasonable_len).is_none());
7468 assert!(Ref::<_, [AU64]>::new_slice_from_suffix_zeroed(&mut buf.t[..], unreasonable_len)
7469 .is_none());
7470 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix(&buf.t[..], unreasonable_len)
7471 .is_none());
7472 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_prefix_zeroed(
7473 &mut buf.t[..],
7474 unreasonable_len
7475 )
7476 .is_none());
7477 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix(&buf.t[..], unreasonable_len)
7478 .is_none());
7479 assert!(Ref::<_, [[u8; 8]]>::new_slice_unaligned_from_suffix_zeroed(
7480 &mut buf.t[..],
7481 unreasonable_len
7482 )
7483 .is_none());
7484 }
7485
7486 // Tests for ensuring that, if a ZST is passed into a slice-like function,
7487 // we always panic. Since these tests need to be separate per-function, and
7488 // they tend to take up a lot of space, we generate them using a macro in a
7489 // submodule instead. The submodule ensures that we can just re-use the name
7490 // of the function under test for the name of the test itself.
7491 mod test_zst_panics {
7492 macro_rules! zst_test {
7493 ($name:ident($($tt:tt)*), $constructor_in_panic_msg:tt) => {
7494 #[test]
7495 #[should_panic = concat!("Ref::", $constructor_in_panic_msg, " called on a zero-sized type")]
7496 fn $name() {
7497 let mut buffer = [0u8];
7498 let r = $crate::Ref::<_, [()]>::$name(&mut buffer[..], $($tt)*);
7499 unreachable!("should have panicked, got {:?}", r);
7500 }
7501 }
7502 }
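        // For reference, a rough sketch of what one invocation below expands
        // to (illustrative, not the exact token-for-token expansion):
        //
        //     #[test]
        //     #[should_panic = "Ref::new_slice called on a zero-sized type"]
        //     fn new_slice() {
        //         let mut buffer = [0u8];
        //         let r = crate::Ref::<_, [()]>::new_slice(&mut buffer[..]);
        //         unreachable!("should have panicked, got {:?}", r);
        //     }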
7503 zst_test!(new_slice(), "new_slice");
7504 zst_test!(new_slice_zeroed(), "new_slice");
7505 zst_test!(new_slice_from_prefix(1), "new_slice");
7506 zst_test!(new_slice_from_prefix_zeroed(1), "new_slice");
7507 zst_test!(new_slice_from_suffix(1), "new_slice");
7508 zst_test!(new_slice_from_suffix_zeroed(1), "new_slice");
7509 zst_test!(new_slice_unaligned(), "new_slice_unaligned");
7510 zst_test!(new_slice_unaligned_zeroed(), "new_slice_unaligned");
7511 zst_test!(new_slice_unaligned_from_prefix(1), "new_slice_unaligned");
7512 zst_test!(new_slice_unaligned_from_prefix_zeroed(1), "new_slice_unaligned");
7513 zst_test!(new_slice_unaligned_from_suffix(1), "new_slice_unaligned");
7514 zst_test!(new_slice_unaligned_from_suffix_zeroed(1), "new_slice_unaligned");
7515 }
7516
7517 #[test]
7518     fn test_as_bytes_methods() {
7519 /// Run a series of tests by calling `AsBytes` methods on `t`.
7520 ///
7521 /// `bytes` is the expected byte sequence returned from `t.as_bytes()`
7522 /// before `t` has been modified. `post_mutation` is the expected
7523 /// sequence returned from `t.as_bytes()` after `t.as_bytes_mut()[0]`
7524 /// has had its bits flipped (by applying `^= 0xFF`).
7525 ///
7526 /// `N` is the size of `t` in bytes.
7527 fn test<T: FromBytes + AsBytes + Debug + Eq + ?Sized, const N: usize>(
7528 t: &mut T,
7529 bytes: &[u8],
7530 post_mutation: &T,
7531 ) {
7532 // Test that we can access the underlying bytes, and that we get the
7533 // right bytes and the right number of bytes.
7534 assert_eq!(t.as_bytes(), bytes);
7535
7536 // Test that changes to the underlying byte slices are reflected in
7537 // the original object.
7538 t.as_bytes_mut()[0] ^= 0xFF;
7539 assert_eq!(t, post_mutation);
7540 t.as_bytes_mut()[0] ^= 0xFF;
7541
7542 // `write_to` rejects slices that are too small or too large.
7543 assert_eq!(t.write_to(&mut vec![0; N - 1][..]), None);
7544 assert_eq!(t.write_to(&mut vec![0; N + 1][..]), None);
7545
7546 // `write_to` works as expected.
7547 let mut bytes = [0; N];
7548 assert_eq!(t.write_to(&mut bytes[..]), Some(()));
7549 assert_eq!(bytes, t.as_bytes());
7550
7551 // `write_to_prefix` rejects slices that are too small.
7552 assert_eq!(t.write_to_prefix(&mut vec![0; N - 1][..]), None);
7553
7554 // `write_to_prefix` works with exact-sized slices.
7555 let mut bytes = [0; N];
7556 assert_eq!(t.write_to_prefix(&mut bytes[..]), Some(()));
7557 assert_eq!(bytes, t.as_bytes());
7558
7559 // `write_to_prefix` works with too-large slices, and any bytes past
7560 // the prefix aren't modified.
7561 let mut too_many_bytes = vec![0; N + 1];
7562 too_many_bytes[N] = 123;
7563 assert_eq!(t.write_to_prefix(&mut too_many_bytes[..]), Some(()));
7564 assert_eq!(&too_many_bytes[..N], t.as_bytes());
7565 assert_eq!(too_many_bytes[N], 123);
7566
7567 // `write_to_suffix` rejects slices that are too small.
7568 assert_eq!(t.write_to_suffix(&mut vec![0; N - 1][..]), None);
7569
7570 // `write_to_suffix` works with exact-sized slices.
7571 let mut bytes = [0; N];
7572 assert_eq!(t.write_to_suffix(&mut bytes[..]), Some(()));
7573 assert_eq!(bytes, t.as_bytes());
7574
7575 // `write_to_suffix` works with too-large slices, and any bytes
7576 // before the suffix aren't modified.
7577 let mut too_many_bytes = vec![0; N + 1];
7578 too_many_bytes[0] = 123;
7579 assert_eq!(t.write_to_suffix(&mut too_many_bytes[..]), Some(()));
7580 assert_eq!(&too_many_bytes[1..], t.as_bytes());
7581 assert_eq!(too_many_bytes[0], 123);
7582 }
7583
7584 #[derive(Debug, Eq, PartialEq, FromZeroes, FromBytes, AsBytes)]
7585 #[repr(C)]
7586 struct Foo {
7587 a: u32,
7588 b: Wrapping<u32>,
7589 c: Option<NonZeroU32>,
7590 }
7591
7592 let expected_bytes: Vec<u8> = if cfg!(target_endian = "little") {
7593 vec![1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0]
7594 } else {
7595 vec![0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 0]
7596 };
7597 let post_mutation_expected_a =
7598 if cfg!(target_endian = "little") { 0x00_00_00_FE } else { 0xFF_00_00_01 };
7599 test::<_, 12>(
7600 &mut Foo { a: 1, b: Wrapping(2), c: None },
7601 expected_bytes.as_bytes(),
7602 &Foo { a: post_mutation_expected_a, b: Wrapping(2), c: None },
7603 );
7604 test::<_, 3>(
7605 Unsized::from_mut_slice(&mut [1, 2, 3]),
7606 &[1, 2, 3],
7607 Unsized::from_mut_slice(&mut [0xFE, 2, 3]),
7608 );
7609 }
7610
7611 #[test]
7612     fn test_array() {
7613 #[derive(FromZeroes, FromBytes, AsBytes)]
7614 #[repr(C)]
7615 struct Foo {
7616 a: [u16; 33],
7617 }
7618
7619 let foo = Foo { a: [0xFFFF; 33] };
7620 let expected = [0xFFu8; 66];
7621 assert_eq!(foo.as_bytes(), &expected[..]);
7622 }
7623
7624 #[test]
7625     fn test_display_debug() {
7626 let buf = Align::<[u8; 8], u64>::default();
7627 let r = Ref::<_, u64>::new(&buf.t[..]).unwrap();
7628 assert_eq!(format!("{}", r), "0");
7629 assert_eq!(format!("{:?}", r), "Ref(0)");
7630
7631 let buf = Align::<[u8; 8], u64>::default();
7632 let r = Ref::<_, [u64]>::new_slice(&buf.t[..]).unwrap();
7633 assert_eq!(format!("{:?}", r), "Ref([0])");
7634 }
7635
7636 #[test]
7637     fn test_eq() {
7638 let buf1 = 0_u64;
7639 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7640 let buf2 = 0_u64;
7641 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7642 assert_eq!(r1, r2);
7643 }
7644
7645 #[test]
7646     fn test_ne() {
7647 let buf1 = 0_u64;
7648 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7649 let buf2 = 1_u64;
7650 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7651 assert_ne!(r1, r2);
7652 }
7653
7654 #[test]
7655     fn test_ord() {
7656 let buf1 = 0_u64;
7657 let r1 = Ref::<_, u64>::new(buf1.as_bytes()).unwrap();
7658 let buf2 = 1_u64;
7659 let r2 = Ref::<_, u64>::new(buf2.as_bytes()).unwrap();
7660 assert!(r1 < r2);
7661 }
7662
7663 #[test]
7664     fn test_new_zeroed() {
7665 assert!(!bool::new_zeroed());
7666 assert_eq!(u64::new_zeroed(), 0);
7667 // This test exists in order to exercise unsafe code, especially when
7668 // running under Miri.
7669 #[allow(clippy::unit_cmp)]
7670 {
7671 assert_eq!(<()>::new_zeroed(), ());
7672 }
7673 }
7674
7675 #[test]
7676     fn test_transparent_packed_generic_struct() {
7677 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7678 #[repr(transparent)]
7679 #[allow(dead_code)] // for the unused fields
7680 struct Foo<T> {
7681 _t: T,
7682 _phantom: PhantomData<()>,
7683 }
7684
7685 assert_impl_all!(Foo<u32>: FromZeroes, FromBytes, AsBytes);
7686 assert_impl_all!(Foo<u8>: Unaligned);
7687
7688 #[derive(AsBytes, FromZeroes, FromBytes, Unaligned)]
7689 #[repr(packed)]
7690 #[allow(dead_code)] // for the unused fields
7691 struct Bar<T, U> {
7692 _t: T,
7693 _u: U,
7694 }
7695
7696 assert_impl_all!(Bar<u8, AU64>: FromZeroes, FromBytes, AsBytes, Unaligned);
7697 }
7698
7699 #[test]
7700     fn test_impls() {
7701 use core::borrow::Borrow;
7702
7703         // A trait for types that can supply test cases for testing
7704 // `TryFromBytes::is_bit_valid`. All types passed to `assert_impls!`
7705 // must implement this trait; that macro uses it to generate runtime
7706 // tests for `TryFromBytes` impls.
7707 //
7708 // All `T: FromBytes` types are provided with a blanket impl. Other
7709         // types must implement `TryFromBytesTestable` directly (i.e., using
7710 // `impl_try_from_bytes_testable!`).
7711 trait TryFromBytesTestable {
7712 fn with_passing_test_cases<F: Fn(&Self)>(f: F);
7713 fn with_failing_test_cases<F: Fn(&[u8])>(f: F);
7714 }
7715
7716 impl<T: FromBytes> TryFromBytesTestable for T {
7717 fn with_passing_test_cases<F: Fn(&Self)>(f: F) {
7718 // Test with a zeroed value.
7719 f(&Self::new_zeroed());
7720
7721 let ffs = {
7722 let mut t = Self::new_zeroed();
7723 let ptr: *mut T = &mut t;
7724 // SAFETY: `T: FromBytes`
7725 unsafe { ptr::write_bytes(ptr.cast::<u8>(), 0xFF, mem::size_of::<T>()) };
7726 t
7727 };
7728
7729 // Test with a value initialized with 0xFF.
7730 f(&ffs);
7731 }
7732
7733 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {}
7734 }
7735
7736 // Implements `TryFromBytesTestable`.
7737 macro_rules! impl_try_from_bytes_testable {
7738 // Base case for recursion (when the list of types has run out).
7739 (=> @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {};
7740 // Implements for type(s) with no type parameters.
7741 ($ty:ty $(,$tys:ty)* => @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7742 impl TryFromBytesTestable for $ty {
7743 impl_try_from_bytes_testable!(
7744 @methods @success $($success_case),*
7745 $(, @failure $($failure_case),*)?
7746 );
7747 }
7748 impl_try_from_bytes_testable!($($tys),* => @success $($success_case),* $(, @failure $($failure_case),*)?);
7749 };
7750 // Implements for multiple types with no type parameters.
7751 ($($($ty:ty),* => @success $($success_case:expr), * $(, @failure $($failure_case:expr),*)?;)*) => {
7752 $(
7753 impl_try_from_bytes_testable!($($ty),* => @success $($success_case),* $(, @failure $($failure_case),*)*);
7754 )*
7755 };
7756 // Implements only the methods; caller must invoke this from inside
7757 // an impl block.
7758 (@methods @success $($success_case:expr),* $(, @failure $($failure_case:expr),*)?) => {
7759 fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
7760 $(
7761 _f($success_case.borrow());
7762 )*
7763 }
7764
7765 fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
7766 $($(
7767 // `unused_qualifications` is spuriously triggered on
7768 // `Option::<Self>::None`.
7769 #[allow(unused_qualifications)]
7770 let case = $failure_case.as_bytes();
7771 _f(case.as_bytes());
7772 )*)?
7773 }
7774 };
7775 }
7776
7777 // Note that these impls are only for types which are not `FromBytes`.
7778 // `FromBytes` types are covered by a preceding blanket impl.
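        // As a rough sketch (illustrative, not the exact expansion), the
        // `bool` arm of the invocation below generates:
        //
        //     impl TryFromBytesTestable for bool {
        //         fn with_passing_test_cases<F: Fn(&Self)>(_f: F) {
        //             _f(true.borrow());
        //             _f(false.borrow());
        //         }
        //         fn with_failing_test_cases<F: Fn(&[u8])>(_f: F) {
        //             _f(2u8.as_bytes());
        //             _f(3u8.as_bytes());
        //             _f(0xFFu8.as_bytes());
        //         }
        //     }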
7779 impl_try_from_bytes_testable!(
7780 bool => @success true, false,
7781 @failure 2u8, 3u8, 0xFFu8;
7782 char => @success '\u{0}', '\u{D7FF}', '\u{E000}', '\u{10FFFF}',
7783 @failure 0xD800u32, 0xDFFFu32, 0x110000u32;
7784 str => @success "", "hello", "❤️",
7785 @failure [0, 159, 146, 150];
7786 [u8] => @success [], [0, 1, 2];
7787 NonZeroU8, NonZeroI8, NonZeroU16, NonZeroI16, NonZeroU32,
7788 NonZeroI32, NonZeroU64, NonZeroI64, NonZeroU128, NonZeroI128,
7789 NonZeroUsize, NonZeroIsize
7790 => @success Self::new(1).unwrap(),
7791 // Doing this instead of `0` ensures that we always satisfy
7792 // the size and alignment requirements of `Self` (whereas
7793 // `0` may be any integer type with a different size or
7794 // alignment than some `NonZeroXxx` types).
7795 @failure Option::<Self>::None;
7796 [bool]
7797 => @success [true, false], [false, true],
7798 @failure [2u8], [3u8], [0xFFu8], [0u8, 1u8, 2u8];
7799 );
7800
7801         // Asserts that `$ty` implements each `$trait` and doesn't implement any
7802 // `!$trait`. Note that all `$trait`s must come before any `!$trait`s.
7803 //
7804 // For `T: TryFromBytes`, uses `TryFromBytesTestable` to test success
7805 // and failure cases for `TryFromBytes::is_bit_valid`.
7806 macro_rules! assert_impls {
7807 ($ty:ty: TryFromBytes) => {
7808 <$ty as TryFromBytesTestable>::with_passing_test_cases(|val| {
7809 let c = Ptr::from(val);
7810 // SAFETY:
7811                 //   - Since `val` is a normal reference, `c` is guaranteed to
7812 // be aligned, to point to a single allocation, and to
7813 // have a size which doesn't overflow `isize`.
7814 // - Since `val` is a valid `$ty`, `c`'s referent satisfies
7815 // the bit validity constraints of `is_bit_valid`, which
7816 // are a superset of the bit validity constraints of
7817 // `$ty`.
7818 let res = unsafe { <$ty as TryFromBytes>::is_bit_valid(c) };
7819 assert!(res, "{}::is_bit_valid({:?}): got false, expected true", stringify!($ty), val);
7820
7821 // TODO(#5): In addition to testing `is_bit_valid`, test the
7822 // methods built on top of it. This would both allow us to
7823 // test their implementations and actually convert the bytes
7824 // to `$ty`, giving Miri a chance to catch if this is
7825                 // unsound (i.e., if our `is_bit_valid` impl is buggy).
7826 //
7827 // The following code was tried, but it doesn't work because
7828 // a) some types are not `AsBytes` and, b) some types are
7829 // not `Sized`.
7830 //
7831 // let r = <$ty as TryFromBytes>::try_from_ref(val.as_bytes()).unwrap();
7832 // assert_eq!(r, &val);
7833 // let r = <$ty as TryFromBytes>::try_from_mut(val.as_bytes_mut()).unwrap();
7834 // assert_eq!(r, &mut val);
7835 // let v = <$ty as TryFromBytes>::try_read_from(val.as_bytes()).unwrap();
7836 // assert_eq!(v, val);
7837 });
7838 #[allow(clippy::as_conversions)]
7839 <$ty as TryFromBytesTestable>::with_failing_test_cases(|c| {
7840 let res = <$ty as TryFromBytes>::try_from_ref(c);
7841 assert!(res.is_none(), "{}::is_bit_valid({:?}): got true, expected false", stringify!($ty), c);
7842 });
7843
7844 #[allow(dead_code)]
7845 const _: () = { static_assertions::assert_impl_all!($ty: TryFromBytes); };
7846 };
7847 ($ty:ty: $trait:ident) => {
7848 #[allow(dead_code)]
7849 const _: () = { static_assertions::assert_impl_all!($ty: $trait); };
7850 };
7851 ($ty:ty: !$trait:ident) => {
7852 #[allow(dead_code)]
7853 const _: () = { static_assertions::assert_not_impl_any!($ty: $trait); };
7854 };
7855 ($ty:ty: $($trait:ident),* $(,)? $(!$negative_trait:ident),*) => {
7856 $(
7857 assert_impls!($ty: $trait);
7858 )*
7859
7860 $(
7861 assert_impls!($ty: !$negative_trait);
7862 )*
7863 };
7864 }
7865
7866 // NOTE: The negative impl assertions here are not necessarily
7867 // prescriptive. They merely serve as change detectors to make sure
7868 // we're aware of what trait impls are getting added with a given
7869 // change. Of course, some impls would be invalid (e.g., `bool:
7870 // FromBytes`), and so this change detection is very important.
7871
7872 assert_impls!((): KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7873 assert_impls!(u8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7874 assert_impls!(i8: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7875 assert_impls!(u16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7876 assert_impls!(i16: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7877 assert_impls!(u32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7878 assert_impls!(i32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7879 assert_impls!(u64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7880 assert_impls!(i64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7881 assert_impls!(u128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7882 assert_impls!(i128: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7883 assert_impls!(usize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7884 assert_impls!(isize: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7885 assert_impls!(f32: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7886 assert_impls!(f64: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7887
7888 assert_impls!(bool: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7889 assert_impls!(char: KnownLayout, TryFromBytes, FromZeroes, AsBytes, !FromBytes, !Unaligned);
7890 assert_impls!(str: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7891
7892 assert_impls!(NonZeroU8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7893 assert_impls!(NonZeroI8: KnownLayout, TryFromBytes, AsBytes, Unaligned, !FromZeroes, !FromBytes);
7894 assert_impls!(NonZeroU16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7895 assert_impls!(NonZeroI16: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7896 assert_impls!(NonZeroU32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7897 assert_impls!(NonZeroI32: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7898 assert_impls!(NonZeroU64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7899 assert_impls!(NonZeroI64: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7900 assert_impls!(NonZeroU128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7901 assert_impls!(NonZeroI128: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7902 assert_impls!(NonZeroUsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7903 assert_impls!(NonZeroIsize: KnownLayout, TryFromBytes, AsBytes, !FromBytes, !Unaligned);
7904
7905 assert_impls!(Option<NonZeroU8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7906 assert_impls!(Option<NonZeroI8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7907 assert_impls!(Option<NonZeroU16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7908 assert_impls!(Option<NonZeroI16>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7909 assert_impls!(Option<NonZeroU32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7910 assert_impls!(Option<NonZeroI32>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7911 assert_impls!(Option<NonZeroU64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7912 assert_impls!(Option<NonZeroI64>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7913 assert_impls!(Option<NonZeroU128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7914 assert_impls!(Option<NonZeroI128>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7915 assert_impls!(Option<NonZeroUsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7916 assert_impls!(Option<NonZeroIsize>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned);
7917
7918 // Implements none of the ZC traits.
7919 struct NotZerocopy;
7920
7921 #[rustfmt::skip]
7922 type FnManyArgs = fn(
7923 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7924 ) -> (NotZerocopy, NotZerocopy);
7925
7926 // Allowed, because we're not actually using this type for FFI.
7927 #[allow(improper_ctypes_definitions)]
7928 #[rustfmt::skip]
7929 type ECFnManyArgs = extern "C" fn(
7930 NotZerocopy, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8, u8,
7931 ) -> (NotZerocopy, NotZerocopy);
7932
7933 #[cfg(feature = "alloc")]
7934 assert_impls!(Option<Box<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
        #[cfg(feature = "alloc")]
7935         assert_impls!(Option<Box<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7936 assert_impls!(Option<&'static UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7937 assert_impls!(Option<&'static [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7938 assert_impls!(Option<&'static mut UnsafeCell<NotZerocopy>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7939 assert_impls!(Option<&'static mut [UnsafeCell<NotZerocopy>]>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7940 assert_impls!(Option<NonNull<UnsafeCell<NotZerocopy>>>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7941 assert_impls!(Option<NonNull<[UnsafeCell<NotZerocopy>]>>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7942 assert_impls!(Option<fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7943 assert_impls!(Option<FnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7944 assert_impls!(Option<extern "C" fn()>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7945 assert_impls!(Option<ECFnManyArgs>: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7946
7947 assert_impls!(PhantomData<NotZerocopy>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7948 assert_impls!(PhantomData<[u8]>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7949
7950 assert_impls!(ManuallyDrop<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7951 assert_impls!(ManuallyDrop<[u8]>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7952 assert_impls!(ManuallyDrop<NotZerocopy>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7953 assert_impls!(ManuallyDrop<[NotZerocopy]>: !TryFromBytes, !KnownLayout, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7954
7955 assert_impls!(MaybeUninit<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, Unaligned, !AsBytes);
7956 assert_impls!(MaybeUninit<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7957
7958 assert_impls!(Wrapping<u8>: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7959 assert_impls!(Wrapping<NotZerocopy>: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7960
7961 assert_impls!(Unalign<u8>: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7962 assert_impls!(Unalign<NotZerocopy>: Unaligned, !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes);
7963
7964 assert_impls!([u8]: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, Unaligned);
7965 assert_impls!([bool]: KnownLayout, TryFromBytes, FromZeroes, AsBytes, Unaligned, !FromBytes);
7966 assert_impls!([NotZerocopy]: !KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7967 assert_impls!([u8; 0]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7968 assert_impls!([NotZerocopy; 0]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7969 assert_impls!([u8; 1]: KnownLayout, FromZeroes, FromBytes, AsBytes, Unaligned, !TryFromBytes);
7970 assert_impls!([NotZerocopy; 1]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7971
7972 assert_impls!(*const NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7973 assert_impls!(*mut NotZerocopy: KnownLayout, FromZeroes, !TryFromBytes, !FromBytes, !AsBytes, !Unaligned);
7974 assert_impls!(*const [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7975 assert_impls!(*mut [NotZerocopy]: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7976 assert_impls!(*const dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7977 assert_impls!(*mut dyn Debug: KnownLayout, !TryFromBytes, !FromZeroes, !FromBytes, !AsBytes, !Unaligned);
7978
7979 #[cfg(feature = "simd")]
7980 {
7981 #[allow(unused_macros)]
7982 macro_rules! test_simd_arch_mod {
7983 ($arch:ident, $($typ:ident),*) => {
7984 {
7985 use core::arch::$arch::{$($typ),*};
7986 use crate::*;
7987 $( assert_impls!($typ: KnownLayout, TryFromBytes, FromZeroes, FromBytes, AsBytes, !Unaligned); )*
7988 }
7989 };
7990 }
7991 #[cfg(target_arch = "x86")]
7992 test_simd_arch_mod!(x86, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7993
7994 #[cfg(all(feature = "simd-nightly", target_arch = "x86"))]
7995 test_simd_arch_mod!(x86, __m512bh, __m512, __m512d, __m512i);
7996
7997 #[cfg(target_arch = "x86_64")]
7998 test_simd_arch_mod!(x86_64, __m128, __m128d, __m128i, __m256, __m256d, __m256i);
7999
8000 #[cfg(all(feature = "simd-nightly", target_arch = "x86_64"))]
8001 test_simd_arch_mod!(x86_64, __m512bh, __m512, __m512d, __m512i);
8002
8003 #[cfg(target_arch = "wasm32")]
8004 test_simd_arch_mod!(wasm32, v128);
8005
8006 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc"))]
8007 test_simd_arch_mod!(
8008 powerpc,
8009 vector_bool_long,
8010 vector_double,
8011 vector_signed_long,
8012 vector_unsigned_long
8013 );
8014
8015 #[cfg(all(feature = "simd-nightly", target_arch = "powerpc64"))]
8016 test_simd_arch_mod!(
8017 powerpc64,
8018 vector_bool_long,
8019 vector_double,
8020 vector_signed_long,
8021 vector_unsigned_long
8022 );
8023 #[cfg(target_arch = "aarch64")]
8024 #[rustfmt::skip]
8025 test_simd_arch_mod!(
8026 aarch64, float32x2_t, float32x4_t, float64x1_t, float64x2_t, int8x8_t, int8x8x2_t,
8027 int8x8x3_t, int8x8x4_t, int8x16_t, int8x16x2_t, int8x16x3_t, int8x16x4_t, int16x4_t,
8028 int16x8_t, int32x2_t, int32x4_t, int64x1_t, int64x2_t, poly8x8_t, poly8x8x2_t, poly8x8x3_t,
8029 poly8x8x4_t, poly8x16_t, poly8x16x2_t, poly8x16x3_t, poly8x16x4_t, poly16x4_t, poly16x8_t,
8030 poly64x1_t, poly64x2_t, uint8x8_t, uint8x8x2_t, uint8x8x3_t, uint8x8x4_t, uint8x16_t,
8031 uint8x16x2_t, uint8x16x3_t, uint8x16x4_t, uint16x4_t, uint16x8_t, uint32x2_t, uint32x4_t,
8032 uint64x1_t, uint64x2_t
8033 );
8034 #[cfg(all(feature = "simd-nightly", target_arch = "arm"))]
8035 #[rustfmt::skip]
8036 test_simd_arch_mod!(arm, int8x4_t, uint8x4_t);
8037 }
8038 }
8039 }
8040
8041 #[cfg(kani)]
8042 mod proofs {
8043 use super::*;
8044
8045 impl kani::Arbitrary for DstLayout {
8046         fn any() -> Self {
8047 let align: NonZeroUsize = kani::any();
8048 let size_info: SizeInfo = kani::any();
8049
8050 kani::assume(align.is_power_of_two());
8051 kani::assume(align < DstLayout::THEORETICAL_MAX_ALIGN);
8052
8053             // For testing purposes, we care most about instantiations of
8054 // `DstLayout` that can correspond to actual Rust types. We use
8055 // `Layout` to verify that our `DstLayout` satisfies the validity
8056 // conditions of Rust layouts.
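            // (`Layout::from_size_align` errors when `align` is not a
            // nonzero power of two or when `size`, rounded up to the nearest
            // multiple of `align`, would overflow `isize`, matching the
            // validity rules for Rust layouts.)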
8057 kani::assume(
8058 match size_info {
8059 SizeInfo::Sized { _size } => Layout::from_size_align(_size, align.get()),
8060 SizeInfo::SliceDst(TrailingSliceLayout { _offset, _elem_size }) => {
8061                     // `SliceDst` cannot encode an exact size, but we know
8062 // it is at least `_offset` bytes.
8063 Layout::from_size_align(_offset, align.get())
8064 }
8065 }
8066 .is_ok(),
8067 );
8068
8069             Self { align, size_info }
8070 }
8071 }
8072
8073 impl kani::Arbitrary for SizeInfo {
8074         fn any() -> Self {
8075 let is_sized: bool = kani::any();
8076
8077 match is_sized {
8078 true => {
8079 let size: usize = kani::any();
8080
8081 kani::assume(size <= isize::MAX as _);
8082
8083 SizeInfo::Sized { _size: size }
8084 }
8085 false => SizeInfo::SliceDst(kani::any()),
8086 }
8087 }
8088 }
8089
8090 impl kani::Arbitrary for TrailingSliceLayout {
8091         fn any() -> Self {
8092 let elem_size: usize = kani::any();
8093 let offset: usize = kani::any();
8094
8095 kani::assume(elem_size < isize::MAX as _);
8096 kani::assume(offset < isize::MAX as _);
8097
8098 TrailingSliceLayout { _elem_size: elem_size, _offset: offset }
8099 }
8100 }
8101
8102 #[kani::proof]
8103     fn prove_dst_layout_extend() {
8104 use crate::util::{core_layout::padding_needed_for, max, min};
8105
8106 let base: DstLayout = kani::any();
8107 let field: DstLayout = kani::any();
8108 let packed: Option<NonZeroUsize> = kani::any();
8109
8110 if let Some(max_align) = packed {
8111 kani::assume(max_align.is_power_of_two());
8112 kani::assume(base.align <= max_align);
8113 }
8114
8115 // The base can only be extended if it's sized.
8116 kani::assume(matches!(base.size_info, SizeInfo::Sized { .. }));
8117 let base_size = if let SizeInfo::Sized { _size: size } = base.size_info {
8118 size
8119 } else {
8120 unreachable!();
8121 };
8122
8123 // Under the above conditions, `DstLayout::extend` will not panic.
8124 let composite = base.extend(field, packed);
8125
8126 // The field's alignment is clamped by `max_align` (i.e., the
8127 // `packed` attribute, if any) [1].
8128 //
8129 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8130 //
8131 // The alignments of each field, for the purpose of positioning
8132 // fields, is the smaller of the specified alignment and the
8133 // alignment of the field's type.
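        // E.g. (an illustrative instance of the rule quoted above): under
        // `repr(packed(2))`, a `u64` field with natural alignment 8 is
        // positioned using `min(8, 2) == 2`.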
8134 let field_align = min(field.align, packed.unwrap_or(DstLayout::THEORETICAL_MAX_ALIGN));
8135
8136 // The struct's alignment is the maximum of its previous alignment and
8137 // `field_align`.
8138 assert_eq!(composite.align, max(base.align, field_align));
8139
8140 // Compute the minimum amount of inter-field padding needed to
8141 // satisfy the field's alignment, and offset of the trailing field.
8142 // [1]
8143 //
8144 // [1] Per https://doc.rust-lang.org/reference/type-layout.html#the-alignment-modifiers:
8145 //
8146 // Inter-field padding is guaranteed to be the minimum required in
8147 // order to satisfy each field's (possibly altered) alignment.
8148 let padding = padding_needed_for(base_size, field_align);
8149 let offset = base_size + padding;
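        // E.g. (a worked instance): `base_size == 5` with `field_align == 4`
        // yields `padding == 3` and `offset == 8`.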
8150
8151 // For testing purposes, we'll also construct `alloc::Layout`
8152 // stand-ins for `DstLayout`, and show that `extend` behaves
8153 // comparably on both types.
8154 let base_analog = Layout::from_size_align(base_size, base.align.get()).unwrap();
8155
8156 match field.size_info {
8157 SizeInfo::Sized { _size: field_size } => {
8158 if let SizeInfo::Sized { _size: composite_size } = composite.size_info {
8159 // If the trailing field is sized, the resulting layout
8160                     // will be sized. Its size will be the sum of the
8161                     // preceding layout's size, the new field's size, and the
8162                     // size of the inter-field padding between the two.
8163 assert_eq!(composite_size, offset + field_size);
8164
8165 let field_analog =
8166 Layout::from_size_align(field_size, field_align.get()).unwrap();
8167
8168 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8169 {
8170 assert_eq!(actual_offset, offset);
8171 assert_eq!(actual_composite.size(), composite_size);
8172 assert_eq!(actual_composite.align(), composite.align.get());
8173 } else {
8174                     // An error here reflects that the composite of `base`
8175 // and `field` cannot correspond to a real Rust type
8176 // fragment, because such a fragment would violate
8177 // the basic invariants of a valid Rust layout. At
8178 // the time of writing, `DstLayout` is a little more
8179 // permissive than `Layout`, so we don't assert
8180 // anything in this branch (e.g., unreachability).
8181 }
8182 } else {
8183 panic!("The composite of two sized layouts must be sized.")
8184 }
8185 }
8186 SizeInfo::SliceDst(TrailingSliceLayout {
8187 _offset: field_offset,
8188 _elem_size: field_elem_size,
8189 }) => {
8190 if let SizeInfo::SliceDst(TrailingSliceLayout {
8191 _offset: composite_offset,
8192 _elem_size: composite_elem_size,
8193 }) = composite.size_info
8194 {
8195 // The offset of the trailing slice component is the sum
8196 // of the offset of the trailing field and the trailing
8197 // slice offset within that field.
8198 assert_eq!(composite_offset, offset + field_offset);
8199 // The elem size is unchanged.
8200 assert_eq!(composite_elem_size, field_elem_size);
8201
8202 let field_analog =
8203 Layout::from_size_align(field_offset, field_align.get()).unwrap();
8204
8205 if let Ok((actual_composite, actual_offset)) = base_analog.extend(field_analog)
8206 {
8207 assert_eq!(actual_offset, offset);
8208 assert_eq!(actual_composite.size(), composite_offset);
8209 assert_eq!(actual_composite.align(), composite.align.get());
8210 } else {
8211                     // An error here reflects that the composite of `base`
8212 // and `field` cannot correspond to a real Rust type
8213 // fragment, because such a fragment would violate
8214 // the basic invariants of a valid Rust layout. At
8215 // the time of writing, `DstLayout` is a little more
8216 // permissive than `Layout`, so we don't assert
8217 // anything in this branch (e.g., unreachability).
8218 }
8219 } else {
8220 panic!("The extension of a layout with a DST must result in a DST.")
8221 }
8222 }
8223 }
8224 }
8225
8226 #[kani::proof]
8227 #[kani::should_panic]
8228     fn prove_dst_layout_extend_dst_panics() {
8229 let base: DstLayout = kani::any();
8230 let field: DstLayout = kani::any();
8231 let packed: Option<NonZeroUsize> = kani::any();
8232
8233 if let Some(max_align) = packed {
8234 kani::assume(max_align.is_power_of_two());
8235 kani::assume(base.align <= max_align);
8236 }
8237
8238 kani::assume(matches!(base.size_info, SizeInfo::SliceDst(..)));
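        // (As noted in `prove_dst_layout_extend`, the base can only be
        // extended if it's sized; with a `SliceDst` base, `extend` is
        // expected to panic, which `#[kani::should_panic]` above verifies.)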
8239
8240 let _ = base.extend(field, packed);
8241 }
8242
8243 #[kani::proof]
8244     fn prove_dst_layout_pad_to_align() {
8245 use crate::util::core_layout::padding_needed_for;
8246
8247 let layout: DstLayout = kani::any();
8248
8249 let padded: DstLayout = layout.pad_to_align();
8250
8251 // Calling `pad_to_align` does not alter the `DstLayout`'s alignment.
8252 assert_eq!(padded.align, layout.align);
8253
8254 if let SizeInfo::Sized { _size: unpadded_size } = layout.size_info {
8255 if let SizeInfo::Sized { _size: padded_size } = padded.size_info {
8256 // If the layout is sized, it will remain sized after padding is
8257                 // added. Its size will be the sum of its unpadded size and
8258                 // the trailing padding needed to satisfy its alignment
8259                 // requirement.
8260 let padding = padding_needed_for(unpadded_size, layout.align);
8261 assert_eq!(padded_size, unpadded_size + padding);
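                // E.g. (a worked instance): an unpadded size of 5 with
                // alignment 4 pads to a size of 8, mirroring
                // `Layout::pad_to_align`.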
8262
8263 // Prove that calling `DstLayout::pad_to_align` behaves
8264 // identically to `Layout::pad_to_align`.
8265 let layout_analog =
8266 Layout::from_size_align(unpadded_size, layout.align.get()).unwrap();
8267 let padded_analog = layout_analog.pad_to_align();
8268 assert_eq!(padded_analog.align(), layout.align.get());
8269 assert_eq!(padded_analog.size(), padded_size);
8270 } else {
8271 panic!("The padding of a sized layout must result in a sized layout.")
8272 }
8273 } else {
8274 // If the layout is a DST, padding cannot be statically added.
8275 assert_eq!(padded.size_info, layout.size_info);
8276 }
8277 }
8278 }
8279