// Copyright 2023 The Fuchsia Authors
//
// Licensed under a BSD-style license <LICENSE-BSD>, Apache License, Version 2.0
// <LICENSE-APACHE or https://www.apache.org/licenses/LICENSE-2.0>, or the MIT
// license <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your option.
// This file may not be copied, modified, or distributed except according to
// those terms.

#[macro_use]
mod macros;

#[doc(hidden)]
pub mod macro_util;

use core::{
    cell::UnsafeCell,
    marker::PhantomData,
    mem::{self, ManuallyDrop, MaybeUninit},
    num::{NonZeroUsize, Wrapping},
    ptr::NonNull,
};

use crate::{
    error::AlignmentError,
    pointer::invariant::{self, Invariants},
    Unalign,
};

/// A type which has the same layout as the type it wraps.
///
/// # Safety
///
/// `T: TransparentWrapper` implies that `T` has the same size as [`T::Inner`].
/// Further, `T: TransparentWrapper<I>` implies that:
/// - If `T::UnsafeCellVariance = Covariant`, then `T` has `UnsafeCell`s
///   covering the same byte ranges as `T::Inner`.
/// - If a `T` pointer satisfies the alignment invariant `I::Alignment`, then
///   that same pointer, cast to `T::Inner`, satisfies the alignment invariant
///   `<T::AlignmentVariance as AlignmentVariance<I::Alignment>>::Applied`.
/// - If a `T` pointer satisfies the validity invariant `I::Validity`, then that
///   same pointer, cast to `T::Inner`, satisfies the validity invariant
///   `<T::ValidityVariance as ValidityVariance<I::Validity>>::Applied`.
///
/// [`T::Inner`]: TransparentWrapper::Inner
/// [`UnsafeCell`]: core::cell::UnsafeCell
/// [`T::AlignmentVariance`]: TransparentWrapper::AlignmentVariance
/// [`T::ValidityVariance`]: TransparentWrapper::ValidityVariance
#[doc(hidden)]
pub unsafe trait TransparentWrapper<I: Invariants> {
    type Inner: ?Sized;

    type UnsafeCellVariance;
    type AlignmentVariance: AlignmentVariance<I::Alignment>;
    type ValidityVariance: ValidityVariance<I::Validity>;

    /// Casts a wrapper pointer to an inner pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_into_inner(ptr: *mut Self) -> *mut Self::Inner;

    /// Casts an inner pointer to a wrapper pointer.
    ///
    /// # Safety
    ///
    /// The resulting pointer has the same address and provenance as `ptr`, and
    /// addresses the same number of bytes.
    fn cast_from_inner(ptr: *mut Self::Inner) -> *mut Self;
}

#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait AlignmentVariance<I: invariant::Alignment> {
    type Applied: invariant::Alignment;
}

#[allow(unreachable_pub)]
#[doc(hidden)]
pub trait ValidityVariance<I: invariant::Validity> {
    type Applied: invariant::Validity;
}

#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Covariant {}

impl<I: invariant::Alignment> AlignmentVariance<I> for Covariant {
    type Applied = I;
}

impl<I: invariant::Validity> ValidityVariance<I> for Covariant {
    type Applied = I;
}

#[doc(hidden)]
#[allow(missing_copy_implementations, missing_debug_implementations)]
pub enum Invariant {}

impl<I: invariant::Alignment> AlignmentVariance<I> for Invariant {
    type Applied = invariant::Unaligned;
}

impl<I: invariant::Validity> ValidityVariance<I> for Invariant {
    type Applied = invariant::Uninit;
}

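// A minimal sketch (test-only, illustrative) of how a hypothetical
// `#[repr(transparent)]` newtype could implement `TransparentWrapper`.
// `ExampleWrapper` is an assumption for illustration, not a zerocopy type;
// the real impls for `MaybeUninit`, `ManuallyDrop`, etc. follow below.
#[cfg(test)]
mod transparent_wrapper_example {
    use super::*;

    #[allow(dead_code)]
    #[repr(transparent)]
    struct ExampleWrapper<T>(T);

    // SAFETY (sketch): `#[repr(transparent)]` guarantees that
    // `ExampleWrapper<T>` has the same size, alignment, bit validity, and
    // `UnsafeCell` placement as its single field `T`, so every variance can
    // be covariant.
    unsafe impl<T, I: Invariants> TransparentWrapper<I> for ExampleWrapper<T> {
        type Inner = T;

        type UnsafeCellVariance = Covariant;
        type AlignmentVariance = Covariant;
        type ValidityVariance = Covariant;

        #[inline(always)]
        fn cast_into_inner(ptr: *mut ExampleWrapper<T>) -> *mut T {
            // Preserves address, provenance, and (because the wrapper is
            // layout-identical to `T`) the number of addressed bytes.
            ptr.cast::<T>()
        }

        #[inline(always)]
        fn cast_from_inner(ptr: *mut T) -> *mut ExampleWrapper<T> {
            ptr.cast::<ExampleWrapper<T>>()
        }
    }
}
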
// SAFETY:
// - Per [1], `MaybeUninit<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#layout-1:
//
//   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and ABI as
//   `T`
unsafe impl<T, I: Invariants> TransparentWrapper<I> for MaybeUninit<T> {
    type Inner = T;

    // SAFETY: `MaybeUninit<T>` has `UnsafeCell`s covering the same byte ranges
    // as `Inner = T`. This is not explicitly documented, but it can be
    // inferred. Per [1] in the preceding safety comment, `MaybeUninit<T>` has
    // the same size as `T`. Further, note the signature of
    // `MaybeUninit::assume_init_ref` [2]:
    //
    //   pub unsafe fn assume_init_ref(&self) -> &T
    //
    // If the argument `&MaybeUninit<T>` and the returned `&T` had `UnsafeCell`s
    // at different offsets, this would be unsound. Its existence is proof that
    // this is not the case.
    //
    // [2] https://doc.rust-lang.org/1.81.0/std/mem/union.MaybeUninit.html#method.assume_init_ref
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `MaybeUninit<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/std/mem/union.MaybeUninit.html#layout-1:
    //
    //   `MaybeUninit<T>` is guaranteed to have the same size, alignment, and
    //   ABI as `T`.
    type AlignmentVariance = Covariant;
    // SAFETY: `MaybeUninit` has no validity invariants. Thus, a valid
    // `MaybeUninit<T>` is not necessarily a valid `T`.
    type ValidityVariance = Invariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut MaybeUninit<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut MaybeUninit<T> {
        // SAFETY: Per [1] (from comment above), `MaybeUninit<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<MaybeUninit<T>>()
    }
}

// SAFETY:
// - Per [1], `ManuallyDrop<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
//
//   `ManuallyDrop<T>` is guaranteed to have the same layout and bit validity as
//   `T`
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for ManuallyDrop<T> {
    type Inner = T;

    // SAFETY: Per [1], `ManuallyDrop<T>` has `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`, and is subject to the same layout optimizations as
    //   `T`. As a consequence, it has no effect on the assumptions that the
    //   compiler makes about its contents.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `ManuallyDrop<T>` has the same layout as `T`, and thus
    // has the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/nightly/core/mem/struct.ManuallyDrop.html:
    //
    //   `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //   validity as `T`
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same bit
    // validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut ManuallyDrop<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut ManuallyDrop<T> {
        // SAFETY: Per [1] (from comment above), `ManuallyDrop<T>` has the same
        // layout as `T`. Thus, this cast preserves size even if `T` is unsized.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut ManuallyDrop<T>;
    }
}

// SAFETY:
// - Per [1], `Wrapping<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
//
//   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Wrapping<T> {
    type Inner = T;

    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`. Since its
    // single field (of type `T`) is public, it would be a breaking change to
    // add or remove fields. Thus, we know that `Wrapping<T>` contains a `T` (as
    // opposed to just having the same size and alignment as `T`) with no pre-
    // or post-padding. Thus, `Wrapping<T>` must have `UnsafeCell`s covering the
    // same byte ranges as `Inner = T`.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/std/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type UnsafeCellVariance = Covariant;
    // SAFETY: Per [1], `Wrapping<T>` has the same layout as `T`, and thus has
    // the same alignment as `T`.
    //
    // [1] Per https://doc.rust-lang.org/core/num/struct.Wrapping.html#layout-1:
    //
    //   `Wrapping<T>` is guaranteed to have the same layout and ABI as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: `Wrapping<T>` has only one field, which is `pub` [2]. We are also
    // guaranteed per [1] (from the comment above) that `Wrapping<T>` has the
    // same layout as `T`. The only way for both of these to be true
    // simultaneously is for `Wrapping<T>` to have the same bit validity as `T`.
    // In particular, in order to change the bit validity, one of the following
    // would need to happen:
    // - `Wrapping` could change its `repr`, but this would violate the layout
    //   guarantee.
    // - `Wrapping` could add or change its fields, but this would be a
    //   stability-breaking change.
    //
    // [2] https://doc.rust-lang.org/core/num/struct.Wrapping.html
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Wrapping<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Wrapping<T> {
        // SAFETY: Per [1] (from comment above), `Wrapping<T>` has the same
        // layout as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Wrapping<T>>()
    }
}

// SAFETY:
// - Per [1], `UnsafeCell<T>` has the same size as `T`.
// - See inline comments for other safety justifications.
//
// [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
//
//   `UnsafeCell<T>` has the same in-memory representation as its inner type
//   `T`.
unsafe impl<T: ?Sized, I: Invariants> TransparentWrapper<I> for UnsafeCell<T> {
    type Inner = T;

    // SAFETY: Since we set this to `Invariant`, we make no safety claims.
    type UnsafeCellVariance = Invariant;

    // SAFETY: Per [1] (from comment on impl), `UnsafeCell<T>` has the same
    // representation as `T`, and thus has the same alignment as `T`.
    type AlignmentVariance = Covariant;

    // SAFETY: Per [1], `UnsafeCell<T>` has the same bit validity as `T`.
    // Technically the term "representation" doesn't guarantee this, but the
    // subsequent sentence in the documentation makes it clear that this is the
    // intention.
    //
    // [1] Per https://doc.rust-lang.org/1.81.0/core/cell/struct.UnsafeCell.html#memory-layout:
    //
    //   `UnsafeCell<T>` has the same in-memory representation as its inner type
    //   `T`. A consequence of this guarantee is that it is possible to convert
    //   between `T` and `UnsafeCell<T>`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut UnsafeCell<T>) -> *mut T {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut T;
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut UnsafeCell<T> {
        // SAFETY: Per [1] (from comment above), `UnsafeCell<T>` has the same
        // representation as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        #[allow(clippy::as_conversions)]
        return ptr as *mut UnsafeCell<T>;
    }
}

// SAFETY: `Unalign<T>` promises to have the same size as `T`.
//
// See inline comments for other safety justifications.
unsafe impl<T, I: Invariants> TransparentWrapper<I> for Unalign<T> {
    type Inner = T;

    // SAFETY: `Unalign<T>` promises to have `UnsafeCell`s covering the same
    // byte ranges as `Inner = T`.
    type UnsafeCellVariance = Covariant;

    // SAFETY: `Unalign<T>` promises to have alignment 1 regardless of `T`'s
    // alignment. Thus, an aligned pointer to `Unalign<T>` is not necessarily
    // an aligned pointer to `T`.
    type AlignmentVariance = Invariant;

    // SAFETY: `Unalign<T>` promises to have the same validity as `T`.
    type ValidityVariance = Covariant;

    #[inline(always)]
    fn cast_into_inner(ptr: *mut Unalign<T>) -> *mut T {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<T>()
    }

    #[inline(always)]
    fn cast_from_inner(ptr: *mut T) -> *mut Unalign<T> {
        // SAFETY: Per the safety comment on the impl block, `Unalign<T>` has
        // the same size as `T`. Thus, this cast preserves size.
        //
        // This cast trivially preserves provenance.
        ptr.cast::<Unalign<T>>()
    }
}

/// Implements `TransparentWrapper` for an atomic type.
///
/// # Safety
///
/// The caller promises that `$atomic` is an atomic type whose native
/// equivalent is `$native`.
#[cfg(all(
    zerocopy_target_has_atomics_1_60_0,
    any(
        target_has_atomic = "8",
        target_has_atomic = "16",
        target_has_atomic = "32",
        target_has_atomic = "64",
        target_has_atomic = "ptr"
    )
))]
macro_rules! unsafe_impl_transparent_wrapper_for_atomic {
    ($(#[$attr:meta])* $(,)?) => {};
    ($(#[$attr:meta])* $atomic:ty [$native:ty], $($atomics:ty [$natives:ty]),* $(,)?) => {
        $(#[$attr])*
        // SAFETY: See safety comment in next match arm.
        unsafe impl<I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
        unsafe_impl_transparent_wrapper_for_atomic!($(#[$attr])* $($atomics [$natives],)*);
    };
    ($(#[$attr:meta])* $tyvar:ident => $atomic:ty [$native:ty]) => {
        // SAFETY: We implement for `$atomic` and set
        // `Inner = UnsafeCell<$native>`. The caller has promised that
        // `$atomic` and `$native` are an atomic type and its native
        // counterpart, respectively. Per [1], `$atomic` and `$native` have the
        // same size.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        $(#[$attr])*
        unsafe impl<$tyvar, I: crate::invariant::Invariants> crate::util::TransparentWrapper<I> for $atomic {
            unsafe_impl_transparent_wrapper_for_atomic!(@inner $atomic [$native]);
        }
    };
    (@inner $atomic:ty [$native:ty]) => {
        type Inner = UnsafeCell<$native>;

        // SAFETY: It is "obvious" that each atomic type contains a single
        // `UnsafeCell` that covers all bytes of the type, but we can also prove
        // it:
        // - Since `$atomic` provides an API which permits loading and storing
        //   values of type `$native` via a `&self` (shared) reference, *some*
        //   interior mutation must be happening, and interior mutation can only
        //   happen via `UnsafeCell`. Further, there must be enough bytes in
        //   `$atomic` covered by an `UnsafeCell` to hold every possible value
        //   of `$native`.
        // - Per [1], `$atomic` has the same size as `$native`. This on its own
        //   isn't enough: it would still be possible for `$atomic` to store
        //   `$native` using a compact representation (for `$native` types for
        //   which some bit patterns are illegal). However, this is ruled out by
        //   the fact that `$atomic` has the same bit validity as `$native` [1].
        //   Thus, we can conclude that every byte of `$atomic` must be covered
        //   by an `UnsafeCell`.
        //
        // Thus, every byte of `$atomic` is covered by an `UnsafeCell`, and we
        // set `type Inner = UnsafeCell<$native>`. Thus, `Self` and
        // `Self::Inner` have `UnsafeCell`s covering the same byte ranges.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type UnsafeCellVariance = crate::util::Covariant;

        // SAFETY: No safety justification is required for an invariant
        // variance.
        type AlignmentVariance = crate::util::Invariant;

        // SAFETY: Per [1], all atomic types have the same bit validity as their
        // native counterparts. The caller has promised that `$atomic` and
        // `$native` are an atomic type and its native counterpart,
        // respectively.
        //
        // [1] Per (for example) https://doc.rust-lang.org/1.81.0/std/sync/atomic/struct.AtomicU64.html:
        //
        //   This type has the same size and bit validity as the underlying
        //   integer type
        type ValidityVariance = crate::util::Covariant;

        #[inline(always)]
        fn cast_into_inner(ptr: *mut $atomic) -> *mut UnsafeCell<$native> {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<UnsafeCell<$native>>()
        }

        #[inline(always)]
        fn cast_from_inner(ptr: *mut UnsafeCell<$native>) -> *mut $atomic {
            // SAFETY: Per [1] (from comment on impl block), `$atomic` has the
            // same size as `$native`. Thus, this cast preserves size.
            //
            // This cast trivially preserves provenance.
            ptr.cast::<$atomic>()
        }
    };
}
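
// An illustrative invocation of the macro above (a sketch only; the actual
// call sites live elsewhere in the crate and may differ):
//
//   unsafe_impl_transparent_wrapper_for_atomic!(
//       #[cfg(target_has_atomic = "8")]
//       core::sync::atomic::AtomicU8 [u8],
//       core::sync::atomic::AtomicI8 [i8],
//   );
//
// Each `$atomic [$native]` pair expands to a `TransparentWrapper` impl whose
// `Inner` is `UnsafeCell<$native>`.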

/// Like [`PhantomData`], but [`Send`] and [`Sync`] regardless of whether the
/// wrapped `T` is.
pub(crate) struct SendSyncPhantomData<T: ?Sized>(PhantomData<T>);

// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Send for SendSyncPhantomData<T> {}
// SAFETY: `SendSyncPhantomData` does not enable any behavior which isn't sound
// to be called from multiple threads.
unsafe impl<T: ?Sized> Sync for SendSyncPhantomData<T> {}

impl<T: ?Sized> Default for SendSyncPhantomData<T> {
    fn default() -> SendSyncPhantomData<T> {
        SendSyncPhantomData(PhantomData)
    }
}

impl<T: ?Sized> PartialEq for SendSyncPhantomData<T> {
    fn eq(&self, other: &Self) -> bool {
        self.0.eq(&other.0)
    }
}

impl<T: ?Sized> Eq for SendSyncPhantomData<T> {}

pub(crate) trait AsAddress {
    fn addr(self) -> usize;
}

impl<T: ?Sized> AsAddress for &T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for &mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

impl<T: ?Sized> AsAddress for NonNull<T> {
    #[inline(always)]
    fn addr(self) -> usize {
        AsAddress::addr(self.as_ptr())
    }
}

impl<T: ?Sized> AsAddress for *const T {
    #[inline(always)]
    fn addr(self) -> usize {
        // TODO(#181), TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `.addr()` instead of `as usize` once it's stable, and get rid of this
        // `allow`. Currently, `as usize` is the only way to accomplish this.
        #[allow(clippy::as_conversions)]
        #[cfg_attr(
            __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS,
            allow(lossy_provenance_casts)
        )]
        return self.cast::<()>() as usize;
    }
}

impl<T: ?Sized> AsAddress for *mut T {
    #[inline(always)]
    fn addr(self) -> usize {
        let ptr: *const T = self;
        AsAddress::addr(ptr)
    }
}

/// Validates that `t` is aligned to `align_of::<U>()`.
#[inline(always)]
pub(crate) fn validate_aligned_to<T: AsAddress, U>(t: T) -> Result<(), AlignmentError<(), U>> {
    // `mem::align_of::<U>()` is guaranteed to return a non-zero value, which in
    // turn guarantees that this mod operation will not panic.
    #[allow(clippy::arithmetic_side_effects)]
    let remainder = t.addr() % mem::align_of::<U>();
    if remainder == 0 {
        Ok(())
    } else {
        // SAFETY: We just confirmed that `t.addr() % align_of::<U>() != 0`.
        // That's only possible if `align_of::<U>() > 1`.
        Err(unsafe { AlignmentError::new_unchecked(()) })
    }
}
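
// A quick sanity check of `validate_aligned_to` (a sketch; references are
// always aligned to their referent's alignment, so both calls must succeed).
#[cfg(test)]
mod validate_aligned_to_example {
    #[test]
    fn example() {
        let x = 0u64;
        // A `&u64` always satisfies `align_of::<u64>()`...
        assert!(super::validate_aligned_to::<_, u64>(&x).is_ok());
        // ...and alignment 1 is satisfied by every address.
        assert!(super::validate_aligned_to::<_, u8>(&x).is_ok());
    }
}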

/// Returns the bytes needed to pad `len` to the next multiple of `align`.
///
/// This function assumes that align is a power of two; there are no guarantees
/// on the answer it gives if this is not the case.
pub(crate) const fn padding_needed_for(len: usize, align: NonZeroUsize) -> usize {
    // Abstractly, we want to compute:
    //   align - (len % align),
    // handling the case where `len % align` is 0, in which case no padding is
    // needed. Because `align` is a power of two, `len % align` equals
    // `len & (align - 1)`.
    //
    // The subtraction below is guaranteed not to underflow because `align` is
    // non-zero.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = align.get() - 1;

    // To efficiently subtract this value from `align`, we can use the bitwise
    // complement. Note that `((!len) & (align - 1))` gives us a number that
    // with `(len & (align - 1))` sums to `align - 1`. So subtracting 1 from
    // `len` before taking the complement subtracts `len` from `align`. Some
    // quick inspection of cases shows that this also handles the case where
    // `len % align = 0` correctly: `(len - 1) % align` then equals
    // `align - 1`, so the complement mod `align` will be 0, as desired.
    //
    // The following reasoning can be verified quickly by an SMT solver
    // supporting the theory of bitvectors:
    // ```smtlib
    // ; Naive implementation of padding
    // (define-fun padding1 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    //    (ite
    //      (= (_ bv0 32) (bvand len (bvsub align (_ bv1 32))))
    //      (_ bv0 32)
    //      (bvsub align (bvand len (bvsub align (_ bv1 32))))))
    //
    // ; The implementation below
    // (define-fun padding2 (
    //     (len (_ BitVec 32))
    //     (align (_ BitVec 32))) (_ BitVec 32)
    // (bvand (bvnot (bvsub len (_ bv1 32))) (bvsub align (_ bv1 32))))
    //
    // (define-fun is-power-of-two ((x (_ BitVec 32))) Bool
    //   (= (_ bv0 32) (bvand x (bvsub x (_ bv1 32)))))
    //
    // (declare-const len (_ BitVec 32))
    // (declare-const align (_ BitVec 32))
    // ; Search for a case where align is a power of two and padding2 disagrees with padding1
    // (assert (and (is-power-of-two align)
    //              (not (= (padding1 len align) (padding2 len align)))))
    // (simplify (padding1 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding2 (_ bv300 32) (_ bv32 32))) ; 20
    // (simplify (padding1 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding2 (_ bv322 32) (_ bv32 32))) ; 30
    // (simplify (padding1 (_ bv8 32) (_ bv8 32)))    ; 0
    // (simplify (padding2 (_ bv8 32) (_ bv8 32)))    ; 0
    // (check-sat) ; unsat, also works for 64-bit bitvectors
    // ```
    !(len.wrapping_sub(1)) & mask
}
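
// A concrete spot-check of the bit trick above (a sketch; the Kani proof in
// `proofs` covers the general case). With `len = 5` and `align = 8`:
// `mask = 0b111`, `!(5 - 1) = !4` ends in `...1011`, and
// `...1011 & 0b111 = 0b011 = 3`, so 5 + 3 = 8 as expected.
#[cfg(test)]
mod padding_needed_for_example {
    use super::*;

    #[test]
    fn examples() {
        let align8 = NonZeroUsize::new(8).unwrap();
        // 5 rounds up to 8: 3 bytes of padding.
        assert_eq!(padding_needed_for(5, align8), 3);
        // 8 is already a multiple of 8: no padding.
        assert_eq!(padding_needed_for(8, align8), 0);
        // Alignment 1 never requires padding.
        assert_eq!(padding_needed_for(17, NonZeroUsize::new(1).unwrap()), 0);
    }
}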

/// Rounds `n` down to the largest value `m` such that `m <= n` and `m % align
/// == 0`.
///
/// # Panics
///
/// May panic if `align` is not a power of two. Even if it doesn't panic in this
/// case, it will produce nonsense results.
#[inline(always)]
pub(crate) const fn round_down_to_next_multiple_of_alignment(
    n: usize,
    align: NonZeroUsize,
) -> usize {
    let align = align.get();
    #[cfg(zerocopy_panic_in_const_and_vec_try_reserve_1_57_0)]
    debug_assert!(align.is_power_of_two());

    // Subtraction can't underflow because `align.get() >= 1`.
    #[allow(clippy::arithmetic_side_effects)]
    let mask = !(align - 1);
    n & mask
}

pub(crate) const fn max(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() < b.get() {
        b
    } else {
        a
    }
}

pub(crate) const fn min(a: NonZeroUsize, b: NonZeroUsize) -> NonZeroUsize {
    if a.get() > b.get() {
        b
    } else {
        a
    }
}

/// Copies `src` into the prefix of `dst`.
///
/// # Safety
///
/// The caller guarantees that `src.len() <= dst.len()`.
#[inline(always)]
pub(crate) unsafe fn copy_unchecked(src: &[u8], dst: &mut [u8]) {
    debug_assert!(src.len() <= dst.len());
    // SAFETY: This invocation satisfies the safety contract of
    // copy_nonoverlapping [1]:
    // - `src.as_ptr()` is trivially valid for reads of `src.len()` bytes
    // - `dst.as_ptr()` is valid for writes of `src.len()` bytes, because the
    //   caller has promised that `src.len() <= dst.len()`
    // - `src` and `dst` are, trivially, properly aligned
    // - the region of memory beginning at `src` with a size of `src.len()`
    //   bytes does not overlap with the region of memory beginning at `dst`
    //   with the same size, because `dst` is derived from an exclusive
    //   reference.
    unsafe {
        core::ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
    };
}
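
// A minimal demonstration of `copy_unchecked` (a sketch): the safety
// precondition is discharged because `src.len() == 2 <= dst.len() == 4`.
#[cfg(test)]
mod copy_unchecked_example {
    #[test]
    fn example() {
        let src = [1u8, 2];
        let mut dst = [0u8; 4];
        // SAFETY: `src.len() <= dst.len()`.
        unsafe { super::copy_unchecked(&src, &mut dst) };
        assert_eq!(dst, [1, 2, 0, 0]);
    }
}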

/// Unsafely transmutes the given `src` into a type `Dst`.
///
/// # Safety
///
/// The value `src` must be a valid instance of `Dst`.
#[inline(always)]
pub(crate) const unsafe fn transmute_unchecked<Src, Dst>(src: Src) -> Dst {
    static_assert!(Src, Dst => core::mem::size_of::<Src>() == core::mem::size_of::<Dst>());

    #[repr(C)]
    union Transmute<Src, Dst> {
        src: ManuallyDrop<Src>,
        dst: ManuallyDrop<Dst>,
    }

    // SAFETY: Since `Transmute<Src, Dst>` is `#[repr(C)]`, its `src` and `dst`
    // fields both start at the same offset and the types of those fields are
    // transparent wrappers around `Src` and `Dst` [1]. Consequently,
    // initializing `Transmute` with `src` and then reading out `dst` is
    // equivalent to transmuting from `Src` to `Dst` [2]. Transmuting from `src`
    // to `Dst` is valid because, by contract on the caller, `src` is a valid
    // instance of `Dst`.
    //
    // [1] Per https://doc.rust-lang.org/1.82.0/std/mem/struct.ManuallyDrop.html:
    //
    //     `ManuallyDrop<T>` is guaranteed to have the same layout and bit
    //     validity as `T`, and is subject to the same layout optimizations as
    //     `T`.
    //
    // [2] Per https://doc.rust-lang.org/1.82.0/reference/items/unions.html#reading-and-writing-union-fields:
    //
    //     Effectively, writing to and then reading from a union with the C
    //     representation is analogous to a transmute from the type used for
    //     writing to the type used for reading.
    unsafe { ManuallyDrop::into_inner(Transmute { src: ManuallyDrop::new(src) }.dst) }
}
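
// A small sanity check of `transmute_unchecked` (a sketch): every initialized
// `[u8; 4]` is a bit-valid `u32`, so the safety contract is satisfied. All
// zeros avoids any endianness concerns.
#[cfg(test)]
mod transmute_unchecked_example {
    #[test]
    fn example() {
        // SAFETY: `[0u8; 4]` is a valid instance of `u32` (namely, 0), and the
        // two types have the same size.
        let n: u32 = unsafe { super::transmute_unchecked([0u8; 4]) };
        assert_eq!(n, 0);
    }
}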

/// Uses `allocate` to create a `Box<T>`.
///
/// # Errors
///
/// Returns an error on allocation failure. Allocation failure is guaranteed
/// never to cause a panic or an abort.
///
/// # Safety
///
/// `allocate` must be either `alloc::alloc::alloc` or
/// `alloc::alloc::alloc_zeroed`. The referent of the box returned by `new_box`
/// has the same bit-validity as the referent of the pointer returned by the
/// given `allocate` and sufficient size to store `T` with `meta`.
#[must_use = "has no side effects (other than allocation)"]
#[cfg(feature = "alloc")]
#[inline]
pub(crate) unsafe fn new_box<T>(
    meta: T::PointerMetadata,
    allocate: unsafe fn(core::alloc::Layout) -> *mut u8,
) -> Result<alloc::boxed::Box<T>, crate::error::AllocError>
where
    T: ?Sized + crate::KnownLayout,
{
    use crate::error::AllocError;
    use crate::PointerMetadata;
    use core::alloc::Layout;

    let size = match meta.size_for_metadata(T::LAYOUT) {
        Some(size) => size,
        None => return Err(AllocError),
    };

    let align = T::LAYOUT.align.get();
    // On stable Rust versions <= 1.64.0, `Layout::from_size_align` has a bug in
    // which sufficiently-large allocations (those which, when rounded up to the
    // alignment, overflow `isize`) are not rejected, which can cause undefined
    // behavior. See #64 for details.
    //
    // TODO(#67): Once our MSRV is > 1.64.0, remove this assertion.
    #[allow(clippy::as_conversions)]
    let max_alloc = (isize::MAX as usize).saturating_sub(align);
    if size > max_alloc {
        return Err(AllocError);
    }

    // TODO(https://github.com/rust-lang/rust/issues/55724): Use
    // `Layout::repeat` once it's stabilized.
    let layout = Layout::from_size_align(size, align).or(Err(AllocError))?;

    let ptr = if layout.size() != 0 {
        // SAFETY: By contract on the caller, `allocate` is either
        // `alloc::alloc::alloc` or `alloc::alloc::alloc_zeroed`. The above
        // check ensures their shared safety precondition: that the supplied
        // layout is not zero-sized [1].
        //
        // [1] Per https://doc.rust-lang.org/stable/std/alloc/trait.GlobalAlloc.html#tymethod.alloc:
        //
        //     This function is unsafe because undefined behavior can result if
        //     the caller does not ensure that layout has non-zero size.
        let ptr = unsafe { allocate(layout) };
        match NonNull::new(ptr) {
            Some(ptr) => ptr,
            None => return Err(AllocError),
        }
    } else {
        let align = T::LAYOUT.align.get();
        // We use `transmute` instead of an `as` cast since Miri (with strict
        // provenance enabled) notices and complains that an `as` cast creates a
        // pointer with no provenance. Miri isn't smart enough to realize that
        // we're only executing this branch when we're constructing a zero-sized
        // `Box`, which doesn't require provenance.
        //
        // SAFETY: any initialized bit sequence is a bit-valid `*mut u8`. All
        // bits of a `usize` are initialized.
        #[allow(clippy::useless_transmute)]
        let dangling = unsafe { mem::transmute::<usize, *mut u8>(align) };
        // SAFETY: `dangling` is constructed from `T::LAYOUT.align`, which is a
        // `NonZeroUsize`, which is guaranteed to be non-zero.
        //
        // `Box<[T]>` does not allocate when `T` is zero-sized or when `len` is
        // zero, but it does require a non-null dangling pointer for its
        // allocation.
        //
        // TODO(https://github.com/rust-lang/rust/issues/95228): Use
        // `std::ptr::without_provenance` once it's stable. That may optimize
        // better. As written, Rust may assume that this consumes "exposed"
        // provenance, and thus Rust may have to assume that this may consume
        // provenance from any pointer whose provenance has been exposed.
        unsafe { NonNull::new_unchecked(dangling) }
    };

    let ptr = T::raw_from_ptr_len(ptr, meta);

    // TODO(#429): Add a "SAFETY" comment and remove this `allow`. Make sure to
    // include a justification that `ptr.as_ptr()` is validly-aligned in the ZST
    // case (in which we manually construct a dangling pointer) and to justify
    // why `Box` is safe to drop (it's because `allocate` uses the system
    // allocator).
    #[allow(clippy::undocumented_unsafe_blocks)]
    Ok(unsafe { alloc::boxed::Box::from_raw(ptr.as_ptr()) })
}
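
// An illustrative use of `new_box` (a sketch; it assumes that `KnownLayout for
// [u8]` uses the slice length as its `PointerMetadata`, as it does in this
// crate). `alloc_zeroed` satisfies the safety contract, and its zeroed
// referent is a bit-valid `[u8]`.
#[cfg(all(test, feature = "alloc"))]
mod new_box_example {
    #[test]
    fn example() {
        // SAFETY: `alloc::alloc::alloc_zeroed` is one of the two allowed
        // allocators, and zeroed bytes are a valid `[u8]` of length 3.
        let b = match unsafe { super::new_box::<[u8]>(3, alloc::alloc::alloc_zeroed) } {
            Ok(b) => b,
            Err(_) => panic!("allocation failed"),
        };
        assert_eq!(&*b, &[0u8, 0, 0][..]);
    }
}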

/// Since we support multiple versions of Rust, there are often features which
/// have been stabilized in the most recent stable release which do not yet
/// exist (stably) on our MSRV. This module provides polyfills for those
/// features so that we can write more "modern" code, and just remove the
/// polyfill once our MSRV supports the corresponding feature. Without this,
/// we'd have to write worse/more verbose code and leave TODO comments sprinkled
/// throughout the codebase to update to the new pattern once it's stabilized.
///
/// Each trait is imported as `_` at the crate root; each polyfill should "just
/// work" at usage sites.
pub(crate) mod polyfills {
    use core::ptr::{self, NonNull};

    // A polyfill for `NonNull::slice_from_raw_parts` that we can use before our
    // MSRV is 1.70, when that function was stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `ptr.slice_from_raw_parts()` resolves to the inherent
    // method rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is 1.70, remove this.
    #[allow(unused)]
    pub(crate) trait NonNullExt<T> {
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]>;
    }

    impl<T> NonNullExt<T> for NonNull<T> {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        fn slice_from_raw_parts(data: Self, len: usize) -> NonNull<[T]> {
            let ptr = ptr::slice_from_raw_parts_mut(data.as_ptr(), len);
            // SAFETY: `ptr` is converted from `data`, which is non-null.
            unsafe { NonNull::new_unchecked(ptr) }
        }
    }

    // A polyfill for `Self::unchecked_sub` that we can use until methods like
    // `usize::unchecked_sub` are stabilized.
    //
    // The `#[allow(unused)]` is necessary because, on sufficiently recent
    // toolchain versions, `x.unchecked_sub()` resolves to the inherent method
    // rather than to this trait, and so this trait is considered unused.
    //
    // TODO(#67): Once our MSRV is high enough, remove this.
    #[allow(unused)]
    pub(crate) trait NumExt {
        /// Subtract without checking for underflow.
        ///
        /// # Safety
        ///
        /// The caller promises that the subtraction will not underflow.
        unsafe fn unchecked_sub(self, rhs: Self) -> Self;
    }

    impl NumExt for usize {
        // NOTE on coverage: this will never be tested in nightly since it's a
        // polyfill for a feature which has been stabilized on our nightly
        // toolchain.
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        #[inline(always)]
        unsafe fn unchecked_sub(self, rhs: usize) -> usize {
            match self.checked_sub(rhs) {
                Some(x) => x,
                None => {
                    // SAFETY: The caller promises that the subtraction will not
                    // underflow.
                    unsafe { core::hint::unreachable_unchecked() }
                }
            }
        }
    }
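
    // A small demonstration of both polyfills (a sketch; the crate itself
    // imports these traits as `_` at the root and uses them implicitly).
    #[cfg(test)]
    mod example {
        use super::{NonNullExt, NumExt};
        use core::ptr::NonNull;

        #[test]
        fn polyfills() {
            let mut arr = [0u8; 4];
            let data = NonNull::new(arr.as_mut_ptr()).unwrap();
            let slice = NonNullExt::slice_from_raw_parts(data, arr.len());
            // The slice pointer starts at the same address as `data`.
            assert_eq!(slice.cast::<u8>(), data);

            // SAFETY: `4 - 1` does not underflow.
            assert_eq!(unsafe { NumExt::unchecked_sub(4usize, 1) }, 3);
        }
    }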
}

#[cfg(test)]
pub(crate) mod testutil {
    use crate::*;

    /// A `T` which is aligned to at least `align_of::<A>()`.
    #[derive(Default)]
    pub(crate) struct Align<T, A> {
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Default, A> Align<T, A> {
        pub(crate) fn set_default(&mut self) {
            self.t = T::default();
        }
    }

    impl<T, A> Align<T, A> {
        pub(crate) const fn new(t: T) -> Align<T, A> {
            Align { t, _a: [] }
        }
    }

    /// A `T` which is guaranteed not to satisfy `align_of::<A>()`.
    ///
    /// It must be the case that `align_of::<T>() < align_of::<A>()` in order
    /// for this type to work properly.
    #[repr(C)]
    pub(crate) struct ForceUnalign<T: Unaligned, A> {
        // The outer struct is aligned to `A`, and, thanks to `repr(C)`, `t` is
        // placed at the minimum offset that guarantees its alignment. If
        // `align_of::<T>() < align_of::<A>()`, then that offset will be
        // guaranteed *not* to satisfy `align_of::<A>()`.
        //
        // Note that we need `T: Unaligned` in order to guarantee that there is
        // no padding between `_u` and `t`.
        _u: u8,
        pub(crate) t: T,
        _a: [A; 0],
    }

    impl<T: Unaligned, A> ForceUnalign<T, A> {
        pub(crate) fn new(t: T) -> ForceUnalign<T, A> {
            ForceUnalign { _u: 0, t, _a: [] }
        }
    }
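
    // A quick demonstration (illustrative only) of the alignment guarantees
    // that `Align` and `ForceUnalign` provide. `AU64` (defined below) has
    // alignment 8, and `[u8; 8]` has alignment 1.
    #[test]
    fn align_and_force_unalign() {
        use super::AsAddress;

        let a = Align::<[u8; 8], AU64>::new([0; 8]);
        // `a.t` is aligned to at least `align_of::<AU64>() == 8`.
        assert_eq!((&a.t).addr() & 7, 0);

        let f = ForceUnalign::<[u8; 8], AU64>::new([0; 8]);
        // `f.t` sits at offset 1 within an 8-aligned struct, so it is
        // guaranteed *not* to be 8-aligned.
        assert_eq!((&f.t).addr() & 7, 1);
    }
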
    // A `u64` with alignment 8.
    //
    // Though `u64` has alignment 8 on some platforms, it's not guaranteed. By
    // contrast, `AU64` is guaranteed to have alignment 8 on all platforms.
    #[derive(
        KnownLayout,
        Immutable,
        FromBytes,
        IntoBytes,
        Eq,
        PartialEq,
        Ord,
        PartialOrd,
        Default,
        Debug,
        Copy,
        Clone,
    )]
    #[repr(C, align(8))]
    pub(crate) struct AU64(pub(crate) u64);

    impl AU64 {
        // Converts this `AU64` to bytes using this platform's endianness.
        pub(crate) fn to_bytes(self) -> [u8; 8] {
            crate::transmute!(self)
        }
    }

    impl Display for AU64 {
        #[cfg_attr(
            all(coverage_nightly, __ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS),
            coverage(off)
        )]
        fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
            Display::fmt(&self.0, f)
        }
    }

    #[derive(Immutable, FromBytes, Eq, PartialEq, Ord, PartialOrd, Default, Debug, Copy, Clone)]
    #[repr(C)]
    pub(crate) struct Nested<T, U: ?Sized> {
        _t: T,
        _u: U,
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_round_down_to_next_multiple_of_alignment() {
        fn alt_impl(n: usize, align: NonZeroUsize) -> usize {
            let mul = n / align.get();
            mul * align.get()
        }

        for align in [1, 2, 4, 8, 16] {
            for n in 0..256 {
                let align = NonZeroUsize::new(align).unwrap();
                let want = alt_impl(n, align);
                let got = round_down_to_next_multiple_of_alignment(n, align);
                assert_eq!(got, want, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
            }
        }
    }

    #[rustversion::since(1.57.0)]
    #[test]
    #[should_panic]
    fn test_round_down_to_next_multiple_of_alignment_zerocopy_panic_in_const_and_vec_try_reserve() {
        round_down_to_next_multiple_of_alignment(0, NonZeroUsize::new(3).unwrap());
    }
}

#[cfg(kani)]
mod proofs {
    use super::*;

    #[kani::proof]
    fn prove_round_down_to_next_multiple_of_alignment() {
        fn model_impl(n: usize, align: NonZeroUsize) -> usize {
            assert!(align.get().is_power_of_two());
            let mul = n / align.get();
            mul * align.get()
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let n: usize = kani::any();

        let expected = model_impl(n, align);
        let actual = round_down_to_next_multiple_of_alignment(n, align);
        assert_eq!(expected, actual, "round_down_to_next_multiple_of_alignment({}, {})", n, align);
    }

    // Restricted to nightly since we use the unstable `usize::next_multiple_of`
    // in our model implementation.
    #[cfg(__ZEROCOPY_INTERNAL_USE_ONLY_NIGHTLY_FEATURES_IN_TESTS)]
    #[kani::proof]
    fn prove_padding_needed_for() {
        fn model_impl(len: usize, align: NonZeroUsize) -> usize {
            let padded = len.next_multiple_of(align.get());
            let padding = padded - len;
            padding
        }

        let align: NonZeroUsize = kani::any();
        kani::assume(align.get().is_power_of_two());
        let len: usize = kani::any();
        // Constrain `len` to valid Rust lengths, since our model implementation
        // isn't robust to overflow.
        kani::assume(len <= isize::MAX as usize);
        kani::assume(align.get() < 1 << 29);

        let expected = model_impl(len, align);
        let actual = padding_needed_for(len, align);
        assert_eq!(expected, actual, "padding_needed_for({}, {})", len, align);

        let padded_len = actual + len;
        assert_eq!(padded_len % align, 0);
        assert!(padded_len / align >= len / align);
    }
}