//! Computations on places -- field projections, going from mir::Place, and writing
//! into a place.
//! All high-level functions to write to memory work on places as destinations.

use either::{Either, Left, Right};

use rustc_ast::Mutability;
use rustc_index::IndexSlice;
use rustc_middle::mir;
use rustc_middle::ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::Ty;
use rustc_target::abi::{self, Abi, Align, FieldIdx, HasDataLayout, Size, FIRST_VARIANT};

use super::{
    alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
    ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
    Pointer, Provenance, Scalar,
};

#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
/// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Prov: Provenance = AllocId> {
    /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
    Meta(Scalar<Prov>),
    /// `Sized` types or unsized `extern type`
    None,
}

impl<Prov: Provenance> MemPlaceMeta<Prov> {
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn unwrap_meta(self) -> Scalar<Prov> {
        match self {
            Self::Meta(s) => s,
            Self::None => {
                bug!("expected wide pointer extra data (e.g. slice length or trait object vtable)")
            }
        }
    }

    pub fn has_meta(self) -> bool {
        match self {
            Self::Meta(_) => true,
            Self::None => false,
        }
    }
}

#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
pub struct MemPlace<Prov: Provenance = AllocId> {
    /// The pointer can be a pure integer, with the `None` provenance.
    pub ptr: Pointer<Option<Prov>>,
    /// Metadata for unsized places. Interpretation is up to the type.
    /// Must not be present for sized types, but can be missing for unsized types
    /// (e.g., `extern type`).
    pub meta: MemPlaceMeta<Prov>,
}

/// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
pub struct MPlaceTy<'tcx, Prov: Provenance = AllocId> {
    mplace: MemPlace<Prov>,
    pub layout: TyAndLayout<'tcx>,
    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
    /// it needs to have a different alignment than the field type would usually have.
    /// So we represent this here with a separate field that "overwrites" `layout.align`.
    /// This means `layout.align` should never be used for a `MPlaceTy`!
    pub align: Align,
}

#[derive(Copy, Clone, Debug)]
pub enum Place<Prov: Provenance = AllocId> {
    /// A place referring to a value allocated in the `Memory` system.
    Ptr(MemPlace<Prov>),

    /// To support alloc-free locals, we are able to write directly to a local.
    /// (Without that optimization, we'd just always be a `MemPlace`.)
    Local { frame: usize, local: mir::Local },
}

#[derive(Clone, Debug)]
pub struct PlaceTy<'tcx, Prov: Provenance = AllocId> {
    place: Place<Prov>, // Keep this private; it helps enforce invariants.
    pub layout: TyAndLayout<'tcx>,
    /// rustc does not have a proper way to represent the type of a field of a `repr(packed)` struct:
    /// it needs to have a different alignment than the field type would usually have.
    /// So we represent this here with a separate field that "overwrites" `layout.align`.
    /// This means `layout.align` should never be used for a `PlaceTy`!
    pub align: Align,
}

impl<'tcx, Prov: Provenance> std::ops::Deref for PlaceTy<'tcx, Prov> {
    type Target = Place<Prov>;
    #[inline(always)]
    fn deref(&self) -> &Place<Prov> {
        &self.place
    }
}

impl<'tcx, Prov: Provenance> std::ops::Deref for MPlaceTy<'tcx, Prov> {
    type Target = MemPlace<Prov>;
    #[inline(always)]
    fn deref(&self) -> &MemPlace<Prov> {
        &self.mplace
    }
}

impl<'tcx, Prov: Provenance> From<MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: MPlaceTy<'tcx, Prov>) -> Self {
        PlaceTy { place: Place::Ptr(*mplace), layout: mplace.layout, align: mplace.align }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &MPlaceTy<'tcx, Prov>) -> Self {
        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
    }
}

impl<'tcx, Prov: Provenance> From<&'_ mut MPlaceTy<'tcx, Prov>> for PlaceTy<'tcx, Prov> {
    #[inline(always)]
    fn from(mplace: &mut MPlaceTy<'tcx, Prov>) -> Self {
        PlaceTy { place: Place::Ptr(**mplace), layout: mplace.layout, align: mplace.align }
    }
}

impl<Prov: Provenance> MemPlace<Prov> {
    #[inline(always)]
    pub fn from_ptr(ptr: Pointer<Option<Prov>>) -> Self {
        MemPlace { ptr, meta: MemPlaceMeta::None }
    }

    /// Adjust the provenance of the main pointer (metadata is unaffected).
    pub fn map_provenance(self, f: impl FnOnce(Option<Prov>) -> Option<Prov>) -> Self {
        MemPlace { ptr: self.ptr.map_provenance(f), ..self }
    }

    /// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
    /// This is the inverse of `ref_to_mplace`.
    #[inline(always)]
    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Prov> {
        match self.meta {
            MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
            MemPlaceMeta::Meta(meta) => {
                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx), meta)
            }
        }
    }
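
    // For illustration: `to_ref` encodes a thin place (e.g. one of type `u32`) as a single
    // `Scalar` pointer value, and a wide place (e.g. one of type `[u8]`) as a
    // `ScalarPair(ptr, len)`. `InterpCx::ref_to_mplace` below performs the opposite decoding.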

    #[inline]
    pub(super) fn offset_with_meta<'tcx>(
        self,
        offset: Size,
        meta: MemPlaceMeta<Prov>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        debug_assert!(
            !meta.has_meta() || self.meta.has_meta(),
            "cannot use `offset_with_meta` to add metadata to a place"
        );
        Ok(MemPlace { ptr: self.ptr.offset(offset, cx)?, meta })
    }
}

impl<Prov: Provenance> Place<Prov> {
    /// Asserts that this points to some local variable.
    /// Returns the frame idx and the variable idx.
    #[inline]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn assert_local(&self) -> (usize, mir::Local) {
        match self {
            Place::Local { frame, local } => (*frame, *local),
            _ => bug!("assert_local: expected Place::Local, got {:?}", self),
        }
    }
}

impl<'tcx, Prov: Provenance> MPlaceTy<'tcx, Prov> {
    /// Produces a MemPlace that works for ZST but nothing else.
    /// Conceptually this is a new allocation, but it doesn't actually create an allocation so you
    /// don't need to worry about memory leaks.
    #[inline]
    pub fn fake_alloc_zst(layout: TyAndLayout<'tcx>) -> Self {
        assert!(layout.is_zst());
        let align = layout.align.abi;
        let ptr = Pointer::from_addr_invalid(align.bytes()); // no provenance, absolute address
        MPlaceTy { mplace: MemPlace { ptr, meta: MemPlaceMeta::None }, layout, align }
    }
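
    // Note on `fake_alloc_zst` (illustration): since the place is zero-sized, every access
    // through it is a no-op, so a suitably aligned address without provenance (and without any
    // backing allocation) is sufficient.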

    /// Offset the place in memory and change its metadata.
    ///
    /// This can go wrong very easily if you give the wrong layout for the new place!
    #[inline]
    pub(crate) fn offset_with_meta(
        &self,
        offset: Size,
        meta: MemPlaceMeta<Prov>,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        Ok(MPlaceTy {
            mplace: self.mplace.offset_with_meta(offset, meta, cx)?,
            align: self.align.restrict_for_offset(offset),
            layout,
        })
    }

    /// Offset the place in memory.
    ///
    /// This can go wrong very easily if you give the wrong layout for the new place!
    pub fn offset(
        &self,
        offset: Size,
        layout: TyAndLayout<'tcx>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Self> {
        assert!(layout.is_sized());
        self.offset_with_meta(offset, MemPlaceMeta::None, layout, cx)
    }

    #[inline]
    pub fn from_aligned_ptr(ptr: Pointer<Option<Prov>>, layout: TyAndLayout<'tcx>) -> Self {
        MPlaceTy { mplace: MemPlace::from_ptr(ptr), layout, align: layout.align.abi }
    }

    #[inline]
    pub fn from_aligned_ptr_with_meta(
        ptr: Pointer<Option<Prov>>,
        layout: TyAndLayout<'tcx>,
        meta: MemPlaceMeta<Prov>,
    ) -> Self {
        let mut mplace = MemPlace::from_ptr(ptr);
        mplace.meta = meta;

        MPlaceTy { mplace, layout, align: layout.align.abi }
    }

    #[inline]
    pub(crate) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
        if self.layout.is_unsized() {
            // We need to consult `meta` metadata
            match self.layout.ty.kind() {
                ty::Slice(..) | ty::Str => self.mplace.meta.unwrap_meta().to_target_usize(cx),
                _ => bug!("len not supported on unsized type {:?}", self.layout.ty),
            }
        } else {
            // Go through the layout. There are lots of types that support a length,
            // e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
            match self.layout.fields {
                abi::FieldsShape::Array { count, .. } => Ok(count),
                _ => bug!("len not supported on sized type {:?}", self.layout.ty),
            }
        }
    }
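
    // For example (illustration): a `[u8]` place reports its length from the `meta` field,
    // whereas a `[u8; 4]` or SIMD vector place reports the element count recorded in its
    // (sized) layout.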
}

// These are defined here because they produce a place.
impl<'tcx, Prov: Provenance> OpTy<'tcx, Prov> {
    #[inline(always)]
    pub fn as_mplace_or_imm(&self) -> Either<MPlaceTy<'tcx, Prov>, ImmTy<'tcx, Prov>> {
        match **self {
            Operand::Indirect(mplace) => {
                Left(MPlaceTy { mplace, layout: self.layout, align: self.align.unwrap() })
            }
            Operand::Immediate(imm) => Right(ImmTy::from_immediate(imm, self.layout)),
        }
    }

    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
        self.as_mplace_or_imm().left().unwrap_or_else(|| {
            bug!(
                "OpTy of type {} was immediate when it was expected to be an MPlace",
                self.layout.ty
            )
        })
    }
}

impl<'tcx, Prov: Provenance> PlaceTy<'tcx, Prov> {
    /// A place is either an mplace or some local.
    #[inline]
    pub fn as_mplace_or_local(&self) -> Either<MPlaceTy<'tcx, Prov>, (usize, mir::Local)> {
        match **self {
            Place::Ptr(mplace) => Left(MPlaceTy { mplace, layout: self.layout, align: self.align }),
            Place::Local { frame, local } => Right((frame, local)),
        }
    }

    #[inline(always)]
    #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980)
    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Prov> {
        self.as_mplace_or_local().left().unwrap_or_else(|| {
            bug!(
                "PlaceTy of type {} was a local when it was expected to be an MPlace",
                self.layout.ty
            )
        })
    }
}

// FIXME: Working around https://github.com/rust-lang/rust/issues/54385
impl<'mir, 'tcx: 'mir, Prov, M> InterpCx<'mir, 'tcx, M>
where
    Prov: Provenance + 'static,
    M: Machine<'mir, 'tcx, Provenance = Prov>,
{
    /// Take a value, which represents a (thin or wide) reference, and make it a place.
    /// Alignment is just based on the type. This is the inverse of `MemPlace::to_ref()`.
    ///
    /// Only call this if you are sure the place is "valid" (aligned and inbounds), or do not
    /// want to ever use the place for memory access!
    /// Generally prefer `deref_operand`.
    pub fn ref_to_mplace(
        &self,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let pointee_type =
            val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
        let layout = self.layout_of(pointee_type)?;
        let (ptr, meta) = match **val {
            Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
            Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta)),
            Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
        };

        let mplace = MemPlace { ptr: ptr.to_pointer(self)?, meta };
        // When deref'ing a pointer, the *static* alignment given by the type is what matters.
        let align = layout.align.abi;
        Ok(MPlaceTy { mplace, layout, align })
    }
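
    // For example (illustration): dereferencing a `&u32` value goes through the
    // `Immediate::Scalar` arm above (no metadata), while dereferencing a `&str` or `&[T]` goes
    // through the `Immediate::ScalarPair` arm, with the length becoming the place's metadata;
    // for `&dyn Trait`, the second component is the vtable pointer instead.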

    /// Take an operand, representing a pointer, and dereference it to a place.
    #[instrument(skip(self), level = "debug")]
    pub fn deref_operand(
        &self,
        src: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let val = self.read_immediate(src)?;
        trace!("deref to {} on {:?}", val.layout.ty, *val);

        if val.layout.ty.is_box() {
            bug!("dereferencing {:?}", val.layout.ty);
        }

        let mplace = self.ref_to_mplace(&val)?;
        self.check_mplace(mplace)?;
        Ok(mplace)
    }

    #[inline]
    pub(super) fn get_place_alloc(
        &self,
        place: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRef<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        assert!(place.layout.is_sized());
        assert!(!place.meta.has_meta());
        let size = place.layout.size;
        self.get_ptr_alloc(place.ptr, size, place.align)
    }

    #[inline]
    pub(super) fn get_place_alloc_mut(
        &mut self,
        place: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, Option<AllocRefMut<'_, 'tcx, M::Provenance, M::AllocExtra, M::Bytes>>>
    {
        assert!(place.layout.is_sized());
        assert!(!place.meta.has_meta());
        let size = place.layout.size;
        self.get_ptr_alloc_mut(place.ptr, size, place.align)
    }

    /// Check if this mplace is dereferenceable and sufficiently aligned.
    pub fn check_mplace(&self, mplace: MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
        let (size, align) = self
            .size_and_align_of_mplace(&mplace)?
            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
        assert!(mplace.align <= align, "dynamic alignment less strict than static one?");
        let align = if M::enforce_alignment(self).should_check() { align } else { Align::ONE };
        self.check_ptr_access_align(mplace.ptr, size, align, CheckInAllocMsg::DerefTest)?;
        Ok(())
    }

    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn mplace_to_simd(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
        // Basically we just transmute this place into an array following simd_size_and_type.
        // (Transmuting is okay since this is an in-memory place. We also double-check the size
        // stays the same.)
        let (len, e_ty) = mplace.layout.ty.simd_size_and_type(*self.tcx);
        let array = Ty::new_array(self.tcx.tcx, e_ty, len);
        let layout = self.layout_of(array)?;
        assert_eq!(layout.size, mplace.layout.size);
        Ok((MPlaceTy { layout, ..*mplace }, len))
    }
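
    // For example (illustration): a place whose type is a 4-lane `u32` SIMD vector is
    // reinterpreted as a `[u32; 4]` place of the same size, so that `place_index` can then
    // address the individual lanes.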

    /// Converts a repr(simd) place into a place where `place_index` accesses the SIMD elements.
    /// Also returns the number of elements.
    pub fn place_to_simd(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, u64)> {
        let mplace = self.force_allocation(place)?;
        self.mplace_to_simd(&mplace)
    }

    pub fn local_to_place(
        &self,
        frame: usize,
        local: mir::Local,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        let layout = self.layout_of_local(&self.stack()[frame], local, None)?;
        let place = Place::Local { frame, local };
        Ok(PlaceTy { place, layout, align: layout.align.abi })
    }

    /// Computes a place. You should only use this if you intend to write into this
    /// place; for reading, a more efficient alternative is `eval_place_to_op`.
    #[instrument(skip(self), level = "debug")]
    pub fn eval_place(
        &mut self,
        mir_place: mir::Place<'tcx>,
    ) -> InterpResult<'tcx, PlaceTy<'tcx, M::Provenance>> {
        let mut place = self.local_to_place(self.frame_idx(), mir_place.local)?;
        // Using `try_fold` turned out to be bad for performance, hence the loop.
        for elem in mir_place.projection.iter() {
            place = self.place_projection(&place, elem)?
        }

        trace!("{:?}", self.dump_place(place.place));
        // Sanity-check the type we ended up with.
        debug_assert!(
            mir_assign_valid_types(
                *self.tcx,
                self.param_env,
                self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
                    mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty
                )?)?,
                place.layout,
            ),
            "eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
            mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
            place.layout.ty,
        );
        Ok(place)
    }
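
    // For example (illustration): for a MIR place like `(*_1).0`, the loop above starts from
    // the place for local `_1` and then applies the `Deref` and `Field(0)` projection elements
    // in turn via `place_projection`.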

    /// Write an immediate to a place
    #[inline(always)]
    #[instrument(skip(self), level = "debug")]
    pub fn write_immediate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate_no_validate(src, dest)?;

        if M::enforce_validity(self, dest.layout) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&self.place_to_op(dest)?)?;
        }

        Ok(())
    }

    /// Write a scalar to a place
    #[inline(always)]
    pub fn write_scalar(
        &mut self,
        val: impl Into<Scalar<M::Provenance>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_immediate(Immediate::Scalar(val.into()), dest)
    }
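
    // A minimal usage sketch (illustrative; `mir_place` and the constant are placeholders):
    //
    //     let dest = self.eval_place(mir_place)?;
    //     self.write_scalar(Scalar::from_u32(42), &dest)?;
    //
    // i.e. callers evaluate a MIR place to a `PlaceTy` and then store an immediate into it.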

    /// Write a pointer to a place
    #[inline(always)]
    pub fn write_pointer(
        &mut self,
        ptr: impl Into<Pointer<Option<M::Provenance>>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
    }

    /// Write an immediate to a place.
    /// If you use this you are responsible for validating that things got copied at the
    /// right type.
    fn write_immediate_no_validate(
        &mut self,
        src: Immediate<M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        assert!(dest.layout.is_sized(), "Cannot write unsized data");
        trace!("write_immediate: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        // See if we can avoid an allocation. This is the counterpart to `read_immediate_raw`,
        // but not factored as a separate function.
        let mplace = match dest.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    Operand::Immediate(local) => {
                        // Local can be updated in-place.
                        *local = src;
                        return Ok(());
                    }
                    Operand::Indirect(mplace) => {
                        // The local is in memory, go on below.
                        *mplace
                    }
                }
            }
            Place::Ptr(mplace) => mplace, // already referring to memory
        };

        // This is already in memory, write there.
        self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
    }

    /// Write an immediate to memory.
    /// If you use this you are responsible for validating that things got copied at the
    /// right layout.
    fn write_immediate_to_mplace_no_validate(
        &mut self,
        value: Immediate<M::Provenance>,
        layout: TyAndLayout<'tcx>,
        align: Align,
        dest: MemPlace<M::Provenance>,
    ) -> InterpResult<'tcx> {
        // Note that it is really important that the type here is the right one, and matches the
        // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
        // to handle padding properly, which is only correct if we never look at this data with the
        // wrong type.

        let tcx = *self.tcx;
        let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
            // zero-sized access
            return Ok(());
        };

        match value {
            Immediate::Scalar(scalar) => {
                let Abi::Scalar(s) = layout.abi else { span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
                    )
                };
                let size = s.size(&tcx);
                assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
            }
            Immediate::ScalarPair(a_val, b_val) => {
                // We checked `ptr_align` above, so all fields will have the alignment they need.
                // We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
                // which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
                let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
                        self.cur_span(),
                        "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
                        layout
                    )
                };
                let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
                let b_offset = a_size.align_to(b.align(&tcx).abi);
                assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields

                // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
                // but that does not work: We could be a newtype around a pair, then the
                // fields do not match the `ScalarPair` components.

                alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
                alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
            }
            Immediate::Uninit => alloc.write_uninit(),
        }
    }
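
    // For example (illustration): for a `ScalarPair` layout whose components are a 1-byte and a
    // 4-byte scalar, the first value is written at offset 0 and the second at offset 4
    // (`a_size` rounded up to the second component's alignment), matching the offsets used when
    // the pair is later read back as an immediate.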

    pub fn write_uninit(&mut self, dest: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
        let mplace = match dest.as_mplace_or_local() {
            Left(mplace) => mplace,
            Right((frame, local)) => {
                match M::access_local_mut(self, frame, local)? {
                    Operand::Immediate(local) => {
                        *local = Immediate::Uninit;
                        return Ok(());
                    }
                    Operand::Indirect(mplace) => {
                        // The local is in memory, go on below.
                        MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
                    }
                }
            }
        };
        let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
            // Zero-sized access
            return Ok(());
        };
        alloc.write_uninit()?;
        Ok(())
    }

    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    #[inline(always)]
    #[instrument(skip(self), level = "debug")]
    pub fn copy_op(
        &mut self,
        src: &OpTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        self.copy_op_no_validate(src, dest, allow_transmute)?;

        if M::enforce_validity(self, dest.layout) {
            // Data got changed, better make sure it matches the type!
            self.validate_operand(&self.place_to_op(dest)?)?;
        }

        Ok(())
    }
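
    // A typical caller (sketch, simplified from how MIR assignments are interpreted;
    // `mir_place` and `operand` are placeholders for the MIR being executed):
    //
    //     let dest = self.eval_place(mir_place)?;
    //     let op = self.eval_operand(operand, Some(dest.layout))?;
    //     self.copy_op(&op, &dest, /*allow_transmute*/ false)?;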

    /// Copies the data from an operand to a place.
    /// `allow_transmute` indicates whether the layouts may disagree.
    /// Also, if you use this you are responsible for validating that things get copied at the
    /// right type.
    #[instrument(skip(self), level = "debug")]
    fn copy_op_no_validate(
        &mut self,
        src: &OpTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
        allow_transmute: bool,
    ) -> InterpResult<'tcx> {
        // We do NOT compare the types for equality, because well-typed code can
        // actually "transmute" `&mut T` to `&T` in an assignment without a cast.
        let layout_compat =
            mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
        if !allow_transmute && !layout_compat {
            span_bug!(
                self.cur_span(),
                "type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
                src.layout.ty,
                dest.layout.ty,
            );
        }

        // Let us see if the layout is simple so we take a shortcut,
        // avoid force_allocation.
        let src = match self.read_immediate_raw(src)? {
            Right(src_val) => {
                // FIXME(const_prop): Const-prop can possibly evaluate an
                // unsized copy operation when it thinks that the type is
                // actually sized, due to a trivially false where-clause
                // predicate like `where Self: Sized` with `Self = dyn Trait`.
                // See #102553 for an example of such a predicate.
                if src.layout.is_unsized() {
                    throw_inval!(SizeOfUnsizedType(src.layout.ty));
                }
                if dest.layout.is_unsized() {
                    throw_inval!(SizeOfUnsizedType(dest.layout.ty));
                }
                assert_eq!(src.layout.size, dest.layout.size);
                // Yay, we got a value that we can write directly.
                return if layout_compat {
                    self.write_immediate_no_validate(*src_val, dest)
                } else {
                    // This is tricky. The problematic case is `ScalarPair`: the `src_val` was
                    // loaded using the offsets defined by `src.layout`. When we put this back into
                    // the destination, we have to use the same offsets! So (a) we make sure we
                    // write back to memory, and (b) we use `dest` *with the source layout*.
                    let dest_mem = self.force_allocation(dest)?;
                    self.write_immediate_to_mplace_no_validate(
                        *src_val,
                        src.layout,
                        dest_mem.align,
                        *dest_mem,
                    )
                };
            }
            Left(mplace) => mplace,
        };
        // Slow path, this does not fit into an immediate. Just memcpy.
        trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);

        let dest = self.force_allocation(&dest)?;
        let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
            span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
        };
        if cfg!(debug_assertions) {
            let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
            assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
        } else {
            // As a cheap approximation, we compare the fixed parts of the size.
            assert_eq!(src.layout.size, dest.layout.size);
        }

        // Setting `nonoverlapping` here only has an effect when we don't hit the fast-path above,
        // but that should at least match what LLVM does where `memcpy` is also only used when the
        // type does not have Scalar/ScalarPair layout.
        // (Or as the `Assign` docs put it, assignments "not producing primitives" must be
        // non-overlapping.)
        self.mem_copy(
            src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ true,
        )
    }

    /// Ensures that a place is in memory, and returns where it is.
    /// If the place currently refers to a local that doesn't yet have a matching allocation,
    /// create such an allocation.
    /// This is essentially `force_to_memplace`.
    #[instrument(skip(self), level = "debug")]
    pub fn force_allocation(
        &mut self,
        place: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let mplace = match place.place {
            Place::Local { frame, local } => {
                match M::access_local_mut(self, frame, local)? {
                    &mut Operand::Immediate(local_val) => {
                        // We need to make an allocation.

                        // We need the layout of the local. We can NOT use the layout we got,
                        // that might e.g., be an inner field of a struct with `Scalar` layout,
                        // that has different alignment than the outer field.
                        let local_layout =
                            self.layout_of_local(&self.stack()[frame], local, None)?;
                        if local_layout.is_unsized() {
                            throw_unsup_format!("unsized locals are not supported");
                        }
                        let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
                        if !matches!(local_val, Immediate::Uninit) {
                            // Preserve old value. (As an optimization, we can skip this if it was uninit.)
                            // We don't have to validate as we can assume the local
                            // was already valid for its type.
                            self.write_immediate_to_mplace_no_validate(
                                local_val,
                                local_layout,
                                local_layout.align.abi,
                                mplace,
                            )?;
                        }
                        // Now we can call `access_mut` again, asserting it goes well,
                        // and actually overwrite things.
                        *M::access_local_mut(self, frame, local).unwrap() =
                            Operand::Indirect(mplace);
                        mplace
                    }
                    &mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
                }
            }
            Place::Ptr(mplace) => mplace,
        };
        // Return with the original layout, so that the caller can go on
        Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
    }
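
    // Note (illustration): after this runs on a `Place::Local` that was still stored as an
    // immediate, the local is demoted to `Operand::Indirect`, so all later reads and writes of
    // that local go through the newly created stack allocation. This is needed, for instance,
    // when the evaluated program takes a reference to the local.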

    pub fn allocate(
        &mut self,
        layout: TyAndLayout<'tcx>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        assert!(layout.is_sized());
        let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }

    /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
    pub fn allocate_str(
        &mut self,
        str: &str,
        kind: MemoryKind<M::MemoryKind>,
        mutbl: Mutability,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        let ptr = self.allocate_bytes_ptr(str.as_bytes(), Align::ONE, kind, mutbl)?;
        let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
        let mplace = MemPlace { ptr: ptr.into(), meta: MemPlaceMeta::Meta(meta) };

        let ty = Ty::new_ref(
            self.tcx.tcx,
            self.tcx.lifetimes.re_static,
            ty::TypeAndMut { ty: self.tcx.types.str_, mutbl },
        );
        let layout = self.layout_of(ty).unwrap();
        Ok(MPlaceTy { mplace, layout, align: layout.align.abi })
    }
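
    // For example (illustration): `allocate_str("hi", MemoryKind::Stack, Mutability::Not)`
    // creates a 2-byte, 1-aligned allocation containing `b"hi"` and returns a wide place whose
    // metadata is the length 2.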

    /// Writes the aggregate to the destination.
    #[instrument(skip(self), level = "trace")]
    pub fn write_aggregate(
        &mut self,
        kind: &mir::AggregateKind<'tcx>,
        operands: &IndexSlice<FieldIdx, mir::Operand<'tcx>>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        self.write_uninit(&dest)?;
        let (variant_index, variant_dest, active_field_index) = match *kind {
            mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                let variant_dest = self.place_downcast(&dest, variant_index)?;
                (variant_index, variant_dest, active_field_index)
            }
            _ => (FIRST_VARIANT, dest.clone(), None),
        };
        if active_field_index.is_some() {
            assert_eq!(operands.len(), 1);
        }
        for (field_index, operand) in operands.iter_enumerated() {
            let field_index = active_field_index.unwrap_or(field_index);
            let field_dest = self.place_field(&variant_dest, field_index.as_usize())?;
            let op = self.eval_operand(operand, Some(field_dest.layout))?;
            self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
        }
        self.write_discriminant(variant_index, &dest)
    }
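
    // Sketch of the flow (illustration): for an enum aggregate such as `Some(x)`, the
    // destination is first marked uninit, then downcast to the `Some` variant so the payload
    // field can be filled with `copy_op`, and finally the discriminant is written last.
    // `active_field_index` is `Some` for union aggregates, where exactly one field is
    // initialized.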

    pub fn raw_const_to_mplace(
        &self,
        raw: ConstAlloc<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> {
        // This must be an allocation in `tcx`
        let _ = self.tcx.global_alloc(raw.alloc_id);
        let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
        let layout = self.layout_of(raw.ty)?;
        Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
    }

    /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
    /// Also returns the vtable.
    pub(super) fn unpack_dyn_trait(
        &self,
        mplace: &MPlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
        assert!(
            matches!(mplace.layout.ty.kind(), ty::Dynamic(_, _, ty::Dyn)),
            "`unpack_dyn_trait` only makes sense on `dyn Trait` types"
        );
        let vtable = mplace.meta.unwrap_meta().to_pointer(self)?;
        let (ty, _) = self.get_ptr_vtable(vtable)?;
        let layout = self.layout_of(ty)?;

        let mplace = MPlaceTy {
            mplace: MemPlace { meta: MemPlaceMeta::None, ..**mplace },
            layout,
            align: layout.align.abi,
        };
        Ok((mplace, vtable))
    }
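
    // For example (illustration): given a place of type `dyn std::fmt::Debug` whose metadata is
    // the vtable for `u32`, this returns a place of type `u32` (with no metadata) together with
    // that vtable pointer.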

    /// Turn an operand with a `dyn* Trait` type into an operand with the actual dynamic type.
    /// Also returns the vtable.
    pub(super) fn unpack_dyn_star(
        &self,
        op: &OpTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (OpTy<'tcx, M::Provenance>, Pointer<Option<M::Provenance>>)> {
        assert!(
            matches!(op.layout.ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
            "`unpack_dyn_star` only makes sense on `dyn*` types"
        );
        let data = self.operand_field(&op, 0)?;
        let vtable = self.operand_field(&op, 1)?;
        let vtable = self.read_pointer(&vtable)?;
        let (ty, _) = self.get_ptr_vtable(vtable)?;
        let layout = self.layout_of(ty)?;
        let data = data.transmute(layout);
        Ok((data, vtable))
    }
}

// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
    use super::*;
    use rustc_data_structures::static_assert_size;
    // tidy-alphabetical-start
    static_assert_size!(MemPlace, 40);
    static_assert_size!(MemPlaceMeta, 24);
    static_assert_size!(MPlaceTy<'_>, 64);
    static_assert_size!(Place, 40);
    static_assert_size!(PlaceTy<'_>, 64);
    // tidy-alphabetical-end
}