//! The virtual memory representation of the MIR interpreter.

mod init_mask;
mod provenance_map;

use std::borrow::Cow;
use std::fmt;
use std::hash;
use std::hash::Hash;
use std::ops::{Deref, DerefMut, Range};
use std::ptr;

use either::{Left, Right};

use rustc_ast::Mutability;
use rustc_data_structures::intern::Interned;
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Align, HasDataLayout, Size};

use super::{
    read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
    ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
    UnsupportedOpInfo,
};
use crate::ty;
use init_mask::*;
use provenance_map::*;

pub use init_mask::{InitChunk, InitChunkIter};

/// Functionality required for the bytes of an `Allocation`.
pub trait AllocBytes:
    Clone + fmt::Debug + Eq + PartialEq + Hash + Deref<Target = [u8]> + DerefMut<Target = [u8]>
{
    /// Adjust the bytes to the specified alignment -- by default, this is a no-op.
    fn adjust_to_align(self, _align: Align) -> Self;

    /// Create an `AllocBytes` from a slice of `u8`.
    fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align) -> Self;

    /// Create a zeroed `AllocBytes` of the specified size and alignment;
    /// returns `None` if allocating the memory fails.
    fn zeroed(size: Size, _align: Align) -> Option<Self>;
}

// Default `bytes` for `Allocation` is a `Box<[u8]>`.
impl AllocBytes for Box<[u8]> {
    fn adjust_to_align(self, _align: Align) -> Self {
        self
    }

    fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, _align: Align) -> Self {
        Box::<[u8]>::from(slice.into())
    }

    fn zeroed(size: Size, _align: Align) -> Option<Self> {
        let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).ok()?;
        // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
        let bytes = unsafe { bytes.assume_init() };
        Some(bytes)
    }
}

/// This type represents an Allocation in the Miri/CTFE core engine.
///
/// Its public API is rather low-level, working directly with allocation offsets and a custom error
/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
/// module provides higher-level access.
// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
// hashed (see the `Hash` impl below for more details), so the impl is not derived.
#[derive(Clone, Eq, PartialEq, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub struct Allocation<Prov: Provenance = AllocId, Extra = (), Bytes = Box<[u8]>> {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    bytes: Bytes,
    /// Maps from byte addresses to extra provenance data for each pointer.
    /// Only the first byte of a pointer is inserted into the map; i.e.,
    /// every entry in this map applies to `pointer_size` consecutive bytes starting
    /// at the given offset.
    provenance: ProvenanceMap<Prov>,
    /// Denotes which part of this allocation is initialized.
    init_mask: InitMask,
    /// The alignment of the allocation to detect unaligned reads.
    /// (`Align` guarantees that this is a power of two.)
    pub align: Align,
    /// `true` if the allocation is mutable.
    /// Also used by codegen to determine if a static should be put into mutable memory,
    /// which happens for `static mut` and `static` with interior mutability.
    pub mutability: Mutability,
    /// Extra state for the machine.
    pub extra: Extra,
}

/// This is the maximum size we will hash at a time, when interning an `Allocation` and its
/// `InitMask`. Note, we hash that amount of bytes twice: at the start, and at the end of a buffer.
/// Used when these two structures are large: we only partially hash the larger fields in that
/// situation. See the comment at the top of their respective `Hash` impl for more details.
const MAX_BYTES_TO_HASH: usize = 64;

/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
/// Otherwise, it will be partially hashed in 2 slices, requiring at least 2 `MAX_BYTES_TO_HASH`
/// bytes.
const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;

// Const allocations are only hashed for interning. However, they can be large, making the hashing
// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
// big buffers like the actual bytes of allocation. We can partially hash some fields when they're
// large.
impl hash::Hash for Allocation {
    fn hash<H: hash::Hasher>(&self, state: &mut H) {
        let Self {
            bytes,
            provenance,
            init_mask,
            align,
            mutability,
            extra: (), // don't bother hashing ()
        } = self;

        // Partially hash the `bytes` buffer when it is large. To limit collisions with common
        // prefixes and suffixes, we hash the length and some slices of the buffer.
        let byte_count = bytes.len();
        if byte_count > MAX_HASHED_BUFFER_LEN {
            // Hash the buffer's length.
            byte_count.hash(state);

            // And its head and tail.
            bytes[..MAX_BYTES_TO_HASH].hash(state);
            bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
        } else {
            bytes.hash(state);
        }

        // Hash the other fields as usual.
        provenance.hash(state);
        init_mask.hash(state);
        align.hash(state);
        mutability.hash(state);
    }
}

/// Interned types generally have an `Outer` type and an `Inner` type, where
/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
/// outer type and `TyKind` is its inner type.
///
/// Here things are different because only const allocations are interned. This
/// means that both the inner type (`Allocation`) and the outer type
/// (`ConstAllocation`) are used quite a bit.
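/// For example, code that receives a `ConstAllocation<'tcx>` reaches the underlying data through
/// `inner()` (an illustrative sketch; `const_alloc_len` is a hypothetical helper):
/// ```ignore (illustrative)
/// fn const_alloc_len<'tcx>(alloc: ConstAllocation<'tcx>) -> usize {
///     alloc.inner().len()
/// }
/// ```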
#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable)]
#[rustc_pass_by_value]
pub struct ConstAllocation<'tcx>(pub Interned<'tcx, Allocation>);

impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The debug representation of this is very verbose and basically useless,
        // so don't print it.
        write!(f, "ConstAllocation {{ .. }}")
    }
}

impl<'tcx> ConstAllocation<'tcx> {
    pub fn inner(self) -> &'tcx Allocation {
        self.0.0
    }
}

/// We have our own error type that does not know about the `AllocId`; that information
/// is added when converting to `InterpError`.
#[derive(Debug)]
pub enum AllocError {
    /// A scalar had the wrong size.
    ScalarSizeMismatch(ScalarSizeMismatch),
    /// Encountered a pointer where we needed raw bytes.
    ReadPointerAsBytes,
    /// Partially overwriting a pointer.
    PartialPointerOverwrite(Size),
    /// Partially copying a pointer.
    PartialPointerCopy(Size),
    /// Using uninitialized data where it is not allowed.
    InvalidUninitBytes(Option<UninitBytesAccess>),
}
pub type AllocResult<T = ()> = Result<T, AllocError>;

impl From<ScalarSizeMismatch> for AllocError {
    fn from(s: ScalarSizeMismatch) -> Self {
        AllocError::ScalarSizeMismatch(s)
    }
}

impl AllocError {
    pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
        use AllocError::*;
        match self {
            ScalarSizeMismatch(s) => {
                InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
            }
            ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
            PartialPointerOverwrite(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
            ),
            PartialPointerCopy(offset) => InterpError::Unsupported(
                UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)),
            ),
            InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
                UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
            ),
        }
    }
}

/// The information that makes up a memory access: offset and size.
#[derive(Copy, Clone)]
pub struct AllocRange {
    pub start: Size,
    pub size: Size,
}

impl fmt::Debug for AllocRange {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
    }
}

/// Free-standing constructor for less syntactic overhead.
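/// (Illustrative sketch: a range covering bytes 4..12 of an allocation.)
/// ```ignore (illustrative)
/// let range = alloc_range(Size::from_bytes(4), Size::from_bytes(8));
/// assert_eq!(range.end(), Size::from_bytes(12));
/// ```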
#[inline(always)]
pub fn alloc_range(start: Size, size: Size) -> AllocRange {
    AllocRange { start, size }
}

impl From<Range<Size>> for AllocRange {
    #[inline]
    fn from(r: Range<Size>) -> Self {
        alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
    }
}

impl From<Range<usize>> for AllocRange {
    #[inline]
    fn from(r: Range<usize>) -> Self {
        AllocRange::from(Size::from_bytes(r.start)..Size::from_bytes(r.end))
    }
}

impl AllocRange {
    #[inline(always)]
    pub fn end(self) -> Size {
        self.start + self.size // This does overflow checking.
    }

    /// Returns the `subrange` within this range; panics if it is not a subrange.
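    /// (Illustrative sketch: bytes 2..6 *within* the range 4..12 form the absolute range 6..10.)
    /// ```ignore (illustrative)
    /// let outer = alloc_range(Size::from_bytes(4), Size::from_bytes(8));
    /// let sub = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(4)));
    /// assert_eq!(sub.start, Size::from_bytes(6));
    /// assert_eq!(sub.size, Size::from_bytes(4));
    /// ```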
    #[inline]
    pub fn subrange(self, subrange: AllocRange) -> AllocRange {
        let sub_start = self.start + subrange.start;
        let range = alloc_range(sub_start, subrange.size);
        assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
        range
    }
}

// The constructors are all without extra; the extra gets added by a machine hook later.
impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
    /// Creates an allocation from an existing `Bytes` value - this is needed for miri FFI support
    pub fn from_raw_bytes(bytes: Bytes, align: Align, mutability: Mutability) -> Self {
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    /// Creates an allocation initialized by the given bytes
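    /// (Illustrative sketch, assuming the default `Prov = AllocId` and `Bytes = Box<[u8]>`.)
    /// ```ignore (illustrative)
    /// let alloc =
    ///     Allocation::<AllocId>::from_bytes(&[1u8, 2, 3][..], Align::ONE, Mutability::Not);
    /// assert_eq!(alloc.len(), 3);
    /// ```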
    pub fn from_bytes<'a>(
        slice: impl Into<Cow<'a, [u8]>>,
        align: Align,
        mutability: Mutability,
    ) -> Self {
        let bytes = Bytes::from_bytes(slice, align);
        let size = Size::from_bytes(bytes.len());
        Self {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, true),
            align,
            mutability,
            extra: (),
        }
    }

    pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
        Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
    }

    fn uninit_inner<R>(size: Size, align: Align, fail: impl FnOnce() -> R) -> Result<Self, R> {
        // This results in an error that can happen non-deterministically, since the memory
        // available to the compiler can change between runs. Normally queries are always
        // deterministic. However, we can be non-deterministic here because all uses of const
        // evaluation (including ConstProp!) will make compilation fail (via hard error
        // or ICE) upon encountering a `MemoryExhausted` error.
        let bytes = Bytes::zeroed(size, align).ok_or_else(fail)?;

        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::new(),
            init_mask: InitMask::new(size, false),
            align,
            mutability: Mutability::Mut,
            extra: (),
        })
    }

    /// Try to create an Allocation of `size` bytes, failing if there is not enough memory
    /// available to the compiler to do so.
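    /// (Illustrative sketch: 16 uninitialized bytes at alignment 1.)
    /// ```ignore (illustrative)
    /// let alloc = Allocation::<AllocId>::try_uninit(Size::from_bytes(16), Align::ONE)?;
    /// assert_eq!(alloc.size(), Size::from_bytes(16));
    /// ```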
    pub fn try_uninit<'tcx>(size: Size, align: Align) -> InterpResult<'tcx, Self> {
        Self::uninit_inner(size, align, || {
            ty::tls::with(|tcx| {
                tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
            });
            InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted).into()
        })
    }

    /// Create an Allocation of `size` bytes, panicking if there is not enough memory
    /// available to the compiler to do so.
    pub fn uninit(size: Size, align: Align) -> Self {
        match Self::uninit_inner(size, align, || {
            panic!("Allocation::uninit called with panic_on_fail had allocation failure");
        }) {
            Ok(x) => x,
            Err(x) => x,
        }
    }
}

impl<Bytes: AllocBytes> Allocation<AllocId, (), Bytes> {
    /// Adjust allocation from the ones in tcx to a custom Machine instance
    /// with a different Provenance and Extra type.
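    /// (Illustrative sketch: an "identity" adjustment that keeps `AllocId` provenance and attaches
    /// no extra state; `alloc` and `cx` are hypothetical locals, `cx` being any `HasDataLayout`.)
    /// ```ignore (illustrative)
    /// let adjusted: Allocation<AllocId, ()> =
    ///     alloc.adjust_from_tcx(cx, (), |ptr| Ok::<_, std::convert::Infallible>(ptr)).unwrap();
    /// ```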
    pub fn adjust_from_tcx<Prov: Provenance, Extra, Err>(
        self,
        cx: &impl HasDataLayout,
        extra: Extra,
        mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
    ) -> Result<Allocation<Prov, Extra, Bytes>, Err> {
        // Compute new pointer provenance, which also adjusts the bytes, and realign the pointer if
        // necessary.
        let mut bytes = self.bytes.adjust_to_align(self.align);

        let mut new_provenance = Vec::with_capacity(self.provenance.ptrs().len());
        let ptr_size = cx.data_layout().pointer_size.bytes_usize();
        let endian = cx.data_layout().endian;
        for &(offset, alloc_id) in self.provenance.ptrs().iter() {
            let idx = offset.bytes_usize();
            let ptr_bytes = &mut bytes[idx..idx + ptr_size];
            let bits = read_target_uint(endian, ptr_bytes).unwrap();
            let (ptr_prov, ptr_offset) =
                adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
            write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
            new_provenance.push((offset, ptr_prov));
        }
        // Create allocation.
        Ok(Allocation {
            bytes,
            provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
            init_mask: self.init_mask,
            align: self.align,
            mutability: self.mutability,
            extra,
        })
    }
}

/// Raw accessors. Provide access to otherwise private bytes.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    pub fn len(&self) -> usize {
        self.bytes.len()
    }

    pub fn size(&self) -> Size {
        Size::from_bytes(self.len())
    }

    /// Looks at a slice which may contain uninitialized bytes or provenance. This differs
    /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the
    /// edges) at all.
    /// This must not be used for reads affecting the interpreter execution.
    pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
        &self.bytes[range]
    }

    /// Returns the mask indicating which bytes are initialized.
    pub fn init_mask(&self) -> &InitMask {
        &self.init_mask
    }

    /// Returns the provenance map.
    pub fn provenance(&self) -> &ProvenanceMap<Prov> {
        &self.provenance
    }
}

/// Byte accessors.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    pub fn base_addr(&self) -> *const u8 {
        self.bytes.as_ptr()
    }

    /// This is the entirely abstraction-violating way to just grab the raw bytes without
    /// caring about provenance or initialization.
    ///
    /// This function also guarantees that the resulting pointer will remain stable
    /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
    /// on that.
    #[inline]
    pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] {
        &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
    }

    /// Checks that these bytes are initialized, and then strips provenance (if possible) and
    /// returns them.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
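    /// (Illustrative sketch: borrowing the first 4 bytes as plain data; `alloc` and `cx` are
    /// hypothetical locals, `cx` being any `HasDataLayout`.)
    /// ```ignore (illustrative)
    /// let data: &[u8] =
    ///     alloc.get_bytes_strip_provenance(cx, alloc_range(Size::ZERO, Size::from_bytes(4)))?;
    /// ```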
    #[inline]
    pub fn get_bytes_strip_provenance(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&[u8]> {
        self.init_mask.is_range_initialized(range).map_err(|uninit_range| {
            AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
                access: range,
                uninit: uninit_range,
            }))
        })?;
        if !Prov::OFFSET_IS_ADDR {
            if !self.provenance.range_empty(range, cx) {
                return Err(AllocError::ReadPointerAsBytes);
            }
        }
        Ok(self.get_bytes_unchecked(range))
    }

    /// Just calling this already marks everything as defined and removes provenance,
    /// so be sure to actually put data there!
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
    /// on `InterpCx` instead.
    pub fn get_bytes_mut(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<&mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
    }

    /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
    pub fn get_bytes_mut_ptr(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
    ) -> AllocResult<*mut [u8]> {
        self.mark_init(range, true);
        self.provenance.clear(range, cx)?;

        assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
        let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
        let len = range.end().bytes_usize() - range.start.bytes_usize();
        Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
    }
}

/// Reading and writing.
impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> {
    /// Sets the init bit for the given range.
    fn mark_init(&mut self, range: AllocRange, is_init: bool) {
        if range.size.bytes() == 0 {
            return;
        }
        assert!(self.mutability == Mutability::Mut);
        self.init_mask.set_range(range, is_init);
    }

    /// Reads a *non-ZST* scalar.
    ///
    /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
    /// supports that) provenance is entirely ignored.
    ///
    /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
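    /// (Illustrative sketch: reading a pointer-sized value at offset 0, ignoring provenance;
    /// `alloc` and `cx` are hypothetical locals, `cx` being any `HasDataLayout`.)
    /// ```ignore (illustrative)
    /// let size = cx.data_layout().pointer_size;
    /// let val =
    ///     alloc.read_scalar(cx, alloc_range(Size::ZERO, size), /* read_provenance */ false)?;
    /// ```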
    pub fn read_scalar(
        &self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        read_provenance: bool,
    ) -> AllocResult<Scalar<Prov>> {
        // First and foremost, if anything is uninit, bail.
        if self.init_mask.is_range_initialized(range).is_err() {
            return Err(AllocError::InvalidUninitBytes(None));
        }

        // Get the integer part of the result. We HAVE TO check provenance before returning this!
        let bytes = self.get_bytes_unchecked(range);
        let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();

        if read_provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);

            // When reading data with provenance, the easy case is finding provenance exactly where we
            // are reading, then we can put data and provenance back together and return that.
            if let Some(prov) = self.provenance.get_ptr(range.start) {
                // Now we can return the bits, with their appropriate provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_pointer(ptr, cx));
            }

            // If we can work on pointers byte-wise, join the byte-wise provenances.
            if Prov::OFFSET_IS_ADDR {
                let mut prov = self.provenance.get(range.start, cx);
                for offset in Size::from_bytes(1)..range.size {
                    let this_prov = self.provenance.get(range.start + offset, cx);
                    prov = Prov::join(prov, this_prov);
                }
                // Now use this provenance.
                let ptr = Pointer::new(prov, Size::from_bytes(bits));
                return Ok(Scalar::from_maybe_pointer(ptr, cx));
            }
        } else {
            // We are *not* reading a pointer.
            // If we can just ignore provenance, do exactly that.
            if Prov::OFFSET_IS_ADDR {
                // We just strip provenance.
                return Ok(Scalar::from_uint(bits, range.size));
            }
        }

        // Fallback path for when we cannot treat provenance bytewise or ignore it.
        assert!(!Prov::OFFSET_IS_ADDR);
        if !self.provenance.range_empty(range, cx) {
            return Err(AllocError::ReadPointerAsBytes);
        }
        // There is no provenance, we can just return the bits.
        Ok(Scalar::from_uint(bits, range.size))
    }

    /// Writes a *non-ZST* scalar.
    ///
    /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
    /// for ZSTness anyway due to integer pointers being valid for ZSTs.
    ///
    /// It is the caller's responsibility to check bounds and alignment beforehand.
    /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
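    /// (Illustrative sketch: writing the integer 42 into the first 4 bytes; `alloc` and `cx` are
    /// hypothetical locals, `cx` being any `HasDataLayout`.)
    /// ```ignore (illustrative)
    /// let range = alloc_range(Size::ZERO, Size::from_bytes(4));
    /// alloc.write_scalar(cx, range, Scalar::from_uint(42u32, range.size))?;
    /// ```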
    pub fn write_scalar(
        &mut self,
        cx: &impl HasDataLayout,
        range: AllocRange,
        val: Scalar<Prov>,
    ) -> AllocResult {
        assert!(self.mutability == Mutability::Mut);

        // `to_bits_or_ptr_internal` is the right method because we just want to store this data
        // as-is into memory.
        let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
            Right(ptr) => {
                let (provenance, offset) = ptr.into_parts();
                (u128::from(offset.bytes()), Some(provenance))
            }
            Left(data) => (data, None),
        };

        let endian = cx.data_layout().endian;
        let dst = self.get_bytes_mut(cx, range)?;
        write_target_uint(endian, dst, bytes).unwrap();

        // See if we have to also store some provenance.
        if let Some(provenance) = provenance {
            assert_eq!(range.size, cx.data_layout().pointer_size);
            self.provenance.insert_ptr(range.start, provenance, cx);
        }

        Ok(())
    }

    /// Write "uninit" to the given memory range.
    pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
        self.mark_init(range, false);
        self.provenance.clear(range, cx)?;
        return Ok(());
    }

    /// Applies a previously prepared provenance copy.
    /// The affected range, as defined in the parameters to `provenance().prepare_copy`, is expected
    /// to be clear of provenance.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn provenance_apply_copy(&mut self, copy: ProvenanceCopy<Prov>) {
        self.provenance.apply_copy(copy)
    }

    /// Applies a previously prepared copy of the init mask.
    ///
    /// This is dangerous to use as it can violate internal `Allocation` invariants!
    /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
    pub fn init_mask_apply_copy(&mut self, copy: InitCopy, range: AllocRange, repeat: u64) {
        self.init_mask.apply_copy(copy, range, repeat)
    }
}