use rustc_hir::def::DefKind;
use rustc_hir::{LangItem, CRATE_HIR_ID};
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
use std::borrow::Borrow;
use std::hash::Hash;
use std::ops::ControlFlow;

use rustc_data_structures::fx::FxIndexMap;
use rustc_data_structures::fx::IndexEntry;
use std::fmt;

use rustc_ast::Mutability;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::AssertMessage;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;

use crate::errors::{LongRunning, LongRunningWarn};
use crate::interpret::{
    self, compile_time_machine, AllocId, ConstAllocation, FnVal, Frame, ImmTy, InterpCx,
    InterpResult, OpTy, PlaceTy, Pointer, Scalar,
};
use crate::{errors, fluent_generated as fluent};

use super::error::*;

/// When hitting this many interpreted terminators we emit a deny-by-default lint
/// that notifies the user that their constant takes a long time to evaluate. If that's
/// what they intended, they can just allow the lint.
const LINT_TERMINATOR_LIMIT: usize = 2_000_000;
/// The limit used by `-Z tiny-const-eval-limit`. This smaller limit is useful for internal
/// tests that should not have to run for 30 seconds or more just to exhibit the behaviour.
const TINY_LINT_TERMINATOR_LIMIT: usize = 20;
/// After this many interpreted terminators, we start emitting progress indicators at every
/// power of two of interpreted terminators.
const PROGRESS_INDICATOR_START: usize = 4_000_000;
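
// Illustrative sketch (not part of the machine itself): a constant like the hypothetical one
// below executes far more than `LINT_TERMINATOR_LIMIT` terminators and would therefore trip
// the `long_running_const_eval` lint while it evaluates.
//
//     const _SLOW: u64 = {
//         let mut i = 0_u64;
//         while i < 100_000_000 {
//             i += 1;
//         }
//         i
//     };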

/// Extra machine state for CTFE, and the Machine instance
pub struct CompileTimeInterpreter<'mir, 'tcx> {
    /// The number of terminators that have been evaluated.
    ///
    /// This is used to produce lints informing the user that the compiler is not stuck.
    /// Set to `usize::MAX` to never report anything.
    pub(super) num_evaluated_steps: usize,

    /// The virtual call stack.
    pub(super) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,

    /// We need to make sure consts never point to anything mutable, even recursively. That is
    /// relied on for pattern matching on consts with references.
    /// To achieve this, two pieces have to work together:
    /// * Interning makes everything outside of statics immutable.
    /// * Pointers to allocations inside of statics can never leak outside of the static to a
    ///   non-static global.
    /// This boolean here controls the second part.
    pub(super) can_access_statics: CanAccessStatics,

    /// Whether to check alignment during evaluation.
    pub(super) check_alignment: CheckAlignment,
}

#[derive(Copy, Clone)]
pub enum CheckAlignment {
    /// Ignore alignment when following relocations.
    /// This is mainly used in interning.
    No,
    /// Hard error when dereferencing a misaligned pointer.
    Error,
    /// Emit a future incompat lint when dereferencing a misaligned pointer.
    FutureIncompat,
}

impl CheckAlignment {
    pub fn should_check(&self) -> bool {
        match self {
            CheckAlignment::No => false,
            CheckAlignment::Error | CheckAlignment::FutureIncompat => true,
        }
    }
}

#[derive(Copy, Clone, PartialEq)]
pub(crate) enum CanAccessStatics {
    No,
    Yes,
}

impl From<bool> for CanAccessStatics {
    fn from(value: bool) -> Self {
        if value { Self::Yes } else { Self::No }
    }
}

impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
    pub(crate) fn new(
        can_access_statics: CanAccessStatics,
        check_alignment: CheckAlignment,
    ) -> Self {
        CompileTimeInterpreter {
            num_evaluated_steps: 0,
            stack: Vec::new(),
            can_access_statics,
            check_alignment,
        }
    }
}

impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxIndexMap<K, V> {
    #[inline(always)]
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxIndexMap::contains_key(self, k)
    }

    #[inline(always)]
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        FxIndexMap::insert(self, k, v)
    }

    #[inline(always)]
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        FxIndexMap::remove(self, k)
    }

    #[inline(always)]
    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
    }

    #[inline(always)]
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        match self.get(&k) {
            Some(v) => Ok(v),
            None => {
                vacant()?;
                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
            }
        }
    }

    #[inline(always)]
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
        match self.entry(k) {
            IndexEntry::Occupied(e) => Ok(e.into_mut()),
            IndexEntry::Vacant(e) => {
                let v = vacant()?;
                Ok(e.insert(v))
            }
        }
    }
}

pub(crate) type CompileTimeEvalContext<'mir, 'tcx> =
    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MemoryKind {
    Heap,
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Heap => write!(f, "heap allocation"),
        }
    }
}

impl interpret::MayLeak for MemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Heap => false,
        }
    }
}

impl interpret::MayLeak for ! {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // `self` is uninhabited
        self
    }
}

impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
    /// "Intercept" a function call, because we have something special to do for it.
    /// All `#[rustc_do_not_const_check]` functions should be hooked here.
    /// If this returns `Some` function (which may be `instance` itself or a different function
    /// with compatible arguments), then evaluation should continue with that function.
    /// If this returns `None`, the function call has already been handled and the function has
    /// returned.
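    ///
    /// A rough sketch of the most common path through this hook: a `const` containing
    /// `panic!("{}", x)` ends up calling the `panic_fmt` lang item, which this hook redirects
    /// to `const_panic_fmt` so the message can be formatted at compile time (the example call
    /// site is hypothetical; see the body below).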
    fn hook_special_const_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        let def_id = instance.def_id();

        if Some(def_id) == self.tcx.lang_items().panic_display()
            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
        {
            // &str or &&str
            assert!(args.len() == 1);

            let mut msg_place = self.deref_operand(&args[0])?;
            while msg_place.layout.ty.is_ref() {
                msg_place = self.deref_operand(&msg_place.into())?;
            }

            let msg = Symbol::intern(self.read_str(&msg_place)?);
            let span = self.find_closest_untracked_caller_location();
            let (file, line, col) = self.location_triple_for_span(span);
            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
        } else if Some(def_id) == self.tcx.lang_items().panic_fmt() {
            // For panic_fmt, call const_panic_fmt instead.
            let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
            let new_instance = ty::Instance::resolve(
                *self.tcx,
                ty::ParamEnv::reveal_all(),
                const_def_id,
                instance.substs,
            )
            .unwrap()
            .unwrap();

            return Ok(Some(new_instance));
        } else if Some(def_id) == self.tcx.lang_items().align_offset_fn() {
            // For align_offset, we replace the function call if the pointer has no address.
            match self.align_offset(instance, args, dest, ret)? {
                ControlFlow::Continue(()) => return Ok(Some(instance)),
                ControlFlow::Break(()) => return Ok(None),
            }
        }
        Ok(Some(instance))
    }

    /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
    /// may not have an address.
    ///
    /// If `ptr` does have a known address, then we return `Continue(())` and the function call should
    /// proceed as normal.
    ///
    /// If `ptr` doesn't have an address, but its underlying allocation's alignment is at most
    /// `target_align`, then we call the function again with a dummy address relative to the
    /// allocation.
    ///
    /// If `ptr` doesn't have an address and `target_align` is stricter than the underlying
    /// allocation's alignment, then we return `usize::MAX` immediately.
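    ///
    /// Rough sketch of the last case: for a pointer into a 1-aligned allocation with no known
    /// address, `align_offset(ptr, 8)` evaluates to `usize::MAX` here, because 8 is stricter
    /// than the allocation's alignment.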
    fn align_offset(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, ControlFlow<()>> {
        assert_eq!(args.len(), 2);

        let ptr = self.read_pointer(&args[0])?;
        let target_align = self.read_scalar(&args[1])?.to_target_usize(self)?;

        if !target_align.is_power_of_two() {
            throw_ub_custom!(
                fluent::const_eval_align_offset_invalid_align,
                target_align = target_align,
            );
        }

        match self.ptr_try_get_alloc_id(ptr) {
            Ok((alloc_id, offset, _extra)) => {
                let (_size, alloc_align, _kind) = self.get_alloc_info(alloc_id);

                if target_align <= alloc_align.bytes() {
                    // Extract the address relative to the allocation base that is definitely
                    // sufficiently aligned and call `align_offset` again.
                    let addr = ImmTy::from_uint(offset.bytes(), args[0].layout).into();
                    let align = ImmTy::from_uint(target_align, args[1].layout).into();
                    let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;

                    // We replace the entire function call with a "tail call".
                    // Note that this happens before the frame of the original function
                    // is pushed on the stack.
                    self.eval_fn_call(
                        FnVal::Instance(instance),
                        (CallAbi::Rust, fn_abi),
                        &[addr, align],
                        /* with_caller_location = */ false,
                        dest,
                        ret,
                        mir::UnwindAction::Unreachable,
                    )?;
                    Ok(ControlFlow::Break(()))
                } else {
                    // Not alignable in const, return `usize::MAX`.
                    let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
                    self.write_scalar(usize_max, dest)?;
                    self.return_to_block(ret)?;
                    Ok(ControlFlow::Break(()))
                }
            }
            Err(_addr) => {
                // The pointer has an address, continue with function call.
                Ok(ControlFlow::Continue(()))
            }
        }
    }

    /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
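    ///
    /// Sketch of the encoding returned by this helper (mirroring the match arms below):
    /// `0` means the scalars are known to be unequal, `1` means they are known to be equal,
    /// and `2` means the comparison cannot be decided at compile time.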
    fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
        Ok(match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int { .. }, Scalar::Int { .. }) => {
                if a == b {
                    1
                } else {
                    0
                }
            }
            // Comparisons of abstract pointers with null pointers are known if the pointer
            // is in bounds, because if they are in bounds, the pointer can't be null.
            // Inequality with integers other than null can never be known for sure.
            (Scalar::Int(int), ptr @ Scalar::Ptr(..))
            | (ptr @ Scalar::Ptr(..), Scalar::Int(int))
                if int.is_null() && !self.scalar_may_be_null(ptr)? =>
            {
                0
            }
            // Equality with integers can never be known for sure.
            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => 2,
            // FIXME: return a `1` for when both sides are the same pointer, *except* that
            // some things (like functions and vtables) do not have stable addresses
            // so we need to be careful around them (see e.g. #73722).
            // FIXME: return `0` for at least some comparisons where we can reliably
            // determine the result of runtime inequality tests at compile-time.
            // Examples include comparison of addresses in different static items.
            (Scalar::Ptr(..), Scalar::Ptr(..)) => 2,
        })
    }
}

impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
    compile_time_machine!(<'mir, 'tcx>);

    type MemoryKind = MemoryKind;

    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error

    #[inline(always)]
    fn enforce_alignment(ecx: &InterpCx<'mir, 'tcx, Self>) -> CheckAlignment {
        ecx.machine.check_alignment
    }

    #[inline(always)]
    fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>, layout: TyAndLayout<'tcx>) -> bool {
        ecx.tcx.sess.opts.unstable_opts.extra_const_ub_checks || layout.abi.is_uninhabited()
    }

    fn alignment_check_failed(
        ecx: &InterpCx<'mir, 'tcx, Self>,
        has: Align,
        required: Align,
        check: CheckAlignment,
    ) -> InterpResult<'tcx, ()> {
        let err = err_ub!(AlignmentCheckFailed { has, required }).into();
        match check {
            CheckAlignment::Error => Err(err),
            CheckAlignment::No => span_bug!(
                ecx.cur_span(),
                "`alignment_check_failed` called when no alignment check requested"
            ),
            CheckAlignment::FutureIncompat => {
                let (_, backtrace) = err.into_parts();
                backtrace.print_backtrace();
                let (span, frames) = super::get_span_and_frames(&ecx);

                ecx.tcx.emit_spanned_lint(
                    INVALID_ALIGNMENT,
                    ecx.stack().iter().find_map(|frame| frame.lint_root()).unwrap_or(CRATE_HIR_ID),
                    span,
                    errors::AlignmentCheckFailed {
                        has: has.bytes(),
                        required: required.bytes(),
                        frames,
                    },
                );
                Ok(())
            }
        }
    }

    fn load_mir(
        ecx: &InterpCx<'mir, 'tcx, Self>,
        instance: ty::InstanceDef<'tcx>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        match instance {
            ty::InstanceDef::Item(def) => {
                if ecx.tcx.is_ctfe_mir_available(def) {
                    Ok(ecx.tcx.mir_for_ctfe(def))
                } else if ecx.tcx.def_kind(def) == DefKind::AssocConst {
                    let guar = ecx.tcx.sess.delay_span_bug(
                        rustc_span::DUMMY_SP,
                        "This is likely a const item that is missing from its impl",
                    );
                    throw_inval!(AlreadyReported(guar.into()));
                } else {
                    // `find_mir_or_eval_fn` checks that this is a const fn before even calling us,
                    // so this should be unreachable.
                    let path = ecx.tcx.def_path_str(def);
                    bug!("trying to call extern function `{path}` at compile-time");
                }
            }
            _ => Ok(ecx.tcx.instance_mir(instance)),
        }
    }

    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        _abi: CallAbi,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx>,
        ret: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction, // unwinding is not supported in consts
    ) -> InterpResult<'tcx, Option<(&'mir mir::Body<'tcx>, ty::Instance<'tcx>)>> {
        debug!("find_mir_or_eval_fn: {:?}", instance);

        // Only check non-glue functions
        if let ty::InstanceDef::Item(def) = instance.def {
            // Execution might have wandered off into other crates, so we cannot do a stability-
            // sensitive check here. But we can at least rule out functions that are not const
            // at all.
            if !ecx.tcx.is_const_fn_raw(def) {
                // Allow calling functions inside a trait marked with #[const_trait].
                if !ecx.tcx.is_const_default_method(def) {
                    // We certainly do *not* want to actually call the fn
                    // though, so be sure we return here.
                    throw_unsup_format!("calling non-const function `{}`", instance)
                }
            }

            let Some(new_instance) = ecx.hook_special_const_fn(instance, args, dest, ret)? else {
                return Ok(None);
            };

            if new_instance != instance {
                // We call another const fn instead.
                // However, we return the *original* instance to make backtraces work out
                // (and we hope this does not confuse the FnAbi checks too much).
                return Ok(Self::find_mir_or_eval_fn(
                    ecx,
                    new_instance,
                    _abi,
                    args,
                    dest,
                    ret,
                    _unwind,
                )?
                .map(|(body, _instance)| (body, instance)));
            }
        }

        // This is a const fn. Call it.
        Ok(Some((ecx.load_mir(instance.def, None)?, instance)))
    }

    fn call_intrinsic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: &PlaceTy<'tcx, Self::Provenance>,
        target: Option<mir::BasicBlock>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        // Shared intrinsics.
        if ecx.emulate_intrinsic(instance, args, dest, target)? {
            return Ok(());
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());

        // CTFE-specific intrinsics.
        let Some(ret) = target else {
            throw_unsup_format!("intrinsic `{intrinsic_name}` is not supported at compile-time");
        };
        match intrinsic_name {
            sym::ptr_guaranteed_cmp => {
                let a = ecx.read_scalar(&args[0])?;
                let b = ecx.read_scalar(&args[1])?;
                let cmp = ecx.guaranteed_cmp(a, b)?;
                ecx.write_scalar(Scalar::from_u8(cmp), dest)?;
            }
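            // `const_allocate` and `const_deallocate` back the unstable `const_heap` feature.
            // A hedged sketch of a hypothetical caller (assumes the unstable feature gates):
            //
            //     #![feature(core_intrinsics, const_heap)]
            //     const _: () = unsafe {
            //         let p = core::intrinsics::const_allocate(4, 4);
            //         core::intrinsics::const_deallocate(p, 4, 4);
            //     };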
            sym::const_allocate => {
                let size = ecx.read_scalar(&args[0])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;

                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_allocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                let ptr = ecx.allocate_ptr(
                    Size::from_bytes(size as u64),
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap),
                )?;
                ecx.write_pointer(ptr, dest)?;
            }
            sym::const_deallocate => {
                let ptr = ecx.read_pointer(&args[0])?;
                let size = ecx.read_scalar(&args[1])?.to_target_usize(ecx)?;
                let align = ecx.read_scalar(&args[2])?.to_target_usize(ecx)?;

                let size = Size::from_bytes(size);
                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_custom!(
                        fluent::const_eval_invalid_align_details,
                        name = "const_deallocate",
                        err_kind = err.diag_ident(),
                        align = err.align()
                    ),
                };

                // If an allocation was created in another const,
                // we don't deallocate it.
                let (alloc_id, _, _) = ecx.ptr_get_alloc_id(ptr)?;
                let is_allocated_in_another_const = matches!(
                    ecx.tcx.try_get_global_alloc(alloc_id),
                    Some(interpret::GlobalAlloc::Memory(_))
                );

                if !is_allocated_in_another_const {
                    ecx.deallocate_ptr(
                        ptr,
                        Some((size, align)),
                        interpret::MemoryKind::Machine(MemoryKind::Heap),
                    )?;
                }
            }
            _ => {
                throw_unsup_format!(
                    "intrinsic `{intrinsic_name}` is not supported at compile-time"
                );
            }
        }

        ecx.go_to_block(ret);
        Ok(())
    }

    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        msg: &AssertMessage<'tcx>,
        _unwind: mir::UnwindAction,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::AssertKind::*;
        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
        let eval_to_int =
            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
        let err = match msg {
            BoundsCheck { len, index } => {
                let len = eval_to_int(len)?;
                let index = eval_to_int(index)?;
                BoundsCheck { len, index }
            }
            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
            ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
            ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
            MisalignedPointerDereference { ref required, ref found } => {
                MisalignedPointerDereference {
                    required: eval_to_int(required)?,
                    found: eval_to_int(found)?,
                }
            }
        };
        Err(ConstEvalErrKind::AssertFailure(err).into())
    }

    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
        Err(ConstEvalErrKind::Abort(msg).into())
    }

    fn binary_ptr_op(
        _ecx: &InterpCx<'mir, 'tcx, Self>,
        _bin_op: mir::BinOp,
        _left: &ImmTy<'tcx>,
        _right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
        throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
    }

    fn increment_const_eval_counter(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
        // A `None` from `checked_add` means the counter is `usize::MAX`, i.e. reporting is
        // disabled or the step limit has already been hit in a previous call to
        // `increment_const_eval_counter`; in that case there is nothing to do.
        if let Some(new_steps) = ecx.machine.num_evaluated_steps.checked_add(1) {
            let (limit, start) = if ecx.tcx.sess.opts.unstable_opts.tiny_const_eval_limit {
                (TINY_LINT_TERMINATOR_LIMIT, TINY_LINT_TERMINATOR_LIMIT)
            } else {
                (LINT_TERMINATOR_LIMIT, PROGRESS_INDICATOR_START)
            };

            ecx.machine.num_evaluated_steps = new_steps;
            // By default, we have a *deny* lint kicking in after some time
            // to ensure `loop {}` doesn't just go forever.
            // In case that lint was reduced in severity (in particular in `--cap-lints`
            // situations), we also show a hard warning every now and then for really long
            // executions.
            if new_steps == limit {
                // By default, we stop after `LINT_TERMINATOR_LIMIT` steps, but the user can
                // disable this lint to be able to run until the heat death of the universe
                // or power loss, whichever comes first.
                let hir_id = ecx.best_lint_scope();
                let is_error = ecx
                    .tcx
                    .lint_level_at_node(
                        rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                        hir_id,
                    )
                    .0
                    .is_error();
                let span = ecx.cur_span();
                ecx.tcx.emit_spanned_lint(
                    rustc_session::lint::builtin::LONG_RUNNING_CONST_EVAL,
                    hir_id,
                    span,
                    LongRunning { item_span: ecx.tcx.span },
                );
                // If this was a hard error, don't bother continuing evaluation.
                if is_error {
                    let guard = ecx
                        .tcx
                        .sess
                        .delay_span_bug(span, "The deny lint should have already errored");
                    throw_inval!(AlreadyReported(guard.into()));
                }
            } else if new_steps > start && new_steps.is_power_of_two() {
                // Only report after a certain number of terminators have been evaluated and the
                // current number of evaluated terminators is a power of 2. The latter gives us a
                // cheap way to implement exponential backoff.
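                // With the default `PROGRESS_INDICATOR_START` of 4 million, the first warning
                // fires at 2^22 (about 4.2 million) terminators, then at 2^23, 2^24, and so on.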
                let span = ecx.cur_span();
                ecx.tcx.sess.emit_warning(LongRunningWarn { span, item_span: ecx.tcx.span });
            }
        }

        Ok(())
    }

    #[inline(always)]
    fn expose_ptr(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx> {
        // This is only reachable with -Zunleash-the-miri-inside-of-you.
        throw_unsup_format!("exposing pointers is not possible at compile-time")
    }

    #[inline(always)]
    fn init_frame_extra(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        frame: Frame<'mir, 'tcx>,
    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(frame)
        }
    }

    #[inline(always)]
    fn stack<'a>(
        ecx: &'a InterpCx<'mir, 'tcx, Self>,
    ) -> &'a [Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>] {
        &ecx.machine.stack
    }

    #[inline(always)]
    fn stack_mut<'a>(
        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::Provenance, Self::FrameExtra>> {
        &mut ecx.machine.stack
    }

    fn before_access_global(
        _tcx: TyCtxt<'tcx>,
        machine: &Self,
        alloc_id: AllocId,
        alloc: ConstAllocation<'tcx>,
        static_def_id: Option<DefId>,
        is_write: bool,
    ) -> InterpResult<'tcx> {
        let alloc = alloc.inner();
        if is_write {
            // Write access. These are never allowed, but we give a targeted error message.
            match alloc.mutability {
                Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
            }
        } else {
            // Read access. These are usually allowed, with some exceptions.
            if machine.can_access_statics == CanAccessStatics::Yes {
                // Machine configuration allows us to read from anything (e.g., `static` initializer).
                Ok(())
            } else if static_def_id.is_some() {
                // Machine configuration does not allow us to read statics
                // (e.g., `const` initializer).
                // See const_eval::machine::MemoryExtra::can_access_statics for why
                // this check is so important: if we could read statics, we could read pointers
                // to mutable allocations *inside* statics. These allocations are not themselves
                // statics, so pointers to them can get around the check in `validity.rs`.
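                // Illustrative sketch of a program that would be rejected on this path (most
                // such cases are already caught by earlier checks before CTFE even runs):
                //
                //     static S: i32 = 0;
                //     const C: i32 = S; // a `const` initializer must not read a `static`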
                Err(ConstEvalErrKind::ConstAccessesStatic.into())
            } else {
                // Immutable global, this read is fine.
                // But make sure we never accept a read from something mutable; that would be
                // unsound. The content of this allocation may be different now and at run-time,
                // so if we permit reading now we might return the wrong value.
                assert_eq!(alloc.mutability, Mutability::Not);
                Ok(())
            }
        }
    }
}

// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
// at the bottom of this file.