1 use super::operand::OperandRef;
2 use super::operand::OperandValue::{Immediate, Pair, Ref, ZeroSized};
3 use super::place::PlaceRef;
4 use super::{CachedLlbb, FunctionCx, LocalRef};
5 
6 use crate::base;
7 use crate::common::{self, IntPredicate};
8 use crate::meth;
9 use crate::traits::*;
10 use crate::MemFlags;
11 
12 use rustc_ast as ast;
13 use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
14 use rustc_hir::lang_items::LangItem;
15 use rustc_middle::mir::{self, AssertKind, SwitchTargets};
16 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement};
17 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
18 use rustc_middle::ty::{self, Instance, Ty};
19 use rustc_session::config::OptLevel;
20 use rustc_span::source_map::Span;
21 use rustc_span::{sym, Symbol};
22 use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode, Reg};
23 use rustc_target::abi::{self, HasDataLayout, WrappingRange};
24 use rustc_target::spec::abi::Abi;
25 
26 // Indicates if we are in the middle of merging a BB's successor into it. This
27 // can happen when BB jumps directly to its successor and the successor has no
28 // other predecessors.
29 #[derive(Debug, PartialEq)]
30 enum MergingSucc {
31     False,
32     True,
33 }
34 
35 /// Used by `FunctionCx::codegen_terminator` for emitting common patterns
36 /// e.g., creating a basic block, calling a function, etc.
37 struct TerminatorCodegenHelper<'tcx> {
38     bb: mir::BasicBlock,
39     terminator: &'tcx mir::Terminator<'tcx>,
40 }
41 
42 impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
43     /// Returns the appropriate `Funclet` for the current funclet, if on MSVC,
44     /// either previously cached or newly created by `landing_pad_for`.
45     fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
46         &self,
47         fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
48     ) -> Option<&'b Bx::Funclet> {
49         let cleanup_kinds = (&fx.cleanup_kinds).as_ref()?;
50         let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb)?;
51         // If `landing_pad_for` hasn't been called yet to create the `Funclet`,
52         // it has to be now. This may not seem necessary, as RPO should lead
53         // to all the unwind edges being visited (and so to `landing_pad_for`
54         // getting called for them), before building any of the blocks inside
55         // the funclet itself - however, if MIR contains edges that end up not
56         // being needed in the LLVM IR after monomorphization, the funclet may
57         // be unreachable, and we don't yet have a way to skip building it in
58         // such an eventuality (which may be a better solution than this).
59         if fx.funclets[funclet_bb].is_none() {
60             fx.landing_pad_for(funclet_bb);
61         }
62         Some(
63             fx.funclets[funclet_bb]
64                 .as_ref()
65                 .expect("landing_pad_for didn't also create funclets entry"),
66         )
67     }
68 
69     /// Gets the backend basic block for `target` (creating it if necessary),
70     /// routing the jump through a landing pad or cleanup trampoline when needed.
71     fn llbb_with_cleanup<Bx: BuilderMethods<'a, 'tcx>>(
72         &self,
73         fx: &mut FunctionCx<'a, 'tcx, Bx>,
74         target: mir::BasicBlock,
75     ) -> Bx::BasicBlock {
76         let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
77         let mut lltarget = fx.llbb(target);
78         if needs_landing_pad {
79             lltarget = fx.landing_pad_for(target);
80         }
81         if is_cleanupret {
82             // Cross-funclet jump - need a trampoline
83             debug_assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
84             debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
85             let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
86             let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);
87             let mut trampoline_bx = Bx::build(fx.cx, trampoline_llbb);
88             trampoline_bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
89             trampoline_llbb
90         } else {
91             lltarget
92         }
93     }
94 
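    /// Computes, for a jump from this terminator's block to `target`, whether
    /// the jump needs a landing pad (`needs_landing_pad`) and whether it must
    /// be emitted as a `cleanupret` (`is_cleanupret`).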
95     fn llbb_characteristics<Bx: BuilderMethods<'a, 'tcx>>(
96         &self,
97         fx: &mut FunctionCx<'a, 'tcx, Bx>,
98         target: mir::BasicBlock,
99     ) -> (bool, bool) {
100         if let Some(ref cleanup_kinds) = fx.cleanup_kinds {
101             let funclet_bb = cleanup_kinds[self.bb].funclet_bb(self.bb);
102             let target_funclet = cleanup_kinds[target].funclet_bb(target);
103             let (needs_landing_pad, is_cleanupret) = match (funclet_bb, target_funclet) {
104                 (None, None) => (false, false),
105                 (None, Some(_)) => (true, false),
106                 (Some(f), Some(t_f)) => (f != t_f, f != t_f),
107                 (Some(_), None) => {
108                     let span = self.terminator.source_info.span;
109                     span_bug!(span, "{:?} - jump out of cleanup?", self.terminator);
110                 }
111             };
112             (needs_landing_pad, is_cleanupret)
113         } else {
114             let needs_landing_pad = !fx.mir[self.bb].is_cleanup && fx.mir[target].is_cleanup;
115             let is_cleanupret = false;
116             (needs_landing_pad, is_cleanupret)
117         }
118     }
119 
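    /// Emits an unconditional branch to `target`, routing through a landing
    /// pad or emitting a `cleanupret` if required. Returns `MergingSucc::True`
    /// (and emits nothing) when the successor can simply be merged into the
    /// current block.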
120     fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
121         &self,
122         fx: &mut FunctionCx<'a, 'tcx, Bx>,
123         bx: &mut Bx,
124         target: mir::BasicBlock,
125         mergeable_succ: bool,
126     ) -> MergingSucc {
127         let (needs_landing_pad, is_cleanupret) = self.llbb_characteristics(fx, target);
128         if mergeable_succ && !needs_landing_pad && !is_cleanupret {
129             // We can merge the successor into this bb, so no need for a `br`.
130             MergingSucc::True
131         } else {
132             let mut lltarget = fx.llbb(target);
133             if needs_landing_pad {
134                 lltarget = fx.landing_pad_for(target);
135             }
136             if is_cleanupret {
137                 // micro-optimization: generate a `ret` rather than a jump
138                 // to a trampoline.
139                 bx.cleanup_ret(self.funclet(fx).unwrap(), Some(lltarget));
140             } else {
141                 bx.br(lltarget);
142             }
143             MergingSucc::False
144         }
145     }
146 
147     /// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
148     /// return destination `destination` and the unwind action `unwind`.
149     fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
150         &self,
151         fx: &mut FunctionCx<'a, 'tcx, Bx>,
152         bx: &mut Bx,
153         fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
154         fn_ptr: Bx::Value,
155         llargs: &[Bx::Value],
156         destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
157         mut unwind: mir::UnwindAction,
158         copied_constant_arguments: &[PlaceRef<'tcx, <Bx as BackendTypes>::Value>],
159         mergeable_succ: bool,
160     ) -> MergingSucc {
161         // If there is a cleanup block and the function we're calling can unwind, then
162         // do an invoke, otherwise do a call.
163         let fn_ty = bx.fn_decl_backend_type(&fn_abi);
164 
165         let fn_attrs = if bx.tcx().def_kind(fx.instance.def_id()).has_codegen_attrs() {
166             Some(bx.tcx().codegen_fn_attrs(fx.instance.def_id()))
167         } else {
168             None
169         };
170 
171         if !fn_abi.can_unwind {
172             unwind = mir::UnwindAction::Unreachable;
173         }
174 
175         let unwind_block = match unwind {
176             mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
177             mir::UnwindAction::Continue => None,
178             mir::UnwindAction::Unreachable => None,
179             mir::UnwindAction::Terminate => {
180                 if fx.mir[self.bb].is_cleanup && base::wants_new_eh_instructions(fx.cx.tcx().sess) {
181                     // MSVC SEH will abort automatically if an exception tries to
182                     // propagate out from cleanup.
183 
184                     // FIXME(@mirkootter): For wasm, we currently do not support terminate during
185                     // cleanup, because this requires a few more changes: the current code
186                     // caches the `terminate_block` for each function; funclet-based code,
187                     // however, requires a different `terminate_block` for each funclet.
188                     // Until this is implemented, we just do not unwind inside cleanup blocks.
189 
190                     None
191                 } else {
192                     Some(fx.terminate_block())
193                 }
194             }
195         };
196 
197         if let Some(unwind_block) = unwind_block {
198             let ret_llbb = if let Some((_, target)) = destination {
199                 fx.llbb(target)
200             } else {
201                 fx.unreachable_block()
202             };
203             let invokeret = bx.invoke(
204                 fn_ty,
205                 fn_attrs,
206                 Some(&fn_abi),
207                 fn_ptr,
208                 &llargs,
209                 ret_llbb,
210                 unwind_block,
211                 self.funclet(fx),
212             );
213             if fx.mir[self.bb].is_cleanup {
214                 bx.do_not_inline(invokeret);
215             }
216 
217             if let Some((ret_dest, target)) = destination {
218                 bx.switch_to_block(fx.llbb(target));
219                 fx.set_debug_loc(bx, self.terminator.source_info);
220                 for tmp in copied_constant_arguments {
221                     bx.lifetime_end(tmp.llval, tmp.layout.size);
222                 }
223                 fx.store_return(bx, ret_dest, &fn_abi.ret, invokeret);
224             }
225             MergingSucc::False
226         } else {
227             let llret = bx.call(fn_ty, fn_attrs, Some(&fn_abi), fn_ptr, &llargs, self.funclet(fx));
228             if fx.mir[self.bb].is_cleanup {
229                 // Cleanup is always the cold path. Don't inline
230                 // drop glue. Also, when there is a deeply-nested
231                 // struct, there are "symmetry" issues that cause
232                 // exponential inlining - see issue #41696.
233                 bx.do_not_inline(llret);
234             }
235 
236             if let Some((ret_dest, target)) = destination {
237                 for tmp in copied_constant_arguments {
238                     bx.lifetime_end(tmp.llval, tmp.layout.size);
239                 }
240                 fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
241                 self.funclet_br(fx, bx, target, mergeable_succ)
242             } else {
243                 bx.unreachable();
244                 MergingSucc::False
245             }
246         }
247     }
248 
249     /// Generates inline assembly with optional `destination` and `unwind`.
250     fn do_inlineasm<Bx: BuilderMethods<'a, 'tcx>>(
251         &self,
252         fx: &mut FunctionCx<'a, 'tcx, Bx>,
253         bx: &mut Bx,
254         template: &[InlineAsmTemplatePiece],
255         operands: &[InlineAsmOperandRef<'tcx, Bx>],
256         options: InlineAsmOptions,
257         line_spans: &[Span],
258         destination: Option<mir::BasicBlock>,
259         unwind: mir::UnwindAction,
260         instance: Instance<'_>,
261         mergeable_succ: bool,
262     ) -> MergingSucc {
263         let unwind_target = match unwind {
264             mir::UnwindAction::Cleanup(cleanup) => Some(self.llbb_with_cleanup(fx, cleanup)),
265             mir::UnwindAction::Terminate => Some(fx.terminate_block()),
266             mir::UnwindAction::Continue => None,
267             mir::UnwindAction::Unreachable => None,
268         };
269 
270         if let Some(cleanup) = unwind_target {
271             let ret_llbb = if let Some(target) = destination {
272                 fx.llbb(target)
273             } else {
274                 fx.unreachable_block()
275             };
276 
277             bx.codegen_inline_asm(
278                 template,
279                 &operands,
280                 options,
281                 line_spans,
282                 instance,
283                 Some((ret_llbb, cleanup, self.funclet(fx))),
284             );
285             MergingSucc::False
286         } else {
287             bx.codegen_inline_asm(template, &operands, options, line_spans, instance, None);
288 
289             if let Some(target) = destination {
290                 self.funclet_br(fx, bx, target, mergeable_succ)
291             } else {
292                 bx.unreachable();
293                 MergingSucc::False
294             }
295         }
296     }
297 }
298 
299 /// Codegen implementations for some terminator variants.
300 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
301     /// Generates code for a `Resume` terminator.
302     fn codegen_resume_terminator(&mut self, helper: TerminatorCodegenHelper<'tcx>, bx: &mut Bx) {
303         if let Some(funclet) = helper.funclet(self) {
304             bx.cleanup_ret(funclet, None);
305         } else {
306             let slot = self.get_personality_slot(bx);
307             let exn0 = slot.project_field(bx, 0);
308             let exn0 = bx.load_operand(exn0).immediate();
309             let exn1 = slot.project_field(bx, 1);
310             let exn1 = bx.load_operand(exn1).immediate();
311             slot.storage_dead(bx);
312 
313             bx.resume(exn0, exn1);
314         }
315     }
316 
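    /// Generates code for a `SwitchInt` terminator, lowering it to a
    /// conditional `br` when that is sufficient (or cheaper) and to a `switch`
    /// otherwise.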
317     fn codegen_switchint_terminator(
318         &mut self,
319         helper: TerminatorCodegenHelper<'tcx>,
320         bx: &mut Bx,
321         discr: &mir::Operand<'tcx>,
322         targets: &SwitchTargets,
323     ) {
324         let discr = self.codegen_operand(bx, &discr);
325         let switch_ty = discr.layout.ty;
326         let mut target_iter = targets.iter();
327         if target_iter.len() == 1 {
328             // If there are two targets (one conditional, one fallback), emit `br` instead of
329             // `switch`.
330             let (test_value, target) = target_iter.next().unwrap();
331             let lltrue = helper.llbb_with_cleanup(self, target);
332             let llfalse = helper.llbb_with_cleanup(self, targets.otherwise());
333             if switch_ty == bx.tcx().types.bool {
334                 // Don't generate trivial icmps when switching on bool.
335                 match test_value {
336                     0 => bx.cond_br(discr.immediate(), llfalse, lltrue),
337                     1 => bx.cond_br(discr.immediate(), lltrue, llfalse),
338                     _ => bug!(),
339                 }
340             } else {
341                 let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
342                 let llval = bx.const_uint_big(switch_llty, test_value);
343                 let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
344                 bx.cond_br(cmp, lltrue, llfalse);
345             }
346         } else if self.cx.sess().opts.optimize == OptLevel::No
347             && target_iter.len() == 2
348             && self.mir[targets.otherwise()].is_empty_unreachable()
349         {
350             // In unoptimized builds, if there are two normal targets and the `otherwise` target is
351             // an unreachable BB, emit `br` instead of `switch`. This leaves behind the unreachable
352             // BB, which will usually (but not always) be dead code.
353             //
354             // Why only in unoptimized builds?
355             // - In unoptimized builds LLVM uses FastISel which does not support switches, so it
356             //   must fall back to the slower SelectionDAG isel. Therefore, using `br` gives
357             //   significant compile time speedups for unoptimized builds.
358             // - In optimized builds the above doesn't hold, and using `br` sometimes results in
359             //   worse generated code because LLVM can no longer tell that the value being switched
360             //   on can only have two values, e.g. 0 and 1.
361             //
362             let (test_value1, target1) = target_iter.next().unwrap();
363             let (_test_value2, target2) = target_iter.next().unwrap();
364             let ll1 = helper.llbb_with_cleanup(self, target1);
365             let ll2 = helper.llbb_with_cleanup(self, target2);
366             let switch_llty = bx.immediate_backend_type(bx.layout_of(switch_ty));
367             let llval = bx.const_uint_big(switch_llty, test_value1);
368             let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
369             bx.cond_br(cmp, ll1, ll2);
370         } else {
371             bx.switch(
372                 discr.immediate(),
373                 helper.llbb_with_cleanup(self, targets.otherwise()),
374                 target_iter.map(|(value, target)| (value, helper.llbb_with_cleanup(self, target))),
375             );
376         }
377     }
378 
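    /// Generates code for a `Return` terminator: calls `va_end` for C-variadic
    /// functions and emits the return value according to the return type's
    /// `PassMode`.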
379     fn codegen_return_terminator(&mut self, bx: &mut Bx) {
380         // Call `va_end` if this is the definition of a C-variadic function.
381         if self.fn_abi.c_variadic {
382             // The `VaList` "spoofed" argument is just after all the real arguments.
383             let va_list_arg_idx = self.fn_abi.args.len();
384             match self.locals[mir::Local::from_usize(1 + va_list_arg_idx)] {
385                 LocalRef::Place(va_list) => {
386                     bx.va_end(va_list.llval);
387                 }
388                 _ => bug!("C-variadic function must have a `VaList` place"),
389             }
390         }
391         if self.fn_abi.ret.layout.abi.is_uninhabited() {
392             // Functions with uninhabited return values are marked `noreturn`,
393             // so we should make sure that we never actually return.
394             // We play it safe by using a well-defined `abort`, but we could go for immediate UB
395             // if that turns out to be helpful.
396             bx.abort();
397             // `abort` does not terminate the block, so we still need to generate
398             // an `unreachable` terminator after it.
399             bx.unreachable();
400             return;
401         }
402         let llval = match &self.fn_abi.ret.mode {
403             PassMode::Ignore | PassMode::Indirect { .. } => {
404                 bx.ret_void();
405                 return;
406             }
407 
408             PassMode::Direct(_) | PassMode::Pair(..) => {
409                 let op = self.codegen_consume(bx, mir::Place::return_place().as_ref());
410                 if let Ref(llval, _, align) = op.val {
411                     bx.load(bx.backend_type(op.layout), llval, align)
412                 } else {
413                     op.immediate_or_packed_pair(bx)
414                 }
415             }
416 
417             PassMode::Cast(cast_ty, _) => {
418                 let op = match self.locals[mir::RETURN_PLACE] {
419                     LocalRef::Operand(op) => op,
420                     LocalRef::PendingOperand => bug!("use of return before def"),
421                     LocalRef::Place(cg_place) => OperandRef {
422                         val: Ref(cg_place.llval, None, cg_place.align),
423                         layout: cg_place.layout,
424                     },
425                     LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
426                 };
427                 let llslot = match op.val {
428                     Immediate(_) | Pair(..) => {
429                         let scratch = PlaceRef::alloca(bx, self.fn_abi.ret.layout);
430                         op.val.store(bx, scratch);
431                         scratch.llval
432                     }
433                     Ref(llval, _, align) => {
434                         assert_eq!(align, op.layout.align.abi, "return place is unaligned!");
435                         llval
436                     }
437                     ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
438                 };
439                 let ty = bx.cast_backend_type(cast_ty);
440                 let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
441                 bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
442             }
443         };
444         bx.ret(llval);
445     }
446 
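    /// Generates code for a `Drop` terminator by calling the `drop_in_place`
    /// glue for the dropped type, dispatching through the vtable for `dyn` and
    /// `dyn*` receivers. If the type needs no drop, this is just a branch to
    /// `target`.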
447     #[tracing::instrument(level = "trace", skip(self, helper, bx))]
448     fn codegen_drop_terminator(
449         &mut self,
450         helper: TerminatorCodegenHelper<'tcx>,
451         bx: &mut Bx,
452         location: mir::Place<'tcx>,
453         target: mir::BasicBlock,
454         unwind: mir::UnwindAction,
455         mergeable_succ: bool,
456     ) -> MergingSucc {
457         let ty = location.ty(self.mir, bx.tcx()).ty;
458         let ty = self.monomorphize(ty);
459         let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
460 
461         if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
462             // we don't actually need to drop anything.
463             return helper.funclet_br(self, bx, target, mergeable_succ);
464         }
465 
466         let place = self.codegen_place(bx, location.as_ref());
467         let (args1, args2);
468         let mut args = if let Some(llextra) = place.llextra {
469             args2 = [place.llval, llextra];
470             &args2[..]
471         } else {
472             args1 = [place.llval];
473             &args1[..]
474         };
475         let (drop_fn, fn_abi) =
476             match ty.kind() {
477                 // FIXME(eddyb) perhaps move some of this logic into
478                 // `Instance::resolve_drop_in_place`?
479                 ty::Dynamic(_, _, ty::Dyn) => {
480                     // IN THIS ARM, WE HAVE:
481                     // ty = *mut (dyn Trait)
482                     // which is: exists<T> ( *mut T,    Vtable<T: Trait> )
483                     //                       args[0]    args[1]
484                     //
485                     // args = ( Data, Vtable )
486                     //                  |
487                     //                  v
488                     //                /-------\
489                     //                | ...   |
490                     //                \-------/
491                     //
492                     let virtual_drop = Instance {
493                         def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
494                         substs: drop_fn.substs,
495                     };
496                     debug!("ty = {:?}", ty);
497                     debug!("drop_fn = {:?}", drop_fn);
498                     debug!("args = {:?}", args);
499                     let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
500                     let vtable = args[1];
501                     // Truncate vtable off of args list
502                     args = &args[..1];
503                     (
504                         meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
505                             .get_fn(bx, vtable, ty, &fn_abi),
506                         fn_abi,
507                     )
508                 }
509                 ty::Dynamic(_, _, ty::DynStar) => {
510                     // IN THIS ARM, WE HAVE:
511                     // ty = *mut (dyn* Trait)
512                     // which is: *mut exists<T: sizeof(T) == sizeof(usize)> (T, Vtable<T: Trait>)
513                     //
514                     // args = [ * ]
515                     //          |
516                     //          v
517                     //      ( Data, Vtable )
518                     //                |
519                     //                v
520                     //              /-------\
521                     //              | ...   |
522                     //              \-------/
523                     //
524                     //
525                     // WE CAN CONVERT THIS INTO THE ABOVE LOGIC BY DOING
526                     //
527                     // data = &(*args[0]).0    // gives a pointer to Data above (really the same pointer)
528                     // vtable = (*args[0]).1   // loads the vtable out
529                     // (data, vtable)          // an equivalent Rust `*mut dyn Trait`
530                     //
531                     // SO THEN WE CAN USE THE ABOVE CODE.
532                     let virtual_drop = Instance {
533                         def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
534                         substs: drop_fn.substs,
535                     };
536                     debug!("ty = {:?}", ty);
537                     debug!("drop_fn = {:?}", drop_fn);
538                     debug!("args = {:?}", args);
539                     let fn_abi = bx.fn_abi_of_instance(virtual_drop, ty::List::empty());
540                     let meta_ptr = place.project_field(bx, 1);
541                     let meta = bx.load_operand(meta_ptr);
542                     // Truncate vtable off of args list
543                     args = &args[..1];
544                     debug!("args' = {:?}", args);
545                     (
546                         meth::VirtualIndex::from_index(ty::COMMON_VTABLE_ENTRIES_DROPINPLACE)
547                             .get_fn(bx, meta.immediate(), ty, &fn_abi),
548                         fn_abi,
549                     )
550                 }
551                 _ => (bx.get_fn_addr(drop_fn), bx.fn_abi_of_instance(drop_fn, ty::List::empty())),
552             };
553         helper.do_call(
554             self,
555             bx,
556             fn_abi,
557             drop_fn,
558             args,
559             Some((ReturnDest::Nothing, target)),
560             unwind,
561             &[],
562             mergeable_succ,
563         )
564     }
565 
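    /// Generates code for an `Assert` terminator: branches on the (possibly
    /// constant-folded) condition and emits a call to the matching panic lang
    /// item in the failure block.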
566     fn codegen_assert_terminator(
567         &mut self,
568         helper: TerminatorCodegenHelper<'tcx>,
569         bx: &mut Bx,
570         terminator: &mir::Terminator<'tcx>,
571         cond: &mir::Operand<'tcx>,
572         expected: bool,
573         msg: &mir::AssertMessage<'tcx>,
574         target: mir::BasicBlock,
575         unwind: mir::UnwindAction,
576         mergeable_succ: bool,
577     ) -> MergingSucc {
578         let span = terminator.source_info.span;
579         let cond = self.codegen_operand(bx, cond).immediate();
580         let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);
581 
582         // This case can currently arise only from functions marked
583         // with #[rustc_inherit_overflow_checks] and inlined from
584         // another crate (mostly core::num generic/#[inline] fns),
585         // while the current crate doesn't use overflow checks.
586         if !bx.cx().check_overflow() && msg.is_optional_overflow_check() {
587             const_cond = Some(expected);
588         }
589 
590         // Don't codegen the panic block if success is known.
591         if const_cond == Some(expected) {
592             return helper.funclet_br(self, bx, target, mergeable_succ);
593         }
594 
595         // Pass the condition through llvm.expect for branch hinting.
596         let cond = bx.expect(cond, expected);
597 
598         // Create the failure block and the conditional branch to it.
599         let lltarget = helper.llbb_with_cleanup(self, target);
600         let panic_block = bx.append_sibling_block("panic");
601         if expected {
602             bx.cond_br(cond, lltarget, panic_block);
603         } else {
604             bx.cond_br(cond, panic_block, lltarget);
605         }
606 
607         // After this point, bx is the block for the call to panic.
608         bx.switch_to_block(panic_block);
609         self.set_debug_loc(bx, terminator.source_info);
610 
611         // Get the location information.
612         let location = self.get_caller_location(bx, terminator.source_info).immediate();
613 
614         // Put together the arguments to the panic entry point.
615         let (lang_item, args) = match msg {
616             AssertKind::BoundsCheck { ref len, ref index } => {
617                 let len = self.codegen_operand(bx, len).immediate();
618                 let index = self.codegen_operand(bx, index).immediate();
619                 // It's `fn panic_bounds_check(index: usize, len: usize)`,
620                 // and `#[track_caller]` adds an implicit third argument.
621                 (LangItem::PanicBoundsCheck, vec![index, len, location])
622             }
623             AssertKind::MisalignedPointerDereference { ref required, ref found } => {
624                 let required = self.codegen_operand(bx, required).immediate();
625                 let found = self.codegen_operand(bx, found).immediate();
626                 // It's `fn panic_misaligned_pointer_dereference(required: usize, found: usize)`,
627                 // and `#[track_caller]` adds an implicit third argument.
628                 (LangItem::PanicMisalignedPointerDereference, vec![required, found, location])
629             }
630             _ => {
631                 let msg = bx.const_str(msg.description());
632                 // It's `pub fn panic(expr: &str)`, with the wide reference being passed
633                 // as two arguments, and `#[track_caller]` adds an implicit third argument.
634                 (LangItem::Panic, vec![msg.0, msg.1, location])
635             }
636         };
637 
638         let (fn_abi, llfn) = common::build_langcall(bx, Some(span), lang_item);
639 
640         // Codegen the actual panic invoke/call.
641         let merging_succ = helper.do_call(self, bx, fn_abi, llfn, &args, None, unwind, &[], false);
642         assert_eq!(merging_succ, MergingSucc::False);
643         MergingSucc::False
644     }
645 
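    /// Generates code for a `Terminate` terminator by calling the
    /// `PanicCannotUnwind` lang item.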
646     fn codegen_terminate_terminator(
647         &mut self,
648         helper: TerminatorCodegenHelper<'tcx>,
649         bx: &mut Bx,
650         terminator: &mir::Terminator<'tcx>,
651     ) {
652         let span = terminator.source_info.span;
653         self.set_debug_loc(bx, terminator.source_info);
654 
655         // Obtain the panic entry point.
656         let (fn_abi, llfn) = common::build_langcall(bx, Some(span), LangItem::PanicCannotUnwind);
657 
658         // Codegen the actual panic invoke/call.
659         let merging_succ = helper.do_call(
660             self,
661             bx,
662             fn_abi,
663             llfn,
664             &[],
665             None,
666             mir::UnwindAction::Unreachable,
667             &[],
668             false,
669         );
670         assert_eq!(merging_succ, MergingSucc::False);
671     }
672 
673     /// Returns `Some` if this is indeed a panic intrinsic and codegen is done.
674     fn codegen_panic_intrinsic(
675         &mut self,
676         helper: &TerminatorCodegenHelper<'tcx>,
677         bx: &mut Bx,
678         intrinsic: Option<Symbol>,
679         instance: Option<Instance<'tcx>>,
680         source_info: mir::SourceInfo,
681         target: Option<mir::BasicBlock>,
682         unwind: mir::UnwindAction,
683         mergeable_succ: bool,
684     ) -> Option<MergingSucc> {
685         // Emit a panic or a no-op for `assert_*` intrinsics.
686         // These are intrinsics that compile to panics so that we can get a message
687         // which mentions the offending type, even from a const context.
688         let panic_intrinsic = intrinsic.and_then(|s| ValidityRequirement::from_intrinsic(s));
689         if let Some(requirement) = panic_intrinsic {
690             let ty = instance.unwrap().substs.type_at(0);
691 
692             let do_panic = !bx
693                 .tcx()
694                 .check_validity_requirement((requirement, bx.param_env().and(ty)))
695                 .expect("expect to have layout during codegen");
696 
697             let layout = bx.layout_of(ty);
698 
699             Some(if do_panic {
700                 let msg_str = with_no_visible_paths!({
701                     with_no_trimmed_paths!({
702                         if layout.abi.is_uninhabited() {
703                             // Use this error even for the other intrinsics as it is more precise.
704                             format!("attempted to instantiate uninhabited type `{}`", ty)
705                         } else if requirement == ValidityRequirement::Zero {
706                             format!("attempted to zero-initialize type `{}`, which is invalid", ty)
707                         } else {
708                             format!(
709                                 "attempted to leave type `{}` uninitialized, which is invalid",
710                                 ty
711                             )
712                         }
713                     })
714                 });
715                 let msg = bx.const_str(&msg_str);
716 
717                 // Obtain the panic entry point.
718                 let (fn_abi, llfn) =
719                     common::build_langcall(bx, Some(source_info.span), LangItem::PanicNounwind);
720 
721                 // Codegen the actual panic invoke/call.
722                 helper.do_call(
723                     self,
724                     bx,
725                     fn_abi,
726                     llfn,
727                     &[msg.0, msg.1],
728                     target.as_ref().map(|bb| (ReturnDest::Nothing, *bb)),
729                     unwind,
730                     &[],
731                     mergeable_succ,
732                 )
733             } else {
734                 // a NOP
735                 let target = target.unwrap();
736                 helper.funclet_br(self, bx, target, mergeable_succ)
737             })
738         } else {
739             None
740         }
741     }
742 
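    /// Generates code for a `Call` terminator: resolves the callee, lowers the
    /// arguments (including virtual-call receivers and the implicit
    /// `#[track_caller]` location), and emits the call or invoke via `do_call`.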
743     fn codegen_call_terminator(
744         &mut self,
745         helper: TerminatorCodegenHelper<'tcx>,
746         bx: &mut Bx,
747         terminator: &mir::Terminator<'tcx>,
748         func: &mir::Operand<'tcx>,
749         args: &[mir::Operand<'tcx>],
750         destination: mir::Place<'tcx>,
751         target: Option<mir::BasicBlock>,
752         unwind: mir::UnwindAction,
753         fn_span: Span,
754         mergeable_succ: bool,
755     ) -> MergingSucc {
756         let source_info = terminator.source_info;
757         let span = source_info.span;
758 
759         // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
760         let callee = self.codegen_operand(bx, func);
761 
762         let (instance, mut llfn) = match *callee.layout.ty.kind() {
763             ty::FnDef(def_id, substs) => (
764                 Some(
765                     ty::Instance::expect_resolve(
766                         bx.tcx(),
767                         ty::ParamEnv::reveal_all(),
768                         def_id,
769                         substs,
770                     )
771                     .polymorphize(bx.tcx()),
772                 ),
773                 None,
774             ),
775             ty::FnPtr(_) => (None, Some(callee.immediate())),
776             _ => bug!("{} is not callable", callee.layout.ty),
777         };
778         let def = instance.map(|i| i.def);
779 
780         if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
781             // Empty drop glue; a no-op.
782             let target = target.unwrap();
783             return helper.funclet_br(self, bx, target, mergeable_succ);
784         }
785 
786         // FIXME(eddyb) avoid computing this if possible, when `instance` is
787         // available - right now `sig` is only needed for getting the `abi`
788         // and figuring out how many extra args were passed to a C-variadic `fn`.
789         let sig = callee.layout.ty.fn_sig(bx.tcx());
790         let abi = sig.abi();
791 
792         // Handle, ourselves, the intrinsics that old codegen wanted `Expr`s for.
793         let intrinsic = match def {
794             Some(ty::InstanceDef::Intrinsic(def_id)) => Some(bx.tcx().item_name(def_id)),
795             _ => None,
796         };
797 
798         let extra_args = &args[sig.inputs().skip_binder().len()..];
799         let extra_args = bx.tcx().mk_type_list_from_iter(extra_args.iter().map(|op_arg| {
800             let op_ty = op_arg.ty(self.mir, bx.tcx());
801             self.monomorphize(op_ty)
802         }));
803 
804         let fn_abi = match instance {
805             Some(instance) => bx.fn_abi_of_instance(instance, extra_args),
806             None => bx.fn_abi_of_fn_ptr(sig, extra_args),
807         };
808 
809         if let Some(merging_succ) = self.codegen_panic_intrinsic(
810             &helper,
811             bx,
812             intrinsic,
813             instance,
814             source_info,
815             target,
816             unwind,
817             mergeable_succ,
818         ) {
819             return merging_succ;
820         }
821 
822         // The arguments we'll be passing. Plus one to account for outptr, if used.
823         let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
824         let mut llargs = Vec::with_capacity(arg_count);
825 
826         // Prepare the return value destination
827         let ret_dest = if target.is_some() {
828             let is_intrinsic = intrinsic.is_some();
829             self.make_return_dest(bx, destination, &fn_abi.ret, &mut llargs, is_intrinsic)
830         } else {
831             ReturnDest::Nothing
832         };
833 
834         if intrinsic == Some(sym::caller_location) {
835             return if let Some(target) = target {
836                 let location =
837                     self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
838 
839                 if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
840                     location.val.store(bx, tmp);
841                 }
842                 self.store_return(bx, ret_dest, &fn_abi.ret, location.immediate());
843                 helper.funclet_br(self, bx, target, mergeable_succ)
844             } else {
845                 MergingSucc::False
846             };
847         }
848 
849         match intrinsic {
850             None | Some(sym::drop_in_place) => {}
851             Some(intrinsic) => {
852                 let dest = match ret_dest {
853                     _ if fn_abi.ret.is_indirect() => llargs[0],
854                     ReturnDest::Nothing => {
855                         bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
856                     }
857                     ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
858                     ReturnDest::DirectOperand(_) => {
859                         bug!("Cannot use direct operand with an intrinsic call")
860                     }
861                 };
862 
863                 let args: Vec<_> = args
864                     .iter()
865                     .enumerate()
866                     .map(|(i, arg)| {
867                         // The indices passed to simd_shuffle* in the
868                         // third argument must be constant. This is
869                         // checked by const-qualification, which also
870                         // promotes any complex rvalues to constants.
871                         if i == 2 && intrinsic.as_str().starts_with("simd_shuffle") {
872                             if let mir::Operand::Constant(constant) = arg {
873                                 let (llval, ty) = self.simd_shuffle_indices(&bx, constant);
874                                 return OperandRef {
875                                     val: Immediate(llval),
876                                     layout: bx.layout_of(ty),
877                                 };
878                             } else {
879                                 span_bug!(span, "shuffle indices must be constant");
880                             }
881                         }
882 
883                         self.codegen_operand(bx, arg)
884                     })
885                     .collect();
886 
887                 Self::codegen_intrinsic_call(
888                     bx,
889                     *instance.as_ref().unwrap(),
890                     &fn_abi,
891                     &args,
892                     dest,
893                     span,
894                 );
895 
896                 if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
897                     self.store_return(bx, ret_dest, &fn_abi.ret, dst.llval);
898                 }
899 
900                 return if let Some(target) = target {
901                     helper.funclet_br(self, bx, target, mergeable_succ)
902                 } else {
903                     bx.unreachable();
904                     MergingSucc::False
905                 };
906             }
907         }
908 
909         // Split the rust-call tupled arguments off.
910         let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
911             let (tup, args) = args.split_last().unwrap();
912             (args, Some(tup))
913         } else {
914             (args, None)
915         };
916 
917         let mut copied_constant_arguments = vec![];
918         'make_args: for (i, arg) in first_args.iter().enumerate() {
919             let mut op = self.codegen_operand(bx, arg);
920 
921             if let (0, Some(ty::InstanceDef::Virtual(_, idx))) = (i, def) {
922                 match op.val {
923                     Pair(data_ptr, meta) => {
924                         // In the case of Rc<Self>, we need to explicitly pass a
925                         // *mut RcBox<Self> with a Scalar (not ScalarPair) ABI. This is a hack
926                         // that is understood elsewhere in the compiler as a method on
927                         // `dyn Trait`.
928                         // To get a `*mut RcBox<Self>`, we just keep unwrapping newtypes until
929                         // we get a value of a built-in pointer type.
930                         //
931                         // This is also relevant for `Pin<&mut Self>`, where we need to peel the `Pin`.
932                         'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
933                             && !op.layout.ty.is_ref()
934                         {
935                             for i in 0..op.layout.fields.count() {
936                                 let field = op.extract_field(bx, i);
937                                 if !field.layout.is_zst() {
938                                     // We found the one non-zero-sized field that is allowed;
939                                     // now find *its* non-zero-sized field, or stop if it's a
940                                     // pointer.
941                                     op = field;
942                                     continue 'descend_newtypes;
943                                 }
944                             }
945 
946                             span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
947                         }
948 
949                         // now that we have `*dyn Trait` or `&dyn Trait`, split it up into its
950                         // data pointer and vtable. Look up the method in the vtable, and pass
951                         // the data pointer as the first argument
952                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
953                             bx,
954                             meta,
955                             op.layout.ty,
956                             &fn_abi,
957                         ));
958                         llargs.push(data_ptr);
959                         continue 'make_args;
960                     }
961                     Ref(data_ptr, Some(meta), _) => {
962                         // by-value dynamic dispatch
963                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
964                             bx,
965                             meta,
966                             op.layout.ty,
967                             &fn_abi,
968                         ));
969                         llargs.push(data_ptr);
970                         continue;
971                     }
972                     Immediate(_) => {
973                         // See comment above explaining why we peel these newtypes
974                         'descend_newtypes: while !op.layout.ty.is_unsafe_ptr()
975                             && !op.layout.ty.is_ref()
976                         {
977                             for i in 0..op.layout.fields.count() {
978                                 let field = op.extract_field(bx, i);
979                                 if !field.layout.is_zst() {
980                                     // We found the one non-zero-sized field that is allowed;
981                                     // now find *its* non-zero-sized field, or stop if it's a
982                                     // pointer.
983                                     op = field;
984                                     continue 'descend_newtypes;
985                                 }
986                             }
987 
988                             span_bug!(span, "receiver has no non-zero-sized fields {:?}", op);
989                         }
990 
991                         // Make sure that we've actually unwrapped the rcvr down
992                         // to a pointer or ref to `dyn* Trait`.
993                         if !op.layout.ty.builtin_deref(true).unwrap().ty.is_dyn_star() {
994                             span_bug!(span, "can't codegen a virtual call on {:#?}", op);
995                         }
996                         let place = op.deref(bx.cx());
997                         let data_ptr = place.project_field(bx, 0);
998                         let meta_ptr = place.project_field(bx, 1);
999                         let meta = bx.load_operand(meta_ptr);
1000                         llfn = Some(meth::VirtualIndex::from_index(idx).get_fn(
1001                             bx,
1002                             meta.immediate(),
1003                             op.layout.ty,
1004                             &fn_abi,
1005                         ));
1006                         llargs.push(data_ptr.llval);
1007                         continue;
1008                     }
1009                     _ => {
1010                         span_bug!(span, "can't codegen a virtual call on {:#?}", op);
1011                     }
1012                 }
1013             }
1014 
1015             // The callee needs to own the argument memory if we pass it
1016             // by-ref, so make a local copy of non-immediate constants.
1017             match (arg, op.val) {
1018                 (&mir::Operand::Copy(_), Ref(_, None, _))
1019                 | (&mir::Operand::Constant(_), Ref(_, None, _)) => {
1020                     let tmp = PlaceRef::alloca(bx, op.layout);
1021                     bx.lifetime_start(tmp.llval, tmp.layout.size);
1022                     op.val.store(bx, tmp);
1023                     op.val = Ref(tmp.llval, None, tmp.align);
1024                     copied_constant_arguments.push(tmp);
1025                 }
1026                 _ => {}
1027             }
1028 
1029             self.codegen_argument(bx, op, &mut llargs, &fn_abi.args[i]);
1030         }
1031         let num_untupled = untuple.map(|tup| {
1032             self.codegen_arguments_untupled(bx, tup, &mut llargs, &fn_abi.args[first_args.len()..])
1033         });
1034 
1035         let needs_location =
1036             instance.is_some_and(|i| i.def.requires_caller_location(self.cx.tcx()));
1037         if needs_location {
1038             let mir_args = if let Some(num_untupled) = num_untupled {
1039                 first_args.len() + num_untupled
1040             } else {
1041                 args.len()
1042             };
1043             assert_eq!(
1044                 fn_abi.args.len(),
1045                 mir_args + 1,
1046                 "#[track_caller] fn's must have 1 more argument in their ABI than in their MIR: {:?} {:?} {:?}",
1047                 instance,
1048                 fn_span,
1049                 fn_abi,
1050             );
1051             let location =
1052                 self.get_caller_location(bx, mir::SourceInfo { span: fn_span, ..source_info });
1053             debug!(
1054                 "codegen_call_terminator({:?}): location={:?} (fn_span {:?})",
1055                 terminator, location, fn_span
1056             );
1057 
1058             let last_arg = fn_abi.args.last().unwrap();
1059             self.codegen_argument(bx, location, &mut llargs, last_arg);
1060         }
1061 
1062         let fn_ptr = match (instance, llfn) {
1063             (Some(instance), None) => bx.get_fn_addr(instance),
1064             (_, Some(llfn)) => llfn,
1065             _ => span_bug!(span, "no instance or llfn for call"),
1066         };
1067 
1068         helper.do_call(
1069             self,
1070             bx,
1071             fn_abi,
1072             fn_ptr,
1073             &llargs,
1074             target.as_ref().map(|&target| (ret_dest, target)),
1075             unwind,
1076             &copied_constant_arguments,
1077             mergeable_succ,
1078         )
1079     }
1080 
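    /// Generates code for an `InlineAsm` terminator by lowering its MIR
    /// operands and emitting the assembly via `do_inlineasm`.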
1081     fn codegen_asm_terminator(
1082         &mut self,
1083         helper: TerminatorCodegenHelper<'tcx>,
1084         bx: &mut Bx,
1085         terminator: &mir::Terminator<'tcx>,
1086         template: &[ast::InlineAsmTemplatePiece],
1087         operands: &[mir::InlineAsmOperand<'tcx>],
1088         options: ast::InlineAsmOptions,
1089         line_spans: &[Span],
1090         destination: Option<mir::BasicBlock>,
1091         unwind: mir::UnwindAction,
1092         instance: Instance<'_>,
1093         mergeable_succ: bool,
1094     ) -> MergingSucc {
1095         let span = terminator.source_info.span;
1096 
1097         let operands: Vec<_> = operands
1098             .iter()
1099             .map(|op| match *op {
1100                 mir::InlineAsmOperand::In { reg, ref value } => {
1101                     let value = self.codegen_operand(bx, value);
1102                     InlineAsmOperandRef::In { reg, value }
1103                 }
1104                 mir::InlineAsmOperand::Out { reg, late, ref place } => {
1105                     let place = place.map(|place| self.codegen_place(bx, place.as_ref()));
1106                     InlineAsmOperandRef::Out { reg, late, place }
1107                 }
1108                 mir::InlineAsmOperand::InOut { reg, late, ref in_value, ref out_place } => {
1109                     let in_value = self.codegen_operand(bx, in_value);
1110                     let out_place =
1111                         out_place.map(|out_place| self.codegen_place(bx, out_place.as_ref()));
1112                     InlineAsmOperandRef::InOut { reg, late, in_value, out_place }
1113                 }
1114                 mir::InlineAsmOperand::Const { ref value } => {
1115                     let const_value = self
1116                         .eval_mir_constant(value)
1117                         .unwrap_or_else(|_| span_bug!(span, "asm const cannot be resolved"));
1118                     let string = common::asm_const_to_str(
1119                         bx.tcx(),
1120                         span,
1121                         const_value,
1122                         bx.layout_of(value.ty()),
1123                     );
1124                     InlineAsmOperandRef::Const { string }
1125                 }
1126                 mir::InlineAsmOperand::SymFn { ref value } => {
1127                     let literal = self.monomorphize(value.literal);
1128                     if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
1129                         let instance = ty::Instance::resolve_for_fn_ptr(
1130                             bx.tcx(),
1131                             ty::ParamEnv::reveal_all(),
1132                             def_id,
1133                             substs,
1134                         )
1135                         .unwrap();
1136                         InlineAsmOperandRef::SymFn { instance }
1137                     } else {
1138                         span_bug!(span, "invalid type for asm sym (fn)");
1139                     }
1140                 }
1141                 mir::InlineAsmOperand::SymStatic { def_id } => {
1142                     InlineAsmOperandRef::SymStatic { def_id }
1143                 }
1144             })
1145             .collect();
1146 
1147         helper.do_inlineasm(
1148             self,
1149             bx,
1150             template,
1151             &operands,
1152             options,
1153             line_spans,
1154             destination,
1155             unwind,
1156             instance,
1157             mergeable_succ,
1158         )
1159     }
1160 }
1161 
1162 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
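    /// Codegens the given MIR basic block, along with any successor blocks
    /// that get merged into it.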
1163     pub fn codegen_block(&mut self, mut bb: mir::BasicBlock) {
1164         let llbb = match self.try_llbb(bb) {
1165             Some(llbb) => llbb,
1166             None => return,
1167         };
1168         let bx = &mut Bx::build(self.cx, llbb);
1169         let mir = self.mir;
1170 
1171         // MIR basic blocks stop at any function call. This may not be the case
1172         // for the backend's basic blocks, in which case we might be able to
1173         // combine multiple MIR basic blocks into a single backend basic block.
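             //
             // Illustrative sketch (not from the source): given MIR along the lines of
             //
             //     bb0: { _1 = foo() -> [return: bb1, unwind: continue]; }
             //     bb1: { _2 = bar() -> [return: bb2, unwind: continue]; }
             //
             // where `bb0` is `bb1`'s only predecessor, the call to `bar` can be
             // emitted into the same backend block as the call to `foo`; `bb1` is
             // then recorded as `CachedLlbb::Skip` below so it is not built separately.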
1174         loop {
1175             let data = &mir[bb];
1176 
1177             debug!("codegen_block({:?}={:?})", bb, data);
1178 
1179             for statement in &data.statements {
1180                 self.codegen_statement(bx, statement);
1181             }
1182 
1183             let merging_succ = self.codegen_terminator(bx, bb, data.terminator());
1184             if let MergingSucc::False = merging_succ {
1185                 break;
1186             }
1187 
1188             // We are merging the successor into the produced backend basic
1189             // block. Record that the successor should be skipped when it is
1190             // reached.
1191             //
1192             // Note: we must not have already generated code for the successor.
1193             // This is implicitly ensured by the reverse postorder traversal,
1194             // and the assertion explicitly checks that.
1195             let mut successors = data.terminator().successors();
1196             let succ = successors.next().unwrap();
1197             assert!(matches!(self.cached_llbbs[succ], CachedLlbb::None));
1198             self.cached_llbbs[succ] = CachedLlbb::Skip;
1199             bb = succ;
1200         }
1201     }
1202 
1203     fn codegen_terminator(
1204         &mut self,
1205         bx: &mut Bx,
1206         bb: mir::BasicBlock,
1207         terminator: &'tcx mir::Terminator<'tcx>,
1208     ) -> MergingSucc {
1209         debug!("codegen_terminator: {:?}", terminator);
1210 
1211         let helper = TerminatorCodegenHelper { bb, terminator };
1212 
1213         let mergeable_succ = || {
1214             // Note: any call to `switch_to_block` will invalidate a `true` value
1215             // of `mergeable_succ`.
1216             let mut successors = terminator.successors();
1217             if let Some(succ) = successors.next()
1218                 && successors.next().is_none()
1219                 && let &[succ_pred] = self.mir.basic_blocks.predecessors()[succ].as_slice()
1220             {
1221                 // bb has a single successor, and bb is its only predecessor. This
1222                 // makes it a candidate for merging.
1223                 assert_eq!(succ_pred, bb);
1224                 true
1225             } else {
1226                 false
1227             }
1228         };
1229 
1230         self.set_debug_loc(bx, terminator.source_info);
1231         match terminator.kind {
1232             mir::TerminatorKind::Resume => {
1233                 self.codegen_resume_terminator(helper, bx);
1234                 MergingSucc::False
1235             }
1236 
1237             mir::TerminatorKind::Terminate => {
1238                 self.codegen_terminate_terminator(helper, bx, terminator);
1239                 MergingSucc::False
1240             }
1241 
1242             mir::TerminatorKind::Goto { target } => {
1243                 helper.funclet_br(self, bx, target, mergeable_succ())
1244             }
1245 
1246             mir::TerminatorKind::SwitchInt { ref discr, ref targets } => {
1247                 self.codegen_switchint_terminator(helper, bx, discr, targets);
1248                 MergingSucc::False
1249             }
1250 
1251             mir::TerminatorKind::Return => {
1252                 self.codegen_return_terminator(bx);
1253                 MergingSucc::False
1254             }
1255 
1256             mir::TerminatorKind::Unreachable => {
1257                 bx.unreachable();
1258                 MergingSucc::False
1259             }
1260 
1261             mir::TerminatorKind::Drop { place, target, unwind, replace: _ } => {
1262                 self.codegen_drop_terminator(helper, bx, place, target, unwind, mergeable_succ())
1263             }
1264 
1265             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, unwind } => self
1266                 .codegen_assert_terminator(
1267                     helper,
1268                     bx,
1269                     terminator,
1270                     cond,
1271                     expected,
1272                     msg,
1273                     target,
1274                     unwind,
1275                     mergeable_succ(),
1276                 ),
1277 
1278             mir::TerminatorKind::Call {
1279                 ref func,
1280                 ref args,
1281                 destination,
1282                 target,
1283                 unwind,
1284                 call_source: _,
1285                 fn_span,
1286             } => self.codegen_call_terminator(
1287                 helper,
1288                 bx,
1289                 terminator,
1290                 func,
1291                 args,
1292                 destination,
1293                 target,
1294                 unwind,
1295                 fn_span,
1296                 mergeable_succ(),
1297             ),
1298             mir::TerminatorKind::GeneratorDrop | mir::TerminatorKind::Yield { .. } => {
1299                 bug!("generator ops in codegen")
1300             }
1301             mir::TerminatorKind::FalseEdge { .. } | mir::TerminatorKind::FalseUnwind { .. } => {
1302                 bug!("borrowck false edges in codegen")
1303             }
1304 
1305             mir::TerminatorKind::InlineAsm {
1306                 template,
1307                 ref operands,
1308                 options,
1309                 line_spans,
1310                 destination,
1311                 unwind,
1312             } => self.codegen_asm_terminator(
1313                 helper,
1314                 bx,
1315                 terminator,
1316                 template,
1317                 operands,
1318                 options,
1319                 line_spans,
1320                 destination,
1321                 unwind,
1322                 self.instance,
1323                 mergeable_succ(),
1324             ),
1325         }
1326     }
1327 
1328     fn codegen_argument(
1329         &mut self,
1330         bx: &mut Bx,
1331         op: OperandRef<'tcx, Bx::Value>,
1332         llargs: &mut Vec<Bx::Value>,
1333         arg: &ArgAbi<'tcx, Ty<'tcx>>,
1334     ) {
1335         match arg.mode {
1336             PassMode::Ignore => return,
1337             PassMode::Cast(_, true) => {
1338                 // Fill padding with undef value, where applicable.
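                     // (Note, not from the source: the `true` matched above is the
                     // cast's pad-with-i32 flag; the undef value is never read by the
                     // callee, it only keeps the following arguments in the slots the
                     // foreign ABI expects.)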
1339                 llargs.push(bx.const_undef(bx.reg_backend_type(&Reg::i32())));
1340             }
1341             PassMode::Pair(..) => match op.val {
1342                 Pair(a, b) => {
1343                     llargs.push(a);
1344                     llargs.push(b);
1345                     return;
1346                 }
1347                 _ => bug!("codegen_argument: {:?} invalid for pair argument", op),
1348             },
1349             PassMode::Indirect { attrs: _, extra_attrs: Some(_), on_stack: _ } => match op.val {
1350                 Ref(a, Some(b), _) => {
1351                     llargs.push(a);
1352                     llargs.push(b);
1353                     return;
1354                 }
1355                 _ => bug!("codegen_argument: {:?} invalid for unsized indirect argument", op),
1356             },
1357             _ => {}
1358         }
1359 
1360         // Force by-ref if we have to load through a cast pointer.
1361         let (mut llval, align, by_ref) = match op.val {
1362             Immediate(_) | Pair(..) => match arg.mode {
1363                 PassMode::Indirect { .. } | PassMode::Cast(..) => {
1364                     let scratch = PlaceRef::alloca(bx, arg.layout);
1365                     op.val.store(bx, scratch);
1366                     (scratch.llval, scratch.align, true)
1367                 }
1368                 _ => (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false),
1369             },
1370             Ref(llval, _, align) => {
1371                 if arg.is_indirect() && align < arg.layout.align.abi {
1372                     // `foo(packed.large_field)`. We can't pass the (unaligned) field directly, so
1373                     // copy it into an aligned temporary. As of Rust 1.16 we believe only temporaries
1374                     // are passed here, but we'd rather not leave scary latent bugs around.
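                         //
                         // Illustrative example (not from the source): with
                         //
                         //     #[repr(packed)]
                         //     struct Packed { tag: u8, large_field: [u64; 4] }
                         //
                         // `packed.large_field` is only 1-aligned, so it is memcpy'd into
                         // the aligned `scratch` slot below before being passed by ref.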
1375 
1376                     let scratch = PlaceRef::alloca(bx, arg.layout);
1377                     base::memcpy_ty(
1378                         bx,
1379                         scratch.llval,
1380                         scratch.align,
1381                         llval,
1382                         align,
1383                         op.layout,
1384                         MemFlags::empty(),
1385                     );
1386                     (scratch.llval, scratch.align, true)
1387                 } else {
1388                     (llval, align, true)
1389                 }
1390             }
1391             ZeroSized => match arg.mode {
1392                 PassMode::Indirect { .. } => {
1393                     // Though `extern "Rust"` doesn't pass ZSTs, some ABIs pass
1394                     // a pointer for `repr(C)` structs even when empty, so get
1395                     // one from an `alloca` (which can be left uninitialized).
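                         // Illustrative example (not from the source): an empty
                         // `#[repr(C)] struct Unit;` handed to an `extern "C"` callee
                         // may still need an argument slot on such ABIs, so a pointer
                         // to an uninitialized stack slot is passed in its place.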
1396                     let scratch = PlaceRef::alloca(bx, arg.layout);
1397                     (scratch.llval, scratch.align, true)
1398                 }
1399                 _ => bug!("ZST {op:?} wasn't ignored, but was passed with abi {arg:?}"),
1400             },
1401         };
1402 
1403         if by_ref && !arg.is_indirect() {
1404             // Have to load the argument, maybe while casting it.
1405             if let PassMode::Cast(ty, _) = &arg.mode {
1406                 let llty = bx.cast_backend_type(ty);
1407                 let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
1408                 llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
1409             } else {
1410                 // We can't use `PlaceRef::load` here because the argument
1411                 // may have a type we don't treat as immediate, but the ABI
1412                 // used for this call is passing it by-value. In that case,
1413                 // the load would just produce `OperandValue::Ref` instead
1414                 // of the `OperandValue::Immediate` we need for the call.
1415                 llval = bx.load(bx.backend_type(arg.layout), llval, align);
1416                 if let abi::Abi::Scalar(scalar) = arg.layout.abi {
1417                     if scalar.is_bool() {
1418                         bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
1419                     }
1420                 }
1421                 // We store bools as `i8` so we need to truncate to `i1`.
1422                 llval = bx.to_immediate(llval, arg.layout);
1423             }
1424         }
1425 
1426         llargs.push(llval);
1427     }
1428 
1429     fn codegen_arguments_untupled(
1430         &mut self,
1431         bx: &mut Bx,
1432         operand: &mir::Operand<'tcx>,
1433         llargs: &mut Vec<Bx::Value>,
1434         args: &[ArgAbi<'tcx, Ty<'tcx>>],
1435     ) -> usize {
1436         let tuple = self.codegen_operand(bx, operand);
1437 
1438         // Handle both by-ref and immediate tuples.
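             // (Illustrative note, not from the source: this handles the "rust-call"
             // ABI used by closures and the `Fn*` traits, where the caller supplies a
             // single tuple, e.g. `f.call((1u32, "x"))`, and each tuple field below
             // becomes its own backend argument.)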
1439         if let Ref(llval, None, align) = tuple.val {
1440             let tuple_ptr = PlaceRef::new_sized_aligned(llval, tuple.layout, align);
1441             for i in 0..tuple.layout.fields.count() {
1442                 let field_ptr = tuple_ptr.project_field(bx, i);
1443                 let field = bx.load_operand(field_ptr);
1444                 self.codegen_argument(bx, field, llargs, &args[i]);
1445             }
1446         } else if let Ref(_, Some(_), _) = tuple.val {
1447             bug!("closure arguments must be sized")
1448         } else {
1449             // If the tuple is immediate, the elements are as well.
1450             for i in 0..tuple.layout.fields.count() {
1451                 let op = tuple.extract_field(bx, i);
1452                 self.codegen_argument(bx, op, llargs, &args[i]);
1453             }
1454         }
1455         tuple.layout.fields.count()
1456     }
1457 
1458     fn get_caller_location(
1459         &mut self,
1460         bx: &mut Bx,
1461         mut source_info: mir::SourceInfo,
1462     ) -> OperandRef<'tcx, Bx::Value> {
1463         let tcx = bx.tcx();
1464 
1465         let mut span_to_caller_location = |span: Span| {
1466             let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
1467             let caller = tcx.sess.source_map().lookup_char_pos(topmost.lo());
1468             let const_loc = tcx.const_caller_location((
1469                 Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
1470                 caller.line as u32,
1471                 caller.col_display as u32 + 1,
1472             ));
1473             OperandRef::from_const(bx, const_loc, bx.tcx().caller_location_ty())
1474         };
1475 
1476         // Walk up the `SourceScope`s, in case some of them are from MIR inlining.
1477         // If so, the starting `source_info.span` is in the innermost inlined
1478         // function, and will be replaced with outer callsite spans as long
1479         // as the inlined functions were `#[track_caller]`.
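             //
             // Illustrative example (not from the source): if `#[track_caller] fn a()`
             // calls `#[track_caller] fn b()` and both are MIR-inlined into `main`,
             // the walk below climbs from `b`'s scope, replacing the span with each
             // callsite, and ends up reporting the location of the call in `main`.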
1480         loop {
1481             let scope_data = &self.mir.source_scopes[source_info.scope];
1482 
1483             if let Some((callee, callsite_span)) = scope_data.inlined {
1484                 // Stop inside the most nested non-`#[track_caller]` function,
1485                 // before ever reaching its caller (which is irrelevant).
1486                 if !callee.def.requires_caller_location(tcx) {
1487                     return span_to_caller_location(source_info.span);
1488                 }
1489                 source_info.span = callsite_span;
1490             }
1491 
1492             // Skip past all of the parents with `inlined: None`.
1493             match scope_data.inlined_parent_scope {
1494                 Some(parent) => source_info.scope = parent,
1495                 None => break,
1496             }
1497         }
1498 
1499         // No inlined `SourceScope`s, or all of them were `#[track_caller]`.
1500         self.caller_location.unwrap_or_else(|| span_to_caller_location(source_info.span))
1501     }
1502 
1503     fn get_personality_slot(&mut self, bx: &mut Bx) -> PlaceRef<'tcx, Bx::Value> {
1504         let cx = bx.cx();
1505         if let Some(slot) = self.personality_slot {
1506             slot
1507         } else {
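                 // The slot holds the `(*mut u8, i32)` pair produced by a cleanup
                 // landing pad (exception object pointer plus selector) and is shared
                 // by all landing pads in this function (see `landing_pad_for_uncached`
                 // below).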
1508             let layout = cx.layout_of(Ty::new_tup(
1509                 cx.tcx(),
1510                 &[Ty::new_mut_ptr(cx.tcx(), cx.tcx().types.u8), cx.tcx().types.i32],
1511             ));
1512             let slot = PlaceRef::alloca(bx, layout);
1513             self.personality_slot = Some(slot);
1514             slot
1515         }
1516     }
1517 
1518     /// Returns the landing/cleanup pad wrapper around the given basic block.
1519     // FIXME(eddyb) rename this to `eh_pad_for`.
1520     fn landing_pad_for(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1521         if let Some(landing_pad) = self.landing_pads[bb] {
1522             return landing_pad;
1523         }
1524 
1525         let landing_pad = self.landing_pad_for_uncached(bb);
1526         self.landing_pads[bb] = Some(landing_pad);
1527         landing_pad
1528     }
1529 
1530     // FIXME(eddyb) rename this to `eh_pad_for_uncached`.
1531     fn landing_pad_for_uncached(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1532         let llbb = self.llbb(bb);
1533         if base::wants_new_eh_instructions(self.cx.sess()) {
1534             let cleanup_bb = Bx::append_block(self.cx, self.llfn, &format!("funclet_{:?}", bb));
1535             let mut cleanup_bx = Bx::build(self.cx, cleanup_bb);
1536             let funclet = cleanup_bx.cleanup_pad(None, &[]);
1537             cleanup_bx.br(llbb);
1538             self.funclets[bb] = Some(funclet);
1539             cleanup_bb
1540         } else {
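                 // Non-funclet exception handling (e.g. DWARF/Itanium-style unwinding):
                 // emit a single `cleanup` landing pad, spill the (exception pointer,
                 // selector) pair it produces into the shared personality slot, and
                 // branch on to the MIR cleanup block.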
1541             let cleanup_llbb = Bx::append_block(self.cx, self.llfn, "cleanup");
1542             let mut cleanup_bx = Bx::build(self.cx, cleanup_llbb);
1543 
1544             let llpersonality = self.cx.eh_personality();
1545             let (exn0, exn1) = cleanup_bx.cleanup_landing_pad(llpersonality);
1546 
1547             let slot = self.get_personality_slot(&mut cleanup_bx);
1548             slot.storage_live(&mut cleanup_bx);
1549             Pair(exn0, exn1).store(&mut cleanup_bx, slot);
1550 
1551             cleanup_bx.br(llbb);
1552             cleanup_llbb
1553         }
1554     }
1555 
1556     fn unreachable_block(&mut self) -> Bx::BasicBlock {
1557         self.unreachable_block.unwrap_or_else(|| {
1558             let llbb = Bx::append_block(self.cx, self.llfn, "unreachable");
1559             let mut bx = Bx::build(self.cx, llbb);
1560             bx.unreachable();
1561             self.unreachable_block = Some(llbb);
1562             llbb
1563         })
1564     }
1565 
1566     fn terminate_block(&mut self) -> Bx::BasicBlock {
1567         self.terminate_block.unwrap_or_else(|| {
1568             let funclet;
1569             let llbb;
1570             let mut bx;
1571             if base::wants_msvc_seh(self.cx.sess()) {
1572                 // This is a basic block that we're aborting the program for,
1573                 // notably in an `extern` function. These basic blocks are inserted
1574                 // so that we can assert that `extern` functions indeed do not panic,
1575                 // and if they do we abort the process.
1576                 //
1577                 // On MSVC these are tricky though (where we're doing funclets). If
1578                 // we were to do a cleanuppad (like below) the normal functions like
1579                 // `longjmp` would trigger the abort logic, terminating the
1580                 // program. Instead we insert the equivalent of `catch(...)` for C++
1581                 // which magically doesn't trigger when `longjmp` flies over this
1582                 // frame.
1583                 //
1584                 // Lots more discussion can be found on #48251 but this codegen is
1585                 // modeled after clang's for:
1586                 //
1587                 //      try {
1588                 //          foo();
1589                 //      } catch (...) {
1590                 //          bar();
1591                 //      }
1592                 //
1593                 // which creates an IR snippet like
1594                 //
1595                 //      cs_terminate:
1596                 //         %cs = catchswitch within none [%cp_terminate] unwind to caller
1597                 //      cp_terminate:
1598                 //         %cp = catchpad within %cs [null, i32 64, null]
1599                 //         ...
1600 
1601                 llbb = Bx::append_block(self.cx, self.llfn, "cs_terminate");
1602                 let cp_llbb = Bx::append_block(self.cx, self.llfn, "cp_terminate");
1603 
1604                 let mut cs_bx = Bx::build(self.cx, llbb);
1605                 let cs = cs_bx.catch_switch(None, None, &[cp_llbb]);
1606 
1607                 // The "null" here is actually an RTTI type descriptor for the
1608                 // C++ personality function, but `catch (...)` has no type so
1609                 // it's null. The 64 here is actually a bitfield which
1610                 // represents that this is a catch-all block.
1611                 bx = Bx::build(self.cx, cp_llbb);
1612                 let null =
1613                     bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
1614                 let sixty_four = bx.const_i32(64);
1615                 funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
1616             } else {
1617                 llbb = Bx::append_block(self.cx, self.llfn, "terminate");
1618                 bx = Bx::build(self.cx, llbb);
1619 
1620                 let llpersonality = self.cx.eh_personality();
1621                 bx.filter_landing_pad(llpersonality);
1622 
1623                 funclet = None;
1624             }
1625 
1626             self.set_debug_loc(&mut bx, mir::SourceInfo::outermost(self.mir.span));
1627 
1628             let (fn_abi, fn_ptr) = common::build_langcall(&bx, None, LangItem::PanicCannotUnwind);
1629             let fn_ty = bx.fn_decl_backend_type(&fn_abi);
1630 
1631             let llret = bx.call(fn_ty, None, Some(&fn_abi), fn_ptr, &[], funclet.as_ref());
1632             bx.do_not_inline(llret);
1633 
1634             bx.unreachable();
1635 
1636             self.terminate_block = Some(llbb);
1637             llbb
1638         })
1639     }
1640 
1641     /// Get the backend `BasicBlock` for a MIR `BasicBlock`, either already
1642     /// cached in `self.cached_llbbs`, or created on demand (and cached).
1643     // FIXME(eddyb) rename `llbb` and other `ll`-prefixed things to use a
1644     // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbb`).
1645     pub fn llbb(&mut self, bb: mir::BasicBlock) -> Bx::BasicBlock {
1646         self.try_llbb(bb).unwrap()
1647     }
1648 
1649     /// Like `llbb`, but may fail if the basic block should be skipped.
1650     pub fn try_llbb(&mut self, bb: mir::BasicBlock) -> Option<Bx::BasicBlock> {
1651         match self.cached_llbbs[bb] {
1652             CachedLlbb::None => {
1653                 // FIXME(eddyb) only name the block if `fewer_names` is `false`.
1654                 let llbb = Bx::append_block(self.cx, self.llfn, &format!("{:?}", bb));
1655                 self.cached_llbbs[bb] = CachedLlbb::Some(llbb);
1656                 Some(llbb)
1657             }
1658             CachedLlbb::Some(llbb) => Some(llbb),
1659             CachedLlbb::Skip => None,
1660         }
1661     }
1662 
1663     fn make_return_dest(
1664         &mut self,
1665         bx: &mut Bx,
1666         dest: mir::Place<'tcx>,
1667         fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
1668         llargs: &mut Vec<Bx::Value>,
1669         is_intrinsic: bool,
1670     ) -> ReturnDest<'tcx, Bx::Value> {
1671         // If the return is ignored, we can just return a do-nothing `ReturnDest`.
1672         if fn_ret.is_ignore() {
1673             return ReturnDest::Nothing;
1674         }
1675         let dest = if let Some(index) = dest.as_local() {
1676             match self.locals[index] {
1677                 LocalRef::Place(dest) => dest,
1678                 LocalRef::UnsizedPlace(_) => bug!("return type must be sized"),
1679                 LocalRef::PendingOperand => {
1680                     // Handle temporary places, specifically `Operand` ones, as
1681                     // they don't have `alloca`s.
1682                     return if fn_ret.is_indirect() {
1683                         // Odd, but possible, case: we have an operand temporary,
1684                         // but the calling convention has an indirect return.
1685                         let tmp = PlaceRef::alloca(bx, fn_ret.layout);
1686                         tmp.storage_live(bx);
1687                         llargs.push(tmp.llval);
1688                         ReturnDest::IndirectOperand(tmp, index)
1689                     } else if is_intrinsic {
1690                         // Currently, intrinsics always need a location to store
1691                         // the result, so we create a temporary `alloca` for the
1692                         // result.
1693                         let tmp = PlaceRef::alloca(bx, fn_ret.layout);
1694                         tmp.storage_live(bx);
1695                         ReturnDest::IndirectOperand(tmp, index)
1696                     } else {
1697                         ReturnDest::DirectOperand(index)
1698                     };
1699                 }
1700                 LocalRef::Operand(_) => {
1701                     bug!("place local already assigned to");
1702                 }
1703             }
1704         } else {
1705             self.codegen_place(
1706                 bx,
1707                 mir::PlaceRef { local: dest.local, projection: &dest.projection },
1708             )
1709         };
1710         if fn_ret.is_indirect() {
1711             if dest.align < dest.layout.align.abi {
1712                 // Currently, MIR code generation does not create calls
1713                 // that store directly to fields of packed structs (in
1714                 // fact, the calls it creates write only to temps).
1715                 //
1716                 // If someone changes that, please update this code path
1717                 // to create a temporary.
1718                 span_bug!(self.mir.span, "can't directly store to unaligned value");
1719             }
1720             llargs.push(dest.llval);
1721             ReturnDest::Nothing
1722         } else {
1723             ReturnDest::Store(dest)
1724         }
1725     }
1726 
1727     // Stores the return value of a function call into its final location.
1728     fn store_return(
1729         &mut self,
1730         bx: &mut Bx,
1731         dest: ReturnDest<'tcx, Bx::Value>,
1732         ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
1733         llval: Bx::Value,
1734     ) {
1735         use self::ReturnDest::*;
1736 
1737         match dest {
1738             Nothing => (),
1739             Store(dst) => bx.store_arg(&ret_abi, llval, dst),
1740             IndirectOperand(tmp, index) => {
1741                 let op = bx.load_operand(tmp);
1742                 tmp.storage_dead(bx);
1743                 self.overwrite_local(index, LocalRef::Operand(op));
1744                 self.debug_introduce_local(bx, index);
1745             }
1746             DirectOperand(index) => {
1747                 // If there is a cast, we have to store and reload.
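                     // (Illustrative note, not from the source: with a `PassMode::Cast`
                     // return, e.g. a small struct handed back as two `i64` registers,
                     // the raw return value doesn't match `ret_abi.layout`, so it is
                     // spilled with `store_arg` and re-read as a normal operand.)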
1748                 let op = if let PassMode::Cast(..) = ret_abi.mode {
1749                     let tmp = PlaceRef::alloca(bx, ret_abi.layout);
1750                     tmp.storage_live(bx);
1751                     bx.store_arg(&ret_abi, llval, tmp);
1752                     let op = bx.load_operand(tmp);
1753                     tmp.storage_dead(bx);
1754                     op
1755                 } else {
1756                     OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
1757                 };
1758                 self.overwrite_local(index, LocalRef::Operand(op));
1759                 self.debug_introduce_local(bx, index);
1760             }
1761         }
1762     }
1763 }
1764 
1765 enum ReturnDest<'tcx, V> {
1766     // Do nothing; the return value is indirect or ignored.
1767     Nothing,
1768     // Store the return value to the pointer.
1769     Store(PlaceRef<'tcx, V>),
1770     // Store an indirect return value to an operand local place.
1771     IndirectOperand(PlaceRef<'tcx, V>, mir::Local),
1772     // Store a direct return value to an operand local place.
1773     DirectOperand(mir::Local),
1774 }
1775