use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
use super::{FunctionCx, LocalRef};

use crate::base;
use crate::common::{self, IntPredicate};
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::Operand;
use rustc_middle::ty::cast::{CastTy, IntTy};
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, adjustment::PointerCoercion, Instance, Ty, TyCtxt};
use rustc_session::config::OptLevel;
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_target::abi::{self, FIRST_VARIANT};

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_rvalue(
        &mut self,
        bx: &mut Bx,
        dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                // FIXME: consider not copying constants through stack. (Fixable by codegen'ing
                // constants into `OperandValue::Ref`; why don’t we do that yet if we don’t?)
                cg_operand.val.store(bx, dest);
            }

            mir::Rvalue::Cast(
                mir::CastKind::PointerCoercion(PointerCoercion::Unsize),
                ref source,
                _,
            ) => {
                // The destination necessarily contains a fat pointer, so if
                // it's a scalar pair, it's a fat pointer or newtype thereof.
                if bx.cx().is_backend_scalar_pair(dest.layout) {
                    // Into-coerce of a thin pointer to a fat pointer -- just
                    // use the operand path.
                    let temp = self.codegen_rvalue_operand(bx, rvalue);
                    temp.val.store(bx, dest);
                    return;
                }

                // Unsize of a nontrivial struct. I would prefer for
                // this to be eliminated by MIR building, but
                // `CoerceUnsized` can be passed by a where-clause,
                // so the (generic) MIR may not be able to expand it.
                let operand = self.codegen_operand(bx, source);
                match operand.val {
                    OperandValue::Pair(..) | OperandValue::Immediate(_) => {
                        // Unsize from an immediate structure. We don't
                        // really need a temporary alloca here, but
                        // avoiding it would require us to have
                        // `coerce_unsized_into` use `extractvalue` to
                        // index into the struct, and this case isn't
                        // important enough for it.
                        debug!("codegen_rvalue: creating ugly alloca");
                        let scratch = PlaceRef::alloca(bx, operand.layout);
                        scratch.storage_live(bx);
                        operand.val.store(bx, scratch);
                        base::coerce_unsized_into(bx, scratch, dest);
                        scratch.storage_dead(bx);
                    }
                    OperandValue::Ref(llref, None, align) => {
                        let source = PlaceRef::new_sized_aligned(llref, operand.layout, align);
                        base::coerce_unsized_into(bx, source, dest);
                    }
                    OperandValue::Ref(_, Some(_), _) => {
                        bug!("unsized coercion on an unsized rvalue");
                    }
                    OperandValue::ZeroSized => {
                        bug!("unsized coercion on a ZST rvalue");
                    }
                }
            }

            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, _ty) => {
                let src = self.codegen_operand(bx, operand);
                self.codegen_transmute(bx, src, dest);
            }

            mir::Rvalue::Repeat(ref elem, count) => {
                let cg_elem = self.codegen_operand(bx, elem);

                // Do not generate the loop for zero-sized elements or empty arrays.
                if dest.layout.is_zst() {
                    return;
                }

                if let OperandValue::Immediate(v) = cg_elem.val {
                    let zero = bx.const_usize(0);
                    let start = dest.project_index(bx, zero).llval;
                    let size = bx.const_usize(dest.layout.size.bytes());

                    // Use llvm.memset.p0i8.* to initialize all zero arrays
                    if bx.cx().const_to_opt_u128(v, false) == Some(0) {
                        let fill = bx.cx().const_u8(0);
                        bx.memset(start, fill, size, dest.align, MemFlags::empty());
                        return;
                    }

                    // Use llvm.memset.p0i8.* to initialize byte arrays
                    let v = bx.from_immediate(v);
                    if bx.cx().val_ty(v) == bx.cx().type_i8() {
                        bx.memset(start, v, size, dest.align, MemFlags::empty());
                        return;
                    }
                }

                let count = self
                    .monomorphize(count)
                    .eval_target_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());

                bx.write_operand_repeatedly(cg_elem, count, dest);
            }

            mir::Rvalue::Aggregate(ref kind, ref operands) => {
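                // For ADT aggregates, write fields into the downcast of the chosen variant;
                // `active_field_index` is `Some` only for unions, naming the single field
                // that is initialized. All other aggregate kinds write into `dest` directly
                // using the first (and only) variant.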
                let (variant_index, variant_dest, active_field_index) = match **kind {
                    mir::AggregateKind::Adt(_, variant_index, _, _, active_field_index) => {
                        let variant_dest = dest.project_downcast(bx, variant_index);
                        (variant_index, variant_dest, active_field_index)
                    }
                    _ => (FIRST_VARIANT, dest, None),
                };
                if active_field_index.is_some() {
                    assert_eq!(operands.len(), 1);
                }
                for (i, operand) in operands.iter_enumerated() {
                    let op = self.codegen_operand(bx, operand);
                    // Do not generate stores and GEPs for zero-sized fields.
                    if !op.layout.is_zst() {
                        let field_index = active_field_index.unwrap_or(i);
                        let field = if let mir::AggregateKind::Array(_) = **kind {
                            let llindex = bx.cx().const_usize(field_index.as_u32().into());
                            variant_dest.project_index(bx, llindex)
                        } else {
                            variant_dest.project_field(bx, field_index.as_usize())
                        };
                        op.val.store(bx, field);
                    }
                }
                dest.codegen_set_discr(bx, variant_index);
            }

            _ => {
                assert!(self.rvalue_creates_operand(rvalue, DUMMY_SP));
                let temp = self.codegen_rvalue_operand(bx, rvalue);
                temp.val.store(bx, dest);
            }
        }
    }

    fn codegen_transmute(
        &mut self,
        bx: &mut Bx,
        src: OperandRef<'tcx, Bx::Value>,
        dst: PlaceRef<'tcx, Bx::Value>,
    ) {
        // The MIR validator enforces no unsized transmutes.
        debug_assert!(src.layout.is_sized());
        debug_assert!(dst.layout.is_sized());

        if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
            val.store(bx, dst);
            return;
        }

        match src.val {
            OperandValue::Ref(..) | OperandValue::ZeroSized => {
                span_bug!(
                    self.mir.span,
                    "Operand path should have handled transmute \
                    from {src:?} to place {dst:?}"
                );
            }
            OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                // When we have immediate(s), the alignment of the source is irrelevant,
                // so we can store them using the destination's alignment.
                let llty = bx.backend_type(src.layout);
                let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
                src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
            }
        }
    }

    /// Attempts to transmute an `OperandValue` to another `OperandValue`.
    ///
    /// Returns `None` for cases that can't work in that framework, such as for
    /// `Immediate`->`Ref` that needs an `alloca` to get the location.
    fn codegen_transmute_operand(
        &mut self,
        bx: &mut Bx,
        operand: OperandRef<'tcx, Bx::Value>,
        cast: TyAndLayout<'tcx>,
    ) -> Option<OperandValue<Bx::Value>> {
        // Check for transmutes that are always UB.
        if operand.layout.size != cast.size
            || operand.layout.abi.is_uninhabited()
            || cast.abi.is_uninhabited()
        {
            if !operand.layout.abi.is_uninhabited() {
                // Since this is known statically and the input could have existed
                // without already having hit UB, might as well trap for it.
                bx.abort();
            }

            // Because this transmute is UB, return something easy to generate,
            // since it's fine that later uses of the value are probably UB.
            return Some(OperandValue::poison(bx, cast));
        }

        let operand_kind = self.value_kind(operand.layout);
        let cast_kind = self.value_kind(cast);

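        // Dispatch on the source representation; each arm must either produce the
        // representation that `cast` expects or return `None` so the caller falls
        // back to going through memory.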
        match operand.val {
            OperandValue::Ref(ptr, meta, align) => {
                debug_assert_eq!(meta, None);
                debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
                let cast_bty = bx.backend_type(cast);
                let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
                let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
                Some(bx.load_operand(fake_place).val)
            }
            OperandValue::ZeroSized => {
                let OperandValueKind::ZeroSized = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::ZeroSized = cast_kind {
                    Some(OperandValue::ZeroSized)
                } else {
                    None
                }
            }
            OperandValue::Immediate(imm) => {
                let OperandValueKind::Immediate(in_scalar) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Immediate(out_scalar) = cast_kind
                    && in_scalar.size(self.cx) == out_scalar.size(self.cx)
                {
                    let operand_bty = bx.backend_type(operand.layout);
                    let cast_bty = bx.backend_type(cast);
                    Some(OperandValue::Immediate(self.transmute_immediate(
                        bx,
                        imm,
                        in_scalar,
                        operand_bty,
                        out_scalar,
                        cast_bty,
                    )))
                } else {
                    None
                }
            }
            OperandValue::Pair(imm_a, imm_b) => {
                let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
                    bug!("Found {operand_kind:?} for operand {operand:?}");
                };
                if let OperandValueKind::Pair(out_a, out_b) = cast_kind
                    && in_a.size(self.cx) == out_a.size(self.cx)
                    && in_b.size(self.cx) == out_b.size(self.cx)
                {
                    let in_a_ibty = bx.scalar_pair_element_backend_type(operand.layout, 0, false);
                    let in_b_ibty = bx.scalar_pair_element_backend_type(operand.layout, 1, false);
                    let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                    let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                    Some(OperandValue::Pair(
                        self.transmute_immediate(bx, imm_a, in_a, in_a_ibty, out_a, out_a_ibty),
                        self.transmute_immediate(bx, imm_b, in_b, in_b_ibty, out_b, out_b_ibty),
                    ))
                } else {
                    None
                }
            }
        }
    }

    /// Transmutes one of the immediates from an [`OperandValue::Immediate`]
    /// or an [`OperandValue::Pair`] to an immediate of the target type.
    ///
    /// `to_backend_ty` must be the *non*-immediate backend type (so it will be
    /// `i8`, not `i1`, for `bool`-like types.)
    fn transmute_immediate(
        &self,
        bx: &mut Bx,
        mut imm: Bx::Value,
        from_scalar: abi::Scalar,
        from_backend_ty: Bx::Type,
        to_scalar: abi::Scalar,
        to_backend_ty: Bx::Type,
    ) -> Bx::Value {
        debug_assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));

        use abi::Primitive::*;
        imm = bx.from_immediate(imm);

        // When scalars are passed by value, there's no metadata recording their
        // valid ranges. For example, `char`s are passed as just `i32`, with no
        // way for LLVM to know that they're 0x10FFFF at most. Thus we assume
        // the range of the input value too, not just the output range.
        self.assume_scalar_range(bx, imm, from_scalar, from_backend_ty);

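        // Pick the backend instruction by primitive kind. There is no direct
        // float<->pointer bitcast in the backend IR, so those cases round-trip
        // through an integer of pointer width.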
        imm = match (from_scalar.primitive(), to_scalar.primitive()) {
            (Int(..) | F32 | F64, Int(..) | F32 | F64) => bx.bitcast(imm, to_backend_ty),
            (Pointer(..), Pointer(..)) => bx.pointercast(imm, to_backend_ty),
            (Int(..), Pointer(..)) => bx.inttoptr(imm, to_backend_ty),
            (Pointer(..), Int(..)) => bx.ptrtoint(imm, to_backend_ty),
            (F32 | F64, Pointer(..)) => {
                let int_imm = bx.bitcast(imm, bx.cx().type_isize());
                bx.inttoptr(int_imm, to_backend_ty)
            }
            (Pointer(..), F32 | F64) => {
                let int_imm = bx.ptrtoint(imm, bx.cx().type_isize());
                bx.bitcast(int_imm, to_backend_ty)
            }
        };
        self.assume_scalar_range(bx, imm, to_scalar, to_backend_ty);
        imm = bx.to_immediate_scalar(imm, to_scalar);
        imm
    }

    fn assume_scalar_range(
        &self,
        bx: &mut Bx,
        imm: Bx::Value,
        scalar: abi::Scalar,
        backend_ty: Bx::Type,
    ) {
        if matches!(self.cx.sess().opts.optimize, OptLevel::No | OptLevel::Less)
            // For now, the critical niches are all over `Int`eger values.
            // Should floating-point values or pointers ever get more complex
            // niches, then this code will probably want to handle them too.
            || !matches!(scalar.primitive(), abi::Primitive::Int(..))
            || scalar.is_always_valid(self.cx)
        {
            return;
        }

        let abi::WrappingRange { start, end } = scalar.valid_range(self.cx);

        if start <= end {
            if start > 0 {
                let low = bx.const_uint_big(backend_ty, start);
                let cmp = bx.icmp(IntPredicate::IntUGE, imm, low);
                bx.assume(cmp);
            }

            let type_max = scalar.size(self.cx).unsigned_int_max();
            if end < type_max {
                let high = bx.const_uint_big(backend_ty, end);
                let cmp = bx.icmp(IntPredicate::IntULE, imm, high);
                bx.assume(cmp);
            }
        } else {
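            // A wrapping range (`start > end`) means the valid values wrap around
            // the end of the type, so `imm` is valid iff it is >= start OR <= end.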
            let low = bx.const_uint_big(backend_ty, start);
            let cmp_low = bx.icmp(IntPredicate::IntUGE, imm, low);

            let high = bx.const_uint_big(backend_ty, end);
            let cmp_high = bx.icmp(IntPredicate::IntULE, imm, high);

            let or = bx.or(cmp_low, cmp_high);
            bx.assume(or);
        }
    }

    pub fn codegen_rvalue_unsized(
        &mut self,
        bx: &mut Bx,
        indirect_dest: PlaceRef<'tcx, Bx::Value>,
        rvalue: &mir::Rvalue<'tcx>,
    ) {
        debug!(
            "codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
            indirect_dest.llval, rvalue
        );

        match *rvalue {
            mir::Rvalue::Use(ref operand) => {
                let cg_operand = self.codegen_operand(bx, operand);
                cg_operand.val.store_unsized(bx, indirect_dest);
            }

            _ => bug!("unsized assignment other than `Rvalue::Use`"),
        }
    }

    pub fn codegen_rvalue_operand(
        &mut self,
        bx: &mut Bx,
        rvalue: &mir::Rvalue<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        assert!(
            self.rvalue_creates_operand(rvalue, DUMMY_SP),
            "cannot codegen {:?} to operand",
            rvalue,
        );

        match *rvalue {
            mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
                let operand = self.codegen_operand(bx, source);
                debug!("cast operand is {:?}", operand);
                let cast = bx.cx().layout_of(self.monomorphize(mir_cast_ty));

                let val = match *kind {
                    mir::CastKind::PointerExposeAddress => {
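                        // `expose_addr`-style cast: at this level it is just a
                        // `ptrtoint` of the pointer value to the target integer type.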
                        assert!(bx.cx().is_backend_immediate(cast));
                        let llptr = operand.immediate();
                        let llcast_ty = bx.cx().immediate_backend_type(cast);
                        let lladdr = bx.ptrtoint(llptr, llcast_ty);
                        OperandValue::Immediate(lladdr)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer) => {
                        match *operand.layout.ty.kind() {
                            ty::FnDef(def_id, substs) => {
                                let instance = ty::Instance::resolve_for_fn_ptr(
                                    bx.tcx(),
                                    ty::ParamEnv::reveal_all(),
                                    def_id,
                                    substs,
                                )
                                .unwrap()
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be reified to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::ClosureFnPointer(_)) => {
                        match *operand.layout.ty.kind() {
                            ty::Closure(def_id, substs) => {
                                let instance = Instance::resolve_closure(
                                    bx.cx().tcx(),
                                    def_id,
                                    substs,
                                    ty::ClosureKind::FnOnce,
                                )
                                .expect("failed to normalize and resolve closure during codegen")
                                .polymorphize(bx.cx().tcx());
                                OperandValue::Immediate(bx.cx().get_fn_addr(instance))
                            }
                            _ => bug!("{} cannot be cast to a fn ptr", operand.layout.ty),
                        }
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::UnsafeFnPointer) => {
                        // This is a no-op at the LLVM level.
                        operand.val
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::Unsize) => {
                        assert!(bx.cx().is_backend_scalar_pair(cast));
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Pair(lldata, llextra) => {
                                // unsize from a fat pointer -- this is a
                                // "trait-object-to-supertrait" coercion.
                                (lldata, Some(llextra))
                            }
                            OperandValue::Immediate(lldata) => {
                                // "standard" unsize
                                (lldata, None)
                            }
                            OperandValue::Ref(..) => {
                                bug!("by-ref operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                            OperandValue::ZeroSized => {
                                bug!("zero-sized operand {:?} in `codegen_rvalue_operand`", operand);
                            }
                        };
                        let (lldata, llextra) =
                            base::unsize_ptr(bx, lldata, operand.layout.ty, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(PointerCoercion::MutToConstPointer)
                    | mir::CastKind::PtrToPtr
                        if bx.cx().is_backend_scalar_pair(operand.layout) =>
                    {
                        if let OperandValue::Pair(data_ptr, meta) = operand.val {
                            if bx.cx().is_backend_scalar_pair(cast) {
                                let data_cast = bx.pointercast(
                                    data_ptr,
                                    bx.cx().scalar_pair_element_backend_type(cast, 0, true),
                                );
                                OperandValue::Pair(data_cast, meta)
                            } else {
                                // cast to thin-ptr
                                // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
                                // pointer-cast of that pointer to desired pointer type.
                                let llcast_ty = bx.cx().immediate_backend_type(cast);
                                let llval = bx.pointercast(data_ptr, llcast_ty);
                                OperandValue::Immediate(llval)
                            }
                        } else {
                            bug!("unexpected non-pair operand");
                        }
                    }
                    mir::CastKind::DynStar => {
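                        // A `dyn*` cast packs the (pointer-sized) value together with
                        // its vtable, so the result is always a data/extra pair.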
                        let (lldata, llextra) = match operand.val {
                            OperandValue::Ref(_, _, _) => todo!(),
                            OperandValue::Immediate(v) => (v, None),
                            OperandValue::Pair(v, l) => (v, Some(l)),
                            OperandValue::ZeroSized => bug!("ZST -- which is not PointerLike -- in DynStar"),
                        };
                        let (lldata, llextra) =
                            base::cast_to_dyn_star(bx, lldata, operand.layout, cast.ty, llextra);
                        OperandValue::Pair(lldata, llextra)
                    }
                    mir::CastKind::PointerCoercion(
                        PointerCoercion::MutToConstPointer | PointerCoercion::ArrayToPointer,
                    )
                    | mir::CastKind::IntToInt
                    | mir::CastKind::FloatToInt
                    | mir::CastKind::FloatToFloat
                    | mir::CastKind::IntToFloat
                    | mir::CastKind::PtrToPtr
                    | mir::CastKind::FnPtrToPtr

                    // Since int2ptr can have arbitrary integer types as input (so we have to do
                    // sign extension and all that), it is currently best handled in the same code
                    // path as the other integer-to-X casts.
                    | mir::CastKind::PointerFromExposedAddress => {
                        assert!(bx.cx().is_backend_immediate(cast));
                        let ll_t_out = bx.cx().immediate_backend_type(cast);
                        if operand.layout.abi.is_uninhabited() {
                            let val = OperandValue::Immediate(bx.cx().const_poison(ll_t_out));
                            return OperandRef { val, layout: cast };
                        }
                        let r_t_in =
                            CastTy::from_ty(operand.layout.ty).expect("bad input type for cast");
                        let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
                        let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
                        let llval = operand.immediate();

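                        // Select the backend conversion from the (source, target) cast
                        // categories computed above.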
                        let newval = match (r_t_in, r_t_out) {
                            (CastTy::Int(i), CastTy::Int(_)) => {
                                bx.intcast(llval, ll_t_out, i.is_signed())
                            }
                            (CastTy::Float, CastTy::Float) => {
                                let srcsz = bx.cx().float_width(ll_t_in);
                                let dstsz = bx.cx().float_width(ll_t_out);
                                if dstsz > srcsz {
                                    bx.fpext(llval, ll_t_out)
                                } else if srcsz > dstsz {
                                    bx.fptrunc(llval, ll_t_out)
                                } else {
                                    llval
                                }
                            }
                            (CastTy::Int(i), CastTy::Float) => {
                                if i.is_signed() {
                                    bx.sitofp(llval, ll_t_out)
                                } else {
                                    bx.uitofp(llval, ll_t_out)
                                }
                            }
                            (CastTy::Ptr(_) | CastTy::FnPtr, CastTy::Ptr(_)) => {
                                bx.pointercast(llval, ll_t_out)
                            }
                            (CastTy::Int(i), CastTy::Ptr(_)) => {
                                let usize_llval =
                                    bx.intcast(llval, bx.cx().type_isize(), i.is_signed());
                                bx.inttoptr(usize_llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(IntTy::I)) => {
                                bx.cast_float_to_int(true, llval, ll_t_out)
                            }
                            (CastTy::Float, CastTy::Int(_)) => {
                                bx.cast_float_to_int(false, llval, ll_t_out)
                            }
                            _ => bug!("unsupported cast: {:?} to {:?}", operand.layout.ty, cast.ty),
                        };
                        OperandValue::Immediate(newval)
                    }
                    mir::CastKind::Transmute => {
                        self.codegen_transmute_operand(bx, operand, cast).unwrap_or_else(|| {
                            bug!("Unsupported transmute-as-operand of {operand:?} to {cast:?}");
                        })
                    }
                };
                OperandRef { val, layout: cast }
            }

            mir::Rvalue::Ref(_, bk, place) => {
                let mk_ref = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ref(
                        tcx,
                        tcx.lifetimes.re_erased,
                        ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() },
                    )
                };
                self.codegen_place_to_pointer(bx, place, mk_ref)
            }

            mir::Rvalue::CopyForDeref(place) => self.codegen_operand(bx, &Operand::Copy(place)),
            mir::Rvalue::AddressOf(mutability, place) => {
                let mk_ptr = move |tcx: TyCtxt<'tcx>, ty: Ty<'tcx>| {
                    Ty::new_ptr(tcx, ty::TypeAndMut { ty, mutbl: mutability })
                };
                self.codegen_place_to_pointer(bx, place, mk_ptr)
            }

            mir::Rvalue::Len(place) => {
                let size = self.evaluate_array_len(bx, place);
                OperandRef {
                    val: OperandValue::Immediate(size),
                    layout: bx.cx().layout_of(bx.tcx().types.usize),
                }
            }

            mir::Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let llresult = match (lhs.val, rhs.val) {
                    (
                        OperandValue::Pair(lhs_addr, lhs_extra),
                        OperandValue::Pair(rhs_addr, rhs_extra),
                    ) => self.codegen_fat_ptr_binop(
                        bx,
                        op,
                        lhs_addr,
                        lhs_extra,
                        rhs_addr,
                        rhs_extra,
                        lhs.layout.ty,
                    ),

                    (OperandValue::Immediate(lhs_val), OperandValue::Immediate(rhs_val)) => {
                        self.codegen_scalar_binop(bx, op, lhs_val, rhs_val, lhs.layout.ty)
                    }

                    _ => bug!(),
                };
                OperandRef {
                    val: OperandValue::Immediate(llresult),
                    layout: bx.cx().layout_of(op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
                }
            }
            mir::Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
                let lhs = self.codegen_operand(bx, lhs);
                let rhs = self.codegen_operand(bx, rhs);
                let result = self.codegen_scalar_checked_binop(
                    bx,
                    op,
                    lhs.immediate(),
                    rhs.immediate(),
                    lhs.layout.ty,
                );
                let val_ty = op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty);
                let operand_ty = Ty::new_tup(bx.tcx(), &[val_ty, bx.tcx().types.bool]);
                OperandRef { val: result, layout: bx.cx().layout_of(operand_ty) }
            }

            mir::Rvalue::UnaryOp(op, ref operand) => {
                let operand = self.codegen_operand(bx, operand);
                let lloperand = operand.immediate();
                let is_float = operand.layout.ty.is_floating_point();
                let llval = match op {
                    mir::UnOp::Not => bx.not(lloperand),
                    mir::UnOp::Neg => {
                        if is_float {
                            bx.fneg(lloperand)
                        } else {
                            bx.neg(lloperand)
                        }
                    }
                };
                OperandRef { val: OperandValue::Immediate(llval), layout: operand.layout }
            }

            mir::Rvalue::Discriminant(ref place) => {
                let discr_ty = rvalue.ty(self.mir, bx.tcx());
                let discr_ty = self.monomorphize(discr_ty);
                let discr = self.codegen_place(bx, place.as_ref()).codegen_get_discr(bx, discr_ty);
                OperandRef {
                    val: OperandValue::Immediate(discr),
                    layout: self.cx.layout_of(discr_ty),
                }
            }

            mir::Rvalue::NullaryOp(ref null_op, ty) => {
                let ty = self.monomorphize(ty);
                let layout = bx.cx().layout_of(ty);
                let val = match null_op {
                    mir::NullOp::SizeOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        layout.size.bytes()
                    }
                    mir::NullOp::AlignOf => {
                        assert!(bx.cx().type_is_sized(ty));
                        layout.align.abi.bytes()
                    }
                    mir::NullOp::OffsetOf(fields) => {
                        layout.offset_of_subfield(bx.cx(), fields.iter().map(|f| f.index())).bytes()
                    }
                };
                let val = bx.cx().const_usize(val);
                let tcx = self.cx.tcx();
                OperandRef {
                    val: OperandValue::Immediate(val),
                    layout: self.cx.layout_of(tcx.types.usize),
                }
            }

            mir::Rvalue::ThreadLocalRef(def_id) => {
                assert!(bx.cx().tcx().is_static(def_id));
                let layout = bx.layout_of(bx.cx().tcx().static_ptr_ty(def_id));
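                // Foreign thread-locals that need a shim are read by calling the
                // generated `ThreadLocalShim`, which returns the address of the static;
                // everything else goes through the usual `get_static` path.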
                let static_ = if !def_id.is_local() && bx.cx().tcx().needs_thread_local_shim(def_id)
                {
                    let instance = ty::Instance {
                        def: ty::InstanceDef::ThreadLocalShim(def_id),
                        substs: ty::InternalSubsts::empty(),
                    };
                    let fn_ptr = bx.get_fn_addr(instance);
                    let fn_abi = bx.fn_abi_of_instance(instance, ty::List::empty());
                    let fn_ty = bx.fn_decl_backend_type(&fn_abi);
                    let fn_attrs = if bx.tcx().def_kind(instance.def_id()).has_codegen_attrs() {
                        Some(bx.tcx().codegen_fn_attrs(instance.def_id()))
                    } else {
                        None
                    };
                    bx.call(fn_ty, fn_attrs, Some(fn_abi), fn_ptr, &[], None)
                } else {
                    bx.get_static(def_id)
                };
                OperandRef { val: OperandValue::Immediate(static_), layout }
            }
            mir::Rvalue::Use(ref operand) => self.codegen_operand(bx, operand),
            mir::Rvalue::Repeat(..) | mir::Rvalue::Aggregate(..) => {
                // According to `rvalue_creates_operand`, only ZST
                // aggregate rvalues are allowed to be operands.
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                OperandRef::zero_sized(self.cx.layout_of(self.monomorphize(ty)))
            }
            mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                let operand = self.codegen_operand(bx, operand);
                let lloperand = operand.immediate();

                let content_ty = self.monomorphize(content_ty);
                let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
                let llty_ptr = bx.cx().backend_type(box_layout);

                let val = bx.pointercast(lloperand, llty_ptr);
                OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
            }
        }
    }

    fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value {
        // ZSTs are passed as operands and require special handling
        // because codegen_place() panics if a Local is an operand.
        if let Some(index) = place.as_local() {
            if let LocalRef::Operand(op) = self.locals[index] {
                if let ty::Array(_, n) = op.layout.ty.kind() {
                    let n = n.eval_target_usize(bx.cx().tcx(), ty::ParamEnv::reveal_all());
                    return bx.cx().const_usize(n);
                }
            }
        }
        // use common size calculation for non zero-sized types
        let cg_value = self.codegen_place(bx, place.as_ref());
        cg_value.len(bx.cx())
    }

    /// Codegen an `Rvalue::AddressOf` or `Rvalue::Ref`
    fn codegen_place_to_pointer(
        &mut self,
        bx: &mut Bx,
        place: mir::Place<'tcx>,
        mk_ptr_ty: impl FnOnce(TyCtxt<'tcx>, Ty<'tcx>) -> Ty<'tcx>,
    ) -> OperandRef<'tcx, Bx::Value> {
        let cg_place = self.codegen_place(bx, place.as_ref());

        let ty = cg_place.layout.ty;

        // Note: places are indirect, so storing the `llval` into the
        // destination effectively creates a reference.
        let val = if !bx.cx().type_has_metadata(ty) {
            OperandValue::Immediate(cg_place.llval)
        } else {
            OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
        };
        OperandRef { val, layout: self.cx.layout_of(mk_ptr_ty(self.cx.tcx(), ty)) }
    }

    pub fn codegen_scalar_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        let is_float = input_ty.is_floating_point();
        let is_signed = input_ty.is_signed();
        match op {
            mir::BinOp::Add => {
                if is_float {
                    bx.fadd(lhs, rhs)
                } else {
                    bx.add(lhs, rhs)
                }
            }
            mir::BinOp::AddUnchecked => {
                if is_signed {
                    bx.unchecked_sadd(lhs, rhs)
                } else {
                    bx.unchecked_uadd(lhs, rhs)
                }
            }
            mir::BinOp::Sub => {
                if is_float {
                    bx.fsub(lhs, rhs)
                } else {
                    bx.sub(lhs, rhs)
                }
            }
            mir::BinOp::SubUnchecked => {
                if is_signed {
                    bx.unchecked_ssub(lhs, rhs)
                } else {
                    bx.unchecked_usub(lhs, rhs)
                }
            }
            mir::BinOp::Mul => {
                if is_float {
                    bx.fmul(lhs, rhs)
                } else {
                    bx.mul(lhs, rhs)
                }
            }
            mir::BinOp::MulUnchecked => {
                if is_signed {
                    bx.unchecked_smul(lhs, rhs)
                } else {
                    bx.unchecked_umul(lhs, rhs)
                }
            }
            mir::BinOp::Div => {
                if is_float {
                    bx.fdiv(lhs, rhs)
                } else if is_signed {
                    bx.sdiv(lhs, rhs)
                } else {
                    bx.udiv(lhs, rhs)
                }
            }
            mir::BinOp::Rem => {
                if is_float {
                    bx.frem(lhs, rhs)
                } else if is_signed {
                    bx.srem(lhs, rhs)
                } else {
                    bx.urem(lhs, rhs)
                }
            }
            mir::BinOp::BitOr => bx.or(lhs, rhs),
            mir::BinOp::BitAnd => bx.and(lhs, rhs),
            mir::BinOp::BitXor => bx.xor(lhs, rhs),
            mir::BinOp::Offset => {
                let pointee_type = input_ty
                    .builtin_deref(true)
                    .unwrap_or_else(|| bug!("deref of non-pointer {:?}", input_ty))
                    .ty;
                let pointee_layout = bx.cx().layout_of(pointee_type);
                if pointee_layout.is_zst() {
                    // `Offset` works in terms of the size of the pointee,
                    // so offsetting a pointer to a ZST is a no-op.
                    lhs
                } else {
                    let llty = bx.cx().backend_type(pointee_layout);
                    bx.inbounds_gep(llty, lhs, &[rhs])
                }
            }
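            // `Shl`/`Shr` mask the shift amount to the width of `lhs`, so an
            // over-wide shift cannot produce UB in the backend; the `Unchecked`
            // forms only cast the RHS to the LHS width and rely on the shift
            // amount already being in range.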
            mir::BinOp::Shl => common::build_masked_lshift(bx, lhs, rhs),
            mir::BinOp::ShlUnchecked => {
                let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
                bx.shl(lhs, rhs)
            }
            mir::BinOp::Shr => common::build_masked_rshift(bx, input_ty, lhs, rhs),
            mir::BinOp::ShrUnchecked => {
                let rhs = base::cast_shift_expr_rhs(bx, lhs, rhs);
                if is_signed { bx.ashr(lhs, rhs) } else { bx.lshr(lhs, rhs) }
            }
            mir::BinOp::Ne
            | mir::BinOp::Lt
            | mir::BinOp::Gt
            | mir::BinOp::Eq
            | mir::BinOp::Le
            | mir::BinOp::Ge => {
                if is_float {
                    bx.fcmp(base::bin_op_to_fcmp_predicate(op.to_hir_binop()), lhs, rhs)
                } else {
                    bx.icmp(base::bin_op_to_icmp_predicate(op.to_hir_binop(), is_signed), lhs, rhs)
                }
            }
        }
    }

    pub fn codegen_fat_ptr_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs_addr: Bx::Value,
        lhs_extra: Bx::Value,
        rhs_addr: Bx::Value,
        rhs_extra: Bx::Value,
        _input_ty: Ty<'tcx>,
    ) -> Bx::Value {
        match op {
            mir::BinOp::Eq => {
                let lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntEQ, lhs_extra, rhs_extra);
                bx.and(lhs, rhs)
            }
            mir::BinOp::Ne => {
                let lhs = bx.icmp(IntPredicate::IntNE, lhs_addr, rhs_addr);
                let rhs = bx.icmp(IntPredicate::IntNE, lhs_extra, rhs_extra);
                bx.or(lhs, rhs)
            }
            mir::BinOp::Le | mir::BinOp::Lt | mir::BinOp::Ge | mir::BinOp::Gt => {
                // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
                let (op, strict_op) = match op {
                    mir::BinOp::Lt => (IntPredicate::IntULT, IntPredicate::IntULT),
                    mir::BinOp::Le => (IntPredicate::IntULE, IntPredicate::IntULT),
                    mir::BinOp::Gt => (IntPredicate::IntUGT, IntPredicate::IntUGT),
                    mir::BinOp::Ge => (IntPredicate::IntUGE, IntPredicate::IntUGT),
                    _ => bug!(),
                };
                let lhs = bx.icmp(strict_op, lhs_addr, rhs_addr);
                let and_lhs = bx.icmp(IntPredicate::IntEQ, lhs_addr, rhs_addr);
                let and_rhs = bx.icmp(op, lhs_extra, rhs_extra);
                let rhs = bx.and(and_lhs, and_rhs);
                bx.or(lhs, rhs)
            }
            _ => {
                bug!("unexpected fat ptr binop");
            }
        }
    }

    pub fn codegen_scalar_checked_binop(
        &mut self,
        bx: &mut Bx,
        op: mir::BinOp,
        lhs: Bx::Value,
        rhs: Bx::Value,
        input_ty: Ty<'tcx>,
    ) -> OperandValue<Bx::Value> {
        let (val, of) = match op {
            // These are checked using intrinsics
            mir::BinOp::Add | mir::BinOp::Sub | mir::BinOp::Mul => {
                let oop = match op {
                    mir::BinOp::Add => OverflowOp::Add,
                    mir::BinOp::Sub => OverflowOp::Sub,
                    mir::BinOp::Mul => OverflowOp::Mul,
                    _ => unreachable!(),
                };
                bx.checked_binop(oop, input_ty, lhs, rhs)
            }
            _ => bug!("Operator `{:?}` is not a checkable operator", op),
        };

        OperandValue::Pair(val, of)
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>, span: Span) -> bool {
        match *rvalue {
            mir::Rvalue::Cast(mir::CastKind::Transmute, ref operand, cast_ty) => {
                let operand_ty = operand.ty(self.mir, self.cx.tcx());
                let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
                let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));

                match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
                    // Can always load from a pointer as needed
                    (OperandValueKind::Ref, _) => true,

                    // ZST-to-ZST is the easiest thing ever
                    (OperandValueKind::ZeroSized, OperandValueKind::ZeroSized) => true,

                    // But if only one of them is a ZST the sizes can't match
                    (OperandValueKind::ZeroSized, _) | (_, OperandValueKind::ZeroSized) => false,

                    // Need to generate an `alloca` to get a pointer from an immediate
                    (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,

                    // When we have scalar immediates, we can only convert things
                    // where the sizes match, to avoid endianness questions.
                    (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
                        a.size(self.cx) == b.size(self.cx),
                    (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
                        a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),

                    // Send mixings between scalars and pairs through the memory route
                    // FIXME: Maybe this could use insertvalue/extractvalue instead?
                    (OperandValueKind::Immediate(..), OperandValueKind::Pair(..)) |
                    (OperandValueKind::Pair(..), OperandValueKind::Immediate(..)) => false,
                }
            }
            mir::Rvalue::Ref(..) |
            mir::Rvalue::CopyForDeref(..) |
            mir::Rvalue::AddressOf(..) |
            mir::Rvalue::Len(..) |
            mir::Rvalue::Cast(..) | // (*)
            mir::Rvalue::ShallowInitBox(..) | // (*)
            mir::Rvalue::BinaryOp(..) |
            mir::Rvalue::CheckedBinaryOp(..) |
            mir::Rvalue::UnaryOp(..) |
            mir::Rvalue::Discriminant(..) |
            mir::Rvalue::NullaryOp(..) |
            mir::Rvalue::ThreadLocalRef(_) |
            mir::Rvalue::Use(..) => // (*)
                true,
            mir::Rvalue::Repeat(..) |
            mir::Rvalue::Aggregate(..) => {
                let ty = rvalue.ty(self.mir, self.cx.tcx());
                let ty = self.monomorphize(ty);
                self.cx.spanned_layout_of(ty, span).is_zst()
            }
        }

        // (*) this is only true if the type is suitable
    }

    /// Gets which variant of [`OperandValue`] is expected for a particular type.
    fn value_kind(&self, layout: TyAndLayout<'tcx>) -> OperandValueKind {
        if layout.is_zst() {
            OperandValueKind::ZeroSized
        } else if self.cx.is_backend_immediate(layout) {
            debug_assert!(!self.cx.is_backend_scalar_pair(layout));
            OperandValueKind::Immediate(match layout.abi {
                abi::Abi::Scalar(s) => s,
                abi::Abi::Vector { element, .. } => element,
                x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
            })
        } else if self.cx.is_backend_scalar_pair(layout) {
            let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
                span_bug!(
                    self.mir.span,
                    "Couldn't translate {:?} as backend scalar pair",
                    layout.abi,
                );
            };
            OperandValueKind::Pair(s1, s2)
        } else {
            OperandValueKind::Ref
        }
    }
}

/// The variants of this match [`OperandValue`], giving details about the
/// backend values that will be held in that other type.
#[derive(Debug, Copy, Clone)]
enum OperandValueKind {
    Ref,
    Immediate(abi::Scalar),
    Pair(abi::Scalar, abi::Scalar),
    ZeroSized,
}
1064