//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

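/// Destructures the operand slice of an intrinsic call, codegenning each
/// operand into a `CValue` and bailing out via `bug_on_incorrect_arg_count`
/// when the number of operands does not match the pattern.
///
/// Typical use elsewhere in this module:
///
/// ```ignore (illustrative)
/// intrinsic_args!(fx, args => (ptr, val); intrinsic);
/// let ptr = ptr.load_scalar(fx);
/// ```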
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty;
use rustc_middle::ty::layout::{HasParamEnv, ValidityRequirement};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

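/// Raises an internal compiler error when an intrinsic is called with the
/// wrong number of arguments; invoked by the `intrinsic_args!` macro above.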
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

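/// Emits a compile error when an atomic intrinsic is instantiated with a type
/// that is neither a basic integer nor a raw pointer, then emits a trap so the
/// partially built function still passes the Cranelift verifier.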
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

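/// Returns the Cranelift vector type for a SIMD layout. Panics if `layout` is
/// not a vector layout or if Cranelift does not support the lane count.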
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()).unwrap()
}

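/// Applies `f` to each lane of `val` and writes the per-lane results to the
/// corresponding lanes of `ret`. `f` receives the input and output lane types
/// together with the input lane value.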
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

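/// Like `simd_pair_for_each_lane`, but passes the lanes to `f` as typed
/// `CValue`s rather than raw Cranelift values, so the result lane does not
/// have to be a single scalar.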
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

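/// Applies `f` lane-wise to the SIMD inputs `x` and `y` (which must have the
/// same layout) and writes the scalar results to the lanes of `ret`.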
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

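/// Folds all lanes of `val` into a single scalar using `f`, starting from
/// `acc` when given and from lane 0 otherwise, and writes the result to `ret`.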
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
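/// Reduces a SIMD mask to a single `bool` by combining the lowest bit of each
/// lane with `f`, narrowing the result to `i8` as required for `bool`.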
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

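/// Expands a boolean into an all-zeros or all-ones integer of type `ty`,
/// bitcasting the result back to a float when `ty` is `f32` or `f64`.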
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = codegen_bitcast(fx, ty, res);
    }

    res
}

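/// Entry point for intrinsic calls: dispatches `simd_*` intrinsics to the
/// `simd` module, float intrinsics to `codegen_float_intrinsic_call`, and
/// everything else to `codegen_regular_intrinsic_call`.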
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            target.expect("target for simd intrinsic"),
            source_info.span,
        );
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target.expect("target for float intrinsic"));
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            target,
            source_info,
        );
    }
}

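/// Codegens float intrinsics that map to a single Cranelift instruction or to
/// a libm/compiler-builtins symbol. Returns `false` when `intrinsic` is not a
/// float intrinsic handled here, leaving it to the caller.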
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty, clif_ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32, types::F32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64, types::F64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32, types::F32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64, types::F64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32, types::F32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64, types::F64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32, types::F32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64, types::F64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32, types::F32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64, types::F64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32, types::F32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64, types::F64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32, types::F32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64, types::F64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32, types::F32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64, types::F64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32, types::F32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64, types::F64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32, types::F32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64, types::F64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32, types::F32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64, types::F64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32, types::F32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64, types::F64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32, types::F32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64, types::F64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32, types::F32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64, types::F64),
        sym::rintf32 => ("rintf", 1, fx.tcx.types.f32, types::F32),
        sym::rintf64 => ("rint", 1, fx.tcx.types.f64, types::F64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32, types::F32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64, types::F64),
        sym::roundevenf32 => ("roundevenf", 1, fx.tcx.types.f32, types::F32),
        sym::roundevenf64 => ("roundeven", 1, fx.tcx.types.f64, types::F64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32, types::F32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64, types::F64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32, types::F32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64, types::F64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

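    // The arrays are declared outside the match so that the slices borrowed in
    // each arm remain valid when `args` is used below.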
    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x).load_scalar(fx)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x).load_scalar(fx), codegen_operand(fx, y).load_scalar(fx)];
            &b
        }
        [x, y, z] => {
            c = [
                codegen_operand(fx, x).load_scalar(fx),
                codegen_operand(fx, y).load_scalar(fx),
                codegen_operand(fx, z).load_scalar(fx),
            ];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            CValue::by_val(fx.bcx.ins().fma(args[0], args[1], args[2]), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            CValue::by_val(fx.bcx.ins().fcopysign(args[0], args[1]), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(args[0]),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(args[0]),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(args[0]),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(args[0]),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }

        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        sym::powif32 | sym::powif64 => {
            let input_tys: Vec<_> = vec![AbiParam::new(clif_ty), AbiParam::new(types::I32)];
            let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
            CValue::by_val(ret_val, fx.layout_of(ty))
        }
        _ => {
            let input_tys: Vec<_> = args.iter().map(|_| AbiParam::new(clif_ty)).collect();
            let ret_val = fx.lib_call(name, input_tys, vec![AbiParam::new(clif_ty)], &args)[0];
            CValue::by_val(ret_val, fx.layout_of(ty))
        }
    };

    ret.write_cvalue(fx, res);

    true
}

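/// Codegens all remaining (non-SIMD, non-float) intrinsics. Arms that diverge
/// or report an error `return` early; every other arm falls through to the
/// final jump to `destination`.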
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::abort => {
            fx.bcx.ins().trap(TrapCode::User(0));
            return;
        }
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME emit_small_memmove
            fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::exact_div => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on inexact
            let res = crate::num::codegen_int_binop(fx, BinOp::Div, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB both are codegen'ed the same way
        sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            let res = fx.bcx.ins().band(ptr, mask);
            // `ptr_mask` returns the masked pointer.
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

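            // Swapping the bytes of a single byte is a no-op, so skip the
            // `bswap` instruction for `i8` values.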
            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let ty = substs.type_at(0);

            let requirement = ValidityRequirement::from_intrinsic(intrinsic);

            if let Some(requirement) = requirement {
                let do_panic = !fx
                    .tcx
                    .check_validity_requirement((requirement, fx.param_env().and(ty)))
                    .expect("expect to have layout during codegen");

                if do_panic {
                    let layout = fx.layout_of(ty);
                    let msg_str = with_no_visible_paths!({
                        with_no_trimmed_paths!({
                            if layout.abi.is_uninhabited() {
                                // Use this error even for the other intrinsics as it is more precise.
                                format!("attempted to instantiate uninhabited type `{}`", ty)
                            } else if intrinsic == sym::assert_zero_valid {
                                format!(
                                    "attempted to zero-initialize type `{}`, which is invalid",
                                    ty
                                )
                            } else {
                                format!(
                                    "attempted to leave type `{}` uninitialized, which is invalid",
                                    ty
                                )
                            }
                        })
                    });
                    crate::base::codegen_panic_nounwind(fx, &msg_str, source_info);
                    return;
                }
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = fx.layout_of(fx.tcx.types.i32);
            let ret_val = CValue::by_val(fx.bcx.ins().iconst(types::I32, 0), layout);
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}