//! Definition of [`CValue`] and [`CPlace`]

use crate::prelude::*;

use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::immediates::Offset32;

fn codegen_field<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    base: Pointer,
    extra: Option<Value>,
    layout: TyAndLayout<'tcx>,
    field: FieldIdx,
) -> (Pointer, TyAndLayout<'tcx>) {
    let field_offset = layout.fields.offset(field.index());
    let field_layout = layout.field(&*fx, field.index());

    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
    };

    if let Some(extra) = extra {
        if field_layout.is_sized() {
            return simple(fx);
        }
        match field_layout.ty.kind() {
            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
            ty::Adt(def, _) if def.repr().packed() => {
                assert_eq!(layout.align.abi.bytes(), 1);
                simple(fx)
            }
            _ => {
                // We have to align the offset for DSTs.
                let unaligned_offset = field_offset.bytes();
                let (_, unsized_align) =
                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);

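                // Branch-free `align_up(unaligned_offset, unsized_align)`:
                // compute `(unaligned_offset + unsized_align - 1) & -unsized_align`,
                // with the negation expressed as `0 - unsized_align` below.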
                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                let offset = fx.bcx.ins().band(and_lhs, and_rhs);

                (base.offset_value(fx, offset), field_layout)
            }
        }
    } else {
        simple(fx)
    }
}

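/// Compute the offset of the second component of a scalar pair: the size of
/// the first scalar rounded up to the alignment of the second.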
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
    let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
    Offset32::new(b_offset.bytes().try_into().unwrap())
}

/// A read-only value
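///
/// A `CValue` is stored either by reference (`ByRef`), as a single SSA value
/// (`ByVal`), or as a pair of SSA values (`ByValPair`). An illustrative
/// sketch, assuming `fx` and a suitable scalar `layout` are in scope:
///
/// ```ignore
/// let imm = fx.bcx.ins().iconst(types::I32, 42);
/// let val = CValue::by_val(imm, layout);
/// let loaded = val.load_scalar(fx); // returns `imm` unchanged for `ByVal`
/// ```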
#[derive(Debug, Copy, Clone)]
pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);

#[derive(Debug, Copy, Clone)]
enum CValueInner {
    ByRef(Pointer, Option<Value>),
    ByVal(Value),
    ByValPair(Value, Value),
}

impl<'tcx> CValue<'tcx> {
    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, None), layout)
    }

    pub(crate) fn by_ref_unsized(
        ptr: Pointer,
        meta: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
    }

    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByVal(value), layout)
    }

    pub(crate) fn by_val_pair(
        value: Value,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByValPair(value, extra), layout)
    }

    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.1
    }

    // FIXME remove
    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, meta) => (ptr, meta),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
                let cplace = CPlace::new_stack_slot(fx, layout);
                cplace.write_cvalue(fx, self);
                (cplace.to_ptr(), None)
            }
        }
    }

    // FIXME remove
    /// Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
    /// vtable pointer.
    pub(crate) fn dyn_star_force_data_on_stack(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
    ) -> (Value, Value) {
        assert!(self.1.ty.is_dyn_star());

        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match self.1.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (ptr.get_addr(fx), vtable)
            }
            CValueInner::ByValPair(data, vtable) => {
                let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
                    kind: StackSlotKind::ExplicitSlot,
                    // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                    // specify stack slot alignment.
                    size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
                        / 16
                        * 16,
                });
                let data_ptr = Pointer::stack_slot(stack_slot);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                data_ptr.store(fx, data, flags);

                (data_ptr.get_addr(fx), vtable)
            }
            CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
                unreachable!("dyn_star_force_data_on_stack({:?})", self)
            }
        }
    }

    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
        match self.0 {
            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
        }
    }

    /// Load a value with a `layout.abi` of [`Abi::Scalar`] or [`Abi::Vector`].
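    ///
    /// For a `ByVal` value this returns the stored [`Value`] unchanged; for a
    /// `ByRef` value it emits a load.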
    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let clif_ty = match layout.abi {
                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
                        .by(u32::try_from(count).unwrap())
                        .unwrap(),
                    _ => unreachable!("{:?}", layout.ty),
                };
                let mut flags = MemFlags::new();
                flags.set_notrap();
                ptr.load(fx, clif_ty, flags)
            }
            CValueInner::ByVal(value) => value,
            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
        }
    }

    /// Load a value pair with a `layout.abi` of [`Abi::ScalarPair`].
    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match layout.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("load_scalar_pair({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let val1 = ptr.load(fx, clif_ty1, flags);
                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (val1, val2)
            }
            CValueInner::ByRef(_, Some(_)) => {
                bug!("load_scalar_pair for unsized value not allowed")
            }
            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
            CValueInner::ByValPair(val1, val2) => (val1, val2),
        }
    }

    pub(crate) fn value_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: FieldIdx,
    ) -> CValue<'tcx> {
        let layout = self.1;
        match self.0 {
            CValueInner::ByVal(_) => unreachable!(),
            CValueInner::ByValPair(val1, val2) => match layout.abi {
                Abi::ScalarPair(_, _) => {
                    let val = match field.as_u32() {
                        0 => val1,
                        1 => val2,
                        _ => bug!("field should be 0 or 1"),
                    };
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(val, field_layout)
                }
                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
            },
            CValueInner::ByRef(ptr, None) => {
                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
                CValue::by_ref(field_ptr, field_layout)
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
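    ///
    /// The lane index must be in bounds: `lane_idx < lane_count` is asserted.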
    pub(crate) fn value_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CValue<'tcx> {
        let layout = self.1;
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);
        match self.0 {
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => unreachable!(),
            CValueInner::ByRef(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CValue::by_ref(field_ptr, lane_layout)
            }
            CValueInner::ByRef(_, Some(_)) => unreachable!(),
        }
    }

    /// Like [`CValue::value_lane`] except allowing a dynamically calculated lane index.
    pub(crate) fn value_lane_dyn(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: Value,
    ) -> CValue<'tcx> {
        let layout = self.1;
        assert!(layout.ty.is_simd());
        let (_lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        match self.0 {
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => unreachable!(),
            CValueInner::ByRef(ptr, None) => {
                let field_offset = fx.bcx.ins().imul_imm(lane_idx, lane_layout.size.bytes() as i64);
                let field_ptr = ptr.offset_value(fx, field_offset);
                CValue::by_ref(field_ptr, lane_layout)
            }
            CValueInner::ByRef(_, Some(_)) => unreachable!(),
        }
    }

    /// If `ty` is signed, `const_val` must already be sign extended.
    pub(crate) fn const_val(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
        const_val: ty::ScalarInt,
    ) -> CValue<'tcx> {
        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};

        let clif_ty = fx.clif_type(layout.ty).unwrap();

        if let ty::Bool = layout.ty.kind() {
            assert!(
                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
                "Invalid bool 0x{:032X}",
                const_val
            );
        }

        let val = match layout.ty.kind() {
            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                let const_val = const_val.to_bits(layout.size).unwrap();
                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                fx.bcx.ins().iconcat(lsb, msb)
            }
            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
            }
            ty::Float(FloatTy::F32) => {
                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
            }
            ty::Float(FloatTy::F64) => {
                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
            }
            _ => panic!(
                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
                layout.ty
            ),
        };

        CValue::by_val(val, layout)
    }

    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert_eq!(self.layout().abi, layout.abi);
        CValue(self.0, layout)
    }
}

/// A place that a value can be written to and read from
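///
/// Depending on how the value is stored, a `CPlace` is backed by a single SSA
/// variable (`Var`), a pair of SSA variables (`VarPair`), or a memory address
/// with optional metadata for unsized places (`Addr`).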
#[derive(Debug, Copy, Clone)]
pub(crate) struct CPlace<'tcx> {
    inner: CPlaceInner,
    layout: TyAndLayout<'tcx>,
}

#[derive(Debug, Copy, Clone)]
enum CPlaceInner {
    Var(Local, Variable),
    VarPair(Local, Variable, Variable),
    Addr(Pointer, Option<Value>),
}

impl<'tcx> CPlace<'tcx> {
    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    pub(crate) fn new_stack_slot(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        assert!(layout.is_sized());
        if layout.size.bytes() == 0 {
            return CPlace {
                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
                layout,
            };
        }

        if layout.size.bytes() >= u64::from(u32::MAX - 16) {
            fx.tcx
                .sess
                .fatal(format!("values of type {} are too big to store on the stack", layout.ty));
        }

        let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
            // specify stack slot alignment.
            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
        });
        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
    }

    pub(crate) fn new_var(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }

    pub(crate) fn new_var_pair(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var1 = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        let var2 = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;

        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
        fx.bcx.declare_var(var1, ty1);
        fx.bcx.declare_var(var2, ty2);
        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
    }

    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
    }

    pub(crate) fn for_ptr_with_extra(
        ptr: Pointer,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
    }

    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                CValue::by_val(val, layout)
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let val1 = fx.bcx.use_var(var1);
                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                let val2 = fx.bcx.use_var(var2);
                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                CValue::by_val_pair(val1, val2, layout)
            }
            CPlaceInner::Addr(ptr, extra) => {
                if let Some(extra) = extra {
                    CValue::by_ref_unsized(ptr, extra, layout)
                } else {
                    CValue::by_ref(ptr, layout)
                }
            }
        }
    }

    pub(crate) fn debug_comment(self) -> (&'static str, String) {
        match self.inner {
            CPlaceInner::Var(_local, var) => ("ssa", format!("var={}", var.index())),
            CPlaceInner::VarPair(_local, var1, var2) => {
                ("ssa", format!("var=({}, {})", var1.index(), var2.index()))
            }
            CPlaceInner::Addr(ptr, meta) => {
                let meta =
                    if let Some(meta) = meta { format!(",meta={}", meta) } else { String::new() };
                match ptr.debug_base_and_offset() {
                    (crate::pointer::PointerBase::Addr(addr), offset) => {
                        ("reuse", format!("storage={}{}{}", addr, offset, meta))
                    }
                    (crate::pointer::PointerBase::Stack(stack_slot), offset) => {
                        ("stack", format!("storage={}{}{}", stack_slot, offset, meta))
                    }
                    (crate::pointer::PointerBase::Dangling(align), offset) => {
                        ("zst", format!("align={},offset={}", align.bytes(), offset))
                    }
                }
            }
        }
    }

    #[track_caller]
    pub(crate) fn to_ptr(self) -> Pointer {
        match self.inner {
            CPlaceInner::Addr(ptr, None) => ptr,
            CPlaceInner::Addr(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
            CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
                bug!("Expected CPlace::Addr, found {:?}", self)
            }
        }
    }

    #[track_caller]
    pub(crate) fn to_ptr_unsized(self) -> (Pointer, Value) {
        match self.inner {
            CPlaceInner::Addr(ptr, Some(extra)) => (ptr, extra),
            CPlaceInner::Addr(_, None) | CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
                bug!("Expected unsized cplace, found {:?}", self)
            }
        }
    }

    pub(crate) fn try_to_ptr(self) -> Option<Pointer> {
        match self.inner {
            CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => None,
            CPlaceInner::Addr(ptr, None) => Some(ptr),
            CPlaceInner::Addr(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
        }
    }

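    /// Write `from` into this place, asserting (via [`assert_assignable`])
    /// that `from`'s type is assignable to this place's type.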
    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
        assert_assignable(fx, from.layout().ty, self.layout().ty, 16);

        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
    }

    pub(crate) fn write_cvalue_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
    ) {
        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
    }

    fn write_cvalue_maybe_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
        method: &'static str,
    ) {
        fn transmute_scalar<'tcx>(
            fx: &mut FunctionCx<'_, '_, 'tcx>,
            var: Variable,
            data: Value,
            dst_ty: Type,
        ) {
            let src_ty = fx.bcx.func.dfg.value_type(data);
            assert_eq!(
                src_ty.bytes(),
                dst_ty.bytes(),
                "write_cvalue_transmute: {:?} -> {:?}",
                src_ty,
                dst_ty,
            );
            let data = match (src_ty, dst_ty) {
                (_, _) if src_ty == dst_ty => data,

                // This is a `write_cvalue_transmute`.
                (types::I32, types::F32)
                | (types::F32, types::I32)
                | (types::I64, types::F64)
                | (types::F64, types::I64) => codegen_bitcast(fx, dst_ty, data),
                _ if src_ty.is_vector() && dst_ty.is_vector() => codegen_bitcast(fx, dst_ty, data),
                _ if src_ty.is_vector() || dst_ty.is_vector() => {
                    // FIXME(bytecodealliance/wasmtime#6104) do something more efficient for transmutes between vectors and integers.
                    let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                        // specify stack slot alignment.
                        size: (src_ty.bytes() + 15) / 16 * 16,
                    });
                    let ptr = Pointer::stack_slot(stack_slot);
                    ptr.store(fx, data, MemFlags::trusted());
                    ptr.load(fx, dst_ty, MemFlags::trusted())
                }

                // `CValue`s should never contain SSA-only types, so if you ended
                // up here having seen an error like `B1 -> I8`, then before
                // calling `write_cvalue` you need to add a `bint` instruction.
                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
            };
            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
            fx.bcx.def_var(var, data);
        }

        assert_eq!(self.layout().size, from.layout().size);

        if fx.clif_comments.enabled() {
            use cranelift_codegen::cursor::{Cursor, CursorPosition};
            let cur_block = match fx.bcx.cursor().position() {
                CursorPosition::After(block) => block,
                _ => unreachable!(),
            };
            fx.add_comment(
                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
                format!(
                    "{}: {:?}: {:?} <- {:?}: {:?}",
                    method,
                    self.inner,
                    self.layout().ty,
                    from.0,
                    from.layout().ty
                ),
            );
        }

        let dst_layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let data = CValue(from.0, dst_layout).load_scalar(fx);
                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                transmute_scalar(fx, var, data, dst_ty);
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let (data1, data2) = if from.layout().ty == dst_layout.ty {
                    CValue(from.0, dst_layout).load_scalar_pair(fx)
                } else {
                    let (ptr, meta) = from.force_stack(fx);
                    assert!(meta.is_none());
                    CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
                };
                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                transmute_scalar(fx, var1, data1, dst_ty1);
                transmute_scalar(fx, var2, data2, dst_ty2);
            }
            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
            CPlaceInner::Addr(to_ptr, None) => {
                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
                    return;
                }

                let mut flags = MemFlags::new();
                flags.set_notrap();
                match from.layout().abi {
                    Abi::Scalar(_) => {
                        let val = from.load_scalar(fx);
                        to_ptr.store(fx, val, flags);
                        return;
                    }
                    Abi::ScalarPair(a_scalar, b_scalar) => {
                        let (value, extra) = from.load_scalar_pair(fx);
                        let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                        to_ptr.store(fx, value, flags);
                        to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                        return;
                    }
                    _ => {}
                }

                match from.0 {
                    CValueInner::ByVal(val) => {
                        to_ptr.store(fx, val, flags);
                    }
                    CValueInner::ByValPair(_, _) => {
                        bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
                    }
                    CValueInner::ByRef(from_ptr, None) => {
                        let from_addr = from_ptr.get_addr(fx);
                        let to_addr = to_ptr.get_addr(fx);
                        let src_layout = from.1;
                        let size = dst_layout.size.bytes();
                        let src_align = src_layout.align.abi.bytes() as u8;
                        let dst_align = dst_layout.align.abi.bytes() as u8;
                        fx.bcx.emit_small_memory_copy(
                            fx.target_config,
                            to_addr,
                            from_addr,
                            size,
                            dst_align,
                            src_align,
                            true,
                            flags,
                        );
                    }
                    CValueInner::ByRef(_, Some(_)) => todo!(),
                }
            }
        }
    }

    pub(crate) fn place_opaque_cast(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        ty: Ty<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: self.inner, layout: fx.layout_of(ty) }
    }

    pub(crate) fn place_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: FieldIdx,
    ) -> CPlace<'tcx> {
        let layout = self.layout();

        match self.inner {
            CPlaceInner::VarPair(local, var1, var2) => {
                let layout = layout.field(&*fx, field.index());

                match field.as_u32() {
                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                    _ => unreachable!("field should be 0 or 1"),
                }
            }
            _ => {}
        }

        let (base, extra) = match self.inner {
            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
            CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
                bug!("Expected CPlace::Addr, found {:?}", self)
            }
        };

        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
        if field_layout.is_unsized() {
            if let ty::Foreign(_) = field_layout.ty.kind() {
                assert!(extra.is_none());
                CPlace::for_ptr(field_ptr, field_layout)
            } else {
                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
            }
        } else {
            CPlace::for_ptr(field_ptr, field_layout)
        }
    }

    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
    pub(crate) fn place_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CPlace<'tcx> {
        let layout = self.layout();
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);

        match self.inner {
            CPlaceInner::Var(_, _) => unreachable!(),
            CPlaceInner::VarPair(_, _, _) => unreachable!(),
            CPlaceInner::Addr(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CPlace::for_ptr(field_ptr, lane_layout)
            }
            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
        }
    }

    pub(crate) fn place_index(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        index: Value,
    ) -> CPlace<'tcx> {
        let (elem_layout, ptr) = match self.layout().ty.kind() {
            ty::Array(elem_ty, _) => {
                let elem_layout = fx.layout_of(*elem_ty);
                match self.inner {
                    CPlaceInner::Addr(addr, None) => (elem_layout, addr),
                    CPlaceInner::Var(_, _)
                    | CPlaceInner::Addr(_, Some(_))
                    | CPlaceInner::VarPair(_, _, _) => bug!("Can't index into {self:?}"),
                }
            }
            ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_unsized().0),
            _ => bug!("place_index({:?})", self.layout().ty),
        };

        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);

        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
    }

    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
        if has_ptr_meta(fx.tcx, inner_layout.ty) {
            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
        } else {
            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
        }
    }

    pub(crate) fn place_ref(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        if has_ptr_meta(fx.tcx, self.layout().ty) {
            let (ptr, extra) = self.to_ptr_unsized();
            CValue::by_val_pair(ptr.get_addr(fx), extra, layout)
        } else {
            CValue::by_val(self.to_ptr().get_addr(fx), layout)
        }
    }

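    /// Project to the given enum variant. The storage is unchanged; only the
    /// layout is replaced with the variant's layout.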
    pub(crate) fn downcast_variant(
        self,
        fx: &FunctionCx<'_, '_, 'tcx>,
        variant: VariantIdx,
    ) -> Self {
        assert!(self.layout().is_sized());
        let layout = self.layout().for_variant(fx, variant);
        CPlace { inner: self.inner, layout }
    }
}

#[track_caller]
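/// Assert that a value of type `from_ty` may be written to a place of type
/// `to_ty`. This is an internal sanity check for cg_clif, not a soundness
/// requirement; see the `limit` early-return below.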
pub(crate) fn assert_assignable<'tcx>(
    fx: &FunctionCx<'_, '_, 'tcx>,
    from_ty: Ty<'tcx>,
    to_ty: Ty<'tcx>,
    limit: usize,
) {
    if limit == 0 {
        // `assert_assignable` exists solely to catch bugs in cg_clif; it isn't necessary for
        // soundness. Don't attempt to check deep types, to avoid exponential behavior in
        // certain cases.
        return;
    }
    match (from_ty.kind(), to_ty.kind()) {
        (ty::Ref(_, a, _), ty::Ref(_, b, _))
        | (
            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
        ) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::FnPtr(_), ty::FnPtr(_)) => {
            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                ParamEnv::reveal_all(),
                from_ty.fn_sig(fx.tcx),
            );
            let to_sig = fx
                .tcx
                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
            assert_eq!(
                from_sig, to_sig,
                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                from_sig, to_sig, fx,
            );
            // fn(&T) -> for<'l> fn(&'l T) is allowed
        }
        (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
            // FIXME(dyn-star): Do the right thing with DynKinds
            for (from, to) in from_traits.iter().zip(to_traits) {
                let from =
                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
                assert_eq!(
                    from, to,
                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
                    from_traits, to_traits, fx,
                );
            }
            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
        }
        (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
            let mut types_a = types_a.iter();
            let mut types_b = types_b.iter();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
            if adt_def_a.did() == adt_def_b.did() =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
        (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
            if def_id_a == def_id_b =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
            // No way to check if it is correct or not with polymorphization enabled
        }
        _ => {
            assert_eq!(
                from_ty,
                to_ty,
                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
                from_ty.kind(),
                to_ty.kind(),
                fx,
            );
        }
    }
}