use crate::common::*;
use crate::context::TypeLowering;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
use rustc_middle::ty::{self, Ty, TypeVisitableExt};
use rustc_target::abi::HasDataLayout;
use rustc_target::abi::{Abi, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
use smallvec::{smallvec, SmallVec};

use std::fmt::Write;

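/// Computes the LLVM type for `layout` when it is not a (cached) scalar:
/// vectors, scalar pairs, and aggregates. For ADTs that get an identified
/// (named) struct type, the body may be filled in later through `defer` to
/// allow recursion; `field_remapping` receives the Rust-field to LLVM-field
/// mapping whenever padding fillers are inserted between fields.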
fn uncached_llvm_type<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
    defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
    field_remapping: &mut Option<SmallVec<[u32; 4]>>,
) -> &'a Type {
    match layout.abi {
        Abi::Scalar(_) => bug!("handled elsewhere"),
        Abi::Vector { element, count } => {
            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
            return cx.type_vector(element, count);
        }
        Abi::ScalarPair(..) => {
            return cx.type_struct(
                &[
                    layout.scalar_pair_element_llvm_type(cx, 0, false),
                    layout.scalar_pair_element_llvm_type(cx, 1, false),
                ],
                false,
            );
        }
        Abi::Uninhabited | Abi::Aggregate { .. } => {}
    }

    let name = match layout.ty.kind() {
        // FIXME(eddyb) producing readable type names for trait objects can result
        // in problematically distinct types due to HRTB and subtyping (see #47638).
        // ty::Dynamic(..) |
        ty::Adt(..) | ty::Closure(..) | ty::Foreign(..) | ty::Generator(..) | ty::Str
            // For performance reasons we use names only when emitting LLVM IR.
            if !cx.sess().fewer_names() =>
        {
            let mut name = with_no_visible_paths!(with_no_trimmed_paths!(layout.ty.to_string()));
            if let (&ty::Adt(def, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                if def.is_enum() && !def.variants().is_empty() {
                    write!(&mut name, "::{}", def.variant(index).name).unwrap();
                }
            }
            if let (&ty::Generator(_, _, _), &Variants::Single { index }) =
                (layout.ty.kind(), &layout.variants)
            {
                write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
            }
            Some(name)
        }
        // Use identified structure types for ADTs. Due to pointee types in LLVM IR,
        // their definition might be recursive. Other cases are non-recursive and we
        // can use literal structure types.
        ty::Adt(..) => Some(String::new()),
        _ => None,
    };

    match layout.fields {
        FieldsShape::Primitive | FieldsShape::Union(_) => {
            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => cx.type_struct(&[fill], packed),
                Some(ref name) => {
                    let llty = cx.type_named_struct(name);
                    cx.set_struct_body(llty, &[fill], packed);
                    llty
                }
            }
        }
        FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
        FieldsShape::Arbitrary { .. } => match name {
            None => {
                let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
                *field_remapping = new_field_remapping;
                cx.type_struct(&llfields, packed)
            }
            Some(ref name) => {
                let llty = cx.type_named_struct(name);
                *defer = Some((llty, layout));
                llty
            }
        },
    }
}

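/// Lowers the fields of `layout` to a list of LLVM field types, walking them
/// in increasing-offset order and inserting `[N x i8]`-style padding fillers
/// where the target offsets require it. Returns the fields, whether the
/// struct must be packed, and (if any padding was inserted) a remapping from
/// Rust field index to LLVM field index.
///
/// As an illustrative sketch (the exact filler type can vary): a `#[repr(C)]`
/// struct with fields `a: u8, b: u32` lowers to `{ i8, [3 x i8], i32 }`,
/// with a `field_remapping` of `[0, 2]` since `b` lands in LLVM field 2.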
fn struct_llfields<'a, 'tcx>(
    cx: &CodegenCx<'a, 'tcx>,
    layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool, Option<SmallVec<[u32; 4]>>) {
    debug!("struct_llfields: {:#?}", layout);
    let field_count = layout.fields.count();

    let mut packed = false;
    let mut offset = Size::ZERO;
    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    let mut field_remapping = smallvec![0; field_count];
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
        let effective_field_align =
            layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
        packed |= effective_field_align < field.align.abi;

        debug!(
            "struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
            i,
            field,
            offset,
            target_offset,
            effective_field_align.bytes()
        );
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align.min(effective_field_align);
            assert_eq!(offset.align_to(padding_align) + padding, target_offset);
            result.push(cx.type_padding_filler(padding, padding_align));
            debug!("    padding before: {:?}", padding);
        }
        field_remapping[i] = result.len() as u32;
        result.push(field.llvm_type(cx));
        offset = target_offset + field.size;
        prev_effective_align = effective_field_align;
    }
    let padding_used = result.len() > field_count;
    if layout.is_sized() && field_count > 0 {
        if offset > layout.size {
            bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
        }
        let padding = layout.size - offset;
        if padding != Size::ZERO {
            let padding_align = prev_effective_align;
            assert_eq!(offset.align_to(padding_align) + padding, layout.size);
            debug!(
                "struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
                padding, offset, layout.size
            );
            result.push(cx.type_padding_filler(padding, padding_align));
        }
    } else {
        debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
    }
    let field_remapping = padding_used.then_some(field_remapping);
    (result, packed, field_remapping)
}

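// Convenience helpers for querying the size and ABI alignment of a type via
// its (cached) layout.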
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
        self.layout_of(ty).align.abi
    }

    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
        self.layout_of(ty).size
    }

    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
        let layout = self.layout_of(ty);
        (layout.size, layout.align.abi)
    }
}

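/// Extension trait over `TyAndLayout` with the LLVM-specific lowering queries
/// used throughout this backend: how a layout maps to an LLVM type, how its
/// fields are indexed once padding fillers are present, and what is known
/// about pointees. A typical (hypothetical) call site looks roughly like
/// `let llty = cx.layout_of(ty).llvm_type(cx);` when lowering a local.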
pub trait LayoutLlvmExt<'tcx> {
    fn is_llvm_immediate(&self) -> bool;
    fn is_llvm_scalar_pair(&self) -> bool;
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
    fn scalar_llvm_type_at<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        scalar: Scalar,
        offset: Size,
    ) -> &'a Type;
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type;
    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64;
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type>;
}

impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
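    /// Whether a value of this layout is passed around as a single LLVM
    /// value (scalars and vectors), as opposed to a pair of immediates or an
    /// in-memory aggregate.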
    fn is_llvm_immediate(&self) -> bool {
        match self.abi {
            Abi::Scalar(_) | Abi::Vector { .. } => true,
            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
        }
    }

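    /// Whether this layout is lowered as two LLVM immediates (e.g. fat
    /// pointers, which carry a data pointer plus a length or vtable pointer).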
    fn is_llvm_scalar_pair(&self) -> bool {
        match self.abi {
            Abi::ScalarPair(..) => true,
            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
        }
    }

    /// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
    /// The pointee type of the pointer in `PlaceRef` is always this type.
    /// For sized types, it is also the right LLVM type for an `alloca`
    /// containing a value of that type, and most immediates (except `bool`).
    /// Unsized types, however, are represented by a "minimal unit", e.g.
    /// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
    /// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
    /// If the type is an unsized struct, the regular layout is generated,
    /// with the inner-most trailing unsized field using the "minimal unit"
    /// of that field's type - this is useful for taking the address of
    /// that field and ensuring the struct has the right alignment.
    fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let Abi::Scalar(scalar) = self.abi {
            // Use a different cache for scalars because pointers to DSTs
            // can be either fat or thin (data pointers of fat pointers).
            if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
                return llty;
            }
            let llty = match *self.ty.kind() {
                ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
                    cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
                }
                ty::Adt(def, _) if def.is_box() => {
                    cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
                }
                ty::FnPtr(sig) => {
                    cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
                }
                _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
            };
            cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
            return llty;
        }

        // Check the cache.
        let variant_index = match self.variants {
            Variants::Single { index } => Some(index),
            _ => None,
        };
        if let Some(llty) = cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
            return llty.lltype;
        }

        debug!("llvm_type({:#?})", self);

        assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);

        // Make sure lifetimes are erased, to avoid generating distinct LLVM
        // types for Rust types that only differ in the choice of lifetimes.
        let normal_ty = cx.tcx.erase_regions(self.ty);

        let mut defer = None;
        let mut field_remapping = None;
        let llty = if self.ty != normal_ty {
            let mut layout = cx.layout_of(normal_ty);
            if let Some(v) = variant_index {
                layout = layout.for_variant(cx, v);
            }
            layout.llvm_type(cx)
        } else {
            uncached_llvm_type(cx, *self, &mut defer, &mut field_remapping)
        };
        debug!("--> mapped {:#?} to llty={:?}", self, llty);

        cx.type_lowering
            .borrow_mut()
            .insert((self.ty, variant_index), TypeLowering { lltype: llty, field_remapping });

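        // If a named struct was deferred above, its body is only filled in
        // now, after the cache entry exists, so that recursive types find
        // the (still-opaque) named struct instead of recursing forever.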
        if let Some((llty, layout)) = defer {
            let (llfields, packed, new_field_remapping) = struct_llfields(cx, layout);
            cx.set_struct_body(llty, &llfields, packed);
            cx.type_lowering
                .borrow_mut()
                .get_mut(&(self.ty, variant_index))
                .unwrap()
                .field_remapping = new_field_remapping;
        }
        llty
    }

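    /// Like `llvm_type`, but for the immediate (in-register) form: the only
    /// difference is that `bool` is `i1` as an immediate and `i8` in memory.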
    fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
        if let Abi::Scalar(scalar) = self.abi {
            if scalar.is_bool() {
                return cx.type_i1();
            }
        }
        self.llvm_type(cx)
    }

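    /// Lowers a single `Scalar` to its LLVM type; `offset` is the scalar's
    /// position within `self` and is used to look up pointee information so
    /// that pointers can get a better pointee type than `i8`.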
    fn scalar_llvm_type_at<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        scalar: Scalar,
        offset: Size,
    ) -> &'a Type {
        match scalar.primitive() {
            Int(i, _) => cx.type_from_integer(i),
            F32 => cx.type_f32(),
            F64 => cx.type_f64(),
            Pointer(address_space) => {
                // If we know the alignment, pick something better than i8.
                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
                    cx.type_pointee_for_align(pointee.align)
                } else {
                    cx.type_i8()
                };
                cx.type_ptr_to_ext(pointee, address_space)
            }
        }
    }

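    /// Lowers one half of an `Abi::ScalarPair` layout; `index` selects the
    /// element (0 or 1) and `immediate` chooses between the in-register and
    /// in-memory form, which (as with `immediate_llvm_type`) only differ for
    /// `bool`.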
    fn scalar_pair_element_llvm_type<'a>(
        &self,
        cx: &CodegenCx<'a, 'tcx>,
        index: usize,
        immediate: bool,
    ) -> &'a Type {
        // HACK(eddyb) special-case fat pointers until LLVM removes
        // pointee types, to avoid bitcasting every `OperandRef::deref`.
        match *self.ty.kind() {
            ty::Ref(..) | ty::RawPtr(_) => {
                return self.field(cx, index).llvm_type(cx);
            }
            // Only wide-pointer boxes are handled as pointers; thin-pointer
            // boxes with scalar allocators are handled by the general logic below.
            ty::Adt(def, substs) if def.is_box() && cx.layout_of(substs.type_at(1)).is_zst() => {
                let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
            }
            // `dyn* Trait` has the same ABI as `*mut dyn Trait`
            ty::Dynamic(bounds, region, ty::DynStar) => {
                let ptr_ty =
                    Ty::new_mut_ptr(cx.tcx, Ty::new_dynamic(cx.tcx, bounds, region, ty::Dyn));
                return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
            }
            _ => {}
        }

        let Abi::ScalarPair(a, b) = self.abi else {
            bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
        };
        let scalar = [a, b][index];

        // Make sure to return the same type `immediate_llvm_type` would when
        // dealing with an immediate pair. This means that `(bool, bool)` is
        // effectively represented as `{i8, i8}` in memory and two `i1`s as an
        // immediate, just like `bool` is typically `i8` in memory and only `i1`
        // when immediate. We need to load/store `bool` as `i8` to avoid
        // crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
        if immediate && scalar.is_bool() {
            return cx.type_i1();
        }

        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
        self.scalar_llvm_type_at(cx, scalar, offset)
    }

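    /// Maps a Rust field index to the corresponding LLVM struct field index,
    /// accounting for any padding fillers recorded in `field_remapping`. For
    /// the illustrative `{ i8, [3 x i8], i32 }` lowering sketched above,
    /// Rust field 1 maps to LLVM field 2.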
    fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {
        match self.abi {
            Abi::Scalar(_) | Abi::ScalarPair(..) => {
                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
            }
            _ => {}
        }
        match self.fields {
            FieldsShape::Primitive | FieldsShape::Union(_) => {
                bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
            }

            FieldsShape::Array { .. } => index as u64,

            FieldsShape::Arbitrary { .. } => {
                let variant_index = match self.variants {
                    Variants::Single { index } => Some(index),
                    _ => None,
                };

                // Look up the LLVM field if the indices do not match the memory
                // order due to padding. If `field_remapping` is `None`, no padding
                // was used and the LLVM field index matches the memory index.
                match cx.type_lowering.borrow().get(&(self.ty, variant_index)) {
                    Some(TypeLowering { field_remapping: Some(ref remap), .. }) => {
                        remap[index] as u64
                    }
                    Some(_) => self.fields.memory_index(index) as u64,
                    None => {
                        bug!("TyAndLayout::llvm_field_index({:?}): type info not found", self)
                    }
                }
            }
        }
    }

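    /// Cached variant of `Ty::ty_and_layout_pointee_info_at`: returns what is
    /// known about a pointee at `offset` within `self`, memoized per
    /// `(type, offset)` pair.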
    // FIXME(eddyb) this having the same name as `TyAndLayout::pointee_info_at`
    // (the inherent method, which is lacking this caching logic) can result in
    // the uncached version being called - not wrong, but potentially inefficient.
    fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
        if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
            return pointee;
        }

        let result = Ty::ty_and_layout_pointee_info_at(*self, cx, offset);

        cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
        result
    }

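    /// Picks an LLVM type suitable for copying a value of this layout as a
    /// single load/store, if one exists: currently small integer arrays,
    /// which are copied as vectors so later vectorization is not suppressed.
    /// Returns `None` when no such type is chosen and the generic copy path
    /// applies.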
    fn scalar_copy_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> Option<&'a Type> {
        debug_assert!(self.is_sized());

        // FIXME: this is a fairly arbitrary choice, but 128 bits on WASM
        // (matching the 128-bit SIMD types proposal) and 256 bits on x64
        // (like AVX2 registers) seems at least like a tolerable starting point.
        let threshold = cx.data_layout().pointer_size * 4;
        if self.layout.size() > threshold {
            return None;
        }

        // Vectors, even for non-power-of-two sizes, have the same layout as
        // arrays but don't count as aggregate types.
        if let FieldsShape::Array { count, .. } = self.layout.fields()
            && let element = self.field(cx, 0)
            && element.ty.is_integral()
        {
            // `cx.type_ix(bits)` is tempting here, but while that works great
            // for things that *stay* as memory-to-memory copies, it also ends
            // up suppressing vectorization as it introduces shifts when it
            // extracts all the individual values.

            let ety = element.llvm_type(cx);
            return Some(cx.type_vector(ety, *count));
        }

        // FIXME: The above only handled integer arrays; surely more things
        // would also be possible. Be careful about provenance, though!
        None
    }
}