• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 use hir::def_id::DefId;
2 use rustc_hir as hir;
3 use rustc_index::bit_set::BitSet;
4 use rustc_index::{IndexSlice, IndexVec};
5 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
6 use rustc_middle::query::Providers;
7 use rustc_middle::ty::layout::{
8     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
9 };
10 use rustc_middle::ty::{
11     self, subst::SubstsRef, AdtDef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitableExt,
12 };
13 use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo};
14 use rustc_span::symbol::Symbol;
15 use rustc_span::DUMMY_SP;
16 use rustc_target::abi::*;
17 
18 use std::fmt::Debug;
19 use std::iter;
20 
21 use crate::errors::{
22     MultipleArrayFieldsSimdType, NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType,
23 };
24 use crate::layout_sanity_check::sanity_check_layout;
25 
provide(providers: &mut Providers)26 pub fn provide(providers: &mut Providers) {
27     *providers = Providers { layout_of, ..*providers };
28 }
29 
30 #[instrument(skip(tcx, query), level = "debug")]
layout_of<'tcx>( tcx: TyCtxt<'tcx>, query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, ) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>>31 fn layout_of<'tcx>(
32     tcx: TyCtxt<'tcx>,
33     query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
34 ) -> Result<TyAndLayout<'tcx>, &'tcx LayoutError<'tcx>> {
35     let (param_env, ty) = query.into_parts();
36     debug!(?ty);
37 
38     let param_env = param_env.with_reveal_all_normalized(tcx);
39     let unnormalized_ty = ty;
40 
41     // FIXME: We might want to have two different versions of `layout_of`:
42     // One that can be called after typecheck has completed and can use
43     // `normalize_erasing_regions` here and another one that can be called
44     // before typecheck has completed and uses `try_normalize_erasing_regions`.
45     let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
46         Ok(t) => t,
47         Err(normalization_error) => {
48             return Err(tcx
49                 .arena
50                 .alloc(LayoutError::NormalizationFailure(ty, normalization_error)));
51         }
52     };
53 
54     if ty != unnormalized_ty {
55         // Ensure this layout is also cached for the normalized type.
56         return tcx.layout_of(param_env.and(ty));
57     }
58 
59     let cx = LayoutCx { tcx, param_env };
60 
61     let layout = layout_of_uncached(&cx, ty)?;
62     let layout = TyAndLayout { ty, layout };
63 
64     record_layout_for_printing(&cx, layout);
65 
66     sanity_check_layout(&cx, &layout);
67 
68     Ok(layout)
69 }
70 
error<'tcx>( cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, err: LayoutError<'tcx>, ) -> &'tcx LayoutError<'tcx>71 fn error<'tcx>(
72     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
73     err: LayoutError<'tcx>,
74 ) -> &'tcx LayoutError<'tcx> {
75     cx.tcx.arena.alloc(err)
76 }
77 
univariant_uninterned<'tcx>( cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, ty: Ty<'tcx>, fields: &IndexSlice<FieldIdx, Layout<'_>>, repr: &ReprOptions, kind: StructKind, ) -> Result<LayoutS, &'tcx LayoutError<'tcx>>78 fn univariant_uninterned<'tcx>(
79     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
80     ty: Ty<'tcx>,
81     fields: &IndexSlice<FieldIdx, Layout<'_>>,
82     repr: &ReprOptions,
83     kind: StructKind,
84 ) -> Result<LayoutS, &'tcx LayoutError<'tcx>> {
85     let dl = cx.data_layout();
86     let pack = repr.pack;
87     if pack.is_some() && repr.align.is_some() {
88         cx.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
89         return Err(cx.tcx.arena.alloc(LayoutError::Unknown(ty)));
90     }
91 
92     cx.univariant(dl, fields, repr, kind).ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))
93 }
94 
/// Computes the layout of a type without consulting (or populating) the
/// `layout_of` query cache. The type is expected to already be normalized;
/// `layout_of` takes care of that before calling in.
fn layout_of_uncached<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
    let tcx = cx.tcx;
    let param_env = cx.param_env;
    let dl = cx.data_layout();
    // Helper: a scalar whose valid range is the full range of the primitive.
    let scalar_unit = |value: Primitive| {
        let size = value.size(dl);
        assert!(size.bits() <= 128);
        Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
    };
    let scalar = |value: Primitive| tcx.mk_layout(LayoutS::scalar(cx, scalar_unit(value)));

    // Helper: interned single-variant layout of `ty` with the given fields.
    let univariant = |fields: &IndexSlice<FieldIdx, Layout<'_>>, repr: &ReprOptions, kind| {
        Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
    };
    // All non-region inference variables must have been resolved by now.
    debug_assert!(!ty.has_non_region_infer());

    Ok(match *ty.kind() {
        // Basic scalars.
        ty::Bool => tcx.mk_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I8, false),
                valid_range: WrappingRange { start: 0, end: 1 },
            },
        )),
        ty::Char => tcx.mk_layout(LayoutS::scalar(
            cx,
            Scalar::Initialized {
                value: Int(I32, false),
                valid_range: WrappingRange { start: 0, end: 0x10FFFF },
            },
        )),
        ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
        ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
        ty::Float(fty) => scalar(match fty {
            ty::FloatTy::F32 => F32,
            ty::FloatTy::F64 => F64,
        }),
        ty::FnPtr(_) => {
            // Function pointers live in the instruction address space and
            // are never null.
            let mut ptr = scalar_unit(Pointer(dl.instruction_address_space));
            ptr.valid_range_mut().start = 1;
            tcx.mk_layout(LayoutS::scalar(cx, ptr))
        }

        // The never type.
        ty::Never => tcx.mk_layout(cx.layout_of_never_type()),

        // Potentially-wide pointers.
        ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
            let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
            if !ty.is_unsafe_ptr() {
                // References (unlike raw pointers) are never null.
                data_ptr.valid_range_mut().start = 1;
            }

            let pointee = tcx.normalize_erasing_regions(param_env, pointee);
            if pointee.is_sized(tcx, param_env) {
                // A sized pointee means a thin pointer: just the data scalar.
                return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
            }

            let metadata = if let Some(metadata_def_id) = tcx.lang_items().metadata_type()
                // Projection eagerly bails out when the pointee references errors,
                // fall back to structurally deducing metadata.
                && !pointee.references_error()
            {
                let pointee_metadata = Ty::new_projection(tcx,metadata_def_id, [pointee]);
                let metadata_ty = match tcx.try_normalize_erasing_regions(
                    param_env,
                    pointee_metadata,
                ) {
                    Ok(metadata_ty) => metadata_ty,
                    Err(mut err) => {
                        // Usually `<Ty as Pointee>::Metadata` can't be normalized because
                        // its struct tail cannot be normalized either, so try to get a
                        // more descriptive layout error here, which will lead to less confusing
                        // diagnostics.
                        match tcx.try_normalize_erasing_regions(
                            param_env,
                            tcx.struct_tail_without_normalization(pointee),
                        ) {
                            Ok(_) => {},
                            Err(better_err) => {
                                err = better_err;
                            }
                        }
                        return Err(error(cx, LayoutError::NormalizationFailure(pointee, err)));
                    },
                };

                let metadata_layout = cx.layout_of(metadata_ty)?;
                // If the metadata is a 1-zst, then the pointer is thin.
                if metadata_layout.is_zst() && metadata_layout.align.abi.bytes() == 1 {
                    return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
                }

                let Abi::Scalar(metadata) = metadata_layout.abi else {
                    return Err(error(cx, LayoutError::Unknown(pointee)));
                };

                metadata
            } else {
                // No `Pointee` lang item (or errors in the pointee): deduce
                // the metadata from the structural tail of the pointee.
                let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);

                match unsized_part.kind() {
                    ty::Foreign(..) => {
                        // Extern types are opaque and carry no metadata: thin pointer.
                        return Ok(tcx.mk_layout(LayoutS::scalar(cx, data_ptr)));
                    }
                    ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
                    ty::Dynamic(..) => {
                        // Trait objects carry a non-null vtable pointer as metadata.
                        let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
                        vtable.valid_range_mut().start = 1;
                        vtable
                    }
                    _ => {
                        return Err(error(cx, LayoutError::Unknown(pointee)));
                    }
                }
            };

            // Effectively a (ptr, meta) tuple.
            tcx.mk_layout(cx.scalar_pair(data_ptr, metadata))
        }

        ty::Dynamic(_, _, ty::DynStar) => {
            // `dyn*` is a (data, vtable) scalar pair; only the vtable half
            // is known non-null.
            let mut data = scalar_unit(Pointer(AddressSpace::DATA));
            data.valid_range_mut().start = 0;
            let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
            vtable.valid_range_mut().start = 1;
            tcx.mk_layout(cx.scalar_pair(data, vtable))
        }

        // Arrays and slices.
        ty::Array(element, mut count) => {
            if count.has_projections() {
                count = tcx.normalize_erasing_regions(param_env, count);
                if count.has_projections() {
                    return Err(error(cx, LayoutError::Unknown(ty)));
                }
            }

            let count = count
                .try_eval_target_usize(tcx, param_env)
                .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?;
            let element = cx.layout_of(element)?;
            let size = element
                .size
                .checked_mul(count, dl)
                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;

            // A non-empty array of an uninhabited element is itself uninhabited.
            let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
                Abi::Uninhabited
            } else {
                Abi::Aggregate { sized: true }
            };

            // An empty array has no bytes, hence no niche.
            let largest_niche = if count != 0 { element.largest_niche } else { None };

            tcx.mk_layout(LayoutS {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields: FieldsShape::Array { stride: element.size, count },
                abi,
                largest_niche,
                align: element.align,
                size,
            })
        }
        ty::Slice(element) => {
            // Unsized: the layout records only the stride and element alignment.
            let element = cx.layout_of(element)?;
            tcx.mk_layout(LayoutS {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields: FieldsShape::Array { stride: element.size, count: 0 },
                abi: Abi::Aggregate { sized: false },
                largest_niche: None,
                align: element.align,
                size: Size::ZERO,
            })
        }
        ty::Str => tcx.mk_layout(LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
            abi: Abi::Aggregate { sized: false },
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }),

        // Odd unit types.
        ty::FnDef(..) => {
            univariant(IndexSlice::empty(), &ReprOptions::default(), StructKind::AlwaysSized)?
        }
        ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
            // Zero-field, but unsized: build a unit layout, then flip `sized`.
            let mut unit = univariant_uninterned(
                cx,
                ty,
                IndexSlice::empty(),
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?;
            match unit.abi {
                Abi::Aggregate { ref mut sized } => *sized = false,
                _ => bug!(),
            }
            tcx.mk_layout(unit)
        }

        ty::Generator(def_id, substs, _) => generator_layout(cx, ty, def_id, substs)?,

        ty::Closure(_, ref substs) => {
            // A closure is laid out as a struct of its captured upvars.
            let tys = substs.as_closure().upvar_tys();
            univariant(
                &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                StructKind::AlwaysSized,
            )?
        }

        ty::Tuple(tys) => {
            // A non-empty tuple may have an unsized last element.
            let kind =
                if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };

            univariant(
                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                kind,
            )?
        }

        // SIMD vector types.
        ty::Adt(def, substs) if def.repr().simd() => {
            if !def.is_struct() {
                // Should have yielded E0517 by now.
                tcx.sess.delay_span_bug(
                    DUMMY_SP,
                    "#[repr(simd)] was applied to an ADT that is not a struct",
                );
                return Err(error(cx, LayoutError::Unknown(ty)));
            }

            let fields = &def.non_enum_variant().fields;

            // Supported SIMD vectors are homogeneous ADTs with at least one field:
            //
            // * #[repr(simd)] struct S(T, T, T, T);
            // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
            // * #[repr(simd)] struct S([T; 4])
            //
            // where T is a primitive scalar (integer/float/pointer).

            // SIMD vectors with zero fields are not supported.
            // (should be caught by typeck)
            if fields.is_empty() {
                tcx.sess.emit_fatal(ZeroLengthSimdType { ty })
            }

            // Type of the first ADT field:
            let f0_ty = fields[FieldIdx::from_u32(0)].ty(tcx, substs);

            // Heterogeneous SIMD vectors are not supported:
            // (should be caught by typeck)
            for fi in fields {
                if fi.ty(tcx, substs) != f0_ty {
                    tcx.sess.delay_span_bug(
                        DUMMY_SP,
                        "#[repr(simd)] was applied to an ADT with heterogeneous field type",
                    );
                    return Err(error(cx, LayoutError::Unknown(ty)));
                }
            }

            // The element type and number of elements of the SIMD vector
            // are obtained from:
            //
            // * the element type and length of the single array field, if
            // the first field is of array type, or
            //
            // * the homogeneous field type and the number of fields.
            let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
                // First ADT field is an array:

                // SIMD vectors with multiple array fields are not supported:
                // Can't be caught by typeck with a generic simd type.
                if def.non_enum_variant().fields.len() != 1 {
                    tcx.sess.emit_fatal(MultipleArrayFieldsSimdType { ty });
                }

                // Extract the number of elements from the layout of the array field:
                let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else {
                    return Err(error(cx, LayoutError::Unknown(ty)));
                };

                (*e_ty, *count, true)
            } else {
                // First ADT field is not an array:
                (f0_ty, def.non_enum_variant().fields.len() as _, false)
            };

            // SIMD vectors of zero length are not supported.
            // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
            // support.
            //
            // Can't be caught in typeck if the array length is generic.
            if e_len == 0 {
                tcx.sess.emit_fatal(ZeroLengthSimdType { ty });
            } else if e_len > MAX_SIMD_LANES {
                tcx.sess.emit_fatal(OversizedSimdType { ty, max_lanes: MAX_SIMD_LANES });
            }

            // Compute the ABI of the element type:
            let e_ly = cx.layout_of(e_ty)?;
            let Abi::Scalar(e_abi) = e_ly.abi else {
                // This error isn't caught in typeck, e.g., if
                // the element type of the vector is generic.
                tcx.sess.emit_fatal(NonPrimitiveSimdType { ty, e_ty });
            };

            // Compute the size and alignment of the vector:
            let size = e_ly
                .size
                .checked_mul(e_len, dl)
                .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;
            let align = dl.vector_align(size);
            let size = size.align_to(align.abi);

            // Compute the placement of the vector fields:
            let fields = if is_array {
                FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() }
            } else {
                FieldsShape::Array { stride: e_ly.size, count: e_len }
            };

            tcx.mk_layout(LayoutS {
                variants: Variants::Single { index: FIRST_VARIANT },
                fields,
                abi: Abi::Vector { element: e_abi, count: e_len },
                largest_niche: e_ly.largest_niche,
                size,
                align,
            })
        }

        // ADTs.
        ty::Adt(def, substs) => {
            // Cache the field layouts.
            let variants = def
                .variants()
                .iter()
                .map(|v| {
                    v.fields
                        .iter()
                        .map(|field| Ok(cx.layout_of(field.ty(tcx, substs))?.layout))
                        .try_collect::<IndexVec<_, _>>()
                })
                .try_collect::<IndexVec<VariantIdx, _>>()?;

            if def.is_union() {
                if def.repr().pack.is_some() && def.repr().align.is_some() {
                    cx.tcx.sess.delay_span_bug(
                        tcx.def_span(def.did()),
                        "union cannot be packed and aligned",
                    );
                    return Err(error(cx, LayoutError::Unknown(ty)));
                }

                return Ok(tcx.mk_layout(
                    cx.layout_of_union(&def.repr(), &variants)
                        .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?,
                ));
            }

            let get_discriminant_type =
                |min, max| Integer::repr_discr(tcx, ty, &def.repr(), min, max);

            // Discriminant values only exist for enums; empty for structs.
            let discriminants_iter = || {
                def.is_enum()
                    .then(|| def.discriminants(tcx).map(|(v, d)| (v, d.val as i128)))
                    .into_iter()
                    .flatten()
            };

            let dont_niche_optimize_enum = def.repr().inhibit_enum_layout_opt()
                || def
                    .variants()
                    .iter_enumerated()
                    .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()));

            // A struct is potentially unsized when its last field's type is unsized.
            let maybe_unsized = def.is_struct()
                && def.non_enum_variant().tail_opt().is_some_and(|last_field| {
                    let param_env = tcx.param_env(def.did());
                    !tcx.type_of(last_field.did).subst_identity().is_sized(tcx, param_env)
                });

            let Some(layout) = cx.layout_of_struct_or_enum(
                &def.repr(),
                &variants,
                def.is_enum(),
                def.is_unsafe_cell(),
                tcx.layout_scalar_valid_range(def.did()),
                get_discriminant_type,
                discriminants_iter(),
                dont_niche_optimize_enum,
                !maybe_unsized,
            ) else {
                return Err(error(cx, LayoutError::SizeOverflow(ty)));
            };

            // If the struct tail is sized and can be unsized, check that unsizing doesn't move the fields around.
            if cfg!(debug_assertions)
                && maybe_unsized
                && def.non_enum_variant().tail().ty(tcx, substs).is_sized(tcx, cx.param_env)
            {
                // Re-run layout with the tail swapped for an unsized type (`[u8]`)
                // and compare field offsets against the sized layout.
                let mut variants = variants;
                let tail_replacement = cx.layout_of(Ty::new_slice(tcx, tcx.types.u8)).unwrap();
                *variants[FIRST_VARIANT].raw.last_mut().unwrap() = tail_replacement.layout;

                let Some(unsized_layout) = cx.layout_of_struct_or_enum(
                    &def.repr(),
                    &variants,
                    def.is_enum(),
                    def.is_unsafe_cell(),
                    tcx.layout_scalar_valid_range(def.did()),
                    get_discriminant_type,
                    discriminants_iter(),
                    dont_niche_optimize_enum,
                    !maybe_unsized,
                ) else {
                    bug!("failed to compute unsized layout of {ty:?}");
                };

                let FieldsShape::Arbitrary { offsets: sized_offsets, .. } = &layout.fields else {
                    bug!("unexpected FieldsShape for sized layout of {ty:?}: {:?}", layout.fields);
                };
                let FieldsShape::Arbitrary { offsets: unsized_offsets, .. } = &unsized_layout.fields else {
                    bug!("unexpected FieldsShape for unsized layout of {ty:?}: {:?}", unsized_layout.fields);
                };

                let (sized_tail, sized_fields) = sized_offsets.raw.split_last().unwrap();
                let (unsized_tail, unsized_fields) = unsized_offsets.raw.split_last().unwrap();

                if sized_fields != unsized_fields {
                    bug!("unsizing {ty:?} changed field order!\n{layout:?}\n{unsized_layout:?}");
                }

                if sized_tail < unsized_tail {
                    bug!("unsizing {ty:?} moved tail backwards!\n{layout:?}\n{unsized_layout:?}");
                }
            }

            tcx.mk_layout(layout)
        }

        // Types with no meaningful known layout.
        ty::Alias(..) => {
            // NOTE(eddyb) `layout_of` query should've normalized these away,
            // if that was possible, so there's no reason to try again here.
            return Err(error(cx, LayoutError::Unknown(ty)));
        }

        ty::Bound(..) | ty::GeneratorWitness(..) | ty::GeneratorWitnessMIR(..) | ty::Infer(_) => {
            bug!("Layout::compute: unexpected type `{}`", ty)
        }

        ty::Placeholder(..) | ty::Param(_) | ty::Error(_) => {
            return Err(error(cx, LayoutError::Unknown(ty)));
        }
    })
}
563 
/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
#[derive(Clone, Debug, PartialEq)]
enum SavedLocalEligibility {
    /// Not yet seen in any variant's fields.
    Unassigned,
    /// Seen in exactly one variant (so far); eligible for overlap.
    Assigned(VariantIdx),
    /// Must live in the shared prefix; once prefix order is decided,
    /// `Some` carries the local's field index within the prefix.
    Ineligible(Option<FieldIdx>),
}
571 
572 // When laying out generators, we divide our saved local fields into two
573 // categories: overlap-eligible and overlap-ineligible.
574 //
575 // Those fields which are ineligible for overlap go in a "prefix" at the
576 // beginning of the layout, and always have space reserved for them.
577 //
578 // Overlap-eligible fields are only assigned to one variant, so we lay
579 // those fields out for each variant and put them right after the
580 // prefix.
581 //
582 // Finally, in the layout details, we point to the fields from the
583 // variants they are assigned to. It is possible for some fields to be
584 // included in multiple variants. No field ever "moves around" in the
585 // layout; its offset is always the same.
586 //
587 // Also included in the layout are the upvars and the discriminant.
588 // These are included as fields on the "outer" layout; they are not part
589 // of any variant.
590 
/// Compute the eligibility and assignment of each local.
///
/// Returns the set of overlap-ineligible locals (to be promoted to the
/// prefix) together with the per-local `SavedLocalEligibility` assignment.
fn generator_saved_local_eligibility(
    info: &GeneratorLayout<'_>,
) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
    use SavedLocalEligibility::*;

    // Start every saved local as `Unassigned`; the passes below refine this.
    let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
        IndexVec::from_elem(Unassigned, &info.field_tys);

    // The saved locals not eligible for overlap. These will get
    // "promoted" to the prefix of our generator.
    let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());

    // Figure out which of our saved locals are fields in only
    // one variant. The rest are deemed ineligible for overlap.
    for (variant_index, fields) in info.variant_fields.iter_enumerated() {
        for local in fields {
            match assignments[*local] {
                Unassigned => {
                    assignments[*local] = Assigned(variant_index);
                }
                Assigned(idx) => {
                    // We've already seen this local at another suspension
                    // point, so it is no longer a candidate.
                    trace!(
                        "removing local {:?} in >1 variant ({:?}, {:?})",
                        local,
                        variant_index,
                        idx
                    );
                    ineligible_locals.insert(*local);
                    assignments[*local] = Ineligible(None);
                }
                Ineligible(_) => {}
            }
        }
    }

    // Next, check every pair of eligible locals to see if they
    // conflict.
    for local_a in info.storage_conflicts.rows() {
        let conflicts_a = info.storage_conflicts.count(local_a);
        if ineligible_locals.contains(local_a) {
            continue;
        }

        for local_b in info.storage_conflicts.iter(local_a) {
            // local_a and local_b are storage live at the same time, therefore they
            // cannot overlap in the generator layout. The only way to guarantee
            // this is if they are in the same variant, or one is ineligible
            // (which means it is stored in every variant).
            if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] {
                continue;
            }

            // If they conflict, we will choose one to make ineligible.
            // This is not always optimal; it's just a greedy heuristic that
            // seems to produce good results most of the time.
            // The local with more conflicts is evicted, since keeping the
            // less-conflicted one gives more remaining overlap opportunities.
            let conflicts_b = info.storage_conflicts.count(local_b);
            let (remove, other) =
                if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
            ineligible_locals.insert(remove);
            assignments[remove] = Ineligible(None);
            trace!("removing local {:?} due to conflict with {:?}", remove, other);
        }
    }

    // Count the number of variants in use. If only one of them, then it is
    // impossible to overlap any locals in our layout. In this case it's
    // always better to make the remaining locals ineligible, so we can
    // lay them out with the other locals in the prefix and eliminate
    // unnecessary padding bytes.
    {
        let mut used_variants = BitSet::new_empty(info.variant_fields.len());
        for assignment in &assignments {
            if let Assigned(idx) = assignment {
                used_variants.insert(*idx);
            }
        }
        if used_variants.count() < 2 {
            for assignment in assignments.iter_mut() {
                *assignment = Ineligible(None);
            }
            ineligible_locals.insert_all();
        }
    }

    // Write down the order of our locals that will be promoted to the prefix.
    // Iteration order of the bitset determines each local's prefix field index.
    {
        for (idx, local) in ineligible_locals.iter().enumerate() {
            assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
        }
    }
    debug!("generator saved local assignments: {:?}", assignments);

    (ineligible_locals, assignments)
}
688 
/// Compute the full generator layout.
///
/// A generator's layout consists of a shared "prefix" (upvars, the
/// discriminant tag, and all overlap-ineligible saved locals, each wrapped
/// in `MaybeUninit`) followed by per-variant storage for the
/// overlap-eligible saved locals of that suspension point. Eligible locals
/// from different variants may overlap in memory; ineligible ones are
/// "promoted" into the prefix so they keep a stable address across states.
fn generator_layout<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    ty: Ty<'tcx>,
    def_id: hir::def_id::DefId,
    substs: SubstsRef<'tcx>,
) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> {
    use SavedLocalEligibility::*;
    let tcx = cx.tcx;
    // Instantiate a saved-local type with this generator's substitutions.
    let subst_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).subst(tcx, substs);

    // MIR-derived info about which locals are live across which suspension
    // points; absent e.g. if the generator is tainted by errors.
    let Some(info) = tcx.generator_layout(def_id) else {
        return Err(error(cx, LayoutError::Unknown(ty)));
    };
    // Partition saved locals into overlap-eligible ones (storable per
    // variant) and ineligible ones (must be promoted into the prefix).
    let (ineligible_locals, assignments) = generator_saved_local_eligibility(&info);

    // Build a prefix layout, including "promoting" all ineligible
    // locals as part of the prefix. We compute the layout of all of
    // these fields at once to get optimal packing.
    let tag_index = substs.as_generator().prefix_tys().count();

    // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
    let max_discr = (info.variant_fields.len() - 1) as u128;
    let discr_int = Integer::fit_unsigned(max_discr);
    // The tag is the smallest unsigned integer that fits every variant
    // index; the tight valid range enables niche reuse by enclosing types.
    let tag = Scalar::Initialized {
        value: Primitive::Int(discr_int, false),
        valid_range: WrappingRange { start: 0, end: max_discr },
    };
    let tag_layout = cx.tcx.mk_layout(LayoutS::scalar(cx, tag));

    // Promoted locals are wrapped in `MaybeUninit` because they may be
    // uninitialized/dead in some of the variants.
    let promoted_layouts = ineligible_locals
        .iter()
        .map(|local| subst_field(info.field_tys[local].ty))
        .map(|ty| Ty::new_maybe_uninit(tcx, ty))
        .map(|ty| Ok(cx.layout_of(ty)?.layout));
    // Prefix field order: upvars, then the tag, then the promoted locals.
    let prefix_layouts = substs
        .as_generator()
        .prefix_tys()
        .map(|ty| Ok(cx.layout_of(ty)?.layout))
        .chain(iter::once(Ok(tag_layout)))
        .chain(promoted_layouts)
        .try_collect::<IndexVec<_, _>>()?;
    let prefix = univariant_uninterned(
        cx,
        ty,
        &prefix_layouts,
        &ReprOptions::default(),
        StructKind::AlwaysSized,
    )?;

    let (prefix_size, prefix_align) = (prefix.size, prefix.align);

    // Split the prefix layout into the "outer" fields (upvars and
    // discriminant) and the "promoted" fields. Promoted fields will
    // get included in each variant that requested them in
    // GeneratorLayout.
    debug!("prefix = {:#?}", prefix);
    let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
        FieldsShape::Arbitrary { mut offsets, memory_index } => {
            let mut inverse_memory_index = memory_index.invert_bijective_mapping();

            // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
            // "outer" and "promoted" fields respectively.
            let b_start = FieldIdx::from_usize(tag_index + 1);
            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
            let offsets_a = offsets;

            // Disentangle the "a" and "b" components of `inverse_memory_index`
            // by preserving the order but keeping only one disjoint "half" each.
            // FIXME(eddyb) build a better abstraction for permutations, if possible.
            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
                .iter()
                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
                .collect();
            inverse_memory_index.raw.retain(|&i| i < b_start);
            let inverse_memory_index_a = inverse_memory_index;

            // Since `inverse_memory_index_{a,b}` each only refer to their
            // respective fields, they can be safely inverted
            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();

            let outer_fields =
                FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
            (outer_fields, offsets_b, memory_index_b)
        }
        // `univariant_uninterned` always produces `FieldsShape::Arbitrary`.
        _ => bug!(),
    };

    // Running maximum over all variants; the generator is as large as its
    // largest variant (sharing the common prefix).
    let mut size = prefix.size;
    let mut align = prefix.align;
    let variants = info
        .variant_fields
        .iter_enumerated()
        .map(|(index, variant_fields)| {
            // Only include overlap-eligible fields when we compute our variant layout.
            let variant_only_tys = variant_fields
                .iter()
                .filter(|local| match assignments[**local] {
                    Unassigned => bug!(),
                    Assigned(v) if v == index => true,
                    Assigned(_) => bug!("assignment does not match variant"),
                    Ineligible(_) => false,
                })
                .map(|local| subst_field(info.field_tys[*local].ty));

            // Lay the variant-only fields out after the shared prefix.
            let mut variant = univariant_uninterned(
                cx,
                ty,
                &variant_only_tys
                    .map(|ty| Ok(cx.layout_of(ty)?.layout))
                    .try_collect::<IndexVec<_, _>>()?,
                &ReprOptions::default(),
                StructKind::Prefixed(prefix_size, prefix_align.abi),
            )?;
            variant.variants = Variants::Single { index };

            let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
                bug!();
            };

            // Now, stitch the promoted and variant-only fields back together in
            // the order they are mentioned by our GeneratorLayout.
            // Because we only use some subset (that can differ between variants)
            // of the promoted fields, we can't just pick those elements of the
            // `promoted_memory_index` (as we'd end up with gaps).
            // So instead, we build an "inverse memory_index", as if all of the
            // promoted fields were being used, but leave the elements not in the
            // subset as `INVALID_FIELD_IDX`, which we can filter out later to
            // obtain a valid (bijective) mapping.
            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);

            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
                INVALID_FIELD_IDX,
                promoted_memory_index.len() + memory_index.len(),
            );
            let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
            let combined_offsets = variant_fields
                .iter_enumerated()
                .map(|(i, local)| {
                    let (offset, memory_index) = match assignments[*local] {
                        Unassigned => bug!(),
                        // Variant-local field: consume the next entry of the
                        // variant layout, shifting its memory index past the
                        // promoted block so the two halves don't collide.
                        Assigned(_) => {
                            let (offset, memory_index) = offsets_and_memory_index.next().unwrap();
                            (offset, promoted_memory_index.len() as u32 + memory_index)
                        }
                        // Promoted field: reuse its slot within the prefix.
                        Ineligible(field_idx) => {
                            let field_idx = field_idx.unwrap();
                            (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                        }
                    };
                    combined_inverse_memory_index[memory_index] = i;
                    offset
                })
                .collect();

            // Remove the unused slots and invert the mapping to obtain the
            // combined `memory_index` (also see previous comment).
            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();

            variant.fields = FieldsShape::Arbitrary {
                offsets: combined_offsets,
                memory_index: combined_memory_index,
            };

            size = size.max(variant.size);
            align = align.max(variant.align);
            Ok(variant)
        })
        .try_collect::<IndexVec<VariantIdx, _>>()?;

    size = size.align_to(align.abi);

    // The generator is uninhabited iff all of its variants are (the prefix
    // being uninhabited also forces the whole type uninhabited).
    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
        Abi::Uninhabited
    } else {
        Abi::Aggregate { sized: true }
    };

    let layout = tcx.mk_layout(LayoutS {
        variants: Variants::Multiple {
            tag,
            tag_encoding: TagEncoding::Direct,
            tag_field: tag_index,
            variants,
        },
        fields: outer_fields,
        abi,
        largest_niche: prefix.largest_niche,
        size,
        align,
    });
    debug!("generator layout ({:?}): {:#?}", ty, layout);
    Ok(layout)
}
886 
887 /// This is invoked by the `layout_of` query to record the final
888 /// layout of each type.
889 #[inline(always)]
record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>)890 fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>) {
891     // If we are running with `-Zprint-type-sizes`, maybe record layouts
892     // for dumping later.
893     if cx.tcx.sess.opts.unstable_opts.print_type_sizes {
894         record_layout_for_printing_outlined(cx, layout)
895     }
896 }
897 
record_layout_for_printing_outlined<'tcx>( cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>, )898 fn record_layout_for_printing_outlined<'tcx>(
899     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
900     layout: TyAndLayout<'tcx>,
901 ) {
902     // Ignore layouts that are done with non-empty environments or
903     // non-monomorphic layouts, as the user only wants to see the stuff
904     // resulting from the final codegen session.
905     if layout.ty.has_non_region_param() || !cx.param_env.caller_bounds().is_empty() {
906         return;
907     }
908 
909     // (delay format until we actually need it)
910     let record = |kind, packed, opt_discr_size, variants| {
911         let type_desc = format!("{:?}", layout.ty);
912         cx.tcx.sess.code_stats.record_type_size(
913             kind,
914             type_desc,
915             layout.align.abi,
916             layout.size,
917             packed,
918             opt_discr_size,
919             variants,
920         );
921     };
922 
923     match *layout.ty.kind() {
924         ty::Adt(adt_def, _) => {
925             debug!("print-type-size t: `{:?}` process adt", layout.ty);
926             let adt_kind = adt_def.adt_kind();
927             let adt_packed = adt_def.repr().pack.is_some();
928             let (variant_infos, opt_discr_size) = variant_info_for_adt(cx, layout, adt_def);
929             record(adt_kind.into(), adt_packed, opt_discr_size, variant_infos);
930         }
931 
932         ty::Generator(def_id, substs, _) => {
933             debug!("print-type-size t: `{:?}` record generator", layout.ty);
934             // Generators always have a begin/poisoned/end state with additional suspend points
935             let (variant_infos, opt_discr_size) =
936                 variant_info_for_generator(cx, layout, def_id, substs);
937             record(DataTypeKind::Generator, false, opt_discr_size, variant_infos);
938         }
939 
940         ty::Closure(..) => {
941             debug!("print-type-size t: `{:?}` record closure", layout.ty);
942             record(DataTypeKind::Closure, false, None, vec![]);
943         }
944 
945         _ => {
946             debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
947         }
948     };
949 }
950 
variant_info_for_adt<'tcx>( cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: TyAndLayout<'tcx>, adt_def: AdtDef<'tcx>, ) -> (Vec<VariantInfo>, Option<Size>)951 fn variant_info_for_adt<'tcx>(
952     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
953     layout: TyAndLayout<'tcx>,
954     adt_def: AdtDef<'tcx>,
955 ) -> (Vec<VariantInfo>, Option<Size>) {
956     let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
957         let mut min_size = Size::ZERO;
958         let field_info: Vec<_> = flds
959             .iter()
960             .enumerate()
961             .map(|(i, &name)| {
962                 let field_layout = layout.field(cx, i);
963                 let offset = layout.fields.offset(i);
964                 min_size = min_size.max(offset + field_layout.size);
965                 FieldInfo {
966                     kind: FieldKind::AdtField,
967                     name,
968                     offset: offset.bytes(),
969                     size: field_layout.size.bytes(),
970                     align: field_layout.align.abi.bytes(),
971                 }
972             })
973             .collect();
974 
975         VariantInfo {
976             name: n,
977             kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
978             align: layout.align.abi.bytes(),
979             size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
980             fields: field_info,
981         }
982     };
983 
984     match layout.variants {
985         Variants::Single { index } => {
986             if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
987                 debug!("print-type-size `{:#?}` variant {}", layout, adt_def.variant(index).name);
988                 let variant_def = &adt_def.variant(index);
989                 let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
990                 (vec![build_variant_info(Some(variant_def.name), &fields, layout)], None)
991             } else {
992                 (vec![], None)
993             }
994         }
995 
996         Variants::Multiple { tag, ref tag_encoding, .. } => {
997             debug!(
998                 "print-type-size `{:#?}` adt general variants def {}",
999                 layout.ty,
1000                 adt_def.variants().len()
1001             );
1002             let variant_infos: Vec<_> = adt_def
1003                 .variants()
1004                 .iter_enumerated()
1005                 .map(|(i, variant_def)| {
1006                     let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
1007                     build_variant_info(Some(variant_def.name), &fields, layout.for_variant(cx, i))
1008                 })
1009                 .collect();
1010 
1011             (
1012                 variant_infos,
1013                 match tag_encoding {
1014                     TagEncoding::Direct => Some(tag.size(cx)),
1015                     _ => None,
1016                 },
1017             )
1018         }
1019     }
1020 }
1021 
/// Build `-Zprint-type-sizes` variant summaries for a generator type.
///
/// Each suspension-point variant lists its saved locals followed by the
/// upvars (which live in the shared prefix), and the variants are reordered
/// so the reserved `Returned`/`Panicked` states come after the `SuspendN`
/// states. Returns the infos plus the discriminant size for directly
/// tagged layouts.
fn variant_info_for_generator<'tcx>(
    cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
    layout: TyAndLayout<'tcx>,
    def_id: DefId,
    substs: ty::SubstsRef<'tcx>,
) -> (Vec<VariantInfo>, Option<Size>) {
    // Generator layouts are `Variants::Multiple`; anything else means
    // there is nothing meaningful to report.
    let Variants::Multiple { tag, ref tag_encoding, tag_field, .. } = layout.variants else {
        return (vec![], None);
    };

    let generator = cx.tcx.optimized_mir(def_id).generator_layout().unwrap();
    let upvar_names = cx.tcx.closure_saved_names_of_captured_variables(def_id);

    // Upvars live in the prefix, so their offsets come from the outer
    // `layout.fields` rather than from any per-variant layout.
    let mut upvars_size = Size::ZERO;
    let upvar_fields: Vec<_> = substs
        .as_generator()
        .upvar_tys()
        .zip(upvar_names)
        .enumerate()
        .map(|(field_idx, (_, name))| {
            let field_layout = layout.field(cx, field_idx);
            let offset = layout.fields.offset(field_idx);
            upvars_size = upvars_size.max(offset + field_layout.size);
            FieldInfo {
                kind: FieldKind::Upvar,
                name: *name,
                offset: offset.bytes(),
                size: field_layout.size.bytes(),
                align: field_layout.align.abi.bytes(),
            }
        })
        .collect();

    let mut variant_infos: Vec<_> = generator
        .variant_fields
        .iter_enumerated()
        .map(|(variant_idx, variant_def)| {
            let variant_layout = layout.for_variant(cx, variant_idx);
            let mut variant_size = Size::ZERO;
            let fields = variant_def
                .iter()
                .enumerate()
                .map(|(field_idx, local)| {
                    let field_layout = variant_layout.field(cx, field_idx);
                    let offset = variant_layout.fields.offset(field_idx);
                    // The struct is as large as the last field's end
                    variant_size = variant_size.max(offset + field_layout.size);
                    FieldInfo {
                        kind: FieldKind::GeneratorLocal,
                        // Unnamed saved locals get a synthetic `.generator_fieldN` label.
                        name: generator.field_names[*local].unwrap_or(Symbol::intern(&format!(
                            ".generator_field{}",
                            local.as_usize()
                        ))),
                        offset: offset.bytes(),
                        size: field_layout.size.bytes(),
                        align: field_layout.align.abi.bytes(),
                    }
                })
                // Every variant also reports the shared upvar fields.
                .chain(upvar_fields.iter().copied())
                .collect();

            // If the variant has no state-specific fields, then it's the size of the upvars.
            if variant_size == Size::ZERO {
                variant_size = upvars_size;
            }

            // This `if` deserves some explanation.
            //
            // The layout code has a choice of where to place the discriminant of this generator.
            // If the discriminant of the generator is placed early in the layout (before the
            // variant's own fields), then it'll implicitly be counted towards the size of the
            // variant, since we use the maximum offset to calculate size.
            //    (side-note: I know this is a bit problematic given upvars placement, etc).
            //
            // This is important, since the layout printing code always subtracts this discriminant
            // size from the variant size if the struct is "enum"-like, so failing to account for it
            // will either lead to numerical underflow, or an underreported variant size...
            //
            // However, if the discriminant is placed past the end of the variant, then we need
            // to factor in the size of the discriminant manually. This really should be refactored
            // better, but this "works" for now.
            if layout.fields.offset(tag_field) >= variant_size {
                variant_size += match tag_encoding {
                    TagEncoding::Direct => tag.size(cx),
                    _ => Size::ZERO,
                };
            }

            VariantInfo {
                // E.g. `Unresumed`, `Suspend0`, `Returned`, `Panicked`.
                name: Some(Symbol::intern(&ty::GeneratorSubsts::variant_name(variant_idx))),
                kind: SizeKind::Exact,
                size: variant_size.bytes(),
                align: variant_layout.align.abi.bytes(),
                fields,
            }
        })
        .collect();

    // The first three variants are hardcoded to be `UNRESUMED`, `RETURNED` and `POISONED`.
    // We will move the `RETURNED` and `POISONED` elements to the end so we
    // are left with a sorting order according to the generators yield points:
    // First `Unresumed`, then the `SuspendN` followed by `Returned` and `Panicked` (POISONED).
    let end_states = variant_infos.drain(1..=2);
    let end_states: Vec<_> = end_states.collect();
    variant_infos.extend(end_states);

    (
        variant_infos,
        // Only a directly-stored tag contributes discriminant bytes of its own.
        match tag_encoding {
            TagEncoding::Direct => Some(tag.size(cx)),
            _ => None,
        },
    )
}
1136