1 use super::*;
2 use std::fmt::Write;
3 use std::{borrow::Borrow, cmp, iter, ops::Bound};
4
5 #[cfg(feature = "randomize")]
6 use rand::{seq::SliceRandom, SeedableRng};
7 #[cfg(feature = "randomize")]
8 use rand_xoshiro::Xoshiro128StarStar;
9
10 use tracing::debug;
11
12 pub trait LayoutCalculator {
13 type TargetDataLayoutRef: Borrow<TargetDataLayout>;
14
15 fn delay_bug(&self, txt: String);
16 fn current_data_layout(&self) -> Self::TargetDataLayoutRef;
17
18 fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
19 let dl = self.current_data_layout();
20 let dl = dl.borrow();
21 let b_align = b.align(dl);
22 let align = a.align(dl).max(b_align).max(dl.aggregate_align);
23 let b_offset = a.size(dl).align_to(b_align.abi);
24 let size = (b_offset + b.size(dl)).align_to(align.abi);
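// e.g. for a 32-bit scalar `a` and an 8-bit scalar `b` (assuming `a` is 4-byte
// aligned and `aggregate_align` does not exceed that): b_offset = 4, align = 4,
// and size = align_to(4 + 1, 4) = 8.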
25
26 // HACK(nox): We iter on `b` and then `a` because `max_by_key`
27 // returns the last maximum.
28 let largest_niche = Niche::from_scalar(dl, b_offset, b)
29 .into_iter()
30 .chain(Niche::from_scalar(dl, Size::ZERO, a))
31 .max_by_key(|niche| niche.available(dl));
32
33 LayoutS {
34 variants: Variants::Single { index: FIRST_VARIANT },
35 fields: FieldsShape::Arbitrary {
36 offsets: [Size::ZERO, b_offset].into(),
37 memory_index: [0, 1].into(),
38 },
39 abi: Abi::ScalarPair(a, b),
40 largest_niche,
41 align,
42 size,
43 }
44 }
45
46 fn univariant(
47 &self,
48 dl: &TargetDataLayout,
49 fields: &IndexSlice<FieldIdx, Layout<'_>>,
50 repr: &ReprOptions,
51 kind: StructKind,
52 ) -> Option<LayoutS> {
53 let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
54 // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
55 // data-carrying variants can be packed into the space after/before the niche.
56 // If the default field ordering does not give us a niche at the front then we do a second
57 // run and bias niches to the right and then check which one is closer to one of the struct's
58 // edges.
59 if let Some(layout) = &layout {
60 // Don't try to calculate an end-biased layout for unsizable structs,
61 // otherwise we could end up with different layouts for
62 // Foo<Type> and Foo<dyn Trait> which would break unsizing
63 if !matches!(kind, StructKind::MaybeUnsized) {
64 if let Some(niche) = layout.largest_niche {
65 let head_space = niche.offset.bytes();
66 let niche_length = niche.value.size(dl).bytes();
67 let tail_space = layout.size.bytes() - head_space - niche_length;
68
69 // This may end up doing redundant work if the niche is already in the last field
70 // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
71 // the unpadded size so we try anyway.
72 if fields.len() > 1 && head_space != 0 && tail_space > 0 {
73 let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
74 .expect("alt layout should always work");
75 let niche = alt_layout
76 .largest_niche
77 .expect("alt layout should have a niche like the regular one");
78 let alt_head_space = niche.offset.bytes();
79 let alt_niche_len = niche.value.size(dl).bytes();
80 let alt_tail_space =
81 alt_layout.size.bytes() - alt_head_space - alt_niche_len;
82
83 debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());
84
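// Prefer the end-biased layout only if its niche's head space is strictly larger
// than both the default layout's head space and its tail space; ties keep the
// default (start-biased) layout.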
85 let prefer_alt_layout =
86 alt_head_space > head_space && alt_head_space > tail_space;
87
88 debug!(
89 "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
90 layout: {}\n\
91 alt_layout: {}\n",
92 layout.size.bytes(),
93 head_space,
94 niche_length,
95 tail_space,
96 alt_head_space,
97 alt_niche_len,
98 alt_tail_space,
99 layout.fields.count(),
100 prefer_alt_layout,
101 format_field_niches(&layout, &fields, &dl),
102 format_field_niches(&alt_layout, &fields, &dl),
103 );
104
105 if prefer_alt_layout {
106 return Some(alt_layout);
107 }
108 }
109 }
110 }
111 }
112 layout
113 }
114
115 fn layout_of_never_type(&self) -> LayoutS {
116 let dl = self.current_data_layout();
117 let dl = dl.borrow();
118 LayoutS {
119 variants: Variants::Single { index: FIRST_VARIANT },
120 fields: FieldsShape::Primitive,
121 abi: Abi::Uninhabited,
122 largest_niche: None,
123 align: dl.i8_align,
124 size: Size::ZERO,
125 }
126 }
127
128 fn layout_of_struct_or_enum(
129 &self,
130 repr: &ReprOptions,
131 variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
132 is_enum: bool,
133 is_unsafe_cell: bool,
134 scalar_valid_range: (Bound<u128>, Bound<u128>),
135 discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
136 discriminants: impl Iterator<Item = (VariantIdx, i128)>,
137 dont_niche_optimize_enum: bool,
138 always_sized: bool,
139 ) -> Option<LayoutS> {
140 let dl = self.current_data_layout();
141 let dl = dl.borrow();
142
143 let scalar_unit = |value: Primitive| {
144 let size = value.size(dl);
145 assert!(size.bits() <= 128);
146 Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
147 };
148
149 // A variant is absent if it's uninhabited and only has ZST fields.
150 // Present uninhabited variants only require space for their fields,
151 // but *not* an encoding of the discriminant (e.g., a tag value).
152 // See issue #49298 for more details on the need to leave space
153 // for non-ZST uninhabited data (mostly partial initialization).
154 let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
155 let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
156 let is_zst = fields.iter().all(|f| f.0.is_zst());
157 uninhabited && is_zst
158 };
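// e.g. a variant `V(u32, Infallible)` is uninhabited but has a non-ZST field, so
// it is *not* absent and still gets space for its fields (see #49298 above).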
159 let (present_first, present_second) = {
160 let mut present_variants = variants
161 .iter_enumerated()
162 .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
163 (present_variants.next(), present_variants.next())
164 };
165 let present_first = match present_first {
166 Some(present_first) => present_first,
167 // Uninhabited because it has no variants, or only absent ones.
168 None if is_enum => {
169 return Some(self.layout_of_never_type());
170 }
171 // If it's a struct, still compute a layout so that we can still compute the
172 // field offsets.
173 None => FIRST_VARIANT,
174 };
175
176 let is_struct = !is_enum ||
177 // Only one variant is present.
178 (present_second.is_none() &&
179 // Representation optimizations are allowed.
180 !repr.inhibit_enum_layout_opt());
181 if is_struct {
182 // Struct, or univariant enum equivalent to a struct.
183 // (Typechecking will reject discriminant-sizing attrs.)
184
185 let v = present_first;
186 let kind = if is_enum || variants[v].is_empty() || always_sized {
187 StructKind::AlwaysSized
188 } else {
189 StructKind::MaybeUnsized
190 };
191
192 let mut st = self.univariant(dl, &variants[v], repr, kind)?;
193 st.variants = Variants::Single { index: v };
194
195 if is_unsafe_cell {
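// e.g. `UnsafeCell<NonZeroU32>` keeps the size and alignment of `NonZeroU32`, but
// its scalar's valid range is widened to full below, so no niche is exposed for
// enclosing enums to exploit.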
196 let hide_niches = |scalar: &mut _| match scalar {
197 Scalar::Initialized { value, valid_range } => {
198 *valid_range = WrappingRange::full(value.size(dl))
199 }
200 // Already doesn't have any niches
201 Scalar::Union { .. } => {}
202 };
203 match &mut st.abi {
204 Abi::Uninhabited => {}
205 Abi::Scalar(scalar) => hide_niches(scalar),
206 Abi::ScalarPair(a, b) => {
207 hide_niches(a);
208 hide_niches(b);
209 }
210 Abi::Vector { element, count: _ } => hide_niches(element),
211 Abi::Aggregate { sized: _ } => {}
212 }
213 st.largest_niche = None;
214 return Some(st);
215 }
216
217 let (start, end) = scalar_valid_range;
218 match st.abi {
219 Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
220 // Enlarging validity ranges would result in missed
221 // optimizations, *not* wrongly assuming the inner
222 // value is valid. e.g. unions already enlarge validity ranges,
223 // because the values may be uninitialized.
224 //
225 // Because of that we only check that the start and end
226 // of the range is representable with this scalar type.
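// e.g. a `#[rustc_layout_scalar_valid_range_start(1)]` newtype over an integer gets
// its valid-range start set to 1 here, turning 0 into a niche that is picked up
// just below.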
227
228 let max_value = scalar.size(dl).unsigned_int_max();
229 if let Bound::Included(start) = start {
230 // FIXME(eddyb) this might be incorrect - it doesn't
231 // account for wrap-around (end < start) ranges.
232 assert!(start <= max_value, "{start} > {max_value}");
233 scalar.valid_range_mut().start = start;
234 }
235 if let Bound::Included(end) = end {
236 // FIXME(eddyb) this might be incorrect - it doesn't
237 // account for wrap-around (end < start) ranges.
238 assert!(end <= max_value, "{end} > {max_value}");
239 scalar.valid_range_mut().end = end;
240 }
241
242 // Update `largest_niche` if we have introduced a larger niche.
243 let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
244 if let Some(niche) = niche {
245 match st.largest_niche {
246 Some(largest_niche) => {
247 // Replace the existing niche even if they're equal,
248 // because this one is at a lower offset.
249 if largest_niche.available(dl) <= niche.available(dl) {
250 st.largest_niche = Some(niche);
251 }
252 }
253 None => st.largest_niche = Some(niche),
254 }
255 }
256 }
257 _ => assert!(
258 start == Bound::Unbounded && end == Bound::Unbounded,
259 "nonscalar layout for layout_scalar_valid_range type: {:#?}",
260 st,
261 ),
262 }
263
264 return Some(st);
265 }
266
267 // At this point, we have handled all unions and
268 // structs. (We have also handled univariant enums
269 // that allow representation optimization.)
270 assert!(is_enum);
271
272 // Until we've decided whether to use the tagged or
273 // niche filling LayoutS, we don't want to intern the
274 // variant layouts, so we can't store them in the
275 // overall LayoutS. Store the overall LayoutS
276 // and the variant LayoutSs here until then.
277 struct TmpLayout {
278 layout: LayoutS,
279 variants: IndexVec<VariantIdx, LayoutS>,
280 }
281
282 let calculate_niche_filling_layout = || -> Option<TmpLayout> {
283 if dont_niche_optimize_enum {
284 return None;
285 }
286
287 if variants.len() < 2 {
288 return None;
289 }
290
291 let mut align = dl.aggregate_align;
292 let mut variant_layouts = variants
293 .iter_enumerated()
294 .map(|(j, v)| {
295 let mut st = self.univariant(dl, v, repr, StructKind::AlwaysSized)?;
296 st.variants = Variants::Single { index: j };
297
298 align = align.max(st.align);
299
300 Some(st)
301 })
302 .collect::<Option<IndexVec<VariantIdx, _>>>()?;
303
304 let largest_variant_index = variant_layouts
305 .iter_enumerated()
306 .max_by_key(|(_i, layout)| layout.size.bytes())
307 .map(|(i, _layout)| i)?;
308
309 let all_indices = variants.indices();
310 let needs_disc =
311 |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
312 let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
313 ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
314
315 let count = niche_variants.size_hint().1.unwrap() as u128;
316
317 // Find the field with the largest niche
318 let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
319 .iter()
320 .enumerate()
321 .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
322 .max_by_key(|(_, niche)| niche.available(dl))
323 .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
324 let niche_offset =
325 niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
326 let niche_size = niche.value.size(dl);
327 let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
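// e.g. for `Option<bool>`: the largest variant is `Some(bool)`, whose field has 254
// spare values; reserving one for `None` yields niche_start = 2, so `None` is
// represented as the byte value 2 while `Some(false)`/`Some(true)` remain 0/1.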
328
329 let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
330 if i == largest_variant_index {
331 return true;
332 }
333
334 layout.largest_niche = None;
335
336 if layout.size <= niche_offset {
337 // This variant will fit before the niche.
338 return true;
339 }
340
341 // Determine if it'll fit after the niche.
342 let this_align = layout.align.abi;
343 let this_offset = (niche_offset + niche_size).align_to(this_align);
344
345 if this_offset + layout.size > size {
346 return false;
347 }
348
349 // It'll fit, but we need to make some adjustments.
350 match layout.fields {
351 FieldsShape::Arbitrary { ref mut offsets, .. } => {
352 for (j, offset) in offsets.iter_enumerated_mut() {
353 if !variants[i][j].0.is_zst() {
354 *offset += this_offset;
355 }
356 }
357 }
358 _ => {
359 panic!("Layout of fields should be Arbitrary for variants")
360 }
361 }
362
363 // It can't be a Scalar or ScalarPair because the offset isn't 0.
364 if !layout.abi.is_uninhabited() {
365 layout.abi = Abi::Aggregate { sized: true };
366 }
367 layout.size += this_offset;
368
369 true
370 });
371
372 if !all_variants_fit {
373 return None;
374 }
375
376 let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
377
378 let others_zst = variant_layouts
379 .iter_enumerated()
380 .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
381 let same_size = size == variant_layouts[largest_variant_index].size;
382 let same_align = align == variant_layouts[largest_variant_index].align;
383
384 let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
385 Abi::Uninhabited
386 } else if same_size && same_align && others_zst {
387 match variant_layouts[largest_variant_index].abi {
388 // When the total alignment and size match, we can use the
389 // same ABI as the scalar variant with the reserved niche.
390 Abi::Scalar(_) => Abi::Scalar(niche_scalar),
391 Abi::ScalarPair(first, second) => {
392 // Only the niche is guaranteed to be initialised,
393 // so use union layouts for the other primitive.
394 if niche_offset == Size::ZERO {
395 Abi::ScalarPair(niche_scalar, second.to_union())
396 } else {
397 Abi::ScalarPair(first.to_union(), niche_scalar)
398 }
399 }
400 _ => Abi::Aggregate { sized: true },
401 }
402 } else {
403 Abi::Aggregate { sized: true }
404 };
405
406 let layout = LayoutS {
407 variants: Variants::Multiple {
408 tag: niche_scalar,
409 tag_encoding: TagEncoding::Niche {
410 untagged_variant: largest_variant_index,
411 niche_variants,
412 niche_start,
413 },
414 tag_field: 0,
415 variants: IndexVec::new(),
416 },
417 fields: FieldsShape::Arbitrary {
418 offsets: [niche_offset].into(),
419 memory_index: [0].into(),
420 },
421 abi,
422 largest_niche,
423 size,
424 align,
425 };
426
427 Some(TmpLayout { layout, variants: variant_layouts })
428 };
429
430 let niche_filling_layout = calculate_niche_filling_layout();
431
432 let (mut min, mut max) = (i128::MAX, i128::MIN);
433 let discr_type = repr.discr_type();
434 let bits = Integer::from_attr(dl, discr_type).size().bits();
435 for (i, mut val) in discriminants {
436 if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
437 continue;
438 }
439 if discr_type.is_signed() {
440 // sign extend the raw representation to be an i128
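// e.g. for `repr(i8)`, a raw value of 0xff has bits = 8, and
// (0xff << 120) >> 120 == -1 as an i128.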
441 val = (val << (128 - bits)) >> (128 - bits);
442 }
443 if val < min {
444 min = val;
445 }
446 if val > max {
447 max = val;
448 }
449 }
450 // We might have no inhabited variants, so pretend there's at least one.
451 if (min, max) == (i128::MAX, i128::MIN) {
452 min = 0;
453 max = 0;
454 }
455 assert!(min <= max, "discriminant range is {}...{}", min, max);
456 let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
457
458 let mut align = dl.aggregate_align;
459 let mut size = Size::ZERO;
460
461 // We're interested in the smallest alignment, so start large.
462 let mut start_align = Align::from_bytes(256).unwrap();
463 assert_eq!(Integer::for_align(dl, start_align), None);
464
465 // repr(C) on an enum tells us to make a (tag, union) layout,
466 // so we need to grow the prefix alignment to be at least
467 // the alignment of the union. (This value is used both for
468 // determining the alignment of the overall enum, and for
469 // determining the alignment of the payload after the tag.)
470 let mut prefix_align = min_ity.align(dl).abi;
471 if repr.c() {
472 for fields in variants {
473 for field in fields {
474 prefix_align = prefix_align.max(field.align().abi);
475 }
476 }
477 }
478
479 // Create the set of structs that represent each variant.
480 let mut layout_variants = variants
481 .iter_enumerated()
482 .map(|(i, field_layouts)| {
483 let mut st = self.univariant(
484 dl,
485 field_layouts,
486 repr,
487 StructKind::Prefixed(min_ity.size(), prefix_align),
488 )?;
489 st.variants = Variants::Single { index: i };
490 // Find the first field we can't move later
491 // to make room for a larger discriminant.
492 for field_idx in st.fields.index_by_increasing_offset() {
493 let field = &field_layouts[FieldIdx::from_usize(field_idx)];
494 if !field.0.is_zst() || field.align().abi.bytes() != 1 {
495 start_align = start_align.min(field.align().abi);
496 break;
497 }
498 }
499 size = cmp::max(size, st.size);
500 align = align.max(st.align);
501 Some(st)
502 })
503 .collect::<Option<IndexVec<VariantIdx, _>>>()?;
504
505 // Align the maximum variant size to the largest alignment.
506 size = size.align_to(align.abi);
507
508 if size.bytes() >= dl.obj_size_bound() {
509 return None;
510 }
511
512 let typeck_ity = Integer::from_attr(dl, repr.discr_type());
513 if typeck_ity < min_ity {
514 // It is a bug if layout decided on a greater discriminant size than typeck did at
515 // this point (based on the values the discriminant can take on), mostly because
516 // this discriminant will be loaded and then stored into a variable of the
517 // type calculated by typeck. Consider such a case (a bug): typeck decided on a
518 // byte-sized discriminant, but layout thinks we need 16 bits to store all the
519 // discriminant values. That would be a bug, because then, in codegen, in order
520 // to store this 16-bit discriminant into an 8-bit sized temporary some of the
521 // space necessary to represent it would have to be discarded (or layout is wrong
522 // in thinking it needs 16 bits).
523 panic!(
524 "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
525 min_ity, typeck_ity
526 );
527 // However, it is fine to make the discriminant type as large as we like (as an
528 // optimisation) after this point – we’ll just truncate the value we load in codegen.
529 }
530
531 // Check to see if we should use a different type for the
532 // discriminant. We can safely use a type with the same size
533 // as the alignment of the first field of each variant.
534 // We increase the size of the discriminant to avoid LLVM copying
535 // padding when it doesn't need to. This normally causes unaligned
536 // load/stores and excessive memcpy/memset operations. By using a
537 // bigger integer size, LLVM can be sure about its contents and
538 // won't be so conservative.
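// e.g. if the discriminant values fit in a u8 (min_ity = I8) but every variant's
// first non-ZST field needs 4-byte alignment, start_align is 4 and the tag may be
// widened to I32 below, filling bytes that would otherwise be padding; any field
// sitting directly after the old 1-byte tag is pushed out to offset 4.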
539
540 // Use the initial field alignment
541 let mut ity = if repr.c() || repr.int.is_some() {
542 min_ity
543 } else {
544 Integer::for_align(dl, start_align).unwrap_or(min_ity)
545 };
546
547 // If the alignment is not larger than the chosen discriminant size,
548 // don't use the alignment as the final size.
549 if ity <= min_ity {
550 ity = min_ity;
551 } else {
552 // Patch up the variants' first few fields.
553 let old_ity_size = min_ity.size();
554 let new_ity_size = ity.size();
555 for variant in &mut layout_variants {
556 match variant.fields {
557 FieldsShape::Arbitrary { ref mut offsets, .. } => {
558 for i in offsets {
559 if *i <= old_ity_size {
560 assert_eq!(*i, old_ity_size);
561 *i = new_ity_size;
562 }
563 }
564 // We might be making the struct larger.
565 if variant.size <= old_ity_size {
566 variant.size = new_ity_size;
567 }
568 }
569 _ => panic!(),
570 }
571 }
572 }
573
574 let tag_mask = ity.size().unsigned_int_max();
575 let tag = Scalar::Initialized {
576 value: Int(ity, signed),
577 valid_range: WrappingRange {
578 start: (min as u128 & tag_mask),
579 end: (max as u128 & tag_mask),
580 },
581 };
582 let mut abi = Abi::Aggregate { sized: true };
583
584 if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
585 abi = Abi::Uninhabited;
586 } else if tag.size(dl) == size {
587 // Make sure we only use scalar layout when the enum is entirely its
588 // own tag (i.e. it has no padding nor any non-ZST variant fields).
589 abi = Abi::Scalar(tag);
590 } else {
591 // Try to use a ScalarPair for all tagged enums.
592 let mut common_prim = None;
593 let mut common_prim_initialized_in_all_variants = true;
594 for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
595 let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
596 panic!();
597 };
598 let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
599 let (field, offset) = match (fields.next(), fields.next()) {
600 (None, None) => {
601 common_prim_initialized_in_all_variants = false;
602 continue;
603 }
604 (Some(pair), None) => pair,
605 _ => {
606 common_prim = None;
607 break;
608 }
609 };
610 let prim = match field.abi() {
611 Abi::Scalar(scalar) => {
612 common_prim_initialized_in_all_variants &=
613 matches!(scalar, Scalar::Initialized { .. });
614 scalar.primitive()
615 }
616 _ => {
617 common_prim = None;
618 break;
619 }
620 };
621 if let Some(pair) = common_prim {
622 // This is pretty conservative. We could go fancier
623 // by conflating things like i32 and u32, or even
624 // realising that (u8, u8) could just cohabit with
625 // u16 or even u32.
626 if pair != (prim, offset) {
627 common_prim = None;
628 break;
629 }
630 } else {
631 common_prim = Some((prim, offset));
632 }
633 }
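// e.g. `enum E { A(u32), B(u32) }`: each variant has exactly one non-ZST field, all
// 32-bit scalars at the same offset after the tag, so `common_prim` stays `Some` and
// the enum can get a ScalarPair(tag, u32) ABI below (provided size and alignment
// match the already-computed layout).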
634 if let Some((prim, offset)) = common_prim {
635 let prim_scalar = if common_prim_initialized_in_all_variants {
636 scalar_unit(prim)
637 } else {
638 // Common prim might be uninit.
639 Scalar::Union { value: prim }
640 };
641 let pair = self.scalar_pair(tag, prim_scalar);
642 let pair_offsets = match pair.fields {
643 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
644 assert_eq!(memory_index.raw, [0, 1]);
645 offsets
646 }
647 _ => panic!(),
648 };
649 if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
650 && pair_offsets[FieldIdx::from_u32(1)] == *offset
651 && align == pair.align
652 && size == pair.size
653 {
654 // We can use `ScalarPair` only when it matches our
655 // already computed layout (including `#[repr(C)]`).
656 abi = pair.abi;
657 }
658 }
659 }
660
661 // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
662 // variants to ensure they are consistent. This is because a downcast is
663 // semantically a NOP, and thus should not affect layout.
664 if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
665 for variant in &mut layout_variants {
666 // We only do this for variants with fields; the others are not accessed anyway.
667 // Also do not overwrite any already existing "clever" ABIs.
668 if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
669 variant.abi = abi;
670 // Also need to bump up the size and alignment, so that the entire value fits in here.
671 variant.size = cmp::max(variant.size, size);
672 variant.align.abi = cmp::max(variant.align.abi, align.abi);
673 }
674 }
675 }
676
677 let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
678
679 let tagged_layout = LayoutS {
680 variants: Variants::Multiple {
681 tag,
682 tag_encoding: TagEncoding::Direct,
683 tag_field: 0,
684 variants: IndexVec::new(),
685 },
686 fields: FieldsShape::Arbitrary {
687 offsets: [Size::ZERO].into(),
688 memory_index: [0].into(),
689 },
690 largest_niche,
691 abi,
692 align,
693 size,
694 };
695
696 let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
697
698 let mut best_layout = match (tagged_layout, niche_filling_layout) {
699 (tl, Some(nl)) => {
700 // Pick the smaller layout; otherwise,
701 // pick the layout with the larger niche; otherwise,
702 // pick tagged as it has simpler codegen.
703 use cmp::Ordering::*;
704 let niche_size =
705 |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
706 match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
707 (Greater, _) => nl,
708 (Equal, Less) => nl,
709 _ => tl,
710 }
711 }
712 (tl, None) => tl,
713 };
714
715 // Now we can intern the variant layouts and store them in the enum layout.
716 best_layout.layout.variants = match best_layout.layout.variants {
717 Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
718 Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
719 }
720 _ => panic!(),
721 };
722 Some(best_layout.layout)
723 }
724
725 fn layout_of_union(
726 &self,
727 repr: &ReprOptions,
728 variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
729 ) -> Option<LayoutS> {
730 let dl = self.current_data_layout();
731 let dl = dl.borrow();
732 let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };
733
734 if let Some(repr_align) = repr.align {
735 align = align.max(AbiAndPrefAlign::new(repr_align));
736 }
737
738 // If all the non-ZST fields have the same ABI and union ABI optimizations aren't
739 // disabled, we can use that common ABI for the union as a whole.
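// e.g. `union U { a: NonZeroU32, b: u32 }` (without repr(packed)): both fields are
// 4-byte scalars, and `to_union()` below discards `a`'s valid-range restriction, so
// the fields share one ABI and the union itself can be given a scalar ABI.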
740 struct AbiMismatch;
741 let mut common_non_zst_abi_and_align = if repr.inhibit_union_abi_opt() {
742 // Can't optimize
743 Err(AbiMismatch)
744 } else {
745 Ok(None)
746 };
747
748 let mut size = Size::ZERO;
749 let only_variant = &variants[FIRST_VARIANT];
750 for field in only_variant {
751 assert!(field.0.is_sized());
752
753 align = align.max(field.align());
754 size = cmp::max(size, field.size());
755
756 if field.0.is_zst() {
757 // Nothing more to do for ZST fields
758 continue;
759 }
760
761 if let Ok(common) = common_non_zst_abi_and_align {
762 // Discard valid range information and allow undef
763 let field_abi = field.abi().to_union();
764
765 if let Some((common_abi, common_align)) = common {
766 if common_abi != field_abi {
767 // Different fields have different ABI: disable opt
768 common_non_zst_abi_and_align = Err(AbiMismatch);
769 } else {
770 // Fields with the same non-Aggregate ABI should also
771 // have the same alignment
772 if !matches!(common_abi, Abi::Aggregate { .. }) {
773 assert_eq!(
774 common_align,
775 field.align().abi,
776 "non-Aggregate field with matching ABI but differing alignment"
777 );
778 }
779 }
780 } else {
781 // First non-ZST field: record its ABI and alignment
782 common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
783 }
784 }
785 }
786
787 if let Some(pack) = repr.pack {
788 align = align.min(AbiAndPrefAlign::new(pack));
789 }
790
791 // If all non-ZST fields have the same ABI, we may forward that ABI
792 // for the union as a whole, unless otherwise inhibited.
793 let abi = match common_non_zst_abi_and_align {
794 Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
795 Ok(Some((abi, _))) => {
796 if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
797 // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
798 Abi::Aggregate { sized: true }
799 } else {
800 abi
801 }
802 }
803 };
804
805 Some(LayoutS {
806 variants: Variants::Single { index: FIRST_VARIANT },
807 fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
808 abi,
809 largest_niche: None,
810 align,
811 size: size.align_to(align.abi),
812 })
813 }
814 }
815
816 /// Determines towards which end of a struct layout optimizations will try to place the best niches.
817 enum NicheBias {
818 Start,
819 End,
820 }
821
822 fn univariant(
823 this: &(impl LayoutCalculator + ?Sized),
824 dl: &TargetDataLayout,
825 fields: &IndexSlice<FieldIdx, Layout<'_>>,
826 repr: &ReprOptions,
827 kind: StructKind,
828 niche_bias: NicheBias,
829 ) -> Option<LayoutS> {
830 let pack = repr.pack;
831 let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
832 let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
833 let optimize = !repr.inhibit_struct_field_reordering_opt();
834 if optimize && fields.len() > 1 {
835 let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
836 let optimizing = &mut inverse_memory_index.raw[..end];
837 let fields_excluding_tail = &fields.raw[..end];
838
839 // If `-Z randomize-layout` was enabled for the type definition we can shuffle
840 // the field ordering to try and catch some code making assumptions about layouts
841 // we don't guarantee
842 if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
843 #[cfg(feature = "randomize")]
844 {
845 // `ReprOptions.field_shuffle_seed` is a deterministic seed that we can use to
846 // randomize field ordering with
847 let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
848
849 // Shuffle the ordering of the fields
850 optimizing.shuffle(&mut rng);
851 }
852 // Otherwise we just leave things alone and actually optimize the type's fields
853 } else {
854 // To allow unsizing `&Foo<Type>` -> `&Foo<dyn Trait>`, the layout of the struct must
855 // not depend on the layout of the tail.
856 let max_field_align =
857 fields_excluding_tail.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
858 let largest_niche_size = fields_excluding_tail
859 .iter()
860 .filter_map(|f| f.largest_niche())
861 .map(|n| n.available(dl))
862 .max()
863 .unwrap_or(0);
864
865 // Calculates a sort key to group fields by their alignment or possibly some size-derived
866 // pseudo-alignment.
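// e.g. without `pack`, a `[u8; 4]` field (align 1, size 4) gets the same group key
// as a `u32` (both log2 = 2), so it can sit among the 4-aligned fields without
// forcing extra padding.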
867 let alignment_group_key = |layout: Layout<'_>| {
868 if let Some(pack) = pack {
869 // return the packed alignment in bytes
870 layout.align().abi.min(pack).bytes()
871 } else {
872 // returns log2(effective-align).
873 // This is ok since `pack` applies to all fields equally.
874 // The calculation assumes that size is an integer multiple of align, except for ZSTs.
875 //
876 let align = layout.align().abi.bytes();
877 let size = layout.size().bytes();
878 let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
879 // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
880 let size_as_align = align.max(size).trailing_zeros();
881 let size_as_align = if largest_niche_size > 0 {
882 match niche_bias {
883 // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
884 // to the front in the first case (for aligned loads) but keep the bool in front
885 // in the second case for its niches.
886 NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
887 // When moving niches towards the end of the struct then for
888 // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
889 // in the align-1 group because its bool can be moved closer to the end.
890 NicheBias::End if niche_size == largest_niche_size => {
891 align.trailing_zeros()
892 }
893 NicheBias::End => size_as_align,
894 }
895 } else {
896 size_as_align
897 };
898 size_as_align as u64
899 }
900 };
901
902 match kind {
903 StructKind::AlwaysSized | StructKind::MaybeUnsized => {
904 // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
905 // to get one niche into the preferred position. If it ever supported multiple niches
906 // then a more advanced pick-and-pack approach could provide better results.
907 // But even for the single-niche case it's not optimal. E.g. for
908 // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
909 // but it would require packing the tuple together with the u16 to build a 4-byte
910 // group so that the u32 can be placed after it without padding. This kind
911 // of packing can't be achieved by sorting.
912 optimizing.sort_by_key(|&x| {
913 let f = fields[x];
914 let field_size = f.size().bytes();
915 let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
916 let niche_size_key = match niche_bias {
917 // large niche first
918 NicheBias::Start => !niche_size,
919 // large niche last
920 NicheBias::End => niche_size,
921 };
922 let inner_niche_offset_key = match niche_bias {
923 NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
924 NicheBias::End => f.largest_niche().map_or(0, |n| {
925 !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
926 }),
927 };
928
929 (
930 // Place ZSTs first to avoid "interesting offsets", especially with only one
931 // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
932 !f.0.is_zst(),
933 // Then place largest alignments first.
934 cmp::Reverse(alignment_group_key(f)),
935 // Then prioritize niche placement within alignment group according to
936 // `niche_bias`.
937 niche_size_key,
938 // Then among fields with equally-sized niches prefer the ones
939 // closer to the start/end of the field.
940 inner_niche_offset_key,
941 )
942 });
943 }
944
945 StructKind::Prefixed(..) => {
946 // Sort in ascending alignment so that the layout stays optimal
947 // regardless of the prefix.
948 // And put the largest niche in an alignment group at the end
949 // so it can be used as a discriminant in jagged enums.
950 optimizing.sort_by_key(|&x| {
951 let f = fields[x];
952 let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
953 (alignment_group_key(f), niche_size)
954 });
955 }
956 }
957
958 // FIXME(Kixiron): We can always shuffle fields within a given alignment class
959 // regardless of the status of `-Z randomize-layout`
960 }
961 }
962 // inverse_memory_index holds field indices by increasing memory offset.
963 // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
964 // We now write field offsets to the corresponding offset slot;
965 // field 5 with offset 0 puts 0 in offsets[5].
966 // At the bottom of this function, we invert `inverse_memory_index` to
967 // produce `memory_index` (see `invert_mapping`).
968 let mut sized = true;
969 let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
970 let mut offset = Size::ZERO;
971 let mut largest_niche = None;
972 let mut largest_niche_available = 0;
973 if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
974 let prefix_align =
975 if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
976 align = align.max(AbiAndPrefAlign::new(prefix_align));
977 offset = prefix_size.align_to(prefix_align);
978 }
979 for &i in &inverse_memory_index {
980 let field = &fields[i];
981 if !sized {
982 this.delay_bug(format!(
983 "univariant: field #{} comes after unsized field",
984 offsets.len(),
985 ));
986 }
987
988 if field.0.is_unsized() {
989 sized = false;
990 }
991
992 // Invariant: offset < dl.obj_size_bound() <= 1<<61
993 let field_align = if let Some(pack) = pack {
994 field.align().min(AbiAndPrefAlign::new(pack))
995 } else {
996 field.align()
997 };
998 offset = offset.align_to(field_align.abi);
999 align = align.max(field_align);
1000
1001 debug!("univariant offset: {:?} field: {:#?}", offset, field);
1002 offsets[i] = offset;
1003
1004 if let Some(mut niche) = field.largest_niche() {
1005 let available = niche.available(dl);
1006 // Pick up larger niches.
1007 let prefer_new_niche = match niche_bias {
1008 NicheBias::Start => available > largest_niche_available,
1009 // if there are several niches of the same size then pick the last one
1010 NicheBias::End => available >= largest_niche_available,
1011 };
1012 if prefer_new_niche {
1013 largest_niche_available = available;
1014 niche.offset += offset;
1015 largest_niche = Some(niche);
1016 }
1017 }
1018
1019 offset = offset.checked_add(field.size(), dl)?;
1020 }
1021 if let Some(repr_align) = repr.align {
1022 align = align.max(AbiAndPrefAlign::new(repr_align));
1023 }
1024 debug!("univariant min_size: {:?}", offset);
1025 let min_size = offset;
1026 // As stated above, inverse_memory_index holds field indices by increasing offset.
1027 // This makes it an already-sorted view of the offsets vec.
1028 // To invert it, consider:
1029 // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
1030 // Field 5 would be the first element, so memory_index is i:
1031 // Note: if we didn't optimize, it's already right.
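// e.g. inverse_memory_index = [2, 0, 1] (field 2 is first in memory) inverts to
// memory_index = [1, 2, 0].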
1032 let memory_index = if optimize {
1033 inverse_memory_index.invert_bijective_mapping()
1034 } else {
1035 debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
1036 inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
1037 };
1038 let size = min_size.align_to(align.abi);
1039 let mut abi = Abi::Aggregate { sized };
1040 // Unpack newtype ABIs and find scalar pairs.
1041 if sized && size.bytes() > 0 {
1042 // All other fields must be ZSTs.
1043 let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
1044
1045 match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
1046 // We have exactly one non-ZST field.
1047 (Some((i, field)), None, None) => {
1048 // Field fills the struct and it has a scalar or scalar pair ABI.
1049 if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
1050 {
1051 match field.abi() {
1052 // For plain scalars, or vectors of them, we can't unpack
1053 // newtypes for `#[repr(C)]`, as that affects C ABIs.
1054 Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
1055 abi = field.abi();
1056 }
1057 // But scalar pairs are Rust-specific and get
1058 // treated as aggregates by C ABIs anyway.
1059 Abi::ScalarPair(..) => {
1060 abi = field.abi();
1061 }
1062 _ => {}
1063 }
1064 }
1065 }
1066
1067 // Two non-ZST fields, and they're both scalars.
1068 (Some((i, a)), Some((j, b)), None) => {
1069 match (a.abi(), b.abi()) {
1070 (Abi::Scalar(a), Abi::Scalar(b)) => {
1071 // Order by the memory placement, not source order.
1072 let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
1073 ((i, a), (j, b))
1074 } else {
1075 ((j, b), (i, a))
1076 };
1077 let pair = this.scalar_pair(a, b);
1078 let pair_offsets = match pair.fields {
1079 FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
1080 assert_eq!(memory_index.raw, [0, 1]);
1081 offsets
1082 }
1083 _ => panic!(),
1084 };
1085 if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
1086 && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
1087 && align == pair.align
1088 && size == pair.size
1089 {
1090 // We can use `ScalarPair` only when it matches our
1091 // already computed layout (including `#[repr(C)]`).
1092 abi = pair.abi;
1093 }
1094 }
1095 _ => {}
1096 }
1097 }
1098
1099 _ => {}
1100 }
1101 }
1102 if fields.iter().any(|f| f.abi().is_uninhabited()) {
1103 abi = Abi::Uninhabited;
1104 }
1105 Some(LayoutS {
1106 variants: Variants::Single { index: FIRST_VARIANT },
1107 fields: FieldsShape::Arbitrary { offsets, memory_index },
1108 abi,
1109 largest_niche,
1110 align,
1111 size,
1112 })
1113 }
1114
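// Debug helper: renders one `[o<offset>a<align>s<size> n<niche offset>b<log2 niches>s<niche size>]`
// entry per field, in increasing-offset order.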
1115 fn format_field_niches(
1116 layout: &LayoutS,
1117 fields: &IndexSlice<FieldIdx, Layout<'_>>,
1118 dl: &TargetDataLayout,
1119 ) -> String {
1120 let mut s = String::new();
1121 for i in layout.fields.index_by_increasing_offset() {
1122 let offset = layout.fields.offset(i);
1123 let f = fields[i.into()];
1124 write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
1125 if let Some(n) = f.largest_niche() {
1126 write!(
1127 s,
1128 " n{}b{}s{}",
1129 n.offset.bytes(),
1130 n.available(dl).ilog2(),
1131 n.value.size(dl).bytes()
1132 )
1133 .unwrap();
1134 }
1135 write!(s, "] ").unwrap();
1136 }
1137 s
1138 }
1139