use super::*;
use std::fmt::Write;
use std::{borrow::Borrow, cmp, iter, ops::Bound};

#[cfg(feature = "randomize")]
use rand::{seq::SliceRandom, SeedableRng};
#[cfg(feature = "randomize")]
use rand_xoshiro::Xoshiro128StarStar;

use tracing::debug;

pub trait LayoutCalculator {
    type TargetDataLayoutRef: Borrow<TargetDataLayout>;

    fn delay_bug(&self, txt: String);
    fn current_data_layout(&self) -> Self::TargetDataLayoutRef;

    fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let b_align = b.align(dl);
        let align = a.align(dl).max(b_align).max(dl.aggregate_align);
        let b_offset = a.size(dl).align_to(b_align.abi);
        let size = (b_offset + b.size(dl)).align_to(align.abi);

        // HACK(nox): We iter on `b` and then `a` because `max_by_key`
        // returns the last maximum.
        let largest_niche = Niche::from_scalar(dl, b_offset, b)
            .into_iter()
            .chain(Niche::from_scalar(dl, Size::ZERO, a))
            .max_by_key(|niche| niche.available(dl));

        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO, b_offset].into(),
                memory_index: [0, 1].into(),
            },
            abi: Abi::ScalarPair(a, b),
            largest_niche,
            align,
            size,
        }
    }
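
    // Worked example for `scalar_pair` (a sketch; exact numbers depend on the
    // target data layout): pairing `a: i32` with `b: i16` on a target whose
    // aggregate ABI alignment is 1 byte gives `b_align = 2`, `align = 4`,
    // `b_offset = align_to(size_of(a) = 4, 2) = 4`, and
    // `size = align_to(4 + 2, 4) = 8`, leaving two bytes of tail padding.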

    fn univariant(
        &self,
        dl: &TargetDataLayout,
        fields: &IndexSlice<FieldIdx, Layout<'_>>,
        repr: &ReprOptions,
        kind: StructKind,
    ) -> Option<LayoutS> {
        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
        // Enums prefer niches close to the beginning or the end of the variants so that other
        // (smaller) data-carrying variants can be packed into the space after/before the niche.
        // If the default field ordering does not give us a niche at the front then we do a second
        // run and bias niches to the right and then check which one is closer to one of the
        // struct's edges.
        if let Some(layout) = &layout {
            if let Some(niche) = layout.largest_niche {
                let head_space = niche.offset.bytes();
                let niche_length = niche.value.size(dl).bytes();
                let tail_space = layout.size.bytes() - head_space - niche_length;

                // This may end up doing redundant work if the niche is already in the last field
                // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
                // the unpadded size so we try anyway.
                if fields.len() > 1 && head_space != 0 && tail_space > 0 {
                    let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
                        .expect("alt layout should always work");
                    let niche = alt_layout
                        .largest_niche
                        .expect("alt layout should have a niche like the regular one");
                    let alt_head_space = niche.offset.bytes();
                    let alt_niche_len = niche.value.size(dl).bytes();
                    let alt_tail_space = alt_layout.size.bytes() - alt_head_space - alt_niche_len;

                    debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());

                    let prefer_alt_layout =
                        alt_head_space > head_space && alt_head_space > tail_space;

                    debug!(
                        "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
                        layout: {}\n\
                        alt_layout: {}\n",
                        layout.size.bytes(),
                        head_space,
                        niche_length,
                        tail_space,
                        alt_head_space,
                        alt_niche_len,
                        alt_tail_space,
                        layout.fields.count(),
                        prefer_alt_layout,
                        format_field_niches(&layout, &fields, &dl),
                        format_field_niches(&alt_layout, &fields, &dl),
                    );

                    if prefer_alt_layout {
                        return Some(alt_layout);
                    }
                }
            }
        }
        layout
    }
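
    // Numeric sketch of the bias decision above (hypothetical layout): if the
    // start-biased run places a 1-byte niche at offset 2 of an 8-byte struct,
    // then `head_space = 2` and `tail_space = 5`. An end-biased run that moves
    // the niche to offset 7 gives `alt_head_space = 7`, which beats both
    // (7 > 2 and 7 > 5), so the alternative layout would be returned.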

    fn layout_of_never_type(&self) -> LayoutS {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Primitive,
            abi: Abi::Uninhabited,
            largest_niche: None,
            align: dl.i8_align,
            size: Size::ZERO,
        }
    }

    fn layout_of_struct_or_enum(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
        is_enum: bool,
        is_unsafe_cell: bool,
        scalar_valid_range: (Bound<u128>, Bound<u128>),
        discr_range_of_repr: impl Fn(i128, i128) -> (Integer, bool),
        discriminants: impl Iterator<Item = (VariantIdx, i128)>,
        niche_optimize_enum: bool,
        always_sized: bool,
    ) -> Option<LayoutS> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();

        let scalar_unit = |value: Primitive| {
            let size = value.size(dl);
            assert!(size.bits() <= 128);
            Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
        };

        // A variant is absent if it's uninhabited and only has ZST fields.
        // Present uninhabited variants only require space for their fields,
        // but *not* an encoding of the discriminant (e.g., a tag value).
        // See issue #49298 for more details on the need to leave space
        // for non-ZST uninhabited data (mostly partial initialization).
        let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
            let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
            let is_zst = fields.iter().all(|f| f.0.is_zst());
            uninhabited && is_zst
        };
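
        // For example (illustrative): given `enum Void {}` and
        // `enum E { A(Void), B(u8) }`, variant `A`'s only field is uninhabited
        // and zero-sized, so `A` is absent and `E` can be laid out like `B`;
        // a variant such as `A(Void, u32)` would instead be present (though
        // uninhabited), because space must still be reserved for the `u32`.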

        let (present_first, present_second) = {
            let mut present_variants = variants
                .iter_enumerated()
                .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
            (present_variants.next(), present_variants.next())
        };
        let present_first = match present_first {
            Some(present_first) => present_first,
            // Uninhabited because it has no variants, or only absent ones.
            None if is_enum => {
                return Some(self.layout_of_never_type());
            }
            // If it's a struct, still compute a layout so that we can still compute the
            // field offsets.
            None => FIRST_VARIANT,
        };

        let is_struct = !is_enum ||
            // Only one variant is present.
            (present_second.is_none() &&
            // Representation optimizations are allowed.
            !repr.inhibit_enum_layout_opt());
        if is_struct {
            // Struct, or univariant enum equivalent to a struct.
            // (Typechecking will reject discriminant-sizing attrs.)

            let v = present_first;
            let kind = if is_enum || variants[v].is_empty() {
                StructKind::AlwaysSized
            } else {
                if !always_sized { StructKind::MaybeUnsized } else { StructKind::AlwaysSized }
            };

            let mut st = self.univariant(dl, &variants[v], repr, kind)?;
            st.variants = Variants::Single { index: v };

            if is_unsafe_cell {
                let hide_niches = |scalar: &mut _| match scalar {
                    Scalar::Initialized { value, valid_range } => {
                        *valid_range = WrappingRange::full(value.size(dl))
                    }
                    // Already doesn't have any niches
                    Scalar::Union { .. } => {}
                };
                match &mut st.abi {
                    Abi::Uninhabited => {}
                    Abi::Scalar(scalar) => hide_niches(scalar),
                    Abi::ScalarPair(a, b) => {
                        hide_niches(a);
                        hide_niches(b);
                    }
                    Abi::Vector { element, count: _ } => hide_niches(element),
                    Abi::Aggregate { sized: _ } => {}
                }
                st.largest_niche = None;
                return Some(st);
            }

            let (start, end) = scalar_valid_range;
            match st.abi {
                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
                    // Enlarging validity ranges would result in missed
                    // optimizations, *not* wrongly assuming the inner
                    // value is valid. e.g. unions already enlarge validity ranges,
                    // because the values may be uninitialized.
                    //
                    // Because of that we only check that the start and end
                    // of the range is representable with this scalar type.

                    let max_value = scalar.size(dl).unsigned_int_max();
                    if let Bound::Included(start) = start {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        assert!(start <= max_value, "{start} > {max_value}");
                        scalar.valid_range_mut().start = start;
                    }
                    if let Bound::Included(end) = end {
                        // FIXME(eddyb) this might be incorrect - it doesn't
                        // account for wrap-around (end < start) ranges.
                        assert!(end <= max_value, "{end} > {max_value}");
                        scalar.valid_range_mut().end = end;
                    }

                    // Update `largest_niche` if we have introduced a larger niche.
                    let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
                    if let Some(niche) = niche {
                        match st.largest_niche {
                            Some(largest_niche) => {
                                // Replace the existing niche even if they're equal,
                                // because this one is at a lower offset.
                                if largest_niche.available(dl) <= niche.available(dl) {
                                    st.largest_niche = Some(niche);
                                }
                            }
                            None => st.largest_niche = Some(niche),
                        }
                    }
                }
                _ => assert!(
                    start == Bound::Unbounded && end == Bound::Unbounded,
                    "nonscalar layout for layout_scalar_valid_range type: {:#?}",
                    st,
                ),
            }

            return Some(st);
        }

        // At this point, we have handled all unions and
        // structs. (We have also handled univariant enums
        // that allow representation optimization.)
        assert!(is_enum);

        // Until we've decided whether to use the tagged or
        // niche filling LayoutS, we don't want to intern the
        // variant layouts, so we can't store them in the
        // overall LayoutS. Store the overall LayoutS
        // and the variant LayoutSs here until then.
        struct TmpLayout {
            layout: LayoutS,
            variants: IndexVec<VariantIdx, LayoutS>,
        }

        let calculate_niche_filling_layout = || -> Option<TmpLayout> {
            if niche_optimize_enum {
                return None;
            }

            if variants.len() < 2 {
                return None;
            }

            let mut align = dl.aggregate_align;
            let mut variant_layouts = variants
                .iter_enumerated()
                .map(|(j, v)| {
                    let mut st = self.univariant(dl, v, repr, StructKind::AlwaysSized)?;
                    st.variants = Variants::Single { index: j };

                    align = align.max(st.align);

                    Some(st)
                })
                .collect::<Option<IndexVec<VariantIdx, _>>>()?;

            let largest_variant_index = variant_layouts
                .iter_enumerated()
                .max_by_key(|(_i, layout)| layout.size.bytes())
                .map(|(i, _layout)| i)?;

            let all_indices = variants.indices();
            let needs_disc =
                |index: VariantIdx| index != largest_variant_index && !absent(&variants[index]);
            let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
                ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();

            let count = niche_variants.size_hint().1.unwrap() as u128;
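
            // Sketch (hypothetical enum): for `enum E { A(bool), B([u64; 4]), C }`
            // the largest variant is `B`; both `A` and `C` need a discriminant,
            // so `niche_variants` spans the index range `A..=C` and `count == 3`
            // niche values must be reserved in a field of `B`. Since `[u64; 4]`
            // has no niche, this particular attempt would fail below and the
            // enum would fall back to a tagged layout.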

            // Find the field with the largest niche
            let (field_index, niche, (niche_start, niche_scalar)) = variants[largest_variant_index]
                .iter()
                .enumerate()
                .filter_map(|(j, field)| Some((j, field.largest_niche()?)))
                .max_by_key(|(_, niche)| niche.available(dl))
                .and_then(|(j, niche)| Some((j, niche, niche.reserve(dl, count)?)))?;
            let niche_offset =
                niche.offset + variant_layouts[largest_variant_index].fields.offset(field_index);
            let niche_size = niche.value.size(dl);
            let size = variant_layouts[largest_variant_index].size.align_to(align.abi);

            let all_variants_fit = variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
                if i == largest_variant_index {
                    return true;
                }

                layout.largest_niche = None;

                if layout.size <= niche_offset {
                    // This variant will fit before the niche.
                    return true;
                }

                // Determine if it'll fit after the niche.
                let this_align = layout.align.abi;
                let this_offset = (niche_offset + niche_size).align_to(this_align);

                if this_offset + layout.size > size {
                    return false;
                }

                // It'll fit, but we need to make some adjustments.
                match layout.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for (j, offset) in offsets.iter_enumerated_mut() {
                            if !variants[i][j].0.is_zst() {
                                *offset += this_offset;
                            }
                        }
                    }
                    _ => {
                        panic!("Layout of fields should be Arbitrary for variants")
                    }
                }

                // It can't be a Scalar or ScalarPair because the offset isn't 0.
                if !layout.abi.is_uninhabited() {
                    layout.abi = Abi::Aggregate { sized: true };
                }
                layout.size += this_offset;

                true
            });

            if !all_variants_fit {
                return None;
            }

            let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);

            let others_zst = variant_layouts
                .iter_enumerated()
                .all(|(i, layout)| i == largest_variant_index || layout.size == Size::ZERO);
            let same_size = size == variant_layouts[largest_variant_index].size;
            let same_align = align == variant_layouts[largest_variant_index].align;

            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
                Abi::Uninhabited
            } else if same_size && same_align && others_zst {
                match variant_layouts[largest_variant_index].abi {
                    // When the total alignment and size match, we can use the
                    // same ABI as the scalar variant with the reserved niche.
                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
                    Abi::ScalarPair(first, second) => {
                        // Only the niche is guaranteed to be initialised,
                        // so use union layouts for the other primitive.
                        if niche_offset == Size::ZERO {
                            Abi::ScalarPair(niche_scalar, second.to_union())
                        } else {
                            Abi::ScalarPair(first.to_union(), niche_scalar)
                        }
                    }
                    _ => Abi::Aggregate { sized: true },
                }
            } else {
                Abi::Aggregate { sized: true }
            };

            let layout = LayoutS {
                variants: Variants::Multiple {
                    tag: niche_scalar,
                    tag_encoding: TagEncoding::Niche {
                        untagged_variant: largest_variant_index,
                        niche_variants,
                        niche_start,
                    },
                    tag_field: 0,
                    variants: IndexVec::new(),
                },
                fields: FieldsShape::Arbitrary {
                    offsets: [niche_offset].into(),
                    memory_index: [0].into(),
                },
                abi,
                largest_niche,
                size,
                align,
            };

            Some(TmpLayout { layout, variants: variant_layouts })
        };

        let niche_filling_layout = calculate_niche_filling_layout();

        let (mut min, mut max) = (i128::MAX, i128::MIN);
        let discr_type = repr.discr_type();
        let bits = Integer::from_attr(dl, discr_type).size().bits();
        for (i, mut val) in discriminants {
            if variants[i].iter().any(|f| f.abi().is_uninhabited()) {
                continue;
            }
            if discr_type.is_signed() {
                // sign extend the raw representation to be an i128
                val = (val << (128 - bits)) >> (128 - bits);
            }
            if val < min {
                min = val;
            }
            if val > max {
                max = val;
            }
        }
        // We might have no inhabited variants, so pretend there's at least one.
        if (min, max) == (i128::MAX, i128::MIN) {
            min = 0;
            max = 0;
        }
        assert!(min <= max, "discriminant range is {}...{}", min, max);
        let (min_ity, signed) = discr_range_of_repr(min, max); //Integer::repr_discr(tcx, ty, &repr, min, max);
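
        // Example of the sign extension above (a sketch, with `repr(i8)` so
        // `bits == 8`): a raw discriminant value of `0xFF` becomes
        // `(0xFF << 120) >> 120 == -1` as an `i128`, so an enum with
        // discriminants `-1` and `1` yields `(min, max) == (-1, 1)`.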

        let mut align = dl.aggregate_align;
        let mut size = Size::ZERO;

        // We're interested in the smallest alignment, so start large.
        let mut start_align = Align::from_bytes(256).unwrap();
        assert_eq!(Integer::for_align(dl, start_align), None);

        // repr(C) on an enum tells us to make a (tag, union) layout,
        // so we need to grow the prefix alignment to be at least
        // the alignment of the union. (This value is used both for
        // determining the alignment of the overall enum, and for
        // determining the alignment of the payload after the tag.)
        let mut prefix_align = min_ity.align(dl).abi;
        if repr.c() {
            for fields in variants {
                for field in fields {
                    prefix_align = prefix_align.max(field.align().abi);
                }
            }
        }

        // Create the set of structs that represent each variant.
        let mut layout_variants = variants
            .iter_enumerated()
            .map(|(i, field_layouts)| {
                let mut st = self.univariant(
                    dl,
                    field_layouts,
                    repr,
                    StructKind::Prefixed(min_ity.size(), prefix_align),
                )?;
                st.variants = Variants::Single { index: i };
                // Find the first field we can't move later
                // to make room for a larger discriminant.
                for field_idx in st.fields.index_by_increasing_offset() {
                    let field = &field_layouts[FieldIdx::from_usize(field_idx)];
                    if !field.0.is_zst() || field.align().abi.bytes() != 1 {
                        start_align = start_align.min(field.align().abi);
                        break;
                    }
                }
                size = cmp::max(size, st.size);
                align = align.max(st.align);
                Some(st)
            })
            .collect::<Option<IndexVec<VariantIdx, _>>>()?;

        // Align the maximum variant size to the largest alignment.
        size = size.align_to(align.abi);

        if size.bytes() >= dl.obj_size_bound() {
            return None;
        }

        let typeck_ity = Integer::from_attr(dl, repr.discr_type());
        if typeck_ity < min_ity {
            // It is a bug if Layout decided on a greater discriminant size than typeck for
            // some reason at this point (based on values the discriminant can take on). Mostly
            // because this discriminant will be loaded, and then stored into a variable of the
            // type calculated by typeck. Consider such a case (a bug): typeck decided on a
            // byte-sized discriminant, but layout thinks we need 16 bits to store all the
            // discriminant values. That would be a bug, because then, in codegen, in order
            // to store this 16-bit discriminant into the 8-bit sized temporary, some of the
            // space necessary to represent it would have to be discarded (or layout is wrong
            // in thinking it needs 16 bits).
            panic!(
                "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
                min_ity, typeck_ity
            );
            // However, it is fine to make discr type however large (as an optimisation)
            // after this point – we’ll just truncate the value we load in codegen.
        }

        // Check to see if we should use a different type for the
        // discriminant. We can safely use a type with the same size
        // as the alignment of the first field of each variant.
        // We increase the size of the discriminant to avoid LLVM copying
        // padding when it doesn't need to. This normally causes unaligned
        // load/stores and excessive memcpy/memset operations. By using a
        // bigger integer size, LLVM can be sure about its contents and
        // won't be so conservative.

        // Use the initial field alignment
        let mut ity = if repr.c() || repr.int.is_some() {
            min_ity
        } else {
            Integer::for_align(dl, start_align).unwrap_or(min_ity)
        };

        // If the alignment is not larger than the chosen discriminant size,
        // don't use the alignment as the final size.
        if ity <= min_ity {
            ity = min_ity;
        } else {
            // Patch up the variants' first few fields.
            let old_ity_size = min_ity.size();
            let new_ity_size = ity.size();
            for variant in &mut layout_variants {
                match variant.fields {
                    FieldsShape::Arbitrary { ref mut offsets, .. } => {
                        for i in offsets {
                            if *i <= old_ity_size {
                                assert_eq!(*i, old_ity_size);
                                *i = new_ity_size;
                            }
                        }
                        // We might be making the struct larger.
                        if variant.size <= old_ity_size {
                            variant.size = new_ity_size;
                        }
                    }
                    _ => panic!(),
                }
            }
        }

        let tag_mask = ity.size().unsigned_int_max();
        let tag = Scalar::Initialized {
            value: Int(ity, signed),
            valid_range: WrappingRange {
                start: (min as u128 & tag_mask),
                end: (max as u128 & tag_mask),
            },
        };
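
        // Continuing the `repr(i8)` sketch: with `ity = I8`, `tag_mask == 0xFF`,
        // so discriminants `-1..=1` produce the wrapping valid range
        // `start: 0xFF, end: 0x01`; the range wraps around zero instead of
        // being widened to cover every value in between.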

        let mut abi = Abi::Aggregate { sized: true };

        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
            abi = Abi::Uninhabited;
        } else if tag.size(dl) == size {
            // Make sure we only use scalar layout when the enum is entirely its
            // own tag (i.e. it has no padding nor any non-ZST variant fields).
            abi = Abi::Scalar(tag);
        } else {
            // Try to use a ScalarPair for all tagged enums.
            let mut common_prim = None;
            let mut common_prim_initialized_in_all_variants = true;
            for (field_layouts, layout_variant) in iter::zip(variants, &layout_variants) {
                let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
                    panic!();
                };
                let mut fields = iter::zip(field_layouts, offsets).filter(|p| !p.0.0.is_zst());
                let (field, offset) = match (fields.next(), fields.next()) {
                    (None, None) => {
                        common_prim_initialized_in_all_variants = false;
                        continue;
                    }
                    (Some(pair), None) => pair,
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                let prim = match field.abi() {
                    Abi::Scalar(scalar) => {
                        common_prim_initialized_in_all_variants &=
                            matches!(scalar, Scalar::Initialized { .. });
                        scalar.primitive()
                    }
                    _ => {
                        common_prim = None;
                        break;
                    }
                };
                if let Some(pair) = common_prim {
                    // This is pretty conservative. We could go fancier
                    // by conflating things like i32 and u32, or even
                    // realising that (u8, u8) could just cohabit with
                    // u16 or even u32.
                    if pair != (prim, offset) {
                        common_prim = None;
                        break;
                    }
                } else {
                    common_prim = Some((prim, offset));
                }
            }
            if let Some((prim, offset)) = common_prim {
                let prim_scalar = if common_prim_initialized_in_all_variants {
                    scalar_unit(prim)
                } else {
                    // Common prim might be uninit.
                    Scalar::Union { value: prim }
                };
                let pair = self.scalar_pair(tag, prim_scalar);
                let pair_offsets = match pair.fields {
                    FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                        assert_eq!(memory_index.raw, [0, 1]);
                        offsets
                    }
                    _ => panic!(),
                };
                if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
                    && pair_offsets[FieldIdx::from_u32(1)] == *offset
                    && align == pair.align
                    && size == pair.size
                {
                    // We can use `ScalarPair` only when it matches our
                    // already computed layout (including `#[repr(C)]`).
                    abi = pair.abi;
                }
            }
        }

        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
            for variant in &mut layout_variants {
                // We only do this for variants with fields; the others are not accessed anyway.
                // Also do not overwrite any already existing "clever" ABIs.
                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
                    variant.abi = abi;
                    // Also need to bump up the size and alignment, so that the entire value
                    // fits in here.
                    variant.size = cmp::max(variant.size, size);
                    variant.align.abi = cmp::max(variant.align.abi, align.abi);
                }
            }
        }

        let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);

        let tagged_layout = LayoutS {
            variants: Variants::Multiple {
                tag,
                tag_encoding: TagEncoding::Direct,
                tag_field: 0,
                variants: IndexVec::new(),
            },
            fields: FieldsShape::Arbitrary {
                offsets: [Size::ZERO].into(),
                memory_index: [0].into(),
            },
            largest_niche,
            abi,
            align,
            size,
        };

        let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };

        let mut best_layout = match (tagged_layout, niche_filling_layout) {
            (tl, Some(nl)) => {
                // Pick the smaller layout; otherwise,
                // pick the layout with the larger niche; otherwise,
                // pick tagged as it has simpler codegen.
                use cmp::Ordering::*;
                let niche_size =
                    |tmp_l: &TmpLayout| tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl));
                match (tl.layout.size.cmp(&nl.layout.size), niche_size(&tl).cmp(&niche_size(&nl))) {
                    (Greater, _) => nl,
                    (Equal, Less) => nl,
                    _ => tl,
                }
            }
            (tl, None) => tl,
        };

        // Now we can intern the variant layouts and store them in the enum layout.
        best_layout.layout.variants = match best_layout.layout.variants {
            Variants::Multiple { tag, tag_encoding, tag_field, .. } => {
                Variants::Multiple { tag, tag_encoding, tag_field, variants: best_layout.variants }
            }
            _ => panic!(),
        };
        Some(best_layout.layout)
    }
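
    // End-to-end sketch of the tagged-vs-niche choice (illustrative): for an
    // `Option<&u8>`-like enum, the niche-filling candidate reuses the
    // pointer's forbidden null value and stays pointer-sized, while the
    // tagged candidate needs a separate tag plus padding; the niche layout is
    // strictly smaller, so the `(Greater, _) => nl` arm above selects it.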

    fn layout_of_union(
        &self,
        repr: &ReprOptions,
        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
    ) -> Option<LayoutS> {
        let dl = self.current_data_layout();
        let dl = dl.borrow();
        let mut align = if repr.pack.is_some() { dl.i8_align } else { dl.aggregate_align };

        if let Some(repr_align) = repr.align {
            align = align.max(AbiAndPrefAlign::new(repr_align));
        }

        // If all the non-ZST fields have the same ABI and union ABI optimizations aren't
        // disabled, we can use that common ABI for the union as a whole.
        struct AbiMismatch;
        let mut common_non_zst_abi_and_align = if repr.inhibit_union_abi_opt() {
            // Can't optimize
            Err(AbiMismatch)
        } else {
            Ok(None)
        };

        let mut size = Size::ZERO;
        let only_variant = &variants[FIRST_VARIANT];
        for field in only_variant {
            assert!(field.0.is_sized());

            align = align.max(field.align());
            size = cmp::max(size, field.size());

            if field.0.is_zst() {
                // Nothing more to do for ZST fields
                continue;
            }

            if let Ok(common) = common_non_zst_abi_and_align {
                // Discard valid range information and allow undef
                let field_abi = field.abi().to_union();

                if let Some((common_abi, common_align)) = common {
                    if common_abi != field_abi {
                        // Different fields have different ABI: disable opt
                        common_non_zst_abi_and_align = Err(AbiMismatch);
                    } else {
                        // Fields with the same non-Aggregate ABI should also
                        // have the same alignment
                        if !matches!(common_abi, Abi::Aggregate { .. }) {
                            assert_eq!(
                                common_align,
                                field.align().abi,
                                "non-Aggregate field with matching ABI but differing alignment"
                            );
                        }
                    }
                } else {
                    // First non-ZST field: record its ABI and alignment
                    common_non_zst_abi_and_align = Ok(Some((field_abi, field.align().abi)));
                }
            }
        }

        if let Some(pack) = repr.pack {
            align = align.min(AbiAndPrefAlign::new(pack));
        }

        // If all non-ZST fields have the same ABI, we may forward that ABI
        // for the union as a whole, unless otherwise inhibited.
        let abi = match common_non_zst_abi_and_align {
            Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
            Ok(Some((abi, _))) => {
                if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                    // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
                    Abi::Aggregate { sized: true }
                } else {
                    abi
                }
            }
        };

        Some(LayoutS {
            variants: Variants::Single { index: FIRST_VARIANT },
            fields: FieldsShape::Union(NonZeroUsize::new(only_variant.len())?),
            abi,
            largest_niche: None,
            align,
            size: size.align_to(align.abi),
        })
    }
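
    // Union sketch (illustrative): for `union U { a: u32, b: f32 }` the two
    // non-ZST fields have different scalar ABIs (integer vs. float), so the
    // common-ABI optimization is abandoned and `U` is laid out as a 4-byte
    // `Aggregate`; for `union V { a: u32, b: u32 }` both fields share one
    // scalar ABI (widened with `to_union()` to permit uninitialized bytes),
    // which is forwarded as the ABI of `V` itself.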

}

/// Determines towards which end of a struct layout optimizations will try to place the best niches.
enum NicheBias {
    Start,
    End,
}
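
// For instance (a sketch): with fields `(u32, bool)`, `NicheBias::Start`
// prefers an order that leaves the bool's niche near offset 0 when that helps
// an enclosing enum, while `NicheBias::End` prefers pushing it towards the
// last bytes of the struct; `LayoutCalculator::univariant` runs both and
// keeps whichever layout leaves the niche closer to an edge.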

fn univariant(
    this: &(impl LayoutCalculator + ?Sized),
    dl: &TargetDataLayout,
    fields: &IndexSlice<FieldIdx, Layout<'_>>,
    repr: &ReprOptions,
    kind: StructKind,
    niche_bias: NicheBias,
) -> Option<LayoutS> {
    let pack = repr.pack;
    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
    let optimize = !repr.inhibit_struct_field_reordering_opt();
    if optimize && fields.len() > 1 {
        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
        let optimizing = &mut inverse_memory_index.raw[..end];

        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
        // the field ordering to try and catch some code making assumptions about layouts
        // we don't guarantee.
        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
            #[cfg(feature = "randomize")]
            {
                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
                // randomize field ordering with.
                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());

                // Shuffle the ordering of the fields.
                optimizing.shuffle(&mut rng);
            }
            // Otherwise we just leave things alone and actually optimize the type's fields.
        } else {
            let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
            let largest_niche_size = fields
                .iter()
                .filter_map(|f| f.largest_niche())
                .map(|n| n.available(dl))
                .max()
                .unwrap_or(0);

            // Calculates a sort key to group fields by their alignment or possibly some
            // size-derived pseudo-alignment.
            let alignment_group_key = |layout: Layout<'_>| {
                if let Some(pack) = pack {
                    // Return the packed alignment in bytes.
                    layout.align().abi.min(pack).bytes()
                } else {
                    // Returns `log2(effective-align)`. This is ok since `pack` applies to all
                    // fields equally. The calculation assumes that size is an integer multiple
                    // of align, except for ZSTs.
                    let align = layout.align().abi.bytes();
                    let size = layout.size().bytes();
                    let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
                    // Group [u8; 4] with align-4 or [u8; 6] with align-2 fields.
                    let size_as_align = align.max(size).trailing_zeros();
                    let size_as_align = if largest_niche_size > 0 {
                        match niche_bias {
                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump
                            // the array to the front in the first case (for aligned loads) but
                            // keep the bool in front in the second case for its niches.
                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
                            // When moving niches towards the end of the struct then for
                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first
                            // tuple in the align-1 group because its bool can be moved closer
                            // to the end.
                            NicheBias::End if niche_size == largest_niche_size => {
                                align.trailing_zeros()
                            }
                            NicheBias::End => size_as_align,
                        }
                    } else {
                        size_as_align
                    };
                    size_as_align as u64
                }
            };
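
            // Key sketch (hypothetical fields, no `pack`, ignoring the niche
            // adjustments above): a `u64` (align 8, size 8) gets key
            // `log2(8) == 3`; a `[u8; 4]` (align 1, size 4) gets
            // `max(1, 4).trailing_zeros() == 2`, grouping it with genuine
            // align-4 fields; a `bool` gets key 0. `cmp::Reverse` in the sort
            // below then places larger keys (higher alignment groups) first.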

            match kind {
                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
                    // Currently `LayoutS` only exposes a single niche so sorting is usually
                    // sufficient to get one niche into the preferred position. If it ever
                    // supported multiple niches then a more advanced pick-and-pack approach
                    // could provide better results. But even for the single-niche case it's
                    // not optimal. E.g. for A(u32, (bool, u8), u16) it would be possible to
                    // move the bool to the front but it would require packing the tuple
                    // together with the u16 to build a 4-byte group so that the u32 can be
                    // placed after it without padding. This kind of packing can't be achieved
                    // by sorting.
                    optimizing.sort_by_key(|&x| {
                        let f = fields[x];
                        let field_size = f.size().bytes();
                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                        let niche_size_key = match niche_bias {
                            // large niche first
                            NicheBias::Start => !niche_size,
                            // large niche last
                            NicheBias::End => niche_size,
                        };
                        let inner_niche_offset_key = match niche_bias {
                            NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
                            NicheBias::End => f.largest_niche().map_or(0, |n| {
                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
                            }),
                        };

                        (
                            // Place ZSTs first to avoid "interesting offsets", especially with
                            // only one or two non-ZST fields. This helps Scalar/ScalarPair
                            // layouts.
                            !f.0.is_zst(),
                            // Then place largest alignments first.
                            cmp::Reverse(alignment_group_key(f)),
                            // Then prioritize niche placement within an alignment group
                            // according to `niche_bias`.
                            niche_size_key,
                            // Then among fields with equally-sized niches prefer the ones
                            // closer to the start/end of the field.
                            inner_niche_offset_key,
                        )
                    });
                }

                StructKind::Prefixed(..) => {
                    // Sort in ascending alignment so that the layout stays optimal
                    // regardless of the prefix.
                    // And put the largest niche in an alignment group at the end
                    // so it can be used as a discriminant in jagged enums.
                    optimizing.sort_by_key(|&x| {
                        let f = fields[x];
                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                        (alignment_group_key(f), niche_size)
                    });
                }
            }

            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
            // regardless of the status of `-Z randomize-layout`.
        }
    }
    // inverse_memory_index holds field indices by increasing memory offset.
    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
    // We now write field offsets to the corresponding offset slot;
    // field 5 with offset 0 puts 0 in offsets[5].
    // At the bottom of this function, we invert `inverse_memory_index` to
    // produce `memory_index` (see `invert_mapping`).
    let mut sized = true;
    let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
    let mut offset = Size::ZERO;
    let mut largest_niche = None;
    let mut largest_niche_available = 0;
    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
        let prefix_align =
            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
        align = align.max(AbiAndPrefAlign::new(prefix_align));
        offset = prefix_size.align_to(prefix_align);
    }
    for &i in &inverse_memory_index {
        let field = &fields[i];
        if !sized {
            this.delay_bug(format!(
                "univariant: field #{} comes after unsized field",
                offsets.len(),
            ));
        }

        if field.0.is_unsized() {
            sized = false;
        }

        // Invariant: offset < dl.obj_size_bound() <= 1<<61
        let field_align = if let Some(pack) = pack {
            field.align().min(AbiAndPrefAlign::new(pack))
        } else {
            field.align()
        };
        offset = offset.align_to(field_align.abi);
        align = align.max(field_align);

        debug!("univariant offset: {:?} field: {:#?}", offset, field);
        offsets[i] = offset;

        if let Some(mut niche) = field.largest_niche() {
            let available = niche.available(dl);
            // Pick up larger niches.
            let prefer_new_niche = match niche_bias {
                NicheBias::Start => available > largest_niche_available,
                // If there are several niches of the same size then pick the last one.
                NicheBias::End => available >= largest_niche_available,
            };
            if prefer_new_niche {
                largest_niche_available = available;
                niche.offset += offset;
                largest_niche = Some(niche);
            }
        }

        offset = offset.checked_add(field.size(), dl)?;
    }
    if let Some(repr_align) = repr.align {
        align = align.max(AbiAndPrefAlign::new(repr_align));
    }
    debug!("univariant min_size: {:?}", offset);
    let min_size = offset;
    // As stated above, inverse_memory_index holds field indices by increasing offset.
    // This makes it an already-sorted view of the offsets vec.
    // To invert it, consider:
    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
    // Field 5 would be the first element, so memory_index is i:
    // Note: if we didn't optimize, it's already right.
    let memory_index = if optimize {
        inverse_memory_index.invert_bijective_mapping()
    } else {
        debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
        inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
    };
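
    // Inversion sketch (illustrative): if three fields were reordered so that
    // `inverse_memory_index == [2, 0, 1]` (source field 2 is laid out first),
    // then `memory_index == [1, 2, 0]`: source field 0 sits in memory slot 1,
    // field 1 in slot 2, and field 2 in slot 0.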

    let size = min_size.align_to(align.abi);
    let mut abi = Abi::Aggregate { sized };
    // Unpack newtype ABIs and find scalar pairs.
    if sized && size.bytes() > 0 {
        // All other fields must be ZSTs.
        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());

        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
            // We have exactly one non-ZST field.
            (Some((i, field)), None, None) => {
                // Field fills the struct and it has a scalar or scalar pair ABI.
                if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
                {
                    match field.abi() {
                        // For plain scalars, or vectors of them, we can't unpack
                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
                            abi = field.abi();
                        }
                        // But scalar pairs are Rust-specific and get
                        // treated as aggregates by C ABIs anyway.
                        Abi::ScalarPair(..) => {
                            abi = field.abi();
                        }
                        _ => {}
                    }
                }
            }

            // Two non-ZST fields, and they're both scalars.
            (Some((i, a)), Some((j, b)), None) => {
                match (a.abi(), b.abi()) {
                    (Abi::Scalar(a), Abi::Scalar(b)) => {
                        // Order by the memory placement, not source order.
                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                            ((i, a), (j, b))
                        } else {
                            ((j, b), (i, a))
                        };
                        let pair = this.scalar_pair(a, b);
                        let pair_offsets = match pair.fields {
                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
                                assert_eq!(memory_index.raw, [0, 1]);
                                offsets
                            }
                            _ => panic!(),
                        };
                        if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
                            && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
                            && align == pair.align
                            && size == pair.size
                        {
                            // We can use `ScalarPair` only when it matches our
                            // already computed layout (including `#[repr(C)]`).
                            abi = pair.abi;
                        }
                    }
                    _ => {}
                }
            }

            _ => {}
        }
    }
    if fields.iter().any(|f| f.abi().is_uninhabited()) {
        abi = Abi::Uninhabited;
    }
    Some(LayoutS {
        variants: Variants::Single { index: FIRST_VARIANT },
        fields: FieldsShape::Arbitrary { offsets, memory_index },
        abi,
        largest_niche,
        align,
        size,
    })
}

fn format_field_niches(
    layout: &LayoutS,
    fields: &IndexSlice<FieldIdx, Layout<'_>>,
    dl: &TargetDataLayout,
) -> String {
    let mut s = String::new();
    for i in layout.fields.index_by_increasing_offset() {
        let offset = layout.fields.offset(i);
        let f = fields[i.into()];
        write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
        if let Some(n) = f.largest_niche() {
            write!(
                s,
                " n{}b{}s{}",
                n.offset.bytes(),
                n.available(dl).ilog2(),
                n.value.size(dl).bytes()
            )
            .unwrap();
        }
        write!(s, "] ").unwrap();
    }
    s
}
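
// Output sketch for `format_field_niches` (hypothetical fields): a `u32` at
// offset 0 followed by a `bool` at offset 4 would render roughly as
// "[o0a4s4] [o4a1s1 n0b7s1] ", i.e. offset/align/size per field, plus the
// niche offset, log2 of the available niche values (254 for `bool`, so b7),
// and the niche's size when one is present.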
|