Auto merge of #108106 - the8472:layout-opt, r=wesleywiser
Improve niche placement by trying two strategies and picking the better result.

Fixes #104807
Fixes #105371

Determining which sort order is better requires calculating the struct size (so that we can calculate the niche offset), but that size in turn depends on the field order, so it is only known after sorting. The simple way around that is to run the whole layout computation twice and pick the better result.

The 1st commit is just code motion; the meat is in the later ones.
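In outline, the selection between the two runs works like the sketch below. This is a simplified, self-contained model rather than the compiler's code: `Layout`, `pick_better`, and `compute` are illustrative stand-ins, while `NicheBias` mirrors the enum added in this diff. The real logic lives in `LayoutCalculator::univariant` below and additionally requires more than one field before attempting the second run.

/// Which end of the struct the field-sorting pass should push the largest niche towards.
enum NicheBias {
    Start,
    End,
}

/// Toy stand-in for a computed layout: total size plus the offset and length
/// of its largest niche, all in bytes. The real `LayoutS` carries much more.
struct Layout {
    size: u64,
    niche_offset: u64,
    niche_len: u64,
}

/// Run the layout computation with both biases and keep whichever result puts
/// the niche closer to one of the struct's edges.
fn pick_better(compute: impl Fn(NicheBias) -> Layout) -> Layout {
    // First pass: the default ordering, which biases niches towards the start.
    let default = compute(NicheBias::Start);
    let head_space = default.niche_offset;
    let tail_space = default.size - default.niche_offset - default.niche_len;

    // A second pass is only worth it if the niche ended up somewhere in the middle.
    if head_space != 0 && tail_space > 0 {
        // Second pass: bias the niche towards the end, then compare edge distances.
        let alt = compute(NicheBias::End);
        let alt_head_space = alt.niche_offset;
        if alt_head_space > head_space && alt_head_space > tail_space {
            return alt;
        }
    }
    default
}

The real code also logs both candidate layouts via the new `format_field_niches` helper before choosing, which is what the added `debug!` output in the diff is for.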
commit f2299490c1
12 changed files with 514 additions and 301 deletions
@@ -1,4 +1,5 @@
 use super::*;
+use std::fmt::Write;
 use std::{borrow::Borrow, cmp, iter, ops::Bound};
 
 #[cfg(feature = "randomize")]
@@ -49,220 +50,60 @@ pub trait LayoutCalculator {
         repr: &ReprOptions,
         kind: StructKind,
     ) -> Option<LayoutS> {
-        let pack = repr.pack;
-        let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
-        let optimize = !repr.inhibit_struct_field_reordering_opt();
-        if optimize {
-            let end =
-                if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-            let optimizing = &mut inverse_memory_index.raw[..end];
-            let effective_field_align = |layout: Layout<'_>| {
-                if let Some(pack) = pack {
-                    // return the packed alignment in bytes
-                    layout.align().abi.min(pack).bytes()
-                } else {
-                    // returns log2(effective-align).
-                    // This is ok since `pack` applies to all fields equally.
-                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
-                    //
-                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
-                    layout.align().abi.bytes().max(layout.size().bytes()).trailing_zeros() as u64
-                }
-            };
-
-            // If `-Z randomize-layout` was enabled for the type definition we can shuffle
-            // the field ordering to try and catch some code making assumptions about layouts
-            // we don't guarantee
-            if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
-                #[cfg(feature = "randomize")]
-                {
-                    // `ReprOptions.layout_seed` is a deterministic seed that we can use to
-                    // randomize field ordering with
-                    let mut rng =
-                        Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
-
-                    // Shuffle the ordering of the fields
-                    optimizing.shuffle(&mut rng);
-                }
-                // Otherwise we just leave things alone and actually optimize the type's fields
-            } else {
-                match kind {
-                    StructKind::AlwaysSized | StructKind::MaybeUnsized => {
-                        optimizing.sort_by_key(|&x| {
-                            // Place ZSTs first to avoid "interesting offsets",
-                            // especially with only one or two non-ZST fields.
-                            // Then place largest alignments first, largest niches within an alignment group last
-                            let f = fields[x];
-                            let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
-                            (!f.0.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
-                        });
-                    }
-
-                    StructKind::Prefixed(..) => {
-                        // Sort in ascending alignment so that the layout stays optimal
-                        // regardless of the prefix.
-                        // And put the largest niche in an alignment group at the end
-                        // so it can be used as discriminant in jagged enums
-                        optimizing.sort_by_key(|&x| {
-                            let f = fields[x];
-                            let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
-                            (effective_field_align(f), niche_size)
-                        });
-                    }
-                }
-
-                // FIXME(Kixiron): We can always shuffle fields within a given alignment class
-                // regardless of the status of `-Z randomize-layout`
-            }
-        }
-        // inverse_memory_index holds field indices by increasing memory offset.
-        // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
-        // We now write field offsets to the corresponding offset slot;
-        // field 5 with offset 0 puts 0 in offsets[5].
-        // At the bottom of this function, we invert `inverse_memory_index` to
-        // produce `memory_index` (see `invert_mapping`).
-        let mut sized = true;
-        let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
-        let mut offset = Size::ZERO;
-        let mut largest_niche = None;
-        let mut largest_niche_available = 0;
-        if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-            let prefix_align =
-                if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
-            align = align.max(AbiAndPrefAlign::new(prefix_align));
-            offset = prefix_size.align_to(prefix_align);
-        }
-        for &i in &inverse_memory_index {
-            let field = &fields[i];
-            if !sized {
-                self.delay_bug(&format!(
-                    "univariant: field #{} comes after unsized field",
-                    offsets.len(),
-                ));
-            }
-
-            if field.0.is_unsized() {
-                sized = false;
-            }
-
-            // Invariant: offset < dl.obj_size_bound() <= 1<<61
-            let field_align = if let Some(pack) = pack {
-                field.align().min(AbiAndPrefAlign::new(pack))
-            } else {
-                field.align()
-            };
-            offset = offset.align_to(field_align.abi);
-            align = align.max(field_align);
-
-            debug!("univariant offset: {:?} field: {:#?}", offset, field);
-            offsets[i] = offset;
-
-            if let Some(mut niche) = field.largest_niche() {
-                let available = niche.available(dl);
-                if available > largest_niche_available {
-                    largest_niche_available = available;
-                    niche.offset += offset;
-                    largest_niche = Some(niche);
-                }
-            }
-
-            offset = offset.checked_add(field.size(), dl)?;
-        }
-        if let Some(repr_align) = repr.align {
-            align = align.max(AbiAndPrefAlign::new(repr_align));
-        }
-        debug!("univariant min_size: {:?}", offset);
-        let min_size = offset;
-        // As stated above, inverse_memory_index holds field indices by increasing offset.
-        // This makes it an already-sorted view of the offsets vec.
-        // To invert it, consider:
-        // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
-        // Field 5 would be the first element, so memory_index is i:
-        // Note: if we didn't optimize, it's already right.
-        let memory_index = if optimize {
-            inverse_memory_index.invert_bijective_mapping()
-        } else {
-            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
-            inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
-        };
-        let size = min_size.align_to(align.abi);
-        let mut abi = Abi::Aggregate { sized };
-        // Unpack newtype ABIs and find scalar pairs.
-        if sized && size.bytes() > 0 {
-            // All other fields must be ZSTs.
-            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
-
-            match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
-                // We have exactly one non-ZST field.
-                (Some((i, field)), None, None) => {
-                    // Field fills the struct and it has a scalar or scalar pair ABI.
-                    if offsets[i].bytes() == 0
-                        && align.abi == field.align().abi
-                        && size == field.size()
-                    {
-                        match field.abi() {
-                            // For plain scalars, or vectors of them, we can't unpack
-                            // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                            Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
-                                abi = field.abi();
-                            }
-                            // But scalar pairs are Rust-specific and get
-                            // treated as aggregates by C ABIs anyway.
-                            Abi::ScalarPair(..) => {
-                                abi = field.abi();
-                            }
-                            _ => {}
-                        }
-                    }
-                }
-
-                // Two non-ZST fields, and they're both scalars.
-                (Some((i, a)), Some((j, b)), None) => {
-                    match (a.abi(), b.abi()) {
-                        (Abi::Scalar(a), Abi::Scalar(b)) => {
-                            // Order by the memory placement, not source order.
-                            let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
-                                ((i, a), (j, b))
-                            } else {
-                                ((j, b), (i, a))
-                            };
-                            let pair = self.scalar_pair(a, b);
-                            let pair_offsets = match pair.fields {
-                                FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                    assert_eq!(memory_index.raw, [0, 1]);
-                                    offsets
-                                }
-                                _ => panic!(),
-                            };
-                            if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
-                                && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
-                                && align == pair.align
-                                && size == pair.size
-                            {
-                                // We can use `ScalarPair` only when it matches our
-                                // already computed layout (including `#[repr(C)]`).
-                                abi = pair.abi;
-                            }
-                        }
-                        _ => {}
-                    }
-                }
-
-                _ => {}
-            }
-        }
-        if fields.iter().any(|f| f.abi().is_uninhabited()) {
-            abi = Abi::Uninhabited;
-        }
-        Some(LayoutS {
-            variants: Variants::Single { index: FIRST_VARIANT },
-            fields: FieldsShape::Arbitrary { offsets, memory_index },
-            abi,
-            largest_niche,
-            align,
-            size,
-        })
+        let layout = univariant(self, dl, fields, repr, kind, NicheBias::Start);
+        // Enums prefer niches close to the beginning or the end of the variants so that other (smaller)
+        // data-carrying variants can be packed into the space after/before the niche.
+        // If the default field ordering does not give us a niche at the front then we do a second
+        // run and bias niches to the right and then check which one is closer to one of the struct's
+        // edges.
+        if let Some(layout) = &layout {
+            if let Some(niche) = layout.largest_niche {
+                let head_space = niche.offset.bytes();
+                let niche_length = niche.value.size(dl).bytes();
+                let tail_space = layout.size.bytes() - head_space - niche_length;
+
+                // This may end up doing redundant work if the niche is already in the last field
+                // (e.g. a trailing bool) and there is tail padding. But it's non-trivial to get
+                // the unpadded size so we try anyway.
+                if fields.len() > 1 && head_space != 0 && tail_space > 0 {
+                    let alt_layout = univariant(self, dl, fields, repr, kind, NicheBias::End)
+                        .expect("alt layout should always work");
+                    let niche = alt_layout
+                        .largest_niche
+                        .expect("alt layout should have a niche like the regular one");
+                    let alt_head_space = niche.offset.bytes();
+                    let alt_niche_len = niche.value.size(dl).bytes();
+                    let alt_tail_space = alt_layout.size.bytes() - alt_head_space - alt_niche_len;
+
+                    debug_assert_eq!(layout.size.bytes(), alt_layout.size.bytes());
+
+                    let prefer_alt_layout =
+                        alt_head_space > head_space && alt_head_space > tail_space;
+
+                    debug!(
+                        "sz: {}, default_niche_at: {}+{}, default_tail_space: {}, alt_niche_at/head_space: {}+{}, alt_tail: {}, num_fields: {}, better: {}\n\
+                        layout: {}\n\
+                        alt_layout: {}\n",
+                        layout.size.bytes(),
+                        head_space,
+                        niche_length,
+                        tail_space,
+                        alt_head_space,
+                        alt_niche_len,
+                        alt_tail_space,
+                        layout.fields.count(),
+                        prefer_alt_layout,
+                        format_field_niches(&layout, &fields, &dl),
+                        format_field_niches(&alt_layout, &fields, &dl),
+                    );
+
+                    if prefer_alt_layout {
+                        return Some(alt_layout);
+                    }
+                }
+            }
+        }
+
+        layout
     }
 
     fn layout_of_never_type(&self) -> LayoutS {
@@ -934,3 +775,323 @@ pub trait LayoutCalculator {
         })
     }
 }
+
+/// Determines towards which end of a struct layout optimizations will try to place the best niches.
+enum NicheBias {
+    Start,
+    End,
+}
+
+fn univariant(
+    this: &(impl LayoutCalculator + ?Sized),
+    dl: &TargetDataLayout,
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
+    repr: &ReprOptions,
+    kind: StructKind,
+    niche_bias: NicheBias,
+) -> Option<LayoutS> {
+    let pack = repr.pack;
+    let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+    let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
+    let optimize = !repr.inhibit_struct_field_reordering_opt();
+    if optimize && fields.len() > 1 {
+        let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+        let optimizing = &mut inverse_memory_index.raw[..end];
+
+        // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+        // the field ordering to try and catch some code making assumptions about layouts
+        // we don't guarantee
+        if repr.can_randomize_type_layout() && cfg!(feature = "randomize") {
+            #[cfg(feature = "randomize")]
+            {
+                // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+                // randomize field ordering with
+                let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed.as_u64());
+
+                // Shuffle the ordering of the fields
+                optimizing.shuffle(&mut rng);
+            }
+            // Otherwise we just leave things alone and actually optimize the type's fields
+        } else {
+            let max_field_align = fields.iter().map(|f| f.align().abi.bytes()).max().unwrap_or(1);
+            let largest_niche_size = fields
+                .iter()
+                .filter_map(|f| f.largest_niche())
+                .map(|n| n.available(dl))
+                .max()
+                .unwrap_or(0);
+
+            // Calculates a sort key to group fields by their alignment or possibly some size-derived
+            // pseudo-alignment.
+            let alignment_group_key = |layout: Layout<'_>| {
+                if let Some(pack) = pack {
+                    // return the packed alignment in bytes
+                    layout.align().abi.min(pack).bytes()
+                } else {
+                    // returns log2(effective-align).
+                    // This is ok since `pack` applies to all fields equally.
+                    // The calculation assumes that size is an integer multiple of align, except for ZSTs.
+                    //
+                    let align = layout.align().abi.bytes();
+                    let size = layout.size().bytes();
+                    let niche_size = layout.largest_niche().map(|n| n.available(dl)).unwrap_or(0);
+                    // group [u8; 4] with align-4 or [u8; 6] with align-2 fields
+                    let size_as_align = align.max(size).trailing_zeros();
+                    let size_as_align = if largest_niche_size > 0 {
+                        match niche_bias {
+                            // Given `A(u8, [u8; 16])` and `B(bool, [u8; 16])` we want to bump the array
+                            // to the front in the first case (for aligned loads) but keep the bool in front
+                            // in the second case for its niches.
+                            NicheBias::Start => max_field_align.trailing_zeros().min(size_as_align),
+                            // When moving niches towards the end of the struct then for
+                            // A((u8, u8, u8, bool), (u8, bool, u8)) we want to keep the first tuple
+                            // in the align-1 group because its bool can be moved closer to the end.
+                            NicheBias::End if niche_size == largest_niche_size => {
+                                align.trailing_zeros()
+                            }
+                            NicheBias::End => size_as_align,
+                        }
+                    } else {
+                        size_as_align
+                    };
+                    size_as_align as u64
+                }
+            };
+
+            match kind {
+                StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+                    // Currently `LayoutS` only exposes a single niche so sorting is usually sufficient
+                    // to get one niche into the preferred position. If it ever supported multiple niches
+                    // then a more advanced pick-and-pack approach could provide better results.
+                    // But even for the single-niche cache it's not optimal. E.g. for
+                    // A(u32, (bool, u8), u16) it would be possible to move the bool to the front
+                    // but it would require packing the tuple together with the u16 to build a 4-byte
+                    // group so that the u32 can be placed after it without padding. This kind
+                    // of packing can't be achieved by sorting.
+                    optimizing.sort_by_key(|&x| {
+                        let f = fields[x];
+                        let field_size = f.size().bytes();
+                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+                        let niche_size_key = match niche_bias {
+                            // large niche first
+                            NicheBias::Start => !niche_size,
+                            // large niche last
+                            NicheBias::End => niche_size,
+                        };
+                        let inner_niche_offset_key = match niche_bias {
+                            NicheBias::Start => f.largest_niche().map_or(0, |n| n.offset.bytes()),
+                            NicheBias::End => f.largest_niche().map_or(0, |n| {
+                                !(field_size - n.value.size(dl).bytes() - n.offset.bytes())
+                            }),
+                        };
+
+                        (
+                            // Place ZSTs first to avoid "interesting offsets", especially with only one
+                            // or two non-ZST fields. This helps Scalar/ScalarPair layouts.
+                            !f.0.is_zst(),
+                            // Then place largest alignments first.
+                            cmp::Reverse(alignment_group_key(f)),
+                            // Then prioritize niche placement within alignment group according to
+                            // `niche_bias_start`.
+                            niche_size_key,
+                            // Then among fields with equally-sized niches prefer the ones
+                            // closer to the start/end of the field.
+                            inner_niche_offset_key,
+                        )
+                    });
+                }
+
+                StructKind::Prefixed(..) => {
+                    // Sort in ascending alignment so that the layout stays optimal
+                    // regardless of the prefix.
+                    // And put the largest niche in an alignment group at the end
+                    // so it can be used as discriminant in jagged enums
+                    optimizing.sort_by_key(|&x| {
+                        let f = fields[x];
+                        let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
+                        (alignment_group_key(f), niche_size)
+                    });
+                }
+            }
+
+            // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+            // regardless of the status of `-Z randomize-layout`
+        }
+    }
+    // inverse_memory_index holds field indices by increasing memory offset.
+    // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+    // We now write field offsets to the corresponding offset slot;
+    // field 5 with offset 0 puts 0 in offsets[5].
+    // At the bottom of this function, we invert `inverse_memory_index` to
+    // produce `memory_index` (see `invert_mapping`).
+    let mut sized = true;
+    let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
+    let mut offset = Size::ZERO;
+    let mut largest_niche = None;
+    let mut largest_niche_available = 0;
+    if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+        let prefix_align =
+            if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+        align = align.max(AbiAndPrefAlign::new(prefix_align));
+        offset = prefix_size.align_to(prefix_align);
+    }
+    for &i in &inverse_memory_index {
+        let field = &fields[i];
+        if !sized {
+            this.delay_bug(&format!(
+                "univariant: field #{} comes after unsized field",
+                offsets.len(),
+            ));
+        }
+
+        if field.0.is_unsized() {
+            sized = false;
+        }
+
+        // Invariant: offset < dl.obj_size_bound() <= 1<<61
+        let field_align = if let Some(pack) = pack {
+            field.align().min(AbiAndPrefAlign::new(pack))
+        } else {
+            field.align()
+        };
+        offset = offset.align_to(field_align.abi);
+        align = align.max(field_align);
+
+        debug!("univariant offset: {:?} field: {:#?}", offset, field);
+        offsets[i] = offset;
+
+        if let Some(mut niche) = field.largest_niche() {
+            let available = niche.available(dl);
+            // Pick up larger niches.
+            let prefer_new_niche = match niche_bias {
+                NicheBias::Start => available > largest_niche_available,
+                // if there are several niches of the same size then pick the last one
+                NicheBias::End => available >= largest_niche_available,
+            };
+            if prefer_new_niche {
+                largest_niche_available = available;
+                niche.offset += offset;
+                largest_niche = Some(niche);
+            }
+        }
+
+        offset = offset.checked_add(field.size(), dl)?;
+    }
+    if let Some(repr_align) = repr.align {
+        align = align.max(AbiAndPrefAlign::new(repr_align));
+    }
+    debug!("univariant min_size: {:?}", offset);
+    let min_size = offset;
+    // As stated above, inverse_memory_index holds field indices by increasing offset.
+    // This makes it an already-sorted view of the offsets vec.
+    // To invert it, consider:
+    // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
+    // Field 5 would be the first element, so memory_index is i:
+    // Note: if we didn't optimize, it's already right.
+    let memory_index = if optimize {
+        inverse_memory_index.invert_bijective_mapping()
+    } else {
+        debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
+        inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+    };
+    let size = min_size.align_to(align.abi);
+    let mut abi = Abi::Aggregate { sized };
+    // Unpack newtype ABIs and find scalar pairs.
+    if sized && size.bytes() > 0 {
+        // All other fields must be ZSTs.
+        let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
+
+        match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+            // We have exactly one non-ZST field.
+            (Some((i, field)), None, None) => {
+                // Field fills the struct and it has a scalar or scalar pair ABI.
+                if offsets[i].bytes() == 0 && align.abi == field.align().abi && size == field.size()
+                {
+                    match field.abi() {
+                        // For plain scalars, or vectors of them, we can't unpack
+                        // newtypes for `#[repr(C)]`, as that affects C ABIs.
+                        Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+                            abi = field.abi();
+                        }
+                        // But scalar pairs are Rust-specific and get
+                        // treated as aggregates by C ABIs anyway.
+                        Abi::ScalarPair(..) => {
+                            abi = field.abi();
+                        }
+                        _ => {}
+                    }
+                }
+            }
+
+            // Two non-ZST fields, and they're both scalars.
+            (Some((i, a)), Some((j, b)), None) => {
+                match (a.abi(), b.abi()) {
+                    (Abi::Scalar(a), Abi::Scalar(b)) => {
+                        // Order by the memory placement, not source order.
+                        let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+                            ((i, a), (j, b))
+                        } else {
+                            ((j, b), (i, a))
+                        };
+                        let pair = this.scalar_pair(a, b);
+                        let pair_offsets = match pair.fields {
+                            FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+                                assert_eq!(memory_index.raw, [0, 1]);
+                                offsets
+                            }
+                            _ => panic!(),
+                        };
+                        if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
+                            && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
+                            && align == pair.align
+                            && size == pair.size
+                        {
+                            // We can use `ScalarPair` only when it matches our
+                            // already computed layout (including `#[repr(C)]`).
+                            abi = pair.abi;
+                        }
+                    }
+                    _ => {}
+                }
+            }
+
+            _ => {}
+        }
+    }
+    if fields.iter().any(|f| f.abi().is_uninhabited()) {
+        abi = Abi::Uninhabited;
+    }
+    Some(LayoutS {
+        variants: Variants::Single { index: FIRST_VARIANT },
+        fields: FieldsShape::Arbitrary { offsets, memory_index },
+        abi,
+        largest_niche,
+        align,
+        size,
+    })
+}
+
+fn format_field_niches(
+    layout: &LayoutS,
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
+    dl: &TargetDataLayout,
+) -> String {
+    let mut s = String::new();
+    for i in layout.fields.index_by_increasing_offset() {
+        let offset = layout.fields.offset(i);
+        let f = fields[i.into()];
+        write!(s, "[o{}a{}s{}", offset.bytes(), f.align().abi.bytes(), f.size().bytes()).unwrap();
+        if let Some(n) = f.largest_niche() {
+            write!(
+                s,
+                " n{}b{}s{}",
+                n.offset.bytes(),
+                n.available(dl).ilog2(),
+                n.value.size(dl).bytes()
+            )
+            .unwrap();
+        }
+        write!(s, "] ").unwrap();
+    }
+    s
+}
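A side note on the `alignment_group_key` closure added above: in the unpacked case its grouping key is essentially log2(max(align, size)), so a large field with low alignment gets a size-derived pseudo-alignment, which is what the in-code comment about grouping `[u8; 4]` with align-4 fields and `[u8; 6]` with align-2 fields refers to. A toy model of just that part, with plain byte counts standing in for the compiler's `Layout` values and the niche-bias adjustments left out:

// Sort group is log2 of the larger of a field's alignment and its size, so
// byte arrays group with fields of comparable size rather than with other
// align-1 fields. Inputs are byte counts; this ignores packing and niches.
fn size_as_align_key(align_bytes: u64, size_bytes: u64) -> u32 {
    align_bytes.max(size_bytes).trailing_zeros()
}

fn main() {
    assert_eq!(size_as_align_key(1, 4), size_as_align_key(4, 4)); // [u8; 4] groups with u32
    assert_eq!(size_as_align_key(1, 6), size_as_align_key(2, 2)); // [u8; 6] groups with u16
    assert_ne!(size_as_align_key(1, 1), size_as_align_key(8, 8)); // u8 stays apart from u64
}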