
Auto merge of #109915 - scottmcm:layout-indexvec, r=oli-obk

Use `FieldIdx` in `FieldsShape`

Finally got to the main motivating example from https://github.com/rust-lang/compiler-team/issues/606 :)
bors 2023-04-06 07:38:58 +00:00
commit 2824db39f1
7 changed files with 132 additions and 100 deletions
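
For readers outside the compiler, the change hinges on rustc's typed-index pattern: `FieldIdx` is a newtype around `u32`, and `IndexVec<FieldIdx, T>` / `IndexSlice<FieldIdx, T>` are containers that can only be indexed by that type, which is what lets the `as usize` casts below disappear. A minimal standalone sketch of the pattern (simplified for illustration; this is not rustc's actual `newtype_index!` machinery):

use std::marker::PhantomData;
use std::ops::Index;

// A typed index: stored as a u32, but a distinct type, so a field number
// can't silently be used where some other kind of index is expected.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
struct FieldIdx(u32);

impl FieldIdx {
    fn from_usize(i: usize) -> Self {
        FieldIdx(u32::try_from(i).unwrap())
    }
    fn as_usize(self) -> usize {
        self.0 as usize
    }
}

// A Vec that is indexed by a declared index type instead of a bare usize.
struct IndexVec<I, T> {
    raw: Vec<T>,
    _marker: PhantomData<fn(I)>,
}

impl<T> Index<FieldIdx> for IndexVec<FieldIdx, T> {
    type Output = T;
    fn index(&self, i: FieldIdx) -> &T {
        &self.raw[i.as_usize()]
    }
}

fn main() {
    let offsets: IndexVec<FieldIdx, u64> =
        IndexVec { raw: vec![0, 8, 12], _marker: PhantomData };
    // Indexing takes a FieldIdx; `offsets[2usize]` would not compile.
    assert_eq!(offsets[FieldIdx::from_usize(2)], 12);
}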

compiler/rustc_abi/src/layout.rs

@@ -8,19 +8,6 @@ use rand_xoshiro::Xoshiro128StarStar;
 use tracing::debug;
 
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
-    let mut inverse = vec![0; map.len()];
-    for i in 0..map.len() {
-        inverse[map[i] as usize] = i as u32;
-    }
-    inverse
-}
-
 pub trait LayoutCalculator {
     type TargetDataLayoutRef: Borrow<TargetDataLayout>;
@@ -45,8 +32,8 @@ pub trait LayoutCalculator {
         LayoutS {
             variants: Variants::Single { index: FIRST_VARIANT },
             fields: FieldsShape::Arbitrary {
-                offsets: vec![Size::ZERO, b_offset],
-                memory_index: vec![0, 1],
+                offsets: [Size::ZERO, b_offset].into(),
+                memory_index: [0, 1].into(),
             },
             abi: Abi::ScalarPair(a, b),
             largest_niche,
@@ -58,18 +45,18 @@ pub trait LayoutCalculator {
     fn univariant(
         &self,
         dl: &TargetDataLayout,
-        fields: &[Layout<'_>],
+        fields: &IndexSlice<FieldIdx, Layout<'_>>,
         repr: &ReprOptions,
         kind: StructKind,
     ) -> Option<LayoutS> {
         let pack = repr.pack;
         let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-        let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+        let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect();
         let optimize = !repr.inhibit_struct_field_reordering_opt();
         if optimize {
             let end =
                 if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
-            let optimizing = &mut inverse_memory_index[..end];
+            let optimizing = &mut inverse_memory_index.raw[..end];
             let effective_field_align = |layout: Layout<'_>| {
                 if let Some(pack) = pack {
                     // return the packed alignment in bytes
@@ -105,7 +92,7 @@ pub trait LayoutCalculator {
                     // Place ZSTs first to avoid "interesting offsets",
                     // especially with only one or two non-ZST fields.
                     // Then place largest alignments first, largest niches within an alignment group last
-                    let f = fields[x as usize];
+                    let f = fields[x];
                     let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                     (!f.0.is_zst(), cmp::Reverse(effective_field_align(f)), niche_size)
                 });
@@ -117,7 +104,7 @@ pub trait LayoutCalculator {
                 // And put the largest niche in an alignment group at the end
                 // so it can be used as discriminant in jagged enums
                 optimizing.sort_by_key(|&x| {
-                    let f = fields[x as usize];
+                    let f = fields[x];
                     let niche_size = f.largest_niche().map_or(0, |n| n.available(dl));
                     (effective_field_align(f), niche_size)
                 });
@@ -135,7 +122,7 @@ pub trait LayoutCalculator {
         // At the bottom of this function, we invert `inverse_memory_index` to
        // produce `memory_index` (see `invert_mapping`).
         let mut sized = true;
-        let mut offsets = vec![Size::ZERO; fields.len()];
+        let mut offsets = IndexVec::from_elem(Size::ZERO, &fields);
         let mut offset = Size::ZERO;
         let mut largest_niche = None;
         let mut largest_niche_available = 0;
@@ -146,7 +133,7 @@ pub trait LayoutCalculator {
             offset = prefix_size.align_to(prefix_align);
         }
         for &i in &inverse_memory_index {
-            let field = &fields[i as usize];
+            let field = &fields[i];
             if !sized {
                 self.delay_bug(&format!(
                     "univariant: field #{} comes after unsized field",
@@ -168,7 +155,7 @@ pub trait LayoutCalculator {
             align = align.max(field_align);
             debug!("univariant offset: {:?} field: {:#?}", offset, field);
-            offsets[i as usize] = offset;
+            offsets[i] = offset;
 
             if let Some(mut niche) = field.largest_niche() {
                 let available = niche.available(dl);
@@ -192,14 +179,18 @@ pub trait LayoutCalculator {
         // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
         // Field 5 would be the first element, so memory_index is i:
         // Note: if we didn't optimize, it's already right.
-        let memory_index =
-            if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+        let memory_index = if optimize {
+            inverse_memory_index.invert_bijective_mapping()
+        } else {
+            debug_assert!(inverse_memory_index.iter().copied().eq(fields.indices()));
+            inverse_memory_index.into_iter().map(FieldIdx::as_u32).collect()
+        };
         let size = min_size.align_to(align.abi);
         let mut abi = Abi::Aggregate { sized };
 
         // Unpack newtype ABIs and find scalar pairs.
         if sized && size.bytes() > 0 {
             // All other fields must be ZSTs.
-            let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.0.is_zst());
+            let mut non_zst_fields = fields.iter_enumerated().filter(|&(_, f)| !f.0.is_zst());
 
             match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
                 // We have exactly one non-ZST field.
@@ -238,13 +229,13 @@ pub trait LayoutCalculator {
                     let pair = self.scalar_pair(a, b);
                     let pair_offsets = match pair.fields {
                         FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                            assert_eq!(memory_index, &[0, 1]);
+                            assert_eq!(memory_index.raw, [0, 1]);
                             offsets
                         }
                         _ => panic!(),
                     };
-                    if offsets[i] == pair_offsets[0]
-                        && offsets[j] == pair_offsets[1]
+                    if offsets[i] == pair_offsets[FieldIdx::from_usize(0)]
+                        && offsets[j] == pair_offsets[FieldIdx::from_usize(1)]
                         && align == pair.align
                         && size == pair.size
                     {
@@ -289,7 +280,7 @@ pub trait LayoutCalculator {
     fn layout_of_struct_or_enum(
         &self,
         repr: &ReprOptions,
-        variants: &IndexSlice<VariantIdx, Vec<Layout<'_>>>,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
         is_enum: bool,
         is_unsafe_cell: bool,
         scalar_valid_range: (Bound<u128>, Bound<u128>),
@@ -312,7 +303,7 @@ pub trait LayoutCalculator {
         // but *not* an encoding of the discriminant (e.g., a tag value).
         // See issue #49298 for more details on the need to leave space
         // for non-ZST uninhabited data (mostly partial initialization).
-        let absent = |fields: &[Layout<'_>]| {
+        let absent = |fields: &IndexSlice<FieldIdx, Layout<'_>>| {
             let uninhabited = fields.iter().any(|f| f.abi().is_uninhabited());
             let is_zst = fields.iter().all(|f| f.0.is_zst());
             uninhabited && is_zst
@@ -510,7 +501,7 @@ pub trait LayoutCalculator {
                     // It'll fit, but we need to make some adjustments.
                     match layout.fields {
                         FieldsShape::Arbitrary { ref mut offsets, .. } => {
-                            for (j, offset) in offsets.iter_mut().enumerate() {
+                            for (j, offset) in offsets.iter_enumerated_mut() {
                                 if !variants[i][j].0.is_zst() {
                                     *offset += this_offset;
                                 }
@@ -577,8 +568,8 @@ pub trait LayoutCalculator {
                     variants: IndexVec::new(),
                 },
                 fields: FieldsShape::Arbitrary {
-                    offsets: vec![niche_offset],
-                    memory_index: vec![0],
+                    offsets: [niche_offset].into(),
+                    memory_index: [0].into(),
                 },
                 abi,
                 largest_niche,
@@ -651,7 +642,8 @@ pub trait LayoutCalculator {
             st.variants = Variants::Single { index: i };
             // Find the first field we can't move later
             // to make room for a larger discriminant.
-            for field in st.fields.index_by_increasing_offset().map(|j| &field_layouts[j]) {
+            for field_idx in st.fields.index_by_increasing_offset() {
+                let field = &field_layouts[FieldIdx::from_usize(field_idx)];
                 if !field.0.is_zst() || field.align().abi.bytes() != 1 {
                     start_align = start_align.min(field.align().abi);
                     break;
@@ -802,13 +794,13 @@ pub trait LayoutCalculator {
                         let pair = self.scalar_pair(tag, prim_scalar);
                         let pair_offsets = match pair.fields {
                             FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
-                                assert_eq!(memory_index, &[0, 1]);
+                                assert_eq!(memory_index.raw, [0, 1]);
                                 offsets
                             }
                             _ => panic!(),
                         };
-                        if pair_offsets[0] == Size::ZERO
-                            && pair_offsets[1] == *offset
+                        if pair_offsets[FieldIdx::from_u32(0)] == Size::ZERO
+                            && pair_offsets[FieldIdx::from_u32(1)] == *offset
                             && align == pair.align
                             && size == pair.size
                         {
@@ -844,7 +836,10 @@ pub trait LayoutCalculator {
                 tag_field: 0,
                 variants: IndexVec::new(),
             },
-            fields: FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] },
+            fields: FieldsShape::Arbitrary {
+                offsets: [Size::ZERO].into(),
+                memory_index: [0].into(),
+            },
             largest_niche,
             abi,
             align,
@@ -883,7 +878,7 @@ pub trait LayoutCalculator {
     fn layout_of_union(
         &self,
         repr: &ReprOptions,
-        variants: &IndexSlice<VariantIdx, Vec<Layout<'_>>>,
+        variants: &IndexSlice<VariantIdx, IndexVec<FieldIdx, Layout<'_>>>,
     ) -> Option<LayoutS> {
         let dl = self.current_data_layout();
         let dl = dl.borrow();
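
The trickiest part of the hunks above is the pair of permutations being maintained: `memory_index` maps source field order to memory order, `inverse_memory_index` maps memory order back to source order, and inverting one yields the other. A standalone illustration with bare `u32`s, mirroring the `invert_mapping` helper this commit deletes:

// `invert(map)[y] = x` whenever `map[x] = y`, for a permutation of 0..len.
fn invert(map: &[u32]) -> Vec<u32> {
    let mut inverse = vec![0; map.len()];
    for (i, &j) in map.iter().enumerate() {
        inverse[j as usize] = i as u32;
    }
    inverse
}

fn main() {
    // Three fields in source order 0, 1, 2; the optimizer decides the memory
    // order should be: field 2 first, then field 0, then field 1.
    let inverse_memory_index = vec![2, 0, 1]; // memory slot -> source field
    let memory_index = invert(&inverse_memory_index); // source field -> memory slot
    assert_eq!(memory_index, vec![1, 2, 0]); // e.g. field 0 lands in slot 1
    // Inverting twice round-trips, which is what "bijective" buys here.
    assert_eq!(invert(&memory_index), inverse_memory_index);
}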

compiler/rustc_abi/src/lib.rs

@@ -1108,7 +1108,7 @@ pub enum FieldsShape {
         /// ordered to match the source definition order.
         /// This vector does not go in increasing order.
         // FIXME(eddyb) use small vector optimization for the common case.
-        offsets: Vec<Size>,
+        offsets: IndexVec<FieldIdx, Size>,
 
         /// Maps source order field indices to memory order indices,
         /// depending on how the fields were reordered (if at all).
@@ -1122,7 +1122,7 @@ pub enum FieldsShape {
         ///
         // FIXME(eddyb) build a better abstraction for permutations, if possible.
         // FIXME(camlorn) also consider small vector optimization here.
-        memory_index: Vec<u32>,
+        memory_index: IndexVec<FieldIdx, u32>,
     },
 }
@@ -1157,7 +1157,7 @@ impl FieldsShape {
                 assert!(i < count);
                 stride * i
             }
-            FieldsShape::Arbitrary { ref offsets, .. } => offsets[i],
+            FieldsShape::Arbitrary { ref offsets, .. } => offsets[FieldIdx::from_usize(i)],
         }
     }
@@ -1168,7 +1168,9 @@ impl FieldsShape {
                 unreachable!("FieldsShape::memory_index: `Primitive`s have no fields")
             }
             FieldsShape::Union(_) | FieldsShape::Array { .. } => i,
-            FieldsShape::Arbitrary { ref memory_index, .. } => memory_index[i].try_into().unwrap(),
+            FieldsShape::Arbitrary { ref memory_index, .. } => {
+                memory_index[FieldIdx::from_usize(i)].try_into().unwrap()
+            }
         }
     }
@@ -1176,20 +1178,17 @@ impl FieldsShape {
     #[inline]
     pub fn index_by_increasing_offset<'a>(&'a self) -> impl Iterator<Item = usize> + 'a {
         let mut inverse_small = [0u8; 64];
-        let mut inverse_big = vec![];
+        let mut inverse_big = IndexVec::new();
         let use_small = self.count() <= inverse_small.len();
 
         // We have to write this logic twice in order to keep the array small.
         if let FieldsShape::Arbitrary { ref memory_index, .. } = *self {
             if use_small {
-                for i in 0..self.count() {
-                    inverse_small[memory_index[i] as usize] = i as u8;
+                for (field_idx, &mem_idx) in memory_index.iter_enumerated() {
+                    inverse_small[mem_idx as usize] = field_idx.as_u32() as u8;
                 }
             } else {
-                inverse_big = vec![0; self.count()];
-                for i in 0..self.count() {
-                    inverse_big[memory_index[i] as usize] = i as u32;
-                }
+                inverse_big = memory_index.invert_bijective_mapping();
             }
         }
@@ -1199,7 +1198,7 @@ impl FieldsShape {
             if use_small {
                 inverse_small[i] as usize
             } else {
-                inverse_big[i] as usize
+                inverse_big[i as u32].as_usize()
             }
         })
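
For intuition, `index_by_increasing_offset` yields source-order field indices sorted by where each field sits in memory, i.e. exactly the inverse of `memory_index`. A simplified model over plain slices, omitting the 64-entry `u8` fast path the real method keeps:

fn index_by_increasing_offset(memory_index: &[u32]) -> impl Iterator<Item = usize> {
    // inverse[mem_slot] = source field index occupying that slot.
    let mut inverse = vec![0usize; memory_index.len()];
    for (field, &mem) in memory_index.iter().enumerate() {
        inverse[mem as usize] = field;
    }
    inverse.into_iter()
}

fn main() {
    // Field 0 is laid out in slot 1, field 1 in slot 2, field 2 in slot 0.
    let memory_index = [1u32, 2, 0];
    let order: Vec<usize> = index_by_increasing_offset(&memory_index).collect();
    assert_eq!(order, [2, 0, 1]); // field 2 sits at the lowest offset
}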

compiler/rustc_index/src/vec.rs

@@ -24,6 +24,7 @@ pub trait Idx: Copy + 'static + Eq + PartialEq + Debug + Hash {
     }
 
     #[inline]
+    #[must_use = "Use `increment_by` if you wanted to update the index in-place"]
     fn plus(self, amount: usize) -> Self {
         Self::new(self.index() + amount)
     }
@@ -294,6 +295,11 @@ impl<I: Idx, T: Clone> ToOwned for IndexSlice<I, T> {
     }
 }
 
 impl<I: Idx, T> IndexSlice<I, T> {
+    #[inline]
+    pub fn empty() -> &'static Self {
+        Default::default()
+    }
+
     #[inline]
     pub fn from_raw(raw: &[T]) -> &Self {
         let ptr: *const [T] = raw;
@@ -409,6 +415,36 @@ impl<I: Idx, T> IndexSlice<I, T> {
     }
 }
 
+impl<I: Idx, J: Idx> IndexSlice<I, J> {
+    /// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`,
+    /// assuming the values in `self` are a permutation of `0..self.len()`.
+    ///
+    /// This is used to go between `memory_index` (source field order to memory order)
+    /// and `inverse_memory_index` (memory order to source field order).
+    /// See also `FieldsShape::Arbitrary::memory_index` for more details.
+    // FIXME(eddyb) build a better abstraction for permutations, if possible.
+    pub fn invert_bijective_mapping(&self) -> IndexVec<J, I> {
+        debug_assert_eq!(
+            self.iter().map(|x| x.index() as u128).sum::<u128>(),
+            (0..self.len() as u128).sum::<u128>(),
+            "The values aren't 0..N in input {self:?}",
+        );
+
+        let mut inverse = IndexVec::from_elem_n(Idx::new(0), self.len());
+        for (i1, &i2) in self.iter_enumerated() {
+            inverse[i2] = i1;
+        }
+
+        debug_assert_eq!(
+            inverse.iter().map(|x| x.index() as u128).sum::<u128>(),
+            (0..inverse.len() as u128).sum::<u128>(),
+            "The values aren't 0..N in result {self:?}",
+        );
+
+        inverse
+    }
+}
+
 /// `IndexVec` is often used as a map, so it provides some map-like APIs.
 impl<I: Idx, T> IndexVec<I, Option<T>> {
     #[inline]
@@ -513,6 +549,13 @@ impl<I: Idx, T> FromIterator<T> for IndexVec<I, T> {
     }
 }
 
+impl<I: Idx, T, const N: usize> From<[T; N]> for IndexVec<I, T> {
+    #[inline]
+    fn from(array: [T; N]) -> Self {
+        IndexVec::from_raw(array.into())
+    }
+}
+
 impl<I: Idx, T> IntoIterator for IndexVec<I, T> {
     type Item = T;
     type IntoIter = vec::IntoIter<T>;
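
Two additions above do most of the work for the other files: the `From<[T; N]>` impl is what lets call sites write `[Size::ZERO, b_offset].into()`, and `invert_bijective_mapping` replaces the two local `invert_mapping` copies deleted elsewhere in this commit. A standalone model of both, using plain `usize` indices instead of `Idx` newtypes; note the sum-based `debug_assert` is the same cheap sanity check as above, not a complete permutation proof:

struct IndexVec<T> {
    raw: Vec<T>,
}

impl<T, const N: usize> From<[T; N]> for IndexVec<T> {
    fn from(array: [T; N]) -> Self {
        // Same shape as the real impl: [T; N] -> Vec<T> -> IndexVec.
        IndexVec { raw: array.into() }
    }
}

impl IndexVec<usize> {
    fn invert_bijective_mapping(&self) -> IndexVec<usize> {
        // Cheap sanity check: a permutation of 0..N must sum to 0 + 1 + ... + N-1.
        debug_assert_eq!(
            self.raw.iter().sum::<usize>(),
            (0..self.raw.len()).sum::<usize>(),
            "the values aren't a permutation of 0..N",
        );
        let mut inverse = vec![0; self.raw.len()];
        for (i1, &i2) in self.raw.iter().enumerate() {
            inverse[i2] = i1;
        }
        IndexVec { raw: inverse }
    }
}

fn main() {
    let memory_index: IndexVec<usize> = [1, 2, 0].into();
    assert_eq!(memory_index.invert_bijective_mapping().raw, vec![2, 0, 1]);
}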

compiler/rustc_middle/src/ty/layout.rs

@@ -5,6 +5,7 @@ use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitableExt};
 use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
 use rustc_hir as hir;
 use rustc_hir::def_id::DefId;
+use rustc_index::vec::IndexVec;
 use rustc_session::config::OptLevel;
 use rustc_span::symbol::{sym, Symbol};
 use rustc_span::{Span, DUMMY_SP};
@@ -635,7 +636,7 @@ where
             variants: Variants::Single { index: variant_index },
             fields: match NonZeroUsize::new(fields) {
                 Some(fields) => FieldsShape::Union(fields),
-                None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
+                None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
             },
             abi: Abi::Uninhabited,
             largest_niche: None,

compiler/rustc_mir_transform/src/generator.rs

@@ -287,7 +287,7 @@ impl<'tcx> TransformVisitor<'tcx> {
         statements.push(Statement {
             kind: StatementKind::Assign(Box::new((
                 Place::return_place(),
-                Rvalue::Aggregate(Box::new(kind), IndexVec::from_iter([val])),
+                Rvalue::Aggregate(Box::new(kind), [val].into()),
             ))),
             source_info,
         });

compiler/rustc_ty_utils/src/layout.rs

@@ -1,7 +1,7 @@
 use hir::def_id::DefId;
 use rustc_hir as hir;
 use rustc_index::bit_set::BitSet;
-use rustc_index::vec::IndexVec;
+use rustc_index::vec::{IndexSlice, IndexVec};
 use rustc_middle::mir::{GeneratorLayout, GeneratorSavedLocal};
 use rustc_middle::ty::layout::{
     IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, MAX_SIMD_LANES,
@@ -62,23 +62,10 @@ fn layout_of<'tcx>(
     Ok(layout)
 }
 
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
-    let mut inverse = vec![0; map.len()];
-    for i in 0..map.len() {
-        inverse[map[i] as usize] = i as u32;
-    }
-    inverse
-}
-
 fn univariant_uninterned<'tcx>(
     cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
     ty: Ty<'tcx>,
-    fields: &[Layout<'_>],
+    fields: &IndexSlice<FieldIdx, Layout<'_>>,
     repr: &ReprOptions,
     kind: StructKind,
 ) -> Result<LayoutS, LayoutError<'tcx>> {
@@ -106,7 +93,7 @@ fn layout_of_uncached<'tcx>(
     };
     let scalar = |value: Primitive| tcx.mk_layout(LayoutS::scalar(cx, scalar_unit(value)));
 
-    let univariant = |fields: &[Layout<'_>], repr: &ReprOptions, kind| {
+    let univariant = |fields: &IndexSlice<FieldIdx, Layout<'_>>, repr: &ReprOptions, kind| {
         Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, repr, kind)?))
     };
     debug_assert!(!ty.has_non_region_infer());
@@ -256,12 +243,14 @@ fn layout_of_uncached<'tcx>(
         }),
 
         // Odd unit types.
-        ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+        ty::FnDef(..) => {
+            univariant(IndexSlice::empty(), &ReprOptions::default(), StructKind::AlwaysSized)?
+        }
         ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
             let mut unit = univariant_uninterned(
                 cx,
                 ty,
-                &[],
+                IndexSlice::empty(),
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?;
@@ -277,7 +266,7 @@ fn layout_of_uncached<'tcx>(
         ty::Closure(_, ref substs) => {
             let tys = substs.as_closure().upvar_tys();
             univariant(
-                &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).collect::<Result<Vec<_>, _>>()?,
+                &tys.map(|ty| Ok(cx.layout_of(ty)?.layout)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?
@@ -288,7 +277,7 @@ fn layout_of_uncached<'tcx>(
                 if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
 
             univariant(
-                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).collect::<Result<Vec<_>, _>>()?,
+                &tys.iter().map(|k| Ok(cx.layout_of(k)?.layout)).try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 kind,
             )?
@@ -393,7 +382,7 @@ fn layout_of_uncached<'tcx>(
             // Compute the placement of the vector fields:
             let fields = if is_array {
-                FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
+                FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() }
             } else {
                 FieldsShape::Array { stride: e_ly.size, count: e_len }
             };
@@ -418,9 +407,9 @@ fn layout_of_uncached<'tcx>(
                     v.fields
                         .iter()
                         .map(|field| Ok(cx.layout_of(field.ty(tcx, substs))?.layout))
-                        .collect::<Result<Vec<_>, _>>()
+                        .try_collect::<IndexVec<_, _>>()
                 })
-                .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+                .try_collect::<IndexVec<VariantIdx, _>>()?;
 
             if def.is_union() {
                 if def.repr().pack.is_some() && def.repr().align.is_some() {
@@ -492,8 +481,7 @@ fn layout_of_uncached<'tcx>(
 enum SavedLocalEligibility {
     Unassigned,
     Assigned(VariantIdx),
-    // FIXME: Use newtype_index so we aren't wasting bytes
-    Ineligible(Option<u32>),
+    Ineligible(Option<FieldIdx>),
 }
 
@@ -605,7 +593,7 @@ fn generator_saved_local_eligibility(
     // Write down the order of our locals that will be promoted to the prefix.
     {
         for (idx, local) in ineligible_locals.iter().enumerate() {
-            assignments[local] = Ineligible(Some(idx as u32));
+            assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx)));
         }
     }
     debug!("generator saved local assignments: {:?}", assignments);
@@ -654,7 +642,7 @@ fn generator_layout<'tcx>(
         .map(|ty| Ok(cx.layout_of(ty)?.layout))
         .chain(iter::once(Ok(tag_layout)))
         .chain(promoted_layouts)
-        .collect::<Result<Vec<_>, _>>()?;
+        .try_collect::<IndexVec<_, _>>()?;
     let prefix = univariant_uninterned(
         cx,
         ty,
@@ -672,26 +660,28 @@ fn generator_layout<'tcx>(
     debug!("prefix = {:#?}", prefix);
 
     let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
         FieldsShape::Arbitrary { mut offsets, memory_index } => {
-            let mut inverse_memory_index = invert_mapping(&memory_index);
+            let mut inverse_memory_index = memory_index.invert_bijective_mapping();
 
             // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
             // "outer" and "promoted" fields respectively.
-            let b_start = (tag_index + 1) as u32;
-            let offsets_b = offsets.split_off(b_start as usize);
+            let b_start = FieldIdx::from_usize(tag_index + 1);
+            let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize()));
             let offsets_a = offsets;
 
             // Disentangle the "a" and "b" components of `inverse_memory_index`
             // by preserving the order but keeping only one disjoint "half" each.
             // FIXME(eddyb) build a better abstraction for permutations, if possible.
-            let inverse_memory_index_b: Vec<_> =
-                inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
-            inverse_memory_index.retain(|&i| i < b_start);
+            let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index
+                .iter()
+                .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32))
+                .collect();
+            inverse_memory_index.raw.retain(|&i| i < b_start);
             let inverse_memory_index_a = inverse_memory_index;
 
             // Since `inverse_memory_index_{a,b}` each only refer to their
             // respective fields, they can be safely inverted
-            let memory_index_a = invert_mapping(&inverse_memory_index_a);
-            let memory_index_b = invert_mapping(&inverse_memory_index_b);
+            let memory_index_a = inverse_memory_index_a.invert_bijective_mapping();
+            let memory_index_b = inverse_memory_index_b.invert_bijective_mapping();
 
             let outer_fields =
                 FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
@@ -722,7 +712,7 @@ fn generator_layout<'tcx>(
                 ty,
                 &variant_only_tys
                     .map(|ty| Ok(cx.layout_of(ty)?.layout))
-                    .collect::<Result<Vec<_>, _>>()?,
+                    .try_collect::<IndexVec<_, _>>()?,
                 &ReprOptions::default(),
                 StructKind::Prefixed(prefix_size, prefix_align.abi),
             )?;
@@ -741,13 +731,16 @@ fn generator_layout<'tcx>(
             // promoted fields were being used, but leave the elements not in the
             // subset as `INVALID_FIELD_IDX`, which we can filter out later to
             // obtain a valid (bijective) mapping.
-            const INVALID_FIELD_IDX: u32 = !0;
-            let mut combined_inverse_memory_index =
-                vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+            const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX;
+            debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX);
+
+            let mut combined_inverse_memory_index = IndexVec::from_elem_n(
+                INVALID_FIELD_IDX,
+                promoted_memory_index.len() + memory_index.len(),
+            );
             let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
             let combined_offsets = variant_fields
-                .iter()
-                .enumerate()
+                .iter_enumerated()
                 .map(|(i, local)| {
                     let (offset, memory_index) = match assignments[*local] {
                         Unassigned => bug!(),
@@ -756,19 +749,19 @@ fn generator_layout<'tcx>(
                             (offset, promoted_memory_index.len() as u32 + memory_index)
                         }
                         Ineligible(field_idx) => {
-                            let field_idx = field_idx.unwrap() as usize;
+                            let field_idx = field_idx.unwrap();
                             (promoted_offsets[field_idx], promoted_memory_index[field_idx])
                         }
                     };
-                    combined_inverse_memory_index[memory_index as usize] = i as u32;
+                    combined_inverse_memory_index[memory_index] = i;
                     offset
                 })
                 .collect();
 
             // Remove the unused slots and invert the mapping to obtain the
             // combined `memory_index` (also see previous comment).
-            combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
-            let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+            combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX);
+            let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping();
 
             variant.fields = FieldsShape::Arbitrary {
                 offsets: combined_offsets,
@@ -779,7 +772,7 @@ fn generator_layout<'tcx>(
                 align = align.max(variant.align);
                 Ok(variant)
             })
-            .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+            .try_collect::<IndexVec<VariantIdx, _>>()?;
 
     size = size.align_to(align.abi);
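
The generator hunk above splits one permutation into two disjoint halves: entries below `b_start` (the "outer" fields) keep their relative order, while entries at or above it (the "promoted" fields) are renumbered from zero. A standalone sketch of that disentangling step with plain `usize`s (the `split_permutation` name is ours, for illustration):

fn split_permutation(inverse_memory_index: &[usize], b_start: usize) -> (Vec<usize>, Vec<usize>) {
    // "b" half: keep entries >= b_start, shifted down so they start at 0
    // (checked_sub yields None for the "a" entries, which filter_map drops).
    let b: Vec<usize> =
        inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
    // "a" half: keep entries < b_start unchanged, preserving their order.
    let a: Vec<usize> =
        inverse_memory_index.iter().copied().filter(|&i| i < b_start).collect();
    (a, b)
}

fn main() {
    // Six fields: 0..3 are "outer" (a), 3..6 are "promoted" (b).
    let inverse_memory_index = [4, 0, 5, 2, 1, 3];
    let (a, b) = split_permutation(&inverse_memory_index, 3);
    assert_eq!(a, [0, 2, 1]); // a permutation of 0..3, order preserved
    assert_eq!(b, [1, 2, 0]); // renumbered into its own permutation of 0..3
}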

compiler/rustc_ty_utils/src/lib.rs

@@ -5,6 +5,7 @@
 //! This API is completely unstable and subject to change.
 
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(iterator_try_collect)]
 #![feature(let_chains)]
 #![feature(never_type)]
 #![feature(box_patterns)]
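
This last hunk exists because the layout code now calls `Iterator::try_collect`, which is still nightly-only. A small sketch of what the feature gate buys over the stable `collect::<Result<_, _>>()` spelling (nightly Rust assumed):

#![feature(iterator_try_collect)]

fn main() {
    // try_collect stops at the first Err, just like collect::<Result<_, _>>().
    let parsed = ["1", "2", "3"]
        .iter()
        .map(|s| s.parse::<i32>())
        .try_collect::<Vec<i32>>();
    assert_eq!(parsed, Ok(vec![1, 2, 3]));
}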