compiler: rustc_abi::Abi => BackendRepr
The initial naming of "Abi" was an awful mistake, conveying wrong ideas about how psABIs worked and even more about what the enum meant. It was only meant to represent the way the value would be described to a codegen backend as it was lowered to that intermediate representation. It was never meant to mean anything about the actual psABI handling! The conflation is because LLVM typically will associate a certain form with a certain ABI, but even that does not hold when the special cases that actually exist arise, plus the IR annotations that modify the ABI.

Reframe `rustc_abi::Abi` as the `BackendRepr` of the type, and rename `BackendRepr::Aggregate` as `BackendRepr::Memory`. Unfortunately, due to the persistent misunderstandings, this too is now incorrect:
- Scattered ABI-relevant code is entangled with BackendRepr
- We do not always pre-compute a correct BackendRepr that reflects how we "actually" want this value to be handled, so we leave the backend interface to also inject various special-cases here
- In some cases `BackendRepr::Memory` is a "real" aggregate, but in others it is in fact using memory, and in some cases it is a scalar!

Our rustc-to-backend lowering code handles this sort of thing right now. That will eventually be addressed by lifting duplicated lowering code to either rustc_codegen_ssa or rustc_target as appropriate.
parent 2dece5bb62
commit 7086dd83cc
51 changed files with 517 additions and 428 deletions
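The mechanical effect on downstream code is a field and variant rename: `layout.abi` becomes `layout.backend_repr`, and `Abi::Aggregate { .. }` becomes `BackendRepr::Memory { .. }`. As a hypothetical sketch (the helper function below is invented for illustration, not taken from this commit), a use site reads like this after the change:

    // Hypothetical sketch of a post-rename use site. `BackendRepr` and its
    // variants are the real enum from `rustc_abi`; `is_backend_immediate`
    // is an invented example, not an API added by this commit.
    use rustc_abi::BackendRepr;

    fn is_backend_immediate(repr: BackendRepr) -> bool {
        match repr {
            // Described to the backend as SSA values (a scalar, a pair, or a vector).
            BackendRepr::Scalar(_)
            | BackendRepr::ScalarPair(..)
            | BackendRepr::Vector { .. } => true,
            // No value of this type can exist, so nothing is ever passed.
            BackendRepr::Uninhabited => false,
            // Renamed from `Abi::Aggregate { .. }`: described to the backend
            // as a blob of memory, regardless of what the psABI later decides.
            BackendRepr::Memory { .. } => false,
        }
    }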
@@ -6,9 +6,9 @@ mod abi {
 #[cfg(feature = "nightly")]
 use rustc_macros::HashStable_Generic;

-#[cfg(feature = "nightly")]
-use crate::{Abi, FieldsShape, TyAbiInterface, TyAndLayout};
 use crate::{Align, HasDataLayout, Size};
+#[cfg(feature = "nightly")]
+use crate::{BackendRepr, FieldsShape, TyAbiInterface, TyAndLayout};

 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
 #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
@@ -128,11 +128,11 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
     where
         Ty: TyAbiInterface<'a, C> + Copy,
     {
-        match self.abi {
-            Abi::Uninhabited => Err(Heterogeneous),
+        match self.backend_repr {
+            BackendRepr::Uninhabited => Err(Heterogeneous),

             // The primitive for this algorithm.
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 let kind = match scalar.primitive() {
                     abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
                     abi::Float(_) => RegKind::Float,
@@ -140,7 +140,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))
             }

-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 assert!(!self.is_zst());
                 Ok(HomogeneousAggregate::Homogeneous(Reg {
                     kind: RegKind::Vector,
@@ -148,7 +148,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                 }))
             }

-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => {
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
                 // Helper for computing `homogeneous_aggregate`, allowing a custom
                 // starting offset (used below for handling variants).
                 let from_fields_at =
@@ -246,7 +246,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
                     Ok(result)
                 }
             }
-            Abi::Aggregate { sized: false } => Err(Heterogeneous),
+            BackendRepr::Memory { sized: false } => Err(Heterogeneous),
         }
     }
 }
@@ -6,7 +6,7 @@ use rustc_index::Idx;
 use tracing::debug;

 use crate::{
-    Abi, AbiAndPrefAlign, Align, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
+    AbiAndPrefAlign, Align, BackendRepr, FieldsShape, HasDataLayout, IndexSlice, IndexVec, Integer,
     LayoutData, Niche, NonZeroUsize, Primitive, ReprOptions, Scalar, Size, StructKind, TagEncoding,
     Variants, WrappingRange,
 };
@@ -125,7 +125,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 offsets: [Size::ZERO, b_offset].into(),
                 memory_index: [0, 1].into(),
             },
-            abi: Abi::ScalarPair(a, b),
+            backend_repr: BackendRepr::ScalarPair(a, b),
             largest_niche,
             align,
             size,
@@ -216,7 +216,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Uninhabited,
+            backend_repr: BackendRepr::Uninhabited,
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,
@@ -331,7 +331,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

             if let Ok(common) = common_non_zst_abi_and_align {
                 // Discard valid range information and allow undef
-                let field_abi = field.abi.to_union();
+                let field_abi = field.backend_repr.to_union();

                 if let Some((common_abi, common_align)) = common {
                     if common_abi != field_abi {
@@ -340,7 +340,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     } else {
                         // Fields with the same non-Aggregate ABI should also
                         // have the same alignment
-                        if !matches!(common_abi, Abi::Aggregate { .. }) {
+                        if !matches!(common_abi, BackendRepr::Memory { .. }) {
                             assert_eq!(
                                 common_align, field.align.abi,
                                 "non-Aggregate field with matching ABI but differing alignment"
@@ -369,11 +369,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         // If all non-ZST fields have the same ABI, we may forward that ABI
         // for the union as a whole, unless otherwise inhibited.
         let abi = match common_non_zst_abi_and_align {
-            Err(AbiMismatch) | Ok(None) => Abi::Aggregate { sized: true },
+            Err(AbiMismatch) | Ok(None) => BackendRepr::Memory { sized: true },
             Ok(Some((abi, _))) => {
                 if abi.inherent_align(dl).map(|a| a.abi) != Some(align.abi) {
                     // Mismatched alignment (e.g. union is #[repr(packed)]): disable opt
-                    Abi::Aggregate { sized: true }
+                    BackendRepr::Memory { sized: true }
                 } else {
                     abi
                 }
@@ -387,7 +387,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: only_variant_idx },
             fields: FieldsShape::Union(union_field_count),
-            abi,
+            backend_repr: abi,
             largest_niche: None,
             align,
             size: size.align_to(align.abi),
@@ -434,23 +434,23 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     // Already doesn't have any niches
                     Scalar::Union { .. } => {}
                 };
-                match &mut st.abi {
-                    Abi::Uninhabited => {}
-                    Abi::Scalar(scalar) => hide_niches(scalar),
-                    Abi::ScalarPair(a, b) => {
+                match &mut st.backend_repr {
+                    BackendRepr::Uninhabited => {}
+                    BackendRepr::Scalar(scalar) => hide_niches(scalar),
+                    BackendRepr::ScalarPair(a, b) => {
                         hide_niches(a);
                         hide_niches(b);
                     }
-                    Abi::Vector { element, count: _ } => hide_niches(element),
-                    Abi::Aggregate { sized: _ } => {}
+                    BackendRepr::Vector { element, count: _ } => hide_niches(element),
+                    BackendRepr::Memory { sized: _ } => {}
                 }
                 st.largest_niche = None;
                 return Ok(st);
             }

             let (start, end) = scalar_valid_range;
-            match st.abi {
-                Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+            match st.backend_repr {
+                BackendRepr::Scalar(ref mut scalar) | BackendRepr::ScalarPair(ref mut scalar, _) => {
                     // Enlarging validity ranges would result in missed
                     // optimizations, *not* wrongly assuming the inner
                     // value is valid. e.g. unions already enlarge validity ranges,
@@ -607,8 +607,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             }

             // It can't be a Scalar or ScalarPair because the offset isn't 0.
-            if !layout.abi.is_uninhabited() {
-                layout.abi = Abi::Aggregate { sized: true };
+            if !layout.is_uninhabited() {
+                layout.backend_repr = BackendRepr::Memory { sized: true };
             }
             layout.size += this_offset;

@@ -627,26 +627,26 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
             let same_size = size == variant_layouts[largest_variant_index].size;
             let same_align = align == variant_layouts[largest_variant_index].align;

-            let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
-                Abi::Uninhabited
+            let abi = if variant_layouts.iter().all(|v| v.is_uninhabited()) {
+                BackendRepr::Uninhabited
             } else if same_size && same_align && others_zst {
-                match variant_layouts[largest_variant_index].abi {
+                match variant_layouts[largest_variant_index].backend_repr {
                     // When the total alignment and size match, we can use the
                     // same ABI as the scalar variant with the reserved niche.
-                    Abi::Scalar(_) => Abi::Scalar(niche_scalar),
-                    Abi::ScalarPair(first, second) => {
+                    BackendRepr::Scalar(_) => BackendRepr::Scalar(niche_scalar),
+                    BackendRepr::ScalarPair(first, second) => {
                         // Only the niche is guaranteed to be initialised,
                         // so use union layouts for the other primitive.
                         if niche_offset == Size::ZERO {
-                            Abi::ScalarPair(niche_scalar, second.to_union())
+                            BackendRepr::ScalarPair(niche_scalar, second.to_union())
                         } else {
-                            Abi::ScalarPair(first.to_union(), niche_scalar)
+                            BackendRepr::ScalarPair(first.to_union(), niche_scalar)
                         }
                     }
-                    _ => Abi::Aggregate { sized: true },
+                    _ => BackendRepr::Memory { sized: true },
                 }
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };

             let layout = LayoutData {
@@ -664,7 +664,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                     offsets: [niche_offset].into(),
                     memory_index: [0].into(),
                 },
-                abi,
+                backend_repr: abi,
                 largest_niche,
                 size,
                 align,
@@ -833,14 +833,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 end: (max as u128 & tag_mask),
             },
         };
-        let mut abi = Abi::Aggregate { sized: true };
+        let mut abi = BackendRepr::Memory { sized: true };

-        if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if layout_variants.iter().all(|v| v.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         } else if tag.size(dl) == size {
             // Make sure we only use scalar layout when the enum is entirely its
             // own tag (i.e. it has no padding nor any non-ZST variant fields).
-            abi = Abi::Scalar(tag);
+            abi = BackendRepr::Scalar(tag);
         } else {
             // Try to use a ScalarPair for all tagged enums.
             // That's possible only if we can find a common primitive type for all variants.
@@ -864,8 +864,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                         break;
                     }
                 };
-                let prim = match field.abi {
-                    Abi::Scalar(scalar) => {
+                let prim = match field.backend_repr {
+                    BackendRepr::Scalar(scalar) => {
                         common_prim_initialized_in_all_variants &=
                             matches!(scalar, Scalar::Initialized { .. });
                         scalar.primitive()
@@ -934,7 +934,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 {
                     // We can use `ScalarPair` only when it matches our
                     // already computed layout (including `#[repr(C)]`).
-                    abi = pair.abi;
+                    abi = pair.backend_repr;
                 }
             }
         }
@@ -942,12 +942,14 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
        // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
        // variants to ensure they are consistent. This is because a downcast is
        // semantically a NOP, and thus should not affect layout.
-        if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        if matches!(abi, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
             for variant in &mut layout_variants {
                 // We only do this for variants with fields; the others are not accessed anyway.
                 // Also do not overwrite any already existing "clever" ABIs.
-                if variant.fields.count() > 0 && matches!(variant.abi, Abi::Aggregate { .. }) {
-                    variant.abi = abi;
+                if variant.fields.count() > 0
+                    && matches!(variant.backend_repr, BackendRepr::Memory { .. })
+                {
+                    variant.backend_repr = abi;
                     // Also need to bump up the size and alignment, so that the entire value fits
                     // in here.
                     variant.size = cmp::max(variant.size, size);
@@ -970,7 +972,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 memory_index: [0].into(),
             },
             largest_niche,
-            abi,
+            backend_repr: abi,
             align,
             size,
             max_repr_align,
@@ -1252,7 +1254,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         }
         let mut layout_of_single_non_zst_field = None;
         let sized = unsized_field.is_none();
-        let mut abi = Abi::Aggregate { sized };
+        let mut abi = BackendRepr::Memory { sized };

         let optimize_abi = !repr.inhibit_newtype_abi_optimization();

@@ -1270,16 +1272,16 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 // Field fills the struct and it has a scalar or scalar pair ABI.
                 if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
                 {
-                    match field.abi {
+                    match field.backend_repr {
                         // For plain scalars, or vectors of them, we can't unpack
                         // newtypes for `#[repr(C)]`, as that affects C ABIs.
-                        Abi::Scalar(_) | Abi::Vector { .. } if optimize_abi => {
-                            abi = field.abi;
+                        BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => {
+                            abi = field.backend_repr;
                         }
                         // But scalar pairs are Rust-specific and get
                         // treated as aggregates by C ABIs anyway.
-                        Abi::ScalarPair(..) => {
-                            abi = field.abi;
+                        BackendRepr::ScalarPair(..) => {
+                            abi = field.backend_repr;
                         }
                         _ => {}
                     }
@@ -1288,8 +1290,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {

             // Two non-ZST fields, and they're both scalars.
             (Some((i, a)), Some((j, b)), None) => {
-                match (a.abi, b.abi) {
-                    (Abi::Scalar(a), Abi::Scalar(b)) => {
+                match (a.backend_repr, b.backend_repr) {
+                    (BackendRepr::Scalar(a), BackendRepr::Scalar(b)) => {
                         // Order by the memory placement, not source order.
                         let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
                             ((i, a), (j, b))
@@ -1315,7 +1317,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                         {
                             // We can use `ScalarPair` only when it matches our
                             // already computed layout (including `#[repr(C)]`).
-                            abi = pair.abi;
+                            abi = pair.backend_repr;
                         }
                     }
                     _ => {}
@@ -1325,8 +1327,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
                 _ => {}
             }
         }
-        if fields.iter().any(|f| f.abi.is_uninhabited()) {
-            abi = Abi::Uninhabited;
+        if fields.iter().any(|f| f.is_uninhabited()) {
+            abi = BackendRepr::Uninhabited;
         }

         let unadjusted_abi_align = if repr.transparent() {
@@ -1344,7 +1346,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> {
         Ok(LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Arbitrary { offsets, memory_index },
-            abi,
+            backend_repr: abi,
             largest_niche,
             align,
             size,
@@ -83,8 +83,8 @@ impl<'a> Layout<'a> {
         &self.0.0.variants
     }

-    pub fn abi(self) -> Abi {
-        self.0.0.abi
+    pub fn backend_repr(self) -> BackendRepr {
+        self.0.0.backend_repr
     }

     pub fn largest_niche(self) -> Option<Niche> {
@@ -114,7 +114,7 @@ impl<'a> Layout<'a> {
     pub fn is_pointer_like(self, data_layout: &TargetDataLayout) -> bool {
         self.size() == data_layout.pointer_size
             && self.align().abi == data_layout.pointer_align.abi
-            && matches!(self.abi(), Abi::Scalar(Scalar::Initialized { .. }))
+            && matches!(self.backend_repr(), BackendRepr::Scalar(Scalar::Initialized { .. }))
     }
 }

@@ -196,9 +196,9 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
         Ty: TyAbiInterface<'a, C>,
         C: HasDataLayout,
     {
-        match self.abi {
-            Abi::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
-            Abi::Aggregate { .. } => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => matches!(scalar.primitive(), Float(F32 | F64)),
+            BackendRepr::Memory { .. } => {
                 if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 {
                     self.field(cx, 0).is_single_fp_element(cx)
                 } else {
@@ -1344,11 +1344,19 @@ impl AddressSpace {
     pub const DATA: Self = AddressSpace(0);
 }

-/// Describes how values of the type are passed by target ABIs,
-/// in terms of categories of C types there are ABI rules for.
+/// The way we represent values to the backend
+///
+/// Previously this was conflated with the "ABI" a type is given, as in the platform-specific ABI.
+/// In reality, this implies little about that, but is mostly used to describe the syntactic form
+/// emitted for the backend, as most backends handle SSA values and blobs of memory differently.
+/// The psABI may need consideration in doing so, but this enum does not constitute a promise for
+/// how the value will be lowered to the calling convention, in itself.
+///
+/// Generally, a codegen backend will prefer to handle smaller values as a scalar or short vector,
+/// and larger values will usually prefer to be represented as memory.
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 #[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
-pub enum Abi {
+pub enum BackendRepr {
     Uninhabited,
     Scalar(Scalar),
     ScalarPair(Scalar, Scalar),
@@ -1356,19 +1364,23 @@ pub enum Abi {
         element: Scalar,
         count: u64,
     },
-    Aggregate {
+    // FIXME: I sometimes use memory, sometimes use an IR aggregate!
+    Memory {
         /// If true, the size is exact, otherwise it's only a lower bound.
         sized: bool,
     },
 }

-impl Abi {
+impl BackendRepr {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
         match *self {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Aggregate { sized } => !sized,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::ScalarPair(..)
+            | BackendRepr::Vector { .. } => false,
+            BackendRepr::Memory { sized } => !sized,
         }
     }

@@ -1381,7 +1393,7 @@ impl Abi {
     #[inline]
     pub fn is_signed(&self) -> bool {
         match self {
-            Abi::Scalar(scal) => match scal.primitive() {
+            BackendRepr::Scalar(scal) => match scal.primitive() {
                 Primitive::Int(_, signed) => signed,
                 _ => false,
             },
@@ -1392,61 +1404,67 @@ impl Abi {
     /// Returns `true` if this is an uninhabited type
     #[inline]
     pub fn is_uninhabited(&self) -> bool {
-        matches!(*self, Abi::Uninhabited)
+        matches!(*self, BackendRepr::Uninhabited)
     }

     /// Returns `true` if this is a scalar type
     #[inline]
     pub fn is_scalar(&self) -> bool {
-        matches!(*self, Abi::Scalar(_))
+        matches!(*self, BackendRepr::Scalar(_))
     }

     /// Returns `true` if this is a bool
     #[inline]
     pub fn is_bool(&self) -> bool {
-        matches!(*self, Abi::Scalar(s) if s.is_bool())
+        matches!(*self, BackendRepr::Scalar(s) if s.is_bool())
     }

     /// Returns the fixed alignment of this ABI, if any is mandated.
     pub fn inherent_align<C: HasDataLayout>(&self, cx: &C) -> Option<AbiAndPrefAlign> {
         Some(match *self {
-            Abi::Scalar(s) => s.align(cx),
-            Abi::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
-            Abi::Vector { element, count } => {
+            BackendRepr::Scalar(s) => s.align(cx),
+            BackendRepr::ScalarPair(s1, s2) => s1.align(cx).max(s2.align(cx)),
+            BackendRepr::Vector { element, count } => {
                 cx.data_layout().vector_align(element.size(cx) * count)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }

     /// Returns the fixed size of this ABI, if any is mandated.
     pub fn inherent_size<C: HasDataLayout>(&self, cx: &C) -> Option<Size> {
         Some(match *self {
-            Abi::Scalar(s) => {
+            BackendRepr::Scalar(s) => {
                 // No padding in scalars.
                 s.size(cx)
             }
-            Abi::ScalarPair(s1, s2) => {
+            BackendRepr::ScalarPair(s1, s2) => {
                 // May have some padding between the pair.
                 let field2_offset = s1.size(cx).align_to(s2.align(cx).abi);
                 (field2_offset + s2.size(cx)).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Vector { element, count } => {
+            BackendRepr::Vector { element, count } => {
                 // No padding in vectors, except possibly for trailing padding
                 // to make the size a multiple of align (e.g. for vectors of size 3).
                 (element.size(cx) * count).align_to(self.inherent_align(cx)?.abi)
             }
-            Abi::Uninhabited | Abi::Aggregate { .. } => return None,
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => return None,
         })
     }

     /// Discard validity range information and allow undef.
     pub fn to_union(&self) -> Self {
         match *self {
-            Abi::Scalar(s) => Abi::Scalar(s.to_union()),
-            Abi::ScalarPair(s1, s2) => Abi::ScalarPair(s1.to_union(), s2.to_union()),
-            Abi::Vector { element, count } => Abi::Vector { element: element.to_union(), count },
-            Abi::Uninhabited | Abi::Aggregate { .. } => Abi::Aggregate { sized: true },
+            BackendRepr::Scalar(s) => BackendRepr::Scalar(s.to_union()),
+            BackendRepr::ScalarPair(s1, s2) => {
+                BackendRepr::ScalarPair(s1.to_union(), s2.to_union())
+            }
+            BackendRepr::Vector { element, count } => {
+                BackendRepr::Vector { element: element.to_union(), count }
+            }
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                BackendRepr::Memory { sized: true }
+            }
         }
     }

@@ -1454,12 +1472,12 @@ impl Abi {
         match (self, other) {
             // Scalar, Vector, ScalarPair have `Scalar` in them where we ignore validity ranges.
             // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x).
-            (Abi::Scalar(l), Abi::Scalar(r)) => l.primitive() == r.primitive(),
+            (BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(),
             (
-                Abi::Vector { element: element_l, count: count_l },
-                Abi::Vector { element: element_r, count: count_r },
+                BackendRepr::Vector { element: element_l, count: count_l },
+                BackendRepr::Vector { element: element_r, count: count_r },
             ) => element_l.primitive() == element_r.primitive() && count_l == count_r,
-            (Abi::ScalarPair(l1, l2), Abi::ScalarPair(r1, r2)) => {
+            (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => {
                 l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive()
             }
             // Everything else must be strictly identical.
@@ -1616,14 +1634,14 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
     /// must be taken into account.
     pub variants: Variants<FieldIdx, VariantIdx>,

-    /// The `abi` defines how this data is passed between functions, and it defines
-    /// value restrictions via `valid_range`.
+    /// The `backend_repr` defines how this data will be represented to the codegen backend,
+    /// and encodes value restrictions via `valid_range`.
     ///
     /// Note that this is entirely orthogonal to the recursive structure defined by
     /// `variants` and `fields`; for example, `ManuallyDrop<Result<isize, isize>>` has
-    /// `Abi::ScalarPair`! So, even with non-`Aggregate` `abi`, `fields` and `variants`
+    /// `IrForm::ScalarPair`! So, even with non-`Memory` `backend_repr`, `fields` and `variants`
     /// have to be taken into account to find all fields of this layout.
-    pub abi: Abi,
+    pub backend_repr: BackendRepr,

     /// The leaf scalar with the largest number of invalid values
     /// (i.e. outside of its `valid_range`), if it exists.
@@ -1646,15 +1664,15 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> {
 impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if this is an aggregate type (including a ScalarPair!)
     pub fn is_aggregate(&self) -> bool {
-        match self.abi {
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } => false,
-            Abi::ScalarPair(..) | Abi::Aggregate { .. } => true,
+        match self.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false,
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true,
         }
     }

     /// Returns `true` if this is an uninhabited type
     pub fn is_uninhabited(&self) -> bool {
-        self.abi.is_uninhabited()
+        self.backend_repr.is_uninhabited()
     }

     pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self {
@@ -1664,7 +1682,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         LayoutData {
             variants: Variants::Single { index: VariantIdx::new(0) },
             fields: FieldsShape::Primitive,
-            abi: Abi::Scalar(scalar),
+            backend_repr: BackendRepr::Scalar(scalar),
             largest_niche,
             size,
             align,
@@ -1686,7 +1704,7 @@ where
         let LayoutData {
             size,
             align,
-            abi,
+            backend_repr,
             fields,
             largest_niche,
             variants,
@@ -1696,7 +1714,7 @@ where
         f.debug_struct("Layout")
             .field("size", size)
             .field("align", align)
-            .field("abi", abi)
+            .field("abi", backend_repr)
             .field("fields", fields)
             .field("largest_niche", largest_niche)
             .field("variants", variants)
@@ -1732,12 +1750,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Returns `true` if the layout corresponds to an unsized type.
     #[inline]
     pub fn is_unsized(&self) -> bool {
-        self.abi.is_unsized()
+        self.backend_repr.is_unsized()
     }

     #[inline]
     pub fn is_sized(&self) -> bool {
-        self.abi.is_sized()
+        self.backend_repr.is_sized()
     }

     /// Returns `true` if the type is sized and a 1-ZST (meaning it has size 0 and alignment 1).
@@ -1750,10 +1768,12 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
     /// Note that this does *not* imply that the type is irrelevant for layout! It can still have
     /// non-trivial alignment constraints. You probably want to use `is_1zst` instead.
     pub fn is_zst(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } => false,
-            Abi::Uninhabited => self.size.bytes() == 0,
-            Abi::Aggregate { sized } => sized && self.size.bytes() == 0,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => {
+                false
+            }
+            BackendRepr::Uninhabited => self.size.bytes() == 0,
+            BackendRepr::Memory { sized } => sized && self.size.bytes() == 0,
         }
     }

@@ -1768,8 +1788,8 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> {
         // 2nd point is quite hard to check though.
         self.size == other.size
             && self.is_sized() == other.is_sized()
-            && self.abi.eq_up_to_validity(&other.abi)
-            && self.abi.is_bool() == other.abi.is_bool()
+            && self.backend_repr.eq_up_to_validity(&other.backend_repr)
+            && self.backend_repr.is_bool() == other.backend_repr.is_bool()
             && self.align.abi == other.align.abi
             && self.max_repr_align == other.max_repr_align
             && self.unadjusted_abi_align == other.unadjusted_abi_align
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 }
                 PassMode::Direct(attrs) => {
                     let i = apply(attrs);
-                    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                     }
                 }
@@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 PassMode::Pair(a, b) => {
                     let i = apply(a);
                     let ii = apply(b);
-                    if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                    if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                        arg.layout.backend_repr
+                    {
                         apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                         apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                     }
@@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
             && llvm_util::get_version() < (19, 0, 0)
-            && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+            && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
             && matches!(scalar.primitive(), Int(..))
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
            if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
|
&& s.primitive() == Primitive::Float(Float::F128) =>
|
||||||
{
|
{
|
||||||
|
@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
|
||||||
| X86InlineAsmRegClass::ymm_reg
|
| X86InlineAsmRegClass::ymm_reg
|
||||||
| X86InlineAsmRegClass::zmm_reg,
|
| X86InlineAsmRegClass::zmm_reg,
|
||||||
),
|
),
|
||||||
Abi::Scalar(s),
|
BackendRepr::Scalar(s),
|
||||||
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
|
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
|
||||||
(
|
(
|
||||||
X86(
|
X86(
|
||||||
|
@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
|
||||||
| X86InlineAsmRegClass::ymm_reg
|
| X86InlineAsmRegClass::ymm_reg
|
||||||
| X86InlineAsmRegClass::zmm_reg,
|
| X86InlineAsmRegClass::zmm_reg,
|
||||||
),
|
),
|
||||||
Abi::Vector { element, count: count @ (8 | 16) },
|
BackendRepr::Vector { element, count: count @ (8 | 16) },
|
||||||
) if element.primitive() == Primitive::Float(Float::F16) => {
|
) if element.primitive() == Primitive::Float(Float::F16) => {
|
||||||
cx.type_vector(cx.type_i16(), count)
|
cx.type_vector(cx.type_i16(), count)
|
||||||
}
|
}
|
||||||
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
|
(
|
||||||
|
Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
|
||||||
|
BackendRepr::Scalar(s),
|
||||||
|
) => {
|
||||||
if let Primitive::Int(Integer::I32, _) = s.primitive() {
|
if let Primitive::Int(Integer::I32, _) = s.primitive() {
|
||||||
cx.type_f32()
|
cx.type_f32()
|
||||||
} else {
|
} else {
|
||||||
|
@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
|
||||||
| ArmInlineAsmRegClass::dreg_low8
|
| ArmInlineAsmRegClass::dreg_low8
|
||||||
| ArmInlineAsmRegClass::dreg_low16,
|
| ArmInlineAsmRegClass::dreg_low16,
|
||||||
),
|
),
|
||||||
Abi::Scalar(s),
|
BackendRepr::Scalar(s),
|
||||||
) => {
|
) => {
|
||||||
if let Primitive::Int(Integer::I64, _) = s.primitive() {
|
if let Primitive::Int(Integer::I64, _) = s.primitive() {
|
||||||
cx.type_f64()
|
cx.type_f64()
|
||||||
|
@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
|
||||||
| ArmInlineAsmRegClass::qreg_low4
|
| ArmInlineAsmRegClass::qreg_low4
|
||||||
| ArmInlineAsmRegClass::qreg_low8,
|
| ArmInlineAsmRegClass::qreg_low8,
|
||||||
),
|
),
|
||||||
Abi::Vector { element, count: count @ (4 | 8) },
|
BackendRepr::Vector { element, count: count @ (4 | 8) },
|
||||||
) if element.primitive() == Primitive::Float(Float::F16) => {
|
) if element.primitive() == Primitive::Float(Float::F16) => {
|
||||||
cx.type_vector(cx.type_i16(), count)
|
cx.type_vector(cx.type_i16(), count)
|
||||||
}
|
}
|
||||||
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
|
(Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
|
||||||
match s.primitive() {
|
match s.primitive() {
|
||||||
// MIPS only supports register-length arithmetics.
|
// MIPS only supports register-length arithmetics.
|
||||||
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
|
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
|
||||||
|
@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
|
||||||
_ => layout.llvm_type(cx),
|
_ => layout.llvm_type(cx),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
|
(RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
|
||||||
if s.primitive() == Primitive::Float(Float::F16)
|
if s.primitive() == Primitive::Float(Float::F16)
|
||||||
&& !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
|
&& !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
|
||||||
{
|
{
|
||||||
|
|
|
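The two fixup functions above share one dispatch shape: every arm pattern-matches the pair of inline-asm register class and the value's `BackendRepr`, then rewrites the value (or its type) into whatever that register bank can actually hold. Below is a minimal, self-contained sketch of that shape; `RegClass`, `Repr`, `fixup_type`, and the returned type strings are stand-ins invented for illustration, not the real rustc/LLVM types.

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum RegClass {
    Vreg, // vector register, like `vreg` above
    Sreg, // scalar register, like `sreg` above
}

#[derive(Clone, Copy, Debug, PartialEq)]
enum Repr {
    Scalar { bits: u32 },
    Vector { elem_bits: u32, count: u64 },
    Memory,
}

// The decision is driven by the *pair* (register class, backend repr),
// exactly like the `match (reg, layout.backend_repr)` in the diff.
fn fixup_type(reg: RegClass, repr: Repr) -> String {
    match (reg, repr) {
        // e.g. an 8-bit scalar placed in a vector register is widened
        (RegClass::Vreg, Repr::Scalar { bits: 8 }) => "<8 x i8>".to_string(),
        (RegClass::Vreg, Repr::Vector { elem_bits, count }) => {
            format!("<{count} x i{elem_bits}>")
        }
        // scalar registers keep the scalar type as-is
        (RegClass::Sreg, Repr::Scalar { bits }) => format!("i{bits}"),
        // everything else keeps its default lowering
        _ => "default".to_string(),
    }
}

fn main() {
    assert_eq!(fixup_type(RegClass::Vreg, Repr::Scalar { bits: 8 }), "<8 x i8>");
    assert_eq!(fixup_type(RegClass::Sreg, Repr::Scalar { bits: 32 }), "i32");
}
```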
@@ -545,13 +545,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
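The hunk above keys the shape of a loaded operand off the layout's backend repr: a lone `Scalar` loads to one immediate (plus range metadata), a `ScalarPair` loads both halves separately at their aligned offsets, and memory-shaped values stay behind their pointer. A simplified sketch under those assumptions; the types and the fixed 8-byte pair offset are stand-ins for the real alignment computation.

```rust
#[derive(Clone, Copy)]
enum Repr {
    Scalar,
    ScalarPair,
    Memory,
}

enum OperandValue {
    Immediate(u64), // one SSA value
    Pair(u64, u64), // two SSA values, no memory round-trip
    Ref(usize),     // stays behind a pointer
}

fn load_operand(repr: Repr, addr: usize) -> OperandValue {
    match repr {
        Repr::Scalar => OperandValue::Immediate(load_word(addr)),
        Repr::ScalarPair => {
            // second half at the aligned offset (8 is illustrative only)
            OperandValue::Pair(load_word(addr), load_word(addr + 8))
        }
        Repr::Memory => OperandValue::Ref(addr),
    }
}

fn load_word(_addr: usize) -> u64 {
    0 // placeholder for an actual backend load
}

fn main() {
    // a scalar pair becomes two immediates, never a memory copy
    assert!(matches!(load_operand(Repr::ScalarPair, 0x1000), OperandValue::Pair(..)));
}
```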
@@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.abi {
+                match fn_abi.ret.layout.backend_repr {
-                    abi::Abi::Scalar(scalar) => {
+                    abi::BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }

             sym::raw_eq => {
-                use abi::Abi::*;
+                use abi::BackendRepr::*;
                 let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi() {
+                let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
-                    Aggregate { .. } => {
+                    Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
@@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         }

         let llret_ty = if ret_ty.is_simd()
-            && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+            && let abi::BackendRepr::Memory { .. } =
+                self.layout_of(ret_ty).layout.backend_repr
         {
             let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
             let elem_ll_ty = match elem_ty.kind() {
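The `raw_eq` hunk above chooses between an integer comparison and a memory comparison purely from the repr plus a size threshold. A hedged sketch of that decision follows; the fixed 128-bit threshold is an illustrative assumption standing in for the target-derived threshold the comment in the diff alludes to.

```rust
#[derive(Clone, Copy)]
enum Repr {
    Scalar,
    ScalarPair,
    Vector,
    Memory { sized: bool },
    Uninhabited,
}

fn use_integer_compare(repr: Repr, size_bits: u64) -> bool {
    match repr {
        // already one or two backend values: compare them directly
        Repr::Scalar | Repr::ScalarPair => true,
        Repr::Uninhabited | Repr::Vector => false,
        // small aggregates are passed as integers, so reuse that threshold
        // (shown here as a fixed 128 bits for illustration)
        Repr::Memory { .. } => size_bits <= 128,
    }
}

fn main() {
    assert!(use_integer_compare(Repr::Memory { sized: true }, 64));
    assert!(!use_integer_compare(Repr::Memory { sized: true }, 256));
}
```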
@@ -1,7 +1,7 @@
 use std::fmt::Write;

 use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
+    match layout.backend_repr {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }

     let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {

 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
+        match self.backend_repr {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }

     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
+        match self.backend_repr {
-            Abi::ScalarPair(..) => true,
+            BackendRepr::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }

@@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }

     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
+        match self.backend_repr {
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];
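`is_llvm_immediate` and `is_llvm_scalar_pair` above split the five repr variants into "one backend value", "two backend values", and "everything else". A condensed stand-in version of that partition, useful for checking that the two predicates never overlap:

```rust
#[derive(Clone, Copy)]
enum Repr {
    Scalar,
    ScalarPair,
    Vector,
    Memory,
    Uninhabited,
}

// Scalars and vectors lower to a single backend value.
fn is_immediate(r: Repr) -> bool {
    matches!(r, Repr::Scalar | Repr::Vector)
}

// Only a scalar pair lowers to exactly two backend values.
fn is_scalar_pair(r: Repr) -> bool {
    matches!(r, Repr::ScalarPair)
}

fn main() {
    // no repr is classified as both immediate and scalar pair
    for r in [Repr::Scalar, Repr::ScalarPair, Repr::Vector, Repr::Memory, Repr::Uninhabited] {
        assert!(!(is_immediate(r) && is_scalar_pair(r)));
    }
}
```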
@@ -1532,7 +1532,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
                 llval = bx.load(bx.backend_type(arg.layout), llval, align);
-                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, WrappingRange { start: 0, end: 1 });
                     }
@@ -1,8 +1,8 @@
+use rustc_abi::BackendRepr;
 use rustc_middle::mir::interpret::ErrorHandled;
 use rustc_middle::ty::layout::HasTyCtxt;
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::Abi;

 use super::FunctionCx;
 use crate::errors;
@@ -86,7 +86,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             .map(|field| {
                 if let Some(prim) = field.try_to_scalar() {
                     let layout = bx.layout_of(field_ty);
-                    let Abi::Scalar(scalar) = layout.abi else {
+                    let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                         bug!("from_const: invalid ByVal layout: {:#?}", layout);
                     };
                     bx.scalar_to_backend(prim, scalar, bx.immediate_backend_type(layout))
@@ -2,6 +2,7 @@ use std::collections::hash_map::Entry;
 use std::marker::PhantomData;
 use std::ops::Range;

+use rustc_abi::{BackendRepr, FieldIdx, FieldsShape, Size, VariantIdx};
 use rustc_data_structures::fx::FxHashMap;
 use rustc_index::IndexVec;
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
@@ -11,7 +12,6 @@ use rustc_middle::{bug, mir, ty};
 use rustc_session::config::DebugInfo;
 use rustc_span::symbol::{Symbol, kw};
 use rustc_span::{BytePos, Span, hygiene};
-use rustc_target::abi::{Abi, FieldIdx, FieldsShape, Size, VariantIdx};

 use super::operand::{OperandRef, OperandValue};
 use super::place::{PlaceRef, PlaceValue};
@@ -510,7 +510,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // be marked as a `LocalVariable` for MSVC debuggers to visualize
                 // their data correctly. (See #81894 & #88625)
                 let var_ty_layout = self.cx.layout_of(var_ty);
-                if let Abi::ScalarPair(_, _) = var_ty_layout.abi {
+                if let BackendRepr::ScalarPair(_, _) = var_ty_layout.backend_repr {
                     VariableKind::LocalVariable
                 } else {
                     VariableKind::ArgumentVariable(arg_index)
@@ -4,7 +4,7 @@ use std::fmt;
 use arrayvec::ArrayVec;
 use either::Either;
 use rustc_abi as abi;
-use rustc_abi::{Abi, Align, Size};
+use rustc_abi::{Align, BackendRepr, Size};
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{Pointer, Scalar, alloc_range};
 use rustc_middle::mir::{self, ConstValue};
@@ -163,7 +163,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {

         let val = match val {
             ConstValue::Scalar(x) => {
-                let Abi::Scalar(scalar) = layout.abi else {
+                let BackendRepr::Scalar(scalar) = layout.backend_repr else {
                     bug!("from_const: invalid ByVal layout: {:#?}", layout);
                 };
                 let llval = bx.scalar_to_backend(x, scalar, bx.immediate_backend_type(layout));
@@ -171,7 +171,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }
             ConstValue::ZeroSized => return OperandRef::zero_sized(layout),
             ConstValue::Slice { data, meta } => {
-                let Abi::ScalarPair(a_scalar, _) = layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, _) = layout.backend_repr else {
                     bug!("from_const: invalid ScalarPair layout: {:#?}", layout);
                 };
                 let a = Scalar::from_pointer(
@@ -221,14 +221,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         // case where some of the bytes are initialized and others are not. So, we need an extra
         // check that walks over the type of `mplace` to make sure it is truly correct to treat this
         // like a `Scalar` (or `ScalarPair`).
-        match layout.abi {
+        match layout.backend_repr {
-            Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
+            BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
                 let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout));
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
-            Abi::ScalarPair(
+            BackendRepr::ScalarPair(
                 a @ abi::Scalar::Initialized { .. },
                 b @ abi::Scalar::Initialized { .. },
             ) => {
@@ -322,7 +322,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         llval: V,
         layout: TyAndLayout<'tcx>,
     ) -> Self {
-        let val = if let Abi::ScalarPair(..) = layout.abi {
+        let val = if let BackendRepr::ScalarPair(..) = layout.backend_repr {
             debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}", llval, layout);

             // Deconstruct the immediate aggregate.
@@ -343,7 +343,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let field = self.layout.field(bx.cx(), i);
         let offset = self.layout.fields.offset(i);

-        let mut val = match (self.val, self.layout.abi) {
+        let mut val = match (self.val, self.layout.backend_repr) {
             // If the field is ZST, it has no data.
             _ if field.is_zst() => OperandValue::ZeroSized,

@@ -356,7 +356,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }

             // Extract a scalar component from a pair.
-            (OperandValue::Pair(a_llval, b_llval), Abi::ScalarPair(a, b)) => {
+            (OperandValue::Pair(a_llval, b_llval), BackendRepr::ScalarPair(a, b)) => {
                 if offset.bytes() == 0 {
                     assert_eq!(field.size, a.size(bx.cx()));
                     OperandValue::Immediate(a_llval)
@@ -368,30 +368,30 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             }

             // `#[repr(simd)]` types are also immediate.
-            (OperandValue::Immediate(llval), Abi::Vector { .. }) => {
+            (OperandValue::Immediate(llval), BackendRepr::Vector { .. }) => {
                 OperandValue::Immediate(bx.extract_element(llval, bx.cx().const_usize(i as u64)))
             }

             _ => bug!("OperandRef::extract_field({:?}): not applicable", self),
         };

-        match (&mut val, field.abi) {
+        match (&mut val, field.backend_repr) {
             (OperandValue::ZeroSized, _) => {}
             (
                 OperandValue::Immediate(llval),
-                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. },
+                BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. },
             ) => {
                 // Bools in union fields needs to be truncated.
                 *llval = bx.to_immediate(*llval, field);
             }
-            (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
+            (OperandValue::Pair(a, b), BackendRepr::ScalarPair(a_abi, b_abi)) => {
                 // Bools in union fields needs to be truncated.
                 *a = bx.to_immediate_scalar(*a, a_abi);
                 *b = bx.to_immediate_scalar(*b, b_abi);
             }
             // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
-            (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
+            (OperandValue::Immediate(llval), BackendRepr::Memory { sized: true }) => {
-                assert_matches!(self.layout.abi, Abi::Vector { .. });
+                assert_matches!(self.layout.backend_repr, BackendRepr::Vector { .. });

                 let llfield_ty = bx.cx().backend_type(field);

@@ -400,7 +400,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
                 bx.store(*llval, llptr, field.align.abi);
                 *llval = bx.load(llfield_ty, llptr, field.align.abi);
             }
-            (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
+            (
+                OperandValue::Immediate(_),
+                BackendRepr::Uninhabited | BackendRepr::Memory { sized: false },
+            ) => {
                 bug!()
             }
             (OperandValue::Pair(..), _) => bug!(),
@@ -494,7 +497,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
                 bx.store_with_flags(val, dest.val.llval, dest.val.align, flags);
             }
             OperandValue::Pair(a, b) => {
-                let Abi::ScalarPair(a_scalar, b_scalar) = dest.layout.abi else {
+                let BackendRepr::ScalarPair(a_scalar, b_scalar) = dest.layout.backend_repr else {
                     bug!("store_with_flags: invalid ScalarPair layout: {:#?}", dest.layout);
                 };
                 let b_offset = a_scalar.size(bx).align_to(b_scalar.align(bx).abi);
@@ -645,7 +648,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // However, some SIMD types do not actually use the vector ABI
                 // (in particular, packed SIMD types do not). Ensure we exclude those.
                 let layout = bx.layout_of(constant_ty);
-                if let Abi::Vector { .. } = layout.abi {
+                if let BackendRepr::Vector { .. } = layout.backend_repr {
                     let (llval, ty) = self.immediate_const_vector(bx, constant);
                     return OperandRef {
                         val: OperandValue::Immediate(llval),
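`extract_field` above accepts only a handful of (operand shape, field repr) combinations and treats everything else as a compiler bug. A toy version of that pairing logic, with stand-in types invented for illustration:

```rust
#[derive(Clone, Copy, Debug)]
enum Value {
    Immediate(u64),
    Pair(u64, u64),
    ZeroSized,
}

#[derive(Clone, Copy)]
enum Repr {
    Scalar,
    ScalarPair,
    Vector,
}

fn extract_field(val: Value, field_repr: Repr, offset_is_zero: bool) -> Value {
    match (val, field_repr) {
        // zero-sized fields carry no data regardless of the parent shape
        (Value::ZeroSized, _) => Value::ZeroSized,
        // pulling one scalar out of a pair picks a half by offset
        (Value::Pair(a, b), Repr::Scalar) => {
            Value::Immediate(if offset_is_zero { a } else { b })
        }
        // vector parents hand out elements but stay immediate
        (Value::Immediate(v), Repr::Vector) => Value::Immediate(v),
        // anything else is a layout invariant violation
        _ => panic!("extract_field: not applicable"),
    }
}

fn main() {
    assert!(matches!(extract_field(Value::Pair(1, 2), Repr::Scalar, false), Value::Immediate(2)));
}
```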
@@ -1136,17 +1136,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValueKind::ZeroSized
         } else if self.cx.is_backend_immediate(layout) {
             assert!(!self.cx.is_backend_scalar_pair(layout));
-            OperandValueKind::Immediate(match layout.abi {
+            OperandValueKind::Immediate(match layout.backend_repr {
-                abi::Abi::Scalar(s) => s,
+                abi::BackendRepr::Scalar(s) => s,
-                abi::Abi::Vector { element, .. } => element,
+                abi::BackendRepr::Vector { element, .. } => element,
                 x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"),
             })
         } else if self.cx.is_backend_scalar_pair(layout) {
-            let abi::Abi::ScalarPair(s1, s2) = layout.abi else {
+            let abi::BackendRepr::ScalarPair(s1, s2) = layout.backend_repr else {
                 span_bug!(
                     self.mir.span,
                     "Couldn't translate {:?} as backend scalar pair",
-                    layout.abi,
+                    layout.backend_repr,
                 );
             };
             OperandValueKind::Pair(s1, s2)
@@ -1,13 +1,13 @@
 use std::assert_matches::assert_matches;
 use std::ops::Deref;

+use rustc_abi::{Align, BackendRepr, Scalar, Size, WrappingRange};
 use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{Instance, Ty};
 use rustc_session::config::OptLevel;
 use rustc_span::Span;
 use rustc_target::abi::call::FnAbi;
-use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};

 use super::abi::AbiBuilderMethods;
 use super::asm::AsmBuilderMethods;
@@ -162,7 +162,7 @@ pub trait BuilderMethods<'a, 'tcx>:

     fn from_immediate(&mut self, val: Self::Value) -> Self::Value;
     fn to_immediate(&mut self, val: Self::Value, layout: TyAndLayout<'_>) -> Self::Value {
-        if let Abi::Scalar(scalar) = layout.abi {
+        if let BackendRepr::Scalar(scalar) = layout.backend_repr {
             self.to_immediate_scalar(val, scalar)
         } else {
             val
         }
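`to_immediate` above is a provided trait method: it narrows a value through `to_immediate_scalar` only when the layout is a single scalar, and passes everything else through untouched. A self-contained sketch of that default-method pattern; `Builder`, `Layout`, and `Repr` here are invented stand-ins, not the real codegen traits.

```rust
#[derive(Clone, Copy)]
enum Repr {
    Scalar { is_bool: bool },
    Other,
}

struct Layout {
    backend_repr: Repr,
}

trait Builder {
    type Value: Copy;
    fn to_immediate_scalar(&mut self, val: Self::Value, is_bool: bool) -> Self::Value;

    // default method: dispatch on the layout's backend repr, as in the diff
    fn to_immediate(&mut self, val: Self::Value, layout: &Layout) -> Self::Value {
        if let Repr::Scalar { is_bool } = layout.backend_repr {
            self.to_immediate_scalar(val, is_bool)
        } else {
            val
        }
    }
}

struct SimpleBuilder;

impl Builder for SimpleBuilder {
    type Value = u64;
    fn to_immediate_scalar(&mut self, val: u64, is_bool: bool) -> u64 {
        // bools are truncated to their 0/1 range when materialized
        if is_bool { val & 1 } else { val }
    }
}

fn main() {
    let mut bx = SimpleBuilder;
    let layout = Layout { backend_repr: Repr::Scalar { is_bool: true } };
    assert_eq!(bx.to_immediate(3, &layout), 1);
}
```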
@@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
         interp_ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
                 // Types can differ, e.g. fn ptrs with different `for`.
-                assert_eq!(left.layout.abi, right.layout.abi);
+                assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
                 let size = ecx.pointer_size();
                 // Just compare the bits. ScalarPairs are compared lexicographically.
                 // We thus always compare pairs and simply fill scalars up with 0.
@@ -1,6 +1,7 @@
 use std::sync::atomic::Ordering::Relaxed;

 use either::{Left, Right};
+use rustc_abi::{self as abi, BackendRepr};
 use rustc_hir::def::DefKind;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{self, Abi};
 use tracing::{debug, instrument, trace};

 use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
     // type (it's used throughout the compiler and having it work just on literals is not enough)
     // and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
     // from its byte-serialized form).
-    let force_as_immediate = match op.layout.abi {
+    let force_as_immediate = match op.layout.backend_repr {
-        Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
+        BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
         // We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
         // input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
         // not have to generate any duplicate allocations (we preserve the original `AllocId` in
@ -1,10 +1,10 @@
|
||||||
|
use rustc_abi::{BackendRepr, VariantIdx};
|
||||||
use rustc_data_structures::stack::ensure_sufficient_stack;
|
use rustc_data_structures::stack::ensure_sufficient_stack;
|
||||||
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
|
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
|
||||||
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
|
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
|
||||||
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
|
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
|
||||||
use rustc_middle::{bug, mir};
|
use rustc_middle::{bug, mir};
|
||||||
use rustc_span::DUMMY_SP;
|
use rustc_span::DUMMY_SP;
|
||||||
use rustc_target::abi::{Abi, VariantIdx};
|
|
||||||
use tracing::{debug, instrument, trace};
|
use tracing::{debug, instrument, trace};
|
||||||
|
|
||||||
use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
|
use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
|
||||||
|
@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
|
||||||
let val = ecx.read_immediate(place).unwrap();
|
let val = ecx.read_immediate(place).unwrap();
|
||||||
// We could allow wide raw pointers where both sides are integers in the future,
|
// We could allow wide raw pointers where both sides are integers in the future,
|
||||||
// but for now we reject them.
|
// but for now we reject them.
|
||||||
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
|
if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
|
||||||
return Err(ValTreeCreationError::NonSupportedType(ty));
|
return Err(ValTreeCreationError::NonSupportedType(ty));
|
||||||
}
|
}
|
||||||
let val = val.to_scalar();
|
let val = val.to_scalar();
|
||||||
|
@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
|
||||||
// Fast path to avoid some allocations.
|
// Fast path to avoid some allocations.
|
||||||
return mir::ConstValue::ZeroSized;
|
return mir::ConstValue::ZeroSized;
|
||||||
}
|
}
|
||||||
if layout.abi.is_scalar()
|
if layout.backend_repr.is_scalar()
|
||||||
&& (matches!(ty.kind(), ty::Tuple(_))
|
&& (matches!(ty.kind(), ty::Tuple(_))
|
||||||
|| matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
|
|| matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
|
||||||
{
|
{
|
||||||
|
|
|
@@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // must be compatible. So we just accept everything with Pointer ABI as compatible,
         // even if this will accept some code that is not stably guaranteed to work.
         // This also handles function pointers.
-        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
+        let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
-            abi::Abi::Scalar(s) => match s.primitive() {
+            abi::BackendRepr::Scalar(s) => match s.primitive() {
                 abi::Primitive::Pointer(addr_space) => Some(addr_space),
                 _ => None,
             },
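The `thin_pointer` closure above treats a layout as pointer-compatible exactly when its repr is a single scalar whose primitive is a pointer; wide pointers are scalar pairs and fall through to `None`. A stand-in sketch of that check:

```rust
#[derive(Clone, Copy)]
enum Primitive {
    Int,
    Float,
    Pointer { addr_space: u32 },
}

#[derive(Clone, Copy)]
enum Repr {
    Scalar(Primitive),
    ScalarPair(Primitive, Primitive),
    Memory,
}

// Returns the address space when the layout is a thin pointer, else None.
fn thin_pointer(repr: Repr) -> Option<u32> {
    match repr {
        Repr::Scalar(Primitive::Pointer { addr_space }) => Some(addr_space),
        // wide pointers are ScalarPair and deliberately do not qualify
        _ => None,
    }
}

fn main() {
    assert_eq!(thin_pointer(Repr::Scalar(Primitive::Pointer { addr_space: 0 })), Some(0));
    assert_eq!(thin_pointer(Repr::Memory), None);
}
```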
@@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         cast_ty: Ty<'tcx>,
     ) -> InterpResult<'tcx, Scalar<M::Provenance>> {
         // Let's make sure v is sign-extended *if* it has a signed type.
-        let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
+        let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.

         let v = match src_layout.ty.kind() {
             Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,
@@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
         // Read tag and sanity-check `tag_layout`.
         let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
         assert_eq!(tag_layout.size, tag_val.layout.size);
-        assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
+        assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
         trace!("tag value: {}", tag_val);

         // Figure out which discriminant and variant this corresponds to.
@@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
             self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
         interp_ok(if overflowed.to_bool()? {
             let size = l.layout.size;
-            if l.layout.abi.is_signed() {
+            if l.layout.backend_repr.is_signed() {
                 // For signed ints the saturated value depends on the sign of the first
                 // term since the sign of the second term can be inferred from this and
                 // the fact that the operation has overflowed (if either is 0 no
@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
|
||||||
|
|
||||||
use either::{Either, Left, Right};
|
use either::{Either, Left, Right};
|
||||||
use rustc_abi as abi;
|
use rustc_abi as abi;
|
||||||
use rustc_abi::{Abi, HasDataLayout, Size};
|
use rustc_abi::{BackendRepr, HasDataLayout, Size};
|
||||||
use rustc_hir::def::Namespace;
|
use rustc_hir::def::Namespace;
|
||||||
use rustc_middle::mir::interpret::ScalarSizeMismatch;
|
use rustc_middle::mir::interpret::ScalarSizeMismatch;
|
||||||
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
|
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
|
||||||
|
@ -114,9 +114,9 @@ impl<Prov: Provenance> Immediate<Prov> {
|
||||||
}
|
}
|
||||||
|
|
||||||
/// Assert that this immediate is a valid value for the given ABI.
|
/// Assert that this immediate is a valid value for the given ABI.
|
||||||
pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
|
pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
|
||||||
match (self, abi) {
|
match (self, abi) {
|
||||||
(Immediate::Scalar(scalar), Abi::Scalar(s)) => {
|
(Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
|
||||||
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
|
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
|
||||||
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
|
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
|
||||||
// This is not a pointer, it should not carry provenance.
|
// This is not a pointer, it should not carry provenance.
|
||||||
|
@ -126,7 +126,7 @@ impl<Prov: Provenance> Immediate<Prov> {
|
||||||
);
|
);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
|
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
|
||||||
assert_eq!(
|
assert_eq!(
|
||||||
a_val.size(),
|
a_val.size(),
|
||||||
a.size(cx),
|
a.size(cx),
|
||||||
|
@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
|
||||||
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
||||||
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
|
debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
|
||||||
debug_assert_eq!(val.size(), layout.size);
|
debug_assert_eq!(val.size(), layout.size);
|
||||||
ImmTy { imm: val.into(), layout }
|
ImmTy { imm: val.into(), layout }
|
||||||
}
|
}
|
||||||
|
@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
#[inline]
|
#[inline]
|
||||||
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
matches!(layout.abi, Abi::ScalarPair(..)),
|
matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
|
||||||
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
|
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
|
||||||
);
|
);
|
||||||
let imm = Immediate::ScalarPair(a, b);
|
let imm = Immediate::ScalarPair(a, b);
|
||||||
|
@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
|
||||||
// Without a `cx` we cannot call `assert_matches_abi`.
|
// Without a `cx` we cannot call `assert_matches_abi`.
|
||||||
debug_assert!(
|
debug_assert!(
|
||||||
match (imm, layout.abi) {
|
match (imm, layout.backend_repr) {
|
||||||
(Immediate::Scalar(..), Abi::Scalar(..)) => true,
|
(Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
|
||||||
(Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
|
(Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
|
||||||
(Immediate::Uninit, _) if layout.is_sized() => true,
|
(Immediate::Uninit, _) if layout.is_sized() => true,
|
||||||
_ => false,
|
_ => false,
|
||||||
},
|
},
|
||||||
|
@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
|
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
|
||||||
// Verify that the input matches its type.
|
// Verify that the input matches its type.
|
||||||
if cfg!(debug_assertions) {
|
if cfg!(debug_assertions) {
|
||||||
self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
|
self.assert_matches_abi(
|
||||||
|
self.layout.backend_repr,
|
||||||
|
"invalid input to Immediate::offset",
|
||||||
|
cx,
|
||||||
|
);
|
||||||
}
|
}
|
||||||
// `ImmTy` have already been checked to be in-bounds, so we can just check directly if this
|
// `ImmTy` have already been checked to be in-bounds, so we can just check directly if this
|
||||||
// remains in-bounds. This cannot actually be violated since projections are type-checked
|
// remains in-bounds. This cannot actually be violated since projections are type-checked
|
||||||
|
@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
);
|
);
|
||||||
// This makes several assumptions about what layouts we will encounter; we match what
|
// This makes several assumptions about what layouts we will encounter; we match what
|
||||||
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
|
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
|
||||||
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
|
let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
|
||||||
// If the entire value is uninit, then so is the field (can happen in ConstProp).
|
// If the entire value is uninit, then so is the field (can happen in ConstProp).
|
||||||
(Immediate::Uninit, _) => Immediate::Uninit,
|
(Immediate::Uninit, _) => Immediate::Uninit,
|
||||||
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
|
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
|
||||||
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
|
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
|
||||||
// has an `Uninhabited` variant, which means this case is possible.
|
// has an `Uninhabited` variant, which means this case is possible.
|
||||||
_ if layout.abi.is_uninhabited() => Immediate::Uninit,
|
_ if layout.is_uninhabited() => Immediate::Uninit,
|
||||||
// the field contains no information, can be left uninit
|
// the field contains no information, can be left uninit
|
||||||
// (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
|
// (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
|
||||||
_ if layout.is_zst() => Immediate::Uninit,
|
_ if layout.is_zst() => Immediate::Uninit,
|
||||||
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
|
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
|
||||||
// to detect those here and also give them no data
|
// to detect those here and also give them no data
|
||||||
_ if matches!(layout.abi, Abi::Aggregate { .. })
|
_ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
|
||||||
&& matches!(layout.variants, abi::Variants::Single { .. })
|
&& matches!(layout.variants, abi::Variants::Single { .. })
|
||||||
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
|
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
|
||||||
{
|
{
|
||||||
|
@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
**self
|
**self
|
||||||
}
|
}
|
||||||
// extract fields from types with `ScalarPair` ABI
|
// extract fields from types with `ScalarPair` ABI
|
||||||
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
|
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
|
||||||
Immediate::from(if offset.bytes() == 0 {
|
Immediate::from(if offset.bytes() == 0 {
|
||||||
a_val
|
a_val
|
||||||
} else {
|
} else {
|
||||||
|
@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
|
||||||
),
|
),
|
||||||
};
|
};
|
||||||
// Ensure the new layout matches the new value.
|
// Ensure the new layout matches the new value.
|
||||||
inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
|
inner_val.assert_matches_abi(
|
||||||
|
layout.backend_repr,
|
||||||
|
"invalid field type in Immediate::offset",
|
||||||
|
cx,
|
||||||
|
);
|
||||||
|
|
||||||
ImmTy::from_immediate(inner_val, layout)
|
ImmTy::from_immediate(inner_val, layout)
|
||||||
}
|
}
|
||||||
|
@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
// case where some of the bytes are initialized and others are not. So, we need an extra
|
// case where some of the bytes are initialized and others are not. So, we need an extra
|
||||||
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
|
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
|
||||||
// like a `Scalar` (or `ScalarPair`).
|
// like a `Scalar` (or `ScalarPair`).
|
||||||
interp_ok(match mplace.layout.abi {
|
interp_ok(match mplace.layout.backend_repr {
|
||||||
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
|
BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
|
||||||
let size = s.size(self);
|
let size = s.size(self);
|
||||||
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
|
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
|
||||||
let scalar = alloc.read_scalar(
|
let scalar = alloc.read_scalar(
|
||||||
|
@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
)?;
|
)?;
|
||||||
Some(ImmTy::from_scalar(scalar, mplace.layout))
|
Some(ImmTy::from_scalar(scalar, mplace.layout))
|
||||||
}
|
}
|
||||||
Abi::ScalarPair(
|
BackendRepr::ScalarPair(
|
||||||
abi::Scalar::Initialized { value: a, .. },
|
abi::Scalar::Initialized { value: a, .. },
|
||||||
abi::Scalar::Initialized { value: b, .. },
|
abi::Scalar::Initialized { value: b, .. },
|
||||||
) => {
|
) => {
|
||||||
|
@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
op: &impl Projectable<'tcx, M::Provenance>,
|
op: &impl Projectable<'tcx, M::Provenance>,
|
||||||
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
|
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
|
||||||
if !matches!(
|
if !matches!(
|
||||||
op.layout().abi,
|
op.layout().backend_repr,
|
||||||
Abi::Scalar(abi::Scalar::Initialized { .. })
|
BackendRepr::Scalar(abi::Scalar::Initialized { .. })
|
||||||
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
|
| BackendRepr::ScalarPair(
|
||||||
|
abi::Scalar::Initialized { .. },
|
||||||
|
abi::Scalar::Initialized { .. }
|
||||||
|
)
|
||||||
) {
|
) {
|
||||||
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
|
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
|
||||||
}
|
}
|
||||||
|
|
|
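The `assert_matches_abi` hunks above pair each immediate shape with the repr it is allowed to inhabit: one scalar value against `Scalar`, two against `ScalarPair`, and nothing else. A condensed sketch of that validity check, with stand-in types:

```rust
#[derive(Clone, Copy)]
enum Immediate {
    Scalar(u64),
    ScalarPair(u64, u64),
    Uninit,
}

#[derive(Clone, Copy)]
enum Repr {
    Scalar,
    ScalarPair,
    Memory,
}

// Mirrors the shape of the debug assertion in `from_immediate` above:
// an immediate is valid only for the repr whose arity it matches.
fn matches_repr(imm: Immediate, repr: Repr) -> bool {
    match (imm, repr) {
        (Immediate::Scalar(_), Repr::Scalar) => true,
        (Immediate::ScalarPair(..), Repr::ScalarPair) => true,
        // uninit is permitted for any sized layout
        (Immediate::Uninit, _) => true,
        _ => false,
    }
}

fn main() {
    assert!(matches_repr(Immediate::Scalar(1), Repr::Scalar));
    assert!(!matches_repr(Immediate::Scalar(1), Repr::ScalarPair));
}
```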
@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
let l_bits = left.layout.size.bits();
|
let l_bits = left.layout.size.bits();
|
||||||
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
|
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
|
||||||
// the one MIR operator that does *not* directly map to a single LLVM operation.)
|
// the one MIR operator that does *not* directly map to a single LLVM operation.)
|
||||||
let (shift_amount, overflow) = if right.layout.abi.is_signed() {
|
let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
|
||||||
let shift_amount = r_signed();
|
let shift_amount = r_signed();
|
||||||
let rem = shift_amount.rem_euclid(l_bits.into());
|
let rem = shift_amount.rem_euclid(l_bits.into());
|
||||||
// `rem` is guaranteed positive, so the `unwrap` cannot fail
|
// `rem` is guaranteed positive, so the `unwrap` cannot fail
|
||||||
|
@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
};
|
};
|
||||||
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
|
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
|
||||||
// Compute the shifted result.
|
// Compute the shifted result.
|
||||||
let result = if left.layout.abi.is_signed() {
|
let result = if left.layout.backend_repr.is_signed() {
|
||||||
let l = l_signed();
|
let l = l_signed();
|
||||||
let result = match bin_op {
|
let result = match bin_op {
|
||||||
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
|
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
|
||||||
|
@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
|
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
|
||||||
throw_ub!(ShiftOverflow {
|
throw_ub!(ShiftOverflow {
|
||||||
intrinsic,
|
intrinsic,
|
||||||
shift_amount: if right.layout.abi.is_signed() {
|
shift_amount: if right.layout.backend_repr.is_signed() {
|
||||||
Either::Right(r_signed())
|
Either::Right(r_signed())
|
||||||
} else {
|
} else {
|
||||||
Either::Left(r_unsigned())
|
Either::Left(r_unsigned())
|
||||||
|
@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
let size = left.layout.size;
|
let size = left.layout.size;
|
||||||
|
|
||||||
// Operations that need special treatment for signed integers
|
// Operations that need special treatment for signed integers
|
||||||
if left.layout.abi.is_signed() {
|
if left.layout.backend_repr.is_signed() {
|
||||||
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
|
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
|
||||||
Lt => Some(i128::lt),
|
Lt => Some(i128::lt),
|
||||||
Le => Some(i128::le),
|
Le => Some(i128::le),
|
||||||
|
@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
BitXor => ImmTy::from_uint(l ^ r, left.layout),
|
BitXor => ImmTy::from_uint(l ^ r, left.layout),
|
||||||
|
|
||||||
_ => {
|
_ => {
|
||||||
assert!(!left.layout.abi.is_signed());
|
assert!(!left.layout.backend_repr.is_signed());
|
||||||
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
|
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
|
||||||
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
|
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
|
||||||
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
|
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
|
||||||
|
@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||||
}
|
}
|
||||||
|
|
||||||
let offset_bytes = val.to_target_isize(self)?;
|
let offset_bytes = val.to_target_isize(self)?;
|
||||||
if !right.layout.abi.is_signed() && offset_bytes < 0 {
|
if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
|
||||||
// We were supposed to do an unsigned offset but the result is negative -- this
|
// We were supposed to do an unsigned offset but the result is negative -- this
|
||||||
// can only mean that the cast wrapped around.
|
// can only mean that the cast wrapped around.
|
||||||
throw_ub!(PointerArithOverflow)
|
throw_ub!(PointerArithOverflow)
|
||||||
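The shift hunks above only touch the `abi` → `backend_repr` accessor, but the surrounding logic they sit in is worth restating: the shift amount is reduced modulo the LHS bit-width with `rem_euclid`, so a signed negative RHS still lands in `0..bits`, and an out-of-range request is flagged as overflow. A standalone sketch under that reading (names are illustrative, not the interpreter's API):

```rust
fn normalize_shift_amount(shift: i128, bits: u32) -> (u32, bool) {
    // Reduce modulo the LHS bit-width; `rem_euclid` keeps the result in
    // `0..bits` even for a negative shift amount.
    let rem = shift.rem_euclid(i128::from(bits));
    let amount = u32::try_from(rem).unwrap(); // in range, cannot fail
    // The shift "overflowed" whenever the requested amount was not
    // already in `0..bits`.
    let overflow = shift < 0 || shift >= i128::from(bits);
    (amount, overflow)
}

fn main() {
    assert_eq!(normalize_shift_amount(3, 64), (3, false));
    assert_eq!(normalize_shift_amount(65, 64), (1, true)); // wraps around
    assert_eq!(normalize_shift_amount(-1, 64), (63, true)); // negative wraps too
}
```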

@@ -5,11 +5,11 @@
 use std::assert_matches::assert_matches;

 use either::{Either, Left, Right};
+use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
 use rustc_ast::Mutability;
 use rustc_middle::ty::Ty;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::{bug, mir, span_bug};
-use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
 use tracing::{instrument, trace};

 use super::{
@@ -659,7 +659,7 @@ where
             // Unfortunately this is too expensive to do in release builds.
             if cfg!(debug_assertions) {
                 src.assert_matches_abi(
-                    local_layout.abi,
+                    local_layout.backend_repr,
                     "invalid immediate for given destination place",
                     self,
                 );
@@ -683,7 +683,11 @@ where
     ) -> InterpResult<'tcx> {
         // We use the sizes from `value` below.
         // Ensure that matches the type of the place it is written to.
-        value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
+        value.assert_matches_abi(
+            layout.backend_repr,
+            "invalid immediate for given destination place",
+            self,
+        );
         // Note that it is really important that the type here is the right one, and matches the
         // type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
         // to handle padding properly, which is only correct if we never look at this data with the
@@ -700,7 +704,7 @@ where
                 alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
             }
             Immediate::ScalarPair(a_val, b_val) => {
-                let Abi::ScalarPair(a, b) = layout.abi else {
+                let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
                     span_bug!(
                         self.cur_span(),
                         "write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",

@@ -11,6 +11,10 @@ use std::num::NonZero;

 use either::{Left, Right};
 use hir::def::DefKind;
+use rustc_abi::{
+    BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
+    WrappingRange,
+};
 use rustc_ast::Mutability;
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir as hir;
@@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
 use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
 use rustc_span::symbol::{Symbol, sym};
-use rustc_target::abi::{
-    Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
-};
 use tracing::trace;

 use super::machine::AllocMap;
@@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
                 // Reset provenance: ensure slice tail metadata does not preserve provenance,
                 // and ensure all pointers do not preserve partial provenance.
                 if self.reset_provenance_and_padding {
-                    if matches!(imm.layout.abi, Abi::Scalar(..)) {
+                    if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
                         // A thin pointer. If it has provenance, we don't have to do anything.
                         // If it does not, ensure we clear the provenance in memory.
                         if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
         let elem = layout.field(cx, 0);

         // Fast-path for large arrays of simple types that do not contain any padding.
-        if elem.abi.is_scalar() {
+        if elem.backend_repr.is_scalar() {
             out.add_range(base_offset, elem.size * count);
         } else {
             for idx in 0..count {
@@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
         // FIXME: We could avoid some redundant checks here. For newtypes wrapping
         // scalars, we do the same check on every "level" (e.g., first we check
         // MyNewtype and then the scalar in there).
-        match val.layout.abi {
-            Abi::Uninhabited => {
+        match val.layout.backend_repr {
+            BackendRepr::Uninhabited => {
                 let ty = val.layout.ty;
                 throw_validation_failure!(self.path, UninhabitedVal { ty });
             }
-            Abi::Scalar(scalar_layout) => {
+            BackendRepr::Scalar(scalar_layout) => {
                 if !scalar_layout.is_uninit_valid() {
                     // There is something to check here.
                     let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
                     self.visit_scalar(scalar, scalar_layout)?;
                 }
             }
-            Abi::ScalarPair(a_layout, b_layout) => {
+            BackendRepr::ScalarPair(a_layout, b_layout) => {
                 // We can only proceed if *both* scalars need to be initialized.
                 // FIXME: find a way to also check ScalarPair when one side can be uninit but
                 // the other must be init.
@@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
                     self.visit_scalar(b, b_layout)?;
                 }
             }
-            Abi::Vector { .. } => {
+            BackendRepr::Vector { .. } => {
                 // No checks here, we assume layout computation gets this right.
                 // (This is harder to check since Miri does not represent these as `Immediate`. We
                 // also cannot use field projections since this might be a newtype around a vector.)
             }
-            Abi::Aggregate { .. } => {
+            BackendRepr::Memory { .. } => {
                 // Nothing to do.
             }
         }

@@ -1,9 +1,9 @@
+use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{
     HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
 };
 use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
-use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};

 use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
 use crate::interpret::{InterpCx, MemoryKind};
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
     };

     // Check the ABI.
-    let valid = match this.abi {
-        Abi::Uninhabited => false, // definitely UB
-        Abi::Scalar(s) => scalar_allows_raw_init(s),
-        Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
-        Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
-        Abi::Aggregate { .. } => true, // Fields are checked below.
+    let valid = match this.backend_repr {
+        BackendRepr::Uninhabited => false, // definitely UB
+        BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
+        BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
+        BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
+        BackendRepr::Memory { .. } => true, // Fields are checked below.
     };
     if !valid {
         // This is definitely not okay.
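The `check_validity_requirement_lax` hunk just above shows how each representation answers the "does this bit pattern plausibly allow raw init?" question. A self-contained sketch of that dispatch, with a plain boolean standing in for the per-scalar check (the real `scalar_allows_raw_init` inspects the scalar's valid range; all names here are simplified stand-ins):

```rust
#[allow(dead_code)]
enum BackendRepr {
    Uninhabited,
    Scalar(bool),         // stand-in: whether the scalar allows raw init
    ScalarPair(bool, bool),
    Vector { element: bool, count: u64 },
    Memory { sized: bool },
}

fn allows_raw_init(repr: &BackendRepr) -> bool {
    match repr {
        BackendRepr::Uninhabited => false, // definitely UB
        BackendRepr::Scalar(s) => *s,
        BackendRepr::ScalarPair(s1, s2) => *s1 && *s2,
        BackendRepr::Vector { element, count } => *count == 0 || *element,
        BackendRepr::Memory { .. } => true, // fields are checked separately
    }
}

fn main() {
    assert!(!allows_raw_init(&BackendRepr::Uninhabited));
    assert!(allows_raw_init(&BackendRepr::Vector { element: false, count: 0 }));
    assert!(allows_raw_init(&BackendRepr::Memory { sized: true }));
}
```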

@@ -16,6 +16,7 @@
 use std::fmt::Write;

 use ast::token::TokenKind;
+use rustc_abi::BackendRepr;
 use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_ast::visit::{FnCtxt, FnKind};
 use rustc_ast::{self as ast, *};
@@ -40,7 +41,6 @@ use rustc_span::edition::Edition;
 use rustc_span::source_map::Spanned;
 use rustc_span::symbol::{Ident, Symbol, kw, sym};
 use rustc_span::{BytePos, InnerSpan, Span};
-use rustc_target::abi::Abi;
 use rustc_target::asm::InlineAsmArch;
 use rustc_trait_selection::infer::{InferCtxtExt, TyCtxtInferExt};
 use rustc_trait_selection::traits::misc::type_allowed_to_implement_copy;
@@ -2466,7 +2466,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidValue {

         // Check if this ADT has a constrained layout (like `NonNull` and friends).
         if let Ok(layout) = cx.tcx.layout_of(cx.param_env.and(ty)) {
-            if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &layout.abi {
+            if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                &layout.backend_repr
+            {
                 let range = scalar.valid_range(cx);
                 let msg = if !range.contains(0) {
                     "must be non-null"

@@ -217,7 +217,7 @@ fn structurally_same_type<'tcx>(
                 // `extern` blocks cannot be generic, so we'll always get a layout here.
                 let a_layout = tcx.layout_of(param_env.and(a)).unwrap();
                 let b_layout = tcx.layout_of(param_env.and(b)).unwrap();
-                assert_eq!(a_layout.abi, b_layout.abi);
+                assert_eq!(a_layout.backend_repr, b_layout.backend_repr);
                 assert_eq!(a_layout.size, b_layout.size);
                 assert_eq!(a_layout.align, b_layout.align);
             }

@@ -1,6 +1,7 @@
 use std::iter;
 use std::ops::ControlFlow;

+use rustc_abi::{BackendRepr, TagEncoding, Variants, WrappingRange};
 use rustc_data_structures::fx::FxHashSet;
 use rustc_errors::DiagMessage;
 use rustc_hir::{Expr, ExprKind};
@@ -13,7 +14,6 @@ use rustc_session::{declare_lint, declare_lint_pass, impl_lint_pass};
 use rustc_span::def_id::LocalDefId;
 use rustc_span::symbol::sym;
 use rustc_span::{Span, Symbol, source_map};
-use rustc_target::abi::{Abi, TagEncoding, Variants, WrappingRange};
 use rustc_target::spec::abi::Abi as SpecAbi;
 use tracing::debug;
 use {rustc_ast as ast, rustc_hir as hir};
@@ -776,8 +776,8 @@ pub(crate) fn repr_nullable_ptr<'tcx>(
                 bug!("should be able to compute the layout of non-polymorphic type");
             }

-            let field_ty_abi = &field_ty_layout.ok()?.abi;
-            if let Abi::Scalar(field_ty_scalar) = field_ty_abi {
+            let field_ty_abi = &field_ty_layout.ok()?.backend_repr;
+            if let BackendRepr::Scalar(field_ty_scalar) = field_ty_abi {
                 match field_ty_scalar.valid_range(&tcx) {
                     WrappingRange { start: 0, end }
                         if end == field_ty_scalar.size(&tcx).unsigned_int_max() - 1 =>

@@ -4,8 +4,9 @@ use std::{cmp, fmt};

 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AddressSpace, Align, FieldsShape, HasDataLayout, Integer, LayoutCalculator, LayoutData,
-    PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, Variants,
+    AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutCalculator,
+    LayoutData, PointeeInfo, PointerKind, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout,
+    Variants,
 };
 use rustc_error_messages::DiagMessage;
 use rustc_errors::{
@@ -757,7 +758,7 @@ where
                 Some(fields) => FieldsShape::Union(fields),
                 None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() },
             },
-            abi: Abi::Uninhabited,
+            backend_repr: BackendRepr::Uninhabited,
             largest_niche: None,
             align: tcx.data_layout.i8_align,
             size: Size::ZERO,

@@ -13,7 +13,7 @@ use rustc_middle::ty::util::IntTypeExt;
 use rustc_middle::ty::{self, Ty, UpvarArgs};
 use rustc_span::source_map::Spanned;
 use rustc_span::{DUMMY_SP, Span};
-use rustc_target::abi::{Abi, FieldIdx, Primitive};
+use rustc_target::abi::{BackendRepr, FieldIdx, Primitive};
 use tracing::debug;

 use crate::build::expr::as_place::PlaceBase;
@@ -207,7 +207,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
                 );
                 let (op, ty) = (Operand::Move(discr), discr_ty);

-                if let Abi::Scalar(scalar) = layout.unwrap().abi
+                if let BackendRepr::Scalar(scalar) = layout.unwrap().backend_repr
                     && !scalar.is_always_valid(&this.tcx)
                     && let Primitive::Int(int_width, _signed) = scalar.primitive()
                 {

@@ -858,7 +858,7 @@ impl<'tcx> Map<'tcx> {
         // Allocate a value slot if it doesn't have one, and the user requested one.
         assert!(place_info.value_index.is_none());
         if let Ok(layout) = tcx.layout_of(param_env.and(place_info.ty))
-            && layout.abi.is_scalar()
+            && layout.backend_repr.is_scalar()
         {
             place_info.value_index = Some(self.value_count.into());
             self.value_count += 1;

@@ -2,6 +2,7 @@
 //!
 //! Currently, this pass only propagates scalar values.

+use rustc_abi::{BackendRepr, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
@@ -20,7 +21,6 @@ use rustc_mir_dataflow::value_analysis::{
 };
 use rustc_mir_dataflow::{Analysis, Results, ResultsVisitor};
 use rustc_span::DUMMY_SP;
-use rustc_target::abi::{Abi, FIRST_VARIANT, FieldIdx, Size, VariantIdx};
 use tracing::{debug, debug_span, instrument};

 // These constants are somewhat random guesses and have not been optimized.
@@ -457,7 +457,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             // a pair and sometimes not. But as a hack we always return a pair
             // and just make the 2nd component `Bottom` when it does not exist.
             Some(val) => {
-                if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+                if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
                     let (val, overflow) = val.to_scalar_pair();
                     (FlatSet::Elem(val), FlatSet::Elem(overflow))
                 } else {
@@ -470,7 +470,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
             // Exactly one side is known, attempt some algebraic simplifications.
             (FlatSet::Elem(const_arg), _) | (_, FlatSet::Elem(const_arg)) => {
                 let layout = const_arg.layout;
-                if !matches!(layout.abi, rustc_target::abi::Abi::Scalar(..)) {
+                if !matches!(layout.backend_repr, rustc_target::abi::BackendRepr::Scalar(..)) {
                     return (FlatSet::Top, FlatSet::Top);
                 }

@@ -589,13 +589,13 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
     }

     let place = map.find(place.as_ref())?;
-    if layout.abi.is_scalar()
+    if layout.backend_repr.is_scalar()
         && let Some(value) = propagatable_scalar(place, state, map)
     {
         return Some(Const::Val(ConstValue::Scalar(value), ty));
     }

-    if matches!(layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+    if matches!(layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
         let alloc_id = ecx
             .intern_with_temp_alloc(layout, |ecx, dest| {
                 try_write_constant(ecx, dest, place, ty, state, map)
@@ -641,7 +641,7 @@ fn try_write_constant<'tcx>(
     }

     // Fast path for scalars.
-    if layout.abi.is_scalar()
+    if layout.backend_repr.is_scalar()
         && let Some(value) = propagatable_scalar(place, state, map)
     {
         return ecx.write_immediate(Immediate::Scalar(value), dest);

@@ -85,6 +85,7 @@
 use std::borrow::Cow;

 use either::Either;
+use rustc_abi::{self as abi, BackendRepr, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, Immediate, InterpCx, MemPlaceMeta, MemoryKind, OpTy, Projectable, Scalar,
@@ -103,7 +104,6 @@ use rustc_middle::ty::layout::{HasParamEnv, LayoutOf};
 use rustc_middle::ty::{self, Ty, TyCtxt};
 use rustc_span::DUMMY_SP;
 use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Abi, FIRST_VARIANT, FieldIdx, Primitive, Size, VariantIdx};
 use smallvec::SmallVec;
 use tracing::{debug, instrument, trace};

@@ -427,7 +427,10 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
             };
             let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
             ImmTy::from_immediate(ptr_imm, ty).into()
-        } else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+        } else if matches!(
+            ty.backend_repr,
+            BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)
+        ) {
             let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
             let variant_dest = if let Some(variant) = variant {
                 self.ecx.project_downcast(&dest, variant).discard_err()?
@@ -573,12 +576,12 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
         // limited transmutes: it only works between types with the same layout, and
         // cannot transmute pointers to integers.
         if value.as_mplace_or_imm().is_right() {
-            let can_transmute = match (value.layout.abi, to.abi) {
-                (Abi::Scalar(s1), Abi::Scalar(s2)) => {
+            let can_transmute = match (value.layout.backend_repr, to.backend_repr) {
+                (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => {
                     s1.size(&self.ecx) == s2.size(&self.ecx)
                         && !matches!(s1.primitive(), Primitive::Pointer(..))
                 }
-                (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+                (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
                     a1.size(&self.ecx) == a2.size(&self.ecx) &&
                         b1.size(&self.ecx) == b2.size(&self.ecx) &&
                         // The alignment of the second component determines its offset, so that also needs to match.
@@ -1241,7 +1244,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {

         let as_bits = |value| {
             let constant = self.evaluated[value].as_ref()?;
-            if layout.abi.is_scalar() {
+            if layout.backend_repr.is_scalar() {
                 let scalar = self.ecx.read_scalar(constant).discard_err()?;
                 scalar.to_bits(constant.layout.size).discard_err()
             } else {
@@ -1497,12 +1500,12 @@ fn op_to_prop_const<'tcx>(

     // Do not synthetize too large constants. Codegen will just memcpy them, which we'd like to
     // avoid.
-    if !matches!(op.layout.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+    if !matches!(op.layout.backend_repr, BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..)) {
         return None;
     }

     // If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
-    if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
+    if let BackendRepr::Scalar(abi::Scalar::Initialized { .. }) = op.layout.backend_repr
         && let Some(scalar) = ecx.read_scalar(op).discard_err()
     {
         if !scalar.try_to_scalar_int().is_ok() {

@@ -4,6 +4,7 @@

 use std::fmt::Debug;

+use rustc_abi::{BackendRepr, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
     ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
@@ -19,7 +20,6 @@ use rustc_middle::mir::*;
 use rustc_middle::ty::layout::{LayoutError, LayoutOf, LayoutOfHelpers, TyAndLayout};
 use rustc_middle::ty::{self, ConstInt, ParamEnv, ScalarInt, Ty, TyCtxt, TypeVisitableExt};
 use rustc_span::Span;
-use rustc_target::abi::{Abi, FieldIdx, HasDataLayout, Size, TargetDataLayout, VariantIdx};
 use tracing::{debug, instrument, trace};

 use crate::errors::{AssertLint, AssertLintKind};
@@ -557,7 +557,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let right = self.use_ecx(|this| this.ecx.read_immediate(&right))?;

         let val = self.use_ecx(|this| this.ecx.binary_op(bin_op, &left, &right))?;
-        if matches!(val.layout.abi, Abi::ScalarPair(..)) {
+        if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
             // FIXME `Value` should properly support pairs in `Immediate`... but currently
             // it does not.
             let (val, overflow) = val.to_pair(&self.ecx);
@@ -651,9 +651,9 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let to = self.ecx.layout_of(to).ok()?;
         // `offset` for immediates only supports scalar/scalar-pair ABIs,
         // so bail out if the target is not one.
-        match (value.layout.abi, to.abi) {
-            (Abi::Scalar(..), Abi::Scalar(..)) => {}
-            (Abi::ScalarPair(..), Abi::ScalarPair(..)) => {}
+        match (value.layout.backend_repr, to.backend_repr) {
+            (BackendRepr::Scalar(..), BackendRepr::Scalar(..)) => {}
+            (BackendRepr::ScalarPair(..), BackendRepr::ScalarPair(..)) => {}
             _ => return None,
         }

@@ -81,8 +81,12 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) {
     let meta_items = attr.meta_item_list().unwrap_or_default();
     for meta_item in meta_items {
         match meta_item.name_or_empty() {
+            // FIXME: this never was about ABI and now this dump arg is confusing
             sym::abi => {
-                tcx.dcx().emit_err(LayoutAbi { span, abi: format!("{:?}", ty_layout.abi) });
+                tcx.dcx().emit_err(LayoutAbi {
+                    span,
+                    abi: format!("{:?}", ty_layout.backend_repr),
+                });
             }

             sym::align => {

@@ -56,7 +56,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::LayoutData<rustc_abi::FieldIdx, rustc_abi
         LayoutShape {
             fields: self.fields.stable(tables),
             variants: self.variants.stable(tables),
-            abi: self.abi.stable(tables),
+            abi: self.backend_repr.stable(tables),
             abi_align: self.align.abi.stable(tables),
             size: self.size.stable(tables),
         }
@@ -196,20 +196,20 @@ impl<'tcx> Stable<'tcx> for rustc_abi::TagEncoding<rustc_abi::VariantIdx> {
     }
 }

-impl<'tcx> Stable<'tcx> for rustc_abi::Abi {
+impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr {
     type T = ValueAbi;

     fn stable(&self, tables: &mut Tables<'_>) -> Self::T {
         match *self {
-            rustc_abi::Abi::Uninhabited => ValueAbi::Uninhabited,
-            rustc_abi::Abi::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
-            rustc_abi::Abi::ScalarPair(first, second) => {
+            rustc_abi::BackendRepr::Uninhabited => ValueAbi::Uninhabited,
+            rustc_abi::BackendRepr::Scalar(scalar) => ValueAbi::Scalar(scalar.stable(tables)),
+            rustc_abi::BackendRepr::ScalarPair(first, second) => {
                 ValueAbi::ScalarPair(first.stable(tables), second.stable(tables))
             }
-            rustc_abi::Abi::Vector { element, count } => {
+            rustc_abi::BackendRepr::Vector { element, count } => {
                 ValueAbi::Vector { element: element.stable(tables), count }
             }
-            rustc_abi::Abi::Aggregate { sized } => ValueAbi::Aggregate { sized },
+            rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
         }
     }
 }
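Note the asymmetry the `Stable` impl above preserves: internally the variant is now `BackendRepr::Memory`, but the stable-MIR type keeps its old `ValueAbi::Aggregate` name, so downstream consumers are unaffected. A minimal sketch of such a one-directional mapping (payloads elided; both enums here are simplified stand-ins, not the real definitions):

```rust
#[allow(dead_code)]
enum BackendRepr { Uninhabited, Scalar, ScalarPair, Vector, Memory { sized: bool } }
#[allow(dead_code)]
enum ValueAbi { Uninhabited, Scalar, ScalarPair, Vector, Aggregate { sized: bool } }

fn stable(repr: BackendRepr) -> ValueAbi {
    match repr {
        BackendRepr::Uninhabited => ValueAbi::Uninhabited,
        BackendRepr::Scalar => ValueAbi::Scalar,
        BackendRepr::ScalarPair => ValueAbi::ScalarPair,
        BackendRepr::Vector => ValueAbi::Vector,
        // The internal rename does not leak into the stable interface.
        BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized },
    }
}

fn main() {
    let v = stable(BackendRepr::Memory { sized: true });
    assert!(matches!(v, ValueAbi::Aggregate { sized: true }));
}
```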

@@ -1,5 +1,7 @@
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+use crate::abi::{
+    self, BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout,
+};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;

@@ -21,8 +23,8 @@ enum FloatConv {
 struct CannotUseFpConv;

 fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -38,8 +40,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -77,8 +79,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -311,7 +313,7 @@ fn classify_arg<'a, Ty, C>(
 }

 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {

@@ -5,7 +5,7 @@ use crate::abi::{self, HasDataLayout, Size, TyAbiInterface};

 fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
     // Always sign extend u32 values on 64-bit mips
-    if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+    if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, signed) = scalar.primitive() {
             if !signed && i.size().bits() == 32 {
                 if let PassMode::Direct(ref mut attrs) = arg.mode {
@@ -24,8 +24,8 @@ where
     Ty: TyAbiInterface<'a, C> + Copy,
     C: HasDataLayout,
 {
-    match ret.layout.field(cx, i).abi {
-        abi::Abi::Scalar(scalar) => match scalar.primitive() {
+    match ret.layout.field(cx, i).backend_repr {
+        abi::BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Float(abi::F32) => Some(Reg::f32()),
             abi::Float(abi::F64) => Some(Reg::f64()),
             _ => None,
@@ -109,7 +109,7 @@ where
         let offset = arg.layout.fields.offset(i);

         // We only care about aligned doubles
-        if let abi::Abi::Scalar(scalar) = field.abi {
+        if let abi::BackendRepr::Scalar(scalar) = field.backend_repr {
             if scalar.primitive() == abi::Float(abi::F64) {
                 if offset.is_aligned(dl.f64_align.abi) {
                     // Insert enough integers to cover [last_offset, offset)

@@ -6,7 +6,8 @@ use rustc_macros::HashStable_Generic;
 use rustc_span::Symbol;

 use crate::abi::{
-    self, Abi, AddressSpace, Align, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout,
+    self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface,
+    TyAndLayout,
 };
 use crate::spec::abi::Abi as SpecAbi;
 use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi};
@@ -350,15 +351,17 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         layout: TyAndLayout<'a, Ty>,
         scalar_attrs: impl Fn(&TyAndLayout<'a, Ty>, abi::Scalar, Size) -> ArgAttributes,
     ) -> Self {
-        let mode = match layout.abi {
-            Abi::Uninhabited => PassMode::Ignore,
-            Abi::Scalar(scalar) => PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO)),
-            Abi::ScalarPair(a, b) => PassMode::Pair(
+        let mode = match layout.backend_repr {
+            BackendRepr::Uninhabited => PassMode::Ignore,
+            BackendRepr::Scalar(scalar) => {
+                PassMode::Direct(scalar_attrs(&layout, scalar, Size::ZERO))
+            }
+            BackendRepr::ScalarPair(a, b) => PassMode::Pair(
                 scalar_attrs(&layout, a, Size::ZERO),
                 scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)),
             ),
-            Abi::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
-            Abi::Aggregate { .. } => Self::indirect_pass_mode(&layout),
+            BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()),
+            BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout),
         };
         ArgAbi { layout, mode }
     }
@@ -460,7 +463,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {

     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Abi::Scalar(scalar) = self.layout.abi {
+        if let BackendRepr::Scalar(scalar) = self.layout.backend_repr {
             if let abi::Int(i, signed) = scalar.primitive() {
                 if i.size().bits() < bits {
                     if let PassMode::Direct(ref mut attrs) = self.mode {
@@ -512,7 +515,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
         // That elevates any type difference to an ABI difference since we just use the
         // full Rust type as the LLVM argument/return type.
         if matches!(self.mode, PassMode::Direct(..))
-            && matches!(self.layout.abi, Abi::Aggregate { .. })
+            && matches!(self.layout.backend_repr, BackendRepr::Memory { .. })
         {
             // For aggregates in `Direct` mode to be compatible, the types need to be equal.
             self.layout.ty == other.layout.ty
@@ -791,8 +794,8 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 continue;
             }

-            match arg.layout.abi {
-                Abi::Aggregate { .. } => {}
+            match arg.layout.backend_repr {
+                BackendRepr::Memory { .. } => {}

                 // This is a fun case! The gist of what this is doing is
                 // that we want callers and callees to always agree on the
@@ -813,7 +816,9 @@ impl<'a, Ty> FnAbi<'a, Ty> {
                 // Note that the intrinsic ABI is exempt here as
                 // that's how we connect up to LLVM and it's unstable
                 // anyway, we control all calls to it in libstd.
-                Abi::Vector { .. } if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => {
+                BackendRepr::Vector { .. }
+                    if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect =>
+                {
                     arg.make_indirect();
                     continue;
                 }
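The `ArgAbi::new` hunk above is the one place where the representation is turned into a calling-convention decision: scalars and vectors pass directly, pairs pass as two pieces, and `Memory` values go indirect. A simplified sketch of that selection (stand-in types with payloads and attributes elided; the real code also computes per-scalar argument attributes):

```rust
#[allow(dead_code)]
#[derive(Debug)]
enum BackendRepr { Uninhabited, Scalar, ScalarPair, Vector, Memory { sized: bool } }

#[derive(Debug, PartialEq)]
enum PassMode { Ignore, Direct, Pair, Indirect }

fn pass_mode(repr: &BackendRepr) -> PassMode {
    match repr {
        BackendRepr::Uninhabited => PassMode::Ignore,
        BackendRepr::Scalar => PassMode::Direct,
        BackendRepr::ScalarPair => PassMode::Pair,
        BackendRepr::Vector => PassMode::Direct,
        // "Memory" representation means the value is passed via a pointer.
        BackendRepr::Memory { .. } => PassMode::Indirect,
    }
}

fn main() {
    assert_eq!(pass_mode(&BackendRepr::Memory { sized: true }), PassMode::Indirect);
    assert_eq!(pass_mode(&BackendRepr::Scalar), PassMode::Direct);
}
```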

@@ -4,8 +4,10 @@
 // Reference: Clang RISC-V ELF psABI lowering code
 // https://github.com/llvm/llvm-project/blob/8e780252a7284be45cf1ba224cabd884847e8e92/clang/lib/CodeGen/TargetInfo.cpp#L9311-L9773

+use rustc_abi::{BackendRepr, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, ArgExtension, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
-use crate::abi::{self, Abi, FieldsShape, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;

@@ -27,8 +29,8 @@ enum FloatConv {
 struct CannotUseFpConv;

 fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }
@@ -44,8 +46,8 @@ fn should_use_fp_conv_helper<'a, Ty, C>(
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match arg_layout.abi {
-        Abi::Scalar(scalar) => match scalar.primitive() {
+    match arg_layout.backend_repr {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
@@ -83,8 +85,8 @@ where
                 }
             }
         },
-        Abi::Vector { .. } | Abi::Uninhabited => return Err(CannotUseFpConv),
-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => match arg_layout.fields {
+        BackendRepr::Vector { .. } | BackendRepr::Uninhabited => return Err(CannotUseFpConv),
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields {
             FieldsShape::Primitive => {
                 unreachable!("aggregates can't have `FieldsShape::Primitive`")
             }
@@ -317,7 +319,7 @@ fn classify_arg<'a, Ty, C>(
 }

 fn extend_integer_width<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
-    if let Abi::Scalar(scalar) = arg.layout.abi {
+    if let BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
         if let abi::Int(i, _) = scalar.primitive() {
             // 32-bit integers are always sign-extended
             if i.size().bits() == 32 && xlen > 32 {

@@ -109,11 +109,11 @@ where
         return data;
     }

-    match layout.abi {
-        abi::Abi::Scalar(scalar) => {
+    match layout.backend_repr {
+        abi::BackendRepr::Scalar(scalar) => {
             data = arg_scalar(cx, &scalar, offset, data);
         }
-        abi::Abi::Aggregate { .. } => {
+        abi::BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if offset < layout.fields.offset(i) {
                     offset = layout.fields.offset(i);
@@ -122,7 +122,7 @@ where
             }
         }
         _ => {
-            if let abi::Abi::ScalarPair(scalar1, scalar2) = &layout.abi {
+            if let abi::BackendRepr::ScalarPair(scalar1, scalar2) = &layout.backend_repr {
                 data = arg_scalar_pair(cx, scalar1, scalar2, offset, data);
             }
         }

@@ -1,6 +1,6 @@
 use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
 use crate::abi::{
-    Abi, AddressSpace, Align, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
+    AddressSpace, Align, BackendRepr, Float, HasDataLayout, Pointer, TyAbiInterface, TyAndLayout,
 };
 use crate::spec::HasTargetSpec;
 use crate::spec::abi::Abi as SpecAbi;
@@ -105,10 +105,12 @@ where
 where
     Ty: TyAbiInterface<'a, C> + Copy,
 {
-    match layout.abi {
-        Abi::Uninhabited | Abi::Scalar(_) | Abi::ScalarPair(..) => false,
-        Abi::Vector { .. } => true,
-        Abi::Aggregate { .. } => {
+    match layout.backend_repr {
+        BackendRepr::Uninhabited
+        | BackendRepr::Scalar(_)
+        | BackendRepr::ScalarPair(..) => false,
+        BackendRepr::Vector { .. } => true,
+        BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 if contains_vector(cx, layout.field(cx, i)) {
                     return true;
@@ -223,9 +225,9 @@ where
         // Intrinsics themselves are not actual "real" functions, so theres no need to change their ABIs.
         && abi != SpecAbi::RustIntrinsic
     {
-        let has_float = match fn_abi.ret.layout.abi {
-            Abi::Scalar(s) => matches!(s.primitive(), Float(_)),
-            Abi::ScalarPair(s1, s2) => {
+        let has_float = match fn_abi.ret.layout.backend_repr {
+            BackendRepr::Scalar(s) => matches!(s.primitive(), Float(_)),
+            BackendRepr::ScalarPair(s1, s2) => {
                 matches!(s1.primitive(), Float(_)) || matches!(s2.primitive(), Float(_))
             }
             _ => false, // anyway not passed via registers on x86

@@ -1,8 +1,10 @@
 // The classification code for the x86_64 ABI is taken from the clay language
 // https://github.com/jckarter/clay/blob/db0bd2702ab0b6e48965cd85f8859bbd5f60e48e/compiler/externals.cpp

+use rustc_abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface, TyAndLayout};
+
+use crate::abi;
 use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
-use crate::abi::{self, Abi, HasDataLayout, Size, TyAbiInterface, TyAndLayout};

 /// Classification of "eightbyte" components.
 // N.B., the order of the variants is from general to specific,
@@ -46,17 +48,17 @@ where
         return Ok(());
     }

-    let mut c = match layout.abi {
-        Abi::Uninhabited => return Ok(()),
+    let mut c = match layout.backend_repr {
+        BackendRepr::Uninhabited => return Ok(()),

-        Abi::Scalar(scalar) => match scalar.primitive() {
+        BackendRepr::Scalar(scalar) => match scalar.primitive() {
             abi::Int(..) | abi::Pointer(_) => Class::Int,
             abi::Float(_) => Class::Sse,
         },

-        Abi::Vector { .. } => Class::Sse,
+        BackendRepr::Vector { .. } => Class::Sse,

-        Abi::ScalarPair(..) | Abi::Aggregate { .. } => {
+        BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => {
             for i in 0..layout.fields.count() {
                 let field_off = off + layout.fields.offset(i);
                 classify(cx, layout.field(cx, i), cls, field_off)?;
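The x86_64 hunk above recurses over fields for `ScalarPair`/`Memory` and assigns an eightbyte class per primitive. A toy version of just the per-scalar classification step (the enums and function name are invented for illustration; the real code works with `rustc_abi` primitives and offsets):

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
enum Class { Int, Sse }

#[allow(dead_code)]
enum Primitive { Int, Pointer, Float }

// The per-scalar step of the SysV classification: integers and pointers
// go to general-purpose registers, floats to SSE registers.
fn classify_scalar(p: &Primitive) -> Class {
    match p {
        Primitive::Int | Primitive::Pointer => Class::Int,
        Primitive::Float => Class::Sse,
    }
}

fn main() {
    assert_eq!(classify_scalar(&Primitive::Float), Class::Sse);
    assert_eq!(classify_scalar(&Primitive::Pointer), Class::Int);
}
```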

@@ -1,25 +1,28 @@
+use rustc_abi::{BackendRepr, Float, Primitive};
+
 use crate::abi::call::{ArgAbi, FnAbi, Reg};
-use crate::abi::{Abi, Float, Primitive};
 use crate::spec::HasTargetSpec;

 // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing

 pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) {
     let fixup = |a: &mut ArgAbi<'_, Ty>| {
-        match a.layout.abi {
-            Abi::Uninhabited | Abi::Aggregate { sized: false } => {}
-            Abi::ScalarPair(..) | Abi::Aggregate { sized: true } => match a.layout.size.bits() {
-                8 => a.cast_to(Reg::i8()),
-                16 => a.cast_to(Reg::i16()),
-                32 => a.cast_to(Reg::i32()),
-                64 => a.cast_to(Reg::i64()),
-                _ => a.make_indirect(),
-            },
-            Abi::Vector { .. } => {
+        match a.layout.backend_repr {
+            BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {}
+            BackendRepr::ScalarPair(..) | BackendRepr::Memory { sized: true } => {
+                match a.layout.size.bits() {
+                    8 => a.cast_to(Reg::i8()),
+                    16 => a.cast_to(Reg::i16()),
+                    32 => a.cast_to(Reg::i32()),
+                    64 => a.cast_to(Reg::i64()),
+                    _ => a.make_indirect(),
+                }
+            }
+            BackendRepr::Vector { .. } => {
                 // FIXME(eddyb) there should be a size cap here
                 // (probably what clang calls "illegal vectors").
             }
-            Abi::Scalar(scalar) => {
+            BackendRepr::Scalar(scalar) => {
                 // Match what LLVM does for `f128` so that `compiler-builtins` builtins match up
                 // with what LLVM expects.
                 if a.layout.size.bytes() > 8
|
|
|
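
The restructured match above implements the Win64 rule that an aggregate travels in a general-purpose register only when its size is exactly a register width. A minimal sketch of that decision, with hypothetical names (`Pass`, `win64_aggregate_pass_mode`) standing in for rustc's ArgAbi machinery:

#[derive(Debug, PartialEq)]
enum Pass {
    // Cast to an integer register of the given width.
    InReg { bits: u64 },
    // Passed by pointer to a caller-owned copy.
    Indirect,
}

fn win64_aggregate_pass_mode(size_in_bits: u64) -> Pass {
    match size_in_bits {
        8 | 16 | 32 | 64 => Pass::InReg { bits: size_in_bits },
        _ => Pass::Indirect,
    }
}

fn main() {
    // A 4-byte struct travels in a 32-bit register...
    assert_eq!(win64_aggregate_pass_mode(32), Pass::InReg { bits: 32 });
    // ...but a 24-byte (192-bit) struct is passed indirectly.
    assert_eq!(win64_aggregate_pass_mode(192), Pass::Indirect);
}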

@@ -6,7 +6,7 @@
 //! Section 2.3 from the Xtensa programmers guide.

 use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
-use crate::abi::{Abi, HasDataLayout, Size, TyAbiInterface};
+use crate::abi::{BackendRepr, HasDataLayout, Size, TyAbiInterface};
 use crate::spec::HasTargetSpec;

 const NUM_ARG_GPRS: u64 = 6;

@@ -114,8 +114,8 @@ where
 }

 fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool {
-    match arg.layout.abi {
-        Abi::Vector { .. } => true,
+    match arg.layout.backend_repr {
+        BackendRepr::Vector { .. } => true,
         _ => arg.layout.is_aggregate(),
     }
 }

@@ -18,7 +18,7 @@ use rustc_middle::ty::{
 };
 use rustc_span::Span;
 use rustc_span::symbol::Symbol;
-use rustc_target::abi::Abi;
+use rustc_target::abi::BackendRepr;
 use smallvec::SmallVec;
 use tracing::{debug, instrument};


@@ -523,8 +523,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:

     // e.g., `Rc<()>`
     let unit_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, tcx.types.unit, method_def_id);
-    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.abi) {
-        Ok(Abi::Scalar(..)) => (),
+    match tcx.layout_of(param_env.and(unit_receiver_ty)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::Scalar(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),

@@ -538,8 +538,8 @@ fn check_receiver_correct<'tcx>(tcx: TyCtxt<'tcx>, trait_def_id: DefId, method:
     // e.g., `Rc<dyn Trait>`
     let trait_object_receiver =
         receiver_for_self_ty(tcx, receiver_ty, trait_object_ty, method_def_id);
-    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.abi) {
-        Ok(Abi::ScalarPair(..)) => (),
+    match tcx.layout_of(param_env.and(trait_object_receiver)).map(|l| l.backend_repr) {
+        Ok(BackendRepr::ScalarPair(..)) => (),
         abi => {
             tcx.dcx().span_delayed_bug(
                 tcx.def_span(method_def_id),
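
The two layout checks above encode the difference between thin and fat receivers, which stable Rust can observe directly. A small runnable illustration of why `Rc<()>` is expected to be `Scalar` while `Rc<dyn Trait>` is expected to be `ScalarPair`:

use std::mem::size_of;

trait Trait {}

fn main() {
    let word = size_of::<usize>();
    // A pointer to a sized type is one word: a single Scalar.
    assert_eq!(size_of::<*const ()>(), word);
    // A pointer to a trait object also carries a vtable pointer:
    // data pointer + vtable pointer, i.e. a ScalarPair.
    assert_eq!(size_of::<*const dyn Trait>(), 2 * word);
}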

@@ -1,7 +1,7 @@
 use std::iter;

 use rustc_abi::Primitive::Pointer;
-use rustc_abi::{Abi, PointerKind, Scalar, Size};
+use rustc_abi::{BackendRepr, PointerKind, Scalar, Size};
 use rustc_hir as hir;
 use rustc_hir::lang_items::LangItem;
 use rustc_middle::bug;

@@ -469,7 +469,7 @@ fn fn_abi_sanity_check<'tcx>(
             // careful. Scalar/ScalarPair is fine, since backends will generally use
             // `layout.abi` and ignore everything else. We should just reject `Aggregate`
             // entirely here, but some targets need to be fixed first.
-            if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+            if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
                 // For an unsized type we'd only pass the sized prefix, so there is no universe
                 // in which we ever want to allow this.
                 assert!(

@@ -500,7 +500,7 @@ fn fn_abi_sanity_check<'tcx>(
             // Similar to `Direct`, we need to make sure that backends use `layout.abi` and
             // ignore the rest of the layout.
             assert!(
-                matches!(arg.layout.abi, Abi::ScalarPair(..)),
+                matches!(arg.layout.backend_repr, BackendRepr::ScalarPair(..)),
                 "PassMode::Pair for type {}",
                 arg.layout.ty
             );

@@ -658,9 +658,9 @@ fn fn_abi_adjust_for_abi<'tcx>(
     fn unadjust<'tcx>(arg: &mut ArgAbi<'tcx, Ty<'tcx>>) {
         // This still uses `PassMode::Pair` for ScalarPair types. That's unlikely to be intended,
         // but who knows what breaks if we change this now.
-        if matches!(arg.layout.abi, Abi::Aggregate { .. }) {
+        if matches!(arg.layout.backend_repr, BackendRepr::Memory { .. }) {
             assert!(
-                arg.layout.abi.is_sized(),
+                arg.layout.backend_repr.is_sized(),
                 "'unadjusted' ABI does not support unsized arguments"
             );
         }

@@ -731,8 +731,8 @@ fn make_thin_self_ptr<'tcx>(
         // FIXME (mikeyhew) change this to use &own if it is ever added to the language
         Ty::new_mut_ptr(tcx, layout.ty)
     } else {
-        match layout.abi {
-            Abi::ScalarPair(..) | Abi::Scalar(..) => (),
+        match layout.backend_repr {
+            BackendRepr::ScalarPair(..) | BackendRepr::Scalar(..) => (),
             _ => bug!("receiver type has unsupported layout: {:?}", layout),
         }


@@ -5,8 +5,9 @@ use hir::def_id::DefId;
 use rustc_abi::Integer::{I8, I32};
 use rustc_abi::Primitive::{self, Float, Int, Pointer};
 use rustc_abi::{
-    Abi, AbiAndPrefAlign, AddressSpace, Align, FieldsShape, HasDataLayout, LayoutCalculatorError,
-    LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, Variants, WrappingRange,
+    AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FieldsShape, HasDataLayout,
+    LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding,
+    Variants, WrappingRange,
 };
 use rustc_index::bit_set::BitSet;
 use rustc_index::{IndexSlice, IndexVec};

@@ -173,7 +174,9 @@ fn layout_of_uncached<'tcx>(
             let mut layout = LayoutData::clone(&layout.0);
             match *pat {
                 ty::PatternKind::Range { start, end, include_end } => {
-                    if let Abi::Scalar(scalar) | Abi::ScalarPair(scalar, _) = &mut layout.abi {
+                    if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) =
+                        &mut layout.backend_repr
+                    {
                         if let Some(start) = start {
                             scalar.valid_range_mut().start = start
                                 .try_to_bits(tcx, param_env)
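
The pattern-type hunk above narrows a scalar's valid range in place; that same valid-range machinery is what gives the compiler niches to exploit. Stable code can observe the effect through the existing NonZero types:

use std::mem::size_of;
use std::num::NonZeroU32;

fn main() {
    // 0 lies outside NonZeroU32's valid range, so Option<NonZeroU32> can
    // use it as the None representation and needs no separate tag.
    assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
}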

@@ -275,7 +278,7 @@ fn layout_of_uncached<'tcx>(
                 return Ok(tcx.mk_layout(LayoutData::scalar(cx, data_ptr)));
             }

-            let Abi::Scalar(metadata) = metadata_layout.abi else {
+            let BackendRepr::Scalar(metadata) = metadata_layout.backend_repr else {
                 return Err(error(cx, LayoutError::Unknown(pointee)));
             };


@@ -330,9 +333,9 @@ fn layout_of_uncached<'tcx>(
                 .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?;

             let abi = if count != 0 && ty.is_privately_uninhabited(tcx, param_env) {
-                Abi::Uninhabited
+                BackendRepr::Uninhabited
             } else {
-                Abi::Aggregate { sized: true }
+                BackendRepr::Memory { sized: true }
             };

             let largest_niche = if count != 0 { element.largest_niche } else { None };
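
The array hunk above marks `[T; N]` as uninhabited when `N != 0` and `T` can never exist. The payoff is visible from stable code via enum layout (behavior of current rustc, not a documented guarantee):

use std::convert::Infallible;
use std::mem::size_of;

fn main() {
    // The Err variant can never be constructed, so no tag is needed and
    // the whole enum collapses to a bare u32.
    assert_eq!(size_of::<Result<u32, [Infallible; 1]>>(), size_of::<u32>());
    // With N == 0 the array is an inhabited ZST, so a tag comes back.
    assert!(size_of::<Result<u32, [Infallible; 0]>>() > size_of::<u32>());
}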

@@ -340,7 +343,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count },
-                abi,
+                backend_repr: abi,
                 largest_niche,
                 align: element.align,
                 size,

@@ -353,7 +356,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields: FieldsShape::Array { stride: element.size, count: 0 },
-                abi: Abi::Aggregate { sized: false },
+                backend_repr: BackendRepr::Memory { sized: false },
                 largest_niche: None,
                 align: element.align,
                 size: Size::ZERO,

@@ -364,7 +367,7 @@ fn layout_of_uncached<'tcx>(
         ty::Str => tcx.mk_layout(LayoutData {
             variants: Variants::Single { index: FIRST_VARIANT },
             fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
-            abi: Abi::Aggregate { sized: false },
+            backend_repr: BackendRepr::Memory { sized: false },
             largest_niche: None,
             align: dl.i8_align,
             size: Size::ZERO,

@@ -384,8 +387,8 @@ fn layout_of_uncached<'tcx>(
                 &ReprOptions::default(),
                 StructKind::AlwaysSized,
             )?;
-            match unit.abi {
-                Abi::Aggregate { ref mut sized } => *sized = false,
+            match unit.backend_repr {
+                BackendRepr::Memory { ref mut sized } => *sized = false,
                 _ => bug!(),
             }
             tcx.mk_layout(unit)

@@ -500,7 +503,7 @@ fn layout_of_uncached<'tcx>(

             // Compute the ABI of the element type:
             let e_ly = cx.layout_of(e_ty)?;
-            let Abi::Scalar(e_abi) = e_ly.abi else {
+            let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else {
                 // This error isn't caught in typeck, e.g., if
                 // the element type of the vector is generic.
                 tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty });

@@ -516,12 +519,12 @@ fn layout_of_uncached<'tcx>(
                 // Non-power-of-two vectors have padding up to the next power-of-two.
                 // If we're a packed repr, remove the padding while keeping the alignment as close
                 // to a vector as possible.
-                (Abi::Aggregate { sized: true }, AbiAndPrefAlign {
+                (BackendRepr::Memory { sized: true }, AbiAndPrefAlign {
                     abi: Align::max_for_offset(size),
                     pref: dl.vector_align(size).pref,
                 })
             } else {
-                (Abi::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
+                (BackendRepr::Vector { element: e_abi, count: e_len }, dl.vector_align(size))
             };
             let size = size.align_to(align.abi);

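
The comment in the hunk above describes the size rule for SIMD vectors: a non-power-of-two vector pads up to the next power of two unless the repr is packed. The arithmetic, spelled out as a sketch (a simplification that ignores the alignment side of the rule):

fn simd_size_bytes(elem_size: u64, len: u64, packed: bool) -> u64 {
    let exact = elem_size * len;
    // Packed vectors keep their exact size; others pad to a power of two.
    if packed { exact } else { exact.next_power_of_two() }
}

fn main() {
    // A 3 x f32 vector is 12 bytes packed, but pads to 16 otherwise.
    assert_eq!(simd_size_bytes(4, 3, true), 12);
    assert_eq!(simd_size_bytes(4, 3, false), 16);
}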

@@ -535,7 +538,7 @@ fn layout_of_uncached<'tcx>(
             tcx.mk_layout(LayoutData {
                 variants: Variants::Single { index: FIRST_VARIANT },
                 fields,
-                abi,
+                backend_repr: abi,
                 largest_niche: e_ly.largest_niche,
                 size,
                 align,

@@ -985,10 +988,12 @@ fn coroutine_layout<'tcx>(

     size = size.align_to(align.abi);

-    let abi = if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi.is_uninhabited()) {
-        Abi::Uninhabited
+    let abi = if prefix.backend_repr.is_uninhabited()
+        || variants.iter().all(|v| v.backend_repr.is_uninhabited())
+    {
+        BackendRepr::Uninhabited
     } else {
-        Abi::Aggregate { sized: true }
+        BackendRepr::Memory { sized: true }
     };

     let layout = tcx.mk_layout(LayoutData {

@@ -999,7 +1004,7 @@ fn coroutine_layout<'tcx>(
             variants,
         },
         fields: outer_fields,
-        abi,
+        backend_repr: abi,
         // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to
         // self-referentiality), getting the discriminant can cause aliasing violations.
         // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that

@@ -66,12 +66,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa

 fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayout<'tcx>) {
     // Verify the ABI mandated alignment and size.
-    let align = layout.abi.inherent_align(cx).map(|align| align.abi);
-    let size = layout.abi.inherent_size(cx);
+    let align = layout.backend_repr.inherent_align(cx).map(|align| align.abi);
+    let size = layout.backend_repr.inherent_size(cx);
     let Some((align, size)) = align.zip(size) else {
         assert_matches!(
-            layout.layout.abi(),
-            Abi::Uninhabited | Abi::Aggregate { .. },
+            layout.layout.backend_repr(),
+            BackendRepr::Uninhabited | BackendRepr::Memory { .. },
             "ABI unexpectedly missing alignment and/or size in {layout:#?}"
         );
         return;

@@ -88,12 +88,12 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
     );

     // Verify per-ABI invariants
-    match layout.layout.abi() {
-        Abi::Scalar(_) => {
+    match layout.layout.backend_repr() {
+        BackendRepr::Scalar(_) => {
             // Check that this matches the underlying field.
             let inner = skip_newtypes(cx, layout);
             assert!(
-                matches!(inner.layout.abi(), Abi::Scalar(_)),
+                matches!(inner.layout.backend_repr(), BackendRepr::Scalar(_)),
                 "`Scalar` type {} is newtype around non-`Scalar` type {}",
                 layout.ty,
                 inner.ty

@@ -132,7 +132,7 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                     "`Scalar` field with bad align in {inner:#?}",
                 );
                 assert!(
-                    matches!(field.abi, Abi::Scalar(_)),
+                    matches!(field.backend_repr, BackendRepr::Scalar(_)),
                     "`Scalar` field with bad ABI in {inner:#?}",
                 );
             }

@@ -141,11 +141,11 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                 }
             }
         }
-        Abi::ScalarPair(scalar1, scalar2) => {
+        BackendRepr::ScalarPair(scalar1, scalar2) => {
             // Check that the underlying pair of fields matches.
             let inner = skip_newtypes(cx, layout);
             assert!(
-                matches!(inner.layout.abi(), Abi::ScalarPair(..)),
+                matches!(inner.layout.backend_repr(), BackendRepr::ScalarPair(..)),
                 "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
                 layout.ty,
                 inner.ty

@@ -208,8 +208,8 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                 "`ScalarPair` first field with bad align in {inner:#?}",
             );
             assert_matches!(
-                field1.abi,
-                Abi::Scalar(_),
+                field1.backend_repr,
+                BackendRepr::Scalar(_),
                 "`ScalarPair` first field with bad ABI in {inner:#?}",
             );
             let field2_offset = size1.align_to(align2);
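
The `field2_offset = size1.align_to(align2)` line above places the second scalar at the first offset that satisfies its alignment. The same rounding, written out for power-of-two alignments:

fn align_to(offset: u64, align: u64) -> u64 {
    // Round offset up to the next multiple of align (align: power of two).
    (offset + align - 1) & !(align - 1)
}

fn main() {
    // In a (u8, u32) ScalarPair the u32 starts at offset 4, not 1.
    assert_eq!(align_to(1, 4), 4);
    // In a (u64, u32) pair the second field follows immediately at 8.
    assert_eq!(align_to(8, 4), 8);
}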

@@ -226,16 +226,16 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
                 "`ScalarPair` second field with bad align in {inner:#?}",
             );
             assert_matches!(
-                field2.abi,
-                Abi::Scalar(_),
+                field2.backend_repr,
+                BackendRepr::Scalar(_),
                 "`ScalarPair` second field with bad ABI in {inner:#?}",
             );
         }
-        Abi::Vector { element, .. } => {
+        BackendRepr::Vector { element, .. } => {
             assert!(align >= element.align(cx).abi); // just sanity-checking `vector_align`.
             // FIXME: Do some kind of check of the inner type, like for Scalar and ScalarPair.
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {} // Nothing to check.
     }
 }


@@ -274,13 +274,13 @@ pub(super) fn partially_check_layout<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLa
         // The top-level ABI and the ABI of the variants should be coherent.
         let scalar_coherent =
             |s1: Scalar, s2: Scalar| s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx);
-        let abi_coherent = match (layout.abi, variant.abi) {
-            (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
-            (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
+        let abi_coherent = match (layout.backend_repr, variant.backend_repr) {
+            (BackendRepr::Scalar(s1), BackendRepr::Scalar(s2)) => scalar_coherent(s1, s2),
+            (BackendRepr::ScalarPair(a1, b1), BackendRepr::ScalarPair(a2, b2)) => {
                 scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
             }
-            (Abi::Uninhabited, _) => true,
-            (Abi::Aggregate { .. }, _) => true,
+            (BackendRepr::Uninhabited, _) => true,
+            (BackendRepr::Memory { .. }, _) => true,
             _ => false,
         };
         if !abi_coherent {