
compiler: rustc_abi::Abi => BackendRepr

The initial naming of "Abi" was an awful mistake, conveying wrong ideas
about how psABIs worked and even more about what the enum meant.
It was only meant to represent the way the value would be described to
a codegen backend as it was lowered to that intermediate representation.
It was never meant to mean anything about the actual psABI handling!
The conflation arose because LLVM typically associates a certain form with
a certain ABI, but even that association does not hold once the special
cases that actually exist come into play, along with the IR annotations
that modify the ABI.
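
As a purely illustrative aside, none of the types below exist in rustc; the
names are invented for this sketch. It recasts the distinction as two separate
questions: what shape the IR-facing layer records for a value, versus how a
particular calling convention actually passes it, with the mapping between the
two depending on the target and on case-by-case rules.

// Toy sketch only: these types are made up and do not appear in rustc.
// "Representation" question: how a value is described when building backend IR.
#[derive(Clone, Copy)]
enum ToyRepr {
    Scalar,
    ScalarPair,
    Memory { size: u64 },
}

// "ABI" question: how a calling convention actually passes an argument.
#[derive(Debug, PartialEq)]
enum ToyPassMode {
    InRegister,
    InRegisterPair,
    ByPointer, // caller allocates a slot and passes its address
}

// A deliberately simplistic stand-in for a psABI rule: the same representation
// can map to different pass modes depending on the convention, which is why the
// representation enum must not be read as an ABI decision.
fn toy_classify(repr: ToyRepr, large_values_by_pointer: bool) -> ToyPassMode {
    match repr {
        ToyRepr::Scalar => ToyPassMode::InRegister,
        ToyRepr::ScalarPair => ToyPassMode::InRegisterPair,
        ToyRepr::Memory { size } if large_values_by_pointer && size > 16 => {
            ToyPassMode::ByPointer
        }
        ToyRepr::Memory { .. } => ToyPassMode::InRegisterPair,
    }
}

fn main() {
    let repr = ToyRepr::Memory { size: 32 };
    // Same representation, two different ABI outcomes under two conventions.
    assert_eq!(toy_classify(repr, true), ToyPassMode::ByPointer);
    assert_eq!(toy_classify(repr, false), ToyPassMode::InRegisterPair);
    assert_eq!(toy_classify(ToyRepr::Scalar, true), ToyPassMode::InRegister);
    assert_eq!(toy_classify(ToyRepr::ScalarPair, true), ToyPassMode::InRegisterPair);
}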

Reframe `rustc_abi::Abi` as the `BackendRepr` of the type, and rename
`BackendRepr::Aggregate` to `BackendRepr::Memory`. Unfortunately, because of
the persistent misunderstandings, even this name is not entirely accurate:
- Scattered ABI-relevant code is entangled with BackendRepr
- We do not always pre-compute a correct BackendRepr that reflects how
  we "actually" want this value to be handled, so the backend interface
  is also left to inject various special cases here
- In some cases `BackendRepr::Memory` is a "real" aggregate, in others it
  only means the value is handled through memory, and in some cases it is
  actually a scalar!

Our rustc-to-backend lowering code handles this sort of thing right now.
That will eventually be addressed by lifting duplicated lowering code
to either rustc_codegen_ssa or rustc_target as appropriate.
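
For readers who have not seen the enum in question, here is a minimal sketch of
its shape after the rename, reduced to the variants that appear in the hunks
below. The `Scalar` payload is stubbed out and field names are approximate, so
treat it as an illustration of the idea, not the actual `rustc_abi` definition:

// Stand-in for `rustc_abi::Scalar`, only here to keep the sketch self-contained.
pub struct Scalar;

// Approximate shape of the renamed enum (formerly `rustc_abi::Abi`): it records
// how a value is described to the codegen backend, not how a psABI passes it.
pub enum BackendRepr {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector { element: Scalar, count: u64 },
    // Formerly `Abi::Aggregate`: the value is handed to the backend "in memory".
    Memory { sized: bool },
}

impl BackendRepr {
    // Mirrors the helper used throughout the diff, e.g. `layout.backend_repr.is_scalar()`.
    pub fn is_scalar(&self) -> bool {
        matches!(self, BackendRepr::Scalar(_))
    }
}
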
Jubilee Young 2024-10-29 13:37:26 -07:00
parent 2dece5bb62
commit 7086dd83cc
51 changed files with 517 additions and 428 deletions


@@ -131,7 +131,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
interp_ok(match bin_op {
Eq | Ne | Lt | Le | Gt | Ge => {
// Types can differ, e.g. fn ptrs with different `for`.
assert_eq!(left.layout.abi, right.layout.abi);
assert_eq!(left.layout.backend_repr, right.layout.backend_repr);
let size = ecx.pointer_size();
// Just compare the bits. ScalarPairs are compared lexicographically.
// We thus always compare pairs and simply fill scalars up with 0.


@@ -1,6 +1,7 @@
use std::sync::atomic::Ordering::Relaxed;
use either::{Left, Right};
use rustc_abi::{self as abi, BackendRepr};
use rustc_hir::def::DefKind;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{AllocId, ErrorHandled, InterpErrorInfo};
@@ -12,7 +13,6 @@ use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::def_id::LocalDefId;
use rustc_span::{DUMMY_SP, Span};
use rustc_target::abi::{self, Abi};
use tracing::{debug, instrument, trace};
use super::{CanAccessMutGlobal, CompileTimeInterpCx, CompileTimeMachine};
@@ -174,8 +174,8 @@ pub(super) fn op_to_const<'tcx>(
// type (it's used throughout the compiler and having it work just on literals is not enough)
// and we want it to be fast (i.e., don't go to an `Allocation` and reconstruct the `Scalar`
// from its byte-serialized form).
let force_as_immediate = match op.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { .. }) => true,
let force_as_immediate = match op.layout.backend_repr {
BackendRepr::Scalar(abi::Scalar::Initialized { .. }) => true,
// We don't *force* `ConstValue::Slice` for `ScalarPair`. This has the advantage that if the
// input `op` is a place, then turning it into a `ConstValue` and back into a `OpTy` will
// not have to generate any duplicate allocations (we preserve the original `AllocId` in


@@ -1,10 +1,10 @@
use rustc_abi::{BackendRepr, VariantIdx};
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_middle::mir::interpret::{EvalToValTreeResult, GlobalId};
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_span::DUMMY_SP;
use rustc_target::abi::{Abi, VariantIdx};
use tracing::{debug, instrument, trace};
use super::eval_queries::{mk_eval_cx_to_read_const_val, op_to_const};
@@ -117,7 +117,7 @@ fn const_to_valtree_inner<'tcx>(
let val = ecx.read_immediate(place).unwrap();
// We could allow wide raw pointers where both sides are integers in the future,
// but for now we reject them.
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
if matches!(val.layout.backend_repr, BackendRepr::ScalarPair(..)) {
return Err(ValTreeCreationError::NonSupportedType(ty));
}
let val = val.to_scalar();
@@ -311,7 +311,7 @@ pub fn valtree_to_const_value<'tcx>(
// Fast path to avoid some allocations.
return mir::ConstValue::ZeroSized;
}
if layout.abi.is_scalar()
if layout.backend_repr.is_scalar()
&& (matches!(ty.kind(), ty::Tuple(_))
|| matches!(ty.kind(), ty::Adt(def, _) if def.is_struct()))
{


@@ -172,8 +172,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// must be compatible. So we just accept everything with Pointer ABI as compatible,
// even if this will accept some code that is not stably guaranteed to work.
// This also handles function pointers.
let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.abi {
abi::Abi::Scalar(s) => match s.primitive() {
let thin_pointer = |layout: TyAndLayout<'tcx>| match layout.backend_repr {
abi::BackendRepr::Scalar(s) => match s.primitive() {
abi::Primitive::Pointer(addr_space) => Some(addr_space),
_ => None,
},


@@ -274,7 +274,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
// Let's make sure v is sign-extended *if* it has a signed type.
let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
let signed = src_layout.backend_repr.is_signed(); // Also asserts that abi is `Scalar`.
let v = match src_layout.ty.kind() {
Uint(_) | RawPtr(..) | FnPtr(..) => scalar.to_uint(src_layout.size)?,


@@ -112,7 +112,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Read tag and sanity-check `tag_layout`.
let tag_val = self.read_immediate(&self.project_field(op, tag_field)?)?;
assert_eq!(tag_layout.size, tag_val.layout.size);
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
assert_eq!(tag_layout.backend_repr.is_signed(), tag_val.layout.backend_repr.is_signed());
trace!("tag value: {}", tag_val);
// Figure out which discriminant and variant this corresponds to.


@@ -563,7 +563,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
interp_ok(if overflowed.to_bool()? {
let size = l.layout.size;
if l.layout.abi.is_signed() {
if l.layout.backend_repr.is_signed() {
// For signed ints the saturated value depends on the sign of the first
// term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no


@@ -5,7 +5,7 @@ use std::assert_matches::assert_matches;
use either::{Either, Left, Right};
use rustc_abi as abi;
use rustc_abi::{Abi, HasDataLayout, Size};
use rustc_abi::{BackendRepr, HasDataLayout, Size};
use rustc_hir::def::Namespace;
use rustc_middle::mir::interpret::ScalarSizeMismatch;
use rustc_middle::ty::layout::{HasParamEnv, HasTyCtxt, LayoutOf, TyAndLayout};
@@ -114,9 +114,9 @@ impl<Prov: Provenance> Immediate<Prov> {
}
/// Assert that this immediate is a valid value for the given ABI.
pub fn assert_matches_abi(self, abi: Abi, msg: &str, cx: &impl HasDataLayout) {
pub fn assert_matches_abi(self, abi: BackendRepr, msg: &str, cx: &impl HasDataLayout) {
match (self, abi) {
(Immediate::Scalar(scalar), Abi::Scalar(s)) => {
(Immediate::Scalar(scalar), BackendRepr::Scalar(s)) => {
assert_eq!(scalar.size(), s.size(cx), "{msg}: scalar value has wrong size");
if !matches!(s.primitive(), abi::Primitive::Pointer(..)) {
// This is not a pointer, it should not carry provenance.
@@ -126,7 +126,7 @@ impl<Prov: Provenance> Immediate<Prov> {
);
}
}
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
assert_eq!(
a_val.size(),
a.size(cx),
@@ -244,7 +244,7 @@ impl<'tcx, Prov: Provenance> std::ops::Deref for ImmTy<'tcx, Prov> {
impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert!(layout.backend_repr.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert_eq!(val.size(), layout.size);
ImmTy { imm: val.into(), layout }
}
@@ -252,7 +252,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar_pair(a: Scalar<Prov>, b: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(
matches!(layout.abi, Abi::ScalarPair(..)),
matches!(layout.backend_repr, BackendRepr::ScalarPair(..)),
"`ImmTy::from_scalar_pair` on non-scalar-pair layout"
);
let imm = Immediate::ScalarPair(a, b);
@@ -263,9 +263,9 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
// Without a `cx` we cannot call `assert_matches_abi`.
debug_assert!(
match (imm, layout.abi) {
(Immediate::Scalar(..), Abi::Scalar(..)) => true,
(Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
match (imm, layout.backend_repr) {
(Immediate::Scalar(..), BackendRepr::Scalar(..)) => true,
(Immediate::ScalarPair(..), BackendRepr::ScalarPair(..)) => true,
(Immediate::Uninit, _) if layout.is_sized() => true,
_ => false,
},
@@ -356,7 +356,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
fn offset_(&self, offset: Size, layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
// Verify that the input matches its type.
if cfg!(debug_assertions) {
self.assert_matches_abi(self.layout.abi, "invalid input to Immediate::offset", cx);
self.assert_matches_abi(
self.layout.backend_repr,
"invalid input to Immediate::offset",
cx,
);
}
// `ImmTy` have already been checked to be in-bounds, so we can just check directly if this
// remains in-bounds. This cannot actually be violated since projections are type-checked
@@ -370,19 +374,19 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
);
// This makes several assumptions about what layouts we will encounter; we match what
// codegen does as good as we can (see `extract_field` in `rustc_codegen_ssa/src/mir/operand.rs`).
let inner_val: Immediate<_> = match (**self, self.layout.abi) {
let inner_val: Immediate<_> = match (**self, self.layout.backend_repr) {
// If the entire value is uninit, then so is the field (can happen in ConstProp).
(Immediate::Uninit, _) => Immediate::Uninit,
// If the field is uninhabited, we can forget the data (can happen in ConstProp).
// `enum S { A(!), B, C }` is an example of an enum with Scalar layout that
// has an `Uninhabited` variant, which means this case is possible.
_ if layout.abi.is_uninhabited() => Immediate::Uninit,
_ if layout.is_uninhabited() => Immediate::Uninit,
// the field contains no information, can be left uninit
// (Scalar/ScalarPair can contain even aligned ZST, not just 1-ZST)
_ if layout.is_zst() => Immediate::Uninit,
// some fieldless enum variants can have non-zero size but still `Aggregate` ABI... try
// to detect those here and also give them no data
_ if matches!(layout.abi, Abi::Aggregate { .. })
_ if matches!(layout.backend_repr, BackendRepr::Memory { .. })
&& matches!(layout.variants, abi::Variants::Single { .. })
&& matches!(&layout.fields, abi::FieldsShape::Arbitrary { offsets, .. } if offsets.len() == 0) =>
{
@@ -394,7 +398,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
**self
}
// extract fields from types with `ScalarPair` ABI
(Immediate::ScalarPair(a_val, b_val), Abi::ScalarPair(a, b)) => {
(Immediate::ScalarPair(a_val, b_val), BackendRepr::ScalarPair(a, b)) => {
Immediate::from(if offset.bytes() == 0 {
a_val
} else {
@@ -411,7 +415,11 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
),
};
// Ensure the new layout matches the new value.
inner_val.assert_matches_abi(layout.abi, "invalid field type in Immediate::offset", cx);
inner_val.assert_matches_abi(
layout.backend_repr,
"invalid field type in Immediate::offset",
cx,
);
ImmTy::from_immediate(inner_val, layout)
}
@@ -567,8 +575,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
interp_ok(match mplace.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
interp_ok(match mplace.layout.backend_repr {
BackendRepr::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
let scalar = alloc.read_scalar(
@@ -577,7 +585,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)?;
Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
BackendRepr::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
abi::Scalar::Initialized { value: b, .. },
) => {
@@ -637,9 +645,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
if !matches!(
op.layout().abi,
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
op.layout().backend_repr,
BackendRepr::Scalar(abi::Scalar::Initialized { .. })
| BackendRepr::ScalarPair(
abi::Scalar::Initialized { .. },
abi::Scalar::Initialized { .. }
)
) {
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}


@@ -114,7 +114,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let l_bits = left.layout.size.bits();
// Compute the equivalent shift modulo `size` that is in the range `0..size`. (This is
// the one MIR operator that does *not* directly map to a single LLVM operation.)
let (shift_amount, overflow) = if right.layout.abi.is_signed() {
let (shift_amount, overflow) = if right.layout.backend_repr.is_signed() {
let shift_amount = r_signed();
let rem = shift_amount.rem_euclid(l_bits.into());
// `rem` is guaranteed positive, so the `unwrap` cannot fail
@@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
};
let shift_amount = u32::try_from(shift_amount).unwrap(); // we brought this in the range `0..size` so this will always fit
// Compute the shifted result.
let result = if left.layout.abi.is_signed() {
let result = if left.layout.backend_repr.is_signed() {
let l = l_signed();
let result = match bin_op {
Shl | ShlUnchecked => l.checked_shl(shift_amount).unwrap(),
@@ -147,7 +147,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if overflow && let Some(intrinsic) = throw_ub_on_overflow {
throw_ub!(ShiftOverflow {
intrinsic,
shift_amount: if right.layout.abi.is_signed() {
shift_amount: if right.layout.backend_repr.is_signed() {
Either::Right(r_signed())
} else {
Either::Left(r_unsigned())
@@ -171,7 +171,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let size = left.layout.size;
// Operations that need special treatment for signed integers
if left.layout.abi.is_signed() {
if left.layout.backend_repr.is_signed() {
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
Lt => Some(i128::lt),
Le => Some(i128::le),
@@ -250,7 +250,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
BitXor => ImmTy::from_uint(l ^ r, left.layout),
_ => {
assert!(!left.layout.abi.is_signed());
assert!(!left.layout.backend_repr.is_signed());
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
Add | AddUnchecked | AddWithOverflow => u128::overflowing_add,
Sub | SubUnchecked | SubWithOverflow => u128::overflowing_sub,
@@ -332,7 +332,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
let offset_bytes = val.to_target_isize(self)?;
if !right.layout.abi.is_signed() && offset_bytes < 0 {
if !right.layout.backend_repr.is_signed() && offset_bytes < 0 {
// We were supposed to do an unsigned offset but the result is negative -- this
// can only mean that the cast wrapped around.
throw_ub!(PointerArithOverflow)


@@ -5,11 +5,11 @@
use std::assert_matches::assert_matches;
use either::{Either, Left, Right};
use rustc_abi::{Align, BackendRepr, HasDataLayout, Size};
use rustc_ast::Mutability;
use rustc_middle::ty::Ty;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::{bug, mir, span_bug};
use rustc_target::abi::{Abi, Align, HasDataLayout, Size};
use tracing::{instrument, trace};
use super::{
@@ -659,7 +659,7 @@ where
// Unfortunately this is too expensive to do in release builds.
if cfg!(debug_assertions) {
src.assert_matches_abi(
local_layout.abi,
local_layout.backend_repr,
"invalid immediate for given destination place",
self,
);
@@ -683,7 +683,11 @@ where
) -> InterpResult<'tcx> {
// We use the sizes from `value` below.
// Ensure that matches the type of the place it is written to.
value.assert_matches_abi(layout.abi, "invalid immediate for given destination place", self);
value.assert_matches_abi(
layout.backend_repr,
"invalid immediate for given destination place",
self,
);
// Note that it is really important that the type here is the right one, and matches the
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
// to handle padding properly, which is only correct if we never look at this data with the
@@ -700,7 +704,7 @@ where
alloc.write_scalar(alloc_range(Size::ZERO, scalar.size()), scalar)
}
Immediate::ScalarPair(a_val, b_val) => {
let Abi::ScalarPair(a, b) = layout.abi else {
let BackendRepr::ScalarPair(a, b) = layout.backend_repr else {
span_bug!(
self.cur_span(),
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",


@@ -11,6 +11,10 @@ use std::num::NonZero;
use either::{Left, Right};
use hir::def::DefKind;
use rustc_abi::{
BackendRepr, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants,
WrappingRange,
};
use rustc_ast::Mutability;
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
@@ -23,9 +27,6 @@ use rustc_middle::mir::interpret::{
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_span::symbol::{Symbol, sym};
use rustc_target::abi::{
Abi, FieldIdx, FieldsShape, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange,
};
use tracing::trace;
use super::machine::AllocMap;
@@ -422,7 +423,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
// Reset provenance: ensure slice tail metadata does not preserve provenance,
// and ensure all pointers do not preserve partial provenance.
if self.reset_provenance_and_padding {
if matches!(imm.layout.abi, Abi::Scalar(..)) {
if matches!(imm.layout.backend_repr, BackendRepr::Scalar(..)) {
// A thin pointer. If it has provenance, we don't have to do anything.
// If it does not, ensure we clear the provenance in memory.
if matches!(imm.to_scalar(), Scalar::Int(..)) {
@@ -981,7 +982,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
let elem = layout.field(cx, 0);
// Fast-path for large arrays of simple types that do not contain any padding.
if elem.abi.is_scalar() {
if elem.backend_repr.is_scalar() {
out.add_range(base_offset, elem.size * count);
} else {
for idx in 0..count {
@@ -1299,19 +1300,19 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
// scalars, we do the same check on every "level" (e.g., first we check
// MyNewtype and then the scalar in there).
match val.layout.abi {
Abi::Uninhabited => {
match val.layout.backend_repr {
BackendRepr::Uninhabited => {
let ty = val.layout.ty;
throw_validation_failure!(self.path, UninhabitedVal { ty });
}
Abi::Scalar(scalar_layout) => {
BackendRepr::Scalar(scalar_layout) => {
if !scalar_layout.is_uninit_valid() {
// There is something to check here.
let scalar = self.read_scalar(val, ExpectedKind::InitScalar)?;
self.visit_scalar(scalar, scalar_layout)?;
}
}
Abi::ScalarPair(a_layout, b_layout) => {
BackendRepr::ScalarPair(a_layout, b_layout) => {
// We can only proceed if *both* scalars need to be initialized.
// FIXME: find a way to also check ScalarPair when one side can be uninit but
// the other must be init.
@@ -1322,12 +1323,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
self.visit_scalar(b, b_layout)?;
}
}
Abi::Vector { .. } => {
BackendRepr::Vector { .. } => {
// No checks here, we assume layout computation gets this right.
// (This is harder to check since Miri does not represent these as `Immediate`. We
// also cannot use field projections since this might be a newtype around a vector.)
}
Abi::Aggregate { .. } => {
BackendRepr::Memory { .. } => {
// Nothing to do.
}
}


@@ -1,9 +1,9 @@
use rustc_abi::{BackendRepr, FieldsShape, Scalar, Variants};
use rustc_middle::bug;
use rustc_middle::ty::layout::{
HasTyCtxt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, ValidityRequirement,
};
use rustc_middle::ty::{ParamEnvAnd, Ty, TyCtxt};
use rustc_target::abi::{Abi, FieldsShape, Scalar, Variants};
use crate::const_eval::{CanAccessMutGlobal, CheckAlignment, CompileTimeMachine};
use crate::interpret::{InterpCx, MemoryKind};
@@ -111,12 +111,12 @@ fn check_validity_requirement_lax<'tcx>(
};
// Check the ABI.
let valid = match this.abi {
Abi::Uninhabited => false, // definitely UB
Abi::Scalar(s) => scalar_allows_raw_init(s),
Abi::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
Abi::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
Abi::Aggregate { .. } => true, // Fields are checked below.
let valid = match this.backend_repr {
BackendRepr::Uninhabited => false, // definitely UB
BackendRepr::Scalar(s) => scalar_allows_raw_init(s),
BackendRepr::ScalarPair(s1, s2) => scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2),
BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s),
BackendRepr::Memory { .. } => true, // Fields are checked below.
};
if !valid {
// This is definitely not okay.