
Auto merge of #116010 - RalfJung:more-typed-immediates, r=oli-obk

interpret: more consistently use ImmTy in operators and casts

The diff in src/tools/miri/src/shims/x86/sse2.rs should hopefully suffice to explain why this is nicer. :)
bors 2023-09-21 14:02:55 +00:00
commit 0fd7ce99b0
23 changed files with 298 additions and 285 deletions
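The heart of the change, before the per-file diffs: the operator and cast helpers used to return a bare Scalar plus the result Ty, forcing every caller to look the layout up again; they now return a typed ImmTy, so value and layout travel together. A minimal toy sketch of that shape change (stand-in types, not the real rustc_const_eval ones):

// Toy model of the refactor; Layout/ImmTy here are illustrative stand-ins.
#[derive(Clone, Copy, Debug)]
struct Layout {
    size_bits: u32,
}

#[derive(Clone, Copy, Debug)]
struct ImmTy {
    bits: u128,
    layout: Layout,
}

impl ImmTy {
    fn from_uint(bits: u128, layout: Layout) -> Self {
        ImmTy { bits, layout }
    }
}

// Old shape: (value, overflowed, result type) -- the caller must re-derive
// the layout before it can use the result. (Layout stands in for Ty here.)
fn add_old(l: u128, r: u128, layout: Layout) -> (u128, bool, Layout) {
    let (v, overflowed) = l.overflowing_add(r);
    (v, overflowed, layout)
}

// New shape: (ImmTy, overflowed) -- no separate type to thread around.
fn add_new(l: ImmTy, r: ImmTy) -> (ImmTy, bool) {
    let (v, overflowed) = l.bits.overflowing_add(r.bits);
    (ImmTy::from_uint(v, l.layout), overflowed)
}

fn main() {
    let layout = Layout { size_bits: 64 };
    let _ = add_old(7, 35, layout); // old-style call, for comparison
    let (res, overflowed) = add_new(ImmTy::from_uint(7, layout), ImmTy::from_uint(35, layout));
    println!("{res:?}, overflowed = {overflowed}");
}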

View file

@@ -3,7 +3,7 @@ use rustc_hir::{LangItem, CRATE_HIR_ID};
use rustc_middle::mir;
use rustc_middle::mir::interpret::PointerArithmetic;
use rustc_middle::ty::layout::{FnAbiOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::lint::builtin::INVALID_ALIGNMENT;
use std::borrow::Borrow;
use std::hash::Hash;
@@ -596,7 +596,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
_bin_op: mir::BinOp,
_left: &ImmTy<'tcx>,
_right: &ImmTy<'tcx>,
) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx>, bool)> {
throw_unsup_format!("pointer arithmetic or comparison is not supported at compile-time");
}

View file

@@ -24,41 +24,44 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: Ty<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
// `cast_ty` will often be the same as `dest.ty`, but not always, since subtyping is still
// possible.
let cast_layout =
if cast_ty == dest.layout.ty { dest.layout } else { self.layout_of(cast_ty)? };
// FIXME: In which cases should we trigger UB when the source is uninit?
match cast_kind {
CastKind::PointerCoercion(PointerCoercion::Unsize) => {
let cast_ty = self.layout_of(cast_ty)?;
self.unsize_into(src, cast_ty, dest)?;
self.unsize_into(src, cast_layout, dest)?;
}
CastKind::PointerExposeAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_expose_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
let res = self.pointer_expose_address_cast(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
CastKind::PointerFromExposedAddress => {
let src = self.read_immediate(src)?;
let res = self.pointer_from_exposed_address_cast(&src, cast_ty)?;
self.write_immediate(res, dest)?;
let res = self.pointer_from_exposed_address_cast(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
CastKind::IntToInt | CastKind::IntToFloat => {
let src = self.read_immediate(src)?;
let res = self.int_to_int_or_float(&src, cast_ty)?;
self.write_immediate(res, dest)?;
let res = self.int_to_int_or_float(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
CastKind::FloatToFloat | CastKind::FloatToInt => {
let src = self.read_immediate(src)?;
let res = self.float_to_float_or_int(&src, cast_ty)?;
self.write_immediate(res, dest)?;
let res = self.float_to_float_or_int(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
let src = self.read_immediate(src)?;
let res = self.ptr_to_ptr(&src, cast_ty)?;
self.write_immediate(res, dest)?;
let res = self.ptr_to_ptr(&src, cast_layout)?;
self.write_immediate(*res, dest)?;
}
CastKind::PointerCoercion(
@@ -87,7 +90,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
_ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
_ => span_bug!(self.cur_span(), "reify fn pointer on {}", src.layout.ty),
}
}
@@ -98,7 +101,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// No change to value
self.write_immediate(*src, dest)?;
}
_ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
_ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {}", cast_ty),
}
}
@@ -119,7 +122,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let fn_ptr = self.fn_ptr(FnVal::Instance(instance));
self.write_pointer(fn_ptr, dest)?;
}
_ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
_ => span_bug!(self.cur_span(), "closure fn pointer on {}", src.layout.ty),
}
}
@@ -140,6 +143,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
CastKind::Transmute => {
assert!(src.layout.is_sized());
assert!(dest.layout.is_sized());
assert_eq!(cast_ty, dest.layout.ty); // we otherwise ignore `cast_ty` entirely...
if src.layout.size != dest.layout.size {
let src_bytes = src.layout.size.bytes();
let dest_bytes = dest.layout.size.bytes();
@@ -164,62 +168,61 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn int_to_int_or_float(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
assert!(cast_ty.is_floating_point() || cast_ty.is_integral() || cast_ty.is_char());
assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
Ok(self.cast_from_int_like(src.to_scalar(), src.layout, cast_ty)?.into())
Ok(ImmTy::from_scalar(
self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
cast_to,
))
}
/// Handles 'FloatToFloat' and 'FloatToInt' casts.
pub fn float_to_float_or_int(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
use rustc_type_ir::sty::TyKind::*;
match src.layout.ty.kind() {
let val = match src.layout.ty.kind() {
// Floating point
Float(FloatTy::F32) => {
return Ok(self.cast_from_float(src.to_scalar().to_f32()?, cast_ty).into());
}
Float(FloatTy::F64) => {
return Ok(self.cast_from_float(src.to_scalar().to_f64()?, cast_ty).into());
}
Float(FloatTy::F32) => self.cast_from_float(src.to_scalar().to_f32()?, cast_to.ty),
Float(FloatTy::F64) => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
_ => {
bug!("Can't cast 'Float' type into {:?}", cast_ty);
bug!("Can't cast 'Float' type into {}", cast_to.ty);
}
}
};
Ok(ImmTy::from_scalar(val, cast_to))
}
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
pub fn ptr_to_ptr(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_any_ptr());
assert!(cast_ty.is_unsafe_ptr());
assert!(cast_to.ty.is_unsafe_ptr());
// Handle casting any ptr to raw ptr (might be a fat ptr).
let dest_layout = self.layout_of(cast_ty)?;
if dest_layout.size == src.layout.size {
if cast_to.size == src.layout.size {
// Thin or fat pointer that just has the ptr kind of the target type changed.
return Ok(**src);
return Ok(ImmTy::from_immediate(**src, cast_to));
} else {
// Casting the metadata away from a fat ptr.
assert_eq!(src.layout.size, 2 * self.pointer_size());
assert_eq!(dest_layout.size, self.pointer_size());
assert_eq!(cast_to.size, self.pointer_size());
assert!(src.layout.ty.is_unsafe_ptr());
return match **src {
Immediate::ScalarPair(data, _) => Ok(data.into()),
Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
Immediate::Scalar(..) => span_bug!(
self.cur_span(),
"{:?} input to a fat-to-thin cast ({:?} -> {:?})",
"{:?} input to a fat-to-thin cast ({} -> {})",
*src,
src.layout.ty,
cast_ty
cast_to.ty
),
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
};
@@ -229,10 +232,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn pointer_expose_address_cast(
&mut self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert_matches!(src.layout.ty.kind(), ty::RawPtr(_) | ty::FnPtr(_));
assert!(cast_ty.is_integral());
assert!(cast_to.ty.is_integral());
let scalar = src.to_scalar();
let ptr = scalar.to_pointer(self)?;
@@ -240,16 +243,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(ptr) => M::expose_ptr(self, ptr)?,
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
};
Ok(self.cast_from_int_like(scalar, src.layout, cast_ty)?.into())
Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
}
pub fn pointer_from_exposed_address_cast(
&self,
src: &ImmTy<'tcx, M::Provenance>,
cast_ty: Ty<'tcx>,
) -> InterpResult<'tcx, Immediate<M::Provenance>> {
cast_to: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
assert!(src.layout.ty.is_integral());
assert_matches!(cast_ty.kind(), ty::RawPtr(_));
assert_matches!(cast_to.ty.kind(), ty::RawPtr(_));
// First cast to usize.
let scalar = src.to_scalar();
@@ -258,12 +261,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Then turn address into pointer.
let ptr = M::ptr_from_addr_cast(&self, addr)?;
Ok(Scalar::from_maybe_pointer(ptr, self).into())
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
}
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
/// type (basically everything with a scalar layout) to int/float/char types.
pub fn cast_from_int_like(
fn cast_from_int_like(
&self,
scalar: Scalar<M::Provenance>, // input value (there is no ScalarTy so we separate data+layout)
src_layout: TyAndLayout<'tcx>,
@@ -298,7 +301,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
// Casts to bool are not permitted by rustc, no need to handle them here.
_ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
_ => span_bug!(self.cur_span(), "invalid int to {} cast", cast_ty),
})
}
@@ -331,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// float -> f64
Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
// That's it.
_ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
_ => span_bug!(self.cur_span(), "invalid float to {} cast", dest_ty),
}
}
@@ -390,7 +393,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
span_bug!(
self.cur_span(),
"invalid pointer unsizing {:?} -> {:?}",
"invalid pointer unsizing {} -> {}",
src.layout.ty,
cast_ty
)
@@ -404,7 +407,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
cast_ty: TyAndLayout<'tcx>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
trace!("Unsizing {:?} of type {} into {}", *src, src.layout.ty, cast_ty.ty);
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
(&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
| (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
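The cast helpers above now take the precomputed target layout (cast_to: TyAndLayout) instead of a bare cast_ty: Ty, and return ImmTy instead of Immediate; call sites accordingly become self.write_immediate(*res, dest). A toy sketch of why *res works there, assuming (as those call sites suggest) that ImmTy derefs to its untyped Immediate:

use std::ops::Deref;

// Illustrative stand-ins for the real rustc_const_eval types.
#[derive(Clone, Copy, Debug)]
struct Immediate(u128);

#[derive(Clone, Copy)]
struct Layout;

#[derive(Clone, Copy)]
struct ImmTy {
    imm: Immediate,
    #[allow(dead_code)]
    layout: Layout,
}

impl Deref for ImmTy {
    type Target = Immediate;
    fn deref(&self) -> &Immediate {
        &self.imm
    }
}

fn write_immediate(imm: Immediate) {
    println!("writing {imm:?}");
}

fn main() {
    let res = ImmTy { imm: Immediate(42), layout: Layout };
    write_immediate(*res); // `*res` peels the layout off, leaving the Immediate
}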

View file

@@ -76,7 +76,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
ImmTy::from_uint(variant_index_relative, tag_layout);
let tag_val = self.binary_op(
let tag_val = self.wrapping_binary_op(
mir::BinOp::Add,
&variant_index_relative_val,
&niche_start_val,
@@ -153,19 +153,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to.
let index = match *tag_encoding {
TagEncoding::Direct => {
let scalar = tag_val.to_scalar();
// Generate a specific error if `tag_val` is not an integer.
// (`tag_bits` itself is only used for error messages below.)
let tag_bits = scalar
let tag_bits = tag_val
.to_scalar()
.try_to_int()
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
.assert_bits(tag_layout.size);
// Cast bits from tag layout to discriminant layout.
// After the checks we did above, this cannot fail, as
// discriminants are int-like.
let discr_val =
self.cast_from_int_like(scalar, tag_val.layout, discr_layout.ty).unwrap();
let discr_bits = discr_val.assert_bits(discr_layout.size);
let discr_val = self.int_to_int_or_float(&tag_val, discr_layout).unwrap();
let discr_bits = discr_val.to_scalar().assert_bits(discr_layout.size);
// Convert discriminant to variant index, and catch invalid discriminants.
let index = match *ty.kind() {
ty::Adt(adt, _) => {
@@ -208,7 +207,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
let variant_index_relative_val =
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
self.wrapping_binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
let variant_index_relative =
variant_index_relative_val.to_scalar().assert_bits(tag_val.layout.size);
// Check if this is in the range that indicates an actual discriminant.
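The niche encoding above adds the relative variant index to niche_start with wrapping semantics, and decoding subtracts it back. A toy sketch of that arithmetic, with u8 standing in for the tag layout (the real code does this through wrapping_binary_op on ImmTy values):

// Encoding: tag = variant_index_relative + niche_start (wrapping).
fn encode_tag(variant_index_relative: u8, niche_start: u8) -> u8 {
    variant_index_relative.wrapping_add(niche_start)
}

// Decoding: variant_index_relative = tag - niche_start (wrapping).
fn decode_tag(tag: u8, niche_start: u8) -> u8 {
    tag.wrapping_sub(niche_start)
}

fn main() {
    let niche_start = 2;
    assert_eq!(encode_tag(3, niche_start), 5);
    assert_eq!(decode_tag(5, niche_start), 3);
    // Wrapping matters when the niche range wraps around the integer.
    assert_eq!(decode_tag(1, niche_start), u8::MAX);
}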

View file

@@ -416,7 +416,7 @@ pub(super) fn from_known_layout<'tcx>(
if !mir_assign_valid_types(tcx.tcx, param_env, check_layout, known_layout) {
span_bug!(
tcx.span,
"expected type differs from actual type.\nexpected: {:?}\nactual: {:?}",
"expected type differs from actual type.\nexpected: {}\nactual: {}",
known_layout.ty,
check_layout.ty,
);
@@ -712,7 +712,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Foreign(_) => Ok(None),
_ => span_bug!(self.cur_span(), "size_and_align_of::<{:?}> not supported", layout.ty),
_ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
}
}
#[inline]
@@ -982,7 +982,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`is_very_trivially_sized` applied to unexpected type: {:?}", ty)
bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
}
}
}

View file

@@ -307,7 +307,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let dist = {
// Addresses are unsigned, so this is a `usize` computation. We have to do the
// overflow check separately anyway.
let (val, overflowed, _ty) = {
let (val, overflowed) = {
let a_offset = ImmTy::from_uint(a_offset, usize_layout);
let b_offset = ImmTy::from_uint(b_offset, usize_layout);
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?
@@ -324,7 +324,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The signed form of the intrinsic allows this. If we interpret the
// difference as isize, we'll get the proper signed difference. If that
// seems *positive*, they were more than isize::MAX apart.
let dist = val.to_target_isize(self)?;
let dist = val.to_scalar().to_target_isize(self)?;
if dist >= 0 {
throw_ub_custom!(
fluent::const_eval_offset_from_underflow,
@@ -334,7 +334,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
dist
} else {
// b >= a
let dist = val.to_target_isize(self)?;
let dist = val.to_scalar().to_target_isize(self)?;
// If converting to isize produced a *negative* result, we had an overflow
// because they were more than isize::MAX apart.
if dist < 0 {
@@ -504,9 +504,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Performs an exact division, resulting in undefined behavior where
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
// First, check x % y != 0 (or if that computation overflows).
let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
let (res, overflow) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
assert!(!overflow); // All overflow is UB, so this should never return on overflow.
if res.assert_bits(a.layout.size) != 0 {
if res.to_scalar().assert_bits(a.layout.size) != 0 {
throw_ub_custom!(
fluent::const_eval_exact_div_has_remainder,
a = format!("{a}"),
@@ -524,7 +524,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
r: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
assert!(matches!(mir_op, BinOp::Add | BinOp::Sub));
let (val, overflowed, _ty) = self.overflowing_binary_op(mir_op, l, r)?;
let (val, overflowed) = self.overflowing_binary_op(mir_op, l, r)?;
Ok(if overflowed {
let size = l.layout.size;
let num_bits = size.bits();
@@ -556,7 +556,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
} else {
val
val.to_scalar()
})
}

View file

@@ -9,7 +9,7 @@ use std::hash::Hash;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_middle::mir;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::ty::{self, TyCtxt};
use rustc_span::def_id::DefId;
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi as CallAbi;
@@ -18,7 +18,7 @@ use crate::const_eval::CheckAlignment;
use super::{
AllocBytes, AllocId, AllocRange, Allocation, ConstAllocation, FnArg, Frame, ImmTy, InterpCx,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance, Scalar,
InterpResult, MPlaceTy, MemoryKind, OpTy, PlaceTy, Pointer, Provenance,
};
/// Data returned by Machine::stack_pop,
@@ -238,7 +238,7 @@ pub trait Machine<'mir, 'tcx: 'mir>: Sized {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, Self::Provenance>,
right: &ImmTy<'tcx, Self::Provenance>,
) -> InterpResult<'tcx, (Scalar<Self::Provenance>, bool, Ty<'tcx>)>;
) -> InterpResult<'tcx, (ImmTy<'tcx, Self::Provenance>, bool)>;
/// Called before writing the specified `local` of the `frame`.
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked

View file

@@ -8,7 +8,7 @@ use either::{Either, Left, Right};
use rustc_hir::def::Namespace;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter};
use rustc_middle::ty::{ConstInt, Ty};
use rustc_middle::ty::{ConstInt, Ty, TyCtxt};
use rustc_middle::{mir, ty};
use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size};
@@ -165,7 +165,15 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline(always)]
pub fn from_immediate(imm: Immediate<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.is_sized(), "immediates must be sized");
debug_assert!(
match (imm, layout.abi) {
(Immediate::Scalar(..), Abi::Scalar(..)) => true,
(Immediate::ScalarPair(..), Abi::ScalarPair(..)) => true,
(Immediate::Uninit, _) if layout.is_sized() => true,
_ => false,
},
"immediate {imm:?} does not fit to layout {layout:?}",
);
ImmTy { imm, layout }
}
@@ -194,6 +202,12 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
Self::from_scalar(Scalar::from_int(i, layout.size), layout)
}
#[inline]
pub fn from_bool(b: bool, tcx: TyCtxt<'tcx>) -> Self {
let layout = tcx.layout_of(ty::ParamEnv::reveal_all().and(tcx.types.bool)).unwrap();
Self::from_scalar(Scalar::from_bool(b), layout)
}
#[inline]
pub fn to_const_int(self) -> ConstInt {
assert!(self.layout.ty.is_integral());
@@ -448,7 +462,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(Size::ZERO, size),
/*read_provenance*/ matches!(s, abi::Pointer(_)),
)?;
Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
Some(ImmTy::from_scalar(scalar, mplace.layout))
}
Abi::ScalarPair(
abi::Scalar::Initialized { value: a, .. },
@@ -468,7 +482,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
alloc_range(b_offset, b_size),
/*read_provenance*/ matches!(b, abi::Pointer(_)),
)?;
Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
Some(ImmTy::from_immediate(Immediate::ScalarPair(a_val, b_val), mplace.layout))
}
_ => {
// Neither a scalar nor scalar pair.
@@ -514,11 +528,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Abi::Scalar(abi::Scalar::Initialized { .. })
| Abi::ScalarPair(abi::Scalar::Initialized { .. }, abi::Scalar::Initialized { .. })
) {
span_bug!(
self.cur_span(),
"primitive read not possible for type: {:?}",
op.layout().ty
);
span_bug!(self.cur_span(), "primitive read not possible for type: {}", op.layout().ty);
}
let imm = self.read_immediate_raw(op)?.right().unwrap();
if matches!(*imm, Immediate::Uninit) {
@@ -669,7 +679,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
)?)?,
op.layout,
),
"eval_place of a MIR place with type {:?} produced an interpreter operand with type {:?}",
"eval_place of a MIR place with type {:?} produced an interpreter operand with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
op.layout.ty,
);
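The new debug assertion in ImmTy::from_immediate rejects value/layout mismatches at construction time rather than at some later use. A simplified, self-contained model of that check (toy Abi/Immediate/Layout types; the real assertion additionally requires a sized layout for Uninit):

#[derive(Debug)]
enum Abi {
    Scalar,
    ScalarPair,
}

#[derive(Debug)]
enum Immediate {
    Scalar(u128),
    ScalarPair(u128, u128),
    Uninit,
}

#[derive(Debug)]
struct Layout {
    abi: Abi,
}

fn from_immediate(imm: Immediate, layout: Layout) -> (Immediate, Layout) {
    debug_assert!(
        matches!(
            (&imm, &layout.abi),
            (Immediate::Scalar(..), Abi::Scalar)
                | (Immediate::ScalarPair(..), Abi::ScalarPair)
                | (Immediate::Uninit, _)
        ),
        "immediate {imm:?} does not fit to layout {layout:?}"
    );
    (imm, layout)
}

fn main() {
    let _ok = from_immediate(Immediate::Scalar(1), Layout { abi: Abi::Scalar });
    let _pair = from_immediate(Immediate::ScalarPair(1, 2), Layout { abi: Abi::ScalarPair });
    // In a debug build this would panic: scalar value, scalar-pair layout.
    // let _bad = from_immediate(Immediate::Scalar(1), Layout { abi: Abi::ScalarPair });
}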

View file

@@ -1,7 +1,7 @@
use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_span::symbol::sym;
use rustc_target::abi::Abi;
@@ -20,9 +20,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
let (val, overflowed) = self.overflowing_binary_op(op, &left, &right)?;
debug_assert_eq!(
Ty::new_tup(self.tcx.tcx, &[ty, self.tcx.types.bool]),
Ty::new_tup(self.tcx.tcx, &[val.layout.ty, self.tcx.types.bool]),
dest.layout.ty,
"type mismatch for result of {op:?}",
);
@@ -30,7 +30,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Abi::ScalarPair(..) = dest.layout.abi {
// We can use the optimized path and avoid `place_field` (which might do
// `force_allocation`).
let pair = Immediate::ScalarPair(val, Scalar::from_bool(overflowed));
let pair = Immediate::ScalarPair(val.to_scalar(), Scalar::from_bool(overflowed));
self.write_immediate(pair, dest)?;
} else {
assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
@@ -38,7 +38,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// do a component-wise write here. This code path is slower than the above because
// `place_field` will have to `force_allocate` locals here.
let val_field = self.project_field(dest, 0)?;
self.write_scalar(val, &val_field)?;
self.write_scalar(val.to_scalar(), &val_field)?;
let overflowed_field = self.project_field(dest, 1)?;
self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
}
@@ -54,9 +54,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right: &ImmTy<'tcx, M::Provenance>,
dest: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_scalar(val, dest)
let val = self.wrapping_binary_op(op, left, right)?;
assert_eq!(val.layout.ty, dest.layout.ty, "type mismatch for result of {op:?}");
self.write_immediate(*val, dest)
}
}
@@ -66,7 +66,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: char,
r: char,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -78,7 +78,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ge => l >= r,
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
};
(Scalar::from_bool(res), false, self.tcx.types.bool)
(ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_bool_op(
@@ -86,7 +86,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
l: bool,
r: bool,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let res = match bin_op {
@@ -101,33 +101,33 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
BitXor => l ^ r,
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
};
(Scalar::from_bool(res), false, self.tcx.types.bool)
(ImmTy::from_bool(res, *self.tcx), false)
}
fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
&self,
bin_op: mir::BinOp,
ty: Ty<'tcx>,
layout: TyAndLayout<'tcx>,
l: F,
r: F,
) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
) -> (ImmTy<'tcx, M::Provenance>, bool) {
use rustc_middle::mir::BinOp::*;
let (val, ty) = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
Add => ((l + r).value.into(), ty),
Sub => ((l - r).value.into(), ty),
Mul => ((l * r).value.into(), ty),
Div => ((l / r).value.into(), ty),
Rem => ((l % r).value.into(), ty),
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
Add => ImmTy::from_scalar((l + r).value.into(), layout),
Sub => ImmTy::from_scalar((l - r).value.into(), layout),
Mul => ImmTy::from_scalar((l * r).value.into(), layout),
Div => ImmTy::from_scalar((l / r).value.into(), layout),
Rem => ImmTy::from_scalar((l % r).value.into(), layout),
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
};
(val, false, ty)
(val, false)
}
fn binary_int_op(
@@ -138,7 +138,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
left_layout: TyAndLayout<'tcx>,
r: u128,
right_layout: TyAndLayout<'tcx>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
let throw_ub_on_overflow = match bin_op {
@@ -200,19 +200,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
);
}
return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
// For the remaining ops, the types must be the same on both sides
if left_layout.ty != right_layout.ty {
span_bug!(
self.cur_span(),
"invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
bin_op,
l,
left_layout.ty,
r,
right_layout.ty,
"invalid asymmetric binary op {bin_op:?}: {l:?} ({l_ty}), {r:?} ({r_ty})",
l_ty = left_layout.ty,
r_ty = right_layout.ty,
)
}
@@ -230,7 +227,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Some(op) = op {
let l = self.sign_extend(l, left_layout) as i128;
let r = self.sign_extend(r, right_layout) as i128;
return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
return Ok((ImmTy::from_bool(op(&l, &r), *self.tcx), false));
}
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
Div if r == 0 => throw_ub!(DivisionByZero),
@@ -267,22 +264,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
}
let (val, ty) = match bin_op {
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
let val = match bin_op {
Eq => ImmTy::from_bool(l == r, *self.tcx),
Ne => ImmTy::from_bool(l != r, *self.tcx),
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
Lt => ImmTy::from_bool(l < r, *self.tcx),
Le => ImmTy::from_bool(l <= r, *self.tcx),
Gt => ImmTy::from_bool(l > r, *self.tcx),
Ge => ImmTy::from_bool(l >= r, *self.tcx),
BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
BitOr => ImmTy::from_uint(l | r, left_layout),
BitAnd => ImmTy::from_uint(l & r, left_layout),
BitXor => ImmTy::from_uint(l ^ r, left_layout),
Add | AddUnchecked | Sub | SubUnchecked | Mul | MulUnchecked | Rem | Div => {
assert!(!left_layout.abi.is_signed());
@@ -304,12 +301,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if overflow && let Some(intrinsic_name) = throw_ub_on_overflow {
throw_ub_custom!(fluent::const_eval_overflow, name = intrinsic_name);
}
return Ok((Scalar::from_uint(truncated, size), overflow, left_layout.ty));
return Ok((ImmTy::from_uint(truncated, left_layout), overflow));
}
_ => span_bug!(
self.cur_span(),
"invalid binary op {:?}: {:?}, {:?} (both {:?})",
"invalid binary op {:?}: {:?}, {:?} (both {})",
bin_op,
l,
r,
@@ -317,7 +314,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
),
};
Ok((val, false, ty))
Ok((val, false))
}
fn binary_ptr_op(
@@ -325,7 +322,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::BinOp::*;
match bin_op {
@@ -336,7 +333,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let pointee_ty = left.layout.ty.builtin_deref(true).unwrap().ty;
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
Ok((Scalar::from_maybe_pointer(offset_ptr, self), false, left.layout.ty))
Ok((
ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout),
false,
))
}
// Fall back to machine hook so Miri can support more pointer ops.
@@ -344,16 +344,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
}
/// Returns the result of the specified operation, whether it overflowed, and
/// the result type.
/// Returns the result of the specified operation, and whether it overflowed.
pub fn overflowing_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
trace!(
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
"Running binary op {:?}: {:?} ({}), {:?} ({})",
bin_op,
*left,
left.layout.ty,
@@ -376,15 +375,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
ty::Float(fty) => {
assert_eq!(left.layout.ty, right.layout.ty);
let ty = left.layout.ty;
let layout = left.layout;
let left = left.to_scalar();
let right = right.to_scalar();
Ok(match fty {
FloatTy::F32 => {
self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
self.binary_float_op(bin_op, layout, left.to_f32()?, right.to_f32()?)
}
FloatTy::F64 => {
self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
self.binary_float_op(bin_op, layout, left.to_f64()?, right.to_f64()?)
}
})
}
@@ -392,7 +391,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// the RHS type can be different, e.g. for shifts -- but it has to be integral, too
assert!(
right.layout.ty.is_integral(),
"Unexpected types for BinOp: {:?} {:?} {:?}",
"Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -407,7 +406,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (Even when both sides are pointers, their type might differ, see issue #91636)
assert!(
right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
"Unexpected types for BinOp: {:?} {:?} {:?}",
"Unexpected types for BinOp: {} {:?} {}",
left.layout.ty,
bin_op,
right.layout.ty
@@ -417,22 +416,21 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
self.cur_span(),
"Invalid MIR: bad LHS type for binop: {:?}",
"Invalid MIR: bad LHS type for binop: {}",
left.layout.ty
),
}
}
/// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
#[inline]
pub fn binary_op(
pub fn wrapping_binary_op(
&self,
bin_op: mir::BinOp,
left: &ImmTy<'tcx, M::Provenance>,
right: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
let (val, _overflow) = self.overflowing_binary_op(bin_op, left, right)?;
Ok(val)
}
/// Returns the result of the specified operation, whether it overflowed, and
@@ -441,12 +439,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
) -> InterpResult<'tcx, (ImmTy<'tcx, M::Provenance>, bool)> {
use rustc_middle::mir::UnOp::*;
let layout = val.layout;
let val = val.to_scalar();
trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
trace!("Running unary op {:?}: {:?} ({})", un_op, val, layout.ty);
match layout.ty.kind() {
ty::Bool => {
@@ -455,7 +453,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Not => !val,
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
};
Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
Ok((ImmTy::from_bool(res, *self.tcx), false))
}
ty::Float(fty) => {
let res = match (un_op, fty) {
@ -463,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
};
Ok((res, false, layout.ty))
Ok((ImmTy::from_scalar(res, layout), false))
}
_ => {
assert!(layout.ty.is_integral());
@@ -482,17 +480,18 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
(truncated, overflow || self.sign_extend(truncated, layout) != res)
}
};
Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
Ok((ImmTy::from_uint(res, layout), overflow))
}
}
}
pub fn unary_op(
#[inline]
pub fn wrapping_unary_op(
&self,
un_op: mir::UnOp,
val: &ImmTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
let (val, _overflow) = self.overflowing_unary_op(un_op, val)?;
Ok(val)
}
}
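With the ImmTy return type in place, binary_op is renamed to wrapping_binary_op (and unary_op to wrapping_unary_op) to make explicit that the overflow flag is discarded. A toy analogy in plain u8 arithmetic:

// The "wrapping" variant is just the overflowing variant with the
// overflow flag deliberately dropped, as in u8::overflowing_add vs
// u8::wrapping_add.
fn overflowing_add_op(l: u8, r: u8) -> (u8, bool) {
    l.overflowing_add(r)
}

fn wrapping_add_op(l: u8, r: u8) -> u8 {
    let (val, _overflow) = overflowing_add_op(l, r);
    val
}

fn main() {
    assert_eq!(overflowing_add_op(250, 10), (4, true)); // wrapped mod 256
    assert_eq!(wrapping_add_op(250, 10), 4);
}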

View file

@@ -460,7 +460,7 @@ where
trace!("deref to {} on {:?}", val.layout.ty, *val);
if val.layout.ty.is_box() {
bug!("dereferencing {:?}", val.layout.ty);
bug!("dereferencing {}", val.layout.ty);
}
let mplace = self.ref_to_mplace(&val)?;
@@ -582,7 +582,7 @@ where
)?)?,
place.layout,
),
"eval_place of a MIR place with type {:?} produced an interpreter place with type {:?}",
"eval_place of a MIR place with type {:?} produced an interpreter place with type {}",
mir_place.ty(&self.frame().body.local_decls, *self.tcx).ty,
place.layout.ty,
);
@@ -835,7 +835,7 @@ where
if !allow_transmute && !layout_compat {
span_bug!(
self.cur_span(),
"type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
"type mismatch when copying!\nsrc: {},\ndest: {}",
src.layout().ty,
dest.layout().ty,
);

View file

@@ -177,7 +177,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?;
let val = self.wrapping_unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {un_op:?}");
self.write_immediate(*val, &dest)?;
}

View file

@@ -98,14 +98,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
let res = self
.overflowing_binary_op(
mir::BinOp::Eq,
&discr,
&ImmTy::from_uint(const_int, discr.layout),
)?
.0;
if res.to_bool()? {
let res = self.wrapping_binary_op(
mir::BinOp::Eq,
&discr,
&ImmTy::from_uint(const_int, discr.layout),
)?;
if res.to_scalar().to_bool()? {
target_block = target;
break;
}
@@ -151,7 +149,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
_ => span_bug!(
terminator.source_info.span,
"invalid callee of type {:?}",
"invalid callee of type {}",
func.layout.ty
),
};
@@ -681,10 +679,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.storage_live(local)?;
// Must be a tuple
let ty::Tuple(fields) = ty.kind() else {
span_bug!(
self.cur_span(),
"non-tuple type for `spread_arg`: {ty:?}"
)
span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
};
for (i, field_ty) in fields.iter().enumerate() {
let dest = dest.project_deeper(
@@ -926,7 +921,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
target: mir::BasicBlock,
unwind: mir::UnwindAction,
) -> InterpResult<'tcx> {
trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
trace!("drop_in_place: {:?},\n instance={:?}", place, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
// implementation fail -- a problem shared by rustc.