Auto merge of #98831 - RalfJung:no-more-unsized-locals, r=oli-obk
interpret: remove support for unsized_locals I added support for unsized_locals in https://github.com/rust-lang/rust/pull/59780 but the current implementation is a crude hack and IMO definitely not the right way to have unsized locals in MIR. It also [causes problems](https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/topic/Missing.20Layout.20Check.20in.20.60interpret.2Foperand.2Ers.60.3F), and what codegen does is unsound and has been for years since clearly nobody cares (so I hope nobody actually relies on that implementation and I'll be happy if Miri ensures they do not). I think if we want to have unsized locals in Miri/MIR we should add them properly, either by having a `StorageLive` that takes metadata or by having an `alloca` that returns a pointer (making the ptr indirection explicit) or something like that. So, this PR removes the `LocalValue::Unallocated` hack. It adds `Immediate::Uninit`, for several reasons: - This lets us still do fairly little work in `push_stack_frame`, in particular we do not actually have to create any allocations. - If/when I remove `ScalarMaybeUninit`, we will need something like this to have an "optimized" representation of uninitialized locals. Without this we'd have to put uninitialized integers into the heap! - const-prop needs some way to indicate "I don't know the value of this local"; it used to use `LocalValue::Unallocated` for that, now it can use `Immediate::Uninit`. There is still a fundamental difference between `LocalValue::Unallocated` and `Immediate::Uninit`: the latter is considered a regular local that you can read from and write to, it just has a more optimized representation when compared with an actual `Allocation` that is fully uninit. In contrast, `LocalValue::Unallocated` had this really odd behavior where you would write to it but not read from it. (This is in fact what caused the problems mentioned above.) 
While at it I also did two drive-by cleanups/improvements: - In `pop_stack_frame`, do the return value copying and local deallocation while the frame is still on the stack. This leads to better error locations being reported. The old errors were [sometimes rather confusing](https://rust-lang.zulipchat.com/#narrow/stream/269128-miri/topic/Cron.20Job.20Failure.202022-06-24/near/287445522). - Deduplicate `copy_op` and `copy_op_transmute`. r? `@oli-obk`
This commit is contained in:
commit
8824d13161
14 changed files with 300 additions and 259 deletions
|
@ -189,6 +189,7 @@ pub(super) fn op_to_const<'tcx>(
|
|||
let len: usize = len.try_into().unwrap();
|
||||
ConstValue::Slice { data, start, end: start + len }
|
||||
}
|
||||
Immediate::Uninit => to_const_value(&op.assert_mem_place()),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -153,7 +153,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
assert_eq!(dest_layout.size, self.pointer_size());
|
||||
assert!(src.layout.ty.is_unsafe_ptr());
|
||||
return match **src {
|
||||
Immediate::ScalarPair(data, _) => Ok(data.into()),
|
||||
Immediate::ScalarPair(data, _) => Ok(data.check_init()?.into()),
|
||||
Immediate::Scalar(..) => span_bug!(
|
||||
self.cur_span(),
|
||||
"{:?} input to a fat-to-thin cast ({:?} -> {:?})",
|
||||
|
@ -161,6 +161,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
src.layout.ty,
|
||||
cast_ty
|
||||
),
|
||||
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
@ -358,7 +359,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
let src_field = self.operand_field(src, i)?;
|
||||
let dst_field = self.place_field(dest, i)?;
|
||||
if src_field.layout.ty == cast_ty_field.ty {
|
||||
self.copy_op(&src_field, &dst_field)?;
|
||||
self.copy_op(&src_field, &dst_field, /*allow_transmute*/ false)?;
|
||||
} else {
|
||||
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
|
||||
}
|
||||
|
|
|
@ -112,6 +112,8 @@ pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
|
|||
/// The locals are stored as `Option<Value>`s.
|
||||
/// `None` represents a local that is currently dead, while a live local
|
||||
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
|
||||
///
|
||||
/// Do *not* access this directly; always go through the machine hook!
|
||||
pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,
|
||||
|
||||
/// The span of the `tracing` crate is stored here.
|
||||
|
@ -179,10 +181,6 @@ pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
|
|||
pub enum LocalValue<Tag: Provenance = AllocId> {
|
||||
/// This local is not currently alive, and cannot be used at all.
|
||||
Dead,
|
||||
/// This local is alive but not yet allocated. It cannot be read from or have its address taken,
|
||||
/// and will be allocated on the first write. This is to support unsized locals, where we cannot
|
||||
/// know their size in advance.
|
||||
Unallocated,
|
||||
/// A normal, live local.
|
||||
/// Mostly for convenience, we re-use the `Operand` type here.
|
||||
/// This is an optimization over just always having a pointer here;
|
||||
|
@ -196,12 +194,10 @@ impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
|
|||
///
|
||||
/// Note: This may only be invoked from the `Machine::access_local` hook and not from
|
||||
/// anywhere else. You may be invalidating machine invariants if you do!
|
||||
pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
|
||||
match self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal),
|
||||
LocalValue::Unallocated => {
|
||||
bug!("The type checker should prevent reading from a never-written local")
|
||||
}
|
||||
#[inline]
|
||||
pub fn access(&self) -> InterpResult<'tcx, &Operand<Tag>> {
|
||||
match &self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
|
||||
LocalValue::Live(val) => Ok(val),
|
||||
}
|
||||
}
|
||||
|
@ -211,15 +207,11 @@ impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
|
|||
///
|
||||
/// Note: This may only be invoked from the `Machine::access_local_mut` hook and not from
|
||||
/// anywhere else. You may be invalidating machine invariants if you do!
|
||||
pub fn access_mut(
|
||||
&mut self,
|
||||
) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
|
||||
match self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal),
|
||||
LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
|
||||
ref mut local @ (LocalValue::Live(Operand::Immediate(_)) | LocalValue::Unallocated) => {
|
||||
Ok(Ok(local))
|
||||
}
|
||||
#[inline]
|
||||
pub fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Tag>> {
|
||||
match &mut self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
|
||||
LocalValue::Live(val) => Ok(val),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -710,16 +702,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
})?;
|
||||
}
|
||||
|
||||
// Locals are initially unallocated.
|
||||
let dummy = LocalState { value: LocalValue::Unallocated, layout: Cell::new(None) };
|
||||
// Most locals are initially dead.
|
||||
let dummy = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
|
||||
let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
|
||||
|
||||
// Now mark those locals as dead that we do not want to initialize
|
||||
// Mark locals that use `Storage*` annotations as dead on function entry.
|
||||
// Now mark those locals as live that have no `Storage*` annotations.
|
||||
let always_live = always_live_locals(self.body());
|
||||
for local in locals.indices() {
|
||||
if !always_live.contains(local) {
|
||||
locals[local].value = LocalValue::Dead;
|
||||
if always_live.contains(local) {
|
||||
locals[local].value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
|
||||
}
|
||||
}
|
||||
// done
|
||||
|
@ -791,7 +782,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
if unwinding { "during unwinding" } else { "returning from function" }
|
||||
);
|
||||
|
||||
// Sanity check `unwinding`.
|
||||
// Check `unwinding`.
|
||||
assert_eq!(
|
||||
unwinding,
|
||||
match self.frame().loc {
|
||||
|
@ -799,51 +790,61 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
Err(_) => true,
|
||||
}
|
||||
);
|
||||
|
||||
if unwinding && self.frame_idx() == 0 {
|
||||
throw_ub_format!("unwinding past the topmost frame of the stack");
|
||||
}
|
||||
|
||||
let frame =
|
||||
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
|
||||
|
||||
if !unwinding {
|
||||
let op = self.local_to_op(&frame, mir::RETURN_PLACE, None)?;
|
||||
self.copy_op_transmute(&op, &frame.return_place)?;
|
||||
trace!("{:?}", self.dump_place(*frame.return_place));
|
||||
}
|
||||
|
||||
let return_to_block = frame.return_to_block;
|
||||
|
||||
// Now where do we jump next?
|
||||
// Copy return value. Must of course happen *before* we deallocate the locals.
|
||||
let copy_ret_result = if !unwinding {
|
||||
let op = self
|
||||
.local_to_op(self.frame(), mir::RETURN_PLACE, None)
|
||||
.expect("return place should always be live");
|
||||
let dest = self.frame().return_place;
|
||||
let err = self.copy_op(&op, &dest, /*allow_transmute*/ true);
|
||||
trace!("return value: {:?}", self.dump_place(*dest));
|
||||
// We delay actually short-circuiting on this error until *after* the stack frame is
|
||||
// popped, since we want this error to be attributed to the caller, whose type defines
|
||||
// this transmute.
|
||||
err
|
||||
} else {
|
||||
Ok(())
|
||||
};
|
||||
|
||||
// Cleanup: deallocate locals.
|
||||
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
|
||||
// In that case, we return early. We also avoid validation in that case,
|
||||
// because this is CTFE and the final value will be thoroughly validated anyway.
|
||||
// We do this while the frame is still on the stack, so errors point to the callee.
|
||||
let return_to_block = self.frame().return_to_block;
|
||||
let cleanup = match return_to_block {
|
||||
StackPopCleanup::Goto { .. } => true,
|
||||
StackPopCleanup::Root { cleanup, .. } => cleanup,
|
||||
};
|
||||
if cleanup {
|
||||
// We need to take the locals out, since we need to mutate while iterating.
|
||||
let locals = mem::take(&mut self.frame_mut().locals);
|
||||
for local in &locals {
|
||||
self.deallocate_local(local.value)?;
|
||||
}
|
||||
}
|
||||
|
||||
// All right, now it is time to actually pop the frame.
|
||||
// Note that its locals are gone already, but that's fine.
|
||||
let frame =
|
||||
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
|
||||
// Report error from return value copy, if any.
|
||||
copy_ret_result?;
|
||||
|
||||
// If we are not doing cleanup, also skip everything else.
|
||||
if !cleanup {
|
||||
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
|
||||
assert!(!unwinding, "tried to skip cleanup during unwinding");
|
||||
// Leak the locals, skip validation, skip machine hook.
|
||||
// Skip machine hook.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
trace!("locals: {:#?}", frame.locals);
|
||||
|
||||
// Cleanup: deallocate all locals that are backed by an allocation.
|
||||
for local in &frame.locals {
|
||||
self.deallocate_local(local.value)?;
|
||||
}
|
||||
|
||||
if M::after_stack_pop(self, frame, unwinding)? == StackPopJump::NoJump {
|
||||
// The hook already did everything.
|
||||
// We want to skip the `info!` below, hence early return.
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Normal return, figure out where to jump.
|
||||
if unwinding {
|
||||
// Follow the unwind edge.
|
||||
|
@ -874,7 +875,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
|
||||
trace!("{:?} is now live", local);
|
||||
|
||||
let local_val = LocalValue::Unallocated;
|
||||
let local_val = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
|
||||
// StorageLive expects the local to be dead, and marks it live.
|
||||
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
|
||||
if !matches!(old, LocalValue::Dead) {
|
||||
|
@ -977,7 +978,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
|
|||
|
||||
match self.ecx.stack()[frame].locals[local].value {
|
||||
LocalValue::Dead => write!(fmt, " is dead")?,
|
||||
LocalValue::Unallocated => write!(fmt, " is unallocated")?,
|
||||
LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
|
||||
write!(fmt, " is uninitialized")?
|
||||
}
|
||||
LocalValue::Live(Operand::Indirect(mplace)) => {
|
||||
write!(
|
||||
fmt,
|
||||
|
|
|
@ -174,7 +174,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
let val =
|
||||
self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
|
||||
let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
|
||||
self.copy_op(&val, dest)?;
|
||||
self.copy_op(&val, dest, /*allow_transmute*/ false)?;
|
||||
}
|
||||
|
||||
sym::ctpop
|
||||
|
@ -394,7 +394,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
}
|
||||
|
||||
sym::transmute => {
|
||||
self.copy_op_transmute(&args[0], dest)?;
|
||||
self.copy_op(&args[0], dest, /*allow_transmute*/ true)?;
|
||||
}
|
||||
sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
|
||||
let ty = instance.substs.type_at(0);
|
||||
|
@ -461,7 +461,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
let place = self.mplace_index(&dest, i)?;
|
||||
let value =
|
||||
if i == index { *elem } else { self.mplace_index(&input, i)?.into() };
|
||||
self.copy_op(&value, &place.into())?;
|
||||
self.copy_op(&value, &place.into(), /*allow_transmute*/ false)?;
|
||||
}
|
||||
}
|
||||
sym::simd_extract => {
|
||||
|
@ -473,11 +473,15 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
index,
|
||||
input_len
|
||||
);
|
||||
self.copy_op(&self.mplace_index(&input, index)?.into(), dest)?;
|
||||
self.copy_op(
|
||||
&self.mplace_index(&input, index)?.into(),
|
||||
dest,
|
||||
/*allow_transmute*/ false,
|
||||
)?;
|
||||
}
|
||||
sym::likely | sym::unlikely | sym::black_box => {
|
||||
// These just return their argument
|
||||
self.copy_op(&args[0], dest)?;
|
||||
self.copy_op(&args[0], dest, /*allow_transmute*/ false)?;
|
||||
}
|
||||
sym::assume => {
|
||||
let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
|
||||
|
|
|
@ -14,8 +14,7 @@ use rustc_target::spec::abi::Abi;
|
|||
|
||||
use super::{
|
||||
AllocId, AllocRange, Allocation, ConstAllocation, Frame, ImmTy, InterpCx, InterpResult,
|
||||
LocalValue, MemPlace, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar,
|
||||
StackPopUnwind,
|
||||
MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
|
||||
};
|
||||
|
||||
/// Data returned by Machine::stack_pop,
|
||||
|
@ -226,11 +225,13 @@ pub trait Machine<'mir, 'tcx>: Sized {
|
|||
/// Since reading a ZST is not actually accessing memory or locals, this is never invoked
|
||||
/// for ZST reads.
|
||||
#[inline]
|
||||
fn access_local(
|
||||
_ecx: &InterpCx<'mir, 'tcx, Self>,
|
||||
frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
|
||||
fn access_local<'a>(
|
||||
frame: &'a Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
|
||||
local: mir::Local,
|
||||
) -> InterpResult<'tcx, Operand<Self::PointerTag>> {
|
||||
) -> InterpResult<'tcx, &'a Operand<Self::PointerTag>>
|
||||
where
|
||||
'tcx: 'mir,
|
||||
{
|
||||
frame.locals[local].access()
|
||||
}
|
||||
|
||||
|
@ -242,7 +243,7 @@ pub trait Machine<'mir, 'tcx>: Sized {
|
|||
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
|
||||
frame: usize,
|
||||
local: mir::Local,
|
||||
) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
|
||||
) -> InterpResult<'tcx, &'a mut Operand<Self::PointerTag>>
|
||||
where
|
||||
'tcx: 'mir,
|
||||
{
|
||||
|
@ -418,12 +419,14 @@ pub trait Machine<'mir, 'tcx>: Sized {
|
|||
}
|
||||
|
||||
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
|
||||
/// The `locals` have already been destroyed!
|
||||
fn after_stack_pop(
|
||||
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
_frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
|
||||
_unwinding: bool,
|
||||
unwinding: bool,
|
||||
) -> InterpResult<'tcx, StackPopJump> {
|
||||
// By default, we do not support unwinding from panics
|
||||
assert!(!unwinding);
|
||||
Ok(StackPopJump::Normal)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,7 @@ use rustc_target::abi::{self, Abi, Align, HasDataLayout, Size, TagEncoding};
|
|||
use rustc_target::abi::{VariantIdx, Variants};
|
||||
|
||||
use super::{
|
||||
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
|
||||
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, Frame, GlobalId,
|
||||
InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer,
|
||||
PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
|
||||
};
|
||||
|
@ -28,8 +28,15 @@ use super::{
|
|||
/// defined on `Immediate`, and do not have to work with a `Place`.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
|
||||
pub enum Immediate<Tag: Provenance = AllocId> {
|
||||
/// A single scalar value (must have *initialized* `Scalar` ABI).
|
||||
/// FIXME: we also currently often use this for ZST.
|
||||
/// `ScalarMaybeUninit` should reject ZST, and we should use `Uninit` for them instead.
|
||||
Scalar(ScalarMaybeUninit<Tag>),
|
||||
/// A pair of two scalar value (must have `ScalarPair` ABI where both fields are
|
||||
/// `Scalar::Initialized`).
|
||||
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
|
||||
/// A value of fully uninitialized memory. Can have any size and layout.
|
||||
Uninit,
|
||||
}
|
||||
|
||||
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
|
||||
|
@ -75,6 +82,7 @@ impl<'tcx, Tag: Provenance> Immediate<Tag> {
|
|||
match self {
|
||||
Immediate::Scalar(val) => val,
|
||||
Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
|
||||
Immediate::Uninit => ScalarMaybeUninit::Uninit,
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -88,6 +96,7 @@ impl<'tcx, Tag: Provenance> Immediate<Tag> {
|
|||
match self {
|
||||
Immediate::ScalarPair(val1, val2) => (val1, val2),
|
||||
Immediate::Scalar(..) => bug!("Got a scalar where a scalar pair was expected"),
|
||||
Immediate::Uninit => (ScalarMaybeUninit::Uninit, ScalarMaybeUninit::Uninit),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -149,7 +158,10 @@ impl<Tag: Provenance> std::fmt::Display for ImmTy<'_, Tag> {
|
|||
}
|
||||
Immediate::ScalarPair(a, b) => {
|
||||
// FIXME(oli-obk): at least print tuples and slices nicely
|
||||
write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty,)
|
||||
write!(f, "({:x}, {:x}): {}", a, b, self.layout.ty)
|
||||
}
|
||||
Immediate::Uninit => {
|
||||
write!(f, "uninit: {}", self.layout.ty)
|
||||
}
|
||||
}
|
||||
})
|
||||
|
@ -397,7 +409,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
|
||||
}
|
||||
|
||||
// Turn the wide MPlace into a string (must already be dereferenced!)
|
||||
/// Turn the wide MPlace into a string (must already be dereferenced!)
|
||||
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
|
||||
let len = mplace.len(self)?;
|
||||
let bytes = self.read_bytes_ptr(mplace.ptr, Size::from_bytes(len))?;
|
||||
|
@ -528,10 +540,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
/// Will not access memory, instead an indirect `Operand` is returned.
|
||||
///
|
||||
/// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
|
||||
/// OpTy from a local
|
||||
/// OpTy from a local.
|
||||
pub fn local_to_op(
|
||||
&self,
|
||||
frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
|
||||
frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
|
||||
local: mir::Local,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
|
@ -540,7 +552,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
// Do not read from ZST, they might not be initialized
|
||||
Operand::Immediate(Scalar::ZST.into())
|
||||
} else {
|
||||
M::access_local(&self, frame, local)?
|
||||
*M::access_local(frame, local)?
|
||||
};
|
||||
Ok(OpTy { op, layout, align: Some(layout.align.abi) })
|
||||
}
|
||||
|
|
|
@ -10,13 +10,14 @@ use rustc_macros::HashStable;
|
|||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
|
||||
use rustc_middle::ty::{self, Ty};
|
||||
use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
|
||||
use rustc_target::abi::{HasDataLayout, Size, VariantIdx, Variants};
|
||||
use rustc_target::abi::{
|
||||
Abi, Align, FieldsShape, HasDataLayout, Size, TagEncoding, VariantIdx, Variants,
|
||||
};
|
||||
|
||||
use super::{
|
||||
alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
|
||||
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
|
||||
Operand, Pointer, Provenance, Scalar, ScalarMaybeUninit,
|
||||
ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemoryKind, OpTy, Operand,
|
||||
Pointer, Provenance, Scalar, ScalarMaybeUninit,
|
||||
};
|
||||
|
||||
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
|
||||
|
@ -183,6 +184,18 @@ impl<Tag: Provenance> MemPlace<Tag> {
|
|||
}
|
||||
}
|
||||
|
||||
impl<Tag: Provenance> Place<Tag> {
|
||||
/// Asserts that this points to some local variable.
|
||||
/// Returns the frame idx and the variable idx.
|
||||
#[inline]
|
||||
pub fn assert_local(&self) -> (usize, mir::Local) {
|
||||
match self {
|
||||
Place::Local { frame, local } => (*frame, *local),
|
||||
_ => bug!("assert_local: expected Place::Local, got {:?}", self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
|
||||
/// Produces a MemPlace that works for ZST but nothing else
|
||||
#[inline]
|
||||
|
@ -286,7 +299,7 @@ impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
|
||||
pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
|
||||
self.try_as_mplace().unwrap()
|
||||
}
|
||||
}
|
||||
|
@ -314,6 +327,7 @@ where
|
|||
let (ptr, meta) = match **val {
|
||||
Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
|
||||
Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
|
||||
Immediate::Uninit => throw_ub!(InvalidUninitBytes(None)),
|
||||
};
|
||||
|
||||
let mplace = MemPlace { ptr: self.scalar_to_ptr(ptr.check_init()?)?, meta };
|
||||
|
@ -746,32 +760,33 @@ where
|
|||
let mplace = match dest.place {
|
||||
Place::Local { frame, local } => {
|
||||
match M::access_local_mut(self, frame, local)? {
|
||||
Ok(local) => {
|
||||
Operand::Immediate(local) => {
|
||||
// Local can be updated in-place.
|
||||
*local = LocalValue::Live(Operand::Immediate(src));
|
||||
*local = src;
|
||||
return Ok(());
|
||||
}
|
||||
Err(mplace) => {
|
||||
Operand::Indirect(mplace) => {
|
||||
// The local is in memory, go on below.
|
||||
mplace
|
||||
*mplace
|
||||
}
|
||||
}
|
||||
}
|
||||
Place::Ptr(mplace) => mplace, // already referring to memory
|
||||
};
|
||||
let dest = MPlaceTy { mplace, layout: dest.layout, align: dest.align };
|
||||
|
||||
// This is already in memory, write there.
|
||||
self.write_immediate_to_mplace_no_validate(src, &dest)
|
||||
self.write_immediate_to_mplace_no_validate(src, dest.layout, dest.align, mplace)
|
||||
}
|
||||
|
||||
/// Write an immediate to memory.
|
||||
/// If you use this you are responsible for validating that things got copied at the
|
||||
/// right type.
|
||||
/// right layout.
|
||||
fn write_immediate_to_mplace_no_validate(
|
||||
&mut self,
|
||||
value: Immediate<M::PointerTag>,
|
||||
dest: &MPlaceTy<'tcx, M::PointerTag>,
|
||||
layout: TyAndLayout<'tcx>,
|
||||
align: Align,
|
||||
dest: MemPlace<M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// Note that it is really important that the type here is the right one, and matches the
|
||||
// type things are read at. In case `value` is a `ScalarPair`, we don't do any magic here
|
||||
|
@ -779,31 +794,30 @@ where
|
|||
// wrong type.
|
||||
|
||||
let tcx = *self.tcx;
|
||||
let Some(mut alloc) = self.get_place_alloc_mut(dest)? else {
|
||||
let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout, align })? else {
|
||||
// zero-sized access
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
match value {
|
||||
Immediate::Scalar(scalar) => {
|
||||
let Abi::Scalar(s) = dest.layout.abi else { span_bug!(
|
||||
let Abi::Scalar(s) = layout.abi else { span_bug!(
|
||||
self.cur_span(),
|
||||
"write_immediate_to_mplace: invalid Scalar layout: {:#?}",
|
||||
dest.layout
|
||||
"write_immediate_to_mplace: invalid Scalar layout: {layout:#?}",
|
||||
)
|
||||
};
|
||||
let size = s.size(&tcx);
|
||||
assert_eq!(size, dest.layout.size, "abi::Scalar size does not match layout size");
|
||||
assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
|
||||
alloc.write_scalar(alloc_range(Size::ZERO, size), scalar)
|
||||
}
|
||||
Immediate::ScalarPair(a_val, b_val) => {
|
||||
// We checked `ptr_align` above, so all fields will have the alignment they need.
|
||||
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
|
||||
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
|
||||
let Abi::ScalarPair(a, b) = dest.layout.abi else { span_bug!(
|
||||
let Abi::ScalarPair(a, b) = layout.abi else { span_bug!(
|
||||
self.cur_span(),
|
||||
"write_immediate_to_mplace: invalid ScalarPair layout: {:#?}",
|
||||
dest.layout
|
||||
layout
|
||||
)
|
||||
};
|
||||
let (a_size, b_size) = (a.size(&tcx), b.size(&tcx));
|
||||
|
@ -817,6 +831,7 @@ where
|
|||
alloc.write_scalar(alloc_range(Size::ZERO, a_size), a_val)?;
|
||||
alloc.write_scalar(alloc_range(b_offset, b_size), b_val)
|
||||
}
|
||||
Immediate::Uninit => alloc.write_uninit(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -825,25 +840,13 @@ where
|
|||
Ok(mplace) => mplace,
|
||||
Err((frame, local)) => {
|
||||
match M::access_local_mut(self, frame, local)? {
|
||||
Ok(local) => match dest.layout.abi {
|
||||
Abi::Scalar(_) => {
|
||||
*local = LocalValue::Live(Operand::Immediate(Immediate::Scalar(
|
||||
ScalarMaybeUninit::Uninit,
|
||||
)));
|
||||
return Ok(());
|
||||
}
|
||||
Abi::ScalarPair(..) => {
|
||||
*local = LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(
|
||||
ScalarMaybeUninit::Uninit,
|
||||
ScalarMaybeUninit::Uninit,
|
||||
)));
|
||||
return Ok(());
|
||||
}
|
||||
_ => self.force_allocation(dest)?,
|
||||
},
|
||||
Err(mplace) => {
|
||||
Operand::Immediate(local) => {
|
||||
*local = Immediate::Uninit;
|
||||
return Ok(());
|
||||
}
|
||||
Operand::Indirect(mplace) => {
|
||||
// The local is in memory, go on below.
|
||||
MPlaceTy { mplace, layout: dest.layout, align: dest.align }
|
||||
MPlaceTy { mplace: *mplace, layout: dest.layout, align: dest.align }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -856,16 +859,17 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Copies the data from an operand to a place. This does not support transmuting!
|
||||
/// Use `copy_op_transmute` if the layouts could disagree.
|
||||
/// Copies the data from an operand to a place.
|
||||
/// `allow_transmute` indicates whether the layouts may disagree.
|
||||
#[inline(always)]
|
||||
#[instrument(skip(self), level = "debug")]
|
||||
pub fn copy_op(
|
||||
&mut self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
allow_transmute: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.copy_op_no_validate(src, dest)?;
|
||||
self.copy_op_no_validate(src, dest, allow_transmute)?;
|
||||
|
||||
if M::enforce_validity(self) {
|
||||
// Data got changed, better make sure it matches the type!
|
||||
|
@ -875,8 +879,8 @@ where
|
|||
Ok(())
|
||||
}
|
||||
|
||||
/// Copies the data from an operand to a place. This does not support transmuting!
|
||||
/// Use `copy_op_transmute` if the layouts could disagree.
|
||||
/// Copies the data from an operand to a place.
|
||||
/// `allow_transmute` indicates whether the layouts may disagree.
|
||||
/// Also, if you use this you are responsible for validating that things get copied at the
|
||||
/// right type.
|
||||
#[instrument(skip(self), level = "debug")]
|
||||
|
@ -884,10 +888,13 @@ where
|
|||
&mut self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
allow_transmute: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
// We do NOT compare the types for equality, because well-typed code can
|
||||
// actually "transmute" `&mut T` to `&T` in an assignment without a cast.
|
||||
if !mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
|
||||
let layout_compat =
|
||||
mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout);
|
||||
if !allow_transmute && !layout_compat {
|
||||
span_bug!(
|
||||
self.cur_span(),
|
||||
"type mismatch when copying!\nsrc: {:?},\ndest: {:?}",
|
||||
|
@ -896,100 +903,68 @@ where
|
|||
);
|
||||
}
|
||||
|
||||
// Let us see if the layout is simple so we take a shortcut, avoid force_allocation.
|
||||
// Let us see if the layout is simple so we take a shortcut,
|
||||
// avoid force_allocation.
|
||||
let src = match self.read_immediate_raw(src, /*force*/ false)? {
|
||||
Ok(src_val) => {
|
||||
assert!(!src.layout.is_unsized(), "cannot have unsized immediates");
|
||||
assert!(
|
||||
!dest.layout.is_unsized(),
|
||||
"the src is sized, so the dest must also be sized"
|
||||
);
|
||||
assert_eq!(src.layout.size, dest.layout.size);
|
||||
// Yay, we got a value that we can write directly.
|
||||
return self.write_immediate_no_validate(*src_val, dest);
|
||||
return if layout_compat {
|
||||
self.write_immediate_no_validate(*src_val, dest)
|
||||
} else {
|
||||
// This is tricky. The problematic case is `ScalarPair`: the `src_val` was
|
||||
// loaded using the offsets defined by `src.layout`. When we put this back into
|
||||
// the destination, we have to use the same offsets! So (a) we make sure we
|
||||
// write back to memory, and (b) we use `dest` *with the source layout*.
|
||||
let dest_mem = self.force_allocation(dest)?;
|
||||
self.write_immediate_to_mplace_no_validate(
|
||||
*src_val,
|
||||
src.layout,
|
||||
dest_mem.align,
|
||||
*dest_mem,
|
||||
)
|
||||
};
|
||||
}
|
||||
Err(mplace) => mplace,
|
||||
};
|
||||
// Slow path, this does not fit into an immediate. Just memcpy.
|
||||
trace!("copy_op: {:?} <- {:?}: {}", *dest, src, dest.layout.ty);
|
||||
|
||||
// This interprets `src.meta` with the `dest` local's layout, if an unsized local
|
||||
// is being initialized!
|
||||
let (dest, size) = self.force_allocation_maybe_sized(dest, src.meta)?;
|
||||
let size = size.unwrap_or_else(|| {
|
||||
assert!(
|
||||
!dest.layout.is_unsized(),
|
||||
"Cannot copy into already initialized unsized place"
|
||||
);
|
||||
dest.layout.size
|
||||
});
|
||||
assert_eq!(src.meta, dest.meta, "Can only copy between equally-sized instances");
|
||||
|
||||
self.mem_copy(src.ptr, src.align, dest.ptr, dest.align, size, /*nonoverlapping*/ false)
|
||||
}
|
||||
|
||||
/// Copies the data from an operand to a place. The layouts may disagree, but they must
|
||||
/// have the same size.
|
||||
pub fn copy_op_transmute(
|
||||
&mut self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
if mir_assign_valid_types(*self.tcx, self.param_env, src.layout, dest.layout) {
|
||||
// Fast path: Just use normal `copy_op`
|
||||
return self.copy_op(src, dest);
|
||||
}
|
||||
// We still require the sizes to match.
|
||||
if src.layout.size != dest.layout.size {
|
||||
span_bug!(
|
||||
self.cur_span(),
|
||||
"size-changing transmute, should have been caught by transmute checking: {:#?}\ndest: {:#?}",
|
||||
src,
|
||||
dest
|
||||
);
|
||||
}
|
||||
// Unsized copies rely on interpreting `src.meta` with `dest.layout`, we want
|
||||
// to avoid that here.
|
||||
assert!(
|
||||
!src.layout.is_unsized() && !dest.layout.is_unsized(),
|
||||
"Cannot transmute unsized data"
|
||||
);
|
||||
|
||||
// The hard case is `ScalarPair`. `src` is already read from memory in this case,
|
||||
// using `src.layout` to figure out which bytes to use for the 1st and 2nd field.
|
||||
// We have to write them to `dest` at the offsets they were *read at*, which is
|
||||
// not necessarily the same as the offsets in `dest.layout`!
|
||||
// Hence we do the copy with the source layout on both sides. We also make sure to write
|
||||
// into memory, because if `dest` is a local we would not even have a way to write
|
||||
// at the `src` offsets; the fact that we came from a different layout would
|
||||
// just be lost.
|
||||
let dest = self.force_allocation(dest)?;
|
||||
self.copy_op_no_validate(
|
||||
src,
|
||||
&PlaceTy::from(MPlaceTy { mplace: *dest, layout: src.layout, align: dest.align }),
|
||||
)?;
|
||||
|
||||
if M::enforce_validity(self) {
|
||||
// Data got changed, better make sure it matches the type!
|
||||
self.validate_operand(&dest.into())?;
|
||||
let dest = self.force_allocation(&dest)?;
|
||||
let Some((dest_size, _)) = self.size_and_align_of_mplace(&dest)? else {
|
||||
span_bug!(self.cur_span(), "copy_op needs (dynamically) sized values")
|
||||
};
|
||||
if cfg!(debug_assertions) {
|
||||
let src_size = self.size_and_align_of_mplace(&src)?.unwrap().0;
|
||||
assert_eq!(src_size, dest_size, "Cannot copy differently-sized data");
|
||||
} else {
|
||||
// As a cheap approximation, we compare the fixed parts of the size.
|
||||
assert_eq!(src.layout.size, dest.layout.size);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
self.mem_copy(
|
||||
src.ptr, src.align, dest.ptr, dest.align, dest_size, /*nonoverlapping*/ false,
|
||||
)
|
||||
}
|
||||
|
||||
/// Ensures that a place is in memory, and returns where it is.
|
||||
/// If the place currently refers to a local that doesn't yet have a matching allocation,
|
||||
/// create such an allocation.
|
||||
/// This is essentially `force_to_memplace`.
|
||||
///
|
||||
/// This supports unsized types and returns the computed size to avoid some
|
||||
/// redundant computation when copying; use `force_allocation` for a simpler, sized-only
|
||||
/// version.
|
||||
#[instrument(skip(self), level = "debug")]
|
||||
pub fn force_allocation_maybe_sized(
|
||||
pub fn force_allocation(
|
||||
&mut self,
|
||||
place: &PlaceTy<'tcx, M::PointerTag>,
|
||||
meta: MemPlaceMeta<M::PointerTag>,
|
||||
) -> InterpResult<'tcx, (MPlaceTy<'tcx, M::PointerTag>, Option<Size>)> {
|
||||
let (mplace, size) = match place.place {
|
||||
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
|
||||
let mplace = match place.place {
|
||||
Place::Local { frame, local } => {
|
||||
match M::access_local_mut(self, frame, local)? {
|
||||
Ok(&mut local_val) => {
|
||||
&mut Operand::Immediate(local_val) => {
|
||||
// We need to make an allocation.
|
||||
|
||||
// We need the layout of the local. We can NOT use the layout we got,
|
||||
|
@ -997,44 +972,34 @@ where
|
|||
// that has different alignment than the outer field.
|
||||
let local_layout =
|
||||
self.layout_of_local(&self.stack()[frame], local, None)?;
|
||||
// We also need to support unsized types, and hence cannot use `allocate`.
|
||||
let (size, align) = self
|
||||
.size_and_align_of(&meta, &local_layout)?
|
||||
.expect("Cannot allocate for non-dyn-sized type");
|
||||
let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
|
||||
let mplace = MemPlace { ptr: ptr.into(), meta };
|
||||
if let LocalValue::Live(Operand::Immediate(value)) = local_val {
|
||||
// Preserve old value.
|
||||
if local_layout.is_unsized() {
|
||||
throw_unsup_format!("unsized locals are not supported");
|
||||
}
|
||||
let mplace = *self.allocate(local_layout, MemoryKind::Stack)?;
|
||||
if !matches!(local_val, Immediate::Uninit) {
|
||||
// Preserve old value. (As an optimization, we can skip this if it was uninit.)
|
||||
// We don't have to validate as we can assume the local
|
||||
// was already valid for its type.
|
||||
let mplace = MPlaceTy {
|
||||
self.write_immediate_to_mplace_no_validate(
|
||||
local_val,
|
||||
local_layout,
|
||||
local_layout.align.abi,
|
||||
mplace,
|
||||
layout: local_layout,
|
||||
align: local_layout.align.abi,
|
||||
};
|
||||
self.write_immediate_to_mplace_no_validate(value, &mplace)?;
|
||||
)?;
|
||||
}
|
||||
// Now we can call `access_mut` again, asserting it goes well,
|
||||
// and actually overwrite things.
|
||||
*M::access_local_mut(self, frame, local).unwrap().unwrap() =
|
||||
LocalValue::Live(Operand::Indirect(mplace));
|
||||
(mplace, Some(size))
|
||||
*M::access_local_mut(self, frame, local).unwrap() =
|
||||
Operand::Indirect(mplace);
|
||||
mplace
|
||||
}
|
||||
Err(mplace) => (mplace, None), // this already was an indirect local
|
||||
&mut Operand::Indirect(mplace) => mplace, // this already was an indirect local
|
||||
}
|
||||
}
|
||||
Place::Ptr(mplace) => (mplace, None),
|
||||
Place::Ptr(mplace) => mplace,
|
||||
};
|
||||
// Return with the original layout, so that the caller can go on
|
||||
Ok((MPlaceTy { mplace, layout: place.layout, align: place.align }, size))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn force_allocation(
|
||||
&mut self,
|
||||
place: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
|
||||
Ok(self.force_allocation_maybe_sized(place, MemPlaceMeta::None)?.0)
|
||||
Ok(MPlaceTy { mplace, layout: place.layout, align: place.align })
|
||||
}
|
||||
|
||||
pub fn allocate(
|
||||
|
@ -1042,6 +1007,7 @@ where
|
|||
layout: TyAndLayout<'tcx>,
|
||||
kind: MemoryKind<M::MemoryKind>,
|
||||
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
|
||||
assert!(!layout.is_unsized());
|
||||
let ptr = self.allocate_ptr(layout.size, layout.align.abi, kind)?;
|
||||
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
|
||||
}
|
||||
|
|
|
@ -169,7 +169,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
Use(ref operand) => {
|
||||
// Avoid recomputing the layout
|
||||
let op = self.eval_operand(operand, Some(dest.layout))?;
|
||||
self.copy_op(&op, &dest)?;
|
||||
self.copy_op(&op, &dest, /*allow_transmute*/ false)?;
|
||||
}
|
||||
|
||||
BinaryOp(bin_op, box (ref left, ref right)) => {
|
||||
|
@ -204,7 +204,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
for (field_index, operand) in operands.iter().enumerate() {
|
||||
let op = self.eval_operand(operand, None)?;
|
||||
let field_dest = self.place_field(&dest, field_index)?;
|
||||
self.copy_op(&op, &field_dest)?;
|
||||
self.copy_op(&op, &field_dest, /*allow_transmute*/ false)?;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -220,7 +220,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
} else {
|
||||
// Write the src to the first element.
|
||||
let first = self.mplace_field(&dest, 0)?;
|
||||
self.copy_op(&src, &first.into())?;
|
||||
self.copy_op(&src, &first.into(), /*allow_transmute*/ false)?;
|
||||
|
||||
// This is performance-sensitive code for big static/const arrays! So we
|
||||
// avoid writing each operand individually and instead just make many copies
|
||||
|
|
|
@ -12,8 +12,8 @@ use rustc_target::abi::call::{ArgAbi, ArgAttribute, ArgAttributes, FnAbi, PassMo
|
|||
use rustc_target::spec::abi::Abi;
|
||||
|
||||
use super::{
|
||||
FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
|
||||
StackPopCleanup, StackPopUnwind,
|
||||
FnVal, ImmTy, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemoryKind, OpTy, Operand,
|
||||
PlaceTy, Scalar, StackPopCleanup, StackPopUnwind,
|
||||
};
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
|
@ -185,11 +185,16 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
// No question
|
||||
return true;
|
||||
}
|
||||
if caller_abi.layout.is_unsized() || callee_abi.layout.is_unsized() {
|
||||
// No, no, no. We require the types to *exactly* match for unsized arguments. If
|
||||
// these are somehow unsized "in a different way" (say, `dyn Trait` vs `[i32]`),
|
||||
// then who knows what happens.
|
||||
return false;
|
||||
}
|
||||
if caller_abi.layout.size != callee_abi.layout.size
|
||||
|| caller_abi.layout.align.abi != callee_abi.layout.align.abi
|
||||
{
|
||||
// This cannot go well...
|
||||
// FIXME: What about unsized types?
|
||||
return false;
|
||||
}
|
||||
// The rest *should* be okay, but we are extra conservative.
|
||||
|
@ -287,11 +292,36 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
|||
caller_arg.layout.ty
|
||||
)
|
||||
}
|
||||
// Special handling for unsized parameters.
|
||||
if caller_arg.layout.is_unsized() {
|
||||
// `check_argument_compat` ensures that both have the same type, so we know they will use the metadata the same way.
|
||||
assert_eq!(caller_arg.layout.ty, callee_arg.layout.ty);
|
||||
// We have to properly pre-allocate the memory for the callee.
|
||||
// So let's tear down some wrappers.
|
||||
// This all has to be in memory, there are no immediate unsized values.
|
||||
let src = caller_arg.assert_mem_place();
|
||||
// The destination cannot be one of these "spread args".
|
||||
let (dest_frame, dest_local) = callee_arg.assert_local();
|
||||
// We are just initializing things, so there can't be anything here yet.
|
||||
assert!(matches!(
|
||||
*self.local_to_op(&self.stack()[dest_frame], dest_local, None)?,
|
||||
Operand::Immediate(Immediate::Uninit)
|
||||
));
|
||||
// Allocate enough memory to hold `src`.
|
||||
let Some((size, align)) = self.size_and_align_of_mplace(&src)? else {
|
||||
span_bug!(self.cur_span(), "unsized fn arg with `extern` type tail should not be allowed")
|
||||
};
|
||||
let ptr = self.allocate_ptr(size, align, MemoryKind::Stack)?;
|
||||
let dest_place =
|
||||
MPlaceTy::from_aligned_ptr_with_meta(ptr.into(), callee_arg.layout, src.meta);
|
||||
// Update the local to be that new place.
|
||||
*M::access_local_mut(self, dest_frame, dest_local)? = Operand::Indirect(*dest_place);
|
||||
}
|
||||
// We allow some transmutes here.
|
||||
// FIXME: Depending on the PassMode, this should reset some padding to uninitialized. (This
|
||||
// is true for all `copy_op`, but there are a lot of special cases for argument passing
|
||||
// specifically.)
|
||||
self.copy_op_transmute(&caller_arg, callee_arg)
|
||||
self.copy_op(&caller_arg, callee_arg, /*allow_transmute*/ true)
|
||||
}
|
||||
|
||||
/// Call this function -- pushing the stack frame and initializing the arguments.
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue