use std::cell::Cell;
use std::fmt::Write;
use std::mem;

use rustc::ich::StableHashingContext;
use rustc::mir;
use rustc::mir::interpret::{
    sign_extend, truncate, AllocId, FrameInfo, GlobalId, InterpResult, Pointer, Scalar,
};
use rustc::ty::layout::{self, Align, HasDataLayout, LayoutOf, Size, TyLayout};
use rustc::ty::query::TyCtxtAt;
use rustc::ty::subst::SubstsRef;
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_span::source_map::{self, Span, DUMMY_SP};

use super::{
    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, OpTy, Operand, Place, PlaceTy,
    ScalarMaybeUndef, StackPopInfo,
};

pub struct InterpCx<'mir, 'tcx, M: Machine<'mir, 'tcx>> {
    /// Stores the `Machine` instance.
    pub machine: M,

    /// The results of the type checker, from rustc.
    pub tcx: TyCtxtAt<'tcx>,

    /// Bounds in scope for polymorphic evaluations.
    pub(crate) param_env: ty::ParamEnv<'tcx>,

    /// The virtual memory system.
    pub memory: Memory<'mir, 'tcx, M>,

    /// The virtual call stack.
    pub(crate) stack: Vec<Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>>,

    /// A cache for deduplicating vtables.
    pub(super) vtables:
        FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), Pointer<M::PointerTag>>,
}
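
// A rough sketch of how these pieces fit together (hypothetical driver code, not part of
// this module): a `Machine` implementation supplies the policy hooks, `InterpCx::new`
// wires it up with a fresh `Memory`, and execution proceeds by pushing one `Frame` per
// function call:
//
//     let mut ecx = InterpCx::new(tcx.at(span), param_env, machine, memory_extra);
//     ecx.push_stack_frame(instance, span, body, Some(ret_place), cleanup)?;
//
// Here `span`, `ret_place`, and `cleanup` are placeholders for whatever the caller has at
// hand; see `push_stack_frame` and `StackPopCleanup` below for the actual contract.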

/// A stack frame.
#[derive(Clone)]
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
    /// The MIR for the function called on this frame.
    pub body: &'mir mir::Body<'tcx>,

    /// The def_id and substs of the current function.
    pub instance: ty::Instance<'tcx>,

    /// The span of the call site.
    pub span: source_map::Span,

    /// Extra data for the machine.
    pub extra: Extra,

    ////////////////////////////////////////////////////////////////////////////////
    // Return place and locals
    ////////////////////////////////////////////////////////////////////////////////
    /// Work to perform when returning from this function.
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to,
    /// and its layout in the caller.
    pub return_place: Option<PlaceTy<'tcx, Tag>>,

    /// The list of locals for this stack frame, stored in order as
    /// `[return_ptr, arguments..., variables..., temporaries...]`.
    /// The locals are stored as `LocalState`s: a dead local cannot be accessed at all,
    /// while a live local can either directly contain an immediate value or refer to
    /// some part of an `Allocation`.
    pub locals: IndexVec<mir::Local, LocalState<'tcx, Tag>>,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently being executed (or will be executed after the
    /// frames above it on the call stack return).
    /// If this is `None`, we are unwinding and this function doesn't need any clean-up.
    /// Just continue the same as with `Resume`.
    pub block: Option<mir::BasicBlock>,

    /// The index of the currently evaluated statement.
    pub stmt: usize,
}

#[derive(Clone, Eq, PartialEq, Debug, HashStable)] // Miri debug-prints these
pub enum StackPopCleanup {
    /// Jump to the next block in the caller, or cause UB if `None` (that's a function
    /// that must never return). Also store the layout of the return place so
    /// we can validate it at that layout.
    /// `ret` stores the block we jump to on a normal return, while `unwind`
    /// stores the block used for cleanup during unwinding.
    Goto { ret: Option<mir::BasicBlock>, unwind: Option<mir::BasicBlock> },
    /// Just do nothing: used by `main` and for the `box_alloc` hook in Miri.
    /// `cleanup` says whether locals are deallocated. Static computation
    /// wants them leaked to intern what they need (and just throw away
    /// the entire `ecx` when it is done).
    None { cleanup: bool },
}
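
// For illustration (my reading of the variants above, not code from this file): a regular
// function call would push its frame with something like
// `StackPopCleanup::Goto { ret: Some(dest_block), unwind: cleanup_block }`, so the
// interpreter knows both continuations, while the root frame of a static or constant
// evaluation would use `StackPopCleanup::None { cleanup: false }` so its result
// allocation is leaked and can later be interned.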

/// State of a local variable including a memoized layout.
#[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = (), Id = AllocId> {
    pub value: LocalValue<Tag, Id>,
    /// Don't modify if `Some`; this is only used to prevent computing the layout twice.
    #[stable_hasher(ignore)]
    pub layout: Cell<Option<TyLayout<'tcx>>>,
}

/// Current value of a local variable.
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
pub enum LocalValue<Tag = (), Id = AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to
    /// but not read from, and its address cannot be taken. Locals get
    /// initialized on the first write because, for unsized locals, we do
    /// not know their size before that.
    Uninitialized,
    /// A normal, live local.
    /// Mostly for convenience, we re-use the `Operand` type here.
    /// This is an optimization over just always having a pointer here;
    /// we can thus avoid doing an allocation when the local just stores
    /// immediate values *and* never has its address taken.
    Live(Operand<Tag, Id>),
}
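
// The life cycle this encodes, as I read it: a local starts out `Dead`, a `StorageLive`
// statement moves it to `Uninitialized`, the first write makes it
// `Live(Operand::Immediate(..))` (or `Live(Operand::Indirect(..))` once it needs a backing
// allocation, e.g. because its address is taken), and `StorageDead` puts it back to `Dead`.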

impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    pub fn access(&self) -> InterpResult<'tcx, Operand<Tag>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Uninitialized => {
                bug!("The type checker should prevent reading from a never-written local")
            }
            LocalValue::Live(val) => Ok(val),
        }
    }

    /// Overwrite the local. If the local can be overwritten in place, return a reference
    /// to do so; otherwise return the `MemPlace` to consult instead.
    pub fn access_mut(
        &mut self,
    ) -> InterpResult<'tcx, Result<&mut LocalValue<Tag>, MemPlace<Tag>>> {
        match self.value {
            LocalValue::Dead => throw_unsup!(DeadLocal),
            LocalValue::Live(Operand::Indirect(mplace)) => Ok(Err(mplace)),
            ref mut local @ LocalValue::Live(Operand::Immediate(_))
            | ref mut local @ LocalValue::Uninitialized => Ok(Ok(local)),
        }
    }
}
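
// A sketch of the intended calling pattern for `access_mut` (an assumption based on its
// signature, not code from this file): `Ok(Ok(local))` means the caller may store a new
// `LocalValue` (e.g. an `Operand::Immediate`) directly into the returned reference, while
// `Ok(Err(mplace))` means the local lives in memory and the caller must write through the
// returned `MemPlace` instead.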

impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Return the `SourceInfo` of the current instruction.
    pub fn current_source_info(&self) -> Option<mir::SourceInfo> {
        self.block.map(|block| {
            let block = &self.body.basic_blocks()[block];
            if self.stmt < block.statements.len() {
                block.statements[self.stmt].source_info
            } else {
                block.terminator().source_info
            }
        })
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> HasDataLayout for InterpCx<'mir, 'tcx, M> {
    #[inline]
    fn data_layout(&self) -> &layout::TargetDataLayout {
        &self.tcx.data_layout
    }
}

impl<'mir, 'tcx, M> layout::HasTyCtxt<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    #[inline]
    fn tcx(&self) -> TyCtxt<'tcx> {
        *self.tcx
    }
}

impl<'mir, 'tcx, M> layout::HasParamEnv<'tcx> for InterpCx<'mir, 'tcx, M>
where
    M: Machine<'mir, 'tcx>,
{
    fn param_env(&self) -> ty::ParamEnv<'tcx> {
        self.param_env
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> LayoutOf for InterpCx<'mir, 'tcx, M> {
    type Ty = Ty<'tcx>;
    type TyLayout = InterpResult<'tcx, TyLayout<'tcx>>;

    #[inline]
    fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyLayout {
        self.tcx
            .layout_of(self.param_env.and(ty))
            .map_err(|layout| err_inval!(Layout(layout)).into())
    }
}

impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn new(
        tcx: TyCtxtAt<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        machine: M,
        memory_extra: M::MemoryExtra,
    ) -> Self {
        InterpCx {
            machine,
            tcx,
            param_env,
            memory: Memory::new(tcx, memory_extra),
            stack: Vec::new(),
            vtables: FxHashMap::default(),
        }
    }

    #[inline(always)]
    pub fn force_ptr(
        &self,
        scalar: Scalar<M::PointerTag>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
        self.memory.force_ptr(scalar)
    }

    #[inline(always)]
    pub fn force_bits(
        &self,
        scalar: Scalar<M::PointerTag>,
        size: Size,
    ) -> InterpResult<'tcx, u128> {
        self.memory.force_bits(scalar, size)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
    /// the *canonical* machine pointer to the allocation. Must never be used
    /// for any other pointers!
    ///
    /// This represents a *direct* access to that memory, as opposed to access
    /// through a pointer that was created by the program.
    #[inline(always)]
    pub fn tag_static_base_pointer(&self, ptr: Pointer) -> Pointer<M::PointerTag> {
        self.memory.tag_static_base_pointer(ptr)
    }

    #[inline(always)]
    pub fn stack(&self) -> &[Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>] {
        &self.stack
    }

    #[inline(always)]
    pub fn cur_frame(&self) -> usize {
        assert!(!self.stack.is_empty());
        self.stack.len() - 1
    }

    #[inline(always)]
    pub fn frame(&self) -> &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last().expect("no call frames exist")
    }

    #[inline(always)]
    pub fn frame_mut(&mut self) -> &mut Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra> {
        self.stack.last_mut().expect("no call frames exist")
    }

    #[inline(always)]
    pub(super) fn body(&self) -> &'mir mir::Body<'tcx> {
        self.frame().body
    }

    #[inline(always)]
    pub fn sign_extend(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        assert!(ty.abi.is_signed());
        sign_extend(value, ty.size)
    }

    #[inline(always)]
    pub fn truncate(&self, value: u128, ty: TyLayout<'_>) -> u128 {
        truncate(value, ty.size)
    }
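
    // Worked example for the two helpers above (illustrative values, assuming the free
    // functions behave as their names suggest): with `ty.size` of 1 byte,
    // `truncate(0x1_05, ..)` keeps only the low 8 bits and yields `0x05`, while
    // `sign_extend(0x80, ..)` treats bit 7 as the sign bit and yields
    // `0xffff_ffff_ffff_ffff_ffff_ffff_ffff_ff80`, i.e. -128 as a `u128` bit pattern.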

    #[inline]
    pub fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        ty.is_sized(self.tcx, self.param_env)
    }

    #[inline]
    pub fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool {
        ty.is_freeze(*self.tcx, self.param_env, DUMMY_SP)
    }

    pub fn load_mir(
        &self,
        instance: ty::InstanceDef<'tcx>,
        promoted: Option<mir::Promoted>,
    ) -> InterpResult<'tcx, mir::ReadOnlyBodyAndCache<'tcx, 'tcx>> {
        // Do not continue if typeck errors occurred (can only occur in local crate).
        let did = instance.def_id();
        if did.is_local()
            && self.tcx.has_typeck_tables(did)
            && self.tcx.typeck_tables_of(did).tainted_by_errors
        {
            throw_inval!(TypeckError)
        }
        trace!("load mir(instance={:?}, promoted={:?})", instance, promoted);
        if let Some(promoted) = promoted {
            return Ok(self.tcx.promoted_mir(did)[promoted].unwrap_read_only());
        }
        match instance {
            ty::InstanceDef::Item(def_id) => {
                if self.tcx.is_mir_available(did) {
                    Ok(self.tcx.optimized_mir(did).unwrap_read_only())
                } else {
                    throw_unsup!(NoMirFor(self.tcx.def_path_str(def_id)))
                }
            }
            _ => Ok(self.tcx.instance_mir(instance)),
        }
    }

    /// Call this on things you got out of the MIR (so it is as generic as the current
    /// stack frame), to bring it into the proper environment for this interpreter.
    pub(super) fn subst_from_frame_and_normalize_erasing_regions<T: TypeFoldable<'tcx>>(
        &self,
        value: T,
    ) -> T {
        self.tcx.subst_and_normalize_erasing_regions(
            self.frame().instance.substs,
            self.param_env,
            &value,
        )
    }
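
    // For example (a hypothetical frame, for illustration only): while executing a frame
    // for `fn foo<T>()` instantiated with `T = u32`, applying this to the MIR-level type
    // `Vec<T>` substitutes the frame's substs and normalizes, yielding `Vec<u32>`.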

    /// The `substs` are assumed to already be in our interpreter "universe" (param_env).
    pub(super) fn resolve(
        &self,
        def_id: DefId,
        substs: SubstsRef<'tcx>,
    ) -> InterpResult<'tcx, ty::Instance<'tcx>> {
        trace!("resolve: {:?}, {:#?}", def_id, substs);
        trace!("param_env: {:#?}", self.param_env);
        trace!("substs: {:#?}", substs);
        ty::Instance::resolve(*self.tcx, self.param_env, def_id, substs)
            .ok_or_else(|| err_inval!(TooGeneric).into())
    }
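
    // For illustration (not code from this file): resolving the `DefId` of a trait method
    // such as `Clone::clone` with concrete `substs` picks the monomorphic instance of the
    // matching impl, whereas substs that still mention an unbound type parameter make
    // `ty::Instance::resolve` return `None`, which we surface as `TooGeneric` above.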

    pub fn layout_of_local(
        &self,
        frame: &Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
        local: mir::Local,
        layout: Option<TyLayout<'tcx>>,
    ) -> InterpResult<'tcx, TyLayout<'tcx>> {
        // `const_prop` runs into this with an invalid (empty) frame, so we
        // have to support that case (mostly by skipping all caching).
        match frame.locals.get(local).and_then(|state| state.layout.get()) {
            None => {
                let layout = crate::interpret::operand::from_known_layout(layout, || {
                    let local_ty = frame.body.local_decls[local].ty;
                    let local_ty = self.tcx.subst_and_normalize_erasing_regions(
                        frame.instance.substs,
                        self.param_env,
                        &local_ty,
                    );
                    self.layout_of(local_ty)
                })?;
                if let Some(state) = frame.locals.get(local) {
                    // Layouts of locals are requested a lot, so we cache them.
                    state.layout.set(Some(layout));
                }
                Ok(layout)
            }
            Some(layout) => Ok(layout),
        }
    }

    /// Returns the actual dynamic size and alignment of the place at the given type.
    /// Only the "meta" (metadata) part of the place matters.
    /// This can fail to provide an answer for extern types.
    pub(super) fn size_and_align_of(
        &self,
        metadata: MemPlaceMeta<M::PointerTag>,
        layout: TyLayout<'tcx>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        if !layout.is_unsized() {
            return Ok(Some((layout.size, layout.align.abi)));
        }
        match layout.ty.kind {
            ty::Adt(..) | ty::Tuple(..) => {
                // First get the size of all statically known fields.
                // Don't use `type_of::sizing_type_of` because that expects `t` to be sized,
                // and it also rounds up to alignment, which we want to avoid,
                // as the unsized field's alignment could be smaller.
                assert!(!layout.ty.is_simd());
                trace!("DST layout: {:?}", layout);

                let sized_size = layout.fields.offset(layout.fields.count() - 1);
                let sized_align = layout.align.abi;
                trace!(
                    "DST {} statically sized prefix size: {:?} align: {:?}",
                    layout.ty,
                    sized_size,
                    sized_align
                );

                // Recurse to get the size of the dynamically sized field (must be
                // the last field). Can't have foreign types here, how would we
                // adjust alignment and size for them?
                let field = layout.field(self, layout.fields.count() - 1)?;
                let (unsized_size, unsized_align) = match self.size_and_align_of(metadata, field)? {
                    Some(size_and_align) => size_and_align,
                    None => {
                        // A field with extern type. If this field is at offset 0, we behave
                        // like the underlying extern type.
                        // FIXME: Once we have made decisions for how to handle size and alignment
                        // of `extern type`, this should be adapted. It is just a temporary hack
                        // to get some code to work that probably ought to work.
                        if sized_size == Size::ZERO {
                            return Ok(None);
                        } else {
                            bug!("Fields cannot be extern types, unless they are at offset 0")
                        }
                    }
                };

                // FIXME (#26403, #27023): We should be adding padding
                // to `sized_size` (to accommodate the `unsized_align`
                // required of the unsized field that follows) before
                // summing it with `unsized_size`. (Note that since #26403
                // is unfixed, we do not yet add the necessary padding
                // here. But this is where the add would go.)

                // Return the sum of sizes and max of aligns.
                let size = sized_size + unsized_size;

                // Choose max of two known alignments (combined value must
                // be aligned according to more restrictive of the two).
                let align = sized_align.max(unsized_align);

                // Issue #27023: must add any necessary padding to `size`
                // (to make it a multiple of `align`) before returning it.
                let size = size.align_to(align);
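
                // Worked example (illustrative numbers, not tied to any real type): for
                // `struct S { x: u32, tail: [u16] }` with a 3-element tail, we get
                // `sized_size = 4`, `unsized_size = 3 * 2 = 6`, so `size = 10`;
                // `align = max(4, 2) = 4`, and `align_to` rounds 10 up to 12.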

                // Check if this brought us over the size limit.
                if size.bytes() >= self.tcx.data_layout().obj_size_bound() {
                    throw_ub!(InvalidMeta("total size is bigger than largest supported object"));
                }
                Ok(Some((size, align)))
            }
            ty::Dynamic(..) => {
                let vtable = metadata.unwrap_meta();
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }

            ty::Slice(_) | ty::Str => {
                let len = metadata.unwrap_meta().to_machine_usize(self)?;
                let elem = layout.field(self, 0)?;

                // Make sure the slice is not too big.
                let size = elem.size.checked_mul(len, &*self.tcx).ok_or_else(|| {
                    err_ub!(InvalidMeta("slice is bigger than largest supported object"))
                })?;
                Ok(Some((size, elem.align.abi)))
            }

            ty::Foreign(_) => Ok(None),

            _ => bug!("size_and_align_of::<{:?}> not supported", layout.ty),
        }
    }

    #[inline]
    pub fn size_and_align_of_mplace(
        &self,
        mplace: MPlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, Option<(Size, Align)>> {
        self.size_and_align_of(mplace.meta, mplace.layout)
    }

    pub fn push_stack_frame(
        &mut self,
        instance: ty::Instance<'tcx>,
        span: Span,
        body: &'mir mir::Body<'tcx>,
        return_place: Option<PlaceTy<'tcx, M::PointerTag>>,
        return_to_block: StackPopCleanup,
    ) -> InterpResult<'tcx> {
        if !self.stack.is_empty() {
            info!("PAUSING({}) {}", self.cur_frame(), self.frame().instance);
        }
        ::log_settings::settings().indentation += 1;

        // First push a stack frame, so we have access to the local substs.
        let extra = M::stack_push(self)?;
        self.stack.push(Frame {
            body,
            block: Some(mir::START_BLOCK),
            return_to_block,
            return_place,
            // Empty local array; we fill it in below, after we are inside the stack frame
            // and all methods actually know about the frame.
            locals: IndexVec::new(),
            span,
            instance,
            stmt: 0,
            extra,
        });

        // Don't allocate at all for trivial constants.
        if body.local_decls.len() > 1 {
            // Locals are initially uninitialized.
            let dummy = LocalState { value: LocalValue::Uninitialized, layout: Cell::new(None) };
            let mut locals = IndexVec::from_elem(dummy, &body.local_decls);
            // Return place is handled specially by the `eval_place` functions, and the
            // entry in `locals` should never be used. Make it dead, to be sure.
            locals[mir::RETURN_PLACE].value = LocalValue::Dead;
            // Now mark those locals as dead that we do not want to initialize.
            match self.tcx.def_kind(instance.def_id()) {
                // Statics and constants don't have `Storage*` statements, no need to look for them.
                Some(DefKind::Static) | Some(DefKind::Const) | Some(DefKind::AssocConst) => {}
                _ => {
                    trace!("push_stack_frame: {:?}: num_bbs: {}", span, body.basic_blocks().len());
                    for block in body.basic_blocks() {
                        for stmt in block.statements.iter() {
                            use rustc::mir::StatementKind::{StorageDead, StorageLive};
                            match stmt.kind {
                                StorageLive(local) | StorageDead(local) => {
                                    locals[local].value = LocalValue::Dead;
                                }
                                _ => {}
                            }
                        }
                    }
                }
            }
            // Done; install the locals in the frame.
            self.frame_mut().locals = locals;
        }

        info!("ENTERING({}) {}", self.cur_frame(), self.frame().instance);

        if self.stack.len() > *self.tcx.sess.recursion_limit.get() {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(())
        }
    }

    /// Jump to the given block.
    #[inline]
    pub fn go_to_block(&mut self, target: mir::BasicBlock) {
        let frame = self.frame_mut();
        frame.block = Some(target);
        frame.stmt = 0;
    }

    /// *Return* to the given `target` basic block.
    /// Do *not* use for unwinding! Use `unwind_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function cannot return, so we raise UB.
    pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
        if let Some(target) = target {
            Ok(self.go_to_block(target))
        } else {
            throw_ub!(Unreachable)
        }
    }

    /// *Unwind* to the given `target` basic block.
    /// Do *not* use for returning! Use `return_to_block` instead.
    ///
    /// If `target` is `None`, that indicates the function does not need cleanup during
    /// unwinding, and we will just keep propagating that upwards.
    pub fn unwind_to_block(&mut self, target: Option<mir::BasicBlock>) {
        let frame = self.frame_mut();
        frame.block = target;
        frame.stmt = 0;
    }

    /// Pops the current frame from the stack, deallocating the
    /// memory for allocated locals.
    ///
    /// If `unwinding` is `false`, then we are performing a normal return
    /// from a function. In this case, we jump back into the frame of the caller,
    /// and continue execution as normal.
    ///
    /// If `unwinding` is `true`, then we are in the middle of a panic,
    /// and need to unwind this frame. In this case, we jump to the
    /// `cleanup` block for the function, which is responsible for running
    /// `Drop` impls for any locals that have been initialized at this point.
    /// The cleanup block ends with a special `Resume` terminator, which will
    /// cause us to continue unwinding.
    pub(super) fn pop_stack_frame(&mut self, unwinding: bool) -> InterpResult<'tcx> {
        info!(
            "LEAVING({}) {} (unwinding = {})",
            self.cur_frame(),
            self.frame().instance,
            unwinding
        );

        // Sanity check `unwinding`.
        assert_eq!(
            unwinding,
            match self.frame().block {
                None => true,
                Some(block) => self.body().basic_blocks()[block].is_cleanup,
            }
        );

        ::log_settings::settings().indentation -= 1;
        let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
        let stack_pop_info = M::stack_pop(self, frame.extra, unwinding)?;
        if let (false, StackPopInfo::StopUnwinding) = (unwinding, stack_pop_info) {
            bug!("Attempted to stop unwinding while there is no unwinding!");
        }

        // Now where do we jump next?

        // Determine if we leave this function normally or via unwinding.
        let cur_unwinding =
            if let StackPopInfo::StopUnwinding = stack_pop_info { false } else { unwinding };
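
        // Spelling out the cases (my summary of the line above): if we were not unwinding,
        // we still are not; if we were unwinding and the machine hook answered
        // `StopUnwinding`, the unwind is caught here and we continue normally; otherwise
        // the unwind keeps propagating into the caller's frame.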

        // Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
        // In that case, we return early. We also avoid validation in that case,
        // because this is CTFE and the final value will be thoroughly validated anyway.
        let (cleanup, next_block) = match frame.return_to_block {
            StackPopCleanup::Goto { ret, unwind } => {
                (true, Some(if cur_unwinding { unwind } else { ret }))
            }
            StackPopCleanup::None { cleanup, .. } => (cleanup, None),
        };

        if !cleanup {
            assert!(self.stack.is_empty(), "only the topmost frame should ever be leaked");
            assert!(next_block.is_none(), "tried to skip cleanup when we have a next block!");
            // Leak the locals, skip validation.
            return Ok(());
        }

        // Cleanup: deallocate all locals that are backed by an allocation.
        for local in frame.locals {
            self.deallocate_local(local.value)?;
        }

        trace!(
            "StackPopCleanup: {:?} StackPopInfo: {:?} cur_unwinding = {:?}",
            frame.return_to_block,
            stack_pop_info,
            cur_unwinding
        );
        if cur_unwinding {
            // Follow the unwind edge.
            let unwind = next_block.expect("Encountered StackPopCleanup::None when unwinding!");
            self.unwind_to_block(unwind);
        } else {
            // Follow the normal return edge.
            // Validate the return value. Do this after deallocating so that we catch dangling
            // references.
            if let Some(return_place) = frame.return_place {
                if M::enforce_validity(self) {
                    // Data got changed, better make sure it matches the type!
                    // It is still possible that the return place held invalid data while
                    // the function is running, but that's okay because nobody could have
                    // accessed that same data from the "outside" to observe any broken
                    // invariant -- that is, unless a function somehow has a ptr to
                    // its return place... but the way MIR is currently generated, the
                    // return place is always a local and then this cannot happen.
                    self.validate_operand(self.place_to_op(return_place)?)?;
                }
            } else {
                // Uh, that shouldn't happen... the function did not intend to return.
                throw_ub!(Unreachable);
            }

            // Jump to new block -- *after* validation so that the spans make more sense.
            if let Some(ret) = next_block {
                self.return_to_block(ret)?;
            }
        }

        if !self.stack.is_empty() {
            info!(
                "CONTINUING({}) {} (unwinding = {})",
                self.cur_frame(),
                self.frame().instance,
                cur_unwinding
            );
        }

        Ok(())
    }

    /// Mark a local as live, killing the previous content and returning it.
    /// Remember to deallocate that!
    pub fn storage_live(
        &mut self,
        local: mir::Local,
    ) -> InterpResult<'tcx, LocalValue<M::PointerTag>> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place live");
        trace!("{:?} is now live", local);

        let local_val = LocalValue::Uninitialized;
        // StorageLive *always* kills the value that's currently stored.
        // However, we do not error if the variable already is live;
        // see <https://github.com/rust-lang/rust/issues/42371>.
        Ok(mem::replace(&mut self.frame_mut().locals[local].value, local_val))
    }

    /// Mark a local as dead, returning its old value.
    /// Remember to deallocate that!
    pub fn storage_dead(&mut self, local: mir::Local) -> LocalValue<M::PointerTag> {
        assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
        trace!("{:?} is now dead", local);

        mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead)
    }
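
    // How these two are meant to be driven (my reading of the interpreter's step loop, not
    // code from this file): a MIR `StorageLive(x)` statement translates to something like
    // `let old = ecx.storage_live(x)?; ecx.deallocate_local(old)?;`, and `StorageDead(x)`
    // to the same pattern with `storage_dead`, so any allocation backing the previous
    // value is always freed.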

    pub(super) fn deallocate_local(
        &mut self,
        local: LocalValue<M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // FIXME: should we tell the user that there was a local which was never written to?
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            trace!("deallocating local");
            // All locals have a backing allocation, even if the allocation is empty
            // due to the local having ZST type.
            let ptr = ptr.assert_ptr();
            if log_enabled!(::log::Level::Trace) {
                self.memory.dump_alloc(ptr.alloc_id);
            }
            self.memory.deallocate_local(ptr)?;
        };
        Ok(())
    }

    pub(super) fn const_eval(
        &self,
        gid: GlobalId<'tcx>,
        ty: Ty<'tcx>,
    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        let val = self.tcx.const_eval_global_id(param_env, gid, Some(self.tcx.span))?;

        // Even though `ecx.const_eval` is called from `eval_const_to_op` we can never have a
        // recursion deeper than one level, because the `tcx.const_eval` above is guaranteed to not
        // return `ConstValue::Unevaluated`, which is the only way that `eval_const_to_op` will call
        // `ecx.const_eval`.
        let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
        self.eval_const_to_op(&const_, None)
    }

    pub fn const_eval_raw(
        &self,
        gid: GlobalId<'tcx>,
    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
        // For statics we pick `ParamEnv::reveal_all`, because statics don't have generics
        // and thus don't care about the parameter environment. While we could just use
        // `self.param_env`, that would mean we invoke the query to evaluate the static
        // with different parameter environments, thus causing the static to be evaluated
        // multiple times.
        let param_env = if self.tcx.is_static(gid.instance.def_id()) {
            ty::ParamEnv::reveal_all()
        } else {
            self.param_env
        };
        // We use `const_eval_raw` here, and get an unvalidated result. That is okay:
        // Our result will later be validated anyway, and there seems no good reason
        // to have to fail early here. This is also more consistent with
        // `Memory::get_static_alloc` which has to use `const_eval_raw` to avoid cycles.
        let val = self.tcx.const_eval_raw(param_env.and(gid))?;
        self.raw_const_to_mplace(val)
    }

    pub fn dump_place(&self, place: Place<M::PointerTag>) {
        // Debug output
        if !log_enabled!(::log::Level::Trace) {
            return;
        }
        match place {
            Place::Local { frame, local } => {
                let mut allocs = Vec::new();
                let mut msg = format!("{:?}", local);
                if frame != self.cur_frame() {
                    write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
                }
                write!(msg, ":").unwrap();

                match self.stack[frame].locals[local].value {
                    LocalValue::Dead => write!(msg, " is dead").unwrap(),
                    LocalValue::Uninitialized => write!(msg, " is uninitialized").unwrap(),
                    LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
                        Scalar::Ptr(ptr) => {
                            write!(
                                msg,
                                " by align({}){} ref:",
                                mplace.align.bytes(),
                                match mplace.meta {
                                    MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
                                    MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
                                }
                            )
                            .unwrap();
                            allocs.push(ptr.alloc_id);
                        }
                        ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
                    },
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(msg, " {:?}", val).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val1 {
                            allocs.push(ptr.alloc_id);
                        }
                        if let ScalarMaybeUndef::Scalar(Scalar::Ptr(ptr)) = val2 {
                            allocs.push(ptr.alloc_id);
                        }
                    }
                }

                trace!("{}", msg);
                self.memory.dump_allocs(allocs);
            }
            Place::Ptr(mplace) => match mplace.ptr {
                Scalar::Ptr(ptr) => {
                    trace!("by align({}) ref:", mplace.align.bytes());
                    self.memory.dump_alloc(ptr.alloc_id);
                }
                ptr => trace!(" integral by ref: {:?}", ptr),
            },
        }
    }

    pub fn generate_stacktrace(&self, explicit_span: Option<Span>) -> Vec<FrameInfo<'tcx>> {
        let mut last_span = None;
        let mut frames = Vec::new();
        for frame in self.stack().iter().rev() {
            // Make sure we don't emit frames that are duplicates of the previous one.
            if explicit_span == Some(frame.span) {
                last_span = Some(frame.span);
                continue;
            }
            if let Some(last) = last_span {
                if last == frame.span {
                    continue;
                }
            } else {
                last_span = Some(frame.span);
            }

            let lint_root = frame.current_source_info().and_then(|source_info| {
                match &frame.body.source_scopes[source_info.scope].local_data {
                    mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
                    mir::ClearCrossCrate::Clear => None,
                }
            });

            frames.push(FrameInfo { call_site: frame.span, instance: frame.instance, lint_root });
        }
        trace!("generate stacktrace: {:#?}, {:?}", frames, explicit_span);
        frames
    }
}

impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
where
    Extra: HashStable<StableHashingContext<'ctx>>,
    Tag: HashStable<StableHashingContext<'ctx>>,
{
    fn hash_stable(&self, hcx: &mut StableHashingContext<'ctx>, hasher: &mut StableHasher) {
        self.body.hash_stable(hcx, hasher);
        self.instance.hash_stable(hcx, hasher);
        self.span.hash_stable(hcx, hasher);
        self.return_to_block.hash_stable(hcx, hasher);
        self.return_place.as_ref().map(|r| &**r).hash_stable(hcx, hasher);
        self.locals.hash_stable(hcx, hasher);
        self.block.hash_stable(hcx, hasher);
        self.stmt.hash_stable(hcx, hasher);
        self.extra.hash_stable(hcx, hasher);
    }
}