interpret: refactor function call handling to be better-abstracted

Ralf Jung 2024-08-05 17:34:44 +02:00
parent 8c7e0e1608
commit 522af10ccc
22 changed files with 1337 additions and 1316 deletions

View file

@@ -73,7 +73,9 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
cid.promoted.map_or_else(String::new, |p| format!("::{p:?}"))
);
ecx.push_stack_frame(
// This can't use `init_stack_frame` since `body` is not a function,
// so computing its ABI would fail. It's also not worth it since there are no arguments to pass.
ecx.push_stack_frame_raw(
cid.instance,
body,
&ret.clone().into(),

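The comment above is the distinction this refactor introduces: `init_stack_frame` checks and passes call arguments against a function ABI, while `push_stack_frame_raw` just pushes a frame for a MIR body that is not a function at all, such as a const or static initializer. Illustrative, standard Rust (not part of this change): the initializer below is exactly such an argument-less body that CTFE runs in its own frame.

static TABLE: [u32; 4] = {
    // This initializer is a MIR body evaluated by the CTFE interpreter in its own
    // frame, but it is not a function: there is no signature/ABI to check and no
    // arguments to pass, hence the "raw" frame push above.
    let mut t = [0u32; 4];
    let mut i = 0;
    while i < 4 {
        t[i] = (i as u32) * 2;
        i += 1;
    }
    t
};

fn main() {
    assert_eq!(TABLE, [0, 2, 4, 6]);
}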
View file

@@ -24,8 +24,9 @@ use crate::errors::{LongRunning, LongRunningWarn};
use crate::fluent_generated as fluent;
use crate::interpret::{
self, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom, throw_unsup,
throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, FnVal, Frame,
throw_unsup_format, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame,
GlobalAlloc, ImmTy, InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, Scalar,
StackPopCleanup,
};
/// When hitting this many interpreted terminators we emit a deny by default lint
@@ -306,17 +307,15 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
let align = ImmTy::from_uint(target_align, args[1].layout).into();
let fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
// We replace the entire function call with a "tail call".
// Note that this happens before the frame of the original function
// is pushed on the stack.
self.eval_fn_call(
FnVal::Instance(instance),
(CallAbi::Rust, fn_abi),
// Push the stack frame with our own adjusted arguments.
self.init_stack_frame(
instance,
self.load_mir(instance.def, None)?,
fn_abi,
&[FnArg::Copy(addr), FnArg::Copy(align)],
/* with_caller_location = */ false,
dest,
ret,
mir::UnwindAction::Unreachable,
StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable },
)?;
Ok(ControlFlow::Break(()))
} else {

View file

@@ -1,24 +1,23 @@
//! Manages calling a concrete function (with known MIR body) with argument passing,
//! and returning the return value to the caller.
use std::borrow::Cow;
use either::Either;
use either::{Left, Right};
use rustc_middle::ty::layout::{FnAbiOf, IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, AdtDef, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_span::sym;
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::abi::{self, FieldIdx, Integer};
use rustc_target::spec::abi::Abi;
use tracing::trace;
use tracing::{info, instrument, trace};
use super::{
throw_ub, throw_ub_custom, throw_unsup_format, CtfeProvenance, FnVal, ImmTy, InterpCx,
InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, Scalar,
StackPopCleanup,
InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Projectable, Provenance, ReturnAction, Scalar,
StackPopCleanup, StackPopInfo,
};
use crate::fluent_generated as fluent;
use crate::interpret::eval_context::StackPopInfo;
use crate::interpret::ReturnAction;
/// An argument passed to a function.
#[derive(Clone, Debug)]
@@ -39,15 +38,6 @@ impl<'tcx, Prov: Provenance> FnArg<'tcx, Prov> {
}
}
struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
callee: FnVal<'tcx, M::ExtraFnVal>,
args: Vec<FnArg<'tcx, M::Provenance>>,
fn_sig: ty::FnSig<'tcx>,
fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
/// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
with_caller_location: bool,
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Make a copy of the given fn_arg. Any `InPlace` are degenerated to copies, no protection of the
/// original memory occurs.
@@ -67,7 +57,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
args.iter().map(|fn_arg| self.copy_fn_arg(fn_arg)).collect()
}
pub fn fn_arg_field(
/// Helper function for argument untupling.
pub(super) fn fn_arg_field(
&self,
arg: &FnArg<'tcx, M::Provenance>,
field: usize,
@@ -78,190 +69,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
})
}
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
) -> InterpResult<'tcx> {
use rustc_middle::mir::TerminatorKind::*;
match terminator.kind {
Return => {
self.return_from_current_stack_frame(/* unwinding */ false)?
}
Goto { target } => self.go_to_block(target),
SwitchInt { ref discr, ref targets } => {
let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets.otherwise();
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
let res = self.binary_op(
mir::BinOp::Eq,
&discr,
&ImmTy::from_uint(const_int, discr.layout),
)?;
if res.to_scalar().to_bool()? {
target_block = target;
break;
}
}
self.go_to_block(target_block);
}
Call {
ref func,
ref args,
destination,
target,
unwind,
call_source: _,
fn_span: _,
} => {
let old_stack = self.frame_idx();
let old_loc = self.frame().loc;
let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
self.eval_callee_and_args(terminator, func, args)?;
let destination = self.force_allocation(&self.eval_place(destination)?)?;
self.eval_fn_call(
callee,
(fn_sig.abi, fn_abi),
&args,
with_caller_location,
&destination,
target,
if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
)?;
// Sanity-check that `eval_fn_call` either pushed a new frame or
// did a jump to another block.
if self.frame_idx() == old_stack && self.frame().loc == old_loc {
span_bug!(terminator.source_info.span, "evaluating this call made no progress");
}
}
TailCall { ref func, ref args, fn_span: _ } => {
let old_frame_idx = self.frame_idx();
let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
self.eval_callee_and_args(terminator, func, args)?;
self.eval_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;
if self.frame_idx() != old_frame_idx {
span_bug!(
terminator.source_info.span,
"evaluating this tail call pushed a new stack frame"
);
}
}
Drop { place, target, unwind, replace: _ } => {
let place = self.eval_place(place)?;
let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
if let ty::InstanceKind::DropGlue(_, None) = instance.def {
// This is the branch we enter if and only if the dropped type has no drop glue
// whatsoever. This can happen as a result of monomorphizing a drop of a
// generic. In order to make sure that generic and non-generic code behaves
// roughly the same (and in keeping with Mir semantics) we do nothing here.
self.go_to_block(target);
return Ok(());
}
trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
self.drop_in_place(&place, instance, target, unwind)?;
}
Assert { ref cond, expected, ref msg, target, unwind } => {
let ignored =
M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
if ignored || expected == cond_val {
self.go_to_block(target);
} else {
M::assert_panic(self, msg, unwind)?;
}
}
UnwindTerminate(reason) => {
M::unwind_terminate(self, reason)?;
}
// When we encounter Resume, we've finished unwinding
// cleanup for the current stack frame. We pop it in order
// to continue unwinding the next frame
UnwindResume => {
trace!("unwinding: resuming from cleanup");
// By definition, a Resume terminator means
// that we're unwinding
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(());
}
// It is UB to ever encounter this.
Unreachable => throw_ub!(Unreachable),
// These should never occur for MIR we actually run.
FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
terminator.source_info.span,
"{:#?} should have been eliminated by MIR pass",
terminator.kind
),
InlineAsm { template, ref operands, options, ref targets, .. } => {
M::eval_inline_asm(self, template, operands, options, targets)?;
}
}
Ok(())
}
/// Evaluate the arguments of a function call
pub(super) fn eval_fn_call_arguments(
&self,
ops: &[Spanned<mir::Operand<'tcx>>],
) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
ops.iter()
.map(|op| {
let arg = match &op.node {
mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
// Make a regular copy.
let op = self.eval_operand(&op.node, None)?;
FnArg::Copy(op)
}
mir::Operand::Move(place) => {
// If this place lives in memory, preserve its location.
// We call `place_to_op` which will be an `MPlaceTy` whenever there exists
// an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
// which can return a local even if that has an mplace.)
let place = self.eval_place(*place)?;
let op = self.place_to_op(&place)?;
match op.as_mplace_or_imm() {
Either::Left(mplace) => FnArg::InPlace(mplace),
Either::Right(_imm) => {
// This argument doesn't live in memory, so there's no place
// to make inaccessible during the call.
// We rely on there not being any stray `PlaceTy` that would let the
// caller directly access this local!
// This is also crucial for tail calls, where we want the `FnArg` to
// stay valid when the old stack frame gets popped.
FnArg::Copy(op)
}
}
}
};
Ok(arg)
})
.collect()
}
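As a rough restatement of the Copy/InPlace decision above, here is a toy model with invented stand-in types (`Operand`, `Arg`, the `in_memory` flag); it sketches only the control flow, not the real rustc types:

enum Operand { Copy(u32), Constant(u32), Move { value: u32, in_memory: bool } }

#[derive(Debug, PartialEq)]
enum Arg { Copy(u32), InPlace(u32) }

fn lower_arg(op: Operand) -> Arg {
    match op {
        // Copies and constants: the callee gets an independent value.
        Operand::Copy(v) | Operand::Constant(v) => Arg::Copy(v),
        // Moves keep their location only if the source really lives in memory;
        // an immediate has no place to protect, so it degrades to a copy
        // (which also keeps the argument valid once a tail call pops the old frame).
        Operand::Move { value, in_memory: true } => Arg::InPlace(value),
        Operand::Move { value, in_memory: false } => Arg::Copy(value),
    }
}

fn main() {
    assert_eq!(lower_arg(Operand::Move { value: 7, in_memory: false }), Arg::Copy(7));
}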
/// Find the wrapped inner type of a transparent wrapper.
/// Must not be called on 1-ZST (as they don't have a uniquely defined "wrapped field").
///
@@ -503,46 +310,209 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(())
}
/// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
/// necessary information about callee and arguments to make a call.
fn eval_callee_and_args(
&self,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
args: &[Spanned<mir::Operand<'tcx>>],
) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
let func = self.eval_operand(func, None)?;
let args = self.eval_fn_call_arguments(args)?;
let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
let extra_args = &args[fn_sig.inputs().len()..];
let extra_args =
self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
ty::FnPtr(_sig) => {
let fn_ptr = self.read_pointer(&func)?;
let fn_val = self.get_ptr_fn(fn_ptr)?;
(fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
}
ty::FnDef(def_id, args) => {
let instance = self.resolve(def_id, args)?;
(
FnVal::Instance(instance),
self.fn_abi_of_instance(instance, extra_args)?,
instance.def.requires_caller_location(*self.tcx),
)
}
_ => {
span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
}
};
Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
// Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
if !self.tcx.sess.target.is_like_wasm
&& attrs
.target_features
.iter()
.any(|feature| !self.tcx.sess.target_features.contains(feature))
{
throw_ub_custom!(
fluent::const_eval_unavailable_target_features_for_fn,
unavailable_feats = attrs
.target_features
.iter()
.filter(|&feature| !self.tcx.sess.target_features.contains(feature))
.fold(String::new(), |mut s, feature| {
if !s.is_empty() {
s.push_str(", ");
}
s.push_str(feature.as_str());
s
}),
);
}
Ok(())
}
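For context on this check (illustrative, standard Rust, x86-64 assumed): a `#[target_feature]` function promises CPU features the interpreter cannot verify, so it rejects the call unless the feature is statically enabled for the target (with the wasm exception noted above). Runtime code guards the same call dynamically instead:

#[target_feature(enable = "avx2")]
unsafe fn uses_avx2() -> u32 {
    42
}

fn main() {
    // Runtime code can ask the CPU; the interpreter has no CPU to ask, so it relies
    // on the statically enabled target features checked above.
    if is_x86_feature_detected!("avx2") {
        println!("{}", unsafe { uses_avx2() });
    }
}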
/// Call this function -- pushing the stack frame and initializing the arguments.
/// The main entry point for creating a new stack frame: performs ABI checks and initializes
/// arguments.
#[instrument(skip(self), level = "trace")]
pub fn init_stack_frame(
&mut self,
instance: Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
caller_fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[FnArg<'tcx, M::Provenance>],
with_caller_location: bool,
destination: &MPlaceTy<'tcx, M::Provenance>,
mut stack_pop: StackPopCleanup,
) -> InterpResult<'tcx> {
// Compute callee information.
// FIXME: for variadic support, do we have to somehow determine callee's extra_args?
let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
throw_unsup_format!("calling a c-variadic function is not supported");
}
if M::enforce_abi(self) {
if caller_fn_abi.conv != callee_fn_abi.conv {
throw_ub_custom!(
fluent::const_eval_incompatible_calling_conventions,
callee_conv = format!("{:?}", callee_fn_abi.conv),
caller_conv = format!("{:?}", caller_fn_abi.conv),
)
}
}
// Check that all target features required by the callee (i.e., from
// the attribute `#[target_feature(enable = ...)]`) are enabled at
// compile time.
self.check_fn_target_features(instance)?;
if !callee_fn_abi.can_unwind {
// The callee cannot unwind, so force the `Unreachable` unwind handling.
match &mut stack_pop {
StackPopCleanup::Root { .. } => {}
StackPopCleanup::Goto { unwind, .. } => {
*unwind = mir::UnwindAction::Unreachable;
}
}
}
self.push_stack_frame_raw(instance, body, destination, stack_pop)?;
// If an error is raised here, pop the frame again to get an accurate backtrace.
// To this end, we wrap it all in a `try` block.
let res: InterpResult<'tcx> = try {
trace!(
"caller ABI: {:#?}, args: {:#?}",
caller_fn_abi,
args.iter()
.map(|arg| (
arg.layout().ty,
match arg {
FnArg::Copy(op) => format!("copy({op:?})"),
FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
}
))
.collect::<Vec<_>>()
);
trace!(
"spread_arg: {:?}, locals: {:#?}",
body.spread_arg,
body.args_iter()
.map(|local| (
local,
self.layout_of_local(self.frame(), local, None).unwrap().ty,
))
.collect::<Vec<_>>()
);
// In principle, we have two iterators: Where the arguments come from, and where
// they go to.
// The "where they come from" part is easy, we expect the caller to do any special handling
// that might be required here (e.g. for untupling).
// If `with_caller_location` is set we pretend there is an extra argument (that
// we will not pass).
assert_eq!(
args.len() + if with_caller_location { 1 } else { 0 },
caller_fn_abi.args.len(),
"mismatch between caller ABI and caller arguments",
);
let mut caller_args = args
.iter()
.zip(caller_fn_abi.args.iter())
.filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
// Now we have to spread them out across the callee's locals,
// taking into account the `spread_arg`. If we could write
// this as a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
// not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter();
for local in body.args_iter() {
// Construct the destination place for this argument. At this point all
// locals are still dead, so we cannot construct a `PlaceTy`.
let dest = mir::Place::from(local);
// `layout_of_local` does more than just the instantiation we need to get the
// type, but the result gets cached so this avoids calling the instantiation
// query *again* the next time this local is accessed.
let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == body.spread_arg {
// Make the local live once, then fill in the value field by field.
self.storage_live(local)?;
// Must be a tuple
let ty::Tuple(fields) = ty.kind() else {
span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
};
for (i, field_ty) in fields.iter().enumerate() {
let dest = dest.project_deeper(
&[mir::ProjectionElem::Field(FieldIdx::from_usize(i), field_ty)],
*self.tcx,
);
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
field_ty,
/* already_live */ true,
)?;
}
} else {
// Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
ty,
/* already_live */ false,
)?;
}
}
// If the callee needs a caller location, pretend we consume one more argument from the ABI.
if instance.def.requires_caller_location(*self.tcx) {
callee_args_abis.next().unwrap();
}
// Now we should have no more caller args or callee arg ABIs
assert!(
callee_args_abis.next().is_none(),
"mismatch between callee ABI and callee body arguments"
);
if caller_args.next().is_some() {
throw_ub_custom!(fluent::const_eval_too_many_caller_args);
}
// Don't forget to check the return type!
if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
throw_ub!(AbiMismatchReturn {
caller_ty: caller_fn_abi.ret.layout.ty,
callee_ty: callee_fn_abi.ret.layout.ty
});
}
// Protect return place for in-place return value passing.
M::protect_in_place_function_argument(self, &destination)?;
// Don't forget to mark "initially live" locals as live.
self.storage_live_for_always_live_locals()?;
};
match res {
Err(err) => {
// Don't show the incomplete stack frame in the error stacktrace.
self.stack_mut().pop();
Err(err)
}
Ok(()) => Ok(()),
}
}
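The `spread_arg` handling above exists mainly for the "rust-call" ABI used by closures: the caller passes the closure arguments as one tuple, and the interpreter spreads that tuple field by field over the callee's parameter locals. In surface Rust (standard code, not part of this change) the same shape looks like this:

fn call_with_pair<F: Fn(u8, u16) -> u32>(f: F) -> u32 {
    // At the MIR level this is roughly `Fn::call(&f, (1u8, 2u16))`: one tuple argument
    // on the caller side, two separate parameter locals inside the closure body.
    f(1, 2)
}

fn main() {
    assert_eq!(call_with_pair(|a, b| u32::from(a) + u32::from(b)), 3);
}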
/// Initiate a call to this function -- pushing the stack frame and initializing the arguments.
///
/// `caller_fn_abi` is used to determine if all the arguments are passed the proper way.
/// However, we also need `caller_abi` to determine if we need to do untupling of arguments.
@@ -550,7 +520,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// `with_caller_location` indicates whether the caller passed a caller location. Miri
/// implements caller locations without argument passing, but to match `FnAbi` we need to know
/// when those arguments are present.
pub(crate) fn eval_fn_call(
pub(super) fn init_fn_call(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
@@ -558,9 +528,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
with_caller_location: bool,
destination: &MPlaceTy<'tcx, M::Provenance>,
target: Option<mir::BasicBlock>,
mut unwind: mir::UnwindAction,
unwind: mir::UnwindAction,
) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", fn_val);
trace!("init_fn_call: {:#?}", fn_val);
let instance = match fn_val {
FnVal::Instance(instance) => instance,
@@ -591,7 +561,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)? {
assert!(!self.tcx.intrinsic(fallback.def_id()).unwrap().must_be_overridden);
assert!(matches!(fallback.def, ty::InstanceKind::Item(_)));
return self.eval_fn_call(
return self.init_fn_call(
FnVal::Instance(fallback),
(caller_abi, caller_fn_abi),
args,
@@ -630,189 +600,35 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
return Ok(());
};
// Compute callee information using the `instance` returned by
// `find_mir_or_eval_fn`.
// FIXME: for variadic support, do we have to somehow determine callee's extra_args?
let callee_fn_abi = self.fn_abi_of_instance(instance, ty::List::empty())?;
if callee_fn_abi.c_variadic || caller_fn_abi.c_variadic {
throw_unsup_format!("calling a c-variadic function is not supported");
}
if M::enforce_abi(self) {
if caller_fn_abi.conv != callee_fn_abi.conv {
throw_ub_custom!(
fluent::const_eval_incompatible_calling_conventions,
callee_conv = format!("{:?}", callee_fn_abi.conv),
caller_conv = format!("{:?}", caller_fn_abi.conv),
// Special handling for the closure ABI: untuple the last argument.
let args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (untuple_arg, args) = args.split_last().unwrap();
trace!("init_fn_call: Will pass last argument by untupling");
Cow::from(
args.iter()
.map(|a| Ok(a.clone()))
.chain(
(0..untuple_arg.layout().fields.count())
.map(|i| self.fn_arg_field(untuple_arg, i)),
)
.collect::<InterpResult<'_, Vec<_>>>()?,
)
}
}
} else {
// Plain arg passing
Cow::from(args)
};
// Check that all target features required by the callee (i.e., from
// the attribute `#[target_feature(enable = ...)]`) are enabled at
// compile time.
self.check_fn_target_features(instance)?;
if !callee_fn_abi.can_unwind {
// The callee cannot unwind, so force the `Unreachable` unwind handling.
unwind = mir::UnwindAction::Unreachable;
}
self.push_stack_frame(
self.init_stack_frame(
instance,
body,
caller_fn_abi,
&args,
with_caller_location,
destination,
StackPopCleanup::Goto { ret: target, unwind },
)?;
// If an error is raised here, pop the frame again to get an accurate backtrace.
// To this end, we wrap it all in a `try` block.
let res: InterpResult<'tcx> = try {
trace!(
"caller ABI: {:?}, args: {:#?}",
caller_abi,
args.iter()
.map(|arg| (
arg.layout().ty,
match arg {
FnArg::Copy(op) => format!("copy({op:?})"),
FnArg::InPlace(mplace) => format!("in-place({mplace:?})"),
}
))
.collect::<Vec<_>>()
);
trace!(
"spread_arg: {:?}, locals: {:#?}",
body.spread_arg,
body.args_iter()
.map(|local| (
local,
self.layout_of_local(self.frame(), local, None).unwrap().ty,
))
.collect::<Vec<_>>()
);
// In principle, we have two iterators: Where the arguments come from, and where
// they go to.
// For where they come from: If the ABI is RustCall, we untuple the
// last incoming argument. These two iterators do not have the same type,
// so to keep the code paths uniform we accept an allocation
// (for RustCall ABI only).
let caller_args: Cow<'_, [FnArg<'tcx, M::Provenance>]> =
if caller_abi == Abi::RustCall && !args.is_empty() {
// Untuple
let (untuple_arg, args) = args.split_last().unwrap();
trace!("eval_fn_call: Will pass last argument by untupling");
Cow::from(
args.iter()
.map(|a| Ok(a.clone()))
.chain(
(0..untuple_arg.layout().fields.count())
.map(|i| self.fn_arg_field(untuple_arg, i)),
)
.collect::<InterpResult<'_, Vec<_>>>()?,
)
} else {
// Plain arg passing
Cow::from(args)
};
// If `with_caller_location` is set we pretend there is an extra argument (that
// we will not pass).
assert_eq!(
caller_args.len() + if with_caller_location { 1 } else { 0 },
caller_fn_abi.args.len(),
"mismatch between caller ABI and caller arguments",
);
let mut caller_args = caller_args
.iter()
.zip(caller_fn_abi.args.iter())
.filter(|arg_and_abi| !matches!(arg_and_abi.1.mode, PassMode::Ignore));
// Now we have to spread them out across the callee's locals,
// taking into account the `spread_arg`. If we could write
// this as a single iterator (that handles `spread_arg`), then
// `pass_argument` would be the loop body. It takes care to
// not advance `caller_iter` for ignored arguments.
let mut callee_args_abis = callee_fn_abi.args.iter();
for local in body.args_iter() {
// Construct the destination place for this argument. At this point all
// locals are still dead, so we cannot construct a `PlaceTy`.
let dest = mir::Place::from(local);
// `layout_of_local` does more than just the instantiation we need to get the
// type, but the result gets cached so this avoids calling the instantiation
// query *again* the next time this local is accessed.
let ty = self.layout_of_local(self.frame(), local, None)?.ty;
if Some(local) == body.spread_arg {
// Make the local live once, then fill in the value field by field.
self.storage_live(local)?;
// Must be a tuple
let ty::Tuple(fields) = ty.kind() else {
span_bug!(self.cur_span(), "non-tuple type for `spread_arg`: {ty}")
};
for (i, field_ty) in fields.iter().enumerate() {
let dest = dest.project_deeper(
&[mir::ProjectionElem::Field(
FieldIdx::from_usize(i),
field_ty,
)],
*self.tcx,
);
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
field_ty,
/* already_live */ true,
)?;
}
} else {
// Normal argument. Cannot mark it as live yet, it might be unsized!
let callee_abi = callee_args_abis.next().unwrap();
self.pass_argument(
&mut caller_args,
callee_abi,
&dest,
ty,
/* already_live */ false,
)?;
}
}
// If the callee needs a caller location, pretend we consume one more argument from the ABI.
if instance.def.requires_caller_location(*self.tcx) {
callee_args_abis.next().unwrap();
}
// Now we should have no more caller args or callee arg ABIs
assert!(
callee_args_abis.next().is_none(),
"mismatch between callee ABI and callee body arguments"
);
if caller_args.next().is_some() {
throw_ub_custom!(fluent::const_eval_too_many_caller_args);
}
// Don't forget to check the return type!
if !self.check_argument_compat(&caller_fn_abi.ret, &callee_fn_abi.ret)? {
throw_ub!(AbiMismatchReturn {
caller_ty: caller_fn_abi.ret.layout.ty,
callee_ty: callee_fn_abi.ret.layout.ty
});
}
// Protect return place for in-place return value passing.
M::protect_in_place_function_argument(self, &destination)?;
// Don't forget to mark "initially live" locals as live.
self.storage_live_for_always_live_locals()?;
};
match res {
Err(err) => {
self.stack_mut().pop();
Err(err)
}
Ok(()) => Ok(()),
}
)
}
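The `with_caller_location` flag threaded through these functions corresponds to `#[track_caller]`: the caller conceptually passes one extra location argument, which the ABI accounting above has to count even though the interpreter does not pass it as a normal argument. Plain-Rust illustration:

use std::panic::Location;

#[track_caller]
fn where_was_i_called() -> &'static Location<'static> {
    // The "hidden" caller-location argument surfaces here.
    Location::caller()
}

fn main() {
    let loc = where_was_i_called();
    println!("called from {}:{}", loc.file(), loc.line());
}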
// `InstanceKind::Virtual` does not have callable MIR. Calls to `Virtual` instances must be
// codegen'd / interpreted as virtual calls through the vtable.
@@ -935,7 +751,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
caller_fn_abi.args[0].layout.ty = receiver_ty;
// recurse with concrete function
self.eval_fn_call(
self.init_fn_call(
FnVal::Instance(fn_inst),
(caller_abi, &caller_fn_abi),
&args,
@@ -948,30 +764,33 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
}
pub(crate) fn eval_fn_tail_call(
/// Initiate a tail call to this function -- popping the current stack frame, pushing the new
/// stack frame and initializing the arguments.
pub(super) fn init_fn_tail_call(
&mut self,
fn_val: FnVal<'tcx, M::ExtraFnVal>,
(caller_abi, caller_fn_abi): (Abi, &FnAbi<'tcx, Ty<'tcx>>),
args: &[FnArg<'tcx, M::Provenance>],
with_caller_location: bool,
) -> InterpResult<'tcx> {
trace!("eval_fn_call: {:#?}", fn_val);
trace!("init_fn_tail_call: {:#?}", fn_val);
// This is the "canonical" implementation of tail calls,
// a pop of the current stack frame, followed by a normal call
// which pushes a new stack frame, with the return address from
// the popped stack frame.
//
// Note that we are using `pop_stack_frame` and not `return_from_current_stack_frame`,
// Note that we are using `pop_stack_frame_raw` and not `return_from_current_stack_frame`,
// as the latter "executes" the goto to the return block, but we don't want to,
// only the tail called function should return to the current return block.
M::before_stack_pop(self, self.frame())?;
let StackPopInfo { return_action, return_to_block, return_place } =
self.pop_stack_frame(false)?;
self.pop_stack_frame_raw(false)?;
assert_eq!(return_action, ReturnAction::Normal);
// Take the "stack pop cleanup" info, and use that to initiate the next call.
let StackPopCleanup::Goto { ret, unwind } = return_to_block else {
bug!("can't tailcall as root");
};
@@ -980,7 +799,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// we should check if both caller&callee can/n't unwind,
// see <https://github.com/rust-lang/rust/pull/113128#issuecomment-1614979803>
self.eval_fn_call(
self.init_fn_call(
fn_val,
(caller_abi, caller_fn_abi),
args,
@@ -991,41 +810,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)
}
fn check_fn_target_features(&self, instance: ty::Instance<'tcx>) -> InterpResult<'tcx, ()> {
// Calling functions with `#[target_feature]` is not unsafe on WASM, see #84988
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
if !self.tcx.sess.target.is_like_wasm
&& attrs
.target_features
.iter()
.any(|feature| !self.tcx.sess.target_features.contains(feature))
{
throw_ub_custom!(
fluent::const_eval_unavailable_target_features_for_fn,
unavailable_feats = attrs
.target_features
.iter()
.filter(|&feature| !self.tcx.sess.target_features.contains(feature))
.fold(String::new(), |mut s, feature| {
if !s.is_empty() {
s.push_str(", ");
}
s.push_str(feature.as_str());
s
}),
);
}
Ok(())
}
fn drop_in_place(
pub(super) fn init_drop_in_place_call(
&mut self,
place: &PlaceTy<'tcx, M::Provenance>,
instance: ty::Instance<'tcx>,
target: mir::BasicBlock,
unwind: mir::UnwindAction,
) -> InterpResult<'tcx> {
trace!("drop_in_place: {:?},\n instance={:?}", place, instance);
trace!("init_drop_in_place_call: {:?},\n instance={:?}", place, instance);
// We take the address of the object. This may well be unaligned, which is fine
// for us here. However, unaligned accesses will probably make the actual drop
// implementation fail -- a problem shared by rustc.
@@ -1060,7 +852,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let arg = self.mplace_to_ref(&place)?;
let ret = MPlaceTy::fake_alloc_zst(self.layout_of(self.tcx.types.unit)?);
self.eval_fn_call(
self.init_fn_call(
FnVal::Instance(instance),
(Abi::Rust, fn_abi),
&[FnArg::Copy(arg.into())],
@@ -1070,4 +862,117 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
unwind,
)
}
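What the drop-glue call above ultimately executes is the same operation `core::ptr::drop_in_place` performs in surface Rust: take the address of the place and run the type's drop glue on it. Illustrative, standard library only:

use std::mem::ManuallyDrop;
use std::ptr;

struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn main() {
    let mut slot = ManuallyDrop::new(Noisy("slot"));
    // Roughly what a MIR `Drop { place, .. }` terminator does for this place:
    // take its address and invoke the drop glue on it.
    unsafe { ptr::drop_in_place::<Noisy>(&mut *slot) };
}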
/// Pops the current frame from the stack, copies the return value to the caller, deallocates
/// the memory for allocated locals, and jumps to an appropriate place.
///
/// If `unwinding` is `false`, then we are performing a normal return
/// from a function. In this case, we jump back into the frame of the caller,
/// and continue execution as normal.
///
/// If `unwinding` is `true`, then we are in the middle of a panic,
/// and need to unwind this frame. In this case, we jump to the
/// `cleanup` block for the function, which is responsible for running
/// `Drop` impls for any locals that have been initialized at this point.
/// The cleanup block ends with a special `Resume` terminator, which will
/// cause us to continue unwinding.
#[instrument(skip(self), level = "trace")]
pub(super) fn return_from_current_stack_frame(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx> {
info!(
"popping stack frame ({})",
if unwinding { "during unwinding" } else { "returning from function" }
);
// Check `unwinding`.
assert_eq!(
unwinding,
match self.frame().loc {
Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
Right(_) => true,
}
);
if unwinding && self.frame_idx() == 0 {
throw_ub_custom!(fluent::const_eval_unwind_past_top);
}
M::before_stack_pop(self, self.frame())?;
// Copy return value. Must of course happen *before* we deallocate the locals.
// Must be *after* `before_stack_pop` as otherwise the return place might still be protected.
let copy_ret_result = if !unwinding {
let op = self
.local_to_op(mir::RETURN_PLACE, None)
.expect("return place should always be live");
let dest = self.frame().return_place.clone();
let err = if self.stack().len() == 1 {
// The initializer of constants and statics will get validated separately
// after the constant has been fully evaluated. While we could fall back to the default
// code path, that will cause -Zenforce-validity to cycle on static initializers.
// Reading from a static's memory is not allowed during its evaluation, and will always
// trigger a cycle error. Validation must read from the memory of the current item.
// For Miri this means we do not validate the root frame return value,
// but Miri anyway calls `read_target_isize` on that so separate validation
// is not needed.
self.copy_op_no_dest_validation(&op, &dest)
} else {
self.copy_op_allow_transmute(&op, &dest)
};
trace!("return value: {:?}", self.dump_place(&dest.into()));
// We delay actually short-circuiting on this error until *after* the stack frame is
// popped, since we want this error to be attributed to the caller, whose type defines
// this transmute.
err
} else {
Ok(())
};
// All right, now it is time to actually pop the frame.
let stack_pop_info = self.pop_stack_frame_raw(unwinding)?;
// Report error from return value copy, if any.
copy_ret_result?;
match stack_pop_info.return_action {
ReturnAction::Normal => {}
ReturnAction::NoJump => {
// The hook already did everything.
return Ok(());
}
ReturnAction::NoCleanup => {
// If we are not doing cleanup, also skip everything else.
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Skip machine hook.
return Ok(());
}
}
// Normal return, figure out where to jump.
if unwinding {
// Follow the unwind edge.
let unwind = match stack_pop_info.return_to_block {
StackPopCleanup::Goto { unwind, .. } => unwind,
StackPopCleanup::Root { .. } => {
panic!("encountered StackPopCleanup::Root when unwinding!")
}
};
// This must be the very last thing that happens, since it can in fact push a new stack frame.
self.unwind_to_block(unwind)
} else {
// Follow the normal return edge.
match stack_pop_info.return_to_block {
StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
StackPopCleanup::Root { .. } => {
assert!(
self.stack().is_empty(),
"only the topmost frame can have StackPopCleanup::Root"
);
Ok(())
}
}
}
}
}
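The unwinding path above ("follow the unwind edge") is what runs cleanup blocks, i.e. the `Drop` impls of already-initialized locals, while a panic propagates. Standard-Rust illustration of that behavior:

struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        println!("cleanup ran while unwinding");
    }
}

fn main() {
    let result = std::panic::catch_unwind(|| {
        let _g = Guard;
        panic!("boom"); // unwinding pops this frame; `Guard::drop` runs first
    });
    assert!(result.is_err());
}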

View file

@@ -1,40 +1,29 @@
use std::cell::Cell;
use std::{fmt, mem};
use either::{Either, Left, Right};
use either::{Left, Right};
use rustc_errors::DiagCtxtHandle;
use rustc_hir::def_id::DefId;
use rustc_hir::definitions::DefPathData;
use rustc_hir::{self as hir};
use rustc_index::IndexVec;
use rustc_infer::infer::at::ToTrace;
use rustc_infer::infer::TyCtxtInferExt;
use rustc_infer::traits::ObligationCause;
use rustc_middle::mir::interpret::{
CtfeProvenance, ErrorHandled, InvalidMetaKind, ReportedErrorInfo,
};
use rustc_middle::mir::interpret::{ErrorHandled, InvalidMetaKind, ReportedErrorInfo};
use rustc_middle::query::TyCtxtAt;
use rustc_middle::ty::layout::{
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOf, LayoutOfHelpers,
TyAndLayout,
self, FnAbiError, FnAbiOfHelpers, FnAbiRequest, LayoutError, LayoutOfHelpers, TyAndLayout,
};
use rustc_middle::ty::{self, GenericArgsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, Variance};
use rustc_middle::{bug, mir, span_bug};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_middle::{mir, span_bug};
use rustc_session::Limit;
use rustc_span::Span;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
use rustc_trait_selection::traits::ObligationCtxt;
use tracing::{debug, info, info_span, instrument, trace};
use tracing::{debug, trace};
use super::{
err_inval, throw_inval, throw_ub, throw_ub_custom, throw_unsup, GlobalId, Immediate,
InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind,
OpTy, Operand, Place, PlaceTy, Pointer, PointerArithmetic, Projectable, Provenance,
ReturnAction, Scalar,
err_inval, throw_inval, throw_ub, throw_ub_custom, Frame, FrameInfo, GlobalId, InterpErrorInfo,
InterpResult, MPlaceTy, Machine, MemPlaceMeta, Memory, OpTy, Place, PlaceTy, PointerArithmetic,
Projectable, Provenance,
};
use crate::{errors, fluent_generated as fluent, util, ReportErrorExt};
use crate::{fluent_generated as fluent, util, ReportErrorExt};
pub struct InterpCx<'tcx, M: Machine<'tcx>> {
/// Stores the `Machine` instance.
@@ -57,314 +46,6 @@ pub struct InterpCx<'tcx, M: Machine<'tcx>> {
pub recursion_limit: Limit,
}
// The Phantomdata exists to prevent this type from being `Send`. If it were sent across a thread
// boundary and dropped in the other thread, it would exit the span in the other thread.
struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
impl SpanGuard {
/// By default a `SpanGuard` does nothing.
fn new() -> Self {
Self(tracing::Span::none(), std::marker::PhantomData)
}
/// If a span is entered, we exit the previous span (if any, normally none) and enter the
/// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
/// `Frame` by creating a dummy span to begin with and then entering it once the frame has
/// been pushed.
fn enter(&mut self, span: tracing::Span) {
// This executes the destructor on the previous instance of `SpanGuard`, ensuring that
// we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
// can't protect the tracing stack, but that'll just lead to weird logging, no actual
// problems.
*self = Self(span, std::marker::PhantomData);
self.0.with_subscriber(|(id, dispatch)| {
dispatch.enter(id);
});
}
}
impl Drop for SpanGuard {
fn drop(&mut self) {
self.0.with_subscriber(|(id, dispatch)| {
dispatch.exit(id);
});
}
}
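The `PhantomData<*const u8>` field mentioned in the comment above is what makes `SpanGuard` non-`Send`: raw pointers are not `Send`, and `PhantomData` propagates that property. A self-contained check (standard Rust):

use std::marker::PhantomData;

struct LikeSpanGuard(PhantomData<*const u8>);

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<PhantomData<u8>>(); // ok: PhantomData of a Send type is Send
    // assert_send::<LikeSpanGuard>(); // would not compile: `*const u8` is not `Send`
}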
/// A stack frame.
pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
pub body: &'tcx mir::Body<'tcx>,
/// The def_id and args of the current function.
pub instance: ty::Instance<'tcx>,
/// Extra data for the machine.
pub extra: Extra,
////////////////////////////////////////////////////////////////////////////////
// Return place and locals
////////////////////////////////////////////////////////////////////////////////
/// Work to perform when returning from this function.
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to,
/// and its layout in the caller.
pub return_place: MPlaceTy<'tcx, Prov>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
/// The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
///
/// Do *not* access this directly; always go through the machine hook!
pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
/// The span of the `tracing` crate is stored here.
/// When the guard is dropped, the span is exited. This gives us
/// a full stack trace on all tracing statements.
tracing_span: SpanGuard,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
/// If this is `Right`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
///
/// Needs to be public because ConstProp does unspeakable things to it.
pub loc: Either<mir::Location, Span>,
}
/// What we store about a frame in an interpreter backtrace.
#[derive(Clone, Debug)]
pub struct FrameInfo<'tcx> {
pub instance: ty::Instance<'tcx>,
pub span: Span,
}
#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
pub enum StackPopCleanup {
/// Jump to the next block in the caller, or cause UB if None (that's a function
/// that may never return). Also store layout of return place so
/// we can validate it at that layout.
/// `ret` stores the block we jump to on a normal return, while `unwind`
/// stores the block used for cleanup during unwinding.
Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
/// The root frame of the stack: nowhere else to jump to.
/// `cleanup` says whether locals are deallocated. Static computation
/// wants them leaked to intern what they need (and just throw away
/// the entire `ecx` when it is done).
Root { cleanup: bool },
}
/// Return type of [`InterpCx::pop_stack_frame`].
pub struct StackPopInfo<'tcx, Prov: Provenance> {
/// Additional information about the action to be performed when returning from the popped
/// stack frame.
pub return_action: ReturnAction,
/// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
pub return_to_block: StackPopCleanup,
/// [`return_place`](Frame::return_place) of the popped stack frame.
pub return_place: MPlaceTy<'tcx, Prov>,
}
/// State of a local variable including a memoized layout
#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
value: LocalValue<Prov>,
/// Don't modify if `Some`, this is only used to prevent computing the layout twice.
/// Avoids computing the layout of locals that are never actually initialized.
layout: Cell<Option<TyAndLayout<'tcx>>>,
}
impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LocalState")
.field("value", &self.value)
.field("ty", &self.layout.get().map(|l| l.ty))
.finish()
}
}
/// Current value of a local variable
///
/// This does not store the type of the local; the type is given by `body.local_decls` and can never
/// change, so by not storing it here we avoid having to maintain that as an invariant.
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// A normal, live local.
/// Mostly for convenience, we re-use the `Operand` type here.
/// This is an optimization over just always having a pointer here;
/// we can thus avoid doing an allocation when the local just stores
/// immediate values *and* never has its address taken.
Live(Operand<Prov>),
}
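A toy model of the `Dead`/`Live` split above, using invented types rather than the rustc ones: a dead local rejects every access, while a live one is either an immediate value or a pointer into some allocation.

enum ToyLocal {
    Dead,
    Immediate(u64),   // roughly LocalValue::Live(Operand::Immediate(..))
    Indirect(usize),  // roughly LocalValue::Live(Operand::Indirect(..)): an address
}

fn access(local: &ToyLocal) -> Result<u64, &'static str> {
    match local {
        ToyLocal::Dead => Err("dead local"), // the interpreter raises UB `DeadLocal` here
        ToyLocal::Immediate(v) => Ok(*v),
        ToyLocal::Indirect(_addr) => Err("requires a memory load"),
    }
}

fn main() {
    assert!(access(&ToyLocal::Dead).is_err());
    assert_eq!(access(&ToyLocal::Immediate(7)), Ok(7));
}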
impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub fn make_live_uninit(&mut self) {
self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
}
/// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
/// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
/// private.
pub fn as_mplace_or_imm(
&self,
) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
match self.value {
LocalValue::Dead => None,
LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
}
}
/// Read the local's value or error if the local is not yet live or not live anymore.
#[inline(always)]
pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
}
}
/// Overwrite the local. If the local can be overwritten in place, return a reference
/// to do so; otherwise return the `MemPlace` to consult instead.
#[inline(always)]
pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
}
}
}
impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
Frame {
body: self.body,
instance: self.instance,
return_to_block: self.return_to_block,
return_place: self.return_place,
locals: self.locals,
loc: self.loc,
extra,
tracing_span: self.tracing_span,
}
}
}
impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
/// Get the current location within the Frame.
///
/// If this is `Right`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
///
/// Used by priroda.
pub fn current_loc(&self) -> Either<mir::Location, Span> {
self.loc
}
/// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
self.loc.left().map(|loc| self.body.source_info(loc))
}
pub fn current_span(&self) -> Span {
match self.loc {
Left(loc) => self.body.source_info(loc).span,
Right(span) => span,
}
}
pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> {
// We first try to get a HirId via the current source scope,
// and fall back to `body.source`.
self.current_source_info()
.and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data {
mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
mir::ClearCrossCrate::Clear => None,
})
.or_else(|| {
let def_id = self.body.source.def_id().as_local();
def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id))
})
}
/// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
/// sanity check to detect bugs where we mix up which stack frame a place refers to.
#[inline(always)]
pub(super) fn locals_addr(&self) -> usize {
self.locals.raw.as_ptr().addr()
}
#[must_use]
pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
let mut frames = Vec::new();
// This deliberately does *not* honor `requires_caller_location` since it is used for much
// more than just panics.
for frame in stack.iter().rev() {
let span = match frame.loc {
Left(loc) => {
// If the stacktrace passes through MIR-inlined source scopes, add them.
let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
let mut scope_data = &frame.body.source_scopes[scope];
while let Some((instance, call_span)) = scope_data.inlined {
frames.push(FrameInfo { span, instance });
span = call_span;
scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
}
span
}
Right(span) => span,
};
frames.push(FrameInfo { span, instance: frame.instance });
}
trace!("generate stacktrace: {:#?}", frames);
frames
}
}
// FIXME: only used by miri, should be removed once translatable.
impl<'tcx> fmt::Display for FrameInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
write!(f, "inside closure")
} else {
// Note: this triggers a `must_produce_diag` state, which means that if we ever
// get here we must emit a diagnostic. We should never display a `FrameInfo` unless
// we actually want to emit a warning or error to the user.
write!(f, "inside `{}`", self.instance)
}
})
}
}
impl<'tcx> FrameInfo<'tcx> {
pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
let span = self.span;
if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
} else {
let instance = format!("{}", self.instance);
// Note: this triggers a `must_produce_diag` state, which means that if we ever get
// here we must emit a diagnostic. We should never display a `FrameInfo` unless we
// actually want to emit a warning or error to the user.
errors::FrameNote { where_: "instance", span, instance, times: 0 }
}
}
}
impl<'tcx, M: Machine<'tcx>> HasDataLayout for InterpCx<'tcx, M> {
#[inline]
fn data_layout(&self) -> &TargetDataLayout {
@@ -703,30 +384,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
span_bug!(self.cur_span(), "no non-`#[track_caller]` frame found")
}
#[inline(always)]
pub(super) fn layout_of_local(
&self,
frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
local: mir::Local,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
let state = &frame.locals[local];
if let Some(layout) = state.layout.get() {
return Ok(layout);
}
let layout = from_known_layout(self.tcx, self.param_env, layout, || {
let local_ty = frame.body.local_decls[local].ty;
let local_ty =
self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
self.layout_of(local_ty)
})?;
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));
Ok(layout)
}
/// Returns the actual dynamic size and alignment of the place at the given type.
/// Only the "meta" (metadata) part of the place matters.
/// This can fail to provide an answer for extern types.
@@ -825,132 +482,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.size_and_align_of(&mplace.meta(), &mplace.layout)
}
#[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
return_place: &MPlaceTy<'tcx, M::Provenance>,
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
// First push a stack frame so we have access to the local args
self.push_new_stack_frame(instance, body, return_to_block, return_place.clone())?;
self.after_stack_frame_push(instance, body)?;
Ok(())
}
/// Creates a new stack frame, initializes it and pushes it onto the stack.
/// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame).
fn push_new_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
return_to_block: StackPopCleanup,
return_place: MPlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx> {
let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
let locals = IndexVec::from_elem(dead_local, &body.local_decls);
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place,
locals,
instance,
tracing_span: SpanGuard::new(),
extra: (),
};
let frame = M::init_frame(self, pre_frame)?;
self.stack_mut().push(frame);
Ok(())
}
/// A private helper for [`push_stack_frame`](InterpCx::push_stack_frame).
fn after_stack_frame_push(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
) -> InterpResult<'tcx> {
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
for &const_ in body.required_consts() {
let c =
self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
err.emit_note(*self.tcx);
err
})?;
}
// done
M::after_stack_push(self)?;
self.frame_mut().loc = Left(mir::Location::START);
let span = info_span!("frame", "{}", instance);
self.frame_mut().tracing_span.enter(span);
Ok(())
}
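One kind of "required const" this loop checks (hedged example, standard Rust 1.79+): an inline `const` block inside a generic function can only be evaluated once the generic arguments are known, so a failure there is a post-monomorphization error, which is exactly what this per-frame check surfaces during interpretation.

fn assert_not_zero_sized<T>() {
    // Evaluated per instantiation of `T`; a failure here is a post-monomorphization error.
    const { assert!(std::mem::size_of::<T>() != 0, "zero-sized T not supported") };
}

fn main() {
    assert_not_zero_sized::<u32>(); // fine
    // assert_not_zero_sized::<()>(); // would fail to compile: the inline const panics
}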
/// Pops a stack frame from the stack and returns some information about it.
///
/// This also deallocates locals, if necessary.
///
/// [`M::before_stack_pop`] should be called before calling this function.
/// [`M::after_stack_pop`] is called by this function automatically.
///
/// [`M::before_stack_pop`]: Machine::before_stack_pop
/// [`M::after_stack_pop`]: Machine::after_stack_pop
pub fn pop_stack_frame(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> {
let cleanup = self.cleanup_current_frame_locals()?;
let frame =
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
let return_to_block = frame.return_to_block;
let return_place = frame.return_place.clone();
let return_action;
if cleanup {
return_action = M::after_stack_pop(self, frame, unwinding)?;
assert_ne!(return_action, ReturnAction::NoCleanup);
} else {
return_action = ReturnAction::NoCleanup;
};
Ok(StackPopInfo { return_action, return_to_block, return_place })
}
/// A private helper for [`pop_stack_frame`](InterpCx::pop_stack_frame).
/// Returns `true` if cleanup has been done, `false` otherwise.
fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> {
// Cleanup: deallocate locals.
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// We do this while the frame is still on the stack, so errors point to the callee.
let return_to_block = self.frame().return_to_block;
let cleanup = match return_to_block {
StackPopCleanup::Goto { .. } => true,
StackPopCleanup::Root { cleanup, .. } => cleanup,
};
if cleanup {
// We need to take the locals out, since we need to mutate while iterating.
let locals = mem::take(&mut self.frame_mut().locals);
for local in &locals {
self.deallocate_local(local.value)?;
}
}
Ok(cleanup)
}
/// Jump to the given block.
#[inline]
pub fn go_to_block(&mut self, target: mir::BasicBlock) {
@@ -997,248 +528,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(())
}
/// Pops the current frame from the stack, deallocating the
/// memory for allocated locals, and jumps to an appropriate place.
///
/// If `unwinding` is `false`, then we are performing a normal return
/// from a function. In this case, we jump back into the frame of the caller,
/// and continue execution as normal.
///
/// If `unwinding` is `true`, then we are in the middle of a panic,
/// and need to unwind this frame. In this case, we jump to the
/// `cleanup` block for the function, which is responsible for running
/// `Drop` impls for any locals that have been initialized at this point.
/// The cleanup block ends with a special `Resume` terminator, which will
/// cause us to continue unwinding.
#[instrument(skip(self), level = "debug")]
pub(super) fn return_from_current_stack_frame(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx> {
info!(
"popping stack frame ({})",
if unwinding { "during unwinding" } else { "returning from function" }
);
// Check `unwinding`.
assert_eq!(
unwinding,
match self.frame().loc {
Left(loc) => self.body().basic_blocks[loc.block].is_cleanup,
Right(_) => true,
}
);
if unwinding && self.frame_idx() == 0 {
throw_ub_custom!(fluent::const_eval_unwind_past_top);
}
M::before_stack_pop(self, self.frame())?;
// Copy return value. Must of course happen *before* we deallocate the locals.
let copy_ret_result = if !unwinding {
let op = self
.local_to_op(mir::RETURN_PLACE, None)
.expect("return place should always be live");
let dest = self.frame().return_place.clone();
let err = if self.stack().len() == 1 {
// The initializer of constants and statics will get validated separately
// after the constant has been fully evaluated. While we could fall back to the default
// code path, that will cause -Zenforce-validity to cycle on static initializers.
// Reading from a static's memory is not allowed during its evaluation, and will always
// trigger a cycle error. Validation must read from the memory of the current item.
// For Miri this means we do not validate the root frame return value,
// but Miri anyway calls `read_target_isize` on that so separate validation
// is not needed.
self.copy_op_no_dest_validation(&op, &dest)
} else {
self.copy_op_allow_transmute(&op, &dest)
};
trace!("return value: {:?}", self.dump_place(&dest.into()));
// We delay actually short-circuiting on this error until *after* the stack frame is
// popped, since we want this error to be attributed to the caller, whose type defines
// this transmute.
err
} else {
Ok(())
};
// All right, now it is time to actually pop the frame.
let stack_pop_info = self.pop_stack_frame(unwinding)?;
// Report error from return value copy, if any.
copy_ret_result?;
match stack_pop_info.return_action {
ReturnAction::Normal => {}
ReturnAction::NoJump => {
// The hook already did everything.
return Ok(());
}
ReturnAction::NoCleanup => {
// If we are not doing cleanup, also skip everything else.
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
assert!(!unwinding, "tried to skip cleanup during unwinding");
// Skip machine hook.
return Ok(());
}
}
// Normal return, figure out where to jump.
if unwinding {
// Follow the unwind edge.
let unwind = match stack_pop_info.return_to_block {
StackPopCleanup::Goto { unwind, .. } => unwind,
StackPopCleanup::Root { .. } => {
panic!("encountered StackPopCleanup::Root when unwinding!")
}
};
// This must be the very last thing that happens, since it can in fact push a new stack frame.
self.unwind_to_block(unwind)
} else {
// Follow the normal return edge.
match stack_pop_info.return_to_block {
StackPopCleanup::Goto { ret, .. } => self.return_to_block(ret),
StackPopCleanup::Root { .. } => {
assert!(
self.stack().is_empty(),
"only the topmost frame can have StackPopCleanup::Root"
);
Ok(())
}
}
}
}
/// In the current stack frame, mark all locals as live that are not arguments and don't have
/// `Storage*` annotations (this includes the return place).
pub fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
self.storage_live(mir::RETURN_PLACE)?;
let body = self.body();
let always_live = always_storage_live_locals(body);
for local in body.vars_and_temps_iter() {
if always_live.contains(local) {
self.storage_live(local)?;
}
}
Ok(())
}
pub fn storage_live_dyn(
&mut self,
local: mir::Local,
meta: MemPlaceMeta<M::Provenance>,
) -> InterpResult<'tcx> {
trace!("{:?} is now live", local);
// We avoid `ty.is_trivially_sized` since that does something expensive for ADTs.
fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
match ty.kind() {
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Never
| ty::Error(_)
| ty::Dynamic(_, _, ty::DynStar) => true,
ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false,
ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)),
ty::Pat(ty, ..) => is_very_trivially_sized(*ty),
// We don't want to do any queries, so there is not much we can do with ADTs.
ty::Adt(..) => false,
ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
ty::Infer(ty::TyVar(_)) => false,
ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
}
}
}
// This is a hot function, we avoid computing the layout when possible.
// `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
None
} else {
// We need the layout.
let layout = self.layout_of_local(self.frame(), local, None)?;
if layout.is_sized() { None } else { Some(layout) }
};
let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
if !meta.has_meta() {
throw_unsup!(UnsizedLocal);
}
// Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
Operand::Indirect(*dest_place.mplace())
} else {
assert!(!meta.has_meta()); // we're dropping the metadata
// Just make this an efficient immediate.
// Note that not calling `layout_of` here does have one real consequence:
// if the type is too big, we'll only notice this when the local is actually initialized,
// which is a bit too late -- we should ideally notice this already here, when the memory
// is conceptually allocated. But given how rare that error is and that this is a hot function,
// we accept this downside for now.
Operand::Immediate(Immediate::Uninit)
});
// If the local is already live, deallocate its old memory.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
self.deallocate_local(old)?;
Ok(())
}
/// Mark a local's storage as live, killing the previous content.
#[inline(always)]
pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
self.storage_live_dyn(local, MemPlaceMeta::None)
}
pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
// If the local is already dead, this is a NOP.
let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
self.deallocate_local(old)?;
Ok(())
}
#[instrument(skip(self), level = "debug")]
fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type. Hence we can `unwrap`.
trace!(
"deallocating local {:?}: {:?}",
local,
// Locals always have an `alloc_id` (they are never the result of an int2ptr).
self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
);
self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
};
Ok(())
}
/// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
/// (`mir::Const`/`ty::Const` have `eval` methods that can be used directly instead.)
pub fn ctfe_query<T>(
@ -1328,39 +617,7 @@ impl<'a, 'tcx, M: Machine<'tcx>> std::fmt::Debug for PlacePrinter<'a, 'tcx, M> {
}
write!(fmt, ":")?;
match self.ecx.frame().locals[local].value {
LocalValue::Dead => write!(fmt, " is dead")?,
LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
write!(fmt, " is uninitialized")?
}
LocalValue::Live(Operand::Indirect(mplace)) => {
write!(
fmt,
" by {} ref {:?}:",
match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
MemPlaceMeta::None => String::new(),
},
mplace.ptr,
)?;
allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {val:?}")?;
if let Scalar::Ptr(ptr, _size) = val {
allocs.push(ptr.provenance.get_alloc_id());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({val1:?}, {val2:?})")?;
if let Scalar::Ptr(ptr, _size) = val1 {
allocs.push(ptr.provenance.get_alloc_id());
}
if let Scalar::Ptr(ptr, _size) = val2 {
allocs.push(ptr.provenance.get_alloc_id());
}
}
}
self.ecx.frame().locals[local].print(&mut allocs, fmt)?;
write!(fmt, ": {:?}", self.ecx.dump_allocs(allocs.into_iter().flatten().collect()))
}

View file

@ -37,7 +37,7 @@ pub enum ReturnAction {
/// took care of everything.
NoJump,
/// Returned by [`InterpCx::pop_stack_frame`] when no cleanup should be done.
/// Returned by [`InterpCx::pop_stack_frame_raw`] when no cleanup should be done.
NoCleanup,
}

View file

@ -1,5 +1,6 @@
//! An interpreter for MIR used in CTFE and by miri
mod call;
mod cast;
mod discriminant;
mod eval_context;
@ -11,8 +12,8 @@ mod operand;
mod operator;
mod place;
mod projection;
mod stack;
mod step;
mod terminator;
mod traits;
mod util;
mod validity;
@ -22,7 +23,8 @@ use eval_context::{from_known_layout, mir_assign_valid_types};
#[doc(no_inline)]
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
pub use self::eval_context::{format_interp_error, Frame, FrameInfo, InterpCx, StackPopCleanup};
pub use self::call::FnArg;
pub use self::eval_context::{format_interp_error, InterpCx};
pub use self::intern::{
intern_const_alloc_for_constprop, intern_const_alloc_recursive, HasStaticRootDefId, InternKind,
InternResult,
@ -35,7 +37,7 @@ pub use self::operand::{ImmTy, Immediate, OpTy, Readable};
pub use self::place::{MPlaceTy, MemPlaceMeta, PlaceTy, Writeable};
use self::place::{MemPlace, Place};
pub use self::projection::{OffsetMode, Projectable};
pub use self::terminator::FnArg;
pub use self::stack::{Frame, FrameInfo, LocalState, StackPopCleanup, StackPopInfo};
pub(crate) use self::util::create_static_alloc;
pub use self::validity::{CtfeValidationMode, RefTracking};
pub use self::visitor::ValueVisitor;

View file

@ -184,6 +184,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
#[inline]
pub fn from_scalar(val: Scalar<Prov>, layout: TyAndLayout<'tcx>) -> Self {
debug_assert!(layout.abi.is_scalar(), "`ImmTy::from_scalar` on non-scalar layout");
debug_assert_eq!(val.size(), layout.size);
ImmTy { imm: val.into(), layout }
}

View file

@ -0,0 +1,638 @@
//! Manages the low-level pushing and popping of stack frames and the (de)allocation of local variables.
//! For handling of argument passing and return values, see the `call` module.
use std::cell::Cell;
use std::{fmt, mem};
use either::{Either, Left, Right};
use rustc_hir as hir;
use rustc_hir::definitions::DefPathData;
use rustc_index::IndexVec;
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_middle::{bug, mir};
use rustc_mir_dataflow::storage::always_storage_live_locals;
use rustc_span::Span;
use tracing::{info_span, instrument, trace};
use super::{
from_known_layout, throw_ub, throw_unsup, AllocId, CtfeProvenance, Immediate, InterpCx,
InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, MemoryKind, Operand, Pointer,
Provenance, ReturnAction, Scalar,
};
use crate::errors;
// The `PhantomData` exists to prevent this type from being `Send`. If it were sent across a thread
// boundary and dropped in the other thread, it would exit the span in the other thread.
struct SpanGuard(tracing::Span, std::marker::PhantomData<*const u8>);
impl SpanGuard {
/// By default a `SpanGuard` does nothing.
fn new() -> Self {
Self(tracing::Span::none(), std::marker::PhantomData)
}
/// If a span is entered, we exit the previous span (if any, normally none) and enter the
/// new span. This is mainly so we don't have to use `Option` for the `tracing_span` field of
/// `Frame` by creating a dummy span to begin with and then entering it once the frame has
/// been pushed.
fn enter(&mut self, span: tracing::Span) {
// This executes the destructor on the previous instance of `SpanGuard`, ensuring that
// we never enter or exit more spans than vice versa. Unless you `mem::leak`, then we
// can't protect the tracing stack, but that'll just lead to weird logging, no actual
// problems.
*self = Self(span, std::marker::PhantomData);
self.0.with_subscriber(|(id, dispatch)| {
dispatch.enter(id);
});
}
}
impl Drop for SpanGuard {
fn drop(&mut self) {
self.0.with_subscriber(|(id, dispatch)| {
dispatch.exit(id);
});
}
}
/// A stack frame.
pub struct Frame<'tcx, Prov: Provenance = CtfeProvenance, Extra = ()> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
pub body: &'tcx mir::Body<'tcx>,
/// The def_id and args of the current function.
pub instance: ty::Instance<'tcx>,
/// Extra data for the machine.
pub extra: Extra,
////////////////////////////////////////////////////////////////////////////////
// Return place and locals
////////////////////////////////////////////////////////////////////////////////
/// Work to perform when returning from this function.
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to,
/// and its layout in the caller.
pub return_place: MPlaceTy<'tcx, Prov>,
/// The list of locals for this stack frame, stored in order as
/// `[return_ptr, arguments..., variables..., temporaries...]`.
/// The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `Scalar` or refer to some part of an `Allocation`.
///
/// Do *not* access this directly; always go through the machine hook!
pub locals: IndexVec<mir::Local, LocalState<'tcx, Prov>>,
/// The span of the `tracing` crate is stored here.
/// When the guard is dropped, the span is exited. This gives us
/// a full stack trace on all tracing statements.
tracing_span: SpanGuard,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
/// If this is `Right`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
///
/// Needs to be public because ConstProp does unspeakable things to it.
pub loc: Either<mir::Location, Span>,
}
#[derive(Clone, Copy, Eq, PartialEq, Debug)] // Miri debug-prints these
pub enum StackPopCleanup {
/// Jump to the next block in the caller, or cause UB if None (that's a function
/// that may never return). Also store layout of return place so
/// we can validate it at that layout.
/// `ret` stores the block we jump to on a normal return, while `unwind`
/// stores the block used for cleanup during unwinding.
Goto { ret: Option<mir::BasicBlock>, unwind: mir::UnwindAction },
/// The root frame of the stack: nowhere else to jump to.
/// `cleanup` says whether locals are deallocated. Static computation
/// wants them leaked to intern what they need (and just throw away
/// the entire `ecx` when it is done).
Root { cleanup: bool },
}
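// Illustrative constructions of the two variants (a sketch; `target_block` is hypothetical):
// an ordinary call site continues at `ret` and unwinds via `unwind`, while e.g. a static
// initializer uses a root frame whose locals are deliberately leaked for interning.
//
//     StackPopCleanup::Goto { ret: Some(target_block), unwind: mir::UnwindAction::Continue }
//     StackPopCleanup::Root { cleanup: false }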
/// Return type of [`InterpCx::pop_stack_frame_raw`].
pub struct StackPopInfo<'tcx, Prov: Provenance> {
/// Additional information about the action to be performed when returning from the popped
/// stack frame.
pub return_action: ReturnAction,
/// [`return_to_block`](Frame::return_to_block) of the popped stack frame.
pub return_to_block: StackPopCleanup,
/// [`return_place`](Frame::return_place) of the popped stack frame.
pub return_place: MPlaceTy<'tcx, Prov>,
}
/// State of a local variable including a memoized layout
#[derive(Clone)]
pub struct LocalState<'tcx, Prov: Provenance = CtfeProvenance> {
value: LocalValue<Prov>,
/// Don't modify if `Some`; this is only used to prevent computing the layout twice.
/// Avoids computing the layout of locals that are never actually initialized.
layout: Cell<Option<TyAndLayout<'tcx>>>,
}
impl<Prov: Provenance> std::fmt::Debug for LocalState<'_, Prov> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("LocalState")
.field("value", &self.value)
.field("ty", &self.layout.get().map(|l| l.ty))
.finish()
}
}
/// Current value of a local variable
///
/// This does not store the type of the local; the type is given by `body.local_decls` and can never
/// change, so by not storing it here we avoid having to maintain that as an invariant.
#[derive(Copy, Clone, Debug)] // Miri debug-prints these
pub(super) enum LocalValue<Prov: Provenance = CtfeProvenance> {
/// This local is not currently alive, and cannot be used at all.
Dead,
/// A normal, live local.
/// Mostly for convenience, we re-use the `Operand` type here.
/// This is an optimization over just always having a pointer here;
/// we can thus avoid doing an allocation when the local just stores
/// immediate values *and* never has its address taken.
Live(Operand<Prov>),
}
impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub fn make_live_uninit(&mut self) {
self.value = LocalValue::Live(Operand::Immediate(Immediate::Uninit));
}
/// This is a hack because Miri needs a way to visit all the provenance in a `LocalState`
/// without having a layout or `TyCtxt` available, and we want to keep the `Operand` type
/// private.
pub fn as_mplace_or_imm(
&self,
) -> Option<Either<(Pointer<Option<Prov>>, MemPlaceMeta<Prov>), Immediate<Prov>>> {
match self.value {
LocalValue::Dead => None,
LocalValue::Live(Operand::Indirect(mplace)) => Some(Left((mplace.ptr, mplace.meta))),
LocalValue::Live(Operand::Immediate(imm)) => Some(Right(imm)),
}
}
/// Read the local's value or error if the local is not yet live or not live anymore.
#[inline(always)]
pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
match &self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
}
}
/// Get mutable access to the local's value, so it can be overwritten; errors if the local
/// is not yet live or not live anymore.
#[inline(always)]
pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
match &mut self.value {
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
LocalValue::Live(val) => Ok(val),
}
}
}
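// Sketch of the provenance-visiting pattern that `as_mplace_or_imm` above enables for Miri
// (the `visit_*` helpers are hypothetical, not defined in this crate):
//
//     for local in &frame.locals {
//         match local.as_mplace_or_imm() {
//             None => {} // dead local, no provenance to visit
//             Some(Left((ptr, _meta))) => visit_pointer_provenance(ptr),
//             Some(Right(imm)) => visit_immediate_provenance(imm),
//         }
//     }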
/// What we store about a frame in an interpreter backtrace.
#[derive(Clone, Debug)]
pub struct FrameInfo<'tcx> {
pub instance: ty::Instance<'tcx>,
pub span: Span,
}
// FIXME: only used by miri, should be removed once translatable.
impl<'tcx> fmt::Display for FrameInfo<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ty::tls::with(|tcx| {
if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
write!(f, "inside closure")
} else {
// Note: this triggers a `must_produce_diag` state, which means that if we ever
// get here we must emit a diagnostic. We should never display a `FrameInfo` unless
// we actually want to emit a warning or error to the user.
write!(f, "inside `{}`", self.instance)
}
})
}
}
impl<'tcx> FrameInfo<'tcx> {
pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote {
let span = self.span;
if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure {
errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 }
} else {
let instance = format!("{}", self.instance);
// Note: this triggers a `must_produce_diag` state, which means that if we ever get
// here we must emit a diagnostic. We should never display a `FrameInfo` unless we
// actually want to emit a warning or error to the user.
errors::FrameNote { where_: "instance", span, instance, times: 0 }
}
}
}
impl<'tcx, Prov: Provenance> Frame<'tcx, Prov> {
pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'tcx, Prov, Extra> {
Frame {
body: self.body,
instance: self.instance,
return_to_block: self.return_to_block,
return_place: self.return_place,
locals: self.locals,
loc: self.loc,
extra,
tracing_span: self.tracing_span,
}
}
}
impl<'tcx, Prov: Provenance, Extra> Frame<'tcx, Prov, Extra> {
/// Get the current location within the Frame.
///
/// If this is `Right`, we are not currently executing any particular statement in
/// this frame (can happen e.g. during frame initialization, and during unwinding on
/// frames without cleanup code).
///
/// Used by priroda.
pub fn current_loc(&self) -> Either<mir::Location, Span> {
self.loc
}
/// Return the `SourceInfo` of the current instruction.
pub fn current_source_info(&self) -> Option<&mir::SourceInfo> {
self.loc.left().map(|loc| self.body.source_info(loc))
}
pub fn current_span(&self) -> Span {
match self.loc {
Left(loc) => self.body.source_info(loc).span,
Right(span) => span,
}
}
pub fn lint_root(&self, tcx: TyCtxt<'tcx>) -> Option<hir::HirId> {
// We first try to get a HirId via the current source scope,
// and fall back to `body.source`.
self.current_source_info()
.and_then(|source_info| match &self.body.source_scopes[source_info.scope].local_data {
mir::ClearCrossCrate::Set(data) => Some(data.lint_root),
mir::ClearCrossCrate::Clear => None,
})
.or_else(|| {
let def_id = self.body.source.def_id().as_local();
def_id.map(|def_id| tcx.local_def_id_to_hir_id(def_id))
})
}
/// Returns the address of the buffer where the locals are stored. This is used by `Place` as a
/// sanity check to detect bugs where we mix up which stack frame a place refers to.
#[inline(always)]
pub(super) fn locals_addr(&self) -> usize {
self.locals.raw.as_ptr().addr()
}
#[must_use]
pub fn generate_stacktrace_from_stack(stack: &[Self]) -> Vec<FrameInfo<'tcx>> {
let mut frames = Vec::new();
// This deliberately does *not* honor `requires_caller_location` since it is used for much
// more than just panics.
for frame in stack.iter().rev() {
let span = match frame.loc {
Left(loc) => {
// If the stacktrace passes through MIR-inlined source scopes, add them.
let mir::SourceInfo { mut span, scope } = *frame.body.source_info(loc);
let mut scope_data = &frame.body.source_scopes[scope];
while let Some((instance, call_span)) = scope_data.inlined {
frames.push(FrameInfo { span, instance });
span = call_span;
scope_data = &frame.body.source_scopes[scope_data.parent_scope.unwrap()];
}
span
}
Right(span) => span,
};
frames.push(FrameInfo { span, instance: frame.instance });
}
trace!("generate stacktrace: {:#?}", frames);
frames
}
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Very low-level helper that pushes a stack frame without initializing
/// the arguments or local variables.
#[instrument(skip(self, body, return_place, return_to_block), level = "debug")]
pub(crate) fn push_stack_frame_raw(
&mut self,
instance: ty::Instance<'tcx>,
body: &'tcx mir::Body<'tcx>,
return_place: &MPlaceTy<'tcx, M::Provenance>,
return_to_block: StackPopCleanup,
) -> InterpResult<'tcx> {
trace!("body: {:#?}", body);
// We can push a `Root` frame if and only if the stack is empty.
debug_assert_eq!(
self.stack().is_empty(),
matches!(return_to_block, StackPopCleanup::Root { .. })
);
// First push a stack frame so we have access to `instantiate_from_current_frame` and other
// `self.frame()`-based functions.
let dead_local = LocalState { value: LocalValue::Dead, layout: Cell::new(None) };
let locals = IndexVec::from_elem(dead_local, &body.local_decls);
let pre_frame = Frame {
body,
loc: Right(body.span), // Span used for errors caused during preamble.
return_to_block,
return_place: return_place.clone(),
locals,
instance,
tracing_span: SpanGuard::new(),
extra: (),
};
let frame = M::init_frame(self, pre_frame)?;
self.stack_mut().push(frame);
// Make sure all the constants required by this frame evaluate successfully (post-monomorphization check).
for &const_ in body.required_consts() {
let c =
self.instantiate_from_current_frame_and_normalize_erasing_regions(const_.const_)?;
c.eval(*self.tcx, self.param_env, const_.span).map_err(|err| {
err.emit_note(*self.tcx);
err
})?;
}
// Finish things up.
M::after_stack_push(self)?;
self.frame_mut().loc = Left(mir::Location::START);
let span = info_span!("frame", "{}", instance);
self.frame_mut().tracing_span.enter(span);
Ok(())
}
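// A minimal sketch of how a caller would typically follow up on a raw push (names are
// placeholders): write the argument values into their locals, then mark the always-live
// locals as live so execution can proceed from `Location::START`. The higher-level call
// handling in the `call` module builds on this for ordinary function calls.
//
//     ecx.push_stack_frame_raw(instance, body, &return_place, return_to_block)?;
//     // ...write the argument values into the corresponding locals...
//     ecx.storage_live_for_always_live_locals()?;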
/// Pops a stack frame from the stack and returns some information about it.
///
/// This also deallocates locals, if necessary.
///
/// [`M::before_stack_pop`] should be called before calling this function.
/// [`M::after_stack_pop`] is called by this function automatically.
///
/// [`M::before_stack_pop`]: Machine::before_stack_pop
/// [`M::after_stack_pop`]: Machine::after_stack_pop
pub(super) fn pop_stack_frame_raw(
&mut self,
unwinding: bool,
) -> InterpResult<'tcx, StackPopInfo<'tcx, M::Provenance>> {
let cleanup = self.cleanup_current_frame_locals()?;
let frame =
self.stack_mut().pop().expect("tried to pop a stack frame, but there were none");
let return_to_block = frame.return_to_block;
let return_place = frame.return_place.clone();
let return_action;
if cleanup {
return_action = M::after_stack_pop(self, frame, unwinding)?;
assert_ne!(return_action, ReturnAction::NoCleanup);
} else {
return_action = ReturnAction::NoCleanup;
};
Ok(StackPopInfo { return_action, return_to_block, return_place })
}
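// Minimal sketch of the pop protocol from the caller's side, after the `before_stack_pop`
// machine hook has run:
//
//     let info = self.pop_stack_frame_raw(unwinding)?;
//     match info.return_action {
//         ReturnAction::Normal => { /* jump according to `info.return_to_block` */ }
//         ReturnAction::NoJump => { /* the machine hook already performed the jump */ }
//         ReturnAction::NoCleanup => { /* leaked root frame; nothing more to do */ }
//     }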
/// A private helper for [`pop_stack_frame_raw`](InterpCx::pop_stack_frame_raw).
/// Returns `true` if cleanup has been done, `false` otherwise.
fn cleanup_current_frame_locals(&mut self) -> InterpResult<'tcx, bool> {
// Cleanup: deallocate locals.
// Usually we want to clean up (deallocate locals), but in a few rare cases we don't.
// We do this while the frame is still on the stack, so errors point to the callee.
let return_to_block = self.frame().return_to_block;
let cleanup = match return_to_block {
StackPopCleanup::Goto { .. } => true,
StackPopCleanup::Root { cleanup, .. } => cleanup,
};
if cleanup {
// We need to take the locals out, since we need to mutate while iterating.
let locals = mem::take(&mut self.frame_mut().locals);
for local in &locals {
self.deallocate_local(local.value)?;
}
}
Ok(cleanup)
}
/// In the current stack frame, mark all locals as live that are not arguments and don't have
/// `Storage*` annotations (this includes the return place).
pub(crate) fn storage_live_for_always_live_locals(&mut self) -> InterpResult<'tcx> {
self.storage_live(mir::RETURN_PLACE)?;
let body = self.body();
let always_live = always_storage_live_locals(body);
for local in body.vars_and_temps_iter() {
if always_live.contains(local) {
self.storage_live(local)?;
}
}
Ok(())
}
pub fn storage_live_dyn(
&mut self,
local: mir::Local,
meta: MemPlaceMeta<M::Provenance>,
) -> InterpResult<'tcx> {
trace!("{:?} is now live", local);
// We avoid `ty.is_trivially_sized` since that does something expensive for ADTs.
fn is_very_trivially_sized(ty: Ty<'_>) -> bool {
match ty.kind() {
ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
| ty::Uint(_)
| ty::Int(_)
| ty::Bool
| ty::Float(_)
| ty::FnDef(..)
| ty::FnPtr(_)
| ty::RawPtr(..)
| ty::Char
| ty::Ref(..)
| ty::Coroutine(..)
| ty::CoroutineWitness(..)
| ty::Array(..)
| ty::Closure(..)
| ty::CoroutineClosure(..)
| ty::Never
| ty::Error(_)
| ty::Dynamic(_, _, ty::DynStar) => true,
ty::Str | ty::Slice(_) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => false,
ty::Tuple(tys) => tys.last().is_none_or(|ty| is_very_trivially_sized(*ty)),
ty::Pat(ty, ..) => is_very_trivially_sized(*ty),
// We don't want to do any queries, so there is not much we can do with ADTs.
ty::Adt(..) => false,
ty::Alias(..) | ty::Param(_) | ty::Placeholder(..) => false,
ty::Infer(ty::TyVar(_)) => false,
ty::Bound(..)
| ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
bug!("`is_very_trivially_sized` applied to unexpected type: {}", ty)
}
}
}
// This is a hot function, we avoid computing the layout when possible.
// `unsized_` will be `None` for sized types and `Some(layout)` for unsized types.
let unsized_ = if is_very_trivially_sized(self.body().local_decls[local].ty) {
None
} else {
// We need the layout.
let layout = self.layout_of_local(self.frame(), local, None)?;
if layout.is_sized() { None } else { Some(layout) }
};
let local_val = LocalValue::Live(if let Some(layout) = unsized_ {
if !meta.has_meta() {
throw_unsup!(UnsizedLocal);
}
// Need to allocate some memory, since `Immediate::Uninit` cannot be unsized.
let dest_place = self.allocate_dyn(layout, MemoryKind::Stack, meta)?;
Operand::Indirect(*dest_place.mplace())
} else {
assert!(!meta.has_meta()); // we're dropping the metadata
// Just make this an efficient immediate.
// Note that not calling `layout_of` here does have one real consequence:
// if the type is too big, we'll only notice this when the local is actually initialized,
// which is a bit too late -- we should ideally notice this already here, when the memory
// is conceptually allocated. But given how rare that error is and that this is a hot function,
// we accept this downside for now.
Operand::Immediate(Immediate::Uninit)
});
// If the local is already live, deallocate its old memory.
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
self.deallocate_local(old)?;
Ok(())
}
/// Mark a local's storage as live, killing the previous content.
#[inline(always)]
pub fn storage_live(&mut self, local: mir::Local) -> InterpResult<'tcx> {
self.storage_live_dyn(local, MemPlaceMeta::None)
}
pub fn storage_dead(&mut self, local: mir::Local) -> InterpResult<'tcx> {
assert!(local != mir::RETURN_PLACE, "Cannot make return place dead");
trace!("{:?} is now dead", local);
// If the local is already dead, this is a NOP.
let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
self.deallocate_local(old)?;
Ok(())
}
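// For context, a sketch of how MIR storage statements would map onto these helpers,
// assuming the usual statement evaluation (not shown in this file):
//
//     mir::StatementKind::StorageLive(local) => self.storage_live(local)?,
//     mir::StatementKind::StorageDead(local) => self.storage_dead(local)?,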
fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type. Hence we can `unwrap`.
trace!(
"deallocating local {:?}: {:?}",
local,
// Locals always have an `alloc_id` (they are never the result of an int2ptr).
self.dump_alloc(ptr.provenance.unwrap().get_alloc_id().unwrap())
);
self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
};
Ok(())
}
#[inline(always)]
pub(super) fn layout_of_local(
&self,
frame: &Frame<'tcx, M::Provenance, M::FrameExtra>,
local: mir::Local,
layout: Option<TyAndLayout<'tcx>>,
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
let state = &frame.locals[local];
if let Some(layout) = state.layout.get() {
return Ok(layout);
}
let layout = from_known_layout(self.tcx, self.param_env, layout, || {
let local_ty = frame.body.local_decls[local].ty;
let local_ty =
self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
self.layout_of(local_ty)
})?;
// Layouts of locals are requested a lot, so we cache them.
state.layout.set(Some(layout));
Ok(layout)
}
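// Illustrative effect of the memoization (hypothetical call sequence): the first call
// computes and caches the layout, the second one is served from `state.layout`.
//
//     let a = self.layout_of_local(self.frame(), local, None)?;
//     let b = self.layout_of_local(self.frame(), local, None)?;
//     assert_eq!(a.ty, b.ty);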
}
impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
pub(super) fn print(
&self,
allocs: &mut Vec<Option<AllocId>>,
fmt: &mut std::fmt::Formatter<'_>,
) -> std::fmt::Result {
match self.value {
LocalValue::Dead => write!(fmt, " is dead")?,
LocalValue::Live(Operand::Immediate(Immediate::Uninit)) => {
write!(fmt, " is uninitialized")?
}
LocalValue::Live(Operand::Indirect(mplace)) => {
write!(
fmt,
" by {} ref {:?}:",
match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({meta:?})"),
MemPlaceMeta::None => String::new(),
},
mplace.ptr,
)?;
allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {val:?}")?;
if let Scalar::Ptr(ptr, _size) = val {
allocs.push(ptr.provenance.get_alloc_id());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({val1:?}, {val2:?})")?;
if let Scalar::Ptr(ptr, _size) = val1 {
allocs.push(ptr.provenance.get_alloc_id());
}
if let Scalar::Ptr(ptr, _size) = val2 {
allocs.push(ptr.provenance.get_alloc_id());
}
}
}
Ok(())
}
}

View file

@ -4,15 +4,29 @@
use either::Either;
use rustc_index::IndexSlice;
use rustc_middle::{bug, mir};
use rustc_middle::ty::layout::FnAbiOf;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, mir, span_bug};
use rustc_span::source_map::Spanned;
use rustc_target::abi::call::FnAbi;
use rustc_target::abi::{FieldIdx, FIRST_VARIANT};
use tracing::{info, instrument, trace};
use super::{
ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, Projectable, Scalar,
throw_ub, FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta,
PlaceTy, Projectable, Scalar,
};
use crate::util;
struct EvaluatedCalleeAndArgs<'tcx, M: Machine<'tcx>> {
callee: FnVal<'tcx, M::ExtraFnVal>,
args: Vec<FnArg<'tcx, M::Provenance>>,
fn_sig: ty::FnSig<'tcx>,
fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,
/// True if the function is marked as `#[track_caller]` ([`ty::InstanceKind::requires_caller_location`])
with_caller_location: bool,
}
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Returns `true` as long as there are more things to do.
///
@ -36,7 +50,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
let old_frames = self.frame_idx();
self.statement(stmt)?;
self.eval_statement(stmt)?;
// Make sure we are not updating `statement_index` of the wrong frame.
assert_eq!(old_frames, self.frame_idx());
// Advance the program counter.
@ -47,7 +61,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
M::before_terminator(self)?;
let terminator = basic_block.terminator();
self.terminator(terminator)?;
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
if let Either::Left(loc) = self.frame().loc {
info!("// executing {:?}", loc.block);
}
}
Ok(true)
}
@ -55,7 +74,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// statement counter.
///
/// This does NOT move the statement counter forward, the caller has to do that!
pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
pub fn eval_statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", stmt);
use rustc_middle::mir::StatementKind::*;
@ -349,16 +368,225 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Ok(())
}
/// Evaluate the given terminator. Will also adjust the stack frame and statement position accordingly.
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
/// Evaluate the arguments of a function call
fn eval_fn_call_arguments(
&self,
ops: &[Spanned<mir::Operand<'tcx>>],
) -> InterpResult<'tcx, Vec<FnArg<'tcx, M::Provenance>>> {
ops.iter()
.map(|op| {
let arg = match &op.node {
mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
// Make a regular copy.
let op = self.eval_operand(&op.node, None)?;
FnArg::Copy(op)
}
mir::Operand::Move(place) => {
// If this place lives in memory, preserve its location.
// We call `place_to_op` which will be an `MPlaceTy` whenever there exists
// an mplace for this place. (This is in contrast to `PlaceTy::as_mplace_or_local`
// which can return a local even if that has an mplace.)
let place = self.eval_place(*place)?;
let op = self.place_to_op(&place)?;
match op.as_mplace_or_imm() {
Either::Left(mplace) => FnArg::InPlace(mplace),
Either::Right(_imm) => {
// This argument doesn't live in memory, so there's no place
// to make inaccessible during the call.
// We rely on there not being any stray `PlaceTy` that would let the
// caller directly access this local!
// This is also crucial for tail calls, where we want the `FnArg` to
// stay valid when the old stack frame gets popped.
FnArg::Copy(op)
}
}
}
};
Ok(arg)
})
.collect()
}
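// Illustration (hypothetical MIR): for a call `f(a, move b)` where `a` is a plain copy and
// `b` is a local backed by memory, this evaluates to roughly
//
//     [FnArg::Copy(a_op), FnArg::InPlace(b_mplace)]
//
// while `move` of a local that only exists as an immediate still becomes `FnArg::Copy`,
// since there is no memory that needs to be protected for the duration of the call.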
/// Shared part of `Call` and `TailCall` implementation — finding and evaluating all the
/// necessary information about callee and arguments to make a call.
fn eval_callee_and_args(
&self,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
args: &[Spanned<mir::Operand<'tcx>>],
) -> InterpResult<'tcx, EvaluatedCalleeAndArgs<'tcx, M>> {
let func = self.eval_operand(func, None)?;
let args = self.eval_fn_call_arguments(args)?;
let fn_sig_binder = func.layout.ty.fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig_binder);
let extra_args = &args[fn_sig.inputs().len()..];
let extra_args =
self.tcx.mk_type_list_from_iter(extra_args.iter().map(|arg| arg.layout().ty));
let (callee, fn_abi, with_caller_location) = match *func.layout.ty.kind() {
ty::FnPtr(_sig) => {
let fn_ptr = self.read_pointer(&func)?;
let fn_val = self.get_ptr_fn(fn_ptr)?;
(fn_val, self.fn_abi_of_fn_ptr(fn_sig_binder, extra_args)?, false)
}
ty::FnDef(def_id, args) => {
let instance = self.resolve(def_id, args)?;
(
FnVal::Instance(instance),
self.fn_abi_of_instance(instance, extra_args)?,
instance.def.requires_caller_location(*self.tcx),
)
}
_ => {
span_bug!(terminator.source_info.span, "invalid callee of type {}", func.layout.ty)
}
};
Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
}
fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", terminator.kind);
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
if let Either::Left(loc) = self.frame().loc {
info!("// executing {:?}", loc.block);
use rustc_middle::mir::TerminatorKind::*;
match terminator.kind {
Return => {
self.return_from_current_stack_frame(/* unwinding */ false)?
}
Goto { target } => self.go_to_block(target),
SwitchInt { ref discr, ref targets } => {
let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets.otherwise();
for (const_int, target) in targets.iter() {
// Compare using MIR BinOp::Eq, to also support pointer values.
// (Avoiding `self.binary_op` as that does some redundant layout computation.)
let res = self.binary_op(
mir::BinOp::Eq,
&discr,
&ImmTy::from_uint(const_int, discr.layout),
)?;
if res.to_scalar().to_bool()? {
target_block = target;
break;
}
}
self.go_to_block(target_block);
}
Call {
ref func,
ref args,
destination,
target,
unwind,
call_source: _,
fn_span: _,
} => {
let old_stack = self.frame_idx();
let old_loc = self.frame().loc;
let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
self.eval_callee_and_args(terminator, func, args)?;
let destination = self.force_allocation(&self.eval_place(destination)?)?;
self.init_fn_call(
callee,
(fn_sig.abi, fn_abi),
&args,
with_caller_location,
&destination,
target,
if fn_abi.can_unwind { unwind } else { mir::UnwindAction::Unreachable },
)?;
// Sanity-check that `init_fn_call` either pushed a new frame or
// did a jump to another block.
if self.frame_idx() == old_stack && self.frame().loc == old_loc {
span_bug!(terminator.source_info.span, "evaluating this call made no progress");
}
}
TailCall { ref func, ref args, fn_span: _ } => {
let old_frame_idx = self.frame_idx();
let EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location } =
self.eval_callee_and_args(terminator, func, args)?;
self.init_fn_tail_call(callee, (fn_sig.abi, fn_abi), &args, with_caller_location)?;
if self.frame_idx() != old_frame_idx {
span_bug!(
terminator.source_info.span,
"evaluating this tail call pushed a new stack frame"
);
}
}
Drop { place, target, unwind, replace: _ } => {
let place = self.eval_place(place)?;
let instance = Instance::resolve_drop_in_place(*self.tcx, place.layout.ty);
if let ty::InstanceKind::DropGlue(_, None) = instance.def {
// This is the branch we enter if and only if the dropped type has no drop glue
// whatsoever. This can happen as a result of monomorphizing a drop of a
// generic. In order to make sure that generic and non-generic code behaves
// roughly the same (and in keeping with Mir semantics) we do nothing here.
self.go_to_block(target);
return Ok(());
}
trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
self.init_drop_in_place_call(&place, instance, target, unwind)?;
}
Assert { ref cond, expected, ref msg, target, unwind } => {
let ignored =
M::ignore_optional_overflow_checks(self) && msg.is_optional_overflow_check();
let cond_val = self.read_scalar(&self.eval_operand(cond, None)?)?.to_bool()?;
if ignored || expected == cond_val {
self.go_to_block(target);
} else {
M::assert_panic(self, msg, unwind)?;
}
}
UnwindTerminate(reason) => {
M::unwind_terminate(self, reason)?;
}
// When we encounter Resume, we've finished unwinding
// cleanup for the current stack frame. We pop it in order
// to continue unwinding the next frame
UnwindResume => {
trace!("unwinding: resuming from cleanup");
// By definition, a Resume terminator means
// that we're unwinding
self.return_from_current_stack_frame(/* unwinding */ true)?;
return Ok(());
}
// It is UB to ever encounter this.
Unreachable => throw_ub!(Unreachable),
// These should never occur for MIR we actually run.
FalseEdge { .. } | FalseUnwind { .. } | Yield { .. } | CoroutineDrop => span_bug!(
terminator.source_info.span,
"{:#?} should have been eliminated by MIR pass",
terminator.kind
),
InlineAsm { template, ref operands, options, ref targets, .. } => {
M::eval_inline_asm(self, template, operands, options, targets)?;
}
}
Ok(())
}
}