rust/src/librustc_mir/interpret/terminator/mod.rs

// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::LayoutOf;
use syntax::source_map::Span;
use rustc_target::spec::abi::Abi;
use rustc::mir::interpret::{EvalResult, Scalar};
use super::{
EvalContext, Machine, Value, OpTy, Place, PlaceTy, ValTy, Operand, StackPopCleanup
};
mod drop;
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M> {
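/// Jump to the given basic block, resetting the statement counter.
/// A `None` target means there is no block to continue in (e.g. after
/// a diverging call), so reaching it is an `Unreachable` error.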
#[inline]
pub fn goto_block(&mut self, target: Option<mir::BasicBlock>) -> EvalResult<'tcx> {
if let Some(target) = target {
self.frame_mut().block = target;
self.frame_mut().stmt = 0;
Ok(())
} else {
err!(Unreachable)
}
}
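/// Evaluate a single terminator: transfer control to another block,
/// call or return from a function, drop a value, or check an assertion.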
pub(super) fn eval_terminator(
&mut self,
terminator: &mir::Terminator<'tcx>,
) -> EvalResult<'tcx> {
use rustc::mir::TerminatorKind::*;
match terminator.kind {
Return => {
self.dump_place(self.frame().return_place);
self.pop_stack_frame()?
}
Goto { target } => self.goto_block(Some(target))?,
SwitchInt {
ref discr,
ref values,
ref targets,
..
} => {
let discr = self.read_value(self.eval_operand(discr, None)?)?;
trace!("SwitchInt({:?})", *discr);
// Branch to the `otherwise` case by default, if no match is found.
let mut target_block = targets[targets.len() - 1];
for (index, &const_int) in values.iter().enumerate() {
// Compare using binary_op
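// (the raw match value from MIR is a `u128`; wrap it in a scalar of the
// discriminant's size so the comparison happens at the right width)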
let const_int = Scalar::Bits {
bits: const_int,
size: discr.layout.size.bytes() as u8
};
let (res, _) = self.binary_op(mir::BinOp::Eq,
discr,
ValTy { value: Value::Scalar(const_int.into()), layout: discr.layout }
)?;
if res.to_bool()? {
target_block = targets[index];
break;
}
}
self.goto_block(Some(target_block))?;
}
Call {
ref func,
ref args,
ref destination,
..
} => {
let (dest, ret) = match *destination {
Some((ref lv, target)) => (Some(self.eval_place(lv)?), Some(target)),
None => (None, None),
};
let func = self.eval_operand(func, None)?;
let (fn_def, sig) = match func.layout.ty.sty {
ty::FnPtr(sig) => {
let fn_ptr = self.read_scalar(func)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
let instance_ty = instance.ty(*self.tcx);
match instance_ty.sty {
ty::FnDef(..) => {
let sig = self.tcx.normalize_erasing_late_bound_regions(
self.param_env,
&sig,
);
let real_sig = instance_ty.fn_sig(*self.tcx);
let real_sig = self.tcx.normalize_erasing_late_bound_regions(
self.param_env,
&real_sig,
);
if !self.check_sig_compat(sig, real_sig)? {
return err!(FunctionPointerTyMismatch(real_sig, sig));
}
(instance, sig)
}
ref other => bug!("instance def ty: {:?}", other),
}
}
ty::FnDef(def_id, substs) => {
let sig = func.layout.ty.fn_sig(*self.tcx);
let sig = self.tcx.normalize_erasing_late_bound_regions(
self.param_env,
&sig,
);
(self.resolve(def_id, substs)?, sig)
},
_ => {
let msg = format!("can't handle callee of type {:?}", func.layout.ty);
return err!(Unimplemented(msg));
}
};
let args = self.eval_operands(args)?;
self.eval_fn_call(
fn_def,
&args[..],
dest,
ret,
terminator.source_info.span,
Some(sig),
)?;
}
Drop {
ref location,
target,
..
} => {
// FIXME(CTFE): forbid drop in const eval
let place = self.eval_place(location)?;
let ty = place.layout.ty;
trace!("TerminatorKind::drop: {:?}, type {}", location, ty);
let instance = ::monomorphize::resolve_drop_in_place(*self.tcx, ty);
self.drop_in_place(
place,
instance,
terminator.source_info.span,
target,
)?;
}
Assert {
ref cond,
expected,
ref msg,
target,
..
} => {
let cond_val = self.read_value(self.eval_operand(cond, None)?)?
.to_scalar()?.to_bool()?;
if expected == cond_val {
self.goto_block(Some(target))?;
} else {
use rustc::mir::interpret::EvalErrorKind::*;
return match *msg {
BoundsCheck { ref len, ref index } => {
let len = self.read_value(self.eval_operand(len, None)?)
.expect("can't eval len").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
let index = self.read_value(self.eval_operand(index, None)?)
.expect("can't eval index").to_scalar()?
.to_bits(self.memory().pointer_size())? as u64;
err!(BoundsCheck { len, index })
}
Overflow(op) => Err(Overflow(op).into()),
OverflowNeg => Err(OverflowNeg.into()),
DivisionByZero => Err(DivisionByZero.into()),
RemainderByZero => Err(RemainderByZero.into()),
GeneratorResumedAfterReturn |
GeneratorResumedAfterPanic => unimplemented!(),
_ => bug!(),
};
}
}
Yield { .. } => unimplemented!("{:#?}", terminator.kind),
GeneratorDrop => unimplemented!(),
DropAndReplace { .. } => unimplemented!(),
Resume => unimplemented!(),
Abort => unimplemented!(),
FalseEdges { .. } => bug!("should have been eliminated by \
`simplify_branches` mir pass"),
FalseUnwind { .. } => bug!("should have been eliminated by \
`simplify_branches` mir pass"),
Unreachable => return err!(Unreachable),
}
Ok(())
}
/// Decides whether it is okay to call the method with signature `real_sig`
/// using signature `sig`.
/// FIXME: This should take into account the platform-dependent ABI description.
fn check_sig_compat(
&mut self,
sig: ty::FnSig<'tcx>,
real_sig: ty::FnSig<'tcx>,
) -> EvalResult<'tcx, bool> {
fn check_ty_compat<'tcx>(ty: Ty<'tcx>, real_ty: Ty<'tcx>) -> bool {
if ty == real_ty {
return true;
} // This is actually a fast pointer comparison
return match (&ty.sty, &real_ty.sty) {
// Permit changing the pointer type of raw pointers and references as well as
// mutability of raw pointers.
// FIXME: Should not be allowed when fat pointers are involved.
(&ty::RawPtr(_), &ty::RawPtr(_)) => true,
(&ty::Ref(_, _, _), &ty::Ref(_, _, _)) => {
ty.is_mutable_pointer() == real_ty.is_mutable_pointer()
}
// rule out everything else
_ => false,
};
}
if sig.abi == real_sig.abi && sig.variadic == real_sig.variadic &&
sig.inputs_and_output.len() == real_sig.inputs_and_output.len() &&
sig.inputs_and_output
.iter()
.zip(real_sig.inputs_and_output)
.all(|(ty, real_ty)| check_ty_compat(ty, real_ty))
{
// Definitely good.
return Ok(true);
}
if sig.variadic || real_sig.variadic {
// We're not touching this
return Ok(false);
}
// We need to allow what comes up when a non-capturing closure is cast to a fn().
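// For example, `let f: fn(i32) -> i32 = |x| x;` gives `sig = fn(i32) -> i32`,
// while `real_sig` is roughly `extern "rust-call" fn(<closure>, (i32,)) -> i32`:
// same output, a zero-sized first argument, and the real inputs in one tuple.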
match (sig.abi, real_sig.abi) {
(Abi::Rust, Abi::RustCall) // check the ABIs. This makes the test here non-symmetric.
if check_ty_compat(sig.output(), real_sig.output())
&& real_sig.inputs_and_output.len() == 3 => {
// First argument of real_sig must be a ZST
let fst_ty = real_sig.inputs_and_output[0];
if self.layout_of(fst_ty)?.is_zst() {
// Second argument must be a tuple matching the argument list of sig
let snd_ty = real_sig.inputs_and_output[1];
match snd_ty.sty {
ty::Tuple(tys) if sig.inputs().len() == tys.len() =>
if sig.inputs()
.iter()
.zip(tys)
.all(|(ty, real_ty)| check_ty_compat(ty, real_ty)) {
return Ok(true)
},
_ => {}
}
}
}
_ => {}
};
// Nope, this doesn't work.
return Ok(false);
}
/// Call this function -- pushing the stack frame and initializing the arguments.
/// `sig` is optional in case of FnPtr/FnDef -- but mandatory for closures!
fn eval_fn_call(
&mut self,
instance: ty::Instance<'tcx>,
args: &[OpTy<'tcx>],
dest: Option<PlaceTy<'tcx>>,
ret: Option<mir::BasicBlock>,
span: Span,
sig: Option<ty::FnSig<'tcx>>,
) -> EvalResult<'tcx> {
trace!("eval_fn_call: {:#?}", instance);
match instance.def {
ty::InstanceDef::Intrinsic(..) => {
// The intrinsic itself cannot diverge, so if we got here without a return
// place, this call must be unreachable (can happen e.g. for transmute
// returning `!`).
let dest = match dest {
Some(dest) => dest,
None => return err!(Unreachable)
};
M::call_intrinsic(self, instance, args, dest)?;
// No stack frame gets pushed, the main loop will just act as if the
// call completed.
self.goto_block(ret)?;
self.dump_place(*dest);
Ok(())
}
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::FnPtrShim(..) |
ty::InstanceDef::DropGlue(..) |
ty::InstanceDef::CloneShim(..) |
ty::InstanceDef::Item(_) => {
let mir = match M::find_fn(self, instance, args, dest, ret)? {
Some(mir) => mir,
None => return Ok(()),
};
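// Diverging calls have no destination; give the frame a dummy return
// place that must never actually be used.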
let return_place = match dest {
Some(place) => *place,
None => Place::null(&self),
};
self.push_stack_frame(
instance,
span,
mir,
return_place,
StackPopCleanup::Goto(ret),
)?;
// If we didn't get a signature, ask `fn_sig`
let sig = sig.unwrap_or_else(|| {
let fn_sig = instance.ty(*self.tcx).fn_sig(*self.tcx);
self.tcx.normalize_erasing_late_bound_regions(self.param_env, &fn_sig)
});
assert_eq!(sig.inputs().len(), args.len());
// We can't test the types, as it is fine if the types are ABI-compatible but
// not equal.
// Figure out how to pass which arguments.
// FIXME: Somehow this is horribly full of special cases here, and codegen has
// none of that. What is going on?
trace!("ABI: {:?}", sig.abi);
trace!(
"args: {:#?}",
args.iter()
.map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
.collect::<Vec<_>>()
);
trace!(
"locals: {:#?}",
mir.args_iter()
.map(|local|
(local, self.layout_of_local(self.cur_frame(), local).unwrap().ty)
)
.collect::<Vec<_>>()
);
match instance.def {
ty::InstanceDef::ClosureOnceShim { .. } if sig.abi == Abi::Rust => {
// this has an entirely ridiculous calling convention where it uses the
// "Rust" ABI, but arguments come in untupled and are supposed to be tupled
// for the callee! The function's first argument is a ZST, and then
// there comes a tuple for the rest.
let mut arg_locals = mir.args_iter();
{ // the ZST. nothing to write.
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
assert!(dest.layout.is_zst());
}
{ // the tuple argument.
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
assert_eq!(dest.layout.fields.count(), args.len());
for (i, &op) in args.iter().enumerate() {
let dest_field = self.place_field(dest, i as u64)?;
self.copy_op(op, dest_field)?;
}
}
// that should be it
assert!(arg_locals.next().is_none());
}
_ => {
// overloaded-calls-simple.rs in miri's test suite demonstrates that there is
// no way to predict, from the ABI and instance.def, whether the function
// wants arguments passed with untupling or not. So we just make it
// depend on the number of arguments...
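// An argument-count mismatch means the last caller argument is a tuple
// that must be spread into the callee's remaining arguments.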
let untuple =
sig.abi == Abi::RustCall && !args.is_empty() && args.len() != mir.arg_count;
let (normal_args, untuple_arg) = if untuple {
let (tup, args) = args.split_last().unwrap();
trace!("eval_fn_call: Will pass last argument by untupling");
(args, Some(tup))
} else {
(&args[..], None)
};
// Pass the arguments.
let mut arg_locals = mir.args_iter();
// First the normal ones.
for &op in normal_args {
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
self.copy_op(op, dest)?;
}
// Then the ones to untuple.
if let Some(&untuple_arg) = untuple_arg {
for i in 0..untuple_arg.layout.fields.count() {
let arg_local = arg_locals.next().unwrap();
let dest = self.eval_place(&mir::Place::Local(arg_local))?;
let op = self.operand_field(untuple_arg, i as u64)?;
self.copy_op(op, dest)?;
}
}
// That should be it.
assert!(arg_locals.next().is_none());
}
}
Ok(())
}
// cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.memory.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align;
let (ptr, vtable) = self.read_value(args[0])?.to_scalar_dyn_trait()?;
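// Slots 0..=2 of the vtable hold drop glue, size and align;
// method pointers start at slot 3, hence the `idx + 3` offset below.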
let fn_ptr = self.memory.read_ptr_sized(
vtable.offset(ptr_size * (idx as u64 + 3), &self)?,
ptr_align
)?.to_ptr()?;
let instance = self.memory.get_fn(fn_ptr)?;
// We have to patch the self argument, in particular get the layout
// expected by the actual function. Cannot just use "field 0" due to
// `Box<Self>`.
let mut args = args.to_vec();
let pointee = args[0].layout.ty.builtin_deref(true).unwrap().ty;
let fake_fat_ptr_ty = self.tcx.mk_mut_ptr(pointee);
args[0].layout = self.layout_of(fake_fat_ptr_ty)?.field(&self, 0)?;
args[0].op = Operand::Immediate(Value::Scalar(ptr.into())); // strip vtable
trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(instance, &args, dest, ret, span, sig)
}
}
}
}