rust/src/interpreter/mod.rs

1792 lines
69 KiB
Rust
Raw Normal View History

use rustc::middle::const_val::ConstVal;
2016-04-14 00:01:00 +02:00
use rustc::hir::def_id::DefId;
use rustc::hir::map::definitions::DefPathData;
use rustc::mir;
2016-08-27 01:44:46 -06:00
use rustc::traits::Reveal;
use rustc::ty::layout::{self, Layout, Size};
2016-03-28 17:43:23 -06:00
use rustc::ty::subst::{self, Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
2016-06-11 12:38:28 -06:00
use rustc_data_structures::indexed_vec::Idx;
use syntax::codemap::{self, DUMMY_SP};
use error::{EvalError, EvalResult};
use memory::{Memory, Pointer};
use primval::{self, PrimVal, PrimValKind};
pub use self::value::Value;
use std::collections::HashMap;
use std::cell::Ref;
2016-06-23 00:04:10 -06:00
mod step;
mod terminator;
2016-09-07 18:34:59 +02:00
mod cast;
mod vtable;
mod value;
2016-06-01 17:05:20 +02:00
2016-11-26 19:18:39 -08:00
/// Shared, immutable borrow of a function's MIR, as handed out by rustc's MIR map.
pub type MirRef<'tcx> = Ref<'tcx, mir::Mir<'tcx>>;

/// The central interpreter state: type information, virtual memory, cached
/// globals, and the virtual call stack.
pub struct EvalContext<'a, 'tcx: 'a> {
    /// The results of the type checker, from rustc.
    tcx: TyCtxt<'a, 'tcx, 'tcx>,

    /// The virtual memory system.
    memory: Memory<'a, 'tcx>,

    /// Precomputed statics, constants and promoteds.
    globals: HashMap<GlobalId<'tcx>, Global<'tcx>>,

    /// The virtual call stack.
    stack: Vec<Frame<'tcx>>,

    /// The maximum number of stack frames allowed
    stack_limit: usize,

    /// The maximum number of operations that may be executed.
    /// This prevents infinite loops and huge computations from freezing up const eval.
    /// Remove once halting problem is solved.
    steps_remaining: u64,
}
/// A stack frame.
pub struct Frame<'tcx> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////

    /// The MIR for the function called on this frame.
    pub mir: MirRef<'tcx>,

    /// The def_id of the current function.
    pub def_id: DefId,

    /// type substitutions for the current function invocation.
    pub substs: &'tcx Substs<'tcx>,

    /// The span of the call site.
    pub span: codemap::Span,

    ////////////////////////////////////////////////////////////////////////////////
    // Return lvalue and locals
    ////////////////////////////////////////////////////////////////////////////////

    /// The block to return to when returning from the current stack frame
    pub return_to_block: StackPopCleanup,

    /// The location where the result of the current stack frame should be written to.
    pub return_lvalue: Lvalue<'tcx>,

    /// The list of locals for this stack frame, stored in order as
    /// `[arguments..., variables..., temporaries...]`. The locals are stored as `Value`s, which
    /// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
    ///
    /// Before being initialized, a local is simply marked as None.
    pub locals: Vec<Option<Value>>,

    ////////////////////////////////////////////////////////////////////////////////
    // Current position within the function
    ////////////////////////////////////////////////////////////////////////////////

    /// The block that is currently executed (or will be executed after the above call stacks
    /// return).
    pub block: mir::BasicBlock,

    /// The index of the currently evaluated statement.
    pub stmt: usize,
}
2016-09-27 10:14:53 +02:00
/// A place the interpreter can read from or write to.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum Lvalue<'tcx> {
    /// An lvalue referring to a value allocated in the `Memory` system.
    Ptr {
        ptr: Pointer,
        extra: LvalueExtra,
    },

    /// An lvalue referring to a value on the stack. Represented by a stack frame index paired with
    /// a Mir local index.
    Local {
        frame: usize,
        local: mir::Local,
    },

    /// An lvalue referring to a cached static, constant or promoted.
    Global(GlobalId<'tcx>),

    // TODO(solson): None/Never?
}
/// Extra information carried alongside a pointer for lvalues whose type alone
/// is not enough to interpret the memory (unsized values and enum downcasts).
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub enum LvalueExtra {
    /// A sized value; the pointer alone suffices.
    None,
    /// The number of elements of a slice/str lvalue.
    Length(u64),
    /// The vtable pointer of a trait-object lvalue.
    Vtable(Pointer),
    /// The variant index selected by an enum `Downcast` projection.
    DowncastVariant(usize),
}
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)]
/// Uniquely identifies a specific constant or static
pub struct GlobalId<'tcx> {
    /// the def id of the constant/static or in case of promoteds, the def id of the function they belong to
    def_id: DefId,

    /// In case of statics and constants this is `Substs::empty()`, so only promoteds and associated
    /// constants actually have something useful here. We could special case statics and constants,
    /// but that would only require more branching when working with constants, and not bring any
    /// real benefits.
    substs: &'tcx Substs<'tcx>,

    /// this `Option` is `Some` for promoted constants
    promoted: Option<mir::Promoted>,
}
/// The cached value of a static, constant or promoted, plus its mutability state.
#[derive(Copy, Clone, Debug)]
pub struct Global<'tcx> {
    /// `None` until the global's initializer frame has finished executing.
    data: Option<Value>,
    /// Set to `false` by `StackPopCleanup::Freeze` once initialization is done.
    mutable: bool,
    ty: Ty<'tcx>,
}

impl<'tcx> Global<'tcx> {
    /// Creates a not-yet-computed global; it stays mutable so the initializer
    /// frame can write its result into it.
    fn uninitialized(ty: Ty<'tcx>) -> Self {
        Global {
            data: None,
            mutable: true,
            ty: ty,
        }
    }
}
/// What `pop_stack_frame` should do after the frame's body has finished executing.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum StackPopCleanup {
    /// The stackframe existed to compute the initial value of a static/constant, make sure it
    /// isn't modifiable afterwards. The allocation of the result is frozen iff it's an
    /// actual allocation. `PrimVal`s are unmodifiable anyway.
    Freeze,
    /// A regular stackframe added due to a function call will need to get forwarded to the next
    /// block
    Goto(mir::BasicBlock),
    /// The main function and diverging functions have nowhere to return to
    None,
}
2016-11-26 17:54:19 -08:00
/// Caps on the resources a single evaluation may consume, so that const eval
/// cannot hang or exhaust memory.
#[derive(Copy, Clone, Debug)]
pub struct ResourceLimits {
    /// Maximum bytes the virtual `Memory` may allocate.
    pub memory_size: u64,
    /// Maximum number of interpreter steps (see `EvalContext::steps_remaining`).
    pub step_limit: u64,
    /// Maximum depth of the virtual call stack.
    pub stack_limit: usize,
}

impl Default for ResourceLimits {
    fn default() -> Self {
        ResourceLimits {
            memory_size: 100 * 1024 * 1024, // 100 MB
            step_limit: 1_000_000,
            stack_limit: 100,
        }
    }
}
impl<'a, 'tcx> EvalContext<'a, 'tcx> {
2016-11-26 17:54:19 -08:00
    /// Creates a fresh interpreter with an empty call stack and empty global
    /// cache, enforcing the given resource limits.
    pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>, limits: ResourceLimits) -> Self {
        EvalContext {
            tcx: tcx,
            memory: Memory::new(&tcx.data_layout, limits.memory_size),
            globals: HashMap::new(),
            stack: Vec::new(),
            stack_limit: limits.stack_limit,
            steps_remaining: limits.step_limit,
        }
    }
2016-10-16 00:12:11 -06:00
    /// Allocates memory sized and aligned for `ty`, using the current frame's
    /// substitutions. Fails for unsized types.
    pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, Pointer> {
        // The intermediate binding releases the immutable borrow of `self`
        // before the mutable call below.
        let substs = self.substs();
        self.alloc_ptr_with_substs(ty, substs)
    }

    /// Like `alloc_ptr`, but with an explicit substitution environment
    /// (needed when allocating on behalf of a different frame, e.g. globals).
    pub fn alloc_ptr_with_substs(
        &mut self,
        ty: Ty<'tcx>,
        substs: &'tcx Substs<'tcx>
    ) -> EvalResult<'tcx, Pointer> {
        let size = self.type_size_with_substs(ty, substs)?.expect("cannot alloc memory for unsized type");
        let align = self.type_align_with_substs(ty, substs)?;
        self.memory.allocate(size, align)
    }
2016-06-08 11:11:08 +02:00
    /// Read-only access to the virtual memory system.
    pub fn memory(&self) -> &Memory<'a, 'tcx> {
        &self.memory
    }

    /// Mutable access to the virtual memory system.
    pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx> {
        &mut self.memory
    }

    /// The current virtual call stack, innermost frame last.
    pub fn stack(&self) -> &[Frame<'tcx>] {
        &self.stack
    }
    /// Allocates and freezes a byte copy of `s`, returning a fat-pointer pair
    /// of (data pointer, length) as used for `&str`.
    fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
        // FIXME: cache these allocs
        let ptr = self.memory.allocate(s.len() as u64, 1)?;
        self.memory.write_bytes(ptr, s.as_bytes())?;
        // String constants are immutable, so freeze the allocation.
        self.memory.freeze(ptr.alloc_id)?;
        Ok(Value::ByValPair(PrimVal::from_ptr(ptr), PrimVal::from_uint(s.len() as u64)))
    }
    /// Converts a rustc compile-time constant into an interpreter `Value`.
    /// Scalar constants become `ByVal` primvals; string/byte-string constants
    /// are materialized as frozen allocations.
    fn const_to_value(&mut self, const_val: &ConstVal) -> EvalResult<'tcx, Value> {
        use rustc::middle::const_val::ConstVal::*;
        use rustc_const_math::ConstFloat;

        let primval = match *const_val {
            Integral(const_int) => PrimVal::new(const_int.to_u64_unchecked()),
            Float(ConstFloat::F32(f)) => PrimVal::from_f32(f),
            Float(ConstFloat::F64(f)) => PrimVal::from_f64(f),
            Float(ConstFloat::FInfer { .. }) =>
                bug!("uninferred constants only exist before typeck"),
            Bool(b) => PrimVal::from_bool(b),
            Char(c) => PrimVal::from_char(c),
            // Strings are fat values, not a single primval; handle separately.
            Str(ref s) => return self.str_to_value(s),
            ByteStr(ref bs) => {
                let ptr = self.memory.allocate(bs.len() as u64, 1)?;
                self.memory.write_bytes(ptr, bs)?;
                self.memory.freeze(ptr.alloc_id)?;
                PrimVal::from_ptr(ptr)
            }
            Struct(_) => unimplemented!(),
            Tuple(_) => unimplemented!(),
            Function(_) => unimplemented!(),
            Array(_, _) => unimplemented!(),
            Repeat(_, _) => unimplemented!(),
            Dummy => unimplemented!(),
        };

        Ok(Value::ByVal(primval))
    }
    /// Whether `ty` is `Sized`. Must only be called on fully monomorphized types.
    fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
        // generics are weird, don't run this function on a generic
        assert!(!ty.needs_subst());
        ty.is_sized(self.tcx, &self.tcx.empty_parameter_environment(), DUMMY_SP)
    }
    /// Fetches the MIR for `def_id`, either from the local crate or from crate
    /// metadata. Errors (rather than ICEing) when no MIR is available, e.g. for
    /// extern functions compiled without MIR in their rlib.
    pub fn load_mir(&self, def_id: DefId) -> EvalResult<'tcx, MirRef<'tcx>> {
        trace!("load mir {:?}", def_id);
        if def_id.is_local() || self.tcx.sess.cstore.is_item_mir_available(def_id) {
            Ok(self.tcx.item_mir(def_id))
        } else {
            Err(EvalError::NoMirFor(self.tcx.item_path_str(def_id)))
        }
    }
2016-06-08 11:11:08 +02:00
pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
2016-06-08 11:11:08 +02:00
let substituted = ty.subst(self.tcx, substs);
2016-11-26 22:58:01 -08:00
let new = self.tcx.normalize_associated_type(&substituted);
new
2016-06-08 11:11:08 +02:00
}
    /// Size in bytes of `ty` under the current frame's substitutions, or
    /// `None` if `ty` is unsized.
    fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
        self.type_size_with_substs(ty, self.substs())
    }

    /// ABI alignment in bytes of `ty` under the current frame's substitutions.
    fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
        self.type_align_with_substs(ty, self.substs())
    }

    /// Like `type_size`, but with an explicit substitution environment.
    fn type_size_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, Option<u64>> {
        let layout = self.type_layout_with_substs(ty, substs)?;
        if layout.is_unsized() {
            // Unsized types have no statically known size.
            Ok(None)
        } else {
            Ok(Some(layout.size(&self.tcx.data_layout).bytes()))
        }
    }

    /// Like `type_align`, but with an explicit substitution environment.
    fn type_align_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, u64> {
        self.type_layout_with_substs(ty, substs).map(|layout| layout.align(&self.tcx.data_layout).abi())
    }

    /// Layout of `ty` under the current frame's substitutions.
    fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
        self.type_layout_with_substs(ty, self.substs())
    }
    /// Monomorphizes `ty` with `substs` and asks rustc for its layout.
    /// Spins up a fresh inference context per call just to satisfy the layout
    /// API's signature.
    fn type_layout_with_substs(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> EvalResult<'tcx, &'tcx Layout> {
        // TODO(solson): Is this inefficient? Needs investigation.
        let ty = self.monomorphize(ty, substs);

        self.tcx.infer_ctxt(None, None, Reveal::All).enter(|infcx| {
            ty.layout(&infcx).map_err(EvalError::Layout)
        })
    }
2016-07-05 10:47:10 +02:00
    /// Pushes a new frame for `def_id` onto the virtual call stack, with all
    /// locals uninitialized and execution starting at `START_BLOCK`.
    /// Errors with `StackFrameLimitReached` when the stack limit is exceeded.
    pub fn push_stack_frame(
        &mut self,
        def_id: DefId,
        span: codemap::Span,
        mir: MirRef<'tcx>,
        substs: &'tcx Substs<'tcx>,
        return_lvalue: Lvalue<'tcx>,
        return_to_block: StackPopCleanup,
    ) -> EvalResult<'tcx, ()> {
        ::log_settings::settings().indentation += 1;

        // Subtract 1 because `local_decls` includes the ReturnPointer, but we don't store a local
        // `Value` for that.
        let num_locals = mir.local_decls.len() - 1;
        // Locals start out uninitialized (`None`) until first written.
        let locals = vec![None; num_locals];

        self.stack.push(Frame {
            mir: mir,
            block: mir::START_BLOCK,
            return_to_block: return_to_block,
            return_lvalue: return_lvalue,
            locals: locals,
            span: span,
            def_id: def_id,
            substs: substs,
            stmt: 0,
        });

        // NOTE(review): the frame is pushed before the limit check, so the
        // over-limit frame briefly exists; callers only see the error.
        if self.stack.len() > self.stack_limit {
            Err(EvalError::StackFrameLimitReached)
        } else {
            Ok(())
        }
    }
    /// Pops the innermost frame, performs its `StackPopCleanup` action
    /// (freezing a finished global, or jumping to the caller's target block),
    /// and deallocates all `ByRef` locals of the popped frame.
    fn pop_stack_frame(&mut self) -> EvalResult<'tcx, ()> {
        ::log_settings::settings().indentation -= 1;
        let frame = self.stack.pop().expect("tried to pop a stack frame, but there were none");
        match frame.return_to_block {
            StackPopCleanup::Freeze => if let Lvalue::Global(id) = frame.return_lvalue {
                let global_value = self.globals
                    .get_mut(&id)
                    .expect("global should have been cached (freeze)");
                // Freeze every allocation the finished global's value points at,
                // including relocations embedded in by-value primvals.
                match global_value.data.expect("global should have been initialized") {
                    Value::ByRef(ptr) => self.memory.freeze(ptr.alloc_id)?,
                    Value::ByVal(val) => if let Some(alloc_id) = val.relocation {
                        self.memory.freeze(alloc_id)?;
                    },
                    Value::ByValPair(a, b) => {
                        if let Some(alloc_id) = a.relocation {
                            self.memory.freeze(alloc_id)?;
                        }
                        if let Some(alloc_id) = b.relocation {
                            self.memory.freeze(alloc_id)?;
                        }
                    },
                }
                // Mark the global itself immutable from here on.
                assert!(global_value.mutable);
                global_value.mutable = false;
            } else {
                bug!("StackPopCleanup::Freeze on: {:?}", frame.return_lvalue);
            },
            StackPopCleanup::Goto(target) => self.goto_block(target),
            StackPopCleanup::None => {},
        }
        // deallocate all locals that are backed by an allocation
        for (i, local) in frame.locals.into_iter().enumerate() {
            if let Some(Value::ByRef(ptr)) = local {
                // `i + 1` because local 0 (the return pointer) is not stored here.
                trace!("deallocating local {}: {:?}", i + 1, ptr);
                self.memory.dump(ptr.alloc_id);
                match self.memory.deallocate(ptr) {
                    // Any frozen memory means that it belongs to a constant or something referenced
                    // by a constant. We could alternatively check whether the alloc_id is frozen
                    // before calling deallocate, but this is much simpler and is probably the
                    // rare case.
                    Ok(()) | Err(EvalError::DeallocatedFrozenMemory) => {},
                    other => return other,
                }
            }
        }

        Ok(())
    }
    /// Evaluates both operands and applies the binary operation, returning the
    /// result together with an overflow flag.
    fn binop_with_overflow(
        &mut self,
        op: mir::BinOp,
        left: &mir::Operand<'tcx>,
        right: &mir::Operand<'tcx>,
    ) -> EvalResult<'tcx, (PrimVal, bool)> {
        let left_ty    = self.operand_ty(left);
        let right_ty   = self.operand_ty(right);
        let left_kind  = self.ty_to_primval_kind(left_ty)?;
        let right_kind = self.ty_to_primval_kind(right_ty)?;
        let left_val   = self.eval_operand_to_primval(left)?;
        let right_val  = self.eval_operand_to_primval(right)?;
        primval::binary_op(op, left_val, left_kind, right_val, right_kind)
    }
    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
    /// and a boolean signifying the potential overflow to the destination.
    /// Used for the `*_with_overflow` intrinsics and `CheckedBinaryOp`.
    fn intrinsic_with_overflow(
        &mut self,
        op: mir::BinOp,
        left: &mir::Operand<'tcx>,
        right: &mir::Operand<'tcx>,
        dest: Lvalue<'tcx>,
        dest_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, ()> {
        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
        // (result, overflow-flag) pair, laid out as the intrinsic's tuple result.
        let val = Value::ByValPair(val, PrimVal::from_bool(overflowed));
        self.write_value(val, dest, dest_ty)
    }
    /// Applies the binary operation `op` to the arguments and writes the result to the
    /// destination. Returns `true` if the operation overflowed.
    fn intrinsic_overflowing(
        &mut self,
        op: mir::BinOp,
        left: &mir::Operand<'tcx>,
        right: &mir::Operand<'tcx>,
        dest: Lvalue<'tcx>,
        dest_ty: Ty<'tcx>,
    ) -> EvalResult<'tcx, bool> {
        let (val, overflowed) = self.binop_with_overflow(op, left, right)?;
        // Only the result is written; the caller decides what to do with the flag.
        self.write_primval(dest, val, dest_ty)?;
        Ok(overflowed)
    }
    /// Writes each operand into `dest` at the corresponding byte offset.
    /// `offsets` and `operands` are zipped, so extra offsets are ignored.
    fn assign_fields<I: IntoIterator<Item = u64>>(
        &mut self,
        dest: Lvalue<'tcx>,
        offsets: I,
        operands: &[mir::Operand<'tcx>],
    ) -> EvalResult<'tcx, ()> {
        // FIXME(solson)
        let dest = self.force_allocation(dest)?.to_ptr();

        for (offset, operand) in offsets.into_iter().zip(operands) {
            let value = self.eval_operand(operand)?;
            let value_ty = self.operand_ty(operand);
            let field_dest = dest.offset(offset);
            self.write_value_to_ptr(value, field_dest, value_ty)?;
        }
        Ok(())
    }
/// Evaluate an assignment statement.
///
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
/// type writes its results directly into the memory specified by the lvalue.
fn eval_rvalue_into_lvalue(
&mut self,
rvalue: &mir::Rvalue<'tcx>,
lvalue: &mir::Lvalue<'tcx>,
) -> EvalResult<'tcx, ()> {
let dest = self.eval_lvalue(lvalue)?;
2016-04-07 05:56:07 -06:00
let dest_ty = self.lvalue_ty(lvalue);
let dest_layout = self.type_layout(dest_ty)?;
use rustc::mir::Rvalue::*;
match *rvalue {
Use(ref operand) => {
let value = self.eval_operand(operand)?;
self.write_value(value, dest, dest_ty)?;
}
BinaryOp(bin_op, ref left, ref right) => {
// ignore overflow bit, rustc inserts check branches for us
2016-11-26 22:58:01 -08:00
self.intrinsic_overflowing(bin_op, left, right, dest, dest_ty)?;
}
CheckedBinaryOp(bin_op, ref left, ref right) => {
self.intrinsic_with_overflow(bin_op, left, right, dest, dest_ty)?;
}
2016-06-11 12:38:28 -06:00
2016-03-07 07:57:08 -06:00
UnaryOp(un_op, ref operand) => {
let val = self.eval_operand_to_primval(operand)?;
2016-11-26 22:58:01 -08:00
let kind = self.ty_to_primval_kind(dest_ty)?;
self.write_primval(dest, primval::unary_op(un_op, val, kind)?, dest_ty)?;
2016-03-07 07:57:08 -06:00
}
Aggregate(ref kind, ref operands) => {
self.inc_step_counter_and_check_limit(operands.len() as u64)?;
use rustc::ty::layout::Layout::*;
match *dest_layout {
Univariant { ref variant, .. } => {
2016-10-03 20:45:50 -06:00
let offsets = variant.offsets.iter().map(|s| s.bytes());
2016-05-09 18:52:44 -06:00
self.assign_fields(dest, offsets, operands)?;
}
Array { .. } => {
let elem_size = match dest_ty.sty {
ty::TyArray(elem_ty, _) => self.type_size(elem_ty)?.expect("array elements are sized") as u64,
_ => bug!("tried to assign {:?} to non-array type {:?}", kind, dest_ty),
};
let offsets = (0..).map(|i| i * elem_size);
2016-05-09 18:52:44 -06:00
self.assign_fields(dest, offsets, operands)?;
}
General { discr, ref variants, .. } => {
if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind {
let discr_val = adt_def.variants[variant].disr_val.to_u64_unchecked();
let discr_size = discr.size().bytes();
let discr_offset = variants[variant].offsets[0].bytes();
// FIXME(solson)
let dest = self.force_allocation(dest)?;
let discr_dest = (dest.to_ptr()).offset(discr_offset);
2016-10-03 20:45:50 -06:00
self.memory.write_uint(discr_dest, discr_val, discr_size)?;
2016-10-03 20:45:50 -06:00
// Don't include the first offset; it's for the discriminant.
let field_offsets = variants[variant].offsets.iter().skip(1)
.map(|s| s.bytes());
2016-10-03 20:45:50 -06:00
self.assign_fields(dest, field_offsets, operands)?;
} else {
bug!("tried to assign {:?} to Layout::General", kind);
2016-03-15 05:50:53 -06:00
}
}
RawNullablePointer { nndiscr, .. } => {
if let mir::AggregateKind::Adt(_, variant, _, _) = *kind {
if nndiscr == variant as u64 {
assert_eq!(operands.len(), 1);
let operand = &operands[0];
let value = self.eval_operand(operand)?;
let value_ty = self.operand_ty(operand);
self.write_value(value, dest, value_ty)?;
} else {
if let Some(operand) = operands.get(0) {
assert_eq!(operands.len(), 1);
let operand_ty = self.operand_ty(operand);
assert_eq!(self.type_size(operand_ty)?, Some(0));
}
2016-11-26 22:58:01 -08:00
self.write_primval(dest, PrimVal::from_int(0), dest_ty)?;
}
} else {
bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
2016-03-15 05:50:53 -06:00
}
}
StructWrappedNullablePointer { nndiscr, ref nonnull, ref discrfield } => {
if let mir::AggregateKind::Adt(_, variant, _, _) = *kind {
if nndiscr == variant as u64 {
2016-10-03 20:45:50 -06:00
let offsets = nonnull.offsets.iter().map(|s| s.bytes());
try!(self.assign_fields(dest, offsets, operands));
} else {
for operand in operands {
let operand_ty = self.operand_ty(operand);
assert_eq!(self.type_size(operand_ty)?, Some(0));
}
let (offset, ty) = self.nonnull_offset_and_ty(dest_ty, nndiscr, discrfield)?;
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
let dest = dest.offset(offset.bytes());
let dest_size = self.type_size(ty)?.expect("bad StructWrappedNullablePointer discrfield");
try!(self.memory.write_int(dest, 0, dest_size));
}
} else {
bug!("tried to assign {:?} to Layout::RawNullablePointer", kind);
}
}
2016-11-26 22:58:01 -08:00
CEnum { .. } => {
assert_eq!(operands.len(), 0);
if let mir::AggregateKind::Adt(adt_def, variant, _, _) = *kind {
let n = adt_def.variants[variant].disr_val.to_u64_unchecked();
2016-11-26 22:58:01 -08:00
self.write_primval(dest, PrimVal::new(n), dest_ty)?;
} else {
bug!("tried to assign {:?} to Layout::CEnum", kind);
}
}
2016-11-15 15:23:19 +01:00
Vector { element, count } => {
let elem_size = element.size(&self.tcx.data_layout).bytes();
debug_assert_eq!(count, operands.len() as u64);
let offsets = (0..).map(|i| i * elem_size);
self.assign_fields(dest, offsets, operands)?;
}
2016-05-30 15:27:52 +02:00
_ => return Err(EvalError::Unimplemented(format!("can't handle destination layout {:?} when assigning {:?}", dest_layout, kind))),
}
}
2016-03-21 03:34:24 -06:00
Repeat(ref operand, _) => {
let (elem_ty, length) = match dest_ty.sty {
ty::TyArray(elem_ty, n) => (elem_ty, n as u64),
_ => bug!("tried to assign array-repeat to non-array type {:?}", dest_ty),
};
self.inc_step_counter_and_check_limit(length)?;
let elem_size = self.type_size(elem_ty)?.expect("repeat element type must be sized");
let value = self.eval_operand(operand)?;
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr();
for i in 0..length {
let elem_dest = dest.offset(i * elem_size);
self.write_value_to_ptr(value, elem_dest, elem_ty)?;
2016-03-21 03:34:24 -06:00
}
}
Len(ref lvalue) => {
2016-05-09 18:52:44 -06:00
let src = self.eval_lvalue(lvalue)?;
let ty = self.lvalue_ty(lvalue);
2016-09-28 18:22:09 +02:00
let (_, len) = src.elem_ty_and_len(ty);
2016-11-26 22:58:01 -08:00
self.write_primval(dest, PrimVal::from_uint(len), dest_ty)?;
}
2016-03-13 14:36:25 -06:00
Ref(_, _, ref lvalue) => {
2016-10-16 02:12:46 -06:00
let src = self.eval_lvalue(lvalue)?;
let (raw_ptr, extra) = self.force_allocation(src)?.to_ptr_and_extra();
let ptr = PrimVal::from_ptr(raw_ptr);
2016-10-16 02:12:46 -06:00
let val = match extra {
LvalueExtra::None => Value::ByVal(ptr),
2016-11-26 22:58:01 -08:00
LvalueExtra::Length(len) => Value::ByValPair(ptr, PrimVal::from_uint(len)),
LvalueExtra::Vtable(vtable) => Value::ByValPair(ptr, PrimVal::from_ptr(vtable)),
2016-09-27 10:14:53 +02:00
LvalueExtra::DowncastVariant(..) =>
bug!("attempted to take a reference to an enum downcast lvalue"),
2016-10-16 02:12:46 -06:00
};
self.write_value(val, dest, dest_ty)?;
2016-03-13 14:36:25 -06:00
}
2015-12-28 22:24:05 -06:00
2016-03-14 22:05:50 -06:00
Box(ty) => {
2016-10-16 00:12:27 -06:00
let ptr = self.alloc_ptr(ty)?;
2016-11-26 22:58:01 -08:00
self.write_primval(dest, PrimVal::from_ptr(ptr), dest_ty)?;
2016-03-14 22:05:50 -06:00
}
Cast(kind, ref operand, cast_ty) => {
debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
use rustc::mir::CastKind::*;
match kind {
Unsize => {
let src = self.eval_operand(operand)?;
2016-06-08 13:43:34 +02:00
let src_ty = self.operand_ty(operand);
self.unsize_into(src, src_ty, dest, dest_ty)?;
}
Misc => {
let src = self.eval_operand(operand)?;
let src_ty = self.operand_ty(operand);
if self.type_is_fat_ptr(src_ty) {
trace!("misc cast: {:?}", src);
match (src, self.type_is_fat_ptr(dest_ty)) {
2016-10-21 13:56:38 +02:00
(Value::ByRef(_), _) |
(Value::ByValPair(..), true) => {
self.write_value(src, dest, dest_ty)?;
},
(Value::ByValPair(data, _), false) => {
2016-10-21 13:56:38 +02:00
self.write_value(Value::ByVal(data), dest, dest_ty)?;
},
(Value::ByVal(_), _) => bug!("expected fat ptr"),
2016-09-07 18:34:59 +02:00
}
} else {
let src_val = self.value_to_primval(src, src_ty)?;
2016-11-26 22:58:01 -08:00
let dest_val = self.cast_primval(src_val, src_ty, dest_ty)?;
2016-10-21 13:56:38 +02:00
self.write_value(Value::ByVal(dest_val), dest, dest_ty)?;
}
}
2016-06-08 13:43:34 +02:00
ReifyFnPointer => match self.operand_ty(operand).sty {
ty::TyFnDef(def_id, substs, fn_ty) => {
let fn_ty = self.tcx.erase_regions(&fn_ty);
let fn_ptr = self.memory.create_fn_ptr(self.tcx,def_id, substs, fn_ty);
2016-11-26 22:58:01 -08:00
self.write_value(Value::ByVal(PrimVal::from_ptr(fn_ptr)), dest, dest_ty)?;
2016-06-08 13:43:34 +02:00
},
ref other => bug!("reify fn pointer on {:?}", other),
2016-06-08 13:43:34 +02:00
},
UnsafeFnPointer => match dest_ty.sty {
ty::TyFnPtr(unsafe_fn_ty) => {
let src = self.eval_operand(operand)?;
let ptr = src.read_ptr(&self.memory)?;
let (def_id, substs, _, _) = self.memory.get_fn(ptr.alloc_id)?;
let unsafe_fn_ty = self.tcx.erase_regions(&unsafe_fn_ty);
let fn_ptr = self.memory.create_fn_ptr(self.tcx, def_id, substs, unsafe_fn_ty);
2016-11-26 22:58:01 -08:00
self.write_value(Value::ByVal(PrimVal::from_ptr(fn_ptr)), dest, dest_ty)?;
},
ref other => bug!("fn to unsafe fn cast on {:?}", other),
},
}
}
2016-09-28 11:48:43 -06:00
InlineAsm { .. } => return Err(EvalError::InlineAsm),
}
if log_enabled!(::log::LogLevel::Trace) {
self.dump_local(dest);
}
Ok(())
}
2016-09-07 18:34:59 +02:00
fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
ty::TyBox(ty) => !self.type_is_sized(ty),
_ => false,
}
}
    /// Resolves the discriminant field path of a `StructWrappedNullablePointer`
    /// layout, returning the byte offset and type of the field that encodes the
    /// null/non-null distinction.
    fn nonnull_offset_and_ty(&self, ty: Ty<'tcx>, nndiscr: u64, discrfield: &[u32]) -> EvalResult<'tcx, (Size, Ty<'tcx>)> {
        // Skip the constant 0 at the start meant for LLVM GEP and the outer non-null variant
        let path = discrfield.iter().skip(2).map(|&i| i as usize);

        // Handle the field index for the outer non-null variant.
        let inner_ty = match ty.sty {
            ty::TyAdt(adt_def, substs) => {
                let variant = &adt_def.variants[nndiscr as usize];
                // discrfield[1] selects the field within the non-null variant.
                let index = discrfield[1];
                let field = &variant.fields[index as usize];
                field.ty(self.tcx, substs)
            }
            _ => bug!("non-enum for StructWrappedNullablePointer: {}", ty),
        };

        self.field_path_offset_and_ty(inner_ty, path)
    }
    /// Walks a sequence of field indices starting at `ty`, accumulating the
    /// total byte offset and ending type. (The caller has already stripped the
    /// leading 0 that LLVM GEP paths carry.)
    fn field_path_offset_and_ty<I: Iterator<Item = usize>>(&self, mut ty: Ty<'tcx>, path: I) -> EvalResult<'tcx, (Size, Ty<'tcx>)> {
        let mut offset = Size::from_bytes(0);

        for field_index in path {
            let field_offset = self.get_field_offset(ty, field_index)?;
            trace!("field_path_offset_and_ty: {}, {}, {:?}, {:?}", field_index, ty, field_offset, offset);
            ty = self.get_field_ty(ty, field_index)?;
            // checked_add guards against overflow of the target's address space.
            offset = offset.checked_add(field_offset, &self.tcx.data_layout).unwrap();
        }

        Ok((offset, ty))
    }
    /// Returns the type of field `field_index` of `ty`. For fat pointers,
    /// field 0 is the data pointer and field 1 is the metadata (length or vtable).
    fn get_field_ty(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Ty<'tcx>> {
        match ty.sty {
            ty::TyAdt(adt_def, substs) => {
                Ok(adt_def.struct_variant().fields[field_index].ty(self.tcx, substs))
            }

            ty::TyTuple(fields) => Ok(fields[field_index]),

            ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
            ty::TyRawPtr(ty::TypeAndMut { ty, .. }) |
            ty::TyBox(ty) => {
                // Only fat pointers have addressable fields; classify by pointee tail.
                match (field_index, &self.tcx.struct_tail(ty).sty) {
                    (1, &ty::TyStr) |
                    (1, &ty::TySlice(_)) => Ok(self.tcx.types.usize),
                    // NOTE(review): the vtable part and the data part are both
                    // modeled as `*const u8` here.
                    (1, &ty::TyTrait(_)) |
                    (0, _) => Ok(self.tcx.mk_imm_ptr(self.tcx.types.u8)),
                    _ => bug!("invalid fat pointee type: {}", ty),
                }
            }

            _ => Err(EvalError::Unimplemented(format!("can't handle type: {:?}, {:?}", ty, ty.sty))),
        }
    }
    /// Returns the byte offset of field `field_index` within `ty`, based on its
    /// computed layout. Only univariant and fat-pointer layouts are supported.
    fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
        let layout = self.type_layout(ty)?;

        use rustc::ty::layout::Layout::*;
        match *layout {
            Univariant { ref variant, .. } => {
                Ok(variant.offsets[field_index])
            }
            FatPointer { .. } => {
                // Fat pointers are two pointer-sized words: data then metadata.
                let bytes = field_index as u64 * self.memory.pointer_size();
                Ok(Size::from_bytes(bytes))
            }
            _ => {
                let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
                Err(EvalError::Unimplemented(msg))
            }
        }
    }
fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, usize> {
let layout = self.type_layout(ty)?;
2016-10-16 02:12:46 -06:00
use rustc::ty::layout::Layout::*;
match *layout {
Univariant { ref variant, .. } => Ok(variant.offsets.len()),
FatPointer { .. } => Ok(2),
_ => {
let msg = format!("can't handle type: {:?}, with layout: {:?}", ty, layout);
Err(EvalError::Unimplemented(msg))
}
}
}
fn eval_operand_to_primval(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, PrimVal> {
let value = self.eval_operand(op)?;
let ty = self.operand_ty(op);
self.value_to_primval(value, ty)
}
    /// Evaluates a MIR operand to an interpreter `Value`: either reads an
    /// lvalue, or materializes a literal/item/promoted constant.
    fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, Value> {
        use rustc::mir::Operand::*;
        match *op {
            Consume(ref lvalue) => self.eval_and_read_lvalue(lvalue),

            Constant(mir::Constant { ref literal, ty, .. }) => {
                use rustc::mir::Literal;
                let value = match *literal {
                    Literal::Value { ref value } => self.const_to_value(value)?,

                    Literal::Item { def_id, substs } => {
                        if let ty::TyFnDef(..) = ty.sty {
                            // function items are zero sized
                            Value::ByRef(self.memory.allocate(0, 0)?)
                        } else {
                            // A reference to a static/constant: read its cached value.
                            let cid = GlobalId {
                                def_id: def_id,
                                substs: substs,
                                promoted: None,
                            };
                            self.read_lvalue(Lvalue::Global(cid))?
                        }
                    }

                    Literal::Promoted { index } => {
                        // Promoteds are keyed by the enclosing function plus index.
                        let cid = GlobalId {
                            def_id: self.frame().def_id,
                            substs: self.substs(),
                            promoted: Some(index),
                        };
                        self.read_lvalue(Lvalue::Global(cid))?
                    }
                };

                Ok(value)
            }
        }
    }
2015-11-12 17:44:29 -06:00
    /// Evaluates an lvalue and reads the value stored there.
    ///
    /// Has a fast path for field projections on a local that is stored as a
    /// `ByValPair`, which avoids forcing the local into an allocation.
    fn eval_and_read_lvalue(&mut self, lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
        if let mir::Lvalue::Projection(ref proj) = *lvalue {
            if let mir::Lvalue::Local(index) = proj.base {
                if let Some(Value::ByValPair(a, b)) = self.frame().get_local(index) {
                    if let mir::ProjectionElem::Field(ref field, _) = proj.elem {
                        // Field 0 is the first half of the pair, field 1 the second.
                        let val = [a, b][field.index()];
                        return Ok(Value::ByVal(val));
                    }
                }
            }
        }
        let lvalue = self.eval_lvalue(lvalue)?;
        self.read_lvalue(lvalue)
    }
2016-10-21 10:45:01 +02:00
    /// Reads the value currently stored at `lvalue`. Reading an uninitialized
    /// local or global yields `ReadUndefBytes`.
    pub fn read_lvalue(&self, lvalue: Lvalue<'tcx>) -> EvalResult<'tcx, Value> {
        match lvalue {
            Lvalue::Ptr { ptr, extra } => {
                // Unsized memory lvalues cannot be read as a single `Value`.
                assert_eq!(extra, LvalueExtra::None);
                Ok(Value::ByRef(ptr))
            }
            Lvalue::Local { frame, local } => {
                self.stack[frame].get_local(local).ok_or(EvalError::ReadUndefBytes)
            }
            Lvalue::Global(cid) => self.globals
                .get(&cid)
                .expect("global not cached")
                .data
                .ok_or(EvalError::ReadUndefBytes),
        }
    }
    /// Resolves a MIR lvalue to an interpreter `Lvalue` without reading it.
    fn eval_lvalue(&mut self, mir_lvalue: &mir::Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
        use rustc::mir::Lvalue::*;
        let lvalue = match *mir_lvalue {
            // The return slot was fixed when the frame was pushed.
            Local(mir::RETURN_POINTER) => self.frame().return_lvalue,

            Local(local) => {
                Lvalue::Local {
                    // Locals always live in the innermost (current) frame.
                    frame: self.stack.len() - 1,
                    local: local,
                }
            }

            Static(def_id) => {
                // Statics are never generic, hence the empty substs.
                let substs = self.tcx.intern_substs(&[]);
                let cid = GlobalId {
                    def_id: def_id,
                    substs: substs,
                    promoted: None,
                };
                Lvalue::Global(cid)
            }

            Projection(ref proj) => return self.eval_lvalue_projection(proj),
        };

        if log_enabled!(::log::LogLevel::Trace) {
            self.dump_local(lvalue);
        }

        Ok(lvalue)
    }
/// Applies a single MIR projection element (field access, enum downcast,
/// dereference, indexing, constant indexing, or subslicing) to its base
/// lvalue and returns the resulting `Lvalue::Ptr`.
fn eval_lvalue_projection(
    &mut self,
    proj: &mir::LvalueProjection<'tcx>,
) -> EvalResult<'tcx, Lvalue<'tcx>> {
    let base = self.eval_lvalue(&proj.base)?;
    let base_ty = self.lvalue_ty(&proj.base);
    let base_layout = self.type_layout(base_ty)?;

    use rustc::mir::ProjectionElem::*;
    let (ptr, extra) = match proj.elem {
        Field(field, field_ty) => {
            // FIXME(solson)
            let base = self.force_allocation(base)?;
            let (base_ptr, base_extra) = base.to_ptr_and_extra();

            let field_ty = self.monomorphize(field_ty, self.substs());
            let field = field.index();

            // Where the field lives depends on how the aggregate is laid out.
            use rustc::ty::layout::Layout::*;
            let offset = match *base_layout {
                Univariant { ref variant, .. } => variant.offsets[field],
                General { ref variants, .. } => {
                    if let LvalueExtra::DowncastVariant(variant_idx) = base_extra {
                        // +1 for the discriminant, which is field 0
                        variants[variant_idx].offsets[field + 1]
                    } else {
                        bug!("field access on enum had no variant index");
                    }
                }
                RawNullablePointer { .. } => {
                    // The pointer itself is the only field, at offset 0; the base
                    // lvalue already points at it.
                    assert_eq!(field.index(), 0);
                    return Ok(base);
                }
                StructWrappedNullablePointer { ref nonnull, .. } => {
                    nonnull.offsets[field]
                }
                _ => bug!("field access on non-product type: {:?}", base_layout),
            };

            let ptr = base_ptr.offset(offset.bytes());
            // An unsized field (only possible as the last field) inherits the
            // base's fat-pointer metadata.
            let extra = if self.type_is_sized(field_ty) {
                LvalueExtra::None
            } else {
                match base_extra {
                    LvalueExtra::None => bug!("expected fat pointer"),
                    LvalueExtra::DowncastVariant(..) =>
                        bug!("Rust doesn't support unsized fields in enum variants"),
                    LvalueExtra::Vtable(_) |
                    LvalueExtra::Length(_) => {},
                }
                base_extra
            };

            (ptr, extra)
        }

        Downcast(_, variant) => {
            // FIXME(solson)
            let base = self.force_allocation(base)?;
            let (base_ptr, base_extra) = base.to_ptr_and_extra();

            // Remember the active variant so later `Field` projections can
            // find the right offset table.
            use rustc::ty::layout::Layout::*;
            let extra = match *base_layout {
                General { .. } => LvalueExtra::DowncastVariant(variant),
                RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => base_extra,
                _ => bug!("variant downcast on non-aggregate: {:?}", base_layout),
            };
            (base_ptr, extra)
        }

        Deref => {
            let val = self.eval_and_read_lvalue(&proj.base)?;

            let pointee_type = match base_ty.sty {
                ty::TyRawPtr(ty::TypeAndMut{ty, ..}) |
                ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
                ty::TyBox(ty) => ty,
                _ => bug!("can only deref pointer types"),
            };

            trace!("deref to {} on {:?}", pointee_type, val);

            // Unsized pointees carry their metadata in the fat pointer.
            match self.tcx.struct_tail(pointee_type).sty {
                ty::TyTrait(_) => {
                    let (ptr, vtable) = val.expect_ptr_vtable_pair(&self.memory)?;
                    (ptr, LvalueExtra::Vtable(vtable))
                },
                ty::TyStr | ty::TySlice(_) => {
                    let (ptr, len) = val.expect_slice(&self.memory)?;
                    (ptr, LvalueExtra::Length(len))
                },
                _ => (val.read_ptr(&self.memory)?, LvalueExtra::None),
            }
        }

        Index(ref operand) => {
            // FIXME(solson)
            let base = self.force_allocation(base)?;
            let (base_ptr, _) = base.to_ptr_and_extra();

            let (elem_ty, len) = base.elem_ty_and_len(base_ty);
            let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
            // Evaluate the runtime index operand as a usize.
            let n_ptr = self.eval_operand(operand)?;
            let usize = self.tcx.types.usize;
            let n = self.value_to_primval(n_ptr, usize)?.to_u64();
            assert!(n < len);
            let ptr = base_ptr.offset(n * elem_size);
            (ptr, LvalueExtra::None)
        }

        ConstantIndex { offset, min_length, from_end } => {
            // FIXME(solson)
            let base = self.force_allocation(base)?;
            let (base_ptr, _) = base.to_ptr_and_extra();

            let (elem_ty, n) = base.elem_ty_and_len(base_ty);
            let elem_size = self.type_size(elem_ty)?.expect("sequence element must be sized");
            assert!(n >= min_length as u64);

            // `from_end` counts the element backwards from the sequence length.
            let index = if from_end {
                n - u64::from(offset)
            } else {
                u64::from(offset)
            };

            let ptr = base_ptr.offset(index * elem_size);
            (ptr, LvalueExtra::None)
        }

        Subslice { from, to } => {
            // FIXME(solson)
            let base = self.force_allocation(base)?;
            let (base_ptr, _) = base.to_ptr_and_extra();

            let (elem_ty, n) = base.elem_ty_and_len(base_ty);
            let elem_size = self.type_size(elem_ty)?.expect("slice element must be sized");
            assert!(u64::from(from) <= n - u64::from(to));
            let ptr = base_ptr.offset(u64::from(from) * elem_size);
            // The subslice's length excludes `from` leading and `to` trailing elements.
            let extra = LvalueExtra::Length(n - u64::from(to) - u64::from(from));
            (ptr, extra)
        }
    };

    Ok(Lvalue::Ptr { ptr: ptr, extra: extra })
}
/// Returns the monomorphized type of `lvalue` in the current frame.
fn lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
    let raw_ty = lvalue.ty(&self.mir(), self.tcx).to_ty(self.tcx);
    self.monomorphize(raw_ty, self.substs())
}
/// Returns the monomorphized type of `operand` in the current frame.
fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
    let raw_ty = operand.ty(&self.mir(), self.tcx);
    self.monomorphize(raw_ty, self.substs())
}
/// Copies one value of sized type `ty` from `src` to `dest`, respecting
/// the type's alignment.
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, ()> {
    let byte_count = self.type_size(ty)?.expect("cannot copy from an unsized type");
    let alignment = self.type_align(ty)?;
    self.memory.copy(src, dest, byte_count, alignment)?;
    Ok(())
}
/// Converts any lvalue into a memory-backed `Lvalue::Ptr`, allocating
/// backing storage for by-value locals and globals as needed. The new
/// allocation is recorded back into the local/global slot so that all
/// later accesses observe the same memory.
fn force_allocation(&mut self, lvalue: Lvalue<'tcx>) -> EvalResult<'tcx, Lvalue<'tcx>> {
    let new_lvalue = match lvalue {
        Lvalue::Local { frame, local } => {
            match self.stack[frame].get_local(local) {
                // Already backed by memory; just point at it.
                Some(Value::ByRef(ptr)) => Lvalue::from_ptr(ptr),
                opt_val => {
                    let ty = self.stack[frame].mir.local_decls[local].ty;
                    let ty = self.monomorphize(ty, self.stack[frame].substs);
                    let substs = self.stack[frame].substs;
                    let ptr = self.alloc_ptr_with_substs(ty, substs)?;
                    // Record the allocation first, then spill the old value
                    // (if any) into it.
                    self.stack[frame].set_local(local, Value::ByRef(ptr));
                    if let Some(val) = opt_val {
                        self.write_value_to_ptr(val, ptr, ty)?;
                    }
                    Lvalue::from_ptr(ptr)
                }
            }
        }
        // Already a pointer; nothing to do.
        Lvalue::Ptr { .. } => lvalue,
        Lvalue::Global(cid) => {
            let global_val = *self.globals.get(&cid).expect("global not cached");
            match global_val.data {
                Some(Value::ByRef(ptr)) => Lvalue::from_ptr(ptr),
                _ => {
                    let ptr = self.alloc_ptr_with_substs(global_val.ty, cid.substs)?;
                    if let Some(val) = global_val.data {
                        self.write_value_to_ptr(val, ptr, global_val.ty)?;
                    }
                    // Immutable globals get frozen so later writes through
                    // the pointer are rejected.
                    if !global_val.mutable {
                        self.memory.freeze(ptr.alloc_id)?;
                    }
                    let lval = self.globals.get_mut(&cid).expect("already checked");
                    *lval = Global {
                        data: Some(Value::ByRef(ptr)),
                        .. global_val
                    };
                    Lvalue::from_ptr(ptr)
                },
            }
        }
    };
    Ok(new_lvalue)
}
2016-09-19 19:40:56 -06:00
// FIXME(solson): This method unnecessarily allocates and should not be necessary. We can
// remove it as soon as PrimVal can represent fat pointers.
fn value_to_ptr_dont_use(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Pointer> {
2016-09-19 19:40:56 -06:00
match value {
Value::ByRef(ptr) => Ok(ptr),
Value::ByVal(primval) => {
let ptr = self.alloc_ptr(ty)?;
2016-11-26 22:58:01 -08:00
let kind = self.ty_to_primval_kind(ty)?;
self.memory.write_primval(ptr, primval, kind)?;
2016-09-19 19:40:56 -06:00
Ok(ptr)
}
Value::ByValPair(a, b) => {
let ptr = self.alloc_ptr(ty)?;
self.write_pair_to_ptr(a, b, ptr, ty)?;
Ok(ptr)
}
2016-09-19 19:40:56 -06:00
}
}
/// Ensures this Value is not a ByRef: loads it out of memory if it is one.
fn follow_by_ref_value(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
    if let Value::ByRef(ptr) = value {
        self.read_value(ptr, ty)
    } else {
        Ok(value)
    }
}
/// Reduces `value` to a single validated `PrimVal` of type `ty`.
/// Fat pointers (`ByValPair`) cannot be represented as one `PrimVal`.
fn value_to_primval(&mut self, value: Value, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
    let thin_value = self.follow_by_ref_value(value, ty)?;
    let primval = match thin_value {
        Value::ByVal(primval) => primval,
        Value::ByRef(_) => bug!("follow_by_ref_value can't result in `ByRef`"),
        Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
    };
    let new_primval = self.transmute_primval(primval, ty)?;
    self.ensure_valid_value(new_primval, ty)?;
    Ok(new_primval)
}
// FIXME(solson): Delete this.
/// Identity "transmute": currently a no-op kept only so callers have a
/// single hook once PrimVal reinterpretation is actually needed.
fn transmute_primval(&self, val: PrimVal, _ty: Ty<'tcx>) -> EvalResult<'tcx, PrimVal> {
    Ok(val)
}
fn write_primval(
&mut self,
dest: Lvalue<'tcx>,
val: PrimVal,
2016-11-26 22:58:01 -08:00
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, ()> {
match dest {
Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
2016-11-26 22:58:01 -08:00
let kind = self.ty_to_primval_kind(dest_ty)?;
self.memory.write_primval(ptr, val, kind)
}
Lvalue::Local { frame, local } => {
self.stack[frame].set_local(local, Value::ByVal(val));
Ok(())
}
2016-10-21 11:48:56 +02:00
Lvalue::Global(cid) => {
let global_val = self.globals.get_mut(&cid).expect("global not cached");
if global_val.mutable {
global_val.data = Some(Value::ByVal(val));
Ok(())
} else {
Err(EvalError::ModifiedConstantMemory)
}
}
}
}
fn write_value(
&mut self,
src_val: Value,
dest: Lvalue<'tcx>,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, ()> {
match dest {
2016-10-21 11:48:56 +02:00
Lvalue::Global(cid) => {
let dest = *self.globals.get_mut(&cid).expect("global should be cached");
if !dest.mutable {
return Err(EvalError::ModifiedConstantMemory);
}
self.write_value_possibly_by_val(
src_val,
|this, val| *this.globals.get_mut(&cid).expect("already checked") = Global { data: Some(val), ..dest },
dest.data,
dest_ty,
)
},
Lvalue::Ptr { ptr, extra } => {
assert_eq!(extra, LvalueExtra::None);
self.write_value_to_ptr(src_val, ptr, dest_ty)
}
Lvalue::Local { frame, local } => {
let dest = self.stack[frame].get_local(local);
self.write_value_possibly_by_val(
src_val,
|this, val| this.stack[frame].set_local(local, val),
dest,
dest_ty,
)
}
}
}
// The cases here can be a bit subtle. Read carefully!
/// Core write logic shared by locals and globals: writes `src_val` either
/// through an existing `ByRef` allocation (so aliases observe the change)
/// or via the `write_dest` callback for immediate values.
fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value)>(
    &mut self,
    src_val: Value,
    write_dest: F,
    old_dest_val: Option<Value>,
    dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, ()> {
    if let Some(Value::ByRef(dest_ptr)) = old_dest_val {
        // If the value is already `ByRef` (that is, backed by an `Allocation`),
        // then we must write the new value into this allocation, because there may be
        // other pointers into the allocation. These other pointers are logically
        // pointers into the local variable, and must be able to observe the change.
        //
        // Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
        // knew for certain that there were no outstanding pointers to this allocation.
        self.write_value_to_ptr(src_val, dest_ptr, dest_ty)?;

    } else if let Value::ByRef(src_ptr) = src_val {
        // If the value is not `ByRef`, then we know there are no pointers to it
        // and we can simply overwrite the `Value` in the locals array directly.
        //
        // In this specific case, where the source value is `ByRef`, we must duplicate
        // the allocation, because this is a by-value operation. It would be incorrect
        // if they referred to the same allocation, since then a change to one would
        // implicitly change the other.
        //
        // TODO(solson): It would be valid to attempt reading a primitive value out of
        // the source and writing that into the destination without making an
        // allocation. This would be a pure optimization.
        let dest_ptr = self.alloc_ptr(dest_ty)?;
        self.copy(src_ptr, dest_ptr, dest_ty)?;
        write_dest(self, Value::ByRef(dest_ptr));

    } else {
        // Finally, we have the simple case where neither source nor destination are
        // `ByRef`. We may simply copy the source value over the destination.
        write_dest(self, src_val);
    }
    Ok(())
}
/// Writes `value` of type `dest_ty` into the allocation behind `dest`.
fn write_value_to_ptr(
    &mut self,
    value: Value,
    dest: Pointer,
    dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, ()> {
    match value {
        // Memory-to-memory: a plain typed copy.
        Value::ByRef(src_ptr) => self.copy(src_ptr, dest, dest_ty),
        // A single immediate primitive.
        Value::ByVal(primval) => {
            let kind = self.ty_to_primval_kind(dest_ty)?;
            self.memory.write_primval(dest, primval, kind)
        }
        // A fat pointer or other two-word immediate.
        Value::ByValPair(a, b) => self.write_pair_to_ptr(a, b, dest, dest_ty),
    }
}
/// Writes the two halves of a `ByValPair` into the two fields of the
/// two-field aggregate `ty` located at `ptr`.
fn write_pair_to_ptr(
    &mut self,
    a: PrimVal,
    b: PrimVal,
    ptr: Pointer,
    ty: Ty<'tcx>
) -> EvalResult<'tcx, ()> {
    // This only makes sense for exactly-two-field aggregates.
    assert_eq!(self.get_field_count(ty)?, 2);

    let offset_a = self.get_field_offset(ty, 0)?.bytes();
    let offset_b = self.get_field_offset(ty, 1)?.bytes();
    let ty_a = self.get_field_ty(ty, 0)?;
    let ty_b = self.get_field_ty(ty, 1)?;
    let kind_a = self.ty_to_primval_kind(ty_a)?;
    let kind_b = self.ty_to_primval_kind(ty_b)?;
    self.memory.write_primval(ptr.offset(offset_a), a, kind_a)?;
    self.memory.write_primval(ptr.offset(offset_b), b, kind_b)?;
    Ok(())
}
fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
use syntax::ast::FloatTy;
let kind = match ty.sty {
ty::TyBool => PrimValKind::Bool,
ty::TyChar => PrimValKind::Char,
ty::TyInt(int_ty) => {
use syntax::ast::IntTy::*;
let size = match int_ty {
I8 => 1,
I16 => 2,
I32 => 4,
I64 => 8,
Is => self.memory.pointer_size(),
};
PrimValKind::from_int_size(size)
}
ty::TyUint(uint_ty) => {
use syntax::ast::UintTy::*;
let size = match uint_ty {
U8 => 1,
U16 => 2,
U32 => 4,
U64 => 8,
Us => self.memory.pointer_size(),
};
PrimValKind::from_uint_size(size)
}
ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
ty::TyFnPtr(_) => PrimValKind::FnPtr,
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) if self.type_is_sized(ty) => PrimValKind::Ptr,
ty::TyAdt(..) => {
use rustc::ty::layout::Layout::*;
if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
let size = discr.size().bytes();
if signed {
PrimValKind::from_int_size(size)
} else {
PrimValKind::from_uint_size(size)
}
} else {
return Err(EvalError::TypeNotPrimitive(ty));
}
},
_ => return Err(EvalError::TypeNotPrimitive(ty)),
};
Ok(kind)
}
/// Validates that the bit pattern in `val` is legal for type `ty`
/// (`bool` must be 0/1, `char` must be a valid scalar value).
fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx, ()> {
    match ty.sty {
        ty::TyBool => {
            if val.bits > 1 {
                Err(EvalError::InvalidBool)
            } else {
                Ok(())
            }
        }
        ty::TyChar => {
            let c = val.bits as u32;
            if ::std::char::from_u32(c).is_none() {
                Err(EvalError::InvalidChar(c as u64))
            } else {
                Ok(())
            }
        }
        // All other primitive bit patterns are unconstrained here.
        _ => Ok(()),
    }
}
fn read_value(&mut self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
use syntax::ast::FloatTy;
2016-10-18 22:31:21 -06:00
let val = match ty.sty {
ty::TyBool => PrimVal::from_bool(self.memory.read_bool(ptr)?),
2016-10-18 22:31:21 -06:00
ty::TyChar => {
2016-06-20 12:29:45 +02:00
let c = self.memory.read_uint(ptr, 4)? as u32;
match ::std::char::from_u32(c) {
Some(ch) => PrimVal::from_char(ch),
2016-09-07 18:34:59 +02:00
None => return Err(EvalError::InvalidChar(c as u64)),
2016-06-20 12:29:45 +02:00
}
}
2016-09-19 04:56:09 -06:00
2016-10-18 22:31:21 -06:00
ty::TyInt(int_ty) => {
use syntax::ast::IntTy::*;
let size = match int_ty {
I8 => 1,
I16 => 2,
I32 => 4,
I64 => 8,
Is => self.memory.pointer_size(),
};
2016-11-26 22:58:01 -08:00
PrimVal::from_int(self.memory.read_int(ptr, size)?)
}
2016-10-18 22:31:21 -06:00
ty::TyUint(uint_ty) => {
use syntax::ast::UintTy::*;
let size = match uint_ty {
U8 => 1,
U16 => 2,
U32 => 4,
U64 => 8,
Us => self.memory.pointer_size(),
};
2016-11-26 22:58:01 -08:00
PrimVal::from_uint(self.memory.read_uint(ptr, size)?)
}
ty::TyFloat(FloatTy::F32) => PrimVal::from_f32(self.memory.read_f32(ptr)?),
ty::TyFloat(FloatTy::F64) => PrimVal::from_f64(self.memory.read_f64(ptr)?),
2016-11-26 22:58:01 -08:00
// TODO(solson): Should this even be here? Fn items aren't primvals, are they?
2016-10-18 22:31:21 -06:00
ty::TyFnDef(def_id, substs, fn_ty) => {
2016-11-26 22:58:01 -08:00
PrimVal::from_ptr(self.memory.create_fn_ptr(self.tcx, def_id, substs, fn_ty))
2016-06-20 10:35:15 +02:00
},
2016-11-26 22:58:01 -08:00
ty::TyFnPtr(_) => self.memory.read_ptr(ptr).map(PrimVal::from_ptr)?,
2016-10-18 22:31:21 -06:00
ty::TyBox(ty) |
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty, .. }) => {
2016-09-23 15:48:23 +02:00
let p = self.memory.read_ptr(ptr)?;
if self.type_is_sized(ty) {
PrimVal::from_ptr(p)
} else {
trace!("reading fat pointer extra of type {}", ty);
let extra = ptr.offset(self.memory.pointer_size());
let extra = match self.tcx.struct_tail(ty).sty {
ty::TyTrait(..) => PrimVal::from_ptr(self.memory.read_ptr(extra)?),
2016-09-23 15:48:23 +02:00
ty::TySlice(..) |
2016-11-26 22:58:01 -08:00
ty::TyStr => PrimVal::from_uint(self.memory.read_usize(extra)?),
2016-09-23 15:48:23 +02:00
_ => bug!("unsized primval ptr read from {:?}", ty),
};
return Ok(Value::ByValPair(PrimVal::from_ptr(p), extra));
}
}
2016-10-18 22:31:21 -06:00
ty::TyAdt(..) => {
2016-09-07 18:34:59 +02:00
use rustc::ty::layout::Layout::*;
if let CEnum { discr, signed, .. } = *self.type_layout(ty)? {
let size = discr.size().bytes();
if signed {
2016-11-26 22:58:01 -08:00
PrimVal::from_int(self.memory.read_int(ptr, size)?)
} else {
2016-11-26 22:58:01 -08:00
PrimVal::from_uint(self.memory.read_uint(ptr, size)?)
2016-09-07 18:34:59 +02:00
}
} else {
bug!("primitive read of non-clike enum: {:?}", ty);
}
},
_ => bug!("primitive read of non-primitive type: {:?}", ty),
};
2016-10-18 22:31:21 -06:00
Ok(Value::ByVal(val))
}
/// Returns the topmost (currently executing) stack frame.
fn frame(&self) -> &Frame<'tcx> {
    self.stack.last().expect("no call frames exist")
}

/// Returns the topmost stack frame, mutably.
pub fn frame_mut(&mut self) -> &mut Frame<'tcx> {
    self.stack.last_mut().expect("no call frames exist")
}

/// Returns a fresh `Ref` handle to the current frame's MIR body.
fn mir(&self) -> MirRef<'tcx> {
    Ref::clone(&self.frame().mir)
}

/// Returns the substitutions in effect for the current frame.
fn substs(&self) -> &'tcx Substs<'tcx> {
    self.frame().substs
}
/// Performs an unsizing coercion of `src` (type `src_ty`) into `dest`
/// (type `dest_ty`): e.g. `&[T; N]` -> `&[T]`, `&T` -> `&Trait`, or a
/// smart-pointer struct like `Arc<T>` -> `Arc<Trait>` whose last field
/// unsizes (handled by recursing field-by-field).
fn unsize_into(
    &mut self,
    src: Value,
    src_ty: Ty<'tcx>,
    dest: Lvalue<'tcx>,
    dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx, ()> {
    match (&src_ty.sty, &dest_ty.sty) {
        (&ty::TyBox(sty), &ty::TyBox(dty)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: sty, .. }), &ty::TyRef(_, ty::TypeAndMut { ty: dty, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: sty, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: dty, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: sty, .. }), &ty::TyRawPtr(ty::TypeAndMut { ty: dty, .. })) => {
            // A<Struct> -> A<Trait> conversion
            let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);

            match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
                (&ty::TyArray(_, length), &ty::TySlice(_)) => {
                    // Array -> slice: the static length becomes the fat
                    // pointer's metadata word.
                    let ptr = src.read_ptr(&self.memory)?;
                    let len = PrimVal::from_uint(length as u64);
                    let ptr = PrimVal::from_ptr(ptr);
                    self.write_value(Value::ByValPair(ptr, len), dest, dest_ty)?;
                }

                (&ty::TyTrait(_), &ty::TyTrait(_)) => {
                    // For now, upcasts are limited to changes in marker
                    // traits, and hence never actually require an actual
                    // change to the vtable.
                    self.write_value(src, dest, dest_ty)?;
                },

                (_, &ty::TyTrait(ref data)) => {
                    // Concrete pointee -> trait object: attach the vtable
                    // as the fat pointer's metadata.
                    let trait_ref = data.principal.with_self_ty(self.tcx, src_pointee_ty);
                    let trait_ref = self.tcx.erase_regions(&trait_ref);
                    let vtable = self.get_vtable(trait_ref)?;
                    let ptr = src.read_ptr(&self.memory)?;
                    let ptr = PrimVal::from_ptr(ptr);
                    let extra = PrimVal::from_ptr(vtable);
                    self.write_value(Value::ByValPair(ptr, extra), dest, dest_ty)?;
                },

                _ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
            }
        }
        (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
            // FIXME(solson)
            let dest = self.force_allocation(dest)?.to_ptr();
            // unsizing of generic struct with pointer fields
            // Example: `Arc<T>` -> `Arc<Trait>`
            // here we need to increase the size of every &T thin ptr field to a fat ptr
            assert_eq!(def_a, def_b);

            let src_fields = def_a.variants[0].fields.iter();
            let dst_fields = def_b.variants[0].fields.iter();

            //let src = adt::MaybeSizedValue::sized(src);
            //let dst = adt::MaybeSizedValue::sized(dst);
            let src_ptr = match src {
                Value::ByRef(ptr) => ptr,
                _ => bug!("expected pointer, got {:?}", src),
            };

            let iter = src_fields.zip(dst_fields).enumerate();
            for (i, (src_f, dst_f)) in iter {
                let src_fty = monomorphize_field_ty(self.tcx, src_f, substs_a);
                let dst_fty = monomorphize_field_ty(self.tcx, dst_f, substs_b);
                // Zero-sized fields occupy no memory; skip them.
                if self.type_size(dst_fty)? == Some(0) {
                    continue;
                }
                let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
                let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
                let src_f_ptr = src_ptr.offset(src_field_offset);
                let dst_f_ptr = dest.offset(dst_field_offset);
                if src_fty == dst_fty {
                    // Field type unchanged: plain typed copy.
                    self.copy(src_f_ptr, dst_f_ptr, src_fty)?;
                } else {
                    // This is the field that actually unsizes; recurse on it.
                    self.unsize_into(Value::ByRef(src_f_ptr), src_fty, Lvalue::from_ptr(dst_f_ptr), dst_fty)?;
                }
            }
        }
        _ => bug!("unsize_into: invalid conversion: {:?} -> {:?}",
                  src_ty,
                  dest_ty),
    }
    Ok(())
}
2016-10-16 17:18:56 -06:00
fn dump_local(&self, lvalue: Lvalue<'tcx>) {
2016-10-18 21:45:48 -06:00
if let Lvalue::Local { frame, local } = lvalue {
if let Some(val) = self.stack[frame].get_local(local) {
match val {
Value::ByRef(ptr) => {
trace!("frame[{}] {:?}:", frame, local);
self.memory.dump(ptr.alloc_id);
}
Value::ByVal(a) => {
trace!("frame[{}] {:?}: {:?}", frame, local, a);
}
Value::ByValPair(a, b) => {
trace!("frame[{}] {:?}: ({:?}, {:?})", frame, local, a, b);
}
}
}
}
}
/// convenience function to ensure correct usage of globals and code-sharing with locals
pub fn modify_global<
F: FnOnce(&mut Self, Option<Value>) -> EvalResult<'tcx, Option<Value>>,
>(
&mut self,
cid: GlobalId<'tcx>,
f: F,
) -> EvalResult<'tcx, ()> {
let mut val = *self.globals.get(&cid).expect("global not cached");
if !val.mutable {
return Err(EvalError::ModifiedConstantMemory);
}
val.data = f(self, val.data)?;
*self.globals.get_mut(&cid).expect("already checked") = val;
Ok(())
}
/// convenience function to ensure correct usage of locals and code-sharing with globals
pub fn modify_local<
    F: FnOnce(&mut Self, Option<Value>) -> EvalResult<'tcx, Option<Value>>,
>(
    &mut self,
    frame: usize,
    local: mir::Local,
    f: F,
) -> EvalResult<'tcx, ()> {
    let old = self.stack[frame].get_local(local);
    // `None` from the mutator means the local should be deallocated.
    match f(self, old)? {
        Some(new_val) => self.stack[frame].set_local(local, new_val),
        None => self.deallocate_local(frame, local)?,
    }
    Ok(())
}
/// Frees any memory backing `local` and clears its slot in the frame.
pub fn deallocate_local(&mut self, frame: usize, local: mir::Local) -> EvalResult<'tcx, ()> {
    let old = self.stack[frame].get_local(local);
    if let Some(Value::ByRef(ptr)) = old {
        self.memory.deallocate(ptr)?;
    }
    // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
    self.stack[frame].locals[local.index() - 1] = None;
    Ok(())
}
}
impl<'tcx> Frame<'tcx> {
    /// Reads the current value of `local`, if it is initialized.
    pub fn get_local(&self, local: mir::Local) -> Option<Value> {
        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
        let slot = local.index() - 1;
        self.locals[slot]
    }

    /// Overwrites `local` with `value`.
    fn set_local(&mut self, local: mir::Local, value: Value) {
        // Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
        let slot = local.index() - 1;
        self.locals[slot] = Some(value);
    }
}
impl<'tcx> Lvalue<'tcx> {
pub fn from_ptr(ptr: Pointer) -> Self {
Lvalue::Ptr { ptr: ptr, extra: LvalueExtra::None }
}
fn to_ptr_and_extra(self) -> (Pointer, LvalueExtra) {
match self {
Lvalue::Ptr { ptr, extra } => (ptr, extra),
_ => bug!("to_ptr_and_extra: expected Lvalue::Ptr, got {:?}", self),
}
}
2016-09-27 10:14:53 +02:00
fn to_ptr(self) -> Pointer {
let (ptr, extra) = self.to_ptr_and_extra();
assert_eq!(extra, LvalueExtra::None);
ptr
2016-09-27 10:14:53 +02:00
}
fn elem_ty_and_len(self, ty: Ty<'tcx>) -> (Ty<'tcx>, u64) {
2016-09-28 18:22:09 +02:00
match ty.sty {
ty::TyArray(elem, n) => (elem, n as u64),
ty::TySlice(elem) => {
match self {
Lvalue::Ptr { extra: LvalueExtra::Length(len), .. } => (elem, len),
_ => bug!("elem_ty_and_len of a TySlice given non-slice lvalue: {:?}", self),
}
}
2016-09-28 18:22:09 +02:00
_ => bug!("elem_ty_and_len expected array or slice, got {:?}", ty),
}
}
2016-09-27 10:14:53 +02:00
}
pub fn eval_main<'a, 'tcx: 'a>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
2016-08-27 01:44:46 -06:00
def_id: DefId,
2016-11-26 17:54:19 -08:00
limits: ResourceLimits,
) {
2016-11-26 17:54:19 -08:00
let mut ecx = EvalContext::new(tcx, limits);
let mir = ecx.load_mir(def_id).expect("main function's MIR not found");
ecx.push_stack_frame(
def_id,
mir.span,
mir,
tcx.intern_substs(&[]),
2016-10-16 00:12:11 -06:00
Lvalue::from_ptr(Pointer::zst_ptr()),
StackPopCleanup::None,
).expect("could not allocate first stack frame");
loop {
match ecx.step() {
2016-10-18 21:51:36 -06:00
Ok(true) => {}
2016-07-05 13:17:40 +02:00
Ok(false) => return,
Err(e) => {
report(tcx, &ecx, e);
2016-07-05 13:17:40 +02:00
return;
}
}
}
}
/// Emits an evaluation error as a rustc diagnostic: the primary span points
/// at the statement/terminator that failed, and one note is attached per
/// frame of the interpreter's call stack (innermost first).
fn report(tcx: TyCtxt, ecx: &EvalContext, e: EvalError) {
    let frame = ecx.stack().last().expect("stackframe was empty");
    let block = &frame.mir.basic_blocks()[frame.block];
    // `stmt == statements.len()` means execution was at the block terminator.
    let span = if frame.stmt < block.statements.len() {
        block.statements[frame.stmt].source_info.span
    } else {
        block.terminator().source_info.span
    };
    let mut err = tcx.sess.struct_span_err(span, &e.to_string());
    for &Frame { def_id, substs, span, .. } in ecx.stack().iter().rev() {
        if tcx.def_key(def_id).disambiguated_data.data == DefPathData::ClosureExpr {
            err.span_note(span, "inside call to closure");
            continue;
        }
        // FIXME(solson): Find a way to do this without this Display impl hack.
        use rustc::util::ppaux;
        use std::fmt;
        struct Instance<'tcx>(DefId, &'tcx subst::Substs<'tcx>);
        impl<'tcx> ::std::panic::UnwindSafe for Instance<'tcx> {}
        impl<'tcx> ::std::panic::RefUnwindSafe for Instance<'tcx> {}
        impl<'tcx> fmt::Display for Instance<'tcx> {
            fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
                // Pretty-prints "path::to::fn::<substs>" for the note text.
                ppaux::parameterized(f, self.1, self.0, &[])
            }
        }
        err.span_note(span, &format!("inside call to {}", Instance(def_id, substs)));
    }
    err.emit();
}
/// Runs the MIR transformation pipeline Miri relies on before interpreting.
///
/// NOTE(review): pass order is load-bearing — each `SimplifyCfg` cleans up
/// after the pass named in its label, and `NoLandingPads` runs a second time
/// after `ElaborateDrops` (presumably to strip unwind edges that drop
/// elaboration introduces — confirm against rustc_mir).
pub fn run_mir_passes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
    let mut passes = ::rustc::mir::transform::Passes::new();
    passes.push_hook(Box::new(::rustc_mir::transform::dump_mir::DumpMir));
    passes.push_pass(Box::new(::rustc_mir::transform::no_landing_pads::NoLandingPads));
    passes.push_pass(Box::new(::rustc_mir::transform::simplify::SimplifyCfg::new("no-landing-pads")));
    passes.push_pass(Box::new(::rustc_mir::transform::erase_regions::EraseRegions));
    passes.push_pass(Box::new(::rustc_borrowck::ElaborateDrops));
    passes.push_pass(Box::new(::rustc_mir::transform::no_landing_pads::NoLandingPads));
    passes.push_pass(Box::new(::rustc_mir::transform::simplify::SimplifyCfg::new("elaborate-drops")));
    // Marker so dumped MIR shows the state handed to the interpreter.
    passes.push_pass(Box::new(::rustc_mir::transform::dump_mir::Marker("PreMiri")));
    passes.run_passes(tcx);
}
// TODO(solson): Upstream these methods into rustc::ty::layout.

/// Extension trait adding a byte-size accessor to `layout::Integer`.
trait IntegerExt {
    /// Returns the in-memory size of this integer type.
    fn size(self) -> Size;
}
impl IntegerExt for layout::Integer {
    fn size(self) -> Size {
        use rustc::ty::layout::Integer::*;
        // I1 still occupies a whole byte in memory, hence 8 bits like I8.
        let bits = match self {
            I1 | I8 => 8,
            I16 => 16,
            I32 => 32,
            I64 => 64,
        };
        Size::from_bits(bits)
    }
}
/// Substitutes `substs` into field `f`'s declared type and normalizes any
/// associated types in the result.
pub fn monomorphize_field_ty<'a, 'tcx: 'a>(tcx: TyCtxt<'a, 'tcx, 'tcx>, f: ty::FieldDef<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
    let field_ty = f.ty(tcx, substs);
    tcx.normalize_associated_type(&field_ty)
}