//! An interpreter for MIR used in CTFE and by miri

#[macro_export]
macro_rules! err {
    ($($tt:tt)*) => { Err($crate::mir::interpret::EvalErrorKind::$($tt)*.into()) };
}
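
// For example (illustrative expansion): `err!(OverflowingMath)`, as used by
// the pointer arithmetic below, becomes
// `Err($crate::mir::interpret::EvalErrorKind::OverflowingMath.into())`.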

mod error;
mod value;

pub use self::error::{EvalError, EvalResult, EvalErrorKind};
pub use self::value::{PrimVal, PrimValKind, Value, Pointer};

use std::collections::BTreeMap;
use std::fmt;
use mir;
use hir::def_id::DefId;
use ty::{self, TyCtxt};
use ty::layout::{self, Align, HasDataLayout};
use middle::region;
use std::iter;
use syntax::ast::Mutability;
use rustc_serialize::{Encoder, Decoder, Decodable, Encodable};

#[derive(Clone, Debug, PartialEq)]
pub enum Lock {
    NoLock,
    WriteLock(DynamicLifetime),
    /// This should never be empty -- that would mean a read lock is held
    /// with nobody left to release it...
    ReadLock(Vec<DynamicLifetime>),
}

#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DynamicLifetime {
    pub frame: usize,
    pub region: Option<region::Scope>, // "None" indicates "until the function ends"
}
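
// For example (illustrative): a write lock held by stack frame 3 until the
// end of its function would be
// `Lock::WriteLock(DynamicLifetime { frame: 3, region: None })`.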

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum AccessKind {
    Read,
    Write,
}

/// Uniquely identifies a specific constant or static.
#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct GlobalId<'tcx> {
    /// For a constant or static, the `Instance` of the item itself.
    /// For a promoted global, the `Instance` of the function it belongs to.
    pub instance: ty::Instance<'tcx>,

    /// The index for promoted globals within their function's `Mir`.
    pub promoted: Option<mir::Promoted>,
}
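
// For example (illustrative): a `const FOO` is identified by
// `GlobalId { instance: <Instance of FOO>, promoted: None }`, while the n-th
// promoted rvalue within some function `f` is
// `GlobalId { instance: <Instance of f>, promoted: Some(n) }`.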

////////////////////////////////////////////////////////////////////////////////
// Pointer arithmetic
////////////////////////////////////////////////////////////////////////////////

pub trait PointerArithmetic: layout::HasDataLayout {
    // These are not supposed to be overridden.

    /// Truncate the given value to the pointer size; also return whether there was an overflow.
    fn truncate_to_ptr(self, val: u128) -> (u64, bool) {
        let max_ptr_plus_1 = 1u128 << self.data_layout().pointer_size.bits();
        ((val % max_ptr_plus_1) as u64, val >= max_ptr_plus_1)
    }
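
    // For example, on a target with 16-bit pointers (illustrative numbers):
    //
    //     truncate_to_ptr(0x1_2345) == (0x2345, true)  // did not fit
    //     truncate_to_ptr(0x2345)   == (0x2345, false) // fits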

    // Overflow checking only works properly on the range from -u64 to +u64.
    fn overflowing_signed_offset(self, val: u64, i: i128) -> (u64, bool) {
        // FIXME: is it possible to over/underflow here?
        if i < 0 {
            // trickery to ensure that i64::min_value() works fine;
            // this formula only works for strictly negative values, it panics for zero!
            let n = u64::max_value() - (i as u64) + 1;
            val.overflowing_sub(n)
        } else {
            self.overflowing_offset(val, i as u64)
        }
    }
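
    // For example, for i == -1 the cast yields (i as u64) == u64::max_value(),
    // so n == u64::max_value() - u64::max_value() + 1 == 1, and the result is
    // val.overflowing_sub(1).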

    fn overflowing_offset(self, val: u64, i: u64) -> (u64, bool) {
        let (res, over1) = val.overflowing_add(i);
        let (res, over2) = self.truncate_to_ptr(res as u128);
        (res, over1 || over2)
    }

    fn signed_offset<'tcx>(self, val: u64, i: i64) -> EvalResult<'tcx, u64> {
        let (res, over) = self.overflowing_signed_offset(val, i as i128);
        if over { err!(OverflowingMath) } else { Ok(res) }
    }

    fn offset<'tcx>(self, val: u64, i: u64) -> EvalResult<'tcx, u64> {
        let (res, over) = self.overflowing_offset(val, i);
        if over { err!(OverflowingMath) } else { Ok(res) }
    }

    fn wrapping_signed_offset(self, val: u64, i: i64) -> u64 {
        self.overflowing_signed_offset(val, i as i128).0
    }
}

impl<T: layout::HasDataLayout> PointerArithmetic for T {}
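
/// A pointer into an `Allocation`: the allocation's id plus a byte offset
/// into it.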
#[derive(Copy, Clone, Debug, Eq, PartialEq, RustcEncodable, RustcDecodable, Hash)]
pub struct MemoryPointer {
    pub alloc_id: AllocId,
    pub offset: u64,
}

impl<'tcx> MemoryPointer {
    pub fn new(alloc_id: AllocId, offset: u64) -> Self {
        MemoryPointer { alloc_id, offset }
    }

    pub(crate) fn wrapping_signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> Self {
        MemoryPointer::new(
            self.alloc_id,
            cx.data_layout().wrapping_signed_offset(self.offset, i),
        )
    }

    pub fn overflowing_signed_offset<C: HasDataLayout>(self, i: i128, cx: C) -> (Self, bool) {
        let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset, i);
        (MemoryPointer::new(self.alloc_id, res), over)
    }

    pub(crate) fn signed_offset<C: HasDataLayout>(self, i: i64, cx: C) -> EvalResult<'tcx, Self> {
        Ok(MemoryPointer::new(
            self.alloc_id,
            cx.data_layout().signed_offset(self.offset, i)?,
        ))
    }

    pub fn overflowing_offset<C: HasDataLayout>(self, i: u64, cx: C) -> (Self, bool) {
        let (res, over) = cx.data_layout().overflowing_offset(self.offset, i);
        (MemoryPointer::new(self.alloc_id, res), over)
    }

    pub fn offset<C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
        Ok(MemoryPointer::new(
            self.alloc_id,
            cx.data_layout().offset(self.offset, i)?,
        ))
    }
}
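
// The methods above delegate all arithmetic on `offset` to
// `PointerArithmetic` and leave `alloc_id` untouched. For example
// (illustrative):
//
//     let p = MemoryPointer::new(id, 8);
//     let q = p.offset(4, cx)?; // q.offset == 12, q.alloc_id == id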

#[derive(Copy, Clone, Default, Eq, Hash, Ord, PartialEq, PartialOrd, Debug)]
pub struct AllocId(pub u64);

impl ::rustc_serialize::UseSpecializedEncodable for AllocId {}
impl ::rustc_serialize::UseSpecializedDecodable for AllocId {}

#[derive(RustcDecodable, RustcEncodable)]
enum AllocKind {
    Alloc,
    Fn,
    ExternStatic,
}
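
// The encoded format (as implemented below) is the `AllocKind` tag followed
// by its payload:
//
//     Alloc        => the `Allocation`, then the `Option<DefId>` of the
//                     static it is the root allocation of (if any)
//     Fn           => the function's `ty::Instance`
//     ExternStatic => the `DefId` of the extern static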

pub fn specialized_encode_alloc_id<
    'a, 'tcx,
    E: Encoder,
>(
    encoder: &mut E,
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    alloc_id: AllocId,
) -> Result<(), E::Error> {
    if let Some(alloc) = tcx.interpret_interner.get_alloc(alloc_id) {
        trace!("encoding {:?} with {:#?}", alloc_id, alloc);
        AllocKind::Alloc.encode(encoder)?;
        alloc.encode(encoder)?;
        // encode whether this allocation is the root allocation of a static
        tcx.interpret_interner
            .get_corresponding_static_def_id(alloc_id)
            .encode(encoder)?;
    } else if let Some(fn_instance) = tcx.interpret_interner.get_fn(alloc_id) {
        trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
        AllocKind::Fn.encode(encoder)?;
        fn_instance.encode(encoder)?;
    } else if let Some(did) = tcx.interpret_interner.get_corresponding_static_def_id(alloc_id) {
        // extern "C" statics don't have allocations, so just encode their `DefId`
        AllocKind::ExternStatic.encode(encoder)?;
        did.encode(encoder)?;
    } else {
        bug!("alloc id without corresponding allocation: {}", alloc_id);
    }
    Ok(())
}

pub fn specialized_decode_alloc_id<
    'a, 'tcx,
    D: Decoder,
    CACHE: FnOnce(&mut D, AllocId),
>(
    decoder: &mut D,
    tcx: TyCtxt<'a, 'tcx, 'tcx>,
    cache: CACHE,
) -> Result<AllocId, D::Error> {
    match AllocKind::decode(decoder)? {
        AllocKind::Alloc => {
            let alloc_id = tcx.interpret_interner.reserve();
            trace!("creating alloc id {:?}", alloc_id);
            // insert early to allow recursive allocs (the allocation's
            // relocations may refer back to this very alloc id)
            cache(decoder, alloc_id);

            let allocation = Allocation::decode(decoder)?;
            trace!("decoded alloc {:?} {:#?}", alloc_id, allocation);
            let allocation = tcx.intern_const_alloc(allocation);
            tcx.interpret_interner.intern_at_reserved(alloc_id, allocation);

            if let Some(glob) = Option::<DefId>::decode(decoder)? {
                tcx.interpret_interner.cache(glob, alloc_id);
            }

            Ok(alloc_id)
        },
        AllocKind::Fn => {
            trace!("creating fn alloc id");
            let instance = ty::Instance::decode(decoder)?;
            trace!("decoded fn alloc instance: {:?}", instance);
            let id = tcx.interpret_interner.create_fn_alloc(instance);
            trace!("created fn alloc id: {:?}", id);
            cache(decoder, id);
            Ok(id)
        },
        AllocKind::ExternStatic => {
            trace!("creating extern static alloc id");
            let did = DefId::decode(decoder)?;
            let alloc_id = tcx.interpret_interner.reserve();
            cache(decoder, alloc_id);
            tcx.interpret_interner.cache(did, alloc_id);
            Ok(alloc_id)
        },
    }
}

impl fmt::Display for AllocId {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "{}", self.0)
    }
}

#[derive(Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct Allocation {
    /// The actual bytes of the allocation.
    /// Note that the bytes of a pointer represent the offset of the pointer.
    pub bytes: Vec<u8>,
    /// Maps from byte addresses to allocations.
    /// Only the first byte of a pointer is inserted into the map.
    pub relocations: BTreeMap<u64, AllocId>,
    /// Denotes undefined memory. Reading from undefined memory is forbidden in miri.
    pub undef_mask: UndefMask,
    /// The alignment of the allocation, used to detect unaligned reads.
    pub align: Align,
    /// Whether the allocation (of a static) should be put into mutable memory when translating.
    ///
    /// Only happens for `static mut` or `static` with interior mutability.
    pub runtime_mutability: Mutability,
}
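
// For example (illustrative): storing a `MemoryPointer { alloc_id, offset }`
// at byte position `x` of an allocation writes the `offset` value into
// `bytes` starting at `x` and records `relocations[x] = alloc_id`.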

impl Allocation {
    pub fn from_bytes(slice: &[u8]) -> Self {
        let mut undef_mask = UndefMask::new(0);
        undef_mask.grow(slice.len() as u64, true);
        Self {
            bytes: slice.to_owned(),
            relocations: BTreeMap::new(),
            undef_mask,
            align: Align::from_bytes(1, 1).unwrap(),
            runtime_mutability: Mutability::Immutable,
        }
    }
}
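
// A minimal usage sketch (illustrative): `from_bytes` produces a
// byte-aligned allocation whose bytes are all defined and which has no
// relocations:
//
//     let alloc = Allocation::from_bytes(b"hello");
//     assert!(alloc.undef_mask.is_range_defined(0, 5));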

////////////////////////////////////////////////////////////////////////////////
// Undefined byte tracking
////////////////////////////////////////////////////////////////////////////////

type Block = u64;
const BLOCK_SIZE: u64 = 64;
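
/// A bitmask with one bit per tracked byte: `true` means "defined". Bit `i`
/// lives in `blocks[i / BLOCK_SIZE]` at bit position `i % BLOCK_SIZE`
/// (see `bit_index` below).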
#[derive(Clone, Debug, Eq, PartialEq, Hash, RustcEncodable, RustcDecodable)]
pub struct UndefMask {
    blocks: Vec<Block>,
    len: u64,
}

impl_stable_hash_for!(struct mir::interpret::UndefMask{blocks, len});

impl UndefMask {
    pub fn new(size: u64) -> Self {
        let mut m = UndefMask {
            blocks: vec![],
            len: 0,
        };
        m.grow(size, false);
        m
    }

    /// Check whether the range `start..end` (end-exclusive) is entirely defined.
    pub fn is_range_defined(&self, start: u64, end: u64) -> bool {
        if end > self.len {
            return false;
        }
        for i in start..end {
            if !self.get(i) {
                return false;
            }
        }
        true
    }

    pub fn set_range(&mut self, start: u64, end: u64, new_state: bool) {
        let len = self.len;
        if end > len {
            self.grow(end - len, new_state);
        }
        self.set_range_inbounds(start, end, new_state);
    }

    pub fn set_range_inbounds(&mut self, start: u64, end: u64, new_state: bool) {
        for i in start..end {
            self.set(i, new_state);
        }
    }

    pub fn get(&self, i: u64) -> bool {
        let (block, bit) = bit_index(i);
        (self.blocks[block] & 1 << bit) != 0
    }

    pub fn set(&mut self, i: u64, new_state: bool) {
        let (block, bit) = bit_index(i);
        if new_state {
            self.blocks[block] |= 1 << bit;
        } else {
            self.blocks[block] &= !(1 << bit);
        }
    }

    pub fn grow(&mut self, amount: u64, new_state: bool) {
        let unused_trailing_bits = self.blocks.len() as u64 * BLOCK_SIZE - self.len;
        if amount > unused_trailing_bits {
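            // Allocate enough fresh blocks for the new bits. This can
            // over-allocate by one block (e.g. when `amount` is a multiple of
            // `BLOCK_SIZE`); the spare bits simply remain unused.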
            let additional_blocks = amount / BLOCK_SIZE + 1;
            assert_eq!(additional_blocks as usize as u64, additional_blocks);
            self.blocks.extend(
                iter::repeat(0).take(additional_blocks as usize),
            );
        }
        let start = self.len;
        self.len += amount;
        self.set_range_inbounds(start, start + amount, new_state);
    }
}
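
/// Compute the block index and bit position of bit `bits`. For example,
/// `bit_index(70) == (1, 6)`: bit 70 is bit 6 of block 1.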
fn bit_index(bits: u64) -> (usize, usize) {
    let a = bits / BLOCK_SIZE;
    let b = bits % BLOCK_SIZE;
    assert_eq!(a as usize as u64, a);
    assert_eq!(b as usize as u64, b);
    (a as usize, b as usize)
}