// rust/src/librustc/mir/interpret/eval_context.rs

use std::collections::{HashMap, HashSet};
use std::fmt::Write;
use hir::def_id::DefId;
use hir::map::definitions::DefPathData;
use middle::const_val::ConstVal;
use middle::region;
use mir;
use traits::Reveal;
use ty::layout::{self, Size, Align, HasDataLayout, LayoutOf, TyLayout};
use ty::subst::{Subst, Substs, Kind};
use ty::{self, Ty, TyCtxt};
use rustc_data_structures::indexed_vec::Idx;
use syntax::codemap::{self, DUMMY_SP};
use syntax::ast::Mutability;
use super::{EvalError, EvalResult, EvalErrorKind, GlobalId, Place, PlaceExtra, Memory,
MemoryPointer, HasMemory, MemoryKind, operator, PrimVal, PrimValKind, Value, Pointer,
ValidationQuery, Machine};
pub struct EvalContext<'a, 'tcx: 'a, M: Machine<'tcx>> {
/// Stores data required by the `Machine`
pub machine_data: M::Data,
/// The results of the type checker, from rustc.
pub tcx: TyCtxt<'a, 'tcx, 'tcx>,
/// The virtual memory system.
pub memory: Memory<'a, 'tcx, M>,
/// Places that were suspended by the validation subsystem, and will be recovered later
pub(crate) suspended: HashMap<DynamicLifetime, Vec<ValidationQuery<'tcx>>>,
/// The virtual call stack.
pub(crate) stack: Vec<Frame<'tcx>>,
/// The maximum number of stack frames allowed
pub(crate) stack_limit: usize,
/// The maximum number of operations that may be executed.
/// This prevents infinite loops and huge computations from freezing up const eval.
/// Remove once halting problem is solved.
pub(crate) steps_remaining: u64,
}
/// A stack frame.
pub struct Frame<'tcx> {
////////////////////////////////////////////////////////////////////////////////
// Function and callsite information
////////////////////////////////////////////////////////////////////////////////
/// The MIR for the function called on this frame.
pub mir: &'tcx mir::Mir<'tcx>,
/// The def_id and substs of the current function
pub instance: ty::Instance<'tcx>,
/// The span of the call site.
pub span: codemap::Span,
////////////////////////////////////////////////////////////////////////////////
// Return place and locals
////////////////////////////////////////////////////////////////////////////////
/// The block to return to when returning from the current stack frame
pub return_to_block: StackPopCleanup,
/// The location where the result of the current stack frame should be written to.
pub return_place: Place,
/// The list of locals for this stack frame, stored in order as
/// `[arguments..., variables..., temporaries...]`. The locals are stored as `Option<Value>`s.
/// `None` represents a local that is currently dead, while a live local
/// can either directly contain `PrimVal` or refer to some part of an `Allocation`.
///
/// Before being initialized, arguments are `Value::ByVal(PrimVal::Undef)` and other locals are `None`.
pub locals: Vec<Option<Value>>,
////////////////////////////////////////////////////////////////////////////////
// Current position within the function
////////////////////////////////////////////////////////////////////////////////
    /// The block that is currently being executed (or will be executed after the above call
    /// stacks return).
pub block: mir::BasicBlock,
    /// The index of the currently evaluated statement.
pub stmt: usize,
}
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub enum StackPopCleanup {
    /// The stack frame existed to compute the initial value of a static/constant; make sure it
    /// isn't modifiable afterwards, in case of constants.
    /// In the case of `static mut`, mark the memory to ensure it's never marked as immutable
    /// through references or deallocated.
MarkStatic(Mutability),
    /// A regular stack frame added due to a function call will need to get forwarded to the next
    /// block
Goto(mir::BasicBlock),
/// The main function and diverging functions have nowhere to return to
None,
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct DynamicLifetime {
pub frame: usize,
pub region: Option<region::Scope>, // "None" indicates "until the function ends"
}
#[derive(Copy, Clone, Debug)]
pub struct ResourceLimits {
pub memory_size: u64,
pub step_limit: u64,
pub stack_limit: usize,
}
impl Default for ResourceLimits {
fn default() -> Self {
ResourceLimits {
memory_size: 100 * 1024 * 1024, // 100 MB
step_limit: 1_000_000,
stack_limit: 100,
}
}
}
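
// A minimal usage sketch (illustrative only; `MyMachine` is a hypothetical `Machine`
// impl with `Data = ()` and `MemoryData = ()`, which this file does not define):
//
//     let limits = ResourceLimits { step_limit: 10_000, ..Default::default() };
//     let ecx = EvalContext::<MyMachine>::new(tcx, limits, (), ());
//
// Struct-update syntax keeps the defaults for any limit that is not overridden.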
#[derive(Copy, Clone, Debug)]
pub struct TyAndPacked<'tcx> {
pub ty: Ty<'tcx>,
pub packed: bool,
}
#[derive(Copy, Clone, Debug)]
pub struct ValTy<'tcx> {
pub value: Value,
pub ty: Ty<'tcx>,
}
impl<'tcx> ::std::ops::Deref for ValTy<'tcx> {
type Target = Value;
fn deref(&self) -> &Value {
&self.value
}
}
#[derive(Copy, Clone, Debug)]
pub struct PtrAndAlign {
pub ptr: Pointer,
/// Remember whether this place is *supposed* to be aligned.
pub aligned: bool,
}
impl PtrAndAlign {
pub fn to_ptr<'tcx>(self) -> EvalResult<'tcx, MemoryPointer> {
self.ptr.to_ptr()
}
pub fn offset<'tcx, C: HasDataLayout>(self, i: u64, cx: C) -> EvalResult<'tcx, Self> {
Ok(PtrAndAlign {
ptr: self.ptr.offset(i, cx)?,
aligned: self.aligned,
})
}
}
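
// Sketch of how `offset` threads the alignment flag through (illustrative; `ptr`
// and `ecx` stand in for values already at hand):
//
//     let p = PtrAndAlign { ptr, aligned: false };
//     let q = p.offset(8, &ecx)?; // q.aligned is still false
//
// An unaligned place stays unaligned no matter how far it is offset.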
impl<'a, 'tcx, M: Machine<'tcx>> HasDataLayout for &'a EvalContext<'a, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> HasDataLayout
for &'c &'b mut EvalContext<'a, 'tcx, M> {
#[inline]
fn data_layout(&self) -> &layout::TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'tcx, M: Machine<'tcx>> layout::HasTyCtxt<'tcx> for &'a EvalContext<'a, 'tcx, M> {
#[inline]
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
self.tcx
}
}
impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> layout::HasTyCtxt<'tcx>
for &'c &'b mut EvalContext<'a, 'tcx, M> {
#[inline]
fn tcx<'d>(&'d self) -> TyCtxt<'d, 'tcx, 'tcx> {
self.tcx
}
}
impl<'a, 'tcx, M: Machine<'tcx>> LayoutOf<Ty<'tcx>> for &'a EvalContext<'a, 'tcx, M> {
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(self.tcx, M::param_env(self)).layout_of(ty)
.map_err(|layout| EvalErrorKind::Layout(layout).into())
}
}
impl<'c, 'b, 'a, 'tcx, M: Machine<'tcx>> LayoutOf<Ty<'tcx>>
for &'c &'b mut EvalContext<'a, 'tcx, M> {
type TyLayout = EvalResult<'tcx, TyLayout<'tcx>>;
#[inline]
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
(&**self).layout_of(ty)
}
}
impl<'a, 'tcx, M: Machine<'tcx>> EvalContext<'a, 'tcx, M> {
pub fn new(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
limits: ResourceLimits,
machine_data: M::Data,
memory_data: M::MemoryData,
) -> Self {
EvalContext {
machine_data,
tcx,
memory: Memory::new(tcx, limits.memory_size, memory_data),
suspended: HashMap::new(),
stack: Vec::new(),
stack_limit: limits.stack_limit,
steps_remaining: limits.step_limit,
}
}
pub fn alloc_ptr(&mut self, ty: Ty<'tcx>) -> EvalResult<'tcx, MemoryPointer> {
let substs = self.substs();
self.alloc_ptr_with_substs(ty, substs)
}
pub fn alloc_ptr_with_substs(
&mut self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, MemoryPointer> {
let size = self.type_size_with_substs(ty, substs)?.expect(
"cannot alloc memory for unsized type",
);
let align = self.type_align_with_substs(ty, substs)?;
self.memory.allocate(size, align, Some(MemoryKind::Stack))
}
pub fn memory(&self) -> &Memory<'a, 'tcx, M> {
&self.memory
}
pub fn memory_mut(&mut self) -> &mut Memory<'a, 'tcx, M> {
&mut self.memory
}
pub fn stack(&self) -> &[Frame<'tcx>] {
&self.stack
}
#[inline]
pub fn cur_frame(&self) -> usize {
assert!(self.stack.len() > 0);
self.stack.len() - 1
}
pub fn str_to_value(&mut self, s: &str) -> EvalResult<'tcx, Value> {
let ptr = self.memory.allocate_cached(s.as_bytes());
Ok(Value::ByValPair(
PrimVal::Ptr(ptr),
PrimVal::from_u128(s.len() as u128),
))
}
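
    // Sketch of the resulting representation (illustrative): for `"hi"`,
    // `str_to_value` yields a fat value of data pointer plus byte length,
    //
    //     Value::ByValPair(PrimVal::Ptr(<alloc of b"hi">), PrimVal::from_u128(2))
    //
    // which matches how `&str` is laid out as a (ptr, len) pair.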
pub(super) fn const_to_value(&mut self, const_val: &ConstVal<'tcx>) -> EvalResult<'tcx, Value> {
use middle::const_val::ConstVal::*;
let primval = match *const_val {
Integral(const_int) => PrimVal::Bytes(const_int.to_u128_unchecked()),
Float(val) => PrimVal::Bytes(val.bits),
Bool(b) => PrimVal::from_bool(b),
Char(c) => PrimVal::from_char(c),
Str(ref s) => return self.str_to_value(s),
ByteStr(ref bs) => {
let ptr = self.memory.allocate_cached(bs.data);
PrimVal::Ptr(ptr)
}
Unevaluated(def_id, substs) => {
let instance = self.resolve(def_id, substs)?;
let cid = GlobalId {
instance,
promoted: None,
};
                return Ok(Value::ByRef(
                    self.tcx.interpret_interner.borrow()
                        .get_cached(cid)
                        .expect("static/const not cached"),
                ));
}
Aggregate(..) |
Variant(_) => bug!("should not have aggregate or variant constants in MIR"),
// function items are zero sized and thus have no readable value
Function(..) => PrimVal::Undef,
};
Ok(Value::ByVal(primval))
}
    pub(super) fn resolve(
        &self,
        def_id: DefId,
        substs: &'tcx Substs<'tcx>,
    ) -> EvalResult<'tcx, ty::Instance<'tcx>> {
let substs = self.tcx.trans_apply_param_substs(self.substs(), &substs);
ty::Instance::resolve(
self.tcx,
M::param_env(self),
def_id,
substs,
).ok_or(EvalErrorKind::TypeckError.into()) // turn error prop into a panic to expose associated type in const issue
}
pub(super) fn type_is_sized(&self, ty: Ty<'tcx>) -> bool {
ty.is_sized(self.tcx, M::param_env(self), DUMMY_SP)
}
pub fn load_mir(
&self,
instance: ty::InstanceDef<'tcx>,
) -> EvalResult<'tcx, &'tcx mir::Mir<'tcx>> {
// do not continue if typeck errors occurred (can only occur in local crate)
let did = instance.def_id();
if did.is_local() && self.tcx.has_typeck_tables(did) && self.tcx.typeck_tables_of(did).tainted_by_errors {
return err!(TypeckError);
}
trace!("load mir {:?}", instance);
match instance {
ty::InstanceDef::Item(def_id) => {
self.tcx.maybe_optimized_mir(def_id).ok_or_else(|| {
EvalErrorKind::NoMirFor(self.tcx.item_path_str(def_id)).into()
})
}
_ => Ok(self.tcx.instance_mir(instance)),
}
}
pub fn monomorphize(&self, ty: Ty<'tcx>, substs: &'tcx Substs<'tcx>) -> Ty<'tcx> {
// miri doesn't care about lifetimes, and will choke on some crazy ones
// let's simply get rid of them
let without_lifetimes = self.tcx.erase_regions(&ty);
let substituted = without_lifetimes.subst(self.tcx, substs);
let substituted = self.tcx.fully_normalize_monormophic_ty(&substituted);
substituted
}
    /// Return the size and alignment of the value at the given type.
/// Note that the value does not matter if the type is sized. For unsized types,
/// the value has to be a fat pointer, and we only care about the "extra" data in it.
pub fn size_and_align_of_dst(
&mut self,
ty: ty::Ty<'tcx>,
value: Value,
) -> EvalResult<'tcx, (Size, Align)> {
let layout = self.type_layout(ty)?;
if !layout.is_unsized() {
Ok(layout.size_and_align())
} else {
match ty.sty {
ty::TyAdt(..) | ty::TyTuple(..) => {
// First get the size of all statically known fields.
                    // Don't use type_of::sizing_type_of because that expects `ty` to be sized,
// and it also rounds up to alignment, which we want to avoid,
// as the unsized field's alignment could be smaller.
assert!(!ty.is_simd());
debug!("DST {} layout: {:?}", ty, layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align;
debug!(
"DST {} statically sized prefix size: {:?} align: {:?}",
ty,
sized_size,
sized_align
);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
let (unsized_size, unsized_align) = match ty.sty {
ty::TyAdt(def, substs) => {
let last_field = def.struct_variant().fields.last().unwrap();
let field_ty = self.field_ty(substs, last_field);
self.size_and_align_of_dst(field_ty, value)?
}
ty::TyTuple(ref types, _) => {
let field_ty = types.last().unwrap();
let field_ty = self.tcx.fully_normalize_monormophic_ty(field_ty);
self.size_and_align_of_dst(field_ty, value)?
}
_ => bug!("We already checked that we know this type"),
};
                    // FIXME (#26403, #27023): We should be adding padding
                    // to `sized_size` (to accommodate the `unsized_align`
                    // required of the unsized field that follows) before
                    // summing it with `unsized_size`. (Note that since #26403
                    // is unfixed, we do not yet add the necessary padding
                    // here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
let size = sized_size + unsized_size;
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = sized_align.max(unsized_align);
// Issue #27023: must add any necessary padding to `size`
// (to make it a multiple of `align`) before returning it.
//
// Namely, the returned size should be, in C notation:
//
// `size + ((size & (align-1)) ? align : 0)`
//
// emulated via the semi-standard fast bit trick:
//
// `(size + (align-1)) & -align`
Ok((size.abi_align(align), align))
}
ty::TyDynamic(..) => {
let (_, vtable) = value.into_ptr_vtable_pair(&mut self.memory)?;
// the second entry in the vtable is the dynamic size of the object.
self.read_size_and_align_from_vtable(vtable)
}
ty::TySlice(_) | ty::TyStr => {
let (elem_size, align) = layout.field(&self, 0)?.size_and_align();
let (_, len) = value.into_slice(&mut self.memory)?;
Ok((elem_size * len, align))
}
_ => bug!("size_of_val::<{:?}>", ty),
}
}
}
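
    // Worked example (illustrative): for a DST like `struct S { a: u16, tail: [u8] }`
    // with a runtime tail length of 5, the statically sized prefix ends at the
    // tail's offset (2), and the tail contributes size 5 and align 1, so
    //
    //     size  = abi_align(2 + 5, 2) = 8
    //     align = max(2, 1)           = 2
    //
    // i.e. the total size is padded up to a multiple of the final alignment.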
/// Returns the normalized type of a struct field
fn field_ty(&self, param_substs: &Substs<'tcx>, f: &ty::FieldDef) -> ty::Ty<'tcx> {
self.tcx.fully_normalize_monormophic_ty(
&f.ty(self.tcx, param_substs),
)
}
pub fn type_size(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<u64>> {
self.type_size_with_substs(ty, self.substs())
}
pub fn type_align(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
self.type_align_with_substs(ty, self.substs())
}
pub(super) fn type_size_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, Option<u64>> {
let layout = self.type_layout_with_substs(ty, substs)?;
if layout.is_unsized() {
Ok(None)
} else {
Ok(Some(layout.size.bytes()))
}
}
pub(super) fn type_align_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, u64> {
self.type_layout_with_substs(ty, substs).map(|layout| {
layout.align.abi()
})
}
pub fn type_layout(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, TyLayout<'tcx>> {
self.type_layout_with_substs(ty, self.substs())
}
pub(super) fn type_layout_with_substs(
&self,
ty: Ty<'tcx>,
substs: &'tcx Substs<'tcx>,
) -> EvalResult<'tcx, TyLayout<'tcx>> {
// TODO(solson): Is this inefficient? Needs investigation.
let ty = self.monomorphize(ty, substs);
self.layout_of(ty)
}
pub fn push_stack_frame(
&mut self,
instance: ty::Instance<'tcx>,
span: codemap::Span,
mir: &'tcx mir::Mir<'tcx>,
return_place: Place,
return_to_block: StackPopCleanup,
) -> EvalResult<'tcx> {
::log_settings::settings().indentation += 1;
/// Return the set of locals that have a storage annotation anywhere
fn collect_storage_annotations<'tcx>(mir: &'tcx mir::Mir<'tcx>) -> HashSet<mir::Local> {
use mir::StatementKind::*;
let mut set = HashSet::new();
for block in mir.basic_blocks() {
for stmt in block.statements.iter() {
match stmt.kind {
StorageLive(local) |
StorageDead(local) => {
set.insert(local);
}
_ => {}
}
}
}
set
}
// Subtract 1 because `local_decls` includes the ReturnMemoryPointer, but we don't store a local
// `Value` for that.
let num_locals = mir.local_decls.len() - 1;
let locals = {
let annotated_locals = collect_storage_annotations(mir);
let mut locals = vec![None; num_locals];
for i in 0..num_locals {
let local = mir::Local::new(i + 1);
if !annotated_locals.contains(&local) {
locals[i] = Some(Value::ByVal(PrimVal::Undef));
}
}
locals
};
self.stack.push(Frame {
mir,
block: mir::START_BLOCK,
return_to_block,
return_place,
locals,
span,
instance,
stmt: 0,
});
self.memory.cur_frame = self.cur_frame();
if self.stack.len() > self.stack_limit {
err!(StackFrameLimitReached)
} else {
Ok(())
}
}
pub(super) fn pop_stack_frame(&mut self) -> EvalResult<'tcx> {
::log_settings::settings().indentation -= 1;
self.end_region(None)?;
let frame = self.stack.pop().expect(
"tried to pop a stack frame, but there were none",
);
if !self.stack.is_empty() {
// TODO: Is this the correct time to start considering these accesses as originating from the returned-to stack frame?
self.memory.cur_frame = self.cur_frame();
}
match frame.return_to_block {
StackPopCleanup::MarkStatic(mutable) => {
if let Place::Ptr { ptr, .. } = frame.return_place {
// FIXME: to_ptr()? might be too extreme here, static zsts might reach this under certain conditions
self.memory.mark_static_initalized(
ptr.to_ptr()?.alloc_id,
mutable,
)?
} else {
bug!("StackPopCleanup::MarkStatic on: {:?}", frame.return_place);
}
}
StackPopCleanup::Goto(target) => self.goto_block(target),
StackPopCleanup::None => {}
}
// deallocate all locals that are backed by an allocation
for local in frame.locals {
self.deallocate_local(local)?;
}
Ok(())
}
pub fn deallocate_local(&mut self, local: Option<Value>) -> EvalResult<'tcx> {
if let Some(Value::ByRef(ptr)) = local {
trace!("deallocating local");
let ptr = ptr.to_ptr()?;
self.memory.dump_alloc(ptr.alloc_id);
self.memory.deallocate_local(ptr)?;
};
Ok(())
}
/// Evaluate an assignment statement.
///
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
/// type writes its results directly into the memory specified by the place.
pub(super) fn eval_rvalue_into_place(
&mut self,
rvalue: &mir::Rvalue<'tcx>,
place: &mir::Place<'tcx>,
) -> EvalResult<'tcx> {
let dest = self.eval_place(place)?;
let dest_ty = self.place_ty(place);
use mir::Rvalue::*;
match *rvalue {
Use(ref operand) => {
let value = self.eval_operand(operand)?.value;
let valty = ValTy {
value,
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
BinaryOp(bin_op, ref left, ref right) => {
let left = self.eval_operand(left)?;
let right = self.eval_operand(right)?;
if self.intrinsic_overflowing(
bin_op,
left,
right,
dest,
dest_ty,
)?
{
// There was an overflow in an unchecked binop. Right now, we consider this an error and bail out.
// The rationale is that the reason rustc emits unchecked binops in release mode (vs. the checked binops
// it emits in debug mode) is performance, but it doesn't cost us any performance in miri.
// If, however, the compiler ever starts transforming unchecked intrinsics into unchecked binops,
// we have to go back to just ignoring the overflow here.
return err!(OverflowingMath);
}
}
CheckedBinaryOp(bin_op, ref left, ref right) => {
let left = self.eval_operand(left)?;
let right = self.eval_operand(right)?;
self.intrinsic_with_overflow(
bin_op,
left,
right,
dest,
dest_ty,
)?;
}
UnaryOp(un_op, ref operand) => {
let val = self.eval_operand_to_primval(operand)?;
let kind = self.ty_to_primval_kind(dest_ty)?;
self.write_primval(
dest,
operator::unary_op(un_op, val, kind)?,
dest_ty,
)?;
}
Aggregate(ref kind, ref operands) => {
self.inc_step_counter_and_check_limit(operands.len() as u64)?;
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, active_field_index) => {
self.write_discriminant_value(dest_ty, dest, variant_index)?;
if adt_def.is_enum() {
(self.place_downcast(dest, variant_index)?, active_field_index)
} else {
(dest, active_field_index)
}
}
_ => (dest, None)
};
let layout = self.type_layout(dest_ty)?;
for (i, operand) in operands.iter().enumerate() {
let value = self.eval_operand(operand)?;
// Ignore zero-sized fields.
if !self.type_layout(value.ty)?.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let (field_dest, _) = self.place_field(dest, mir::Field::new(field_index), layout)?;
self.write_value(value, field_dest)?;
}
}
}
Repeat(ref operand, _) => {
let (elem_ty, length) = match dest_ty.sty {
ty::TyArray(elem_ty, n) => (elem_ty, n.val.to_const_int().unwrap().to_u64().unwrap()),
_ => {
bug!(
"tried to assign array-repeat to non-array type {:?}",
dest_ty
)
}
};
let elem_size = self.type_size(elem_ty)?.expect(
"repeat element type must be sized",
);
let value = self.eval_operand(operand)?.value;
let dest = Pointer::from(self.force_allocation(dest)?.to_ptr()?);
// FIXME: speed up repeat filling
for i in 0..length {
let elem_dest = dest.offset(i * elem_size, &self)?;
self.write_value_to_ptr(value, elem_dest, elem_ty)?;
}
}
Len(ref place) => {
// FIXME(CTFE): don't allow computing the length of arrays in const eval
let src = self.eval_place(place)?;
let ty = self.place_ty(place);
let (_, len) = src.elem_ty_and_len(ty);
self.write_primval(
dest,
PrimVal::from_u128(len as u128),
dest_ty,
)?;
}
Ref(_, _, ref place) => {
let src = self.eval_place(place)?;
// We ignore the alignment of the place here -- special handling for packed structs ends
// at the `&` operator.
let (ptr, extra) = self.force_allocation(src)?.to_ptr_extra_aligned();
let val = match extra {
PlaceExtra::None => ptr.ptr.to_value(),
PlaceExtra::Length(len) => ptr.ptr.to_value_with_len(len),
PlaceExtra::Vtable(vtable) => ptr.ptr.to_value_with_vtable(vtable),
PlaceExtra::DowncastVariant(..) => {
bug!("attempted to take a reference to an enum downcast place")
}
};
let valty = ValTy {
value: val,
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
NullaryOp(mir::NullOp::Box, ty) => {
M::box_alloc(self, ty, dest)?;
}
NullaryOp(mir::NullOp::SizeOf, ty) => {
let size = self.type_size(ty)?.expect(
"SizeOf nullary MIR operator called for unsized type",
);
self.write_primval(
dest,
PrimVal::from_u128(size as u128),
dest_ty,
)?;
}
Cast(kind, ref operand, cast_ty) => {
debug_assert_eq!(self.monomorphize(cast_ty, self.substs()), dest_ty);
use mir::CastKind::*;
match kind {
Unsize => {
let src = self.eval_operand(operand)?;
self.unsize_into(src.value, src.ty, dest, dest_ty)?;
}
Misc => {
let src = self.eval_operand(operand)?;
if self.type_is_fat_ptr(src.ty) {
match (src.value, self.type_is_fat_ptr(dest_ty)) {
(Value::ByRef { .. }, _) |
(Value::ByValPair(..), true) => {
let valty = ValTy {
value: src.value,
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
(Value::ByValPair(data, _), false) => {
let valty = ValTy {
value: Value::ByVal(data),
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
(Value::ByVal(_), _) => bug!("expected fat ptr"),
}
} else {
let src_val = self.value_to_primval(src)?;
let dest_val = self.cast_primval(src_val, src.ty, dest_ty)?;
let valty = ValTy {
value: Value::ByVal(dest_val),
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
}
ReifyFnPointer => {
match self.operand_ty(operand).sty {
ty::TyFnDef(def_id, substs) => {
let instance = self.resolve(def_id, substs)?;
let fn_ptr = self.memory.create_fn_alloc(instance);
let valty = ValTy {
value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
ref other => bug!("reify fn pointer on {:?}", other),
}
}
UnsafeFnPointer => {
match dest_ty.sty {
ty::TyFnPtr(_) => {
let mut src = self.eval_operand(operand)?;
src.ty = dest_ty;
self.write_value(src, dest)?;
}
ref other => bug!("fn to unsafe fn cast on {:?}", other),
}
}
ClosureFnPointer => {
match self.operand_ty(operand).sty {
ty::TyClosure(def_id, substs) => {
let substs = self.tcx.trans_apply_param_substs(self.substs(), &substs);
let instance = ty::Instance::resolve_closure(
self.tcx,
def_id,
substs,
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(instance);
let valty = ValTy {
value: Value::ByVal(PrimVal::Ptr(fn_ptr)),
ty: dest_ty,
};
self.write_value(valty, dest)?;
}
ref other => bug!("closure fn pointer on {:?}", other),
}
}
}
}
Discriminant(ref place) => {
let ty = self.place_ty(place);
let place = self.eval_place(place)?;
let discr_val = self.read_discriminant_value(place, ty)?;
if let ty::TyAdt(adt_def, _) = ty.sty {
trace!("Read discriminant {}, valid discriminants {:?}", discr_val, adt_def.discriminants(self.tcx).collect::<Vec<_>>());
if adt_def.discriminants(self.tcx).all(|v| {
discr_val != v.to_u128_unchecked()
})
{
return err!(InvalidDiscriminant);
}
self.write_primval(dest, PrimVal::Bytes(discr_val), dest_ty)?;
} else {
bug!("rustc only generates Rvalue::Discriminant for enums");
}
}
}
if log_enabled!(::log::LogLevel::Trace) {
self.dump_local(dest);
}
Ok(())
}
pub(super) fn type_is_fat_ptr(&self, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyRawPtr(ref tam) |
ty::TyRef(_, ref tam) => !self.type_is_sized(tam.ty),
ty::TyAdt(def, _) if def.is_box() => !self.type_is_sized(ty.boxed_ty()),
_ => false,
}
}
/// Returns the field type and whether the field is packed
pub fn get_field_ty(
&self,
ty: Ty<'tcx>,
field_index: usize,
) -> EvalResult<'tcx, TyAndPacked<'tcx>> {
let layout = self.type_layout(ty)?.field(self, field_index)?;
Ok(TyAndPacked {
ty: layout.ty,
packed: layout.is_packed()
})
}
pub fn get_field_offset(&self, ty: Ty<'tcx>, field_index: usize) -> EvalResult<'tcx, Size> {
Ok(self.type_layout(ty)?.fields.offset(field_index))
}
pub fn get_field_count(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, u64> {
Ok(self.type_layout(ty)?.fields.count() as u64)
}
pub(super) fn eval_operand_to_primval(
&mut self,
op: &mir::Operand<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
let valty = self.eval_operand(op)?;
self.value_to_primval(valty)
}
pub(crate) fn operands_to_args(
&mut self,
ops: &[mir::Operand<'tcx>],
) -> EvalResult<'tcx, Vec<ValTy<'tcx>>> {
ops.into_iter()
.map(|op| self.eval_operand(op))
.collect()
}
pub fn eval_operand(&mut self, op: &mir::Operand<'tcx>) -> EvalResult<'tcx, ValTy<'tcx>> {
use mir::Operand::*;
match *op {
// FIXME: do some more logic on `move` to invalidate the old location
Copy(ref place) |
Move(ref place) => {
Ok(ValTy {
value: self.eval_and_read_place(place)?,
ty: self.operand_ty(op),
})
},
Constant(ref constant) => {
use mir::Literal;
let mir::Constant { ref literal, .. } = **constant;
let value = match *literal {
Literal::Value { ref value } => self.const_to_value(&value.val)?,
Literal::Promoted { index } => {
let cid = GlobalId {
instance: self.frame().instance,
promoted: Some(index),
};
                    Value::ByRef(
                        self.tcx.interpret_interner.borrow()
                            .get_cached(cid)
                            .expect("promoted not cached"),
                    )
}
};
Ok(ValTy {
value,
ty: self.operand_ty(op),
})
}
}
}
pub fn read_discriminant_value(
&mut self,
place: Place,
ty: Ty<'tcx>,
) -> EvalResult<'tcx, u128> {
let layout = self.type_layout(ty)?;
//trace!("read_discriminant_value {:#?}", layout);
match layout.variants {
layout::Variants::Single { index } => {
return Ok(index as u128);
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {},
}
let (discr_place, discr) = self.place_field(place, mir::Field::new(0), layout)?;
let raw_discr = self.value_to_primval(ValTy {
value: self.read_place(discr_place)?,
ty: discr.ty
})?;
let discr_val = match layout.variants {
layout::Variants::Single { .. } => bug!(),
layout::Variants::Tagged { .. } => raw_discr.to_bytes()?,
layout::Variants::NicheFilling {
dataful_variant,
ref niche_variants,
niche_start,
..
} => {
let variants_start = niche_variants.start as u128;
let variants_end = niche_variants.end as u128;
match raw_discr {
PrimVal::Ptr(_) => {
assert!(niche_start == 0);
assert!(variants_start == variants_end);
dataful_variant as u128
},
PrimVal::Bytes(raw_discr) => {
let discr = raw_discr.wrapping_sub(niche_start)
.wrapping_add(variants_start);
if variants_start <= discr && discr <= variants_end {
discr
} else {
dataful_variant as u128
}
},
PrimVal::Undef => return err!(ReadUndefBytes),
}
}
};
Ok(discr_val)
}
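
    // Worked example of the niche decoding above (illustrative): assume an
    // `Option<bool>`-style layout where `dataful_variant = 1` (`Some`),
    // `niche_variants = 0..=0` (`None`) and `niche_start = 2`. A raw byte of 2
    // decodes as `2.wrapping_sub(2).wrapping_add(0) = 0`, which lies in `0..=0`,
    // so it is variant 0; raw bytes 0 and 1 wrap far outside that range and
    // therefore decode to the dataful variant 1.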
pub(crate) fn write_discriminant_value(
&mut self,
dest_ty: Ty<'tcx>,
dest: Place,
variant_index: usize,
) -> EvalResult<'tcx> {
let layout = self.type_layout(dest_ty)?;
match layout.variants {
layout::Variants::Single { index } => {
if index != variant_index {
// If the layout of an enum is `Single`, all
// other variants are necessarily uninhabited.
assert_eq!(layout.for_variant(&self, variant_index).abi,
layout::Abi::Uninhabited);
}
}
layout::Variants::Tagged { .. } => {
let discr_val = dest_ty.ty_adt_def().unwrap()
.discriminant_for_variant(self.tcx, variant_index)
.to_u128_unchecked();
let (discr_dest, discr) = self.place_field(dest, mir::Field::new(0), layout)?;
self.write_primval(discr_dest, PrimVal::Bytes(discr_val), discr.ty)?;
}
layout::Variants::NicheFilling {
dataful_variant,
ref niche_variants,
niche_start,
..
} => {
if variant_index != dataful_variant {
let (niche_dest, niche) =
self.place_field(dest, mir::Field::new(0), layout)?;
let niche_value = ((variant_index - niche_variants.start) as u128)
.wrapping_add(niche_start);
self.write_primval(niche_dest, PrimVal::Bytes(niche_value), niche.ty)?;
}
}
}
Ok(())
}
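
    // The inverse computation (illustrative, same assumed layout as in the
    // reading example above): encoding variant 0 stores
    //
    //     ((0 - niche_variants.start) as u128).wrapping_add(niche_start) = 2
    //
    // into the niche field, while the dataful variant 1 writes nothing at all.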
pub fn read_global_as_value(&self, gid: GlobalId) -> Value {
        Value::ByRef(
            self.tcx.interpret_interner.borrow()
                .get_cached(gid)
                .expect("global not cached"),
        )
}
pub fn operand_ty(&self, operand: &mir::Operand<'tcx>) -> Ty<'tcx> {
self.monomorphize(operand.ty(self.mir(), self.tcx), self.substs())
}
fn copy(&mut self, src: Pointer, dest: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx> {
let size = self.type_size(ty)?.expect(
"cannot copy from an unsized type",
);
let align = self.type_align(ty)?;
self.memory.copy(src, dest, size, align, false)?;
Ok(())
}
pub fn force_allocation(&mut self, place: Place) -> EvalResult<'tcx, Place> {
let new_place = match place {
Place::Local { frame, local } => {
// -1 since we don't store the return value
match self.stack[frame].locals[local.index() - 1] {
None => return err!(DeadLocal),
Some(Value::ByRef(ptr)) => {
Place::Ptr {
ptr,
extra: PlaceExtra::None,
}
}
Some(val) => {
let ty = self.stack[frame].mir.local_decls[local].ty;
let ty = self.monomorphize(ty, self.stack[frame].instance.substs);
let substs = self.stack[frame].instance.substs;
let ptr = self.alloc_ptr_with_substs(ty, substs)?;
self.stack[frame].locals[local.index() - 1] =
Some(Value::by_ref(ptr.into())); // it stays live
self.write_value_to_ptr(val, ptr.into(), ty)?;
Place::from_ptr(ptr)
}
}
}
Place::Ptr { .. } => place,
};
Ok(new_place)
}
    /// Ensures this `Value` is not a `ByRef`.
pub(super) fn follow_by_ref_value(
&self,
value: Value,
ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned(aligned, |ectx| ectx.read_value(ptr, ty))
}
other => Ok(other),
}
}
pub fn value_to_primval(
&self,
ValTy { value, ty } : ValTy<'tcx>,
) -> EvalResult<'tcx, PrimVal> {
match self.follow_by_ref_value(value, ty)? {
Value::ByRef { .. } => bug!("follow_by_ref_value can't result in `ByRef`"),
Value::ByVal(primval) => {
// TODO: Do we really want insta-UB here?
self.ensure_valid_value(primval, ty)?;
Ok(primval)
}
Value::ByValPair(..) => bug!("value_to_primval can't work with fat pointers"),
}
}
pub fn write_ptr(&mut self, dest: Place, val: Pointer, dest_ty: Ty<'tcx>) -> EvalResult<'tcx> {
let valty = ValTy {
value: val.to_value(),
ty: dest_ty,
};
self.write_value(valty, dest)
}
pub fn write_primval(
&mut self,
dest: Place,
val: PrimVal,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
let valty = ValTy {
value: Value::ByVal(val),
ty: dest_ty,
};
self.write_value(valty, dest)
}
pub fn write_value(
&mut self,
ValTy { value: src_val, ty: dest_ty } : ValTy<'tcx>,
dest: Place,
) -> EvalResult<'tcx> {
//trace!("Writing {:?} to {:?} at type {:?}", src_val, dest, dest_ty);
// Note that it is really important that the type here is the right one, and matches the type things are read at.
// In case `src_val` is a `ByValPair`, we don't do any magic here to handle padding properly, which is only
// correct if we never look at this data with the wrong type.
match dest {
Place::Ptr {
ptr: PtrAndAlign { ptr, aligned },
extra,
} => {
assert_eq!(extra, PlaceExtra::None);
self.write_maybe_aligned_mut(
aligned,
|ectx| ectx.write_value_to_ptr(src_val, ptr, dest_ty),
)
}
Place::Local { frame, local } => {
let dest = self.stack[frame].get_local(local)?;
self.write_value_possibly_by_val(
src_val,
|this, val| this.stack[frame].set_local(local, val),
dest,
dest_ty,
)
}
}
}
// The cases here can be a bit subtle. Read carefully!
fn write_value_possibly_by_val<F: FnOnce(&mut Self, Value) -> EvalResult<'tcx>>(
&mut self,
src_val: Value,
write_dest: F,
old_dest_val: Value,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
if let Value::ByRef(PtrAndAlign {
ptr: dest_ptr,
aligned,
}) = old_dest_val
{
// If the value is already `ByRef` (that is, backed by an `Allocation`),
// then we must write the new value into this allocation, because there may be
// other pointers into the allocation. These other pointers are logically
// pointers into the local variable, and must be able to observe the change.
//
// Thus, it would be an error to replace the `ByRef` with a `ByVal`, unless we
// knew for certain that there were no outstanding pointers to this allocation.
self.write_maybe_aligned_mut(aligned, |ectx| {
ectx.write_value_to_ptr(src_val, dest_ptr, dest_ty)
})?;
} else if let Value::ByRef(PtrAndAlign {
ptr: src_ptr,
aligned,
}) = src_val
{
            // If the destination value is not `ByRef`, then we know there are no pointers to it
            // and we can simply overwrite the `Value` in the locals array directly.
//
// In this specific case, where the source value is `ByRef`, we must duplicate
// the allocation, because this is a by-value operation. It would be incorrect
// if they referred to the same allocation, since then a change to one would
// implicitly change the other.
//
// It is a valid optimization to attempt reading a primitive value out of the
// source and write that into the destination without making an allocation, so
// we do so here.
self.read_maybe_aligned_mut(aligned, |ectx| {
if let Ok(Some(src_val)) = ectx.try_read_value(src_ptr, dest_ty) {
write_dest(ectx, src_val)?;
} else {
let dest_ptr = ectx.alloc_ptr(dest_ty)?.into();
ectx.copy(src_ptr, dest_ptr, dest_ty)?;
write_dest(ectx, Value::by_ref(dest_ptr))?;
}
Ok(())
})?;
} else {
// Finally, we have the simple case where neither source nor destination are
            // `ByRef`. We may simply copy the source value over the destination.
write_dest(self, src_val)?;
}
Ok(())
}
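
    // Scenario sketch for the three branches above (illustrative):
    //
    //     let mut x = 1; let p = &x as *const i32; x = 2;
    //
    // If `x` is `ByRef`, the write to `x` must go through its allocation so a
    // read through `p` observes the change (first branch). If instead the
    // *source* is `ByRef`, it is duplicated into the destination so the two
    // never alias (second branch); otherwise a plain `Value` overwrite
    // suffices (third branch).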
pub fn write_value_to_ptr(
&mut self,
value: Value,
dest: Pointer,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
trace!("write_value_to_ptr: {:#?}", value);
match value {
Value::ByRef(PtrAndAlign { ptr, aligned }) => {
self.read_maybe_aligned_mut(aligned, |ectx| ectx.copy(ptr, dest, dest_ty))
}
Value::ByVal(primval) => {
let layout = self.type_layout(dest_ty)?;
if layout.is_zst() {
assert!(primval.is_undef());
Ok(())
} else {
// TODO: Do we need signedness?
self.memory.write_maybe_aligned_mut(!layout.is_packed(), |mem| {
mem.write_primval(dest.to_ptr()?, primval, layout.size.bytes(), false)
})
}
}
Value::ByValPair(a, b) => {
let ptr = dest.to_ptr()?;
let mut layout = self.type_layout(dest_ty)?;
trace!("write_value_to_ptr valpair: {:#?}", layout);
let mut packed = layout.is_packed();
'outer: loop {
for i in 0..layout.fields.count() {
let field = layout.field(&self, i)?;
if layout.fields.offset(i).bytes() == 0 && layout.size == field.size {
layout = field;
packed |= layout.is_packed();
continue 'outer;
}
}
break;
}
trace!("write_value_to_ptr valpair: {:#?}", layout);
assert_eq!(layout.fields.count(), 2);
let field_0 = layout.field(&self, 0)?;
let field_1 = layout.field(&self, 1)?;
trace!("write_value_to_ptr field 0: {:#?}", field_0);
trace!("write_value_to_ptr field 1: {:#?}", field_1);
assert_eq!(
field_0.is_packed(),
field_1.is_packed(),
"the two fields must agree on being packed"
);
packed |= field_0.is_packed();
let field_0_ptr = ptr.offset(layout.fields.offset(0).bytes(), &self)?.into();
let field_1_ptr = ptr.offset(layout.fields.offset(1).bytes(), &self)?.into();
            // TODO: What about signedness?
self.memory.write_maybe_aligned_mut(!packed, |mem| {
mem.write_primval(field_0_ptr, a, field_0.size.bytes(), false)?;
mem.write_primval(field_1_ptr, b, field_1.size.bytes(), false)
})?;
Ok(())
}
}
}
pub fn ty_to_primval_kind(&self, ty: Ty<'tcx>) -> EvalResult<'tcx, PrimValKind> {
use syntax::ast::FloatTy;
let kind = match ty.sty {
ty::TyBool => PrimValKind::Bool,
ty::TyChar => PrimValKind::Char,
ty::TyInt(int_ty) => {
use syntax::ast::IntTy::*;
let size = match int_ty {
I8 => 1,
I16 => 2,
I32 => 4,
I64 => 8,
I128 => 16,
Is => self.memory.pointer_size(),
};
PrimValKind::from_int_size(size)
}
ty::TyUint(uint_ty) => {
use syntax::ast::UintTy::*;
let size = match uint_ty {
U8 => 1,
U16 => 2,
U32 => 4,
U64 => 8,
U128 => 16,
Us => self.memory.pointer_size(),
};
PrimValKind::from_uint_size(size)
}
ty::TyFloat(FloatTy::F32) => PrimValKind::F32,
ty::TyFloat(FloatTy::F64) => PrimValKind::F64,
ty::TyFnPtr(_) => PrimValKind::FnPtr,
ty::TyRef(_, ref tam) |
ty::TyRawPtr(ref tam) if self.type_is_sized(tam.ty) => PrimValKind::Ptr,
ty::TyAdt(def, _) if def.is_box() => PrimValKind::Ptr,
ty::TyAdt(..) => {
match self.type_layout(ty)?.abi {
layout::Abi::Scalar(ref scalar) => {
use ty::layout::Primitive::*;
match scalar.value {
Int(i, false) => PrimValKind::from_uint_size(i.size().bytes()),
Int(i, true) => PrimValKind::from_int_size(i.size().bytes()),
F32 => PrimValKind::F32,
F64 => PrimValKind::F64,
Pointer => PrimValKind::Ptr,
}
}
_ => return err!(TypeNotPrimitive(ty)),
}
}
_ => return err!(TypeNotPrimitive(ty)),
};
Ok(kind)
}
fn ensure_valid_value(&self, val: PrimVal, ty: Ty<'tcx>) -> EvalResult<'tcx> {
match ty.sty {
ty::TyBool if val.to_bytes()? > 1 => err!(InvalidBool),
ty::TyChar if ::std::char::from_u32(val.to_bytes()? as u32).is_none() => {
err!(InvalidChar(val.to_bytes()? as u32 as u128))
}
_ => Ok(()),
}
}
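
    // Example (illustrative): the byte 2 is not a valid `bool`, and 0x11_0000
    // is past the Unicode scalar range, so at those types
    //
    //     ensure_valid_value(PrimVal::Bytes(2), bool_ty)         // InvalidBool
    //     ensure_valid_value(PrimVal::Bytes(0x11_0000), char_ty) // InvalidChar
    //
    // both fail, while any other type passes through unchecked here.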
pub fn read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Value> {
if let Some(val) = self.try_read_value(ptr, ty)? {
Ok(val)
} else {
bug!("primitive read failed for type: {:?}", ty);
}
}
pub(crate) fn read_ptr(
&self,
ptr: MemoryPointer,
pointee_ty: Ty<'tcx>,
) -> EvalResult<'tcx, Value> {
let ptr_size = self.memory.pointer_size();
let p : Pointer = self.memory.read_ptr_sized_unsigned(ptr)?.into();
if self.type_is_sized(pointee_ty) {
Ok(p.to_value())
} else {
trace!("reading fat pointer extra of type {}", pointee_ty);
let extra = ptr.offset(ptr_size, self)?;
match self.tcx.struct_tail(pointee_ty).sty {
ty::TyDynamic(..) => Ok(p.to_value_with_vtable(
self.memory.read_ptr_sized_unsigned(extra)?.to_ptr()?,
)),
ty::TySlice(..) | ty::TyStr => Ok(
p.to_value_with_len(self.memory.read_ptr_sized_unsigned(extra)?.to_bytes()? as u64),
),
_ => bug!("unsized primval ptr read from {:?}", pointee_ty),
}
}
}
pub fn try_read_value(&self, ptr: Pointer, ty: Ty<'tcx>) -> EvalResult<'tcx, Option<Value>> {
use syntax::ast::FloatTy;
let ptr = ptr.to_ptr()?;
let val = match ty.sty {
ty::TyBool => {
let val = self.memory.read_primval(ptr, 1, false)?;
let val = match val {
PrimVal::Bytes(0) => false,
PrimVal::Bytes(1) => true,
// TODO: This seems a little overeager, should reading at bool type already be insta-UB?
_ => return err!(InvalidBool),
};
PrimVal::from_bool(val)
}
ty::TyChar => {
let c = self.memory.read_primval(ptr, 4, false)?.to_bytes()? as u32;
match ::std::char::from_u32(c) {
Some(ch) => PrimVal::from_char(ch),
None => return err!(InvalidChar(c as u128)),
}
}
ty::TyInt(int_ty) => {
use syntax::ast::IntTy::*;
let size = match int_ty {
I8 => 1,
I16 => 2,
I32 => 4,
I64 => 8,
I128 => 16,
Is => self.memory.pointer_size(),
};
self.memory.read_primval(ptr, size, true)?
}
ty::TyUint(uint_ty) => {
use syntax::ast::UintTy::*;
let size = match uint_ty {
U8 => 1,
U16 => 2,
U32 => 4,
U64 => 8,
U128 => 16,
Us => self.memory.pointer_size(),
};
self.memory.read_primval(ptr, size, false)?
}
ty::TyFloat(FloatTy::F32) => PrimVal::Bytes(self.memory.read_primval(ptr, 4, false)?.to_bytes()?),
ty::TyFloat(FloatTy::F64) => PrimVal::Bytes(self.memory.read_primval(ptr, 8, false)?.to_bytes()?),
ty::TyFnPtr(_) => self.memory.read_ptr_sized_unsigned(ptr)?,
ty::TyRef(_, ref tam) |
ty::TyRawPtr(ref tam) => return self.read_ptr(ptr, tam.ty).map(Some),
ty::TyAdt(def, _) => {
if def.is_box() {
return self.read_ptr(ptr, ty.boxed_ty()).map(Some);
}
if let layout::Abi::Scalar(ref scalar) = self.type_layout(ty)?.abi {
let mut signed = false;
if let layout::Int(_, s) = scalar.value {
signed = s;
}
let size = scalar.value.size(self).bytes();
self.memory.read_primval(ptr, size, signed)?
} else {
return Ok(None);
}
}
_ => return Ok(None),
};
Ok(Some(Value::ByVal(val)))
}
pub fn frame(&self) -> &Frame<'tcx> {
self.stack.last().expect("no call frames exist")
}
pub(super) fn frame_mut(&mut self) -> &mut Frame<'tcx> {
self.stack.last_mut().expect("no call frames exist")
}
pub(super) fn mir(&self) -> &'tcx mir::Mir<'tcx> {
self.frame().mir
}
pub(super) fn substs(&self) -> &'tcx Substs<'tcx> {
if let Some(frame) = self.stack.last() {
frame.instance.substs
} else {
Substs::empty()
}
}
fn unsize_into_ptr(
&mut self,
src: Value,
src_ty: Ty<'tcx>,
dest: Place,
dest_ty: Ty<'tcx>,
sty: Ty<'tcx>,
dty: Ty<'tcx>,
) -> EvalResult<'tcx> {
// A<Struct> -> A<Trait> conversion
let (src_pointee_ty, dest_pointee_ty) = self.tcx.struct_lockstep_tails(sty, dty);
match (&src_pointee_ty.sty, &dest_pointee_ty.sty) {
(&ty::TyArray(_, length), &ty::TySlice(_)) => {
let ptr = src.into_ptr(&self.memory)?;
// u64 cast is from usize to u64, which is always good
let valty = ValTy {
value: ptr.to_value_with_len(length.val.to_const_int().unwrap().to_u64().unwrap() ),
ty: dest_ty,
};
self.write_value(valty, dest)
}
(&ty::TyDynamic(..), &ty::TyDynamic(..)) => {
                // For now, upcasts are limited to changes in marker
                // traits, and hence never require an actual change
                // to the vtable.
let valty = ValTy {
value: src,
ty: dest_ty,
};
self.write_value(valty, dest)
}
(_, &ty::TyDynamic(ref data, _)) => {
let trait_ref = data.principal().unwrap().with_self_ty(
self.tcx,
src_pointee_ty,
);
let trait_ref = self.tcx.erase_regions(&trait_ref);
let vtable = self.get_vtable(src_pointee_ty, trait_ref)?;
let ptr = src.into_ptr(&self.memory)?;
let valty = ValTy {
value: ptr.to_value_with_vtable(vtable),
ty: dest_ty,
};
self.write_value(valty, dest)
}
_ => bug!("invalid unsizing {:?} -> {:?}", src_ty, dest_ty),
}
}
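
    // Example of the array -> slice case above (illustrative): unsizing
    // `&[u8; 3]` into `&[u8]` keeps the data pointer and attaches the
    // statically known length, producing the fat value
    //
    //     ptr.to_value_with_len(3)
    //
    // with no change to the pointee bytes themselves.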
fn unsize_into(
&mut self,
src: Value,
src_ty: Ty<'tcx>,
dest: Place,
dest_ty: Ty<'tcx>,
) -> EvalResult<'tcx> {
match (&src_ty.sty, &dest_ty.sty) {
(&ty::TyRef(_, ref s), &ty::TyRef(_, ref d)) |
(&ty::TyRef(_, ref s), &ty::TyRawPtr(ref d)) |
(&ty::TyRawPtr(ref s), &ty::TyRawPtr(ref d)) => {
self.unsize_into_ptr(src, src_ty, dest, dest_ty, s.ty, d.ty)
}
(&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
if def_a.is_box() || def_b.is_box() {
if !def_a.is_box() || !def_b.is_box() {
panic!("invalid unsizing between {:?} -> {:?}", src_ty, dest_ty);
}
return self.unsize_into_ptr(
src,
src_ty,
dest,
dest_ty,
src_ty.boxed_ty(),
dest_ty.boxed_ty(),
);
}
if self.ty_to_primval_kind(src_ty).is_ok() {
// TODO: We ignore the packed flag here
let sty = self.get_field_ty(src_ty, 0)?.ty;
let dty = self.get_field_ty(dest_ty, 0)?.ty;
return self.unsize_into(src, sty, dest, dty);
}
// unsizing of generic struct with pointer fields
// Example: `Arc<T>` -> `Arc<Trait>`
// here we need to increase the size of every &T thin ptr field to a fat ptr
assert_eq!(def_a, def_b);
let src_fields = def_a.variants[0].fields.iter();
let dst_fields = def_b.variants[0].fields.iter();
let iter = src_fields.zip(dst_fields).enumerate();
//let src = adt::MaybeSizedValue::sized(src);
//let dst = adt::MaybeSizedValue::sized(dst);
let src_ptr = match src {
Value::ByRef(PtrAndAlign { ptr, aligned: true }) => ptr,
// the entire struct is just a pointer
Value::ByVal(_) => {
for (i, (src_f, dst_f)) in iter {
let src_fty = self.field_ty(substs_a, src_f);
let dst_fty = self.field_ty(substs_b, dst_f);
if self.type_size(dst_fty)? == Some(0) {
continue;
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
assert_eq!(src_field_offset, 0);
assert_eq!(dst_field_offset, 0);
assert_eq!(self.type_size(src_fty)?, self.type_size(src_ty)?);
assert_eq!(self.type_size(dst_fty)?, self.type_size(dest_ty)?);
return self.unsize_into(
src,
src_fty,
dest,
dst_fty,
);
}
bug!("by val unsize into where the value doesn't cover the entire type")
}
// TODO: Is it possible for unaligned pointers to occur here?
_ => bug!("expected aligned pointer, got {:?}", src),
};
// FIXME(solson)
let dest = self.force_allocation(dest)?.to_ptr()?;
for (i, (src_f, dst_f)) in iter {
let src_fty = self.field_ty(substs_a, src_f);
let dst_fty = self.field_ty(substs_b, dst_f);
if self.type_size(dst_fty)? == Some(0) {
continue;
}
let src_field_offset = self.get_field_offset(src_ty, i)?.bytes();
let dst_field_offset = self.get_field_offset(dest_ty, i)?.bytes();
let src_f_ptr = src_ptr.offset(src_field_offset, &self)?;
let dst_f_ptr = dest.offset(dst_field_offset, &self)?;
if src_fty == dst_fty {
self.copy(src_f_ptr, dst_f_ptr.into(), src_fty)?;
} else {
self.unsize_into(
Value::by_ref(src_f_ptr),
src_fty,
Place::from_ptr(dst_f_ptr),
dst_fty,
)?;
}
}
Ok(())
}
_ => {
bug!(
"unsize_into: invalid conversion: {:?} -> {:?}",
src_ty,
dest_ty
)
}
}
}
pub fn dump_local(&self, place: Place) {
// Debug output
match place {
Place::Local { frame, local } => {
let mut allocs = Vec::new();
let mut msg = format!("{:?}", local);
if frame != self.cur_frame() {
write!(msg, " ({} frames up)", self.cur_frame() - frame).unwrap();
}
write!(msg, ":").unwrap();
match self.stack[frame].get_local(local) {
Err(EvalError { kind: EvalErrorKind::DeadLocal, .. }) => {
write!(msg, " is dead").unwrap();
}
Err(err) => {
panic!("Failed to access local: {:?}", err);
}
Ok(Value::ByRef(PtrAndAlign { ptr, aligned })) => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
write!(msg, " by {}ref:", if aligned { "" } else { "unaligned " })
.unwrap();
allocs.push(ptr.alloc_id);
}
ptr => write!(msg, " integral by ref: {:?}", ptr).unwrap(),
}
}
Ok(Value::ByVal(val)) => {
write!(msg, " {:?}", val).unwrap();
if let PrimVal::Ptr(ptr) = val {
allocs.push(ptr.alloc_id);
}
}
Ok(Value::ByValPair(val1, val2)) => {
write!(msg, " ({:?}, {:?})", val1, val2).unwrap();
if let PrimVal::Ptr(ptr) = val1 {
allocs.push(ptr.alloc_id);
}
if let PrimVal::Ptr(ptr) = val2 {
allocs.push(ptr.alloc_id);
}
}
}
trace!("{}", msg);
self.memory.dump_allocs(allocs);
}
Place::Ptr { ptr: PtrAndAlign { ptr, aligned }, .. } => {
match ptr.into_inner_primval() {
PrimVal::Ptr(ptr) => {
trace!("by {}ref:", if aligned { "" } else { "unaligned " });
self.memory.dump_alloc(ptr.alloc_id);
}
ptr => trace!(" integral by ref: {:?}", ptr),
}
}
}
}
/// Convenience function to ensure correct usage of locals
pub fn modify_local<F>(&mut self, frame: usize, local: mir::Local, f: F) -> EvalResult<'tcx>
where
F: FnOnce(&mut Self, Value) -> EvalResult<'tcx, Value>,
{
let val = self.stack[frame].get_local(local)?;
let new_val = f(self, val)?;
self.stack[frame].set_local(local, new_val)?;
// FIXME(solson): Run this when setting to Undef? (See previous version of this code.)
// if let Value::ByRef(ptr) = self.stack[frame].get_local(local) {
// self.memory.deallocate(ptr)?;
// }
Ok(())
}
pub fn report(&self, e: &mut EvalError) {
if let Some(ref mut backtrace) = e.backtrace {
let mut trace_text = "\n\nAn error occurred in miri:\n".to_string();
backtrace.resolve();
write!(trace_text, "backtrace frames: {}\n", backtrace.frames().len()).unwrap();
'frames: for (i, frame) in backtrace.frames().iter().enumerate() {
if frame.symbols().is_empty() {
write!(trace_text, "{}: no symbols\n", i).unwrap();
}
for symbol in frame.symbols() {
write!(trace_text, "{}: ", i).unwrap();
if let Some(name) = symbol.name() {
write!(trace_text, "{}\n", name).unwrap();
} else {
write!(trace_text, "<unknown>\n").unwrap();
}
write!(trace_text, "\tat ").unwrap();
if let Some(file_path) = symbol.filename() {
write!(trace_text, "{}", file_path.display()).unwrap();
} else {
write!(trace_text, "<unknown_file>").unwrap();
}
if let Some(line) = symbol.lineno() {
write!(trace_text, ":{}\n", line).unwrap();
} else {
write!(trace_text, "\n").unwrap();
}
}
}
error!("{}", trace_text);
}
if let Some(frame) = self.stack().last() {
let block = &frame.mir.basic_blocks()[frame.block];
let span = if frame.stmt < block.statements.len() {
block.statements[frame.stmt].source_info.span
} else {
block.terminator().source_info.span
};
let mut err = self.tcx.sess.struct_span_err(span, &e.to_string());
for &Frame { instance, span, .. } in self.stack().iter().rev() {
if self.tcx.def_key(instance.def_id()).disambiguated_data.data ==
DefPathData::ClosureExpr
{
err.span_note(span, "inside call to closure");
continue;
}
err.span_note(span, &format!("inside call to {}", instance));
}
err.emit();
} else {
self.tcx.sess.err(&e.to_string());
}
}
}
impl<'tcx> Frame<'tcx> {
pub fn get_local(&self, local: mir::Local) -> EvalResult<'tcx, Value> {
// Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
self.locals[local.index() - 1].ok_or(EvalErrorKind::DeadLocal.into())
}
fn set_local(&mut self, local: mir::Local, value: Value) -> EvalResult<'tcx> {
// Subtract 1 because we don't store a value for the ReturnPointer, the local with index 0.
match self.locals[local.index() - 1] {
None => err!(DeadLocal),
Some(ref mut local) => {
*local = value;
Ok(())
}
}
}
pub fn storage_live(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
trace!("{:?} is now live", local);
let old = self.locals[local.index() - 1];
self.locals[local.index() - 1] = Some(Value::ByVal(PrimVal::Undef)); // StorageLive *always* kills the value that's currently stored
return Ok(old);
}
/// Returns the old value of the local
pub fn storage_dead(&mut self, local: mir::Local) -> EvalResult<'tcx, Option<Value>> {
trace!("{:?} is now dead", local);
let old = self.locals[local.index() - 1];
self.locals[local.index() - 1] = None;
return Ok(old);
}
}
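
// Illustrative lifecycle of a local with storage annotations (hypothetical
// `frame`, `local` and value `v`):
//
//     frame.storage_live(local)?;  // slot becomes Some(ByVal(Undef))
//     frame.set_local(local, v)?;  // now holds a real value
//     frame.storage_dead(local)?;  // back to None; get_local now errs with DeadLocal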
// TODO(solson): Upstream these methods into rustc::ty::layout.
pub fn resolve_drop_in_place<'a, 'tcx>(
tcx: TyCtxt<'a, 'tcx, 'tcx>,
ty: Ty<'tcx>,
) -> ty::Instance<'tcx> {
let def_id = tcx.require_lang_item(::middle::lang_items::DropInPlaceFnLangItem);
let substs = tcx.intern_substs(&[Kind::from(ty)]);
ty::Instance::resolve(tcx, ty::ParamEnv::empty(Reveal::All), def_id, substs).unwrap()
}