// rust/compiler/rustc_const_eval/src/interpret/step.rs


//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
//!
//! The main entry point is the `step` method.
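//!
//! A driver (like `run` below, or an external tool such as priroda) typically just
//! calls `step` in a loop. A minimal sketch, assuming `interp_cx` is an already
//! set-up `InterpCx` with at least one stack frame pushed:
//!
//! ```ignore (requires a fully initialized InterpCx and Machine)
//! while interp_cx.step()? {
//!     // inspect or mutate machine state between steps if desired
//! }
//! ```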
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::LayoutOf;
use super::{InterpCx, Machine};
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
/// same type as the result.
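/// For example, `Add` on two `u32`s yields a `u32` (left-homogeneous), whereas a
/// comparison like `Lt` yields a `bool`, which differs from the operand type.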
#[inline]
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
use rustc_middle::mir::BinOp::*;
match op {
Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
Eq | Ne | Lt | Le | Gt | Ge => false,
}
}
/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
/// same type as the LHS.
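/// For example, both operands of `Add` share one type, but the RHS of `Shl`/`Shr`
/// (the shift amount) may be a different integer type, and the RHS of `Offset` is
/// a `usize` while the LHS is a pointer.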
#[inline]
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
use rustc_middle::mir::BinOp::*;
match op {
Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
Offset | Shl | Shr => false,
}
}
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn run(&mut self) -> InterpResult<'tcx> {
while self.step()? {}
Ok(())
}
/// Returns `true` as long as there are more things to do.
///
/// This is used by [priroda](https://github.com/oli-obk/priroda).
///
/// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`.
#[inline(always)]
pub fn step(&mut self) -> InterpResult<'tcx, bool> {
if self.stack().is_empty() {
return Ok(false);
}
let loc = match self.frame().loc {
Ok(loc) => loc,
Err(_) => {
// We are unwinding and this fn has no cleanup code.
// Just go on unwinding.
trace!("unwinding: skipping frame");
self.pop_stack_frame(/* unwinding */ true)?;
return Ok(true);
}
};
let basic_block = &self.body().basic_blocks()[loc.block];
let old_frames = self.frame_idx();
if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
assert_eq!(old_frames, self.frame_idx());
self.statement(stmt)?;
return Ok(true);
}
M::before_terminator(self)?;
let terminator = basic_block.terminator();
assert_eq!(old_frames, self.frame_idx());
self.terminator(terminator)?;
Ok(true)
}
/// Runs the interpretation logic for the given `mir::Statement` at the current frame and
/// statement counter. This also moves the statement counter forward.
pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", stmt);
use rustc_middle::mir::StatementKind::*;
// Some statements (e.g., box) push new stack frames.
// We have to record the stack frame number *before* executing the statement.
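// That way, the statement counter below is advanced in the frame that contained
// this statement, not in any frame the statement may have pushed on top of it.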
let frame_idx = self.frame_idx();
match &stmt.kind {
Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
SetDiscriminant { place, variant_index } => {
let dest = self.eval_place(**place)?;
self.write_discriminant(*variant_index, &dest)?;
}
// Mark locals as alive
StorageLive(local) => {
self.storage_live(*local)?;
}
// Mark locals as dead
StorageDead(local) => {
self.storage_dead(*local)?;
}
// No dynamic semantics attached to `FakeRead`; MIR
// interpreter is solely intended for borrowck'ed code.
FakeRead(..) => {}
// Stacked Borrows.
Retag(kind, place) => {
let dest = self.eval_place(**place)?;
M::retag(self, *kind, &dest)?;
}
// Evaluate the three operands and perform the copy, with the non-overlapping check enabled.
CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
let src = self.eval_operand(src, None)?;
let dst = self.eval_operand(dst, None)?;
let count = self.eval_operand(count, None)?;
self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
}
// Statements we do not track.
AscribeUserType(..) => {}
// Currently, Miri discards Coverage statements. Coverage statements are only injected
// via an optional compile time MIR pass and have no side effects. Since Coverage
// statements don't exist at the source level, it is safe for Miri to ignore them, even
// for undefined behavior (UB) checks.
//
// A coverage counter inside a const expression (for example, a counter injected in a
// const function) is discarded when the const is evaluated at compile time. Whether
// this should change, and/or how to implement a const eval counter, is a subject of the
// following issue:
//
// FIXME(#73156): Handle source code coverage in const eval
Coverage(..) => {}
// Defined to do nothing. These are added by optimization passes, to avoid changing the
// size of MIR constantly.
Nop => {}
LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
}
self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
Ok(())
}
/// Evaluate an assignment statement.
///
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
/// type writes its results directly into the memory specified by the place.
pub fn eval_rvalue_into_place(
&mut self,
rvalue: &mir::Rvalue<'tcx>,
place: mir::Place<'tcx>,
) -> InterpResult<'tcx> {
let dest = self.eval_place(place)?;
use rustc_middle::mir::Rvalue::*;
match *rvalue {
ThreadLocalRef(did) => {
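// The machine decides what address a thread-local static gets; each thread
// sees its own copy of the static.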
let ptr = M::thread_local_static_base_pointer(self, did)?;
self.write_pointer(ptr, &dest)?;
}
Use(ref operand) => {
// Avoid recomputing the layout
let op = self.eval_operand(operand, Some(dest.layout))?;
self.copy_op(&op, &dest)?;
}
BinaryOp(bin_op, box (ref left, ref right)) => {
let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
}
CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
// Due to the extra boolean in the result, we can never reuse the `dest.layout`.
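// (E.g. a checked `u32` addition writes a `(u32, bool)` pair into `dest`,
// while both operands are plain `u32`s.)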
let left = self.read_immediate(&self.eval_operand(left, None)?)?;
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
self.binop_with_overflow(bin_op, &left, &right, &dest)?;
}
UnaryOp(un_op, ref operand) => {
// The operand always has the same type as the result.
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
let val = self.unary_op(un_op, &val)?;
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
self.write_immediate(*val, &dest)?;
}
Aggregate(ref kind, ref operands) => {
let (dest, active_field_index) = match **kind {
mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
self.write_discriminant(variant_index, &dest)?;
if adt_def.is_enum() {
(self.place_downcast(&dest, variant_index)?, active_field_index)
} else {
(dest, active_field_index)
}
}
_ => (dest, None),
};
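// For a union aggregate, `active_field_index` is `Some(_)` and only that single
// field gets written; for structs and enum variants it is `None`, so operand `i`
// is written into field `i`.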
for (i, operand) in operands.iter().enumerate() {
let op = self.eval_operand(operand, None)?;
// Ignore zero-sized fields.
if !op.layout.is_zst() {
let field_index = active_field_index.unwrap_or(i);
let field_dest = self.place_field(&dest, field_index)?;
self.copy_op(&op, &field_dest)?;
}
}
}
Repeat(ref operand, _) => {
let src = self.eval_operand(operand, None)?;
assert!(!src.layout.is_unsized());
let dest = self.force_allocation(&dest)?;
let length = dest.len(self)?;
if length == 0 {
// Nothing to copy... but let's still make sure that `dest` as a place is valid.
self.get_alloc_mut(&dest)?;
} else {
// Write the src to the first element.
let first = self.mplace_field(&dest, 0)?;
self.copy_op(&src, &first.into())?;
// This is performance-sensitive code for big static/const arrays! So we
// avoid writing each operand individually and instead just make many copies
// of the first element.
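// E.g. `[0u8; 4096]` writes a single byte and then duplicates it 4095 times.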
let elem_size = first.layout.size;
let first_ptr = first.ptr;
let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly(
first_ptr,
first.align,
rest_ptr,
first.align,
elem_size,
length - 1,
/*nonoverlapping:*/ true,
)?;
}
}
Len(place) => {
// FIXME(CTFE): don't allow computing the length of arrays in const eval
let src = self.eval_place(place)?;
let mplace = self.force_allocation(&src)?;
let len = mplace.len(self)?;
self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
}
AddressOf(_, place) | Ref(_, _, place) => {
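// Taking a reference or raw pointer forces the place into memory; the resulting
// pointer (plus metadata for unsized places) is then written into `dest`.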
let src = self.eval_place(place)?;
let place = self.force_allocation(&src)?;
self.write_immediate(place.to_ref(self), &dest)?;
}
NullaryOp(mir::NullOp::Box, _) => {
M::box_alloc(self, &dest)?;
}
NullaryOp(null_op, ty) => {
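// `SizeOf`/`AlignOf` are answered straight from the type's layout (such operators
// can show up e.g. when the optional `LowerIntrinsics` MIR pass lowers
// `size_of`/`align_of` intrinsic calls).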
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
let layout = self.layout_of(ty)?;
if layout.is_unsized() {
// FIXME: This should be a span_bug (#80742)
self.tcx.sess.delay_span_bug(
self.frame().current_span(),
&format!("Nullary MIR operator called for unsized type {}", ty),
);
throw_inval!(SizeOfUnsizedType(ty));
}
let val = match null_op {
mir::NullOp::SizeOf => layout.size.bytes(),
mir::NullOp::AlignOf => layout.align.abi.bytes(),
mir::NullOp::Box => unreachable!(),
};
self.write_scalar(Scalar::from_machine_usize(val, self), &dest)?;
}
Cast(cast_kind, ref operand, cast_ty) => {
let src = self.eval_operand(operand, None)?;
let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
self.cast(&src, cast_kind, cast_ty, &dest)?;
}
Discriminant(place) => {
let op = self.eval_place_to_op(place, None)?;
let discr_val = self.read_discriminant(&op)?.0;
self.write_scalar(discr_val, &dest)?;
}
}
trace!("{:?}", self.dump_place(*dest));
Ok(())
}
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
info!("{:?}", terminator.kind);
self.eval_terminator(terminator)?;
if !self.stack().is_empty() {
if let Ok(loc) = self.frame().loc {
info!("// executing {:?}", loc.block);
}
}
Ok(())
}
}