Rename rustc_mir to rustc_const_eval.
parent
fd9c04fe32
commit
c5fc2609f0
64 changed files with 66 additions and 66 deletions
210
compiler/rustc_const_eval/src/const_eval/error.rs
Normal file
@@ -0,0 +1,210 @@
use std::error::Error;
use std::fmt;

use rustc_errors::{DiagnosticBuilder, ErrorReported};
use rustc_hir as hir;
use rustc_middle::mir::AssertKind;
use rustc_middle::ty::{layout::LayoutError, query::TyCtxtAt, ConstInt};
use rustc_span::{Span, Symbol};

use super::InterpCx;
use crate::interpret::{
    struct_error, ErrorHandled, FrameInfo, InterpError, InterpErrorInfo, Machine, MachineStopType,
};

/// The CTFE machine has some custom error kinds.
#[derive(Clone, Debug)]
pub enum ConstEvalErrKind {
    NeedsRfc(String),
    ConstAccessesStatic,
    ModifiedGlobal,
    AssertFailure(AssertKind<ConstInt>),
    Panic { msg: Symbol, line: u32, col: u32, file: Symbol },
    Abort(String),
}

impl MachineStopType for ConstEvalErrKind {
    fn is_hard_err(&self) -> bool {
        match self {
            Self::Panic { .. } => true,
            _ => false,
        }
    }
}

// The errors become `MachineStop` with plain strings when being raised.
// `ConstEvalErr` (in `librustc_middle/mir/interpret/error.rs`) knows to
// handle these.
impl<'tcx> Into<InterpErrorInfo<'tcx>> for ConstEvalErrKind {
    fn into(self) -> InterpErrorInfo<'tcx> {
        err_machine_stop!(self).into()
    }
}

impl fmt::Display for ConstEvalErrKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        use self::ConstEvalErrKind::*;
        match *self {
            NeedsRfc(ref msg) => {
                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
            }
            ConstAccessesStatic => write!(f, "constant accesses static"),
            ModifiedGlobal => {
                write!(f, "modifying a static's initial value from another static's initializer")
            }
            AssertFailure(ref msg) => write!(f, "{:?}", msg),
            Panic { msg, line, col, file } => {
                write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col)
            }
            Abort(ref msg) => write!(f, "{}", msg),
        }
    }
}

impl Error for ConstEvalErrKind {}

/// When const-evaluation errors, this type is constructed with the resulting information,
/// and then used to emit the error as a lint or hard error.
#[derive(Debug)]
pub struct ConstEvalErr<'tcx> {
    pub span: Span,
    pub error: InterpError<'tcx>,
    pub stacktrace: Vec<FrameInfo<'tcx>>,
}

impl<'tcx> ConstEvalErr<'tcx> {
    /// Turn an interpreter error into something to report to the user.
    /// As a side-effect, if RUSTC_CTFE_BACKTRACE is set, this prints the backtrace.
    /// Should be called only if the error is actually going to be reported!
    pub fn new<'mir, M: Machine<'mir, 'tcx>>(
        ecx: &InterpCx<'mir, 'tcx, M>,
        error: InterpErrorInfo<'tcx>,
        span: Option<Span>,
    ) -> ConstEvalErr<'tcx>
    where
        'tcx: 'mir,
    {
        error.print_backtrace();
        let stacktrace = ecx.generate_stacktrace();
        ConstEvalErr {
            error: error.into_kind(),
            stacktrace,
            span: span.unwrap_or_else(|| ecx.cur_span()),
        }
    }

    pub fn struct_error(
        &self,
        tcx: TyCtxtAt<'tcx>,
        message: &str,
        emit: impl FnOnce(DiagnosticBuilder<'_>),
    ) -> ErrorHandled {
        self.struct_generic(tcx, message, emit, None)
    }

    pub fn report_as_error(&self, tcx: TyCtxtAt<'tcx>, message: &str) -> ErrorHandled {
        self.struct_error(tcx, message, |mut e| e.emit())
    }

    pub fn report_as_lint(
        &self,
        tcx: TyCtxtAt<'tcx>,
        message: &str,
        lint_root: hir::HirId,
        span: Option<Span>,
    ) -> ErrorHandled {
        self.struct_generic(
            tcx,
            message,
            |mut lint: DiagnosticBuilder<'_>| {
                // Apply the span.
                if let Some(span) = span {
                    let primary_spans = lint.span.primary_spans().to_vec();
                    // point at the actual error as the primary span
                    lint.replace_span_with(span);
                    // point to the `const` statement as a secondary span
                    // they don't have any label
                    for sp in primary_spans {
                        if sp != span {
                            lint.span_label(sp, "");
                        }
                    }
                }
                lint.emit();
            },
            Some(lint_root),
        )
    }

    /// Create a diagnostic for this const eval error.
    ///
    /// Sets the message passed in via `message` and adds span labels with detailed error
    /// information before handing control back to `emit` to do any final processing.
    /// It's the caller's responsibility to call emit(), stash(), etc. within the `emit`
    /// function to dispose of the diagnostic properly.
    ///
    /// If `lint_root.is_some()` report it as a lint, else report it as a hard error.
    /// (Except that for some errors, we ignore all that -- see `must_error` below.)
    fn struct_generic(
        &self,
        tcx: TyCtxtAt<'tcx>,
        message: &str,
        emit: impl FnOnce(DiagnosticBuilder<'_>),
        lint_root: Option<hir::HirId>,
    ) -> ErrorHandled {
        let finish = |mut err: DiagnosticBuilder<'_>, span_msg: Option<String>| {
            trace!("reporting const eval failure at {:?}", self.span);
            if let Some(span_msg) = span_msg {
                err.span_label(self.span, span_msg);
            }
            // Add spans for the stacktrace. Don't print a single-line backtrace though.
            if self.stacktrace.len() > 1 {
                for frame_info in &self.stacktrace {
                    err.span_label(frame_info.span, frame_info.to_string());
                }
            }
            // Let the caller finish the job.
            emit(err)
        };

        // Special handling for certain errors
        match &self.error {
            // Don't emit a new diagnostic for these errors
            err_inval!(Layout(LayoutError::Unknown(_))) | err_inval!(TooGeneric) => {
                return ErrorHandled::TooGeneric;
            }
            err_inval!(AlreadyReported(error_reported)) => {
                return ErrorHandled::Reported(*error_reported);
            }
            err_inval!(Layout(LayoutError::SizeOverflow(_))) => {
                // We must *always* hard error on these, even if the caller wants just a lint.
                // The `message` makes little sense here, this is a more serious error than the
                // caller thinks anyway.
                // See <https://github.com/rust-lang/rust/pull/63152>.
                finish(struct_error(tcx, &self.error.to_string()), None);
                return ErrorHandled::Reported(ErrorReported);
            }
            _ => {}
        };

        let err_msg = self.error.to_string();

        // Regular case - emit a lint.
        if let Some(lint_root) = lint_root {
            // Report as lint.
            let hir_id =
                self.stacktrace.iter().rev().find_map(|frame| frame.lint_root).unwrap_or(lint_root);
            tcx.struct_span_lint_hir(
                rustc_session::lint::builtin::CONST_ERR,
                hir_id,
                tcx.span,
                |lint| finish(lint.build(message), Some(err_msg)),
            );
            ErrorHandled::Linted
        } else {
            // Report as hard error.
            finish(struct_error(tcx, message), Some(err_msg));
            ErrorHandled::Reported(ErrorReported)
        }
    }
}
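Note (editorial, not part of the commit): a minimal sketch of how the error kinds above are typically raised, assuming the `err_machine_stop!` macro and `InterpResult` alias used elsewhere in this diff; the helper name `reject_unsupported` is invented for illustration.

// Hypothetical helper: a ConstEvalErrKind converts into an interpreter error
// via `err_machine_stop!` (see the `Into<InterpErrorInfo>` impl above), and
// `ConstEvalErr::struct_generic` later decides whether it becomes a hard
// error or only a lint -- only `Panic { .. }` is unconditionally hard.
fn reject_unsupported<'tcx, T>(what: &str) -> InterpResult<'tcx, T> {
    Err(ConstEvalErrKind::NeedsRfc(what.to_string()).into())
}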
399
compiler/rustc_const_eval/src/const_eval/eval_queries.rs
Normal file
@@ -0,0 +1,399 @@
use super::{CompileTimeEvalContext, CompileTimeInterpreter, ConstEvalErr, MemoryExtra};
use crate::interpret::eval_nullary_intrinsic;
use crate::interpret::{
    intern_const_alloc_recursive, Allocation, ConstAlloc, ConstValue, CtfeValidationMode, GlobalId,
    Immediate, InternKind, InterpCx, InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, Scalar,
    ScalarMaybeUninit, StackPopCleanup,
};

use rustc_errors::ErrorReported;
use rustc_hir::def::DefKind;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::mir::pretty::display_allocation;
use rustc_middle::traits::Reveal;
use rustc_middle::ty::layout::LayoutOf;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::{self, subst::Subst, TyCtxt};
use rustc_span::source_map::Span;
use rustc_target::abi::Abi;
use std::borrow::Cow;
use std::convert::TryInto;

pub fn note_on_undefined_behavior_error() -> &'static str {
    "The rules on what exactly is undefined behavior aren't clear, \
     so this check might be overzealous. Please open an issue on the rustc \
     repository if you believe it should not be considered undefined behavior."
}

// Returns a pointer to where the result lives
fn eval_body_using_ecx<'mir, 'tcx>(
    ecx: &mut CompileTimeEvalContext<'mir, 'tcx>,
    cid: GlobalId<'tcx>,
    body: &'mir mir::Body<'tcx>,
) -> InterpResult<'tcx, MPlaceTy<'tcx>> {
    debug!("eval_body_using_ecx: {:?}, {:?}", cid, ecx.param_env);
    let tcx = *ecx.tcx;
    assert!(
        cid.promoted.is_some()
            || matches!(
                ecx.tcx.def_kind(cid.instance.def_id()),
                DefKind::Const
                    | DefKind::Static
                    | DefKind::ConstParam
                    | DefKind::AnonConst
                    | DefKind::AssocConst
            ),
        "Unexpected DefKind: {:?}",
        ecx.tcx.def_kind(cid.instance.def_id())
    );
    let layout = ecx.layout_of(body.return_ty().subst(tcx, cid.instance.substs))?;
    assert!(!layout.is_unsized());
    let ret = ecx.allocate(layout, MemoryKind::Stack)?;

    let name =
        with_no_trimmed_paths(|| ty::tls::with(|tcx| tcx.def_path_str(cid.instance.def_id())));
    let prom = cid.promoted.map_or_else(String::new, |p| format!("::promoted[{:?}]", p));
    trace!("eval_body_using_ecx: pushing stack frame for global: {}{}", name, prom);

    ecx.push_stack_frame(
        cid.instance,
        body,
        Some(&ret.into()),
        StackPopCleanup::None { cleanup: false },
    )?;

    // The main interpreter loop.
    ecx.run()?;

    // Intern the result
    let intern_kind = if cid.promoted.is_some() {
        InternKind::Promoted
    } else {
        match tcx.static_mutability(cid.instance.def_id()) {
            Some(m) => InternKind::Static(m),
            None => InternKind::Constant,
        }
    };
    intern_const_alloc_recursive(ecx, intern_kind, &ret)?;

    debug!("eval_body_using_ecx done: {:?}", *ret);
    Ok(ret)
}

/// The `InterpCx` is only meant to be used to do field and index projections into constants for
/// `simd_shuffle` and const patterns in match arms.
///
/// The function containing the `match` that is currently being analyzed may have generic bounds
/// that inform us about the generic bounds of the constant. E.g., using an associated constant
/// of a function's generic parameter will require knowledge about the bounds on the generic
/// parameter. These bounds are passed to `mk_eval_cx` via the `ParamEnv` argument.
pub(super) fn mk_eval_cx<'mir, 'tcx>(
    tcx: TyCtxt<'tcx>,
    root_span: Span,
    param_env: ty::ParamEnv<'tcx>,
    can_access_statics: bool,
) -> CompileTimeEvalContext<'mir, 'tcx> {
    debug!("mk_eval_cx: {:?}", param_env);
    InterpCx::new(
        tcx,
        root_span,
        param_env,
        CompileTimeInterpreter::new(tcx.const_eval_limit()),
        MemoryExtra { can_access_statics },
    )
}

/// This function converts an interpreter value into a constant that is meant for use in the
/// type system.
pub(super) fn op_to_const<'tcx>(
    ecx: &CompileTimeEvalContext<'_, 'tcx>,
    op: &OpTy<'tcx>,
) -> ConstValue<'tcx> {
    // We do not have value optimizations for everything.
    // Only scalars and slices, since they are very common.
    // Note that further down we turn scalars of uninitialized bits back to `ByRef`. These can result
    // from scalar unions that are initialized with one of their zero sized variants. We could
    // instead allow `ConstValue::Scalar` to store `ScalarMaybeUninit`, but that would affect all
    // the usual cases of extracting e.g. a `usize`, without there being a real use case for the
    // `Undef` situation.
    let try_as_immediate = match op.layout.abi {
        Abi::Scalar(..) => true,
        Abi::ScalarPair(..) => match op.layout.ty.kind() {
            ty::Ref(_, inner, _) => match *inner.kind() {
                ty::Slice(elem) => elem == ecx.tcx.types.u8,
                ty::Str => true,
                _ => false,
            },
            _ => false,
        },
        _ => false,
    };
    let immediate = if try_as_immediate {
        Err(ecx.read_immediate(op).expect("normalization works on validated constants"))
    } else {
        // It is guaranteed that any non-slice scalar pair is actually ByRef here.
        // When we come back from raw const eval, we are always by-ref. The only way our op here is
        // by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
        // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
        // structs containing such.
        op.try_as_mplace()
    };

    // We know `offset` is relative to the allocation, so we can use `into_parts`.
    let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
        (Some(alloc_id), offset) => {
            let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
            ConstValue::ByRef { alloc, offset }
        }
        (None, offset) => {
            assert!(mplace.layout.is_zst());
            assert_eq!(
                offset.bytes() % mplace.layout.align.abi.bytes(),
                0,
                "this MPlaceTy must come from a validated constant, thus we can assume the \
                 alignment is correct",
            );
            ConstValue::Scalar(Scalar::ZST)
        }
    };
    match immediate {
        Ok(ref mplace) => to_const_value(mplace),
        // see comment on `let try_as_immediate` above
        Err(imm) => match *imm {
            Immediate::Scalar(x) => match x {
                ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
                ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
            },
            Immediate::ScalarPair(a, b) => {
                // We know `offset` is relative to the allocation, so we can use `into_parts`.
                let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
                    (Some(alloc_id), offset) => {
                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
                    }
                    (None, _offset) => (
                        ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
                            b"" as &[u8],
                        )),
                        0,
                    ),
                };
                let len = b.to_machine_usize(ecx).unwrap();
                let start = start.try_into().unwrap();
                let len: usize = len.try_into().unwrap();
                ConstValue::Slice { data, start, end: start + len }
            }
        },
    }
}

fn turn_into_const_value<'tcx>(
    tcx: TyCtxt<'tcx>,
    constant: ConstAlloc<'tcx>,
    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ConstValue<'tcx> {
    let cid = key.value;
    let def_id = cid.instance.def.def_id();
    let is_static = tcx.is_static(def_id);
    let ecx = mk_eval_cx(tcx, tcx.def_span(key.value.instance.def_id()), key.param_env, is_static);

    let mplace = ecx.raw_const_to_mplace(constant).expect(
        "can only fail if layout computation failed, \
         which should have given a good error before ever invoking this function",
    );
    assert!(
        !is_static || cid.promoted.is_some(),
        "the `eval_to_const_value_raw` query should not be used for statics, use `eval_to_allocation` instead"
    );
    // Turn this into a proper constant.
    op_to_const(&ecx, &mplace.into())
}

pub fn eval_to_const_value_raw_provider<'tcx>(
    tcx: TyCtxt<'tcx>,
    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToConstValueResult<'tcx> {
    // see comment in eval_to_allocation_raw_provider for what we're doing here
    if key.param_env.reveal() == Reveal::All {
        let mut key = key;
        key.param_env = key.param_env.with_user_facing();
        match tcx.eval_to_const_value_raw(key) {
            // try again with reveal all as requested
            Err(ErrorHandled::TooGeneric) => {}
            // deduplicate calls
            other => return other,
        }
    }

    // We call `const_eval` for zero arg intrinsics, too, in order to cache their value.
    // Catch such calls and evaluate them instead of trying to load a constant's MIR.
    if let ty::InstanceDef::Intrinsic(def_id) = key.value.instance.def {
        let ty = key.value.instance.ty(tcx, key.param_env);
        let substs = match ty.kind() {
            ty::FnDef(_, substs) => substs,
            _ => bug!("intrinsic with type {:?}", ty),
        };
        return eval_nullary_intrinsic(tcx, key.param_env, def_id, substs).map_err(|error| {
            let span = tcx.def_span(def_id);
            let error = ConstEvalErr { error: error.into_kind(), stacktrace: vec![], span };
            error.report_as_error(tcx.at(span), "could not evaluate nullary intrinsic")
        });
    }

    tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
}

pub fn eval_to_allocation_raw_provider<'tcx>(
    tcx: TyCtxt<'tcx>,
    key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>,
) -> ::rustc_middle::mir::interpret::EvalToAllocationRawResult<'tcx> {
    // Because the constant is computed twice (once per value of `Reveal`), we are at risk of
    // reporting the same error twice here. To resolve this, we check whether we can evaluate the
    // constant in the more restrictive `Reveal::UserFacing`, which most likely already was
    // computed. For a large percentage of constants that will already have succeeded. Only
    // associated constants of generic functions will fail due to not enough monomorphization
    // information being available.

    // In case we fail in the `UserFacing` variant, we just do the real computation.
    if key.param_env.reveal() == Reveal::All {
        let mut key = key;
        key.param_env = key.param_env.with_user_facing();
        match tcx.eval_to_allocation_raw(key) {
            // try again with reveal all as requested
            Err(ErrorHandled::TooGeneric) => {}
            // deduplicate calls
            other => return other,
        }
    }
    if cfg!(debug_assertions) {
        // Make sure we format the instance even if we do not print it.
        // This serves as a regression test against an ICE on printing.
        // The next two lines concatenated contain some discussion:
        // https://rust-lang.zulipchat.com/#narrow/stream/146212-t-compiler.2Fconst-eval/
        // subject/anon_const_instance_printing/near/135980032
        let instance = with_no_trimmed_paths(|| key.value.instance.to_string());
        trace!("const eval: {:?} ({})", key, instance);
    }

    let cid = key.value;
    let def = cid.instance.def.with_opt_param();

    if let Some(def) = def.as_local() {
        if tcx.has_typeck_results(def.did) {
            if let Some(error_reported) = tcx.typeck_opt_const_arg(def).tainted_by_errors {
                return Err(ErrorHandled::Reported(error_reported));
            }
        }
        if !tcx.is_mir_available(def.did) {
            tcx.sess.delay_span_bug(
                tcx.def_span(def.did),
                &format!("no MIR body is available for {:?}", def.did),
            );
            return Err(ErrorHandled::Reported(ErrorReported {}));
        }
        if let Some(error_reported) = tcx.mir_const_qualif_opt_const_arg(def).error_occured {
            return Err(ErrorHandled::Reported(error_reported));
        }
    }

    let is_static = tcx.is_static(def.did);

    let mut ecx = InterpCx::new(
        tcx,
        tcx.def_span(def.did),
        key.param_env,
        CompileTimeInterpreter::new(tcx.const_eval_limit()),
        // Statics (and promoteds inside statics) may access other statics, because unlike consts
        // they do not have to behave "as if" they were evaluated at runtime.
        MemoryExtra { can_access_statics: is_static },
    );

    let res = ecx.load_mir(cid.instance.def, cid.promoted);
    match res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, &body)) {
        Err(error) => {
            let err = ConstEvalErr::new(&ecx, error, None);
            // Some CTFE errors raise just a lint, not a hard error; see
            // <https://github.com/rust-lang/rust/issues/71800>.
            let is_hard_err = if let Some(def) = def.as_local() {
                // (Associated) consts only emit a lint, since they might be unused.
                !matches!(tcx.def_kind(def.did.to_def_id()), DefKind::Const | DefKind::AssocConst)
                    // check if the inner InterpError is hard
                    || err.error.is_hard_err()
            } else {
                // use of broken constant from other crate: always an error
                true
            };

            if is_hard_err {
                let msg = if is_static {
                    Cow::from("could not evaluate static initializer")
                } else {
                    // If the current item has generics, we'd like to enrich the message with the
                    // instance and its substs: to show the actual compile-time values, in addition to
                    // the expression, leading to the const eval error.
                    let instance = &key.value.instance;
                    if !instance.substs.is_empty() {
                        let instance = with_no_trimmed_paths(|| instance.to_string());
                        let msg = format!("evaluation of `{}` failed", instance);
                        Cow::from(msg)
                    } else {
                        Cow::from("evaluation of constant value failed")
                    }
                };

                Err(err.report_as_error(ecx.tcx.at(ecx.cur_span()), &msg))
            } else {
                let hir_id = tcx.hir().local_def_id_to_hir_id(def.as_local().unwrap().did);
                Err(err.report_as_lint(
                    tcx.at(tcx.def_span(def.did)),
                    "any use of this value will cause an error",
                    hir_id,
                    Some(err.span),
                ))
            }
        }
        Ok(mplace) => {
            // Since evaluation had no errors, validate the resulting constant.
            // This is a separate `try` block to provide more targeted error reporting.
            let validation = try {
                let mut ref_tracking = RefTracking::new(mplace);
                let mut inner = false;
                while let Some((mplace, path)) = ref_tracking.todo.pop() {
                    let mode = match tcx.static_mutability(cid.instance.def_id()) {
                        Some(_) if cid.promoted.is_some() => {
                            // Promoteds in statics are allowed to point to statics.
                            CtfeValidationMode::Const { inner, allow_static_ptrs: true }
                        }
                        Some(_) => CtfeValidationMode::Regular, // a `static`
                        None => CtfeValidationMode::Const { inner, allow_static_ptrs: false },
                    };
                    ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)?;
                    inner = true;
                }
            };
            let alloc_id = mplace.ptr.provenance.unwrap();
            if let Err(error) = validation {
                // Validation failed, report an error. This is always a hard error.
                let err = ConstEvalErr::new(&ecx, error, None);
                Err(err.struct_error(
                    ecx.tcx,
                    "it is undefined behavior to use this value",
                    |mut diag| {
                        diag.note(note_on_undefined_behavior_error());
                        diag.note(&format!(
                            "the raw bytes of the constant ({}",
                            display_allocation(
                                *ecx.tcx,
                                ecx.tcx.global_alloc(alloc_id).unwrap_memory()
                            )
                        ));
                        diag.emit();
                    },
                ))
            } else {
                // Convert to raw constant
                Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
            }
        }
    }
}
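Note (editorial, not part of the commit): to make the value shapes `op_to_const` produces concrete, a hypothetical example; the `ConstValue` variants are the ones used above, the constants themselves are invented.

// For `const S: &str = "hi";` the (ptr, len) ScalarPair becomes, schematically,
//     ConstValue::Slice { data: <allocation holding b"hi">, start: 0, end: 2 }
// A plain integer such as `const N: u8 = 7;` becomes ConstValue::Scalar(7),
// and anything without a scalar or slice representation stays ConstValue::ByRef.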
112
compiler/rustc_const_eval/src/const_eval/fn_queries.rs
Normal file
@@ -0,0 +1,112 @@
use rustc_hir as hir;
use rustc_hir::def_id::{DefId, LocalDefId};
use rustc_middle::hir::map::blocks::FnLikeNode;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::TyCtxt;
use rustc_span::symbol::Symbol;
use rustc_target::spec::abi::Abi;

/// Whether the `def_id` counts as const fn in your current crate, considering all active
/// feature gates
pub fn is_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
    tcx.is_const_fn_raw(def_id)
        && match is_unstable_const_fn(tcx, def_id) {
            Some(feature_name) => {
                // has a `rustc_const_unstable` attribute, check whether the user enabled the
                // corresponding feature gate.
                tcx.features().declared_lib_features.iter().any(|&(sym, _)| sym == feature_name)
            }
            // functions without const stability are either stable user written
            // const fn or the user is using feature gates and we thus don't
            // care what they do
            None => true,
        }
}

/// Whether the `def_id` is an unstable const fn and what feature gate is necessary to enable it
pub fn is_unstable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<Symbol> {
    if tcx.is_const_fn_raw(def_id) {
        let const_stab = tcx.lookup_const_stability(def_id)?;
        if const_stab.level.is_unstable() { Some(const_stab.feature) } else { None }
    } else {
        None
    }
}

pub fn is_parent_const_impl_raw(tcx: TyCtxt<'_>, hir_id: hir::HirId) -> bool {
    let parent_id = tcx.hir().get_parent_did(hir_id);
    if !parent_id.is_top_level_module() { is_const_impl_raw(tcx, parent_id) } else { false }
}

/// Checks whether the function has a `const` modifier or, in case it is an intrinsic, whether
/// said intrinsic has a `rustc_const_{un,}stable` attribute.
fn is_const_fn_raw(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());

    let node = tcx.hir().get(hir_id);

    if let hir::Node::ForeignItem(hir::ForeignItem { kind: hir::ForeignItemKind::Fn(..), .. }) =
        node
    {
        // Intrinsics use `rustc_const_{un,}stable` attributes to indicate constness. All other
        // foreign items cannot be evaluated at compile-time.
        if let Abi::RustIntrinsic | Abi::PlatformIntrinsic = tcx.hir().get_foreign_abi(hir_id) {
            tcx.lookup_const_stability(def_id).is_some()
        } else {
            false
        }
    } else if let Some(fn_like) = FnLikeNode::from_node(node) {
        if fn_like.constness() == hir::Constness::Const {
            return true;
        }

        // If the function itself is not annotated with `const`, it may still be a `const fn`
        // if it resides in a const trait impl.
        is_parent_const_impl_raw(tcx, hir_id)
    } else if let hir::Node::Ctor(_) = node {
        true
    } else {
        false
    }
}

/// Checks whether the given item is an `impl` that has a `const` modifier.
fn is_const_impl_raw(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool {
    let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
    let node = tcx.hir().get(hir_id);
    matches!(
        node,
        hir::Node::Item(hir::Item {
            kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
            ..
        })
    )
}

fn is_promotable_const_fn(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
    is_const_fn(tcx, def_id)
        && match tcx.lookup_const_stability(def_id) {
            Some(stab) => {
                if cfg!(debug_assertions) && stab.promotable {
                    let sig = tcx.fn_sig(def_id);
                    assert_eq!(
                        sig.unsafety(),
                        hir::Unsafety::Normal,
                        "don't mark const unsafe fns as promotable",
                        // https://github.com/rust-lang/rust/pull/53851#issuecomment-418760682
                    );
                }
                stab.promotable
            }
            None => false,
        }
}

pub fn provide(providers: &mut Providers) {
    *providers = Providers {
        is_const_fn_raw,
        is_const_impl_raw: |tcx, def_id| is_const_impl_raw(tcx, def_id.expect_local()),
        is_promotable_const_fn,
        ..*providers
    };
}
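Note (editorial, not part of the commit): a sketch of how the queries above compose, under the assumption that `tcx` and `def_id` are in scope inside the compiler; the function name `describe_constness` is invented.

fn describe_constness(tcx: TyCtxt<'_>, def_id: DefId) -> &'static str {
    if !is_const_fn(tcx, def_id) {
        // Either not const at all, or const behind a feature gate that the
        // current crate has not enabled.
        "not callable in const contexts here"
    } else if is_unstable_const_fn(tcx, def_id).is_some() {
        // Const fn with a `rustc_const_unstable` attribute whose feature gate
        // is enabled in the current crate.
        "const fn gated by an enabled unstable feature"
    } else {
        "stable const fn"
    }
}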
474
compiler/rustc_const_eval/src/const_eval/machine.rs
Normal file
@@ -0,0 +1,474 @@
use rustc_middle::mir;
use rustc_middle::ty::{self, Ty};
use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use std::hash::Hash;

use rustc_data_structures::fx::FxHashMap;
use std::fmt;

use rustc_ast::Mutability;
use rustc_hir::def_id::DefId;
use rustc_middle::mir::AssertMessage;
use rustc_session::Limit;
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi;

use crate::interpret::{
    self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, OpTy,
    PlaceTy, Scalar, StackPopUnwind,
};

use super::error::*;

impl<'mir, 'tcx> InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>> {
    /// "Intercept" a function call to a panic-related function
    /// because we have something special to do for it.
    /// If this returns successfully (`Ok`), the function should just be evaluated normally.
    fn hook_panic_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
    ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
        // The list of functions we handle here must be in sync with
        // `is_lang_panic_fn` in `transform/check_consts/mod.rs`.
        let def_id = instance.def_id();
        if Some(def_id) == self.tcx.lang_items().panic_fn()
            || Some(def_id) == self.tcx.lang_items().panic_str()
            || Some(def_id) == self.tcx.lang_items().begin_panic_fn()
        {
            // &str
            assert!(args.len() == 1);

            let msg_place = self.deref_operand(&args[0])?;
            let msg = Symbol::intern(self.read_str(&msg_place)?);
            let span = self.find_closest_untracked_caller_location();
            let (file, line, col) = self.location_triple_for_span(span);
            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
        } else if Some(def_id) == self.tcx.lang_items().panic_fmt()
            || Some(def_id) == self.tcx.lang_items().begin_panic_fmt()
        {
            // For panic_fmt, call const_panic_fmt instead.
            if let Some(const_panic_fmt) = self.tcx.lang_items().const_panic_fmt() {
                return Ok(Some(
                    ty::Instance::resolve(
                        *self.tcx,
                        ty::ParamEnv::reveal_all(),
                        const_panic_fmt,
                        self.tcx.intern_substs(&[]),
                    )
                    .unwrap()
                    .unwrap(),
                ));
            }
        }
        Ok(None)
    }
}

/// Extra machine state for CTFE, and the Machine instance
pub struct CompileTimeInterpreter<'mir, 'tcx> {
    /// For now, the number of terminators that can be evaluated before we throw a resource
    /// exhaustion error.
    ///
    /// Setting this to `0` disables the limit and allows the interpreter to run forever.
    pub steps_remaining: usize,

    /// The virtual call stack.
    pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
}

#[derive(Copy, Clone, Debug)]
pub struct MemoryExtra {
    /// We need to make sure consts never point to anything mutable, even recursively. That is
    /// relied on for pattern matching on consts with references.
    /// To achieve this, two pieces have to work together:
    /// * Interning makes everything outside of statics immutable.
    /// * Pointers to allocations inside of statics can never leak outside, to a non-static global.
    /// This boolean here controls the second part.
    pub(super) can_access_statics: bool,
}

impl<'mir, 'tcx> CompileTimeInterpreter<'mir, 'tcx> {
    pub(super) fn new(const_eval_limit: Limit) -> Self {
        CompileTimeInterpreter { steps_remaining: const_eval_limit.0, stack: Vec::new() }
    }
}

impl<K: Hash + Eq, V> interpret::AllocMap<K, V> for FxHashMap<K, V> {
    #[inline(always)]
    fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
    where
        K: Borrow<Q>,
    {
        FxHashMap::contains_key(self, k)
    }

    #[inline(always)]
    fn insert(&mut self, k: K, v: V) -> Option<V> {
        FxHashMap::insert(self, k, v)
    }

    #[inline(always)]
    fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
    where
        K: Borrow<Q>,
    {
        FxHashMap::remove(self, k)
    }

    #[inline(always)]
    fn filter_map_collect<T>(&self, mut f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T> {
        self.iter().filter_map(move |(k, v)| f(k, &*v)).collect()
    }

    #[inline(always)]
    fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        match self.get(&k) {
            Some(v) => Ok(v),
            None => {
                vacant()?;
                bug!("The CTFE machine shouldn't ever need to extend the alloc_map when reading")
            }
        }
    }

    #[inline(always)]
    fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E> {
        match self.entry(k) {
            Entry::Occupied(e) => Ok(e.into_mut()),
            Entry::Vacant(e) => {
                let v = vacant()?;
                Ok(e.insert(v))
            }
        }
    }
}

crate type CompileTimeEvalContext<'mir, 'tcx> =
    InterpCx<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>;

#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum MemoryKind {
    Heap,
}

impl fmt::Display for MemoryKind {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            MemoryKind::Heap => write!(f, "heap allocation"),
        }
    }
}

impl interpret::MayLeak for MemoryKind {
    #[inline(always)]
    fn may_leak(self) -> bool {
        match self {
            MemoryKind::Heap => false,
        }
    }
}

impl interpret::MayLeak for ! {
    #[inline(always)]
    fn may_leak(self) -> bool {
        // `self` is uninhabited
        self
    }
}

impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
    fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
        match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
            // Equality with integers can never be known for sure.
            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
            // FIXME: return `true` for when both sides are the same pointer, *except* that
            // some things (like functions and vtables) do not have stable addresses
            // so we need to be careful around them (see e.g. #73722).
            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
        }
    }

    fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
        match (a, b) {
            // Comparisons between integers are always known.
            (Scalar::Int(_), Scalar::Int(_)) => a != b,
            // Comparisons of abstract pointers with null pointers are known if the pointer
            // is in bounds, because if they are in bounds, the pointer can't be null.
            // Inequality with integers other than null can never be known for sure.
            (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
                int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
            }
            // FIXME: return `true` for at least some comparisons where we can reliably
            // determine the result of runtime inequality tests at compile-time.
            // Examples include comparison of addresses in different static items.
            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
        }
    }
}

impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir, 'tcx> {
    compile_time_machine!(<'mir, 'tcx>);

    type MemoryKind = MemoryKind;

    type MemoryExtra = MemoryExtra;

    const PANIC_ON_ALLOC_FAIL: bool = false; // will be raised as a proper error

    fn load_mir(
        ecx: &InterpCx<'mir, 'tcx, Self>,
        instance: ty::InstanceDef<'tcx>,
    ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
        match instance {
            ty::InstanceDef::Item(def) => {
                if ecx.tcx.is_ctfe_mir_available(def.did) {
                    Ok(ecx.tcx.mir_for_ctfe_opt_const_arg(def))
                } else {
                    let path = ecx.tcx.def_path_str(def.did);
                    Err(ConstEvalErrKind::NeedsRfc(format!("calling extern function `{}`", path))
                        .into())
                }
            }
            _ => Ok(ecx.tcx.instance_mir(instance)),
        }
    }

    fn find_mir_or_eval_fn(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        _abi: Abi,
        args: &[OpTy<'tcx>],
        _ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
        _unwind: StackPopUnwind, // unwinding is not supported in consts
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
        debug!("find_mir_or_eval_fn: {:?}", instance);

        // Only check non-glue functions
        if let ty::InstanceDef::Item(def) = instance.def {
            // Execution might have wandered off into other crates, so we cannot do a stability-
            // sensitive check here. But we can at least rule out functions that are not const
            // at all.
            if !ecx.tcx.is_const_fn_raw(def.did) {
                // allow calling functions marked with #[default_method_body_is_const].
                if !ecx.tcx.has_attr(def.did, sym::default_method_body_is_const) {
                    // Some functions we support even if they are non-const -- but avoid testing
                    // that for const fn!
                    if let Some(new_instance) = ecx.hook_panic_fn(instance, args)? {
                        // We call another const fn instead.
                        return Self::find_mir_or_eval_fn(
                            ecx,
                            new_instance,
                            _abi,
                            args,
                            _ret,
                            _unwind,
                        );
                    } else {
                        // We certainly do *not* want to actually call the fn
                        // though, so be sure we return here.
                        throw_unsup_format!("calling non-const function `{}`", instance)
                    }
                }
            }
        }
        // This is a const fn. Call it.
        Ok(Some(ecx.load_mir(instance.def, None)?))
    }

    fn call_intrinsic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        ret: Option<(&PlaceTy<'tcx>, mir::BasicBlock)>,
        _unwind: StackPopUnwind,
    ) -> InterpResult<'tcx> {
        // Shared intrinsics.
        if ecx.emulate_intrinsic(instance, args, ret)? {
            return Ok(());
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());

        // CTFE-specific intrinsics.
        let (dest, ret) = match ret {
            None => {
                return Err(ConstEvalErrKind::NeedsRfc(format!(
                    "calling intrinsic `{}`",
                    intrinsic_name
                ))
                .into());
            }
            Some(p) => p,
        };
        match intrinsic_name {
            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                let a = ecx.read_immediate(&args[0])?.to_scalar()?;
                let b = ecx.read_immediate(&args[1])?.to_scalar()?;
                let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
                    ecx.guaranteed_eq(a, b)
                } else {
                    ecx.guaranteed_ne(a, b)
                };
                ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
            }
            sym::const_allocate => {
                let size = ecx.read_scalar(&args[0])?.to_machine_usize(ecx)?;
                let align = ecx.read_scalar(&args[1])?.to_machine_usize(ecx)?;

                let align = match Align::from_bytes(align) {
                    Ok(a) => a,
                    Err(err) => throw_ub_format!("align has to be a power of 2, {}", err),
                };

                let ptr = ecx.memory.allocate(
                    Size::from_bytes(size as u64),
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap),
                )?;
                ecx.write_pointer(ptr, dest)?;
            }
            _ => {
                return Err(ConstEvalErrKind::NeedsRfc(format!(
                    "calling intrinsic `{}`",
                    intrinsic_name
                ))
                .into());
            }
        }

        ecx.go_to_block(ret);
        Ok(())
    }

    fn assert_panic(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        msg: &AssertMessage<'tcx>,
        _unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::AssertKind::*;
        // Convert `AssertKind<Operand>` to `AssertKind<Scalar>`.
        let eval_to_int =
            |op| ecx.read_immediate(&ecx.eval_operand(op, None)?).map(|x| x.to_const_int());
        let err = match msg {
            BoundsCheck { ref len, ref index } => {
                let len = eval_to_int(len)?;
                let index = eval_to_int(index)?;
                BoundsCheck { len, index }
            }
            Overflow(op, l, r) => Overflow(*op, eval_to_int(l)?, eval_to_int(r)?),
            OverflowNeg(op) => OverflowNeg(eval_to_int(op)?),
            DivisionByZero(op) => DivisionByZero(eval_to_int(op)?),
            RemainderByZero(op) => RemainderByZero(eval_to_int(op)?),
            ResumedAfterReturn(generator_kind) => ResumedAfterReturn(*generator_kind),
            ResumedAfterPanic(generator_kind) => ResumedAfterPanic(*generator_kind),
        };
        Err(ConstEvalErrKind::AssertFailure(err).into())
    }

    fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, msg: String) -> InterpResult<'tcx, !> {
        Err(ConstEvalErrKind::Abort(msg).into())
    }

    fn binary_ptr_op(
        _ecx: &InterpCx<'mir, 'tcx, Self>,
        _bin_op: mir::BinOp,
        _left: &ImmTy<'tcx>,
        _right: &ImmTy<'tcx>,
    ) -> InterpResult<'tcx, (Scalar, bool, Ty<'tcx>)> {
        Err(ConstEvalErrKind::NeedsRfc("pointer arithmetic or comparison".to_string()).into())
    }

    fn box_alloc(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        _dest: &PlaceTy<'tcx>,
    ) -> InterpResult<'tcx> {
        Err(ConstEvalErrKind::NeedsRfc("heap allocations via `box` keyword".to_string()).into())
    }

    fn before_terminator(ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
        // The step limit has already been hit in a previous call to `before_terminator`.
        if ecx.machine.steps_remaining == 0 {
            return Ok(());
        }

        ecx.machine.steps_remaining -= 1;
        if ecx.machine.steps_remaining == 0 {
            throw_exhaust!(StepLimitReached)
        }

        Ok(())
    }

    #[inline(always)]
    fn init_frame_extra(
        ecx: &mut InterpCx<'mir, 'tcx, Self>,
        frame: Frame<'mir, 'tcx>,
    ) -> InterpResult<'tcx, Frame<'mir, 'tcx>> {
        // Enforce stack size limit. Add 1 because this is run before the new frame is pushed.
        if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
            throw_exhaust!(StackFrameLimitReached)
        } else {
            Ok(frame)
        }
    }

    #[inline(always)]
    fn stack(
        ecx: &'a InterpCx<'mir, 'tcx, Self>,
    ) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>] {
        &ecx.machine.stack
    }

    #[inline(always)]
    fn stack_mut(
        ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
    ) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>> {
        &mut ecx.machine.stack
    }

    fn before_access_global(
        memory_extra: &MemoryExtra,
        alloc_id: AllocId,
        allocation: &Allocation,
        static_def_id: Option<DefId>,
        is_write: bool,
    ) -> InterpResult<'tcx> {
        if is_write {
            // Write access. These are never allowed, but we give a targeted error message.
            if allocation.mutability == Mutability::Not {
                Err(err_ub!(WriteToReadOnly(alloc_id)).into())
            } else {
                Err(ConstEvalErrKind::ModifiedGlobal.into())
            }
        } else {
            // Read access. These are usually allowed, with some exceptions.
            if memory_extra.can_access_statics {
                // Machine configuration allows us to read from anything (e.g., `static` initializer).
                Ok(())
            } else if static_def_id.is_some() {
                // Machine configuration does not allow us to read statics
                // (e.g., `const` initializer).
                // See const_eval::machine::MemoryExtra::can_access_statics for why
                // this check is so important: if we could read statics, we could read pointers
                // to mutable allocations *inside* statics. These allocations are not themselves
                // statics, so pointers to them can get around the check in `validity.rs`.
                Err(ConstEvalErrKind::ConstAccessesStatic.into())
            } else {
                // Immutable global, this read is fine.
                // But make sure we never accept a read from something mutable, that would be
                // unsound. The reason is that the content of this allocation may be different
                // now and at run-time, so if we permit reading now we might return the wrong value.
                assert_eq!(allocation.mutability, Mutability::Not);
                Ok(())
            }
        }
    }
}

// Please do not add any code below the above `Machine` trait impl. I (oli-obk) plan more cleanups
// so we can end up having a file with just that impl, but for now, let's keep the impl discoverable
// at the bottom of this file.
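Note (editorial, not part of the commit): the step-limit bookkeeping in `before_terminator` above, reduced to a standalone model that can be run outside the compiler; every name except `steps_remaining` is invented.

// Each terminator decrements the counter once; only the transition to zero
// reports exhaustion, and a counter that is already zero (limit disabled, or
// exhaustion already reported) is left alone, mirroring the early return above.
struct StepLimiter {
    steps_remaining: usize,
}

impl StepLimiter {
    fn before_terminator(&mut self) -> Result<(), &'static str> {
        if self.steps_remaining == 0 {
            return Ok(());
        }
        self.steps_remaining -= 1;
        if self.steps_remaining == 0 {
            return Err("StepLimitReached");
        }
        Ok(())
    }
}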
207
compiler/rustc_const_eval/src/const_eval/mod.rs
Normal file
@@ -0,0 +1,207 @@
// Not in interpret to make sure we do not use private implementation details

use std::convert::TryFrom;

use rustc_hir::Mutability;
use rustc_middle::ty::{self, TyCtxt};
use rustc_middle::{
    mir::{self, interpret::ConstAlloc},
    ty::ScalarInt,
};
use rustc_span::{source_map::DUMMY_SP, symbol::Symbol};

use crate::interpret::{
    intern_const_alloc_recursive, ConstValue, InternKind, InterpCx, MPlaceTy, MemPlaceMeta, Scalar,
};

mod error;
mod eval_queries;
mod fn_queries;
mod machine;

pub use error::*;
pub use eval_queries::*;
pub use fn_queries::*;
pub use machine::*;

pub(crate) fn const_caller_location(
    tcx: TyCtxt<'tcx>,
    (file, line, col): (Symbol, u32, u32),
) -> ConstValue<'tcx> {
    trace!("const_caller_location: {}:{}:{}", file, line, col);
    let mut ecx = mk_eval_cx(tcx, DUMMY_SP, ty::ParamEnv::reveal_all(), false);

    let loc_place = ecx.alloc_caller_location(file, line, col);
    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
        bug!("intern_const_alloc_recursive should not error in this case")
    }
    ConstValue::Scalar(Scalar::from_pointer(loc_place.ptr.into_pointer_or_addr().unwrap(), &tcx))
}

/// Convert an evaluated constant to a type level constant
pub(crate) fn const_to_valtree<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    raw: ConstAlloc<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
    let ecx = mk_eval_cx(
        tcx, DUMMY_SP, param_env,
        // It is absolutely crucial for soundness that
        // we do not read from static items or other mutable memory.
        false,
    );
    let place = ecx.raw_const_to_mplace(raw).unwrap();
    const_to_valtree_inner(&ecx, &place)
}

fn const_to_valtree_inner<'tcx>(
    ecx: &CompileTimeEvalContext<'tcx, 'tcx>,
    place: &MPlaceTy<'tcx>,
) -> Option<ty::ValTree<'tcx>> {
    let branches = |n, variant| {
        let place = match variant {
            Some(variant) => ecx.mplace_downcast(&place, variant).unwrap(),
            None => *place,
        };
        let variant =
            variant.map(|variant| Some(ty::ValTree::Leaf(ScalarInt::from(variant.as_u32()))));
        let fields = (0..n).map(|i| {
            let field = ecx.mplace_field(&place, i).unwrap();
            const_to_valtree_inner(ecx, &field)
        });
        // For enums, we prepend their variant index before the variant's fields so we can figure out
        // the variant again when just seeing a valtree.
        let branches = variant.into_iter().chain(fields);
        Some(ty::ValTree::Branch(
            ecx.tcx.arena.alloc_from_iter(branches.collect::<Option<Vec<_>>>()?),
        ))
    };
    match place.layout.ty.kind() {
        ty::FnDef(..) => Some(ty::ValTree::zst()),
        ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
            let val = ecx.read_immediate(&place.into()).unwrap();
            let val = val.to_scalar().unwrap();
            Some(ty::ValTree::Leaf(val.assert_int()))
        }

        // Raw pointers are not allowed in type level constants, as we cannot properly test them for
        // equality at compile-time (see `ptr_guaranteed_eq`/`_ne`).
        // Technically we could allow function pointers (represented as `ty::Instance`), but this is not guaranteed to
        // agree with runtime equality tests.
        ty::FnPtr(_) | ty::RawPtr(_) => None,
        ty::Ref(..) => unimplemented!("need to use deref_const"),

        // Trait objects are not allowed in type level constants, as we have no concept for
        // resolving their backing type, even if we can do that at const eval time. We may
        // hypothetically be able to allow `dyn StructuralEq` trait objects in the future,
        // but it is unclear if this is useful.
        ty::Dynamic(..) => None,

        ty::Slice(_) | ty::Str => {
            unimplemented!("need to find the backing data of the slice/str and recurse on that")
        }
        ty::Tuple(substs) => branches(substs.len(), None),
        ty::Array(_, len) => branches(usize::try_from(len.eval_usize(ecx.tcx.tcx, ecx.param_env)).unwrap(), None),

        ty::Adt(def, _) => {
            if def.variants.is_empty() {
                bug!("uninhabited types should have errored and never gotten converted to valtree")
            }

            let variant = ecx.read_discriminant(&place.into()).unwrap().1;

            branches(def.variants[variant].fields.len(), def.is_enum().then_some(variant))
        }

        ty::Never
        | ty::Error(_)
        | ty::Foreign(..)
        | ty::Infer(ty::FreshIntTy(_))
        | ty::Infer(ty::FreshFloatTy(_))
        | ty::Projection(..)
        | ty::Param(_)
        | ty::Bound(..)
        | ty::Placeholder(..)
        // FIXME(oli-obk): we could look behind opaque types
        | ty::Opaque(..)
        | ty::Infer(_)
        // FIXME(oli-obk): we can probably encode closures just like structs
        | ty::Closure(..)
        | ty::Generator(..)
        | ty::GeneratorWitness(..) => None,
    }
}

/// This function uses `unwrap` copiously, because an already validated constant
/// must have valid fields and can thus never fail outside of compiler bugs. However, it is
/// invoked from the pretty printer, where it can receive enums with no variants and e.g.
/// `read_discriminant` needs to be able to handle that.
pub(crate) fn destructure_const<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    val: &'tcx ty::Const<'tcx>,
) -> mir::DestructuredConst<'tcx> {
    trace!("destructure_const: {:?}", val);
    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
    let op = ecx.const_to_op(val, None).unwrap();

    // We go to `usize` as we cannot allocate anything bigger anyway.
    let (field_count, variant, down) = match val.ty.kind() {
        ty::Array(_, len) => (usize::try_from(len.eval_usize(tcx, param_env)).unwrap(), None, op),
        ty::Adt(def, _) if def.variants.is_empty() => {
            return mir::DestructuredConst { variant: None, fields: &[] };
        }
        ty::Adt(def, _) => {
            let variant = ecx.read_discriminant(&op).unwrap().1;
            let down = ecx.operand_downcast(&op, variant).unwrap();
            (def.variants[variant].fields.len(), Some(variant), down)
        }
        ty::Tuple(substs) => (substs.len(), None, op),
        _ => bug!("cannot destructure constant {:?}", val),
    };

    let fields_iter = (0..field_count).map(|i| {
        let field_op = ecx.operand_field(&down, i).unwrap();
        let val = op_to_const(&ecx, &field_op);
        ty::Const::from_value(tcx, val, field_op.layout.ty)
    });
    let fields = tcx.arena.alloc_from_iter(fields_iter);

    mir::DestructuredConst { variant, fields }
}

pub(crate) fn deref_const<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    val: &'tcx ty::Const<'tcx>,
) -> &'tcx ty::Const<'tcx> {
    trace!("deref_const: {:?}", val);
    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
    let op = ecx.const_to_op(val, None).unwrap();
    let mplace = ecx.deref_operand(&op).unwrap();
    if let Some(alloc_id) = mplace.ptr.provenance {
        assert_eq!(
            tcx.get_global_alloc(alloc_id).unwrap().unwrap_memory().mutability,
            Mutability::Not,
            "deref_const cannot be used with mutable allocations as \
             that could allow pattern matching to observe mutable statics",
        );
    }

    let ty = match mplace.meta {
        MemPlaceMeta::None => mplace.layout.ty,
        MemPlaceMeta::Poison => bug!("poison metadata in `deref_const`: {:#?}", mplace),
        // In case of unsized types, figure out the real type behind.
        MemPlaceMeta::Meta(scalar) => match mplace.layout.ty.kind() {
            ty::Str => bug!("there's no sized equivalent of a `str`"),
            ty::Slice(elem_ty) => tcx.mk_array(elem_ty, scalar.to_machine_usize(&tcx).unwrap()),
            _ => bug!(
                "type {} should not have metadata, but had {:?}",
                mplace.layout.ty,
                mplace.meta
            ),
        },
    };

    tcx.mk_const(ty::Const { val: ty::ConstKind::Value(op_to_const(&ecx, &mplace.into())), ty })
}
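Note (editorial, not part of the commit): a worked example of the valtree encoding produced by `const_to_valtree_inner`, assuming the `Leaf`/`Branch` shapes referenced above; the enum itself is invented.

// For `enum E { A(u8, u8), B }`, the value `E::A(1, 2)` is encoded
// (schematically) as
//     Branch([Leaf(0 /* variant index */), Leaf(1), Leaf(2)])
// and `E::B` as Branch([Leaf(1)]): the variant index is prepended before
// the variant's fields, exactly as the comment in the function notes.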
365
compiler/rustc_const_eval/src/interpret/cast.rs
Normal file
@@ -0,0 +1,365 @@
use std::convert::TryFrom;

use rustc_apfloat::ieee::{Double, Single};
use rustc_apfloat::{Float, FloatConvert};
use rustc_middle::mir::interpret::{InterpResult, PointerArithmetic, Scalar};
use rustc_middle::mir::CastKind;
use rustc_middle::ty::adjustment::PointerCast;
use rustc_middle::ty::layout::{IntegerExt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty, TypeAndMut};
use rustc_target::abi::{Integer, Variants};

use super::{
    util::ensure_monomorphic_enough, FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy,
};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    pub fn cast(
        &mut self,
        src: &OpTy<'tcx, M::PointerTag>,
        cast_kind: CastKind,
        cast_ty: Ty<'tcx>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        use rustc_middle::mir::CastKind::*;
        // FIXME: In which cases should we trigger UB when the source is uninit?
        match cast_kind {
            Pointer(PointerCast::Unsize) => {
                let cast_ty = self.layout_of(cast_ty)?;
                self.unsize_into(src, cast_ty, dest)?;
            }

            Misc => {
                let src = self.read_immediate(src)?;
                let res = self.misc_cast(&src, cast_ty)?;
                self.write_immediate(res, dest)?;
            }

            Pointer(PointerCast::MutToConstPointer | PointerCast::ArrayToPointer) => {
                // These are NOPs, but can be wide pointers.
                let v = self.read_immediate(src)?;
                self.write_immediate(*v, dest)?;
            }

            Pointer(PointerCast::ReifyFnPointer) => {
                // The src operand does not matter, just its type
                match *src.layout.ty.kind() {
                    ty::FnDef(def_id, substs) => {
                        // All reifications must be monomorphic, bail out otherwise.
                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;

                        let instance = ty::Instance::resolve_for_fn_ptr(
                            *self.tcx,
                            self.param_env,
                            def_id,
                            substs,
                        )
                        .ok_or_else(|| err_inval!(TooGeneric))?;

                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
                        self.write_pointer(fn_ptr, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
                }
            }

            Pointer(PointerCast::UnsafeFnPointer) => {
                let src = self.read_immediate(src)?;
                match cast_ty.kind() {
                    ty::FnPtr(_) => {
                        // No change to value
                        self.write_immediate(*src, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "fn to unsafe fn cast on {:?}", cast_ty),
                }
            }

            Pointer(PointerCast::ClosureFnPointer(_)) => {
                // The src operand does not matter, just its type
                match *src.layout.ty.kind() {
                    ty::Closure(def_id, substs) => {
                        // All reifications must be monomorphic, bail out otherwise.
                        ensure_monomorphic_enough(*self.tcx, src.layout.ty)?;

                        let instance = ty::Instance::resolve_closure(
                            *self.tcx,
                            def_id,
                            substs,
                            ty::ClosureKind::FnOnce,
                        );
                        let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
                        self.write_pointer(fn_ptr, dest)?;
                    }
                    _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
                }
            }
        }
        Ok(())
    }

    fn misc_cast(
        &self,
        src: &ImmTy<'tcx, M::PointerTag>,
        cast_ty: Ty<'tcx>,
    ) -> InterpResult<'tcx, Immediate<M::PointerTag>> {
        use rustc_middle::ty::TyKind::*;
        trace!("Casting {:?}: {:?} to {:?}", *src, src.layout.ty, cast_ty);

        match src.layout.ty.kind() {
            // Floating point
            Float(FloatTy::F32) => {
                return Ok(self.cast_from_float(src.to_scalar()?.to_f32()?, cast_ty).into());
            }
            Float(FloatTy::F64) => {
                return Ok(self.cast_from_float(src.to_scalar()?.to_f64()?, cast_ty).into());
            }
            // The rest is integer/pointer-"like", including fn ptr casts and casts from enums that
            // are represented as integers.
            _ => assert!(
                src.layout.ty.is_bool()
                    || src.layout.ty.is_char()
                    || src.layout.ty.is_enum()
                    || src.layout.ty.is_integral()
                    || src.layout.ty.is_any_ptr(),
                "Unexpected cast from type {:?}",
                src.layout.ty
            ),
        }

        // # First handle non-scalar source values.

        // Handle cast from a ZST enum (0 or 1 variants).
        match src.layout.variants {
            Variants::Single { index } => {
                if src.layout.abi.is_uninhabited() {
                    // This is dead code, because an uninhabited enum is UB to
                    // instantiate.
                    throw_ub!(Unreachable);
                }
                if let Some(discr) = src.layout.ty.discriminant_for_variant(*self.tcx, index) {
                    assert!(src.layout.is_zst());
                    let discr_layout = self.layout_of(discr.ty)?;
                    return Ok(self.cast_from_scalar(discr.val, discr_layout, cast_ty).into());
                }
            }
            Variants::Multiple { .. } => {}
        }

        // Handle casting any ptr to raw ptr (might be a fat ptr).
|
||||
if src.layout.ty.is_any_ptr() && cast_ty.is_unsafe_ptr() {
|
||||
let dest_layout = self.layout_of(cast_ty)?;
|
||||
if dest_layout.size == src.layout.size {
|
||||
// Thin or fat pointer that just has the pointer kind of the target type changed.
|
||||
return Ok(**src);
|
||||
} else {
|
||||
// Casting the metadata away from a fat ptr.
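// E.g. `*const [u8] as *const u8`: keep only the data half of the scalar pair and drop
// the length metadata.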
|
||||
assert_eq!(src.layout.size, 2 * self.memory.pointer_size());
|
||||
assert_eq!(dest_layout.size, self.memory.pointer_size());
|
||||
assert!(src.layout.ty.is_unsafe_ptr());
|
||||
return match **src {
|
||||
Immediate::ScalarPair(data, _) => Ok(data.into()),
|
||||
Immediate::Scalar(..) => span_bug!(
|
||||
self.cur_span(),
|
||||
"{:?} input to a fat-to-thin cast ({:?} -> {:?})",
|
||||
*src,
|
||||
src.layout.ty,
|
||||
cast_ty
|
||||
),
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
// # The remaining source values are scalar.
|
||||
|
||||
// For all remaining casts, we either
|
||||
// (a) cast a raw ptr to usize, or
|
||||
// (b) cast from an integer-like (including bool, char, enums).
|
||||
// In both cases we want the bits.
|
||||
let bits = src.to_scalar()?.to_bits(src.layout.size)?;
|
||||
Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
|
||||
}
|
||||
|
||||
pub(super) fn cast_from_scalar(
|
||||
&self,
|
||||
v: u128, // raw bits (there is no ScalarTy so we separate data+layout)
|
||||
src_layout: TyAndLayout<'tcx>,
|
||||
cast_ty: Ty<'tcx>,
|
||||
) -> Scalar<M::PointerTag> {
|
||||
// Let's make sure v is sign-extended *if* it has a signed type.
|
||||
let signed = src_layout.abi.is_signed(); // Also asserts that abi is `Scalar`.
|
||||
let v = if signed { self.sign_extend(v, src_layout) } else { v };
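// E.g. `-1i8` arrives here as the raw bits 0xff; sign-extending over the full 128 bits
// gives all-ones, so the later truncation to the target width still represents -1.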
|
||||
trace!("cast_from_scalar: {}, {} -> {}", v, src_layout.ty, cast_ty);
|
||||
use rustc_middle::ty::TyKind::*;
|
||||
match *cast_ty.kind() {
|
||||
Int(_) | Uint(_) | RawPtr(_) => {
|
||||
let size = match *cast_ty.kind() {
|
||||
Int(t) => Integer::from_int_ty(self, t).size(),
|
||||
Uint(t) => Integer::from_uint_ty(self, t).size(),
|
||||
RawPtr(_) => self.pointer_size(),
|
||||
_ => bug!(),
|
||||
};
|
||||
let v = size.truncate(v);
|
||||
Scalar::from_uint(v, size)
|
||||
}
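// E.g. `-1i32 as f32` takes the signed path below and yields -1.0, while `255u8 as f64`
// takes the unsigned path and yields 255.0.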
|
||||
|
||||
Float(FloatTy::F32) if signed => Scalar::from_f32(Single::from_i128(v as i128).value),
|
||||
Float(FloatTy::F64) if signed => Scalar::from_f64(Double::from_i128(v as i128).value),
|
||||
Float(FloatTy::F32) => Scalar::from_f32(Single::from_u128(v).value),
|
||||
Float(FloatTy::F64) => Scalar::from_f64(Double::from_u128(v).value),
|
||||
|
||||
Char => {
|
||||
// `u8` to `char` cast
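// Only `u8 as char` is accepted by the language, so `v` always fits in one byte here;
// e.g. 0x41 becomes 'A'.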
|
||||
Scalar::from_u32(u8::try_from(v).unwrap().into())
|
||||
}
|
||||
|
||||
// Casts to bool are not permitted by rustc, no need to handle them here.
|
||||
_ => span_bug!(self.cur_span(), "invalid int to {:?} cast", cast_ty),
|
||||
}
|
||||
}
|
||||
|
||||
fn cast_from_float<F>(&self, f: F, dest_ty: Ty<'tcx>) -> Scalar<M::PointerTag>
|
||||
where
|
||||
F: Float + Into<Scalar<M::PointerTag>> + FloatConvert<Single> + FloatConvert<Double>,
|
||||
{
|
||||
use rustc_middle::ty::TyKind::*;
|
||||
match *dest_ty.kind() {
|
||||
// float -> uint
|
||||
Uint(t) => {
|
||||
let size = Integer::from_uint_ty(self, t).size();
|
||||
// `to_u128` is a saturating cast, which is what we need
|
||||
// (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
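// E.g. with these saturating semantics, `-1.5f32 as u8` evaluates to 0 and
// `300.0f32 as u8` evaluates to 255.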
|
||||
let v = f.to_u128(size.bits_usize()).value;
|
||||
// This should already fit the bit width
|
||||
Scalar::from_uint(v, size)
|
||||
}
|
||||
// float -> int
|
||||
Int(t) => {
|
||||
let size = Integer::from_int_ty(self, t).size();
|
||||
// `to_i128` is a saturating cast, which is what we need
|
||||
// (https://doc.rust-lang.org/nightly/nightly-rustc/rustc_apfloat/trait.Float.html#method.to_i128_r).
|
||||
let v = f.to_i128(size.bits_usize()).value;
|
||||
Scalar::from_int(v, size)
|
||||
}
|
||||
// float -> f32
|
||||
Float(FloatTy::F32) => Scalar::from_f32(f.convert(&mut false).value),
|
||||
// float -> f64
|
||||
Float(FloatTy::F64) => Scalar::from_f64(f.convert(&mut false).value),
|
||||
// That's it.
|
||||
_ => span_bug!(self.cur_span(), "invalid float to {:?} cast", dest_ty),
|
||||
}
|
||||
}
|
||||
|
||||
fn unsize_into_ptr(
|
||||
&mut self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
// The pointee types
|
||||
source_ty: Ty<'tcx>,
|
||||
cast_ty: Ty<'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// A<Struct> -> A<Trait> conversion
|
||||
let (src_pointee_ty, dest_pointee_ty) =
|
||||
self.tcx.struct_lockstep_tails_erasing_lifetimes(source_ty, cast_ty, self.param_env);
|
||||
|
||||
match (&src_pointee_ty.kind(), &dest_pointee_ty.kind()) {
|
||||
(&ty::Array(_, length), &ty::Slice(_)) => {
|
||||
let ptr = self.read_immediate(src)?.to_scalar()?;
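// E.g. unsizing `&[u8; 4]` to `&[u8]`: reuse the data pointer and attach the
// statically known length 4 as the wide-pointer metadata.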
|
||||
// u64 cast is from usize to u64, which is always good
|
||||
let val =
|
||||
Immediate::new_slice(ptr, length.eval_usize(*self.tcx, self.param_env), self);
|
||||
self.write_immediate(val, dest)
|
||||
}
|
||||
(&ty::Dynamic(ref data_a, ..), &ty::Dynamic(ref data_b, ..)) => {
|
||||
let val = self.read_immediate(src)?;
|
||||
if data_a.principal_def_id() == data_b.principal_def_id() {
|
||||
return self.write_immediate(*val, dest);
|
||||
}
|
||||
// trait upcasting coercion
|
||||
let vptr_entry_idx = self.tcx.vtable_trait_upcasting_coercion_new_vptr_slot((
|
||||
src_pointee_ty,
|
||||
dest_pointee_ty,
|
||||
));
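// E.g. upcasting `&dyn Sub` to `&dyn Super` where `trait Sub: Super`: if the vtable
// layout records the supertrait's vtable at `entry_idx`, load the new vtable pointer
// from the old vtable instead of reusing it unchanged.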
|
||||
|
||||
if let Some(entry_idx) = vptr_entry_idx {
|
||||
let entry_idx = u64::try_from(entry_idx).unwrap();
|
||||
let (old_data, old_vptr) = val.to_scalar_pair()?;
|
||||
let old_vptr = self.scalar_to_ptr(old_vptr);
|
||||
let new_vptr = self
|
||||
.read_new_vtable_after_trait_upcasting_from_vtable(old_vptr, entry_idx)?;
|
||||
self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)
|
||||
} else {
|
||||
self.write_immediate(*val, dest)
|
||||
}
|
||||
}
|
||||
(_, &ty::Dynamic(ref data, _)) => {
|
||||
// Initial cast from sized to dyn trait
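// E.g. `&MyStruct as &dyn Debug`: pair the unchanged data pointer with the vtable
// built for `MyStruct`'s `Debug` impl.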
|
||||
let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
|
||||
let ptr = self.read_immediate(src)?.to_scalar()?;
|
||||
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
|
||||
self.write_immediate(val, dest)
|
||||
}
|
||||
|
||||
_ => {
|
||||
span_bug!(self.cur_span(), "invalid unsizing {:?} -> {:?}", src.layout.ty, cast_ty)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn unsize_into(
|
||||
&mut self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
cast_ty: TyAndLayout<'tcx>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
trace!("Unsizing {:?} of type {} into {:?}", *src, src.layout.ty, cast_ty.ty);
|
||||
match (&src.layout.ty.kind(), &cast_ty.ty.kind()) {
|
||||
(&ty::Ref(_, s, _), &ty::Ref(_, c, _) | &ty::RawPtr(TypeAndMut { ty: c, .. }))
|
||||
| (&ty::RawPtr(TypeAndMut { ty: s, .. }), &ty::RawPtr(TypeAndMut { ty: c, .. })) => {
|
||||
self.unsize_into_ptr(src, dest, s, c)
|
||||
}
|
||||
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
|
||||
assert_eq!(def_a, def_b);
|
||||
if def_a.is_box() || def_b.is_box() {
|
||||
if !def_a.is_box() || !def_b.is_box() {
|
||||
span_bug!(
|
||||
self.cur_span(),
|
||||
"invalid unsizing between {:?} -> {:?}",
|
||||
src.layout.ty,
|
||||
cast_ty.ty
|
||||
);
|
||||
}
|
||||
return self.unsize_into_ptr(
|
||||
src,
|
||||
dest,
|
||||
src.layout.ty.boxed_ty(),
|
||||
cast_ty.ty.boxed_ty(),
|
||||
);
|
||||
}
|
||||
|
||||
// unsizing of generic struct with pointer fields
|
||||
// Example: `Arc<T>` -> `Arc<Trait>`
|
||||
// here we need to increase the size of every &T thin ptr field to a fat ptr
|
||||
for i in 0..src.layout.fields.count() {
|
||||
let cast_ty_field = cast_ty.field(self, i);
|
||||
if cast_ty_field.is_zst() {
|
||||
continue;
|
||||
}
|
||||
let src_field = self.operand_field(src, i)?;
|
||||
let dst_field = self.place_field(dest, i)?;
|
||||
if src_field.layout.ty == cast_ty_field.ty {
|
||||
self.copy_op(&src_field, &dst_field)?;
|
||||
} else {
|
||||
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
_ => span_bug!(
|
||||
self.cur_span(),
|
||||
"unsize_into: invalid conversion: {:?} -> {:?}",
|
||||
src.layout,
|
||||
dest.layout
|
||||
),
|
||||
}
|
||||
}
|
||||
}
|
1049
compiler/rustc_const_eval/src/interpret/eval_context.rs
Normal file
File diff suppressed because it is too large
437
compiler/rustc_const_eval/src/interpret/intern.rs
Normal file
|
@ -0,0 +1,437 @@
|
|||
//! This module specifies the type based interner for constants.
|
||||
//!
|
||||
//! After a const evaluation has computed a value, before we destroy the const evaluator's session
|
||||
//! memory, we need to extract all memory allocations to the global memory pool so they stay around.
|
||||
//!
|
||||
//! In principle, this is not very complicated: we recursively walk the final value, follow all the
|
||||
//! pointers, and move all reachable allocations to the global `tcx` memory. The only complication
|
||||
//! is picking the right mutability for the allocations in a `static` initializer: we want to make
|
||||
//! as many allocations as possible immutable so LLVM can put them into read-only memory. At the
|
||||
//! same time, we need to make memory that could be mutated by the program mutable to avoid
|
||||
//! incorrect compilations. To achieve this, we do a type-based traversal of the final value,
|
||||
//! tracking mutable and shared references and `UnsafeCell` to determine the current mutability.
|
||||
//! (In principle, we could skip this type-based part for `const` and promoteds, as they need to be
|
||||
//! always immutable. At least for `const` however we use this opportunity to reject any `const`
|
||||
//! that contains allocations whose mutability we cannot identify.)
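//!
//! For example, `static A: &AtomicU32 = &AtomicU32::new(0);` must keep the referenced
//! allocation mutable (it sits behind an `UnsafeCell`), while `static B: &u32 = &0;`
//! can be interned as read-only memory.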
|
||||
|
||||
use super::validity::RefTracking;
|
||||
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
|
||||
use rustc_errors::ErrorReported;
|
||||
use rustc_hir as hir;
|
||||
use rustc_middle::mir::interpret::InterpResult;
|
||||
use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
|
||||
|
||||
use rustc_ast::Mutability;
|
||||
|
||||
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, ValueVisitor};
|
||||
use crate::const_eval;
|
||||
|
||||
pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
|
||||
'mir,
|
||||
'tcx,
|
||||
MemoryKind = T,
|
||||
PointerTag = AllocId,
|
||||
ExtraFnVal = !,
|
||||
FrameExtra = (),
|
||||
AllocExtra = (),
|
||||
MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>,
|
||||
>;
|
||||
|
||||
struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> {
|
||||
/// The ectx from which we intern.
|
||||
ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
|
||||
/// Previously encountered safe references.
|
||||
ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>,
|
||||
/// A list of all encountered allocations. After type-based interning, we traverse this list to
|
||||
/// also intern allocations that are only referenced by a raw pointer or inside a union.
|
||||
leftover_allocations: &'rt mut FxHashSet<AllocId>,
|
||||
/// The root kind of the value that we're looking at. This field is never mutated for a
|
||||
/// particular allocation. It is primarily used to make as many allocations as possible
|
||||
/// read-only so LLVM can place them in const memory.
|
||||
mode: InternMode,
|
||||
/// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect
|
||||
/// the intern mode of references we encounter.
|
||||
inside_unsafe_cell: bool,
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
|
||||
enum InternMode {
|
||||
/// A static and its current mutability. Below shared references inside a `static mut`,
|
||||
/// this is *immutable*, and below mutable references inside an `UnsafeCell`, this
|
||||
/// is *mutable*.
|
||||
Static(hir::Mutability),
|
||||
/// A `const`.
|
||||
Const,
|
||||
}
|
||||
|
||||
/// Signalling data structure to ensure we don't recurse
|
||||
/// into the memory of other constants or statics
|
||||
struct IsStaticOrFn;
|
||||
|
||||
/// Intern an allocation without looking at its children.
|
||||
/// `mode` is the mode of the environment where we found this pointer.
|
||||
/// `mutability` is the mutability of the place to be interned; even if that says
|
||||
/// `immutable` things might become mutable if `ty` is not frozen.
|
||||
/// `ty` can be `None` if there is no potential interior mutability
|
||||
/// to account for (e.g. for vtables).
|
||||
fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
|
||||
ecx: &'rt mut InterpCx<'mir, 'tcx, M>,
|
||||
leftover_allocations: &'rt mut FxHashSet<AllocId>,
|
||||
alloc_id: AllocId,
|
||||
mode: InternMode,
|
||||
ty: Option<Ty<'tcx>>,
|
||||
) -> Option<IsStaticOrFn> {
|
||||
trace!("intern_shallow {:?} with {:?}", alloc_id, mode);
|
||||
// remove allocation
|
||||
let tcx = ecx.tcx;
|
||||
let (kind, mut alloc) = match ecx.memory.alloc_map.remove(&alloc_id) {
|
||||
Some(entry) => entry,
|
||||
None => {
|
||||
// Pointer not found in local memory map. It is either a pointer to the global
|
||||
// map, or dangling.
|
||||
// If the pointer is dangling (neither in local nor global memory), we leave it
|
||||
// to validation to error -- it has much better error messages, pointing out where
|
||||
// in the value the dangling reference lies.
|
||||
// The `delay_span_bug` ensures that we don't forget such a check in validation.
|
||||
if tcx.get_global_alloc(alloc_id).is_none() {
|
||||
tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer");
|
||||
}
|
||||
// treat dangling pointers like other statics
|
||||
// just to stop trying to recurse into them
|
||||
return Some(IsStaticOrFn);
|
||||
}
|
||||
};
|
||||
// This match is just a canary for future changes to `MemoryKind`, which most likely need
|
||||
// changes in this function.
|
||||
match kind {
|
||||
MemoryKind::Stack
|
||||
| MemoryKind::Machine(const_eval::MemoryKind::Heap)
|
||||
| MemoryKind::CallerLocation => {}
|
||||
}
|
||||
// Set allocation mutability as appropriate. This is used by LLVM to put things into
|
||||
// read-only memory, and also by Miri when evaluating other globals that
|
||||
// access this one.
|
||||
if let InternMode::Static(mutability) = mode {
|
||||
// For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume
|
||||
// no interior mutability.
|
||||
let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env));
|
||||
// For statics, allocation mutability is the combination of place mutability and
|
||||
// type mutability.
|
||||
// The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere.
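// E.g. a `static` whose value contains a `Cell<i32>` is not frozen, so its allocation
// stays mutable even when reached through a shared reference.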
|
||||
let immutable = mutability == Mutability::Not && frozen;
|
||||
if immutable {
|
||||
alloc.mutability = Mutability::Not;
|
||||
} else {
|
||||
// Just making sure we are not "upgrading" an immutable allocation to mutable.
|
||||
assert_eq!(alloc.mutability, Mutability::Mut);
|
||||
}
|
||||
} else {
|
||||
// No matter what, *constants are never mutable*. Mutating them is UB.
|
||||
// See const_eval::machine::MemoryExtra::can_access_statics for why
|
||||
// immutability is so important.
|
||||
|
||||
// Validation will ensure that there is no `UnsafeCell` on an immutable allocation.
|
||||
alloc.mutability = Mutability::Not;
|
||||
};
|
||||
// link the alloc id to the actual allocation
|
||||
let alloc = tcx.intern_const_alloc(alloc);
|
||||
leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
|
||||
tcx.set_alloc_id_memory(alloc_id, alloc);
|
||||
None
|
||||
}
|
||||
|
||||
impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
|
||||
InternVisitor<'rt, 'mir, 'tcx, M>
|
||||
{
|
||||
fn intern_shallow(
|
||||
&mut self,
|
||||
alloc_id: AllocId,
|
||||
mode: InternMode,
|
||||
ty: Option<Ty<'tcx>>,
|
||||
) -> Option<IsStaticOrFn> {
|
||||
intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>
|
||||
ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M>
|
||||
{
|
||||
type V = MPlaceTy<'tcx>;
|
||||
|
||||
#[inline(always)]
|
||||
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
|
||||
&self.ecx
|
||||
}
|
||||
|
||||
fn visit_aggregate(
|
||||
&mut self,
|
||||
mplace: &MPlaceTy<'tcx>,
|
||||
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// ZSTs cannot contain pointers, so we can skip them.
|
||||
if mplace.layout.is_zst() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(def) = mplace.layout.ty.ty_adt_def() {
|
||||
if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() {
|
||||
// We are crossing over an `UnsafeCell`, we can mutate again. This means that
|
||||
// References we encounter inside here are interned as pointing to mutable
|
||||
// allocations.
|
||||
// Remember the `old` value to handle nested `UnsafeCell`.
|
||||
let old = std::mem::replace(&mut self.inside_unsafe_cell, true);
|
||||
let walked = self.walk_aggregate(mplace, fields);
|
||||
self.inside_unsafe_cell = old;
|
||||
return walked;
|
||||
}
|
||||
}
|
||||
|
||||
self.walk_aggregate(mplace, fields)
|
||||
}
|
||||
|
||||
fn visit_value(&mut self, mplace: &MPlaceTy<'tcx>) -> InterpResult<'tcx> {
|
||||
// Handle Reference types, as these are the only relocations supported by const eval.
|
||||
// Raw pointers (and boxes) are handled by the `leftover_relocations` logic.
|
||||
let tcx = self.ecx.tcx;
|
||||
let ty = mplace.layout.ty;
|
||||
if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() {
|
||||
let value = self.ecx.read_immediate(&(*mplace).into())?;
|
||||
let mplace = self.ecx.ref_to_mplace(&value)?;
|
||||
assert_eq!(mplace.layout.ty, referenced_ty);
|
||||
// Handle trait object vtables.
|
||||
if let ty::Dynamic(..) =
|
||||
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
|
||||
{
|
||||
let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
|
||||
if let Some(alloc_id) = ptr.provenance {
|
||||
// Explicitly choose const mode here, since vtables are immutable, even
|
||||
// if the reference of the fat pointer is mutable.
|
||||
self.intern_shallow(alloc_id, InternMode::Const, None);
|
||||
} else {
|
||||
// Validation will error (with a better message) on an invalid vtable pointer.
|
||||
// Let validation show the error message, but make sure it *does* error.
|
||||
tcx.sess
|
||||
.delay_span_bug(tcx.span, "vtable pointers cannot be integer pointers");
|
||||
}
|
||||
}
|
||||
// Check if we have encountered this pointer+layout combination before.
|
||||
// Only recurse for allocation-backed pointers.
|
||||
if let Some(alloc_id) = mplace.ptr.provenance {
|
||||
// Compute the mode with which we intern this. Our goal here is to make as many
|
||||
// statics as we can immutable so they can be placed in read-only memory by LLVM.
|
||||
let ref_mode = match self.mode {
|
||||
InternMode::Static(mutbl) => {
|
||||
// In statics, merge outer mutability with reference mutability and
|
||||
// take into account whether we are in an `UnsafeCell`.
|
||||
|
||||
// The only way a mutable reference actually works as a mutable reference is
|
||||
// by being in a `static mut` directly or behind another mutable reference.
|
||||
// If there's an immutable reference or we are inside a `static`, then our
|
||||
// mutable reference is equivalent to an immutable one. As an example:
|
||||
// `&&mut Foo` is semantically equivalent to `&&Foo`
|
||||
match ref_mutability {
|
||||
_ if self.inside_unsafe_cell => {
|
||||
// Inside an `UnsafeCell` is like inside a `static mut`, the "outer"
|
||||
// mutability does not matter.
|
||||
InternMode::Static(ref_mutability)
|
||||
}
|
||||
Mutability::Not => {
|
||||
// A shared reference, things become immutable.
|
||||
// We do *not* consider `freeze` here: `intern_shallow` considers
|
||||
// `freeze` for the actual mutability of this allocation; the intern
|
||||
// mode for references contained in this allocation is tracked more
|
||||
// precisely when traversing the referenced data (by tracking
|
||||
// `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still
|
||||
// has the left inner reference interned into a read-only
|
||||
// allocation.
|
||||
InternMode::Static(Mutability::Not)
|
||||
}
|
||||
Mutability::Mut => {
|
||||
// Mutable reference.
|
||||
InternMode::Static(mutbl)
|
||||
}
|
||||
}
|
||||
}
|
||||
InternMode::Const => {
|
||||
// Ignore `UnsafeCell`, everything is immutable. Validity does some sanity
|
||||
// checking for mutable references that we encounter -- they must all be
|
||||
// ZST.
|
||||
InternMode::Const
|
||||
}
|
||||
};
|
||||
match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
|
||||
// No need to recurse, these are interned already and statics may have
|
||||
// cycles, so we don't want to recurse there
|
||||
Some(IsStaticOrFn) => {}
|
||||
// intern everything referenced by this value. The mutability is taken from the
|
||||
// reference. It is checked above that mutable references only happen in
|
||||
// `static mut`
|
||||
None => self.ref_tracking.track((mplace, ref_mode), || ()),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
} else {
|
||||
// Not a reference -- proceed recursively.
|
||||
self.walk_value(mplace)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)]
|
||||
pub enum InternKind {
|
||||
/// The `mutability` of the static, ignoring the type which may have interior mutability.
|
||||
Static(hir::Mutability),
|
||||
Constant,
|
||||
Promoted,
|
||||
}
|
||||
|
||||
/// Intern `ret` and everything it references.
|
||||
///
|
||||
/// This *cannot raise an interpreter error*. Doing so is left to validation, which
|
||||
/// tracks where in the value we are and thus can show much better error messages.
|
||||
/// Any errors here would anyway be turned into `const_err` lints, whereas validation failures
|
||||
/// are hard errors.
|
||||
#[tracing::instrument(level = "debug", skip(ecx))]
|
||||
pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, M>,
|
||||
intern_kind: InternKind,
|
||||
ret: &MPlaceTy<'tcx>,
|
||||
) -> Result<(), ErrorReported>
|
||||
where
|
||||
'tcx: 'mir,
|
||||
{
|
||||
let tcx = ecx.tcx;
|
||||
let base_intern_mode = match intern_kind {
|
||||
InternKind::Static(mutbl) => InternMode::Static(mutbl),
|
||||
// `Constant` includes array lengths.
|
||||
InternKind::Constant | InternKind::Promoted => InternMode::Const,
|
||||
};
|
||||
|
||||
// Type based interning.
|
||||
// `ref_tracking` tracks typed references we have already interned and still need to crawl for
|
||||
// more typed information inside them.
|
||||
// `leftover_allocations` collects *all* allocations we see, because some might not
|
||||
// be available in a typed way. They get interned at the end.
|
||||
let mut ref_tracking = RefTracking::empty();
|
||||
let leftover_allocations = &mut FxHashSet::default();
|
||||
|
||||
// start with the outermost allocation
|
||||
intern_shallow(
|
||||
ecx,
|
||||
leftover_allocations,
|
||||
// The outermost allocation must exist, because we allocated it with
|
||||
// `Memory::allocate`.
|
||||
ret.ptr.provenance.unwrap(),
|
||||
base_intern_mode,
|
||||
Some(ret.layout.ty),
|
||||
);
|
||||
|
||||
ref_tracking.track((*ret, base_intern_mode), || ());
|
||||
|
||||
while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() {
|
||||
let res = InternVisitor {
|
||||
ref_tracking: &mut ref_tracking,
|
||||
ecx,
|
||||
mode,
|
||||
leftover_allocations,
|
||||
inside_unsafe_cell: false,
|
||||
}
|
||||
.visit_value(&mplace);
|
||||
// We deliberately *ignore* interpreter errors here. When there is a problem, the remaining
|
||||
// references are "leftover"-interned, and later validation will show a proper error
|
||||
// and point at the right part of the value causing the problem.
|
||||
match res {
|
||||
Ok(()) => {}
|
||||
Err(error) => {
|
||||
ecx.tcx.sess.delay_span_bug(
|
||||
ecx.tcx.span,
|
||||
&format!(
|
||||
"error during interning should later cause validation failure: {}",
|
||||
error
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Intern the rest of the allocations as mutable. These might be inside unions, padding, raw
|
||||
// pointers, ... So we can't intern them according to their type rules
|
||||
|
||||
let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect();
|
||||
while let Some(alloc_id) = todo.pop() {
|
||||
if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) {
|
||||
// We can't call the `intern_shallow` method here, as its logic is tailored to safe
|
||||
// references and a `leftover_allocations` set (where we only have a todo-list here).
|
||||
// So we hand-roll the interning logic here again.
|
||||
match intern_kind {
|
||||
// Statics may contain mutable allocations even behind relocations.
|
||||
// Even for immutable statics it would be ok to have mutable allocations behind
|
||||
// raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`.
|
||||
InternKind::Static(_) => {}
|
||||
// Raw pointers in promoteds may only point to immutable things so we mark
|
||||
// everything as immutable.
|
||||
// It is UB to mutate through a raw pointer obtained via an immutable reference:
|
||||
// Since all references and pointers inside a promoted must by their very definition
|
||||
// be created from an immutable reference (and promotion also excludes interior
|
||||
// mutability), mutating through them would be UB.
|
||||
// There's no way we can check whether the user is using raw pointers correctly,
|
||||
// so all we can do is mark this as immutable here.
|
||||
InternKind::Promoted => {
|
||||
// See const_eval::machine::MemoryExtra::can_access_statics for why
|
||||
// immutability is so important.
|
||||
alloc.mutability = Mutability::Not;
|
||||
}
|
||||
InternKind::Constant => {
|
||||
// If it's a constant, we should not have any "leftovers" as everything
|
||||
// is tracked by const-checking.
|
||||
// FIXME: downgrade this to a warning? It rejects some legitimate consts,
|
||||
// such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`.
|
||||
ecx.tcx
|
||||
.sess
|
||||
.span_err(ecx.tcx.span, "untyped pointers are not allowed in constant");
|
||||
// For better errors later, mark the allocation as immutable.
|
||||
alloc.mutability = Mutability::Not;
|
||||
}
|
||||
}
|
||||
let alloc = tcx.intern_const_alloc(alloc);
|
||||
tcx.set_alloc_id_memory(alloc_id, alloc);
|
||||
for &(_, alloc_id) in alloc.relocations().iter() {
|
||||
if leftover_allocations.insert(alloc_id) {
|
||||
todo.push(alloc_id);
|
||||
}
|
||||
}
|
||||
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
|
||||
// Codegen does not like dangling pointers, and generally `tcx` assumes that
|
||||
// all allocations referenced anywhere actually exist. So, make sure we error here.
|
||||
ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant");
|
||||
return Err(ErrorReported);
|
||||
} else if ecx.tcx.get_global_alloc(alloc_id).is_none() {
|
||||
// We have hit an `AllocId` that is neither in local nor global memory and isn't
|
||||
// marked as dangling by local memory. That should be impossible.
|
||||
span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
|
||||
InterpCx<'mir, 'tcx, M>
|
||||
{
|
||||
/// A helper function that allocates memory for the layout given and gives you access to mutate
|
||||
/// it. Once your own mutation code is done, the backing `Allocation` is removed from the
|
||||
/// current `Memory` and returned.
|
||||
pub fn intern_with_temp_alloc(
|
||||
&mut self,
|
||||
layout: TyAndLayout<'tcx>,
|
||||
f: impl FnOnce(
|
||||
&mut InterpCx<'mir, 'tcx, M>,
|
||||
&PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ()>,
|
||||
) -> InterpResult<'tcx, &'tcx Allocation> {
|
||||
let dest = self.allocate(layout, MemoryKind::Stack)?;
|
||||
f(self, &dest.into())?;
|
||||
let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
|
||||
alloc.mutability = Mutability::Not;
|
||||
Ok(self.tcx.intern_const_alloc(alloc))
|
||||
}
|
||||
}
|
585
compiler/rustc_const_eval/src/interpret/intrinsics.rs
Normal file
|
@ -0,0 +1,585 @@
|
|||
//! Intrinsics and other functions that the miri engine executes without
|
||||
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
|
||||
//! and miri.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use rustc_hir::def_id::DefId;
|
||||
use rustc_middle::mir::{
|
||||
self,
|
||||
interpret::{ConstValue, GlobalId, InterpResult, Scalar},
|
||||
BinOp,
|
||||
};
|
||||
use rustc_middle::ty;
|
||||
use rustc_middle::ty::layout::LayoutOf as _;
|
||||
use rustc_middle::ty::subst::SubstsRef;
|
||||
use rustc_middle::ty::{Ty, TyCtxt};
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
use rustc_target::abi::{Abi, Align, Primitive, Size};
|
||||
|
||||
use super::{
|
||||
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
|
||||
Pointer,
|
||||
};
|
||||
|
||||
mod caller_location;
|
||||
mod type_name;
|
||||
|
||||
fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
|
||||
let size = match kind {
|
||||
Primitive::Int(integer, _) => integer.size(),
|
||||
_ => bug!("invalid `{}` argument: {:?}", name, bits),
|
||||
};
|
||||
let extra = 128 - u128::from(size.bits());
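// E.g. `ctlz` on the `u8` value 0b0000_0100: `leading_zeros()` counts 125 zeros over the
// full 128-bit representation, and subtracting `extra` (120) yields the expected 5.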
|
||||
let bits_out = match name {
|
||||
sym::ctpop => u128::from(bits.count_ones()),
|
||||
sym::ctlz => u128::from(bits.leading_zeros()) - extra,
|
||||
sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
|
||||
sym::bswap => (bits << extra).swap_bytes(),
|
||||
sym::bitreverse => (bits << extra).reverse_bits(),
|
||||
_ => bug!("not a numeric intrinsic: {}", name),
|
||||
};
|
||||
Scalar::from_uint(bits_out, size)
|
||||
}
|
||||
|
||||
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
|
||||
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
|
||||
crate fn eval_nullary_intrinsic<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
param_env: ty::ParamEnv<'tcx>,
|
||||
def_id: DefId,
|
||||
substs: SubstsRef<'tcx>,
|
||||
) -> InterpResult<'tcx, ConstValue<'tcx>> {
|
||||
let tp_ty = substs.type_at(0);
|
||||
let name = tcx.item_name(def_id);
|
||||
Ok(match name {
|
||||
sym::type_name => {
|
||||
ensure_monomorphic_enough(tcx, tp_ty)?;
|
||||
let alloc = type_name::alloc_type_name(tcx, tp_ty);
|
||||
ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
|
||||
}
|
||||
sym::needs_drop => {
|
||||
ensure_monomorphic_enough(tcx, tp_ty)?;
|
||||
ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
|
||||
}
|
||||
sym::min_align_of | sym::pref_align_of => {
|
||||
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
|
||||
let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
|
||||
let n = match name {
|
||||
sym::pref_align_of => layout.align.pref.bytes(),
|
||||
sym::min_align_of => layout.align.abi.bytes(),
|
||||
_ => bug!(),
|
||||
};
|
||||
ConstValue::from_machine_usize(n, &tcx)
|
||||
}
|
||||
sym::type_id => {
|
||||
ensure_monomorphic_enough(tcx, tp_ty)?;
|
||||
ConstValue::from_u64(tcx.type_id_hash(tp_ty))
|
||||
}
|
||||
sym::variant_count => match tp_ty.kind() {
|
||||
// Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
|
||||
ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
|
||||
ty::Projection(_)
|
||||
| ty::Opaque(_, _)
|
||||
| ty::Param(_)
|
||||
| ty::Bound(_, _)
|
||||
| ty::Placeholder(_)
|
||||
| ty::Infer(_) => throw_inval!(TooGeneric),
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Foreign(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::FnDef(_, _)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Dynamic(_, _)
|
||||
| ty::Closure(_, _)
|
||||
| ty::Generator(_, _, _)
|
||||
| ty::GeneratorWitness(_)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
|
||||
},
|
||||
other => bug!("`{}` is not a zero arg intrinsic", other),
|
||||
})
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Returns `true` if emulation happened.
|
||||
/// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
|
||||
/// intrinsic handling.
|
||||
pub fn emulate_intrinsic(
|
||||
&mut self,
|
||||
instance: ty::Instance<'tcx>,
|
||||
args: &[OpTy<'tcx, M::PointerTag>],
|
||||
ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
let substs = instance.substs;
|
||||
let intrinsic_name = self.tcx.item_name(instance.def_id());
|
||||
|
||||
// First handle intrinsics without return place.
|
||||
let (dest, ret) = match ret {
|
||||
None => match intrinsic_name {
|
||||
sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
|
||||
sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
|
||||
// Unsupported diverging intrinsic.
|
||||
_ => return Ok(false),
|
||||
},
|
||||
Some(p) => p,
|
||||
};
|
||||
|
||||
// Keep the patterns in this match ordered the same as the list in
|
||||
// `src/librustc_middle/ty/constness.rs`
|
||||
match intrinsic_name {
|
||||
sym::caller_location => {
|
||||
let span = self.find_closest_untracked_caller_location();
|
||||
let location = self.alloc_caller_location_for_span(span);
|
||||
self.write_immediate(location.to_ref(self), dest)?;
|
||||
}
|
||||
|
||||
sym::min_align_of_val | sym::size_of_val => {
|
||||
// Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
|
||||
// dereferenceable!
|
||||
let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
|
||||
let (size, align) = self
|
||||
.size_and_align_of_mplace(&place)?
|
||||
.ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;
|
||||
|
||||
let result = match intrinsic_name {
|
||||
sym::min_align_of_val => align.bytes(),
|
||||
sym::size_of_val => size.bytes(),
|
||||
_ => bug!(),
|
||||
};
|
||||
|
||||
self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
|
||||
}
|
||||
|
||||
sym::min_align_of
|
||||
| sym::pref_align_of
|
||||
| sym::needs_drop
|
||||
| sym::type_id
|
||||
| sym::type_name
|
||||
| sym::variant_count => {
|
||||
let gid = GlobalId { instance, promoted: None };
|
||||
let ty = match intrinsic_name {
|
||||
sym::min_align_of | sym::pref_align_of | sym::variant_count => {
|
||||
self.tcx.types.usize
|
||||
}
|
||||
sym::needs_drop => self.tcx.types.bool,
|
||||
sym::type_id => self.tcx.types.u64,
|
||||
sym::type_name => self.tcx.mk_static_str(),
|
||||
_ => bug!("already checked for nullary intrinsics"),
|
||||
};
|
||||
let val =
|
||||
self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
|
||||
let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
|
||||
self.copy_op(&val, dest)?;
|
||||
}
|
||||
|
||||
sym::ctpop
|
||||
| sym::cttz
|
||||
| sym::cttz_nonzero
|
||||
| sym::ctlz
|
||||
| sym::ctlz_nonzero
|
||||
| sym::bswap
|
||||
| sym::bitreverse => {
|
||||
let ty = substs.type_at(0);
|
||||
let layout_of = self.layout_of(ty)?;
|
||||
let val = self.read_scalar(&args[0])?.check_init()?;
|
||||
let bits = val.to_bits(layout_of.size)?;
|
||||
let kind = match layout_of.abi {
|
||||
Abi::Scalar(ref scalar) => scalar.value,
|
||||
_ => span_bug!(
|
||||
self.cur_span(),
|
||||
"{} called on invalid type {:?}",
|
||||
intrinsic_name,
|
||||
ty
|
||||
),
|
||||
};
|
||||
let (nonzero, intrinsic_name) = match intrinsic_name {
|
||||
sym::cttz_nonzero => (true, sym::cttz),
|
||||
sym::ctlz_nonzero => (true, sym::ctlz),
|
||||
other => (false, other),
|
||||
};
|
||||
if nonzero && bits == 0 {
|
||||
throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
|
||||
}
|
||||
let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
|
||||
self.write_scalar(out_val, dest)?;
|
||||
}
|
||||
sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
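// E.g. `add_with_overflow(200u8, 100u8)` writes the pair `(44, true)`:
// the wrapped result plus an overflow flag.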
|
||||
let lhs = self.read_immediate(&args[0])?;
|
||||
let rhs = self.read_immediate(&args[1])?;
|
||||
let bin_op = match intrinsic_name {
|
||||
sym::add_with_overflow => BinOp::Add,
|
||||
sym::sub_with_overflow => BinOp::Sub,
|
||||
sym::mul_with_overflow => BinOp::Mul,
|
||||
_ => bug!("Already checked for int ops"),
|
||||
};
|
||||
self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
|
||||
}
|
||||
sym::saturating_add | sym::saturating_sub => {
|
||||
let l = self.read_immediate(&args[0])?;
|
||||
let r = self.read_immediate(&args[1])?;
|
||||
let is_add = intrinsic_name == sym::saturating_add;
|
||||
let (val, overflowed, _ty) = self.overflowing_binary_op(
|
||||
if is_add { BinOp::Add } else { BinOp::Sub },
|
||||
&l,
|
||||
&r,
|
||||
)?;
|
||||
let val = if overflowed {
|
||||
let num_bits = l.layout.size.bits();
|
||||
if l.layout.abi.is_signed() {
|
||||
// For signed ints the saturated value depends on the sign of the first
|
||||
// term since the sign of the second term can be inferred from this and
|
||||
// the fact that the operation has overflowed (if either is 0 no
|
||||
// overflow can occur)
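// E.g. `100i8.saturating_add(100)` overflows with a positive first term, so the
// branch below produces the maximum positive value 127; for `1u8.saturating_sub(2)`
// the unsigned branch produces 0.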
|
||||
let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
|
||||
let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
|
||||
if first_term_positive {
|
||||
// Negative overflow not possible since the positive first term
|
||||
// can only increase an (in range) negative term for addition
|
||||
// or corresponding negated positive term for subtraction
|
||||
Scalar::from_uint(
|
||||
(1u128 << (num_bits - 1)) - 1, // max positive
|
||||
Size::from_bits(num_bits),
|
||||
)
|
||||
} else {
|
||||
// Positive overflow not possible for similar reason
|
||||
// max negative
|
||||
Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
|
||||
}
|
||||
} else {
|
||||
// unsigned
|
||||
if is_add {
|
||||
// max unsigned
|
||||
Scalar::from_uint(
|
||||
u128::MAX >> (128 - num_bits),
|
||||
Size::from_bits(num_bits),
|
||||
)
|
||||
} else {
|
||||
// underflow to 0
|
||||
Scalar::from_uint(0u128, Size::from_bits(num_bits))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
val
|
||||
};
|
||||
self.write_scalar(val, dest)?;
|
||||
}
|
||||
sym::discriminant_value => {
|
||||
let place = self.deref_operand(&args[0])?;
|
||||
let discr_val = self.read_discriminant(&place.into())?.0;
|
||||
self.write_scalar(discr_val, dest)?;
|
||||
}
|
||||
sym::unchecked_shl
|
||||
| sym::unchecked_shr
|
||||
| sym::unchecked_add
|
||||
| sym::unchecked_sub
|
||||
| sym::unchecked_mul
|
||||
| sym::unchecked_div
|
||||
| sym::unchecked_rem => {
|
||||
let l = self.read_immediate(&args[0])?;
|
||||
let r = self.read_immediate(&args[1])?;
|
||||
let bin_op = match intrinsic_name {
|
||||
sym::unchecked_shl => BinOp::Shl,
|
||||
sym::unchecked_shr => BinOp::Shr,
|
||||
sym::unchecked_add => BinOp::Add,
|
||||
sym::unchecked_sub => BinOp::Sub,
|
||||
sym::unchecked_mul => BinOp::Mul,
|
||||
sym::unchecked_div => BinOp::Div,
|
||||
sym::unchecked_rem => BinOp::Rem,
|
||||
_ => bug!("Already checked for int ops"),
|
||||
};
|
||||
let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
|
||||
if overflowed {
|
||||
let layout = self.layout_of(substs.type_at(0))?;
|
||||
let r_val = r.to_scalar()?.to_bits(layout.size)?;
|
||||
if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
|
||||
throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
|
||||
} else {
|
||||
throw_ub_format!("overflow executing `{}`", intrinsic_name);
|
||||
}
|
||||
}
|
||||
self.write_scalar(val, dest)?;
|
||||
}
|
||||
sym::rotate_left | sym::rotate_right => {
|
||||
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
|
||||
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
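// E.g. `0b1000_0001u8.rotate_left(1)`: shift_bits = 1, inv_shift_bits = 7, giving
// 0b1_0000_0010 | 0b0000_0001 = 0b1_0000_0011, which truncates to 0b0000_0011.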
|
||||
let layout = self.layout_of(substs.type_at(0))?;
|
||||
let val = self.read_scalar(&args[0])?.check_init()?;
|
||||
let val_bits = val.to_bits(layout.size)?;
|
||||
let raw_shift = self.read_scalar(&args[1])?.check_init()?;
|
||||
let raw_shift_bits = raw_shift.to_bits(layout.size)?;
|
||||
let width_bits = u128::from(layout.size.bits());
|
||||
let shift_bits = raw_shift_bits % width_bits;
|
||||
let inv_shift_bits = (width_bits - shift_bits) % width_bits;
|
||||
let result_bits = if intrinsic_name == sym::rotate_left {
|
||||
(val_bits << shift_bits) | (val_bits >> inv_shift_bits)
|
||||
} else {
|
||||
(val_bits >> shift_bits) | (val_bits << inv_shift_bits)
|
||||
};
|
||||
let truncated_bits = self.truncate(result_bits, layout);
|
||||
let result = Scalar::from_uint(truncated_bits, layout.size);
|
||||
self.write_scalar(result, dest)?;
|
||||
}
|
||||
sym::copy => {
|
||||
self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
|
||||
}
|
||||
sym::offset => {
|
||||
let ptr = self.read_pointer(&args[0])?;
|
||||
let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
|
||||
let pointee_ty = substs.type_at(0);
|
||||
|
||||
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
|
||||
self.write_pointer(offset_ptr, dest)?;
|
||||
}
|
||||
sym::arith_offset => {
|
||||
let ptr = self.read_pointer(&args[0])?;
|
||||
let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
|
||||
let pointee_ty = substs.type_at(0);
|
||||
|
||||
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
|
||||
let offset_bytes = offset_count.wrapping_mul(pointee_size);
|
||||
let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
|
||||
self.write_pointer(offset_ptr, dest)?;
|
||||
}
|
||||
sym::ptr_offset_from => {
|
||||
let a = self.read_immediate(&args[0])?.to_scalar()?;
|
||||
let b = self.read_immediate(&args[1])?.to_scalar()?;
|
||||
|
||||
// Special case: if both scalars are *equal integers*
|
||||
// and not null, we pretend there is an allocation of size 0 right there,
|
||||
// and their offset is 0. (There's never a valid object at null, making it an
|
||||
// exception from the exception.)
|
||||
// This is the dual to the special exception for offset-by-0
|
||||
// in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
|
||||
//
|
||||
// Control flow is weird because we cannot early-return (to reach the
|
||||
// `go_to_block` at the end).
|
||||
let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
|
||||
let a = a.try_to_machine_usize(*self.tcx).unwrap();
|
||||
let b = b.try_to_machine_usize(*self.tcx).unwrap();
|
||||
if a == b && a != 0 {
|
||||
self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
} else {
|
||||
false
|
||||
};
|
||||
|
||||
if !done {
|
||||
// General case: we need two pointers.
|
||||
let a = self.scalar_to_ptr(a);
|
||||
let b = self.scalar_to_ptr(b);
|
||||
let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
|
||||
let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
|
||||
if a_alloc_id != b_alloc_id {
|
||||
throw_ub_format!(
|
||||
"ptr_offset_from cannot compute offset of pointers into different \
|
||||
allocations.",
|
||||
);
|
||||
}
|
||||
let usize_layout = self.layout_of(self.tcx.types.usize)?;
|
||||
let isize_layout = self.layout_of(self.tcx.types.isize)?;
|
||||
let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
|
||||
let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
|
||||
let (val, _overflowed, _ty) =
|
||||
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
|
||||
let pointee_layout = self.layout_of(substs.type_at(0))?;
|
||||
let val = ImmTy::from_scalar(val, isize_layout);
|
||||
let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
|
||||
self.exact_div(&val, &size, dest)?;
|
||||
}
|
||||
}
|
||||
|
||||
sym::transmute => {
|
||||
self.copy_op_transmute(&args[0], dest)?;
|
||||
}
|
||||
sym::assert_inhabited => {
|
||||
let ty = instance.substs.type_at(0);
|
||||
let layout = self.layout_of(ty)?;
|
||||
|
||||
if layout.abi.is_uninhabited() {
|
||||
// The run-time intrinsic panics just to get a good backtrace; here we abort
|
||||
// since there is no problem showing a backtrace even for aborts.
|
||||
M::abort(
|
||||
self,
|
||||
format!(
|
||||
"aborted execution: attempted to instantiate uninhabited type `{}`",
|
||||
ty
|
||||
),
|
||||
)?;
|
||||
}
|
||||
}
|
||||
sym::simd_insert => {
|
||||
let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
|
||||
let elem = &args[2];
|
||||
let input = &args[0];
|
||||
let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
|
||||
assert!(
|
||||
index < len,
|
||||
"Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
|
||||
index,
|
||||
e_ty,
|
||||
len
|
||||
);
|
||||
assert_eq!(
|
||||
input.layout, dest.layout,
|
||||
"Return type `{}` must match vector type `{}`",
|
||||
dest.layout.ty, input.layout.ty
|
||||
);
|
||||
assert_eq!(
|
||||
elem.layout.ty, e_ty,
|
||||
"Scalar element type `{}` must match vector element type `{}`",
|
||||
elem.layout.ty, e_ty
|
||||
);
|
||||
|
||||
for i in 0..len {
|
||||
let place = self.place_index(dest, i)?;
|
||||
let value = if i == index { *elem } else { self.operand_index(input, i)? };
|
||||
self.copy_op(&value, &place)?;
|
||||
}
|
||||
}
|
||||
sym::simd_extract => {
|
||||
let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
|
||||
let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
|
||||
assert!(
|
||||
index < len,
|
||||
"index `{}` is out-of-bounds of vector type `{}` with length `{}`",
|
||||
index,
|
||||
e_ty,
|
||||
len
|
||||
);
|
||||
assert_eq!(
|
||||
e_ty, dest.layout.ty,
|
||||
"Return type `{}` must match vector element type `{}`",
|
||||
dest.layout.ty, e_ty
|
||||
);
|
||||
self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
|
||||
}
|
||||
sym::likely | sym::unlikely | sym::black_box => {
|
||||
// These just return their argument
|
||||
self.copy_op(&args[0], dest)?;
|
||||
}
|
||||
sym::assume => {
|
||||
let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
|
||||
if !cond {
|
||||
throw_ub_format!("`assume` intrinsic called with `false`");
|
||||
}
|
||||
}
|
||||
sym::raw_eq => {
|
||||
let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
|
||||
self.write_scalar(result, dest)?;
|
||||
}
|
||||
_ => return Ok(false),
|
||||
}
|
||||
|
||||
trace!("{:?}", self.dump_place(**dest));
|
||||
self.go_to_block(ret);
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
pub fn exact_div(
|
||||
&mut self,
|
||||
a: &ImmTy<'tcx, M::PointerTag>,
|
||||
b: &ImmTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// Performs an exact division, resulting in undefined behavior where
|
||||
// `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
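// E.g. `exact_div(6i32, 2)` evaluates to 3, while `exact_div(7i32, 2)` and
// `exact_div(i32::MIN, -1)` hit the UB reports below.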
|
||||
// First, check x % y != 0 (or if that computation overflows).
|
||||
let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
|
||||
if overflow || res.assert_bits(a.layout.size) != 0 {
|
||||
// Then, check if `b` is -1, which is the "MIN / -1" case.
|
||||
let minus1 = Scalar::from_int(-1, dest.layout.size);
|
||||
let b_scalar = b.to_scalar().unwrap();
|
||||
if b_scalar == minus1 {
|
||||
throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
|
||||
} else {
|
||||
throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,)
|
||||
}
|
||||
}
|
||||
// `Rem` says this is all right, so we can let `Div` do its job.
|
||||
self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
|
||||
}
|
||||
|
||||
/// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
|
||||
/// allocation. For integer pointers, we consider each of them their own tiny allocation of size
|
||||
/// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
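///
/// E.g. offsetting a dangling integer pointer by 0 is accepted, but offsetting it by anything
/// else, offsetting the null pointer at all, or stepping outside the allocation reports UB.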
|
||||
pub fn ptr_offset_inbounds(
|
||||
&self,
|
||||
ptr: Pointer<Option<M::PointerTag>>,
|
||||
pointee_ty: Ty<'tcx>,
|
||||
offset_count: i64,
|
||||
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
|
||||
// We cannot overflow i64 as a type's size must be <= isize::MAX.
|
||||
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
|
||||
// The computed offset, in bytes, cannot overflow an isize.
|
||||
let offset_bytes =
|
||||
offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
|
||||
// The offset being in bounds cannot rely on "wrapping around" the address space.
|
||||
// So, first rule out overflows in the pointer arithmetic.
|
||||
let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
|
||||
// ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
|
||||
// memory between these pointers must be accessible. Note that we do not require the
|
||||
// pointers to be properly aligned (unlike a read/write operation).
|
||||
let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
|
||||
let size = offset_bytes.unsigned_abs();
|
||||
// This call handles checking for integer/null pointers.
|
||||
self.memory.check_ptr_access_align(
|
||||
min_ptr,
|
||||
Size::from_bytes(size),
|
||||
Align::ONE,
|
||||
CheckInAllocMsg::PointerArithmeticTest,
|
||||
)?;
|
||||
Ok(offset_ptr)
|
||||
}
|
||||
|
||||
/// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
|
||||
pub(crate) fn copy_intrinsic(
|
||||
&mut self,
|
||||
src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
|
||||
dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
|
||||
count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
|
||||
nonoverlapping: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
let count = self.read_scalar(&count)?.to_machine_usize(self)?;
|
||||
let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
|
||||
let (size, align) = (layout.size, layout.align.abi);
|
||||
let size = size.checked_mul(count, self).ok_or_else(|| {
|
||||
err_ub_format!(
|
||||
"overflow computing total size of `{}`",
|
||||
if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
|
||||
)
|
||||
})?;
|
||||
|
||||
let src = self.read_pointer(&src)?;
|
||||
let dst = self.read_pointer(&dst)?;
|
||||
|
||||
self.memory.copy(src, align, dst, align, size, nonoverlapping)
|
||||
}
|
||||
|
||||
pub(crate) fn raw_eq_intrinsic(
|
||||
&mut self,
|
||||
lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
|
||||
rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
|
||||
) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
|
||||
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
|
||||
assert!(!layout.is_unsized());
|
||||
|
||||
let lhs = self.read_pointer(lhs)?;
|
||||
let rhs = self.read_pointer(rhs)?;
|
||||
let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
|
||||
let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
|
||||
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
|
||||
}
|
||||
}
|
|
@ -0,0 +1,123 @@
|
|||
use std::convert::TryFrom;
|
||||
|
||||
use rustc_ast::Mutability;
|
||||
use rustc_hir::lang_items::LangItem;
|
||||
use rustc_middle::mir::TerminatorKind;
|
||||
use rustc_middle::ty::layout::LayoutOf;
|
||||
use rustc_middle::ty::subst::Subst;
|
||||
use rustc_span::{Span, Symbol};
|
||||
|
||||
use crate::interpret::{
|
||||
intrinsics::{InterpCx, Machine},
|
||||
MPlaceTy, MemoryKind, Scalar,
|
||||
};
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Walks up the callstack from the intrinsic's callsite, searching for the first callsite in a
|
||||
/// frame which is not `#[track_caller]`.
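///
/// E.g. if a plain `fn a()` calls a `#[track_caller]` `fn b()` that executes the
/// `caller_location` intrinsic, the walk skips `b`'s frame and returns the span of the
/// call site inside `a`.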
|
||||
crate fn find_closest_untracked_caller_location(&self) -> Span {
|
||||
for frame in self.stack().iter().rev() {
|
||||
debug!("find_closest_untracked_caller_location: checking frame {:?}", frame.instance);
|
||||
|
||||
// Assert that the frame we look at is actually executing code currently
|
||||
// (`loc` is `Err` when we are unwinding and the frame does not require cleanup).
|
||||
let loc = frame.loc.unwrap();
|
||||
|
||||
// This could be a non-`Call` terminator (such as `Drop`), or not a terminator at all
|
||||
// (such as `box`). Use the normal span by default.
|
||||
let mut source_info = *frame.body.source_info(loc);
|
||||
|
||||
// If this is a `Call` terminator, use the `fn_span` instead.
|
||||
let block = &frame.body.basic_blocks()[loc.block];
|
||||
if loc.statement_index == block.statements.len() {
|
||||
debug!(
|
||||
"find_closest_untracked_caller_location: got terminator {:?} ({:?})",
|
||||
block.terminator(),
|
||||
block.terminator().kind
|
||||
);
|
||||
if let TerminatorKind::Call { fn_span, .. } = block.terminator().kind {
|
||||
source_info.span = fn_span;
|
||||
}
|
||||
}
|
||||
|
||||
// Walk up the `SourceScope`s, in case some of them are from MIR inlining.
|
||||
// If so, the starting `source_info.span` is in the innermost inlined
|
||||
// function, and will be replaced with outer callsite spans as long
|
||||
// as the inlined functions were `#[track_caller]`.
|
||||
loop {
|
||||
let scope_data = &frame.body.source_scopes[source_info.scope];
|
||||
|
||||
if let Some((callee, callsite_span)) = scope_data.inlined {
|
||||
// Stop inside the most nested non-`#[track_caller]` function,
|
||||
// before ever reaching its caller (which is irrelevant).
|
||||
if !callee.def.requires_caller_location(*self.tcx) {
|
||||
return source_info.span;
|
||||
}
|
||||
source_info.span = callsite_span;
|
||||
}
|
||||
|
||||
// Skip past all of the parents with `inlined: None`.
|
||||
match scope_data.inlined_parent_scope {
|
||||
Some(parent) => source_info.scope = parent,
|
||||
None => break,
|
||||
}
|
||||
}
|
||||
|
||||
// Stop inside the most nested non-`#[track_caller]` function,
|
||||
// before ever reaching its caller (which is irrelevant).
|
||||
if !frame.instance.def.requires_caller_location(*self.tcx) {
|
||||
return source_info.span;
|
||||
}
|
||||
}
|
||||
|
||||
bug!("no non-`#[track_caller]` frame found")
|
||||
}
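For reference, a small surface-level example (a sketch, not part of this change) of the behaviour this walk implements: a panic inside a `#[track_caller]` function is attributed to the closest caller that is not itself `#[track_caller]`.

#[track_caller]
fn checked(x: u32) -> u32 {
    // The panic location is forwarded to `checked`'s caller, because
    // `checked` is `#[track_caller]`.
    assert!(x < 10);
    x
}

fn main() {
    let _ = checked(42); // reported panic location: this call site, not the assert above
}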
|
||||
|
||||
/// Allocate a `const core::panic::Location` with the provided filename and line/column numbers.
|
||||
crate fn alloc_caller_location(
|
||||
&mut self,
|
||||
filename: Symbol,
|
||||
line: u32,
|
||||
col: u32,
|
||||
) -> MPlaceTy<'tcx, M::PointerTag> {
|
||||
let file =
|
||||
self.allocate_str(&filename.as_str(), MemoryKind::CallerLocation, Mutability::Not);
|
||||
let line = Scalar::from_u32(line);
|
||||
let col = Scalar::from_u32(col);
|
||||
|
||||
// Allocate memory for `CallerLocation` struct.
|
||||
let loc_ty = self
|
||||
.tcx
|
||||
.type_of(self.tcx.require_lang_item(LangItem::PanicLocation, None))
|
||||
.subst(*self.tcx, self.tcx.mk_substs([self.tcx.lifetimes.re_erased.into()].iter()));
|
||||
let loc_layout = self.layout_of(loc_ty).unwrap();
|
||||
// This can fail if rustc runs out of memory right here. Trying to emit an error would be
|
||||
// pointless, since that would require allocating more memory than a Location.
|
||||
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
|
||||
|
||||
// Initialize fields.
|
||||
self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
|
||||
.expect("writing to memory we just allocated cannot fail");
|
||||
self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
|
||||
.expect("writing to memory we just allocated cannot fail");
|
||||
self.write_scalar(col, &self.mplace_field(&location, 2).unwrap().into())
|
||||
.expect("writing to memory we just allocated cannot fail");
|
||||
|
||||
location
|
||||
}
|
||||
|
||||
crate fn location_triple_for_span(&self, span: Span) -> (Symbol, u32, u32) {
|
||||
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
|
||||
let caller = self.tcx.sess.source_map().lookup_char_pos(topmost.lo());
|
||||
(
|
||||
Symbol::intern(&caller.file.name.prefer_remapped().to_string_lossy()),
|
||||
u32::try_from(caller.line).unwrap(),
|
||||
u32::try_from(caller.col_display).unwrap().checked_add(1).unwrap(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn alloc_caller_location_for_span(&mut self, span: Span) -> MPlaceTy<'tcx, M::PointerTag> {
|
||||
let (file, line, column) = self.location_triple_for_span(span);
|
||||
self.alloc_caller_location(file, line, column)
|
||||
}
|
||||
}
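The `(file, line, column)` triple built here is what user code observes through `core::panic::Location`; a small sketch of that surface API (not part of this change):

#[track_caller]
fn where_am_i() -> (&'static str, u32, u32) {
    let loc = std::panic::Location::caller();
    // Columns are reported 1-based, matching the `+ 1` in
    // `location_triple_for_span` above.
    (loc.file(), loc.line(), loc.column())
}

fn main() {
    let (file, line, col) = where_am_i();
    println!("{}:{}:{}", file, line, col);
}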
|
197
compiler/rustc_const_eval/src/interpret/intrinsics/type_name.rs
Normal file
|
@@ -0,0 +1,197 @@
|
|||
use rustc_hir::def_id::CrateNum;
|
||||
use rustc_hir::definitions::DisambiguatedDefPathData;
|
||||
use rustc_middle::mir::interpret::Allocation;
|
||||
use rustc_middle::ty::{
|
||||
self,
|
||||
print::{PrettyPrinter, Print, Printer},
|
||||
subst::{GenericArg, GenericArgKind},
|
||||
Ty, TyCtxt,
|
||||
};
|
||||
use std::fmt::Write;
|
||||
|
||||
struct AbsolutePathPrinter<'tcx> {
|
||||
tcx: TyCtxt<'tcx>,
|
||||
path: String,
|
||||
}
|
||||
|
||||
impl<'tcx> Printer<'tcx> for AbsolutePathPrinter<'tcx> {
|
||||
type Error = std::fmt::Error;
|
||||
|
||||
type Path = Self;
|
||||
type Region = Self;
|
||||
type Type = Self;
|
||||
type DynExistential = Self;
|
||||
type Const = Self;
|
||||
|
||||
fn tcx(&self) -> TyCtxt<'tcx> {
|
||||
self.tcx
|
||||
}
|
||||
|
||||
fn print_region(self, _region: ty::Region<'_>) -> Result<Self::Region, Self::Error> {
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
|
||||
match *ty.kind() {
|
||||
// Types without identity.
|
||||
ty::Bool
|
||||
| ty::Char
|
||||
| ty::Int(_)
|
||||
| ty::Uint(_)
|
||||
| ty::Float(_)
|
||||
| ty::Str
|
||||
| ty::Array(_, _)
|
||||
| ty::Slice(_)
|
||||
| ty::RawPtr(_)
|
||||
| ty::Ref(_, _, _)
|
||||
| ty::FnPtr(_)
|
||||
| ty::Never
|
||||
| ty::Tuple(_)
|
||||
| ty::Dynamic(_, _) => self.pretty_print_type(ty),
|
||||
|
||||
// Placeholders (all printed as `_` to uniformize them).
|
||||
ty::Param(_) | ty::Bound(..) | ty::Placeholder(_) | ty::Infer(_) | ty::Error(_) => {
|
||||
write!(self, "_")?;
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
// Types with identity (print the module path).
|
||||
ty::Adt(&ty::AdtDef { did: def_id, .. }, substs)
|
||||
| ty::FnDef(def_id, substs)
|
||||
| ty::Opaque(def_id, substs)
|
||||
| ty::Projection(ty::ProjectionTy { item_def_id: def_id, substs })
|
||||
| ty::Closure(def_id, substs)
|
||||
| ty::Generator(def_id, substs, _) => self.print_def_path(def_id, substs),
|
||||
ty::Foreign(def_id) => self.print_def_path(def_id, &[]),
|
||||
|
||||
ty::GeneratorWitness(_) => bug!("type_name: unexpected `GeneratorWitness`"),
|
||||
}
|
||||
}
|
||||
|
||||
fn print_const(self, ct: &'tcx ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
|
||||
self.pretty_print_const(ct, false)
|
||||
}
|
||||
|
||||
fn print_dyn_existential(
|
||||
mut self,
|
||||
predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
|
||||
) -> Result<Self::DynExistential, Self::Error> {
|
||||
let mut first = true;
|
||||
for p in predicates {
|
||||
if !first {
|
||||
write!(self, "+")?;
|
||||
}
|
||||
first = false;
|
||||
self = p.print(self)?;
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
|
||||
self.path.push_str(&self.tcx.crate_name(cnum).as_str());
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn path_qualified(
|
||||
self,
|
||||
self_ty: Ty<'tcx>,
|
||||
trait_ref: Option<ty::TraitRef<'tcx>>,
|
||||
) -> Result<Self::Path, Self::Error> {
|
||||
self.pretty_path_qualified(self_ty, trait_ref)
|
||||
}
|
||||
|
||||
fn path_append_impl(
|
||||
self,
|
||||
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
|
||||
_disambiguated_data: &DisambiguatedDefPathData,
|
||||
self_ty: Ty<'tcx>,
|
||||
trait_ref: Option<ty::TraitRef<'tcx>>,
|
||||
) -> Result<Self::Path, Self::Error> {
|
||||
self.pretty_path_append_impl(
|
||||
|mut cx| {
|
||||
cx = print_prefix(cx)?;
|
||||
|
||||
cx.path.push_str("::");
|
||||
|
||||
Ok(cx)
|
||||
},
|
||||
self_ty,
|
||||
trait_ref,
|
||||
)
|
||||
}
|
||||
|
||||
fn path_append(
|
||||
mut self,
|
||||
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
|
||||
disambiguated_data: &DisambiguatedDefPathData,
|
||||
) -> Result<Self::Path, Self::Error> {
|
||||
self = print_prefix(self)?;
|
||||
|
||||
write!(self.path, "::{}", disambiguated_data.data).unwrap();
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn path_generic_args(
|
||||
mut self,
|
||||
print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
|
||||
args: &[GenericArg<'tcx>],
|
||||
) -> Result<Self::Path, Self::Error> {
|
||||
self = print_prefix(self)?;
|
||||
let args = args.iter().cloned().filter(|arg| match arg.unpack() {
|
||||
GenericArgKind::Lifetime(_) => false,
|
||||
_ => true,
|
||||
});
|
||||
if args.clone().next().is_some() {
|
||||
self.generic_delimiters(|cx| cx.comma_sep(args))
|
||||
} else {
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PrettyPrinter<'tcx> for AbsolutePathPrinter<'tcx> {
|
||||
fn region_should_not_be_omitted(&self, _region: ty::Region<'_>) -> bool {
|
||||
false
|
||||
}
|
||||
fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
|
||||
where
|
||||
T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
|
||||
{
|
||||
if let Some(first) = elems.next() {
|
||||
self = first.print(self)?;
|
||||
for elem in elems {
|
||||
self.path.push_str(", ");
|
||||
self = elem.print(self)?;
|
||||
}
|
||||
}
|
||||
Ok(self)
|
||||
}
|
||||
|
||||
fn generic_delimiters(
|
||||
mut self,
|
||||
f: impl FnOnce(Self) -> Result<Self, Self::Error>,
|
||||
) -> Result<Self, Self::Error> {
|
||||
write!(self, "<")?;
|
||||
|
||||
self = f(self)?;
|
||||
|
||||
write!(self, ">")?;
|
||||
|
||||
Ok(self)
|
||||
}
|
||||
}
|
||||
|
||||
impl Write for AbsolutePathPrinter<'_> {
|
||||
fn write_str(&mut self, s: &str) -> std::fmt::Result {
|
||||
self.path.push_str(s);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Directly returns an `Allocation` containing an absolute path representation of the given type.
|
||||
crate fn alloc_type_name<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> &'tcx Allocation {
|
||||
let path = AbsolutePathPrinter { tcx, path: String::new() }.print_type(ty).unwrap().path;
|
||||
let alloc = Allocation::from_bytes_byte_aligned_immutable(path.into_bytes());
|
||||
tcx.intern_const_alloc(alloc)
|
||||
}
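This printer backs the `type_name` intrinsic, which reaches users as `std::any::type_name`; a quick illustration (not part of this change), keeping in mind that the exact output string is not guaranteed to be stable across compiler versions:

fn main() {
    // Typically prints `core::option::Option<&str>`: an absolute path with
    // lifetimes omitted and type parameters kept, as produced above.
    println!("{}", std::any::type_name::<Option<&str>>());
}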
|
479
compiler/rustc_const_eval/src/interpret/machine.rs
Normal file
|
@@ -0,0 +1,479 @@
|
|||
//! This module contains everything needed to instantiate an interpreter.
|
||||
//! This separation exists to ensure that no fancy miri features like
|
||||
//! interpreting common C functions leak into CTFE.
|
||||
|
||||
use std::borrow::{Borrow, Cow};
|
||||
use std::fmt::Debug;
|
||||
use std::hash::Hash;
|
||||
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::{self, Ty};
|
||||
use rustc_span::def_id::DefId;
|
||||
use rustc_target::abi::Size;
|
||||
use rustc_target::spec::abi::Abi;
|
||||
|
||||
use super::{
|
||||
AllocId, AllocRange, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace,
|
||||
Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
|
||||
};
|
||||
|
||||
/// Data returned by Machine::stack_pop,
|
||||
/// to provide further control over the popping of the stack frame
|
||||
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
|
||||
pub enum StackPopJump {
|
||||
/// Indicates that no special handling should be
|
||||
/// done - we'll either return normally or unwind
|
||||
/// based on the terminator for the function
|
||||
/// we're leaving.
|
||||
Normal,
|
||||
|
||||
/// Indicates that we should *not* jump to the return/unwind address, as the callback already
|
||||
/// took care of everything.
|
||||
NoJump,
|
||||
}
|
||||
|
||||
/// Whether this kind of memory is allowed to leak
|
||||
pub trait MayLeak: Copy {
|
||||
fn may_leak(self) -> bool;
|
||||
}
|
||||
|
||||
/// The functionality needed by memory to manage its allocations
|
||||
pub trait AllocMap<K: Hash + Eq, V> {
|
||||
/// Tests if the map contains the given key.
|
||||
/// Deliberately takes `&mut` because that is sufficient, and some implementations
|
||||
/// can be more efficient then (using `RefCell::get_mut`).
|
||||
fn contains_key<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> bool
|
||||
where
|
||||
K: Borrow<Q>;
|
||||
|
||||
/// Inserts a new entry into the map.
|
||||
fn insert(&mut self, k: K, v: V) -> Option<V>;
|
||||
|
||||
/// Removes an entry from the map.
|
||||
fn remove<Q: ?Sized + Hash + Eq>(&mut self, k: &Q) -> Option<V>
|
||||
where
|
||||
K: Borrow<Q>;
|
||||
|
||||
/// Returns data based on the keys and values in the map.
|
||||
fn filter_map_collect<T>(&self, f: impl FnMut(&K, &V) -> Option<T>) -> Vec<T>;
|
||||
|
||||
/// Returns a reference to entry `k`. If no such entry exists, call
|
||||
/// `vacant` and either forward its error, or add its result to the map
|
||||
/// and return a reference to *that*.
|
||||
fn get_or<E>(&self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E>;
|
||||
|
||||
/// Returns a mutable reference to entry `k`. If no such entry exists, call
|
||||
/// `vacant` and either forward its error, or add its result to the map
|
||||
/// and return a reference to *that*.
|
||||
fn get_mut_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&mut V, E>;
|
||||
|
||||
/// Read-only lookup.
|
||||
fn get(&self, k: K) -> Option<&V> {
|
||||
self.get_or(k, || Err(())).ok()
|
||||
}
|
||||
|
||||
/// Mutable lookup.
|
||||
fn get_mut(&mut self, k: K) -> Option<&mut V> {
|
||||
self.get_mut_or(k, || Err(())).ok()
|
||||
}
|
||||
}
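A minimal sketch of the `get_or` contract described above, assuming a plain `std::collections::HashMap` and simplifying the real trait's `&self`-based interior mutability to `&mut self`; `SimpleAllocMap` is a hypothetical type used only for illustration.

use std::collections::HashMap;
use std::hash::Hash;

struct SimpleAllocMap<K, V>(HashMap<K, V>);

impl<K: Hash + Eq + Clone, V> SimpleAllocMap<K, V> {
    // Returns a reference to entry `k`; `vacant` runs only when the entry is
    // missing, and its error is forwarded to the caller.
    fn get_or<E>(&mut self, k: K, vacant: impl FnOnce() -> Result<V, E>) -> Result<&V, E> {
        if !self.0.contains_key(&k) {
            let v = vacant()?;
            self.0.insert(k.clone(), v);
        }
        Ok(&self.0[&k])
    }
}

fn main() {
    let mut m = SimpleAllocMap(HashMap::new());
    let v = m.get_or::<()>(1, || Ok("first")).unwrap();
    assert_eq!(*v, "first");
}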
|
||||
|
||||
/// Methods of this trait signify a point where CTFE evaluation would fail
|
||||
/// and some use case dependent behaviour can instead be applied.
|
||||
pub trait Machine<'mir, 'tcx>: Sized {
|
||||
/// Additional memory kinds a machine wishes to distinguish from the builtin ones
|
||||
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
|
||||
|
||||
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
|
||||
type PointerTag: Provenance + Eq + Hash + 'static;
|
||||
|
||||
/// Machines can define extra (non-instance) things that represent values of function pointers.
|
||||
/// For example, Miri uses this to return a function pointer from `dlsym`
|
||||
/// that can later be called to execute the right thing.
|
||||
type ExtraFnVal: Debug + Copy;
|
||||
|
||||
/// Extra data stored in every call frame.
|
||||
type FrameExtra;
|
||||
|
||||
/// Extra data stored in memory. A reference to this is available when `AllocExtra`
|
||||
/// gets initialized, so you can e.g., have an `Rc` here if there is global state you
|
||||
/// need access to in the `AllocExtra` hooks.
|
||||
type MemoryExtra;
|
||||
|
||||
/// Extra data stored in every allocation.
|
||||
type AllocExtra: Debug + Clone + 'static;
|
||||
|
||||
/// Memory's allocation map
|
||||
type MemoryMap: AllocMap<
|
||||
AllocId,
|
||||
(MemoryKind<Self::MemoryKind>, Allocation<Self::PointerTag, Self::AllocExtra>),
|
||||
> + Default
|
||||
+ Clone;
|
||||
|
||||
/// The memory kind to use for copied global memory (held in `tcx`) --
|
||||
/// or None if such memory should not be mutated and thus any such attempt will cause
|
||||
/// a `ModifiedStatic` error to be raised.
|
||||
/// Statics are copied under two circumstances: When they are mutated, and when
|
||||
/// `tag_allocation` (see below) returns an owned allocation
|
||||
/// that is added to the memory so that the work is not done twice.
|
||||
const GLOBAL_KIND: Option<Self::MemoryKind>;
|
||||
|
||||
/// Should the machine panic on allocation failures?
|
||||
const PANIC_ON_ALLOC_FAIL: bool;
|
||||
|
||||
/// Whether memory accesses should be alignment-checked.
|
||||
fn enforce_alignment(memory_extra: &Self::MemoryExtra) -> bool;
|
||||
|
||||
/// Whether, when checking alignment, we should `force_int` and thus support
|
||||
/// custom alignment logic based on whatever the integer address happens to be.
|
||||
fn force_int_for_alignment_check(memory_extra: &Self::MemoryExtra) -> bool;
|
||||
|
||||
/// Whether to enforce the validity invariant
|
||||
fn enforce_validity(ecx: &InterpCx<'mir, 'tcx, Self>) -> bool;
|
||||
|
||||
/// Whether function calls should be [ABI](Abi)-checked.
|
||||
fn enforce_abi(_ecx: &InterpCx<'mir, 'tcx, Self>) -> bool {
|
||||
true
|
||||
}
|
||||
|
||||
/// Entry point for obtaining the MIR of anything that should get evaluated.
|
||||
/// So not just functions and shims, but also const/static initializers, anonymous
|
||||
/// constants, ...
|
||||
fn load_mir(
|
||||
ecx: &InterpCx<'mir, 'tcx, Self>,
|
||||
instance: ty::InstanceDef<'tcx>,
|
||||
) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
|
||||
Ok(ecx.tcx.instance_mir(instance))
|
||||
}
|
||||
|
||||
/// Entry point to all function calls.
|
||||
///
|
||||
/// Returns either the mir to use for the call, or `None` if execution should
|
||||
/// just proceed (which usually means this hook did all the work that the
|
||||
/// called function should usually have done). In the latter case, it is
|
||||
/// this hook's responsibility to advance the instruction pointer!
|
||||
/// (This is to support functions like `__rust_maybe_catch_panic` that neither find a MIR
|
||||
/// nor just jump to `ret`, but instead push their own stack frame.)
|
||||
/// Passing `dest` and `ret` in the same `Option` proved very annoying when only one of them
|
||||
/// was used.
|
||||
fn find_mir_or_eval_fn(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
abi: Abi,
|
||||
args: &[OpTy<'tcx, Self::PointerTag>],
|
||||
ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
|
||||
unwind: StackPopUnwind,
|
||||
) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>>;
|
||||
|
||||
/// Execute `fn_val`. It is the hook's responsibility to advance the instruction
|
||||
/// pointer as appropriate.
|
||||
fn call_extra_fn(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
fn_val: Self::ExtraFnVal,
|
||||
abi: Abi,
|
||||
args: &[OpTy<'tcx, Self::PointerTag>],
|
||||
ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
|
||||
unwind: StackPopUnwind,
|
||||
) -> InterpResult<'tcx>;
|
||||
|
||||
/// Directly process an intrinsic without pushing a stack frame. It is the hook's
|
||||
/// responsibility to advance the instruction pointer as appropriate.
|
||||
fn call_intrinsic(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
args: &[OpTy<'tcx, Self::PointerTag>],
|
||||
ret: Option<(&PlaceTy<'tcx, Self::PointerTag>, mir::BasicBlock)>,
|
||||
unwind: StackPopUnwind,
|
||||
) -> InterpResult<'tcx>;
|
||||
|
||||
/// Called to evaluate `Assert` MIR terminators that trigger a panic.
|
||||
fn assert_panic(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
msg: &mir::AssertMessage<'tcx>,
|
||||
unwind: Option<mir::BasicBlock>,
|
||||
) -> InterpResult<'tcx>;
|
||||
|
||||
/// Called to evaluate `Abort` MIR terminator.
|
||||
fn abort(_ecx: &mut InterpCx<'mir, 'tcx, Self>, _msg: String) -> InterpResult<'tcx, !> {
|
||||
throw_unsup_format!("aborting execution is not supported")
|
||||
}
|
||||
|
||||
/// Called for all binary operations where the LHS has pointer type.
|
||||
///
|
||||
/// Returns a (value, overflowed) pair if the operation succeeded
|
||||
fn binary_ptr_op(
|
||||
ecx: &InterpCx<'mir, 'tcx, Self>,
|
||||
bin_op: mir::BinOp,
|
||||
left: &ImmTy<'tcx, Self::PointerTag>,
|
||||
right: &ImmTy<'tcx, Self::PointerTag>,
|
||||
) -> InterpResult<'tcx, (Scalar<Self::PointerTag>, bool, Ty<'tcx>)>;
|
||||
|
||||
/// Heap allocations via the `box` keyword.
|
||||
fn box_alloc(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
dest: &PlaceTy<'tcx, Self::PointerTag>,
|
||||
) -> InterpResult<'tcx>;
|
||||
|
||||
/// Called to read the specified `local` from the `frame`.
|
||||
/// Since reading a ZST is not actually accessing memory or locals, this is never invoked
|
||||
/// for ZST reads.
|
||||
#[inline]
|
||||
fn access_local(
|
||||
_ecx: &InterpCx<'mir, 'tcx, Self>,
|
||||
frame: &Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
|
||||
local: mir::Local,
|
||||
) -> InterpResult<'tcx, Operand<Self::PointerTag>> {
|
||||
frame.locals[local].access()
|
||||
}
|
||||
|
||||
/// Called to write the specified `local` from the `frame`.
|
||||
/// Since writing a ZST is not actually accessing memory or locals, this is never invoked
|
||||
/// for ZST writes.
|
||||
#[inline]
|
||||
fn access_local_mut<'a>(
|
||||
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
|
||||
frame: usize,
|
||||
local: mir::Local,
|
||||
) -> InterpResult<'tcx, Result<&'a mut LocalValue<Self::PointerTag>, MemPlace<Self::PointerTag>>>
|
||||
where
|
||||
'tcx: 'mir,
|
||||
{
|
||||
ecx.stack_mut()[frame].locals[local].access_mut()
|
||||
}
|
||||
|
||||
/// Called before a basic block terminator is executed.
|
||||
/// You can use this to detect endlessly running programs.
|
||||
#[inline]
|
||||
fn before_terminator(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called before a global allocation is accessed.
|
||||
/// `def_id` is `Some` if this is the "lazy" allocation of a static.
|
||||
#[inline]
|
||||
fn before_access_global(
|
||||
_memory_extra: &Self::MemoryExtra,
|
||||
_alloc_id: AllocId,
|
||||
_allocation: &Allocation,
|
||||
_static_def_id: Option<DefId>,
|
||||
_is_write: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Return the `AllocId` for the given thread-local static in the current thread.
|
||||
fn thread_local_static_base_pointer(
|
||||
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
def_id: DefId,
|
||||
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
|
||||
throw_unsup!(ThreadLocalStatic(def_id))
|
||||
}
|
||||
|
||||
/// Return the root pointer for the given `extern static`.
|
||||
fn extern_static_base_pointer(
|
||||
mem: &Memory<'mir, 'tcx, Self>,
|
||||
def_id: DefId,
|
||||
) -> InterpResult<'tcx, Pointer<Self::PointerTag>>;
|
||||
|
||||
/// Return a "base" pointer for the given allocation: the one that is used for direct
|
||||
/// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
|
||||
///
|
||||
/// Not called on `extern` or thread-local statics (those use the methods above).
|
||||
fn tag_alloc_base_pointer(
|
||||
mem: &Memory<'mir, 'tcx, Self>,
|
||||
ptr: Pointer,
|
||||
) -> Pointer<Self::PointerTag>;
|
||||
|
||||
/// "Int-to-pointer cast"
|
||||
fn ptr_from_addr(
|
||||
mem: &Memory<'mir, 'tcx, Self>,
|
||||
addr: u64,
|
||||
) -> Pointer<Option<Self::PointerTag>>;
|
||||
|
||||
/// Convert a pointer with provenance into an allocation-offset pair.
|
||||
fn ptr_get_alloc(
|
||||
mem: &Memory<'mir, 'tcx, Self>,
|
||||
ptr: Pointer<Self::PointerTag>,
|
||||
) -> (AllocId, Size);
|
||||
|
||||
/// Called to initialize the "extra" state of an allocation and make the pointers
|
||||
/// it contains (in relocations) tagged. The way we construct allocations is
|
||||
/// to always first construct it without extra and then add the extra.
|
||||
/// This keeps uniform code paths for handling both allocations created by CTFE
|
||||
/// for globals, and allocations created by Miri during evaluation.
|
||||
///
|
||||
/// `kind` is the kind of the allocation being tagged; it can be `None` when
|
||||
/// it's a global and `GLOBAL_KIND` is `None`.
|
||||
///
|
||||
/// This should avoid copying if no work has to be done! If this returns an owned
|
||||
/// allocation (because a copy had to be done to add tags or metadata), machine memory will
|
||||
/// cache the result. (This relies on `AllocMap::get_or` being able to add the
|
||||
/// owned allocation to the map even when the map is shared.)
|
||||
fn init_allocation_extra<'b>(
|
||||
mem: &Memory<'mir, 'tcx, Self>,
|
||||
id: AllocId,
|
||||
alloc: Cow<'b, Allocation>,
|
||||
kind: Option<MemoryKind<Self::MemoryKind>>,
|
||||
) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>;
|
||||
|
||||
/// Hook for performing extra checks on a memory read access.
|
||||
///
|
||||
/// Takes read-only access to the allocation so that all the memory read
|
||||
/// operations can keep taking `&self`. Use a `RefCell` in `AllocExtra` if you
|
||||
/// need to mutate.
|
||||
#[inline(always)]
|
||||
fn memory_read(
|
||||
_memory_extra: &Self::MemoryExtra,
|
||||
_alloc_extra: &Self::AllocExtra,
|
||||
_tag: Self::PointerTag,
|
||||
_range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Hook for performing extra checks on a memory write access.
|
||||
#[inline(always)]
|
||||
fn memory_written(
|
||||
_memory_extra: &mut Self::MemoryExtra,
|
||||
_alloc_extra: &mut Self::AllocExtra,
|
||||
_tag: Self::PointerTag,
|
||||
_range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Hook for performing extra operations on a memory deallocation.
|
||||
#[inline(always)]
|
||||
fn memory_deallocated(
|
||||
_memory_extra: &mut Self::MemoryExtra,
|
||||
_alloc_extra: &mut Self::AllocExtra,
|
||||
_tag: Self::PointerTag,
|
||||
_range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Executes a retagging operation.
|
||||
#[inline]
|
||||
fn retag(
|
||||
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
_kind: mir::RetagKind,
|
||||
_place: &PlaceTy<'tcx, Self::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called immediately before a new stack frame gets pushed.
|
||||
fn init_frame_extra(
|
||||
ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
frame: Frame<'mir, 'tcx, Self::PointerTag>,
|
||||
) -> InterpResult<'tcx, Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
|
||||
|
||||
/// Borrow the current thread's stack.
|
||||
fn stack(
|
||||
ecx: &'a InterpCx<'mir, 'tcx, Self>,
|
||||
) -> &'a [Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>];
|
||||
|
||||
/// Mutably borrow the current thread's stack.
|
||||
fn stack_mut(
|
||||
ecx: &'a mut InterpCx<'mir, 'tcx, Self>,
|
||||
) -> &'a mut Vec<Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>>;
|
||||
|
||||
/// Called immediately after a stack frame got pushed and its locals got initialized.
|
||||
fn after_stack_push(_ecx: &mut InterpCx<'mir, 'tcx, Self>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
|
||||
fn after_stack_pop(
|
||||
_ecx: &mut InterpCx<'mir, 'tcx, Self>,
|
||||
_frame: Frame<'mir, 'tcx, Self::PointerTag, Self::FrameExtra>,
|
||||
_unwinding: bool,
|
||||
) -> InterpResult<'tcx, StackPopJump> {
|
||||
// By default, we do not support unwinding from panics
|
||||
Ok(StackPopJump::Normal)
|
||||
}
|
||||
}
|
||||
|
||||
// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
|
||||
// (CTFE and ConstProp) use the same instance. Here, we share that code.
|
||||
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
|
||||
type PointerTag = AllocId;
|
||||
type ExtraFnVal = !;
|
||||
|
||||
type MemoryMap =
|
||||
rustc_data_structures::fx::FxHashMap<AllocId, (MemoryKind<Self::MemoryKind>, Allocation)>;
|
||||
const GLOBAL_KIND: Option<Self::MemoryKind> = None; // no copying of globals from `tcx` to machine memory
|
||||
|
||||
type AllocExtra = ();
|
||||
type FrameExtra = ();
|
||||
|
||||
#[inline(always)]
|
||||
fn enforce_alignment(_memory_extra: &Self::MemoryExtra) -> bool {
|
||||
// We do not check for alignment to avoid having to carry an `Align`
|
||||
// in `ConstValue::ByRef`.
|
||||
false
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn force_int_for_alignment_check(_memory_extra: &Self::MemoryExtra) -> bool {
|
||||
// We do not support `force_int`.
|
||||
false
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn enforce_validity(_ecx: &InterpCx<$mir, $tcx, Self>) -> bool {
|
||||
false // for now, we don't enforce validity
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn call_extra_fn(
|
||||
_ecx: &mut InterpCx<$mir, $tcx, Self>,
|
||||
fn_val: !,
|
||||
_abi: Abi,
|
||||
_args: &[OpTy<$tcx>],
|
||||
_ret: Option<(&PlaceTy<$tcx>, mir::BasicBlock)>,
|
||||
_unwind: StackPopUnwind,
|
||||
) -> InterpResult<$tcx> {
|
||||
match fn_val {}
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn init_allocation_extra<'b>(
|
||||
_mem: &Memory<$mir, $tcx, Self>,
|
||||
_id: AllocId,
|
||||
alloc: Cow<'b, Allocation>,
|
||||
_kind: Option<MemoryKind<Self::MemoryKind>>,
|
||||
) -> Cow<'b, Allocation<Self::PointerTag>> {
|
||||
// We do not use a tag so we can just cheaply forward the allocation
|
||||
alloc
|
||||
}
|
||||
|
||||
fn extern_static_base_pointer(
|
||||
mem: &Memory<$mir, $tcx, Self>,
|
||||
def_id: DefId,
|
||||
) -> InterpResult<$tcx, Pointer> {
|
||||
// Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
|
||||
Ok(Pointer::new(mem.tcx.create_static_alloc(def_id), Size::ZERO))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn tag_alloc_base_pointer(
|
||||
_mem: &Memory<$mir, $tcx, Self>,
|
||||
ptr: Pointer<AllocId>,
|
||||
) -> Pointer<AllocId> {
|
||||
ptr
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
|
||||
Pointer::new(None, Size::from_bytes(addr))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn ptr_get_alloc(_mem: &Memory<$mir, $tcx, Self>, ptr: Pointer<AllocId>) -> (AllocId, Size) {
|
||||
// We know `offset` is relative to the allocation, so we can use `into_parts`.
|
||||
let (alloc_id, offset) = ptr.into_parts();
|
||||
(alloc_id, offset)
|
||||
}
|
||||
}
|
1186
compiler/rustc_const_eval/src/interpret/memory.rs
Normal file
File diff suppressed because it is too large
33
compiler/rustc_const_eval/src/interpret/mod.rs
Normal file
|
@@ -0,0 +1,33 @@
|
|||
//! An interpreter for MIR used in CTFE and by miri
|
||||
|
||||
mod cast;
|
||||
mod eval_context;
|
||||
mod intern;
|
||||
mod intrinsics;
|
||||
mod machine;
|
||||
mod memory;
|
||||
mod operand;
|
||||
mod operator;
|
||||
mod place;
|
||||
mod step;
|
||||
mod terminator;
|
||||
mod traits;
|
||||
mod util;
|
||||
mod validity;
|
||||
mod visitor;
|
||||
|
||||
pub use rustc_middle::mir::interpret::*; // have all the `interpret` symbols in one place: here
|
||||
|
||||
pub use self::eval_context::{
|
||||
Frame, FrameInfo, InterpCx, LocalState, LocalValue, StackPopCleanup, StackPopUnwind,
|
||||
};
|
||||
pub use self::intern::{intern_const_alloc_recursive, InternKind};
|
||||
pub use self::machine::{compile_time_machine, AllocMap, Machine, MayLeak, StackPopJump};
|
||||
pub use self::memory::{AllocCheck, AllocRef, AllocRefMut, FnVal, Memory, MemoryKind};
|
||||
pub use self::operand::{ImmTy, Immediate, OpTy, Operand};
|
||||
pub use self::place::{MPlaceTy, MemPlace, MemPlaceMeta, Place, PlaceTy};
|
||||
pub use self::validity::{CtfeValidationMode, RefTracking};
|
||||
pub use self::visitor::{MutValueVisitor, ValueVisitor};
|
||||
|
||||
crate use self::intrinsics::eval_nullary_intrinsic;
|
||||
use eval_context::{from_known_layout, mir_assign_valid_types};
|
762
compiler/rustc_const_eval/src/interpret/operand.rs
Normal file
|
@@ -0,0 +1,762 @@
|
|||
//! Functions concerning immediate values and operands, and reading from operands.
|
||||
//! All high-level functions to read from memory work on operands as sources.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::Write;
|
||||
|
||||
use rustc_errors::ErrorReported;
|
||||
use rustc_hir::def::Namespace;
|
||||
use rustc_macros::HashStable;
|
||||
use rustc_middle::ty::layout::{LayoutOf, PrimitiveExt, TyAndLayout};
|
||||
use rustc_middle::ty::print::{FmtPrinter, PrettyPrinter, Printer};
|
||||
use rustc_middle::ty::{ConstInt, Ty};
|
||||
use rustc_middle::{mir, ty};
|
||||
use rustc_target::abi::{Abi, HasDataLayout, Size, TagEncoding};
|
||||
use rustc_target::abi::{VariantIdx, Variants};
|
||||
|
||||
use super::{
|
||||
alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
|
||||
InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
|
||||
Scalar, ScalarMaybeUninit,
|
||||
};
|
||||
|
||||
/// An `Immediate` represents a single immediate self-contained Rust value.
|
||||
///
|
||||
/// For optimization of a few very common cases, there is also a representation for a pair of
|
||||
/// primitive values (`ScalarPair`). It allows Miri to avoid making allocations for checked binary
|
||||
/// operations and wide pointers. This idea was taken from rustc's codegen.
|
||||
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
|
||||
/// defined on `Immediate`, and do not have to work with a `Place`.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
|
||||
pub enum Immediate<Tag: Provenance = AllocId> {
|
||||
Scalar(ScalarMaybeUninit<Tag>),
|
||||
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
|
||||
}
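Two everyday examples (a sketch, not part of this change) of values that fit the `ScalarPair` shape described above: the `(value, overflowed)` pair of an overflowing arithmetic operation, and the `(data pointer, length)` pair behind a slice reference.

fn main() {
    // A (value, overflowed) pair, as produced by checked binary operations.
    let (sum, overflowed) = 200u8.overflowing_add(100);
    assert_eq!((sum, overflowed), (44, true));

    // A wide pointer: `&[u8]` is a (data pointer, length) pair.
    let s: &[u8] = &[1, 2, 3];
    assert_eq!(s.len(), 3);
}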
|
||||
|
||||
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
|
||||
rustc_data_structures::static_assert_size!(Immediate, 56);
|
||||
|
||||
impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
|
||||
#[inline(always)]
|
||||
fn from(val: ScalarMaybeUninit<Tag>) -> Self {
|
||||
Immediate::Scalar(val)
|
||||
}
|
||||
}
|
||||
|
||||
impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
|
||||
#[inline(always)]
|
||||
fn from(val: Scalar<Tag>) -> Self {
|
||||
Immediate::Scalar(val.into())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> Immediate<Tag> {
|
||||
pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
|
||||
Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
|
||||
}
|
||||
|
||||
pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
|
||||
Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
|
||||
}
|
||||
|
||||
pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
|
||||
Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
|
||||
}
|
||||
|
||||
pub fn new_dyn_trait(
|
||||
val: Scalar<Tag>,
|
||||
vtable: Pointer<Option<Tag>>,
|
||||
cx: &impl HasDataLayout,
|
||||
) -> Self {
|
||||
Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_maybe_pointer(vtable, cx))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_scalar_or_uninit(self) -> ScalarMaybeUninit<Tag> {
|
||||
match self {
|
||||
Immediate::Scalar(val) => val,
|
||||
Immediate::ScalarPair(..) => bug!("Got a scalar pair where a scalar was expected"),
|
||||
}
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_scalar(self) -> InterpResult<'tcx, Scalar<Tag>> {
|
||||
self.to_scalar_or_uninit().check_init()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_scalar_pair(self) -> InterpResult<'tcx, (Scalar<Tag>, Scalar<Tag>)> {
|
||||
match self {
|
||||
Immediate::ScalarPair(val1, val2) => Ok((val1.check_init()?, val2.check_init()?)),
|
||||
Immediate::Scalar(..) => {
|
||||
bug!("Got a scalar where a scalar pair was expected")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ScalarPair needs a type to interpret, so we often have an immediate and a type together
|
||||
// as input for binary and cast operations.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
|
||||
imm: Immediate<Tag>,
|
||||
pub layout: TyAndLayout<'tcx>,
|
||||
}
|
||||
|
||||
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
|
||||
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
|
||||
|
||||
impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
/// Helper function for printing a scalar to a FmtPrinter
|
||||
fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
|
||||
cx: FmtPrinter<'a, 'tcx, F>,
|
||||
s: ScalarMaybeUninit<Tag>,
|
||||
ty: Ty<'tcx>,
|
||||
) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
|
||||
match s {
|
||||
ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
|
||||
cx.pretty_print_const_scalar_int(int, ty, true)
|
||||
}
|
||||
ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
|
||||
// Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
|
||||
// print what it points to, which would fail since it has no access to the local
|
||||
// memory.
|
||||
cx.pretty_print_const_pointer(ptr, ty, true)
|
||||
}
|
||||
ScalarMaybeUninit::Uninit => cx.typed_value(
|
||||
|mut this| {
|
||||
this.write_str("uninit ")?;
|
||||
Ok(this)
|
||||
},
|
||||
|this| this.print_type(ty),
|
||||
" ",
|
||||
),
|
||||
}
|
||||
}
|
||||
ty::tls::with(|tcx| {
|
||||
match self.imm {
|
||||
Immediate::Scalar(s) => {
|
||||
if let Some(ty) = tcx.lift(self.layout.ty) {
|
||||
let cx = FmtPrinter::new(tcx, f, Namespace::ValueNS);
|
||||
p(cx, s, ty)?;
|
||||
return Ok(());
|
||||
}
|
||||
write!(f, "{}: {}", s, self.layout.ty)
|
||||
}
|
||||
Immediate::ScalarPair(a, b) => {
|
||||
// FIXME(oli-obk): at least print tuples and slices nicely
|
||||
write!(f, "({}, {}): {}", a, b, self.layout.ty,)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
|
||||
type Target = Immediate<Tag>;
|
||||
#[inline(always)]
|
||||
fn deref(&self) -> &Immediate<Tag> {
|
||||
&self.imm
|
||||
}
|
||||
}
|
||||
|
||||
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
|
||||
/// or still in memory. The latter is an optimization, to delay reading that chunk of
|
||||
/// memory and to avoid having to store arbitrary-sized data here.
|
||||
#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
|
||||
pub enum Operand<Tag: Provenance = AllocId> {
|
||||
Immediate(Immediate<Tag>),
|
||||
Indirect(MemPlace<Tag>),
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
|
||||
pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
|
||||
op: Operand<Tag>, // Keep this private; it helps enforce invariants.
|
||||
pub layout: TyAndLayout<'tcx>,
|
||||
}
|
||||
|
||||
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
|
||||
rustc_data_structures::static_assert_size!(OpTy<'_>, 80);
|
||||
|
||||
impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
|
||||
type Target = Operand<Tag>;
|
||||
#[inline(always)]
|
||||
fn deref(&self) -> &Operand<Tag> {
|
||||
&self.op
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
|
||||
#[inline(always)]
|
||||
fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
|
||||
OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
|
||||
#[inline(always)]
|
||||
fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
|
||||
OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
|
||||
#[inline(always)]
|
||||
fn from(val: ImmTy<'tcx, Tag>) -> Self {
|
||||
OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
|
||||
#[inline]
|
||||
pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
|
||||
ImmTy { imm: val.into(), layout }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn from_immediate(imm: Immediate<Tag>, layout: TyAndLayout<'tcx>) -> Self {
|
||||
ImmTy { imm, layout }
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn try_from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
|
||||
Some(Self::from_scalar(Scalar::try_from_uint(i, layout.size)?, layout))
|
||||
}
|
||||
#[inline]
|
||||
pub fn from_uint(i: impl Into<u128>, layout: TyAndLayout<'tcx>) -> Self {
|
||||
Self::from_scalar(Scalar::from_uint(i, layout.size), layout)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn try_from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Option<Self> {
|
||||
Some(Self::from_scalar(Scalar::try_from_int(i, layout.size)?, layout))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn from_int(i: impl Into<i128>, layout: TyAndLayout<'tcx>) -> Self {
|
||||
Self::from_scalar(Scalar::from_int(i, layout.size), layout)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_const_int(self) -> ConstInt {
|
||||
assert!(self.layout.ty.is_integral());
|
||||
let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
|
||||
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
|
||||
/// Returns `None` if the layout does not permit loading this as a value.
|
||||
fn try_read_immediate_from_mplace(
|
||||
&self,
|
||||
mplace: &MPlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::PointerTag>>> {
|
||||
if mplace.layout.is_unsized() {
|
||||
// Don't touch unsized
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let alloc = match self.get_alloc(mplace)? {
|
||||
Some(ptr) => ptr,
|
||||
None => {
|
||||
return Ok(Some(ImmTy {
|
||||
// zero-sized type
|
||||
imm: Scalar::ZST.into(),
|
||||
layout: mplace.layout,
|
||||
}));
|
||||
}
|
||||
};
|
||||
|
||||
match mplace.layout.abi {
|
||||
Abi::Scalar(..) => {
|
||||
let scalar = alloc.read_scalar(alloc_range(Size::ZERO, mplace.layout.size))?;
|
||||
Ok(Some(ImmTy { imm: scalar.into(), layout: mplace.layout }))
|
||||
}
|
||||
Abi::ScalarPair(ref a, ref b) => {
|
||||
// We checked `ptr_align` above, so all fields will have the alignment they need.
|
||||
// We would anyway check against `ptr_align.restrict_for_offset(b_offset)`,
|
||||
// which `ptr.offset(b_offset)` cannot possibly fail to satisfy.
|
||||
let (a, b) = (&a.value, &b.value);
|
||||
let (a_size, b_size) = (a.size(self), b.size(self));
|
||||
let b_offset = a_size.align_to(b.align(self).abi);
|
||||
assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
|
||||
let a_val = alloc.read_scalar(alloc_range(Size::ZERO, a_size))?;
|
||||
let b_val = alloc.read_scalar(alloc_range(b_offset, b_size))?;
|
||||
Ok(Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout }))
|
||||
}
|
||||
_ => Ok(None),
|
||||
}
|
||||
}
|
||||
|
||||
/// Try returning an immediate for the operand.
|
||||
/// If the layout does not permit loading this as an immediate, return where in memory
|
||||
/// we can find the data.
|
||||
/// Note that for a given layout, this operation will either always fail or always
|
||||
/// succeed! Whether it succeeds depends on whether the layout can be represented
|
||||
/// in an `Immediate`, not on which data is stored there currently.
|
||||
pub fn try_read_immediate(
|
||||
&self,
|
||||
src: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
|
||||
Ok(match src.try_as_mplace() {
|
||||
Ok(ref mplace) => {
|
||||
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
|
||||
Ok(val)
|
||||
} else {
|
||||
Err(*mplace)
|
||||
}
|
||||
}
|
||||
Err(val) => Ok(val),
|
||||
})
|
||||
}
|
||||
|
||||
/// Read an immediate from a place, asserting that that is possible with the given layout.
|
||||
#[inline(always)]
|
||||
pub fn read_immediate(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
|
||||
if let Ok(imm) = self.try_read_immediate(op)? {
|
||||
Ok(imm)
|
||||
} else {
|
||||
span_bug!(self.cur_span(), "primitive read failed for type: {:?}", op.layout.ty);
|
||||
}
|
||||
}
|
||||
|
||||
/// Read a scalar from a place
|
||||
pub fn read_scalar(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
|
||||
Ok(self.read_immediate(op)?.to_scalar_or_uninit())
|
||||
}
|
||||
|
||||
/// Read a pointer from a place.
|
||||
pub fn read_pointer(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
|
||||
Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
|
||||
}
|
||||
|
||||
// Turn the wide MPlace into a string (must already be dereferenced!)
|
||||
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
|
||||
let len = mplace.len(self)?;
|
||||
let bytes = self.memory.read_bytes(mplace.ptr, Size::from_bytes(len))?;
|
||||
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
|
||||
Ok(str)
|
||||
}
|
||||
|
||||
/// Projection functions
|
||||
pub fn operand_field(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
field: usize,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
let base = match op.try_as_mplace() {
|
||||
Ok(ref mplace) => {
|
||||
// We can reuse the mplace field computation logic for indirect operands.
|
||||
let field = self.mplace_field(mplace, field)?;
|
||||
return Ok(field.into());
|
||||
}
|
||||
Err(value) => value,
|
||||
};
|
||||
|
||||
let field_layout = op.layout.field(self, field);
|
||||
if field_layout.is_zst() {
|
||||
let immediate = Scalar::ZST.into();
|
||||
return Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout });
|
||||
}
|
||||
let offset = op.layout.fields.offset(field);
|
||||
let immediate = match *base {
|
||||
// the field covers the entire type
|
||||
_ if offset.bytes() == 0 && field_layout.size == op.layout.size => *base,
|
||||
// extract fields from types with `ScalarPair` ABI
|
||||
Immediate::ScalarPair(a, b) => {
|
||||
let val = if offset.bytes() == 0 { a } else { b };
|
||||
Immediate::from(val)
|
||||
}
|
||||
Immediate::Scalar(val) => span_bug!(
|
||||
self.cur_span(),
|
||||
"field access on non aggregate {:#?}, {:#?}",
|
||||
val,
|
||||
op.layout
|
||||
),
|
||||
};
|
||||
Ok(OpTy { op: Operand::Immediate(immediate), layout: field_layout })
|
||||
}
|
||||
|
||||
pub fn operand_index(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
index: u64,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
if let Ok(index) = usize::try_from(index) {
|
||||
// We can just treat this as a field.
|
||||
self.operand_field(op, index)
|
||||
} else {
|
||||
// Indexing into a big array. This must be an mplace.
|
||||
let mplace = op.assert_mem_place();
|
||||
Ok(self.mplace_index(&mplace, index)?.into())
|
||||
}
|
||||
}
|
||||
|
||||
pub fn operand_downcast(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
variant: VariantIdx,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
// Downcasts only change the layout
|
||||
Ok(match op.try_as_mplace() {
|
||||
Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
|
||||
Err(..) => {
|
||||
let layout = op.layout.for_variant(self, variant);
|
||||
OpTy { layout, ..*op }
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
pub fn operand_projection(
|
||||
&self,
|
||||
base: &OpTy<'tcx, M::PointerTag>,
|
||||
proj_elem: mir::PlaceElem<'tcx>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
use rustc_middle::mir::ProjectionElem::*;
|
||||
Ok(match proj_elem {
|
||||
Field(field, _) => self.operand_field(base, field.index())?,
|
||||
Downcast(_, variant) => self.operand_downcast(base, variant)?,
|
||||
Deref => self.deref_operand(base)?.into(),
|
||||
Subslice { .. } | ConstantIndex { .. } | Index(_) => {
|
||||
// The rest should only occur as mplace, we do not use Immediates for types
|
||||
// allowing such operations. This matches place_projection forcing an allocation.
|
||||
let mplace = base.assert_mem_place();
|
||||
self.mplace_projection(&mplace, proj_elem)?.into()
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
/// Read from a local. Will not actually access the local if reading from a ZST.
|
||||
/// Will not access memory; instead, an indirect `Operand` is returned.
|
||||
///
|
||||
/// This is public because it is used by [priroda](https://github.com/oli-obk/priroda) to get an
|
||||
/// OpTy from a local
|
||||
pub fn access_local(
|
||||
&self,
|
||||
frame: &super::Frame<'mir, 'tcx, M::PointerTag, M::FrameExtra>,
|
||||
local: mir::Local,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
let layout = self.layout_of_local(frame, local, layout)?;
|
||||
let op = if layout.is_zst() {
|
||||
// Do not read from ZST, they might not be initialized
|
||||
Operand::Immediate(Scalar::ZST.into())
|
||||
} else {
|
||||
M::access_local(&self, frame, local)?
|
||||
};
|
||||
Ok(OpTy { op, layout })
|
||||
}
|
||||
|
||||
/// Every place can be read from, so we can turn them into an operand.
|
||||
/// This will definitely return `Indirect` if the place is a `Ptr`, i.e., this
|
||||
/// will never actually read from memory.
|
||||
#[inline(always)]
|
||||
pub fn place_to_op(
|
||||
&self,
|
||||
place: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
let op = match **place {
|
||||
Place::Ptr(mplace) => Operand::Indirect(mplace),
|
||||
Place::Local { frame, local } => {
|
||||
*self.access_local(&self.stack()[frame], local, None)?
|
||||
}
|
||||
};
|
||||
Ok(OpTy { op, layout: place.layout })
|
||||
}
|
||||
|
||||
// Evaluate a place with the goal of reading from it. This lets us sometimes
|
||||
// avoid allocations.
|
||||
pub fn eval_place_to_op(
|
||||
&self,
|
||||
place: mir::Place<'tcx>,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
// Do not use the layout passed in as argument if the base we are looking at
|
||||
// here is not the entire place.
|
||||
let layout = if place.projection.is_empty() { layout } else { None };
|
||||
|
||||
let base_op = self.access_local(self.frame(), place.local, layout)?;
|
||||
|
||||
let op = place
|
||||
.projection
|
||||
.iter()
|
||||
.try_fold(base_op, |op, elem| self.operand_projection(&op, elem))?;
|
||||
|
||||
trace!("eval_place_to_op: got {:?}", *op);
|
||||
// Sanity-check the type we ended up with.
|
||||
debug_assert!(mir_assign_valid_types(
|
||||
*self.tcx,
|
||||
self.param_env,
|
||||
self.layout_of(self.subst_from_current_frame_and_normalize_erasing_regions(
|
||||
place.ty(&self.frame().body.local_decls, *self.tcx).ty
|
||||
))?,
|
||||
op.layout,
|
||||
));
|
||||
Ok(op)
|
||||
}
|
||||
|
||||
/// Evaluate the operand, returning a place where you can then find the data.
|
||||
/// If you already know the layout, you can save two table lookups
|
||||
/// by passing it in here.
|
||||
#[inline]
|
||||
pub fn eval_operand(
|
||||
&self,
|
||||
mir_op: &mir::Operand<'tcx>,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
use rustc_middle::mir::Operand::*;
|
||||
let op = match *mir_op {
|
||||
// FIXME: do some more logic on `move` to invalidate the old location
|
||||
Copy(place) | Move(place) => self.eval_place_to_op(place, layout)?,
|
||||
|
||||
Constant(ref constant) => {
|
||||
let val =
|
||||
self.subst_from_current_frame_and_normalize_erasing_regions(constant.literal);
|
||||
// This can still fail:
|
||||
// * During ConstProp, with `TooGeneric` or since the `required_consts` were not all
|
||||
// checked yet.
|
||||
// * During CTFE, since promoteds in `const`/`static` initializer bodies can fail.
|
||||
|
||||
self.mir_const_to_op(&val, layout)?
|
||||
}
|
||||
};
|
||||
trace!("{:?}: {:?}", mir_op, *op);
|
||||
Ok(op)
|
||||
}
|
||||
|
||||
/// Evaluate a bunch of operands at once
|
||||
pub(super) fn eval_operands(
|
||||
&self,
|
||||
ops: &[mir::Operand<'tcx>],
|
||||
) -> InterpResult<'tcx, Vec<OpTy<'tcx, M::PointerTag>>> {
|
||||
ops.iter().map(|op| self.eval_operand(op, None)).collect()
|
||||
}
|
||||
|
||||
// Used when the miri-engine runs into a constant and for extracting information from constants
|
||||
// in patterns via the `const_eval` module
|
||||
/// The `val` and `layout` are assumed to already be in our interpreter
|
||||
/// "universe" (param_env).
|
||||
pub fn const_to_op(
|
||||
&self,
|
||||
val: &ty::Const<'tcx>,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
match val.val {
|
||||
ty::ConstKind::Param(_) | ty::ConstKind::Bound(..) => throw_inval!(TooGeneric),
|
||||
ty::ConstKind::Error(_) => throw_inval!(AlreadyReported(ErrorReported)),
|
||||
ty::ConstKind::Unevaluated(uv) => {
|
||||
let instance = self.resolve(uv.def, uv.substs(*self.tcx))?;
|
||||
Ok(self.eval_to_allocation(GlobalId { instance, promoted: uv.promoted })?.into())
|
||||
}
|
||||
ty::ConstKind::Infer(..) | ty::ConstKind::Placeholder(..) => {
|
||||
span_bug!(self.cur_span(), "const_to_op: Unexpected ConstKind {:?}", val)
|
||||
}
|
||||
ty::ConstKind::Value(val_val) => self.const_val_to_op(val_val, val.ty, layout),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mir_const_to_op(
|
||||
&self,
|
||||
val: &mir::ConstantKind<'tcx>,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
match val {
|
||||
mir::ConstantKind::Ty(ct) => self.const_to_op(ct, layout),
|
||||
mir::ConstantKind::Val(val, ty) => self.const_val_to_op(*val, ty, layout),
|
||||
}
|
||||
}
|
||||
|
||||
crate fn const_val_to_op(
|
||||
&self,
|
||||
val_val: ConstValue<'tcx>,
|
||||
ty: Ty<'tcx>,
|
||||
layout: Option<TyAndLayout<'tcx>>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
// Other cases need layout.
|
||||
let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
|
||||
Ok(match scalar {
|
||||
Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
|
||||
Scalar::Int(int) => Scalar::Int(int),
|
||||
})
|
||||
};
|
||||
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
|
||||
let op = match val_val {
|
||||
ConstValue::ByRef { alloc, offset } => {
|
||||
let id = self.tcx.create_memory_alloc(alloc);
|
||||
// We rely on mutability being set correctly in that allocation to prevent writes
|
||||
// where none should happen.
|
||||
let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
|
||||
Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
|
||||
}
|
||||
ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
|
||||
ConstValue::Slice { data, start, end } => {
|
||||
// We rely on mutability being set correctly in `data` to prevent writes
|
||||
// where none should happen.
|
||||
let ptr = Pointer::new(
|
||||
self.tcx.create_memory_alloc(data),
|
||||
Size::from_bytes(start), // offset: `start`
|
||||
);
|
||||
Operand::Immediate(Immediate::new_slice(
|
||||
Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
|
||||
u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
|
||||
self,
|
||||
))
|
||||
}
|
||||
};
|
||||
Ok(OpTy { op, layout })
|
||||
}
|
||||
|
||||
/// Read discriminant, return the runtime value as well as the variant index.
|
||||
pub fn read_discriminant(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, VariantIdx)> {
|
||||
trace!("read_discriminant_value {:#?}", op.layout);
|
||||
// Get type and layout of the discriminant.
|
||||
let discr_layout = self.layout_of(op.layout.ty.discriminant_ty(*self.tcx))?;
|
||||
trace!("discriminant type: {:?}", discr_layout.ty);
|
||||
|
||||
// We use "discriminant" to refer to the value associated with a particular enum variant.
|
||||
// This is not to be confused with its "variant index", which is just determining its position in the
|
||||
// declared list of variants -- they can differ with explicitly assigned discriminants.
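// For example (an illustration, not from the original source): in
//     enum E { A = 10, B = 20 }
// the variant `B` has variant index 1 but discriminant 20.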
|
||||
// We use "tag" to refer to how the discriminant is encoded in memory, which can be either
|
||||
// straight-forward (`TagEncoding::Direct`) or with a niche (`TagEncoding::Niche`).
|
||||
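// For instance (illustration, not from the original source): in `enum E { A = 10, B = 20 }`,
// variant `B` has variant index 1 but discriminant 20; with `TagEncoding::Direct` the tag
// stored in memory is the discriminant 20 (cast to the tag's integer type), not the index 1.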
let (tag_scalar_layout, tag_encoding, tag_field) = match op.layout.variants {
|
||||
Variants::Single { index } => {
|
||||
let discr = match op.layout.ty.discriminant_for_variant(*self.tcx, index) {
|
||||
Some(discr) => {
|
||||
// This type actually has discriminants.
|
||||
assert_eq!(discr.ty, discr_layout.ty);
|
||||
Scalar::from_uint(discr.val, discr_layout.size)
|
||||
}
|
||||
None => {
|
||||
// On a type without actual discriminants, the variant index is 0.
|
||||
assert_eq!(index.as_u32(), 0);
|
||||
Scalar::from_uint(index.as_u32(), discr_layout.size)
|
||||
}
|
||||
};
|
||||
return Ok((discr, index));
|
||||
}
|
||||
Variants::Multiple { ref tag, ref tag_encoding, tag_field, .. } => {
|
||||
(tag, tag_encoding, tag_field)
|
||||
}
|
||||
};
|
||||
|
||||
// There are *three* layouts that come into play here:
|
||||
// - The discriminant has a type for typechecking. This is `discr_layout`, and is used for
|
||||
// the `Scalar` we return.
|
||||
// - The tag (encoded discriminant) has layout `tag_layout`. This is always an integer type,
|
||||
// and used to interpret the value we read from the tag field.
|
||||
// For the return value, a cast to `discr_layout` is performed.
|
||||
// - The field storing the tag has a layout, which is very similar to `tag_layout` but
|
||||
// may be a pointer. This is `tag_val.layout`; we just use it for sanity checks.
|
||||
|
||||
// Get layout for tag.
|
||||
let tag_layout = self.layout_of(tag_scalar_layout.value.to_int_ty(*self.tcx))?;
|
||||
|
||||
// Read tag and sanity-check `tag_layout`.
|
||||
let tag_val = self.read_immediate(&self.operand_field(op, tag_field)?)?;
|
||||
assert_eq!(tag_layout.size, tag_val.layout.size);
|
||||
assert_eq!(tag_layout.abi.is_signed(), tag_val.layout.abi.is_signed());
|
||||
let tag_val = tag_val.to_scalar()?;
|
||||
trace!("tag value: {:?}", tag_val);
|
||||
|
||||
// Figure out which discriminant and variant this corresponds to.
|
||||
Ok(match *tag_encoding {
|
||||
TagEncoding::Direct => {
|
||||
let tag_bits = tag_val
|
||||
.try_to_int()
|
||||
.map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
|
||||
.assert_bits(tag_layout.size);
|
||||
// Cast bits from tag layout to discriminant layout.
|
||||
let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
|
||||
let discr_bits = discr_val.assert_bits(discr_layout.size);
|
||||
// Convert discriminant to variant index, and catch invalid discriminants.
|
||||
let index = match *op.layout.ty.kind() {
|
||||
ty::Adt(adt, _) => {
|
||||
adt.discriminants(*self.tcx).find(|(_, var)| var.val == discr_bits)
|
||||
}
|
||||
ty::Generator(def_id, substs, _) => {
|
||||
let substs = substs.as_generator();
|
||||
substs
|
||||
.discriminants(def_id, *self.tcx)
|
||||
.find(|(_, var)| var.val == discr_bits)
|
||||
}
|
||||
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
|
||||
}
|
||||
.ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
|
||||
// Return the cast value, and the index.
|
||||
(discr_val, index.0)
|
||||
}
|
||||
TagEncoding::Niche { dataful_variant, ref niche_variants, niche_start } => {
|
||||
// Compute the variant this niche value/"tag" corresponds to. With niche layout,
|
||||
// discriminant (encoded in niche/tag) and variant index are the same.
|
||||
let variants_start = niche_variants.start().as_u32();
|
||||
let variants_end = niche_variants.end().as_u32();
|
||||
let variant = match tag_val.try_to_int() {
|
||||
Err(dbg_val) => {
|
||||
// So this is a pointer then, and casting to an int failed.
|
||||
// Can only happen during CTFE.
|
||||
let ptr = self.scalar_to_ptr(tag_val);
|
||||
// The niche must be just 0, and the ptr not null, then we know this is
|
||||
// okay. Everything else, we conservatively reject.
|
||||
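// For example, in the usual layout of `Option<&T>` the tag is the pointer field itself:
// the single niche variant `None` is encoded as the null pointer (niche value 0), so a
// pointer with unknown numeric value can only be accepted as the dataful variant
// (`Some`) once we know it is non-null.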
let ptr_valid = niche_start == 0
|
||||
&& variants_start == variants_end
|
||||
&& !self.memory.ptr_may_be_null(ptr);
|
||||
if !ptr_valid {
|
||||
throw_ub!(InvalidTag(dbg_val))
|
||||
}
|
||||
dataful_variant
|
||||
}
|
||||
Ok(tag_bits) => {
|
||||
let tag_bits = tag_bits.assert_bits(tag_layout.size);
|
||||
// We need to use machine arithmetic to get the relative variant idx:
|
||||
// variant_index_relative = tag_val - niche_start_val
|
||||
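// E.g. for a typical `Option<bool>` layout, `None` is encoded with niche value 2
// (niche_start = 2, niche variants 0..=0): a tag of 2 gives a relative index of 0,
// i.e. variant `None`, while tags 0 and 1 wrap around to a large value, fall outside
// the niche range checked below, and thus select the dataful variant `Some`.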
let tag_val = ImmTy::from_uint(tag_bits, tag_layout);
|
||||
let niche_start_val = ImmTy::from_uint(niche_start, tag_layout);
|
||||
let variant_index_relative_val =
|
||||
self.binary_op(mir::BinOp::Sub, &tag_val, &niche_start_val)?;
|
||||
let variant_index_relative = variant_index_relative_val
|
||||
.to_scalar()?
|
||||
.assert_bits(tag_val.layout.size);
|
||||
// Check if this is in the range that indicates an actual discriminant.
|
||||
if variant_index_relative <= u128::from(variants_end - variants_start) {
|
||||
let variant_index_relative = u32::try_from(variant_index_relative)
|
||||
.expect("we checked that this fits into a u32");
|
||||
// Then computing the absolute variant idx should not overflow any more.
|
||||
let variant_index = variants_start
|
||||
.checked_add(variant_index_relative)
|
||||
.expect("overflow computing absolute variant idx");
|
||||
let variants_len = op
|
||||
.layout
|
||||
.ty
|
||||
.ty_adt_def()
|
||||
.expect("tagged layout for non adt")
|
||||
.variants
|
||||
.len();
|
||||
assert!(usize::try_from(variant_index).unwrap() < variants_len);
|
||||
VariantIdx::from_u32(variant_index)
|
||||
} else {
|
||||
dataful_variant
|
||||
}
|
||||
}
|
||||
};
|
||||
// Compute the size of the scalar we need to return.
|
||||
// No need to cast, because the variant index directly serves as discriminant and is
|
||||
// encoded in the tag.
|
||||
(Scalar::from_uint(variant.as_u32(), discr_layout.size), variant)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
417
compiler/rustc_const_eval/src/interpret/operator.rs
Normal file
|
@ -0,0 +1,417 @@
|
|||
use std::convert::TryFrom;
|
||||
|
||||
use rustc_apfloat::Float;
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::mir::interpret::{InterpResult, Scalar};
|
||||
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
|
||||
use rustc_middle::ty::{self, FloatTy, Ty};
|
||||
|
||||
use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Applies the binary operation `op` to the two operands and writes a tuple of the result
|
||||
/// and a boolean signifying the potential overflow to the destination.
|
||||
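///
/// For example, a `CheckedBinaryOp` adding `255_u8` and `1_u8` writes `(0_u8, true)`
/// into a destination of type `(u8, bool)`.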
pub fn binop_with_overflow(
|
||||
&mut self,
|
||||
op: mir::BinOp,
|
||||
left: &ImmTy<'tcx, M::PointerTag>,
|
||||
right: &ImmTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
|
||||
debug_assert_eq!(
|
||||
self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
|
||||
dest.layout.ty,
|
||||
"type mismatch for result of {:?}",
|
||||
op,
|
||||
);
|
||||
let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
|
||||
self.write_immediate(val, dest)
|
||||
}
|
||||
|
||||
/// Applies the binary operation `op` to the arguments and writes the result to the
|
||||
/// destination.
|
||||
pub fn binop_ignore_overflow(
|
||||
&mut self,
|
||||
op: mir::BinOp,
|
||||
left: &ImmTy<'tcx, M::PointerTag>,
|
||||
right: &ImmTy<'tcx, M::PointerTag>,
|
||||
dest: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
|
||||
assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
|
||||
self.write_scalar(val, dest)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
fn binary_char_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
l: char,
|
||||
r: char,
|
||||
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
|
||||
let res = match bin_op {
|
||||
Eq => l == r,
|
||||
Ne => l != r,
|
||||
Lt => l < r,
|
||||
Le => l <= r,
|
||||
Gt => l > r,
|
||||
Ge => l >= r,
|
||||
_ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
|
||||
};
|
||||
(Scalar::from_bool(res), false, self.tcx.types.bool)
|
||||
}
|
||||
|
||||
fn binary_bool_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
l: bool,
|
||||
r: bool,
|
||||
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
|
||||
let res = match bin_op {
|
||||
Eq => l == r,
|
||||
Ne => l != r,
|
||||
Lt => l < r,
|
||||
Le => l <= r,
|
||||
Gt => l > r,
|
||||
Ge => l >= r,
|
||||
BitAnd => l & r,
|
||||
BitOr => l | r,
|
||||
BitXor => l ^ r,
|
||||
_ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
|
||||
};
|
||||
(Scalar::from_bool(res), false, self.tcx.types.bool)
|
||||
}
|
||||
|
||||
fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
ty: Ty<'tcx>,
|
||||
l: F,
|
||||
r: F,
|
||||
) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
|
||||
let (val, ty) = match bin_op {
|
||||
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
|
||||
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
|
||||
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
|
||||
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
|
||||
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
|
||||
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
|
||||
Add => ((l + r).value.into(), ty),
|
||||
Sub => ((l - r).value.into(), ty),
|
||||
Mul => ((l * r).value.into(), ty),
|
||||
Div => ((l / r).value.into(), ty),
|
||||
Rem => ((l % r).value.into(), ty),
|
||||
_ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
|
||||
};
|
||||
(val, false, ty)
|
||||
}
|
||||
|
||||
fn binary_int_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
// passing in raw bits
|
||||
l: u128,
|
||||
left_layout: TyAndLayout<'tcx>,
|
||||
r: u128,
|
||||
right_layout: TyAndLayout<'tcx>,
|
||||
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
|
||||
// Shift ops can have an RHS with a different numeric type.
|
||||
if bin_op == Shl || bin_op == Shr {
|
||||
let signed = left_layout.abi.is_signed();
|
||||
let size = u128::from(left_layout.size.bits());
|
||||
let overflow = r >= size;
|
||||
let r = r % size; // mask to type size
|
||||
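// E.g. shifting a `u8` by 12: `overflow` is set (12 >= 8) and the shift amount is
// masked to 12 % 8 = 4, mirroring the standard library's `overflowing_shl`/`overflowing_shr`
// semantics.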
let r = u32::try_from(r).unwrap(); // we masked so this will always fit
|
||||
let result = if signed {
|
||||
let l = self.sign_extend(l, left_layout) as i128;
|
||||
let result = match bin_op {
|
||||
Shl => l.checked_shl(r).unwrap(),
|
||||
Shr => l.checked_shr(r).unwrap(),
|
||||
_ => bug!("it has already been checked that this is a shift op"),
|
||||
};
|
||||
result as u128
|
||||
} else {
|
||||
match bin_op {
|
||||
Shl => l.checked_shl(r).unwrap(),
|
||||
Shr => l.checked_shr(r).unwrap(),
|
||||
_ => bug!("it has already been checked that this is a shift op"),
|
||||
}
|
||||
};
|
||||
let truncated = self.truncate(result, left_layout);
|
||||
return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
|
||||
}
|
||||
|
||||
// For the remaining ops, the types must be the same on both sides
|
||||
if left_layout.ty != right_layout.ty {
|
||||
span_bug!(
|
||||
self.cur_span(),
|
||||
"invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
|
||||
bin_op,
|
||||
l,
|
||||
left_layout.ty,
|
||||
r,
|
||||
right_layout.ty,
|
||||
)
|
||||
}
|
||||
|
||||
let size = left_layout.size;
|
||||
|
||||
// Operations that need special treatment for signed integers
|
||||
if left_layout.abi.is_signed() {
|
||||
let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
|
||||
Lt => Some(i128::lt),
|
||||
Le => Some(i128::le),
|
||||
Gt => Some(i128::gt),
|
||||
Ge => Some(i128::ge),
|
||||
_ => None,
|
||||
};
|
||||
if let Some(op) = op {
|
||||
let l = self.sign_extend(l, left_layout) as i128;
|
||||
let r = self.sign_extend(r, right_layout) as i128;
|
||||
return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
|
||||
}
|
||||
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
|
||||
Div if r == 0 => throw_ub!(DivisionByZero),
|
||||
Rem if r == 0 => throw_ub!(RemainderByZero),
|
||||
Div => Some(i128::overflowing_div),
|
||||
Rem => Some(i128::overflowing_rem),
|
||||
Add => Some(i128::overflowing_add),
|
||||
Sub => Some(i128::overflowing_sub),
|
||||
Mul => Some(i128::overflowing_mul),
|
||||
_ => None,
|
||||
};
|
||||
if let Some(op) = op {
|
||||
let r = self.sign_extend(r, right_layout) as i128;
|
||||
// We need a special check for overflowing remainder:
|
||||
// "int_min % -1" overflows and returns 0, but after casting things to a larger int
|
||||
// type it does *not* overflow nor give an unrepresentable result!
|
||||
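// E.g. `i8::MIN % -1`: computed in `i128` this is just 0 with no overflow, but at
// `i8` width the operation counts as overflowing, so we report `(0, true)` here.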
if bin_op == Rem {
|
||||
if r == -1 && l == (1 << (size.bits() - 1)) {
|
||||
return Ok((Scalar::from_int(0, size), true, left_layout.ty));
|
||||
}
|
||||
}
|
||||
let l = self.sign_extend(l, left_layout) as i128;
|
||||
|
||||
let (result, oflo) = op(l, r);
|
||||
// This may be out-of-bounds for the result type, so we have to truncate ourselves.
|
||||
// If that truncation loses any information, we have an overflow.
|
||||
let result = result as u128;
|
||||
let truncated = self.truncate(result, left_layout);
|
||||
return Ok((
|
||||
Scalar::from_uint(truncated, size),
|
||||
oflo || self.sign_extend(truncated, left_layout) != result,
|
||||
left_layout.ty,
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
let (val, ty) = match bin_op {
|
||||
Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
|
||||
Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
|
||||
|
||||
Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
|
||||
Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
|
||||
Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
|
||||
Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
|
||||
|
||||
BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
|
||||
BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
|
||||
BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
|
||||
|
||||
Add | Sub | Mul | Rem | Div => {
|
||||
assert!(!left_layout.abi.is_signed());
|
||||
let op: fn(u128, u128) -> (u128, bool) = match bin_op {
|
||||
Add => u128::overflowing_add,
|
||||
Sub => u128::overflowing_sub,
|
||||
Mul => u128::overflowing_mul,
|
||||
Div if r == 0 => throw_ub!(DivisionByZero),
|
||||
Rem if r == 0 => throw_ub!(RemainderByZero),
|
||||
Div => u128::overflowing_div,
|
||||
Rem => u128::overflowing_rem,
|
||||
_ => bug!(),
|
||||
};
|
||||
let (result, oflo) = op(l, r);
|
||||
// Truncate to target type.
|
||||
// If that truncation loses any information, we have an overflow.
|
||||
let truncated = self.truncate(result, left_layout);
|
||||
return Ok((
|
||||
Scalar::from_uint(truncated, size),
|
||||
oflo || truncated != result,
|
||||
left_layout.ty,
|
||||
));
|
||||
}
|
||||
|
||||
_ => span_bug!(
|
||||
self.cur_span(),
|
||||
"invalid binary op {:?}: {:?}, {:?} (both {:?})",
|
||||
bin_op,
|
||||
l,
|
||||
r,
|
||||
right_layout.ty,
|
||||
),
|
||||
};
|
||||
|
||||
Ok((val, false, ty))
|
||||
}
|
||||
|
||||
/// Returns the result of the specified operation, whether it overflowed, and
|
||||
/// the result type.
|
||||
pub fn overflowing_binary_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
left: &ImmTy<'tcx, M::PointerTag>,
|
||||
right: &ImmTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
|
||||
trace!(
|
||||
"Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
|
||||
bin_op,
|
||||
*left,
|
||||
left.layout.ty,
|
||||
*right,
|
||||
right.layout.ty
|
||||
);
|
||||
|
||||
match left.layout.ty.kind() {
|
||||
ty::Char => {
|
||||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let left = left.to_scalar()?;
|
||||
let right = right.to_scalar()?;
|
||||
Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
|
||||
}
|
||||
ty::Bool => {
|
||||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let left = left.to_scalar()?;
|
||||
let right = right.to_scalar()?;
|
||||
Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
|
||||
}
|
||||
ty::Float(fty) => {
|
||||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let ty = left.layout.ty;
|
||||
let left = left.to_scalar()?;
|
||||
let right = right.to_scalar()?;
|
||||
Ok(match fty {
|
||||
FloatTy::F32 => {
|
||||
self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
|
||||
}
|
||||
FloatTy::F64 => {
|
||||
self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
|
||||
}
|
||||
})
|
||||
}
|
||||
_ if left.layout.ty.is_integral() => {
|
||||
// the RHS type can be different, e.g. for shifts -- but it has to be integral, too
|
||||
assert!(
|
||||
right.layout.ty.is_integral(),
|
||||
"Unexpected types for BinOp: {:?} {:?} {:?}",
|
||||
left.layout.ty,
|
||||
bin_op,
|
||||
right.layout.ty
|
||||
);
|
||||
|
||||
let l = left.to_scalar()?.to_bits(left.layout.size)?;
|
||||
let r = right.to_scalar()?.to_bits(right.layout.size)?;
|
||||
self.binary_int_op(bin_op, l, left.layout, r, right.layout)
|
||||
}
|
||||
_ if left.layout.ty.is_any_ptr() => {
|
||||
// The RHS type must be the same *or an integer type* (for `Offset`).
|
||||
assert!(
|
||||
right.layout.ty == left.layout.ty || right.layout.ty.is_integral(),
|
||||
"Unexpected types for BinOp: {:?} {:?} {:?}",
|
||||
left.layout.ty,
|
||||
bin_op,
|
||||
right.layout.ty
|
||||
);
|
||||
|
||||
M::binary_ptr_op(self, bin_op, left, right)
|
||||
}
|
||||
_ => span_bug!(
|
||||
self.cur_span(),
|
||||
"Invalid MIR: bad LHS type for binop: {:?}",
|
||||
left.layout.ty
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
|
||||
#[inline]
|
||||
pub fn binary_op(
|
||||
&self,
|
||||
bin_op: mir::BinOp,
|
||||
left: &ImmTy<'tcx, M::PointerTag>,
|
||||
right: &ImmTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
|
||||
let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
|
||||
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
|
||||
}
|
||||
|
||||
/// Returns the result of the specified operation, whether it overflowed, and
|
||||
/// the result type.
|
||||
pub fn overflowing_unary_op(
|
||||
&self,
|
||||
un_op: mir::UnOp,
|
||||
val: &ImmTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
|
||||
use rustc_middle::mir::UnOp::*;
|
||||
|
||||
let layout = val.layout;
|
||||
let val = val.to_scalar()?;
|
||||
trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
|
||||
|
||||
match layout.ty.kind() {
|
||||
ty::Bool => {
|
||||
let val = val.to_bool()?;
|
||||
let res = match un_op {
|
||||
Not => !val,
|
||||
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
|
||||
};
|
||||
Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
|
||||
}
|
||||
ty::Float(fty) => {
|
||||
let res = match (un_op, fty) {
|
||||
(Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
|
||||
(Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
|
||||
_ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
|
||||
};
|
||||
Ok((res, false, layout.ty))
|
||||
}
|
||||
_ => {
|
||||
assert!(layout.ty.is_integral());
|
||||
let val = val.to_bits(layout.size)?;
|
||||
let (res, overflow) = match un_op {
|
||||
Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
|
||||
Neg => {
|
||||
// arithmetic negation
|
||||
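// E.g. for `i8`, negating `-128` wraps back to `-128`; the truncated result no longer
// round-trips through sign extension, so `overflow` is reported as true below.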
assert!(layout.abi.is_signed());
|
||||
let val = self.sign_extend(val, layout) as i128;
|
||||
let (res, overflow) = val.overflowing_neg();
|
||||
let res = res as u128;
|
||||
// Truncate to target type.
|
||||
// If that truncation loses any information, we have an overflow.
|
||||
let truncated = self.truncate(res, layout);
|
||||
(truncated, overflow || self.sign_extend(truncated, layout) != res)
|
||||
}
|
||||
};
|
||||
Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn unary_op(
|
||||
&self,
|
||||
un_op: mir::UnOp,
|
||||
val: &ImmTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
|
||||
let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
|
||||
Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
|
||||
}
|
||||
}
|
1091
compiler/rustc_const_eval/src/interpret/place.rs
Normal file
File diff suppressed because it is too large
316
compiler/rustc_const_eval/src/interpret/step.rs
Normal file
|
@ -0,0 +1,316 @@
|
|||
//! This module contains the `InterpCx` methods for executing a single step of the interpreter.
|
||||
//!
|
||||
//! The main entry point is the `step` method.
|
||||
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::mir::interpret::{InterpResult, Scalar};
|
||||
use rustc_middle::ty::layout::LayoutOf;
|
||||
|
||||
use super::{InterpCx, Machine};
|
||||
|
||||
/// Classify whether an operator is "left-homogeneous", i.e., the LHS has the
|
||||
/// same type as the result.
|
||||
#[inline]
|
||||
fn binop_left_homogeneous(op: mir::BinOp) -> bool {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
match op {
|
||||
Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Offset | Shl | Shr => true,
|
||||
Eq | Ne | Lt | Le | Gt | Ge => false,
|
||||
}
|
||||
}
|
||||
/// Classify whether an operator is "right-homogeneous", i.e., the RHS has the
|
||||
/// same type as the LHS.
|
||||
#[inline]
|
||||
fn binop_right_homogeneous(op: mir::BinOp) -> bool {
|
||||
use rustc_middle::mir::BinOp::*;
|
||||
match op {
|
||||
Add | Sub | Mul | Div | Rem | BitXor | BitAnd | BitOr | Eq | Ne | Lt | Le | Gt | Ge => true,
|
||||
Offset | Shl | Shr => false,
|
||||
}
|
||||
}
|
||||
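// For example, `Shl`/`Shr` are left- but not right-homogeneous: `u8 << u32` produces a
// `u8`, while the RHS type may differ from the LHS. Comparisons such as `Eq` are the
// opposite: both operands share a type, but the result is `bool`. These helpers let
// `eval_rvalue_into_place` below reuse an already-known layout as an operand hint.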
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
pub fn run(&mut self) -> InterpResult<'tcx> {
|
||||
while self.step()? {}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns `true` as long as there are more things to do.
|
||||
///
|
||||
/// This is used by [priroda](https://github.com/oli-obk/priroda)
|
||||
///
|
||||
/// This is marked `#[inline(always)]` to work around adversarial codegen when `opt-level = 3`
|
||||
#[inline(always)]
|
||||
pub fn step(&mut self) -> InterpResult<'tcx, bool> {
|
||||
if self.stack().is_empty() {
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
let loc = match self.frame().loc {
|
||||
Ok(loc) => loc,
|
||||
Err(_) => {
|
||||
// We are unwinding and this fn has no cleanup code.
|
||||
// Just go on unwinding.
|
||||
trace!("unwinding: skipping frame");
|
||||
self.pop_stack_frame(/* unwinding */ true)?;
|
||||
return Ok(true);
|
||||
}
|
||||
};
|
||||
let basic_block = &self.body().basic_blocks()[loc.block];
|
||||
|
||||
let old_frames = self.frame_idx();
|
||||
|
||||
if let Some(stmt) = basic_block.statements.get(loc.statement_index) {
|
||||
assert_eq!(old_frames, self.frame_idx());
|
||||
self.statement(stmt)?;
|
||||
return Ok(true);
|
||||
}
|
||||
|
||||
M::before_terminator(self)?;
|
||||
|
||||
let terminator = basic_block.terminator();
|
||||
assert_eq!(old_frames, self.frame_idx());
|
||||
self.terminator(terminator)?;
|
||||
Ok(true)
|
||||
}
|
||||
|
||||
/// Runs the interpretation logic for the given `mir::Statement` at the current frame and
|
||||
/// statement counter. This also moves the statement counter forward.
|
||||
pub fn statement(&mut self, stmt: &mir::Statement<'tcx>) -> InterpResult<'tcx> {
|
||||
info!("{:?}", stmt);
|
||||
|
||||
use rustc_middle::mir::StatementKind::*;
|
||||
|
||||
// Some statements (e.g., box) push new stack frames.
|
||||
// We have to record the stack frame number *before* executing the statement.
|
||||
let frame_idx = self.frame_idx();
|
||||
|
||||
match &stmt.kind {
|
||||
Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?,
|
||||
|
||||
SetDiscriminant { place, variant_index } => {
|
||||
let dest = self.eval_place(**place)?;
|
||||
self.write_discriminant(*variant_index, &dest)?;
|
||||
}
|
||||
|
||||
// Mark locals as alive
|
||||
StorageLive(local) => {
|
||||
self.storage_live(*local)?;
|
||||
}
|
||||
|
||||
// Mark locals as dead
|
||||
StorageDead(local) => {
|
||||
self.storage_dead(*local)?;
|
||||
}
|
||||
|
||||
// No dynamic semantics attached to `FakeRead`; MIR
|
||||
// interpreter is solely intended for borrowck'ed code.
|
||||
FakeRead(..) => {}
|
||||
|
||||
// Stacked Borrows.
|
||||
Retag(kind, place) => {
|
||||
let dest = self.eval_place(**place)?;
|
||||
M::retag(self, *kind, &dest)?;
|
||||
}
|
||||
|
||||
// Call CopyNonOverlapping
|
||||
CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping { src, dst, count }) => {
|
||||
let src = self.eval_operand(src, None)?;
|
||||
let dst = self.eval_operand(dst, None)?;
|
||||
let count = self.eval_operand(count, None)?;
|
||||
self.copy_intrinsic(&src, &dst, &count, /* nonoverlapping */ true)?;
|
||||
}
|
||||
|
||||
// Statements we do not track.
|
||||
AscribeUserType(..) => {}
|
||||
|
||||
// Currently, Miri discards Coverage statements. Coverage statements are only injected
|
||||
// via an optional compile time MIR pass and have no side effects. Since Coverage
|
||||
// statements don't exist at the source level, it is safe for Miri to ignore them, even
|
||||
// for undefined behavior (UB) checks.
|
||||
//
|
||||
// A coverage counter inside a const expression (for example, a counter injected in a
|
||||
// const function) is discarded when the const is evaluated at compile time. Whether
|
||||
// this should change, and/or how to implement a const eval counter, is a subject of the
|
||||
// following issue:
|
||||
//
|
||||
// FIXME(#73156): Handle source code coverage in const eval
|
||||
Coverage(..) => {}
|
||||
|
||||
// Defined to do nothing. These are added by optimization passes, to avoid changing the
|
||||
// size of MIR constantly.
|
||||
Nop => {}
|
||||
|
||||
LlvmInlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
|
||||
}
|
||||
|
||||
self.stack_mut()[frame_idx].loc.as_mut().unwrap().statement_index += 1;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Evaluate an assignment statement.
|
||||
///
|
||||
/// There is no separate `eval_rvalue` function. Instead, the code for handling each rvalue
|
||||
/// type writes its results directly into the memory specified by the place.
|
||||
pub fn eval_rvalue_into_place(
|
||||
&mut self,
|
||||
rvalue: &mir::Rvalue<'tcx>,
|
||||
place: mir::Place<'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let dest = self.eval_place(place)?;
|
||||
|
||||
use rustc_middle::mir::Rvalue::*;
|
||||
match *rvalue {
|
||||
ThreadLocalRef(did) => {
|
||||
let ptr = M::thread_local_static_base_pointer(self, did)?;
|
||||
self.write_pointer(ptr, &dest)?;
|
||||
}
|
||||
|
||||
Use(ref operand) => {
|
||||
// Avoid recomputing the layout
|
||||
let op = self.eval_operand(operand, Some(dest.layout))?;
|
||||
self.copy_op(&op, &dest)?;
|
||||
}
|
||||
|
||||
BinaryOp(bin_op, box (ref left, ref right)) => {
|
||||
let layout = binop_left_homogeneous(bin_op).then_some(dest.layout);
|
||||
let left = self.read_immediate(&self.eval_operand(left, layout)?)?;
|
||||
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
|
||||
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
|
||||
self.binop_ignore_overflow(bin_op, &left, &right, &dest)?;
|
||||
}
|
||||
|
||||
CheckedBinaryOp(bin_op, box (ref left, ref right)) => {
|
||||
// Due to the extra boolean in the result, we can never reuse the `dest.layout`.
|
||||
let left = self.read_immediate(&self.eval_operand(left, None)?)?;
|
||||
let layout = binop_right_homogeneous(bin_op).then_some(left.layout);
|
||||
let right = self.read_immediate(&self.eval_operand(right, layout)?)?;
|
||||
self.binop_with_overflow(bin_op, &left, &right, &dest)?;
|
||||
}
|
||||
|
||||
UnaryOp(un_op, ref operand) => {
|
||||
// The operand always has the same type as the result.
|
||||
let val = self.read_immediate(&self.eval_operand(operand, Some(dest.layout))?)?;
|
||||
let val = self.unary_op(un_op, &val)?;
|
||||
assert_eq!(val.layout, dest.layout, "layout mismatch for result of {:?}", un_op);
|
||||
self.write_immediate(*val, &dest)?;
|
||||
}
|
||||
|
||||
Aggregate(ref kind, ref operands) => {
|
||||
let (dest, active_field_index) = match **kind {
|
||||
mir::AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
|
||||
self.write_discriminant(variant_index, &dest)?;
|
||||
if adt_def.is_enum() {
|
||||
(self.place_downcast(&dest, variant_index)?, active_field_index)
|
||||
} else {
|
||||
(dest, active_field_index)
|
||||
}
|
||||
}
|
||||
_ => (dest, None),
|
||||
};
|
||||
|
||||
for (i, operand) in operands.iter().enumerate() {
|
||||
let op = self.eval_operand(operand, None)?;
|
||||
// Ignore zero-sized fields.
|
||||
if !op.layout.is_zst() {
|
||||
let field_index = active_field_index.unwrap_or(i);
|
||||
let field_dest = self.place_field(&dest, field_index)?;
|
||||
self.copy_op(&op, &field_dest)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Repeat(ref operand, _) => {
|
||||
let src = self.eval_operand(operand, None)?;
|
||||
assert!(!src.layout.is_unsized());
|
||||
let dest = self.force_allocation(&dest)?;
|
||||
let length = dest.len(self)?;
|
||||
|
||||
if length == 0 {
|
||||
// Nothing to copy... but let's still make sure that `dest` as a place is valid.
|
||||
self.get_alloc_mut(&dest)?;
|
||||
} else {
|
||||
// Write the src to the first element.
|
||||
let first = self.mplace_field(&dest, 0)?;
|
||||
self.copy_op(&src, &first.into())?;
|
||||
|
||||
// This is performance-sensitive code for big static/const arrays! So we
|
||||
// avoid writing each operand individually and instead just make many copies
|
||||
// of the first element.
|
||||
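// E.g. `[0u8; 4096]` evaluates the operand once, writes it to element 0, and then
// duplicates that element 4095 times with `copy_repeatedly` instead of performing
// one write per element.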
let elem_size = first.layout.size;
|
||||
let first_ptr = first.ptr;
|
||||
let rest_ptr = first_ptr.offset(elem_size, self)?;
|
||||
self.memory.copy_repeatedly(
|
||||
first_ptr,
|
||||
first.align,
|
||||
rest_ptr,
|
||||
first.align,
|
||||
elem_size,
|
||||
length - 1,
|
||||
/*nonoverlapping:*/ true,
|
||||
)?;
|
||||
}
|
||||
}
|
||||
|
||||
Len(place) => {
|
||||
// FIXME(CTFE): don't allow computing the length of arrays in const eval
|
||||
let src = self.eval_place(place)?;
|
||||
let mplace = self.force_allocation(&src)?;
|
||||
let len = mplace.len(self)?;
|
||||
self.write_scalar(Scalar::from_machine_usize(len, self), &dest)?;
|
||||
}
|
||||
|
||||
AddressOf(_, place) | Ref(_, _, place) => {
|
||||
let src = self.eval_place(place)?;
|
||||
let place = self.force_allocation(&src)?;
|
||||
self.write_immediate(place.to_ref(self), &dest)?;
|
||||
}
|
||||
|
||||
NullaryOp(mir::NullOp::Box, _) => {
|
||||
M::box_alloc(self, &dest)?;
|
||||
}
|
||||
|
||||
NullaryOp(mir::NullOp::SizeOf, ty) => {
|
||||
let ty = self.subst_from_current_frame_and_normalize_erasing_regions(ty);
|
||||
let layout = self.layout_of(ty)?;
|
||||
if layout.is_unsized() {
|
||||
// FIXME: This should be a span_bug (#80742)
|
||||
self.tcx.sess.delay_span_bug(
|
||||
self.frame().current_span(),
|
||||
&format!("SizeOf nullary MIR operator called for unsized type {}", ty),
|
||||
);
|
||||
throw_inval!(SizeOfUnsizedType(ty));
|
||||
}
|
||||
self.write_scalar(Scalar::from_machine_usize(layout.size.bytes(), self), &dest)?;
|
||||
}
|
||||
|
||||
Cast(cast_kind, ref operand, cast_ty) => {
|
||||
let src = self.eval_operand(operand, None)?;
|
||||
let cast_ty = self.subst_from_current_frame_and_normalize_erasing_regions(cast_ty);
|
||||
self.cast(&src, cast_kind, cast_ty, &dest)?;
|
||||
}
|
||||
|
||||
Discriminant(place) => {
|
||||
let op = self.eval_place_to_op(place, None)?;
|
||||
let discr_val = self.read_discriminant(&op)?.0;
|
||||
self.write_scalar(discr_val, &dest)?;
|
||||
}
|
||||
}
|
||||
|
||||
trace!("{:?}", self.dump_place(*dest));
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
|
||||
info!("{:?}", terminator.kind);
|
||||
|
||||
self.eval_terminator(terminator)?;
|
||||
if !self.stack().is_empty() {
|
||||
if let Ok(loc) = self.frame().loc {
|
||||
info!("// executing {:?}", loc.block);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
517
compiler/rustc_const_eval/src/interpret/terminator.rs
Normal file
|
@ -0,0 +1,517 @@
|
|||
use std::borrow::Cow;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
|
||||
use rustc_middle::ty::layout::{self, LayoutOf as _, TyAndLayout};
|
||||
use rustc_middle::ty::Instance;
|
||||
use rustc_middle::{
|
||||
mir,
|
||||
ty::{self, Ty},
|
||||
};
|
||||
use rustc_target::abi;
|
||||
use rustc_target::spec::abi::Abi;
|
||||
|
||||
use super::{
|
||||
FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
|
||||
StackPopCleanup, StackPopUnwind,
|
||||
};
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
fn fn_can_unwind(&self, attrs: CodegenFnAttrFlags, abi: Abi) -> bool {
|
||||
layout::fn_can_unwind(*self.tcx, attrs, abi)
|
||||
}
|
||||
|
||||
pub(super) fn eval_terminator(
|
||||
&mut self,
|
||||
terminator: &mir::Terminator<'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
use rustc_middle::mir::TerminatorKind::*;
|
||||
match terminator.kind {
|
||||
Return => {
|
||||
self.pop_stack_frame(/* unwinding */ false)?
|
||||
}
|
||||
|
||||
Goto { target } => self.go_to_block(target),
|
||||
|
||||
SwitchInt { ref discr, ref targets, switch_ty } => {
|
||||
let discr = self.read_immediate(&self.eval_operand(discr, None)?)?;
|
||||
trace!("SwitchInt({:?})", *discr);
|
||||
assert_eq!(discr.layout.ty, switch_ty);
|
||||
|
||||
// Branch to the `otherwise` case by default, if no match is found.
|
||||
assert!(!targets.iter().is_empty());
|
||||
let mut target_block = targets.otherwise();
|
||||
|
||||
for (const_int, target) in targets.iter() {
|
||||
// Compare using binary_op, to also support pointer values
|
||||
let res = self
|
||||
.overflowing_binary_op(
|
||||
mir::BinOp::Eq,
|
||||
&discr,
|
||||
&ImmTy::from_uint(const_int, discr.layout),
|
||||
)?
|
||||
.0;
|
||||
if res.to_bool()? {
|
||||
target_block = target;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
self.go_to_block(target_block);
|
||||
}
|
||||
|
||||
Call { ref func, ref args, destination, ref cleanup, from_hir_call: _, fn_span: _ } => {
|
||||
let old_stack = self.frame_idx();
|
||||
let old_loc = self.frame().loc;
|
||||
let func = self.eval_operand(func, None)?;
|
||||
let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() {
|
||||
ty::FnPtr(sig) => {
|
||||
let caller_abi = sig.abi();
|
||||
let fn_ptr = self.read_pointer(&func)?;
|
||||
let fn_val = self.memory.get_fn(fn_ptr)?;
|
||||
(
|
||||
fn_val,
|
||||
caller_abi,
|
||||
self.fn_can_unwind(CodegenFnAttrFlags::empty(), caller_abi),
|
||||
)
|
||||
}
|
||||
ty::FnDef(def_id, substs) => {
|
||||
let sig = func.layout.ty.fn_sig(*self.tcx);
|
||||
(
|
||||
FnVal::Instance(
|
||||
self.resolve(ty::WithOptConstParam::unknown(def_id), substs)?,
|
||||
),
|
||||
sig.abi(),
|
||||
self.fn_can_unwind(self.tcx.codegen_fn_attrs(def_id).flags, sig.abi()),
|
||||
)
|
||||
}
|
||||
_ => span_bug!(
|
||||
terminator.source_info.span,
|
||||
"invalid callee of type {:?}",
|
||||
func.layout.ty
|
||||
),
|
||||
};
|
||||
let args = self.eval_operands(args)?;
|
||||
let dest_place;
|
||||
let ret = match destination {
|
||||
Some((dest, ret)) => {
|
||||
dest_place = self.eval_place(dest)?;
|
||||
Some((&dest_place, ret))
|
||||
}
|
||||
None => None,
|
||||
};
|
||||
self.eval_fn_call(
|
||||
fn_val,
|
||||
abi,
|
||||
&args[..],
|
||||
ret,
|
||||
match (cleanup, caller_can_unwind) {
|
||||
(Some(cleanup), true) => StackPopUnwind::Cleanup(*cleanup),
|
||||
(None, true) => StackPopUnwind::Skip,
|
||||
(_, false) => StackPopUnwind::NotAllowed,
|
||||
},
|
||||
)?;
|
||||
// Sanity-check that `eval_fn_call` either pushed a new frame or
|
||||
// did a jump to another block.
|
||||
if self.frame_idx() == old_stack && self.frame().loc == old_loc {
|
||||
span_bug!(terminator.source_info.span, "evaluating this call made no progress");
|
||||
}
|
||||
}
|
||||
|
||||
Drop { place, target, unwind } => {
|
||||
let place = self.eval_place(place)?;
|
||||
let ty = place.layout.ty;
|
||||
trace!("TerminatorKind::drop: {:?}, type {}", place, ty);
|
||||
|
||||
let instance = Instance::resolve_drop_in_place(*self.tcx, ty);
|
||||
self.drop_in_place(&place, instance, target, unwind)?;
|
||||
}
|
||||
|
||||
Assert { ref cond, expected, ref msg, target, cleanup } => {
|
||||
let cond_val =
|
||||
self.read_immediate(&self.eval_operand(cond, None)?)?.to_scalar()?.to_bool()?;
|
||||
if expected == cond_val {
|
||||
self.go_to_block(target);
|
||||
} else {
|
||||
M::assert_panic(self, msg, cleanup)?;
|
||||
}
|
||||
}
|
||||
|
||||
Abort => {
|
||||
M::abort(self, "the program aborted execution".to_owned())?;
|
||||
}
|
||||
|
||||
// When we encounter Resume, we've finished unwinding
|
||||
// cleanup for the current stack frame. We pop it in order
|
||||
// to continue unwinding the next frame
|
||||
Resume => {
|
||||
trace!("unwinding: resuming from cleanup");
|
||||
// By definition, a Resume terminator means
|
||||
// that we're unwinding
|
||||
self.pop_stack_frame(/* unwinding */ true)?;
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// It is UB to ever encounter this.
|
||||
Unreachable => throw_ub!(Unreachable),
|
||||
|
||||
// These should never occur for MIR we actually run.
|
||||
DropAndReplace { .. }
|
||||
| FalseEdge { .. }
|
||||
| FalseUnwind { .. }
|
||||
| Yield { .. }
|
||||
| GeneratorDrop => span_bug!(
|
||||
terminator.source_info.span,
|
||||
"{:#?} should have been eliminated by MIR pass",
|
||||
terminator.kind
|
||||
),
|
||||
|
||||
// Inline assembly can't be interpreted.
|
||||
InlineAsm { .. } => throw_unsup_format!("inline assembly is not supported"),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn check_argument_compat(
|
||||
rust_abi: bool,
|
||||
caller: TyAndLayout<'tcx>,
|
||||
callee: TyAndLayout<'tcx>,
|
||||
) -> bool {
|
||||
if caller.ty == callee.ty {
|
||||
// No question
|
||||
return true;
|
||||
}
|
||||
if !rust_abi {
|
||||
// Don't risk anything
|
||||
return false;
|
||||
}
|
||||
// Compare layout
|
||||
match (&caller.abi, &callee.abi) {
|
||||
// Different valid ranges are okay (once we enforce validity,
|
||||
// that will take care to make it UB to leave the range, just
|
||||
// like for transmute).
|
||||
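// E.g. a caller passing `&T` where the callee declares `*const T`: both are a single
// pointer-valued `Scalar` and only their valid ranges differ, so they are accepted.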
(abi::Abi::Scalar(ref caller), abi::Abi::Scalar(ref callee)) => {
|
||||
caller.value == callee.value
|
||||
}
|
||||
(
|
||||
abi::Abi::ScalarPair(ref caller1, ref caller2),
|
||||
abi::Abi::ScalarPair(ref callee1, ref callee2),
|
||||
) => caller1.value == callee1.value && caller2.value == callee2.value,
|
||||
// Be conservative
|
||||
_ => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Pass a single argument, checking the types for compatibility.
|
||||
fn pass_argument(
|
||||
&mut self,
|
||||
rust_abi: bool,
|
||||
caller_arg: &mut impl Iterator<Item = OpTy<'tcx, M::PointerTag>>,
|
||||
callee_arg: &PlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
if rust_abi && callee_arg.layout.is_zst() {
|
||||
// Nothing to do.
|
||||
trace!("Skipping callee ZST");
|
||||
return Ok(());
|
||||
}
|
||||
let caller_arg = caller_arg.next().ok_or_else(|| {
|
||||
err_ub_format!("calling a function with fewer arguments than it requires")
|
||||
})?;
|
||||
if rust_abi {
|
||||
assert!(!caller_arg.layout.is_zst(), "ZSTs must have been already filtered out");
|
||||
}
|
||||
// Now, check
|
||||
if !Self::check_argument_compat(rust_abi, caller_arg.layout, callee_arg.layout) {
|
||||
throw_ub_format!(
|
||||
"calling a function with argument of type {:?} passing data of type {:?}",
|
||||
callee_arg.layout.ty,
|
||||
caller_arg.layout.ty
|
||||
)
|
||||
}
|
||||
// We allow some transmutes here
|
||||
self.copy_op_transmute(&caller_arg, callee_arg)
|
||||
}
|
||||
|
||||
/// Call this function -- pushing the stack frame and initializing the arguments.
|
||||
fn eval_fn_call(
|
||||
&mut self,
|
||||
fn_val: FnVal<'tcx, M::ExtraFnVal>,
|
||||
caller_abi: Abi,
|
||||
args: &[OpTy<'tcx, M::PointerTag>],
|
||||
ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
|
||||
mut unwind: StackPopUnwind,
|
||||
) -> InterpResult<'tcx> {
|
||||
trace!("eval_fn_call: {:#?}", fn_val);
|
||||
|
||||
let instance = match fn_val {
|
||||
FnVal::Instance(instance) => instance,
|
||||
FnVal::Other(extra) => {
|
||||
return M::call_extra_fn(self, extra, caller_abi, args, ret, unwind);
|
||||
}
|
||||
};
|
||||
|
||||
let get_abi = |this: &Self, instance_ty: Ty<'tcx>| match instance_ty.kind() {
|
||||
ty::FnDef(..) => instance_ty.fn_sig(*this.tcx).abi(),
|
||||
ty::Closure(..) => Abi::RustCall,
|
||||
ty::Generator(..) => Abi::Rust,
|
||||
_ => span_bug!(this.cur_span(), "unexpected callee ty: {:?}", instance_ty),
|
||||
};
|
||||
|
||||
// ABI check
|
||||
let check_abi = |callee_abi: Abi| -> InterpResult<'tcx> {
|
||||
let normalize_abi = |abi| match abi {
|
||||
Abi::Rust | Abi::RustCall | Abi::RustIntrinsic | Abi::PlatformIntrinsic =>
|
||||
// These are all the same ABI, really.
|
||||
{
|
||||
Abi::Rust
|
||||
}
|
||||
abi => abi,
|
||||
};
|
||||
if normalize_abi(caller_abi) != normalize_abi(callee_abi) {
|
||||
throw_ub_format!(
|
||||
"calling a function with ABI {} using caller ABI {}",
|
||||
callee_abi.name(),
|
||||
caller_abi.name()
|
||||
)
|
||||
}
|
||||
Ok(())
|
||||
};
|
||||
|
||||
match instance.def {
|
||||
ty::InstanceDef::Intrinsic(..) => {
|
||||
if M::enforce_abi(self) {
|
||||
check_abi(get_abi(self, instance.ty(*self.tcx, self.param_env)))?;
|
||||
}
|
||||
assert!(caller_abi == Abi::RustIntrinsic || caller_abi == Abi::PlatformIntrinsic);
|
||||
M::call_intrinsic(self, instance, args, ret, unwind)
|
||||
}
|
||||
ty::InstanceDef::VtableShim(..)
|
||||
| ty::InstanceDef::ReifyShim(..)
|
||||
| ty::InstanceDef::ClosureOnceShim { .. }
|
||||
| ty::InstanceDef::FnPtrShim(..)
|
||||
| ty::InstanceDef::DropGlue(..)
|
||||
| ty::InstanceDef::CloneShim(..)
|
||||
| ty::InstanceDef::Item(_) => {
|
||||
// We need MIR for this fn
|
||||
let body =
|
||||
match M::find_mir_or_eval_fn(self, instance, caller_abi, args, ret, unwind)? {
|
||||
Some(body) => body,
|
||||
None => return Ok(()),
|
||||
};
|
||||
|
||||
// Check against the ABI of the MIR body we are calling (not the ABI of `instance`;
|
||||
// these can differ when `find_mir_or_eval_fn` does something clever like resolve
|
||||
// exported symbol names).
|
||||
let callee_def_id = body.source.def_id();
|
||||
let callee_abi = get_abi(self, self.tcx.type_of(callee_def_id));
|
||||
|
||||
if M::enforce_abi(self) {
|
||||
check_abi(callee_abi)?;
|
||||
}
|
||||
|
||||
if !matches!(unwind, StackPopUnwind::NotAllowed)
|
||||
&& !self
|
||||
.fn_can_unwind(self.tcx.codegen_fn_attrs(callee_def_id).flags, callee_abi)
|
||||
{
|
||||
// The callee cannot unwind.
|
||||
unwind = StackPopUnwind::NotAllowed;
|
||||
}
|
||||
|
||||
self.push_stack_frame(
|
||||
instance,
|
||||
body,
|
||||
ret.map(|p| p.0),
|
||||
StackPopCleanup::Goto { ret: ret.map(|p| p.1), unwind },
|
||||
)?;
|
||||
|
||||
// If an error is raised here, pop the frame again to get an accurate backtrace.
|
||||
// To this end, we wrap it all in a `try` block.
|
||||
let res: InterpResult<'tcx> = try {
|
||||
trace!(
|
||||
"caller ABI: {:?}, args: {:#?}",
|
||||
caller_abi,
|
||||
args.iter()
|
||||
.map(|arg| (arg.layout.ty, format!("{:?}", **arg)))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
trace!(
|
||||
"spread_arg: {:?}, locals: {:#?}",
|
||||
body.spread_arg,
|
||||
body.args_iter()
|
||||
.map(|local| (
|
||||
local,
|
||||
self.layout_of_local(self.frame(), local, None).unwrap().ty
|
||||
))
|
||||
.collect::<Vec<_>>()
|
||||
);
|
||||
|
||||
// Figure out how to pass which arguments.
|
||||
// The Rust ABI is special: ZST get skipped.
|
||||
let rust_abi = match caller_abi {
|
||||
Abi::Rust | Abi::RustCall => true,
|
||||
_ => false,
|
||||
};
|
||||
// We have two iterators: Where the arguments come from,
|
||||
// and where they go to.
|
||||
|
||||
// For where they come from: If the ABI is RustCall, we untuple the
|
||||
// last incoming argument. These two iterators do not have the same type,
|
||||
// so to keep the code paths uniform we accept an allocation
|
||||
// (for RustCall ABI only).
|
||||
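// E.g. a call through `Fn::call` arrives as `(self_ref, (a, b))`; the trailing tuple
// is expanded so the argument iterator yields `self_ref, a, b`.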
let caller_args: Cow<'_, [OpTy<'tcx, M::PointerTag>]> =
|
||||
if caller_abi == Abi::RustCall && !args.is_empty() {
|
||||
// Untuple
|
||||
let (untuple_arg, args) = args.split_last().unwrap();
|
||||
trace!("eval_fn_call: Will pass last argument by untupling");
|
||||
Cow::from(
|
||||
args.iter()
|
||||
.map(|&a| Ok(a))
|
||||
.chain(
|
||||
(0..untuple_arg.layout.fields.count())
|
||||
.map(|i| self.operand_field(untuple_arg, i)),
|
||||
)
|
||||
.collect::<InterpResult<'_, Vec<OpTy<'tcx, M::PointerTag>>>>(
|
||||
)?,
|
||||
)
|
||||
} else {
|
||||
// Plain arg passing
|
||||
Cow::from(args)
|
||||
};
|
||||
// Skip ZSTs
|
||||
let mut caller_iter =
|
||||
caller_args.iter().filter(|op| !rust_abi || !op.layout.is_zst()).copied();
|
||||
|
||||
// Now we have to spread them out across the callee's locals,
|
||||
// taking into account the `spread_arg`. If we could write
|
||||
// this as a single iterator (that handles `spread_arg`), then
|
||||
// `pass_argument` would be the loop body. It takes care to
|
||||
// not advance `caller_iter` for ZSTs.
|
||||
for local in body.args_iter() {
|
||||
let dest = self.eval_place(mir::Place::from(local))?;
|
||||
if Some(local) == body.spread_arg {
|
||||
// Must be a tuple
|
||||
for i in 0..dest.layout.fields.count() {
|
||||
let dest = self.place_field(&dest, i)?;
|
||||
self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
|
||||
}
|
||||
} else {
|
||||
// Normal argument
|
||||
self.pass_argument(rust_abi, &mut caller_iter, &dest)?;
|
||||
}
|
||||
}
|
||||
// Now we should have no more caller args
|
||||
if caller_iter.next().is_some() {
|
||||
throw_ub_format!("calling a function with more arguments than it expected")
|
||||
}
|
||||
// Don't forget to check the return type!
|
||||
if let Some((caller_ret, _)) = ret {
|
||||
let callee_ret = self.eval_place(mir::Place::return_place())?;
|
||||
if !Self::check_argument_compat(
|
||||
rust_abi,
|
||||
caller_ret.layout,
|
||||
callee_ret.layout,
|
||||
) {
|
||||
throw_ub_format!(
|
||||
"calling a function with return type {:?} passing \
|
||||
return place of type {:?}",
|
||||
callee_ret.layout.ty,
|
||||
caller_ret.layout.ty
|
||||
)
|
||||
}
|
||||
} else {
|
||||
let local = mir::RETURN_PLACE;
|
||||
let callee_layout = self.layout_of_local(self.frame(), local, None)?;
|
||||
if !callee_layout.abi.is_uninhabited() {
|
||||
throw_ub_format!("calling a returning function without a return place")
|
||||
}
|
||||
}
|
||||
};
|
||||
match res {
|
||||
Err(err) => {
|
||||
self.stack_mut().pop();
|
||||
Err(err)
|
||||
}
|
||||
Ok(()) => Ok(()),
|
||||
}
|
||||
}
|
||||
// cannot use the shim here, because that will only result in infinite recursion
|
||||
ty::InstanceDef::Virtual(_, idx) => {
|
||||
let mut args = args.to_vec();
|
||||
// We have to implement all "object safe receivers". Currently we
|
||||
// support built-in pointers `(&, &mut, Box)` as well as unsized-self. We do
|
||||
// not yet support custom self types.
|
||||
// Also see `compiler/rustc_codegen_llvm/src/abi.rs` and `compiler/rustc_codegen_ssa/src/mir/block.rs`.
|
||||
let receiver_place = match args[0].layout.ty.builtin_deref(true) {
|
||||
Some(_) => {
|
||||
// Built-in pointer.
|
||||
self.deref_operand(&args[0])?
|
||||
}
|
||||
None => {
|
||||
// Unsized self.
|
||||
args[0].assert_mem_place()
|
||||
}
|
||||
};
|
||||
// Find and consult vtable
|
||||
let vtable = self.scalar_to_ptr(receiver_place.vtable());
|
||||
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
|
||||
|
||||
// `*mut receiver_place.layout.ty` is almost the layout that we
|
||||
// want for args[0]: We have to project to field 0 because we want
|
||||
// a thin pointer.
|
||||
assert!(receiver_place.layout.is_unsized());
|
||||
let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
|
||||
let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0);
|
||||
// Adjust receiver argument.
|
||||
args[0] = OpTy::from(ImmTy::from_immediate(
|
||||
Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
|
||||
this_receiver_ptr,
|
||||
));
|
||||
trace!("Patched self operand to {:#?}", args[0]);
|
||||
// recurse with concrete function
|
||||
self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn drop_in_place(
|
||||
&mut self,
|
||||
place: &PlaceTy<'tcx, M::PointerTag>,
|
||||
instance: ty::Instance<'tcx>,
|
||||
target: mir::BasicBlock,
|
||||
unwind: Option<mir::BasicBlock>,
|
||||
) -> InterpResult<'tcx> {
|
||||
trace!("drop_in_place: {:?},\n {:?}, {:?}", *place, place.layout.ty, instance);
|
||||
// We take the address of the object. This may well be unaligned, which is fine
|
||||
// for us here. However, unaligned accesses will probably make the actual drop
|
||||
// implementation fail -- a problem shared by rustc.
|
||||
let place = self.force_allocation(place)?;
|
||||
|
||||
let (instance, place) = match place.layout.ty.kind() {
|
||||
ty::Dynamic(..) => {
|
||||
// Dropping a trait object.
|
||||
self.unpack_dyn_trait(&place)?
|
||||
}
|
||||
_ => (instance, place),
|
||||
};
|
||||
|
||||
let arg = ImmTy::from_immediate(
|
||||
place.to_ref(self),
|
||||
self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
|
||||
);
|
||||
|
||||
let ty = self.tcx.mk_unit(); // return type is ()
|
||||
let dest = MPlaceTy::dangling(self.layout_of(ty)?);
|
||||
|
||||
self.eval_fn_call(
|
||||
FnVal::Instance(instance),
|
||||
Abi::Rust,
|
||||
&[arg.into()],
|
||||
Some((&dest.into(), target)),
|
||||
match unwind {
|
||||
Some(cleanup) => StackPopUnwind::Cleanup(cleanup),
|
||||
None => StackPopUnwind::Skip,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
142
compiler/rustc_const_eval/src/interpret/traits.rs
Normal file
|
@ -0,0 +1,142 @@
|
|||
use std::convert::TryFrom;
|
||||
|
||||
use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
|
||||
use rustc_middle::ty::{
|
||||
self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN,
|
||||
COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,
|
||||
};
|
||||
use rustc_target::abi::{Align, Size};
|
||||
|
||||
use super::util::ensure_monomorphic_enough;
|
||||
use super::{FnVal, InterpCx, Machine};
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
|
||||
/// objects.
|
||||
///
|
||||
/// The `trait_ref` encodes the erased self type. Hence, if we are
|
||||
/// making an object `Foo<Trait>` from a value of type `Foo<T>`, then
|
||||
/// `trait_ref` would map `T: Trait`.
|
||||
pub fn get_vtable(
|
||||
&mut self,
|
||||
ty: Ty<'tcx>,
|
||||
poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
|
||||
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
|
||||
trace!("get_vtable(trait_ref={:?})", poly_trait_ref);
|
||||
|
||||
let (ty, poly_trait_ref) = self.tcx.erase_regions((ty, poly_trait_ref));
|
||||
|
||||
// All vtables must be monomorphic, bail out otherwise.
|
||||
ensure_monomorphic_enough(*self.tcx, ty)?;
|
||||
ensure_monomorphic_enough(*self.tcx, poly_trait_ref)?;
|
||||
|
||||
let vtable_allocation = self.tcx.vtable_allocation(ty, poly_trait_ref);
|
||||
|
||||
let vtable_ptr = self.memory.global_base_pointer(Pointer::from(vtable_allocation))?;
|
||||
|
||||
Ok(vtable_ptr.into())
|
||||
}
|
||||
|
||||
/// Resolves the function at the specified slot in the provided
|
||||
/// vtable. Currently an index of '3' (`COMMON_VTABLE_ENTRIES.len()`)
|
||||
/// corresponds to the first method declared in the trait of the provided vtable.
|
||||
pub fn get_vtable_slot(
|
||||
&self,
|
||||
vtable: Pointer<Option<M::PointerTag>>,
|
||||
idx: u64,
|
||||
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
|
||||
let ptr_size = self.pointer_size();
|
||||
let vtable_slot = vtable.offset(ptr_size * idx, self)?;
|
||||
let vtable_slot = self
|
||||
.memory
|
||||
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
|
||||
.expect("cannot be a ZST");
|
||||
let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
|
||||
self.memory.get_fn(fn_ptr)
|
||||
}
|
||||
|
||||
/// Returns the drop fn instance as well as the actual dynamic type.
|
||||
pub fn read_drop_type_from_vtable(
|
||||
&self,
|
||||
vtable: Pointer<Option<M::PointerTag>>,
|
||||
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
|
||||
let pointer_size = self.pointer_size();
|
||||
// We don't care about the pointee type; we just want a pointer.
|
||||
let vtable = self
|
||||
.memory
|
||||
.get(
|
||||
vtable,
|
||||
pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES.len()).unwrap(),
|
||||
self.tcx.data_layout.pointer_align.abi,
|
||||
)?
|
||||
.expect("cannot be a ZST");
|
||||
let drop_fn = vtable
|
||||
.read_ptr_sized(
|
||||
pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_DROPINPLACE).unwrap(),
|
||||
)?
|
||||
.check_init()?;
|
||||
// We *need* an instance here, no other kind of function value, to be able
|
||||
// to determine the type.
|
||||
let drop_instance = self.memory.get_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
|
||||
trace!("Found drop fn: {:?}", drop_instance);
|
||||
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
|
||||
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
|
||||
// The drop function takes `*mut T` where `T` is the type being dropped, so get that.
|
||||
let args = fn_sig.inputs();
|
||||
if args.len() != 1 {
|
||||
throw_ub!(InvalidVtableDropFn(fn_sig));
|
||||
}
|
||||
let ty =
|
||||
args[0].builtin_deref(true).ok_or_else(|| err_ub!(InvalidVtableDropFn(fn_sig)))?.ty;
|
||||
Ok((drop_instance, ty))
|
||||
}
|
||||
|
||||
pub fn read_size_and_align_from_vtable(
|
||||
&self,
|
||||
vtable: Pointer<Option<M::PointerTag>>,
|
||||
) -> InterpResult<'tcx, (Size, Align)> {
|
||||
let pointer_size = self.pointer_size();
|
||||
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
|
||||
// the size, and the align (which we read below).
|
||||
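// Vtable layout (see `COMMON_VTABLE_ENTRIES`): slot 0 holds the `drop_in_place` fn
// pointer, slot 1 the size, slot 2 the align; trait methods start at slot 3.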
let vtable = self
|
||||
.memory
|
||||
.get(
|
||||
vtable,
|
||||
pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES.len()).unwrap(),
|
||||
self.tcx.data_layout.pointer_align.abi,
|
||||
)?
|
||||
.expect("cannot be a ZST");
|
||||
let size = vtable
|
||||
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
|
||||
.check_init()?;
|
||||
let size = size.to_machine_usize(self)?;
|
||||
let align = vtable
|
||||
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
|
||||
.check_init()?;
|
||||
let align = align.to_machine_usize(self)?;
|
||||
let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;
|
||||
|
||||
if size >= self.tcx.data_layout.obj_size_bound() {
|
||||
throw_ub!(InvalidVtableSize);
|
||||
}
|
||||
Ok((Size::from_bytes(size), align))
|
||||
}
|
||||
|
||||
pub fn read_new_vtable_after_trait_upcasting_from_vtable(
|
||||
&self,
|
||||
vtable: Pointer<Option<M::PointerTag>>,
|
||||
idx: u64,
|
||||
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
|
||||
let pointer_size = self.pointer_size();
|
||||
|
||||
let vtable_slot = vtable.offset(pointer_size * idx, self)?;
|
||||
let new_vtable = self
|
||||
.memory
|
||||
.get(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
|
||||
.expect("cannot be a ZST");
|
||||
|
||||
let new_vtable = self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?);
|
||||
|
||||
Ok(new_vtable)
|
||||
}
|
||||
}
|
84
compiler/rustc_const_eval/src/interpret/util.rs
Normal file

|
@ -0,0 +1,84 @@
|
|||
use rustc_middle::mir::interpret::InterpResult;
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitor};
|
||||
use std::convert::TryInto;
|
||||
use std::ops::ControlFlow;
|
||||
|
||||
/// Checks whether a used generic parameter in `ty` still requires substitution;
/// if one does, const evaluation bails out with a `TooGeneric` error.
|
||||
crate fn ensure_monomorphic_enough<'tcx, T>(tcx: TyCtxt<'tcx>, ty: T) -> InterpResult<'tcx>
|
||||
where
|
||||
T: TypeFoldable<'tcx>,
|
||||
{
|
||||
debug!("ensure_monomorphic_enough: ty={:?}", ty);
|
||||
if !ty.potentially_needs_subst() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
struct FoundParam;
|
||||
struct UsedParamsNeedSubstVisitor<'tcx> {
|
||||
tcx: TyCtxt<'tcx>,
|
||||
}
|
||||
|
||||
impl<'tcx> TypeVisitor<'tcx> for UsedParamsNeedSubstVisitor<'tcx> {
|
||||
type BreakTy = FoundParam;
|
||||
|
||||
fn tcx_for_anon_const_substs(&self) -> Option<TyCtxt<'tcx>> {
|
||||
Some(self.tcx)
|
||||
}
|
||||
|
||||
fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
|
||||
if !ty.potentially_needs_subst() {
|
||||
return ControlFlow::CONTINUE;
|
||||
}
|
||||
|
||||
match *ty.kind() {
|
||||
ty::Param(_) => ControlFlow::Break(FoundParam),
|
||||
ty::Closure(def_id, substs)
|
||||
| ty::Generator(def_id, substs, ..)
|
||||
| ty::FnDef(def_id, substs) => {
|
||||
let unused_params = self.tcx.unused_generic_params(def_id);
|
||||
for (index, subst) in substs.into_iter().enumerate() {
|
||||
let index = index
|
||||
.try_into()
|
||||
.expect("more generic parameters than can fit into a `u32`");
|
||||
let is_used = unused_params.contains(index).map_or(true, |unused| !unused);
|
||||
// Only recurse when generic parameters in fns, closures and generators
|
||||
// are used and require substitution.
|
||||
match (is_used, subst.definitely_needs_subst(self.tcx)) {
|
||||
// Just in case there are closures or generators within this subst,
|
||||
// recurse.
|
||||
(true, true) => return subst.super_visit_with(self),
|
||||
// Confirm that polymorphization replaced the parameter with
|
||||
// `ty::Param`/`ty::ConstKind::Param`.
|
||||
(false, true) if cfg!(debug_assertions) => match subst.unpack() {
|
||||
ty::subst::GenericArgKind::Type(ty) => {
|
||||
assert!(matches!(ty.kind(), ty::Param(_)))
|
||||
}
|
||||
ty::subst::GenericArgKind::Const(ct) => {
|
||||
assert!(matches!(ct.val, ty::ConstKind::Param(_)))
|
||||
}
|
||||
ty::subst::GenericArgKind::Lifetime(..) => (),
|
||||
},
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
ControlFlow::CONTINUE
|
||||
}
|
||||
_ => ty.super_visit_with(self),
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_const(&mut self, c: &'tcx ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
|
||||
match c.val {
|
||||
ty::ConstKind::Param(..) => ControlFlow::Break(FoundParam),
|
||||
_ => c.super_visit_with(self),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut vis = UsedParamsNeedSubstVisitor { tcx };
|
||||
if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
|
||||
throw_inval!(TooGeneric);
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
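// Standalone sketch, not part of the original commit, of the short-circuiting
// `ControlFlow` visitor pattern used by `ensure_monomorphic_enough` above: the walk
// breaks out as soon as a "parameter" is found, which the caller then turns into an
// error (here a plain marker type, above `throw_inval!(TooGeneric)`).
#[cfg(test)]
mod control_flow_visitor_sketch {
    use std::ops::ControlFlow;

    struct FoundParam;

    // `0` plays the role of `ty::Param(_)` in this toy model.
    fn visit(values: &[u32]) -> ControlFlow<FoundParam> {
        for &v in values {
            if v == 0 {
                return ControlFlow::Break(FoundParam);
            }
        }
        ControlFlow::Continue(())
    }

    #[test]
    fn breaks_on_first_param() {
        assert!(matches!(visit(&[1, 2, 3]), ControlFlow::Continue(())));
        assert!(matches!(visit(&[1, 0, 3]), ControlFlow::Break(FoundParam)));
    }
}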
|
965
compiler/rustc_const_eval/src/interpret/validity.rs
Normal file
|
@ -0,0 +1,965 @@
|
|||
//! Check the validity invariant of a given value, and tell the user
|
||||
//! where in the value it got violated.
|
||||
//! In const context, this goes even further and tries to approximate const safety.
|
||||
//! That's useful because it means other passes (e.g. promotion) can rely on `const`s
|
||||
//! to be const-safe.
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::fmt::Write;
|
||||
use std::num::NonZeroUsize;
|
||||
|
||||
use rustc_data_structures::fx::FxHashSet;
|
||||
use rustc_hir as hir;
|
||||
use rustc_middle::mir::interpret::InterpError;
|
||||
use rustc_middle::ty;
|
||||
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
|
||||
use rustc_span::symbol::{sym, Symbol};
|
||||
use rustc_target::abi::{Abi, Scalar as ScalarAbi, Size, VariantIdx, Variants, WrappingRange};
|
||||
|
||||
use std::hash::Hash;
|
||||
|
||||
use super::{
|
||||
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
|
||||
MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
|
||||
};
|
||||
|
||||
macro_rules! throw_validation_failure {
|
||||
($where:expr, { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )?) => {{
|
||||
let mut msg = String::new();
|
||||
msg.push_str("encountered ");
|
||||
write!(&mut msg, $($what_fmt),+).unwrap();
|
||||
$(
|
||||
msg.push_str(", but expected ");
|
||||
write!(&mut msg, $($expected_fmt),+).unwrap();
|
||||
)?
|
||||
let path = rustc_middle::ty::print::with_no_trimmed_paths(|| {
|
||||
let where_ = &$where;
|
||||
if !where_.is_empty() {
|
||||
let mut path = String::new();
|
||||
write_path(&mut path, where_);
|
||||
Some(path)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
});
|
||||
throw_ub!(ValidationFailure { path, msg })
|
||||
}};
|
||||
}
|
||||
|
||||
/// If $e throws an error matching the pattern, throw a validation failure.
|
||||
/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
|
||||
/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
|
||||
/// This lets you use the patterns as a kind of validation list, asserting which errors
|
||||
/// can possibly happen:
|
||||
///
|
||||
/// ```
|
||||
/// let v = try_validation!(some_fn(), some_path, {
|
||||
/// Foo | Bar | Baz => { "some failure" },
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// An additional expected parameter can also be added to the failure message:
|
||||
///
|
||||
/// ```
|
||||
/// let v = try_validation!(some_fn(), some_path, {
|
||||
/// Foo | Bar | Baz => { "some failure" } expected { "something that wasn't a failure" },
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
/// An additional nicety is that both parameters actually take format args, so you can just write
|
||||
/// the format string in directly:
|
||||
///
|
||||
/// ```
|
||||
/// let v = try_validation!(some_fn(), some_path, {
|
||||
/// Foo | Bar | Baz => { "{:?}", some_failure } expected { "{}", expected_value },
|
||||
/// });
|
||||
/// ```
|
||||
///
|
||||
macro_rules! try_validation {
|
||||
($e:expr, $where:expr,
|
||||
$( $( $p:pat )|+ => { $( $what_fmt:expr ),+ } $( expected { $( $expected_fmt:expr ),+ } )? ),+ $(,)?
|
||||
) => {{
|
||||
match $e {
|
||||
Ok(x) => x,
|
||||
// We catch the error and turn it into a validation failure. We are okay with
|
||||
// allocation here as this can only slow down builds that fail anyway.
|
||||
Err(e) => match e.kind() {
|
||||
$(
|
||||
$($p)|+ =>
|
||||
throw_validation_failure!(
|
||||
$where,
|
||||
{ $( $what_fmt ),+ } $( expected { $( $expected_fmt ),+ } )?
|
||||
)
|
||||
),+,
|
||||
#[allow(unreachable_patterns)]
|
||||
_ => Err::<!, _>(e)?,
|
||||
}
|
||||
}
|
||||
}};
|
||||
}
|
||||
|
||||
/// We want to show a nice path to the invalid field for diagnostics,
|
||||
/// but avoid string operations in the happy case where no error happens.
|
||||
/// So we track a `Vec<PathElem>` where `PathElem` contains all the data we
|
||||
/// need to later print something for the user.
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
pub enum PathElem {
|
||||
Field(Symbol),
|
||||
Variant(Symbol),
|
||||
GeneratorState(VariantIdx),
|
||||
CapturedVar(Symbol),
|
||||
ArrayElem(usize),
|
||||
TupleElem(usize),
|
||||
Deref,
|
||||
EnumTag,
|
||||
GeneratorTag,
|
||||
DynDowncast,
|
||||
}
|
||||
|
||||
/// Extra things to check for during validation of CTFE results.
|
||||
pub enum CtfeValidationMode {
|
||||
/// Regular validation, nothing special happening.
|
||||
Regular,
|
||||
/// Validation of a `const`.
|
||||
/// `inner` says if this is an inner, indirect allocation (as opposed to the top-level const
|
||||
/// allocation). Being an inner allocation makes a difference because the top-level allocation
|
||||
/// of a `const` is copied for each use, but the inner allocations are implicitly shared.
|
||||
/// `allow_static_ptrs` says if pointers to statics are permitted (which is the case for promoteds in statics).
|
||||
Const { inner: bool, allow_static_ptrs: bool },
|
||||
}
|
||||
|
||||
/// State for tracking recursive validation of references
|
||||
pub struct RefTracking<T, PATH = ()> {
|
||||
pub seen: FxHashSet<T>,
|
||||
pub todo: Vec<(T, PATH)>,
|
||||
}
|
||||
|
||||
impl<T: Copy + Eq + Hash + std::fmt::Debug, PATH: Default> RefTracking<T, PATH> {
|
||||
pub fn empty() -> Self {
|
||||
RefTracking { seen: FxHashSet::default(), todo: vec![] }
|
||||
}
|
||||
pub fn new(op: T) -> Self {
|
||||
let mut ref_tracking_for_consts =
|
||||
RefTracking { seen: FxHashSet::default(), todo: vec![(op, PATH::default())] };
|
||||
ref_tracking_for_consts.seen.insert(op);
|
||||
ref_tracking_for_consts
|
||||
}
|
||||
|
||||
pub fn track(&mut self, op: T, path: impl FnOnce() -> PATH) {
|
||||
if self.seen.insert(op) {
|
||||
trace!("Recursing below ptr {:#?}", op);
|
||||
let path = path();
|
||||
// Remember to come back to this later.
|
||||
self.todo.push((op, path));
|
||||
}
|
||||
}
|
||||
}
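// Illustrative sketch, not part of the original commit: the `seen`/`todo` worklist
// discipline implemented by `RefTracking` above, reduced to plain integers. Each value
// is enqueued at most once, so reference cycles are walked exactly once.
#[cfg(test)]
mod ref_tracking_sketch {
    use std::collections::HashSet;

    #[test]
    fn each_node_is_visited_once() {
        // A tiny reference graph with a cycle between 1 and 2.
        let edges = [(0u32, 1u32), (0, 2), (1, 2), (2, 1)];
        let mut seen: HashSet<u32> = HashSet::new();
        let mut todo = vec![0u32];
        seen.insert(0);

        let mut visited = Vec::new();
        while let Some(node) = todo.pop() {
            visited.push(node);
            for &(from, to) in &edges {
                // Like `RefTracking::track`: only enqueue targets we have not seen yet.
                if from == node && seen.insert(to) {
                    todo.push(to);
                }
            }
        }

        visited.sort_unstable();
        assert_eq!(visited, vec![0u32, 1, 2]);
    }
}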
|
||||
|
||||
/// Format a path
|
||||
fn write_path(out: &mut String, path: &[PathElem]) {
|
||||
use self::PathElem::*;
|
||||
|
||||
for elem in path.iter() {
|
||||
match elem {
|
||||
Field(name) => write!(out, ".{}", name),
|
||||
EnumTag => write!(out, ".<enum-tag>"),
|
||||
Variant(name) => write!(out, ".<enum-variant({})>", name),
|
||||
GeneratorTag => write!(out, ".<generator-tag>"),
|
||||
GeneratorState(idx) => write!(out, ".<generator-state({})>", idx.index()),
|
||||
CapturedVar(name) => write!(out, ".<captured-var({})>", name),
|
||||
TupleElem(idx) => write!(out, ".{}", idx),
|
||||
ArrayElem(idx) => write!(out, "[{}]", idx),
|
||||
// `.<deref>` does not match Rust syntax, but it is more readable for long paths -- and
|
||||
// some of the other items here also are not Rust syntax. Actually we can't
|
||||
// even use the usual syntax because we are just showing the projections,
|
||||
// not the root.
|
||||
Deref => write!(out, ".<deref>"),
|
||||
DynDowncast => write!(out, ".<dyn-downcast>"),
|
||||
}
|
||||
.unwrap()
|
||||
}
|
||||
}
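// Sketch, not part of the original commit, of the kind of projection path the code above
// renders for diagnostics; the syntax deliberately is not valid Rust (`.<deref>` etc.),
// it only shows the projections from the (unnamed) root.
#[cfg(test)]
mod path_rendering_sketch {
    use std::fmt::Write;

    #[test]
    fn renders_projections_only() {
        let mut out = String::new();
        // e.g. field `inner`, array element 3, then a dereference
        write!(&mut out, ".{}", "inner").unwrap();
        write!(&mut out, "[{}]", 3).unwrap();
        write!(&mut out, ".<deref>").unwrap();
        assert_eq!(out, ".inner[3].<deref>");
    }
}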
|
||||
|
||||
// Formats the range so that a sentence like "expected something {}" reads as
// "expected something <in the given range>".
|
||||
fn wrapping_range_format(r: WrappingRange, max_hi: u128) -> String {
|
||||
let WrappingRange { start: lo, end: hi } = r;
|
||||
assert!(hi <= max_hi);
|
||||
if lo > hi {
|
||||
format!("less or equal to {}, or greater or equal to {}", hi, lo)
|
||||
} else if lo == hi {
|
||||
format!("equal to {}", lo)
|
||||
} else if lo == 0 {
|
||||
assert!(hi < max_hi, "should not be printing if the range covers everything");
|
||||
format!("less or equal to {}", hi)
|
||||
} else if hi == max_hi {
|
||||
assert!(lo > 0, "should not be printing if the range covers everything");
|
||||
format!("greater or equal to {}", lo)
|
||||
} else {
|
||||
format!("in the range {:?}", r)
|
||||
}
|
||||
}
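// Standalone sketch, not part of the original commit, of the wrap-around semantics that
// `wrapping_range_format` above describes: when `start > end` the valid set wraps around
// the end of the value space. `contains` is a simplified stand-in for
// `WrappingRange::contains`.
#[cfg(test)]
mod wrapping_range_sketch {
    fn contains(start: u128, end: u128, bits: u128) -> bool {
        if start <= end { start <= bits && bits <= end } else { bits >= start || bits <= end }
    }

    #[test]
    fn plain_and_wrapped_ranges() {
        // A `NonZeroU8`-like range 1..=255 excludes only 0.
        assert!(contains(1, 255, 7));
        assert!(!contains(1, 255, 0));
        // A wrapped range 250..=5 accepts values near both ends of the space.
        assert!(contains(250, 5, 3));
        assert!(contains(250, 5, 252));
        assert!(!contains(250, 5, 100));
    }
}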
|
||||
|
||||
struct ValidityVisitor<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
|
||||
/// The `path` may be pushed to, but the part that is present when a function
|
||||
/// starts must not be changed! `visit_fields` and `visit_array` rely on
|
||||
/// this stack discipline.
|
||||
path: Vec<PathElem>,
|
||||
ref_tracking: Option<&'rt mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
|
||||
/// `None` indicates this is not validating for CTFE (but for runtime).
|
||||
ctfe_mode: Option<CtfeValidationMode>,
|
||||
ecx: &'rt InterpCx<'mir, 'tcx, M>,
|
||||
}
|
||||
|
||||
impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M> {
|
||||
fn aggregate_field_path_elem(&mut self, layout: TyAndLayout<'tcx>, field: usize) -> PathElem {
|
||||
// First, check if we are projecting to a variant.
|
||||
match layout.variants {
|
||||
Variants::Multiple { tag_field, .. } => {
|
||||
if tag_field == field {
|
||||
return match layout.ty.kind() {
|
||||
ty::Adt(def, ..) if def.is_enum() => PathElem::EnumTag,
|
||||
ty::Generator(..) => PathElem::GeneratorTag,
|
||||
_ => bug!("non-variant type {:?}", layout.ty),
|
||||
};
|
||||
}
|
||||
}
|
||||
Variants::Single { .. } => {}
|
||||
}
|
||||
|
||||
// Now we know we are projecting to a field, so figure out which one.
|
||||
match layout.ty.kind() {
|
||||
// generators and closures.
|
||||
ty::Closure(def_id, _) | ty::Generator(def_id, _, _) => {
|
||||
let mut name = None;
|
||||
// FIXME this should be more descriptive i.e. CapturePlace instead of CapturedVar
|
||||
// https://github.com/rust-lang/project-rfc-2229/issues/46
|
||||
if let Some(local_def_id) = def_id.as_local() {
|
||||
let tables = self.ecx.tcx.typeck(local_def_id);
|
||||
if let Some(captured_place) =
|
||||
tables.closure_min_captures_flattened(*def_id).nth(field)
|
||||
{
|
||||
// Sometimes the index is beyond the number of upvars (seen
|
||||
// for a generator).
|
||||
let var_hir_id = captured_place.get_root_variable();
|
||||
let node = self.ecx.tcx.hir().get(var_hir_id);
|
||||
if let hir::Node::Binding(pat) = node {
|
||||
if let hir::PatKind::Binding(_, _, ident, _) = pat.kind {
|
||||
name = Some(ident.name);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
PathElem::CapturedVar(name.unwrap_or_else(|| {
|
||||
// Fall back to showing the field index.
|
||||
sym::integer(field)
|
||||
}))
|
||||
}
|
||||
|
||||
// tuples
|
||||
ty::Tuple(_) => PathElem::TupleElem(field),
|
||||
|
||||
// enums
|
||||
ty::Adt(def, ..) if def.is_enum() => {
|
||||
// we might be projecting *to* a variant, or to a field *in* a variant.
|
||||
match layout.variants {
|
||||
Variants::Single { index } => {
|
||||
// Inside a variant
|
||||
PathElem::Field(def.variants[index].fields[field].ident.name)
|
||||
}
|
||||
Variants::Multiple { .. } => bug!("we handled variants above"),
|
||||
}
|
||||
}
|
||||
|
||||
// other ADTs
|
||||
ty::Adt(def, _) => PathElem::Field(def.non_enum_variant().fields[field].ident.name),
|
||||
|
||||
// arrays/slices
|
||||
ty::Array(..) | ty::Slice(..) => PathElem::ArrayElem(field),
|
||||
|
||||
// dyn traits
|
||||
ty::Dynamic(..) => PathElem::DynDowncast,
|
||||
|
||||
// nothing else has an aggregate layout
|
||||
_ => bug!("aggregate_field_path_elem: got non-aggregate type {:?}", layout.ty),
|
||||
}
|
||||
}
|
||||
|
||||
fn with_elem<R>(
|
||||
&mut self,
|
||||
elem: PathElem,
|
||||
f: impl FnOnce(&mut Self) -> InterpResult<'tcx, R>,
|
||||
) -> InterpResult<'tcx, R> {
|
||||
// Remember the old state
|
||||
let path_len = self.path.len();
|
||||
// Record new element
|
||||
self.path.push(elem);
|
||||
// Perform operation
|
||||
let r = f(self)?;
|
||||
// Undo changes
|
||||
self.path.truncate(path_len);
|
||||
// Done
|
||||
Ok(r)
|
||||
}
|
||||
|
||||
fn check_wide_ptr_meta(
|
||||
&mut self,
|
||||
meta: MemPlaceMeta<M::PointerTag>,
|
||||
pointee: TyAndLayout<'tcx>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
|
||||
match tail.kind() {
|
||||
ty::Dynamic(..) => {
|
||||
let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
|
||||
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
|
||||
try_validation!(
|
||||
self.ecx.memory.check_ptr_access_align(
|
||||
vtable,
|
||||
3 * self.ecx.tcx.data_layout.pointer_size, // drop, size, align
|
||||
self.ecx.tcx.data_layout.pointer_align.abi,
|
||||
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
|
||||
),
|
||||
self.path,
|
||||
err_ub!(DanglingIntPointer(..)) |
|
||||
err_ub!(PointerUseAfterFree(..)) =>
|
||||
{ "dangling vtable pointer in wide pointer" },
|
||||
err_ub!(AlignmentCheckFailed { .. }) =>
|
||||
{ "unaligned vtable pointer in wide pointer" },
|
||||
err_ub!(PointerOutOfBounds { .. }) =>
|
||||
{ "too small vtable" },
|
||||
);
|
||||
try_validation!(
|
||||
self.ecx.read_drop_type_from_vtable(vtable),
|
||||
self.path,
|
||||
err_ub!(DanglingIntPointer(..)) |
|
||||
err_ub!(InvalidFunctionPointer(..)) =>
|
||||
{ "invalid drop function pointer in vtable (not pointing to a function)" },
|
||||
err_ub!(InvalidVtableDropFn(..)) =>
|
||||
{ "invalid drop function pointer in vtable (function has incompatible signature)" },
|
||||
);
|
||||
try_validation!(
|
||||
self.ecx.read_size_and_align_from_vtable(vtable),
|
||||
self.path,
|
||||
err_ub!(InvalidVtableSize) =>
|
||||
{ "invalid vtable: size is bigger than largest supported object" },
|
||||
err_ub!(InvalidVtableAlignment(msg)) =>
|
||||
{ "invalid vtable: alignment {}", msg },
|
||||
err_unsup!(ReadPointerAsBytes) => { "invalid size or align in vtable" },
|
||||
);
|
||||
// FIXME: More checks for the vtable.
|
||||
}
|
||||
ty::Slice(..) | ty::Str => {
|
||||
let _len = try_validation!(
|
||||
meta.unwrap_meta().to_machine_usize(self.ecx),
|
||||
self.path,
|
||||
err_unsup!(ReadPointerAsBytes) => { "non-integer slice length in wide pointer" },
|
||||
);
|
||||
// We do not check that `len * elem_size <= isize::MAX`:
|
||||
// that is only required for references, and there it falls out of the
|
||||
// "dereferenceable" check performed by Stacked Borrows.
|
||||
}
|
||||
ty::Foreign(..) => {
|
||||
// Unsized, but not wide.
|
||||
}
|
||||
_ => bug!("Unexpected unsized type tail: {:?}", tail),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check a reference or `Box`.
|
||||
fn check_safe_pointer(
|
||||
&mut self,
|
||||
value: &OpTy<'tcx, M::PointerTag>,
|
||||
kind: &str,
|
||||
) -> InterpResult<'tcx> {
|
||||
let value = try_validation!(
|
||||
self.ecx.read_immediate(value),
|
||||
self.path,
|
||||
err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
|
||||
);
|
||||
// Handle wide pointers.
|
||||
// Check metadata early, for better diagnostics
|
||||
let place = try_validation!(
|
||||
self.ecx.ref_to_mplace(&value),
|
||||
self.path,
|
||||
err_ub!(InvalidUninitBytes(None)) => { "uninitialized {}", kind },
|
||||
);
|
||||
if place.layout.is_unsized() {
|
||||
self.check_wide_ptr_meta(place.meta, place.layout)?;
|
||||
}
|
||||
// Make sure this is dereferenceable and all.
|
||||
let size_and_align = try_validation!(
|
||||
self.ecx.size_and_align_of_mplace(&place),
|
||||
self.path,
|
||||
err_ub!(InvalidMeta(msg)) => { "invalid {} metadata: {}", kind, msg },
|
||||
);
|
||||
let (size, align) = size_and_align
|
||||
// for the purpose of validity, consider foreign types to have
|
||||
// alignment and size determined by the layout (size will be 0,
|
||||
// alignment should take attributes into account).
|
||||
.unwrap_or_else(|| (place.layout.size, place.layout.align.abi));
|
||||
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
|
||||
try_validation!(
|
||||
self.ecx.memory.check_ptr_access_align(
|
||||
place.ptr,
|
||||
size,
|
||||
align,
|
||||
CheckInAllocMsg::InboundsTest, // will anyway be replaced by validity message
|
||||
),
|
||||
self.path,
|
||||
err_ub!(AlignmentCheckFailed { required, has }) =>
|
||||
{
|
||||
"an unaligned {} (required {} byte alignment but found {})",
|
||||
kind,
|
||||
required.bytes(),
|
||||
has.bytes()
|
||||
},
|
||||
err_ub!(DanglingIntPointer(0, _)) =>
|
||||
{ "a null {}", kind },
|
||||
err_ub!(DanglingIntPointer(i, _)) =>
|
||||
{ "a dangling {} (address 0x{:x} is unallocated)", kind, i },
|
||||
err_ub!(PointerOutOfBounds { .. }) =>
|
||||
{ "a dangling {} (going beyond the bounds of its allocation)", kind },
|
||||
// This cannot happen during const-eval (because interning already detects
|
||||
// dangling pointers), but it can happen in Miri.
|
||||
err_ub!(PointerUseAfterFree(..)) =>
|
||||
{ "a dangling {} (use-after-free)", kind },
|
||||
);
|
||||
// Recursive checking
|
||||
if let Some(ref mut ref_tracking) = self.ref_tracking {
|
||||
// Proceed recursively even for ZST, no reason to skip them!
|
||||
// `!` is a ZST and we want to validate it.
|
||||
// Skip validation entirely for some external statics
|
||||
if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
|
||||
// not a ZST
|
||||
let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
|
||||
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
|
||||
assert!(!self.ecx.tcx.is_thread_local_static(did));
|
||||
assert!(self.ecx.tcx.is_static(did));
|
||||
if matches!(
|
||||
self.ctfe_mode,
|
||||
Some(CtfeValidationMode::Const { allow_static_ptrs: false, .. })
|
||||
) {
|
||||
// See const_eval::machine::MemoryExtra::can_access_statics for why
|
||||
// this check is so important.
|
||||
// This check is reachable when the const just referenced the static,
|
||||
// but never read it (so we never entered `before_access_global`).
|
||||
throw_validation_failure!(self.path,
|
||||
{ "a {} pointing to a static variable", kind }
|
||||
);
|
||||
}
|
||||
// We skip checking other statics. These statics must be sound by
|
||||
// themselves, and the only way to get broken statics here is by using
|
||||
// unsafe code.
|
||||
// The reasons we don't check other statics is twofold. For one, in all
|
||||
// sound cases, the static was already validated on its own, and second, we
|
||||
// trigger cycle errors if we try to compute the value of the other static
|
||||
// and that static refers back to us.
|
||||
// We might miss const-invalid data,
|
||||
// but things are still sound otherwise (in particular re: consts
|
||||
// referring to statics).
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
let path = &self.path;
|
||||
ref_tracking.track(place, || {
|
||||
// We need to clone the path anyway, make sure it gets created
|
||||
// with enough space for the additional `Deref`.
|
||||
let mut new_path = Vec::with_capacity(path.len() + 1);
|
||||
new_path.clone_from(path);
|
||||
new_path.push(PathElem::Deref);
|
||||
new_path
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn read_scalar(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, ScalarMaybeUninit<M::PointerTag>> {
|
||||
Ok(try_validation!(
|
||||
self.ecx.read_scalar(op),
|
||||
self.path,
|
||||
err_unsup!(ReadPointerAsBytes) => { "(potentially part of) a pointer" } expected { "plain (non-pointer) bytes" },
|
||||
))
|
||||
}
|
||||
|
||||
/// Check if this is a value of primitive type, and if yes check the validity of the value
|
||||
/// at that type. Return `true` if the type is indeed primitive.
|
||||
fn try_visit_primitive(
|
||||
&mut self,
|
||||
value: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, bool> {
|
||||
// Go over all the primitive types
|
||||
let ty = value.layout.ty;
|
||||
match ty.kind() {
|
||||
ty::Bool => {
|
||||
let value = self.read_scalar(value)?;
|
||||
try_validation!(
|
||||
value.to_bool(),
|
||||
self.path,
|
||||
err_ub!(InvalidBool(..)) | err_ub!(InvalidUninitBytes(None)) =>
|
||||
{ "{}", value } expected { "a boolean" },
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
ty::Char => {
|
||||
let value = self.read_scalar(value)?;
|
||||
try_validation!(
|
||||
value.to_char(),
|
||||
self.path,
|
||||
err_ub!(InvalidChar(..)) | err_ub!(InvalidUninitBytes(None)) =>
|
||||
{ "{}", value } expected { "a valid unicode scalar value (in `0..=0x10FFFF` but not in `0xD800..=0xDFFF`)" },
|
||||
);
|
||||
Ok(true)
|
||||
}
|
||||
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
|
||||
let value = self.read_scalar(value)?;
|
||||
// NOTE: Keep this in sync with the array optimization for int/float
|
||||
// types below!
|
||||
if self.ctfe_mode.is_some() {
|
||||
// Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
|
||||
let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
|
||||
if !is_bits {
|
||||
throw_validation_failure!(self.path,
|
||||
{ "{}", value } expected { "initialized plain (non-pointer) bytes" }
|
||||
)
|
||||
}
|
||||
} else {
|
||||
// At run-time, for now, we accept *anything* for these types, including
|
||||
// uninit. We should fix that, but let's start low.
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
ty::RawPtr(..) => {
|
||||
// We are conservative with uninit for integers, but try to
|
||||
// actually enforce the strict rules for raw pointers (mostly because
|
||||
// that lets us re-use `ref_to_mplace`).
|
||||
let place = try_validation!(
|
||||
self.ecx.read_immediate(value).and_then(|ref i| self.ecx.ref_to_mplace(i)),
|
||||
self.path,
|
||||
err_ub!(InvalidUninitBytes(None)) => { "uninitialized raw pointer" },
|
||||
err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
|
||||
);
|
||||
if place.layout.is_unsized() {
|
||||
self.check_wide_ptr_meta(place.meta, place.layout)?;
|
||||
}
|
||||
Ok(true)
|
||||
}
|
||||
ty::Ref(_, ty, mutbl) => {
|
||||
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { .. }))
|
||||
&& *mutbl == hir::Mutability::Mut
|
||||
{
|
||||
// A mutable reference inside a const? That does not seem right (except if it is
|
||||
// a ZST).
|
||||
let layout = self.ecx.layout_of(ty)?;
|
||||
if !layout.is_zst() {
|
||||
throw_validation_failure!(self.path, { "mutable reference in a `const`" });
|
||||
}
|
||||
}
|
||||
self.check_safe_pointer(value, "reference")?;
|
||||
Ok(true)
|
||||
}
|
||||
ty::Adt(def, ..) if def.is_box() => {
|
||||
self.check_safe_pointer(value, "box")?;
|
||||
Ok(true)
|
||||
}
|
||||
ty::FnPtr(_sig) => {
|
||||
let value = try_validation!(
|
||||
self.ecx.read_immediate(value),
|
||||
self.path,
|
||||
err_unsup!(ReadPointerAsBytes) => { "part of a pointer" } expected { "a proper pointer or integer value" },
|
||||
);
|
||||
// Make sure we print a `ScalarMaybeUninit` (and not an `ImmTy`) in the error
|
||||
// message below.
|
||||
let value = value.to_scalar_or_uninit();
|
||||
let _fn = try_validation!(
|
||||
value.check_init().and_then(|ptr| self.ecx.memory.get_fn(self.ecx.scalar_to_ptr(ptr))),
|
||||
self.path,
|
||||
err_ub!(DanglingIntPointer(..)) |
|
||||
err_ub!(InvalidFunctionPointer(..)) |
|
||||
err_ub!(InvalidUninitBytes(None)) =>
|
||||
{ "{}", value } expected { "a function pointer" },
|
||||
);
|
||||
// FIXME: Check if the signature matches
|
||||
Ok(true)
|
||||
}
|
||||
ty::Never => throw_validation_failure!(self.path, { "a value of the never type `!`" }),
|
||||
ty::Foreign(..) | ty::FnDef(..) => {
|
||||
// Nothing to check.
|
||||
Ok(true)
|
||||
}
|
||||
// The above should be all the primitive types. The rest is compound, we
|
||||
// check them by visiting their fields/variants.
|
||||
ty::Adt(..)
|
||||
| ty::Tuple(..)
|
||||
| ty::Array(..)
|
||||
| ty::Slice(..)
|
||||
| ty::Str
|
||||
| ty::Dynamic(..)
|
||||
| ty::Closure(..)
|
||||
| ty::Generator(..) => Ok(false),
|
||||
// Some types only occur during typechecking; they have no layout.
|
||||
// We should not see them here and we could not check them anyway.
|
||||
ty::Error(_)
|
||||
| ty::Infer(..)
|
||||
| ty::Placeholder(..)
|
||||
| ty::Bound(..)
|
||||
| ty::Param(..)
|
||||
| ty::Opaque(..)
|
||||
| ty::Projection(..)
|
||||
| ty::GeneratorWitness(..) => bug!("Encountered invalid type {:?}", ty),
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_scalar(
|
||||
&mut self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
scalar_layout: &ScalarAbi,
|
||||
) -> InterpResult<'tcx> {
|
||||
let value = self.read_scalar(op)?;
|
||||
let valid_range = scalar_layout.valid_range.clone();
|
||||
let WrappingRange { start: lo, end: hi } = valid_range;
|
||||
// Determine the allowed range
|
||||
// `max_hi` is as big as the size fits
|
||||
let max_hi = u128::MAX >> (128 - op.layout.size.bits());
|
||||
assert!(hi <= max_hi);
|
||||
// We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
|
||||
if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
|
||||
// Nothing to check
|
||||
return Ok(());
|
||||
}
|
||||
// At least one value is excluded. Get the bits.
|
||||
let value = try_validation!(
|
||||
value.check_init(),
|
||||
self.path,
|
||||
err_ub!(InvalidUninitBytes(None)) => { "{}", value }
|
||||
expected { "something {}", wrapping_range_format(valid_range, max_hi) },
|
||||
);
|
||||
let bits = match value.try_to_int() {
|
||||
Err(_) => {
|
||||
// So this is a pointer then, and casting to an int failed.
|
||||
// Can only happen during CTFE.
|
||||
let ptr = self.ecx.scalar_to_ptr(value);
|
||||
if lo == 1 && hi == max_hi {
|
||||
// Only null is the niche. So make sure the ptr is NOT null.
|
||||
if self.ecx.memory.ptr_may_be_null(ptr) {
|
||||
throw_validation_failure!(self.path,
|
||||
{ "a potentially null pointer" }
|
||||
expected {
|
||||
"something that cannot possibly fail to be {}",
|
||||
wrapping_range_format(valid_range, max_hi)
|
||||
}
|
||||
)
|
||||
}
|
||||
return Ok(());
|
||||
} else {
|
||||
// Conservatively, we reject, because the pointer *could* have a bad
|
||||
// value.
|
||||
throw_validation_failure!(self.path,
|
||||
{ "a pointer" }
|
||||
expected {
|
||||
"something that cannot possibly fail to be {}",
|
||||
wrapping_range_format(valid_range, max_hi)
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
Ok(int) => int.assert_bits(op.layout.size),
|
||||
};
|
||||
// Now compare. This is slightly subtle because this is a special "wrap-around" range.
|
||||
if valid_range.contains(bits) {
|
||||
Ok(())
|
||||
} else {
|
||||
throw_validation_failure!(self.path,
|
||||
{ "{}", bits }
|
||||
expected { "something {}", wrapping_range_format(valid_range, max_hi) }
|
||||
)
|
||||
}
|
||||
}
|
||||
}
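// Standalone sketch, not part of the original commit, of the `max_hi` computation in
// `visit_scalar` above: the largest value representable in a scalar of `size_bits` bits.
#[cfg(test)]
mod scalar_max_sketch {
    fn max_hi(size_bits: u32) -> u128 {
        u128::MAX >> (128 - size_bits)
    }

    #[test]
    fn matches_primitive_maxima() {
        assert_eq!(max_hi(8), u8::MAX as u128);
        assert_eq!(max_hi(32), u32::MAX as u128);
        assert_eq!(max_hi(128), u128::MAX);
    }
}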
|
||||
|
||||
impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
|
||||
for ValidityVisitor<'rt, 'mir, 'tcx, M>
|
||||
{
|
||||
type V = OpTy<'tcx, M::PointerTag>;
|
||||
|
||||
#[inline(always)]
|
||||
fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> {
|
||||
&self.ecx
|
||||
}
|
||||
|
||||
fn read_discriminant(
|
||||
&mut self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, VariantIdx> {
|
||||
self.with_elem(PathElem::EnumTag, move |this| {
|
||||
Ok(try_validation!(
|
||||
this.ecx.read_discriminant(op),
|
||||
this.path,
|
||||
err_ub!(InvalidTag(val)) =>
|
||||
{ "{}", val } expected { "a valid enum tag" },
|
||||
err_ub!(InvalidUninitBytes(None)) =>
|
||||
{ "uninitialized bytes" } expected { "a valid enum tag" },
|
||||
err_unsup!(ReadPointerAsBytes) =>
|
||||
{ "a pointer" } expected { "a valid enum tag" },
|
||||
)
|
||||
.1)
|
||||
})
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn visit_field(
|
||||
&mut self,
|
||||
old_op: &OpTy<'tcx, M::PointerTag>,
|
||||
field: usize,
|
||||
new_op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let elem = self.aggregate_field_path_elem(old_op.layout, field);
|
||||
self.with_elem(elem, move |this| this.visit_value(new_op))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn visit_variant(
|
||||
&mut self,
|
||||
old_op: &OpTy<'tcx, M::PointerTag>,
|
||||
variant_id: VariantIdx,
|
||||
new_op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx> {
|
||||
let name = match old_op.layout.ty.kind() {
|
||||
ty::Adt(adt, _) => PathElem::Variant(adt.variants[variant_id].ident.name),
|
||||
// Generators also have variants
|
||||
ty::Generator(..) => PathElem::GeneratorState(variant_id),
|
||||
_ => bug!("Unexpected type with variant: {:?}", old_op.layout.ty),
|
||||
};
|
||||
self.with_elem(name, move |this| this.visit_value(new_op))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn visit_union(
|
||||
&mut self,
|
||||
_op: &OpTy<'tcx, M::PointerTag>,
|
||||
_fields: NonZeroUsize,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn visit_value(&mut self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
|
||||
trace!("visit_value: {:?}, {:?}", *op, op.layout);
|
||||
|
||||
// Check primitive types -- the leaves of our recursive descent.
|
||||
if self.try_visit_primitive(op)? {
|
||||
return Ok(());
|
||||
}
|
||||
// Sanity check: `builtin_deref` does not know any pointers that are not primitive.
|
||||
assert!(op.layout.ty.builtin_deref(true).is_none());
|
||||
|
||||
// Special check preventing `UnsafeCell` in the inner part of constants
|
||||
if let Some(def) = op.layout.ty.ty_adt_def() {
|
||||
if matches!(self.ctfe_mode, Some(CtfeValidationMode::Const { inner: true, .. }))
|
||||
&& Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type()
|
||||
{
|
||||
throw_validation_failure!(self.path, { "`UnsafeCell` in a `const`" });
|
||||
}
|
||||
}
|
||||
|
||||
// Recursively walk the value at its type.
|
||||
self.walk_value(op)?;
|
||||
|
||||
// *After* all of this, check the ABI. We need to check the ABI to handle
|
||||
// types like `NonNull` where the `Scalar` info is more restrictive than what
|
||||
// the fields say (`rustc_layout_scalar_valid_range_start`).
|
||||
// But in most cases, this will just propagate what the fields say,
|
||||
// and then we want the error to point at the field -- so, first recurse,
|
||||
// then check ABI.
|
||||
//
|
||||
// FIXME: We could avoid some redundant checks here. For newtypes wrapping
|
||||
// scalars, we do the same check on every "level" (e.g., first we check
|
||||
// MyNewtype and then the scalar in there).
|
||||
match op.layout.abi {
|
||||
Abi::Uninhabited => {
|
||||
throw_validation_failure!(self.path,
|
||||
{ "a value of uninhabited type {:?}", op.layout.ty }
|
||||
);
|
||||
}
|
||||
Abi::Scalar(ref scalar_layout) => {
|
||||
self.visit_scalar(op, scalar_layout)?;
|
||||
}
|
||||
Abi::ScalarPair { .. } | Abi::Vector { .. } => {
|
||||
// These have fields that we already visited above, so we already checked
|
||||
// all their scalar-level restrictions.
|
||||
// There is also no equivalent to `rustc_layout_scalar_valid_range_start`
|
||||
// that would make skipping them here an issue.
|
||||
}
|
||||
Abi::Aggregate { .. } => {
|
||||
// Nothing to do.
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn visit_aggregate(
|
||||
&mut self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>,
|
||||
) -> InterpResult<'tcx> {
|
||||
match op.layout.ty.kind() {
|
||||
ty::Str => {
|
||||
let mplace = op.assert_mem_place(); // strings are never immediate
|
||||
let len = mplace.len(self.ecx)?;
|
||||
try_validation!(
|
||||
self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
|
||||
self.path,
|
||||
err_ub!(InvalidUninitBytes(..)) => { "uninitialized data in `str`" },
|
||||
err_unsup!(ReadPointerAsBytes) => { "a pointer in `str`" },
|
||||
);
|
||||
}
|
||||
ty::Array(tys, ..) | ty::Slice(tys)
|
||||
// This optimization applies for types that can hold arbitrary bytes (such as
|
||||
// integer and floating point types) or for structs or tuples with no fields.
|
||||
// FIXME(wesleywiser) This logic could be extended further to arbitrary structs
|
||||
// or tuples made up of integer/floating point types or inhabited ZSTs with no
|
||||
// padding.
|
||||
if matches!(tys.kind(), ty::Int(..) | ty::Uint(..) | ty::Float(..))
|
||||
=>
|
||||
{
|
||||
// Optimized handling for arrays of integer/float type.
|
||||
|
||||
// Arrays cannot be immediate, slices are never immediate.
|
||||
let mplace = op.assert_mem_place();
|
||||
// This is the length of the array/slice.
|
||||
let len = mplace.len(self.ecx)?;
|
||||
// This is the element type size.
|
||||
let layout = self.ecx.layout_of(tys)?;
|
||||
// This is the size in bytes of the whole array. (This checks for overflow.)
|
||||
let size = layout.size * len;
|
||||
|
||||
// Optimization: we just check the entire range at once.
|
||||
// NOTE: Keep this in sync with the handling of integer and float
|
||||
// types above, in `visit_primitive`.
|
||||
// In run-time mode, we accept pointers in here. This is actually more
|
||||
// permissive than a per-element check would be, e.g., we accept
|
||||
// a &[u8] that contains a pointer even though bytewise checking would
|
||||
// reject it. However, that's good: We don't inherently want
|
||||
// to reject those pointers, we just do not have the machinery to
|
||||
// talk about parts of a pointer.
|
||||
// We also accept uninit, for consistency with the slow path.
|
||||
let alloc = match self.ecx.memory.get(mplace.ptr, size, mplace.align)? {
|
||||
Some(a) => a,
|
||||
None => {
|
||||
// Size 0, nothing more to check.
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
match alloc.check_bytes(
|
||||
alloc_range(Size::ZERO, size),
|
||||
/*allow_uninit_and_ptr*/ self.ctfe_mode.is_none(),
|
||||
) {
|
||||
// In the happy case, we needn't check anything else.
|
||||
Ok(()) => {}
|
||||
// Some error happened, try to provide a more detailed description.
|
||||
Err(err) => {
|
||||
// For some errors we might be able to provide extra information.
|
||||
// (This custom logic does not fit the `try_validation!` macro.)
|
||||
match err.kind() {
|
||||
err_ub!(InvalidUninitBytes(Some((_alloc_id, access)))) => {
|
||||
// Some byte was uninitialized, determine which
|
||||
// element that byte belongs to so we can
|
||||
// provide an index.
|
||||
let i = usize::try_from(
|
||||
access.uninit_offset.bytes() / layout.size.bytes(),
|
||||
)
|
||||
.unwrap();
|
||||
self.path.push(PathElem::ArrayElem(i));
|
||||
|
||||
throw_validation_failure!(self.path, { "uninitialized bytes" })
|
||||
}
|
||||
err_unsup!(ReadPointerAsBytes) => {
|
||||
throw_validation_failure!(self.path, { "a pointer" } expected { "plain (non-pointer) bytes" })
|
||||
}
|
||||
|
||||
// Propagate upwards (that will also check for unexpected errors).
|
||||
_ => return Err(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Fast path for arrays and slices of ZSTs. We only need to check a single ZST element
|
||||
// of an array and not all of them, because there's only a single value of a specific
|
||||
// ZST type, so either validation fails for all elements or none.
|
||||
ty::Array(tys, ..) | ty::Slice(tys) if self.ecx.layout_of(tys)?.is_zst() => {
|
||||
// Validate just the first element (if any).
|
||||
self.walk_aggregate(op, fields.take(1))?
|
||||
}
|
||||
_ => {
|
||||
self.walk_aggregate(op, fields)? // default handler
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
|
||||
fn validate_operand_internal(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
path: Vec<PathElem>,
|
||||
ref_tracking: Option<&mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>>,
|
||||
ctfe_mode: Option<CtfeValidationMode>,
|
||||
) -> InterpResult<'tcx> {
|
||||
trace!("validate_operand_internal: {:?}, {:?}", *op, op.layout.ty);
|
||||
|
||||
// Construct a visitor
|
||||
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
|
||||
|
||||
// Run it.
|
||||
match visitor.visit_value(&op) {
|
||||
Ok(()) => Ok(()),
|
||||
// Pass through validation failures.
|
||||
Err(err) if matches!(err.kind(), err_ub!(ValidationFailure { .. })) => Err(err),
|
||||
// Also pass through InvalidProgram, those just indicate that we could not
|
||||
// validate and each caller will know best what to do with them.
|
||||
Err(err) if matches!(err.kind(), InterpError::InvalidProgram(_)) => Err(err),
|
||||
// Avoid other errors as those do not show *where* in the value the issue lies.
|
||||
Err(err) => {
|
||||
err.print_backtrace();
|
||||
bug!("Unexpected error during validation: {}", err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// This function checks the data at `op` to be const-valid.
|
||||
/// `op` is assumed to cover valid memory if it is an indirect operand.
|
||||
/// It will error if the bits at the destination do not match the ones described by the layout.
|
||||
///
|
||||
/// `ref_tracking` is used to record references that we encounter so that they
|
||||
/// can be checked recursively by an outside driving loop.
|
||||
///
|
||||
/// `constant` controls whether this must satisfy the rules for constants:
|
||||
/// - no pointers to statics.
|
||||
/// - no `UnsafeCell` or non-ZST `&mut`.
|
||||
#[inline(always)]
|
||||
pub fn const_validate_operand(
|
||||
&self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
path: Vec<PathElem>,
|
||||
ref_tracking: &mut RefTracking<MPlaceTy<'tcx, M::PointerTag>, Vec<PathElem>>,
|
||||
ctfe_mode: CtfeValidationMode,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.validate_operand_internal(op, path, Some(ref_tracking), Some(ctfe_mode))
|
||||
}
|
||||
|
||||
/// This function checks the data at `op` to be runtime-valid.
|
||||
/// `op` is assumed to cover valid memory if it is an indirect operand.
|
||||
/// It will error if the bits at the destination do not match the ones described by the layout.
|
||||
#[inline(always)]
|
||||
pub fn validate_operand(&self, op: &OpTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
|
||||
self.validate_operand_internal(op, vec![], None, None)
|
||||
}
|
||||
}
|
278
compiler/rustc_const_eval/src/interpret/visitor.rs
Normal file
|
@ -0,0 +1,278 @@
|
|||
//! Visitor for a run-time value with a given layout: Traverse enums, structs and other compound
|
||||
//! types until we arrive at the leaves, with custom handling for primitive types.
|
||||
|
||||
use rustc_middle::mir::interpret::InterpResult;
|
||||
use rustc_middle::ty;
|
||||
use rustc_middle::ty::layout::TyAndLayout;
|
||||
use rustc_target::abi::{FieldsShape, VariantIdx, Variants};
|
||||
|
||||
use std::num::NonZeroUsize;
|
||||
|
||||
use super::{InterpCx, MPlaceTy, Machine, OpTy};
|
||||
|
||||
// A thing that we can project into, and that has a layout.
|
||||
// This wouldn't have to depend on `Machine` but with the current type inference,
|
||||
// that's just more convenient to work with (avoids repeating all the `Machine` bounds).
|
||||
pub trait Value<'mir, 'tcx, M: Machine<'mir, 'tcx>>: Copy {
|
||||
/// Gets this value's layout.
|
||||
fn layout(&self) -> TyAndLayout<'tcx>;
|
||||
|
||||
/// Makes this into an `OpTy`.
|
||||
fn to_op(&self, ecx: &InterpCx<'mir, 'tcx, M>)
|
||||
-> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>>;
|
||||
|
||||
/// Creates this from an `MPlaceTy`.
|
||||
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self;
|
||||
|
||||
/// Projects to the given enum variant.
|
||||
fn project_downcast(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
variant: VariantIdx,
|
||||
) -> InterpResult<'tcx, Self>;
|
||||
|
||||
/// Projects to the n-th field.
|
||||
fn project_field(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
field: usize,
|
||||
) -> InterpResult<'tcx, Self>;
|
||||
}
|
||||
|
||||
// Operands and memory-places are both values.
|
||||
// Places in general are not, because `place_field` has to do `force_allocation`.
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M> for OpTy<'tcx, M::PointerTag> {
|
||||
#[inline(always)]
|
||||
fn layout(&self) -> TyAndLayout<'tcx> {
|
||||
self.layout
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn to_op(
|
||||
&self,
|
||||
_ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
Ok(*self)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
|
||||
mplace.into()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn project_downcast(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
variant: VariantIdx,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
ecx.operand_downcast(self, variant)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn project_field(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
field: usize,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
ecx.operand_field(self, field)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> Value<'mir, 'tcx, M>
|
||||
for MPlaceTy<'tcx, M::PointerTag>
|
||||
{
|
||||
#[inline(always)]
|
||||
fn layout(&self) -> TyAndLayout<'tcx> {
|
||||
self.layout
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn to_op(
|
||||
&self,
|
||||
_ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
|
||||
Ok((*self).into())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn from_mem_place(mplace: MPlaceTy<'tcx, M::PointerTag>) -> Self {
|
||||
mplace
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn project_downcast(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
variant: VariantIdx,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
ecx.mplace_downcast(self, variant)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn project_field(
|
||||
&self,
|
||||
ecx: &InterpCx<'mir, 'tcx, M>,
|
||||
field: usize,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
ecx.mplace_field(self, field)
|
||||
}
|
||||
}
|
||||
|
||||
macro_rules! make_value_visitor {
|
||||
($visitor_trait_name:ident, $($mutability:ident)?) => {
|
||||
// How to traverse a value and what to do when we are at the leaves.
|
||||
pub trait $visitor_trait_name<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>>: Sized {
|
||||
type V: Value<'mir, 'tcx, M>;
|
||||
|
||||
/// The visitor must have an `InterpCx` in it.
|
||||
fn ecx(&$($mutability)? self)
|
||||
-> &$($mutability)? InterpCx<'mir, 'tcx, M>;
|
||||
|
||||
/// `read_discriminant` can be hooked for better error messages.
|
||||
#[inline(always)]
|
||||
fn read_discriminant(
|
||||
&mut self,
|
||||
op: &OpTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, VariantIdx> {
|
||||
Ok(self.ecx().read_discriminant(op)?.1)
|
||||
}
|
||||
|
||||
// Recursive actions, ready to be overloaded.
|
||||
/// Visits the given value, dispatching as appropriate to more specialized visitors.
|
||||
#[inline(always)]
|
||||
fn visit_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
|
||||
{
|
||||
self.walk_value(v)
|
||||
}
|
||||
/// Visits the given value as a union. No automatic recursion can happen here.
|
||||
#[inline(always)]
|
||||
fn visit_union(&mut self, _v: &Self::V, _fields: NonZeroUsize) -> InterpResult<'tcx>
|
||||
{
|
||||
Ok(())
|
||||
}
|
||||
/// Visits this value as an aggregate; you get an iterator yielding
/// all the fields (still wrapped in an `InterpResult`, so you have to do the error handling yourself).
|
||||
/// Recurses into the fields.
|
||||
#[inline(always)]
|
||||
fn visit_aggregate(
|
||||
&mut self,
|
||||
v: &Self::V,
|
||||
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.walk_aggregate(v, fields)
|
||||
}
|
||||
|
||||
/// Called each time we recurse down to a field of a "product-like" aggregate
|
||||
/// (structs, tuples, arrays and the like, but not enums), passing in old (outer)
|
||||
/// and new (inner) value.
|
||||
/// This gives the visitor the chance to track the stack of nested fields that
|
||||
/// we are descending through.
|
||||
#[inline(always)]
|
||||
fn visit_field(
|
||||
&mut self,
|
||||
_old_val: &Self::V,
|
||||
_field: usize,
|
||||
new_val: &Self::V,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.visit_value(new_val)
|
||||
}
|
||||
/// Called when recursing into an enum variant.
|
||||
/// This gives the visitor the chance to track the stack of nested fields that
|
||||
/// we are descending through.
|
||||
#[inline(always)]
|
||||
fn visit_variant(
|
||||
&mut self,
|
||||
_old_val: &Self::V,
|
||||
_variant: VariantIdx,
|
||||
new_val: &Self::V,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.visit_value(new_val)
|
||||
}
|
||||
|
||||
// Default recursors. Not meant to be overloaded.
|
||||
fn walk_aggregate(
|
||||
&mut self,
|
||||
v: &Self::V,
|
||||
fields: impl Iterator<Item=InterpResult<'tcx, Self::V>>,
|
||||
) -> InterpResult<'tcx> {
|
||||
// Now iterate over it.
|
||||
for (idx, field_val) in fields.enumerate() {
|
||||
self.visit_field(v, idx, &field_val?)?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
fn walk_value(&mut self, v: &Self::V) -> InterpResult<'tcx>
|
||||
{
|
||||
trace!("walk_value: type: {}", v.layout().ty);
|
||||
|
||||
// Special treatment for special types, where the (static) layout is not sufficient.
|
||||
match *v.layout().ty.kind() {
|
||||
// If it is a trait object, switch to the real type that was used to create it.
|
||||
ty::Dynamic(..) => {
|
||||
// immediate trait objects are not a thing
|
||||
let op = v.to_op(self.ecx())?;
|
||||
let dest = op.assert_mem_place();
|
||||
let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
|
||||
trace!("walk_value: dyn object layout: {:#?}", inner.layout);
|
||||
// recurse with the inner type
|
||||
return self.visit_field(&v, 0, &Value::from_mem_place(inner));
|
||||
},
|
||||
// Slices do not need special handling here: they have `Array` field
|
||||
// placement with length 0, so we enter the `Array` case below which
|
||||
// indirectly uses the metadata to determine the actual length.
|
||||
_ => {},
|
||||
};
|
||||
|
||||
// Visit the fields of this value.
|
||||
match v.layout().fields {
|
||||
FieldsShape::Primitive => {},
|
||||
FieldsShape::Union(fields) => {
|
||||
self.visit_union(v, fields)?;
|
||||
},
|
||||
FieldsShape::Arbitrary { ref offsets, .. } => {
|
||||
// FIXME: We collect in a vec because otherwise there are lifetime
|
||||
// errors: Projecting to a field needs access to `ecx`.
|
||||
let fields: Vec<InterpResult<'tcx, Self::V>> =
|
||||
(0..offsets.len()).map(|i| {
|
||||
v.project_field(self.ecx(), i)
|
||||
})
|
||||
.collect();
|
||||
self.visit_aggregate(v, fields.into_iter())?;
|
||||
},
|
||||
FieldsShape::Array { .. } => {
|
||||
// Let's get an mplace first.
|
||||
let op = v.to_op(self.ecx())?;
|
||||
let mplace = op.assert_mem_place();
|
||||
// Now we can go over all the fields.
|
||||
// This uses the *run-time length*, i.e., if we are a slice,
|
||||
// the dynamic info from the metadata is used.
|
||||
let iter = self.ecx().mplace_array_fields(&mplace)?
|
||||
.map(|f| f.and_then(|f| {
|
||||
Ok(Value::from_mem_place(f))
|
||||
}));
|
||||
self.visit_aggregate(v, iter)?;
|
||||
}
|
||||
}
|
||||
|
||||
match v.layout().variants {
|
||||
// If this is a multi-variant layout, find the right variant and proceed
|
||||
// with *its* fields.
|
||||
Variants::Multiple { .. } => {
|
||||
let op = v.to_op(self.ecx())?;
|
||||
let idx = self.read_discriminant(&op)?;
|
||||
let inner = v.project_downcast(self.ecx(), idx)?;
|
||||
trace!("walk_value: variant layout: {:#?}", inner.layout());
|
||||
// recurse with the inner type
|
||||
self.visit_variant(v, idx, &inner)
|
||||
}
|
||||
// For single-variant layouts, we already did anything there is to do.
|
||||
Variants::Single { .. } => Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
make_value_visitor!(ValueVisitor,);
|
||||
make_value_visitor!(MutValueVisitor, mut);
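// Illustrative sketch, not part of the original commit, of the pattern generated by
// `make_value_visitor!` above: `visit_*` hooks default to calling the `walk_*`
// recursors, so an implementor only overrides the cases it actually cares about.
#[cfg(test)]
mod visitor_pattern_sketch {
    trait Visitor {
        // Overridable hook; the default just recurses.
        fn visit_value(&mut self, v: &[i64]) {
            self.walk_value(v)
        }
        fn visit_leaf(&mut self, leaf: i64);
        // Default recursor, not meant to be overridden.
        fn walk_value(&mut self, v: &[i64]) {
            for &leaf in v {
                self.visit_leaf(leaf);
            }
        }
    }

    struct Sum(i64);
    impl Visitor for Sum {
        fn visit_leaf(&mut self, leaf: i64) {
            self.0 += leaf;
        }
    }

    #[test]
    fn only_the_leaf_hook_is_overridden() {
        let mut sum = Sum(0);
        sum.visit_value(&[1, 2, 3]);
        assert_eq!(sum.0, 6);
    }
}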
|
56
compiler/rustc_const_eval/src/lib.rs
Normal file
|
@ -0,0 +1,56 @@
|
|||
/*!
|
||||
|
||||
Rust MIR: a lowered representation of Rust.
|
||||
|
||||
*/
|
||||
|
||||
#![feature(assert_matches)]
|
||||
#![cfg_attr(bootstrap, feature(bindings_after_at))]
|
||||
#![feature(bool_to_option)]
|
||||
#![feature(box_patterns)]
|
||||
#![feature(control_flow_enum)]
|
||||
#![feature(crate_visibility_modifier)]
|
||||
#![feature(decl_macro)]
|
||||
#![feature(exact_size_is_empty)]
|
||||
#![feature(in_band_lifetimes)]
|
||||
#![feature(iter_zip)]
|
||||
#![feature(map_try_insert)]
|
||||
#![feature(min_specialization)]
|
||||
#![feature(slice_ptr_get)]
|
||||
#![feature(option_get_or_insert_default)]
|
||||
#![feature(never_type)]
|
||||
#![feature(trait_alias)]
|
||||
#![feature(trusted_len)]
|
||||
#![feature(trusted_step)]
|
||||
#![feature(try_blocks)]
|
||||
|
||||
#[macro_use]
|
||||
extern crate tracing;
|
||||
#[macro_use]
|
||||
extern crate rustc_middle;
|
||||
|
||||
pub mod const_eval;
|
||||
pub mod interpret;
|
||||
pub mod transform;
|
||||
pub mod util;
|
||||
|
||||
use rustc_middle::ty::query::Providers;
|
||||
|
||||
pub fn provide(providers: &mut Providers) {
|
||||
const_eval::provide(providers);
|
||||
providers.eval_to_const_value_raw = const_eval::eval_to_const_value_raw_provider;
|
||||
providers.eval_to_allocation_raw = const_eval::eval_to_allocation_raw_provider;
|
||||
providers.const_caller_location = const_eval::const_caller_location;
|
||||
providers.destructure_const = |tcx, param_env_and_value| {
|
||||
let (param_env, value) = param_env_and_value.into_parts();
|
||||
const_eval::destructure_const(tcx, param_env, value)
|
||||
};
|
||||
providers.const_to_valtree = |tcx, param_env_and_value| {
|
||||
let (param_env, raw) = param_env_and_value.into_parts();
|
||||
const_eval::const_to_valtree(tcx, param_env, raw)
|
||||
};
|
||||
providers.deref_const = |tcx, param_env_and_value| {
|
||||
let (param_env, value) = param_env_and_value.into_parts();
|
||||
const_eval::deref_const(tcx, param_env, value)
|
||||
};
|
||||
}
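// Minimal sketch, not part of the original commit, of the provider-table pattern that
// `provide` above uses: the compiler hands this crate a struct of function pointers and
// the crate fills in the queries it implements. `Providers` here is a toy stand-in, not
// rustc's `rustc_middle::ty::query::Providers`.
#[cfg(test)]
mod providers_sketch {
    struct Providers {
        double: fn(u32) -> u32,
    }

    fn provide(providers: &mut Providers) {
        providers.double = |x| x * 2;
    }

    #[test]
    fn wires_up_the_query() {
        let mut providers = Providers { double: |x| x };
        provide(&mut providers);
        assert_eq!((providers.double)(21), 42);
    }
}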
|
1110
compiler/rustc_const_eval/src/transform/check_consts/check.rs
Normal file
File diff suppressed because it is too large
135
compiler/rustc_const_eval/src/transform/check_consts/mod.rs
Normal file
|
@ -0,0 +1,135 @@
|
|||
//! Check the bodies of `const`s, `static`s and `const fn`s for illegal operations.
|
||||
//!
|
||||
//! This module will eventually replace the parts of `qualify_consts.rs` that check whether a local
|
||||
//! has interior mutability or needs to be dropped, as well as the visitor that emits errors when
|
||||
//! it finds operations that are invalid in a certain context.
|
||||
|
||||
use rustc_attr as attr;
|
||||
use rustc_hir as hir;
|
||||
use rustc_hir::def_id::{DefId, LocalDefId};
|
||||
use rustc_middle::mir;
|
||||
use rustc_middle::ty::{self, TyCtxt};
|
||||
use rustc_span::{sym, Symbol};
|
||||
|
||||
pub use self::qualifs::Qualif;
|
||||
|
||||
pub mod check;
|
||||
mod ops;
|
||||
pub mod post_drop_elaboration;
|
||||
pub mod qualifs;
|
||||
mod resolver;
|
||||
|
||||
/// Information about the item currently being const-checked, as well as a reference to the global
|
||||
/// context.
|
||||
pub struct ConstCx<'mir, 'tcx> {
|
||||
pub body: &'mir mir::Body<'tcx>,
|
||||
pub tcx: TyCtxt<'tcx>,
|
||||
pub param_env: ty::ParamEnv<'tcx>,
|
||||
pub const_kind: Option<hir::ConstContext>,
|
||||
}
|
||||
|
||||
impl ConstCx<'mir, 'tcx> {
|
||||
pub fn new(tcx: TyCtxt<'tcx>, body: &'mir mir::Body<'tcx>) -> Self {
|
||||
let def_id = body.source.def_id().expect_local();
|
||||
let param_env = tcx.param_env(def_id);
|
||||
Self::new_with_param_env(tcx, body, param_env)
|
||||
}
|
||||
|
||||
pub fn new_with_param_env(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
body: &'mir mir::Body<'tcx>,
|
||||
param_env: ty::ParamEnv<'tcx>,
|
||||
) -> Self {
|
||||
let const_kind = tcx.hir().body_const_context(body.source.def_id().expect_local());
|
||||
ConstCx { body, tcx, param_env, const_kind }
|
||||
}
|
||||
|
||||
pub fn def_id(&self) -> LocalDefId {
|
||||
self.body.source.def_id().expect_local()
|
||||
}
|
||||
|
||||
/// Returns the kind of const context this `Item` represents (`const`, `static`, etc.).
|
||||
///
|
||||
/// Panics if this `Item` is not const.
|
||||
pub fn const_kind(&self) -> hir::ConstContext {
|
||||
self.const_kind.expect("`const_kind` must not be called on a non-const fn")
|
||||
}
|
||||
|
||||
pub fn is_const_stable_const_fn(&self) -> bool {
|
||||
self.const_kind == Some(hir::ConstContext::ConstFn)
|
||||
&& self.tcx.features().staged_api
|
||||
&& is_const_stable_const_fn(self.tcx, self.def_id().to_def_id())
|
||||
}
|
||||
|
||||
/// Returns the function signature of the item being const-checked if it is a `fn` or `const fn`.
|
||||
pub fn fn_sig(&self) -> Option<&'tcx hir::FnSig<'tcx>> {
|
||||
// Get this from the HIR map instead of a query to avoid cycle errors.
|
||||
//
|
||||
// FIXME: Is this still an issue?
|
||||
let hir_map = self.tcx.hir();
|
||||
let hir_id = hir_map.local_def_id_to_hir_id(self.def_id());
|
||||
hir_map.fn_sig_by_hir_id(hir_id)
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns `true` if this `DefId` points to one of the official `panic` lang items.
|
||||
pub fn is_lang_panic_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
|
||||
// We can allow calls to these functions because `hook_panic_fn` in
|
||||
// `const_eval/machine.rs` ensures the calls are handled specially.
|
||||
// Keep in sync with what that function handles!
|
||||
Some(def_id) == tcx.lang_items().panic_fn()
|
||||
|| Some(def_id) == tcx.lang_items().panic_str()
|
||||
|| Some(def_id) == tcx.lang_items().begin_panic_fn()
|
||||
|| Some(def_id) == tcx.lang_items().panic_fmt()
|
||||
|| Some(def_id) == tcx.lang_items().begin_panic_fmt()
|
||||
}
|
||||
|
||||
pub fn rustc_allow_const_fn_unstable(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
def_id: DefId,
|
||||
feature_gate: Symbol,
|
||||
) -> bool {
|
||||
let attrs = tcx.get_attrs(def_id);
|
||||
attr::rustc_allow_const_fn_unstable(&tcx.sess, attrs).any(|name| name == feature_gate)
|
||||
}
|
||||
|
||||
// Returns `true` if the given `const fn` is "const-stable".
|
||||
//
|
||||
// Panics if the given `DefId` does not refer to a `const fn`.
|
||||
//
|
||||
// Const-stability is only relevant for `const fn` within a `staged_api` crate. Only "const-stable"
|
||||
// functions can be called in a const-context by users of the stable compiler. "const-stable"
|
||||
// functions are subject to more stringent restrictions than "const-unstable" functions: They
|
||||
// cannot use unstable features and can only call other "const-stable" functions.
|
||||
pub fn is_const_stable_const_fn(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool {
|
||||
use attr::{ConstStability, Stability, StabilityLevel};
|
||||
|
||||
// A default body marked const is not const-stable because const
|
||||
// trait fns currently cannot be const-stable. We shouldn't
|
||||
// restrict default bodies to only call const-stable functions.
|
||||
if tcx.has_attr(def_id, sym::default_method_body_is_const) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// Const-stability is only relevant for `const fn`.
|
||||
assert!(tcx.is_const_fn_raw(def_id));
|
||||
|
||||
// Functions with `#[rustc_const_unstable]` are const-unstable.
|
||||
match tcx.lookup_const_stability(def_id) {
|
||||
Some(ConstStability { level: StabilityLevel::Unstable { .. }, .. }) => return false,
|
||||
Some(ConstStability { level: StabilityLevel::Stable { .. }, .. }) => return true,
|
||||
None => {}
|
||||
}
|
||||
|
||||
// Functions with `#[unstable]` are const-unstable.
|
||||
//
|
||||
// FIXME(ecstaticmorse): We should keep const-stability attributes wholly separate from normal stability
|
||||
// attributes. `#[unstable]` should be irrelevant.
|
||||
if let Some(Stability { level: StabilityLevel::Unstable { .. }, .. }) =
|
||||
tcx.lookup_stability(def_id)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
true
|
||||
}
|
628
compiler/rustc_const_eval/src/transform/check_consts/ops.rs
Normal file
@ -0,0 +1,628 @@
//! Concrete error types for all operations which may be invalid in a certain const context.

use rustc_errors::{struct_span_err, DiagnosticBuilder};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_middle::mir;
use rustc_session::parse::feature_err;
use rustc_span::symbol::sym;
use rustc_span::{Span, Symbol};

use super::ConstCx;

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Status {
    Allowed,
    Unstable(Symbol),
    Forbidden,
}

#[derive(Clone, Copy)]
pub enum DiagnosticImportance {
    /// An operation that must be removed for const-checking to pass.
    Primary,

    /// An operation that causes const-checking to fail, but is usually a side-effect of a `Primary` operation elsewhere.
    Secondary,
}

/// An operation that is not *always* allowed in a const context.
pub trait NonConstOp: std::fmt::Debug {
    /// Returns an enum indicating whether this operation is allowed within the given item.
    fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
        Status::Forbidden
    }

    fn importance(&self) -> DiagnosticImportance {
        DiagnosticImportance::Primary
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx>;
}

#[derive(Debug)]
pub struct FloatingPointOp;
impl NonConstOp for FloatingPointOp {
    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
        if ccx.const_kind() == hir::ConstContext::ConstFn {
            Status::Unstable(sym::const_fn_floating_point_arithmetic)
        } else {
            Status::Allowed
        }
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_fn_floating_point_arithmetic,
            span,
            &format!("floating point arithmetic is not allowed in {}s", ccx.const_kind()),
        )
    }
}

/// A function call where the callee is a pointer.
#[derive(Debug)]
pub struct FnCallIndirect;
impl NonConstOp for FnCallIndirect {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        ccx.tcx.sess.struct_span_err(span, "function pointers are not allowed in const fn")
    }
}

/// A function call where the callee is not marked as `const`.
#[derive(Debug)]
pub struct FnCallNonConst;
impl NonConstOp for FnCallNonConst {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        struct_span_err!(
            ccx.tcx.sess,
            span,
            E0015,
            "calls in {}s are limited to constant functions, \
             tuple structs and tuple variants",
            ccx.const_kind(),
        )
    }
}

/// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function.
///
/// Contains the name of the feature that would allow the use of this function.
#[derive(Debug)]
pub struct FnCallUnstable(pub DefId, pub Option<Symbol>);

impl NonConstOp for FnCallUnstable {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let FnCallUnstable(def_id, feature) = *self;

        let mut err = ccx.tcx.sess.struct_span_err(
            span,
            &format!("`{}` is not yet stable as a const fn", ccx.tcx.def_path_str(def_id)),
        );

        if ccx.is_const_stable_const_fn() {
            err.help("Const-stable functions can only call other const-stable functions");
        } else if ccx.tcx.sess.is_nightly_build() {
            if let Some(feature) = feature {
                err.help(&format!(
                    "add `#![feature({})]` to the crate attributes to enable",
                    feature
                ));
            }
        }

        err
    }
}

#[derive(Debug)]
pub struct FnPtrCast;
impl NonConstOp for FnPtrCast {
    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
        if ccx.const_kind() != hir::ConstContext::ConstFn {
            Status::Allowed
        } else {
            Status::Unstable(sym::const_fn_fn_ptr_basics)
        }
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_fn_fn_ptr_basics,
            span,
            &format!("function pointer casts are not allowed in {}s", ccx.const_kind()),
        )
    }
}

#[derive(Debug)]
pub struct Generator(pub hir::GeneratorKind);
impl NonConstOp for Generator {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
            Status::Unstable(sym::const_async_blocks)
        } else {
            Status::Forbidden
        }
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let msg = format!("{}s are not allowed in {}s", self.0, ccx.const_kind());
        if let hir::GeneratorKind::Async(hir::AsyncGeneratorKind::Block) = self.0 {
            feature_err(&ccx.tcx.sess.parse_sess, sym::const_async_blocks, span, &msg)
        } else {
            ccx.tcx.sess.struct_span_err(span, &msg)
        }
    }
}

#[derive(Debug)]
pub struct HeapAllocation;
impl NonConstOp for HeapAllocation {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = struct_span_err!(
            ccx.tcx.sess,
            span,
            E0010,
            "allocations are not allowed in {}s",
            ccx.const_kind()
        );
        err.span_label(span, format!("allocation not allowed in {}s", ccx.const_kind()));
        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
            err.note(
                "The value of statics and constants must be known at compile time, \
                 and they live for the entire lifetime of a program. Creating a boxed \
                 value allocates memory on the heap at runtime, and therefore cannot \
                 be done at compile time.",
            );
        }
        err
    }
}

#[derive(Debug)]
pub struct InlineAsm;
impl NonConstOp for InlineAsm {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        struct_span_err!(
            ccx.tcx.sess,
            span,
            E0015,
            "inline assembly is not allowed in {}s",
            ccx.const_kind()
        )
    }
}

#[derive(Debug)]
pub struct LiveDrop {
    pub dropped_at: Option<Span>,
}
impl NonConstOp for LiveDrop {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = struct_span_err!(
            ccx.tcx.sess,
            span,
            E0493,
            "destructors cannot be evaluated at compile-time"
        );
        err.span_label(span, format!("{}s cannot evaluate destructors", ccx.const_kind()));
        if let Some(span) = self.dropped_at {
            err.span_label(span, "value is dropped here");
        }
        err
    }
}

#[derive(Debug)]
/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow never escapes to
/// the final value of the constant.
pub struct TransientCellBorrow;
impl NonConstOp for TransientCellBorrow {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        Status::Unstable(sym::const_refs_to_cell)
    }
    fn importance(&self) -> DiagnosticImportance {
        // The cases that cannot possibly work will already emit a `CellBorrow`, so we should
        // not additionally emit a feature gate error if activating the feature gate won't work.
        DiagnosticImportance::Secondary
    }
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_refs_to_cell,
            span,
            "cannot borrow here, since the borrowed element may contain interior mutability",
        )
    }
}

#[derive(Debug)]
/// A borrow of a type that contains an `UnsafeCell` somewhere. The borrow might escape to
/// the final value of the constant, and thus we cannot allow this (for now). We may allow
/// it in the future for static items.
pub struct CellBorrow;
impl NonConstOp for CellBorrow {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = struct_span_err!(
            ccx.tcx.sess,
            span,
            E0492,
            "{}s cannot refer to interior mutable data",
            ccx.const_kind(),
        );
        err.span_label(
            span,
            "this borrow of an interior mutable value may end up in the final value",
        );
        if let hir::ConstContext::Static(_) = ccx.const_kind() {
            err.help(
                "to fix this, the value can be extracted to a separate \
                 `static` item and then referenced",
            );
        }
        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
            err.note(
                "A constant containing interior mutable data behind a reference can allow you
                 to modify that data. This would make it possible for multiple uses of a constant
                 to see different values and allow circumventing the `Send` and `Sync` requirements
                 for shared mutable data, which is unsound.",
            );
        }
        err
    }
}

#[derive(Debug)]
/// This op is for `&mut` borrows in the trailing expression of a constant
/// which uses the "enclosing scopes rule" to leak its locals into anonymous
/// static or const items.
pub struct MutBorrow(pub hir::BorrowKind);

impl NonConstOp for MutBorrow {
    fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
        Status::Forbidden
    }

    fn importance(&self) -> DiagnosticImportance {
        // If there were primary errors (like non-const function calls), do not emit further
        // errors about mutable references.
        DiagnosticImportance::Secondary
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let raw = match self.0 {
            hir::BorrowKind::Raw => "raw ",
            hir::BorrowKind::Ref => "",
        };

        let mut err = struct_span_err!(
            ccx.tcx.sess,
            span,
            E0764,
            "{}mutable references are not allowed in the final value of {}s",
            raw,
            ccx.const_kind(),
        );

        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
            err.note(
                "References in statics and constants may only refer \
                 to immutable values.\n\n\
                 Statics are shared everywhere, and if they refer to \
                 mutable data one might violate memory safety since \
                 holding multiple mutable references to shared data \
                 is not allowed.\n\n\
                 If you really want global mutable state, try using \
                 static mut or a global UnsafeCell.",
            );
        }
        err
    }
}

#[derive(Debug)]
pub struct TransientMutBorrow(pub hir::BorrowKind);

impl NonConstOp for TransientMutBorrow {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        Status::Unstable(sym::const_mut_refs)
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let raw = match self.0 {
            hir::BorrowKind::Raw => "raw ",
            hir::BorrowKind::Ref => "",
        };

        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_mut_refs,
            span,
            &format!("{}mutable references are not allowed in {}s", raw, ccx.const_kind()),
        )
    }
}

#[derive(Debug)]
pub struct MutDeref;
impl NonConstOp for MutDeref {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        Status::Unstable(sym::const_mut_refs)
    }

    fn importance(&self) -> DiagnosticImportance {
        // Usually a side-effect of a `TransientMutBorrow` somewhere.
        DiagnosticImportance::Secondary
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_mut_refs,
            span,
            &format!("mutation through a reference is not allowed in {}s", ccx.const_kind()),
        )
    }
}

#[derive(Debug)]
pub struct Panic;
impl NonConstOp for Panic {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        Status::Unstable(sym::const_panic)
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_panic,
            span,
            &format!("panicking in {}s is unstable", ccx.const_kind()),
        )
    }
}

/// A call to a `panic()` lang item where the first argument is _not_ a `&str`.
#[derive(Debug)]
pub struct PanicNonStr;
impl NonConstOp for PanicNonStr {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        ccx.tcx.sess.struct_span_err(
            span,
            "argument to `panic!()` in a const context must have type `&str`",
        )
    }
}

/// Comparing raw pointers for equality.
/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
/// allocation base addresses that are not known at compile-time.
#[derive(Debug)]
pub struct RawPtrComparison;
impl NonConstOp for RawPtrComparison {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = ccx
            .tcx
            .sess
            .struct_span_err(span, "pointers cannot be reliably compared during const eval.");
        err.note(
            "see issue #53020 <https://github.com/rust-lang/rust/issues/53020> \
             for more information",
        );
        err
    }
}

#[derive(Debug)]
pub struct RawPtrDeref;
impl NonConstOp for RawPtrDeref {
    fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
        Status::Unstable(sym::const_raw_ptr_deref)
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        feature_err(
            &ccx.tcx.sess.parse_sess,
            sym::const_raw_ptr_deref,
            span,
            &format!("dereferencing raw pointers in {}s is unstable", ccx.const_kind(),),
        )
    }
}

/// Casting raw pointer or function pointer to an integer.
/// Not currently intended to ever be allowed, even behind a feature gate: operation depends on
/// allocation base addresses that are not known at compile-time.
#[derive(Debug)]
pub struct RawPtrToIntCast;
impl NonConstOp for RawPtrToIntCast {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = ccx
            .tcx
            .sess
            .struct_span_err(span, "pointers cannot be cast to integers during const eval.");
        err.note("at compile-time, pointers do not have an integer value");
        err.note(
            "avoiding this restriction via `transmute`, `union`, or raw pointers leads to compile-time undefined behavior",
        );
        err
    }
}

/// An access to a (non-thread-local) `static`.
#[derive(Debug)]
pub struct StaticAccess;
impl NonConstOp for StaticAccess {
    fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
        if let hir::ConstContext::Static(_) = ccx.const_kind() {
            Status::Allowed
        } else {
            Status::Forbidden
        }
    }

    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        let mut err = struct_span_err!(
            ccx.tcx.sess,
            span,
            E0013,
            "{}s cannot refer to statics",
            ccx.const_kind()
        );
        err.help(
            "consider extracting the value of the `static` to a `const`, and referring to that",
        );
        if ccx.tcx.sess.teach(&err.get_code().unwrap()) {
            err.note(
                "`static` and `const` variables can refer to other `const` variables. \
                 A `const` variable, however, cannot refer to a `static` variable.",
            );
            err.help("To fix this, the value can be extracted to a `const` and then used.");
        }
        err
    }
}

/// An access to a thread-local `static`.
#[derive(Debug)]
pub struct ThreadLocalAccess;
impl NonConstOp for ThreadLocalAccess {
    fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
        struct_span_err!(
            ccx.tcx.sess,
            span,
            E0625,
            "thread-local statics cannot be \
             accessed at compile-time"
        )
    }
}

// Types that cannot appear in the signature or locals of a `const fn`.
pub mod ty {
    use super::*;

    #[derive(Debug)]
    pub struct MutRef(pub mir::LocalKind);
    impl NonConstOp for MutRef {
        fn status_in_item(&self, _ccx: &ConstCx<'_, '_>) -> Status {
            Status::Unstable(sym::const_mut_refs)
        }

        fn importance(&self) -> DiagnosticImportance {
            match self.0 {
                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
                    DiagnosticImportance::Primary
                }
            }
        }

        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
            feature_err(
                &ccx.tcx.sess.parse_sess,
                sym::const_mut_refs,
                span,
                &format!("mutable references are not allowed in {}s", ccx.const_kind()),
            )
        }
    }

    #[derive(Debug)]
    pub struct FnPtr(pub mir::LocalKind);
    impl NonConstOp for FnPtr {
        fn importance(&self) -> DiagnosticImportance {
            match self.0 {
                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
                    DiagnosticImportance::Primary
                }
            }
        }

        fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
            if ccx.const_kind() != hir::ConstContext::ConstFn {
                Status::Allowed
            } else {
                Status::Unstable(sym::const_fn_fn_ptr_basics)
            }
        }

        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
            feature_err(
                &ccx.tcx.sess.parse_sess,
                sym::const_fn_fn_ptr_basics,
                span,
                &format!("function pointers cannot appear in {}s", ccx.const_kind()),
            )
        }
    }

    #[derive(Debug)]
    pub struct ImplTrait;
    impl NonConstOp for ImplTrait {
        fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
            Status::Unstable(sym::const_impl_trait)
        }

        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
            feature_err(
                &ccx.tcx.sess.parse_sess,
                sym::const_impl_trait,
                span,
                &format!("`impl Trait` is not allowed in {}s", ccx.const_kind()),
            )
        }
    }

    #[derive(Debug)]
    pub struct TraitBound(pub mir::LocalKind);
    impl NonConstOp for TraitBound {
        fn importance(&self) -> DiagnosticImportance {
            match self.0 {
                mir::LocalKind::Var | mir::LocalKind::Temp => DiagnosticImportance::Secondary,
                mir::LocalKind::ReturnPointer | mir::LocalKind::Arg => {
                    DiagnosticImportance::Primary
                }
            }
        }

        fn status_in_item(&self, ccx: &ConstCx<'_, '_>) -> Status {
            if ccx.const_kind() != hir::ConstContext::ConstFn {
                Status::Allowed
            } else {
                Status::Unstable(sym::const_fn_trait_bound)
            }
        }

        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
            feature_err(
                &ccx.tcx.sess.parse_sess,
                sym::const_fn_trait_bound,
                span,
                "trait bounds other than `Sized` on const fn parameters are unstable",
            )
        }
    }

    /// A trait bound with the `?const Trait` opt-out
    #[derive(Debug)]
    pub struct TraitBoundNotConst;
    impl NonConstOp for TraitBoundNotConst {
        fn status_in_item(&self, _: &ConstCx<'_, '_>) -> Status {
            Status::Unstable(sym::const_trait_bound_opt_out)
        }

        fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> DiagnosticBuilder<'tcx> {
            feature_err(
                &ccx.tcx.sess.parse_sess,
                sym::const_trait_bound_opt_out,
                span,
                "`?const Trait` syntax is unstable",
            )
        }
    }
}
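For context (again not part of the diff itself), here is the kind of user code that `CellBorrow` rejects with E0492, next to a form that passes; this is a hedged illustration of current behavior, not code from this commit:

use std::cell::Cell;

// Rejected by `CellBorrow` (E0492): the borrow of interior mutable data would
// end up in the final value of the constant.
const SHARED: &Cell<i32> = &Cell::new(0);

// Fine: the constant owns the `Cell`; no reference to interior mutable data escapes.
const OWNED: Cell<i32> = Cell::new(0);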
123
compiler/rustc_const_eval/src/transform/check_consts/post_drop_elaboration.rs
Normal file
@ -0,0 +1,123 @@
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{self, BasicBlock, Location};
use rustc_middle::ty::TyCtxt;
use rustc_span::Span;

use super::check::Qualifs;
use super::ops::{self, NonConstOp};
use super::qualifs::{NeedsDrop, Qualif};
use super::ConstCx;

/// Returns `true` if we should use the more precise live drop checker that runs after drop
/// elaboration.
pub fn checking_enabled(ccx: &ConstCx<'_, '_>) -> bool {
    // Const-stable functions must always use the stable live drop checker.
    if ccx.is_const_stable_const_fn() {
        return false;
    }

    ccx.tcx.features().const_precise_live_drops
}

/// Look for live drops in a const context.
///
/// This is separate from the rest of the const checking logic because it must run after drop
/// elaboration.
pub fn check_live_drops(tcx: TyCtxt<'tcx>, body: &mir::Body<'tcx>) {
    let def_id = body.source.def_id().expect_local();
    let const_kind = tcx.hir().body_const_context(def_id);
    if const_kind.is_none() {
        return;
    }

    let ccx = ConstCx { body, tcx, const_kind, param_env: tcx.param_env(def_id) };
    if !checking_enabled(&ccx) {
        return;
    }

    let mut visitor = CheckLiveDrops { ccx: &ccx, qualifs: Qualifs::default() };

    visitor.visit_body(body);
}

struct CheckLiveDrops<'mir, 'tcx> {
    ccx: &'mir ConstCx<'mir, 'tcx>,
    qualifs: Qualifs<'mir, 'tcx>,
}

// So we can access `body` and `tcx`.
impl std::ops::Deref for CheckLiveDrops<'mir, 'tcx> {
    type Target = ConstCx<'mir, 'tcx>;

    fn deref(&self) -> &Self::Target {
        &self.ccx
    }
}

impl CheckLiveDrops<'mir, 'tcx> {
    fn check_live_drop(&self, span: Span) {
        ops::LiveDrop { dropped_at: None }.build_error(self.ccx, span).emit();
    }
}

impl Visitor<'tcx> for CheckLiveDrops<'mir, 'tcx> {
    fn visit_basic_block_data(&mut self, bb: BasicBlock, block: &mir::BasicBlockData<'tcx>) {
        trace!("visit_basic_block_data: bb={:?} is_cleanup={:?}", bb, block.is_cleanup);

        // Ignore drop terminators in cleanup blocks.
        if block.is_cleanup {
            return;
        }

        self.super_basic_block_data(bb, block);
    }

    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
        trace!("visit_terminator: terminator={:?} location={:?}", terminator, location);

        match &terminator.kind {
            mir::TerminatorKind::Drop { place: dropped_place, .. } => {
                let dropped_ty = dropped_place.ty(self.body, self.tcx).ty;
                if !NeedsDrop::in_any_value_of_ty(self.ccx, dropped_ty) {
                    bug!(
                        "Drop elaboration left behind a Drop for a type that does not need dropping"
                    );
                }

                if dropped_place.is_indirect() {
                    self.check_live_drop(terminator.source_info.span);
                    return;
                }

                // Drop elaboration is not precise enough to accept code like
                // `src/test/ui/consts/control-flow/drop-pass.rs`; e.g., when an `Option<Vec<T>>` is
                // initialized with `None` and never changed, it still emits drop glue.
                // Hence we additionally check the qualifs here to allow more code to pass.
                if self.qualifs.needs_drop(self.ccx, dropped_place.local, location) {
                    // Use the span where the dropped local was declared for the error.
                    let span = self.body.local_decls[dropped_place.local].source_info.span;
                    self.check_live_drop(span);
                }
            }

            mir::TerminatorKind::DropAndReplace { .. } => span_bug!(
                terminator.source_info.span,
                "`DropAndReplace` should be removed by drop elaboration",
            ),

            mir::TerminatorKind::Abort
            | mir::TerminatorKind::Call { .. }
            | mir::TerminatorKind::Assert { .. }
            | mir::TerminatorKind::FalseEdge { .. }
            | mir::TerminatorKind::FalseUnwind { .. }
            | mir::TerminatorKind::GeneratorDrop
            | mir::TerminatorKind::Goto { .. }
            | mir::TerminatorKind::InlineAsm { .. }
            | mir::TerminatorKind::Resume
            | mir::TerminatorKind::Return
            | mir::TerminatorKind::SwitchInt { .. }
            | mir::TerminatorKind::Unreachable
            | mir::TerminatorKind::Yield { .. } => {}
        }
    }
}
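A hedged example of what the live-drop check rejects (illustrative, not from this commit): a value whose destructor would have to run during evaluation, which trips E0493.

struct Noisy;
impl Drop for Noisy {
    fn drop(&mut self) {}
}

// `n` is dropped at the end of the initializer block; destructors cannot run at
// compile time, so this is rejected with E0493 ("destructors cannot be evaluated
// at compile-time").
const BAD: () = {
    let n = Noisy;
};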
272
compiler/rustc_const_eval/src/transform/check_consts/qualifs.rs
Normal file
@ -0,0 +1,272 @@
//! Structural const qualification.
//!
//! See the `Qualif` trait for more info.

use rustc_errors::ErrorReported;
use rustc_middle::mir::*;
use rustc_middle::ty::{self, subst::SubstsRef, AdtDef, Ty};
use rustc_span::DUMMY_SP;
use rustc_trait_selection::traits;

use super::ConstCx;

pub fn in_any_value_of_ty(
    cx: &ConstCx<'_, 'tcx>,
    ty: Ty<'tcx>,
    error_occured: Option<ErrorReported>,
) -> ConstQualifs {
    ConstQualifs {
        has_mut_interior: HasMutInterior::in_any_value_of_ty(cx, ty),
        needs_drop: NeedsDrop::in_any_value_of_ty(cx, ty),
        custom_eq: CustomEq::in_any_value_of_ty(cx, ty),
        error_occured,
    }
}

/// A "qualif"(-ication) is a way to look for something "bad" in the MIR that would disqualify some
/// code for promotion or prevent it from evaluating at compile time.
///
/// Normally, we would determine what qualifications apply to each type and error when an illegal
/// operation is performed on such a type. However, this was found to be too imprecise, especially
/// in the presence of `enum`s. If only a single variant of an enum has a certain qualification, we
/// needn't reject code unless it actually constructs and operates on the qualified variant.
///
/// To accomplish this, const-checking and promotion use a value-based analysis (as opposed to a
/// type-based one). Qualifications propagate structurally across variables: If a local (or a
/// projection of a local) is assigned a qualified value, that local itself becomes qualified.
pub trait Qualif {
    /// The name of the file used to debug the dataflow analysis that computes this qualif.
    const ANALYSIS_NAME: &'static str;

    /// Whether this `Qualif` is cleared when a local is moved from.
    const IS_CLEARED_ON_MOVE: bool = false;

    /// Extracts the field of `ConstQualifs` that corresponds to this `Qualif`.
    fn in_qualifs(qualifs: &ConstQualifs) -> bool;

    /// Returns `true` if *any* value of the given type could possibly have this `Qualif`.
    ///
    /// This function determines `Qualif`s when we cannot do a value-based analysis. Since qualif
    /// propagation is context-insensitive, this includes function arguments and values returned
    /// from a call to another function.
    ///
    /// It also determines the `Qualif`s for primitive types.
    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool;

    /// Returns `true` if this `Qualif` is inherent to the given struct or enum.
    ///
    /// By default, `Qualif`s propagate into ADTs in a structural way: An ADT only becomes
    /// qualified if part of it is assigned a value with that `Qualif`. However, some ADTs *always*
    /// have a certain `Qualif`, regardless of whether their fields have it. For example, a type
    /// with a custom `Drop` impl is inherently `NeedsDrop`.
    ///
    /// Returning `true` for `in_adt_inherently` but `false` for `in_any_value_of_ty` is unsound.
    fn in_adt_inherently(
        cx: &ConstCx<'_, 'tcx>,
        adt: &'tcx AdtDef,
        substs: SubstsRef<'tcx>,
    ) -> bool;
}

/// Constant containing interior mutability (`UnsafeCell<T>`).
/// This must be ruled out to make sure that evaluating the constant at compile-time
/// and at *any point* during the run-time would produce the same result. In particular,
/// promotion of temporaries must not change program behavior; if the promoted could be
/// written to, that would be a problem.
pub struct HasMutInterior;

impl Qualif for HasMutInterior {
    const ANALYSIS_NAME: &'static str = "flow_has_mut_interior";

    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
        qualifs.has_mut_interior
    }

    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
        !ty.is_freeze(cx.tcx.at(DUMMY_SP), cx.param_env)
    }

    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
        // Exactly one type, `UnsafeCell`, has the `HasMutInterior` qualif inherently.
        // It arises structurally for all other types.
        Some(adt.did) == cx.tcx.lang_items().unsafe_cell_type()
    }
}

/// Constant containing an ADT that implements `Drop`.
/// This must be ruled out (a) because we cannot run `Drop` during compile-time
/// as that might not be a `const fn`, and (b) because implicit promotion would
/// remove side-effects that occur as part of dropping that value.
pub struct NeedsDrop;

impl Qualif for NeedsDrop {
    const ANALYSIS_NAME: &'static str = "flow_needs_drop";
    const IS_CLEARED_ON_MOVE: bool = true;

    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
        qualifs.needs_drop
    }

    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
        ty.needs_drop(cx.tcx, cx.param_env)
    }

    fn in_adt_inherently(cx: &ConstCx<'_, 'tcx>, adt: &'tcx AdtDef, _: SubstsRef<'tcx>) -> bool {
        adt.has_dtor(cx.tcx)
    }
}

/// A constant that cannot be used as part of a pattern in a `match` expression.
pub struct CustomEq;

impl Qualif for CustomEq {
    const ANALYSIS_NAME: &'static str = "flow_custom_eq";

    fn in_qualifs(qualifs: &ConstQualifs) -> bool {
        qualifs.custom_eq
    }

    fn in_any_value_of_ty(cx: &ConstCx<'_, 'tcx>, ty: Ty<'tcx>) -> bool {
        // If *any* component of a composite data type does not implement `Structural{Partial,}Eq`,
        // we know that at least some values of that type are not structural-match. I say "some"
        // because that component may be part of an enum variant (e.g.,
        // `Option::<NonStructuralMatchTy>::Some`), in which case some values of this type may be
        // structural-match (`Option::None`).
        let id = cx.tcx.hir().local_def_id_to_hir_id(cx.def_id());
        traits::search_for_structural_match_violation(id, cx.body.span, cx.tcx, ty).is_some()
    }

    fn in_adt_inherently(
        cx: &ConstCx<'_, 'tcx>,
        adt: &'tcx AdtDef,
        substs: SubstsRef<'tcx>,
    ) -> bool {
        let ty = cx.tcx.mk_ty(ty::Adt(adt, substs));
        !ty.is_structural_eq_shallow(cx.tcx)
    }
}

// FIXME: Use `mir::visit::Visitor` for the `in_*` functions if/when it supports early return.

/// Returns `true` if this `Rvalue` contains qualif `Q`.
pub fn in_rvalue<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, rvalue: &Rvalue<'tcx>) -> bool
where
    Q: Qualif,
    F: FnMut(Local) -> bool,
{
    match rvalue {
        Rvalue::ThreadLocalRef(_) | Rvalue::NullaryOp(..) => {
            Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx))
        }

        Rvalue::Discriminant(place) | Rvalue::Len(place) => {
            in_place::<Q, _>(cx, in_local, place.as_ref())
        }

        Rvalue::Use(operand)
        | Rvalue::Repeat(operand, _)
        | Rvalue::UnaryOp(_, operand)
        | Rvalue::Cast(_, operand, _) => in_operand::<Q, _>(cx, in_local, operand),

        Rvalue::BinaryOp(_, box (lhs, rhs)) | Rvalue::CheckedBinaryOp(_, box (lhs, rhs)) => {
            in_operand::<Q, _>(cx, in_local, lhs) || in_operand::<Q, _>(cx, in_local, rhs)
        }

        Rvalue::Ref(_, _, place) | Rvalue::AddressOf(_, place) => {
            // Special-case reborrows to be more like a copy of the reference.
            if let Some((place_base, ProjectionElem::Deref)) = place.as_ref().last_projection() {
                let base_ty = place_base.ty(cx.body, cx.tcx).ty;
                if let ty::Ref(..) = base_ty.kind() {
                    return in_place::<Q, _>(cx, in_local, place_base);
                }
            }

            in_place::<Q, _>(cx, in_local, place.as_ref())
        }

        Rvalue::Aggregate(kind, operands) => {
            // Return early if we know that the struct or enum being constructed is always
            // qualified.
            if let AggregateKind::Adt(def, _, substs, ..) = **kind {
                if Q::in_adt_inherently(cx, def, substs) {
                    return true;
                }
            }

            // Otherwise, proceed structurally...
            operands.iter().any(|o| in_operand::<Q, _>(cx, in_local, o))
        }
    }
}

/// Returns `true` if this `Place` contains qualif `Q`.
pub fn in_place<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, place: PlaceRef<'tcx>) -> bool
where
    Q: Qualif,
    F: FnMut(Local) -> bool,
{
    let mut place = place;
    while let Some((place_base, elem)) = place.last_projection() {
        match elem {
            ProjectionElem::Index(index) if in_local(index) => return true,

            ProjectionElem::Deref
            | ProjectionElem::Field(_, _)
            | ProjectionElem::ConstantIndex { .. }
            | ProjectionElem::Subslice { .. }
            | ProjectionElem::Downcast(_, _)
            | ProjectionElem::Index(_) => {}
        }

        let base_ty = place_base.ty(cx.body, cx.tcx);
        let proj_ty = base_ty.projection_ty(cx.tcx, elem).ty;
        if !Q::in_any_value_of_ty(cx, proj_ty) {
            return false;
        }

        place = place_base;
    }

    assert!(place.projection.is_empty());
    in_local(place.local)
}

/// Returns `true` if this `Operand` contains qualif `Q`.
pub fn in_operand<Q, F>(cx: &ConstCx<'_, 'tcx>, in_local: &mut F, operand: &Operand<'tcx>) -> bool
where
    Q: Qualif,
    F: FnMut(Local) -> bool,
{
    let constant = match operand {
        Operand::Copy(place) | Operand::Move(place) => {
            return in_place::<Q, _>(cx, in_local, place.as_ref());
        }

        Operand::Constant(c) => c,
    };

    // Check the qualifs of the value of `const` items.
    if let Some(ct) = constant.literal.const_for_ty() {
        if let ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs_: _, promoted }) = ct.val {
            assert!(promoted.is_none());
            // Don't peek inside trait associated constants.
            if cx.tcx.trait_of_item(def.did).is_none() {
                let qualifs = if let Some((did, param_did)) = def.as_const_arg() {
                    cx.tcx.at(constant.span).mir_const_qualif_const_arg((did, param_did))
                } else {
                    cx.tcx.at(constant.span).mir_const_qualif(def.did)
                };

                if !Q::in_qualifs(&qualifs) {
                    return false;
                }

                // Just in case the type is more specific than
                // the definition, e.g., impl associated const
                // with type parameters, take it into account.
            }
        }
    }
    // Otherwise use the qualifs of the type.
    Q::in_any_value_of_ty(cx, constant.literal.ty())
}
216
compiler/rustc_const_eval/src/transform/check_consts/resolver.rs
Normal file
@ -0,0 +1,216 @@
//! Propagate `Qualif`s between locals and query the results.
//!
//! This contains the dataflow analysis used to track `Qualif`s on complex control-flow graphs.

use rustc_index::bit_set::BitSet;
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::{self, BasicBlock, Local, Location};

use std::marker::PhantomData;

use super::{qualifs, ConstCx, Qualif};

/// A `Visitor` that propagates qualifs between locals. This defines the transfer function of
/// `FlowSensitiveAnalysis`.
///
/// This transfer does nothing when encountering an indirect assignment. Consumers should rely on
/// the `MaybeMutBorrowedLocals` dataflow pass to see if a `Local` may have become qualified via
/// an indirect assignment or function call.
struct TransferFunction<'a, 'mir, 'tcx, Q> {
    ccx: &'a ConstCx<'mir, 'tcx>,
    qualifs_per_local: &'a mut BitSet<Local>,

    _qualif: PhantomData<Q>,
}

impl<Q> TransferFunction<'a, 'mir, 'tcx, Q>
where
    Q: Qualif,
{
    fn new(ccx: &'a ConstCx<'mir, 'tcx>, qualifs_per_local: &'a mut BitSet<Local>) -> Self {
        TransferFunction { ccx, qualifs_per_local, _qualif: PhantomData }
    }

    fn initialize_state(&mut self) {
        self.qualifs_per_local.clear();

        for arg in self.ccx.body.args_iter() {
            let arg_ty = self.ccx.body.local_decls[arg].ty;
            if Q::in_any_value_of_ty(self.ccx, arg_ty) {
                self.qualifs_per_local.insert(arg);
            }
        }
    }

    fn assign_qualif_direct(&mut self, place: &mir::Place<'tcx>, value: bool) {
        debug_assert!(!place.is_indirect());

        match (value, place.as_ref()) {
            (true, mir::PlaceRef { local, .. }) => {
                self.qualifs_per_local.insert(local);
            }

            // For now, we do not clear the qualif if a local is overwritten in full by
            // an unqualified rvalue (e.g. `y = 5`). This is to be consistent
            // with aggregates where we overwrite all fields with assignments, which would not
            // get this feature.
            (false, mir::PlaceRef { local: _, projection: &[] }) => {
                // self.qualifs_per_local.remove(*local);
            }

            _ => {}
        }
    }

    fn apply_call_return_effect(
        &mut self,
        _block: BasicBlock,
        _func: &mir::Operand<'tcx>,
        _args: &[mir::Operand<'tcx>],
        return_place: mir::Place<'tcx>,
    ) {
        // We cannot reason about another function's internals, so use conservative type-based
        // qualification for the result of a function call.
        let return_ty = return_place.ty(self.ccx.body, self.ccx.tcx).ty;
        let qualif = Q::in_any_value_of_ty(self.ccx, return_ty);

        if !return_place.is_indirect() {
            self.assign_qualif_direct(&return_place, qualif);
        }
    }
}

impl<Q> Visitor<'tcx> for TransferFunction<'_, '_, 'tcx, Q>
where
    Q: Qualif,
{
    fn visit_operand(&mut self, operand: &mir::Operand<'tcx>, location: Location) {
        self.super_operand(operand, location);

        if !Q::IS_CLEARED_ON_MOVE {
            return;
        }

        // If a local with no projections is moved from (e.g. `x` in `y = x`), record that
        // it no longer needs to be dropped.
        if let mir::Operand::Move(place) = operand {
            if let Some(local) = place.as_local() {
                self.qualifs_per_local.remove(local);
            }
        }
    }

    fn visit_assign(
        &mut self,
        place: &mir::Place<'tcx>,
        rvalue: &mir::Rvalue<'tcx>,
        location: Location,
    ) {
        let qualif = qualifs::in_rvalue::<Q, _>(
            self.ccx,
            &mut |l| self.qualifs_per_local.contains(l),
            rvalue,
        );
        if !place.is_indirect() {
            self.assign_qualif_direct(place, qualif);
        }

        // We need to assign qualifs to the left-hand side before visiting `rvalue` since
        // qualifs can be cleared on move.
        self.super_assign(place, rvalue, location);
    }

    fn visit_terminator(&mut self, terminator: &mir::Terminator<'tcx>, location: Location) {
        // The effect of assignment to the return place in `TerminatorKind::Call` is not applied
        // here; that occurs in `apply_call_return_effect`.

        if let mir::TerminatorKind::DropAndReplace { value, place, .. } = &terminator.kind {
            let qualif = qualifs::in_operand::<Q, _>(
                self.ccx,
                &mut |l| self.qualifs_per_local.contains(l),
                value,
            );

            if !place.is_indirect() {
                self.assign_qualif_direct(place, qualif);
            }
        }

        // We need to assign qualifs to the dropped location before visiting the operand that
        // replaces it since qualifs can be cleared on move.
        self.super_terminator(terminator, location);
    }
}

/// The dataflow analysis used to propagate qualifs on arbitrary CFGs.
pub(super) struct FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q> {
    ccx: &'a ConstCx<'mir, 'tcx>,
    _qualif: PhantomData<Q>,
}

impl<'a, 'mir, 'tcx, Q> FlowSensitiveAnalysis<'a, 'mir, 'tcx, Q>
where
    Q: Qualif,
{
    pub(super) fn new(_: Q, ccx: &'a ConstCx<'mir, 'tcx>) -> Self {
        FlowSensitiveAnalysis { ccx, _qualif: PhantomData }
    }

    fn transfer_function(
        &self,
        state: &'a mut BitSet<Local>,
    ) -> TransferFunction<'a, 'mir, 'tcx, Q> {
        TransferFunction::<Q>::new(self.ccx, state)
    }
}

impl<Q> rustc_mir_dataflow::AnalysisDomain<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
where
    Q: Qualif,
{
    type Domain = BitSet<Local>;

    const NAME: &'static str = Q::ANALYSIS_NAME;

    fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain {
        BitSet::new_empty(body.local_decls.len())
    }

    fn initialize_start_block(&self, _body: &mir::Body<'tcx>, state: &mut Self::Domain) {
        self.transfer_function(state).initialize_state();
    }
}

impl<Q> rustc_mir_dataflow::Analysis<'tcx> for FlowSensitiveAnalysis<'_, '_, 'tcx, Q>
where
    Q: Qualif,
{
    fn apply_statement_effect(
        &self,
        state: &mut Self::Domain,
        statement: &mir::Statement<'tcx>,
        location: Location,
    ) {
        self.transfer_function(state).visit_statement(statement, location);
    }

    fn apply_terminator_effect(
        &self,
        state: &mut Self::Domain,
        terminator: &mir::Terminator<'tcx>,
        location: Location,
    ) {
        self.transfer_function(state).visit_terminator(terminator, location);
    }

    fn apply_call_return_effect(
        &self,
        state: &mut Self::Domain,
        block: BasicBlock,
        func: &mir::Operand<'tcx>,
        args: &[mir::Operand<'tcx>],
        return_place: mir::Place<'tcx>,
    ) {
        self.transfer_function(state).apply_call_return_effect(block, func, args, return_place)
    }
}
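A sketch (an assumption, not code from this commit) of how a caller inside `check_consts` might run `FlowSensitiveAnalysis` for one qualif and query a local, using the same `into_engine`/`iterate_to_fixpoint`/`into_results_cursor` chain that `validate.rs` uses below; the helper name and exact signature are made up for illustration and may need lifetime adjustments to compile.

use rustc_middle::mir::{Local, Location};
use rustc_mir_dataflow::Analysis;

fn local_needs_drop(ccx: &ConstCx<'_, '_>, local: Local, location: Location) -> bool {
    // Build the dataflow results for the `NeedsDrop` qualif over this body...
    let mut cursor = FlowSensitiveAnalysis::new(NeedsDrop, ccx)
        .into_engine(ccx.tcx, ccx.body)
        .iterate_to_fixpoint()
        .into_results_cursor(ccx.body);
    // ...then read the bit for `local` just before `location` takes effect.
    cursor.seek_before_primary_effect(location);
    cursor.get().contains(local)
}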
5
compiler/rustc_const_eval/src/transform/mod.rs
Normal file
@ -0,0 +1,5 @@
pub mod check_consts;
pub mod promote_consts;
pub mod validate;

pub use rustc_middle::mir::MirPass;
1092
compiler/rustc_const_eval/src/transform/promote_consts.rs
Normal file
File diff suppressed because it is too large
523
compiler/rustc_const_eval/src/transform/validate.rs
Normal file
523
compiler/rustc_const_eval/src/transform/validate.rs
Normal file
|
@ -0,0 +1,523 @@
|
|||
//! Validates the MIR to ensure that invariants are upheld.
|
||||
|
||||
use super::MirPass;
|
||||
use rustc_index::bit_set::BitSet;
|
||||
use rustc_infer::infer::TyCtxtInferExt;
|
||||
use rustc_middle::mir::interpret::Scalar;
|
||||
use rustc_middle::mir::traversal;
|
||||
use rustc_middle::mir::visit::{PlaceContext, Visitor};
|
||||
use rustc_middle::mir::{
|
||||
AggregateKind, BasicBlock, Body, BorrowKind, Local, Location, MirPhase, Operand, PlaceElem,
|
||||
PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement, StatementKind, Terminator,
|
||||
TerminatorKind,
|
||||
};
|
||||
use rustc_middle::ty::fold::BottomUpFolder;
|
||||
use rustc_middle::ty::{self, ParamEnv, Ty, TyCtxt, TypeFoldable};
|
||||
use rustc_mir_dataflow::impls::MaybeStorageLive;
|
||||
use rustc_mir_dataflow::storage::AlwaysLiveLocals;
|
||||
use rustc_mir_dataflow::{Analysis, ResultsCursor};
|
||||
use rustc_target::abi::Size;
|
||||
|
||||
#[derive(Copy, Clone, Debug)]
|
||||
enum EdgeKind {
|
||||
Unwind,
|
||||
Normal,
|
||||
}
|
||||
|
||||
pub struct Validator {
|
||||
/// Describes at which point in the pipeline this validation is happening.
|
||||
pub when: String,
|
||||
/// The phase for which we are upholding the dialect. If the given phase forbids a specific
|
||||
/// element, this validator will now emit errors if that specific element is encountered.
|
||||
/// Note that phases that change the dialect cause all *following* phases to check the
|
||||
/// invariants of the new dialect. A phase that changes dialects never checks the new invariants
|
||||
/// itself.
|
||||
pub mir_phase: MirPhase,
|
||||
}
|
||||
|
||||
impl<'tcx> MirPass<'tcx> for Validator {
|
||||
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
|
||||
let def_id = body.source.def_id();
|
||||
let param_env = tcx.param_env(def_id);
|
||||
let mir_phase = self.mir_phase;
|
||||
|
||||
let always_live_locals = AlwaysLiveLocals::new(body);
|
||||
let storage_liveness = MaybeStorageLive::new(always_live_locals)
|
||||
.into_engine(tcx, body)
|
||||
.iterate_to_fixpoint()
|
||||
.into_results_cursor(body);
|
||||
|
||||
TypeChecker {
|
||||
when: &self.when,
|
||||
body,
|
||||
tcx,
|
||||
param_env,
|
||||
mir_phase,
|
||||
reachable_blocks: traversal::reachable_as_bitset(body),
|
||||
storage_liveness,
|
||||
place_cache: Vec::new(),
|
||||
}
|
||||
.visit_body(body);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns whether the two types are equal up to lifetimes.
|
||||
/// All lifetimes, including higher-ranked ones, get ignored for this comparison.
|
||||
/// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
|
||||
///
|
||||
/// The point of this function is to approximate "equal up to subtyping". However,
|
||||
/// the approximation is incorrect as variance is ignored.
|
||||
pub fn equal_up_to_regions(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
param_env: ParamEnv<'tcx>,
|
||||
src: Ty<'tcx>,
|
||||
dest: Ty<'tcx>,
|
||||
) -> bool {
|
||||
// Fast path.
|
||||
if src == dest {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Normalize lifetimes away on both sides, then compare.
|
||||
let param_env = param_env.with_reveal_all_normalized(tcx);
|
||||
let normalize = |ty: Ty<'tcx>| {
|
||||
tcx.normalize_erasing_regions(
|
||||
param_env,
|
||||
ty.fold_with(&mut BottomUpFolder {
|
||||
tcx,
|
||||
// FIXME: We erase all late-bound lifetimes, but this is not fully correct.
|
||||
// If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
|
||||
// this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
|
||||
// since one may have an `impl SomeTrait for fn(&32)` and
|
||||
// `impl SomeTrait for fn(&'static u32)` at the same time which
|
||||
// specify distinct values for Assoc. (See also #56105)
|
||||
lt_op: |_| tcx.lifetimes.re_erased,
|
||||
// Leave consts and types unchanged.
|
||||
ct_op: |ct| ct,
|
||||
ty_op: |ty| ty,
|
||||
}),
|
||||
)
|
||||
};
|
||||
tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
|
||||
}
|
||||
|
||||
struct TypeChecker<'a, 'tcx> {
|
||||
when: &'a str,
|
||||
body: &'a Body<'tcx>,
|
||||
tcx: TyCtxt<'tcx>,
|
||||
param_env: ParamEnv<'tcx>,
|
||||
mir_phase: MirPhase,
|
||||
reachable_blocks: BitSet<BasicBlock>,
|
||||
storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
|
||||
place_cache: Vec<PlaceRef<'tcx>>,
|
||||
}
|
||||
|
||||
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
|
||||
fn fail(&self, location: Location, msg: impl AsRef<str>) {
|
||||
let span = self.body.source_info(location).span;
|
||||
// We use `delay_span_bug` as we might see broken MIR when other errors have already
|
||||
// occurred.
|
||||
self.tcx.sess.diagnostic().delay_span_bug(
|
||||
span,
|
||||
&format!(
|
||||
"broken MIR in {:?} ({}) at {:?}:\n{}",
|
||||
self.body.source.instance,
|
||||
self.when,
|
||||
location,
|
||||
msg.as_ref()
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
|
||||
if let Some(bb) = self.body.basic_blocks().get(bb) {
|
||||
let src = self.body.basic_blocks().get(location.block).unwrap();
|
||||
match (src.is_cleanup, bb.is_cleanup, edge_kind) {
|
||||
// Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
|
||||
(false, false, EdgeKind::Normal)
|
||||
// Non-cleanup blocks can jump to cleanup blocks along unwind edges
|
||||
| (false, true, EdgeKind::Unwind)
|
||||
// Cleanup blocks can jump to cleanup blocks along non-unwind edges
|
||||
| (true, true, EdgeKind::Normal) => {}
|
||||
// All other jumps are invalid
|
||||
_ => {
|
||||
self.fail(
|
||||
location,
|
||||
format!(
|
||||
"{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
|
||||
edge_kind,
|
||||
bb,
|
||||
src.is_cleanup,
|
||||
bb.is_cleanup,
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if src can be assigned into dest.
|
||||
/// This is not precise, it will accept some incorrect assignments.
|
||||
fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
|
||||
// Fast path before we normalize.
|
||||
if src == dest {
|
||||
// Equal types, all is good.
|
||||
return true;
|
||||
}
|
||||
// Normalize projections and things like that.
|
||||
// FIXME: We need to reveal_all, as some optimizations change types in ways
|
||||
// that require unfolding opaque types.
|
||||
let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
|
||||
let src = self.tcx.normalize_erasing_regions(param_env, src);
|
||||
let dest = self.tcx.normalize_erasing_regions(param_env, dest);
|
||||
|
||||
// Type-changing assignments can happen when subtyping is used. While
|
||||
// all normal lifetimes are erased, higher-ranked types with their
|
||||
// late-bound lifetimes are still around and can lead to type
|
||||
// differences. So we compare ignoring lifetimes.
|
||||
equal_up_to_regions(self.tcx, param_env, src, dest)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
|
||||
fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
|
||||
if self.body.local_decls.get(*local).is_none() {
|
||||
self.fail(
|
||||
location,
|
||||
format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
|
||||
);
|
||||
}
|
||||
|
||||
if self.reachable_blocks.contains(location.block) && context.is_use() {
|
||||
// Uses of locals must occur while the local's storage is allocated.
|
||||
self.storage_liveness.seek_after_primary_effect(location);
|
||||
let locals_with_storage = self.storage_liveness.get();
|
||||
if !locals_with_storage.contains(*local) {
|
||||
self.fail(location, format!("use of local {:?}, which has no storage here", local));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
|
||||
// This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
|
||||
if self.tcx.sess.opts.debugging_opts.validate_mir {
|
||||
// `Operand::Copy` is only supposed to be used with `Copy` types.
|
||||
if let Operand::Copy(place) = operand {
|
||||
let ty = place.ty(&self.body.local_decls, self.tcx).ty;
|
||||
let span = self.body.source_info(location).span;
|
||||
|
||||
if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
|
||||
self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.super_operand(operand, location);
|
||||
}
|
||||
|
||||
fn visit_projection_elem(
|
||||
&mut self,
|
||||
local: Local,
|
||||
proj_base: &[PlaceElem<'tcx>],
|
||||
elem: PlaceElem<'tcx>,
|
||||
context: PlaceContext,
|
||||
location: Location,
|
||||
) {
|
||||
if let ProjectionElem::Index(index) = elem {
|
||||
let index_ty = self.body.local_decls[index].ty;
|
||||
if index_ty != self.tcx.types.usize {
|
||||
self.fail(location, format!("bad index ({:?} != usize)", index_ty))
|
||||
}
|
||||
}
|
||||
self.super_projection_elem(local, proj_base, elem, context, location);
|
||||
}
|
||||
|
||||
fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
|
||||
match &statement.kind {
|
||||
StatementKind::Assign(box (dest, rvalue)) => {
|
||||
// LHS and RHS of the assignment must have the same type.
|
||||
let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
|
||||
let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
|
||||
if !self.mir_assign_valid_types(right_ty, left_ty) {
|
||||
self.fail(
|
||||
location,
|
||||
format!(
|
||||
"encountered `{:?}` with incompatible types:\n\
|
||||
left-hand side has type: {}\n\
|
||||
right-hand side has type: {}",
|
||||
statement.kind, left_ty, right_ty,
|
||||
),
|
||||
);
|
||||
}
|
||||
match rvalue {
|
||||
// The sides of an assignment must not alias. Currently this just checks whether the places
|
||||
// are identical.
|
||||
Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) => {
|
||||
if dest == src {
|
||||
self.fail(
|
||||
location,
|
||||
"encountered `Assign` statement with overlapping memory",
|
||||
);
|
||||
}
|
||||
}
|
||||
// The deaggregator currently does not deaggregate arrays.
|
||||
// So for now, we ignore them here.
|
||||
Rvalue::Aggregate(box AggregateKind::Array { .. }, _) => {}
|
||||
// All other aggregates must be gone after some phases.
|
||||
Rvalue::Aggregate(box kind, _) => {
|
||||
if self.mir_phase > MirPhase::DropLowering
|
||||
&& !matches!(kind, AggregateKind::Generator(..))
|
||||
{
|
||||
// Generators persist until the state machine transformation, but all
|
||||
// other aggregates must have been lowered.
|
||||
self.fail(
|
||||
location,
|
||||
format!("{:?} have been lowered to field assignments", rvalue),
|
||||
)
|
||||
} else if self.mir_phase > MirPhase::GeneratorLowering {
|
||||
// No more aggregates after drop and generator lowering.
|
||||
self.fail(
|
||||
location,
|
||||
format!("{:?} have been lowered to field assignments", rvalue),
|
||||
)
|
||||
}
|
||||
}
|
||||
Rvalue::Ref(_, BorrowKind::Shallow, _) => {
|
||||
if self.mir_phase > MirPhase::DropLowering {
|
||||
self.fail(
|
||||
location,
|
||||
"`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
|
||||
);
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
StatementKind::AscribeUserType(..) => {
|
||||
if self.mir_phase > MirPhase::DropLowering {
|
||||
self.fail(
|
||||
location,
|
||||
"`AscribeUserType` should have been removed after drop lowering phase",
|
||||
);
|
||||
}
|
||||
}
|
||||
StatementKind::FakeRead(..) => {
|
||||
if self.mir_phase > MirPhase::DropLowering {
|
||||
self.fail(
|
||||
location,
|
||||
"`FakeRead` should have been removed after drop lowering phase",
|
||||
);
|
||||
}
|
||||
}
|
||||
StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
|
||||
ref src,
|
||||
ref dst,
|
||||
ref count,
|
||||
}) => {
|
||||
let src_ty = src.ty(&self.body.local_decls, self.tcx);
|
||||
let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
|
||||
src_deref.ty
|
||||
} else {
|
||||
self.fail(
|
||||
location,
|
||||
format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
|
||||
);
|
||||
return;
|
||||
};
|
||||
let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
|
||||
let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
|
||||
dst_deref.ty
|
||||
} else {
|
||||
self.fail(
|
||||
location,
|
||||
format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
|
||||
);
|
||||
return;
|
||||
};
|
||||
// Since `CopyNonOverlapping` is parametrized by a single type,
|
||||
// we only need to check that the source and destination pointee types are equal, rather than carrying an extra type parameter.
|
||||
if op_src_ty != op_dst_ty {
|
||||
self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
|
||||
}
|
||||
|
||||
let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
|
||||
if op_cnt_ty != self.tcx.types.usize {
|
||||
self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
|
||||
}
|
||||
}
|
||||
StatementKind::SetDiscriminant { .. }
|
||||
| StatementKind::StorageLive(..)
|
||||
| StatementKind::StorageDead(..)
|
||||
| StatementKind::LlvmInlineAsm(..)
|
||||
| StatementKind::Retag(_, _)
|
||||
| StatementKind::Coverage(_)
|
||||
| StatementKind::Nop => {}
|
||||
}
|
||||
|
||||
self.super_statement(statement, location);
|
||||
}
|
||||
|
||||
fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
|
||||
match &terminator.kind {
|
||||
TerminatorKind::Goto { target } => {
|
||||
self.check_edge(location, *target, EdgeKind::Normal);
|
||||
}
|
||||
TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
|
||||
let ty = discr.ty(&self.body.local_decls, self.tcx);
|
||||
if ty != *switch_ty {
|
||||
self.fail(
|
||||
location,
|
||||
format!(
|
||||
"encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
|
||||
ty, switch_ty,
|
||||
),
|
||||
);
|
||||
}
|
||||
|
||||
let target_width = self.tcx.sess.target.pointer_width;
|
||||
|
||||
let size = Size::from_bits(match switch_ty.kind() {
|
||||
ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
|
||||
ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
|
||||
ty::Char => 32,
|
||||
ty::Bool => 1,
|
||||
other => bug!("unhandled type: {:?}", other),
|
||||
});
|
||||
|
||||
for (value, target) in targets.iter() {
|
||||
if Scalar::<()>::try_from_uint(value, size).is_none() {
|
||||
self.fail(
|
||||
location,
|
||||
format!("the value {:#x} is not a proper {:?}", value, switch_ty),
|
||||
)
|
||||
}
|
||||
|
||||
self.check_edge(location, target, EdgeKind::Normal);
|
||||
}
|
||||
self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
|
||||
}
|
||||
TerminatorKind::Drop { target, unwind, .. } => {
|
||||
self.check_edge(location, *target, EdgeKind::Normal);
|
||||
if let Some(unwind) = unwind {
|
||||
self.check_edge(location, *unwind, EdgeKind::Unwind);
|
||||
}
|
||||
}
|
||||
TerminatorKind::DropAndReplace { target, unwind, .. } => {
|
||||
if self.mir_phase > MirPhase::DropLowering {
|
||||
self.fail(
|
||||
location,
|
||||
"`DropAndReplace` is not permitted to exist after drop elaboration",
|
||||
);
|
||||
}
|
||||
self.check_edge(location, *target, EdgeKind::Normal);
|
||||
if let Some(unwind) = unwind {
|
||||
self.check_edge(location, *unwind, EdgeKind::Unwind);
|
||||
}
|
||||
}
|
||||
TerminatorKind::Call { func, args, destination, cleanup, .. } => {
|
||||
let func_ty = func.ty(&self.body.local_decls, self.tcx);
|
||||
match func_ty.kind() {
|
||||
ty::FnPtr(..) | ty::FnDef(..) => {}
|
||||
_ => self.fail(
|
||||
location,
|
||||
format!("encountered non-callable type {} in `Call` terminator", func_ty),
|
||||
),
|
||||
}
|
||||
if let Some((_, target)) = destination {
|
||||
self.check_edge(location, *target, EdgeKind::Normal);
|
||||
}
|
||||
if let Some(cleanup) = cleanup {
|
||||
self.check_edge(location, *cleanup, EdgeKind::Unwind);
|
||||
}
|
||||
|
||||
// The call destination place and any `Operand::Move` place used as an argument might be
|
||||
// passed by reference to the callee. Consequently they must be non-overlapping.
|
||||
// Currently this simply checks for duplicate places.
|
||||
self.place_cache.clear();
|
||||
if let Some((destination, _)) = destination {
|
||||
self.place_cache.push(destination.as_ref());
|
||||
}
|
||||
for arg in args {
|
||||
if let Operand::Move(place) = arg {
|
||||
self.place_cache.push(place.as_ref());
|
||||
}
|
||||
}
|
||||
let all_len = self.place_cache.len();
|
||||
self.place_cache.sort_unstable();
|
||||
self.place_cache.dedup();
|
||||
let has_duplicates = all_len != self.place_cache.len();
|
||||
if has_duplicates {
|
||||
self.fail(
|
||||
location,
|
||||
format!(
|
||||
"encountered overlapping memory in `Call` terminator: {:?}",
|
||||
terminator.kind,
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
TerminatorKind::Assert { cond, target, cleanup, .. } => {
|
||||
let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
|
||||
if cond_ty != self.tcx.types.bool {
|
||||
self.fail(
|
||||
location,
|
||||
format!(
|
||||
"encountered non-boolean condition of type {} in `Assert` terminator",
|
||||
cond_ty
|
||||
),
|
||||
);
|
||||
}
|
||||
self.check_edge(location, *target, EdgeKind::Normal);
|
||||
if let Some(cleanup) = cleanup {
|
||||
self.check_edge(location, *cleanup, EdgeKind::Unwind);
|
||||
}
|
||||
}
|
||||
TerminatorKind::Yield { resume, drop, .. } => {
|
||||
if self.mir_phase > MirPhase::GeneratorLowering {
|
||||
self.fail(location, "`Yield` should have been replaced by generator lowering");
|
||||
}
|
||||
self.check_edge(location, *resume, EdgeKind::Normal);
|
||||
if let Some(drop) = drop {
|
||||
self.check_edge(location, *drop, EdgeKind::Normal);
|
||||
}
|
||||
}
|
||||
TerminatorKind::FalseEdge { real_target, imaginary_target } => {
|
||||
self.check_edge(location, *real_target, EdgeKind::Normal);
|
||||
self.check_edge(location, *imaginary_target, EdgeKind::Normal);
|
||||
}
|
||||
TerminatorKind::FalseUnwind { real_target, unwind } => {
|
||||
self.check_edge(location, *real_target, EdgeKind::Normal);
|
||||
if let Some(unwind) = unwind {
|
||||
self.check_edge(location, *unwind, EdgeKind::Unwind);
|
||||
}
|
||||
}
|
||||
TerminatorKind::InlineAsm { destination, .. } => {
|
||||
if let Some(destination) = destination {
|
||||
self.check_edge(location, *destination, EdgeKind::Normal);
|
||||
}
|
||||
}
|
||||
// Nothing to validate for these.
|
||||
TerminatorKind::Resume
|
||||
| TerminatorKind::Abort
|
||||
| TerminatorKind::Return
|
||||
| TerminatorKind::Unreachable
|
||||
| TerminatorKind::GeneratorDrop => {}
|
||||
}
|
||||
|
||||
self.super_terminator(terminator, location);
|
||||
}
|
||||
|
||||
fn visit_source_scope(&mut self, scope: &SourceScope) {
|
||||
if self.body.source_scopes.get(*scope).is_none() {
|
||||
self.tcx.sess.diagnostic().delay_span_bug(
|
||||
self.body.span,
|
||||
&format!(
|
||||
"broken MIR in {:?} ({}):\ninvalid source scope {:?}",
|
||||
self.body.source.instance, self.when, scope,
|
||||
),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
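As an aside, a minimal sketch (an assumption, not part of this commit) of the size rule the validator applies to `SwitchInt` values above: a `bool` discriminant is given a 1-bit size, so any value other than 0 or 1 fails `Scalar::try_from_uint` and would be reported.

use rustc_middle::mir::interpret::Scalar;
use rustc_target::abi::Size;

fn bool_switch_values_are_checked() {
    let size = Size::from_bits(1);
    assert!(Scalar::<()>::try_from_uint(2u128, size).is_none()); // rejected: does not fit in 1 bit
    assert!(Scalar::<()>::try_from_uint(1u128, size).is_some()); // accepted
}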
|
75
compiler/rustc_const_eval/src/util/aggregate.rs
Normal file
|
@ -0,0 +1,75 @@
|
|||
use rustc_index::vec::Idx;
|
||||
use rustc_middle::mir::*;
|
||||
use rustc_middle::ty::{Ty, TyCtxt};
|
||||
use rustc_target::abi::VariantIdx;
|
||||
|
||||
use std::convert::TryFrom;
|
||||
use std::iter::TrustedLen;
|
||||
|
||||
/// Expand `lhs = Rvalue::Aggregate(kind, operands)` into assignments to the fields.
|
||||
///
|
||||
/// Produces something like
|
||||
///
|
||||
/// (lhs as Variant).field0 = arg0; // We only have a downcast if this is an enum
|
||||
/// (lhs as Variant).field1 = arg1;
|
||||
/// discriminant(lhs) = variant_index; // If lhs is an enum or generator.
|
||||
pub fn expand_aggregate<'tcx>(
|
||||
mut lhs: Place<'tcx>,
|
||||
operands: impl Iterator<Item = (Operand<'tcx>, Ty<'tcx>)> + TrustedLen,
|
||||
kind: AggregateKind<'tcx>,
|
||||
source_info: SourceInfo,
|
||||
tcx: TyCtxt<'tcx>,
|
||||
) -> impl Iterator<Item = Statement<'tcx>> + TrustedLen {
|
||||
let mut set_discriminant = None;
|
||||
let active_field_index = match kind {
|
||||
AggregateKind::Adt(adt_def, variant_index, _, _, active_field_index) => {
|
||||
if adt_def.is_enum() {
|
||||
set_discriminant = Some(Statement {
|
||||
kind: StatementKind::SetDiscriminant { place: Box::new(lhs), variant_index },
|
||||
source_info,
|
||||
});
|
||||
lhs = tcx.mk_place_downcast(lhs, adt_def, variant_index);
|
||||
}
|
||||
active_field_index
|
||||
}
|
||||
AggregateKind::Generator(..) => {
|
||||
// Right now we only support initializing generators to
|
||||
// variant 0 (Unresumed).
|
||||
let variant_index = VariantIdx::new(0);
|
||||
set_discriminant = Some(Statement {
|
||||
kind: StatementKind::SetDiscriminant { place: Box::new(lhs), variant_index },
|
||||
source_info,
|
||||
});
|
||||
|
||||
// Operands are upvars stored on the base place, so no
|
||||
// downcast is necessary.
|
||||
|
||||
None
|
||||
}
|
||||
_ => None,
|
||||
};
|
||||
|
||||
operands
|
||||
.enumerate()
|
||||
.map(move |(i, (op, ty))| {
|
||||
let lhs_field = if let AggregateKind::Array(_) = kind {
|
||||
let offset = u64::try_from(i).unwrap();
|
||||
tcx.mk_place_elem(
|
||||
lhs,
|
||||
ProjectionElem::ConstantIndex {
|
||||
offset,
|
||||
min_length: offset + 1,
|
||||
from_end: false,
|
||||
},
|
||||
)
|
||||
} else {
|
||||
let field = Field::new(active_field_index.unwrap_or(i));
|
||||
tcx.mk_place_field(lhs, field, ty)
|
||||
};
|
||||
Statement {
|
||||
source_info,
|
||||
kind: StatementKind::Assign(Box::new((lhs_field, Rvalue::Use(op)))),
|
||||
}
|
||||
})
|
||||
.chain(set_discriminant)
|
||||
}
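A minimal usage sketch (an assumption, not from this commit) of how a deaggregation pass might drive `expand_aggregate`; `tcx` and `source_info` come from the surrounding pass, and the locals and the `i32` field type are placeholders.

use rustc_index::vec::Idx;
use rustc_middle::mir::*;
use rustc_middle::ty::TyCtxt;

fn lower_tuple_aggregate<'tcx>(tcx: TyCtxt<'tcx>, source_info: SourceInfo) -> Vec<Statement<'tcx>> {
    // `_1 = (move _2,)` is expanded into one field assignment per operand.
    let lhs = Place::from(Local::new(1));
    let operands = vec![(Operand::Move(Place::from(Local::new(2))), tcx.types.i32)];
    crate::util::expand_aggregate(lhs, operands.into_iter(), AggregateKind::Tuple, source_info, tcx)
        .collect()
}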
|
70
compiler/rustc_const_eval/src/util/alignment.rs
Normal file
|
@ -0,0 +1,70 @@
|
|||
use rustc_middle::mir::*;
|
||||
use rustc_middle::ty::{self, TyCtxt};
|
||||
use rustc_target::abi::Align;
|
||||
|
||||
/// Returns `true` if this place is allowed to be less aligned
|
||||
/// than its containing struct (because it is within a packed
|
||||
/// struct).
|
||||
pub fn is_disaligned<'tcx, L>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
local_decls: &L,
|
||||
param_env: ty::ParamEnv<'tcx>,
|
||||
place: Place<'tcx>,
|
||||
) -> bool
|
||||
where
|
||||
L: HasLocalDecls<'tcx>,
|
||||
{
|
||||
debug!("is_disaligned({:?})", place);
|
||||
let pack = match is_within_packed(tcx, local_decls, place) {
|
||||
None => {
|
||||
debug!("is_disaligned({:?}) - not within packed", place);
|
||||
return false;
|
||||
}
|
||||
Some(pack) => pack,
|
||||
};
|
||||
|
||||
let ty = place.ty(local_decls, tcx).ty;
|
||||
match tcx.layout_of(param_env.and(ty)) {
|
||||
Ok(layout) if layout.align.abi <= pack => {
|
||||
// If the packed alignment is greater than or equal to the field alignment, the type won't be
|
||||
// further disaligned.
|
||||
debug!(
|
||||
"is_disaligned({:?}) - align = {}, packed = {}; not disaligned",
|
||||
place,
|
||||
layout.align.abi.bytes(),
|
||||
pack.bytes()
|
||||
);
|
||||
false
|
||||
}
|
||||
_ => {
|
||||
debug!("is_disaligned({:?}) - true", place);
|
||||
true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn is_within_packed<'tcx, L>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
local_decls: &L,
|
||||
place: Place<'tcx>,
|
||||
) -> Option<Align>
|
||||
where
|
||||
L: HasLocalDecls<'tcx>,
|
||||
{
|
||||
for (place_base, elem) in place.iter_projections().rev() {
|
||||
match elem {
|
||||
// encountered a Deref, which is ABI-aligned
|
||||
ProjectionElem::Deref => break,
|
||||
ProjectionElem::Field(..) => {
|
||||
let ty = place_base.ty(local_decls, tcx).ty;
|
||||
match ty.kind() {
|
||||
ty::Adt(def, _) => return def.repr.pack,
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
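For intuition, a hedged surface-level example (not part of this commit) of the situation `is_disaligned` detects: inside a `#[repr(packed)]` struct a field may sit at less than its natural alignment, so MIR that would otherwise take a reference to it has to copy the value out instead.

#[repr(packed)]
struct Packed {
    byte: u8,
    word: u32, // only guaranteed 1-byte alignment inside `Packed`
}

fn read_word(p: &Packed) -> u32 {
    // Copying the field by value is fine; `&p.word` would hand out an
    // under-aligned reference, which is what the alignment check guards against.
    p.word
}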
|
36
compiler/rustc_const_eval/src/util/collect_writes.rs
Normal file
|
@ -0,0 +1,36 @@
|
|||
use rustc_middle::mir::visit::PlaceContext;
|
||||
use rustc_middle::mir::visit::Visitor;
|
||||
use rustc_middle::mir::{Body, Local, Location};
|
||||
|
||||
pub trait FindAssignments {
|
||||
// Finds all statements that assign directly to local (i.e., X = ...)
|
||||
// and returns their locations.
|
||||
fn find_assignments(&self, local: Local) -> Vec<Location>;
|
||||
}
|
||||
|
||||
impl<'tcx> FindAssignments for Body<'tcx> {
|
||||
fn find_assignments(&self, local: Local) -> Vec<Location> {
|
||||
let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] };
|
||||
visitor.visit_body(self);
|
||||
visitor.locations
|
||||
}
|
||||
}
|
||||
|
||||
// The Visitor walks the MIR to return the assignment statements corresponding
|
||||
// to a Local.
|
||||
struct FindLocalAssignmentVisitor {
|
||||
needle: Local,
|
||||
locations: Vec<Location>,
|
||||
}
|
||||
|
||||
impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor {
|
||||
fn visit_local(&mut self, local: &Local, place_context: PlaceContext, location: Location) {
|
||||
if self.needle != *local {
|
||||
return;
|
||||
}
|
||||
|
||||
if place_context.is_place_assignment() {
|
||||
self.locations.push(location);
|
||||
}
|
||||
}
|
||||
}
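A short usage sketch (assumed, not from this commit): the trait is brought into scope and queried for every direct write to a local.

use rustc_middle::mir::{Body, Local, Location};

fn writes_to_local<'tcx>(body: &Body<'tcx>, local: Local) -> Vec<Location> {
    // `FindAssignments` is implemented for `Body`, so this walks the MIR once.
    use crate::util::collect_writes::FindAssignments;
    body.find_assignments(local)
}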
|
36
compiler/rustc_const_eval/src/util/find_self_call.rs
Normal file
|
@ -0,0 +1,36 @@
|
|||
use rustc_middle::mir::*;
|
||||
use rustc_middle::ty::subst::SubstsRef;
|
||||
use rustc_middle::ty::{self, TyCtxt};
|
||||
use rustc_span::def_id::DefId;
|
||||
|
||||
/// Checks if the specified `local` is used as the `self` parameter of a method call
|
||||
/// in the provided `BasicBlock`. If it is, then the `DefId` of the called method is
|
||||
/// returned.
|
||||
pub fn find_self_call<'tcx>(
|
||||
tcx: TyCtxt<'tcx>,
|
||||
body: &Body<'tcx>,
|
||||
local: Local,
|
||||
block: BasicBlock,
|
||||
) -> Option<(DefId, SubstsRef<'tcx>)> {
|
||||
debug!("find_self_call(local={:?}): terminator={:?}", local, &body[block].terminator);
|
||||
if let Some(Terminator { kind: TerminatorKind::Call { func, args, .. }, .. }) =
|
||||
&body[block].terminator
|
||||
{
|
||||
debug!("find_self_call: func={:?}", func);
|
||||
if let Operand::Constant(box Constant { literal, .. }) = func {
|
||||
if let ty::FnDef(def_id, substs) = *literal.ty().kind() {
|
||||
if let Some(ty::AssocItem { fn_has_self_parameter: true, .. }) =
|
||||
tcx.opt_associated_item(def_id)
|
||||
{
|
||||
debug!("find_self_call: args={:?}", args);
|
||||
if let [Operand::Move(self_place) | Operand::Copy(self_place), ..] = **args {
|
||||
if self_place.as_local() == Some(local) {
|
||||
return Some((def_id, substs));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
None
|
||||
}
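A small usage sketch (assumed, not from this commit), checking whether local `_1` is the receiver of the method call that terminates the entry block; `tcx` and `body` are taken as inputs.

use rustc_index::vec::Idx;
use rustc_middle::mir::{BasicBlock, Body, Local};
use rustc_middle::ty::TyCtxt;

fn first_block_calls_method_on_1<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) -> bool {
    // `find_self_call` is re-exported from `crate::util` (see `util/mod.rs` below).
    crate::util::find_self_call(tcx, body, Local::new(1), BasicBlock::new(0)).is_some()
}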
|
8
compiler/rustc_const_eval/src/util/mod.rs
Normal file
|
@ -0,0 +1,8 @@
|
|||
pub mod aggregate;
|
||||
mod alignment;
|
||||
pub mod collect_writes;
|
||||
mod find_self_call;
|
||||
|
||||
pub use self::aggregate::expand_aggregate;
|
||||
pub use self::alignment::is_disaligned;
|
||||
pub use self::find_self_call::find_self_call;
|