Rollup merge of #130885 - RalfJung:interp-error-discard, r=oli-obk
panic when an interpreter error gets unintentionally discarded

One important invariant of Miri is that when an interpreter error is raised (*in particular* a UB error), it must not be discarded: it's not okay to just check `foo().is_err()` and then continue executing. This seems to catch new contributors by surprise fairly regularly, so this PR tries to ensure that *if* this ever happens, we get a panic rather than a silently missed UB bug. The interpreter error type now contains a "guard" that panics on drop, and that guard is explicitly passed to `mem::forget` when an error is deliberately discarded.

Fixes https://github.com/rust-lang/miri/issues/3855
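To illustrate the guard mechanism described above, here is a minimal, self-contained sketch; the `Guard` and `InterpError` types below are illustrative stand-ins, not the actual `InterpErrorInfo` internals or API:

```rust
// Minimal sketch of the "panic on silent discard" idea, using simplified types;
// the real InterpErrorInfo/InterpResult machinery in rustc is more involved.
use std::mem;

/// Zero-sized guard that panics if dropped without being defused.
struct Guard;

impl Drop for Guard {
    fn drop(&mut self) {
        // Avoid a double panic if we are already unwinding for another reason.
        if !std::thread::panicking() {
            panic!("interpreter error was discarded without being handled");
        }
    }
}

struct InterpError {
    msg: String,
    guard: Guard,
}

impl InterpError {
    fn new(msg: impl Into<String>) -> Self {
        InterpError { msg: msg.into(), guard: Guard }
    }

    /// Deliberately discard the error: defuse the guard first.
    fn discard(self) {
        mem::forget(self.guard);
    }

    /// Hand the error to a caller that will actually report it.
    fn into_msg(self) -> String {
        mem::forget(self.guard);
        self.msg
    }
}

fn main() {
    let err = InterpError::new("unsupported operation");
    err.discard(); // fine: explicitly discarded

    let err = InterpError::new("UB detected");
    println!("{}", err.into_msg()); // fine: the error is reported

    // Simply dropping an InterpError without going through discard()/into_msg()
    // would run Guard::drop and panic.
}
```

Dropping an error without going through one of the explicit paths panics, which is the behavior this PR adds for accidentally discarded interpreter errors.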
Commit: ea453bb10b
102 changed files with 1574 additions and 1337 deletions
@@ -6,7 +6,8 @@ use rustc_middle::{bug, span_bug, ty};
 use rustc_span::def_id::DefId;
 
 use crate::interpret::{
-    self, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic, throw_machine_stop,
+    self, HasStaticRootDefId, ImmTy, Immediate, InterpCx, PointerArithmetic, interp_ok,
+    throw_machine_stop,
 };
 
 /// Macro for machine-specific `InterpError` without allocation.
@@ -79,7 +80,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
             throw_machine_stop_str!("can't access mutable globals in ConstProp");
         }
 
-        Ok(())
+        interp_ok(())
     }
 
     fn find_mir_or_eval_fn(
@@ -127,7 +128,7 @@ impl<'tcx> interpret::Machine<'tcx> for DummyMachine {
         right: &interpret::ImmTy<'tcx, Self::Provenance>,
     ) -> interpret::InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
         use rustc_middle::mir::BinOp::*;
-        Ok(match bin_op {
+        interp_ok(match bin_op {
             Eq | Ne | Lt | Le | Gt | Ge => {
                 // Types can differ, e.g. fn ptrs with different `for`.
                 assert_eq!(left.layout.abi, right.layout.abi);
@@ -20,7 +20,7 @@ use crate::const_eval::CheckAlignment;
 use crate::interpret::{
     CtfeValidationMode, GlobalId, Immediate, InternKind, InternResult, InterpCx, InterpError,
     InterpResult, MPlaceTy, MemoryKind, OpTy, RefTracking, StackPopCleanup, create_static_alloc,
-    eval_nullary_intrinsic, intern_const_alloc_recursive, throw_exhaust,
+    eval_nullary_intrinsic, intern_const_alloc_recursive, interp_ok, throw_exhaust,
 };
 use crate::{CTRL_C_RECEIVED, errors};
 
@@ -98,19 +98,19 @@ fn eval_body_using_ecx<'tcx, R: InterpretationResult<'tcx>>(
             return Err(ecx
                 .tcx
                 .dcx()
-                .emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind })
-                .into());
+                .emit_err(errors::DanglingPtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
+            .into();
         }
         Err(InternResult::FoundBadMutablePointer) => {
             return Err(ecx
                 .tcx
                 .dcx()
-                .emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind })
-                .into());
+                .emit_err(errors::MutablePtrInFinal { span: ecx.tcx.span, kind: intern_kind }))
+            .into();
         }
     }
 
-    Ok(R::make_result(ret, ecx))
+    interp_ok(R::make_result(ret, ecx))
 }
 
 /// The `InterpCx` is only meant to be used to do field and index projections into constants for
@@ -147,7 +147,8 @@ pub fn mk_eval_cx_for_const_val<'tcx>(
     ty: Ty<'tcx>,
 ) -> Option<(CompileTimeInterpCx<'tcx>, OpTy<'tcx>)> {
     let ecx = mk_eval_cx_to_read_const_val(tcx.tcx, tcx.span, param_env, CanAccessMutGlobal::No);
-    let op = ecx.const_val_to_op(val, ty, None).ok()?;
+    // FIXME: is it a problem to discard the error here?
+    let op = ecx.const_val_to_op(val, ty, None).discard_err()?;
     Some((ecx, op))
 }
 
@@ -185,12 +186,16 @@ pub(super) fn op_to_const<'tcx>(
         _ => false,
     };
     let immediate = if force_as_immediate {
-        match ecx.read_immediate(op) {
+        match ecx.read_immediate(op).report_err() {
             Ok(imm) => Right(imm),
-            Err(err) if !for_diagnostics => {
-                panic!("normalization works on validated constants: {err:?}")
+            Err(err) => {
+                if for_diagnostics {
+                    // This discard the error, but for diagnostics that's okay.
+                    op.as_mplace_or_imm()
+                } else {
+                    panic!("normalization works on validated constants: {err:?}")
+                }
             }
-            _ => op.as_mplace_or_imm(),
         }
     } else {
         op.as_mplace_or_imm()
@@ -283,17 +288,19 @@ pub fn eval_to_const_value_raw_provider<'tcx>(
         let ty::FnDef(_, args) = ty.kind() else {
             bug!("intrinsic with type {:?}", ty);
         };
-        return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).map_err(|error| {
-            let span = tcx.def_span(def_id);
+        return eval_nullary_intrinsic(tcx, key.param_env, def_id, args).report_err().map_err(
+            |error| {
+                let span = tcx.def_span(def_id);
 
-            super::report(
-                tcx,
-                error.into_kind(),
-                span,
-                || (span, vec![]),
-                |span, _| errors::NullaryIntrinsicError { span },
-            )
-        });
+                super::report(
+                    tcx,
+                    error.into_kind(),
+                    span,
+                    || (span, vec![]),
+                    |span, _| errors::NullaryIntrinsicError { span },
+                )
+            },
+        );
     }
 
     tcx.eval_to_allocation_raw(key).map(|val| turn_into_const_value(tcx, val, key))
@@ -376,6 +383,7 @@ fn eval_in_interpreter<'tcx, R: InterpretationResult<'tcx>>(
     );
     let res = ecx.load_mir(cid.instance.def, cid.promoted);
     res.and_then(|body| eval_body_using_ecx(&mut ecx, cid, body))
+        .report_err()
         .map_err(|error| report_eval_error(&ecx, cid, error))
 }
 
@@ -400,6 +408,7 @@ fn const_validate_mplace<'tcx>(
         }
     };
     ecx.const_validate_operand(&mplace.into(), path, &mut ref_tracking, mode)
+        .report_err()
         // Instead of just reporting the `InterpError` via the usual machinery, we give a more targeted
         // error about the validation failure.
         .map_err(|error| report_validation_error(&ecx, cid, error, alloc_id))?;
@@ -24,8 +24,8 @@ use crate::fluent_generated as fluent;
 use crate::interpret::{
     self, AllocId, AllocRange, ConstAllocation, CtfeProvenance, FnArg, Frame, GlobalAlloc, ImmTy,
     InterpCx, InterpResult, MPlaceTy, OpTy, Pointer, PointerArithmetic, RangeSet, Scalar,
-    StackPopCleanup, compile_time_machine, err_ub, throw_exhaust, throw_inval, throw_ub_custom,
-    throw_unsup, throw_unsup_format,
+    StackPopCleanup, compile_time_machine, interp_ok, throw_exhaust, throw_inval, throw_ub,
+    throw_ub_custom, throw_unsup, throw_unsup_format,
 };
 
 /// When hitting this many interpreted terminators we emit a deny by default lint
@@ -247,7 +247,7 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
             let msg = Symbol::intern(self.read_str(&msg_place)?);
             let span = self.find_closest_untracked_caller_location();
             let (file, line, col) = self.location_triple_for_span(span);
-            return Err(ConstEvalErrKind::Panic { msg, file, line, col }.into());
+            return Err(ConstEvalErrKind::Panic { msg, file, line, col }).into();
         } else if self.tcx.is_lang_item(def_id, LangItem::PanicFmt) {
             // For panic_fmt, call const_panic_fmt instead.
             let const_def_id = self.tcx.require_lang_item(LangItem::ConstPanicFmt, None);
@@ -259,16 +259,16 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
                 self.cur_span(),
             );
 
-            return Ok(Some(new_instance));
+            return interp_ok(Some(new_instance));
         } else if self.tcx.is_lang_item(def_id, LangItem::AlignOffset) {
             let args = self.copy_fn_args(args);
             // For align_offset, we replace the function call if the pointer has no address.
             match self.align_offset(instance, &args, dest, ret)? {
-                ControlFlow::Continue(()) => return Ok(Some(instance)),
-                ControlFlow::Break(()) => return Ok(None),
+                ControlFlow::Continue(()) => return interp_ok(Some(instance)),
+                ControlFlow::Break(()) => return interp_ok(None),
             }
         }
-        Ok(Some(instance))
+        interp_ok(Some(instance))
     }
 
     /// `align_offset(ptr, target_align)` needs special handling in const eval, because the pointer
@@ -323,25 +323,25 @@ impl<'tcx> CompileTimeInterpCx<'tcx> {
                         dest,
                         StackPopCleanup::Goto { ret, unwind: mir::UnwindAction::Unreachable },
                     )?;
-                    Ok(ControlFlow::Break(()))
+                    interp_ok(ControlFlow::Break(()))
                 } else {
                     // Not alignable in const, return `usize::MAX`.
                     let usize_max = Scalar::from_target_usize(self.target_usize_max(), self);
                     self.write_scalar(usize_max, dest)?;
                     self.return_to_block(ret)?;
-                    Ok(ControlFlow::Break(()))
+                    interp_ok(ControlFlow::Break(()))
                 }
             }
             Err(_addr) => {
                 // The pointer has an address, continue with function call.
-                Ok(ControlFlow::Continue(()))
+                interp_ok(ControlFlow::Continue(()))
             }
         }
     }
 
     /// See documentation on the `ptr_guaranteed_cmp` intrinsic.
     fn guaranteed_cmp(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, u8> {
-        Ok(match (a, b) {
+        interp_ok(match (a, b) {
             // Comparisons between integers are always known.
             (Scalar::Int { .. }, Scalar::Int { .. }) => {
                 if a == b {
@@ -403,8 +403,8 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         instance: ty::InstanceKind<'tcx>,
     ) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
         match instance {
-            ty::InstanceKind::Item(def) => Ok(ecx.tcx.mir_for_ctfe(def)),
-            _ => Ok(ecx.tcx.instance_mir(instance)),
+            ty::InstanceKind::Item(def) => interp_ok(ecx.tcx.mir_for_ctfe(def)),
+            _ => interp_ok(ecx.tcx.instance_mir(instance)),
         }
     }
 
@@ -422,7 +422,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         // Replace some functions.
         let Some(instance) = ecx.hook_special_const_fn(orig_instance, args, dest, ret)? else {
             // Call has already been handled.
-            return Ok(None);
+            return interp_ok(None);
         };
 
         // Only check non-glue functions
@@ -444,14 +444,14 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         // This is a const fn. Call it.
         // In case of replacement, we return the *original* instance to make backtraces work out
         // (and we hope this does not confuse the FnAbi checks too much).
-        Ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
+        interp_ok(Some((ecx.load_mir(instance.def, None)?, orig_instance)))
     }
 
     fn panic_nounwind(ecx: &mut InterpCx<'tcx, Self>, msg: &str) -> InterpResult<'tcx> {
         let msg = Symbol::intern(msg);
         let span = ecx.find_closest_untracked_caller_location();
         let (file, line, col) = ecx.location_triple_for_span(span);
-        Err(ConstEvalErrKind::Panic { msg, file, line, col }.into())
+        Err(ConstEvalErrKind::Panic { msg, file, line, col }).into()
     }
 
     fn call_intrinsic(
@@ -464,7 +464,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     ) -> InterpResult<'tcx, Option<ty::Instance<'tcx>>> {
         // Shared intrinsics.
        if ecx.eval_intrinsic(instance, args, dest, target)? {
-            return Ok(None);
+            return interp_ok(None);
        }
        let intrinsic_name = ecx.tcx.item_name(instance.def_id());
 
@@ -541,7 +541,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                     "intrinsic `{intrinsic_name}` is not supported at compile-time"
                 );
             }
-            return Ok(Some(ty::Instance {
+            return interp_ok(Some(ty::Instance {
                 def: ty::InstanceKind::Item(instance.def_id()),
                 args: instance.args,
             }));
@@ -550,7 +550,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
 
         // Intrinsic is done, jump to next block.
         ecx.return_to_block(target)?;
-        Ok(None)
+        interp_ok(None)
     }
 
     fn assert_panic(
@@ -581,7 +581,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                 }
             }
         };
-        Err(ConstEvalErrKind::AssertFailure(err).into())
+        Err(ConstEvalErrKind::AssertFailure(err)).into()
     }
 
     fn binary_ptr_op(
@@ -652,7 +652,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
             }
         }
 
-        Ok(())
+        interp_ok(())
     }
 
     #[inline(always)]
@@ -670,7 +670,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         if !ecx.recursion_limit.value_within_limit(ecx.stack().len() + 1) {
             throw_exhaust!(StackFrameLimitReached)
         } else {
-            Ok(frame)
+            interp_ok(frame)
         }
     }
 
@@ -700,22 +700,22 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
         if is_write {
             // Write access. These are never allowed, but we give a targeted error message.
             match alloc.mutability {
-                Mutability::Not => Err(err_ub!(WriteToReadOnly(alloc_id)).into()),
-                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal.into()),
+                Mutability::Not => throw_ub!(WriteToReadOnly(alloc_id)),
+                Mutability::Mut => Err(ConstEvalErrKind::ModifiedGlobal).into(),
             }
         } else {
             // Read access. These are usually allowed, with some exceptions.
             if machine.can_access_mut_global == CanAccessMutGlobal::Yes {
                 // Machine configuration allows us read from anything (e.g., `static` initializer).
-                Ok(())
+                interp_ok(())
             } else if alloc.mutability == Mutability::Mut {
                 // Machine configuration does not allow us to read statics (e.g., `const`
                 // initializer).
-                Err(ConstEvalErrKind::ConstAccessesMutGlobal.into())
+                Err(ConstEvalErrKind::ConstAccessesMutGlobal).into()
             } else {
                 // Immutable global, this read is fine.
                 assert_eq!(alloc.mutability, Mutability::Not);
-                Ok(())
+                interp_ok(())
             }
         }
     }
@@ -748,9 +748,9 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                 // even when there is interior mutability.)
                 place.map_provenance(CtfeProvenance::as_shared_ref)
             };
-            Ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
+            interp_ok(ImmTy::from_immediate(new_place.to_ref(ecx), val.layout))
         } else {
-            Ok(val.clone())
+            interp_ok(val.clone())
         }
     }
 
@@ -763,20 +763,20 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
     ) -> InterpResult<'tcx> {
         if range.size == Size::ZERO {
             // Nothing to check.
-            return Ok(());
+            return interp_ok(());
         }
         // Reject writes through immutable pointers.
         if immutable {
-            return Err(ConstEvalErrKind::WriteThroughImmutablePointer.into());
+            return Err(ConstEvalErrKind::WriteThroughImmutablePointer).into();
        }
        // Everything else is fine.
-        Ok(())
+        interp_ok(())
     }
 
     fn before_alloc_read(ecx: &InterpCx<'tcx, Self>, alloc_id: AllocId) -> InterpResult<'tcx> {
         // Check if this is the currently evaluated static.
         if Some(alloc_id) == ecx.machine.static_root_ids.map(|(id, _)| id) {
-            return Err(ConstEvalErrKind::RecursiveStatic.into());
+            return Err(ConstEvalErrKind::RecursiveStatic).into();
         }
         // If this is another static, make sure we fire off the query to detect cycles.
         // But only do that when checks for static recursion are enabled.
@@ -788,7 +788,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> {
                 ecx.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?;
             }
         }
-        Ok(())
+        interp_ok(())
     }
 
     fn cached_union_data_range<'e>(
@ -1,13 +1,12 @@
|
|||
// Not in interpret to make sure we do not use private implementation details
|
||||
|
||||
use rustc_middle::mir::interpret::InterpErrorInfo;
|
||||
use rustc_middle::query::{Key, TyCtxtAt};
|
||||
use rustc_middle::ty::{self, Ty, TyCtxt};
|
||||
use rustc_middle::{bug, mir};
|
||||
use rustc_target::abi::VariantIdx;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::interpret::{InterpCx, format_interp_error};
|
||||
use crate::interpret::InterpCx;
|
||||
|
||||
mod dummy_machine;
|
||||
mod error;
|
||||
|
@ -33,17 +32,6 @@ pub(crate) enum ValTreeCreationError<'tcx> {
|
|||
}
|
||||
pub(crate) type ValTreeCreationResult<'tcx> = Result<ty::ValTree<'tcx>, ValTreeCreationError<'tcx>>;
|
||||
|
||||
impl<'tcx> From<InterpErrorInfo<'tcx>> for ValTreeCreationError<'tcx> {
|
||||
fn from(err: InterpErrorInfo<'tcx>) -> Self {
|
||||
ty::tls::with(|tcx| {
|
||||
bug!(
|
||||
"Unexpected Undefined Behavior error during valtree construction: {}",
|
||||
format_interp_error(tcx.dcx(), err),
|
||||
)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(tcx), level = "debug")]
|
||||
pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
|
||||
tcx: TyCtxtAt<'tcx>,
|
||||
|
@ -60,8 +48,8 @@ pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
|
|||
return None;
|
||||
}
|
||||
ty::Adt(def, _) => {
|
||||
let variant = ecx.read_discriminant(&op).ok()?;
|
||||
let down = ecx.project_downcast(&op, variant).ok()?;
|
||||
let variant = ecx.read_discriminant(&op).discard_err()?;
|
||||
let down = ecx.project_downcast(&op, variant).discard_err()?;
|
||||
(def.variants()[variant].fields.len(), Some(variant), down)
|
||||
}
|
||||
ty::Tuple(args) => (args.len(), None, op),
|
||||
|
@ -70,7 +58,7 @@ pub(crate) fn try_destructure_mir_constant_for_user_output<'tcx>(
|
|||
|
||||
let fields_iter = (0..field_count)
|
||||
.map(|i| {
|
||||
let field_op = ecx.project_field(&down, i).ok()?;
|
||||
let field_op = ecx.project_field(&down, i).discard_err()?;
|
||||
let val = op_to_const(&ecx, &field_op, /* for diagnostics */ true);
|
||||
Some((val, field_op.layout.ty))
|
||||
})
|
||||
|
|
|
@ -92,7 +92,7 @@ fn const_to_valtree_inner<'tcx>(
|
|||
Ok(ty::ValTree::zst())
|
||||
}
|
||||
ty::Bool | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Char => {
|
||||
let val = ecx.read_immediate(place)?;
|
||||
let val = ecx.read_immediate(place).unwrap();
|
||||
let val = val.to_scalar_int().unwrap();
|
||||
*num_nodes += 1;
|
||||
|
||||
|
@ -114,7 +114,7 @@ fn const_to_valtree_inner<'tcx>(
|
|||
// equality at compile-time (see `ptr_guaranteed_cmp`).
|
||||
// However we allow those that are just integers in disguise.
|
||||
// First, get the pointer. Remember it might be wide!
|
||||
let val = ecx.read_immediate(place)?;
|
||||
let val = ecx.read_immediate(place).unwrap();
|
||||
// We could allow wide raw pointers where both sides are integers in the future,
|
||||
// but for now we reject them.
|
||||
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
|
||||
|
@ -135,7 +135,7 @@ fn const_to_valtree_inner<'tcx>(
|
|||
ty::FnPtr(..) => Err(ValTreeCreationError::NonSupportedType(ty)),
|
||||
|
||||
ty::Ref(_, _, _) => {
|
||||
let derefd_place = ecx.deref_pointer(place)?;
|
||||
let derefd_place = ecx.deref_pointer(place).unwrap();
|
||||
const_to_valtree_inner(ecx, &derefd_place, num_nodes)
|
||||
}
|
||||
|
||||
|
@ -159,7 +159,7 @@ fn const_to_valtree_inner<'tcx>(
|
|||
bug!("uninhabited types should have errored and never gotten converted to valtree")
|
||||
}
|
||||
|
||||
let variant = ecx.read_discriminant(place)?;
|
||||
let variant = ecx.read_discriminant(place).unwrap();
|
||||
branches(ecx, place, def.variant(variant).fields.len(), def.is_enum().then_some(variant), num_nodes)
|
||||
}
|
||||
|
||||
|
|
|
@ -15,8 +15,8 @@ use tracing::{info, instrument, trace};
|
|||
|
||||
use super::{
|
||||
CtfeProvenance, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy,
|
||||
Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, throw_ub,
|
||||
throw_ub_custom, throw_unsup_format,
|
||||
Projectable, Provenance, ReturnAction, Scalar, StackPopCleanup, StackPopInfo, interp_ok,
|
||||
throw_ub, throw_ub_custom, throw_unsup_format,
|
||||
};
|
||||
use crate::fluent_generated as fluent;
|
||||
|
||||
|
@ -64,7 +64,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
arg: &FnArg<'tcx, M::Provenance>,
|
||||
field: usize,
|
||||
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
|
||||
Ok(match arg {
|
||||
interp_ok(match arg {
|
||||
FnArg::Copy(op) => FnArg::Copy(self.project_field(op, field)?),
|
||||
FnArg::InPlace(mplace) => FnArg::InPlace(self.project_field(mplace, field)?),
|
||||
})
|
||||
|
@ -97,7 +97,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// another type.
|
||||
let ty::Adt(def, args) = layout.ty.kind() else {
|
||||
// Not an ADT, so definitely no NPO.
|
||||
return Ok(layout);
|
||||
return interp_ok(layout);
|
||||
};
|
||||
let inner = if self.tcx.is_diagnostic_item(sym::Option, def.did()) {
|
||||
// The wrapped type is the only arg.
|
||||
|
@ -111,10 +111,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
} else if rhs.is_1zst() {
|
||||
lhs
|
||||
} else {
|
||||
return Ok(layout); // no NPO
|
||||
return interp_ok(layout); // no NPO
|
||||
}
|
||||
} else {
|
||||
return Ok(layout); // no NPO
|
||||
return interp_ok(layout); // no NPO
|
||||
};
|
||||
|
||||
// Check if the inner type is one of the NPO-guaranteed ones.
|
||||
|
@ -126,7 +126,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// Stop at NPO types so that we don't miss that attribute in the check below!
|
||||
def.is_struct() && !is_npo(def)
|
||||
});
|
||||
Ok(match inner.ty.kind() {
|
||||
interp_ok(match inner.ty.kind() {
|
||||
ty::Ref(..) | ty::FnPtr(..) => {
|
||||
// Option<&T> behaves like &T, and same for fn()
|
||||
inner
|
||||
|
@ -153,11 +153,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
) -> InterpResult<'tcx, bool> {
|
||||
// Fast path: equal types are definitely compatible.
|
||||
if caller.ty == callee.ty {
|
||||
return Ok(true);
|
||||
return interp_ok(true);
|
||||
}
|
||||
// 1-ZST are compatible with all 1-ZST (and with nothing else).
|
||||
if caller.is_1zst() || callee.is_1zst() {
|
||||
return Ok(caller.is_1zst() && callee.is_1zst());
|
||||
return interp_ok(caller.is_1zst() && callee.is_1zst());
|
||||
}
|
||||
// Unfold newtypes and NPO optimizations.
|
||||
let unfold = |layout: TyAndLayout<'tcx>| {
|
||||
|
@ -180,17 +180,17 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
_ => None,
|
||||
};
|
||||
if let (Some(caller), Some(callee)) = (thin_pointer(caller), thin_pointer(callee)) {
|
||||
return Ok(caller == callee);
|
||||
return interp_ok(caller == callee);
|
||||
}
|
||||
// For wide pointers we have to get the pointee type.
|
||||
let pointee_ty = |ty: Ty<'tcx>| -> InterpResult<'tcx, Option<Ty<'tcx>>> {
|
||||
// We cannot use `builtin_deref` here since we need to reject `Box<T, MyAlloc>`.
|
||||
Ok(Some(match ty.kind() {
|
||||
interp_ok(Some(match ty.kind() {
|
||||
ty::Ref(_, ty, _) => *ty,
|
||||
ty::RawPtr(ty, _) => *ty,
|
||||
// We only accept `Box` with the default allocator.
|
||||
_ if ty.is_box_global(*self.tcx) => ty.expect_boxed_ty(),
|
||||
_ => return Ok(None),
|
||||
_ => return interp_ok(None),
|
||||
}))
|
||||
};
|
||||
if let (Some(caller), Some(callee)) = (pointee_ty(caller.ty)?, pointee_ty(callee.ty)?) {
|
||||
|
@ -202,7 +202,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let normalize = |ty| self.tcx.normalize_erasing_regions(self.param_env, ty);
|
||||
ty.ptr_metadata_ty(*self.tcx, normalize)
|
||||
};
|
||||
return Ok(meta_ty(caller) == meta_ty(callee));
|
||||
return interp_ok(meta_ty(caller) == meta_ty(callee));
|
||||
}
|
||||
|
||||
// Compatible integer types (in particular, usize vs ptr-sized-u32/u64).
|
||||
|
@ -217,11 +217,11 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
};
|
||||
if let (Some(caller), Some(callee)) = (int_ty(caller.ty), int_ty(callee.ty)) {
|
||||
// This is okay if they are the same integer type.
|
||||
return Ok(caller == callee);
|
||||
return interp_ok(caller == callee);
|
||||
}
|
||||
|
||||
// Fall back to exact equality.
|
||||
Ok(caller == callee)
|
||||
interp_ok(caller == callee)
|
||||
}
|
||||
|
||||
fn check_argument_compat(
|
||||
|
@ -235,13 +235,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// Ensure that our checks imply actual ABI compatibility for this concrete call.
|
||||
// (This can fail e.g. if `#[rustc_nonnull_optimization_guaranteed]` is used incorrectly.)
|
||||
assert!(caller_abi.eq_abi(callee_abi));
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
} else {
|
||||
trace!(
|
||||
"check_argument_compat: incompatible ABIs:\ncaller: {:?}\ncallee: {:?}",
|
||||
caller_abi, callee_abi
|
||||
);
|
||||
Ok(false)
|
||||
interp_ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -266,7 +266,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if !already_live {
|
||||
self.storage_live(callee_arg.as_local().unwrap())?;
|
||||
}
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
// Find next caller arg.
|
||||
let Some((caller_arg, caller_abi)) = caller_args.next() else {
|
||||
|
@ -308,7 +308,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if let FnArg::InPlace(mplace) = caller_arg {
|
||||
M::protect_in_place_function_argument(self, mplace)?;
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// The main entry point for creating a new stack frame: performs ABI checks and initializes
|
||||
|
@ -536,7 +536,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
unwind,
|
||||
);
|
||||
} else {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
ty::InstanceKind::VTableShim(..)
|
||||
|
@ -561,7 +561,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
unwind,
|
||||
)?
|
||||
else {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
};
|
||||
|
||||
// Special handling for the closure ABI: untuple the last argument.
|
||||
|
@ -572,7 +572,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
trace!("init_fn_call: Will pass last argument by untupling");
|
||||
Cow::from(
|
||||
args.iter()
|
||||
.map(|a| Ok(a.clone()))
|
||||
.map(|a| interp_ok(a.clone()))
|
||||
.chain(
|
||||
(0..untuple_arg.layout().fields.count())
|
||||
.map(|i| self.fn_arg_field(untuple_arg, i)),
|
||||
|
@ -886,27 +886,25 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// this transmute.
|
||||
res
|
||||
} else {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
};
|
||||
|
||||
// All right, now it is time to actually pop the frame.
|
||||
let stack_pop_info = self.pop_stack_frame_raw(unwinding)?;
|
||||
|
||||
// Report error from return value copy, if any.
|
||||
copy_ret_result?;
|
||||
// An error here takes precedence over the copy error.
|
||||
let (stack_pop_info, ()) = self.pop_stack_frame_raw(unwinding).and(copy_ret_result)?;
|
||||
|
||||
match stack_pop_info.return_action {
|
||||
ReturnAction::Normal => {}
|
||||
ReturnAction::NoJump => {
|
||||
// The hook already did everything.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
ReturnAction::NoCleanup => {
|
||||
// If we are not doing cleanup, also skip everything else.
|
||||
assert!(self.stack().is_empty(), "only the topmost frame should ever be leaked");
|
||||
assert!(!unwinding, "tried to skip cleanup during unwinding");
|
||||
// Skip machine hook.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -931,7 +929,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
self.stack().is_empty(),
|
||||
"only the bottommost frame can have StackPopCleanup::Root"
|
||||
);
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -14,7 +14,8 @@ use tracing::trace;
|
|||
|
||||
use super::util::ensure_monomorphic_enough;
|
||||
use super::{
|
||||
FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, throw_ub, throw_ub_custom,
|
||||
FnVal, ImmTy, Immediate, InterpCx, Machine, OpTy, PlaceTy, err_inval, interp_ok, throw_ub,
|
||||
throw_ub_custom,
|
||||
};
|
||||
use crate::fluent_generated as fluent;
|
||||
|
||||
|
@ -157,7 +158,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
self.copy_op_allow_transmute(src, dest)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Handles 'IntToInt' and 'IntToFloat' casts.
|
||||
|
@ -169,7 +170,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
assert!(src.layout.ty.is_integral() || src.layout.ty.is_char() || src.layout.ty.is_bool());
|
||||
assert!(cast_to.ty.is_floating_point() || cast_to.ty.is_integral() || cast_to.ty.is_char());
|
||||
|
||||
Ok(ImmTy::from_scalar(
|
||||
interp_ok(ImmTy::from_scalar(
|
||||
self.cast_from_int_like(src.to_scalar(), src.layout, cast_to.ty)?,
|
||||
cast_to,
|
||||
))
|
||||
|
@ -192,7 +193,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
FloatTy::F64 => self.cast_from_float(src.to_scalar().to_f64()?, cast_to.ty),
|
||||
FloatTy::F128 => self.cast_from_float(src.to_scalar().to_f128()?, cast_to.ty),
|
||||
};
|
||||
Ok(ImmTy::from_scalar(val, cast_to))
|
||||
interp_ok(ImmTy::from_scalar(val, cast_to))
|
||||
}
|
||||
|
||||
/// Handles 'FnPtrToPtr' and 'PtrToPtr' casts.
|
||||
|
@ -206,14 +207,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// Handle casting any ptr to raw ptr (might be a fat ptr).
|
||||
if cast_to.size == src.layout.size {
|
||||
// Thin or fat pointer that just has the ptr kind of target type changed.
|
||||
return Ok(ImmTy::from_immediate(**src, cast_to));
|
||||
return interp_ok(ImmTy::from_immediate(**src, cast_to));
|
||||
} else {
|
||||
// Casting the metadata away from a fat ptr.
|
||||
assert_eq!(src.layout.size, 2 * self.pointer_size());
|
||||
assert_eq!(cast_to.size, self.pointer_size());
|
||||
assert!(src.layout.ty.is_unsafe_ptr());
|
||||
return match **src {
|
||||
Immediate::ScalarPair(data, _) => Ok(ImmTy::from_scalar(data, cast_to)),
|
||||
Immediate::ScalarPair(data, _) => interp_ok(ImmTy::from_scalar(data, cast_to)),
|
||||
Immediate::Scalar(..) => span_bug!(
|
||||
self.cur_span(),
|
||||
"{:?} input to a fat-to-thin cast ({} -> {})",
|
||||
|
@ -240,7 +241,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
Ok(ptr) => M::expose_ptr(self, ptr)?,
|
||||
Err(_) => {} // Do nothing, exposing an invalid pointer (`None` provenance) is a NOP.
|
||||
};
|
||||
Ok(ImmTy::from_scalar(self.cast_from_int_like(scalar, src.layout, cast_to.ty)?, cast_to))
|
||||
interp_ok(ImmTy::from_scalar(
|
||||
self.cast_from_int_like(scalar, src.layout, cast_to.ty)?,
|
||||
cast_to,
|
||||
))
|
||||
}
|
||||
|
||||
pub fn pointer_with_exposed_provenance_cast(
|
||||
|
@ -258,7 +262,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
|
||||
// Then turn address into pointer.
|
||||
let ptr = M::ptr_from_addr_cast(self, addr)?;
|
||||
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
|
||||
interp_ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(ptr, self), cast_to))
|
||||
}
|
||||
|
||||
/// Low-level cast helper function. This works directly on scalars and can take 'int-like' input
|
||||
|
@ -280,7 +284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
_ => span_bug!(self.cur_span(), "invalid int-like cast from {}", src_layout.ty),
|
||||
};
|
||||
|
||||
Ok(match *cast_ty.kind() {
|
||||
interp_ok(match *cast_ty.kind() {
|
||||
// int -> int
|
||||
Int(_) | Uint(_) => {
|
||||
let size = match *cast_ty.kind() {
|
||||
|
@ -505,7 +509,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
self.unsize_into(&src_field, cast_ty_field, &dst_field)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
_ => {
|
||||
// Do not ICE if we are not monomorphic enough.
|
||||
|
|
|
@ -7,7 +7,8 @@ use rustc_target::abi::{self, TagEncoding, VariantIdx, Variants};
|
|||
use tracing::{instrument, trace};
|
||||
|
||||
use super::{
|
||||
ImmTy, InterpCx, InterpResult, Machine, Projectable, Scalar, Writeable, err_ub, throw_ub,
|
||||
ImmTy, InterpCx, InterpResult, Machine, Projectable, Scalar, Writeable, err_ub, interp_ok,
|
||||
throw_ub,
|
||||
};
|
||||
|
||||
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||
|
@ -48,7 +49,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if actual_variant != variant_index {
|
||||
throw_ub!(InvalidNichedEnumVariantWritten { enum_ty: dest.layout().ty });
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -89,7 +90,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
throw_ub!(UninhabitedEnumVariantRead(index))
|
||||
}
|
||||
}
|
||||
return Ok(index);
|
||||
return interp_ok(index);
|
||||
}
|
||||
Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
|
||||
(tag, tag_encoding, tag_field)
|
||||
|
@ -205,7 +206,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if op.layout().for_variant(self, index).abi.is_uninhabited() {
|
||||
throw_ub!(UninhabitedEnumVariantRead(index))
|
||||
}
|
||||
Ok(index)
|
||||
interp_ok(index)
|
||||
}
|
||||
|
||||
pub fn discriminant_for_variant(
|
||||
|
@ -226,7 +227,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
Scalar::from_uint(variant.as_u32(), discr_layout.size)
|
||||
}
|
||||
};
|
||||
Ok(ImmTy::from_scalar(discr_value, discr_layout))
|
||||
interp_ok(ImmTy::from_scalar(discr_value, discr_layout))
|
||||
}
|
||||
|
||||
/// Computes how to write the tag of a given variant of enum `ty`:
|
||||
|
@ -247,7 +248,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// discriminant is encoded implicitly, so any attempt to write
|
||||
// the wrong discriminant for a `Single` enum will reliably
|
||||
// result in UB.
|
||||
Ok(None)
|
||||
interp_ok(None)
|
||||
}
|
||||
|
||||
abi::Variants::Multiple {
|
||||
|
@ -265,7 +266,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let tag_size = tag_layout.size(self);
|
||||
let tag_val = tag_size.truncate(discr_val);
|
||||
let tag = ScalarInt::try_from_uint(tag_val, tag_size).unwrap();
|
||||
Ok(Some((tag, tag_field)))
|
||||
interp_ok(Some((tag, tag_field)))
|
||||
}
|
||||
|
||||
abi::Variants::Multiple {
|
||||
|
@ -274,7 +275,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
} if untagged_variant == variant_index => {
|
||||
// The untagged variant is implicitly encoded simply by having a
|
||||
// value that is outside the niche variants.
|
||||
Ok(None)
|
||||
interp_ok(None)
|
||||
}
|
||||
|
||||
abi::Variants::Multiple {
|
||||
|
@ -299,7 +300,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let tag = self
|
||||
.binary_op(mir::BinOp::Add, &variant_index_relative_val, &niche_start_val)?
|
||||
.to_scalar_int()?;
|
||||
Ok(Some((tag, tag_field)))
|
||||
interp_ok(Some((tag, tag_field)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,9 +19,9 @@ use rustc_trait_selection::traits::ObligationCtxt;
|
|||
use tracing::{debug, instrument, trace};
|
||||
|
||||
use super::{
|
||||
Frame, FrameInfo, GlobalId, InterpErrorInfo, InterpResult, MPlaceTy, Machine, MemPlaceMeta,
|
||||
Memory, OpTy, Place, PlaceTy, PointerArithmetic, Projectable, Provenance, err_inval,
|
||||
throw_inval, throw_ub, throw_ub_custom,
|
||||
Frame, FrameInfo, GlobalId, InterpError, InterpErrorInfo, InterpResult, MPlaceTy, Machine,
|
||||
MemPlaceMeta, Memory, OpTy, Place, PlaceTy, PointerArithmetic, Projectable, Provenance,
|
||||
err_inval, interp_ok, throw_inval, throw_ub, throw_ub_custom,
|
||||
};
|
||||
use crate::{ReportErrorExt, fluent_generated as fluent, util};
|
||||
|
||||
|
@ -73,7 +73,7 @@ where
|
|||
}
|
||||
|
||||
impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
|
||||
type LayoutOfResult = InterpResult<'tcx, TyAndLayout<'tcx>>;
|
||||
type LayoutOfResult = Result<TyAndLayout<'tcx>, InterpError<'tcx>>;
|
||||
|
||||
#[inline]
|
||||
fn layout_tcx_at_span(&self) -> Span {
|
||||
|
@ -82,29 +82,24 @@ impl<'tcx, M: Machine<'tcx>> LayoutOfHelpers<'tcx> for InterpCx<'tcx, M> {
|
|||
}
|
||||
|
||||
#[inline]
|
||||
fn handle_layout_err(
|
||||
&self,
|
||||
err: LayoutError<'tcx>,
|
||||
_: Span,
|
||||
_: Ty<'tcx>,
|
||||
) -> InterpErrorInfo<'tcx> {
|
||||
err_inval!(Layout(err)).into()
|
||||
fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> InterpError<'tcx> {
|
||||
err_inval!(Layout(err))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, M: Machine<'tcx>> FnAbiOfHelpers<'tcx> for InterpCx<'tcx, M> {
|
||||
type FnAbiOfResult = InterpResult<'tcx, &'tcx FnAbi<'tcx, Ty<'tcx>>>;
|
||||
type FnAbiOfResult = Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, InterpError<'tcx>>;
|
||||
|
||||
fn handle_fn_abi_err(
|
||||
&self,
|
||||
err: FnAbiError<'tcx>,
|
||||
_span: Span,
|
||||
_fn_abi_request: FnAbiRequest<'tcx>,
|
||||
) -> InterpErrorInfo<'tcx> {
|
||||
) -> InterpError<'tcx> {
|
||||
match err {
|
||||
FnAbiError::Layout(err) => err_inval!(Layout(err)).into(),
|
||||
FnAbiError::Layout(err) => err_inval!(Layout(err)),
|
||||
FnAbiError::AdjustForForeignAbi(err) => {
|
||||
err_inval!(FnAbiAdjustForForeignAbi(err)).into()
|
||||
err_inval!(FnAbiAdjustForForeignAbi(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -160,7 +155,7 @@ pub(super) fn from_known_layout<'tcx>(
|
|||
);
|
||||
}
|
||||
}
|
||||
Ok(known_layout)
|
||||
interp_ok(known_layout)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -262,7 +257,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if let Some(err) = body.tainted_by_errors {
|
||||
throw_inval!(AlreadyReported(ReportedErrorInfo::tainted_by_errors(err)));
|
||||
}
|
||||
Ok(body)
|
||||
interp_ok(body)
|
||||
}
|
||||
|
||||
/// Call this on things you got out of the MIR (so it is as generic as the current
|
||||
|
@ -305,7 +300,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
trace!("param_env: {:#?}", self.param_env);
|
||||
trace!("args: {:#?}", args);
|
||||
match ty::Instance::try_resolve(*self.tcx, self.param_env, def, args) {
|
||||
Ok(Some(instance)) => Ok(instance),
|
||||
Ok(Some(instance)) => interp_ok(instance),
|
||||
Ok(None) => throw_inval!(TooGeneric),
|
||||
|
||||
// FIXME(eddyb) this could be a bit more specific than `AlreadyReported`.
|
||||
|
@ -401,7 +396,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
layout: &TyAndLayout<'tcx>,
|
||||
) -> InterpResult<'tcx, Option<(Size, Align)>> {
|
||||
if layout.is_sized() {
|
||||
return Ok(Some((layout.size, layout.align.abi)));
|
||||
return interp_ok(Some((layout.size, layout.align.abi)));
|
||||
}
|
||||
match layout.ty.kind() {
|
||||
ty::Adt(..) | ty::Tuple(..) => {
|
||||
|
@ -425,7 +420,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
else {
|
||||
// A field with an extern type. We don't know the actual dynamic size
|
||||
// or the alignment.
|
||||
return Ok(None);
|
||||
return interp_ok(None);
|
||||
};
|
||||
|
||||
// # First compute the dynamic alignment
|
||||
|
@ -456,12 +451,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if full_size > self.max_size_of_val() {
|
||||
throw_ub!(InvalidMeta(InvalidMetaKind::TooBig));
|
||||
}
|
||||
Ok(Some((full_size, full_align)))
|
||||
interp_ok(Some((full_size, full_align)))
|
||||
}
|
||||
ty::Dynamic(expected_trait, _, ty::Dyn) => {
|
||||
let vtable = metadata.unwrap_meta().to_pointer(self)?;
|
||||
// Read size and align from vtable (already checks size).
|
||||
Ok(Some(self.get_vtable_size_and_align(vtable, Some(expected_trait))?))
|
||||
interp_ok(Some(self.get_vtable_size_and_align(vtable, Some(expected_trait))?))
|
||||
}
|
||||
|
||||
ty::Slice(_) | ty::Str => {
|
||||
|
@ -474,10 +469,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if size > self.max_size_of_val() {
|
||||
throw_ub!(InvalidMeta(InvalidMetaKind::SliceTooBig));
|
||||
}
|
||||
Ok(Some((size, elem.align.abi)))
|
||||
interp_ok(Some((size, elem.align.abi)))
|
||||
}
|
||||
|
||||
ty::Foreign(_) => Ok(None),
|
||||
ty::Foreign(_) => interp_ok(None),
|
||||
|
||||
_ => span_bug!(self.cur_span(), "size_and_align_of::<{}> not supported", layout.ty),
|
||||
}
|
||||
|
@ -503,7 +498,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
pub fn return_to_block(&mut self, target: Option<mir::BasicBlock>) -> InterpResult<'tcx> {
|
||||
if let Some(target) = target {
|
||||
self.go_to_block(target);
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
} else {
|
||||
throw_ub!(Unreachable)
|
||||
}
|
||||
|
@ -530,10 +525,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
M::unwind_terminate(self, reason)?;
|
||||
// This might have pushed a new stack frame, or it terminated execution.
|
||||
// Either way, `loc` will not be updated.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Call a query that can return `ErrorHandled`. Should be used for statics and other globals.
|
||||
|
|
|
@ -26,7 +26,9 @@ use rustc_span::def_id::LocalDefId;
|
|||
use rustc_span::sym;
|
||||
use tracing::{instrument, trace};
|
||||
|
||||
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, err_ub};
|
||||
use super::{
|
||||
AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, err_ub, interp_ok,
|
||||
};
|
||||
use crate::const_eval;
|
||||
use crate::errors::NestedStaticInThreadLocal;
|
||||
|
||||
|
@ -307,7 +309,7 @@ pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>
|
|||
) -> InterpResult<'tcx, ()> {
|
||||
if ecx.tcx.try_get_global_alloc(alloc_id).is_some() {
|
||||
// The constant is already in global memory. Do nothing.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
// Move allocation to `tcx`.
|
||||
if let Some(_) =
|
||||
|
@ -318,7 +320,7 @@ pub fn intern_const_alloc_for_constprop<'tcx, T, M: CompileTimeMachine<'tcx, T>>
|
|||
// proper recursive interning loop -- or just call `intern_const_alloc_recursive`.
|
||||
panic!("`intern_const_alloc_for_constprop` called on allocation with nested provenance")
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> {
|
||||
|
@ -342,6 +344,6 @@ impl<'tcx, M: super::intern::CompileTimeMachine<'tcx, !>> InterpCx<'tcx, M> {
|
|||
panic!("`intern_with_temp_alloc` with nested allocations");
|
||||
}
|
||||
}
|
||||
Ok(alloc_id)
|
||||
interp_ok(alloc_id)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -18,7 +18,7 @@ use super::util::ensure_monomorphic_enough;
|
|||
use super::{
|
||||
Allocation, CheckInAllocMsg, ConstAllocation, GlobalId, ImmTy, InterpCx, InterpResult,
|
||||
MPlaceTy, Machine, OpTy, Pointer, PointerArithmetic, Provenance, Scalar, err_inval,
|
||||
err_ub_custom, err_unsup_format, throw_inval, throw_ub_custom, throw_ub_format,
|
||||
err_ub_custom, err_unsup_format, interp_ok, throw_inval, throw_ub_custom, throw_ub_format,
|
||||
};
|
||||
use crate::fluent_generated as fluent;
|
||||
|
||||
|
@ -39,7 +39,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
|
|||
) -> InterpResult<'tcx, ConstValue<'tcx>> {
|
||||
let tp_ty = args.type_at(0);
|
||||
let name = tcx.item_name(def_id);
|
||||
Ok(match name {
|
||||
interp_ok(match name {
|
||||
sym::type_name => {
|
||||
ensure_monomorphic_enough(tcx, tp_ty)?;
|
||||
let alloc = alloc_type_name(tcx, tp_ty);
|
||||
|
@ -329,6 +329,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
fluent::const_eval_offset_from_different_allocations,
|
||||
name = intrinsic_name,
|
||||
)
|
||||
.into()
|
||||
})?;
|
||||
|
||||
// Perform division by size to compute return value.
|
||||
|
@ -378,7 +379,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
|
||||
M::panic_nounwind(self, &msg)?;
|
||||
// Skip the `return_to_block` at the end (we panicked, we do not return).
|
||||
return Ok(true);
|
||||
return interp_ok(true);
|
||||
}
|
||||
}
|
||||
sym::simd_insert => {
|
||||
|
@ -438,12 +439,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
|
||||
// Unsupported intrinsic: skip the return_to_block below.
|
||||
_ => return Ok(false),
|
||||
_ => return interp_ok(false),
|
||||
}
|
||||
|
||||
trace!("{:?}", self.dump_place(&dest.clone().into()));
|
||||
self.return_to_block(ret)?;
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
pub(super) fn eval_nondiverging_intrinsic(
|
||||
|
@ -457,7 +458,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
if !cond {
|
||||
throw_ub_custom!(fluent::const_eval_assume_false);
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
NonDivergingIntrinsic::CopyNonOverlapping(mir::CopyNonOverlapping {
|
||||
count,
|
||||
|
@ -499,7 +500,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
_ => bug!("not a numeric intrinsic: {}", name),
|
||||
};
|
||||
Ok(Scalar::from_uint(bits_out, ret_layout.size))
|
||||
interp_ok(Scalar::from_uint(bits_out, ret_layout.size))
|
||||
}
|
||||
|
||||
pub fn exact_div(
|
||||
|
@ -540,7 +541,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
|
||||
let (val, overflowed) =
|
||||
self.binary_op(mir_op.wrapping_to_overflowing().unwrap(), l, r)?.to_scalar_pair();
|
||||
Ok(if overflowed.to_bool()? {
|
||||
interp_ok(if overflowed.to_bool()? {
|
||||
let size = l.layout.size;
|
||||
if l.layout.abi.is_signed() {
|
||||
// For signed ints the saturated value depends on the sign of the first
|
||||
|
@ -582,7 +583,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// The offset must be in bounds starting from `ptr`.
|
||||
self.check_ptr_access_signed(ptr, offset_bytes, CheckInAllocMsg::PointerArithmeticTest)?;
|
||||
// This also implies that there is no overflow, so we are done.
|
||||
Ok(ptr.wrapping_signed_offset(offset_bytes, self))
|
||||
interp_ok(ptr.wrapping_signed_offset(offset_bytes, self))
|
||||
}
|
||||
|
||||
/// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
|
||||
|
@ -628,7 +629,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
self.copy_op(&right, &left)?;
|
||||
self.copy_op(&temp, &right)?;
|
||||
self.deallocate_ptr(temp.ptr(), None, kind)?;
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
pub fn write_bytes_intrinsic(
|
||||
|
@ -669,7 +670,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
|
||||
// `Ordering`'s discriminants are -1/0/+1, so casting does the right thing.
|
||||
let result = Ord::cmp(left_bytes, right_bytes) as i32;
|
||||
Ok(Scalar::from_i32(result))
|
||||
interp_ok(Scalar::from_i32(result))
|
||||
}
|
||||
|
||||
pub(crate) fn raw_eq_intrinsic(
|
||||
|
@ -687,13 +688,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
this.check_ptr_align(ptr, layout.align.abi)?;
|
||||
let Some(alloc_ref) = self.get_ptr_alloc(ptr, layout.size)? else {
|
||||
// zero-sized access
|
||||
return Ok(&[]);
|
||||
return interp_ok(&[]);
|
||||
};
|
||||
alloc_ref.get_bytes_strip_provenance()
|
||||
};
|
||||
|
||||
let lhs_bytes = get_bytes(self, lhs)?;
|
||||
let rhs_bytes = get_bytes(self, rhs)?;
|
||||
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
|
||||
interp_ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
|
||||
}
|
||||
}
|
||||
|
|
|
@ -20,7 +20,8 @@ use rustc_target::spec::abi::Abi as CallAbi;
|
|||
use super::{
|
||||
AllocBytes, AllocId, AllocKind, AllocRange, Allocation, CTFE_ALLOC_SALT, ConstAllocation,
|
||||
CtfeProvenance, FnArg, Frame, ImmTy, InterpCx, InterpResult, MPlaceTy, MemoryKind,
|
||||
Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, throw_unsup, throw_unsup_format,
|
||||
Misalignment, OpTy, PlaceTy, Pointer, Provenance, RangeSet, interp_ok, throw_unsup,
|
||||
throw_unsup_format,
|
||||
};
|
||||
|
||||
/// Data returned by [`Machine::after_stack_pop`], and consumed by
|
||||
|
@ -185,7 +186,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
ecx: &InterpCx<'tcx, Self>,
|
||||
instance: ty::InstanceKind<'tcx>,
|
||||
) -> InterpResult<'tcx, &'tcx mir::Body<'tcx>> {
|
||||
Ok(ecx.tcx.instance_mir(instance))
|
||||
interp_ok(ecx.tcx.instance_mir(instance))
|
||||
}
|
||||
|
||||
/// Entry point to all function calls.
|
||||
|
@ -280,7 +281,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
/// Called before a basic block terminator is executed.
|
||||
#[inline]
|
||||
fn before_terminator(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Determines the result of a `NullaryOp::UbChecks` invocation.
|
||||
|
@ -290,7 +291,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
/// You can use this to detect long or endlessly running programs.
|
||||
#[inline]
|
||||
fn increment_const_eval_counter(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called before a global allocation is accessed.
|
||||
|
@ -304,7 +305,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_static_def_id: Option<DefId>,
|
||||
_is_write: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Return the `AllocId` for the given thread-local static in the current thread.
|
||||
|
@ -422,7 +423,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_prov: (AllocId, Self::ProvenanceExtra),
|
||||
_range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Hook for performing extra checks on any memory read access,
|
||||
|
@ -433,7 +434,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
/// Used to prevent statics from self-initializing by reading from their own memory
|
||||
/// as it is being initialized.
|
||||
fn before_alloc_read(_ecx: &InterpCx<'tcx, Self>, _alloc_id: AllocId) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Hook for performing extra checks on a memory write access.
|
||||
|
@ -446,7 +447,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_prov: (AllocId, Self::ProvenanceExtra),
|
||||
_range: AllocRange,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Hook for performing extra operations on a memory deallocation.
|
||||
|
@ -460,7 +461,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_align: Align,
|
||||
_kind: MemoryKind<Self::MemoryKind>,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Executes a retagging operation for a single pointer.
|
||||
|
@ -471,7 +472,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_kind: mir::RetagKind,
|
||||
val: &ImmTy<'tcx, Self::Provenance>,
|
||||
) -> InterpResult<'tcx, ImmTy<'tcx, Self::Provenance>> {
|
||||
Ok(val.clone())
|
||||
interp_ok(val.clone())
|
||||
}
|
||||
|
||||
/// Executes a retagging operation on a compound value.
|
||||
|
@ -482,7 +483,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_kind: mir::RetagKind,
|
||||
_place: &PlaceTy<'tcx, Self::Provenance>,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called on places used for in-place function argument and return value handling.
|
||||
|
@ -516,7 +517,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
|
||||
/// Called immediately after a stack frame got pushed and its locals got initialized.
|
||||
fn after_stack_push(_ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called just before the return value is copied to the caller-provided return place.
|
||||
|
@ -524,7 +525,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_ecx: &InterpCx<'tcx, Self>,
|
||||
_frame: &Frame<'tcx, Self::Provenance, Self::FrameExtra>,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called immediately after a stack frame got popped, but before jumping back to the caller.
|
||||
|
@ -537,14 +538,14 @@ pub trait Machine<'tcx>: Sized {
|
|||
) -> InterpResult<'tcx, ReturnAction> {
|
||||
// By default, we do not support unwinding from panics
|
||||
assert!(!unwinding);
|
||||
Ok(ReturnAction::Normal)
|
||||
interp_ok(ReturnAction::Normal)
|
||||
}
|
||||
|
||||
/// Called immediately after an "immediate" local variable is read
|
||||
/// (i.e., this is called for reads that do not end up accessing addressable memory).
|
||||
#[inline(always)]
|
||||
fn after_local_read(_ecx: &InterpCx<'tcx, Self>, _local: mir::Local) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called immediately after an "immediate" local variable is assigned a new value
|
||||
|
@ -556,7 +557,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_local: mir::Local,
|
||||
_storage_live: bool,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called immediately after actual memory was allocated for a local
|
||||
|
@ -567,7 +568,7 @@ pub trait Machine<'tcx>: Sized {
|
|||
_local: mir::Local,
|
||||
_mplace: &MPlaceTy<'tcx, Self::Provenance>,
|
||||
) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Evaluate the given constant. The `eval` function will do all the required evaluation,
|
||||
|
@ -645,7 +646,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
) -> InterpResult<$tcx> {
|
||||
// For now we don't do any checking here. We can't use `tcx.sess` because that can differ
|
||||
// between crates, and we need to ensure that const-eval always behaves the same.
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -665,7 +666,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
fn ub_checks(_ecx: &InterpCx<$tcx, Self>) -> InterpResult<$tcx, bool> {
|
||||
// We can't look at `tcx.sess` here as that can differ across crates, which can lead to
|
||||
// unsound differences in evaluating the same constant at different instantiation sites.
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -675,7 +676,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
alloc: &'b Allocation,
|
||||
) -> InterpResult<$tcx, Cow<'b, Allocation<Self::Provenance>>> {
|
||||
// Overwrite default implementation: no need to adjust anything.
|
||||
Ok(Cow::Borrowed(alloc))
|
||||
interp_ok(Cow::Borrowed(alloc))
|
||||
}
|
||||
|
||||
fn init_alloc_extra(
|
||||
|
@ -685,7 +686,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
_size: Size,
|
||||
_align: Align,
|
||||
) -> InterpResult<$tcx, Self::AllocExtra> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn extern_static_pointer(
|
||||
|
@ -693,7 +694,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
def_id: DefId,
|
||||
) -> InterpResult<$tcx, Pointer> {
|
||||
// Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
|
||||
Ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
|
||||
interp_ok(Pointer::new(ecx.tcx.reserve_and_set_static_alloc(def_id).into(), Size::ZERO))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -702,7 +703,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
ptr: Pointer<CtfeProvenance>,
|
||||
_kind: Option<MemoryKind<Self::MemoryKind>>,
|
||||
) -> InterpResult<$tcx, Pointer<CtfeProvenance>> {
|
||||
Ok(ptr)
|
||||
interp_ok(ptr)
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -713,7 +714,7 @@ pub macro compile_time_machine(<$tcx: lifetime>) {
|
|||
// Allow these casts, but make the pointer not dereferenceable.
|
||||
// (I.e., they behave like transmutation.)
|
||||
// This is correct because no pointers can ever be exposed in compile-time evaluation.
|
||||
Ok(Pointer::from_addr_invalid(addr))
|
||||
interp_ok(Pointer::from_addr_invalid(addr))
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
|
|
@@ -23,7 +23,7 @@ use tracing::{debug, instrument, trace};
use super::{
AllocBytes, AllocId, AllocMap, AllocRange, Allocation, CheckAlignMsg, CheckInAllocMsg,
CtfeProvenance, GlobalAlloc, InterpCx, InterpResult, Machine, MayLeak, Misalignment, Pointer,
PointerArithmetic, Provenance, Scalar, alloc_range, err_ub, err_ub_custom, throw_ub,
PointerArithmetic, Provenance, Scalar, alloc_range, err_ub, err_ub_custom, interp_ok, throw_ub,
throw_ub_custom, throw_unsup, throw_unsup_format,
};
use crate::fluent_generated as fluent;
@@ -82,7 +82,7 @@ pub enum FnVal<'tcx, Other> {
impl<'tcx, Other> FnVal<'tcx, Other> {
pub fn as_instance(self) -> InterpResult<'tcx, Instance<'tcx>> {
match self {
FnVal::Instance(instance) => Ok(instance),
FnVal::Instance(instance) => interp_ok(instance),
FnVal::Other(_) => {
throw_unsup_format!("'foreign' function pointers are not supported in this context")
}
@@ -284,7 +284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
self.mem_copy(ptr, new_ptr.into(), old_size.min(new_size), /*nonoverlapping*/ true)?;
self.deallocate_ptr(ptr, old_size_and_align, kind)?;

Ok(new_ptr)
interp_ok(new_ptr)
}

#[instrument(skip(self), level = "debug")]
@@ -330,8 +330,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)
}
None => err_ub!(PointerUseAfterFree(alloc_id, CheckInAllocMsg::MemoryAccessTest)),
}
.into());
})
.into();
};

if alloc.mutability.is_not() {
@@ -376,7 +376,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
bug!("Nothing can be deallocated twice");
}

Ok(())
interp_ok(())
}

/// Internal helper function to determine the allocation and offset of a pointer (if any).
@@ -395,7 +395,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|this, alloc_id, offset, prov| {
let (size, align) = this
.get_live_alloc_size_and_align(alloc_id, CheckInAllocMsg::MemoryAccessTest)?;
Ok((size, align, (alloc_id, offset, prov)))
interp_ok((size, align, (alloc_id, offset, prov)))
},
)
}
@@ -412,9 +412,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let size = i64::try_from(size.bytes()).unwrap(); // it would be an error to even ask for more than isize::MAX bytes
Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
interp_ok((size, align, ()))
})?;
Ok(())
interp_ok(())
}

/// Check whether the given pointer points to live memory for a signed amount of bytes.
@@ -428,9 +428,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx> {
Self::check_and_deref_ptr(self, ptr, size, msg, |this, alloc_id, _, _| {
let (size, align) = this.get_live_alloc_size_and_align(alloc_id, msg)?;
Ok((size, align, ()))
interp_ok((size, align, ()))
})?;
Ok(())
interp_ok(())
}

/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
@@ -455,10 +455,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, Option<T>> {
// Everything is okay with size 0.
if size == 0 {
return Ok(None);
return interp_ok(None);
}

Ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
interp_ok(match this.borrow().ptr_try_get_alloc_id(ptr, size) {
Err(addr) => {
// We couldn't get a proper allocation.
throw_ub!(DanglingIntPointer { addr, inbounds_size: size, msg });
@@ -498,7 +498,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(misaligned) = misaligned {
throw_ub!(AlignmentCheckFailed(misaligned, msg))
}
Ok(())
interp_ok(())
}

pub(super) fn is_ptr_misaligned(
@@ -634,7 +634,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// `get_global_alloc` that we can actually use directly without inserting anything anywhere.
// So the error type is `InterpResult<'tcx, &Allocation<M::Provenance>>`.
let a = self.memory.alloc_map.get_or(id, || {
let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
// We have to funnel the `InterpErrorInfo` through a `Result` to match the `get_or` API,
// so we use `report_err` for that.
let alloc = self.get_global_alloc(id, /*is_write*/ false).report_err().map_err(Err)?;
match alloc {
Cow::Borrowed(alloc) => {
// We got a ref, cheaply return that as an "error" so that the
@@ -653,8 +655,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
});
// Now unpack that funny error type
match a {
Ok(a) => Ok(&a.1),
Err(a) => a,
Ok(a) => interp_ok(&a.1),
Err(a) => a.into(),
}
}

@@ -662,7 +664,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// The caller is responsible for calling the access hooks!
pub fn get_alloc_bytes_unchecked_raw(&self, id: AllocId) -> InterpResult<'tcx, *const u8> {
let alloc = self.get_alloc_raw(id)?;
Ok(alloc.get_bytes_unchecked_raw())
interp_ok(alloc.get_bytes_unchecked_raw())
}

/// Bounds-checked *but not align-checked* allocation access.
@@ -680,7 +682,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CheckInAllocMsg::MemoryAccessTest,
|this, alloc_id, offset, prov| {
let alloc = this.get_alloc_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc)))
},
)?;
// We want to call the hook on *all* accesses that involve an AllocId, including zero-sized
@@ -703,20 +705,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
range,
)?;
}
Ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
interp_ok(Some(AllocRef { alloc, range, tcx: *self.tcx, alloc_id }))
} else {
Ok(None)
interp_ok(None)
}
}

/// Return the `extra` field of the given allocation.
pub fn get_alloc_extra<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, &'a M::AllocExtra> {
Ok(&self.get_alloc_raw(id)?.extra)
interp_ok(&self.get_alloc_raw(id)?.extra)
}

/// Return the `mutability` field of the given allocation.
pub fn get_alloc_mutability<'a>(&'a self, id: AllocId) -> InterpResult<'tcx, Mutability> {
Ok(self.get_alloc_raw(id)?.mutability)
interp_ok(self.get_alloc_raw(id)?.mutability)
}

/// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
@@ -750,7 +752,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if alloc.mutability.is_not() {
throw_ub!(WriteToReadOnly(id))
}
Ok((alloc, &mut self.machine))
interp_ok((alloc, &mut self.machine))
}

/// Gives raw, mutable access to the `Allocation` address, without bounds or alignment checks.
@@ -760,7 +762,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
id: AllocId,
) -> InterpResult<'tcx, *mut u8> {
let alloc = self.get_alloc_raw_mut(id)?.0;
Ok(alloc.get_bytes_unchecked_raw_mut())
interp_ok(alloc.get_bytes_unchecked_raw_mut())
}

/// Bounds-checked *but not align-checked* allocation access.
@@ -781,7 +783,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CheckInAllocMsg::MemoryAccessTest,
|this, alloc_id, offset, prov| {
let (alloc, machine) = this.get_alloc_raw_mut(alloc_id)?;
Ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
interp_ok((alloc.size(), alloc.align, (alloc_id, offset, prov, alloc, machine)))
},
)?;

@@ -790,9 +792,9 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if !validation_in_progress {
M::before_memory_write(tcx, machine, &mut alloc.extra, (alloc_id, prov), range)?;
}
Ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
interp_ok(Some(AllocRefMut { alloc, range, tcx: *tcx, alloc_id }))
} else {
Ok(None)
interp_ok(None)
}
}

@@ -802,7 +804,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
id: AllocId,
) -> InterpResult<'tcx, (&'a mut M::AllocExtra, &'a mut M)> {
let (alloc, machine) = self.get_alloc_raw_mut(id)?;
Ok((&mut alloc.extra, machine))
interp_ok((&mut alloc.extra, machine))
}

/// Check whether an allocation is live. This is faster than calling
@@ -904,7 +906,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if matches!(kind, AllocKind::Dead) {
throw_ub!(PointerUseAfterFree(id, msg))
}
Ok((size, align))
interp_ok((size, align))
}

fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
@@ -928,7 +930,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
}
self.get_fn_alloc(alloc_id)
.ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
.ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))))
.into()
}

/// Get the dynamic type of the given vtable pointer.
@@ -951,12 +954,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if let Some(expected_dyn_type) = expected_trait {
self.check_vtable_for_type(vtable_dyn_type, expected_dyn_type)?;
}
Ok(ty)
interp_ok(ty)
}

pub fn alloc_mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
self.get_alloc_raw_mut(id)?.0.mutability = Mutability::Not;
Ok(())
interp_ok(())
}

/// Create a lazy debug printer that prints the given allocation and all allocations it points
@@ -1144,10 +1147,11 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
pub fn write_scalar(&mut self, range: AllocRange, val: Scalar<Prov>) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
debug!("write_scalar at {:?}{range:?}: {val:?}", self.alloc_id);
Ok(self
.alloc
self.alloc
.write_scalar(&self.tcx, range, val)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}

/// `offset` is relative to this allocation reference, not the base of the allocation.
@@ -1158,26 +1162,27 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>
/// Mark the given sub-range (relative to this allocation reference) as uninitialized.
pub fn write_uninit(&mut self, range: AllocRange) -> InterpResult<'tcx> {
let range = self.range.subrange(range);
Ok(self
.alloc
self.alloc
.write_uninit(&self.tcx, range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}

/// Mark the entire referenced range as uninitialized
pub fn write_uninit_full(&mut self) -> InterpResult<'tcx> {
Ok(self
.alloc
self.alloc
.write_uninit(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}

/// Remove all provenance in the reference range.
pub fn clear_provenance(&mut self) -> InterpResult<'tcx> {
Ok(self
.alloc
self.alloc
.clear_provenance(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}
}

@@ -1189,12 +1194,10 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr
read_provenance: bool,
) -> InterpResult<'tcx, Scalar<Prov>> {
let range = self.range.subrange(range);
let res = self
.alloc
self.alloc
.read_scalar(&self.tcx, range, read_provenance)
.map_err(|e| e.to_interp_error(self.alloc_id))?;
debug!("read_scalar at {:?}{range:?}: {res:?}", self.alloc_id);
Ok(res)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}

/// `range` is relative to this allocation reference, not the base of the allocation.
@@ -1212,10 +1215,10 @@ impl<'a, 'tcx, Prov: Provenance, Extra, Bytes: AllocBytes> AllocRef<'a, 'tcx, Pr

/// `range` is relative to this allocation reference, not the base of the allocation.
pub fn get_bytes_strip_provenance<'b>(&'b self) -> InterpResult<'tcx, &'a [u8]> {
Ok(self
.alloc
self.alloc
.get_bytes_strip_provenance(&self.tcx, self.range)
.map_err(|e| e.to_interp_error(self.alloc_id))?)
.map_err(|e| e.to_interp_error(self.alloc_id))
.into()
}

/// Returns whether the allocation has provenance anywhere in the range of the `AllocRef`.
@@ -1236,14 +1239,16 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, &[u8]> {
let Some(alloc_ref) = self.get_ptr_alloc(ptr, size)? else {
// zero-sized access
return Ok(&[]);
return interp_ok(&[]);
};
// Side-step AllocRef and directly access the underlying bytes more efficiently.
// (We are staying inside the bounds here so all is good.)
Ok(alloc_ref
.alloc
.get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?)
interp_ok(
alloc_ref
.alloc
.get_bytes_strip_provenance(&alloc_ref.tcx, alloc_ref.range)
.map_err(|e| e.to_interp_error(alloc_ref.alloc_id))?,
)
}

/// Writes the given stream of bytes into memory.
@@ -1263,7 +1268,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let Some(alloc_ref) = self.get_ptr_alloc_mut(ptr, size)? else {
// zero-sized access
assert_matches!(src.next(), None, "iterator said it was empty but returned an element");
return Ok(());
return interp_ok(());
};

// Side-step AllocRef and directly access the underlying bytes more efficiently.
@@ -1279,7 +1284,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
*dest = src.next().expect("iterator was shorter than it said it would be");
}
assert_matches!(src.next(), None, "iterator was longer than it said it would be");
Ok(())
interp_ok(())
}

pub fn mem_copy(
@@ -1316,7 +1321,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// Source alloc preparations and access hooks.
let Some((src_alloc_id, src_offset, src_prov)) = src_parts else {
// Zero-sized *source*, that means dest is also zero-sized and we have nothing to do.
return Ok(());
return interp_ok(());
};
let src_alloc = self.get_alloc_raw(src_alloc_id)?;
let src_range = alloc_range(src_offset, size);
@@ -1332,7 +1337,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// We already did the source checks and called the hooks so we are good to return early.
let Some((dest_alloc_id, dest_offset, dest_prov)) = dest_parts else {
// Zero-sized *destination*.
return Ok(());
return interp_ok(());
};

// Prepare getting source provenance.
@@ -1375,7 +1380,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
.write_uninit(&tcx, dest_range)
.map_err(|e| e.to_interp_error(dest_alloc_id))?;
// We can forget about the provenance, this is all not initialized anyway.
return Ok(());
return interp_ok(());
}

// SAFE: The above indexing would have panicked if there weren't at least `size` bytes
@@ -1432,7 +1437,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// copy the provenance to the destination
dest_alloc.provenance_apply_copy(provenance);

Ok(())
interp_ok(())
}
}

@@ -1441,7 +1446,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> {
Ok(match scalar.try_to_scalar_int() {
interp_ok(match scalar.try_to_scalar_int() {
Ok(int) => int.is_null(),
Err(_) => {
// Can only happen during CTFE.
@@ -1508,13 +1513,14 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
ptr: Pointer<Option<M::Provenance>>,
size: i64,
) -> InterpResult<'tcx, (AllocId, Size, M::ProvenanceExtra)> {
self.ptr_try_get_alloc_id(ptr, size).map_err(|offset| {
err_ub!(DanglingIntPointer {
addr: offset,
inbounds_size: size,
msg: CheckInAllocMsg::InboundsTest
self.ptr_try_get_alloc_id(ptr, size)
.map_err(|offset| {
err_ub!(DanglingIntPointer {
addr: offset,
inbounds_size: size,
msg: CheckInAllocMsg::InboundsTest
})
})
.into()
})
}
}

@@ -16,7 +16,7 @@ use tracing::trace;
use super::{
CtfeProvenance, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, OffsetMode,
PlaceTy, Pointer, Projectable, Provenance, Scalar, alloc_range, err_ub, from_known_layout,
mir_assign_valid_types, throw_ub,
interp_ok, mir_assign_valid_types, throw_ub,
};

/// An `Immediate` represents a single immediate self-contained Rust value.
@@ -149,7 +149,7 @@ impl<Prov: Provenance> Immediate<Prov> {
}
Immediate::Uninit => {}
}
Ok(())
interp_ok(())
}
}

@@ -307,7 +307,7 @@ impl<'tcx, Prov: Provenance> ImmTy<'tcx, Prov> {
data_size: s.size().bytes(),
}));
}
Ok(s)
interp_ok(s)
}

#[inline]
@@ -430,7 +430,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
assert_matches!(meta, MemPlaceMeta::None); // we can't store this anywhere anyway
Ok(self.offset_(offset, layout, ecx))
interp_ok(self.offset_(offset, layout, ecx))
}

#[inline(always)]
@@ -438,7 +438,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for ImmTy<'tcx, Prov> {
&self,
_ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone().into())
interp_ok(self.clone().into())
}
}

@@ -514,11 +514,13 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, Self> {
match self.as_mplace_or_imm() {
Left(mplace) => Ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into()),
Left(mplace) => {
interp_ok(mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into())
}
Right(imm) => {
assert_matches!(meta, MemPlaceMeta::None); // no place to store metadata here
// Every part of an uninit is uninit.
Ok(imm.offset_(offset, layout, ecx).into())
interp_ok(imm.offset_(offset, layout, ecx).into())
}
}
}
@@ -528,7 +530,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for OpTy<'tcx, Prov> {
&self,
_ecx: &InterpCx<'tcx, M>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
Ok(self.clone())
interp_ok(self.clone())
}
}

@@ -543,12 +545,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, Option<ImmTy<'tcx, M::Provenance>>> {
if mplace.layout.is_unsized() {
// Don't touch unsized
return Ok(None);
return interp_ok(None);
}

let Some(alloc) = self.get_place_alloc(mplace)? else {
// zero-sized type can be left uninit
return Ok(Some(ImmTy::uninit(mplace.layout)));
return interp_ok(Some(ImmTy::uninit(mplace.layout)));
};

// It may seem like all types with `Scalar` or `ScalarPair` ABI are fair game at this point.
@@ -557,7 +559,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
// case where some of the bytes are initialized and others are not. So, we need an extra
// check that walks over the type of `mplace` to make sure it is truly correct to treat this
// like a `Scalar` (or `ScalarPair`).
Ok(match mplace.layout.abi {
interp_ok(match mplace.layout.abi {
Abi::Scalar(abi::Scalar::Initialized { value: s, .. }) => {
let size = s.size(self);
assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
@@ -606,7 +608,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
&self,
src: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Either<MPlaceTy<'tcx, M::Provenance>, ImmTy<'tcx, M::Provenance>>> {
Ok(match src.to_op(self)?.as_mplace_or_imm() {
interp_ok(match src.to_op(self)?.as_mplace_or_imm() {
Left(ref mplace) => {
if let Some(val) = self.read_immediate_from_mplace_raw(mplace)? {
Right(val)
@@ -637,7 +639,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
if matches!(*imm, Immediate::Uninit) {
throw_ub!(InvalidUninitBytes(None));
}
Ok(imm)
interp_ok(imm)
}

/// Read a scalar from a place
@@ -645,7 +647,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
&self,
op: &impl Projectable<'tcx, M::Provenance>,
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
Ok(self.read_immediate(op)?.to_scalar())
interp_ok(self.read_immediate(op)?.to_scalar())
}

// Pointer-sized reads are fairly common and need target layout access, so we wrap them in
@@ -678,7 +680,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
let len = mplace.len(self)?;
let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?;
let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?;
Ok(str)
interp_ok(str)
}

/// Read from a local of the current frame.
@@ -698,7 +700,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
assert!(!layout.is_unsized());
}
M::after_local_read(self, local)?;
Ok(OpTy { op, layout })
interp_ok(OpTy { op, layout })
}

/// Every place can be read from, so we can turn them into an operand.
@@ -709,12 +711,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
place: &PlaceTy<'tcx, M::Provenance>,
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
match place.as_mplace_or_local() {
Left(mplace) => Ok(mplace.into()),
Left(mplace) => interp_ok(mplace.into()),
Right((local, offset, locals_addr, _)) => {
debug_assert!(place.layout.is_sized()); // only sized locals can ever be `Place::Local`.
debug_assert_eq!(locals_addr, self.frame().locals_addr());
let base = self.local_to_op(local, None)?;
Ok(match offset {
interp_ok(match offset {
Some(offset) => base.offset(offset, place.layout, self)?,
None => {
// In the common case this hasn't been projected.
@@ -764,7 +766,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
)
}
}
Ok(op)
interp_ok(op)
}

/// Evaluate the operand, returning a place where you can then find the data.
@@ -794,7 +796,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
}
};
trace!("{:?}: {:?}", mir_op, op);
Ok(op)
interp_ok(op)
}

pub(crate) fn const_val_to_op(
@@ -805,12 +807,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
// Other cases need layout.
let adjust_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
interp_ok(match scalar {
Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_root_pointer(ptr)?, size),
Scalar::Int(int) => Scalar::Int(int),
})
};
let layout = from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty))?;
let layout =
from_known_layout(self.tcx, self.param_env, layout, || self.layout_of(ty).into())?;
let imm = match val_val {
mir::ConstValue::Indirect { alloc_id, offset } => {
// This is const data, no mutation allowed.
@@ -818,7 +821,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
CtfeProvenance::from(alloc_id).as_immutable(),
offset,
))?;
return Ok(self.ptr_to_mplace(ptr.into(), layout).into());
return interp_ok(self.ptr_to_mplace(ptr.into(), layout).into());
}
mir::ConstValue::Scalar(x) => adjust_scalar(x)?.into(),
mir::ConstValue::ZeroSized => Immediate::Uninit,
@@ -829,7 +832,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
Immediate::new_slice(self.global_root_pointer(ptr)?.into(), meta, self)
}
};
Ok(OpTy { op: Operand::Immediate(imm), layout })
interp_ok(OpTy { op: Operand::Immediate(imm), layout })
}
}

@ -9,7 +9,7 @@ use rustc_span::symbol::sym;
|
|||
use rustc_target::abi::Size;
|
||||
use tracing::trace;
|
||||
|
||||
use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, throw_ub};
|
||||
use super::{ImmTy, InterpCx, Machine, MemPlaceMeta, interp_ok, throw_ub};
|
||||
|
||||
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||
fn three_way_compare<T: Ord>(&self, lhs: T, rhs: T) -> ImmTy<'tcx, M::Provenance> {
|
||||
|
@ -156,7 +156,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
});
|
||||
}
|
||||
|
||||
return Ok(ImmTy::from_scalar_int(result, left.layout));
|
||||
return interp_ok(ImmTy::from_scalar_int(result, left.layout));
|
||||
}
|
||||
|
||||
// For the remaining ops, the types must be the same on both sides
|
||||
|
@ -181,10 +181,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
_ => None,
|
||||
};
|
||||
if let Some(op) = op {
|
||||
return Ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
|
||||
return interp_ok(ImmTy::from_bool(op(&l_signed(), &r_signed()), *self.tcx));
|
||||
}
|
||||
if bin_op == Cmp {
|
||||
return Ok(self.three_way_compare(l_signed(), r_signed()));
|
||||
return interp_ok(self.three_way_compare(l_signed(), r_signed()));
|
||||
}
|
||||
let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
|
||||
Div if r.is_null() => throw_ub!(DivisionByZero),
|
||||
|
@ -221,7 +221,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
throw_ub!(ArithOverflow { intrinsic });
|
||||
}
|
||||
let res = ImmTy::from_scalar_int(result, left.layout);
|
||||
return Ok(if with_overflow {
|
||||
return interp_ok(if with_overflow {
|
||||
let overflow = ImmTy::from_bool(overflow, *self.tcx);
|
||||
ImmTy::from_pair(res, overflow, *self.tcx)
|
||||
} else {
|
||||
|
@ -234,10 +234,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let r = r_unsigned();
|
||||
|
||||
if bin_op == Cmp {
|
||||
return Ok(self.three_way_compare(l, r));
|
||||
return interp_ok(self.three_way_compare(l, r));
|
||||
}
|
||||
|
||||
Ok(match bin_op {
|
||||
interp_ok(match bin_op {
|
||||
Eq => ImmTy::from_bool(l == r, *self.tcx),
|
||||
Ne => ImmTy::from_bool(l != r, *self.tcx),
|
||||
|
||||
|
@ -339,7 +339,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
throw_ub!(PointerArithOverflow)
|
||||
}
|
||||
let offset_ptr = self.ptr_offset_inbounds(ptr, offset_bytes)?;
|
||||
Ok(ImmTy::from_scalar(Scalar::from_maybe_pointer(offset_ptr, self), left.layout))
|
||||
interp_ok(ImmTy::from_scalar(
|
||||
Scalar::from_maybe_pointer(offset_ptr, self),
|
||||
left.layout,
|
||||
))
|
||||
}
|
||||
|
||||
// Fall back to machine hook so Miri can support more pointer ops.
|
||||
|
@ -366,20 +369,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let left = left.to_scalar();
|
||||
let right = right.to_scalar();
|
||||
Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
|
||||
interp_ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
|
||||
}
|
||||
ty::Bool => {
|
||||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let left = left.to_scalar();
|
||||
let right = right.to_scalar();
|
||||
Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
|
||||
interp_ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
|
||||
}
|
||||
ty::Float(fty) => {
|
||||
assert_eq!(left.layout.ty, right.layout.ty);
|
||||
let layout = left.layout;
|
||||
let left = left.to_scalar();
|
||||
let right = right.to_scalar();
|
||||
Ok(match fty {
|
||||
interp_ok(match fty {
|
||||
FloatTy::F16 => {
|
||||
self.binary_float_op(bin_op, layout, left.to_f16()?, right.to_f16()?)
|
||||
}
|
||||
|
@ -447,7 +450,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
Not => !val,
|
||||
_ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
|
||||
};
|
||||
Ok(ImmTy::from_bool(res, *self.tcx))
|
||||
interp_ok(ImmTy::from_bool(res, *self.tcx))
|
||||
}
|
||||
ty::Float(fty) => {
|
||||
let val = val.to_scalar();
|
||||
|
@ -462,7 +465,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
FloatTy::F64 => Scalar::from_f64(-val.to_f64()?),
|
||||
FloatTy::F128 => Scalar::from_f128(-val.to_f128()?),
|
||||
};
|
||||
Ok(ImmTy::from_scalar(res, layout))
|
||||
interp_ok(ImmTy::from_scalar(res, layout))
|
||||
}
|
||||
ty::Int(..) => {
|
||||
let val = val.to_scalar().to_int(layout.size)?;
|
||||
|
@ -472,7 +475,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
_ => span_bug!(self.cur_span(), "Invalid integer op {:?}", un_op),
|
||||
};
|
||||
let res = ScalarInt::truncate_from_int(res, layout.size).0;
|
||||
Ok(ImmTy::from_scalar(res.into(), layout))
|
||||
interp_ok(ImmTy::from_scalar(res.into(), layout))
|
||||
}
|
||||
ty::Uint(..) => {
|
||||
let val = val.to_scalar().to_uint(layout.size)?;
|
||||
|
@ -481,12 +484,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
_ => span_bug!(self.cur_span(), "Invalid unsigned integer op {:?}", un_op),
|
||||
};
|
||||
let res = ScalarInt::truncate_from_uint(res, layout.size).0;
|
||||
Ok(ImmTy::from_scalar(res.into(), layout))
|
||||
interp_ok(ImmTy::from_scalar(res.into(), layout))
|
||||
}
|
||||
ty::RawPtr(..) | ty::Ref(..) => {
|
||||
assert_eq!(un_op, PtrMetadata);
|
||||
let (_, meta) = val.to_scalar_and_meta();
|
||||
Ok(match meta {
|
||||
interp_ok(match meta {
|
||||
MemPlaceMeta::Meta(scalar) => {
|
||||
let ty = un_op.ty(*self.tcx, val.layout.ty);
|
||||
let layout = self.layout_of(ty)?;
|
||||
|
@ -514,7 +517,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let layout = self.layout_of(arg_ty)?;
|
||||
let usize_layout = || self.layout_of(self.tcx.types.usize).unwrap();
|
||||
|
||||
Ok(match null_op {
|
||||
interp_ok(match null_op {
|
||||
SizeOf => {
|
||||
if !layout.abi.is_sized() {
|
||||
span_bug!(self.cur_span(), "unsized type for `NullaryOp::SizeOf`");
|
||||
|
|
|
@ -15,7 +15,7 @@ use tracing::{instrument, trace};
|
|||
use super::{
|
||||
AllocRef, AllocRefMut, CheckAlignMsg, CtfeProvenance, ImmTy, Immediate, InterpCx, InterpResult,
|
||||
Machine, MemoryKind, Misalignment, OffsetMode, OpTy, Operand, Pointer, Projectable, Provenance,
|
||||
Scalar, alloc_range, mir_assign_valid_types,
|
||||
Scalar, alloc_range, interp_ok, mir_assign_valid_types,
|
||||
};
|
||||
|
||||
#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug)]
|
||||
|
@ -90,7 +90,7 @@ impl<Prov: Provenance> MemPlace<Prov> {
|
|||
}
|
||||
OffsetMode::Wrapping => self.ptr.wrapping_offset(offset, ecx),
|
||||
};
|
||||
Ok(MemPlace { ptr, meta, misaligned: self.misaligned })
|
||||
interp_ok(MemPlace { ptr, meta, misaligned: self.misaligned })
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -163,7 +163,10 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
|
|||
layout: TyAndLayout<'tcx>,
|
||||
ecx: &InterpCx<'tcx, M>,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
Ok(MPlaceTy { mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?, layout })
|
||||
interp_ok(MPlaceTy {
|
||||
mplace: self.mplace.offset_with_meta_(offset, mode, meta, ecx)?,
|
||||
layout,
|
||||
})
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -171,7 +174,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
|
|||
&self,
|
||||
_ecx: &InterpCx<'tcx, M>,
|
||||
) -> InterpResult<'tcx, OpTy<'tcx, M::Provenance>> {
|
||||
Ok(self.clone().into())
|
||||
interp_ok(self.clone().into())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -279,7 +282,7 @@ impl<'tcx, Prov: Provenance> Projectable<'tcx, Prov> for PlaceTy<'tcx, Prov> {
|
|||
layout: TyAndLayout<'tcx>,
|
||||
ecx: &InterpCx<'tcx, M>,
|
||||
) -> InterpResult<'tcx, Self> {
|
||||
Ok(match self.as_mplace_or_local() {
|
||||
interp_ok(match self.as_mplace_or_local() {
|
||||
Left(mplace) => mplace.offset_with_meta(offset, mode, meta, layout, ecx)?.into(),
|
||||
Right((local, old_offset, locals_addr, _)) => {
|
||||
debug_assert!(layout.is_sized(), "unsized locals should live in memory");
|
||||
|
@ -367,7 +370,7 @@ impl<'tcx, Prov: Provenance> Writeable<'tcx, Prov> for MPlaceTy<'tcx, Prov> {
|
|||
&self,
|
||||
_ecx: &mut InterpCx<'tcx, M>,
|
||||
) -> InterpResult<'tcx, MPlaceTy<'tcx, Prov>> {
|
||||
Ok(self.clone())
|
||||
interp_ok(self.clone())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -425,7 +428,7 @@ where
|
|||
// `ref_to_mplace` is called on raw pointers even if they don't actually get dereferenced;
|
||||
// we hence can't call `size_and_align_of` since that asserts more validity than we want.
|
||||
let ptr = ptr.to_pointer(self)?;
|
||||
Ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
|
||||
interp_ok(self.ptr_with_meta_to_mplace(ptr, meta, layout, /*unaligned*/ false))
|
||||
}
|
||||
|
||||
/// Turn a mplace into a (thin or wide) mutable raw pointer, pointing to the same space.
|
||||
|
@ -437,7 +440,7 @@ where
|
|||
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
|
||||
let imm = mplace.mplace.to_ref(self);
|
||||
let layout = self.layout_of(Ty::new_mut_ptr(self.tcx.tcx, mplace.layout.ty))?;
|
||||
Ok(ImmTy::from_immediate(imm, layout))
|
||||
interp_ok(ImmTy::from_immediate(imm, layout))
|
||||
}
|
||||
|
||||
/// Take an operand, representing a pointer, and dereference it to a place.
|
||||
|
@ -458,7 +461,7 @@ where
|
|||
trace!("deref to {} on {:?}", val.layout.ty, *val);
|
||||
|
||||
let mplace = self.ref_to_mplace(&val)?;
|
||||
Ok(mplace)
|
||||
interp_ok(mplace)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -474,7 +477,7 @@ where
|
|||
// If an access is both OOB and misaligned, we want to see the bounds error.
|
||||
let a = self.get_ptr_alloc(mplace.ptr(), size)?;
|
||||
self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn)?;
|
||||
Ok(a)
|
||||
interp_ok(a)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -489,10 +492,10 @@ where
|
|||
// We check alignment separately, and raise that error *after* checking everything else.
|
||||
// If an access is both OOB and misaligned, we want to see the bounds error.
|
||||
// However we have to call `check_misalign` first to make the borrow checker happy.
|
||||
let misalign_err = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
|
||||
let a = self.get_ptr_alloc_mut(mplace.ptr(), size)?;
|
||||
misalign_err?;
|
||||
Ok(a)
|
||||
let misalign_res = self.check_misalign(mplace.mplace.misaligned, CheckAlignMsg::BasedOn);
|
||||
// An error from get_ptr_alloc_mut takes precedence.
|
||||
let (a, ()) = self.get_ptr_alloc_mut(mplace.ptr(), size).and(misalign_res)?;
|
||||
interp_ok(a)
|
||||
}
|
||||
|
||||
/// Turn a local in the current frame into a place.
|
||||
|
@ -512,7 +515,7 @@ where
|
|||
Operand::Indirect(mplace) => Place::Ptr(*mplace),
|
||||
}
|
||||
};
|
||||
Ok(PlaceTy { place, layout })
|
||||
interp_ok(PlaceTy { place, layout })
|
||||
}
|
||||
|
||||
/// Computes a place. You should only use this if you intend to write into this
|
||||
|
@ -549,7 +552,7 @@ where
|
|||
)
|
||||
}
|
||||
}
|
||||
Ok(place)
|
||||
interp_ok(place)
|
||||
}
|
||||
|
||||
/// Given a place, returns either the underlying mplace or a reference to where the value of
|
||||
|
@ -565,7 +568,7 @@ where
|
|||
(&mut Immediate<M::Provenance>, TyAndLayout<'tcx>, mir::Local),
|
||||
>,
|
||||
> {
|
||||
Ok(match place.to_place().as_mplace_or_local() {
|
||||
interp_ok(match place.to_place().as_mplace_or_local() {
|
||||
Left(mplace) => Left(mplace),
|
||||
Right((local, offset, locals_addr, layout)) => {
|
||||
if offset.is_some() {
|
||||
|
@ -610,7 +613,7 @@ where
|
|||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Write a scalar to a place
|
||||
|
@ -660,7 +663,7 @@ where
|
|||
self.write_immediate_to_mplace_no_validate(src, mplace.layout, mplace.mplace)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Write an immediate to memory.
|
||||
|
@ -683,7 +686,7 @@ where
|
|||
let tcx = *self.tcx;
|
||||
let Some(mut alloc) = self.get_place_alloc_mut(&MPlaceTy { mplace: dest, layout })? else {
|
||||
// zero-sized access
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
};
|
||||
|
||||
match value {
|
||||
|
@ -708,7 +711,7 @@ where
|
|||
alloc.write_scalar(alloc_range(Size::ZERO, a_val.size()), a_val)?;
|
||||
alloc.write_scalar(alloc_range(b_offset, b_val.size()), b_val)?;
|
||||
// We don't have to reset padding here, `write_immediate` will anyway do a validation run.
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
Immediate::Uninit => alloc.write_uninit_full(),
|
||||
}
|
||||
|
@ -729,12 +732,12 @@ where
|
|||
Left(mplace) => {
|
||||
let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
|
||||
// Zero-sized access
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
};
|
||||
alloc.write_uninit_full()?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Remove all provenance in the given place.
|
||||
|
@ -753,12 +756,12 @@ where
|
|||
Left(mplace) => {
|
||||
let Some(mut alloc) = self.get_place_alloc_mut(&mplace)? else {
|
||||
// Zero-sized access
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
};
|
||||
alloc.clear_provenance()?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Copies the data from an operand to a place.
|
||||
|
@ -841,7 +844,7 @@ where
|
|||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Copies the data from an operand to a place.
|
||||
|
@ -918,7 +921,7 @@ where
|
|||
self.mem_copy(src.ptr(), dest.ptr(), dest_size, /*nonoverlapping*/ true)?;
|
||||
self.check_misalign(src.mplace.misaligned, CheckAlignMsg::BasedOn)?;
|
||||
self.check_misalign(dest.mplace.misaligned, CheckAlignMsg::BasedOn)?;
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Ensures that a place is in memory, and returns where it is.
|
||||
|
@ -980,7 +983,7 @@ where
|
|||
Place::Ptr(mplace) => mplace,
|
||||
};
|
||||
// Return with the original layout and align, so that the caller can go on
|
||||
Ok(MPlaceTy { mplace, layout: place.layout })
|
||||
interp_ok(MPlaceTy { mplace, layout: place.layout })
|
||||
}
|
||||
|
||||
pub fn allocate_dyn(
|
||||
|
@ -993,7 +996,7 @@ where
|
|||
span_bug!(self.cur_span(), "cannot allocate space for `extern` type, size is not known")
|
||||
};
|
||||
let ptr = self.allocate_ptr(size, align, kind)?;
|
||||
Ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
|
||||
interp_ok(self.ptr_with_meta_to_mplace(ptr.into(), meta, layout, /*unaligned*/ false))
|
||||
}
|
||||
|
||||
pub fn allocate(
|
||||
|
@ -1028,7 +1031,7 @@ where
|
|||
};
|
||||
let meta = Scalar::from_target_usize(u64::try_from(str.len()).unwrap(), self);
|
||||
let layout = self.layout_of(self.tcx.types.str_).unwrap();
|
||||
Ok(self.ptr_with_meta_to_mplace(
|
||||
interp_ok(self.ptr_with_meta_to_mplace(
|
||||
ptr.into(),
|
||||
MemPlaceMeta::Meta(meta),
|
||||
layout,
|
||||
|
@ -1044,7 +1047,7 @@ where
|
|||
let _ = self.tcx.global_alloc(raw.alloc_id);
|
||||
let ptr = self.global_root_pointer(Pointer::from(raw.alloc_id))?;
|
||||
let layout = self.layout_of(raw.ty)?;
|
||||
Ok(self.ptr_to_mplace(ptr.into(), layout))
|
||||
interp_ok(self.ptr_to_mplace(ptr.into(), layout))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -18,7 +18,7 @@ use tracing::{debug, instrument};
|
|||
|
||||
use super::{
|
||||
InterpCx, InterpResult, MPlaceTy, Machine, MemPlaceMeta, OpTy, Provenance, Scalar, err_ub,
|
||||
throw_ub, throw_unsup,
|
||||
interp_ok, throw_ub, throw_unsup,
|
||||
};
|
||||
|
||||
/// Describes the constraints placed on offset-projections.
|
||||
|
@ -54,7 +54,7 @@ pub trait Projectable<'tcx, Prov: Provenance>: Sized + std::fmt::Debug {
|
|||
// Go through the layout. There are lots of types that support a length,
|
||||
// e.g., SIMD types. (But not all repr(simd) types even have FieldsShape::Array!)
|
||||
match layout.fields {
|
||||
abi::FieldsShape::Array { count, .. } => Ok(count),
|
||||
abi::FieldsShape::Array { count, .. } => interp_ok(count),
|
||||
_ => bug!("len not supported on sized type {:?}", layout.ty),
|
||||
}
|
||||
}
|
||||
|
@ -115,9 +115,9 @@ impl<'a, 'tcx, Prov: Provenance, P: Projectable<'tcx, Prov>> ArrayIterator<'a, '
|
|||
&mut self,
|
||||
ecx: &InterpCx<'tcx, M>,
|
||||
) -> InterpResult<'tcx, Option<(u64, P)>> {
|
||||
let Some(idx) = self.range.next() else { return Ok(None) };
|
||||
let Some(idx) = self.range.next() else { return interp_ok(None) };
|
||||
// We use `Wrapping` here since the offset has already been checked when the iterator was created.
|
||||
Ok(Some((
|
||||
interp_ok(Some((
|
||||
idx,
|
||||
self.base.offset_with_meta(
|
||||
self.stride * idx,
|
||||
|
@ -258,7 +258,7 @@ where
|
|||
// SIMD types must be newtypes around arrays, so all we have to do is project to their only field.
|
||||
let array = self.project_field(base, 0)?;
|
||||
let len = array.len(self)?;
|
||||
Ok((array, len))
|
||||
interp_ok((array, len))
|
||||
}
|
||||
|
||||
fn project_constant_index<P: Projectable<'tcx, M::Provenance>>(
|
||||
|
@ -300,7 +300,13 @@ where
|
|||
debug!("project_array_fields: {base:?} {len}");
|
||||
base.offset(len * stride, self.layout_of(self.tcx.types.unit).unwrap(), self)?;
|
||||
// Create the iterator.
|
||||
Ok(ArrayIterator { base, range: 0..len, stride, field_layout, _phantom: PhantomData })
|
||||
interp_ok(ArrayIterator {
|
||||
base,
|
||||
range: 0..len,
|
||||
stride,
|
||||
field_layout,
|
||||
_phantom: PhantomData,
|
||||
})
|
||||
}
|
||||
|
||||
/// Subslicing
|
||||
|
@ -367,7 +373,7 @@ where
|
|||
P: Projectable<'tcx, M::Provenance> + From<MPlaceTy<'tcx, M::Provenance>> + std::fmt::Debug,
|
||||
{
|
||||
use rustc_middle::mir::ProjectionElem::*;
|
||||
Ok(match proj_elem {
|
||||
interp_ok(match proj_elem {
|
||||
OpaqueCast(ty) => {
|
||||
span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck")
|
||||
}
|
||||
|
|
|
@ -17,7 +17,7 @@ use tracing::{info_span, instrument, trace};
|
|||
use super::{
|
||||
AllocId, CtfeProvenance, Immediate, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace,
|
||||
MemPlaceMeta, MemoryKind, Operand, Pointer, Provenance, ReturnAction, Scalar,
|
||||
from_known_layout, throw_ub, throw_unsup,
|
||||
from_known_layout, interp_ok, throw_ub, throw_unsup,
|
||||
};
|
||||
use crate::errors;
|
||||
|
||||
|
@ -189,7 +189,7 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
|
|||
pub(super) fn access(&self) -> InterpResult<'tcx, &Operand<Prov>> {
|
||||
match &self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
|
||||
LocalValue::Live(val) => Ok(val),
|
||||
LocalValue::Live(val) => interp_ok(val),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -199,7 +199,7 @@ impl<'tcx, Prov: Provenance> LocalState<'tcx, Prov> {
|
|||
pub(super) fn access_mut(&mut self) -> InterpResult<'tcx, &mut Operand<Prov>> {
|
||||
match &mut self.value {
|
||||
LocalValue::Dead => throw_ub!(DeadLocal), // could even be "invalid program"?
|
||||
LocalValue::Live(val) => Ok(val),
|
||||
LocalValue::Live(val) => interp_ok(val),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -391,7 +391,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let span = info_span!("frame", "{}", instance);
|
||||
self.frame_mut().tracing_span.enter(span);
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Low-level helper that pops a stack frame from the stack and returns some information about
|
||||
|
@ -426,7 +426,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
return_action = ReturnAction::NoCleanup;
|
||||
};
|
||||
|
||||
Ok(StackPopInfo { return_action, return_to_block, return_place })
|
||||
interp_ok(StackPopInfo { return_action, return_to_block, return_place })
|
||||
}
|
||||
|
||||
/// A private helper for [`pop_stack_frame_raw`](InterpCx::pop_stack_frame_raw).
|
||||
|
@ -449,7 +449,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
}
|
||||
|
||||
Ok(cleanup)
|
||||
interp_ok(cleanup)
|
||||
}
|
||||
|
||||
/// In the current stack frame, mark all locals as live that are not arguments and don't have
|
||||
|
@ -464,7 +464,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
self.storage_live(local)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
pub fn storage_live_dyn(
|
||||
|
@ -550,7 +550,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// If the local is already live, deallocate its old memory.
|
||||
let old = mem::replace(&mut self.frame_mut().locals[local].value, local_val);
|
||||
self.deallocate_local(old)?;
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Mark a storage as live, killing the previous content.
|
||||
|
@ -566,7 +566,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// If the local is already dead, this is a NOP.
|
||||
let old = mem::replace(&mut self.frame_mut().locals[local].value, LocalValue::Dead);
|
||||
self.deallocate_local(old)?;
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
fn deallocate_local(&mut self, local: LocalValue<M::Provenance>) -> InterpResult<'tcx> {
|
||||
|
@ -581,7 +581,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
);
|
||||
self.deallocate_ptr(ptr, None, MemoryKind::Stack)?;
|
||||
};
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -593,19 +593,19 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
) -> InterpResult<'tcx, TyAndLayout<'tcx>> {
|
||||
let state = &frame.locals[local];
|
||||
if let Some(layout) = state.layout.get() {
|
||||
return Ok(layout);
|
||||
return interp_ok(layout);
|
||||
}
|
||||
|
||||
let layout = from_known_layout(self.tcx, self.param_env, layout, || {
|
||||
let local_ty = frame.body.local_decls[local].ty;
|
||||
let local_ty =
|
||||
self.instantiate_from_frame_and_normalize_erasing_regions(frame, local_ty)?;
|
||||
self.layout_of(local_ty)
|
||||
self.layout_of(local_ty).into()
|
||||
})?;
|
||||
|
||||
// Layouts of locals are requested a lot, so we cache them.
|
||||
state.layout.set(Some(layout));
|
||||
Ok(layout)
|
||||
interp_ok(layout)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -14,7 +14,7 @@ use tracing::{info, instrument, trace};
|
|||
|
||||
use super::{
|
||||
FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy,
|
||||
Projectable, Scalar, throw_ub,
|
||||
Projectable, Scalar, interp_ok, throw_ub,
|
||||
};
|
||||
use crate::util;
|
||||
|
||||
|
@ -36,7 +36,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
#[inline(always)]
|
||||
pub fn step(&mut self) -> InterpResult<'tcx, bool> {
|
||||
if self.stack().is_empty() {
|
||||
return Ok(false);
|
||||
return interp_ok(false);
|
||||
}
|
||||
|
||||
let Either::Left(loc) = self.frame().loc else {
|
||||
|
@ -44,7 +44,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// Just go on unwinding.
|
||||
trace!("unwinding: skipping frame");
|
||||
self.return_from_current_stack_frame(/* unwinding */ true)?;
|
||||
return Ok(true);
|
||||
return interp_ok(true);
|
||||
};
|
||||
let basic_block = &self.body().basic_blocks[loc.block];
|
||||
|
||||
|
@ -55,7 +55,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
assert_eq!(old_frames, self.frame_idx());
|
||||
// Advance the program counter.
|
||||
self.frame_mut().loc.as_mut().left().unwrap().statement_index += 1;
|
||||
return Ok(true);
|
||||
return interp_ok(true);
|
||||
}
|
||||
|
||||
M::before_terminator(self)?;
|
||||
|
@ -67,7 +67,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
info!("// executing {:?}", loc.block);
|
||||
}
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
|
||||
/// Runs the interpretation logic for the given `mir::Statement` at the current frame and
|
||||
|
@ -145,7 +145,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
Nop => {}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Evaluate an assignment statement.
|
||||
|
@ -277,7 +277,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
|
||||
trace!("{:?}", self.dump_place(&dest));
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Writes the aggregate to the destination.
|
||||
|
@ -313,7 +313,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, self);
|
||||
let ptr = ImmTy::from_immediate(ptr_imm, dest.layout);
|
||||
self.copy_op(&ptr, dest)?;
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
_ => (FIRST_VARIANT, dest.clone(), None),
|
||||
};
|
||||
|
@ -365,7 +365,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
)?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Evaluate the arguments of a function call
|
||||
|
@ -373,7 +373,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
&self,
|
||||
op: &mir::Operand<'tcx>,
|
||||
) -> InterpResult<'tcx, FnArg<'tcx, M::Provenance>> {
|
||||
Ok(match op {
|
||||
interp_ok(match op {
|
||||
mir::Operand::Copy(_) | mir::Operand::Constant(_) => {
|
||||
// Make a regular copy.
|
||||
let op = self.eval_operand(op, None)?;
|
||||
|
@ -442,7 +442,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
};
|
||||
|
||||
Ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
|
||||
interp_ok(EvaluatedCalleeAndArgs { callee, args, fn_sig, fn_abi, with_caller_location })
|
||||
}
|
||||
|
||||
fn eval_terminator(&mut self, terminator: &mir::Terminator<'tcx>) -> InterpResult<'tcx> {
|
||||
|
@ -537,7 +537,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// generic. In order to make sure that generic and non-generic code behaves
|
||||
// roughly the same (and in keeping with Mir semantics) we do nothing here.
|
||||
self.go_to_block(target);
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
trace!("TerminatorKind::drop: {:?}, type {}", place, place.layout.ty);
|
||||
self.init_drop_in_place_call(&place, instance, target, unwind)?;
|
||||
|
@ -566,7 +566,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// By definition, a Resume terminator means
|
||||
// that we're unwinding
|
||||
self.return_from_current_stack_frame(/* unwinding */ true)?;
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
|
||||
// It is UB to ever encounter this.
|
||||
|
@ -584,6 +584,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -5,7 +5,9 @@ use rustc_target::abi::{Align, Size};
|
|||
use tracing::trace;
|
||||
|
||||
use super::util::ensure_monomorphic_enough;
|
||||
use super::{InterpCx, MPlaceTy, Machine, MemPlaceMeta, OffsetMode, Projectable, throw_ub};
|
||||
use super::{
|
||||
InterpCx, MPlaceTy, Machine, MemPlaceMeta, OffsetMode, Projectable, interp_ok, throw_ub,
|
||||
};
|
||||
|
||||
impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
||||
/// Creates a dynamic vtable for the given type and vtable origin. This is used only for
|
||||
|
@ -31,7 +33,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let salt = M::get_global_alloc_salt(self, None);
|
||||
let vtable_symbolic_allocation = self.tcx.reserve_and_set_vtable_alloc(ty, dyn_ty, salt);
|
||||
let vtable_ptr = self.global_root_pointer(Pointer::from(vtable_symbolic_allocation))?;
|
||||
Ok(vtable_ptr.into())
|
||||
interp_ok(vtable_ptr.into())
|
||||
}
|
||||
|
||||
pub fn get_vtable_size_and_align(
|
||||
|
@ -42,7 +44,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
let ty = self.get_ptr_vtable_ty(vtable, expected_trait)?;
|
||||
let layout = self.layout_of(ty)?;
|
||||
assert!(layout.is_sized(), "there are no vtables for unsized types");
|
||||
Ok((layout.size, layout.align.abi))
|
||||
interp_ok((layout.size, layout.align.abi))
|
||||
}
|
||||
|
||||
pub(super) fn vtable_entries(
|
||||
|
@ -102,7 +104,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
|
||||
|
@ -127,7 +129,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
layout,
|
||||
self,
|
||||
)?;
|
||||
Ok(mplace)
|
||||
interp_ok(mplace)
|
||||
}
|
||||
|
||||
/// Turn a `dyn* Trait` type into an value with the actual dynamic type.
|
||||
|
@ -147,6 +149,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
// `data` is already the right thing but has the wrong type. So we transmute it.
|
||||
let layout = self.layout_of(ty)?;
|
||||
let data = data.transmute(layout, self)?;
|
||||
Ok(data)
|
||||
interp_ok(data)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -9,7 +9,7 @@ use rustc_middle::ty::{
};
use tracing::debug;

use super::{InterpCx, MPlaceTy, MemoryKind, throw_inval};
use super::{InterpCx, MPlaceTy, MemoryKind, interp_ok, throw_inval};
use crate::const_eval::{CompileTimeInterpCx, CompileTimeMachine, InterpretationResult};

/// Checks whether a type contains generic parameters which must be instantiated.
@@ -23,7 +23,7 @@ where
{
debug!("ensure_monomorphic_enough: ty={:?}", ty);
if !ty.has_param() {
return Ok(());
return interp_ok(());
}

struct FoundParam;
@@ -78,7 +78,7 @@ where
if matches!(ty.visit_with(&mut vis), ControlFlow::Break(FoundParam)) {
throw_inval!(TooGeneric);
} else {
Ok(())
interp_ok(())
}
}

@@ -103,5 +103,5 @@ pub(crate) fn create_static_alloc<'tcx>(
assert_eq!(ecx.machine.static_root_ids, None);
ecx.machine.static_root_ids = Some((alloc_id, static_def_id));
assert!(ecx.memory.alloc_map.insert(alloc_id, (MemoryKind::Stack, alloc)).is_none());
Ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
interp_ok(ecx.ptr_to_mplace(Pointer::from(alloc_id).into(), layout))
}
|
|
@ -17,8 +17,8 @@ use rustc_hir as hir;
|
|||
use rustc_middle::bug;
|
||||
use rustc_middle::mir::interpret::ValidationErrorKind::{self, *};
|
||||
use rustc_middle::mir::interpret::{
|
||||
ExpectedKind, InterpError, InvalidMetaKind, Misalignment, PointerKind, Provenance,
|
||||
UnsupportedOpInfo, ValidationErrorInfo, alloc_range,
|
||||
ExpectedKind, InterpError, InterpErrorInfo, InvalidMetaKind, Misalignment, PointerKind,
|
||||
Provenance, UnsupportedOpInfo, ValidationErrorInfo, alloc_range, interp_ok,
|
||||
};
|
||||
use rustc_middle::ty::layout::{LayoutCx, LayoutOf, TyAndLayout};
|
||||
use rustc_middle::ty::{self, Ty};
|
||||
|
@ -32,7 +32,7 @@ use super::machine::AllocMap;
|
|||
use super::{
|
||||
AllocId, AllocKind, CheckInAllocMsg, GlobalAlloc, ImmTy, Immediate, InterpCx, InterpResult,
|
||||
MPlaceTy, Machine, MemPlaceMeta, PlaceTy, Pointer, Projectable, Scalar, ValueVisitor, err_ub,
|
||||
format_interp_error, throw_ub,
|
||||
format_interp_error,
|
||||
};
|
||||
|
||||
// for the validation errors
|
||||
|
@ -42,7 +42,7 @@ use super::InterpError::Unsupported as Unsup;
|
|||
use super::UndefinedBehaviorInfo::*;
|
||||
use super::UnsupportedOpInfo::*;
|
||||
|
||||
macro_rules! throw_validation_failure {
|
||||
macro_rules! err_validation_failure {
|
||||
($where:expr, $kind: expr) => {{
|
||||
let where_ = &$where;
|
||||
let path = if !where_.is_empty() {
|
||||
|
@ -53,10 +53,16 @@ macro_rules! throw_validation_failure {
|
|||
None
|
||||
};
|
||||
|
||||
throw_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
|
||||
err_ub!(ValidationError(ValidationErrorInfo { path, kind: $kind }))
|
||||
}};
|
||||
}
|
||||
|
||||
macro_rules! throw_validation_failure {
|
||||
($where:expr, $kind: expr) => {
|
||||
do yeet err_validation_failure!($where, $kind)
|
||||
};
|
||||
}
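For readers unfamiliar with `do yeet`: it is the unstable early-return operator (feature `yeet_expr`) that the rewritten macro relies on, so building a validation error (`err_validation_failure!`) is now separate from throwing it (`throw_validation_failure!`). A self-contained sketch of the operator itself, not part of this diff and usable on nightly:

#![feature(yeet_expr)]
// Illustrative only: `do yeet e` early-returns the error `e` from a function
// whose return type supports it (here a plain `Result`).
fn parse_digit(c: char) -> Result<u32, String> {
    if !c.is_ascii_digit() {
        do yeet format!("not a digit: {c}");
    }
    Ok(c as u32 - '0' as u32)
}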
|
||||
|
||||
/// If $e throws an error matching the pattern, throw a validation failure.
|
||||
/// Other errors are passed back to the caller, unchanged -- and if they reach the root of
|
||||
/// the visitor, we make sure only validation errors and `InvalidProgram` errors are left.
|
||||
|
@ -91,22 +97,22 @@ macro_rules! try_validation {
|
|||
($e:expr, $where:expr,
|
||||
$( $( $p:pat_param )|+ => $kind: expr ),+ $(,)?
|
||||
) => {{
|
||||
match $e {
|
||||
Ok(x) => x,
|
||||
$e.map_err(|e| {
|
||||
// We catch the error and turn it into a validation failure. We are okay with
|
||||
// allocation here as this can only slow down builds that fail anyway.
|
||||
Err(e) => match e.kind() {
|
||||
let (kind, backtrace) = e.into_parts();
|
||||
match kind {
|
||||
$(
|
||||
$($p)|+ =>
|
||||
throw_validation_failure!(
|
||||
$($p)|+ => {
|
||||
err_validation_failure!(
|
||||
$where,
|
||||
$kind
|
||||
)
|
||||
).into()
|
||||
}
|
||||
),+,
|
||||
#[allow(unreachable_patterns)]
|
||||
_ => Err::<!, _>(e)?,
|
||||
_ => InterpErrorInfo::from_parts(kind, backtrace),
|
||||
}
|
||||
}
|
||||
})?
|
||||
}};
|
||||
}
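Roughly, the macro now routes errors through `map_err` instead of matching on `Ok`/`Err`, so the result's drop guard is never left armed. An expansion-shaped sketch (the names `read`, `path`, `expected` and the matched kind are placeholders):

let x = read()
    .map_err(|e| {
        // Split the error so the selected kinds become validation failures...
        let (kind, backtrace) = e.into_parts();
        match kind {
            Ub(InvalidUninitBytes(None)) => err_validation_failure!(path, Uninit { expected }).into(),
            // ...and every other kind is re-assembled and propagated unchanged.
            _ => InterpErrorInfo::from_parts(kind, backtrace),
        }
    })?;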
|
||||
|
||||
|
@ -378,7 +384,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
// Undo changes
|
||||
self.path.truncate(path_len);
|
||||
// Done
|
||||
Ok(r)
|
||||
interp_ok(r)
|
||||
}
|
||||
|
||||
fn read_immediate(
|
||||
|
@ -386,7 +392,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
val: &PlaceTy<'tcx, M::Provenance>,
|
||||
expected: ExpectedKind,
|
||||
) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
|
||||
Ok(try_validation!(
|
||||
interp_ok(try_validation!(
|
||||
self.ecx.read_immediate(val),
|
||||
self.path,
|
||||
Ub(InvalidUninitBytes(None)) =>
|
||||
|
@ -404,7 +410,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
val: &PlaceTy<'tcx, M::Provenance>,
|
||||
expected: ExpectedKind,
|
||||
) -> InterpResult<'tcx, Scalar<M::Provenance>> {
|
||||
Ok(self.read_immediate(val, expected)?.to_scalar())
|
||||
interp_ok(self.read_immediate(val, expected)?.to_scalar())
|
||||
}
|
||||
|
||||
fn deref_pointer(
|
||||
|
@ -469,7 +475,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
_ => bug!("Unexpected unsized type tail: {:?}", tail),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Check a reference or `Box`.
|
||||
|
@ -510,7 +516,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
Ub(DanglingIntPointer { addr: i, .. }) => DanglingPtrNoProvenance {
|
||||
ptr_kind,
|
||||
// FIXME this says "null pointer" when null but we need to translate
|
||||
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(*i))
|
||||
pointer: format!("{}", Pointer::<Option<AllocId>>::from_addr_invalid(i))
|
||||
},
|
||||
Ub(PointerOutOfBounds { .. }) => DanglingPtrOutOfBounds {
|
||||
ptr_kind
|
||||
|
@ -632,7 +638,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
}
|
||||
// Potentially skip recursive check.
|
||||
if skip_recursive_check {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
} else {
|
||||
// This is not CTFE, so it's Miri with recursive checking.
|
||||
|
@ -641,7 +647,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
// FIXME: should we also skip `UnsafeCell` behind shared references? Currently that is not
|
||||
// needed since validation reads bypass Stacked Borrows and data race checks.
|
||||
if matches!(ptr_kind, PointerKind::Box) {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
}
|
||||
let path = &self.path;
|
||||
|
@ -654,7 +660,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
new_path
|
||||
});
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Check if this is a value of primitive type, and if yes check the validity of the value
|
||||
|
@ -681,7 +687,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
self.ecx.clear_provenance(value)?;
|
||||
self.add_data_range_place(value);
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::Char => {
|
||||
let scalar = self.read_scalar(value, ExpectedKind::Char)?;
|
||||
|
@ -696,7 +702,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
self.ecx.clear_provenance(value)?;
|
||||
self.add_data_range_place(value);
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::Float(_) | ty::Int(_) | ty::Uint(_) => {
|
||||
// NOTE: Keep this in sync with the array optimization for int/float
|
||||
|
@ -713,18 +719,18 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
self.ecx.clear_provenance(value)?;
|
||||
self.add_data_range_place(value);
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::RawPtr(..) => {
|
||||
let place = self.deref_pointer(value, ExpectedKind::RawPtr)?;
|
||||
if place.layout.is_unsized() {
|
||||
self.check_wide_ptr_meta(place.meta(), place.layout)?;
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::Ref(_, _ty, mutbl) => {
|
||||
self.check_safe_pointer(value, PointerKind::Ref(*mutbl))?;
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::FnPtr(..) => {
|
||||
let scalar = self.read_scalar(value, ExpectedKind::FnPtr)?;
|
||||
|
@ -753,12 +759,12 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
}
|
||||
self.add_data_range_place(value);
|
||||
}
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
ty::Never => throw_validation_failure!(self.path, NeverVal),
|
||||
ty::Foreign(..) | ty::FnDef(..) => {
|
||||
// Nothing to check.
|
||||
Ok(true)
|
||||
interp_ok(true)
|
||||
}
|
||||
// The above should be all the primitive types. The rest is compound, we
|
||||
// check them by visiting their fields/variants.
|
||||
|
@ -771,7 +777,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
| ty::Closure(..)
|
||||
| ty::Pat(..)
|
||||
| ty::CoroutineClosure(..)
|
||||
| ty::Coroutine(..) => Ok(false),
|
||||
| ty::Coroutine(..) => interp_ok(false),
|
||||
// Some types only occur during typechecking, they have no layout.
|
||||
// We should not see them here and we could not check them anyway.
|
||||
ty::Error(_)
|
||||
|
@ -808,11 +814,11 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
max_value
|
||||
})
|
||||
} else {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
} else if scalar_layout.is_always_valid(self.ecx) {
|
||||
// Easy. (This is reachable if `enforce_number_validity` is set.)
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
} else {
|
||||
// Conservatively, we reject, because the pointer *could* have a bad
|
||||
// value.
|
||||
|
@ -825,7 +831,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
};
|
||||
// Now compare.
|
||||
if valid_range.contains(bits) {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
} else {
|
||||
throw_validation_failure!(self.path, OutOfRange {
|
||||
value: format!("{bits}"),
|
||||
|
@ -884,7 +890,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
}
|
||||
|
||||
fn reset_padding(&mut self, place: &PlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx> {
|
||||
let Some(data_bytes) = self.data_bytes.as_mut() else { return Ok(()) };
|
||||
let Some(data_bytes) = self.data_bytes.as_mut() else { return interp_ok(()) };
|
||||
// Our value must be in memory, otherwise we would not have set up `data_bytes`.
|
||||
let mplace = self.ecx.force_allocation(place)?;
|
||||
// Determine starting offset and size.
|
||||
|
@ -896,14 +902,14 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
// If there is no padding at all, we can skip the rest: check for
|
||||
// a single data range covering the entire value.
|
||||
if data_bytes.0 == &[(start_offset, size)] {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
// Get a handle for the allocation. Do this only once, to avoid looking up the same
|
||||
// allocation over and over again. (Though to be fair, iterating the value already does
|
||||
// exactly that.)
|
||||
let Some(mut alloc) = self.ecx.get_ptr_alloc_mut(mplace.ptr(), size)? else {
|
||||
// A ZST, no padding to clear.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
};
|
||||
// Add a "finalizer" data range at the end, so that the iteration below finds all gaps
|
||||
// between ranges.
|
||||
|
@ -930,7 +936,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValidityVisitor<'rt, 'tcx, M> {
|
|||
padding_cleared_until = offset + size;
|
||||
}
|
||||
assert!(padding_cleared_until == start_offset + size);
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Computes the data range of this union type:
|
||||
|
@ -1070,7 +1076,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
val: &PlaceTy<'tcx, M::Provenance>,
|
||||
) -> InterpResult<'tcx, VariantIdx> {
|
||||
self.with_elem(PathElem::EnumTag, move |this| {
|
||||
Ok(try_validation!(
|
||||
interp_ok(try_validation!(
|
||||
this.ecx.read_discriminant(val),
|
||||
this.path,
|
||||
Ub(InvalidTag(val)) => InvalidEnumTag {
|
||||
|
@ -1134,7 +1140,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
data_bytes.add_range(base_offset + offset, size);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -1144,7 +1150,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
val: &PlaceTy<'tcx, M::Provenance>,
|
||||
) -> InterpResult<'tcx> {
|
||||
self.check_safe_pointer(val, PointerKind::Box)?;
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -1157,7 +1163,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
// We assume that the Scalar validity range does not restrict these values
|
||||
// any further than `try_visit_primitive` does!
|
||||
if self.try_visit_primitive(val)? {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
|
||||
// Special check preventing `UnsafeCell` in the inner part of constants
|
||||
|
@ -1204,7 +1210,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
// If the size is 0, there is nothing to check.
|
||||
// (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
|
||||
if size == Size::ZERO {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
// Now that we definitely have a non-ZST array, we know it lives in memory -- except it may
|
||||
// be an uninitialized local variable, those are also "immediate".
|
||||
|
@ -1224,36 +1230,33 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
// No need for an alignment check here, this is not an actual memory access.
|
||||
let alloc = self.ecx.get_ptr_alloc(mplace.ptr(), size)?.expect("we already excluded size 0");
|
||||
|
||||
match alloc.get_bytes_strip_provenance() {
|
||||
// In the happy case, we needn't check anything else.
|
||||
Ok(_) => {}
|
||||
alloc.get_bytes_strip_provenance().map_err(|err| {
|
||||
// Some error happened, try to provide a more detailed description.
|
||||
Err(err) => {
|
||||
// For some errors we might be able to provide extra information.
|
||||
// (This custom logic does not fit the `try_validation!` macro.)
|
||||
match err.kind() {
|
||||
Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
|
||||
// Some byte was uninitialized, determine which
|
||||
// element that byte belongs to so we can
|
||||
// provide an index.
|
||||
let i = usize::try_from(
|
||||
access.bad.start.bytes() / layout.size.bytes(),
|
||||
)
|
||||
.unwrap();
|
||||
self.path.push(PathElem::ArrayElem(i));
|
||||
// For some errors we might be able to provide extra information.
|
||||
// (This custom logic does not fit the `try_validation!` macro.)
|
||||
let (kind, backtrace) = err.into_parts();
|
||||
match kind {
|
||||
Ub(InvalidUninitBytes(Some((_alloc_id, access)))) | Unsup(ReadPointerAsInt(Some((_alloc_id, access)))) => {
|
||||
// Some byte was uninitialized, determine which
|
||||
// element that byte belongs to so we can
|
||||
// provide an index.
|
||||
let i = usize::try_from(
|
||||
access.bad.start.bytes() / layout.size.bytes(),
|
||||
)
|
||||
.unwrap();
|
||||
self.path.push(PathElem::ArrayElem(i));
|
||||
|
||||
if matches!(err.kind(), Ub(InvalidUninitBytes(_))) {
|
||||
throw_validation_failure!(self.path, Uninit { expected })
|
||||
} else {
|
||||
throw_validation_failure!(self.path, PointerAsInt { expected })
|
||||
}
|
||||
if matches!(kind, Ub(InvalidUninitBytes(_))) {
|
||||
err_validation_failure!(self.path, Uninit { expected }).into()
|
||||
} else {
|
||||
err_validation_failure!(self.path, PointerAsInt { expected }).into()
|
||||
}
|
||||
|
||||
// Propagate upwards (that will also check for unexpected errors).
|
||||
_ => return Err(err),
|
||||
}
|
||||
|
||||
// Propagate upwards (that will also check for unexpected errors).
|
||||
_ => return InterpErrorInfo::from_parts(kind, backtrace),
|
||||
}
|
||||
}
|
||||
})?;
|
||||
|
||||
// Don't forget that these are all non-pointer types, and thus do not preserve
|
||||
// provenance.
|
||||
|
@ -1282,7 +1285,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
// It's not great to catch errors here, since we can't give a very good path,
|
||||
// but it's better than ICEing.
|
||||
Ub(InvalidVTableTrait { vtable_dyn_type, expected_dyn_type }) => {
|
||||
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type: *expected_dyn_type }
|
||||
InvalidMetaWrongTrait { vtable_dyn_type, expected_dyn_type }
|
||||
},
|
||||
);
|
||||
}
|
||||
|
@ -1331,7 +1334,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt,
|
|||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1347,7 +1350,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
trace!("validate_operand_internal: {:?}, {:?}", *val, val.layout.ty);
|
||||
|
||||
// Run the visitor.
|
||||
match self.run_for_validation(|ecx| {
|
||||
self.run_for_validation(|ecx| {
|
||||
let reset_padding = reset_provenance_and_padding && {
|
||||
// Check if `val` is actually stored in memory. If not, padding is not even
|
||||
// represented and we need not reset it.
|
||||
|
@ -1363,29 +1366,22 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
};
|
||||
v.visit_value(val)?;
|
||||
v.reset_padding(val)?;
|
||||
InterpResult::Ok(())
|
||||
}) {
|
||||
Ok(()) => Ok(()),
|
||||
// Pass through validation failures and "invalid program" issues.
|
||||
Err(err)
|
||||
if matches!(
|
||||
err.kind(),
|
||||
err_ub!(ValidationError { .. })
|
||||
| InterpError::InvalidProgram(_)
|
||||
| InterpError::Unsupported(UnsupportedOpInfo::ExternTypeField)
|
||||
) =>
|
||||
{
|
||||
Err(err)
|
||||
}
|
||||
// Complain about any other kind of error -- those are bad because we'd like to
|
||||
// report them in a way that shows *where* in the value the issue lies.
|
||||
Err(err) => {
|
||||
interp_ok(())
|
||||
})
|
||||
.map_err(|err| {
|
||||
if !matches!(
|
||||
err.kind(),
|
||||
err_ub!(ValidationError { .. })
|
||||
| InterpError::InvalidProgram(_)
|
||||
| InterpError::Unsupported(UnsupportedOpInfo::ExternTypeField)
|
||||
) {
|
||||
bug!(
|
||||
"Unexpected error during validation: {}",
|
||||
format_interp_error(self.tcx.dcx(), err)
|
||||
);
|
||||
}
|
||||
}
|
||||
err
|
||||
})
|
||||
}
|
||||
|
||||
/// This function checks the data at `op` to be const-valid.
|
||||
|
@ -1456,6 +1452,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
|
|||
/*reset_provenance_and_padding*/ false,
|
||||
)?;
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -10,7 +10,7 @@ use rustc_middle::ty::{self, Ty};
|
|||
use rustc_target::abi::{FieldIdx, FieldsShape, VariantIdx, Variants};
|
||||
use tracing::trace;
|
||||
|
||||
use super::{InterpCx, MPlaceTy, Machine, Projectable, throw_inval};
|
||||
use super::{InterpCx, MPlaceTy, Machine, Projectable, interp_ok, throw_inval};
|
||||
|
||||
/// How to traverse a value and what to do when we are at the leaves.
|
||||
pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
|
||||
|
@ -46,14 +46,14 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
|
|||
/// Visits the given value as a union. No automatic recursion can happen here.
|
||||
#[inline(always)]
|
||||
fn visit_union(&mut self, _v: &Self::V, _fields: NonZero<usize>) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
/// Visits the given value as the pointer of a `Box`. There is nothing to recurse into.
|
||||
/// The type of `v` will be a raw pointer to `T`, but this is a field of `Box<T>` and the
|
||||
/// pointee type is the actual `T`. `box_ty` provides the full type of the `Box` itself.
|
||||
#[inline(always)]
|
||||
fn visit_box(&mut self, _box_ty: Ty<'tcx>, _v: &Self::V) -> InterpResult<'tcx> {
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
/// Called each time we recurse down to a field of a "product-like" aggregate
|
||||
|
@ -165,7 +165,7 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
|
|||
self.visit_field(v, 1, &alloc)?;
|
||||
|
||||
// We visited all parts of this one.
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
|
||||
// Non-normalized types should never show up here.
|
||||
|
@ -222,6 +222,6 @@ pub trait ValueVisitor<'tcx, M: Machine<'tcx>>: Sized {
|
|||
Variants::Single { .. } => {}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
}
|
||||
|
|
|
@ -75,7 +75,8 @@ fn check_validity_requirement_strict<'tcx>(
|
|||
/*recursive*/ false,
|
||||
/*reset_provenance_and_padding*/ false,
|
||||
)
|
||||
.is_ok())
|
||||
.discard_err()
|
||||
.is_some())
|
||||
}
|
||||
|
||||
/// Implements the 'lax' (default) version of the [`check_validity_requirement`] checks; see that
|
||||
|
|
|
@ -61,6 +61,8 @@
|
|||
#![feature(trait_upcasting)]
|
||||
#![feature(trusted_len)]
|
||||
#![feature(try_blocks)]
|
||||
#![feature(try_trait_v2)]
|
||||
#![feature(try_trait_v2_yeet)]
|
||||
#![feature(type_alias_impl_trait)]
|
||||
#![feature(yeet_expr)]
|
||||
#![warn(unreachable_pub)]
|
||||
|
|
|
@ -148,7 +148,7 @@ impl<'tcx> ConstValue<'tcx> {
|
|||
/* read_provenance */ true,
|
||||
)
|
||||
.ok()?;
|
||||
let ptr = ptr.to_pointer(&tcx).ok()?;
|
||||
let ptr = ptr.to_pointer(&tcx).discard_err()?;
|
||||
let len = a
|
||||
.read_scalar(
|
||||
&tcx,
|
||||
|
@ -156,7 +156,7 @@ impl<'tcx> ConstValue<'tcx> {
|
|||
/* read_provenance */ false,
|
||||
)
|
||||
.ok()?;
|
||||
let len = len.to_target_usize(&tcx).ok()?;
|
||||
let len = len.to_target_usize(&tcx).discard_err()?;
|
||||
if len == 0 {
|
||||
return Some(&[]);
|
||||
}
|
||||
|
|
|
@ -20,7 +20,7 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
|
|||
use super::{
|
||||
AllocId, BadBytesAccess, CtfeProvenance, InterpError, InterpResult, Pointer, PointerArithmetic,
|
||||
Provenance, ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo,
|
||||
UnsupportedOpInfo, read_target_uint, write_target_uint,
|
||||
UnsupportedOpInfo, interp_ok, read_target_uint, write_target_uint,
|
||||
};
|
||||
use crate::ty;
|
||||
|
||||
|
@ -318,8 +318,9 @@ impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
|
|||
pub fn try_uninit<'tcx>(size: Size, align: Align) -> InterpResult<'tcx, Self> {
|
||||
Self::uninit_inner(size, align, || {
|
||||
ty::tls::with(|tcx| tcx.dcx().delayed_bug("exhausted memory during interpretation"));
|
||||
InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted).into()
|
||||
InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
|
||||
})
|
||||
.into()
|
||||
}
|
||||
|
||||
/// Try to create an Allocation of `size` bytes, panics if there is not enough memory
|
||||
|
@ -355,12 +356,12 @@ impl<Prov: Provenance, Bytes: AllocBytes> Allocation<Prov, (), Bytes> {
|
|||
impl Allocation {
|
||||
/// Adjust allocation from the ones in `tcx` to a custom Machine instance
|
||||
/// with a different `Provenance` and `Byte` type.
|
||||
pub fn adjust_from_tcx<Prov: Provenance, Bytes: AllocBytes, Err>(
|
||||
pub fn adjust_from_tcx<'tcx, Prov: Provenance, Bytes: AllocBytes>(
|
||||
&self,
|
||||
cx: &impl HasDataLayout,
|
||||
mut alloc_bytes: impl FnMut(&[u8], Align) -> Result<Bytes, Err>,
|
||||
mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> Result<Pointer<Prov>, Err>,
|
||||
) -> Result<Allocation<Prov, (), Bytes>, Err> {
|
||||
mut alloc_bytes: impl FnMut(&[u8], Align) -> InterpResult<'tcx, Bytes>,
|
||||
mut adjust_ptr: impl FnMut(Pointer<CtfeProvenance>) -> InterpResult<'tcx, Pointer<Prov>>,
|
||||
) -> InterpResult<'tcx, Allocation<Prov, (), Bytes>> {
|
||||
// Copy the data.
|
||||
let mut bytes = alloc_bytes(&*self.bytes, self.align)?;
|
||||
// Adjust provenance of pointers stored in this allocation.
|
||||
|
@ -377,7 +378,7 @@ impl Allocation {
|
|||
new_provenance.push((offset, ptr_prov));
|
||||
}
|
||||
// Create allocation.
|
||||
Ok(Allocation {
|
||||
interp_ok(Allocation {
|
||||
bytes,
|
||||
provenance: ProvenanceMap::from_presorted_ptrs(new_provenance),
|
||||
init_mask: self.init_mask.clone(),
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
use std::any::Any;
|
||||
use std::backtrace::Backtrace;
|
||||
use std::borrow::Cow;
|
||||
use std::fmt;
|
||||
use std::{convert, fmt, mem, ops};
|
||||
|
||||
use either::Either;
|
||||
use rustc_ast_ir::Mutability;
|
||||
|
@ -104,6 +104,10 @@ rustc_data_structures::static_assert_size!(InterpErrorInfo<'_>, 8);
|
|||
/// These should always be constructed by calling `.into()` on
|
||||
/// an `InterpError`. In `rustc_mir::interpret`, we have `throw_err_*`
|
||||
/// macros for this.
|
||||
///
|
||||
/// Interpreter errors must *not* be silently discarded (that will lead to a panic). Instead,
|
||||
/// explicitly call `discard_err` if this is really the right thing to do. Note that if
|
||||
/// this happens during const-eval or in Miri, it could lead to a UB error being lost!
|
||||
#[derive(Debug)]
|
||||
pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
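A minimal before/after sketch of the discipline this enforces (illustrative; `ecx.read_scalar(&op)` stands in for any operation returning an `InterpResult`):

// Before this change, an error could be dropped without anyone noticing:
//     let val = ecx.read_scalar(&op).ok();
// Now discarding has to be spelled out, and an implicitly dropped result panics:
let val = ecx.read_scalar(&op).discard_err();   // Option<_>, deliberately lossy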
|
||||
|
||||
|
@ -156,8 +160,11 @@ impl<'tcx> InterpErrorInfo<'tcx> {
|
|||
}
|
||||
|
||||
pub fn into_kind(self) -> InterpError<'tcx> {
|
||||
let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
|
||||
kind
|
||||
self.0.kind
|
||||
}
|
||||
|
||||
pub fn from_parts(kind: InterpError<'tcx>, backtrace: InterpErrorBacktrace) -> Self {
|
||||
Self(Box::new(InterpErrorInfoInner { kind, backtrace }))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
@ -599,8 +606,6 @@ pub enum InterpError<'tcx> {
|
|||
MachineStop(Box<dyn MachineStopType>),
|
||||
}
|
||||
|
||||
pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
|
||||
|
||||
impl InterpError<'_> {
|
||||
/// Some errors do string formatting even if the error is never printed.
|
||||
/// To avoid performance issues, there are places where we want to be sure to never raise these formatting errors,
|
||||
|
@ -728,3 +733,182 @@ macro_rules! throw_exhaust {
|
|||
macro_rules! throw_machine_stop {
|
||||
($($tt:tt)*) => { do yeet $crate::err_machine_stop!($($tt)*) };
|
||||
}
|
||||
|
||||
/// Guard type that panics on drop.
|
||||
#[derive(Debug)]
|
||||
struct Guard;
|
||||
|
||||
impl Drop for Guard {
|
||||
fn drop(&mut self) {
|
||||
// We silence the guard if we are already panicking, to avoid double-panics.
|
||||
if !std::thread::panicking() {
|
||||
panic!(
|
||||
"an interpreter error got improperly discarded; use `discard_err()` if this is intentional"
|
||||
);
|
||||
}
|
||||
}
|
||||
}
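The guard is a standard panic-on-drop pattern; a free-standing sketch of the same idea (not the rustc code itself):

struct MustConsume;                        // zero-sized guard, like `Guard`
impl Drop for MustConsume {
    fn drop(&mut self) {
        // Silenced while already panicking, to avoid aborting on a double panic.
        if !std::thread::panicking() {
            panic!("this value must be consumed explicitly, not dropped");
        }
    }
}
fn consume(g: MustConsume) -> u32 {
    std::mem::forget(g);                   // "disarm": skip Drop on the intended path
    42
}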
|
||||
|
||||
/// The result type used by the interpreter. This is a newtype around `Result`
|
||||
/// to block access to operations like `ok()` that discard UB errors.
|
||||
///
|
||||
/// We also make things panic if this type is ever implicitly dropped.
|
||||
#[derive(Debug)]
|
||||
pub struct InterpResult_<'tcx, T> {
|
||||
res: Result<T, InterpErrorInfo<'tcx>>,
|
||||
guard: Guard,
|
||||
}
|
||||
|
||||
// Type alias to be able to set a default type argument.
|
||||
pub type InterpResult<'tcx, T = ()> = InterpResult_<'tcx, T>;
|
||||
|
||||
impl<'tcx, T> ops::Try for InterpResult_<'tcx, T> {
|
||||
type Output = T;
|
||||
type Residual = InterpResult_<'tcx, convert::Infallible>;
|
||||
|
||||
#[inline]
|
||||
fn from_output(output: Self::Output) -> Self {
|
||||
InterpResult_::new(Ok(output))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
fn branch(self) -> ops::ControlFlow<Self::Residual, Self::Output> {
|
||||
match self.disarm() {
|
||||
Ok(v) => ops::ControlFlow::Continue(v),
|
||||
Err(e) => ops::ControlFlow::Break(InterpResult_::new(Err(e))),
|
||||
}
|
||||
}
|
||||
}
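For orientation, `?` on this type goes through `branch`/`from_residual`; a rough desugaring sketch (using the unstable `try_trait_v2` machinery this crate already enables; `demo` is a made-up function):

fn demo<'tcx>(res: InterpResult<'tcx, u32>) -> InterpResult<'tcx, u32> {
    // `let v = res?;` behaves roughly like this: `branch` disarms the guard and
    // inspects the Result; on error, a fresh (armed) residual is returned upward.
    let v = match std::ops::Try::branch(res) {
        std::ops::ControlFlow::Continue(v) => v,
        std::ops::ControlFlow::Break(residual) => {
            return std::ops::FromResidual::from_residual(residual);
        }
    };
    interp_ok(v + 1)
}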
|
||||
|
||||
impl<'tcx, T> ops::FromResidual for InterpResult_<'tcx, T> {
|
||||
#[inline]
|
||||
#[track_caller]
|
||||
fn from_residual(residual: InterpResult_<'tcx, convert::Infallible>) -> Self {
|
||||
match residual.disarm() {
|
||||
Err(e) => Self::new(Err(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Allow `yeet`ing `InterpError` in functions returning `InterpResult_`.
|
||||
impl<'tcx, T> ops::FromResidual<ops::Yeet<InterpError<'tcx>>> for InterpResult_<'tcx, T> {
|
||||
#[inline]
|
||||
fn from_residual(ops::Yeet(e): ops::Yeet<InterpError<'tcx>>) -> Self {
|
||||
Self::new(Err(e.into()))
|
||||
}
|
||||
}
|
||||
|
||||
// Allow `?` on `Result<_, InterpError>` in functions returning `InterpResult_`.
|
||||
// This is useful e.g. for `option.ok_or_else(|| err_ub!(...))`.
|
||||
impl<'tcx, T, E: Into<InterpErrorInfo<'tcx>>> ops::FromResidual<Result<convert::Infallible, E>>
|
||||
for InterpResult_<'tcx, T>
|
||||
{
|
||||
#[inline]
|
||||
fn from_residual(residual: Result<convert::Infallible, E>) -> Self {
|
||||
match residual {
|
||||
Err(e) => Self::new(Err(e.into())),
|
||||
}
|
||||
}
|
||||
}
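A small usage sketch of that conversion, along the lines the comment above suggests (the surrounding function and the chosen error constructor are illustrative):

fn require_meta<'tcx>(meta: Option<u64>) -> InterpResult<'tcx, u64> {
    // `ok_or_else` produces Result<u64, InterpError>; `?` converts the error into
    // InterpErrorInfo and re-wraps it in InterpResult_ via the impl above.
    let m = meta.ok_or_else(|| err_ub!(InvalidMeta(InvalidMetaKind::TooBig)))?;
    interp_ok(m)
}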
|
||||
|
||||
impl<'tcx, T, E: Into<InterpErrorInfo<'tcx>>> From<Result<T, E>> for InterpResult<'tcx, T> {
|
||||
#[inline]
|
||||
fn from(value: Result<T, E>) -> Self {
|
||||
Self::new(value.map_err(|e| e.into()))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, T, V: FromIterator<T>> FromIterator<InterpResult<'tcx, T>> for InterpResult<'tcx, V> {
|
||||
fn from_iter<I: IntoIterator<Item = InterpResult<'tcx, T>>>(iter: I) -> Self {
|
||||
Self::new(iter.into_iter().map(|x| x.disarm()).collect())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'tcx, T> InterpResult_<'tcx, T> {
|
||||
#[inline(always)]
|
||||
fn new(res: Result<T, InterpErrorInfo<'tcx>>) -> Self {
|
||||
Self { res, guard: Guard }
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
fn disarm(self) -> Result<T, InterpErrorInfo<'tcx>> {
|
||||
mem::forget(self.guard);
|
||||
self.res
|
||||
}
|
||||
|
||||
/// Discard the error information in this result. Only use this if ignoring Undefined Behavior is okay!
|
||||
#[inline]
|
||||
pub fn discard_err(self) -> Option<T> {
|
||||
self.disarm().ok()
|
||||
}
|
||||
|
||||
/// Look at the `Result` wrapped inside of this.
|
||||
/// Must only be used to report the error!
|
||||
#[inline]
|
||||
pub fn report_err(self) -> Result<T, InterpErrorInfo<'tcx>> {
|
||||
self.disarm()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn map<U>(self, f: impl FnOnce(T) -> U) -> InterpResult<'tcx, U> {
|
||||
InterpResult_::new(self.disarm().map(f))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn map_err(
|
||||
self,
|
||||
f: impl FnOnce(InterpErrorInfo<'tcx>) -> InterpErrorInfo<'tcx>,
|
||||
) -> InterpResult<'tcx, T> {
|
||||
InterpResult_::new(self.disarm().map_err(f))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn inspect_err(self, f: impl FnOnce(&InterpErrorInfo<'tcx>)) -> InterpResult<'tcx, T> {
|
||||
InterpResult_::new(self.disarm().inspect_err(f))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[track_caller]
|
||||
pub fn unwrap(self) -> T {
|
||||
self.disarm().unwrap()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[track_caller]
|
||||
pub fn unwrap_or_else(self, f: impl FnOnce(InterpErrorInfo<'tcx>) -> T) -> T {
|
||||
self.disarm().unwrap_or_else(f)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[track_caller]
|
||||
pub fn expect(self, msg: &str) -> T {
|
||||
self.disarm().expect(msg)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn and_then<U>(self, f: impl FnOnce(T) -> InterpResult<'tcx, U>) -> InterpResult<'tcx, U> {
|
||||
InterpResult_::new(self.disarm().and_then(|t| f(t).disarm()))
|
||||
}
|
||||
|
||||
/// Returns success if both `self` and `other` succeed, while ensuring we don't
|
||||
/// accidentally drop an error.
|
||||
///
|
||||
/// If both are an error, `self` will be reported.
|
||||
#[inline]
|
||||
pub fn and<U>(self, other: InterpResult<'tcx, U>) -> InterpResult<'tcx, (T, U)> {
|
||||
match self.disarm() {
|
||||
Ok(t) => interp_ok((t, other?)),
|
||||
Err(e) => {
|
||||
// Discard the other error.
|
||||
drop(other.disarm());
|
||||
// Return `self`.
|
||||
InterpResult_::new(Err(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
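Sketch of a call site for `and` (the two checks are hypothetical helpers returning `InterpResult<'tcx>`):

fn check_both<'tcx>(ecx: &mut MyCtx<'tcx>) -> InterpResult<'tcx> {
    // Both arguments are evaluated up front; `and` makes sure neither result is
    // silently dropped, and reports `run_a`'s error first if both fail.
    let ((), ()) = run_a(ecx).and(run_b(ecx))?;
    interp_ok(())
}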
|
||||
|
||||
#[inline(always)]
|
||||
pub fn interp_ok<'tcx, T>(x: T) -> InterpResult<'tcx, T> {
|
||||
InterpResult_::new(Ok(x))
|
||||
}
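The resulting call-site style, in a minimal sketch (the free function below is illustrative, not a real `Machine` hook):

fn enforce_alignment<'tcx>() -> InterpResult<'tcx, bool> {
    // Plain `Ok(true)` no longer type-checks against InterpResult; success is
    // wrapped explicitly, mirroring the `Ok(..)` -> `interp_ok(..)` churn in this diff.
    interp_ok(true)
}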
|
||||
|
|
|
@ -39,7 +39,7 @@ pub use self::error::{
|
|||
InterpError, InterpErrorInfo, InterpResult, InvalidMetaKind, InvalidProgramInfo,
|
||||
MachineStopType, Misalignment, PointerKind, ReportedErrorInfo, ResourceExhaustionInfo,
|
||||
ScalarSizeMismatch, UndefinedBehaviorInfo, UnsupportedOpInfo, ValidationErrorInfo,
|
||||
ValidationErrorKind,
|
||||
ValidationErrorKind, interp_ok,
|
||||
};
|
||||
pub use self::pointer::{CtfeProvenance, Pointer, PointerArithmetic, Provenance};
|
||||
pub use self::value::Scalar;
|
||||
|
|
|
@ -8,7 +8,7 @@ use rustc_target::abi::{HasDataLayout, Size};
|
|||
|
||||
use super::{
|
||||
AllocId, CtfeProvenance, InterpResult, Pointer, PointerArithmetic, Provenance,
|
||||
ScalarSizeMismatch,
|
||||
ScalarSizeMismatch, interp_ok,
|
||||
};
|
||||
use crate::ty::ScalarInt;
|
||||
|
||||
|
@ -273,10 +273,10 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
.to_bits_or_ptr_internal(cx.pointer_size())
|
||||
.map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
|
||||
{
|
||||
Right(ptr) => Ok(ptr.into()),
|
||||
Right(ptr) => interp_ok(ptr.into()),
|
||||
Left(bits) => {
|
||||
let addr = u64::try_from(bits).unwrap();
|
||||
Ok(Pointer::from_addr_invalid(addr))
|
||||
interp_ok(Pointer::from_addr_invalid(addr))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -311,12 +311,12 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
if matches!(self, Scalar::Ptr(..)) {
|
||||
*self = self.to_scalar_int()?.into();
|
||||
}
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
pub fn to_scalar_int(self) -> InterpResult<'tcx, ScalarInt> {
|
||||
self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None)).into())
|
||||
self.try_to_scalar_int().map_err(|_| err_unsup!(ReadPointerAsInt(None))).into()
|
||||
}
|
||||
|
||||
#[inline(always)]
|
||||
|
@ -330,20 +330,22 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
#[inline]
|
||||
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
|
||||
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
|
||||
self.to_scalar_int()?.try_to_bits(target_size).map_err(|size| {
|
||||
err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
|
||||
target_size: target_size.bytes(),
|
||||
data_size: size.bytes(),
|
||||
}))
|
||||
self.to_scalar_int()?
|
||||
.try_to_bits(target_size)
|
||||
.map_err(|size| {
|
||||
err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
|
||||
target_size: target_size.bytes(),
|
||||
data_size: size.bytes(),
|
||||
}))
|
||||
})
|
||||
.into()
|
||||
})
|
||||
}
|
||||
|
||||
pub fn to_bool(self) -> InterpResult<'tcx, bool> {
|
||||
let val = self.to_u8()?;
|
||||
match val {
|
||||
0 => Ok(false),
|
||||
1 => Ok(true),
|
||||
0 => interp_ok(false),
|
||||
1 => interp_ok(true),
|
||||
_ => throw_ub!(InvalidBool(val)),
|
||||
}
|
||||
}
|
||||
|
@ -351,7 +353,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
pub fn to_char(self) -> InterpResult<'tcx, char> {
|
||||
let val = self.to_u32()?;
|
||||
match std::char::from_u32(val) {
|
||||
Some(c) => Ok(c),
|
||||
Some(c) => interp_ok(c),
|
||||
None => throw_ub!(InvalidChar(val)),
|
||||
}
|
||||
}
|
||||
|
@ -392,7 +394,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
/// Fails if the scalar is a pointer.
|
||||
pub fn to_target_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
|
||||
let b = self.to_uint(cx.data_layout().pointer_size)?;
|
||||
Ok(u64::try_from(b).unwrap())
|
||||
interp_ok(u64::try_from(b).unwrap())
|
||||
}
|
||||
|
||||
/// Converts the scalar to produce a signed integer of the given size.
|
||||
|
@ -400,7 +402,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
#[inline]
|
||||
pub fn to_int(self, size: Size) -> InterpResult<'tcx, i128> {
|
||||
let b = self.to_bits(size)?;
|
||||
Ok(size.sign_extend(b))
|
||||
interp_ok(size.sign_extend(b))
|
||||
}
|
||||
|
||||
/// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
|
||||
|
@ -432,13 +434,13 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
|
|||
/// Fails if the scalar is a pointer.
|
||||
pub fn to_target_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
|
||||
let b = self.to_int(cx.data_layout().pointer_size)?;
|
||||
Ok(i64::try_from(b).unwrap())
|
||||
interp_ok(i64::try_from(b).unwrap())
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub fn to_float<F: Float>(self) -> InterpResult<'tcx, F> {
|
||||
// Going through `to_bits` to check size and truncation.
|
||||
Ok(F::from_bits(self.to_bits(Size::from_bits(F::BITS))?))
|
||||
interp_ok(F::from_bits(self.to_bits(Size::from_bits(F::BITS))?))
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
|
|
@ -519,7 +519,7 @@ impl<'tcx> Const<'tcx> {
|
|||
}
|
||||
|
||||
pub fn try_to_bool(self) -> Option<bool> {
|
||||
self.try_to_scalar()?.to_bool().ok()
|
||||
self.try_to_valtree()?.try_to_scalar_int()?.try_to_bool().ok()
|
||||
}
|
||||
|
||||
#[inline]
|
||||
|
|
|
@ -3,7 +3,9 @@
|
|||
//! Currently, this pass only propagates scalar values.
|
||||
|
||||
use rustc_const_eval::const_eval::{DummyMachine, throw_machine_stop_str};
|
||||
use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable};
|
||||
use rustc_const_eval::interpret::{
|
||||
ImmTy, Immediate, InterpCx, OpTy, PlaceTy, Projectable, interp_ok,
|
||||
};
|
||||
use rustc_data_structures::fx::FxHashMap;
|
||||
use rustc_hir::def::DefKind;
|
||||
use rustc_middle::bug;
|
||||
|
@ -236,6 +238,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
|
|||
FlatSet::Elem(op) => self
|
||||
.ecx
|
||||
.int_to_int_or_float(&op, layout)
|
||||
.discard_err()
|
||||
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
|
||||
FlatSet::Bottom => FlatSet::Bottom,
|
||||
FlatSet::Top => FlatSet::Top,
|
||||
|
@ -249,6 +252,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
|
|||
FlatSet::Elem(op) => self
|
||||
.ecx
|
||||
.float_to_float_or_int(&op, layout)
|
||||
.discard_err()
|
||||
.map_or(FlatSet::Top, |result| self.wrap_immediate(*result)),
|
||||
FlatSet::Bottom => FlatSet::Bottom,
|
||||
FlatSet::Top => FlatSet::Top,
|
||||
|
@ -271,6 +275,7 @@ impl<'tcx> ValueAnalysis<'tcx> for ConstAnalysis<'_, 'tcx> {
|
|||
FlatSet::Elem(value) => self
|
||||
.ecx
|
||||
.unary_op(*op, &value)
|
||||
.discard_err()
|
||||
.map_or(FlatSet::Top, |val| self.wrap_immediate(*val)),
|
||||
FlatSet::Bottom => FlatSet::Bottom,
|
||||
FlatSet::Top => FlatSet::Top,
|
||||
|
@ -364,8 +369,8 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
}
|
||||
}
|
||||
Operand::Constant(box constant) => {
|
||||
if let Ok(constant) =
|
||||
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
|
||||
if let Some(constant) =
|
||||
self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err()
|
||||
{
|
||||
self.assign_constant(state, place, constant, &[]);
|
||||
}
|
||||
|
@ -387,7 +392,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
for &(mut proj_elem) in projection {
|
||||
if let PlaceElem::Index(index) = proj_elem {
|
||||
if let FlatSet::Elem(index) = state.get(index.into(), &self.map)
|
||||
&& let Ok(offset) = index.to_target_usize(&self.tcx)
|
||||
&& let Some(offset) = index.to_target_usize(&self.tcx).discard_err()
|
||||
&& let Some(min_length) = offset.checked_add(1)
|
||||
{
|
||||
proj_elem = PlaceElem::ConstantIndex { offset, min_length, from_end: false };
|
||||
|
@ -395,7 +400,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
return;
|
||||
}
|
||||
}
|
||||
operand = if let Ok(operand) = self.ecx.project(&operand, proj_elem) {
|
||||
operand = if let Some(operand) = self.ecx.project(&operand, proj_elem).discard_err() {
|
||||
operand
|
||||
} else {
|
||||
return;
|
||||
|
@ -406,24 +411,24 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
place,
|
||||
operand,
|
||||
&mut |elem, op| match elem {
|
||||
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
|
||||
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
|
||||
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(),
|
||||
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(),
|
||||
TrackElem::Discriminant => {
|
||||
let variant = self.ecx.read_discriminant(op).ok()?;
|
||||
let variant = self.ecx.read_discriminant(op).discard_err()?;
|
||||
let discr_value =
|
||||
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
|
||||
self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?;
|
||||
Some(discr_value.into())
|
||||
}
|
||||
TrackElem::DerefLen => {
|
||||
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
|
||||
let len_usize = op.len(&self.ecx).ok()?;
|
||||
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into();
|
||||
let len_usize = op.len(&self.ecx).discard_err()?;
|
||||
let layout =
|
||||
self.tcx.layout_of(self.param_env.and(self.tcx.types.usize)).unwrap();
|
||||
Some(ImmTy::from_uint(len_usize, layout).into())
|
||||
}
|
||||
},
|
||||
&mut |place, op| {
|
||||
if let Ok(imm) = self.ecx.read_immediate_raw(op)
|
||||
if let Some(imm) = self.ecx.read_immediate_raw(op).discard_err()
|
||||
&& let Some(imm) = imm.right()
|
||||
{
|
||||
let elem = self.wrap_immediate(*imm);
|
||||
|
@ -447,11 +452,11 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
(FlatSet::Bottom, _) | (_, FlatSet::Bottom) => (FlatSet::Bottom, FlatSet::Bottom),
|
||||
// Both sides are known, do the actual computation.
|
||||
(FlatSet::Elem(left), FlatSet::Elem(right)) => {
|
||||
match self.ecx.binary_op(op, &left, &right) {
|
||||
match self.ecx.binary_op(op, &left, &right).discard_err() {
|
||||
// Ideally this would return an Immediate, since it's sometimes
|
||||
// a pair and sometimes not. But as a hack we always return a pair
|
||||
// and just make the 2nd component `Bottom` when it does not exist.
|
||||
Ok(val) => {
|
||||
Some(val) => {
|
||||
if matches!(val.layout.abi, Abi::ScalarPair(..)) {
|
||||
let (val, overflow) = val.to_scalar_pair();
|
||||
(FlatSet::Elem(val), FlatSet::Elem(overflow))
|
||||
|
@ -470,7 +475,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
}
|
||||
|
||||
let arg_scalar = const_arg.to_scalar();
|
||||
let Ok(arg_value) = arg_scalar.to_bits(layout.size) else {
|
||||
let Some(arg_value) = arg_scalar.to_bits(layout.size).discard_err() else {
|
||||
return (FlatSet::Top, FlatSet::Top);
|
||||
};
|
||||
|
||||
|
@ -519,7 +524,7 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> {
|
|||
}
|
||||
let enum_ty_layout = self.tcx.layout_of(self.param_env.and(enum_ty)).ok()?;
|
||||
let discr_value =
|
||||
self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).ok()?;
|
||||
self.ecx.discriminant_for_variant(enum_ty_layout.ty, variant_index).discard_err()?;
|
||||
Some(discr_value.to_scalar())
|
||||
}
|
||||
|
||||
|
@ -595,7 +600,7 @@ impl<'a, 'tcx> Collector<'a, 'tcx> {
|
|||
.intern_with_temp_alloc(layout, |ecx, dest| {
|
||||
try_write_constant(ecx, dest, place, ty, state, map)
|
||||
})
|
||||
.ok()?;
|
||||
.discard_err()?;
|
||||
return Some(Const::Val(ConstValue::Indirect { alloc_id, offset: Size::ZERO }, ty));
|
||||
}
|
||||
|
||||
|
@ -632,7 +637,7 @@ fn try_write_constant<'tcx>(
|
|||
|
||||
// Fast path for ZSTs.
|
||||
if layout.is_zst() {
|
||||
return Ok(());
|
||||
return interp_ok(());
|
||||
}
|
||||
|
||||
// Fast path for scalars.
|
||||
|
@ -717,7 +722,7 @@ fn try_write_constant<'tcx>(
|
|||
ty::Error(_) | ty::Infer(..) | ty::CoroutineWitness(..) => bug!(),
|
||||
}
|
||||
|
||||
Ok(())
|
||||
interp_ok(())
|
||||
}
|
||||
|
||||
impl<'mir, 'tcx>
|
||||
|
@ -830,7 +835,7 @@ impl<'tcx> MutVisitor<'tcx> for Patch<'tcx> {
|
|||
if let PlaceElem::Index(local) = elem {
|
||||
let offset = self.before_effect.get(&(location, local.into()))?;
|
||||
let offset = offset.try_to_scalar()?;
|
||||
let offset = offset.to_target_usize(&self.tcx).ok()?;
|
||||
let offset = offset.to_target_usize(&self.tcx).discard_err()?;
|
||||
let min_length = offset.checked_add(1)?;
|
||||
Some(PlaceElem::ConstantIndex { offset, min_length, from_end: false })
|
||||
} else {
|
||||
|
|
|
@ -393,7 +393,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
Repeat(..) => return None,
|
||||
|
||||
Constant { ref value, disambiguator: _ } => {
|
||||
self.ecx.eval_mir_constant(value, DUMMY_SP, None).ok()?
|
||||
self.ecx.eval_mir_constant(value, DUMMY_SP, None).discard_err()?
|
||||
}
|
||||
Aggregate(kind, variant, ref fields) => {
|
||||
let fields = fields
|
||||
|
@ -419,29 +419,32 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
ImmTy::uninit(ty).into()
|
||||
} else if matches!(kind, AggregateTy::RawPtr { .. }) {
|
||||
// Pointers don't have fields, so don't `project_field` them.
|
||||
let data = self.ecx.read_pointer(fields[0]).ok()?;
|
||||
let data = self.ecx.read_pointer(fields[0]).discard_err()?;
|
||||
let meta = if fields[1].layout.is_zst() {
|
||||
MemPlaceMeta::None
|
||||
} else {
|
||||
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).ok()?)
|
||||
MemPlaceMeta::Meta(self.ecx.read_scalar(fields[1]).discard_err()?)
|
||||
};
|
||||
let ptr_imm = Immediate::new_pointer_with_meta(data, meta, &self.ecx);
|
||||
ImmTy::from_immediate(ptr_imm, ty).into()
|
||||
} else if matches!(ty.abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
|
||||
let dest = self.ecx.allocate(ty, MemoryKind::Stack).ok()?;
|
||||
let dest = self.ecx.allocate(ty, MemoryKind::Stack).discard_err()?;
|
||||
let variant_dest = if let Some(variant) = variant {
|
||||
self.ecx.project_downcast(&dest, variant).ok()?
|
||||
self.ecx.project_downcast(&dest, variant).discard_err()?
|
||||
} else {
|
||||
dest.clone()
|
||||
};
|
||||
for (field_index, op) in fields.into_iter().enumerate() {
|
||||
let field_dest = self.ecx.project_field(&variant_dest, field_index).ok()?;
|
||||
self.ecx.copy_op(op, &field_dest).ok()?;
|
||||
let field_dest =
|
||||
self.ecx.project_field(&variant_dest, field_index).discard_err()?;
|
||||
self.ecx.copy_op(op, &field_dest).discard_err()?;
|
||||
}
|
||||
self.ecx.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest).ok()?;
|
||||
self.ecx
|
||||
.write_discriminant(variant.unwrap_or(FIRST_VARIANT), &dest)
|
||||
.discard_err()?;
|
||||
self.ecx
|
||||
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
|
||||
.ok()?;
|
||||
.discard_err()?;
|
||||
dest.into()
|
||||
} else {
|
||||
return None;
|
||||
|
@ -467,7 +470,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
// This should have been replaced by a `ConstantIndex` earlier.
|
||||
ProjectionElem::Index(_) => return None,
|
||||
};
|
||||
self.ecx.project(value, elem).ok()?
|
||||
self.ecx.project(value, elem).discard_err()?
|
||||
}
|
||||
Address { place, kind, provenance: _ } => {
|
||||
if !place.is_indirect_first_projection() {
|
||||
|
@ -475,14 +478,14 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
}
|
||||
let local = self.locals[place.local]?;
|
||||
let pointer = self.evaluated[local].as_ref()?;
|
||||
let mut mplace = self.ecx.deref_pointer(pointer).ok()?;
|
||||
let mut mplace = self.ecx.deref_pointer(pointer).discard_err()?;
|
||||
for proj in place.projection.iter().skip(1) {
|
||||
// We have no call stack to associate a local with a value, so we cannot
|
||||
// interpret indexing.
|
||||
if matches!(proj, ProjectionElem::Index(_)) {
|
||||
return None;
|
||||
}
|
||||
mplace = self.ecx.project(&mplace, proj).ok()?;
|
||||
mplace = self.ecx.project(&mplace, proj).discard_err()?;
|
||||
}
|
||||
let pointer = mplace.to_ref(&self.ecx);
|
||||
let ty = match kind {
|
||||
|
@ -500,15 +503,15 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
|
||||
Discriminant(base) => {
|
||||
let base = self.evaluated[base].as_ref()?;
|
||||
let variant = self.ecx.read_discriminant(base).ok()?;
|
||||
let variant = self.ecx.read_discriminant(base).discard_err()?;
|
||||
let discr_value =
|
||||
self.ecx.discriminant_for_variant(base.layout.ty, variant).ok()?;
|
||||
self.ecx.discriminant_for_variant(base.layout.ty, variant).discard_err()?;
|
||||
discr_value.into()
|
||||
}
|
||||
Len(slice) => {
|
||||
let slice = self.evaluated[slice].as_ref()?;
|
||||
let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
|
||||
let len = slice.len(&self.ecx).ok()?;
|
||||
let len = slice.len(&self.ecx).discard_err()?;
|
||||
let imm = ImmTy::from_uint(len, usize_layout);
|
||||
imm.into()
|
||||
}
|
||||
|
@ -535,31 +538,31 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
}
|
||||
UnaryOp(un_op, operand) => {
|
||||
let operand = self.evaluated[operand].as_ref()?;
|
||||
let operand = self.ecx.read_immediate(operand).ok()?;
|
||||
let val = self.ecx.unary_op(un_op, &operand).ok()?;
|
||||
let operand = self.ecx.read_immediate(operand).discard_err()?;
|
||||
let val = self.ecx.unary_op(un_op, &operand).discard_err()?;
|
||||
val.into()
|
||||
}
|
||||
BinaryOp(bin_op, lhs, rhs) => {
|
||||
let lhs = self.evaluated[lhs].as_ref()?;
|
||||
let lhs = self.ecx.read_immediate(lhs).ok()?;
|
||||
let lhs = self.ecx.read_immediate(lhs).discard_err()?;
|
||||
let rhs = self.evaluated[rhs].as_ref()?;
|
||||
let rhs = self.ecx.read_immediate(rhs).ok()?;
|
||||
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).ok()?;
|
||||
let rhs = self.ecx.read_immediate(rhs).discard_err()?;
|
||||
let val = self.ecx.binary_op(bin_op, &lhs, &rhs).discard_err()?;
|
||||
val.into()
|
||||
}
|
||||
Cast { kind, value, from: _, to } => match kind {
|
||||
CastKind::IntToInt | CastKind::IntToFloat => {
|
||||
let value = self.evaluated[value].as_ref()?;
|
||||
let value = self.ecx.read_immediate(value).ok()?;
|
||||
let value = self.ecx.read_immediate(value).discard_err()?;
|
||||
let to = self.ecx.layout_of(to).ok()?;
|
||||
let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
|
||||
let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
|
||||
res.into()
|
||||
}
|
||||
CastKind::FloatToFloat | CastKind::FloatToInt => {
|
||||
let value = self.evaluated[value].as_ref()?;
|
||||
let value = self.ecx.read_immediate(value).ok()?;
|
||||
let value = self.ecx.read_immediate(value).discard_err()?;
|
||||
let to = self.ecx.layout_of(to).ok()?;
|
||||
let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
|
||||
let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
|
||||
res.into()
|
||||
}
|
||||
CastKind::Transmute => {
|
||||
|
@ -574,28 +577,28 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
_ => return None,
|
||||
}
|
||||
}
|
||||
value.offset(Size::ZERO, to, &self.ecx).ok()?
|
||||
value.offset(Size::ZERO, to, &self.ecx).discard_err()?
|
||||
}
|
||||
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) => {
|
||||
let src = self.evaluated[value].as_ref()?;
|
||||
let to = self.ecx.layout_of(to).ok()?;
|
||||
let dest = self.ecx.allocate(to, MemoryKind::Stack).ok()?;
|
||||
self.ecx.unsize_into(src, to, &dest.clone().into()).ok()?;
|
||||
let dest = self.ecx.allocate(to, MemoryKind::Stack).discard_err()?;
|
||||
self.ecx.unsize_into(src, to, &dest.clone().into()).discard_err()?;
|
||||
self.ecx
|
||||
.alloc_mark_immutable(dest.ptr().provenance.unwrap().alloc_id())
|
||||
.ok()?;
|
||||
.discard_err()?;
|
||||
dest.into()
|
||||
}
|
||||
CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
|
||||
let src = self.evaluated[value].as_ref()?;
|
||||
let src = self.ecx.read_immediate(src).ok()?;
|
||||
let src = self.ecx.read_immediate(src).discard_err()?;
|
||||
let to = self.ecx.layout_of(to).ok()?;
|
||||
let ret = self.ecx.ptr_to_ptr(&src, to).ok()?;
|
||||
let ret = self.ecx.ptr_to_ptr(&src, to).discard_err()?;
|
||||
ret.into()
|
||||
}
|
||||
CastKind::PointerCoercion(ty::adjustment::PointerCoercion::UnsafeFnPointer, _) => {
|
||||
let src = self.evaluated[value].as_ref()?;
|
||||
let src = self.ecx.read_immediate(src).ok()?;
|
||||
let src = self.ecx.read_immediate(src).discard_err()?;
|
||||
let to = self.ecx.layout_of(to).ok()?;
|
||||
ImmTy::from_immediate(*src, to).into()
|
||||
}
|
||||
|
@ -708,7 +711,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
&& let Some(idx) = self.locals[idx_local]
|
||||
{
|
||||
if let Some(offset) = self.evaluated[idx].as_ref()
|
||||
&& let Ok(offset) = self.ecx.read_target_usize(offset)
|
||||
&& let Some(offset) = self.ecx.read_target_usize(offset).discard_err()
|
||||
&& let Some(min_length) = offset.checked_add(1)
|
||||
{
|
||||
projection.to_mut()[i] =
|
||||
|
@ -868,7 +871,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
&& let DefKind::Enum = self.tcx.def_kind(enum_did)
|
||||
{
|
||||
let enum_ty = self.tcx.type_of(enum_did).instantiate(self.tcx, enum_args);
|
||||
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).ok()?;
|
||||
let discr = self.ecx.discriminant_for_variant(enum_ty, variant).discard_err()?;
|
||||
return Some(self.insert_scalar(discr.to_scalar(), discr.layout.ty));
|
||||
}
|
||||
|
||||
|
@ -1223,8 +1226,8 @@ impl<'body, 'tcx> VnState<'body, 'tcx> {
|
|||
let as_bits = |value| {
|
||||
let constant = self.evaluated[value].as_ref()?;
|
||||
if layout.abi.is_scalar() {
|
||||
let scalar = self.ecx.read_scalar(constant).ok()?;
|
||||
scalar.to_bits(constant.layout.size).ok()
|
||||
let scalar = self.ecx.read_scalar(constant).discard_err()?;
|
||||
scalar.to_bits(constant.layout.size).discard_err()
|
||||
} else {
|
||||
// `constant` is a wide pointer. Do not evaluate to bits.
|
||||
None
|
||||
|
@ -1484,7 +1487,7 @@ fn op_to_prop_const<'tcx>(
|
|||
|
||||
// If this constant has scalar ABI, return it as a `ConstValue::Scalar`.
|
||||
if let Abi::Scalar(abi::Scalar::Initialized { .. }) = op.layout.abi
|
||||
&& let Ok(scalar) = ecx.read_scalar(op)
|
||||
&& let Some(scalar) = ecx.read_scalar(op).discard_err()
|
||||
{
|
||||
if !scalar.try_to_scalar_int().is_ok() {
|
||||
// Check that we do not leak a pointer.
|
||||
|
@ -1498,12 +1501,12 @@ fn op_to_prop_const<'tcx>(
|
|||
// If this constant is already represented as an `Allocation`,
|
||||
// try putting it into global memory to return it.
|
||||
if let Either::Left(mplace) = op.as_mplace_or_imm() {
|
||||
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).ok()??;
|
||||
let (size, _align) = ecx.size_and_align_of_mplace(&mplace).discard_err()??;
|
||||
|
||||
// Do not try interning a value that contains provenance.
|
||||
// Due to https://github.com/rust-lang/rust/issues/79738, doing so could lead to bugs.
|
||||
// FIXME: remove this hack once that issue is fixed.
|
||||
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).ok()??;
|
||||
let alloc_ref = ecx.get_ptr_alloc(mplace.ptr(), size).discard_err()??;
|
||||
if alloc_ref.has_provenance() {
|
||||
return None;
|
||||
}
|
||||
|
@ -1511,7 +1514,7 @@ fn op_to_prop_const<'tcx>(
|
|||
let pointer = mplace.ptr().into_pointer_or_addr().ok()?;
|
||||
let (prov, offset) = pointer.into_parts();
|
||||
let alloc_id = prov.alloc_id();
|
||||
intern_const_alloc_for_constprop(ecx, alloc_id).ok()?;
|
||||
intern_const_alloc_for_constprop(ecx, alloc_id).discard_err()?;
|
||||
|
||||
// `alloc_id` may point to a static. Codegen will choke on an `Indirect` with anything
|
||||
// but `GlobalAlloc::Memory`, so do fall through to copying if needed.
|
||||
|
@ -1526,7 +1529,8 @@ fn op_to_prop_const<'tcx>(
|
|||
}
|
||||
|
||||
// Everything failed: create a new allocation to hold the data.
|
||||
let alloc_id = ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).ok()?;
|
||||
let alloc_id =
|
||||
ecx.intern_with_temp_alloc(op.layout, |ecx, dest| ecx.copy_op(op, dest)).discard_err()?;
|
||||
let value = ConstValue::Indirect { alloc_id, offset: Size::ZERO };
|
||||
|
||||
// Check that we do not leak a pointer.
|
||||
|
|
|
@ -200,7 +200,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
|
|||
debug!(?discr, ?bb);
|
||||
|
||||
let discr_ty = discr.ty(self.body, self.tcx).ty;
|
||||
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
|
||||
let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let Some(discr) = self.map.find(discr.as_ref()) else { return };
|
||||
debug!(?discr);
|
||||
|
@ -388,24 +390,24 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
|
|||
lhs,
|
||||
constant,
|
||||
&mut |elem, op| match elem {
|
||||
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).ok(),
|
||||
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).ok(),
|
||||
TrackElem::Field(idx) => self.ecx.project_field(op, idx.as_usize()).discard_err(),
|
||||
TrackElem::Variant(idx) => self.ecx.project_downcast(op, idx).discard_err(),
|
||||
TrackElem::Discriminant => {
|
||||
let variant = self.ecx.read_discriminant(op).ok()?;
|
||||
let variant = self.ecx.read_discriminant(op).discard_err()?;
|
||||
let discr_value =
|
||||
self.ecx.discriminant_for_variant(op.layout.ty, variant).ok()?;
|
||||
self.ecx.discriminant_for_variant(op.layout.ty, variant).discard_err()?;
|
||||
Some(discr_value.into())
|
||||
}
|
||||
TrackElem::DerefLen => {
|
||||
let op: OpTy<'_> = self.ecx.deref_pointer(op).ok()?.into();
|
||||
let len_usize = op.len(&self.ecx).ok()?;
|
||||
let op: OpTy<'_> = self.ecx.deref_pointer(op).discard_err()?.into();
|
||||
let len_usize = op.len(&self.ecx).discard_err()?;
|
||||
let layout = self.ecx.layout_of(self.tcx.types.usize).unwrap();
|
||||
Some(ImmTy::from_uint(len_usize, layout).into())
|
||||
}
|
||||
},
|
||||
&mut |place, op| {
|
||||
if let Some(conditions) = state.try_get_idx(place, &self.map)
|
||||
&& let Ok(imm) = self.ecx.read_immediate_raw(op)
|
||||
&& let Some(imm) = self.ecx.read_immediate_raw(op).discard_err()
|
||||
&& let Some(imm) = imm.right()
|
||||
&& let Immediate::Scalar(Scalar::Int(int)) = *imm
|
||||
{
|
||||
|
@ -429,8 +431,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
|
|||
match rhs {
|
||||
// If we expect `lhs ?= A`, we have an opportunity if we assume `constant == A`.
|
||||
Operand::Constant(constant) => {
|
||||
let Ok(constant) =
|
||||
self.ecx.eval_mir_constant(&constant.const_, constant.span, None)
|
||||
let Some(constant) =
|
||||
self.ecx.eval_mir_constant(&constant.const_, constant.span, None).discard_err()
|
||||
else {
|
||||
return;
|
||||
};
|
||||
|
@ -469,8 +471,10 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
|
|||
AggregateKind::Adt(.., Some(_)) => return,
|
||||
AggregateKind::Adt(_, variant_index, ..) if agg_ty.is_enum() => {
|
||||
if let Some(discr_target) = self.map.apply(lhs, TrackElem::Discriminant)
|
||||
&& let Ok(discr_value) =
|
||||
self.ecx.discriminant_for_variant(agg_ty, *variant_index)
|
||||
&& let Some(discr_value) = self
|
||||
.ecx
|
||||
.discriminant_for_variant(agg_ty, *variant_index)
|
||||
.discard_err()
|
||||
{
|
||||
self.process_immediate(bb, discr_target, discr_value, state);
|
||||
}
|
||||
|
@@ -555,7 +559,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
         // `SetDiscriminant` may be a no-op if the assigned variant is the untagged variant
         // of a niche encoding. If we cannot ensure that we write to the discriminant, do
        // nothing.
-        let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else { return };
+        let Ok(enum_layout) = self.ecx.layout_of(enum_ty) else {
+            return;
+        };
         let writes_discriminant = match enum_layout.variants {
             Variants::Single { index } => {
                 assert_eq!(index, *variant_index);
@@ -568,7 +574,8 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {
             } => *variant_index != untagged_variant,
         };
         if writes_discriminant {
-            let Ok(discr) = self.ecx.discriminant_for_variant(enum_ty, *variant_index)
+            let Some(discr) =
+                self.ecx.discriminant_for_variant(enum_ty, *variant_index).discard_err()
             else {
                 return;
             };
@@ -645,7 +652,9 @@ impl<'a, 'tcx> TOFinder<'a, 'tcx> {

         let Some(discr) = discr.place() else { return };
         let discr_ty = discr.ty(self.body, self.tcx).ty;
-        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else { return };
+        let Ok(discr_layout) = self.ecx.layout_of(discr_ty) else {
+            return;
+        };
         let Some(conditions) = state.try_get(discr.as_ref(), &self.map) else { return };

         if let Some((value, _)) = targets.iter().find(|&(_, target)| target == target_bb) {
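The hunks above all follow one pattern: call sites that used to drop an interpreter error implicitly (via `.ok()` or `let Ok(..) = .. else`) now end in `.discard_err()`, so throwing the error away is an explicit decision at each site. The sketch below is a self-contained toy model of that idea, not the rustc_const_eval API; the names `MustHandle`, `Tracked`, and `discard_err` are made up for illustration. An unhandled error panics when dropped, and `discard_err` defuses the guard while converting the result into an `Option`.

/// Toy guard: panics if it is dropped without being explicitly acknowledged.
struct MustHandle<E> {
    error: Option<E>,
}

impl<E> MustHandle<E> {
    fn new(error: E) -> Self {
        MustHandle { error: Some(error) }
    }

    /// Acknowledge the error and defuse the panic-on-drop guard.
    fn into_inner(mut self) -> E {
        self.error.take().expect("error already taken")
    }
}

impl<E> Drop for MustHandle<E> {
    fn drop(&mut self) {
        if self.error.is_some() {
            panic!("error was dropped without being handled");
        }
    }
}

/// Toy stand-in for an interpreter result whose error must not be ignored silently.
type Tracked<T, E> = Result<T, MustHandle<E>>;

/// Explicitly discard the error (the toy analogue of `.discard_err()`).
fn discard_err<T, E>(res: Tracked<T, E>) -> Option<T> {
    match res {
        Ok(v) => Some(v),
        Err(guard) => {
            let _ = guard.into_inner(); // deliberate: we accept losing this error
            None
        }
    }
}

fn main() {
    let bad: Tracked<u32, &str> = Err(MustHandle::new("unsupported operation"));
    // Checking `is_err()` and then silently dropping the guard would panic;
    // going through `discard_err` is the explicit path.
    assert_eq!(discard_err(bad), None);
}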
@@ -6,7 +6,7 @@ use std::fmt::Debug;

 use rustc_const_eval::const_eval::DummyMachine;
 use rustc_const_eval::interpret::{
-    ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error,
+    ImmTy, InterpCx, InterpResult, Projectable, Scalar, format_interp_error, interp_ok,
 };
 use rustc_data_structures::fx::FxHashSet;
 use rustc_hir::HirId;
@@ -101,7 +101,7 @@ impl<'tcx> Value<'tcx> {
             }
             (PlaceElem::Index(idx), Value::Aggregate { fields, .. }) => {
                 let idx = prop.get_const(idx.into())?.immediate()?;
-                let idx = prop.ecx.read_target_usize(idx).ok()?.try_into().ok()?;
+                let idx = prop.ecx.read_target_usize(idx).discard_err()?.try_into().ok()?;
                 if idx <= FieldIdx::MAX_AS_U32 {
                     fields.get(FieldIdx::from_u32(idx)).unwrap_or(&Value::Uninit)
                 } else {
@@ -231,21 +231,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
     where
         F: FnOnce(&mut Self) -> InterpResult<'tcx, T>,
     {
-        match f(self) {
-            Ok(val) => Some(val),
-            Err(error) => {
-                trace!("InterpCx operation failed: {:?}", error);
+        f(self)
+            .map_err(|err| {
+                trace!("InterpCx operation failed: {:?}", err);
                 // Some errors shouldn't come up because creating them causes
                 // an allocation, which we should avoid. When that happens,
                 // dedicated error variants should be introduced instead.
                 assert!(
-                    !error.kind().formatted_string(),
+                    !err.kind().formatted_string(),
                     "known panics lint encountered formatting error: {}",
-                    format_interp_error(self.ecx.tcx.dcx(), error),
+                    format_interp_error(self.ecx.tcx.dcx(), err),
                 );
-                None
-            }
-        }
+                err
+            })
+            .discard_err()
     }

     /// Returns the value, if any, of evaluating `c`.
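For reference, the rewritten `use_ecx` keeps the same behaviour as before (trace the failure, assert that it is not an allocating formatted error, then drop it) but expresses it as a combinator chain ending in an explicit discard. The following is a rough sketch of that call shape on a plain `Result`; it is toy code that ends in `.ok()` only because std's `Result` has no `.discard_err()`, and the assertion step is omitted.

use std::fmt::Debug;

// Toy shape: log the error inside `map_err`, hand it back, then explicitly
// convert the failure into `None` as the last step of the chain.
fn use_ecx_shape<T, E: Debug>(res: Result<T, E>) -> Option<T> {
    res.map_err(|err| {
        eprintln!("operation failed: {err:?}");
        err // hand the error back so the final step can discard it knowingly
    })
    .ok()
}

fn main() {
    assert_eq!(use_ecx_shape::<u32, &str>(Err("division by zero")), None);
    assert_eq!(use_ecx_shape::<u32, &str>(Ok(7)), Some(7));
}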
@@ -315,7 +314,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                 .ecx
                 .binary_op(BinOp::SubWithOverflow, &ImmTy::from_int(0, arg.layout), &arg)?
                 .to_scalar_pair();
-            Ok((arg, overflow.to_bool()?))
+            interp_ok((arg, overflow.to_bool()?))
         })?;
         if overflow {
             self.report_assert_as_lint(
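The `Ok(..)` to `interp_ok(..)` change above is mechanical: success values go through a constructor for the interpreter's result type instead of the bare `Ok` variant. A self-contained toy analogue of the overflow-checking shape in this hunk, with hypothetical names and a plain `Result` standing in for the real alias:

// Toy alias standing in for the interpreter result type; `interp_ok_toy` plays
// the role of the `interp_ok(..)` constructor used in the diff above.
type ToyInterpResult<T> = Result<T, String>;

fn interp_ok_toy<T>(value: T) -> ToyInterpResult<T> {
    Ok(value)
}

// Negation with an overflow flag, mirroring the `SubWithOverflow` pattern (0 - arg).
fn checked_neg(arg: i32) -> ToyInterpResult<(i32, bool)> {
    let (value, overflow) = arg.overflowing_neg();
    interp_ok_toy((value, overflow))
}

fn main() {
    assert_eq!(checked_neg(5), Ok((-5, false)));
    assert_eq!(checked_neg(i32::MIN), Ok((i32::MIN, true)));
}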
@@ -349,7 +348,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         let left_ty = left.ty(self.local_decls(), self.tcx);
         let left_size = self.ecx.layout_of(left_ty).ok()?.size;
         let right_size = r.layout.size;
-        let r_bits = r.to_scalar().to_bits(right_size).ok();
+        let r_bits = r.to_scalar().to_bits(right_size).discard_err();
         if r_bits.is_some_and(|b| b >= left_size.bits() as u128) {
             debug!("check_binary_op: reporting assert for {:?}", location);
             let panic = AssertKind::Overflow(
@@ -496,7 +495,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             // This can be `None` if the lhs wasn't const propagated and we just
             // triggered the assert on the value of the rhs.
             self.eval_operand(op)
-                .and_then(|op| self.ecx.read_immediate(&op).ok())
+                .and_then(|op| self.ecx.read_immediate(&op).discard_err())
                 .map_or(DbgVal::Underscore, |op| DbgVal::Val(op.to_const_int()))
         };
         let msg = match msg {
@@ -602,7 +601,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {

             Len(place) => {
                 let len = match self.get_const(place)? {
-                    Value::Immediate(src) => src.len(&self.ecx).ok()?,
+                    Value::Immediate(src) => src.len(&self.ecx).discard_err()?,
                     Value::Aggregate { fields, .. } => fields.len() as u64,
                     Value::Uninit => match place.ty(self.local_decls(), self.tcx).ty.kind() {
                         ty::Array(_, n) => n.try_eval_target_usize(self.tcx, self.param_env)?,
@@ -615,7 +614,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             Ref(..) | RawPtr(..) => return None,

             NullaryOp(ref null_op, ty) => {
-                let op_layout = self.use_ecx(|this| this.ecx.layout_of(ty))?;
+                let op_layout = self.ecx.layout_of(ty).ok()?;
                 let val = match null_op {
                     NullOp::SizeOf => op_layout.size.bytes(),
                     NullOp::AlignOf => op_layout.align.abi.bytes(),
@@ -633,16 +632,16 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
             Cast(ref kind, ref value, to) => match kind {
                 CastKind::IntToInt | CastKind::IntToFloat => {
                     let value = self.eval_operand(value)?;
-                    let value = self.ecx.read_immediate(&value).ok()?;
+                    let value = self.ecx.read_immediate(&value).discard_err()?;
                     let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.int_to_int_or_float(&value, to).ok()?;
+                    let res = self.ecx.int_to_int_or_float(&value, to).discard_err()?;
                     res.into()
                 }
                 CastKind::FloatToFloat | CastKind::FloatToInt => {
                     let value = self.eval_operand(value)?;
-                    let value = self.ecx.read_immediate(&value).ok()?;
+                    let value = self.ecx.read_immediate(&value).discard_err()?;
                     let to = self.ecx.layout_of(to).ok()?;
-                    let res = self.ecx.float_to_float_or_int(&value, to).ok()?;
+                    let res = self.ecx.float_to_float_or_int(&value, to).discard_err()?;
                     res.into()
                 }
                 CastKind::Transmute => {
@@ -656,7 +655,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
                         _ => return None,
                     }

-                    value.offset(Size::ZERO, to, &self.ecx).ok()?.into()
+                    value.offset(Size::ZERO, to, &self.ecx).discard_err()?.into()
                 }
                 _ => return None,
             },
@@ -781,7 +780,7 @@ impl<'tcx> Visitor<'tcx> for ConstPropagator<'_, 'tcx> {
             TerminatorKind::SwitchInt { ref discr, ref targets } => {
                 if let Some(ref value) = self.eval_operand(discr)
                     && let Some(value_const) = self.use_ecx(|this| this.ecx.read_scalar(value))
-                    && let Ok(constant) = value_const.to_bits(value_const.size())
+                    && let Some(constant) = value_const.to_bits(value_const.size()).discard_err()
                 {
                     // We managed to evaluate the discriminant, so we know we only need to visit
                     // one target.