Auto merge of #87123 - RalfJung:miri-provenance-overhaul, r=oli-obk
CTFE/Miri engine Pointer type overhaul

This fixes the long-standing problem that we are using `Scalar` as a type to represent pointers that might be integer values (since they point to a ZST). The main problem is that with int-to-ptr casts, there are multiple ways to represent the same pointer as a `Scalar`, and it is unclear whether "normalization" (i.e., the cast) has already happened or not. This leads to ugly methods like `force_mplace_ptr` and `force_op_ptr`.

Another problem this solves: in Miri, it makes a lot more sense to have the `Pointer::offset` field represent the full absolute address (instead of being relative to the `AllocId`). This means we can do ptr-to-int casts without access to any machine state, and it means that the overflow checks on pointer arithmetic are (finally!) accurate.

To solve this, the `Pointer` type is made entirely parametric over the provenance, so that we can use `Pointer<AllocId>` inside `Scalar` but use `Pointer<Option<AllocId>>` when accessing memory (where `None` represents the case that we could not figure out an `AllocId`; in that case the `offset` is an absolute address). Moreover, the `Provenance` trait determines whether a pointer with a given provenance can be cast to an integer by simply dropping the provenance.

I hope this can be read commit-by-commit, but the first commit does the bulk of the work. It introduces some FIXMEs that are resolved later.

Fixes https://github.com/rust-lang/miri/issues/841
Miri PR: https://github.com/rust-lang/miri/pull/1851
r? `@oli-obk`
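As a reading aid for the diff below, here is a minimal, self-contained sketch of the design described above. It is illustrative only: the names (`Provenance`, `into_parts`, `into_pointer_or_addr`) follow this PR, but the bodies are simplified stand-ins (the real `Pointer` stores a `Size` offset and lives in `rustc_middle::mir::interpret`):

```rust
use std::fmt;

/// Simplified stand-in for rustc's interned allocation IDs.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub struct AllocId(pub u64);

/// Provenance decides whether a pointer may be cast to an integer
/// by simply dropping its provenance.
pub trait Provenance: Copy + fmt::Debug {
    const OFFSET_IS_ADDR: bool;
}

impl Provenance for AllocId {
    // In CTFE, `offset` is relative to the allocation, so dropping the
    // `AllocId` would lose information: no ptr-to-int by truncation.
    const OFFSET_IS_ADDR: bool = false;
}

impl Provenance for Option<AllocId> {
    // When provenance may be absent, `offset` is an absolute address.
    const OFFSET_IS_ADDR: bool = true;
}

/// A pointer is an offset plus arbitrary provenance.
#[derive(Copy, Clone, Debug)]
pub struct Pointer<Prov = AllocId> {
    offset: u64, // the real type uses `Size`
    provenance: Prov,
}

impl<Prov> Pointer<Prov> {
    pub fn into_parts(self) -> (Prov, u64) {
        (self.provenance, self.offset)
    }
}

impl Pointer<Option<AllocId>> {
    /// Either a pointer with known provenance, or a raw absolute address.
    pub fn into_pointer_or_addr(self) -> Result<Pointer<AllocId>, u64> {
        match self.provenance {
            Some(id) => Ok(Pointer { offset: self.offset, provenance: id }),
            None => Err(self.offset),
        }
    }
}

fn main() {
    // An int-to-ptr cast yields a pointer without provenance: just an address.
    let p: Pointer<Option<AllocId>> = Pointer { offset: 0x20, provenance: None };
    assert!(p.into_pointer_or_addr().is_err());
}
```

`Scalar` can then hold a `Pointer<AllocId>`, while memory-access APIs take `Pointer<Option<AllocId>>`; that split is what removes the need for `force_mplace_ptr`/`force_op_ptr`-style normalization in the hunks below.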
commit c78ebb7bdc
106 changed files with 1317 additions and 1407 deletions
@@ -16,7 +16,6 @@ use crate::interpret::{
 #[derive(Clone, Debug)]
 pub enum ConstEvalErrKind {
     NeedsRfc(String),
-    PtrToIntCast,
     ConstAccessesStatic,
     ModifiedGlobal,
     AssertFailure(AssertKind<ConstInt>),
@@ -49,12 +48,6 @@ impl fmt::Display for ConstEvalErrKind {
            NeedsRfc(ref msg) => {
                write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
            }
-            PtrToIntCast => {
-                write!(
-                    f,
-                    "cannot cast pointer to integer because it was not created by cast from integer"
-                )
-            }
            ConstAccessesStatic => write!(f, "constant accesses static"),
            ModifiedGlobal => {
                write!(f, "modifying a static's initial value from another static's initializer")
@@ -136,19 +136,19 @@ pub(super) fn op_to_const<'tcx>(
        // by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
        // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
        // structs containing such.
-        op.try_as_mplace(ecx)
+        op.try_as_mplace()
    };

-    let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr {
-        Scalar::Ptr(ptr) => {
-            let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory();
-            ConstValue::ByRef { alloc, offset: ptr.offset }
+    // We know `offset` is relative to the allocation, so we can use `into_parts`.
+    let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
+        (Some(alloc_id), offset) => {
+            let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
+            ConstValue::ByRef { alloc, offset }
        }
-        Scalar::Int(int) => {
+        (None, offset) => {
            assert!(mplace.layout.is_zst());
            assert_eq!(
-                int.assert_bits(ecx.tcx.data_layout.pointer_size)
-                    % u128::from(mplace.layout.align.abi.bytes()),
+                offset.bytes() % mplace.layout.align.abi.bytes(),
                0,
                "this MPlaceTy must come from a validated constant, thus we can assume the \
                alignment is correct",
@@ -162,14 +162,15 @@ pub(super) fn op_to_const<'tcx>(
        Err(imm) => match *imm {
            Immediate::Scalar(x) => match x {
                ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
-                ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place(ecx)),
+                ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
            },
            Immediate::ScalarPair(a, b) => {
-                let (data, start) = match a.check_init().unwrap() {
-                    Scalar::Ptr(ptr) => {
-                        (ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes())
+                // We know `offset` is relative to the allocation, so we can use `into_parts`.
+                let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
+                    (Some(alloc_id), offset) => {
+                        (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
                    }
-                    Scalar::Int { .. } => (
+                    (None, _offset) => (
                        ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
                            b"" as &[u8],
                        )),
@@ -369,6 +370,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
                inner = true;
            }
        };
+        let alloc_id = mplace.ptr.provenance.unwrap();
        if let Err(error) = validation {
            // Validation failed, report an error. This is always a hard error.
            let err = ConstEvalErr::new(&ecx, error, None);
@@ -381,9 +383,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
                "the raw bytes of the constant ({}",
                display_allocation(
                    *ecx.tcx,
-                    ecx.tcx
-                        .global_alloc(mplace.ptr.assert_ptr().alloc_id)
-                        .unwrap_memory()
+                    ecx.tcx.global_alloc(alloc_id).unwrap_memory()
                )
            ));
            diag.emit();
@@ -391,7 +391,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
            ))
        } else {
            // Convert to raw constant
-            Ok(ConstAlloc { alloc_id: mplace.ptr.assert_ptr().alloc_id, ty: mplace.layout.ty })
+            Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
        }
    }
 }
@@ -16,8 +16,8 @@ use rustc_target::abi::{Align, Size};
 use rustc_target::spec::abi::Abi;

 use crate::interpret::{
-    self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, Memory,
-    OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind,
+    self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, OpTy,
+    PlaceTy, Scalar, StackPopUnwind,
 };

 use super::error::*;
@@ -59,7 +59,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
    pub steps_remaining: usize,

    /// The virtual call stack.
-    pub(crate) stack: Vec<Frame<'mir, 'tcx, (), ()>>,
+    pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
 }

 #[derive(Copy, Clone, Debug)]
@@ -168,11 +168,11 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
            // Comparisons between integers are always known.
            (Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
            // Equality with integers can never be known for sure.
-            (Scalar::Int { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Int { .. }) => false,
+            (Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
            // FIXME: return `true` for when both sides are the same pointer, *except* that
            // some things (like functions and vtables) do not have stable addresses
            // so we need to be careful around them (see e.g. #73722).
-            (Scalar::Ptr(_), Scalar::Ptr(_)) => false,
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
        }
    }

@@ -183,13 +183,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
            // Comparisons of abstract pointers with null pointers are known if the pointer
            // is in bounds, because if they are in bounds, the pointer can't be null.
            // Inequality with integers other than null can never be known for sure.
-            (Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
-                int.is_null() && !self.memory.ptr_may_be_null(ptr)
+            (Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
+                int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
            }
            // FIXME: return `true` for at least some comparisons where we can reliably
            // determine the result of runtime inequality tests at compile-time.
            // Examples include comparison of addresses in different static items.
-            (Scalar::Ptr(_), Scalar::Ptr(_)) => false,
+            (Scalar::Ptr(..), Scalar::Ptr(..)) => false,
        }
    }
 }
@@ -312,7 +312,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
                    align,
                    interpret::MemoryKind::Machine(MemoryKind::Heap),
                )?;
-                ecx.write_scalar(Scalar::Ptr(ptr), dest)?;
+                ecx.write_pointer(ptr, dest)?;
            }
            _ => {
                return Err(ConstEvalErrKind::NeedsRfc(format!(
@@ -356,10 +356,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
        Err(ConstEvalErrKind::Abort(msg).into())
    }

-    fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
-        Err(ConstEvalErrKind::PtrToIntCast.into())
-    }
-
    fn binary_ptr_op(
        _ecx: &InterpCx<'mir, 'tcx, Self>,
        _bin_op: mir::BinOp,
@@ -35,7 +35,7 @@ pub(crate) fn const_caller_location(
    if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
        bug!("intern_const_alloc_recursive should not error in this case")
    }
-    ConstValue::Scalar(loc_place.ptr)
+    ConstValue::Scalar(Scalar::from_pointer(loc_place.ptr.into_pointer_or_addr().unwrap(), &tcx))
 }

 /// Convert an evaluated constant to a type level constant
@@ -179,9 +179,9 @@ pub(crate) fn deref_const<'tcx>(
    let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
    let op = ecx.const_to_op(val, None).unwrap();
    let mplace = ecx.deref_operand(&op).unwrap();
-    if let Scalar::Ptr(ptr) = mplace.ptr {
+    if let Some(alloc_id) = mplace.ptr.provenance {
        assert_eq!(
-            tcx.get_global_alloc(ptr.alloc_id).unwrap().unwrap_memory().mutability,
+            tcx.get_global_alloc(alloc_id).unwrap().unwrap_memory().mutability,
            Mutability::Not,
            "deref_const cannot be used with mutable allocations as \
            that could allow pattern matching to observe mutable statics",
@@ -57,7 +57,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    .ok_or_else(|| err_inval!(TooGeneric))?;

                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
-                self.write_scalar(fn_ptr, dest)?;
+                self.write_pointer(fn_ptr, dest)?;
            }
            _ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
        }
@@ -88,7 +88,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    ty::ClosureKind::FnOnce,
                );
                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
-                self.write_scalar(fn_ptr, dest)?;
+                self.write_pointer(fn_ptr, dest)?;
            }
            _ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
        }
@@ -175,7 +175,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                // (a) cast a raw ptr to usize, or
                // (b) cast from an integer-like (including bool, char, enums).
                // In both cases we want the bits.
-                let bits = self.force_bits(src.to_scalar()?, src.layout.size)?;
+                let bits = src.to_scalar()?.to_bits(src.layout.size)?;
                Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
            }

@@ -280,7 +280,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                // Initial cast from sized to dyn trait
                let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
                let ptr = self.read_immediate(src)?.to_scalar()?;
-                let val = Immediate::new_dyn_trait(ptr, vtable);
+                let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
                self.write_immediate(val, dest)
            }

@@ -8,7 +8,6 @@ use rustc_index::vec::IndexVec;
 use rustc_macros::HashStable;
 use rustc_middle::ich::StableHashingContext;
 use rustc_middle::mir;
-use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
 use rustc_middle::ty::layout::{self, TyAndLayout};
 use rustc_middle::ty::{
    self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
@@ -18,8 +17,9 @@ use rustc_span::{Pos, Span};
 use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};

 use super::{
-    Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place,
-    PlaceTy, ScalarMaybeUninit, StackPopJump,
+    AllocId, GlobalId, Immediate, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory,
+    MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance, Scalar, ScalarMaybeUninit,
+    StackPopJump,
 };
 use crate::transform::validate::equal_up_to_regions;
 use crate::util::storage::AlwaysLiveLocals;
@@ -80,7 +80,7 @@ impl Drop for SpanGuard {
 }

 /// A stack frame.
-pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> {
+pub struct Frame<'mir, 'tcx, Tag: Provenance = AllocId, Extra = ()> {
    ////////////////////////////////////////////////////////////////////////////////
    // Function and callsite information
    ////////////////////////////////////////////////////////////////////////////////
@@ -161,7 +161,7 @@ pub enum StackPopCleanup {

 /// State of a local variable including a memoized layout
 #[derive(Clone, PartialEq, Eq, HashStable)]
-pub struct LocalState<'tcx, Tag = ()> {
+pub struct LocalState<'tcx, Tag: Provenance = AllocId> {
    pub value: LocalValue<Tag>,
    /// Don't modify if `Some`, this is only used to prevent computing the layout twice
    #[stable_hasher(ignore)]
@@ -169,8 +169,8 @@ pub struct LocalState<'tcx, Tag = ()> {
 }

 /// Current value of a local variable
-#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these
-pub enum LocalValue<Tag = ()> {
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Debug)] // Miri debug-prints these
+pub enum LocalValue<Tag: Provenance = AllocId> {
    /// This local is not currently alive, and cannot be used at all.
    Dead,
    /// This local is alive but not yet initialized. It can be written to
@@ -186,7 +186,7 @@ pub enum LocalValue<Tag = ()> {
    Live(Operand<Tag>),
 }

-impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
+impl<'tcx, Tag: Provenance + 'static> LocalState<'tcx, Tag> {
    /// Read the local's value or error if the local is not yet live or not live anymore.
    ///
    /// Note: This may only be invoked from the `Machine::access_local` hook and not from
@@ -220,7 +220,7 @@ impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
    }
 }

-impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> {
+impl<'mir, 'tcx, Tag: Provenance> Frame<'mir, 'tcx, Tag> {
    pub fn with_extra<Extra>(self, extra: Extra) -> Frame<'mir, 'tcx, Tag, Extra> {
        Frame {
            body: self.body,
@@ -235,7 +235,7 @@ impl<'mir, 'tcx, Tag> Frame<'mir, 'tcx, Tag> {
    }
 }

-impl<'mir, 'tcx, Tag, Extra> Frame<'mir, 'tcx, Tag, Extra> {
+impl<'mir, 'tcx, Tag: Provenance, Extra> Frame<'mir, 'tcx, Tag, Extra> {
    /// Get the current location within the Frame.
    ///
    /// If this is `Err`, we are not currently executing any particular statement in
@@ -406,20 +406,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    }

    #[inline(always)]
-    pub fn force_ptr(
-        &self,
-        scalar: Scalar<M::PointerTag>,
-    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
-        self.memory.force_ptr(scalar)
-    }
-
-    #[inline(always)]
-    pub fn force_bits(
-        &self,
-        scalar: Scalar<M::PointerTag>,
-        size: Size,
-    ) -> InterpResult<'tcx, u128> {
-        self.memory.force_bits(scalar, size)
+    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+        self.memory.scalar_to_ptr(scalar)
    }

    /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
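The hunk above collapses the fallible `force_ptr`/`force_bits` pair into one infallible `scalar_to_ptr`. A hedged, self-contained sketch of what that normalization does (simplified types, not the actual rustc definitions):

```rust
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug)]
struct Pointer<Prov> {
    offset: u64,
    provenance: Prov,
}

#[derive(Copy, Clone, Debug)]
enum Scalar {
    /// A real pointer, carrying its provenance.
    Ptr(Pointer<AllocId>),
    /// Raw bits, e.g. the result of an int-to-ptr cast.
    Int(u64),
}

/// Every `Scalar` becomes a `Pointer<Option<AllocId>>`; integers simply
/// get `None` provenance instead of causing an error like `force_ptr` did.
fn scalar_to_ptr(s: Scalar) -> Pointer<Option<AllocId>> {
    match s {
        Scalar::Ptr(p) => Pointer { offset: p.offset, provenance: Some(p.provenance) },
        Scalar::Int(addr) => Pointer { offset: addr, provenance: None },
    }
}

fn main() {
    // A ZST "pointer" at address 8 normalizes with no machine state needed.
    let zst = scalar_to_ptr(Scalar::Int(8));
    assert!(zst.provenance.is_none());
    println!("{:?}", zst);
}
```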
@@ -650,7 +638,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                Ok(Some((size, align)))
            }
            ty::Dynamic(..) => {
-                let vtable = metadata.unwrap_meta();
+                let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
                // Read size and align from vtable (already checks size).
                Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
            }
@@ -897,9 +885,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
        if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
            // All locals have a backing allocation, even if the allocation is empty
-            // due to the local having ZST type.
-            let ptr = ptr.assert_ptr();
-            trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id));
+            // due to the local having ZST type. Hence we can `unwrap`.
+            trace!(
+                "deallocating local {:?}: {:?}",
+                local,
+                self.memory.dump_alloc(ptr.provenance.unwrap().get_alloc_id())
+            );
            self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
        };
        Ok(())
@@ -975,46 +966,45 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
                match self.ecx.stack()[frame].locals[local].value {
                    LocalValue::Dead => write!(fmt, " is dead")?,
                    LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
-                    LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr {
-                        Scalar::Ptr(ptr) => {
-                            write!(
-                                fmt,
-                                " by align({}){} ref:",
-                                mplace.align.bytes(),
-                                match mplace.meta {
-                                    MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
-                                    MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
-                                }
-                            )?;
-                            allocs.push(ptr.alloc_id);
-                        }
-                        ptr => write!(fmt, " by integral ref: {:?}", ptr)?,
-                    },
+                    LocalValue::Live(Operand::Indirect(mplace)) => {
+                        write!(
+                            fmt,
+                            " by align({}){} ref {:?}:",
+                            mplace.align.bytes(),
+                            match mplace.meta {
+                                MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
+                                MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
+                            },
+                            mplace.ptr,
+                        )?;
+                        allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
+                    }
                    LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                        write!(fmt, " {:?}", val)?;
-                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
-                            allocs.push(ptr.alloc_id);
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
+                            allocs.push(ptr.provenance.get_alloc_id());
                        }
                    }
                    LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                        write!(fmt, " ({:?}, {:?})", val1, val2)?;
-                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
-                            allocs.push(ptr.alloc_id);
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
+                            allocs.push(ptr.provenance.get_alloc_id());
                        }
-                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
-                            allocs.push(ptr.alloc_id);
+                        if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
+                            allocs.push(ptr.provenance.get_alloc_id());
                        }
                    }
                }

                write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
            }
-            Place::Ptr(mplace) => match mplace.ptr {
-                Scalar::Ptr(ptr) => write!(
+            Place::Ptr(mplace) => match mplace.ptr.provenance.map(Provenance::get_alloc_id) {
+                Some(alloc_id) => write!(
                    fmt,
-                    "by align({}) ref: {:?}",
+                    "by align({}) ref {:?}: {:?}",
                    mplace.align.bytes(),
-                    self.ecx.memory.dump_alloc(ptr.alloc_id)
+                    mplace.ptr,
+                    self.ecx.memory.dump_alloc(alloc_id)
                ),
                ptr => write!(fmt, " integral by ref: {:?}", ptr),
            },
@@ -1022,7 +1012,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
    }
 }

-impl<'ctx, 'mir, 'tcx, Tag, Extra> HashStable<StableHashingContext<'ctx>>
+impl<'ctx, 'mir, 'tcx, Tag: Provenance, Extra> HashStable<StableHashingContext<'ctx>>
    for Frame<'mir, 'tcx, Tag, Extra>
 where
    Extra: HashStable<StableHashingContext<'ctx>>,
@@ -20,18 +20,17 @@ use rustc_errors::ErrorReported;
 use rustc_hir as hir;
 use rustc_middle::mir::interpret::InterpResult;
 use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
-use rustc_target::abi::Size;

 use rustc_ast::Mutability;

-use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, Scalar, ValueVisitor};
+use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, PlaceTy, ValueVisitor};
 use crate::const_eval;

 pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
    'mir,
    'tcx,
    MemoryKind = T,
-    PointerTag = (),
+    PointerTag = AllocId,
    ExtraFnVal = !,
    FrameExtra = (),
    AllocExtra = (),
@@ -136,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
    };
    // link the alloc id to the actual allocation
    let alloc = tcx.intern_const_alloc(alloc);
-    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc));
+    leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
    tcx.set_alloc_id_memory(alloc_id, alloc);
    None
 }
@@ -203,10 +202,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
            if let ty::Dynamic(..) =
                tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
            {
-                if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() {
+                let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
+                if let Some(alloc_id) = ptr.provenance {
                    // Explicitly choose const mode here, since vtables are immutable, even
                    // if the reference of the fat pointer is mutable.
-                    self.intern_shallow(vtable.alloc_id, InternMode::Const, None);
+                    self.intern_shallow(alloc_id, InternMode::Const, None);
                } else {
                    // Validation will error (with a better message) on an invalid vtable pointer.
                    // Let validation show the error message, but make sure it *does* error.
@@ -216,7 +216,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
        }
        // Check if we have encountered this pointer+layout combination before.
        // Only recurse for allocation-backed pointers.
-        if let Scalar::Ptr(ptr) = mplace.ptr {
+        if let Some(alloc_id) = mplace.ptr.provenance {
            // Compute the mode with which we intern this. Our goal here is to make as many
            // statics as we can immutable so they can be placed in read-only memory by LLVM.
            let ref_mode = match self.mode {
@@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
                    InternMode::Const
                }
            };
-            match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) {
+            match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
                // No need to recurse, these are interned already and statics may have
                // cycles, so we don't want to recurse there
                Some(IsStaticOrFn) => {}
@@ -321,7 +321,7 @@ where
        leftover_allocations,
        // The outermost allocation must exist, because we allocated it with
        // `Memory::allocate`.
-        ret.ptr.assert_ptr().alloc_id,
+        ret.ptr.provenance.unwrap(),
        base_intern_mode,
        Some(ret.layout.ty),
    );
@@ -395,9 +395,9 @@ where
        }
        let alloc = tcx.intern_const_alloc(alloc);
        tcx.set_alloc_id_memory(alloc_id, alloc);
-        for &(_, ((), reloc)) in alloc.relocations().iter() {
-            if leftover_allocations.insert(reloc) {
-                todo.push(reloc);
+        for &(_, alloc_id) in alloc.relocations().iter() {
+            if leftover_allocations.insert(alloc_id) {
+                todo.push(alloc_id);
            }
        }
    } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
@@ -425,14 +425,12 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
        layout: TyAndLayout<'tcx>,
        f: impl FnOnce(
            &mut InterpCx<'mir, 'tcx, M>,
-            &MPlaceTy<'tcx, M::PointerTag>,
+            &PlaceTy<'tcx, M::PointerTag>,
        ) -> InterpResult<'tcx, ()>,
    ) -> InterpResult<'tcx, &'tcx Allocation> {
        let dest = self.allocate(layout, MemoryKind::Stack)?;
-        f(self, &dest)?;
-        let ptr = dest.ptr.assert_ptr();
-        assert_eq!(ptr.offset, Size::ZERO);
-        let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
+        f(self, &dest.into())?;
+        let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
        alloc.mutability = Mutability::Not;
        Ok(self.tcx.intern_const_alloc(alloc))
    }
@@ -18,6 +18,7 @@ use rustc_target::abi::{Abi, Align, LayoutOf as _, Primitive, Size};

 use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
+    Pointer,
 };

 mod caller_location;
@@ -138,7 +139,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
-                self.write_scalar(location.ptr, dest)?;
+                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
@@ -190,7 +191,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
-                let bits = self.force_bits(val, layout_of.size)?;
+                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => span_bug!(
@@ -238,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                // term since the sign of the second term can be inferred from this and
                // the fact that the operation has overflowed (if either is 0 no
                // overflow can occur)
-                let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
+                let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                if first_term_positive {
                    // Negative overflow not possible since the positive first term
@@ -298,7 +299,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
-                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
+                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
@@ -312,9 +313,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
-                let val_bits = self.force_bits(val, layout.size)?;
+                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
-                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
+                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
@@ -331,22 +332,22 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::offset => {
-                let ptr = self.read_scalar(&args[0])?.check_init()?;
+                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
-                self.write_scalar(offset_ptr, dest)?;
+                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
-                let ptr = self.read_scalar(&args[0])?.check_init()?;
+                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
-                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
-                self.write_scalar(offset_ptr, dest)?;
+                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
+                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(&args[0])?.to_scalar()?;
@@ -361,9 +362,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
-                let done = if a.is_bits() && b.is_bits() {
-                    let a = a.to_machine_usize(self)?;
-                    let b = b.to_machine_usize(self)?;
+                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
+                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
+                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
@@ -376,9 +377,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

                if !done {
                    // General case: we need two pointers.
-                    let a = self.force_ptr(a)?;
-                    let b = self.force_ptr(b)?;
-                    if a.alloc_id != b.alloc_id {
+                    let a = self.scalar_to_ptr(a);
+                    let b = self.scalar_to_ptr(b);
+                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
+                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
+                    if a_alloc_id != b_alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
@@ -386,8 +389,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
-                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
-                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
+                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
+                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
@@ -513,10 +516,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
    pub fn ptr_offset_inbounds(
        &self,
-        ptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
-    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
@@ -524,7 +527,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
-        let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
+        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
@@ -558,8 +561,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            )
        })?;

-        let src = self.read_scalar(&src)?.check_init()?;
-        let dst = self.read_scalar(&dst)?.check_init()?;
+        let src = self.read_pointer(&src)?;
+        let dst = self.read_pointer(&dst)?;

        self.memory.copy(src, align, dst, align, size, nonoverlapping)
    }
@@ -572,8 +575,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());

-        let lhs = self.read_scalar(lhs)?.check_init()?;
-        let rhs = self.read_scalar(rhs)?.check_init()?;
+        let lhs = self.read_pointer(lhs)?;
+        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
        let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
@@ -96,7 +96,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();

        // Initialize fields.
-        self.write_immediate(file.to_ref(), &self.mplace_field(&location, 0).unwrap().into())
+        self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
            .expect("writing to memory we just allocated cannot fail");
        self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
            .expect("writing to memory we just allocated cannot fail");
@@ -13,8 +13,8 @@ use rustc_target::abi::Size;
 use rustc_target::spec::abi::Abi;

 use super::{
-    AllocId, Allocation, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult, LocalValue,
-    MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar, StackPopUnwind,
+    AllocId, AllocRange, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace,
+    Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
 };

 /// Data returned by Machine::stack_pop,
@@ -84,12 +84,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
    /// Additional memory kinds a machine wishes to distinguish from the builtin ones
    type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;

-    /// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows"
-    /// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>.
-    /// The `default()` is used for pointers to consts, statics, vtables and functions.
-    /// The `Debug` formatting is used for displaying pointers; we cannot use `Display`
-    /// as `()` does not implement that, but it should be "nice" output.
-    type PointerTag: Debug + Copy + Eq + Hash + 'static;
+    /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+    type PointerTag: Provenance + Eq + Hash + 'static;

    /// Machines can define extra (non-instance) things that represent values of function pointers.
    /// For example, Miri uses this to return a function pointer from `dlsym`
@@ -266,28 +262,39 @@ pub trait Machine<'mir, 'tcx>: Sized {
    }

    /// Return the `AllocId` for the given thread-local static in the current thread.
-    fn thread_local_static_alloc_id(
+    fn thread_local_static_base_pointer(
        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
        def_id: DefId,
-    ) -> InterpResult<'tcx, AllocId> {
+    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
        throw_unsup!(ThreadLocalStatic(def_id))
    }

-    /// Return the `AllocId` backing the given `extern static`.
-    fn extern_static_alloc_id(
+    /// Return the root pointer for the given `extern static`.
+    fn extern_static_base_pointer(
        mem: &Memory<'mir, 'tcx, Self>,
        def_id: DefId,
-    ) -> InterpResult<'tcx, AllocId> {
-        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
-        Ok(mem.tcx.create_static_alloc(def_id))
-    }
+    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>>;

-    /// Return the "base" tag for the given *global* allocation: the one that is used for direct
-    /// accesses to this static/const/fn allocation. If `id` is not a global allocation,
-    /// this will return an unusable tag (i.e., accesses will be UB)!
+    /// Return a "base" pointer for the given allocation: the one that is used for direct
+    /// accesses to this static/const/fn allocation, or the one returned from the heap allocator.
    ///
-    /// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed.
-    fn tag_global_base_pointer(memory_extra: &Self::MemoryExtra, id: AllocId) -> Self::PointerTag;
+    /// Not called on `extern` or thread-local statics (those use the methods above).
+    fn tag_alloc_base_pointer(
+        mem: &Memory<'mir, 'tcx, Self>,
+        ptr: Pointer,
+    ) -> Pointer<Self::PointerTag>;
+
+    /// "Int-to-pointer cast"
+    fn ptr_from_addr(
+        mem: &Memory<'mir, 'tcx, Self>,
+        addr: u64,
+    ) -> Pointer<Option<Self::PointerTag>>;
+
+    /// Convert a pointer with provenance into an allocation-offset pair.
+    fn ptr_get_alloc(
+        mem: &Memory<'mir, 'tcx, Self>,
+        ptr: Pointer<Self::PointerTag>,
+    ) -> (AllocId, Size);

    /// Called to initialize the "extra" state of an allocation and make the pointers
    /// it contains (in relocations) tagged. The way we construct allocations is
@@ -302,16 +309,12 @@ pub trait Machine<'mir, 'tcx>: Sized {
    /// allocation (because a copy had to be done to add tags or metadata), machine memory will
    /// cache the result. (This relies on `AllocMap::get_or` being able to add the
    /// owned allocation to the map even when the map is shared.)
-    ///
-    /// Also return the "base" tag to use for this allocation: the one that is used for direct
-    /// accesses to this allocation. If `kind == STATIC_KIND`, this tag must be consistent
-    /// with `tag_global_base_pointer`.
    fn init_allocation_extra<'b>(
-        memory_extra: &Self::MemoryExtra,
+        mem: &Memory<'mir, 'tcx, Self>,
        id: AllocId,
        alloc: Cow<'b, Allocation>,
        kind: Option<MemoryKind<Self::MemoryKind>>,
-    ) -> (Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>, Self::PointerTag);
+    ) -> Cow<'b, Allocation<Self::PointerTag, Self::AllocExtra>>;

    /// Hook for performing extra checks on a memory read access.
    ///
@@ -322,8 +325,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
    fn memory_read(
        _memory_extra: &Self::MemoryExtra,
        _alloc_extra: &Self::AllocExtra,
-        _ptr: Pointer<Self::PointerTag>,
-        _size: Size,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
@@ -333,8 +336,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
    fn memory_written(
        _memory_extra: &mut Self::MemoryExtra,
        _alloc_extra: &mut Self::AllocExtra,
-        _ptr: Pointer<Self::PointerTag>,
-        _size: Size,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
@@ -344,17 +347,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
    fn memory_deallocated(
        _memory_extra: &mut Self::MemoryExtra,
        _alloc_extra: &mut Self::AllocExtra,
-        _ptr: Pointer<Self::PointerTag>,
-        _size: Size,
-    ) -> InterpResult<'tcx> {
-        Ok(())
-    }
-
-    /// Called after initializing static memory using the interpreter.
-    fn after_static_mem_initialized(
-        _ecx: &mut InterpCx<'mir, 'tcx, Self>,
-        _ptr: Pointer<Self::PointerTag>,
-        _size: Size,
+        _tag: Self::PointerTag,
+        _range: AllocRange,
    ) -> InterpResult<'tcx> {
        Ok(())
    }
@@ -399,32 +393,12 @@ pub trait Machine<'mir, 'tcx>: Sized {
        // By default, we do not support unwinding from panics
        Ok(StackPopJump::Normal)
    }
-
-    fn int_to_ptr(
-        _mem: &Memory<'mir, 'tcx, Self>,
-        int: u64,
-    ) -> InterpResult<'tcx, Pointer<Self::PointerTag>> {
-        Err((if int == 0 {
-            // This is UB, seriously.
-            // (`DanglingIntPointer` with these exact arguments has special printing code.)
-            err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
-        } else {
-            // This is just something we cannot support during const-eval.
-            err_unsup!(ReadBytesAsPointer)
-        })
-        .into())
-    }
-
-    fn ptr_to_int(
-        _mem: &Memory<'mir, 'tcx, Self>,
-        _ptr: Pointer<Self::PointerTag>,
-    ) -> InterpResult<'tcx, u64>;
 }

 // A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
 // (CTFE and ConstProp) use the same instance. Here, we share that code.
 pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
-    type PointerTag = ();
+    type PointerTag = AllocId;
    type ExtraFnVal = !;

    type MemoryMap =
@@ -466,20 +440,40 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {

    #[inline(always)]
    fn init_allocation_extra<'b>(
-        _memory_extra: &Self::MemoryExtra,
+        _mem: &Memory<$mir, $tcx, Self>,
        _id: AllocId,
        alloc: Cow<'b, Allocation>,
        _kind: Option<MemoryKind<Self::MemoryKind>>,
-    ) -> (Cow<'b, Allocation<Self::PointerTag>>, Self::PointerTag) {
+    ) -> Cow<'b, Allocation<Self::PointerTag>> {
        // We do not use a tag so we can just cheaply forward the allocation
-        (alloc, ())
+        alloc
    }

+    fn extern_static_base_pointer(
+        mem: &Memory<$mir, $tcx, Self>,
+        def_id: DefId,
+    ) -> InterpResult<$tcx, Pointer> {
+        // Use the `AllocId` associated with the `DefId`. Any actual *access* will fail.
+        Ok(Pointer::new(mem.tcx.create_static_alloc(def_id), Size::ZERO))
+    }
+
    #[inline(always)]
-    fn tag_global_base_pointer(
-        _memory_extra: &Self::MemoryExtra,
-        _id: AllocId,
-    ) -> Self::PointerTag {
-        ()
+    fn tag_alloc_base_pointer(
+        _mem: &Memory<$mir, $tcx, Self>,
+        ptr: Pointer<AllocId>,
+    ) -> Pointer<AllocId> {
+        ptr
    }
+
+    #[inline(always)]
+    fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
+        Pointer::new(None, Size::from_bytes(addr))
+    }
+
+    #[inline(always)]
+    fn ptr_get_alloc(_mem: &Memory<$mir, $tcx, Self>, ptr: Pointer<AllocId>) -> (AllocId, Size) {
+        // We know `offset` is relative to the allocation, so we can use `into_parts`.
+        let (alloc_id, offset) = ptr.into_parts();
+        (alloc_id, offset)
+    }
 }
@@ -9,7 +9,7 @@
 use std::assert_matches::assert_matches;
 use std::borrow::Cow;
 use std::collections::VecDeque;
-use std::convert::{TryFrom, TryInto};
+use std::convert::TryFrom;
 use std::fmt;
 use std::ptr;

@@ -20,7 +20,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};

 use super::{
    alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
-    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit,
+    InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
+    ScalarMaybeUninit,
 };
 use crate::util::pretty;

@@ -163,25 +164,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
    #[inline]
    pub fn global_base_pointer(
        &self,
-        mut ptr: Pointer,
+        ptr: Pointer<AllocId>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
+        // We know `offset` is relative to the allocation, so we can use `into_parts`.
+        let (alloc_id, offset) = ptr.into_parts();
        // We need to handle `extern static`.
-        let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) {
+        match self.tcx.get_global_alloc(alloc_id) {
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
                bug!("global memory cannot point to thread-local static")
            }
            Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
-                ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?;
-                ptr
+                return M::extern_static_base_pointer(self, def_id);
            }
-            _ => {
-                // No need to change the `AllocId`.
-                ptr
-            }
-        };
+            _ => {}
+        }
        // And we need to get the tag.
-        let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id);
-        Ok(ptr.with_tag(tag))
+        Ok(M::tag_alloc_base_pointer(self, Pointer::new(alloc_id, offset)))
    }

    pub fn create_fn_alloc(
@@ -235,21 +233,21 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
            M::GLOBAL_KIND.map(MemoryKind::Machine),
            "dynamically allocating global memory"
        );
-        // This is a new allocation, not a new global one, so no `global_base_ptr`.
-        let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
+        let alloc = M::init_allocation_extra(self, id, Cow::Owned(alloc), Some(kind));
        self.alloc_map.insert(id, (kind, alloc.into_owned()));
-        Pointer::from(id).with_tag(tag)
+        M::tag_alloc_base_pointer(self, Pointer::from(id))
    }

    pub fn reallocate(
        &mut self,
-        ptr: Pointer<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        old_size_and_align: Option<(Size, Align)>,
        new_size: Size,
        new_align: Align,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
-        if ptr.offset.bytes() != 0 {
+        let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
+        if offset.bytes() != 0 {
            throw_ub_format!(
                "reallocating {:?} which does not point to the beginning of an object",
                ptr
@@ -261,7 +259,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        let new_ptr = self.allocate(new_size, new_align, kind)?;
        let old_size = match old_size_and_align {
            Some((size, _align)) => size,
-            None => self.get_raw(ptr.alloc_id)?.size(),
+            None => self.get_raw(alloc_id)?.size(),
        };
        // This will also call the access hooks.
        self.copy(
@@ -272,50 +270,51 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
            old_size.min(new_size),
            /*nonoverlapping*/ true,
        )?;
-        self.deallocate(ptr, old_size_and_align, kind)?;
+        self.deallocate(ptr.into(), old_size_and_align, kind)?;

        Ok(new_ptr)
    }

    pub fn deallocate(
        &mut self,
-        ptr: Pointer<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        old_size_and_align: Option<(Size, Align)>,
        kind: MemoryKind<M::MemoryKind>,
    ) -> InterpResult<'tcx> {
-        trace!("deallocating: {}", ptr.alloc_id);
+        let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
+        trace!("deallocating: {}", alloc_id);

-        if ptr.offset.bytes() != 0 {
+        if offset.bytes() != 0 {
            throw_ub_format!(
                "deallocating {:?} which does not point to the beginning of an object",
                ptr
            );
        }

-        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) {
+        let (alloc_kind, mut alloc) = match self.alloc_map.remove(&alloc_id) {
            Some(alloc) => alloc,
            None => {
                // Deallocating global memory -- always an error
-                return Err(match self.tcx.get_global_alloc(ptr.alloc_id) {
+                return Err(match self.tcx.get_global_alloc(alloc_id) {
                    Some(GlobalAlloc::Function(..)) => {
-                        err_ub_format!("deallocating {}, which is a function", ptr.alloc_id)
+                        err_ub_format!("deallocating {}, which is a function", alloc_id)
                    }
                    Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
-                        err_ub_format!("deallocating {}, which is static memory", ptr.alloc_id)
+                        err_ub_format!("deallocating {}, which is static memory", alloc_id)
                    }
-                    None => err_ub!(PointerUseAfterFree(ptr.alloc_id)),
+                    None => err_ub!(PointerUseAfterFree(alloc_id)),
                }
                .into());
            }
        };

        if alloc.mutability == Mutability::Not {
-            throw_ub_format!("deallocating immutable allocation {}", ptr.alloc_id);
+            throw_ub_format!("deallocating immutable allocation {}", alloc_id);
        }
        if alloc_kind != kind {
            throw_ub_format!(
                "deallocating {}, which is {} memory, using {} deallocation operation",
-                ptr.alloc_id,
+                alloc_id,
                alloc_kind,
                kind
            );
@@ -324,7 +323,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        if size != alloc.size() || align != alloc.align {
            throw_ub_format!(
                "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
-                ptr.alloc_id,
+                alloc_id,
                alloc.size().bytes(),
                alloc.align.bytes(),
                size.bytes(),
@@ -335,10 +334,15 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

        // Let the machine take some extra action
        let size = alloc.size();
-        M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr, size)?;
+        M::memory_deallocated(
+            &mut self.extra,
+            &mut alloc.extra,
+            ptr.provenance,
+            alloc_range(Size::ZERO, size),
+        )?;

        // Don't forget to remember size and align of this now-dead allocation
-        let old = self.dead_alloc_map.insert(ptr.alloc_id, (size, alloc.align));
+        let old = self.dead_alloc_map.insert(alloc_id, (size, alloc.align));
        if old.is_some() {
            bug!("Nothing can be deallocated twice");
        }
@@ -346,52 +350,69 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
        Ok(())
    }

-    /// Internal helper function for APIs that offer memory access based on `Scalar` pointers.
+    /// Internal helper function to determine the allocation and offset of a pointer (if any).
    #[inline(always)]
-    pub(super) fn check_ptr_access(
+    fn get_ptr_access(
        &self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Align,
-    ) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> {
+    ) -> InterpResult<'tcx, Option<(AllocId, Size, Pointer<M::PointerTag>)>> {
        let align = M::enforce_alignment(&self.extra).then_some(align);
-        self.check_and_deref_ptr(sptr, size, align, CheckInAllocMsg::MemoryAccessTest, |ptr| {
-            let (size, align) =
-                self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
-            Ok((size, align, ptr))
-        })
+        self.check_and_deref_ptr(
+            ptr,
+            size,
+            align,
+            CheckInAllocMsg::MemoryAccessTest,
+            |alloc_id, offset, ptr| {
+                let (size, align) =
+                    self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
+                Ok((size, align, (alloc_id, offset, ptr)))
+            },
+        )
    }

-    /// Check if the given scalar is allowed to do a memory access of given `size` and `align`
+    /// Check if the given pointer points to live memory of given `size` and `align`
    /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
    /// out-of-bounds case.
    #[inline(always)]
    pub fn check_ptr_access_align(
        &self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Align,
        msg: CheckInAllocMsg,
    ) -> InterpResult<'tcx> {
-        self.check_and_deref_ptr(sptr, size, Some(align), msg, |ptr| {
-            let (size, align) =
-                self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
+        self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
+            let check = match msg {
+                CheckInAllocMsg::DerefTest | CheckInAllocMsg::MemoryAccessTest => {
+                    AllocCheck::Dereferenceable
+                }
+                CheckInAllocMsg::PointerArithmeticTest | CheckInAllocMsg::InboundsTest => {
+                    AllocCheck::Live
+                }
+            };
+            let (size, align) = self.get_size_and_align(alloc_id, check)?;
            Ok((size, align, ()))
        })?;
        Ok(())
    }

    /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
-    /// to the allocation it points to. Supports both shared and mutable references, to the actual
+    /// to the allocation it points to. Supports both shared and mutable references, as the actual
    /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
    /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
    fn check_and_deref_ptr<T>(
        &self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
        size: Size,
        align: Option<Align>,
        msg: CheckInAllocMsg,
-        alloc_size: impl FnOnce(Pointer<M::PointerTag>) -> InterpResult<'tcx, (Size, Align, T)>,
+        alloc_size: impl FnOnce(
+            AllocId,
+            Size,
+            Pointer<M::PointerTag>,
+        ) -> InterpResult<'tcx, (Size, Align, T)>,
    ) -> InterpResult<'tcx, Option<T>> {
        fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
            if offset % align.bytes() == 0 {
@@ -406,53 +427,50 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             }
         }

-        // Normalize to a `Pointer` if we definitely need one.
-        let normalized = if size.bytes() == 0 {
-            // Can be an integer, just take what we got. We do NOT `force_bits` here;
-            // if this is already a `Pointer` we want to do the bounds checks!
-            sptr
+        // Extract from the pointer an `Option<AllocId>` and an offset, which is relative to the
+        // allocation or (if that is `None`) an absolute address.
+        let ptr_or_addr = if size.bytes() == 0 {
+            // Let's see what we can do, but don't throw errors if there's nothing there.
+            self.ptr_try_get_alloc(ptr)
         } else {
-            // A "real" access, we must get a pointer to be able to check the bounds.
-            Scalar::from(self.force_ptr(sptr)?)
+            // A "real" access, we insist on getting an `AllocId`.
+            Ok(self.ptr_get_alloc(ptr)?)
         };
-        Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) {
-            Ok(bits) => {
-                let bits = u64::try_from(bits).unwrap(); // it's ptr-sized
-                assert!(size.bytes() == 0);
+        Ok(match ptr_or_addr {
+            Err(addr) => {
+                // No memory is actually being accessed.
+                debug_assert!(size.bytes() == 0);
                 // Must be non-null.
-                if bits == 0 {
+                if addr == 0 {
                     throw_ub!(DanglingIntPointer(0, msg))
                 }
                 // Must be aligned.
                 if let Some(align) = align {
-                    check_offset_align(bits, align)?;
+                    check_offset_align(addr, align)?;
                 }
                 None
             }
-            Err(ptr) => {
-                let (allocation_size, alloc_align, ret_val) = alloc_size(ptr)?;
+            Ok((alloc_id, offset, ptr)) => {
+                let (allocation_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, ptr)?;
                 // Test bounds. This also ensures non-null.
-                // It is sufficient to check this for the end pointer. The addition
-                // checks for overflow.
-                let end_ptr = ptr.offset(size, self)?;
-                if end_ptr.offset > allocation_size {
-                    // equal is okay!
-                    throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
+                // It is sufficient to check this for the end pointer. Also check for overflow!
+                if offset.checked_add(size, &self.tcx).map_or(true, |end| end > allocation_size) {
+                    throw_ub!(PointerOutOfBounds { alloc_id, offset, size, allocation_size, msg })
                 }
                 // Test align. Check this last; if both bounds and alignment are violated
                 // we want the error to be about the bounds.
                 if let Some(align) = align {
                     if M::force_int_for_alignment_check(&self.extra) {
-                        let bits = self
-                            .force_bits(ptr.into(), self.pointer_size())
+                        let addr = Scalar::from_pointer(ptr, &self.tcx)
+                            .to_machine_usize(&self.tcx)
                             .expect("ptr-to-int cast for align check should never fail");
-                        check_offset_align(bits.try_into().unwrap(), align)?;
+                        check_offset_align(addr, align)?;
                     } else {
                         // Check allocation alignment and offset alignment.
                         if alloc_align.bytes() < align.bytes() {
                             throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
                         }
-                        check_offset_align(ptr.offset.bytes(), align)?;
+                        check_offset_align(offset.bytes(), align)?;
                     }
                 }
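For readers less familiar with the interpreter internals, the decision tree above can be summarized in a small self-contained sketch. All names here (`MaybePtr`, `AccessCheckError`, `check_access`) are invented for illustration; the real code returns `InterpResult` and works with `Size`/`Align` and a machine-provided data layout:

```rust
/// A toy model of the check above: a "maybe pointer" either carries an
/// allocation id plus a relative offset, or it is just an absolute address.
#[derive(Debug)]
enum MaybePtr {
    Alloc { alloc_id: u64, offset: u64 },
    Addr(u64),
}

#[derive(Debug, PartialEq)]
enum AccessCheckError {
    Null,
    Misaligned,
    OutOfBounds,
}

/// `alloc_size` plays the role of the `alloc_size` closure in the diff:
/// it looks up the (size, alignment) of an allocation.
fn check_access(
    ptr: &MaybePtr,
    size: u64,
    align: u64,
    alloc_size: impl FnOnce(u64) -> (u64, u64),
) -> Result<(), AccessCheckError> {
    match ptr {
        MaybePtr::Addr(addr) => {
            // No memory is accessed (only legal for size 0), but the address
            // must still be non-null and aligned.
            assert!(size == 0);
            if *addr == 0 {
                return Err(AccessCheckError::Null);
            }
            if *addr % align != 0 {
                return Err(AccessCheckError::Misaligned);
            }
            Ok(())
        }
        MaybePtr::Alloc { alloc_id, offset } => {
            let (allocation_size, alloc_align) = alloc_size(*alloc_id);
            // Bounds first: the end of the access must stay within the
            // allocation; `checked_add` also catches overflow.
            match offset.checked_add(size) {
                Some(end) if end <= allocation_size => {}
                _ => return Err(AccessCheckError::OutOfBounds),
            }
            // Alignment last, so that bounds errors take precedence.
            if alloc_align < align || *offset % align != 0 {
                return Err(AccessCheckError::Misaligned);
            }
            Ok(())
        }
    }
}

fn main() {
    // 8-byte allocation, 4-byte aligned.
    let lookup = |_id| (8, 4);
    assert_eq!(check_access(&MaybePtr::Alloc { alloc_id: 0, offset: 4 }, 4, 4, lookup), Ok(()));
    assert_eq!(
        check_access(&MaybePtr::Alloc { alloc_id: 0, offset: 6 }, 4, 1, lookup),
        Err(AccessCheckError::OutOfBounds)
    );
}
```

Note the ordering choice mirrored from the diff: bounds are checked before alignment, so an access that violates both reports the more informative out-of-bounds error.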
@@ -464,13 +482,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }

     /// Test if the pointer might be null.
-    pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool {
-        let (size, _align) = self
-            .get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead)
-            .expect("alloc info with MaybeDead cannot fail");
-        // If the pointer is out-of-bounds, it may be null.
-        // Note that one-past-the-end (offset == size) is still inbounds, and never null.
-        ptr.offset > size
+    pub fn ptr_may_be_null(&self, ptr: Pointer<Option<M::PointerTag>>) -> bool {
+        match self.ptr_try_get_alloc(ptr) {
+            Ok((alloc_id, offset, _)) => {
+                let (size, _align) = self
+                    .get_size_and_align(alloc_id, AllocCheck::MaybeDead)
+                    .expect("alloc info with MaybeDead cannot fail");
+                // If the pointer is out-of-bounds, it may be null.
+                // Note that one-past-the-end (offset == size) is still inbounds, and never null.
+                offset > size
+            }
+            Err(offset) => offset == 0,
+        }
     }
 }
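A compact way to state the rule implemented above, as a runnable sketch (plain `u64`s instead of `Size` and `AllocId`; the `Result` stands in for `ptr_try_get_alloc`):

```rust
fn may_be_null(ptr: Result<(u64, u64), u64>) -> bool {
    match ptr {
        // With provenance: allocations are never at address 0, so only an
        // out-of-bounds offset can make the pointer null. One-past-the-end
        // (offset == size) is still inbounds and never null.
        Ok((offset, alloc_size)) => offset > alloc_size,
        // Without provenance: the pointer is null exactly if the address is 0.
        Err(addr) => addr == 0,
    }
}

fn main() {
    assert!(!may_be_null(Ok((8, 8)))); // one-past-the-end: never null
    assert!(may_be_null(Err(0))); // the absolute address 0 is null
    assert!(!may_be_null(Err(16)));
}
```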
@@ -482,12 +505,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// this machine use the same pointer tag, so it is indirected through
     /// `M::tag_allocation`.
     fn get_global_alloc(
-        memory_extra: &M::MemoryExtra,
-        tcx: TyCtxt<'tcx>,
+        &self,
         id: AllocId,
         is_write: bool,
     ) -> InterpResult<'tcx, Cow<'tcx, Allocation<M::PointerTag, M::AllocExtra>>> {
-        let (alloc, def_id) = match tcx.get_global_alloc(id) {
+        let (alloc, def_id) = match self.tcx.get_global_alloc(id) {
             Some(GlobalAlloc::Memory(mem)) => {
                 // Memory of a constant or promoted or anonymous memory referenced by a static.
                 (mem, None)
@@ -495,8 +517,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             Some(GlobalAlloc::Function(..)) => throw_ub!(DerefFunctionPointer(id)),
             None => throw_ub!(PointerUseAfterFree(id)),
             Some(GlobalAlloc::Static(def_id)) => {
-                assert!(tcx.is_static(def_id));
-                assert!(!tcx.is_thread_local_static(def_id));
+                assert!(self.tcx.is_static(def_id));
+                assert!(!self.tcx.is_thread_local_static(def_id));
                 // Notice that every static has two `AllocId` that will resolve to the same
                 // thing here: one maps to `GlobalAlloc::Static`, this is the "lazy" ID,
                 // and the other one maps to `GlobalAlloc::Memory`, this is returned by
@@ -507,24 +529,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
                 // The `GlobalAlloc::Memory` branch here is still reachable though; when a static
                 // contains a reference to memory that was created during its evaluation (i.e., not
                 // to another static), those inner references only exist in "resolved" form.
-                if tcx.is_foreign_item(def_id) {
+                if self.tcx.is_foreign_item(def_id) {
                     throw_unsup!(ReadExternStatic(def_id));
                 }

-                (tcx.eval_static_initializer(def_id)?, Some(def_id))
+                (self.tcx.eval_static_initializer(def_id)?, Some(def_id))
             }
         };
-        M::before_access_global(memory_extra, id, alloc, def_id, is_write)?;
+        M::before_access_global(&self.extra, id, alloc, def_id, is_write)?;
         let alloc = Cow::Borrowed(alloc);
         // We got tcx memory. Let the machine initialize its "extra" stuff.
-        let (alloc, tag) = M::init_allocation_extra(
-            memory_extra,
+        let alloc = M::init_allocation_extra(
+            self,
             id, // always use the ID we got as input, not the "hidden" one.
             alloc,
             M::GLOBAL_KIND.map(MemoryKind::Machine),
         );
-        // Sanity check that this is the same pointer we would have gotten via `global_base_pointer`.
-        debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id));
         Ok(alloc)
     }

@@ -539,8 +559,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // `get_global_alloc` that we can actually use directly without inserting anything anywhere.
         // So the error type is `InterpResult<'tcx, &Allocation<M::PointerTag>>`.
         let a = self.alloc_map.get_or(id, || {
-            let alloc = Self::get_global_alloc(&self.extra, self.tcx, id, /*is_write*/ false)
-                .map_err(Err)?;
+            let alloc = self.get_global_alloc(id, /*is_write*/ false).map_err(Err)?;
             match alloc {
                 Cow::Borrowed(alloc) => {
                     // We got a ref, cheaply return that as an "error" so that the
@@ -567,30 +586,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// "Safe" (bounds and align-checked) allocation access.
     pub fn get<'a>(
         &'a self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
         let align = M::enforce_alignment(&self.extra).then_some(align);
         let ptr_and_alloc = self.check_and_deref_ptr(
-            sptr,
+            ptr,
             size,
             align,
             CheckInAllocMsg::MemoryAccessTest,
-            |ptr| {
-                let alloc = self.get_raw(ptr.alloc_id)?;
-                Ok((alloc.size(), alloc.align, (ptr, alloc)))
+            |alloc_id, offset, ptr| {
+                let alloc = self.get_raw(alloc_id)?;
+                Ok((alloc.size(), alloc.align, (alloc_id, offset, ptr, alloc)))
             },
         )?;
-        if let Some((ptr, alloc)) = ptr_and_alloc {
-            M::memory_read(&self.extra, &alloc.extra, ptr, size)?;
-            let range = alloc_range(ptr.offset, size);
-            Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id: ptr.alloc_id }))
+        if let Some((alloc_id, offset, ptr, alloc)) = ptr_and_alloc {
+            let range = alloc_range(offset, size);
+            M::memory_read(&self.extra, &alloc.extra, ptr.provenance, range)?;
+            Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id }))
         } else {
             // Even in this branch we have to be sure that we actually access the allocation, in
             // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
             // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
-            // always called when `sptr` is truly a `Pointer`, so we are good.
+            // always called when `ptr` has an `AllocId`.
             Ok(None)
         }
     }
@@ -610,48 +629,44 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         id: AllocId,
     ) -> InterpResult<'tcx, (&mut Allocation<M::PointerTag, M::AllocExtra>, &mut M::MemoryExtra)>
     {
-        let tcx = self.tcx;
-        let memory_extra = &mut self.extra;
-        let a = self.alloc_map.get_mut_or(id, || {
-            // Need to make a copy, even if `get_global_alloc` is able
-            // to give us a cheap reference.
-            let alloc = Self::get_global_alloc(memory_extra, tcx, id, /*is_write*/ true)?;
+        // We have "NLL problem case #3" here, which cannot be worked around without loss of
+        // efficiency even for the common case where the key is in the map.
+        // <https://rust-lang.github.io/rfcs/2094-nll.html#problem-case-3-conditional-control-flow-across-functions>
+        // (Cannot use `get_mut_or` since `get_global_alloc` needs `&self`.)
+        if self.alloc_map.get_mut(id).is_none() {
+            // Slow path.
+            // Allocation not found locally, go look global.
+            let alloc = self.get_global_alloc(id, /*is_write*/ true)?;
             let kind = M::GLOBAL_KIND.expect(
                 "I got a global allocation that I have to copy but the machine does \
                     not expect that to happen",
             );
-            Ok((MemoryKind::Machine(kind), alloc.into_owned()))
-        });
-        // Unpack the error type manually because type inference doesn't
-        // work otherwise (and we cannot help it because `impl Trait`)
-        match a {
-            Err(e) => Err(e),
-            Ok(a) => {
-                let a = &mut a.1;
-                if a.mutability == Mutability::Not {
-                    throw_ub!(WriteToReadOnly(id))
-                }
-                Ok((a, memory_extra))
-            }
+            self.alloc_map.insert(id, (MemoryKind::Machine(kind), alloc.into_owned()));
         }
+
+        let (_kind, alloc) = self.alloc_map.get_mut(id).unwrap();
+        if alloc.mutability == Mutability::Not {
+            throw_ub!(WriteToReadOnly(id))
+        }
+        Ok((alloc, &mut self.extra))
     }
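The "NLL problem case #3" workaround referenced in the comment is worth seeing in isolation. A minimal sketch with a plain `HashMap` (names are illustrative): probe first, insert on the miss path, then look up again unconditionally, so that each `&mut` borrow of the map stays short.

```rust
use std::collections::HashMap;

// The borrow checker rejects returning a `&mut` obtained from `get_mut` when
// the miss path also inserts into the same map: the first mutable borrow is
// considered live for the whole function. Probing-then-reprobing sidesteps it.
fn get_or_load<'m>(map: &'m mut HashMap<u64, String>, id: u64) -> &'m mut String {
    if map.get_mut(&id).is_none() {
        // Slow path: materialize the entry (stands in for `get_global_alloc`).
        map.insert(id, format!("alloc {}", id));
    }
    // This second lookup is the cost of the workaround.
    map.get_mut(&id).unwrap()
}

fn main() {
    let mut map = HashMap::new();
    get_or_load(&mut map, 1).push_str(" (mutated)");
    assert_eq!(map[&1], "alloc 1 (mutated)");
}
```

In this toy, `entry(id).or_insert_with(...)` would also work; the real code cannot use an entry-style helper because producing the value (`get_global_alloc`) needs `&self` while the map would already be mutably borrowed.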

     /// "Safe" (bounds and align-checked) allocation access.
     pub fn get_mut<'a>(
         &'a mut self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
         size: Size,
         align: Align,
     ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
-        let ptr = self.check_ptr_access(sptr, size, align)?;
-        if let Some(ptr) = ptr {
+        let parts = self.get_ptr_access(ptr, size, align)?;
+        if let Some((alloc_id, offset, ptr)) = parts {
             let tcx = self.tcx;
             // FIXME: can we somehow avoid looking up the allocation twice here?
             // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
-            let (alloc, extra) = self.get_raw_mut(ptr.alloc_id)?;
-            M::memory_written(extra, &mut alloc.extra, ptr, size)?;
-            let range = alloc_range(ptr.offset, size);
-            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id: ptr.alloc_id }))
+            let (alloc, extra) = self.get_raw_mut(alloc_id)?;
+            let range = alloc_range(offset, size);
+            M::memory_written(extra, &mut alloc.extra, ptr.provenance, range)?;
+            Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
         } else {
             Ok(None)
         }
@@ -728,7 +743,6 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }

     fn get_fn_alloc(&self, id: AllocId) -> Option<FnVal<'tcx, M::ExtraFnVal>> {
-        trace!("reading fn ptr: {}", id);
         if let Some(extra) = self.extra_fn_ptr_map.get(&id) {
             Some(FnVal::Other(*extra))
         } else {
@@ -741,14 +755,15 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

     pub fn get_fn(
         &self,
-        ptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
-        let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value.
-        if ptr.offset.bytes() != 0 {
-            throw_ub!(InvalidFunctionPointer(ptr.erase_tag()))
+        trace!("get_fn({:?})", ptr);
+        let (alloc_id, offset, _ptr) = self.ptr_get_alloc(ptr)?;
+        if offset.bytes() != 0 {
+            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
         }
-        self.get_fn_alloc(ptr.alloc_id)
-            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into())
+        self.get_fn_alloc(alloc_id)
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
     }

     pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
@@ -787,7 +802,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             if reachable.insert(id) {
                 // This is a new allocation, add its relocations to `todo`.
                 if let Some((_, alloc)) = self.alloc_map.get(id) {
-                    todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id));
+                    todo.extend(alloc.relocations().values().map(|tag| tag.get_alloc_id()));
                 }
             }
         }
@@ -821,14 +836,14 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
 impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
     fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         // Cannot be a closure because it is generic in `Tag`, `Extra`.
-        fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>(
+        fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
             fmt: &mut std::fmt::Formatter<'_>,
             tcx: TyCtxt<'tcx>,
             allocs_to_print: &mut VecDeque<AllocId>,
             alloc: &Allocation<Tag, Extra>,
         ) -> std::fmt::Result {
-            for &(_, target_id) in alloc.relocations().values() {
-                allocs_to_print.push_back(target_id);
+            for alloc_id in alloc.relocations().values().map(|tag| tag.get_alloc_id()) {
+                allocs_to_print.push_back(alloc_id);
             }
             write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
         }
@@ -931,8 +946,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// Reads the given number of bytes from memory. Returns them as a slice.
     ///
     /// Performs appropriate bounds checks.
-    pub fn read_bytes(&self, sptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> {
-        let alloc_ref = match self.get(sptr, size, Align::ONE)? {
+    pub fn read_bytes(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+        size: Size,
+    ) -> InterpResult<'tcx, &[u8]> {
+        let alloc_ref = match self.get(ptr, size, Align::ONE)? {
             Some(a) => a,
             None => return Ok(&[]), // zero-sized access
         };
@@ -949,7 +968,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// Performs appropriate bounds checks.
     pub fn write_bytes(
         &mut self,
-        sptr: Scalar<M::PointerTag>,
+        ptr: Pointer<Option<M::PointerTag>>,
         src: impl IntoIterator<Item = u8>,
     ) -> InterpResult<'tcx> {
         let mut src = src.into_iter();
@@ -958,7 +977,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         assert_eq!(lower, len, "can only write iterators with a precise length");

         let size = Size::from_bytes(len);
-        let alloc_ref = match self.get_mut(sptr, size, Align::ONE)? {
+        let alloc_ref = match self.get_mut(ptr, size, Align::ONE)? {
             Some(alloc_ref) => alloc_ref,
             None => {
                 // zero-sized access
@@ -985,9 +1004,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

     pub fn copy(
         &mut self,
-        src: Scalar<M::PointerTag>,
+        src: Pointer<Option<M::PointerTag>>,
         src_align: Align,
-        dest: Scalar<M::PointerTag>,
+        dest: Pointer<Option<M::PointerTag>>,
         dest_align: Align,
         size: Size,
         nonoverlapping: bool,
@@ -997,9 +1016,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

     pub fn copy_repeatedly(
         &mut self,
-        src: Scalar<M::PointerTag>,
+        src: Pointer<Option<M::PointerTag>>,
         src_align: Align,
-        dest: Scalar<M::PointerTag>,
+        dest: Pointer<Option<M::PointerTag>>,
         dest_align: Align,
         size: Size,
         num_copies: u64,
@@ -1007,22 +1026,23 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     ) -> InterpResult<'tcx> {
         let tcx = self.tcx;
         // We need to do our own bounds-checks.
-        let src = self.check_ptr_access(src, size, src_align)?;
-        let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+        let src_parts = self.get_ptr_access(src, size, src_align)?;
+        let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
+
+        // FIXME: we look up both allocations twice here, once before for the `get_ptr_access`
+        // and once below to get the underlying `&[mut] Allocation`.

         // Source alloc preparations and access hooks.
-        let src = match src {
+        let (src_alloc_id, src_offset, src) = match src_parts {
             None => return Ok(()), // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
             Some(src_ptr) => src_ptr,
         };
-        let src_alloc = self.get_raw(src.alloc_id)?;
-        M::memory_read(&self.extra, &src_alloc.extra, src, size)?;
+        let src_alloc = self.get_raw(src_alloc_id)?;
+        let src_range = alloc_range(src_offset, size);
+        M::memory_read(&self.extra, &src_alloc.extra, src.provenance, src_range)?;
         // We need the `dest` ptr for the next operation, so we get it now.
         // We already did the source checks and called the hooks so we are good to return early.
-        let dest = match dest {
+        let (dest_alloc_id, dest_offset, dest) = match dest_parts {
             None => return Ok(()), // Zero-sized *destination*.
             Some(dest_ptr) => dest_ptr,
         };
@@ -1032,26 +1052,21 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // since we don't want to keep any relocations at the target.
         // (`get_bytes_with_uninit_and_ptr` below checks that there are no
         // relocations overlapping the edges; those would not be handled correctly).
-        let relocations = src_alloc.prepare_relocation_copy(
-            self,
-            alloc_range(src.offset, size),
-            dest.offset,
-            num_copies,
-        );
+        let relocations =
+            src_alloc.prepare_relocation_copy(self, src_range, dest_offset, num_copies);
         // Prepare a copy of the initialization mask.
-        let compressed = src_alloc.compress_uninit_range(alloc_range(src.offset, size));
+        let compressed = src_alloc.compress_uninit_range(src_range);
         // This checks relocation edges on the src.
         let src_bytes = src_alloc
-            .get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size))
-            .map_err(|e| e.to_interp_error(src.alloc_id))?
+            .get_bytes_with_uninit_and_ptr(&tcx, src_range)
+            .map_err(|e| e.to_interp_error(src_alloc_id))?
             .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation

         // Destination alloc preparations and access hooks.
-        let (dest_alloc, extra) = self.get_raw_mut(dest.alloc_id)?;
-        M::memory_written(extra, &mut dest_alloc.extra, dest, size * num_copies)?;
-        let dest_bytes = dest_alloc
-            .get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies))
-            .as_mut_ptr();
+        let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
+        let dest_range = alloc_range(dest_offset, size * num_copies);
+        M::memory_written(extra, &mut dest_alloc.extra, dest.provenance, dest_range)?;
+        let dest_bytes = dest_alloc.get_bytes_mut_ptr(&tcx, dest_range).as_mut_ptr();

         if compressed.no_bytes_init() {
             // Fast path: If all bytes are `uninit` then there is nothing to copy. The target range
@@ -1060,7 +1075,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             // This also avoids writing to the target bytes so that the backing allocation is never
             // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
             // operating systems this can avoid physically allocating the page.
-            dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication
+            dest_alloc.mark_init(dest_range, false); // `Size` multiplication
             dest_alloc.mark_relocation_range(relocations);
             return Ok(());
         }
@@ -1071,11 +1086,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // The pointers above remain valid even if the `HashMap` table is moved around because they
         // point into the `Vec` storing the bytes.
         unsafe {
-            if src.alloc_id == dest.alloc_id {
+            if src_alloc_id == dest_alloc_id {
                 if nonoverlapping {
                     // `Size` additions
-                    if (src.offset <= dest.offset && src.offset + size > dest.offset)
-                        || (dest.offset <= src.offset && dest.offset + size > src.offset)
+                    if (src_offset <= dest_offset && src_offset + size > dest_offset)
+                        || (dest_offset <= src_offset && dest_offset + size > src_offset)
                     {
                         throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
                     }
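The overlap test above is the standard symmetric interval check: two half-open ranges of equal length intersect exactly when each one starts at or before a point the other still covers. A small self-contained sketch with plain `u64` offsets instead of `Size`:

```rust
/// Half-open ranges [a, a+len) and [b, b+len) of equal length overlap
/// iff each starts before the other ends.
fn ranges_overlap(a: u64, b: u64, len: u64) -> bool {
    (a <= b && a + len > b) || (b <= a && b + len > a)
}

fn main() {
    assert!(ranges_overlap(0, 3, 4)); // [0,4) and [3,7) share byte 3
    assert!(!ranges_overlap(0, 4, 4)); // [0,4) and [4,8) only touch
    assert!(ranges_overlap(5, 2, 4)); // symmetric in both directions
}
```

For equal-length ranges this predicate is equivalent to `|a - b| < len`, which is another common way to write the `copy_nonoverlapping` check.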
@@ -1102,7 +1117,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             // now fill in all the "init" data
             dest_alloc.mark_compressed_init_range(
                 &compressed,
-                alloc_range(dest.offset, size),
+                alloc_range(dest_offset, size), // just a single copy (i.e., not full `dest_range`)
                 num_copies,
             );
             // copy the relocations to the destination
@@ -1114,24 +1129,41 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {

 /// Machine pointer introspection.
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
-    pub fn force_ptr(
-        &self,
-        scalar: Scalar<M::PointerTag>,
-    ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
-        match scalar {
-            Scalar::Ptr(ptr) => Ok(ptr),
-            _ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?),
+    pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
+        // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
+        // call to force getting out a pointer.
+        match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
+            Err(ptr) => ptr.into(),
+            Ok(bits) => {
+                let addr = u64::try_from(bits).unwrap();
+                M::ptr_from_addr(&self, addr)
+            }
         }
     }

-    pub fn force_bits(
+    /// Turning a "maybe pointer" into a proper pointer (and some information
+    /// about where it points), or an absolute address.
+    pub fn ptr_try_get_alloc(
         &self,
-        scalar: Scalar<M::PointerTag>,
-        size: Size,
-    ) -> InterpResult<'tcx, u128> {
-        match scalar.to_bits_or_ptr(size, self) {
-            Ok(bits) => Ok(bits),
-            Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()),
+        ptr: Pointer<Option<M::PointerTag>>,
+    ) -> Result<(AllocId, Size, Pointer<M::PointerTag>), u64> {
+        match ptr.into_pointer_or_addr() {
+            Ok(ptr) => {
+                let (alloc_id, offset) = M::ptr_get_alloc(self, ptr);
+                Ok((alloc_id, offset, ptr))
+            }
+            Err(addr) => Err(addr.bytes()),
         }
     }
+
+    /// Turning a "maybe pointer" into a proper pointer (and some information about where it points).
+    #[inline(always)]
+    pub fn ptr_get_alloc(
+        &self,
+        ptr: Pointer<Option<M::PointerTag>>,
+    ) -> InterpResult<'tcx, (AllocId, Size, Pointer<M::PointerTag>)> {
+        self.ptr_try_get_alloc(ptr).map_err(|offset| {
+            err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
+        })
+    }
 }
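The shape that makes all of these signature changes possible is the provenance-parametric pointer. A toy model (not rustc's actual definitions) of `Pointer<Option<Tag>>` and the decomposition performed by `ptr_try_get_alloc`:

```rust
/// Toy model: provenance is a type parameter, and "maybe pointers" use
/// `Option<Tag>` for it.
#[derive(Clone, Copy, Debug)]
struct Pointer<Prov> {
    provenance: Prov,
    /// With provenance: offset relative to the allocation.
    /// Without: the absolute address.
    offset: u64,
}

impl<Tag> Pointer<Option<Tag>> {
    /// Split a maybe-pointer into a real pointer or a raw address,
    /// mirroring `into_pointer_or_addr` in spirit.
    fn into_pointer_or_addr(self) -> Result<Pointer<Tag>, u64> {
        match self.provenance {
            Some(tag) => Ok(Pointer { provenance: tag, offset: self.offset }),
            None => Err(self.offset),
        }
    }
}

fn main() {
    // A ZST pointer created from an integer: no provenance, address 8.
    let p: Pointer<Option<u32>> = Pointer { provenance: None, offset: 8 };
    assert_eq!(p.into_pointer_or_addr().unwrap_err(), 8);

    // A pointer into allocation #7 at offset 0.
    let q: Pointer<Option<u32>> = Pointer { provenance: Some(7), offset: 0 };
    assert!(q.into_pointer_or_addr().is_ok());
}
```

With this split, "real" accesses can insist on the `Ok` case, while size-0 accesses can still do something sensible with a bare address, which is exactly how `check_and_deref_ptr` above is structured.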

@@ -15,8 +15,9 @@ use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

 use super::{
-    alloc_range, from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx,
-    InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit,
+    alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
+    InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
+    Scalar, ScalarMaybeUninit,
 };

 /// An `Immediate` represents a single immediate self-contained Rust value.
@@ -26,8 +27,8 @@ use super::{
 /// operations and wide pointers. This idea was taken from rustc's codegen.
 /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
 /// defined on `Immediate`, and do not have to work with a `Place`.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
-pub enum Immediate<Tag = ()> {
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
+pub enum Immediate<Tag: Provenance = AllocId> {
     Scalar(ScalarMaybeUninit<Tag>),
     ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
 }
@@ -35,34 +36,35 @@ pub enum Immediate<Tag = ()> {
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 rustc_data_structures::static_assert_size!(Immediate, 56);

-impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
+impl<Tag: Provenance> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
     #[inline(always)]
     fn from(val: ScalarMaybeUninit<Tag>) -> Self {
         Immediate::Scalar(val)
     }
 }

-impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
+impl<Tag: Provenance> From<Scalar<Tag>> for Immediate<Tag> {
     #[inline(always)]
     fn from(val: Scalar<Tag>) -> Self {
         Immediate::Scalar(val.into())
     }
 }

-impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
-    #[inline(always)]
-    fn from(val: Pointer<Tag>) -> Self {
-        Immediate::Scalar(Scalar::from(val).into())
+impl<'tcx, Tag: Provenance> Immediate<Tag> {
+    pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
     }
-}

-impl<'tcx, Tag> Immediate<Tag> {
+    pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
+        Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
+    }
+
     pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
         Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
     }

-    pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
-        Immediate::ScalarPair(val.into(), vtable.into())
+    pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
+        Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_pointer(vtable, cx))
     }

     #[inline]
@@ -82,7 +84,7 @@ impl<'tcx, Tag> Immediate<Tag> {
 // ScalarPair needs a type to interpret, so we often have an immediate and a type together
 // as input for binary and cast operations.
 #[derive(Copy, Clone, Debug)]
-pub struct ImmTy<'tcx, Tag = ()> {
+pub struct ImmTy<'tcx, Tag: Provenance = AllocId> {
     imm: Immediate<Tag>,
     pub layout: TyAndLayout<'tcx>,
 }
@@ -90,17 +92,23 @@ pub struct ImmTy<'tcx, Tag = ()> {
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);

-impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
+impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
         /// Helper function for printing a scalar to a FmtPrinter
-        fn p<'a, 'tcx, F: std::fmt::Write, Tag>(
+        fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
             cx: FmtPrinter<'a, 'tcx, F>,
             s: ScalarMaybeUninit<Tag>,
             ty: Ty<'tcx>,
         ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
             match s {
-                ScalarMaybeUninit::Scalar(s) => {
-                    cx.pretty_print_const_scalar(s.erase_tag(), ty, true)
+                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
+                    cx.pretty_print_const_scalar_int(int, ty, true)
                 }
+                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+                    // print what it points to, which would fail since it has no access to the local
+                    // memory.
+                    cx.pretty_print_const_pointer(ptr, ty, true)
+                }
                 ScalarMaybeUninit::Uninit => cx.typed_value(
                     |mut this| {
@@ -120,18 +128,18 @@ impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
                     p(cx, s, ty)?;
                     return Ok(());
                 }
-                write!(f, "{}: {}", s.erase_tag(), self.layout.ty)
+                write!(f, "{}: {}", s, self.layout.ty)
             }
             Immediate::ScalarPair(a, b) => {
                 // FIXME(oli-obk): at least print tuples and slices nicely
-                write!(f, "({}, {}): {}", a.erase_tag(), b.erase_tag(), self.layout.ty,)
+                write!(f, "({}, {}): {}", a, b, self.layout.ty,)
             }
         }
         })
     }
 }

-impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> std::ops::Deref for ImmTy<'tcx, Tag> {
     type Target = Immediate<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Immediate<Tag> {
@@ -142,22 +150,22 @@ impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
 /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
 /// or still in memory. The latter is an optimization, to delay reading that chunk of
 /// memory and to avoid having to store arbitrary-sized data here.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)]
-pub enum Operand<Tag = ()> {
+#[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash, Debug)]
+pub enum Operand<Tag: Provenance = AllocId> {
     Immediate(Immediate<Tag>),
     Indirect(MemPlace<Tag>),
 }

-#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
-pub struct OpTy<'tcx, Tag = ()> {
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
+pub struct OpTy<'tcx, Tag: Provenance = AllocId> {
     op: Operand<Tag>, // Keep this private; it helps enforce invariants.
     pub layout: TyAndLayout<'tcx>,
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);
+rustc_data_structures::static_assert_size!(OpTy<'_>, 80);

-impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> std::ops::Deref for OpTy<'tcx, Tag> {
     type Target = Operand<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Operand<Tag> {
@@ -165,28 +173,28 @@ impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
     }
 }

-impl<'tcx, Tag: Copy> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
     #[inline(always)]
     fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
         OpTy { op: Operand::Indirect(*mplace), layout: mplace.layout }
     }
 }

-impl<'tcx, Tag: Copy> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> From<&'_ MPlaceTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
     #[inline(always)]
     fn from(mplace: &MPlaceTy<'tcx, Tag>) -> Self {
         OpTy { op: Operand::Indirect(**mplace), layout: mplace.layout }
     }
 }

-impl<'tcx, Tag> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> From<ImmTy<'tcx, Tag>> for OpTy<'tcx, Tag> {
     #[inline(always)]
     fn from(val: ImmTy<'tcx, Tag>) -> Self {
         OpTy { op: Operand::Immediate(val.imm), layout: val.layout }
     }
 }

-impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> ImmTy<'tcx, Tag> {
     #[inline]
     pub fn from_scalar(val: Scalar<Tag>, layout: TyAndLayout<'tcx>) -> Self {
         ImmTy { imm: val.into(), layout }
@@ -225,19 +233,6 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
 }

 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
-    /// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
-    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
-    #[inline]
-    pub fn force_op_ptr(
-        &self,
-        op: &OpTy<'tcx, M::PointerTag>,
-    ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        match op.try_as_mplace(self) {
-            Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
-            Err(imm) => Ok(imm.into()), // Nothing to cast/force
-        }
-    }
-
     /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
     /// Returns `None` if the layout does not permit loading this as a value.
     fn try_read_immediate_from_mplace(
@@ -291,7 +286,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         &self,
         src: &OpTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
-        Ok(match src.try_as_mplace(self) {
+        Ok(match src.try_as_mplace() {
             Ok(ref mplace) => {
                 if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
                     Ok(val)
@@ -324,6 +319,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(self.read_immediate(op)?.to_scalar_or_uninit())
     }

+    /// Read a pointer from a place.
+    pub fn read_pointer(
+        &self,
+        op: &OpTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
+        Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
+    }
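`read_pointer` composes `read_scalar` with the new `scalar_to_ptr` from the memory changes above. A toy model of that normalization step (standalone types, not rustc's): a scalar that is raw bits becomes a pointer with no provenance, a scalar that is already a pointer keeps its provenance.

```rust
/// Toy model: a scalar is either raw bits or a pointer with provenance.
#[derive(Debug)]
enum Scalar<Tag> {
    Int(u64),
    Ptr(Tag, u64), // provenance + offset
}

#[derive(Debug, PartialEq)]
struct MaybePtr<Tag> {
    provenance: Option<Tag>,
    addr_or_offset: u64,
}

fn scalar_to_ptr<Tag>(s: Scalar<Tag>) -> MaybePtr<Tag> {
    match s {
        // An integer becomes a pointer without provenance: just an address.
        Scalar::Int(bits) => MaybePtr { provenance: None, addr_or_offset: bits },
        Scalar::Ptr(tag, offset) => MaybePtr { provenance: Some(tag), addr_or_offset: offset },
    }
}

fn main() {
    assert_eq!(
        scalar_to_ptr::<u32>(Scalar::Int(16)),
        MaybePtr { provenance: None, addr_or_offset: 16 }
    );
}
```

This is why the old `force_ptr`/`force_op_ptr` dance disappears: the conversion is total and needs no machine state, so callers can do it eagerly.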
+
     // Turn the wide MPlace into a string (must already be dereferenced!)
     pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
         let len = mplace.len(self)?;
@@ -338,7 +341,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         op: &OpTy<'tcx, M::PointerTag>,
         field: usize,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
-        let base = match op.try_as_mplace(self) {
+        let base = match op.try_as_mplace() {
             Ok(ref mplace) => {
                 // We can reuse the mplace field computation logic for indirect operands.
                 let field = self.mplace_field(mplace, field)?;
@@ -381,7 +384,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             self.operand_field(op, index)
         } else {
             // Indexing into a big array. This must be an mplace.
-            let mplace = op.assert_mem_place(self);
+            let mplace = op.assert_mem_place();
             Ok(self.mplace_index(&mplace, index)?.into())
         }
     }
@@ -392,7 +395,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         variant: VariantIdx,
     ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
         // Downcasts only change the layout
-        Ok(match op.try_as_mplace(self) {
+        Ok(match op.try_as_mplace() {
             Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
             Err(..) => {
                 let layout = op.layout.for_variant(self, variant);
@@ -414,7 +417,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             Subslice { .. } | ConstantIndex { .. } | Index(_) => {
                 // The rest should only occur as mplace, we do not use Immediates for types
                 // allowing such operations. This matches place_projection forcing an allocation.
-                let mplace = base.assert_mem_place(self);
+                let mplace = base.assert_mem_place();
                 self.mplace_projection(&mplace, proj_elem)?.into()
             }
         })
@@ -569,7 +572,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Other cases need layout.
         let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
             Ok(match scalar {
-                Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
+                Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
                 Scalar::Int(int) => Scalar::Int(int),
             })
         };
@@ -580,9 +583,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // We rely on mutability being set correctly in that allocation to prevent writes
             // where none should happen.
             let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
-            Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi))
+            Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
         }
-        ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()),
+        ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x.into())?.into()),
         ConstValue::Slice { data, start, end } => {
             // We rely on mutability being set correctly in `data` to prevent writes
             // where none should happen.
@@ -591,7 +594,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 Size::from_bytes(start), // offset: `start`
             );
             Operand::Immediate(Immediate::new_slice(
-                self.global_base_pointer(ptr)?.into(),
+                Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
                 u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
                 self,
             ))
@@ -658,9 +661,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         // Figure out which discriminant and variant this corresponds to.
         Ok(match *tag_encoding {
             TagEncoding::Direct => {
-                let tag_bits = self
-                    .force_bits(tag_val, tag_layout.size)
-                    .map_err(|_| err_ub!(InvalidTag(tag_val.erase_tag())))?;
+                let tag_bits = tag_val
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
                 // Cast bits from tag layout to discriminant layout.
                 let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
                 let discr_bits = discr_val.assert_bits(discr_layout.size);
@@ -677,7 +681,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
             }
-            .ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_tag())))?;
+            .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
             // Return the cast value, and the index.
             (discr_val, index.0)
         }
@@ -686,18 +690,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // discriminant (encoded in niche/tag) and variant index are the same.
             let variants_start = niche_variants.start().as_u32();
             let variants_end = niche_variants.end().as_u32();
-            let variant = match tag_val.to_bits_or_ptr(tag_layout.size, self) {
-                Err(ptr) => {
-                    // The niche must be just 0 (which an inbounds pointer value never is)
+            let variant = match tag_val.try_to_int() {
+                Err(dbg_val) => {
+                    // So this is a pointer then, and casting to an int failed.
+                    // Can only happen during CTFE.
+                    let ptr = self.scalar_to_ptr(tag_val);
+                    // The niche must be just 0, and the ptr must not be null; then we know this is
                     // okay. Everything else, we conservatively reject.
                     let ptr_valid = niche_start == 0
                         && variants_start == variants_end
                         && !self.memory.ptr_may_be_null(ptr);
                     if !ptr_valid {
-                        throw_ub!(InvalidTag(tag_val.erase_tag()))
+                        throw_ub!(InvalidTag(dbg_val))
                     }
                     dataful_variant
                 }
                 Ok(tag_bits) => {
+                    let tag_bits = tag_bits.assert_bits(tag_layout.size);
                     // We need to use machine arithmetic to get the relative variant idx:
                     // variant_index_relative = tag_val - niche_start_val
                     let tag_val = ImmTy::from_uint(tag_bits, tag_layout)
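The relative-index computation mentioned in the comment (`variant_index_relative = tag_val - niche_start_val`) uses wrapping machine arithmetic. Here is a simplified, runnable sketch of niche decoding with `u8` tags; the real code additionally offsets the result by `variants_start` and validates it against the layout, so treat this only as the core idea:

```rust
/// Decode a niche-encoded discriminant: a tag in
/// [niche_start, niche_start + num_niche_variants) selects a niched variant
/// (by its distance from `niche_start`); any other bit pattern means the
/// "dataful" variant. Wrapping subtraction handles niches that wrap around
/// the tag type.
fn decode_niche(tag: u8, niche_start: u8, num_niche_variants: u8, dataful: u32) -> u32 {
    let relative = tag.wrapping_sub(niche_start);
    if relative < num_niche_variants {
        u32::from(relative) // a niched variant, as an offset into the variant range
    } else {
        dataful
    }
}

fn main() {
    // Think `Option<&T>`: tag 0 encodes `None` (one niched variant starting at 0),
    // every other bit pattern is the dataful `Some`.
    assert_eq!(decode_niche(0, 0, 1, 1), 0); // None
    assert_eq!(decode_niche(42, 0, 1, 1), 1); // Some
}
```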

@@ -318,8 +318,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             right.layout.ty
         );

-        let l = self.force_bits(left.to_scalar()?, left.layout.size)?;
-        let r = self.force_bits(right.to_scalar()?, right.layout.size)?;
+        let l = left.to_scalar()?.to_bits(left.layout.size)?;
+        let r = right.to_scalar()?.to_bits(right.layout.size)?;
         self.binary_int_op(bin_op, l, left.layout, r, right.layout)
     }
     _ if left.layout.ty.is_any_ptr() => {
@@ -386,7 +386,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         }
         _ => {
             assert!(layout.ty.is_integral());
-            let val = self.force_bits(val, layout.size)?;
+            let val = val.to_bits(layout.size)?;
             let (res, overflow) = match un_op {
                 Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
                 Neg => {

@@ -3,7 +3,6 @@
 //! All high-level functions to write to memory work on places as destinations.

 use std::convert::TryFrom;
-use std::fmt::Debug;
 use std::hash::Hash;

 use rustc_ast::Mutability;
@@ -15,14 +14,14 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
 use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};

 use super::{
-    alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, ConstAlloc, ImmTy, Immediate,
-    InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer,
-    PointerArithmetic, Scalar, ScalarMaybeUninit,
+    alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
+    ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
+    Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
 };

-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
 /// Information required for the sound usage of a `MemPlace`.
-pub enum MemPlaceMeta<Tag = ()> {
+pub enum MemPlaceMeta<Tag: Provenance = AllocId> {
     /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
     Meta(Scalar<Tag>),
     /// `Sized` types or unsized `extern type`
@@ -37,7 +36,7 @@ pub enum MemPlaceMeta<Tag = ()> {
 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
 rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);

-impl<Tag> MemPlaceMeta<Tag> {
+impl<Tag: Provenance> MemPlaceMeta<Tag> {
     pub fn unwrap_meta(self) -> Scalar<Tag> {
         match self {
             Self::Meta(s) => s,
@@ -52,22 +51,12 @@ impl<Tag> MemPlaceMeta<Tag> {
             Self::None | Self::Poison => false,
         }
     }
-
-    pub fn erase_tag(self) -> MemPlaceMeta<()> {
-        match self {
-            Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()),
-            Self::None => MemPlaceMeta::None,
-            Self::Poison => MemPlaceMeta::Poison,
-        }
-    }
 }

-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
-pub struct MemPlace<Tag = ()> {
-    /// A place may have an integral pointer for ZSTs, since it might
-    /// be turned back into a reference before ever being dereferenced.
-    /// However, it may never be uninit.
-    pub ptr: Scalar<Tag>,
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+pub struct MemPlace<Tag: Provenance = AllocId> {
+    /// The pointer can be a pure integer, with the `None` tag.
+    pub ptr: Pointer<Option<Tag>>,
     pub align: Align,
     /// Metadata for unsized places. Interpretation is up to the type.
     /// Must not be present for sized types, but can be missing for unsized types
@@ -76,10 +65,10 @@ pub struct MemPlace<Tag = ()> {
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(MemPlace, 56);
+rustc_data_structures::static_assert_size!(MemPlace, 48);

-#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)]
-pub enum Place<Tag = ()> {
+#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable, Debug)]
+pub enum Place<Tag: Provenance = AllocId> {
     /// A place referring to a value allocated in the `Memory` system.
     Ptr(MemPlace<Tag>),

@@ -89,18 +78,18 @@ pub enum Place<Tag = ()> {
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(Place, 64);
+rustc_data_structures::static_assert_size!(Place, 56);

 #[derive(Copy, Clone, Debug)]
-pub struct PlaceTy<'tcx, Tag = ()> {
+pub struct PlaceTy<'tcx, Tag: Provenance = AllocId> {
     place: Place<Tag>, // Keep this private; it helps enforce invariants.
     pub layout: TyAndLayout<'tcx>,
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
+rustc_data_structures::static_assert_size!(PlaceTy<'_>, 72);

-impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> std::ops::Deref for PlaceTy<'tcx, Tag> {
     type Target = Place<Tag>;
     #[inline(always)]
     fn deref(&self) -> &Place<Tag> {
@@ -109,16 +98,16 @@ impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
 }

 /// A MemPlace with its layout. Constructing it is only possible in this module.
-#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
-pub struct MPlaceTy<'tcx, Tag = ()> {
+#[derive(Copy, Clone, Hash, Eq, PartialEq, Debug)]
+pub struct MPlaceTy<'tcx, Tag: Provenance = AllocId> {
     mplace: MemPlace<Tag>,
     pub layout: TyAndLayout<'tcx>,
 }

 #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
+rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 64);

-impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> std::ops::Deref for MPlaceTy<'tcx, Tag> {
     type Target = MemPlace<Tag>;
     #[inline(always)]
     fn deref(&self) -> &MemPlace<Tag> {
@@ -126,42 +115,33 @@ impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
     }
 }

-impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
     #[inline(always)]
     fn from(mplace: MPlaceTy<'tcx, Tag>) -> Self {
         PlaceTy { place: Place::Ptr(mplace.mplace), layout: mplace.layout }
     }
 }

-impl<Tag> MemPlace<Tag> {
-    /// Replace ptr tag, maintain vtable tag (if any)
-    #[inline]
-    pub fn replace_tag(self, new_tag: Tag) -> Self {
-        MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta }
-    }
-
-    #[inline]
-    pub fn erase_tag(self) -> MemPlace {
-        MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() }
-    }
-
+impl<Tag: Provenance> MemPlace<Tag> {
     #[inline(always)]
-    fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
+    pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
         MemPlace { ptr, align, meta: MemPlaceMeta::None }
     }

-    #[inline(always)]
-    pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
-        Self::from_scalar_ptr(ptr.into(), align)
+    /// Adjust the provenance of the main pointer (metadata is unaffected).
+    pub fn map_provenance(self, f: impl FnOnce(Option<Tag>) -> Option<Tag>) -> Self {
+        MemPlace { ptr: self.ptr.map_provenance(f), ..self }
     }

     /// Turn an mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
     /// This is the inverse of `ref_to_mplace`.
     #[inline(always)]
-    pub fn to_ref(self) -> Immediate<Tag> {
+    pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
         match self.meta {
-            MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()),
-            MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()),
+            MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
+            MemPlaceMeta::Meta(meta) => {
+                Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
+            }
             MemPlaceMeta::Poison => bug!(
                 "MPlaceTy::dangling may never be used to produce a \
                 place that will have the address of its pointee taken"
@@ -177,29 +157,23 @@ impl<Tag> MemPlace<Tag> {
         cx: &impl HasDataLayout,
     ) -> InterpResult<'tcx, Self> {
         Ok(MemPlace {
-            ptr: self.ptr.ptr_offset(offset, cx)?,
+            ptr: self.ptr.offset(offset, cx)?,
             align: self.align.restrict_for_offset(offset),
             meta,
         })
     }
 }

-impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
     /// Produces a MemPlace that works for ZST but nothing else
     #[inline]
-    pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
+    pub fn dangling(layout: TyAndLayout<'tcx>) -> Self {
         let align = layout.align.abi;
-        let ptr = Scalar::from_machine_usize(align.bytes(), cx);
+        let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address
         // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
         MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
     }
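The `dangling` constructor above uses the type's alignment as the address, which yields a pointer that is simultaneously non-null and well-aligned without any allocation; the standard library's `NonNull::dangling` relies on the same trick. A quick check of those two properties:

```rust
use std::ptr::NonNull;

// A ZST reference needs no backing allocation: any non-null, well-aligned
// address is valid for it. Using the alignment itself as the address
// guarantees both properties at once.
fn main() {
    let p: NonNull<u64> = NonNull::dangling();
    let addr = p.as_ptr() as usize;
    assert!(addr != 0 && addr % std::mem::align_of::<u64>() == 0);
}
```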

-    /// Replace ptr tag, maintain vtable tag (if any)
-    #[inline]
-    pub fn replace_tag(&self, new_tag: Tag) -> Self {
-        MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
-    }
-
     #[inline]
     pub fn offset(
         &self,
@@ -212,7 +186,7 @@ impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
     }

     #[inline]
-    fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self {
+    pub fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
         MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
     }

@@ -244,19 +218,14 @@ impl<'tcx, Tag: Provenance> MPlaceTy<'tcx, Tag> {
     }

 // These are defined here because they produce a place.
-impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
     #[inline(always)]
     /// Note: do not call `as_ref` on the resulting place. This function should only be used to
     /// read from the resulting mplace, not to get its address back.
-    pub fn try_as_mplace(
-        &self,
-        cx: &impl HasDataLayout,
-    ) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
+    pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
         match **self {
             Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
-            Operand::Immediate(_) if self.layout.is_zst() => {
-                Ok(MPlaceTy::dangling(self.layout, cx))
-            }
+            Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
             Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
         }
     }
@@ -264,12 +233,12 @@ impl<'tcx, Tag: Provenance> OpTy<'tcx, Tag> {
     #[inline(always)]
     /// Note: do not call `as_ref` on the resulting place. This function should only be used to
     /// read from the resulting mplace, not to get its address back.
-    pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> {
-        self.try_as_mplace(cx).unwrap()
+    pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag> {
+        self.try_as_mplace().unwrap()
     }
 }

-impl<Tag: Debug> Place<Tag> {
+impl<Tag: Provenance> Place<Tag> {
     #[inline]
     pub fn assert_mem_place(self) -> MemPlace<Tag> {
         match self {
@@ -279,7 +248,7 @@ impl<Tag: Provenance> Place<Tag> {
     }
 }

-impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
+impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
     #[inline]
     pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
         MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
@@ -290,7 +259,7 @@ impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
 impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
 where
     // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
-    Tag: Debug + Copy + Eq + Hash + 'static,
+    Tag: Provenance + Eq + Hash + 'static,
     M: Machine<'mir, 'tcx, PointerTag = Tag>,
 {
     /// Take a value, which represents a (thin or wide) reference, and make it a place.
@@ -307,14 +276,12 @@ where
             val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
         let layout = self.layout_of(pointee_type)?;
         let (ptr, meta) = match **val {
-            Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None),
-            Immediate::ScalarPair(ptr, meta) => {
-                (ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
-            }
+            Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
+            Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
         };

         let mplace = MemPlace {
-            ptr,
+            ptr: self.scalar_to_ptr(ptr.check_init()?),
             // We could use the run-time alignment here. For now, we do not, because
             // the point of tracking the alignment here is to make sure that the *static*
             // alignment information emitted with the loads is correct. The run-time
@@ -333,8 +300,9 @@ where
     ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
         let val = self.read_immediate(src)?;
         trace!("deref to {} on {:?}", val.layout.ty, *val);
-        let place = self.ref_to_mplace(&val)?;
-        self.mplace_access_checked(place, None)
+        let mplace = self.ref_to_mplace(&val)?;
+        self.check_mplace_access(mplace, CheckInAllocMsg::DerefTest)?;
+        Ok(mplace)
     }

     #[inline]
@@ -359,38 +327,19 @@ where
         self.memory.get_mut(place.ptr, size, place.align)
     }

-    /// Return the "access-checked" version of this `MPlace`, where for non-ZST
-    /// this is definitely a `Pointer`.
-    ///
-    /// `force_align` must only be used when correct alignment does not matter,
-    /// like in Stacked Borrows.
-    pub fn mplace_access_checked(
+    /// Check if this mplace is dereferenceable and sufficiently aligned.
+    fn check_mplace_access(
         &self,
-        mut place: MPlaceTy<'tcx, M::PointerTag>,
-        force_align: Option<Align>,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
+        mplace: MPlaceTy<'tcx, M::PointerTag>,
+        msg: CheckInAllocMsg,
+    ) -> InterpResult<'tcx> {
         let (size, align) = self
-            .size_and_align_of_mplace(&place)?
-            .unwrap_or((place.layout.size, place.layout.align.abi));
-        assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?");
-        let align = force_align.unwrap_or(align);
-        // Record new (stricter, unless forced) alignment requirement in place.
-        place.mplace.align = align;
-        // When dereferencing a pointer, it must be non-null, aligned, and live.
-        if let Some(ptr) = self.memory.check_ptr_access(place.ptr, size, align)? {
-            place.mplace.ptr = ptr.into();
-        }
-        Ok(place)
-    }
-
-    /// Force `place.ptr` to a `Pointer`.
-    /// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
-    pub(super) fn force_mplace_ptr(
-        &self,
-        mut place: MPlaceTy<'tcx, M::PointerTag>,
-    ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
-        place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
-        Ok(place)
+            .size_and_align_of_mplace(&mplace)?
+            .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
+        assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?");
+        let align = M::enforce_alignment(&self.memory.extra).then_some(align);
+        self.memory.check_ptr_access_align(mplace.ptr, size, align.unwrap_or(Align::ONE), msg)?;
+        Ok(())
     }

     /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
@@ -558,10 +507,7 @@ where
             let layout = self.layout_of(self.tcx.types.usize)?;
             let n = self.access_local(self.frame(), local, Some(layout))?;
             let n = self.read_scalar(&n)?;
-            let n = u64::try_from(
-                self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
-            )
-            .unwrap();
+            let n = n.to_machine_usize(self)?;
             self.mplace_index(base, n)?
         }

@@ -677,16 +623,6 @@ where
         Ok(place_ty)
     }

-    /// Write a scalar to a place
-    #[inline(always)]
-    pub fn write_scalar(
-        &mut self,
-        val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
-        dest: &PlaceTy<'tcx, M::PointerTag>,
-    ) -> InterpResult<'tcx> {
-        self.write_immediate(Immediate::Scalar(val.into()), dest)
-    }
-
     /// Write an immediate to a place
     #[inline(always)]
     pub fn write_immediate(
@@ -704,21 +640,24 @@ where
         Ok(())
     }

-    /// Write an `Immediate` to memory.
+    /// Write a scalar to a place
     #[inline(always)]
-    pub fn write_immediate_to_mplace(
+    pub fn write_scalar(
         &mut self,
-        src: Immediate<M::PointerTag>,
-        dest: &MPlaceTy<'tcx, M::PointerTag>,
+        val: impl Into<ScalarMaybeUninit<M::PointerTag>>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
     ) -> InterpResult<'tcx> {
-        self.write_immediate_to_mplace_no_validate(src, dest)?;
+        self.write_immediate(Immediate::Scalar(val.into()), dest)
     }

-        if M::enforce_validity(self) {
-            // Data got changed, better make sure it matches the type!
-            self.validate_operand(&dest.into())?;
-        }
-
-        Ok(())
+    /// Write a pointer to a place
+    #[inline(always)]
+    pub fn write_pointer(
+        &mut self,
+        ptr: impl Into<Pointer<Option<M::PointerTag>>>,
+        dest: &PlaceTy<'tcx, M::PointerTag>,
+    ) -> InterpResult<'tcx> {
+        self.write_scalar(Scalar::from_maybe_pointer(ptr.into(), self), dest)
|
||||
}
|
||||
|
||||
/// Write an immediate to a place.
|
||||
|
@ -733,7 +672,7 @@ where
|
|||
// This is a very common path, avoid some checks in release mode
|
||||
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
|
||||
match src {
|
||||
Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(_))) => assert_eq!(
|
||||
Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
|
||||
self.pointer_size(),
|
||||
dest.layout.size,
|
||||
"Size mismatch when writing pointer"
|
||||
|
@ -1020,7 +959,7 @@ where
|
|||
kind: MemoryKind<M::MemoryKind>,
|
||||
) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
|
||||
let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
|
||||
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
|
||||
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
|
||||
}
|
||||
|
||||
/// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
|
||||
|
@ -1125,7 +1064,7 @@ where
|
|||
let _ = self.tcx.global_alloc(raw.alloc_id);
|
||||
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
|
||||
let layout = self.layout_of(raw.ty)?;
|
||||
Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
|
||||
Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
|
||||
}
|
||||
|
||||
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
|
||||
|
@ -1134,7 +1073,7 @@ where
|
|||
&self,
|
||||
mplace: &MPlaceTy<'tcx, M::PointerTag>,
|
||||
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
|
||||
let vtable = mplace.vtable(); // also sanity checks the type
|
||||
let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
|
||||
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
|
||||
let layout = self.layout_of(ty)?;
|
||||
|
||||
|
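
The `Pointer<Option<M::PointerTag>>` type threaded through the new `write_pointer` above is the crux of the overhaul: `None` provenance means the offset is already an absolute address. A minimal standalone sketch of that idea, using simplified stand-in types rather than the real rustc definitions:

#[derive(Clone, Copy, Debug)]
struct AllocId(u64);

// Simplified stand-in for the interpreter's pointer: parametric over provenance.
#[derive(Clone, Copy, Debug)]
struct Pointer<Prov> {
    provenance: Prov,
    offset: u64, // an absolute address once provenance is `None`
}

// Ptr-to-int casts need no machine state: just drop the provenance.
fn ptr_to_int(ptr: Pointer<Option<AllocId>>) -> u64 {
    ptr.offset
}

fn main() {
    let p = Pointer { provenance: Some(AllocId(7)), offset: 0x1000 };
    assert_eq!(ptr_to_int(Pointer { provenance: None, ..p }), 0x1000);
    println!("{:?}", p);
}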

@@ -162,9 +162,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
use rustc_middle::mir::Rvalue::*;
match *rvalue {
ThreadLocalRef(did) => {
-let id = M::thread_local_static_alloc_id(self, did)?;
-let val = self.global_base_pointer(id.into())?;
-self.write_scalar(val, &dest)?;
+let ptr = M::thread_local_static_base_pointer(self, did)?;
+self.write_pointer(ptr, &dest)?;
}

Use(ref operand) => {

@@ -240,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// of the first element.
let elem_size = first.layout.size;
let first_ptr = first.ptr;
-let rest_ptr = first_ptr.ptr_offset(elem_size, self)?;
+let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly(
first_ptr,
first.align,

@@ -264,11 +263,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
AddressOf(_, place) | Ref(_, _, place) => {
let src = self.eval_place(place)?;
let place = self.force_allocation(&src)?;
-if place.layout.size.bytes() > 0 {
-// definitely not a ZST
-assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
-}
-self.write_immediate(place.to_ref(), &dest)?;
+self.write_immediate(place.to_ref(self), &dest)?;
}

NullaryOp(mir::NullOp::Box, _) => {
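
With offsets measured as absolute addresses, the `first_ptr.offset(elem_size, self)?` call above can check overflow with plain integer arithmetic. A hedged sketch of that check (simplified; the real `offset` also threads machine context and interpreter error types):

fn ptr_offset(addr: u64, offset: u64) -> Result<u64, &'static str> {
    // Exact check: the absolute address must not wrap around.
    addr.checked_add(offset).ok_or("pointer arithmetic overflow")
}

fn main() {
    assert_eq!(ptr_offset(0x1000, 16), Ok(0x1010));
    assert!(ptr_offset(u64::MAX, 1).is_err());
}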

@@ -12,8 +12,8 @@ use rustc_target::abi::{self, LayoutOf as _};
use rustc_target::spec::abi::Abi;

use super::{
-FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, StackPopCleanup,
-StackPopUnwind,
+FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
+StackPopCleanup, StackPopUnwind,
};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {

@@ -72,8 +72,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() {
ty::FnPtr(sig) => {
let caller_abi = sig.abi();
-let fn_ptr = self.read_scalar(&func)?.check_init()?;
-let fn_val = self.memory.get_fn(fn_ptr)?;
+let fn_ptr = self.read_pointer(&func)?;
+let fn_val = self.memory.get_fn(fn_ptr.into())?;
(
fn_val,
caller_abi,

@@ -454,11 +454,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}
None => {
// Unsized self.
-args[0].assert_mem_place(self)
+args[0].assert_mem_place()
}
};
// Find and consult vtable
-let vtable = receiver_place.vtable();
+let vtable = self.scalar_to_ptr(receiver_place.vtable());
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;

// `*mut receiver_place.layout.ty` is almost the layout that we

@@ -468,8 +468,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
// Adjust receiver argument.
-args[0] =
-OpTy::from(ImmTy::from_immediate(receiver_place.ptr.into(), this_receiver_ptr));
+args[0] = OpTy::from(ImmTy::from_immediate(
+Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
+this_receiver_ptr,
+));
trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function
self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)

@@ -499,12 +501,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
};

let arg = ImmTy::from_immediate(
-place.to_ref(),
+place.to_ref(self),
self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
);

let ty = self.tcx.mk_unit(); // return type is ()
-let dest = MPlaceTy::dangling(self.layout_of(ty)?, self);
+let dest = MPlaceTy::dangling(self.layout_of(ty)?);

self.eval_fn_call(
FnVal::Instance(instance),
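
`Scalar::from_maybe_pointer(receiver_place.ptr, self)` above turns a pointer whose provenance may be absent back into a scalar. A simplified model of the intended semantics (an assumed sketch, not the real rustc API):

#[derive(Debug)]
enum Scalar {
    Int(u64),
    Ptr(u64 /* alloc id */, u64 /* offset */),
}

// Keep the provenance when there is one; otherwise the address is plain bits.
fn from_maybe_pointer(provenance: Option<u64>, addr: u64) -> Scalar {
    match provenance {
        Some(alloc_id) => Scalar::Ptr(alloc_id, addr),
        None => Scalar::Int(addr),
    }
}

fn main() {
    println!("{:?}", from_maybe_pointer(Some(3), 64));
    println!("{:?}", from_maybe_pointer(None, 64));
}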

@@ -1,6 +1,6 @@
use std::convert::TryFrom;

-use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
+use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
use rustc_middle::ty::{
self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN,
COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,

@@ -42,23 +42,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// corresponds to the first method declared in the trait of the provided vtable.
pub fn get_vtable_slot(
&self,
-vtable: Scalar<M::PointerTag>,
+vtable: Pointer<Option<M::PointerTag>>,
idx: u64,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
-let vtable_slot = vtable.ptr_offset(ptr_size * idx, self)?;
+let vtable_slot = vtable.offset(ptr_size * idx, self)?;
let vtable_slot = self
.memory
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
-let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?;
+let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
self.memory.get_fn(fn_ptr)
}

/// Returns the drop fn instance as well as the actual dynamic type.
pub fn read_drop_type_from_vtable(
&self,
-vtable: Scalar<M::PointerTag>,
+vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
let pointer_size = self.pointer_size();
// We don't care about the pointee type; we just want a pointer.

@@ -77,7 +77,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.check_init()?;
// We *need* an instance here, no other kind of function value, to be able
// to determine the type.
-let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?;
+let drop_instance = self.memory.get_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);

@@ -93,7 +93,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn read_size_and_align_from_vtable(
&self,
-vtable: Scalar<M::PointerTag>,
+vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size();
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),

@@ -109,11 +109,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
.check_init()?;
-let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap();
+let size = size.to_machine_usize(self)?;
let align = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
.check_init()?;
-let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap();
+let align = align.to_machine_usize(self)?;
let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;

if size >= self.tcx.data_layout.obj_size_bound() {
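
The vtable readers above index slots as `vtable + ptr_size * idx`, with the first three slots holding drop-in-place, size, and align (cf. the `COMMON_VTABLE_ENTRIES_*` constants). A toy model of that slot addressing, with a plain array standing in for real memory and the slot indices assumed for illustration:

// Assumed slot order, mirroring COMMON_VTABLE_ENTRIES_{DROPINPLACE,SIZE,ALIGN}.
const SLOT_DROPINPLACE: usize = 0;
const SLOT_SIZE: usize = 1;
const SLOT_ALIGN: usize = 2;

// Stands in for `vtable.offset(ptr_size * idx)` plus a pointer-sized read.
fn read_slot(vtable: &[usize], idx: usize) -> usize {
    vtable[idx]
}

fn main() {
    // fake entries: [drop fn addr, size, align, method 0, method 1]
    let vtable = [0xdead_usize, 24, 8, 0x1111, 0x2222];
    assert_eq!(read_slot(&vtable, SLOT_DROPINPLACE), 0xdead);
    assert_eq!(read_slot(&vtable, SLOT_SIZE), 24);
    assert_eq!(read_slot(&vtable, SLOT_ALIGN), 8);
}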

@@ -21,7 +21,7 @@ use std::hash::Hash;

use super::{
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
-MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor,
+MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
};

macro_rules! throw_validation_failure {

@@ -324,7 +324,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() {
ty::Dynamic(..) => {
-let vtable = meta.unwrap_meta();
+let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
self.ecx.memory.check_ptr_access_align(

@@ -335,8 +335,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
),
self.path,
err_ub!(DanglingIntPointer(..)) |
-err_ub!(PointerUseAfterFree(..)) |
-err_unsup!(ReadBytesAsPointer) =>
+err_ub!(PointerUseAfterFree(..)) =>
{ "dangling vtable pointer in wide pointer" },
err_ub!(AlignmentCheckFailed { .. }) =>
{ "unaligned vtable pointer in wide pointer" },

@@ -347,8 +346,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
self.ecx.read_drop_type_from_vtable(vtable),
self.path,
err_ub!(DanglingIntPointer(..)) |
-err_ub!(InvalidFunctionPointer(..)) |
-err_unsup!(ReadBytesAsPointer) =>
+err_ub!(InvalidFunctionPointer(..)) =>
{ "invalid drop function pointer in vtable (not pointing to a function)" },
err_ub!(InvalidVtableDropFn(..)) =>
{ "invalid drop function pointer in vtable (function has incompatible signature)" },

@@ -437,8 +435,6 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
{ "a dangling {} (address 0x{:x} is unallocated)", kind, i },
err_ub!(PointerOutOfBounds { .. }) =>
{ "a dangling {} (going beyond the bounds of its allocation)", kind },
-err_unsup!(ReadBytesAsPointer) =>
-{ "a dangling {} (created from integer)", kind },
// This cannot happen during const-eval (because interning already detects
// dangling pointers), but it can happen in Miri.
err_ub!(PointerUseAfterFree(..)) =>

@@ -448,17 +444,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it.
-// Normalize before handing `place` to tracking because that will
-// check for duplicates.
-let place = if size.bytes() > 0 {
-self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
-} else {
-place
-};
// Skip validation entirely for some external statics
-if let Scalar::Ptr(ptr) = place.ptr {
-let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id);
+if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
+// not a ZST
+let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind {
assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did));

@@ -546,7 +535,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
// types below!
if self.ctfe_mode.is_some() {
// Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
-let is_bits = value.check_init().map_or(false, |v| v.is_bits());
+let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
if !is_bits {
throw_validation_failure!(self.path,
{ "{}", value } expected { "initialized plain (non-pointer) bytes" }

@@ -601,12 +590,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
// message below.
let value = value.to_scalar_or_uninit();
let _fn = try_validation!(
-value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)),
+value.check_init().and_then(|ptr| self.ecx.memory.get_fn(self.ecx.scalar_to_ptr(ptr))),
self.path,
err_ub!(DanglingIntPointer(..)) |
err_ub!(InvalidFunctionPointer(..)) |
-err_ub!(InvalidUninitBytes(None)) |
-err_unsup!(ReadBytesAsPointer) =>
+err_ub!(InvalidUninitBytes(None)) =>
{ "{}", value } expected { "a function pointer" },
);
// FIXME: Check if the signature matches

@@ -664,8 +652,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
err_ub!(InvalidUninitBytes(None)) => { "{}", value }
expected { "something {}", wrapping_range_format(valid_range, max_hi) },
);
-let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
-Err(ptr) => {
+let bits = match value.try_to_int() {
+Err(_) => {
// So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE.
+let ptr = self.ecx.scalar_to_ptr(value);
if lo == 1 && hi == max_hi {
// Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.memory.ptr_may_be_null(ptr) {

@@ -690,7 +681,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, 'tcx, M>
)
}
}
-Ok(data) => data,
+Ok(int) => int.assert_bits(op.layout.size),
};
// Now compare. This is slightly subtle because this is a special "wrap-around" range.
if wrapping_range_contains(&valid_range, bits) {

@@ -832,7 +823,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
match op.layout.ty.kind() {
ty::Str => {
-let mplace = op.assert_mem_place(self.ecx); // strings are never immediate
+let mplace = op.assert_mem_place(); // strings are never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),

@@ -853,7 +844,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Optimized handling for arrays of integer/float type.

// Arrays cannot be immediate, slices are never immediate.
-let mplace = op.assert_mem_place(self.ecx);
+let mplace = op.assert_mem_place();
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
// This is the element type size.

@@ -940,9 +931,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Construct a visitor
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };

-// Try to cast to ptr *once* instead of all the time.
-let op = self.force_op_ptr(&op).unwrap_or(*op);
-
// Run it.
match visitor.visit_value(&op) {
Ok(()) => Ok(()),
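
The niche check above compares `bits` against a possibly wrap-around valid range ("Only null is the niche" corresponds to `lo == 1 && hi == max_hi`). A self-contained sketch of `wrapping_range_contains` under that reading (simplified signature; the real one takes a range struct):

fn wrapping_range_contains(start: u128, end: u128, bits: u128) -> bool {
    if start <= end {
        // Ordinary range.
        start <= bits && bits <= end
    } else {
        // Wrapped range: valid values sit above `start` or below `end`.
        bits >= start || bits <= end
    }
}

fn main() {
    let max = u128::MAX;
    // "Only null is the niche": everything except 0 is valid.
    assert!(wrapping_range_contains(1, max, 42));
    assert!(!wrapping_range_contains(1, max, 0));
    // A genuinely wrapping range covering {max, 0, 1}.
    assert!(wrapping_range_contains(max, 1, 0));
    assert!(!wrapping_range_contains(max, 1, 2));
}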

@@ -211,7 +211,8 @@ macro_rules! make_value_visitor {
// If it is a trait object, switch to the real type that was used to create it.
ty::Dynamic(..) => {
// immediate trait objects are not a thing
-let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
+let op = v.to_op(self.ecx())?;
+let dest = op.assert_mem_place();
let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
trace!("walk_value: dyn object layout: {:#?}", inner.layout);
// recurse with the inner type

@@ -241,7 +242,8 @@ macro_rules! make_value_visitor {
},
FieldsShape::Array { .. } => {
// Let's get an mplace first.
-let mplace = v.to_op(self.ecx())?.assert_mem_place(self.ecx());
+let op = v.to_op(self.ecx())?;
+let mplace = op.assert_mem_place();
// Now we can go over all the fields.
// This uses the *run-time length*, i.e., if we are a slice,
// the dynamic info from the metadata is used.

@@ -403,7 +403,7 @@ fn collect_items_rec<'tcx>(
recursion_depth_reset = None;

if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
-for &((), id) in alloc.relocations().values() {
+for &id in alloc.relocations().values() {
collect_miri(tcx, id, &mut neighbors);
}
}

@@ -1369,7 +1369,7 @@ fn collect_miri<'tcx>(
}
GlobalAlloc::Memory(alloc) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc);
-for &((), inner) in alloc.relocations().values() {
+for &inner in alloc.relocations().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| {
collect_miri(tcx, inner, output);
});

@@ -1402,9 +1402,9 @@ fn collect_const_value<'tcx>(
output: &mut Vec<Spanned<MonoItem<'tcx>>>,
) {
match value {
-ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output),
+ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
-for &((), id) in alloc.relocations().values() {
+for &id in alloc.relocations().values() {
collect_miri(tcx, id, output);
}
}
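
After the overhaul, iterating `alloc.relocations().values()` above yields allocation ids directly instead of `((), AllocId)` pairs. An assumed-shape sketch of the resulting collection loop, with a map standing in for the real relocation table:

use std::collections::BTreeMap;

type AllocId = u64; // stand-in for the interned id

// The unit "extra" component is gone, so `.values()` yields ids directly.
fn alloc_ids(relocations: &BTreeMap<u64, AllocId>) -> Vec<AllocId> {
    relocations.values().copied().collect()
}

fn main() {
    let relocations = BTreeMap::from([(0, 17), (8, 42)]);
    assert_eq!(alloc_ids(&relocations), vec![17, 42]);
}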

@@ -31,9 +31,8 @@ use rustc_trait_selection::traits;
use crate::const_eval::ConstEvalErr;
use crate::interpret::{
self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy,
-Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy,
-Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup,
-StackPopUnwind,
+Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, MemoryKind, OpTy,
+Operand as InterpOperand, PlaceTy, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
};
use crate::transform::MirPass;

@@ -157,7 +156,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
struct ConstPropMachine<'mir, 'tcx> {
/// The virtual call stack.
-stack: Vec<Frame<'mir, 'tcx, (), ()>>,
+stack: Vec<Frame<'mir, 'tcx>>,
/// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
written_only_inside_own_block_locals: FxHashSet<Local>,
/// Locals that need to be cleared after every block terminates.

@@ -223,10 +222,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
bug!("panics terminators are not evaluated in ConstProp")
}

-fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
-throw_unsup!(ReadPointerAsBytes)
-}
-
fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp,

@@ -587,8 +582,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let right_size = r.layout.size;
let r_bits = r.to_scalar().ok();
-// This is basically `force_bits`.
-let r_bits = r_bits.and_then(|r| r.to_bits_or_ptr(right_size, &self.tcx).ok());
+let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", source_info);
self.report_assert_as_lint(

@@ -759,8 +753,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
}
};

-let arg_value =
-this.ecx.force_bits(const_arg.to_scalar()?, const_arg.layout.size)?;
+let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
let dest = this.ecx.eval_place(place)?;

match op {

@@ -876,7 +869,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let alloc = this
.ecx
.intern_with_temp_alloc(value.layout, |ecx, dest| {
-ecx.write_immediate_to_mplace(*imm, dest)
+ecx.write_immediate(*imm, dest)
})
.unwrap();
Ok(Some(alloc))

@@ -928,12 +921,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
match **op {
interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
-s.is_bits()
+s.try_to_int().is_ok()
}
interpret::Operand::Immediate(Immediate::ScalarPair(
ScalarMaybeUninit::Scalar(l),
ScalarMaybeUninit::Scalar(r),
-)) => l.is_bits() && r.is_bits(),
+)) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
_ => false,
}
}
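
`is_bits()` is replaced throughout by `try_to_int().is_ok()`, which fails exactly when the scalar still carries provenance. A compact model of that API shape (assumed, simplified):

#[derive(Clone, Copy, Debug)]
enum Scalar {
    Int(u128),
    Ptr(u64 /* provenance */, u64 /* offset */),
}

impl Scalar {
    // Succeeds only for provenance-free bit patterns.
    fn try_to_int(self) -> Result<u128, Scalar> {
        match self {
            Scalar::Int(i) => Ok(i),
            ptr => Err(ptr),
        }
    }
}

fn main() {
    assert!(Scalar::Int(5).try_to_int().is_ok());
    assert!(Scalar::Ptr(1, 0x10).try_to_int().is_err());
}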

@@ -46,7 +46,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
.expect("if we have an evaluated constant we must know the layout");
int.assert_bits(layout.size)
}
-Scalar::Ptr(_) => continue,
+Scalar::Ptr(..) => continue,
};
const FALSE: u128 = 0;

@@ -211,7 +211,7 @@ fn find_branch_value_info<'tcx>(
return None;
};
let branch_value_scalar = branch_value.literal.try_to_scalar()?;
-Some((branch_value_scalar, branch_value_ty, *to_switch_on))
+Some((branch_value_scalar.into(), branch_value_ty, *to_switch_on))
}
_ => None,
}

@@ -1,6 +1,6 @@
use std::collections::BTreeSet;
+use std::fmt::Display;
use std::fmt::Write as _;
-use std::fmt::{Debug, Display};
use std::fs;
use std::io::{self, Write};
use std::path::{Path, PathBuf};

@@ -13,7 +13,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::{
-read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer,
+read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer, Provenance,
};
use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*;

@@ -665,12 +665,12 @@ pub fn write_allocations<'tcx>(
w: &mut dyn Write,
) -> io::Result<()> {
fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
-alloc.relocations().values().map(|(_, id)| *id)
+alloc.relocations().values().map(|id| *id)
}
fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
match val {
-ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
-Either::Left(Either::Left(std::iter::once(ptr.alloc_id)))
+ConstValue::Scalar(interpret::Scalar::Ptr(ptr, _size)) => {
+Either::Left(Either::Left(std::iter::once(ptr.provenance)))
}
ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
Either::Left(Either::Right(std::iter::empty()))

@@ -755,7 +755,7 @@ pub fn write_allocations<'tcx>(
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.`
/// This also prints relocations adequately.
-pub fn display_allocation<Tag: Copy + Debug, Extra>(
+pub fn display_allocation<Tag, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Tag, Extra>,
) -> RenderAllocation<'a, 'tcx, Tag, Extra> {

@@ -768,7 +768,7 @@ pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Tag, Extra>,
}

-impl<Tag: Copy + Debug, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
+impl<Tag: Provenance, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let RenderAllocation { tcx, alloc } = *self;
write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?;

@@ -811,7 +811,7 @@ fn write_allocation_newline(
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it.
-fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
+fn write_allocation_bytes<Tag: Provenance, Extra>(
tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>,
w: &mut dyn std::fmt::Write,

@@ -847,7 +847,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
if i != line_start {
write!(w, " ")?;
}
-if let Some(&(tag, target_id)) = alloc.relocations().get(&i) {
+if let Some(&tag) = alloc.relocations().get(&i) {
// Memory with a relocation must be defined
let j = i.bytes_usize();
let offset = alloc

@@ -855,7 +855,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset);
let relocation_width = |bytes| bytes * 3;
-let ptr = Pointer::new_with_tag(target_id, offset, tag);
+let ptr = Pointer::new(tag, offset);
let mut target = format!("{:?}", ptr);
if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space.