
CTFE/Miri engine Pointer type overhaul: make Scalar-to-Pointer conversion infallible

This resolves all the problems we had around "normalizing" the representation of a Scalar in case it carries a Pointer value: we can just use Pointer if we want to have a value that we are sure is already normalized.
Ralf Jung 2021-07-12 18:22:15 +02:00
parent 5aff6dd07a
commit d4f7dd6702
34 changed files with 839 additions and 724 deletions

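For orientation, here is a minimal standalone sketch of the shape this commit gives `Pointer` and `Scalar`. These are not the rustc definitions (the real types carry `Size`, `ScalarInt`, derives, and much more); `u64` stands in for `Size`. The point it illustrates is the one from the commit message: a pointer is provenance plus offset, a "maybe pointer" uses `Option` provenance, and converting that into a `Scalar` can never fail.

    #[derive(Copy, Clone, Debug)]
    struct AllocId(u64);

    // A pointer is provenance plus an offset; what the offset means depends on
    // the provenance type (relative to the allocation for `AllocId`).
    #[derive(Copy, Clone, Debug)]
    struct Pointer<Tag = AllocId> {
        provenance: Tag,
        offset: u64,
    }

    impl<Tag> Pointer<Tag> {
        fn new(provenance: Tag, offset: u64) -> Self {
            Pointer { provenance, offset }
        }
        // Take the pointer apart, e.g. to store it compactly in an allocation.
        fn into_parts(self) -> (Tag, u64) {
            (self.provenance, self.offset)
        }
    }

    // A scalar is either a plain integer or a provenance-carrying pointer.
    #[derive(Copy, Clone, Debug)]
    enum Scalar<Tag = AllocId> {
        Int(u128),
        Ptr(Pointer<Tag>),
    }

    impl<Tag> Scalar<Tag> {
        // The "maybe pointer" form uses `Option` provenance: `None` means the
        // value is just an integer. The conversion never fails, which is the
        // normalization guarantee the commit message describes.
        fn from_maybe_pointer(ptr: Pointer<Option<Tag>>) -> Self {
            match ptr.into_parts() {
                (Some(tag), offset) => Scalar::Ptr(Pointer::new(tag, offset)),
                (None, offset) => Scalar::Int(offset.into()),
            }
        }
    }

    fn main() {
        let with_prov = Pointer::new(Some(AllocId(0)), 8);
        let without_prov: Pointer<Option<AllocId>> = Pointer::new(None, 42);
        println!("{:?}", Scalar::from_maybe_pointer(with_prov));    // Ptr(..)
        println!("{:?}", Scalar::from_maybe_pointer(without_prov)); // Int(42)
    }
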
View file

@ -244,7 +244,8 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
} }
} }
Scalar::Ptr(ptr) => { Scalar::Ptr(ptr) => {
let (base_addr, base_addr_space) = match self.tcx.global_alloc(ptr.alloc_id) { let (alloc_id, offset) = ptr.into_parts();
let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => { GlobalAlloc::Memory(alloc) => {
let init = const_alloc_to_llvm(self, alloc); let init = const_alloc_to_llvm(self, alloc);
let value = match alloc.mutability { let value = match alloc.mutability {
@ -252,7 +253,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
_ => self.static_addr_of(init, alloc.align, None), _ => self.static_addr_of(init, alloc.align, None),
}; };
if !self.sess().fewer_names() { if !self.sess().fewer_names() {
llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes()); llvm::set_value_name(value, format!("{:?}", alloc_id).as_bytes());
} }
(value, AddressSpace::DATA) (value, AddressSpace::DATA)
} }
@ -269,7 +270,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let llval = unsafe { let llval = unsafe {
llvm::LLVMConstInBoundsGEP( llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)), self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
&self.const_usize(ptr.offset.bytes()), &self.const_usize(offset.bytes()),
1, 1,
) )
}; };

View file

@ -25,7 +25,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
let pointer_size = dl.pointer_size.bytes() as usize; let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0; let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations().iter() { for &(offset, alloc_id) in alloc.relocations().iter() {
let offset = offset.bytes(); let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset); assert_eq!(offset as usize as u64, offset);
let offset = offset as usize; let offset = offset as usize;

View file

@ -25,7 +25,7 @@ use crate::ty;
/// module provides higher-level access. /// module provides higher-level access.
#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)] #[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
#[derive(HashStable)] #[derive(HashStable)]
pub struct Allocation<Tag = (), Extra = ()> { pub struct Allocation<Tag = AllocId, Extra = ()> {
/// The actual bytes of the allocation. /// The actual bytes of the allocation.
/// Note that the bytes of a pointer represent the offset of the pointer. /// Note that the bytes of a pointer represent the offset of the pointer.
bytes: Vec<u8>, bytes: Vec<u8>,
@ -154,25 +154,17 @@ impl<Tag> Allocation<Tag> {
} }
} }
impl Allocation<()> { impl Allocation {
/// Add Tag and Extra fields /// Convert Tag and add Extra fields
pub fn with_tags_and_extra<T, E>( pub fn with_prov_and_extra<Tag, Extra>(
self, self,
mut tagger: impl FnMut(AllocId) -> T, mut tagger: impl FnMut(AllocId) -> Tag,
extra: E, extra: Extra,
) -> Allocation<T, E> { ) -> Allocation<Tag, Extra> {
Allocation { Allocation {
bytes: self.bytes, bytes: self.bytes,
relocations: Relocations::from_presorted( relocations: Relocations::from_presorted(
self.relocations self.relocations.iter().map(|&(offset, tag)| (offset, tagger(tag))).collect(),
.iter()
// The allocations in the relocations (pointers stored *inside* this allocation)
// all get the base pointer tag.
.map(|&(offset, ((), alloc))| {
let tag = tagger(alloc);
(offset, (tag, alloc))
})
.collect(),
), ),
init_mask: self.init_mask, init_mask: self.init_mask,
align: self.align, align: self.align,
@ -339,8 +331,8 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
self.check_relocations(cx, range)?; self.check_relocations(cx, range)?;
} else { } else {
// Maybe a pointer. // Maybe a pointer.
if let Some(&(tag, alloc_id)) = self.relocations.get(&range.start) { if let Some(&prov) = self.relocations.get(&range.start) {
let ptr = Pointer::new_with_tag(alloc_id, Size::from_bytes(bits), tag); let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(ScalarMaybeUninit::Scalar(ptr.into())); return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
} }
} }
@ -371,9 +363,12 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
} }
}; };
let bytes = match val.to_bits_or_ptr(range.size, cx) { let (bytes, provenance) = match val.to_bits_or_ptr(range.size, cx) {
Err(val) => u128::from(val.offset.bytes()), Err(val) => {
Ok(data) => data, let (provenance, offset) = val.into_parts();
(u128::from(offset.bytes()), Some(provenance))
}
Ok(data) => (data, None),
}; };
let endian = cx.data_layout().endian; let endian = cx.data_layout().endian;
@ -381,8 +376,8 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
write_target_uint(endian, dst, bytes).unwrap(); write_target_uint(endian, dst, bytes).unwrap();
// See if we have to also write a relocation. // See if we have to also write a relocation.
if let Scalar::Ptr(val) = val { if let Some(provenance) = provenance {
self.relocations.insert(range.start, (val.tag, val.alloc_id)); self.relocations.insert(range.start, provenance);
} }
Ok(()) Ok(())
@ -392,11 +387,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Relocations. /// Relocations.
impl<Tag: Copy, Extra> Allocation<Tag, Extra> { impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
/// Returns all relocations overlapping with the given pointer-offset pair. /// Returns all relocations overlapping with the given pointer-offset pair.
pub fn get_relocations( pub fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Tag)] {
&self,
cx: &impl HasDataLayout,
range: AllocRange,
) -> &[(Size, (Tag, AllocId))] {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range. // the beginning of this range.
let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
@ -582,24 +573,24 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
} }
} }
/// Relocations. /// "Relocations" stores the provenance information of pointers stored in memory.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
pub struct Relocations<Tag = (), Id = AllocId>(SortedMap<Size, (Tag, Id)>); pub struct Relocations<Tag = AllocId>(SortedMap<Size, Tag>);
impl<Tag, Id> Relocations<Tag, Id> { impl<Tag> Relocations<Tag> {
pub fn new() -> Self { pub fn new() -> Self {
Relocations(SortedMap::new()) Relocations(SortedMap::new())
} }
// The caller must guarantee that the given relocations are already sorted // The caller must guarantee that the given relocations are already sorted
// by address and contain no duplicates. // by address and contain no duplicates.
pub fn from_presorted(r: Vec<(Size, (Tag, Id))>) -> Self { pub fn from_presorted(r: Vec<(Size, Tag)>) -> Self {
Relocations(SortedMap::from_presorted_elements(r)) Relocations(SortedMap::from_presorted_elements(r))
} }
} }
impl<Tag> Deref for Relocations<Tag> { impl<Tag> Deref for Relocations<Tag> {
type Target = SortedMap<Size, (Tag, AllocId)>; type Target = SortedMap<Size, Tag>;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.0 &self.0
@ -614,7 +605,7 @@ impl<Tag> DerefMut for Relocations<Tag> {
/// A partial, owned list of relocations to transfer into another allocation. /// A partial, owned list of relocations to transfer into another allocation.
pub struct AllocationRelocations<Tag> { pub struct AllocationRelocations<Tag> {
relative_relocations: Vec<(Size, (Tag, AllocId))>, relative_relocations: Vec<(Size, Tag)>,
} }
impl<Tag: Copy, Extra> Allocation<Tag, Extra> { impl<Tag: Copy, Extra> Allocation<Tag, Extra> {

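The hunks above change what an `Allocation` records for pointers stored in its bytes: the relocation map now holds just the provenance (`Tag`, defaulting to `AllocId`) instead of a `(Tag, AllocId)` pair, and `write_scalar` splits a value into raw bytes plus optional provenance. A rough sketch of that write path, under assumed stand-in types (`u64` for `Size`, a `BTreeMap` for `SortedMap`, fixed 8-byte little-endian pointers); it mirrors the shape of the hunk, not the real implementation:

    use std::collections::BTreeMap;

    #[derive(Copy, Clone, Debug)]
    struct AllocId(u64);

    #[derive(Copy, Clone, Debug)]
    enum Scalar {
        Int(u128),
        Ptr { provenance: AllocId, offset: u64 },
    }

    #[derive(Default, Debug)]
    struct Allocation {
        bytes: Vec<u8>,
        // After this commit the map stores just the provenance, not `(Tag, AllocId)`.
        relocations: BTreeMap<u64, AllocId>,
    }

    impl Allocation {
        fn write_scalar(&mut self, start: u64, val: Scalar) {
            // Split into raw bytes and (maybe) provenance, like `to_bits_or_ptr`.
            let (bits, provenance) = match val {
                Scalar::Int(data) => (data, None),
                Scalar::Ptr { provenance, offset } => (u128::from(offset), Some(provenance)),
            };
            // Write the bytes (little-endian, 8-byte pointers in this sketch).
            let dst = &mut self.bytes[start as usize..start as usize + 8];
            dst.copy_from_slice(&(bits as u64).to_le_bytes());
            // See if we also have to record a relocation.
            if let Some(provenance) = provenance {
                self.relocations.insert(start, provenance);
            }
        }
    }

    fn main() {
        let mut alloc = Allocation { bytes: vec![0; 16], ..Default::default() };
        alloc.write_scalar(0, Scalar::Int(7));
        alloc.write_scalar(8, Scalar::Ptr { provenance: AllocId(1), offset: 4 });
        println!("{:?}", alloc.relocations); // only offset 8 carries provenance
    }
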
View file

@ -238,7 +238,9 @@ pub enum UndefinedBehaviorInfo<'tcx> {
PointerUseAfterFree(AllocId), PointerUseAfterFree(AllocId),
/// Used a pointer outside the bounds it is valid for. /// Used a pointer outside the bounds it is valid for.
PointerOutOfBounds { PointerOutOfBounds {
ptr: Pointer, alloc_id: AllocId,
offset: Size,
size: Size,
msg: CheckInAllocMsg, msg: CheckInAllocMsg,
allocation_size: Size, allocation_size: Size,
}, },
@ -307,19 +309,19 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
InvalidVtableAlignment(msg) => write!(f, "invalid vtable: alignment {}", msg), InvalidVtableAlignment(msg) => write!(f, "invalid vtable: alignment {}", msg),
UnterminatedCString(p) => write!( UnterminatedCString(p) => write!(
f, f,
"reading a null-terminated string starting at {} with no null found before end of allocation", "reading a null-terminated string starting at {:?} with no null found before end of allocation",
p, p,
), ),
PointerUseAfterFree(a) => { PointerUseAfterFree(a) => {
write!(f, "pointer to {} was dereferenced after this allocation got freed", a) write!(f, "pointer to {} was dereferenced after this allocation got freed", a)
} }
PointerOutOfBounds { ptr, msg, allocation_size } => write!( PointerOutOfBounds { alloc_id, offset, size, msg, allocation_size } => write!(
f, f,
"{}pointer must be in-bounds at offset {}, \ "{}pointer must be in-bounds for {} bytes at offset {}, but {} has size {}",
but is outside bounds of {} which has size {}",
msg, msg,
ptr.offset.bytes(), size.bytes(),
ptr.alloc_id, offset.bytes(),
alloc_id,
allocation_size.bytes() allocation_size.bytes()
), ),
DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => { DanglingIntPointer(0, CheckInAllocMsg::InboundsTest) => {
@ -348,13 +350,13 @@ impl fmt::Display for UndefinedBehaviorInfo<'_> {
} }
InvalidTag(val) => write!(f, "enum value has invalid tag: {}", val), InvalidTag(val) => write!(f, "enum value has invalid tag: {}", val),
InvalidFunctionPointer(p) => { InvalidFunctionPointer(p) => {
write!(f, "using {} as function pointer but it does not point to a function", p) write!(f, "using {:?} as function pointer but it does not point to a function", p)
} }
InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err), InvalidStr(err) => write!(f, "this string is not valid UTF-8: {}", err),
InvalidUninitBytes(Some((alloc, access))) => write!( InvalidUninitBytes(Some((alloc, access))) => write!(
f, f,
"reading {} byte{} of memory starting at {}, \ "reading {} byte{} of memory starting at {:?}, \
but {} byte{} {} uninitialized starting at {}, \ but {} byte{} {} uninitialized starting at {:?}, \
and this operation requires initialized memory", and this operation requires initialized memory",
access.access_size.bytes(), access.access_size.bytes(),
pluralize!(access.access_size.bytes()), pluralize!(access.access_size.bytes()),

View file

@ -127,7 +127,7 @@ pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMay
pub use self::allocation::{alloc_range, AllocRange, Allocation, InitMask, Relocations}; pub use self::allocation::{alloc_range, AllocRange, Allocation, InitMask, Relocations};
pub use self::pointer::{Pointer, PointerArithmetic}; pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
/// Uniquely identifies one of the following: /// Uniquely identifies one of the following:
/// - A constant /// - A constant

View file

@ -83,27 +83,35 @@ pub trait PointerArithmetic: HasDataLayout {
impl<T: HasDataLayout> PointerArithmetic for T {} impl<T: HasDataLayout> PointerArithmetic for T {}
/// Represents a pointer in the Miri engine. /// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
/// /// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// `Pointer` is generic over the `Tag` associated with each pointer, /// some global state.
/// which is used to do provenance tracking during execution. pub trait Provenance: Copy {
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] /// Says whether the `offset` field of `Pointer` is the actual physical address.
#[derive(HashStable)] /// If `true`, ptr-to-int casts work by simply discarding the provenance.
pub struct Pointer<Tag = ()> { /// If `false`, ptr-to-int casts are not supported.
pub alloc_id: AllocId, const OFFSET_IS_ADDR: bool;
pub offset: Size,
pub tag: Tag, /// Determines how a pointer should be printed.
fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
where
Self: Sized;
/// "Erasing" a tag converts it to the default tag type if possible. Used only for formatting purposes!
fn erase_for_fmt(self) -> AllocId;
} }
static_assert_size!(Pointer, 16); impl Provenance for AllocId {
// With the `AllocId` as provenance, the `offset` is interpreted *relative to the allocation*,
// so ptr-to-int casts are not possible (since we do not know the global physical offset).
const OFFSET_IS_ADDR: bool = false;
/// Print the address of a pointer (without the tag) fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fn print_ptr_addr<Tag>(ptr: &Pointer<Tag>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Forward `alternate` flag to `alloc_id` printing. // Forward `alternate` flag to `alloc_id` printing.
if f.alternate() { if f.alternate() {
write!(f, "{:#?}", ptr.alloc_id)?; write!(f, "{:#?}", ptr.provenance)?;
} else { } else {
write!(f, "{:?}", ptr.alloc_id)?; write!(f, "{:?}", ptr.provenance)?;
} }
// Print offset only if it is non-zero. // Print offset only if it is non-zero.
if ptr.offset.bytes() > 0 { if ptr.offset.bytes() > 0 {
@ -112,26 +120,37 @@ fn print_ptr_addr<Tag>(ptr: &Pointer<Tag>, f: &mut fmt::Formatter<'_>) -> fmt::R
Ok(()) Ok(())
} }
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for fn erase_for_fmt(self) -> AllocId {
// all the Miri types. self
// We have to use `Debug` output for the tag, because `()` does not implement
// `Display` so we cannot specialize that.
impl<Tag: fmt::Debug> fmt::Debug for Pointer<Tag> {
default fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
print_ptr_addr(self, f)?;
write!(f, "[{:?}]", self.tag)
}
}
// Specialization for no tag
impl fmt::Debug for Pointer<()> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
print_ptr_addr(self, f)
} }
} }
impl<Tag: fmt::Debug> fmt::Display for Pointer<Tag> { /// Represents a pointer in the Miri engine.
///
/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)]
pub struct Pointer<Tag = AllocId> {
pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Tag` type)
pub provenance: Tag,
}
//FIXME static_assert_size!(Pointer, 16);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: Provenance> fmt::Debug for Pointer<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(self, f) Tag::fmt(self, f)
}
}
impl<Tag: Provenance> fmt::Debug for Pointer<Option<Tag>> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.provenance {
Some(tag) => Tag::fmt(&Pointer::new(tag, self.offset), f),
None => write!(f, "0x{:x}", self.offset.bytes()),
}
} }
} }
@ -143,37 +162,66 @@ impl From<AllocId> for Pointer {
} }
} }
impl Pointer<()> { impl<Tag> From<Pointer<Tag>> for Pointer<Option<Tag>> {
#[inline(always)] #[inline(always)]
pub fn new(alloc_id: AllocId, offset: Size) -> Self { fn from(ptr: Pointer<Tag>) -> Self {
Pointer { alloc_id, offset, tag: () } let (tag, offset) = ptr.into_parts();
Pointer::new(Some(tag), offset)
}
}
impl<Tag> Pointer<Option<Tag>> {
pub fn into_pointer_or_offset(self) -> Result<Pointer<Tag>, Size> {
match self.provenance {
Some(tag) => Ok(Pointer::new(tag, self.offset)),
None => Err(self.offset),
}
} }
#[inline(always)] #[inline(always)]
pub fn with_tag<Tag>(self, tag: Tag) -> Pointer<Tag> { pub fn map_erase_for_fmt(self) -> Pointer<Option<AllocId>>
Pointer::new_with_tag(self.alloc_id, self.offset, tag) where
Tag: Provenance,
{
Pointer { offset: self.offset, provenance: self.provenance.map(Provenance::erase_for_fmt) }
} }
} }
impl<'tcx, Tag> Pointer<Tag> { impl<'tcx, Tag> Pointer<Tag> {
#[inline(always)] #[inline(always)]
pub fn new_with_tag(alloc_id: AllocId, offset: Size, tag: Tag) -> Self { pub fn new(provenance: Tag, offset: Size) -> Self {
Pointer { alloc_id, offset, tag } Pointer { provenance, offset }
}
/// Obtain the constituents of this pointer. Note that the meaning of the offset depends on the type `Tag`!
/// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
/// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
#[inline(always)]
pub fn into_parts(self) -> (Tag, Size) {
(self.provenance, self.offset)
}
#[inline(always)]
pub fn erase_for_fmt(self) -> Pointer
where
Tag: Provenance,
{
Pointer { offset: self.offset, provenance: self.provenance.erase_for_fmt() }
} }
#[inline] #[inline]
pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> { pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag( Ok(Pointer {
self.alloc_id, offset: Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?), ..self
self.tag, })
))
} }
#[inline] #[inline]
pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) { pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes()); let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) let ptr = Pointer { offset: Size::from_bytes(res), ..self };
(ptr, over)
} }
#[inline(always)] #[inline(always)]
@ -183,26 +231,21 @@ impl<'tcx, Tag> Pointer<Tag> {
#[inline] #[inline]
pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> { pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
Ok(Pointer::new_with_tag( Ok(Pointer {
self.alloc_id, offset: Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?), ..self
self.tag, })
))
} }
#[inline] #[inline]
pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) { pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i); let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
(Pointer::new_with_tag(self.alloc_id, Size::from_bytes(res), self.tag), over) let ptr = Pointer { offset: Size::from_bytes(res), ..self };
(ptr, over)
} }
#[inline(always)] #[inline(always)]
pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self { pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
self.overflowing_signed_offset(i, cx).0 self.overflowing_signed_offset(i, cx).0
} }
#[inline(always)]
pub fn erase_tag(self) -> Pointer {
Pointer { alloc_id: self.alloc_id, offset: self.offset, tag: () }
}
} }

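The `Provenance` trait introduced above is the key abstraction: a tag type declares whether a pointer's `offset` is an absolute address (so ptr-to-int casts can simply drop the tag) and how pointers carrying it are printed. A rough, self-contained sketch under simplified stand-in types — `BorrowTag` is a hypothetical machine tag invented for illustration, not Miri's actual one:

    use std::fmt;

    #[derive(Copy, Clone)]
    struct Pointer<Tag> {
        provenance: Tag,
        offset: u64, // stands in for `Size`
    }

    trait Provenance: Copy {
        // If `true`, `offset` is the absolute address and ptr-to-int casts can
        // simply discard the provenance; if `false`, such casts are unsupported.
        const OFFSET_IS_ADDR: bool;
        fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
        where
            Self: Sized;
    }

    // CTFE-style provenance: offsets are relative to an allocation.
    #[derive(Copy, Clone)]
    struct AllocId(u64);

    impl Provenance for AllocId {
        const OFFSET_IS_ADDR: bool = false;
        fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "alloc{}+0x{:x}", ptr.provenance.0, ptr.offset)
        }
    }

    // Hypothetical Miri-style tag where the offset *is* the machine address.
    #[derive(Copy, Clone)]
    struct BorrowTag(u64);

    impl Provenance for BorrowTag {
        const OFFSET_IS_ADDR: bool = true;
        fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "0x{:x}[<{}>]", ptr.offset, ptr.provenance.0)
        }
    }

    // Pointer printing defers to the provenance, as in the commit.
    impl<Tag: Provenance> fmt::Display for Pointer<Tag> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            Tag::fmt(self, f)
        }
    }

    fn main() {
        let ctfe_ptr = Pointer { provenance: AllocId(7), offset: 16 };
        let miri_ptr = Pointer { provenance: BorrowTag(3), offset: 0x4000 };
        println!("{} (OFFSET_IS_ADDR = {})", ctfe_ptr, AllocId::OFFSET_IS_ADDR);
        println!("{} (OFFSET_IS_ADDR = {})", miri_ptr, BorrowTag::OFFSET_IS_ADDR);
    }
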
View file

@ -10,7 +10,9 @@ use rustc_target::abi::{HasDataLayout, Size, TargetDataLayout};
use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt}; use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
use super::{AllocId, AllocRange, Allocation, InterpResult, Pointer, PointerArithmetic}; use super::{
AllocId, AllocRange, Allocation, InterpResult, Pointer, PointerArithmetic, Provenance,
};
/// Represents the result of const evaluation via the `eval_to_allocation` query. /// Represents the result of const evaluation via the `eval_to_allocation` query.
#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)] #[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
@ -47,12 +49,6 @@ pub enum ConstValue<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ConstValue<'_>, 32); static_assert_size!(ConstValue<'_>, 32);
impl From<Scalar> for ConstValue<'tcx> {
fn from(s: Scalar) -> Self {
Self::Scalar(s)
}
}
impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> { impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
type Lifted = ConstValue<'tcx>; type Lifted = ConstValue<'tcx>;
fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> { fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
@ -70,7 +66,7 @@ impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
impl<'tcx> ConstValue<'tcx> { impl<'tcx> ConstValue<'tcx> {
#[inline] #[inline]
pub fn try_to_scalar(&self) -> Option<Scalar> { pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
match *self { match *self {
ConstValue::ByRef { .. } | ConstValue::Slice { .. } => None, ConstValue::ByRef { .. } | ConstValue::Slice { .. } => None,
ConstValue::Scalar(val) => Some(val), ConstValue::Scalar(val) => Some(val),
@ -120,9 +116,12 @@ impl<'tcx> ConstValue<'tcx> {
/// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 16 bytes in /// `memory::Allocation`. It is in many ways like a small chunk of a `Allocation`, up to 16 bytes in
/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes /// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
/// of a simple value or a pointer into another `Allocation` /// of a simple value or a pointer into another `Allocation`
///
/// These variants would be private if there was a convenient way to achieve that in Rust.
/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)] #[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
#[derive(HashStable)] #[derive(HashStable)]
pub enum Scalar<Tag = ()> { pub enum Scalar<Tag = AllocId> {
/// The raw bytes of a simple value. /// The raw bytes of a simple value.
Int(ScalarInt), Int(ScalarInt),
@ -133,11 +132,11 @@ pub enum Scalar<Tag = ()> {
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(Scalar, 24); //FIXME static_assert_size!(Scalar, 24);
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for // We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types. // all the Miri types.
impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> { impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
Scalar::Ptr(ptr) => write!(f, "{:?}", ptr), Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
@ -146,11 +145,11 @@ impl<Tag: fmt::Debug> fmt::Debug for Scalar<Tag> {
} }
} }
impl<Tag: fmt::Debug> fmt::Display for Scalar<Tag> { impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
Scalar::Ptr(ptr) => write!(f, "pointer to {}", ptr), Scalar::Ptr(ptr) => write!(f, "pointer to {:?}", ptr),
Scalar::Int { .. } => fmt::Debug::fmt(self, f), Scalar::Int(int) => write!(f, "{:?}", int),
} }
} }
} }
@ -169,38 +168,38 @@ impl<Tag> From<Double> for Scalar<Tag> {
} }
} }
impl Scalar<()> { impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
/// Tag this scalar with `new_tag` if it is a pointer, leave it unchanged otherwise. #[inline(always)]
/// fn from(ptr: Pointer<Tag>) -> Self {
/// Used by `MemPlace::replace_tag`. Scalar::Ptr(ptr)
#[inline]
pub fn with_tag<Tag>(self, new_tag: Tag) -> Scalar<Tag> {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.with_tag(new_tag)),
Scalar::Int(int) => Scalar::Int(int),
} }
} }
impl<Tag> From<ScalarInt> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
Scalar::Int(ptr)
}
} }
impl<'tcx, Tag> Scalar<Tag> { impl<'tcx, Tag> Scalar<Tag> {
pub const ZST: Self = Scalar::Int(ScalarInt::ZST); pub const ZST: Self = Scalar::Int(ScalarInt::ZST);
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_tag(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_tag()),
Scalar::Int(int) => Scalar::Int(int),
}
}
#[inline] #[inline]
pub fn null_ptr(cx: &impl HasDataLayout) -> Self { pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size)) Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size))
} }
/// Create a Scalar from a pointer with an `Option<_>` tag (where `None` represents a plain integer).
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
match ptr.into_parts() {
(Some(tag), offset) => Scalar::Ptr(Pointer::new(tag, offset)),
(None, offset) => {
Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
}
}
}
#[inline(always)] #[inline(always)]
fn ptr_op( fn ptr_op(
self, self,
@ -332,10 +331,11 @@ impl<'tcx, Tag> Scalar<Tag> {
Scalar::Int(f.into()) Scalar::Int(f.into())
} }
/// This is very rarely the method you want! You should dispatch on the type /// This is almost certainly not the method you want! You should dispatch on the type
/// and use `force_bits`/`assert_bits`/`force_ptr`/`assert_ptr`. /// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
///
/// This method only exists for the benefit of low-level memory operations /// This method only exists for the benefit of low-level memory operations
/// as well as the implementation of the `force_*` methods. /// as well as the implementation of the above methods.
#[inline] #[inline]
pub fn to_bits_or_ptr( pub fn to_bits_or_ptr(
self, self,
@ -352,28 +352,13 @@ impl<'tcx, Tag> Scalar<Tag> {
} }
} }
/// This method is intentionally private! /// Do not call this method! It does not do ptr-to-int casts when needed.
/// It is just a helper for other methods in this file.
#[inline]
fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
err_ub!(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
})
.into()
}),
Scalar::Ptr(_) => throw_unsup!(ReadPointerAsBytes),
}
}
#[inline(always)] #[inline(always)]
pub fn assert_bits(self, target_size: Size) -> u128 { pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).expect("expected Raw bits but got a Pointer") self.assert_int().assert_bits(target_size)
} }
/// Do not call this method! It does not do ptr-to-int casts when needed.
#[inline] #[inline]
pub fn assert_int(self) -> ScalarInt { pub fn assert_int(self) -> ScalarInt {
match self { match self {
@ -382,6 +367,7 @@ impl<'tcx, Tag> Scalar<Tag> {
} }
} }
/// Do not call this method! It does not do int-to-ptr casts when needed.
#[inline] #[inline]
pub fn assert_ptr(self) -> Pointer<Tag> { pub fn assert_ptr(self) -> Pointer<Tag> {
match self { match self {
@ -401,6 +387,44 @@ impl<'tcx, Tag> Scalar<Tag> {
pub fn is_ptr(self) -> bool { pub fn is_ptr(self) -> bool {
matches!(self, Scalar::Ptr(_)) matches!(self, Scalar::Ptr(_))
} }
}
impl<'tcx, Tag: Provenance> Scalar<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_for_fmt(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_for_fmt()),
Scalar::Int(int) => Scalar::Int(int),
}
}
/// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below, that you
/// likely want to use instead.
///
/// Will perform ptr-to-int casts if needed and possible.
#[inline]
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
err_ub!(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
})
.into()
}),
Scalar::Ptr(ptr) => {
if Tag::OFFSET_IS_ADDR {
Ok(ptr.offset.bytes().into())
} else {
throw_unsup!(ReadPointerAsBytes)
}
}
}
}
pub fn to_bool(self) -> InterpResult<'tcx, bool> { pub fn to_bool(self) -> InterpResult<'tcx, bool> {
let val = self.to_u8()?; let val = self.to_u8()?;
@ -507,28 +531,14 @@ impl<'tcx, Tag> Scalar<Tag> {
} }
} }
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
impl<Tag> From<ScalarInt> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
Scalar::Int(ptr)
}
}
#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)] #[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
pub enum ScalarMaybeUninit<Tag = ()> { pub enum ScalarMaybeUninit<Tag = AllocId> {
Scalar(Scalar<Tag>), Scalar(Scalar<Tag>),
Uninit, Uninit,
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
static_assert_size!(ScalarMaybeUninit, 24); //FIXME static_assert_size!(ScalarMaybeUninit, 24);
impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> { impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
#[inline(always)] #[inline(always)]
@ -546,7 +556,7 @@ impl<Tag> From<Pointer<Tag>> for ScalarMaybeUninit<Tag> {
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for // We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types. // all the Miri types.
impl<Tag: fmt::Debug> fmt::Debug for ScalarMaybeUninit<Tag> { impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"), ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
@ -555,7 +565,7 @@ impl<Tag: fmt::Debug> fmt::Debug for ScalarMaybeUninit<Tag> {
} }
} }
impl<Tag: fmt::Debug> fmt::Display for ScalarMaybeUninit<Tag> { impl<Tag: Provenance> fmt::Display for ScalarMaybeUninit<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self { match self {
ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"), ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
@ -564,18 +574,7 @@ impl<Tag: fmt::Debug> fmt::Display for ScalarMaybeUninit<Tag> {
} }
} }
impl<'tcx, Tag> ScalarMaybeUninit<Tag> { impl<Tag> ScalarMaybeUninit<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_tag(self) -> ScalarMaybeUninit {
match self {
ScalarMaybeUninit::Scalar(s) => ScalarMaybeUninit::Scalar(s.erase_tag()),
ScalarMaybeUninit::Uninit => ScalarMaybeUninit::Uninit,
}
}
#[inline] #[inline]
pub fn check_init(self) -> InterpResult<'static, Scalar<Tag>> { pub fn check_init(self) -> InterpResult<'static, Scalar<Tag>> {
match self { match self {
@ -583,6 +582,19 @@ impl<'tcx, Tag> ScalarMaybeUninit<Tag> {
ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)), ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
} }
} }
}
impl<'tcx, Tag: Provenance> ScalarMaybeUninit<Tag> {
/// Erase the tag from the scalar, if any.
///
/// Used by error reporting code to avoid having the error type depend on `Tag`.
#[inline]
pub fn erase_for_fmt(self) -> ScalarMaybeUninit {
match self {
ScalarMaybeUninit::Scalar(s) => ScalarMaybeUninit::Scalar(s.erase_for_fmt()),
ScalarMaybeUninit::Uninit => ScalarMaybeUninit::Uninit,
}
}
#[inline(always)] #[inline(always)]
pub fn to_bool(self) -> InterpResult<'tcx, bool> { pub fn to_bool(self) -> InterpResult<'tcx, bool> {

View file

@ -3,7 +3,7 @@
//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
use crate::mir::coverage::{CodeRegion, CoverageKind}; use crate::mir::coverage::{CodeRegion, CoverageKind};
use crate::mir::interpret::{Allocation, GlobalAlloc, Scalar}; use crate::mir::interpret::{Allocation, ConstValue, GlobalAlloc, Scalar};
use crate::mir::visit::MirVisitable; use crate::mir::visit::MirVisitable;
use crate::ty::adjustment::PointerCast; use crate::ty::adjustment::PointerCast;
use crate::ty::codec::{TyDecoder, TyEncoder}; use crate::ty::codec::{TyDecoder, TyEncoder};
@ -2095,7 +2095,7 @@ impl<'tcx> Operand<'tcx> {
Operand::Constant(box Constant { Operand::Constant(box Constant {
span, span,
user_ty: None, user_ty: None,
literal: ConstantKind::Val(val.into(), ty), literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
}) })
} }
@ -2458,7 +2458,7 @@ pub enum ConstantKind<'tcx> {
impl Constant<'tcx> { impl Constant<'tcx> {
pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> { pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
match self.literal.const_for_ty()?.val.try_to_scalar() { match self.literal.const_for_ty()?.val.try_to_scalar() {
Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.alloc_id) { Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.provenance) {
GlobalAlloc::Static(def_id) => { GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id)); assert!(!tcx.is_thread_local_static(def_id));
Some(def_id) Some(def_id)

View file

@ -1,7 +1,6 @@
use std::convert::TryInto; use std::convert::TryInto;
use crate::mir::interpret::ConstValue; use crate::mir::interpret::{AllocId, ConstValue, Scalar};
use crate::mir::interpret::Scalar;
use crate::mir::Promoted; use crate::mir::Promoted;
use crate::ty::subst::{InternalSubsts, SubstsRef}; use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::ParamEnv; use crate::ty::ParamEnv;
@ -59,7 +58,7 @@ impl<'tcx> ConstKind<'tcx> {
} }
#[inline] #[inline]
pub fn try_to_scalar(self) -> Option<Scalar> { pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
self.try_to_value()?.try_to_scalar() self.try_to_value()?.try_to_scalar()
} }

View file

@ -987,6 +987,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> { ) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self); define_scoped_cx!(self);
let (alloc_id, offset) = ptr.into_parts();
match ty.kind() { match ty.kind() {
// Byte strings (&[u8; N]) // Byte strings (&[u8; N])
ty::Ref( ty::Ref(
@ -1002,10 +1003,10 @@ pub trait PrettyPrinter<'tcx>:
.. ..
}, },
_, _,
) => match self.tcx().get_global_alloc(ptr.alloc_id) { ) => match self.tcx().get_global_alloc(alloc_id) {
Some(GlobalAlloc::Memory(alloc)) => { Some(GlobalAlloc::Memory(alloc)) => {
let len = int.assert_bits(self.tcx().data_layout.pointer_size); let len = int.assert_bits(self.tcx().data_layout.pointer_size);
let range = AllocRange { start: ptr.offset, size: Size::from_bytes(len) }; let range = AllocRange { start: offset, size: Size::from_bytes(len) };
if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), range) { if let Ok(byte_str) = alloc.get_bytes(&self.tcx(), range) {
p!(pretty_print_byte_str(byte_str)) p!(pretty_print_byte_str(byte_str))
} else { } else {
@ -1020,7 +1021,7 @@ pub trait PrettyPrinter<'tcx>:
ty::FnPtr(_) => { ty::FnPtr(_) => {
// FIXME: We should probably have a helper method to share code with the "Byte strings" // FIXME: We should probably have a helper method to share code with the "Byte strings"
// printing above (which also has to handle pointers to all sorts of things). // printing above (which also has to handle pointers to all sorts of things).
match self.tcx().get_global_alloc(ptr.alloc_id) { match self.tcx().get_global_alloc(alloc_id) {
Some(GlobalAlloc::Function(instance)) => { Some(GlobalAlloc::Function(instance)) => {
self = self.typed_value( self = self.typed_value(
|this| this.print_value_path(instance.def_id(), instance.substs), |this| this.print_value_path(instance.def_id(), instance.substs),
@ -1068,8 +1069,8 @@ pub trait PrettyPrinter<'tcx>:
ty::Char if char::try_from(int).is_ok() => { ty::Char if char::try_from(int).is_ok() => {
p!(write("{:?}", char::try_from(int).unwrap())) p!(write("{:?}", char::try_from(int).unwrap()))
} }
// Raw pointers // Pointer types
ty::RawPtr(_) | ty::FnPtr(_) => { ty::Ref(..) | ty::RawPtr(_) | ty::FnPtr(_) => {
let data = int.assert_bits(self.tcx().data_layout.pointer_size); let data = int.assert_bits(self.tcx().data_layout.pointer_size);
self = self.typed_value( self = self.typed_value(
|mut this| { |mut this| {

View file

@ -597,7 +597,7 @@ fn check_const_value_eq<R: TypeRelation<'tcx>>(
} }
(ConstValue::Scalar(Scalar::Ptr(a_val)), ConstValue::Scalar(Scalar::Ptr(b_val))) => { (ConstValue::Scalar(Scalar::Ptr(a_val)), ConstValue::Scalar(Scalar::Ptr(b_val))) => {
a_val == b_val a_val == b_val
|| match (tcx.global_alloc(a_val.alloc_id), tcx.global_alloc(b_val.alloc_id)) { || match (tcx.global_alloc(a_val.provenance), tcx.global_alloc(b_val.provenance)) {
(GlobalAlloc::Function(a_instance), GlobalAlloc::Function(b_instance)) => { (GlobalAlloc::Function(a_instance), GlobalAlloc::Function(b_instance)) => {
a_instance == b_instance a_instance == b_instance
} }

View file

@ -16,7 +16,6 @@ use crate::interpret::{
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub enum ConstEvalErrKind { pub enum ConstEvalErrKind {
NeedsRfc(String), NeedsRfc(String),
PtrToIntCast,
ConstAccessesStatic, ConstAccessesStatic,
ModifiedGlobal, ModifiedGlobal,
AssertFailure(AssertKind<ConstInt>), AssertFailure(AssertKind<ConstInt>),
@ -49,12 +48,6 @@ impl fmt::Display for ConstEvalErrKind {
NeedsRfc(ref msg) => { NeedsRfc(ref msg) => {
write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg) write!(f, "\"{}\" needs an rfc before being allowed inside constants", msg)
} }
PtrToIntCast => {
write!(
f,
"cannot cast pointer to integer because it was not created by cast from integer"
)
}
ConstAccessesStatic => write!(f, "constant accesses static"), ConstAccessesStatic => write!(f, "constant accesses static"),
ModifiedGlobal => { ModifiedGlobal => {
write!(f, "modifying a static's initial value from another static's initializer") write!(f, "modifying a static's initial value from another static's initializer")

View file

@ -136,19 +136,18 @@ pub(super) fn op_to_const<'tcx>(
// by-val is if we are in destructure_const, i.e., if this is (a field of) something that we // by-val is if we are in destructure_const, i.e., if this is (a field of) something that we
// "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or // "tried to make immediate" before. We wouldn't do that for non-slice scalar pairs or
// structs containing such. // structs containing such.
op.try_as_mplace(ecx) op.try_as_mplace()
}; };
let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr { let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
Scalar::Ptr(ptr) => { (Some(alloc_id), offset) => {
let alloc = ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(); let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
ConstValue::ByRef { alloc, offset: ptr.offset } ConstValue::ByRef { alloc, offset }
} }
Scalar::Int(int) => { (None, offset) => {
assert!(mplace.layout.is_zst()); assert!(mplace.layout.is_zst());
assert_eq!( assert_eq!(
int.assert_bits(ecx.tcx.data_layout.pointer_size) offset.bytes() % mplace.layout.align.abi.bytes(),
% u128::from(mplace.layout.align.abi.bytes()),
0, 0,
"this MPlaceTy must come from a validated constant, thus we can assume the \ "this MPlaceTy must come from a validated constant, thus we can assume the \
alignment is correct", alignment is correct",
@ -162,14 +161,14 @@ pub(super) fn op_to_const<'tcx>(
Err(imm) => match *imm { Err(imm) => match *imm {
Immediate::Scalar(x) => match x { Immediate::Scalar(x) => match x {
ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s), ScalarMaybeUninit::Scalar(s) => ConstValue::Scalar(s),
ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place(ecx)), ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
}, },
Immediate::ScalarPair(a, b) => { Immediate::ScalarPair(a, b) => {
let (data, start) = match a.check_init().unwrap() { let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
Scalar::Ptr(ptr) => { (Some(alloc_id), offset) => {
(ecx.tcx.global_alloc(ptr.alloc_id).unwrap_memory(), ptr.offset.bytes()) (ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
} }
Scalar::Int { .. } => ( (None, _offset) => (
ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable( ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
b"" as &[u8], b"" as &[u8],
)), )),
@ -369,6 +368,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
inner = true; inner = true;
} }
}; };
let alloc_id = mplace.ptr.provenance.unwrap();
if let Err(error) = validation { if let Err(error) = validation {
// Validation failed, report an error. This is always a hard error. // Validation failed, report an error. This is always a hard error.
let err = ConstEvalErr::new(&ecx, error, None); let err = ConstEvalErr::new(&ecx, error, None);
@ -381,9 +381,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
"the raw bytes of the constant ({}", "the raw bytes of the constant ({}",
display_allocation( display_allocation(
*ecx.tcx, *ecx.tcx,
ecx.tcx ecx.tcx.global_alloc(alloc_id).unwrap_memory()
.global_alloc(mplace.ptr.assert_ptr().alloc_id)
.unwrap_memory()
) )
)); ));
diag.emit(); diag.emit();
@ -391,7 +389,7 @@ pub fn eval_to_allocation_raw_provider<'tcx>(
)) ))
} else { } else {
// Convert to raw constant // Convert to raw constant
Ok(ConstAlloc { alloc_id: mplace.ptr.assert_ptr().alloc_id, ty: mplace.layout.ty }) Ok(ConstAlloc { alloc_id, ty: mplace.layout.ty })
} }
} }
} }

View file

@ -16,8 +16,8 @@ use rustc_target::abi::{Align, Size};
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use crate::interpret::{ use crate::interpret::{
self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, Memory, self, compile_time_machine, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, OpTy,
OpTy, PlaceTy, Pointer, Scalar, StackPopUnwind, PlaceTy, Scalar, StackPopUnwind,
}; };
use super::error::*; use super::error::*;
@ -59,7 +59,7 @@ pub struct CompileTimeInterpreter<'mir, 'tcx> {
pub steps_remaining: usize, pub steps_remaining: usize,
/// The virtual call stack. /// The virtual call stack.
pub(crate) stack: Vec<Frame<'mir, 'tcx, (), ()>>, pub(crate) stack: Vec<Frame<'mir, 'tcx, AllocId, ()>>,
} }
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
@ -184,7 +184,7 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// is in bounds, because if they are in bounds, the pointer can't be null. // is in bounds, because if they are in bounds, the pointer can't be null.
// Inequality with integers other than null can never be known for sure. // Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => { (Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
int.is_null() && !self.memory.ptr_may_be_null(ptr) int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
} }
// FIXME: return `true` for at least some comparisons where we can reliably // FIXME: return `true` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time. // determine the result of runtime inequality tests at compile-time.
@ -356,10 +356,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
Err(ConstEvalErrKind::Abort(msg).into()) Err(ConstEvalErrKind::Abort(msg).into())
} }
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
Err(ConstEvalErrKind::PtrToIntCast.into())
}
fn binary_ptr_op( fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>, _ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: mir::BinOp, _bin_op: mir::BinOp,

View file

@ -35,7 +35,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() { if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case") bug!("intern_const_alloc_recursive should not error in this case")
} }
ConstValue::Scalar(loc_place.ptr) ConstValue::Scalar(Scalar::Ptr(loc_place.ptr.into_pointer_or_offset().unwrap()))
} }
/// Convert an evaluated constant to a type level constant /// Convert an evaluated constant to a type level constant
@ -179,9 +179,9 @@ pub(crate) fn deref_const<'tcx>(
let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false); let ecx = mk_eval_cx(tcx, DUMMY_SP, param_env, false);
let op = ecx.const_to_op(val, None).unwrap(); let op = ecx.const_to_op(val, None).unwrap();
let mplace = ecx.deref_operand(&op).unwrap(); let mplace = ecx.deref_operand(&op).unwrap();
if let Scalar::Ptr(ptr) = mplace.ptr { if let Some(alloc_id) = mplace.ptr.provenance {
assert_eq!( assert_eq!(
tcx.get_global_alloc(ptr.alloc_id).unwrap().unwrap_memory().mutability, tcx.get_global_alloc(alloc_id).unwrap().unwrap_memory().mutability,
Mutability::Not, Mutability::Not,
"deref_const cannot be used with mutable allocations as \ "deref_const cannot be used with mutable allocations as \
that could allow pattern matching to observe mutable statics", that could allow pattern matching to observe mutable statics",

View file

@ -175,7 +175,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// (a) cast a raw ptr to usize, or // (a) cast a raw ptr to usize, or
// (b) cast from an integer-like (including bool, char, enums). // (b) cast from an integer-like (including bool, char, enums).
// In both cases we want the bits. // In both cases we want the bits.
let bits = self.force_bits(src.to_scalar()?, src.layout.size)?; let bits = src.to_scalar()?.to_bits(src.layout.size)?;
Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into()) Ok(self.cast_from_scalar(bits, src.layout, cast_ty).into())
} }

View file

@ -8,7 +8,6 @@ use rustc_index::vec::IndexVec;
use rustc_macros::HashStable; use rustc_macros::HashStable;
use rustc_middle::ich::StableHashingContext; use rustc_middle::ich::StableHashingContext;
use rustc_middle::mir; use rustc_middle::mir;
use rustc_middle::mir::interpret::{GlobalId, InterpResult, Pointer, Scalar};
use rustc_middle::ty::layout::{self, TyAndLayout}; use rustc_middle::ty::layout::{self, TyAndLayout};
use rustc_middle::ty::{ use rustc_middle::ty::{
self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable, self, query::TyCtxtAt, subst::SubstsRef, ParamEnv, Ty, TyCtxt, TypeFoldable,
@ -18,8 +17,9 @@ use rustc_span::{Pos, Span};
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout}; use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size, TargetDataLayout};
use super::{ use super::{
Immediate, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory, MemoryKind, Operand, Place, AllocId, GlobalId, Immediate, InterpResult, MPlaceTy, Machine, MemPlace, MemPlaceMeta, Memory,
PlaceTy, ScalarMaybeUninit, StackPopJump, MemoryKind, Operand, Place, PlaceTy, Pointer, Provenance, Scalar, ScalarMaybeUninit,
StackPopJump,
}; };
use crate::transform::validate::equal_up_to_regions; use crate::transform::validate::equal_up_to_regions;
use crate::util::storage::AlwaysLiveLocals; use crate::util::storage::AlwaysLiveLocals;
@ -80,7 +80,7 @@ impl Drop for SpanGuard {
} }
/// A stack frame. /// A stack frame.
pub struct Frame<'mir, 'tcx, Tag = (), Extra = ()> { pub struct Frame<'mir, 'tcx, Tag = AllocId, Extra = ()> {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// Function and callsite information // Function and callsite information
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -161,7 +161,7 @@ pub enum StackPopCleanup {
/// State of a local variable including a memoized layout /// State of a local variable including a memoized layout
#[derive(Clone, PartialEq, Eq, HashStable)] #[derive(Clone, PartialEq, Eq, HashStable)]
pub struct LocalState<'tcx, Tag = ()> { pub struct LocalState<'tcx, Tag = AllocId> {
pub value: LocalValue<Tag>, pub value: LocalValue<Tag>,
/// Don't modify if `Some`, this is only used to prevent computing the layout twice /// Don't modify if `Some`, this is only used to prevent computing the layout twice
#[stable_hasher(ignore)] #[stable_hasher(ignore)]
@ -169,8 +169,8 @@ pub struct LocalState<'tcx, Tag = ()> {
} }
/// Current value of a local variable /// Current value of a local variable
#[derive(Copy, Clone, PartialEq, Eq, Debug, HashStable)] // Miri debug-prints these #[derive(Copy, Clone, PartialEq, Eq, HashStable)]
pub enum LocalValue<Tag = ()> { pub enum LocalValue<Tag = AllocId> {
/// This local is not currently alive, and cannot be used at all. /// This local is not currently alive, and cannot be used at all.
Dead, Dead,
/// This local is alive but not yet initialized. It can be written to /// This local is alive but not yet initialized. It can be written to
@ -186,6 +186,18 @@ pub enum LocalValue<Tag = ()> {
Live(Operand<Tag>), Live(Operand<Tag>),
} }
impl<Tag: Provenance> std::fmt::Debug for LocalValue<Tag> {
// Miri debug-prints these
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use LocalValue::*;
match self {
Dead => f.debug_tuple("Dead").finish(),
Uninitialized => f.debug_tuple("Uninitialized").finish(),
Live(o) => f.debug_tuple("Live").field(o).finish(),
}
}
}
impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> { impl<'tcx, Tag: Copy + 'static> LocalState<'tcx, Tag> {
/// Read the local's value or error if the local is not yet live or not live anymore. /// Read the local's value or error if the local is not yet live or not live anymore.
/// ///
@ -406,20 +418,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
#[inline(always)] #[inline(always)]
pub fn force_ptr( pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
&self, self.memory.scalar_to_ptr(scalar)
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
self.memory.force_ptr(scalar)
}
#[inline(always)]
pub fn force_bits(
&self,
scalar: Scalar<M::PointerTag>,
size: Size,
) -> InterpResult<'tcx, u128> {
self.memory.force_bits(scalar, size)
} }
/// Call this to turn untagged "global" pointers (obtained via `tcx`) into /// Call this to turn untagged "global" pointers (obtained via `tcx`) into
@ -650,7 +650,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(Some((size, align))) Ok(Some((size, align)))
} }
ty::Dynamic(..) => { ty::Dynamic(..) => {
let vtable = metadata.unwrap_meta(); let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
// Read size and align from vtable (already checks size). // Read size and align from vtable (already checks size).
Ok(Some(self.read_size_and_align_from_vtable(vtable)?)) Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
} }
@ -898,8 +898,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local { if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
// All locals have a backing allocation, even if the allocation is empty // All locals have a backing allocation, even if the allocation is empty
// due to the local having ZST type. // due to the local having ZST type.
let ptr = ptr.assert_ptr(); trace!(
trace!("deallocating local: {:?}", self.memory.dump_alloc(ptr.alloc_id)); "deallocating local {:?}: {:?}",
local,
self.memory.dump_alloc(ptr.provenance.unwrap().erase_for_fmt())
);
self.memory.deallocate(ptr, None, MemoryKind::Stack)?; self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
}; };
Ok(()) Ok(())
@ -975,46 +978,45 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
match self.ecx.stack()[frame].locals[local].value { match self.ecx.stack()[frame].locals[local].value {
LocalValue::Dead => write!(fmt, " is dead")?, LocalValue::Dead => write!(fmt, " is dead")?,
LocalValue::Uninitialized => write!(fmt, " is uninitialized")?, LocalValue::Uninitialized => write!(fmt, " is uninitialized")?,
LocalValue::Live(Operand::Indirect(mplace)) => match mplace.ptr { LocalValue::Live(Operand::Indirect(mplace)) => {
Scalar::Ptr(ptr) => {
write!( write!(
fmt, fmt,
" by align({}){} ref:", " by align({}){} ref {:?}:",
mplace.align.bytes(), mplace.align.bytes(),
match mplace.meta { match mplace.meta {
MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta), MemPlaceMeta::Meta(meta) => format!(" meta({:?})", meta),
MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(), MemPlaceMeta::Poison | MemPlaceMeta::None => String::new(),
}
)?;
allocs.push(ptr.alloc_id);
}
ptr => write!(fmt, " by integral ref: {:?}", ptr)?,
}, },
mplace.ptr,
)?;
allocs.extend(mplace.ptr.map_erase_for_fmt().provenance);
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => { LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {:?}", val)?; write!(fmt, " {:?}", val)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val { if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
allocs.push(ptr.alloc_id); allocs.push(ptr.provenance.erase_for_fmt());
} }
} }
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => { LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({:?}, {:?})", val1, val2)?; write!(fmt, " ({:?}, {:?})", val1, val2)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 { if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
allocs.push(ptr.alloc_id); allocs.push(ptr.provenance.erase_for_fmt());
} }
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 { if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
allocs.push(ptr.alloc_id); allocs.push(ptr.provenance.erase_for_fmt());
} }
} }
} }
write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs)) write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
} }
Place::Ptr(mplace) => match mplace.ptr { Place::Ptr(mplace) => match mplace.ptr.map_erase_for_fmt().provenance {
Scalar::Ptr(ptr) => write!( Some(alloc_id) => write!(
fmt, fmt,
"by align({}) ref: {:?}", "by align({}) ref {:?}: {:?}",
mplace.align.bytes(), mplace.align.bytes(),
self.ecx.memory.dump_alloc(ptr.alloc_id) mplace.ptr,
self.ecx.memory.dump_alloc(alloc_id)
), ),
ptr => write!(fmt, " integral by ref: {:?}", ptr), ptr => write!(fmt, " integral by ref: {:?}", ptr),
}, },

View file

@ -20,18 +20,17 @@ use rustc_errors::ErrorReported;
use rustc_hir as hir; use rustc_hir as hir;
use rustc_middle::mir::interpret::InterpResult; use rustc_middle::mir::interpret::InterpResult;
use rustc_middle::ty::{self, layout::TyAndLayout, Ty}; use rustc_middle::ty::{self, layout::TyAndLayout, Ty};
use rustc_target::abi::Size;
use rustc_ast::Mutability; use rustc_ast::Mutability;
use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, Scalar, ValueVisitor}; use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, ValueVisitor};
use crate::const_eval; use crate::const_eval;
pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine< pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine<
'mir, 'mir,
'tcx, 'tcx,
MemoryKind = T, MemoryKind = T,
PointerTag = (), PointerTag = AllocId,
ExtraFnVal = !, ExtraFnVal = !,
FrameExtra = (), FrameExtra = (),
AllocExtra = (), AllocExtra = (),
@ -136,7 +135,7 @@ fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval:
}; };
// link the alloc id to the actual allocation // link the alloc id to the actual allocation
let alloc = tcx.intern_const_alloc(alloc); let alloc = tcx.intern_const_alloc(alloc);
leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc)); leftover_allocations.extend(alloc.relocations().iter().map(|&(_, alloc_id)| alloc_id));
tcx.set_alloc_id_memory(alloc_id, alloc); tcx.set_alloc_id_memory(alloc_id, alloc);
None None
} }
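The relocation table no longer pairs each offset with `((), AllocId)`; the provenance tag is stored directly. A tiny illustration of that shape change with toy types (not the real `Allocation` relocation map):

// Sketch: old vs. new relocation entry layout.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
struct AllocId(u64);

fn main() {
    // Old layout: the unit "tag" had to be pattern-matched away everywhere.
    let old_relocations: Vec<(u64, ((), AllocId))> = vec![(0, ((), AllocId(1)))];
    let old_ids: Vec<AllocId> = old_relocations.iter().map(|&(_, ((), id))| id).collect();

    // New layout: the tag is the provenance itself.
    let new_relocations: Vec<(u64, AllocId)> = vec![(0, AllocId(1))];
    let new_ids: Vec<AllocId> = new_relocations.iter().map(|&(_, id)| id).collect();

    assert_eq!(old_ids, new_ids);
}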
@ -203,10 +202,11 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
if let ty::Dynamic(..) = if let ty::Dynamic(..) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind() tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{ {
if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() { let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
if let Some(alloc_id) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even // Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable. // if the reference of the fat pointer is mutable.
self.intern_shallow(vtable.alloc_id, InternMode::Const, None); self.intern_shallow(alloc_id, InternMode::Const, None);
} else { } else {
// Validation will error (with a better message) on an invalid vtable pointer. // Validation will error (with a better message) on an invalid vtable pointer.
// Let validation show the error message, but make sure it *does* error. // Let validation show the error message, but make sure it *does* error.
@ -216,7 +216,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
} }
// Check if we have encountered this pointer+layout combination before. // Check if we have encountered this pointer+layout combination before.
// Only recurse for allocation-backed pointers. // Only recurse for allocation-backed pointers.
if let Scalar::Ptr(ptr) = mplace.ptr { if let Some(alloc_id) = mplace.ptr.provenance {
// Compute the mode with which we intern this. Our goal here is to make as many // Compute the mode with which we intern this. Our goal here is to make as many
// statics as we can immutable so they can be placed in read-only memory by LLVM. // statics as we can immutable so they can be placed in read-only memory by LLVM.
let ref_mode = match self.mode { let ref_mode = match self.mode {
@ -259,7 +259,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
InternMode::Const InternMode::Const
} }
}; };
match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) { match self.intern_shallow(alloc_id, ref_mode, Some(referenced_ty)) {
// No need to recurse, these are interned already and statics may have // No need to recurse, these are interned already and statics may have
// cycles, so we don't want to recurse there // cycles, so we don't want to recurse there
Some(IsStaticOrFn) => {} Some(IsStaticOrFn) => {}
@ -321,7 +321,7 @@ where
leftover_allocations, leftover_allocations,
// The outermost allocation must exist, because we allocated it with // The outermost allocation must exist, because we allocated it with
// `Memory::allocate`. // `Memory::allocate`.
ret.ptr.assert_ptr().alloc_id, ret.ptr.provenance.unwrap(),
base_intern_mode, base_intern_mode,
Some(ret.layout.ty), Some(ret.layout.ty),
); );
@ -395,9 +395,9 @@ where
} }
let alloc = tcx.intern_const_alloc(alloc); let alloc = tcx.intern_const_alloc(alloc);
tcx.set_alloc_id_memory(alloc_id, alloc); tcx.set_alloc_id_memory(alloc_id, alloc);
for &(_, ((), reloc)) in alloc.relocations().iter() { for &(_, alloc_id) in alloc.relocations().iter() {
if leftover_allocations.insert(reloc) { if leftover_allocations.insert(alloc_id) {
todo.push(reloc); todo.push(alloc_id);
} }
} }
} else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) { } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) {
@ -430,9 +430,7 @@ impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>>
) -> InterpResult<'tcx, &'tcx Allocation> { ) -> InterpResult<'tcx, &'tcx Allocation> {
let dest = self.allocate(layout, MemoryKind::Stack)?; let dest = self.allocate(layout, MemoryKind::Stack)?;
f(self, &dest)?; f(self, &dest)?;
let ptr = dest.ptr.assert_ptr(); let mut alloc = self.memory.alloc_map.remove(&dest.ptr.provenance.unwrap()).unwrap().1;
assert_eq!(ptr.offset, Size::ZERO);
let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1;
alloc.mutability = Mutability::Not; alloc.mutability = Mutability::Not;
Ok(self.tcx.intern_const_alloc(alloc)) Ok(self.tcx.intern_const_alloc(alloc))
} }

View file

@ -18,6 +18,7 @@ use rustc_target::abi::{Abi, Align, LayoutOf as _, Primitive, Size};
use super::{ use super::{
util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy, util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
Pointer,
}; };
mod caller_location; mod caller_location;
@ -138,7 +139,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
sym::caller_location => { sym::caller_location => {
let span = self.find_closest_untracked_caller_location(); let span = self.find_closest_untracked_caller_location();
let location = self.alloc_caller_location_for_span(span); let location = self.alloc_caller_location_for_span(span);
self.write_scalar(location.ptr, dest)?; self.write_immediate(location.to_ref(self), dest)?;
} }
sym::min_align_of_val | sym::size_of_val => { sym::min_align_of_val | sym::size_of_val => {
@ -190,7 +191,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let ty = substs.type_at(0); let ty = substs.type_at(0);
let layout_of = self.layout_of(ty)?; let layout_of = self.layout_of(ty)?;
let val = self.read_scalar(&args[0])?.check_init()?; let val = self.read_scalar(&args[0])?.check_init()?;
let bits = self.force_bits(val, layout_of.size)?; let bits = val.to_bits(layout_of.size)?;
let kind = match layout_of.abi { let kind = match layout_of.abi {
Abi::Scalar(ref scalar) => scalar.value, Abi::Scalar(ref scalar) => scalar.value,
_ => span_bug!( _ => span_bug!(
@ -238,7 +239,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// term since the sign of the second term can be inferred from this and // term since the sign of the second term can be inferred from this and
// the fact that the operation has overflowed (if either is 0 no // the fact that the operation has overflowed (if either is 0 no
// overflow can occur) // overflow can occur)
let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?; let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
let first_term_positive = first_term & (1 << (num_bits - 1)) == 0; let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
if first_term_positive { if first_term_positive {
// Negative overflow not possible since the positive first term // Negative overflow not possible since the positive first term
@ -298,7 +299,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?; let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
if overflowed { if overflowed {
let layout = self.layout_of(substs.type_at(0))?; let layout = self.layout_of(substs.type_at(0))?;
let r_val = self.force_bits(r.to_scalar()?, layout.size)?; let r_val = r.to_scalar()?.to_bits(layout.size)?;
if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name { if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name); throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
} else { } else {
@ -312,9 +313,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW)) // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
let layout = self.layout_of(substs.type_at(0))?; let layout = self.layout_of(substs.type_at(0))?;
let val = self.read_scalar(&args[0])?.check_init()?; let val = self.read_scalar(&args[0])?.check_init()?;
let val_bits = self.force_bits(val, layout.size)?; let val_bits = val.to_bits(layout.size)?;
let raw_shift = self.read_scalar(&args[1])?.check_init()?; let raw_shift = self.read_scalar(&args[1])?.check_init()?;
let raw_shift_bits = self.force_bits(raw_shift, layout.size)?; let raw_shift_bits = raw_shift.to_bits(layout.size)?;
let width_bits = u128::from(layout.size.bits()); let width_bits = u128::from(layout.size.bits());
let shift_bits = raw_shift_bits % width_bits; let shift_bits = raw_shift_bits % width_bits;
let inv_shift_bits = (width_bits - shift_bits) % width_bits; let inv_shift_bits = (width_bits - shift_bits) % width_bits;
@ -331,12 +332,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?; self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
} }
sym::offset => { sym::offset => {
let ptr = self.read_scalar(&args[0])?.check_init()?; let ptr = self.read_pointer(&args[0])?;
let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?; let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
let pointee_ty = substs.type_at(0); let pointee_ty = substs.type_at(0);
let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?; let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
self.write_scalar(offset_ptr, dest)?; self.write_scalar(Scalar::from_maybe_pointer(offset_ptr, self), dest)?;
} }
sym::arith_offset => { sym::arith_offset => {
let ptr = self.read_scalar(&args[0])?.check_init()?; let ptr = self.read_scalar(&args[0])?.check_init()?;
@ -376,9 +377,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if !done { if !done {
// General case: we need two pointers. // General case: we need two pointers.
let a = self.force_ptr(a)?; let a = self.scalar_to_ptr(a);
let b = self.force_ptr(b)?; let b = self.scalar_to_ptr(b);
if a.alloc_id != b.alloc_id { let (a_alloc_id, a_offset, _) = self.memory.ptr_force_alloc(a)?;
let (b_alloc_id, b_offset, _) = self.memory.ptr_force_alloc(b)?;
if a_alloc_id != b_alloc_id {
throw_ub_format!( throw_ub_format!(
"ptr_offset_from cannot compute offset of pointers into different \ "ptr_offset_from cannot compute offset of pointers into different \
allocations.", allocations.",
@ -386,8 +389,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
let usize_layout = self.layout_of(self.tcx.types.usize)?; let usize_layout = self.layout_of(self.tcx.types.usize)?;
let isize_layout = self.layout_of(self.tcx.types.isize)?; let isize_layout = self.layout_of(self.tcx.types.isize)?;
let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout); let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout); let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
let (val, _overflowed, _ty) = let (val, _overflowed, _ty) =
self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?; self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
let pointee_layout = self.layout_of(substs.type_at(0))?; let pointee_layout = self.layout_of(substs.type_at(0))?;
@ -513,10 +516,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value. /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
pub fn ptr_offset_inbounds( pub fn ptr_offset_inbounds(
&self, &self,
ptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
pointee_ty: Ty<'tcx>, pointee_ty: Ty<'tcx>,
offset_count: i64, offset_count: i64,
) -> InterpResult<'tcx, Scalar<M::PointerTag>> { ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
// We cannot overflow i64 as a type's size must be <= isize::MAX. // We cannot overflow i64 as a type's size must be <= isize::MAX.
let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap(); let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
// The computed offset, in bytes, cannot overflow an isize. // The computed offset, in bytes, cannot overflow an isize.
@ -524,7 +527,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?; offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
// The offset being in bounds cannot rely on "wrapping around" the address space. // The offset being in bounds cannot rely on "wrapping around" the address space.
// So, first rule out overflows in the pointer arithmetic. // So, first rule out overflows in the pointer arithmetic.
let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?; let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
// ptr and offset_ptr must be in bounds of the same allocated object. This means all of the // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
// memory between these pointers must be accessible. Note that we do not require the // memory between these pointers must be accessible. Note that we do not require the
// pointers to be properly aligned (unlike a read/write operation). // pointers to be properly aligned (unlike a read/write operation).
@ -558,8 +561,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) )
})?; })?;
let src = self.read_scalar(&src)?.check_init()?; let src = self.read_pointer(&src)?;
let dst = self.read_scalar(&dst)?.check_init()?; let dst = self.read_pointer(&dst)?;
self.memory.copy(src, align, dst, align, size, nonoverlapping) self.memory.copy(src, align, dst, align, size, nonoverlapping)
} }
@ -572,8 +575,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?; let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
assert!(!layout.is_unsized()); assert!(!layout.is_unsized());
let lhs = self.read_scalar(lhs)?.check_init()?; let lhs = self.read_pointer(lhs)?;
let rhs = self.read_scalar(rhs)?.check_init()?; let rhs = self.read_pointer(rhs)?;
let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?; let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?; let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
Ok(Scalar::from_bool(lhs_bytes == rhs_bytes)) Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
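Several of the intrinsics above (`offset`, `ptr_offset_from`) rely on the overflow-checked arithmetic that `ptr_offset_inbounds` performs. A rough, self-contained model of those checks, with plain integers standing in for `Pointer` and `Size`; the function name and error strings are illustrative only:

// Sketch: the offset must not overflow, the arithmetic must not wrap, and the
// resulting offset must stay within the allocation (one past the end is fine).
fn offset_inbounds(ptr_offset: u64, alloc_size: u64, pointee_size: i64, count: i64)
    -> Result<u64, &'static str>
{
    // The total byte offset cannot overflow an isize.
    let offset_bytes = count.checked_mul(pointee_size).ok_or("pointer arithmetic overflow")?;
    // Apply the signed offset without wrapping around the address space.
    let new_offset = if offset_bytes >= 0 {
        ptr_offset.checked_add(offset_bytes as u64)
    } else {
        ptr_offset.checked_sub(offset_bytes.unsigned_abs())
    }
    .ok_or("pointer arithmetic overflow")?;
    // Both the old and the new pointer must be in bounds of the same allocation.
    if new_offset > alloc_size {
        return Err("pointer out of bounds");
    }
    Ok(new_offset)
}

fn main() {
    assert_eq!(offset_inbounds(0, 16, 4, 3), Ok(12));
    assert!(offset_inbounds(0, 16, 4, 5).is_err());
}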

View file

@ -96,7 +96,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap(); let location = self.allocate(loc_layout, MemoryKind::CallerLocation).unwrap();
// Initialize fields. // Initialize fields.
self.write_immediate(file.to_ref(), &self.mplace_field(&location, 0).unwrap().into()) self.write_immediate(file.to_ref(self), &self.mplace_field(&location, 0).unwrap().into())
.expect("writing to memory we just allocated cannot fail"); .expect("writing to memory we just allocated cannot fail");
self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into()) self.write_scalar(line, &self.mplace_field(&location, 1).unwrap().into())
.expect("writing to memory we just allocated cannot fail"); .expect("writing to memory we just allocated cannot fail");

View file

@ -13,8 +13,8 @@ use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use super::{ use super::{
AllocId, Allocation, CheckInAllocMsg, Frame, ImmTy, InterpCx, InterpResult, LocalValue, AllocId, Allocation, Frame, ImmTy, InterpCx, InterpResult, LocalValue, MemPlace, Memory,
MemPlace, Memory, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Scalar, StackPopUnwind, MemoryKind, OpTy, Operand, PlaceTy, Pointer, Provenance, Scalar, StackPopUnwind,
}; };
/// Data returned by Machine::stack_pop, /// Data returned by Machine::stack_pop,
@ -84,12 +84,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// Additional memory kinds a machine wishes to distinguish from the builtin ones /// Additional memory kinds a machine wishes to distinguish from the builtin ones
type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static; type MemoryKind: Debug + std::fmt::Display + MayLeak + Eq + 'static;
/// Tag tracked alongside every pointer. This is used to implement "Stacked Borrows" /// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
/// <https://www.ralfj.de/blog/2018/08/07/stacked-borrows.html>. type PointerTag: Provenance + Eq + Hash + 'static;
/// The `default()` is used for pointers to consts, statics, vtables and functions.
/// The `Debug` formatting is used for displaying pointers; we cannot use `Display`
/// as `()` does not implement that, but it should be "nice" output.
type PointerTag: Debug + Copy + Eq + Hash + 'static;
/// Machines can define extra (non-instance) things that represent values of function pointers. /// Machines can define extra (non-instance) things that represent values of function pointers.
/// For example, Miri uses this to return a function pointer from `dlsym` /// For example, Miri uses this to return a function pointer from `dlsym`
@ -287,7 +283,10 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// this will return an unusable tag (i.e., accesses will be UB)! /// this will return an unusable tag (i.e., accesses will be UB)!
/// ///
/// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed. /// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed.
fn tag_global_base_pointer(memory_extra: &Self::MemoryExtra, id: AllocId) -> Self::PointerTag; fn tag_global_base_pointer(
memory_extra: &Self::MemoryExtra,
ptr: Pointer,
) -> Pointer<Self::PointerTag>;
/// Called to initialize the "extra" state of an allocation and make the pointers /// Called to initialize the "extra" state of an allocation and make the pointers
/// it contains (in relocations) tagged. The way we construct allocations is /// it contains (in relocations) tagged. The way we construct allocations is
@ -400,31 +399,24 @@ pub trait Machine<'mir, 'tcx>: Sized {
Ok(StackPopJump::Normal) Ok(StackPopJump::Normal)
} }
fn int_to_ptr( /// "Int-to-pointer cast"
_mem: &Memory<'mir, 'tcx, Self>, fn ptr_from_addr(
int: u64, mem: &Memory<'mir, 'tcx, Self>,
) -> InterpResult<'tcx, Pointer<Self::PointerTag>> { addr: u64,
Err((if int == 0 { ) -> Pointer<Option<Self::PointerTag>>;
// This is UB, seriously.
// (`DanglingIntPointer` with these exact arguments has special printing code.)
err_ub!(DanglingIntPointer(0, CheckInAllocMsg::InboundsTest))
} else {
// This is just something we cannot support during const-eval.
err_unsup!(ReadBytesAsPointer)
})
.into())
}
fn ptr_to_int( /// Convert a pointer with provenance into an allocation-offset pair,
_mem: &Memory<'mir, 'tcx, Self>, /// or a `None` with an absolute address if that conversion is not possible.
_ptr: Pointer<Self::PointerTag>, fn ptr_get_alloc(
) -> InterpResult<'tcx, u64>; mem: &Memory<'mir, 'tcx, Self>,
ptr: Pointer<Self::PointerTag>,
) -> (Option<AllocId>, Size);
} }
// A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines // A lot of the flexibility above is just needed for `Miri`, but all "compile-time" machines
// (CTFE and ConstProp) use the same instance. Here, we share that code. // (CTFE and ConstProp) use the same instance. Here, we share that code.
pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) { pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
type PointerTag = (); type PointerTag = AllocId;
type ExtraFnVal = !; type ExtraFnVal = !;
type MemoryMap = type MemoryMap =
@ -467,19 +459,33 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
#[inline(always)] #[inline(always)]
fn init_allocation_extra<'b>( fn init_allocation_extra<'b>(
_memory_extra: &Self::MemoryExtra, _memory_extra: &Self::MemoryExtra,
_id: AllocId, id: AllocId,
alloc: Cow<'b, Allocation>, alloc: Cow<'b, Allocation>,
_kind: Option<MemoryKind<Self::MemoryKind>>, _kind: Option<MemoryKind<Self::MemoryKind>>,
) -> (Cow<'b, Allocation<Self::PointerTag>>, Self::PointerTag) { ) -> (Cow<'b, Allocation<Self::PointerTag>>, Self::PointerTag) {
// We do not use a tag so we can just cheaply forward the allocation // We do not use a tag so we can just cheaply forward the allocation
(alloc, ()) (alloc, id)
} }
#[inline(always)] #[inline(always)]
fn tag_global_base_pointer( fn tag_global_base_pointer(
_memory_extra: &Self::MemoryExtra, _memory_extra: &Self::MemoryExtra,
_id: AllocId, ptr: Pointer<AllocId>,
) -> Self::PointerTag { ) -> Pointer<AllocId> {
() ptr
}
#[inline(always)]
fn ptr_from_addr(_mem: &Memory<$mir, $tcx, Self>, addr: u64) -> Pointer<Option<AllocId>> {
Pointer::new(None, Size::from_bytes(addr))
}
#[inline(always)]
fn ptr_get_alloc(
_mem: &Memory<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
) -> (Option<AllocId>, Size) {
let (alloc_id, offset) = ptr.into_parts();
(Some(alloc_id), offset)
} }
} }
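The two new machine hooks replace the fallible `int_to_ptr`/`ptr_to_int` pair: `ptr_from_addr` always yields a pointer (possibly without provenance), and `ptr_get_alloc` splits a tagged pointer into an allocation/offset pair. A minimal sketch under invented toy types, with a const-eval-style machine whose tag is simply `AllocId`:

// Sketch only: simplified trait and types, not rustc's `Machine`.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug)]
struct Pointer<Prov> {
    provenance: Prov,
    offset: u64,
}

trait Machine {
    type PointerTag: Copy;
    /// "Int-to-pointer cast": const-eval cannot attach provenance to a raw address.
    fn ptr_from_addr(addr: u64) -> Pointer<Option<Self::PointerTag>>;
    /// Split a tagged pointer into its allocation (if any) and offset.
    fn ptr_get_alloc(ptr: Pointer<Self::PointerTag>) -> (Option<AllocId>, u64);
}

struct CompileTimeMachine;

impl Machine for CompileTimeMachine {
    type PointerTag = AllocId;
    fn ptr_from_addr(addr: u64) -> Pointer<Option<AllocId>> {
        Pointer { provenance: None, offset: addr }
    }
    fn ptr_get_alloc(ptr: Pointer<AllocId>) -> (Option<AllocId>, u64) {
        (Some(ptr.provenance), ptr.offset)
    }
}

fn main() {
    let p = CompileTimeMachine::ptr_from_addr(0x40);
    println!("{:?}", p); // no provenance, just an address
    let q = Pointer { provenance: AllocId(7), offset: 8 };
    println!("{:?}", CompileTimeMachine::ptr_get_alloc(q)); // (Some(AllocId(7)), 8)
}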

View file

@ -8,7 +8,7 @@
use std::borrow::Cow; use std::borrow::Cow;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::convert::{TryFrom, TryInto}; use std::convert::TryFrom;
use std::fmt; use std::fmt;
use std::ptr; use std::ptr;
@ -19,7 +19,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size, TargetDataLayout};
use super::{ use super::{
alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc, alloc_range, AllocId, AllocMap, AllocRange, Allocation, CheckInAllocMsg, GlobalAlloc,
InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Scalar, ScalarMaybeUninit, InterpResult, Machine, MayLeak, Pointer, PointerArithmetic, Provenance, Scalar,
ScalarMaybeUninit,
}; };
use crate::util::pretty; use crate::util::pretty;
@ -162,25 +163,24 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
#[inline] #[inline]
pub fn global_base_pointer( pub fn global_base_pointer(
&self, &self,
mut ptr: Pointer, ptr: Pointer<AllocId>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> { ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
let (alloc_id, offset) = ptr.into_parts();
// We need to handle `extern static`. // We need to handle `extern static`.
let ptr = match self.tcx.get_global_alloc(ptr.alloc_id) { let alloc_id = match self.tcx.get_global_alloc(alloc_id) {
Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => { Some(GlobalAlloc::Static(def_id)) if self.tcx.is_thread_local_static(def_id) => {
bug!("global memory cannot point to thread-local static") bug!("global memory cannot point to thread-local static")
} }
Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => { Some(GlobalAlloc::Static(def_id)) if self.tcx.is_foreign_item(def_id) => {
ptr.alloc_id = M::extern_static_alloc_id(self, def_id)?; M::extern_static_alloc_id(self, def_id)?
ptr
} }
_ => { _ => {
// No need to change the `AllocId`. // No need to change the `AllocId`.
ptr alloc_id
} }
}; };
// And we need to get the tag. // And we need to get the tag.
let tag = M::tag_global_base_pointer(&self.extra, ptr.alloc_id); Ok(M::tag_global_base_pointer(&self.extra, Pointer::new(alloc_id, offset)))
Ok(ptr.with_tag(tag))
} }
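`global_base_pointer` now destructures the pointer with `into_parts`, remaps the `AllocId` if needed, and rebuilds the pointer before tagging it. A small model of that split-and-rebuild pattern (toy `Pointer`, not the interpreter's):

// Sketch of the `into_parts` pattern used throughout this commit.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug)]
struct Pointer<Prov> {
    provenance: Prov,
    offset: u64,
}

impl<Prov> Pointer<Prov> {
    fn new(provenance: Prov, offset: u64) -> Self {
        Pointer { provenance, offset }
    }
    fn into_parts(self) -> (Prov, u64) {
        (self.provenance, self.offset)
    }
}

fn main() {
    let ptr = Pointer::new(AllocId(5), 16);
    let (alloc_id, offset) = ptr.into_parts();
    // In the real code the AllocId may be swapped out here (e.g. for an `extern static`)
    // before the pointer is rebuilt and handed to `tag_global_base_pointer`.
    let rebuilt = Pointer::new(alloc_id, offset);
    println!("{:?}", rebuilt);
}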
pub fn create_fn_alloc( pub fn create_fn_alloc(
@ -237,18 +237,19 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// This is a new allocation, not a new global one, so no `global_base_ptr`. // This is a new allocation, not a new global one, so no `global_base_ptr`.
let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind)); let (alloc, tag) = M::init_allocation_extra(&self.extra, id, Cow::Owned(alloc), Some(kind));
self.alloc_map.insert(id, (kind, alloc.into_owned())); self.alloc_map.insert(id, (kind, alloc.into_owned()));
Pointer::from(id).with_tag(tag) Pointer::new(tag, Size::ZERO)
} }
pub fn reallocate( pub fn reallocate(
&mut self, &mut self,
ptr: Pointer<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
old_size_and_align: Option<(Size, Align)>, old_size_and_align: Option<(Size, Align)>,
new_size: Size, new_size: Size,
new_align: Align, new_align: Align,
kind: MemoryKind<M::MemoryKind>, kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> { ) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
if ptr.offset.bytes() != 0 { let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
if offset.bytes() != 0 {
throw_ub_format!( throw_ub_format!(
"reallocating {:?} which does not point to the beginning of an object", "reallocating {:?} which does not point to the beginning of an object",
ptr ptr
@ -260,7 +261,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
let new_ptr = self.allocate(new_size, new_align, kind)?; let new_ptr = self.allocate(new_size, new_align, kind)?;
let old_size = match old_size_and_align { let old_size = match old_size_and_align {
Some((size, _align)) => size, Some((size, _align)) => size,
None => self.get_raw(ptr.alloc_id)?.size(), None => self.get_raw(alloc_id)?.size(),
}; };
// This will also call the access hooks. // This will also call the access hooks.
self.copy( self.copy(
@ -271,50 +272,51 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
old_size.min(new_size), old_size.min(new_size),
/*nonoverlapping*/ true, /*nonoverlapping*/ true,
)?; )?;
self.deallocate(ptr, old_size_and_align, kind)?; self.deallocate(ptr.into(), old_size_and_align, kind)?;
Ok(new_ptr) Ok(new_ptr)
} }
pub fn deallocate( pub fn deallocate(
&mut self, &mut self,
ptr: Pointer<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
old_size_and_align: Option<(Size, Align)>, old_size_and_align: Option<(Size, Align)>,
kind: MemoryKind<M::MemoryKind>, kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id); let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
trace!("deallocating: {}", alloc_id);
if ptr.offset.bytes() != 0 { if offset.bytes() != 0 {
throw_ub_format!( throw_ub_format!(
"deallocating {:?} which does not point to the beginning of an object", "deallocating {:?} which does not point to the beginning of an object",
ptr ptr
); );
} }
let (alloc_kind, mut alloc) = match self.alloc_map.remove(&ptr.alloc_id) { let (alloc_kind, mut alloc) = match self.alloc_map.remove(&alloc_id) {
Some(alloc) => alloc, Some(alloc) => alloc,
None => { None => {
// Deallocating global memory -- always an error // Deallocating global memory -- always an error
return Err(match self.tcx.get_global_alloc(ptr.alloc_id) { return Err(match self.tcx.get_global_alloc(alloc_id) {
Some(GlobalAlloc::Function(..)) => { Some(GlobalAlloc::Function(..)) => {
err_ub_format!("deallocating {}, which is a function", ptr.alloc_id) err_ub_format!("deallocating {}, which is a function", alloc_id)
} }
Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => { Some(GlobalAlloc::Static(..) | GlobalAlloc::Memory(..)) => {
err_ub_format!("deallocating {}, which is static memory", ptr.alloc_id) err_ub_format!("deallocating {}, which is static memory", alloc_id)
} }
None => err_ub!(PointerUseAfterFree(ptr.alloc_id)), None => err_ub!(PointerUseAfterFree(alloc_id)),
} }
.into()); .into());
} }
}; };
if alloc.mutability == Mutability::Not { if alloc.mutability == Mutability::Not {
throw_ub_format!("deallocating immutable allocation {}", ptr.alloc_id); throw_ub_format!("deallocating immutable allocation {}", alloc_id);
} }
if alloc_kind != kind { if alloc_kind != kind {
throw_ub_format!( throw_ub_format!(
"deallocating {}, which is {} memory, using {} deallocation operation", "deallocating {}, which is {} memory, using {} deallocation operation",
ptr.alloc_id, alloc_id,
alloc_kind, alloc_kind,
kind kind
); );
@ -323,7 +325,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if size != alloc.size() || align != alloc.align { if size != alloc.size() || align != alloc.align {
throw_ub_format!( throw_ub_format!(
"incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}", "incorrect layout on deallocation: {} has size {} and alignment {}, but gave size {} and alignment {}",
ptr.alloc_id, alloc_id,
alloc.size().bytes(), alloc.size().bytes(),
alloc.align.bytes(), alloc.align.bytes(),
size.bytes(), size.bytes(),
@ -337,7 +339,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr, size)?; M::memory_deallocated(&mut self.extra, &mut alloc.extra, ptr, size)?;
// Don't forget to remember size and align of this now-dead allocation // Don't forget to remember size and align of this now-dead allocation
let old = self.dead_alloc_map.insert(ptr.alloc_id, (size, alloc.align)); let old = self.dead_alloc_map.insert(alloc_id, (size, alloc.align));
if old.is_some() { if old.is_some() {
bug!("Nothing can be deallocated twice"); bug!("Nothing can be deallocated twice");
} }
@ -345,52 +347,61 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
Ok(()) Ok(())
} }
/// Internal helper function for APIs that offer memory access based on `Scalar` pointers. /// Internal helper function to determine the allocation and offset of a pointer (if any).
#[inline(always)] #[inline(always)]
pub(super) fn check_ptr_access( fn get_ptr_access(
&self, &self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, size: Size,
align: Align, align: Align,
) -> InterpResult<'tcx, Option<Pointer<M::PointerTag>>> { ) -> InterpResult<'tcx, Option<(AllocId, Size, Pointer<M::PointerTag>)>> {
let align = M::enforce_alignment(&self.extra).then_some(align); let align = M::enforce_alignment(&self.extra).then_some(align);
self.check_and_deref_ptr(sptr, size, align, CheckInAllocMsg::MemoryAccessTest, |ptr| { self.check_and_deref_ptr(
ptr,
size,
align,
CheckInAllocMsg::MemoryAccessTest,
|alloc_id, offset, ptr| {
let (size, align) = let (size, align) =
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?; self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ptr)) Ok((size, align, (alloc_id, offset, ptr)))
}) },
)
} }
/// Check if the given scalar is allowed to do a memory access of given `size` and `align` /// Check if the given pointer is allowed to do a memory access of given `size` and `align`
/// (ignoring `M::enforce_alignment`). The caller can control the error message for the /// (ignoring `M::enforce_alignment`). The caller can control the error message for the
/// out-of-bounds case. /// out-of-bounds case.
#[inline(always)] #[inline(always)]
pub fn check_ptr_access_align( pub fn check_ptr_access_align(
&self, &self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, size: Size,
align: Align, align: Align,
msg: CheckInAllocMsg, msg: CheckInAllocMsg,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
self.check_and_deref_ptr(sptr, size, Some(align), msg, |ptr| { self.check_and_deref_ptr(ptr, size, Some(align), msg, |alloc_id, _, _| {
let (size, align) = let (size, align) = self.get_size_and_align(alloc_id, AllocCheck::Dereferenceable)?;
self.get_size_and_align(ptr.alloc_id, AllocCheck::Dereferenceable)?;
Ok((size, align, ())) Ok((size, align, ()))
})?; })?;
Ok(()) Ok(())
} }
/// Low-level helper function to check if a ptr is in-bounds and potentially return a reference /// Low-level helper function to check if a ptr is in-bounds and potentially return a reference
/// to the allocation it points to. Supports both shared and mutable references, to the actual /// to the allocation it points to. Supports both shared and mutable references, as the actual
/// checking is offloaded to a helper closure. `align` defines whether and which alignment check /// checking is offloaded to a helper closure. `align` defines whether and which alignment check
/// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned. /// is done. Returns `None` for size 0, and otherwise `Some` of what `alloc_size` returned.
fn check_and_deref_ptr<T>( fn check_and_deref_ptr<T>(
&self, &self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, size: Size,
align: Option<Align>, align: Option<Align>,
msg: CheckInAllocMsg, msg: CheckInAllocMsg,
alloc_size: impl FnOnce(Pointer<M::PointerTag>) -> InterpResult<'tcx, (Size, Align, T)>, alloc_size: impl FnOnce(
AllocId,
Size,
Pointer<M::PointerTag>,
) -> InterpResult<'tcx, (Size, Align, T)>,
) -> InterpResult<'tcx, Option<T>> { ) -> InterpResult<'tcx, Option<T>> {
fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> { fn check_offset_align(offset: u64, align: Align) -> InterpResult<'static> {
if offset % align.bytes() == 0 { if offset % align.bytes() == 0 {
@ -405,53 +416,50 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
} }
} }
// Normalize to a `Pointer` if we definitely need one. // Extract from the pointer an `Option<AllocId>` and an offset, which is relative to the
let normalized = if size.bytes() == 0 { // allocation or (if that is `None`) an absolute address.
// Can be an integer, just take what we got. We do NOT `force_bits` here; let ptr_or_addr = if size.bytes() == 0 {
// if this is already a `Pointer` we want to do the bounds checks! // Let's see what we can do, but don't throw errors if there's nothing there.
sptr self.ptr_try_get_alloc(ptr)
} else { } else {
// A "real" access, we must get a pointer to be able to check the bounds. // A "real" access, we insist on getting an `AllocId`.
Scalar::from(self.force_ptr(sptr)?) Ok(self.ptr_force_alloc(ptr)?)
}; };
Ok(match normalized.to_bits_or_ptr(self.pointer_size(), self) { Ok(match ptr_or_addr {
Ok(bits) => { Err(addr) => {
let bits = u64::try_from(bits).unwrap(); // it's ptr-sized // No memory is actually being accessed.
assert!(size.bytes() == 0); debug_assert!(size.bytes() == 0);
// Must be non-null. // Must be non-null.
if bits == 0 { if addr == 0 {
throw_ub!(DanglingIntPointer(0, msg)) throw_ub!(DanglingIntPointer(0, msg))
} }
// Must be aligned. // Must be aligned.
if let Some(align) = align { if let Some(align) = align {
check_offset_align(bits, align)?; check_offset_align(addr, align)?;
} }
None None
} }
Err(ptr) => { Ok((alloc_id, offset, ptr)) => {
let (allocation_size, alloc_align, ret_val) = alloc_size(ptr)?; let (allocation_size, alloc_align, ret_val) = alloc_size(alloc_id, offset, ptr)?;
// Test bounds. This also ensures non-null. // Test bounds. This also ensures non-null.
// It is sufficient to check this for the end pointer. The addition // It is sufficient to check this for the end pointer. Also check for overflow!
// checks for overflow. if offset.checked_add(size, &self.tcx).map_or(true, |end| end > allocation_size) {
let end_ptr = ptr.offset(size, self)?; throw_ub!(PointerOutOfBounds { alloc_id, offset, size, allocation_size, msg })
if end_ptr.offset > allocation_size {
// equal is okay!
throw_ub!(PointerOutOfBounds { ptr: end_ptr.erase_tag(), msg, allocation_size })
} }
// Test align. Check this last; if both bounds and alignment are violated // Test align. Check this last; if both bounds and alignment are violated
// we want the error to be about the bounds. // we want the error to be about the bounds.
if let Some(align) = align { if let Some(align) = align {
if M::force_int_for_alignment_check(&self.extra) { if M::force_int_for_alignment_check(&self.extra) {
let bits = self let addr = Scalar::from(ptr)
.force_bits(ptr.into(), self.pointer_size()) .to_machine_usize(&self.tcx)
.expect("ptr-to-int cast for align check should never fail"); .expect("ptr-to-int cast for align check should never fail");
check_offset_align(bits.try_into().unwrap(), align)?; check_offset_align(addr, align)?;
} else { } else {
// Check allocation alignment and offset alignment. // Check allocation alignment and offset alignment.
if alloc_align.bytes() < align.bytes() { if alloc_align.bytes() < align.bytes() {
throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align }); throw_ub!(AlignmentCheckFailed { has: alloc_align, required: align });
} }
check_offset_align(ptr.offset.bytes(), align)?; check_offset_align(offset.bytes(), align)?;
} }
} }
@ -463,13 +471,18 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
} }
/// Test if the pointer might be null. /// Test if the pointer might be null.
pub fn ptr_may_be_null(&self, ptr: Pointer<M::PointerTag>) -> bool { pub fn ptr_may_be_null(&self, ptr: Pointer<Option<M::PointerTag>>) -> bool {
match self.ptr_try_get_alloc(ptr) {
Ok((alloc_id, offset, _)) => {
let (size, _align) = self let (size, _align) = self
.get_size_and_align(ptr.alloc_id, AllocCheck::MaybeDead) .get_size_and_align(alloc_id, AllocCheck::MaybeDead)
.expect("alloc info with MaybeDead cannot fail"); .expect("alloc info with MaybeDead cannot fail");
// If the pointer is out-of-bounds, it may be null. // If the pointer is out-of-bounds, it may be null.
// Note that one-past-the-end (offset == size) is still inbounds, and never null. // Note that one-past-the-end (offset == size) is still inbounds, and never null.
ptr.offset > size offset > size
}
Err(offset) => offset == 0,
}
} }
} }
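`check_and_deref_ptr` above boils down to two checks on the resolved allocation: the access must stay in bounds (including the overflow case), and the offset must satisfy the requested alignment. A rough standalone model of that logic, with simplified types and error messages:

// Sketch: bounds are checked first so that bounds errors take precedence over
// alignment errors, matching the ordering in the hunk above.
fn check_access(offset: u64, size: u64, alloc_size: u64, align: u64) -> Result<(), String> {
    // Bounds: checking the end of the access also rules out overflow;
    // `end == alloc_size` (one past the end) is still in bounds.
    match offset.checked_add(size) {
        Some(end) if end <= alloc_size => {}
        _ => return Err(format!("out of bounds: {}+{} > {}", offset, size, alloc_size)),
    }
    // Alignment of the offset within the allocation.
    if offset % align != 0 {
        return Err(format!("offset {} is not aligned to {}", offset, align));
    }
    Ok(())
}

fn main() {
    assert!(check_access(8, 8, 16, 8).is_ok());
    assert!(check_access(12, 8, 16, 4).is_err()); // out of bounds
    assert!(check_access(2, 4, 16, 4).is_err()); // misaligned
}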
@ -522,8 +535,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
alloc, alloc,
M::GLOBAL_KIND.map(MemoryKind::Machine), M::GLOBAL_KIND.map(MemoryKind::Machine),
); );
// Sanity check that this is the same pointer we would have gotten via `global_base_pointer`. // Sanity check that this is the same tag we would have gotten via `global_base_pointer`.
debug_assert_eq!(tag, M::tag_global_base_pointer(memory_extra, id)); debug_assert!(tag == M::tag_global_base_pointer(memory_extra, id.into()).provenance);
Ok(alloc) Ok(alloc)
} }
@ -566,30 +579,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// "Safe" (bounds and align-checked) allocation access. /// "Safe" (bounds and align-checked) allocation access.
pub fn get<'a>( pub fn get<'a>(
&'a self, &'a self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, size: Size,
align: Align, align: Align,
) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> { ) -> InterpResult<'tcx, Option<AllocRef<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let align = M::enforce_alignment(&self.extra).then_some(align); let align = M::enforce_alignment(&self.extra).then_some(align);
let ptr_and_alloc = self.check_and_deref_ptr( let ptr_and_alloc = self.check_and_deref_ptr(
sptr, ptr,
size, size,
align, align,
CheckInAllocMsg::MemoryAccessTest, CheckInAllocMsg::MemoryAccessTest,
|ptr| { |alloc_id, offset, ptr| {
let alloc = self.get_raw(ptr.alloc_id)?; let alloc = self.get_raw(alloc_id)?;
Ok((alloc.size(), alloc.align, (ptr, alloc))) Ok((alloc.size(), alloc.align, (alloc_id, offset, ptr, alloc)))
}, },
)?; )?;
if let Some((ptr, alloc)) = ptr_and_alloc { if let Some((alloc_id, offset, ptr, alloc)) = ptr_and_alloc {
M::memory_read(&self.extra, &alloc.extra, ptr, size)?; M::memory_read(&self.extra, &alloc.extra, ptr, size)?;
let range = alloc_range(ptr.offset, size); let range = alloc_range(offset, size);
Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id: ptr.alloc_id })) Ok(Some(AllocRef { alloc, range, tcx: self.tcx, alloc_id }))
} else { } else {
// Even in this branch we have to be sure that we actually access the allocation, in // Even in this branch we have to be sure that we actually access the allocation, in
// order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of // order to ensure that `static FOO: Type = FOO;` causes a cycle error instead of
// magically pulling *any* ZST value from the ether. However, the `get_raw` above is // magically pulling *any* ZST value from the ether. However, the `get_raw` above is
// always called when `sptr` is truly a `Pointer`, so we are good. // always called when `ptr` has an `AllocId`.
Ok(None) Ok(None)
} }
} }
@ -638,19 +651,19 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// "Safe" (bounds and align-checked) allocation access. /// "Safe" (bounds and align-checked) allocation access.
pub fn get_mut<'a>( pub fn get_mut<'a>(
&'a mut self, &'a mut self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, size: Size,
align: Align, align: Align,
) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> { ) -> InterpResult<'tcx, Option<AllocRefMut<'a, 'tcx, M::PointerTag, M::AllocExtra>>> {
let ptr = self.check_ptr_access(sptr, size, align)?; let parts = self.get_ptr_access(ptr, size, align)?;
if let Some(ptr) = ptr { if let Some((alloc_id, offset, ptr)) = parts {
let tcx = self.tcx; let tcx = self.tcx;
// FIXME: can we somehow avoid looking up the allocation twice here? // FIXME: can we somehow avoid looking up the allocation twice here?
// We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`. // We cannot call `get_raw_mut` inside `check_and_deref_ptr` as that would duplicate `&mut self`.
let (alloc, extra) = self.get_raw_mut(ptr.alloc_id)?; let (alloc, extra) = self.get_raw_mut(alloc_id)?;
M::memory_written(extra, &mut alloc.extra, ptr, size)?; M::memory_written(extra, &mut alloc.extra, ptr, size)?;
let range = alloc_range(ptr.offset, size); let range = alloc_range(offset, size);
Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id: ptr.alloc_id })) Ok(Some(AllocRefMut { alloc, range, tcx, alloc_id }))
} else { } else {
Ok(None) Ok(None)
} }
@ -740,14 +753,14 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn get_fn( pub fn get_fn(
&self, &self,
ptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> { ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr = self.force_ptr(ptr)?; // We definitely need a pointer value. let (alloc_id, offset, ptr) = self.ptr_force_alloc(ptr)?;
if ptr.offset.bytes() != 0 { if offset.bytes() != 0 {
throw_ub!(InvalidFunctionPointer(ptr.erase_tag())) throw_ub!(InvalidFunctionPointer(ptr.erase_for_fmt()))
} }
self.get_fn_alloc(ptr.alloc_id) self.get_fn_alloc(alloc_id)
.ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_tag())).into()) .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_for_fmt())).into())
} }
pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> { pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
@ -786,7 +799,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
if reachable.insert(id) { if reachable.insert(id) {
// This is a new allocation, add its relocations to `todo`. // This is a new allocation, add its relocations to `todo`.
if let Some((_, alloc)) = self.alloc_map.get(id) { if let Some((_, alloc)) = self.alloc_map.get(id) {
todo.extend(alloc.relocations().values().map(|&(_, target_id)| target_id)); todo.extend(alloc.relocations().values().map(|tag| tag.erase_for_fmt()));
} }
} }
} }
@ -820,14 +833,14 @@ pub struct DumpAllocs<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> {
impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> { impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a, 'mir, 'tcx, M> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
// Cannot be a closure because it is generic in `Tag`, `Extra`. // Cannot be a closure because it is generic in `Tag`, `Extra`.
fn write_allocation_track_relocs<'tcx, Tag: Copy + fmt::Debug, Extra>( fn write_allocation_track_relocs<'tcx, Tag: Provenance, Extra>(
fmt: &mut std::fmt::Formatter<'_>, fmt: &mut std::fmt::Formatter<'_>,
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
allocs_to_print: &mut VecDeque<AllocId>, allocs_to_print: &mut VecDeque<AllocId>,
alloc: &Allocation<Tag, Extra>, alloc: &Allocation<Tag, Extra>,
) -> std::fmt::Result { ) -> std::fmt::Result {
for &(_, target_id) in alloc.relocations().values() { for alloc_id in alloc.relocations().values().map(|tag| tag.erase_for_fmt()) {
allocs_to_print.push_back(target_id); allocs_to_print.push_back(alloc_id);
} }
write!(fmt, "{}", pretty::display_allocation(tcx, alloc)) write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
} }
@ -930,8 +943,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Reads the given number of bytes from memory. Returns them as a slice. /// Reads the given number of bytes from memory. Returns them as a slice.
/// ///
/// Performs appropriate bounds checks. /// Performs appropriate bounds checks.
pub fn read_bytes(&self, sptr: Scalar<M::PointerTag>, size: Size) -> InterpResult<'tcx, &[u8]> { pub fn read_bytes(
let alloc_ref = match self.get(sptr, size, Align::ONE)? { &self,
ptr: Pointer<Option<M::PointerTag>>,
size: Size,
) -> InterpResult<'tcx, &[u8]> {
let alloc_ref = match self.get(ptr, size, Align::ONE)? {
Some(a) => a, Some(a) => a,
None => return Ok(&[]), // zero-sized access None => return Ok(&[]), // zero-sized access
}; };
@ -948,7 +965,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Performs appropriate bounds checks. /// Performs appropriate bounds checks.
pub fn write_bytes( pub fn write_bytes(
&mut self, &mut self,
sptr: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
src: impl IntoIterator<Item = u8>, src: impl IntoIterator<Item = u8>,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let mut src = src.into_iter(); let mut src = src.into_iter();
@ -957,7 +974,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
assert_eq!(lower, len, "can only write iterators with a precise length"); assert_eq!(lower, len, "can only write iterators with a precise length");
let size = Size::from_bytes(len); let size = Size::from_bytes(len);
let alloc_ref = match self.get_mut(sptr, size, Align::ONE)? { let alloc_ref = match self.get_mut(ptr, size, Align::ONE)? {
Some(alloc_ref) => alloc_ref, Some(alloc_ref) => alloc_ref,
None => { None => {
// zero-sized access // zero-sized access
@ -984,9 +1001,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn copy( pub fn copy(
&mut self, &mut self,
src: Scalar<M::PointerTag>, src: Pointer<Option<M::PointerTag>>,
src_align: Align, src_align: Align,
dest: Scalar<M::PointerTag>, dest: Pointer<Option<M::PointerTag>>,
dest_align: Align, dest_align: Align,
size: Size, size: Size,
nonoverlapping: bool, nonoverlapping: bool,
@ -996,9 +1013,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn copy_repeatedly( pub fn copy_repeatedly(
&mut self, &mut self,
src: Scalar<M::PointerTag>, src: Pointer<Option<M::PointerTag>>,
src_align: Align, src_align: Align,
dest: Scalar<M::PointerTag>, dest: Pointer<Option<M::PointerTag>>,
dest_align: Align, dest_align: Align,
size: Size, size: Size,
num_copies: u64, num_copies: u64,
@ -1006,22 +1023,22 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let tcx = self.tcx; let tcx = self.tcx;
// We need to do our own bounds-checks. // We need to do our own bounds-checks.
let src = self.check_ptr_access(src, size, src_align)?; let src_parts = self.get_ptr_access(src, size, src_align)?;
let dest = self.check_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication let dest_parts = self.get_ptr_access(dest, size * num_copies, dest_align)?; // `Size` multiplication
// FIXME: we look up both allocations twice here, once before for the `check_ptr_access` // FIXME: we look up both allocations twice here, once before for the `check_ptr_access`

// and once below to get the underlying `&[mut] Allocation`. // and once below to get the underlying `&[mut] Allocation`.
// Source alloc preparations and access hooks. // Source alloc preparations and access hooks.
let src = match src { let (src_alloc_id, src_offset, src) = match src_parts {
None => return Ok(()), // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do. None => return Ok(()), // Zero-sized *source*, that means dst is also zero-sized and we have nothing to do.
Some(src_ptr) => src_ptr, Some(src_ptr) => src_ptr,
}; };
let src_alloc = self.get_raw(src.alloc_id)?; let src_alloc = self.get_raw(src_alloc_id)?;
M::memory_read(&self.extra, &src_alloc.extra, src, size)?; M::memory_read(&self.extra, &src_alloc.extra, src, size)?;
// We need the `dest` ptr for the next operation, so we get it now. // We need the `dest` ptr for the next operation, so we get it now.
// We already did the source checks and called the hooks so we are good to return early. // We already did the source checks and called the hooks so we are good to return early.
let dest = match dest { let (dest_alloc_id, dest_offset, dest) = match dest_parts {
None => return Ok(()), // Zero-sized *destination*. None => return Ok(()), // Zero-sized *destination*.
Some(dest_ptr) => dest_ptr, Some(dest_ptr) => dest_ptr,
}; };
@ -1033,23 +1050,23 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// relocations overlapping the edges; those would not be handled correctly). // relocations overlapping the edges; those would not be handled correctly).
let relocations = src_alloc.prepare_relocation_copy( let relocations = src_alloc.prepare_relocation_copy(
self, self,
alloc_range(src.offset, size), alloc_range(src_offset, size),
dest.offset, dest_offset,
num_copies, num_copies,
); );
// Prepare a copy of the initialization mask. // Prepare a copy of the initialization mask.
let compressed = src_alloc.compress_uninit_range(alloc_range(src.offset, size)); let compressed = src_alloc.compress_uninit_range(alloc_range(src_offset, size));
// This checks relocation edges on the src. // This checks relocation edges on the src.
let src_bytes = src_alloc let src_bytes = src_alloc
.get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src.offset, size)) .get_bytes_with_uninit_and_ptr(&tcx, alloc_range(src_offset, size))
.map_err(|e| e.to_interp_error(src.alloc_id))? .map_err(|e| e.to_interp_error(src_alloc_id))?
.as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation .as_ptr(); // raw ptr, so we can also get a ptr to the destination allocation
// Destination alloc preparations and access hooks. // Destination alloc preparations and access hooks.
let (dest_alloc, extra) = self.get_raw_mut(dest.alloc_id)?; let (dest_alloc, extra) = self.get_raw_mut(dest_alloc_id)?;
M::memory_written(extra, &mut dest_alloc.extra, dest, size * num_copies)?; M::memory_written(extra, &mut dest_alloc.extra, dest, size * num_copies)?;
let dest_bytes = dest_alloc let dest_bytes = dest_alloc
.get_bytes_mut_ptr(&tcx, alloc_range(dest.offset, size * num_copies)) .get_bytes_mut_ptr(&tcx, alloc_range(dest_offset, size * num_copies))
.as_mut_ptr(); .as_mut_ptr();
if compressed.no_bytes_init() { if compressed.no_bytes_init() {
@ -1059,7 +1076,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// This also avoids writing to the target bytes so that the backing allocation is never // This also avoids writing to the target bytes so that the backing allocation is never
// touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary // touched if the bytes stay uninitialized for the whole interpreter execution. On contemporary
// operating systems this can avoid physically allocating the page. // operating systems this can avoid physically allocating the page.
dest_alloc.mark_init(alloc_range(dest.offset, size * num_copies), false); // `Size` multiplication dest_alloc.mark_init(alloc_range(dest_offset, size * num_copies), false); // `Size` multiplication
dest_alloc.mark_relocation_range(relocations); dest_alloc.mark_relocation_range(relocations);
return Ok(()); return Ok(());
} }
@ -1070,11 +1087,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// The pointers above remain valid even if the `HashMap` table is moved around because they // The pointers above remain valid even if the `HashMap` table is moved around because they
// point into the `Vec` storing the bytes. // point into the `Vec` storing the bytes.
unsafe { unsafe {
if src.alloc_id == dest.alloc_id { if src_alloc_id == dest_alloc_id {
if nonoverlapping { if nonoverlapping {
// `Size` additions // `Size` additions
if (src.offset <= dest.offset && src.offset + size > dest.offset) if (src_offset <= dest_offset && src_offset + size > dest_offset)
|| (dest.offset <= src.offset && dest.offset + size > src.offset) || (dest_offset <= src_offset && dest_offset + size > src_offset)
{ {
throw_ub_format!("copy_nonoverlapping called on overlapping ranges") throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
} }
@ -1101,7 +1118,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// now fill in all the "init" data // now fill in all the "init" data
dest_alloc.mark_compressed_init_range( dest_alloc.mark_compressed_init_range(
&compressed, &compressed,
alloc_range(dest.offset, size), alloc_range(dest_offset, size),
num_copies, num_copies,
); );
// copy the relocations to the destination // copy the relocations to the destination
@ -1113,24 +1130,44 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Machine pointer introspection. /// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> { impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn force_ptr( pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
&self, match scalar.to_bits_or_ptr(self.pointer_size(), &self.tcx) {
scalar: Scalar<M::PointerTag>, Err(ptr) => ptr.into(),
) -> InterpResult<'tcx, Pointer<M::PointerTag>> { Ok(bits) => {
match scalar { let addr = u64::try_from(bits).unwrap();
Scalar::Ptr(ptr) => Ok(ptr), M::ptr_from_addr(&self, addr)
_ => M::int_to_ptr(&self, scalar.to_machine_usize(self)?), }
} }
} }
pub fn force_bits( /// Internal helper for turning a "maybe pointer" into a proper pointer (and some information
/// about where it points), or an absolute address.
pub(super) fn ptr_try_get_alloc(
&self, &self,
scalar: Scalar<M::PointerTag>, ptr: Pointer<Option<M::PointerTag>>,
size: Size, ) -> Result<(AllocId, Size, Pointer<M::PointerTag>), u64> {
) -> InterpResult<'tcx, u128> { match ptr.into_pointer_or_offset() {
match scalar.to_bits_or_ptr(size, self) { Ok(ptr) => {
Ok(bits) => Ok(bits), let (alloc_id, offset) = M::ptr_get_alloc(self, ptr);
Err(ptr) => Ok(M::ptr_to_int(&self, ptr)?.into()), if let Some(alloc_id) = alloc_id {
Ok((alloc_id, offset, ptr))
} else {
Err(offset.bytes())
} }
} }
Err(offset) => Err(offset.bytes()),
}
}
/// Internal helper for turning a "maybe pointer" into a proper pointer (and some information
/// about where it points).
#[inline(always)]
pub(super) fn ptr_force_alloc(
&self,
ptr: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (AllocId, Size, Pointer<M::PointerTag>)> {
self.ptr_try_get_alloc(ptr).map_err(|offset| {
err_ub!(DanglingIntPointer(offset, CheckInAllocMsg::InboundsTest)).into()
})
}
} }
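The new introspection helpers make the Scalar-to-Pointer direction infallible: `scalar_to_ptr` always produces a pointer, `ptr_try_get_alloc` either yields an `(AllocId, offset)` pair or the raw address, and `ptr_force_alloc` turns the latter into a dangling-pointer error. A simplified sketch of that flow with toy types:

// Sketch only: stand-in types, not the interpreter's `Pointer` or error machinery.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Copy, Clone, Debug)]
struct Pointer {
    provenance: Option<AllocId>,
    offset: u64,
}

/// Either an allocation-relative offset or an absolute address.
fn try_get_alloc(ptr: Pointer) -> Result<(AllocId, u64), u64> {
    match ptr.provenance {
        Some(alloc_id) => Ok((alloc_id, ptr.offset)),
        None => Err(ptr.offset),
    }
}

/// Like `try_get_alloc`, but a bare integer address is an error here.
fn force_alloc(ptr: Pointer) -> Result<(AllocId, u64), String> {
    try_get_alloc(ptr).map_err(|addr| format!("dangling integer pointer 0x{:x}", addr))
}

fn main() {
    let tagged = Pointer { provenance: Some(AllocId(3)), offset: 4 };
    let raw = Pointer { provenance: None, offset: 0x1000 };
    assert_eq!(try_get_alloc(tagged), Ok((AllocId(3), 4)));
    assert!(force_alloc(raw).is_err());
}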

View file

@ -15,8 +15,9 @@ use rustc_target::abi::{Abi, HasDataLayout, LayoutOf, Size, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants}; use rustc_target::abi::{VariantIdx, Variants};
use super::{ use super::{
alloc_range, from_known_layout, mir_assign_valid_types, ConstValue, GlobalId, InterpCx, alloc_range, from_known_layout, mir_assign_valid_types, AllocId, ConstValue, GlobalId,
InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, InterpCx, InterpResult, MPlaceTy, Machine, MemPlace, Place, PlaceTy, Pointer, Provenance,
Scalar, ScalarMaybeUninit,
}; };
/// An `Immediate` represents a single immediate self-contained Rust value. /// An `Immediate` represents a single immediate self-contained Rust value.
@ -26,14 +27,24 @@ use super::{
/// operations and wide pointers. This idea was taken from rustc's codegen. /// operations and wide pointers. This idea was taken from rustc's codegen.
/// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely /// In particular, thanks to `ScalarPair`, arithmetic operations and casts can be entirely
/// defined on `Immediate`, and do not have to work with a `Place`. /// defined on `Immediate`, and do not have to work with a `Place`.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)] #[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash)]
pub enum Immediate<Tag = ()> { pub enum Immediate<Tag = AllocId> {
Scalar(ScalarMaybeUninit<Tag>), Scalar(ScalarMaybeUninit<Tag>),
ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>), ScalarPair(ScalarMaybeUninit<Tag>, ScalarMaybeUninit<Tag>),
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Immediate, 56); //FIXME rustc_data_structures::static_assert_size!(Immediate, 56);
impl<Tag: Provenance> std::fmt::Debug for Immediate<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Immediate::*;
match self {
Scalar(s) => f.debug_tuple("Scalar").field(s).finish(),
ScalarPair(s1, s2) => f.debug_tuple("ScalarPair").field(s1).field(s2).finish(),
}
}
}
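
The derived Debug impls are replaced by hand-written ones bounded on Provenance, so that formatting only requires the bound the interpreter actually cares about. A minimal standalone sketch of that pattern, with a stand-in Provenance trait rather than the real one:

use std::fmt;

// Stand-in trait for illustration only.
trait Provenance: fmt::Debug {}

enum Immediate<Tag> {
    Scalar(Tag),
    ScalarPair(Tag, Tag),
}

// Manual impl instead of #[derive(Debug)]: the bound is chosen explicitly.
impl<Tag: Provenance> fmt::Debug for Immediate<Tag> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Immediate::Scalar(s) => f.debug_tuple("Scalar").field(s).finish(),
            Immediate::ScalarPair(a, b) => {
                f.debug_tuple("ScalarPair").field(a).field(b).finish()
            }
        }
    }
}

#[derive(Debug)]
struct ConcreteTag;
impl Provenance for ConcreteTag {}

fn main() {
    println!("{:?}", Immediate::Scalar(ConcreteTag));
}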
impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> { impl<Tag> From<ScalarMaybeUninit<Tag>> for Immediate<Tag> {
#[inline(always)] #[inline(always)]
@ -81,26 +92,33 @@ impl<'tcx, Tag> Immediate<Tag> {
// ScalarPair needs a type to interpret, so we often have an immediate and a type together // ScalarPair needs a type to interpret, so we often have an immediate and a type together
// as input for binary and cast operations. // as input for binary and cast operations.
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone)]
pub struct ImmTy<'tcx, Tag = ()> { pub struct ImmTy<'tcx, Tag = AllocId> {
imm: Immediate<Tag>, imm: Immediate<Tag>,
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(ImmTy<'_>, 72); //FIXME rustc_data_structures::static_assert_size!(ImmTy<'_>, 72);
impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> { impl<'tcx, Tag: Provenance> std::fmt::Debug for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let ImmTy { imm, layout } = self;
f.debug_struct("ImmTy").field("imm", imm).field("layout", layout).finish()
}
}
impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
/// Helper function for printing a scalar to a FmtPrinter /// Helper function for printing a scalar to a FmtPrinter
fn p<'a, 'tcx, F: std::fmt::Write, Tag>( fn p<'a, 'tcx, F: std::fmt::Write, Tag: Provenance>(
cx: FmtPrinter<'a, 'tcx, F>, cx: FmtPrinter<'a, 'tcx, F>,
s: ScalarMaybeUninit<Tag>, s: ScalarMaybeUninit<Tag>,
ty: Ty<'tcx>, ty: Ty<'tcx>,
) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> { ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
match s { match s {
ScalarMaybeUninit::Scalar(s) => { ScalarMaybeUninit::Scalar(s) => {
cx.pretty_print_const_scalar(s.erase_tag(), ty, true) cx.pretty_print_const_scalar(s.erase_for_fmt(), ty, true)
} }
ScalarMaybeUninit::Uninit => cx.typed_value( ScalarMaybeUninit::Uninit => cx.typed_value(
|mut this| { |mut this| {
@ -120,11 +138,11 @@ impl<Tag: Copy> std::fmt::Display for ImmTy<'tcx, Tag> {
p(cx, s, ty)?; p(cx, s, ty)?;
return Ok(()); return Ok(());
} }
write!(f, "{}: {}", s.erase_tag(), self.layout.ty) write!(f, "{}: {}", s.erase_for_fmt(), self.layout.ty)
} }
Immediate::ScalarPair(a, b) => { Immediate::ScalarPair(a, b) => {
// FIXME(oli-obk): at least print tuples and slices nicely // FIXME(oli-obk): at least print tuples and slices nicely
write!(f, "({}, {}): {}", a.erase_tag(), b.erase_tag(), self.layout.ty,) write!(f, "({}, {}): {}", a.erase_for_fmt(), b.erase_for_fmt(), self.layout.ty,)
} }
} }
}) })
@ -142,14 +160,24 @@ impl<'tcx, Tag> std::ops::Deref for ImmTy<'tcx, Tag> {
/// An `Operand` is the result of computing a `mir::Operand`. It can be immediate, /// An `Operand` is the result of computing a `mir::Operand`. It can be immediate,
/// or still in memory. The latter is an optimization, to delay reading that chunk of /// or still in memory. The latter is an optimization, to delay reading that chunk of
/// memory and to avoid having to store arbitrary-sized data here. /// memory and to avoid having to store arbitrary-sized data here.
#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, Hash)] #[derive(Copy, Clone, PartialEq, Eq, HashStable, Hash)]
pub enum Operand<Tag = ()> { pub enum Operand<Tag = AllocId> {
Immediate(Immediate<Tag>), Immediate(Immediate<Tag>),
Indirect(MemPlace<Tag>), Indirect(MemPlace<Tag>),
} }
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] impl<Tag: Provenance> std::fmt::Debug for Operand<Tag> {
pub struct OpTy<'tcx, Tag = ()> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Operand::*;
match self {
Immediate(i) => f.debug_tuple("Immediate").field(i).finish(),
Indirect(p) => f.debug_tuple("Indirect").field(p).finish(),
}
}
}
#[derive(Copy, Clone, PartialEq, Eq, Hash)]
pub struct OpTy<'tcx, Tag = AllocId> {
op: Operand<Tag>, // Keep this private; it helps enforce invariants. op: Operand<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
@ -157,6 +185,13 @@ pub struct OpTy<'tcx, Tag = ()> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80); rustc_data_structures::static_assert_size!(OpTy<'_, ()>, 80);
impl<'tcx, Tag: Provenance> std::fmt::Debug for OpTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let OpTy { op, layout } = self;
f.debug_struct("OpTy").field("op", op).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> { impl<'tcx, Tag> std::ops::Deref for OpTy<'tcx, Tag> {
type Target = Operand<Tag>; type Target = Operand<Tag>;
#[inline(always)] #[inline(always)]
@ -225,19 +260,6 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
} }
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Normalize `place.ptr` to a `Pointer` if this is a place and not a ZST.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
#[inline]
pub fn force_op_ptr(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
match op.try_as_mplace(self) {
Ok(mplace) => Ok(self.force_mplace_ptr(mplace)?.into()),
Err(imm) => Ok(imm.into()), // Nothing to cast/force
}
}
/// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`. /// Try reading an immediate in memory; this is interesting particularly for `ScalarPair`.
/// Returns `None` if the layout does not permit loading this as a value. /// Returns `None` if the layout does not permit loading this as a value.
fn try_read_immediate_from_mplace( fn try_read_immediate_from_mplace(
@ -291,7 +313,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self, &self,
src: &OpTy<'tcx, M::PointerTag>, src: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> { ) -> InterpResult<'tcx, Result<ImmTy<'tcx, M::PointerTag>, MPlaceTy<'tcx, M::PointerTag>>> {
Ok(match src.try_as_mplace(self) { Ok(match src.try_as_mplace() {
Ok(ref mplace) => { Ok(ref mplace) => {
if let Some(val) = self.try_read_immediate_from_mplace(mplace)? { if let Some(val) = self.try_read_immediate_from_mplace(mplace)? {
Ok(val) Ok(val)
@ -324,6 +346,14 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(self.read_immediate(op)?.to_scalar_or_uninit()) Ok(self.read_immediate(op)?.to_scalar_or_uninit())
} }
/// Read a pointer from a place.
pub fn read_pointer(
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
}
// Turn the wide MPlace into a string (must already be dereferenced!) // Turn the wide MPlace into a string (must already be dereferenced!)
pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> { pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx, &str> {
let len = mplace.len(self)?; let len = mplace.len(self)?;
@ -338,7 +368,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
op: &OpTy<'tcx, M::PointerTag>, op: &OpTy<'tcx, M::PointerTag>,
field: usize, field: usize,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
let base = match op.try_as_mplace(self) { let base = match op.try_as_mplace() {
Ok(ref mplace) => { Ok(ref mplace) => {
// We can reuse the mplace field computation logic for indirect operands. // We can reuse the mplace field computation logic for indirect operands.
let field = self.mplace_field(mplace, field)?; let field = self.mplace_field(mplace, field)?;
@ -381,7 +411,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
self.operand_field(op, index) self.operand_field(op, index)
} else { } else {
// Indexing into a big array. This must be an mplace. // Indexing into a big array. This must be an mplace.
let mplace = op.assert_mem_place(self); let mplace = op.assert_mem_place();
Ok(self.mplace_index(&mplace, index)?.into()) Ok(self.mplace_index(&mplace, index)?.into())
} }
} }
@ -392,7 +422,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
variant: VariantIdx, variant: VariantIdx,
) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> { ) -> InterpResult<'tcx, OpTy<'tcx, M::PointerTag>> {
// Downcasts only change the layout // Downcasts only change the layout
Ok(match op.try_as_mplace(self) { Ok(match op.try_as_mplace() {
Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(), Ok(ref mplace) => self.mplace_downcast(mplace, variant)?.into(),
Err(..) => { Err(..) => {
let layout = op.layout.for_variant(self, variant); let layout = op.layout.for_variant(self, variant);
@ -414,7 +444,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Subslice { .. } | ConstantIndex { .. } | Index(_) => { Subslice { .. } | ConstantIndex { .. } | Index(_) => {
// The rest should only occur as mplace, we do not use Immediates for types // The rest should only occur as mplace, we do not use Immediates for types
// allowing such operations. This matches place_projection forcing an allocation. // allowing such operations. This matches place_projection forcing an allocation.
let mplace = base.assert_mem_place(self); let mplace = base.assert_mem_place();
self.mplace_projection(&mplace, proj_elem)?.into() self.mplace_projection(&mplace, proj_elem)?.into()
} }
}) })
@ -580,9 +610,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// We rely on mutability being set correctly in that allocation to prevent writes // We rely on mutability being set correctly in that allocation to prevent writes
// where none should happen. // where none should happen.
let ptr = self.global_base_pointer(Pointer::new(id, offset))?; let ptr = self.global_base_pointer(Pointer::new(id, offset))?;
Operand::Indirect(MemPlace::from_ptr(ptr, layout.align.abi)) Operand::Indirect(MemPlace::from_ptr(ptr.into(), layout.align.abi))
} }
ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x)?.into()), ConstValue::Scalar(x) => Operand::Immediate(tag_scalar(x.into())?.into()),
ConstValue::Slice { data, start, end } => { ConstValue::Slice { data, start, end } => {
// We rely on mutability being set correctly in `data` to prevent writes // We rely on mutability being set correctly in `data` to prevent writes
// where none should happen. // where none should happen.
@ -658,9 +688,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Figure out which discriminant and variant this corresponds to. // Figure out which discriminant and variant this corresponds to.
Ok(match *tag_encoding { Ok(match *tag_encoding {
TagEncoding::Direct => { TagEncoding::Direct => {
let tag_bits = self let tag_bits = tag_val
.force_bits(tag_val, tag_layout.size) .to_bits(tag_layout.size)
.map_err(|_| err_ub!(InvalidTag(tag_val.erase_tag())))?; .map_err(|_| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
// Cast bits from tag layout to discriminant layout. // Cast bits from tag layout to discriminant layout.
let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty); let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
let discr_bits = discr_val.assert_bits(discr_layout.size); let discr_bits = discr_val.assert_bits(discr_layout.size);
@ -677,7 +707,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
_ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"), _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
} }
.ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_tag())))?; .ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
// Return the cast value, and the index. // Return the cast value, and the index.
(discr_val, index.0) (discr_val, index.0)
} }
@ -691,9 +721,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// The niche must be just 0 (which an inbounds pointer value never is) // The niche must be just 0 (which an inbounds pointer value never is)
let ptr_valid = niche_start == 0 let ptr_valid = niche_start == 0
&& variants_start == variants_end && variants_start == variants_end
&& !self.memory.ptr_may_be_null(ptr); && !self.memory.ptr_may_be_null(ptr.into());
if !ptr_valid { if !ptr_valid {
throw_ub!(InvalidTag(tag_val.erase_tag())) throw_ub!(InvalidTag(tag_val.erase_for_fmt()))
} }
dataful_variant dataful_variant
} }
View file
@ -318,8 +318,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
right.layout.ty right.layout.ty
); );
let l = self.force_bits(left.to_scalar()?, left.layout.size)?; let l = left.to_scalar()?.to_bits(left.layout.size)?;
let r = self.force_bits(right.to_scalar()?, right.layout.size)?; let r = right.to_scalar()?.to_bits(right.layout.size)?;
self.binary_int_op(bin_op, l, left.layout, r, right.layout) self.binary_int_op(bin_op, l, left.layout, r, right.layout)
} }
_ if left.layout.ty.is_any_ptr() => { _ if left.layout.ty.is_any_ptr() => {
@ -386,7 +386,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
_ => { _ => {
assert!(layout.ty.is_integral()); assert!(layout.ty.is_integral());
let val = self.force_bits(val, layout.size)?; let val = val.to_bits(layout.size)?;
let (res, overflow) = match un_op { let (res, overflow) = match un_op {
Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
Neg => { Neg => {
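
binary_int_op and the unary path now take their operand bits straight from Scalar::to_bits. For an unsigned operation the pattern is: operate on the raw bits, truncate to the layout size, and report overflow when truncation changed the value. A hedged standalone sketch of that pattern for addition (the real interpreter also handles signedness and dispatches on the concrete integer type):

fn truncate(value: u128, bits: u32) -> u128 {
    let shift = 128 - bits;
    (value << shift) >> shift
}

fn overflowing_add(l: u128, r: u128, bits: u32) -> (u128, bool) {
    let wide = l + r; // fine here: the test operands are small
    let narrow = truncate(wide, bits);
    (narrow, narrow != wide)
}

fn main() {
    assert_eq!(overflowing_add(200, 100, 8), (44, true)); // 300 wraps to 44 in u8
    assert_eq!(overflowing_add(1, 2, 8), (3, false));
}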
View file
@ -3,7 +3,6 @@
//! All high-level functions to write to memory work on places as destinations. //! All high-level functions to write to memory work on places as destinations.
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt::Debug;
use std::hash::Hash; use std::hash::Hash;
use rustc_ast::Mutability; use rustc_ast::Mutability;
@ -15,14 +14,14 @@ use rustc_target::abi::{Abi, Align, FieldsShape, TagEncoding};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants}; use rustc_target::abi::{HasDataLayout, LayoutOf, Size, VariantIdx, Variants};
use super::{ use super::{
alloc_range, mir_assign_valid_types, AllocRef, AllocRefMut, ConstAlloc, ImmTy, Immediate, alloc_range, mir_assign_valid_types, AllocId, AllocRef, AllocRefMut, CheckInAllocMsg,
InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy, Operand, Pointer, ConstAlloc, ImmTy, Immediate, InterpCx, InterpResult, LocalValue, Machine, MemoryKind, OpTy,
PointerArithmetic, Scalar, ScalarMaybeUninit, Operand, Pointer, PointerArithmetic, Provenance, Scalar, ScalarMaybeUninit,
}; };
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
/// Information required for the sound usage of a `MemPlace`. /// Information required for the sound usage of a `MemPlace`.
pub enum MemPlaceMeta<Tag = ()> { pub enum MemPlaceMeta<Tag = AllocId> {
/// The unsized payload (e.g. length for slices or vtable pointer for trait objects). /// The unsized payload (e.g. length for slices or vtable pointer for trait objects).
Meta(Scalar<Tag>), Meta(Scalar<Tag>),
/// `Sized` types or unsized `extern type` /// `Sized` types or unsized `extern type`
@ -35,7 +34,18 @@ pub enum MemPlaceMeta<Tag = ()> {
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlaceMeta, 24); //FIXME rustc_data_structures::static_assert_size!(MemPlaceMeta, 24);
impl<Tag: Provenance> std::fmt::Debug for MemPlaceMeta<Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use MemPlaceMeta::*;
match self {
Meta(s) => f.debug_tuple("Meta").field(s).finish(),
None => f.debug_tuple("None").finish(),
Poison => f.debug_tuple("Poison").finish(),
}
}
}
impl<Tag> MemPlaceMeta<Tag> { impl<Tag> MemPlaceMeta<Tag> {
pub fn unwrap_meta(self) -> Scalar<Tag> { pub fn unwrap_meta(self) -> Scalar<Tag> {
@ -53,21 +63,22 @@ impl<Tag> MemPlaceMeta<Tag> {
} }
} }
pub fn erase_tag(self) -> MemPlaceMeta<()> { pub fn erase_for_fmt(self) -> MemPlaceMeta
where
Tag: Provenance,
{
match self { match self {
Self::Meta(s) => MemPlaceMeta::Meta(s.erase_tag()), Self::Meta(s) => MemPlaceMeta::Meta(s.erase_for_fmt()),
Self::None => MemPlaceMeta::None, Self::None => MemPlaceMeta::None,
Self::Poison => MemPlaceMeta::Poison, Self::Poison => MemPlaceMeta::Poison,
} }
} }
} }
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
pub struct MemPlace<Tag = ()> { pub struct MemPlace<Tag = AllocId> {
/// A place may have an integral pointer for ZSTs, and since it might /// The pointer can be a pure integer, with the `None` tag.
/// be turned back into a reference before ever being dereferenced. pub ptr: Pointer<Option<Tag>>,
/// However, it may never be uninit.
pub ptr: Scalar<Tag>,
pub align: Align, pub align: Align,
/// Metadata for unsized places. Interpretation is up to the type. /// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types /// Must not be present for sized types, but can be missing for unsized types
@ -76,10 +87,21 @@ pub struct MemPlace<Tag = ()> {
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MemPlace, 56); //FIXME rustc_data_structures::static_assert_size!(MemPlace, 56);
#[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable)] impl<Tag: Provenance> std::fmt::Debug for MemPlace<Tag> {
pub enum Place<Tag = ()> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let MemPlace { ptr, align, meta } = self;
f.debug_struct("MemPlace")
.field("ptr", ptr)
.field("align", align)
.field("meta", meta)
.finish()
}
}
#[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
pub enum Place<Tag = AllocId> {
/// A place referring to a value allocated in the `Memory` system. /// A place referring to a value allocated in the `Memory` system.
Ptr(MemPlace<Tag>), Ptr(MemPlace<Tag>),
@ -89,16 +111,35 @@ pub enum Place<Tag = ()> {
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(Place, 64); //FIXME rustc_data_structures::static_assert_size!(Place, 64);
#[derive(Copy, Clone, Debug)] impl<Tag: Provenance> std::fmt::Debug for Place<Tag> {
pub struct PlaceTy<'tcx, Tag = ()> { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Place::*;
match self {
Ptr(p) => f.debug_tuple("Ptr").field(p).finish(),
Local { frame, local } => {
f.debug_struct("Local").field("frame", frame).field("local", local).finish()
}
}
}
}
#[derive(Copy, Clone)]
pub struct PlaceTy<'tcx, Tag = AllocId> {
place: Place<Tag>, // Keep this private; it helps enforce invariants. place: Place<Tag>, // Keep this private; it helps enforce invariants.
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80); //FIXME rustc_data_structures::static_assert_size!(PlaceTy<'_>, 80);
impl<'tcx, Tag: Provenance> std::fmt::Debug for PlaceTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let PlaceTy { place, layout } = self;
f.debug_struct("PlaceTy").field("place", place).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> { impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
type Target = Place<Tag>; type Target = Place<Tag>;
@ -109,14 +150,21 @@ impl<'tcx, Tag> std::ops::Deref for PlaceTy<'tcx, Tag> {
} }
/// A MemPlace with its layout. Constructing it is only possible in this module. /// A MemPlace with its layout. Constructing it is only possible in this module.
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] #[derive(Copy, Clone, Hash, Eq, PartialEq)]
pub struct MPlaceTy<'tcx, Tag = ()> { pub struct MPlaceTy<'tcx, Tag = AllocId> {
mplace: MemPlace<Tag>, mplace: MemPlace<Tag>,
pub layout: TyAndLayout<'tcx>, pub layout: TyAndLayout<'tcx>,
} }
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72); //FIXME rustc_data_structures::static_assert_size!(MPlaceTy<'_>, 72);
impl<'tcx, Tag: Provenance> std::fmt::Debug for MPlaceTy<'tcx, Tag> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let MPlaceTy { mplace, layout } = self;
f.debug_struct("MPlaceTy").field("mplace", mplace).field("layout", layout).finish()
}
}
impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> { impl<'tcx, Tag> std::ops::Deref for MPlaceTy<'tcx, Tag> {
type Target = MemPlace<Tag>; type Target = MemPlace<Tag>;
@ -134,34 +182,32 @@ impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
} }
impl<Tag> MemPlace<Tag> { impl<Tag> MemPlace<Tag> {
/// Replace ptr tag, maintain vtable tag (if any)
#[inline] #[inline]
pub fn replace_tag(self, new_tag: Tag) -> Self { pub fn erase_for_fmt(self) -> MemPlace
MemPlace { ptr: self.ptr.erase_tag().with_tag(new_tag), align: self.align, meta: self.meta } where
Tag: Provenance,
{
MemPlace {
ptr: self.ptr.map_erase_for_fmt(),
align: self.align,
meta: self.meta.erase_for_fmt(),
} }
#[inline]
pub fn erase_tag(self) -> MemPlace {
MemPlace { ptr: self.ptr.erase_tag(), align: self.align, meta: self.meta.erase_tag() }
} }
#[inline(always)] #[inline(always)]
fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self { pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
MemPlace { ptr, align, meta: MemPlaceMeta::None } MemPlace { ptr, align, meta: MemPlaceMeta::None }
} }
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
Self::from_scalar_ptr(ptr.into(), align)
}
/// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space. /// Turn a mplace into a (thin or wide) pointer, as a reference, pointing to the same space.
/// This is the inverse of `ref_to_mplace`. /// This is the inverse of `ref_to_mplace`.
#[inline(always)] #[inline(always)]
pub fn to_ref(self) -> Immediate<Tag> { pub fn to_ref(self, cx: &impl HasDataLayout) -> Immediate<Tag> {
match self.meta { match self.meta {
MemPlaceMeta::None => Immediate::Scalar(self.ptr.into()), MemPlaceMeta::None => Immediate::from(Scalar::from_maybe_pointer(self.ptr, cx)),
MemPlaceMeta::Meta(meta) => Immediate::ScalarPair(self.ptr.into(), meta.into()), MemPlaceMeta::Meta(meta) => {
Immediate::ScalarPair(Scalar::from_maybe_pointer(self.ptr, cx).into(), meta.into())
}
MemPlaceMeta::Poison => bug!( MemPlaceMeta::Poison => bug!(
"MPlaceTy::dangling may never be used to produce a \ "MPlaceTy::dangling may never be used to produce a \
place that will have the address of its pointee taken" place that will have the address of its pointee taken"
@ -177,7 +223,7 @@ impl<Tag> MemPlace<Tag> {
cx: &impl HasDataLayout, cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Self> { ) -> InterpResult<'tcx, Self> {
Ok(MemPlace { Ok(MemPlace {
ptr: self.ptr.ptr_offset(offset, cx)?, ptr: self.ptr.offset(offset, cx)?,
align: self.align.restrict_for_offset(offset), align: self.align.restrict_for_offset(offset),
meta, meta,
}) })
@ -187,19 +233,13 @@ impl<Tag> MemPlace<Tag> {
impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> { impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
/// Produces a MemPlace that works for ZST but nothing else /// Produces a MemPlace that works for ZST but nothing else
#[inline] #[inline]
pub fn dangling(layout: TyAndLayout<'tcx>, cx: &impl HasDataLayout) -> Self { pub fn dangling(layout: TyAndLayout<'tcx>) -> Self {
let align = layout.align.abi; let align = layout.align.abi;
let ptr = Scalar::from_machine_usize(align.bytes(), cx); let ptr = Pointer::new(None, Size::from_bytes(align.bytes())); // no provenance, absolute address
// `Poison` this to make sure that the pointer value `ptr` is never observable by the program. // `Poison` this to make sure that the pointer value `ptr` is never observable by the program.
MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout } MPlaceTy { mplace: MemPlace { ptr, align, meta: MemPlaceMeta::Poison }, layout }
} }
/// Replace ptr tag, maintain vtable tag (if any)
#[inline]
pub fn replace_tag(&self, new_tag: Tag) -> Self {
MPlaceTy { mplace: self.mplace.replace_tag(new_tag), layout: self.layout }
}
#[inline] #[inline]
pub fn offset( pub fn offset(
&self, &self,
@ -212,12 +252,15 @@ impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
} }
#[inline] #[inline]
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyAndLayout<'tcx>) -> Self { fn from_aligned_ptr(ptr: Pointer<Option<Tag>>, layout: TyAndLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout } MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
} }
#[inline] #[inline]
pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> { pub(super) fn len(&self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64>
where
Tag: Provenance,
{
if self.layout.is_unsized() { if self.layout.is_unsized() {
// We need to consult `meta` metadata // We need to consult `meta` metadata
match self.layout.ty.kind() { match self.layout.ty.kind() {
@ -244,19 +287,14 @@ impl<'tcx, Tag: Copy> MPlaceTy<'tcx, Tag> {
} }
// These are defined here because they produce a place. // These are defined here because they produce a place.
impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> { impl<'tcx, Tag: Copy> OpTy<'tcx, Tag> {
#[inline(always)] #[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to /// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back. /// read from the resulting mplace, not to get its address back.
pub fn try_as_mplace( pub fn try_as_mplace(&self) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
&self,
cx: &impl HasDataLayout,
) -> Result<MPlaceTy<'tcx, Tag>, ImmTy<'tcx, Tag>> {
match **self { match **self {
Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }), Operand::Indirect(mplace) => Ok(MPlaceTy { mplace, layout: self.layout }),
Operand::Immediate(_) if self.layout.is_zst() => { Operand::Immediate(_) if self.layout.is_zst() => Ok(MPlaceTy::dangling(self.layout)),
Ok(MPlaceTy::dangling(self.layout, cx))
}
Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)), Operand::Immediate(imm) => Err(ImmTy::from_immediate(imm, self.layout)),
} }
} }
@ -264,12 +302,15 @@ impl<'tcx, Tag: Debug + Copy> OpTy<'tcx, Tag> {
#[inline(always)] #[inline(always)]
/// Note: do not call `as_ref` on the resulting place. This function should only be used to /// Note: do not call `as_ref` on the resulting place. This function should only be used to
/// read from the resulting mplace, not to get its address back. /// read from the resulting mplace, not to get its address back.
pub fn assert_mem_place(&self, cx: &impl HasDataLayout) -> MPlaceTy<'tcx, Tag> { pub fn assert_mem_place(&self) -> MPlaceTy<'tcx, Tag>
self.try_as_mplace(cx).unwrap() where
Tag: Provenance,
{
self.try_as_mplace().unwrap()
} }
} }
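
try_as_mplace no longer needs a &impl HasDataLayout because MPlaceTy::dangling can now build the ZST place from the layout alone: the pointer is a provenance-free absolute address equal to the required alignment. A standalone sketch of that idea (stand-in Pointer type, not the rustc one):

// A dangling-but-aligned place for a ZST: the "pointer" is just the alignment
// used as an absolute address, with no provenance attached.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Pointer<Tag> {
    provenance: Tag,
    offset: u64,
}

fn dangling_zst_ptr(align_bytes: u64) -> Pointer<Option<()>> {
    Pointer { provenance: None, offset: align_bytes }
}

fn main() {
    // A zero-sized value with alignment 4 gets address 4: non-null and aligned,
    // but never dereferenced.
    assert_eq!(dangling_zst_ptr(4), Pointer { provenance: None, offset: 4 });
}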
impl<Tag: Debug> Place<Tag> { impl<Tag: Provenance> Place<Tag> {
#[inline] #[inline]
pub fn assert_mem_place(self) -> MemPlace<Tag> { pub fn assert_mem_place(self) -> MemPlace<Tag> {
match self { match self {
@ -279,7 +320,7 @@ impl<Tag: Debug> Place<Tag> {
} }
} }
impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> { impl<'tcx, Tag: Provenance> PlaceTy<'tcx, Tag> {
#[inline] #[inline]
pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> { pub fn assert_mem_place(self) -> MPlaceTy<'tcx, Tag> {
MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout } MPlaceTy { mplace: self.place.assert_mem_place(), layout: self.layout }
@ -290,7 +331,7 @@ impl<'tcx, Tag: Debug> PlaceTy<'tcx, Tag> {
impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M> impl<'mir, 'tcx: 'mir, Tag, M> InterpCx<'mir, 'tcx, M>
where where
// FIXME: Working around https://github.com/rust-lang/rust/issues/54385 // FIXME: Working around https://github.com/rust-lang/rust/issues/54385
Tag: Debug + Copy + Eq + Hash + 'static, Tag: Provenance + Eq + Hash + 'static,
M: Machine<'mir, 'tcx, PointerTag = Tag>, M: Machine<'mir, 'tcx, PointerTag = Tag>,
{ {
/// Take a value, which represents a (thin or wide) reference, and make it a place. /// Take a value, which represents a (thin or wide) reference, and make it a place.
@ -307,14 +348,12 @@ where
val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty; val.layout.ty.builtin_deref(true).expect("`ref_to_mplace` called on non-ptr type").ty;
let layout = self.layout_of(pointee_type)?; let layout = self.layout_of(pointee_type)?;
let (ptr, meta) = match **val { let (ptr, meta) = match **val {
Immediate::Scalar(ptr) => (ptr.check_init()?, MemPlaceMeta::None), Immediate::Scalar(ptr) => (ptr, MemPlaceMeta::None),
Immediate::ScalarPair(ptr, meta) => { Immediate::ScalarPair(ptr, meta) => (ptr, MemPlaceMeta::Meta(meta.check_init()?)),
(ptr.check_init()?, MemPlaceMeta::Meta(meta.check_init()?))
}
}; };
let mplace = MemPlace { let mplace = MemPlace {
ptr, ptr: self.scalar_to_ptr(ptr.check_init()?),
// We could use the run-time alignment here. For now, we do not, because // We could use the run-time alignment here. For now, we do not, because
// the point of tracking the alignment here is to make sure that the *static* // the point of tracking the alignment here is to make sure that the *static*
// alignment information emitted with the loads is correct. The run-time // alignment information emitted with the loads is correct. The run-time
@ -333,8 +372,9 @@ where
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> { ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let val = self.read_immediate(src)?; let val = self.read_immediate(src)?;
trace!("deref to {} on {:?}", val.layout.ty, *val); trace!("deref to {} on {:?}", val.layout.ty, *val);
let place = self.ref_to_mplace(&val)?; let mplace = self.ref_to_mplace(&val)?;
self.mplace_access_checked(place, None) self.check_mplace_access(mplace)?;
Ok(mplace)
} }
#[inline] #[inline]
@ -359,38 +399,20 @@ where
self.memory.get_mut(place.ptr, size, place.align) self.memory.get_mut(place.ptr, size, place.align)
} }
/// Return the "access-checked" version of this `MPlace`, where for non-ZST /// Check if this mplace is dereferencable and sufficiently aligned.
/// this is definitely a `Pointer`. pub fn check_mplace_access(&self, mplace: MPlaceTy<'tcx, M::PointerTag>) -> InterpResult<'tcx> {
///
/// `force_align` must only be used when correct alignment does not matter,
/// like in Stacked Borrows.
pub fn mplace_access_checked(
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
force_align: Option<Align>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
let (size, align) = self let (size, align) = self
.size_and_align_of_mplace(&place)? .size_and_align_of_mplace(&mplace)?
.unwrap_or((place.layout.size, place.layout.align.abi)); .unwrap_or((mplace.layout.size, mplace.layout.align.abi));
assert!(place.mplace.align <= align, "dynamic alignment less strict than static one?"); assert!(mplace.mplace.align <= align, "dynamic alignment less strict than static one?");
let align = force_align.unwrap_or(align); let align = M::enforce_alignment(&self.memory.extra).then_some(align);
// Record new (stricter, unless forced) alignment requirement in place. self.memory.check_ptr_access_align(
place.mplace.align = align; mplace.ptr,
// When dereferencing a pointer, it must be non-null, aligned, and live. size,
if let Some(ptr) = self.memory.check_ptr_access(place.ptr, size, align)? { align.unwrap_or(Align::ONE),
place.mplace.ptr = ptr.into(); CheckInAllocMsg::MemoryAccessTest, // FIXME sth more specific?
} )?;
Ok(place) Ok(())
}
/// Force `place.ptr` to a `Pointer`.
/// Can be helpful to avoid lots of `force_ptr` calls later, if this place is used a lot.
pub(super) fn force_mplace_ptr(
&self,
mut place: MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, MPlaceTy<'tcx, M::PointerTag>> {
place.mplace.ptr = self.force_ptr(place.mplace.ptr)?.into();
Ok(place)
} }
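
Where mplace_access_checked used to normalize place.ptr as a side effect, check_mplace_access now only checks. A standalone sketch of a "non-null and sufficiently aligned" check on a raw address (an assumption of the sketch: addresses are treated as absolute; the real check_ptr_access_align additionally verifies that pointers with provenance stay in bounds of their allocation):

fn check_addr(addr: u64, align: u64) -> Result<(), &'static str> {
    assert!(align.is_power_of_two());
    if addr == 0 {
        return Err("null pointer");
    }
    if addr % align != 0 {
        return Err("misaligned pointer");
    }
    Ok(())
}

fn main() {
    assert!(check_addr(8, 8).is_ok());
    assert_eq!(check_addr(0, 1), Err("null pointer"));
    assert_eq!(check_addr(6, 4), Err("misaligned pointer"));
}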
/// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is /// Offset a pointer to project to a field of a struct/union. Unlike `place_field`, this is
@ -558,10 +580,7 @@ where
let layout = self.layout_of(self.tcx.types.usize)?; let layout = self.layout_of(self.tcx.types.usize)?;
let n = self.access_local(self.frame(), local, Some(layout))?; let n = self.access_local(self.frame(), local, Some(layout))?;
let n = self.read_scalar(&n)?; let n = self.read_scalar(&n)?;
let n = u64::try_from( let n = n.to_machine_usize(self)?;
self.force_bits(n.check_init()?, self.tcx.data_layout.pointer_size)?,
)
.unwrap();
self.mplace_index(base, n)? self.mplace_index(base, n)?
} }
@ -1020,7 +1039,7 @@ where
kind: MemoryKind<M::MemoryKind>, kind: MemoryKind<M::MemoryKind>,
) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> { ) -> InterpResult<'static, MPlaceTy<'tcx, M::PointerTag>> {
let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?; let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
} }
/// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation. /// Returns a wide MPlace of type `&'static [mut] str` to a new 1-aligned allocation.
@ -1125,7 +1144,7 @@ where
let _ = self.tcx.global_alloc(raw.alloc_id); let _ = self.tcx.global_alloc(raw.alloc_id);
let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?; let ptr = self.global_base_pointer(Pointer::from(raw.alloc_id))?;
let layout = self.layout_of(raw.ty)?; let layout = self.layout_of(raw.ty)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) Ok(MPlaceTy::from_aligned_ptr(ptr.into(), layout))
} }
/// Turn a place with a `dyn Trait` type into a place with the actual dynamic type. /// Turn a place with a `dyn Trait` type into a place with the actual dynamic type.
@ -1134,7 +1153,7 @@ where
&self, &self,
mplace: &MPlaceTy<'tcx, M::PointerTag>, mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> { ) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = mplace.vtable(); // also sanity checks the type let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?; let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
View file
@ -240,7 +240,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// of the first element. // of the first element.
let elem_size = first.layout.size; let elem_size = first.layout.size;
let first_ptr = first.ptr; let first_ptr = first.ptr;
let rest_ptr = first_ptr.ptr_offset(elem_size, self)?; let rest_ptr = first_ptr.offset(elem_size, self)?;
self.memory.copy_repeatedly( self.memory.copy_repeatedly(
first_ptr, first_ptr,
first.align, first.align,
@ -264,11 +264,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
AddressOf(_, place) | Ref(_, _, place) => { AddressOf(_, place) | Ref(_, _, place) => {
let src = self.eval_place(place)?; let src = self.eval_place(place)?;
let place = self.force_allocation(&src)?; let place = self.force_allocation(&src)?;
if place.layout.size.bytes() > 0 { self.write_immediate(place.to_ref(self), &dest)?;
// definitely not a ZST
assert!(place.ptr.is_ptr(), "non-ZST places should be normalized to `Pointer`");
}
self.write_immediate(place.to_ref(), &dest)?;
} }
NullaryOp(mir::NullOp::Box, _) => { NullaryOp(mir::NullOp::Box, _) => {
View file
@ -12,8 +12,8 @@ use rustc_target::abi::{self, LayoutOf as _};
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
use super::{ use super::{
FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, StackPopCleanup, FnVal, ImmTy, InterpCx, InterpResult, MPlaceTy, Machine, OpTy, PlaceTy, Scalar,
StackPopUnwind, StackPopCleanup, StackPopUnwind,
}; };
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> { impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
@ -72,8 +72,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() { let (fn_val, abi, caller_can_unwind) = match *func.layout.ty.kind() {
ty::FnPtr(sig) => { ty::FnPtr(sig) => {
let caller_abi = sig.abi(); let caller_abi = sig.abi();
let fn_ptr = self.read_scalar(&func)?.check_init()?; let fn_ptr = self.read_pointer(&func)?;
let fn_val = self.memory.get_fn(fn_ptr)?; let fn_val = self.memory.get_fn(fn_ptr.into())?;
( (
fn_val, fn_val,
caller_abi, caller_abi,
@ -454,11 +454,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
} }
None => { None => {
// Unsized self. // Unsized self.
args[0].assert_mem_place(self) args[0].assert_mem_place()
} }
}; };
// Find and consult vtable // Find and consult vtable
let vtable = receiver_place.vtable(); let vtable = self.scalar_to_ptr(receiver_place.vtable());
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?; let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
// `*mut receiver_place.layout.ty` is almost the layout that we // `*mut receiver_place.layout.ty` is almost the layout that we
@ -468,8 +468,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty); let receiver_ptr_ty = self.tcx.mk_mut_ptr(receiver_place.layout.ty);
let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?; let this_receiver_ptr = self.layout_of(receiver_ptr_ty)?.field(self, 0)?;
// Adjust receiver argument. // Adjust receiver argument.
args[0] = args[0] = OpTy::from(ImmTy::from_immediate(
OpTy::from(ImmTy::from_immediate(receiver_place.ptr.into(), this_receiver_ptr)); Scalar::from_maybe_pointer(receiver_place.ptr, self).into(),
this_receiver_ptr,
));
trace!("Patched self operand to {:#?}", args[0]); trace!("Patched self operand to {:#?}", args[0]);
// recurse with concrete function // recurse with concrete function
self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind) self.eval_fn_call(fn_val, caller_abi, &args, ret, unwind)
@ -499,12 +501,12 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
}; };
let arg = ImmTy::from_immediate( let arg = ImmTy::from_immediate(
place.to_ref(), place.to_ref(self),
self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?, self.layout_of(self.tcx.mk_mut_ptr(place.layout.ty))?,
); );
let ty = self.tcx.mk_unit(); // return type is () let ty = self.tcx.mk_unit(); // return type is ()
let dest = MPlaceTy::dangling(self.layout_of(ty)?, self); let dest = MPlaceTy::dangling(self.layout_of(ty)?);
self.eval_fn_call( self.eval_fn_call(
FnVal::Instance(instance), FnVal::Instance(instance),
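
to_ref and the virtual-call receiver above go through Scalar::from_maybe_pointer: a pointer with provenance stays a pointer scalar, one without collapses back into plain address bits. A standalone sketch of that direction of the conversion (stand-in types only; the real function also consults the data layout for the pointer size):

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct AllocId(u64);

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
struct Pointer<Tag> {
    provenance: Tag,
    offset: u64,
}

#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum Scalar {
    Int(u64),
    Ptr(Pointer<AllocId>),
}

fn from_maybe_pointer(ptr: Pointer<Option<AllocId>>) -> Scalar {
    match ptr.provenance {
        Some(prov) => Scalar::Ptr(Pointer { provenance: prov, offset: ptr.offset }),
        None => Scalar::Int(ptr.offset), // no provenance: just the address bits
    }
}

fn main() {
    assert_eq!(from_maybe_pointer(Pointer { provenance: None, offset: 42 }), Scalar::Int(42));
}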
View file
@ -1,6 +1,6 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar}; use rustc_middle::mir::interpret::{InterpResult, Pointer, PointerArithmetic};
use rustc_middle::ty::{ use rustc_middle::ty::{
self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN, self, Ty, COMMON_VTABLE_ENTRIES, COMMON_VTABLE_ENTRIES_ALIGN,
COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE, COMMON_VTABLE_ENTRIES_DROPINPLACE, COMMON_VTABLE_ENTRIES_SIZE,
@ -42,23 +42,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// corresponds to the first method declared in the trait of the provided vtable. /// corresponds to the first method declared in the trait of the provided vtable.
pub fn get_vtable_slot( pub fn get_vtable_slot(
&self, &self,
vtable: Scalar<M::PointerTag>, vtable: Pointer<Option<M::PointerTag>>,
idx: u64, idx: u64,
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> { ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size(); let ptr_size = self.pointer_size();
let vtable_slot = vtable.ptr_offset(ptr_size * idx, self)?; let vtable_slot = vtable.offset(ptr_size * idx, self)?;
let vtable_slot = self let vtable_slot = self
.memory .memory
.get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)? .get(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST"); .expect("cannot be a ZST");
let fn_ptr = vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?; let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
self.memory.get_fn(fn_ptr) self.memory.get_fn(fn_ptr)
} }
/// Returns the drop fn instance as well as the actual dynamic type. /// Returns the drop fn instance as well as the actual dynamic type.
pub fn read_drop_type_from_vtable( pub fn read_drop_type_from_vtable(
&self, &self,
vtable: Scalar<M::PointerTag>, vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> { ) -> InterpResult<'tcx, (ty::Instance<'tcx>, Ty<'tcx>)> {
let pointer_size = self.pointer_size(); let pointer_size = self.pointer_size();
// We don't care about the pointee type; we just want a pointer. // We don't care about the pointee type; we just want a pointer.
@ -77,7 +77,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.check_init()?; .check_init()?;
// We *need* an instance here, no other kind of function value, to be able // We *need* an instance here, no other kind of function value, to be able
// to determine the type. // to determine the type.
let drop_instance = self.memory.get_fn(drop_fn)?.as_instance()?; let drop_instance = self.memory.get_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
trace!("Found drop fn: {:?}", drop_instance); trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx); let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig); let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
@ -93,7 +93,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn read_size_and_align_from_vtable( pub fn read_size_and_align_from_vtable(
&self, &self,
vtable: Scalar<M::PointerTag>, vtable: Pointer<Option<M::PointerTag>>,
) -> InterpResult<'tcx, (Size, Align)> { ) -> InterpResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size(); let pointer_size = self.pointer_size();
// We check for `size = 3 * ptr_size`, which covers the drop fn (unused here), // We check for `size = 3 * ptr_size`, which covers the drop fn (unused here),
@ -109,11 +109,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let size = vtable let size = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())? .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_SIZE).unwrap())?
.check_init()?; .check_init()?;
let size = u64::try_from(self.force_bits(size, pointer_size)?).unwrap(); let size = size.to_machine_usize(self)?;
let align = vtable let align = vtable
.read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())? .read_ptr_sized(pointer_size * u64::try_from(COMMON_VTABLE_ENTRIES_ALIGN).unwrap())?
.check_init()?; .check_init()?;
let align = u64::try_from(self.force_bits(align, pointer_size)?).unwrap(); let align = align.to_machine_usize(self)?;
let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?; let align = Align::from_bytes(align).map_err(|e| err_ub!(InvalidVtableAlignment(e)))?;
if size >= self.tcx.data_layout.obj_size_bound() { if size >= self.tcx.data_layout.obj_size_bound() {
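
The size and align vtable entries are now read as machine usizes directly instead of going through force_bits. A standalone sketch of the slot arithmetic, assuming a 64-bit target and the entry order implied by the constants above (drop_in_place, size, align):

// Pick a vtable slot by index; offsets are in bytes. The real code derives the
// indices from COMMON_VTABLE_ENTRIES_* rather than hard-coding them.
const PTR_SIZE: u64 = 8;

fn vtable_slot_offset(idx: u64) -> u64 {
    PTR_SIZE * idx
}

fn main() {
    // With the layout [drop_in_place, size, align], size lives at slot 1 and
    // align at slot 2.
    assert_eq!(vtable_slot_offset(1), 8);
    assert_eq!(vtable_slot_offset(2), 16);
}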
View file
@ -21,7 +21,7 @@ use std::hash::Hash;
use super::{ use super::{
alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine, alloc_range, CheckInAllocMsg, GlobalAlloc, InterpCx, InterpResult, MPlaceTy, Machine,
MemPlaceMeta, OpTy, Scalar, ScalarMaybeUninit, ValueVisitor, MemPlaceMeta, OpTy, ScalarMaybeUninit, ValueVisitor,
}; };
macro_rules! throw_validation_failure { macro_rules! throw_validation_failure {
@ -324,7 +324,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env); let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() { match tail.kind() {
ty::Dynamic(..) => { ty::Dynamic(..) => {
let vtable = meta.unwrap_meta(); let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines. // Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!( try_validation!(
self.ecx.memory.check_ptr_access_align( self.ecx.memory.check_ptr_access_align(
@ -448,17 +448,10 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
if let Some(ref mut ref_tracking) = self.ref_tracking { if let Some(ref mut ref_tracking) = self.ref_tracking {
// Proceed recursively even for ZST, no reason to skip them! // Proceed recursively even for ZST, no reason to skip them!
// `!` is a ZST and we want to validate it. // `!` is a ZST and we want to validate it.
// Normalize before handing `place` to tracking because that will
// check for duplicates.
let place = if size.bytes() > 0 {
self.ecx.force_mplace_ptr(place).expect("we already bounds-checked")
} else {
place
};
// Skip validation entirely for some external statics // Skip validation entirely for some external statics
if let Scalar::Ptr(ptr) = place.ptr { if let Ok((alloc_id, _offset, _ptr)) = self.ecx.memory.ptr_try_get_alloc(place.ptr) {
// not a ZST // not a ZST
let alloc_kind = self.ecx.tcx.get_global_alloc(ptr.alloc_id); let alloc_kind = self.ecx.tcx.get_global_alloc(alloc_id);
if let Some(GlobalAlloc::Static(did)) = alloc_kind { if let Some(GlobalAlloc::Static(did)) = alloc_kind {
assert!(!self.ecx.tcx.is_thread_local_static(did)); assert!(!self.ecx.tcx.is_thread_local_static(did));
assert!(self.ecx.tcx.is_static(did)); assert!(self.ecx.tcx.is_static(did));
@ -601,7 +594,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// message below. // message below.
let value = value.to_scalar_or_uninit(); let value = value.to_scalar_or_uninit();
let _fn = try_validation!( let _fn = try_validation!(
value.check_init().and_then(|ptr| self.ecx.memory.get_fn(ptr)), value.check_init().and_then(|ptr| self.ecx.memory.get_fn(self.ecx.scalar_to_ptr(ptr))),
self.path, self.path,
err_ub!(DanglingIntPointer(..)) | err_ub!(DanglingIntPointer(..)) |
err_ub!(InvalidFunctionPointer(..)) | err_ub!(InvalidFunctionPointer(..)) |
@ -668,7 +661,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
Err(ptr) => { Err(ptr) => {
if lo == 1 && hi == max_hi { if lo == 1 && hi == max_hi {
// Only null is the niche. So make sure the ptr is NOT null. // Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.memory.ptr_may_be_null(ptr) { if self.ecx.memory.ptr_may_be_null(ptr.into()) {
throw_validation_failure!(self.path, throw_validation_failure!(self.path,
{ "a potentially null pointer" } { "a potentially null pointer" }
expected { expected {
@ -832,7 +825,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
match op.layout.ty.kind() { match op.layout.ty.kind() {
ty::Str => { ty::Str => {
let mplace = op.assert_mem_place(self.ecx); // strings are never immediate let mplace = op.assert_mem_place(); // strings are never immediate
let len = mplace.len(self.ecx)?; let len = mplace.len(self.ecx)?;
try_validation!( try_validation!(
self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)), self.ecx.memory.read_bytes(mplace.ptr, Size::from_bytes(len)),
@ -853,7 +846,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// Optimized handling for arrays of integer/float type. // Optimized handling for arrays of integer/float type.
// Arrays cannot be immediate, slices are never immediate. // Arrays cannot be immediate, slices are never immediate.
let mplace = op.assert_mem_place(self.ecx); let mplace = op.assert_mem_place();
// This is the length of the array/slice. // This is the length of the array/slice.
let len = mplace.len(self.ecx)?; let len = mplace.len(self.ecx)?;
// This is the element type size. // This is the element type size.
@ -940,9 +933,6 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Construct a visitor // Construct a visitor
let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self }; let mut visitor = ValidityVisitor { path, ref_tracking, ctfe_mode, ecx: self };
// Try to cast to ptr *once* instead of all the time.
let op = self.force_op_ptr(&op).unwrap_or(*op);
// Run it. // Run it.
match visitor.visit_value(&op) { match visitor.visit_value(&op) {
Ok(()) => Ok(()), Ok(()) => Ok(()),
View file
@ -211,7 +211,8 @@ macro_rules! make_value_visitor {
// If it is a trait object, switch to the real type that was used to create it. // If it is a trait object, switch to the real type that was used to create it.
ty::Dynamic(..) => { ty::Dynamic(..) => {
// immediate trait objects are not a thing // immediate trait objects are not a thing
let dest = v.to_op(self.ecx())?.assert_mem_place(self.ecx()); let op = v.to_op(self.ecx())?;
let dest = op.assert_mem_place();
let inner = self.ecx().unpack_dyn_trait(&dest)?.1; let inner = self.ecx().unpack_dyn_trait(&dest)?.1;
trace!("walk_value: dyn object layout: {:#?}", inner.layout); trace!("walk_value: dyn object layout: {:#?}", inner.layout);
// recurse with the inner type // recurse with the inner type
@ -241,7 +242,8 @@ macro_rules! make_value_visitor {
}, },
FieldsShape::Array { .. } => { FieldsShape::Array { .. } => {
// Let's get an mplace first. // Let's get an mplace first.
let mplace = v.to_op(self.ecx())?.assert_mem_place(self.ecx()); let op = v.to_op(self.ecx())?;
let mplace = op.assert_mem_place();
// Now we can go over all the fields. // Now we can go over all the fields.
// This uses the *run-time length*, i.e., if we are a slice, // This uses the *run-time length*, i.e., if we are a slice,
// the dynamic info from the metadata is used. // the dynamic info from the metadata is used.
View file
@ -403,7 +403,7 @@ fn collect_items_rec<'tcx>(
recursion_depth_reset = None; recursion_depth_reset = None;
if let Ok(alloc) = tcx.eval_static_initializer(def_id) { if let Ok(alloc) = tcx.eval_static_initializer(def_id) {
for &((), id) in alloc.relocations().values() { for &id in alloc.relocations().values() {
collect_miri(tcx, id, &mut neighbors); collect_miri(tcx, id, &mut neighbors);
} }
} }
@ -1369,7 +1369,7 @@ fn collect_miri<'tcx>(
} }
GlobalAlloc::Memory(alloc) => { GlobalAlloc::Memory(alloc) => {
trace!("collecting {:?} with {:#?}", alloc_id, alloc); trace!("collecting {:?} with {:#?}", alloc_id, alloc);
for &((), inner) in alloc.relocations().values() { for &inner in alloc.relocations().values() {
rustc_data_structures::stack::ensure_sufficient_stack(|| { rustc_data_structures::stack::ensure_sufficient_stack(|| {
collect_miri(tcx, inner, output); collect_miri(tcx, inner, output);
}); });
@ -1402,9 +1402,9 @@ fn collect_const_value<'tcx>(
output: &mut Vec<Spanned<MonoItem<'tcx>>>, output: &mut Vec<Spanned<MonoItem<'tcx>>>,
) { ) {
match value { match value {
ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.alloc_id, output), ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => { ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
for &((), id) in alloc.relocations().values() { for &id in alloc.relocations().values() {
collect_miri(tcx, id, output); collect_miri(tcx, id, output);
} }
} }
View file
@ -31,9 +31,8 @@ use rustc_trait_selection::traits;
use crate::const_eval::ConstEvalErr; use crate::const_eval::ConstEvalErr;
use crate::interpret::{ use crate::interpret::{
self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy, self, compile_time_machine, AllocId, Allocation, ConstValue, CtfeValidationMode, Frame, ImmTy,
Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, Memory, MemoryKind, OpTy, Immediate, InterpCx, InterpResult, LocalState, LocalValue, MemPlace, MemoryKind, OpTy,
Operand as InterpOperand, PlaceTy, Pointer, Scalar, ScalarMaybeUninit, StackPopCleanup, Operand as InterpOperand, PlaceTy, Scalar, ScalarMaybeUninit, StackPopCleanup, StackPopUnwind,
StackPopUnwind,
}; };
use crate::transform::MirPass; use crate::transform::MirPass;
@ -157,7 +156,7 @@ impl<'tcx> MirPass<'tcx> for ConstProp {
struct ConstPropMachine<'mir, 'tcx> { struct ConstPropMachine<'mir, 'tcx> {
/// The virtual call stack. /// The virtual call stack.
stack: Vec<Frame<'mir, 'tcx, (), ()>>, stack: Vec<Frame<'mir, 'tcx>>,
/// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end. /// `OnlyInsideOwnBlock` locals that were written in the current block get erased at the end.
written_only_inside_own_block_locals: FxHashSet<Local>, written_only_inside_own_block_locals: FxHashSet<Local>,
/// Locals that need to be cleared after every block terminates. /// Locals that need to be cleared after every block terminates.
@ -223,10 +222,6 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for ConstPropMachine<'mir, 'tcx>
bug!("panics terminators are not evaluated in ConstProp") bug!("panics terminators are not evaluated in ConstProp")
} }
fn ptr_to_int(_mem: &Memory<'mir, 'tcx, Self>, _ptr: Pointer) -> InterpResult<'tcx, u64> {
throw_unsup!(ReadPointerAsBytes)
}
fn binary_ptr_op( fn binary_ptr_op(
_ecx: &InterpCx<'mir, 'tcx, Self>, _ecx: &InterpCx<'mir, 'tcx, Self>,
_bin_op: BinOp, _bin_op: BinOp,
@ -759,8 +754,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
} }
}; };
let arg_value = let arg_value = const_arg.to_scalar()?.to_bits(const_arg.layout.size)?;
this.ecx.force_bits(const_arg.to_scalar()?, const_arg.layout.size)?;
let dest = this.ecx.eval_place(place)?; let dest = this.ecx.eval_place(place)?;
match op { match op {
View file
@ -211,7 +211,7 @@ fn find_branch_value_info<'tcx>(
return None; return None;
}; };
let branch_value_scalar = branch_value.literal.try_to_scalar()?; let branch_value_scalar = branch_value.literal.try_to_scalar()?;
Some((branch_value_scalar, branch_value_ty, *to_switch_on)) Some((branch_value_scalar.into(), branch_value_ty, *to_switch_on))
} }
_ => None, _ => None,
} }
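
The added `.into()` is another face of the same overhaul: `Scalar` gained infallible `From` conversions, so call sites that need a wider wrapper no longer go through a fallible normalization step. A minimal sketch, under the assumption that the destination type at this call site is `ScalarMaybeUninit` (whose `From<Scalar>` impl is what makes `.into()` compile):

use rustc_middle::mir::interpret::{Scalar, ScalarMaybeUninit};

// Sketch, not part of the diff: lifting a known-initialized Scalar into the
// maybe-uninitialized wrapper is infallible.
fn lift_scalar(s: Scalar) -> ScalarMaybeUninit {
    s.into()
}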

View file

@ -1,6 +1,6 @@
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::fmt::Display;
use std::fmt::Write as _; use std::fmt::Write as _;
use std::fmt::{Debug, Display};
use std::fs; use std::fs;
use std::io::{self, Write}; use std::io::{self, Write};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@ -13,7 +13,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId; use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx; use rustc_index::vec::Idx;
use rustc_middle::mir::interpret::{ use rustc_middle::mir::interpret::{
read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer, read_target_uint, AllocId, Allocation, ConstValue, GlobalAlloc, Pointer, Provenance,
}; };
use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::visit::Visitor;
use rustc_middle::mir::*; use rustc_middle::mir::*;
@ -665,12 +665,12 @@ pub fn write_allocations<'tcx>(
w: &mut dyn Write, w: &mut dyn Write,
) -> io::Result<()> { ) -> io::Result<()> {
fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ { fn alloc_ids_from_alloc(alloc: &Allocation) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
alloc.relocations().values().map(|(_, id)| *id) alloc.relocations().values().map(|id| *id)
} }
fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ { fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
match val { match val {
ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => { ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
Either::Left(Either::Left(std::iter::once(ptr.alloc_id))) Either::Left(Either::Left(std::iter::once(ptr.provenance)))
} }
ConstValue::Scalar(interpret::Scalar::Int { .. }) => { ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
Either::Left(Either::Right(std::iter::empty())) Either::Left(Either::Right(std::iter::empty()))
@ -755,7 +755,7 @@ pub fn write_allocations<'tcx>(
/// After the hex dump, an ascii dump follows, replacing all unprintable characters (control /// After the hex dump, an ascii dump follows, replacing all unprintable characters (control
/// characters or characters whose value is larger than 127) with a `.` /// characters or characters whose value is larger than 127) with a `.`
/// This also prints relocations adequately. /// This also prints relocations adequately.
pub fn display_allocation<Tag: Copy + Debug, Extra>( pub fn display_allocation<Tag, Extra>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
alloc: &'a Allocation<Tag, Extra>, alloc: &'a Allocation<Tag, Extra>,
) -> RenderAllocation<'a, 'tcx, Tag, Extra> { ) -> RenderAllocation<'a, 'tcx, Tag, Extra> {
@ -768,7 +768,7 @@ pub struct RenderAllocation<'a, 'tcx, Tag, Extra> {
alloc: &'a Allocation<Tag, Extra>, alloc: &'a Allocation<Tag, Extra>,
} }
impl<Tag: Copy + Debug, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> { impl<Tag: Provenance, Extra> std::fmt::Display for RenderAllocation<'a, 'tcx, Tag, Extra> {
fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let RenderAllocation { tcx, alloc } = *self; let RenderAllocation { tcx, alloc } = *self;
write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?; write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?;
@ -811,7 +811,7 @@ fn write_allocation_newline(
/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there /// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
/// is only one line). Note that your prefix should contain a trailing space as the lines are /// is only one line). Note that your prefix should contain a trailing space as the lines are
/// printed directly after it. /// printed directly after it.
fn write_allocation_bytes<Tag: Copy + Debug, Extra>( fn write_allocation_bytes<Tag: Provenance, Extra>(
tcx: TyCtxt<'tcx>, tcx: TyCtxt<'tcx>,
alloc: &Allocation<Tag, Extra>, alloc: &Allocation<Tag, Extra>,
w: &mut dyn std::fmt::Write, w: &mut dyn std::fmt::Write,
@ -847,7 +847,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
if i != line_start { if i != line_start {
write!(w, " ")?; write!(w, " ")?;
} }
if let Some(&(tag, target_id)) = alloc.relocations().get(&i) { if let Some(&tag) = alloc.relocations().get(&i) {
// Memory with a relocation must be defined // Memory with a relocation must be defined
let j = i.bytes_usize(); let j = i.bytes_usize();
let offset = alloc let offset = alloc
@ -855,7 +855,7 @@ fn write_allocation_bytes<Tag: Copy + Debug, Extra>(
let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap(); let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
let offset = Size::from_bytes(offset); let offset = Size::from_bytes(offset);
let relocation_width = |bytes| bytes * 3; let relocation_width = |bytes| bytes * 3;
let ptr = Pointer::new_with_tag(target_id, offset, tag); let ptr = Pointer::new(tag, offset);
let mut target = format!("{:?}", ptr); let mut target = format!("{:?}", ptr);
if target.len() > relocation_width(ptr_size.bytes_usize() - 1) { if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
// This is too long, try to save some space. // This is too long, try to save some space.
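
The rendering side shows the new `Pointer` shape: a relocation stores only the tag (`AllocId` provenance at this level), the offset is decoded from the allocation bytes, and the two are combined with `Pointer::new(tag, offset)`; the old `new_with_tag` constructor and the separate `alloc_id` field are gone, and the `Tag: Copy + Debug` bounds give way to the new `Provenance` trait. A minimal sketch of rebuilding the pointer a relocation denotes, generic over any provenance type (helper name is illustrative only):

use rustc_middle::mir::interpret::{Pointer, Provenance};
use rustc_target::abi::Size;

// Sketch, not part of the diff: combine the tag from the relocation table with
// the offset read from the bytes. Provenance comes first, offset second.
fn relocation_target<Tag: Provenance>(tag: Tag, offset: Size) -> Pointer<Tag> {
    Pointer::new(tag, offset)
}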