
consistently treat None-tagged pointers as ints; get rid of some deprecated Scalar methods

Ralf Jung 2021-07-12 20:29:05 +02:00
parent d4f7dd6702
commit 626605cea0
29 changed files with 145 additions and 139 deletions
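
In broad strokes: pointer-to-scalar conversions now go through explicit constructors that take a `HasDataLayout` context (so the scalar records the pointer size, and `None`-tagged pointers become plain integers), and the type-blind helpers `is_bits`/`is_ptr`/`assert_ptr` are gone in favour of `try_to_int`. A rough before/after sketch of a typical call site (illustrative only; `ptr`, `s`, `size` and `tcx` are placeholders, not lines from this diff):

    // Before: implicit conversion and type-blind queries.
    // let val: Scalar<Tag> = ptr.into();
    // if s.is_bits() { let bits = s.assert_bits(size); /* ... */ }

    // After: the data layout supplies the pointer size, and integer-ness
    // is queried through `try_to_int`.
    let val: Scalar<Tag> = Scalar::from_pointer(ptr, &tcx);
    if let Some(int) = s.try_to_int() {
        let bits = int.assert_bits(size);
        // ...
    }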

View file

@ -243,7 +243,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.const_bitcast(llval, llty)
}
}
Scalar::Ptr(ptr) => {
Scalar::Ptr(ptr, _size) => {
let (alloc_id, offset) = ptr.into_parts();
let (base_addr, base_addr_space) = match self.tcx.global_alloc(alloc_id) {
GlobalAlloc::Memory(alloc) => {

View file

@ -11,7 +11,7 @@ use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::DefId;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer,
read_target_uint, Allocation, ErrorHandled, GlobalAlloc, Pointer, Scalar as InterpScalar,
};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{self, Instance, Ty};
@ -55,7 +55,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
};
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
InterpScalar::from_pointer(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
&cx.tcx,
),
&Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
cx.type_i8p_ext(address_space),
));

View file

@ -90,10 +90,10 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
Abi::ScalarPair(ref a, _) => a,
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout),
};
let a = Scalar::from(Pointer::new(
bx.tcx().create_memory_alloc(data),
Size::from_bytes(start),
));
let a = Scalar::from_pointer(
Pointer::new(bx.tcx().create_memory_alloc(data), Size::from_bytes(start)),
&bx.tcx(),
);
let a_llval = bx.scalar_to_backend(
a,
a_scalar,

View file

@ -333,7 +333,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
// Maybe a pointer.
if let Some(&prov) = self.relocations.get(&range.start) {
let ptr = Pointer::new(prov, Size::from_bytes(bits));
return Ok(ScalarMaybeUninit::Scalar(ptr.into()));
return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
}
}
// We don't. Just return the bits.
@ -363,7 +363,7 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
}
};
let (bytes, provenance) = match val.to_bits_or_ptr(range.size, cx) {
let (bytes, provenance) = match val.to_bits_or_ptr(range.size) {
Err(val) => {
let (provenance, offset) = val.into_parts();
(u128::from(offset.bytes()), Some(provenance))

View file

@ -87,7 +87,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {}
/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
/// some global state.
pub trait Provenance: Copy {
/// Says whether the `offset` field of `Pointer` is the actual physical address.
/// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
If `true`, ptr-to-int casts work by simply discarding the provenance.
/// If `false`, ptr-to-int casts are not supported.
const OFFSET_IS_ADDR: bool;
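
To illustrate what the flag buys (a sketch only, mirroring the `Scalar::try_to_int` logic added further down in this commit; `ptr_to_int_sketch` is a made-up helper, not an API in this crate):

    // For a provenance type whose offsets are absolute addresses, a
    // ptr-to-int cast can simply drop the tag; otherwise it must fail.
    fn ptr_to_int_sketch<Tag: Provenance>(ptr: Pointer<Tag>) -> Option<u64> {
        if Tag::OFFSET_IS_ADDR {
            let (_tag, offset) = ptr.into_parts();
            Some(offset.bytes())
        } else {
            None
        }
    }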

View file

@ -128,7 +128,11 @@ pub enum Scalar<Tag = AllocId> {
/// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
/// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
/// relocation and its associated offset together as a `Pointer` here.
Ptr(Pointer<Tag>),
///
/// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
/// The size is always the pointer size of the current target, but this is not information
/// that we always have readily available.
Ptr(Pointer<Tag>, u8),
}
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
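
For orientation (an illustrative snippet, not part of the diff; `ptr` and `cx` are placeholders): the size byte is filled in by `from_pointer` from the target's data layout, which is what later lets methods like `to_bits_or_ptr` check sizes without taking a `HasDataLayout` argument.

    // `cx` is any `HasDataLayout` (e.g. `&tcx`); on a 64-bit target this
    // stores `Scalar::Ptr(ptr, 8)`.
    let scalar = Scalar::from_pointer(ptr, cx);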
@ -139,7 +143,7 @@ pub enum Scalar<Tag = AllocId> {
impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "{:?}", ptr),
Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
Scalar::Int(int) => write!(f, "{:?}", int),
}
}
@ -148,7 +152,7 @@ impl<Tag: Provenance> fmt::Debug for Scalar<Tag> {
impl<Tag: Provenance> fmt::Display for Scalar<Tag> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Scalar::Ptr(ptr) => write!(f, "pointer to {:?}", ptr),
Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
Scalar::Int(int) => write!(f, "{:?}", int),
}
}
@ -168,13 +172,6 @@ impl<Tag> From<Double> for Scalar<Tag> {
}
}
impl<Tag> From<Pointer<Tag>> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: Pointer<Tag>) -> Self {
Scalar::Ptr(ptr)
}
}
impl<Tag> From<ScalarInt> for Scalar<Tag> {
#[inline(always)]
fn from(ptr: ScalarInt) -> Self {
@ -185,21 +182,26 @@ impl<Tag> From<ScalarInt> for Scalar<Tag> {
impl<'tcx, Tag> Scalar<Tag> {
pub const ZST: Self = Scalar::Int(ScalarInt::ZST);
#[inline]
pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
Scalar::Int(ScalarInt::null(cx.data_layout().pointer_size))
#[inline(always)]
pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
Scalar::Ptr(ptr, u8::try_from(cx.pointer_size().bytes()).unwrap())
}
/// Create a Scalar from a pointer with an `Option<_>` tag (where `None` represents a plain integer).
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
match ptr.into_parts() {
(Some(tag), offset) => Scalar::Ptr(Pointer::new(tag, offset)),
(Some(tag), offset) => Scalar::from_pointer(Pointer::new(tag, offset), cx),
(None, offset) => {
Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
}
}
}
#[inline]
pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
Scalar::Int(ScalarInt::null(cx.pointer_size()))
}
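
This constructor is what the commit title means by treating `None`-tagged pointers as ints: a pointer whose provenance is `None` is not kept as a pointer scalar at all. A rough usage sketch (`untagged` and `cx` are placeholders):

    // `untagged: Pointer<Option<Tag>>` with provenance `None`,
    // e.g. the result of an int-to-ptr cast; `cx` is any `HasDataLayout`.
    let s = Scalar::from_maybe_pointer(untagged, cx);
    // `s` is now `Scalar::Int(..)` holding the offset as a pointer-sized
    // integer, not `Scalar::Ptr(..)`, so downstream code uniformly sees an int.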
#[inline(always)]
fn ptr_op(
self,
@ -209,7 +211,10 @@ impl<'tcx, Tag> Scalar<Tag> {
) -> InterpResult<'tcx, Self> {
match self {
Scalar::Int(int) => Ok(Scalar::Int(int.ptr_sized_op(dl, f_int)?)),
Scalar::Ptr(ptr) => Ok(Scalar::Ptr(f_ptr(ptr)?)),
Scalar::Ptr(ptr, sz) => {
debug_assert_eq!(u64::from(sz), dl.pointer_size().bytes());
Ok(Scalar::Ptr(f_ptr(ptr)?, sz))
}
}
}
@ -334,59 +339,18 @@ impl<'tcx, Tag> Scalar<Tag> {
/// This is almost certainly not the method you want! You should dispatch on the type
/// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
///
/// This method only exists for the benefit of low-level memory operations
/// as well as the implementation of the above methods.
/// This method only exists for the benefit of low-level memory operations.
#[inline]
pub fn to_bits_or_ptr(
self,
target_size: Size,
cx: &impl HasDataLayout,
) -> Result<u128, Pointer<Tag>> {
pub fn to_bits_or_ptr(self, target_size: Size) -> Result<u128, Pointer<Tag>> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => Ok(int.assert_bits(target_size)),
Scalar::Ptr(ptr) => {
assert_eq!(target_size, cx.data_layout().pointer_size);
Scalar::Ptr(ptr, sz) => {
assert_eq!(target_size.bytes(), u64::from(sz));
Err(ptr)
}
}
}
/// Do not call this method! It does not do ptr-to-int casts when needed.
#[inline(always)]
pub fn assert_bits(self, target_size: Size) -> u128 {
self.assert_int().assert_bits(target_size)
}
/// Do not call this method! It does not do ptr-to-int casts when needed.
#[inline]
pub fn assert_int(self) -> ScalarInt {
match self {
Scalar::Ptr(_) => bug!("expected an int but got an abstract pointer"),
Scalar::Int(int) => int,
}
}
/// Do not call this method! It does not do int-to-ptr casts when needed.
#[inline]
pub fn assert_ptr(self) -> Pointer<Tag> {
match self {
Scalar::Ptr(p) => p,
Scalar::Int { .. } => bug!("expected a Pointer but got Raw bits"),
}
}
/// Do not call this method! Dispatch based on the type instead.
#[inline]
pub fn is_bits(self) -> bool {
matches!(self, Scalar::Int { .. })
}
/// Do not call this method! Dispatch based on the type instead.
#[inline]
pub fn is_ptr(self) -> bool {
matches!(self, Scalar::Ptr(_))
}
}
impl<'tcx, Tag: Provenance> Scalar<Tag> {
@ -396,7 +360,7 @@ impl<'tcx, Tag: Provenance> Scalar<Tag> {
#[inline]
pub fn erase_for_fmt(self) -> Scalar {
match self {
Scalar::Ptr(ptr) => Scalar::Ptr(ptr.erase_for_fmt()),
Scalar::Ptr(ptr, sz) => Scalar::Ptr(ptr.erase_for_fmt(), sz),
Scalar::Int(int) => Scalar::Int(int),
}
}
@ -405,25 +369,45 @@ impl<'tcx, Tag: Provenance> Scalar<Tag> {
/// likely want to use instead.
///
/// Will perform ptr-to-int casts if needed and possible.
#[inline]
pub fn try_to_int(self) -> Option<ScalarInt> {
match self {
Scalar::Int(int) => Some(int),
Scalar::Ptr(ptr, sz) => {
if Tag::OFFSET_IS_ADDR {
Some(
ScalarInt::try_from_uint(ptr.offset.bytes(), Size::from_bytes(sz)).unwrap(),
)
} else {
None
}
}
}
}
#[inline(always)]
pub fn assert_int(self) -> ScalarInt {
self.try_to_int().unwrap()
}
#[inline]
pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
match self {
Scalar::Int(int) => int.to_bits(target_size).map_err(|size| {
self.try_to_int()
.ok_or_else(|| err_unsup!(ReadPointerAsBytes))?
.to_bits(target_size)
.map_err(|size| {
err_ub!(ScalarSizeMismatch {
target_size: target_size.bytes(),
data_size: size.bytes(),
})
.into()
}),
Scalar::Ptr(ptr) => {
if Tag::OFFSET_IS_ADDR {
Ok(ptr.offset.bytes().into())
} else {
throw_unsup!(ReadPointerAsBytes)
}
}
}
})
}
#[inline(always)]
pub fn assert_bits(self, target_size: Size) -> u128 {
self.to_bits(target_size).unwrap()
}
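
Taken together, `try_to_int` and `to_bits` now perform the ptr-to-int cast themselves when the provenance allows it; a sketch of the resulting behaviour (illustrative, `s` and `target_size` are placeholders):

    match s.to_bits(target_size) {
        Ok(bits) => {
            // Either an integer scalar's bits, or a pointer's absolute
            // address when `Tag::OFFSET_IS_ADDR` is true.
            let _ = bits;
        }
        Err(_err) => {
            // `ReadPointerAsBytes` (the same error the removed
            // `Scalar::Ptr` arm used to raise) or a size mismatch.
        }
    }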
pub fn to_bool(self) -> InterpResult<'tcx, bool> {
@ -547,13 +531,6 @@ impl<Tag> From<Scalar<Tag>> for ScalarMaybeUninit<Tag> {
}
}
impl<Tag> From<Pointer<Tag>> for ScalarMaybeUninit<Tag> {
#[inline(always)]
fn from(s: Pointer<Tag>) -> Self {
ScalarMaybeUninit::Scalar(s.into())
}
}
// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
// all the Miri types.
impl<Tag: Provenance> fmt::Debug for ScalarMaybeUninit<Tag> {
@ -575,6 +552,16 @@ impl<Tag: Provenance> fmt::Display for ScalarMaybeUninit<Tag> {
}
impl<Tag> ScalarMaybeUninit<Tag> {
#[inline]
pub fn from_pointer(ptr: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
}
#[inline]
pub fn from_maybe_pointer(ptr: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
}
#[inline]
pub fn check_init(self) -> InterpResult<'static, Scalar<Tag>> {
match self {

View file

@ -2458,7 +2458,7 @@ pub enum ConstantKind<'tcx> {
impl Constant<'tcx> {
pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
match self.literal.const_for_ty()?.val.try_to_scalar() {
Some(Scalar::Ptr(ptr)) => match tcx.global_alloc(ptr.provenance) {
Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
GlobalAlloc::Static(def_id) => {
assert!(!tcx.is_thread_local_static(def_id));
Some(def_id)

View file

@ -974,7 +974,7 @@ pub trait PrettyPrinter<'tcx>:
print_ty: bool,
) -> Result<Self::Const, Self::Error> {
match scalar {
Scalar::Ptr(ptr) => self.pretty_print_const_scalar_ptr(ptr, ty, print_ty),
Scalar::Ptr(ptr, _size) => self.pretty_print_const_scalar_ptr(ptr, ty, print_ty),
Scalar::Int(int) => self.pretty_print_const_scalar_int(int, ty, print_ty),
}
}

View file

@ -595,7 +595,10 @@ fn check_const_value_eq<R: TypeRelation<'tcx>>(
(ConstValue::Scalar(Scalar::Int(a_val)), ConstValue::Scalar(Scalar::Int(b_val))) => {
a_val == b_val
}
(ConstValue::Scalar(Scalar::Ptr(a_val)), ConstValue::Scalar(Scalar::Ptr(b_val))) => {
(
ConstValue::Scalar(Scalar::Ptr(a_val, _a_size)),
ConstValue::Scalar(Scalar::Ptr(b_val, _b_size)),
) => {
a_val == b_val
|| match (tcx.global_alloc(a_val.provenance), tcx.global_alloc(b_val.provenance)) {
(GlobalAlloc::Function(a_instance), GlobalAlloc::Function(b_instance)) => {

View file

@ -1,6 +1,6 @@
use std::convert::TryFrom;
use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar};
use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit};
use crate::ty::fold::TypeFoldable;
use crate::ty::{self, DefId, SubstsRef, Ty, TyCtxt};
use rustc_ast::Mutability;
@ -74,7 +74,7 @@ impl<'tcx> TyCtxt<'tcx> {
let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
let fn_alloc_id = tcx.create_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
fn_ptr.into()
ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
}
VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
@ -90,7 +90,7 @@ impl<'tcx> TyCtxt<'tcx> {
.polymorphize(tcx);
let fn_alloc_id = tcx.create_fn_alloc(instance);
let fn_ptr = Pointer::from(fn_alloc_id);
fn_ptr.into()
ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
}
};
vtable

View file

@ -139,6 +139,7 @@ pub(super) fn op_to_const<'tcx>(
op.try_as_mplace()
};
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let to_const_value = |mplace: &MPlaceTy<'_>| match mplace.ptr.into_parts() {
(Some(alloc_id), offset) => {
let alloc = ecx.tcx.global_alloc(alloc_id).unwrap_memory();
@ -164,6 +165,7 @@ pub(super) fn op_to_const<'tcx>(
ScalarMaybeUninit::Uninit => to_const_value(&op.assert_mem_place()),
},
Immediate::ScalarPair(a, b) => {
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
(Some(alloc_id), offset) => {
(ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())

View file

@ -168,11 +168,11 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// Comparisons between integers are always known.
(Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
// Equality with integers can never be known for sure.
(Scalar::Int { .. }, Scalar::Ptr(_)) | (Scalar::Ptr(_), Scalar::Int { .. }) => false,
(Scalar::Int { .. }, Scalar::Ptr(..)) | (Scalar::Ptr(..), Scalar::Int { .. }) => false,
// FIXME: return `true` for when both sides are the same pointer, *except* that
// some things (like functions and vtables) do not have stable addresses
// so we need to be careful around them (see e.g. #73722).
(Scalar::Ptr(_), Scalar::Ptr(_)) => false,
(Scalar::Ptr(..), Scalar::Ptr(..)) => false,
}
}
@ -183,13 +183,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// Comparisons of abstract pointers with null pointers are known if the pointer
// is in bounds, because if they are in bounds, the pointer can't be null.
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), Scalar::Ptr(ptr)) | (Scalar::Ptr(ptr), Scalar::Int(int)) => {
(Scalar::Int(int), Scalar::Ptr(ptr, _)) | (Scalar::Ptr(ptr, _), Scalar::Int(int)) => {
int.is_null() && !self.memory.ptr_may_be_null(ptr.into())
}
// FIXME: return `true` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.
(Scalar::Ptr(_), Scalar::Ptr(_)) => false,
(Scalar::Ptr(..), Scalar::Ptr(..)) => false,
}
}
}
@ -312,7 +312,7 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
align,
interpret::MemoryKind::Machine(MemoryKind::Heap),
)?;
ecx.write_scalar(Scalar::Ptr(ptr), dest)?;
ecx.write_scalar(Scalar::from_pointer(ptr, &*ecx.tcx), dest)?;
}
_ => {
return Err(ConstEvalErrKind::NeedsRfc(format!(

View file

@ -35,7 +35,7 @@ pub(crate) fn const_caller_location(
if intern_const_alloc_recursive(&mut ecx, InternKind::Constant, &loc_place).is_err() {
bug!("intern_const_alloc_recursive should not error in this case")
}
ConstValue::Scalar(Scalar::Ptr(loc_place.ptr.into_pointer_or_offset().unwrap()))
ConstValue::Scalar(Scalar::from_pointer(loc_place.ptr.into_pointer_or_offset().unwrap(), &tcx))
}
/// Convert an evaluated constant to a type level constant

View file

@ -57,7 +57,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.ok_or_else(|| err_inval!(TooGeneric))?;
let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
self.write_scalar(fn_ptr, dest)?;
self.write_scalar(Scalar::from_pointer(fn_ptr, &*self.tcx), dest)?;
}
_ => span_bug!(self.cur_span(), "reify fn pointer on {:?}", src.layout.ty),
}
@ -88,7 +88,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ty::ClosureKind::FnOnce,
);
let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
self.write_scalar(fn_ptr, dest)?;
self.write_scalar(Scalar::from_pointer(fn_ptr, &*self.tcx), dest)?;
}
_ => span_bug!(self.cur_span(), "closure fn pointer on {:?}", src.layout.ty),
}
@ -280,7 +280,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Initial cast from sized to dyn trait
let vtable = self.get_vtable(src_pointee_ty, data.principal())?;
let ptr = self.read_immediate(src)?.to_scalar()?;
let val = Immediate::new_dyn_trait(ptr, vtable);
let val = Immediate::new_dyn_trait(ptr, vtable, &*self.tcx);
self.write_immediate(val, dest)
}

View file

@ -993,16 +993,16 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
}
LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
write!(fmt, " {:?}", val)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val {
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
allocs.push(ptr.provenance.erase_for_fmt());
}
}
LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
write!(fmt, " ({:?}, {:?})", val1, val2)?;
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val1 {
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
allocs.push(ptr.provenance.erase_for_fmt());
}
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr)) = val2 {
if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
allocs.push(ptr.provenance.erase_for_fmt());
}
}

View file

@ -362,9 +362,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
//
// Control flow is weird because we cannot early-return (to reach the
// `go_to_block` at the end).
let done = if a.is_bits() && b.is_bits() {
let a = a.to_machine_usize(self)?;
let b = b.to_machine_usize(self)?;
let done = if let (Some(a), Some(b)) = (a.try_to_int(), b.try_to_int()) {
let a = a.try_to_machine_usize(*self.tcx).unwrap();
let b = b.try_to_machine_usize(*self.tcx).unwrap();
if a == b && a != 0 {
self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
true

View file

@ -283,6 +283,8 @@ pub trait Machine<'mir, 'tcx>: Sized {
/// this will return an unusable tag (i.e., accesses will be UB)!
///
/// Called on the id returned by `thread_local_static_alloc_id` and `extern_static_alloc_id`, if needed.
///
/// `offset` is relative inside the allocation.
fn tag_global_base_pointer(
memory_extra: &Self::MemoryExtra,
ptr: Pointer,
@ -485,6 +487,7 @@ pub macro compile_time_machine(<$mir: lifetime, $tcx: lifetime>) {
_mem: &Memory<$mir, $tcx, Self>,
ptr: Pointer<AllocId>,
) -> (Option<AllocId>, Size) {
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (alloc_id, offset) = ptr.into_parts();
(Some(alloc_id), offset)
}

View file

@ -165,6 +165,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
&self,
ptr: Pointer<AllocId>,
) -> InterpResult<'tcx, Pointer<M::PointerTag>> {
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (alloc_id, offset) = ptr.into_parts();
// We need to handle `extern static`.
let alloc_id = match self.tcx.get_global_alloc(alloc_id) {
@ -450,7 +451,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// we want the error to be about the bounds.
if let Some(align) = align {
if M::force_int_for_alignment_check(&self.extra) {
let addr = Scalar::from(ptr)
let addr = Scalar::from_pointer(ptr, &self.tcx)
.to_machine_usize(&self.tcx)
.expect("ptr-to-int cast for align check should never fail");
check_offset_align(addr, align)?;
@ -1131,7 +1132,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
/// Machine pointer introspection.
impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
match scalar.to_bits_or_ptr(self.pointer_size(), &self.tcx) {
match scalar.to_bits_or_ptr(self.pointer_size()) {
Err(ptr) => ptr.into(),
Ok(bits) => {
let addr = u64::try_from(bits).unwrap();

View file

@ -60,20 +60,21 @@ impl<Tag> From<Scalar<Tag>> for Immediate<Tag> {
}
}
impl<Tag> From<Pointer<Tag>> for Immediate<Tag> {
#[inline(always)]
fn from(val: Pointer<Tag>) -> Self {
Immediate::Scalar(Scalar::from(val).into())
}
}
impl<'tcx, Tag> Immediate<Tag> {
pub fn from_pointer(p: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(ScalarMaybeUninit::from_pointer(p, cx))
}
pub fn from_maybe_pointer(p: Pointer<Option<Tag>>, cx: &impl HasDataLayout) -> Self {
Immediate::Scalar(ScalarMaybeUninit::from_maybe_pointer(p, cx))
}
pub fn new_slice(val: Scalar<Tag>, len: u64, cx: &impl HasDataLayout) -> Self {
Immediate::ScalarPair(val.into(), Scalar::from_machine_usize(len, cx).into())
}
pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>) -> Self {
Immediate::ScalarPair(val.into(), vtable.into())
pub fn new_dyn_trait(val: Scalar<Tag>, vtable: Pointer<Tag>, cx: &impl HasDataLayout) -> Self {
Immediate::ScalarPair(val.into(), ScalarMaybeUninit::from_pointer(vtable, cx))
}
#[inline]
@ -252,7 +253,10 @@ impl<'tcx, Tag: Copy> ImmTy<'tcx, Tag> {
}
#[inline]
pub fn to_const_int(self) -> ConstInt {
pub fn to_const_int(self) -> ConstInt
where
Tag: Provenance,
{
assert!(self.layout.ty.is_integral());
let int = self.to_scalar().expect("to_const_int doesn't work on scalar pairs").assert_int();
ConstInt::new(int, self.layout.ty.is_signed(), self.layout.ty.is_ptr_sized_integral())
@ -599,7 +603,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Other cases need layout.
let tag_scalar = |scalar| -> InterpResult<'tcx, _> {
Ok(match scalar {
Scalar::Ptr(ptr) => Scalar::Ptr(self.global_base_pointer(ptr)?),
Scalar::Ptr(ptr, size) => Scalar::Ptr(self.global_base_pointer(ptr)?, size),
Scalar::Int(int) => Scalar::Int(int),
})
};
@ -621,7 +625,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Size::from_bytes(start), // offset: `start`
);
Operand::Immediate(Immediate::new_slice(
self.global_base_pointer(ptr)?.into(),
Scalar::from_pointer(self.global_base_pointer(ptr)?, &*self.tcx),
u64::try_from(end.checked_sub(start).unwrap()).unwrap(), // len: `end - start`
self,
))
@ -716,7 +720,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// discriminant (encoded in niche/tag) and variant index are the same.
let variants_start = niche_variants.start().as_u32();
let variants_end = niche_variants.end().as_u32();
let variant = match tag_val.to_bits_or_ptr(tag_layout.size, self) {
let variant = match tag_val.to_bits_or_ptr(tag_layout.size) {
Err(ptr) => {
// The niche must be just 0 (which an inbounds pointer value never is)
let ptr_valid = niche_start == 0

View file

@ -752,7 +752,7 @@ where
// This is a very common path, avoid some checks in release mode
assert!(!dest.layout.is_unsized(), "Cannot write unsized data");
match src {
Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(_))) => assert_eq!(
Immediate::Scalar(ScalarMaybeUninit::Scalar(Scalar::Ptr(..))) => assert_eq!(
self.pointer_size(),
dest.layout.size,
"Size mismatch when writing pointer"

View file

@ -164,7 +164,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
ThreadLocalRef(did) => {
let id = M::thread_local_static_alloc_id(self, did)?;
let val = self.global_base_pointer(id.into())?;
self.write_scalar(val, &dest)?;
self.write_scalar(Scalar::from_pointer(val, &*self.tcx), &dest)?;
}
Use(ref operand) => {

View file

@ -539,7 +539,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// types below!
if self.ctfe_mode.is_some() {
// Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
let is_bits = value.check_init().map_or(false, |v| v.is_bits());
let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_some());
if !is_bits {
throw_validation_failure!(self.path,
{ "{}", value } expected { "initialized plain (non-pointer) bytes" }
@ -657,7 +657,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
err_ub!(InvalidUninitBytes(None)) => { "{}", value }
expected { "something {}", wrapping_range_format(valid_range, max_hi) },
);
let bits = match value.to_bits_or_ptr(op.layout.size, self.ecx) {
let bits = match value.to_bits_or_ptr(op.layout.size) {
Err(ptr) => {
if lo == 1 && hi == max_hi {
// Only null is the niche. So make sure the ptr is NOT null.

View file

@ -1402,7 +1402,7 @@ fn collect_const_value<'tcx>(
output: &mut Vec<Spanned<MonoItem<'tcx>>>,
) {
match value {
ConstValue::Scalar(Scalar::Ptr(ptr)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Scalar(Scalar::Ptr(ptr, _size)) => collect_miri(tcx, ptr.provenance, output),
ConstValue::Slice { data: alloc, start: _, end: _ } | ConstValue::ByRef { alloc, .. } => {
for &id in alloc.relocations().values() {
collect_miri(tcx, id, output);

View file

@ -582,8 +582,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
let left_size = self.ecx.layout_of(left_ty).ok()?.size;
let right_size = r.layout.size;
let r_bits = r.to_scalar().ok();
// This is basically `force_bits`.
let r_bits = r_bits.and_then(|r| r.to_bits_or_ptr(right_size, &self.tcx).ok());
let r_bits = r_bits.and_then(|r| r.to_bits(right_size).ok());
if r_bits.map_or(false, |b| b >= left_size.bits() as u128) {
debug!("check_binary_op: reporting assert for {:?}", source_info);
self.report_assert_as_lint(
@ -922,12 +921,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
match **op {
interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
s.is_bits()
s.try_to_int().is_some()
}
interpret::Operand::Immediate(Immediate::ScalarPair(
ScalarMaybeUninit::Scalar(l),
ScalarMaybeUninit::Scalar(r),
)) => l.is_bits() && r.is_bits(),
)) => l.try_to_int().is_some() && r.try_to_int().is_some(),
_ => false,
}
}

View file

@ -46,7 +46,7 @@ impl<'tcx> MirPass<'tcx> for SimplifyComparisonIntegral {
.expect("if we have an evaluated constant we must know the layout");
int.assert_bits(layout.size)
}
Scalar::Ptr(_) => continue,
Scalar::Ptr(..) => continue,
};
const FALSE: u128 = 0;

View file

@ -669,7 +669,7 @@ pub fn write_allocations<'tcx>(
}
fn alloc_ids_from_const(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
match val {
ConstValue::Scalar(interpret::Scalar::Ptr(ptr)) => {
ConstValue::Scalar(interpret::Scalar::Ptr(ptr, _size)) => {
Either::Left(Either::Left(std::iter::once(ptr.provenance)))
}
ConstValue::Scalar(interpret::Scalar::Int { .. }) => {

View file

@ -38,7 +38,7 @@ crate fn lit_to_const<'tcx>(
}
(ast::LitKind::ByteStr(data), ty::Ref(_, inner_ty, _)) if inner_ty.is_array() => {
let id = tcx.allocate_bytes(data);
ConstValue::Scalar(Scalar::Ptr(id.into()))
ConstValue::Scalar(Scalar::from_pointer(id.into(), &tcx))
}
(ast::LitKind::Byte(n), ty::Uint(ty::UintTy::U8)) => {
ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1)))

View file

@ -928,7 +928,11 @@ impl<'tcx> Cx<'tcx> {
} else {
let ptr = self.tcx.create_static_alloc(id);
ExprKind::StaticRef {
literal: ty::Const::from_scalar(self.tcx, Scalar::Ptr(ptr.into()), ty),
literal: ty::Const::from_scalar(
self.tcx,
Scalar::from_pointer(ptr.into(), &self.tcx),
ty,
),
def_id: id,
}
};

View file

@ -123,7 +123,7 @@ impl IntRange {
// straight to the result, after doing a bit of checking. (We
// could remove this branch and just fall through, which
// is more general but much slower.)
if let Ok(bits) = scalar.to_bits_or_ptr(target_size, &tcx) {
if let Ok(bits) = scalar.to_bits_or_ptr(target_size) {
return Some(bits);
}
}