
interpret: err instead of ICE on size mismatches in to_bits_or_ptr_internal

Ralf Jung 2022-04-07 16:22:09 -04:00
parent b6ab1fae73
commit 38004b72bc
16 changed files with 107 additions and 67 deletions
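
The change follows a single pattern throughout: `to_bits_or_ptr_internal` now reports a size mismatch to its caller instead of aborting the compiler (an ICE), `scalar_to_ptr` maps that mismatch into a proper undefined-behaviour error via `err_ub!(ScalarSizeMismatch(..))`, and the helpers built on top of it (`scalar_may_be_null`, `guaranteed_eq`, `guaranteed_ne`) become fallible and propagate the error with `?`. A minimal, self-contained sketch of the pattern, using hypothetical names rather than rustc's actual types:

// Sketch only: a conversion that used to panic on a size mismatch now hands
// the mismatch back to its caller.
#[derive(Debug, PartialEq)]
struct SizeMismatch {
    target_size: u64,
    data_size: u64,
}

fn to_bits(data: u128, data_size: u64, target_size: u64) -> Result<u128, SizeMismatch> {
    if data_size != target_size {
        // Previously: an assertion failure, i.e. an abort.
        return Err(SizeMismatch { target_size, data_size });
    }
    Ok(data)
}

// The caller converts the mismatch into its own error type and propagates it
// with `?`, mirroring the `map_err(|s| err_ub!(ScalarSizeMismatch(s)))?` call
// in the diff below.
fn scalar_to_addr(data: u128, data_size: u64, ptr_size: u64) -> Result<u64, String> {
    let bits = to_bits(data, data_size, ptr_size)
        .map_err(|m| format!("scalar size mismatch: {m:?}"))?;
    Ok(u64::try_from(bits).unwrap())
}

fn main() {
    assert_eq!(scalar_to_addr(42, 8, 8), Ok(42)); // sizes agree
    assert!(scalar_to_addr(42, 4, 8).is_err());   // mismatch is an error, not an abort
}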


@@ -167,17 +167,18 @@ pub(super) fn op_to_const<'tcx>(
},
Immediate::ScalarPair(a, b) => {
// We know `offset` is relative to the allocation, so we can use `into_parts`.
let (data, start) = match ecx.scalar_to_ptr(a.check_init().unwrap()).into_parts() {
(Some(alloc_id), offset) => {
(ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
}
(None, _offset) => (
ecx.tcx.intern_const_alloc(Allocation::from_bytes_byte_aligned_immutable(
b"" as &[u8],
)),
0,
),
};
let (data, start) =
match ecx.scalar_to_ptr(a.check_init().unwrap()).unwrap().into_parts() {
(Some(alloc_id), offset) => {
(ecx.tcx.global_alloc(alloc_id).unwrap_memory(), offset.bytes())
}
(None, _offset) => (
ecx.tcx.intern_const_alloc(
Allocation::from_bytes_byte_aligned_immutable(b"" as &[u8]),
),
0,
),
};
let len = b.to_machine_usize(ecx).unwrap();
let start = start.try_into().unwrap();
let len: usize = len.try_into().unwrap();


@@ -197,8 +197,8 @@ impl interpret::MayLeak for ! {
}
impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> bool {
match (a, b) {
fn guaranteed_eq(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
Ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int { .. }, Scalar::Int { .. }) => a == b,
// Equality with integers can never be known for sure.
@@ -207,11 +207,11 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// some things (like functions and vtables) do not have stable addresses
// so we need to be careful around them (see e.g. #73722).
(Scalar::Ptr(..), Scalar::Ptr(..)) => false,
}
})
}
fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> bool {
match (a, b) {
fn guaranteed_ne(&mut self, a: Scalar, b: Scalar) -> InterpResult<'tcx, bool> {
Ok(match (a, b) {
// Comparisons between integers are always known.
(Scalar::Int(_), Scalar::Int(_)) => a != b,
// Comparisons of abstract pointers with null pointers are known if the pointer
@@ -219,13 +219,13 @@ impl<'mir, 'tcx: 'mir> CompileTimeEvalContext<'mir, 'tcx> {
// Inequality with integers other than null can never be known for sure.
(Scalar::Int(int), ptr @ Scalar::Ptr(..))
| (ptr @ Scalar::Ptr(..), Scalar::Int(int)) => {
int.is_null() && !self.scalar_may_be_null(ptr)
int.is_null() && !self.scalar_may_be_null(ptr)?
}
// FIXME: return `true` for at least some comparisons where we can reliably
// determine the result of runtime inequality tests at compile-time.
// Examples include comparison of addresses in different static items.
(Scalar::Ptr(..), Scalar::Ptr(..)) => false,
}
})
}
}
@@ -329,9 +329,9 @@ impl<'mir, 'tcx> interpret::Machine<'mir, 'tcx> for CompileTimeInterpreter<'mir,
let a = ecx.read_immediate(&args[0])?.to_scalar()?;
let b = ecx.read_immediate(&args[1])?.to_scalar()?;
let cmp = if intrinsic_name == sym::ptr_guaranteed_eq {
ecx.guaranteed_eq(a, b)
ecx.guaranteed_eq(a, b)?
} else {
ecx.guaranteed_ne(a, b)
ecx.guaranteed_ne(a, b)?
};
ecx.write_scalar(Scalar::from_bool(cmp), dest)?;
}


@@ -283,7 +283,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
if let Some(entry_idx) = vptr_entry_idx {
let entry_idx = u64::try_from(entry_idx).unwrap();
let (old_data, old_vptr) = val.to_scalar_pair()?;
let old_vptr = self.scalar_to_ptr(old_vptr);
let old_vptr = self.scalar_to_ptr(old_vptr)?;
let new_vptr = self
.read_new_vtable_after_trait_upcasting_from_vtable(old_vptr, entry_idx)?;
self.write_immediate(Immediate::new_dyn_trait(old_data, new_vptr, self), dest)


@@ -640,7 +640,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Ok(Some((size, align)))
}
ty::Dynamic(..) => {
let vtable = self.scalar_to_ptr(metadata.unwrap_meta());
let vtable = self.scalar_to_ptr(metadata.unwrap_meta())?;
// Read size and align from vtable (already checks size).
Ok(Some(self.read_size_and_align_from_vtable(vtable)?))
}


@@ -202,7 +202,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::Memory
if let ty::Dynamic(..) =
tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind()
{
let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta());
let ptr = self.ecx.scalar_to_ptr(mplace.meta.unwrap_meta())?;
if let Some(alloc_id) = ptr.provenance {
// Explicitly choose const mode here, since vtables are immutable, even
// if the reference of the fat pointer is mutable.


@@ -1102,30 +1102,38 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
/// Machine pointer introspection.
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
pub fn scalar_to_ptr(
&self,
scalar: Scalar<M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
// We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
// call to force getting out a pointer.
match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
Err(ptr) => ptr.into(),
Ok(bits) => {
let addr = u64::try_from(bits).unwrap();
let ptr = M::ptr_from_addr(&self, addr);
if addr == 0 {
assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
Ok(
match scalar
.to_bits_or_ptr_internal(self.pointer_size())
.map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
{
Err(ptr) => ptr.into(),
Ok(bits) => {
let addr = u64::try_from(bits).unwrap();
let ptr = M::ptr_from_addr(&self, addr);
if addr == 0 {
assert!(ptr.provenance.is_none(), "null pointer can never have an AllocId");
}
ptr
}
ptr
}
}
},
)
}
/// Test if this value might be null.
/// If the machine does not support ptr-to-int casts, this is conservative.
pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> bool {
match scalar.try_to_int() {
pub fn scalar_may_be_null(&self, scalar: Scalar<M::PointerTag>) -> InterpResult<'tcx, bool> {
Ok(match scalar.try_to_int() {
Ok(int) => int.is_null(),
Err(_) => {
// Can only happen during CTFE.
let ptr = self.scalar_to_ptr(scalar);
let ptr = self.scalar_to_ptr(scalar)?;
match self.ptr_try_get_alloc_id(ptr) {
Ok((alloc_id, offset, _)) => {
let (size, _align) = self
@@ -1138,7 +1146,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
Err(_offset) => bug!("a non-int scalar is always a pointer"),
}
}
}
})
}
/// Turning a "maybe pointer" into a proper pointer (and some information
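
Because `scalar_to_ptr` can now fail, `scalar_may_be_null` has to become fallible as well, which is the change visible in this hunk and the reason its callers in the remaining files gain a `?`. A short self-contained sketch of that ripple effect (hypothetical names, not the real interpreter API):

fn scalar_to_ptr(raw: u128, size_ok: bool) -> Result<u64, String> {
    // Stand-in for the real conversion: fail instead of asserting on bad sizes.
    if !size_ok {
        return Err("scalar size mismatch".to_string());
    }
    Ok(raw as u64)
}

fn scalar_may_be_null(raw: u128, size_ok: bool) -> Result<bool, String> {
    // Previously returned a plain bool; now the size-mismatch error bubbles up.
    Ok(scalar_to_ptr(raw, size_ok)? == 0)
}

fn main() -> Result<(), String> {
    assert!(scalar_may_be_null(0, true)?);           // null pointer
    assert!(!scalar_may_be_null(0x1000, true)?);     // non-null pointer
    assert!(scalar_may_be_null(7, false).is_err());  // mismatch propagates as Err
    Ok(())
}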


@@ -342,7 +342,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
&self,
op: &OpTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
Ok(self.scalar_to_ptr(self.read_scalar(op)?.check_init()?))
self.scalar_to_ptr(self.read_scalar(op)?.check_init()?)
}
// Turn the wide MPlace into a string (must already be dereferenced!)
@@ -738,7 +738,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// okay. Everything else, we conservatively reject.
let ptr_valid = niche_start == 0
&& variants_start == variants_end
&& !self.scalar_may_be_null(tag_val);
&& !self.scalar_may_be_null(tag_val)?;
if !ptr_valid {
throw_ub!(InvalidTag(dbg_val))
}


@@ -281,7 +281,7 @@ where
};
let mplace = MemPlace {
ptr: self.scalar_to_ptr(ptr.check_init()?),
ptr: self.scalar_to_ptr(ptr.check_init()?)?,
// We could use the run-time alignment here. For now, we do not, because
// the point of tracking the alignment here is to make sure that the *static*
// alignment information emitted with the loads is correct. The run-time
@@ -1104,7 +1104,7 @@ where
&self,
mplace: &MPlaceTy<'tcx, M::PointerTag>,
) -> InterpResult<'tcx, (ty::Instance<'tcx>, MPlaceTy<'tcx, M::PointerTag>)> {
let vtable = self.scalar_to_ptr(mplace.vtable()); // also sanity checks the type
let vtable = self.scalar_to_ptr(mplace.vtable())?; // also sanity checks the type
let (instance, ty) = self.read_drop_type_from_vtable(vtable)?;
let layout = self.layout_of(ty)?;


@@ -519,7 +519,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.kind(),
ty::Dynamic(..)
));
let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta());
let vtable = self.scalar_to_ptr(receiver_place.meta.unwrap_meta())?;
let fn_val = self.get_vtable_slot(vtable, u64::try_from(idx).unwrap())?;
// `*mut receiver_place.layout.ty` is almost the layout that we


@@ -50,7 +50,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
let vtable_slot = self
.get_ptr_alloc(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?);
let fn_ptr = self.scalar_to_ptr(vtable_slot.read_ptr_sized(Size::ZERO)?.check_init()?)?;
self.get_ptr_fn(fn_ptr)
}
@@ -75,7 +75,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.check_init()?;
// We *need* an instance here, no other kind of function value, to be able
// to determine the type.
let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn))?.as_instance()?;
let drop_instance = self.get_ptr_fn(self.scalar_to_ptr(drop_fn)?)?.as_instance()?;
trace!("Found drop fn: {:?}", drop_instance);
let fn_sig = drop_instance.ty(*self.tcx, self.param_env).fn_sig(*self.tcx);
let fn_sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, fn_sig);
@@ -132,7 +132,8 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
.get_ptr_alloc(vtable_slot, pointer_size, self.tcx.data_layout.pointer_align.abi)?
.expect("cannot be a ZST");
let new_vtable = self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?);
let new_vtable =
self.scalar_to_ptr(new_vtable.read_ptr_sized(Size::ZERO)?.check_init()?)?;
Ok(new_vtable)
}


@@ -312,7 +312,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
let tail = self.ecx.tcx.struct_tail_erasing_lifetimes(pointee.ty, self.ecx.param_env);
match tail.kind() {
ty::Dynamic(..) => {
let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta());
let vtable = self.ecx.scalar_to_ptr(meta.unwrap_meta())?;
// Direct call to `check_ptr_access_align` checks alignment even on CTFE machines.
try_validation!(
self.ecx.check_ptr_access_align(
@@ -577,7 +577,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// If we check references recursively, also check that this points to a function.
if let Some(_) = self.ref_tracking {
let ptr = self.ecx.scalar_to_ptr(value);
let ptr = self.ecx.scalar_to_ptr(value)?;
let _fn = try_validation!(
self.ecx.get_ptr_fn(ptr),
self.path,
@@ -590,7 +590,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// FIXME: Check if the signature matches
} else {
// Otherwise (for standalone Miri), we have to still check it to be non-null.
if self.ecx.scalar_may_be_null(value) {
if self.ecx.scalar_may_be_null(value)? {
throw_validation_failure!(self.path, { "a null function pointer" });
}
}
@@ -667,7 +667,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
// We support 2 kinds of ranges here: full range, and excluding zero.
if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.scalar_may_be_null(value) {
if self.ecx.scalar_may_be_null(value)? {
throw_validation_failure!(self.path,
{ "a potentially null pointer" }
expected {