
Auto merge of #100043 - RalfJung:scalar-always-init, r=RalfJung

interpret: remove support for uninitialized scalars

With Miri no longer supporting `-Zmiri-allow-uninit-numbers`, we no longer need to support storing uninit data in a `Scalar`. We already use this representation only for types with *initialized* `Scalar` layout (and we have to, due to partial initialization), so let's get rid of the `ScalarMaybeUninit` type entirely.
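
As a minimal illustration (an editor's sketch, not code from this PR), this is the kind of program the change affects; under Miri, the uninitialized read below is now an unconditional error, where `-Zmiri-allow-uninit-numbers` could previously mask it:

use std::mem::MaybeUninit;

fn main() {
    // UB: copies uninitialized bytes into a `u32`. With this change,
    // `read_immediate` reports an uninitialized-memory error here rather
    // than propagating a `ScalarMaybeUninit::Uninit` value.
    let x: u32 = unsafe { MaybeUninit::<u32>::uninit().assume_init() };
    println!("{x}");
}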

I tried to stage this into meaningful commits, but the one that changes `read_immediate` to always trigger UB on uninit is the largest chunk of the PR and I don't see how it could be subdivided.

Fixes https://github.com/rust-lang/miri/issues/2187
r? `@oli-obk`
bors · 2022-08-26 21:50:09 +00:00 · commit 2b443a8d97
54 changed files with 506 additions and 935 deletions

compiler/rustc_middle/src/mir/interpret/allocation.rs

@@ -16,8 +16,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size};
 use super::{
     read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
-    ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
-    UninitBytesAccess, UnsupportedOpInfo,
+    ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess,
+    UnsupportedOpInfo,
 };
 use crate::ty;
@@ -415,25 +415,10 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
 /// Reading and writing.
 impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
-    /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
-    /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
-    /// given range contains no uninitialized bytes/relocations.
-    pub fn check_bytes(
-        &self,
-        cx: &impl HasDataLayout,
-        range: AllocRange,
-        allow_uninit: bool,
-        allow_ptr: bool,
-    ) -> AllocResult {
-        // Check bounds and relocations on the edges.
-        self.get_bytes_with_uninit_and_ptr(cx, range)?;
-        // Check uninit and ptr.
-        if !allow_uninit {
-            self.check_init(range)?;
-        }
-        if !allow_ptr {
-            self.check_relocations(cx, range)?;
-        }
+    /// Validates that this memory range is initialized and contains no relocations.
+    pub fn check_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+        // This implicitly does all the checking we are asking for.
+        self.get_bytes(cx, range)?;
         Ok(())
     }
@@ -452,16 +437,14 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         cx: &impl HasDataLayout,
         range: AllocRange,
         read_provenance: bool,
-    ) -> AllocResult<ScalarMaybeUninit<Prov>> {
+    ) -> AllocResult<Scalar<Prov>> {
         if read_provenance {
             assert_eq!(range.size, cx.data_layout().pointer_size);
         }
 
         // First and foremost, if anything is uninit, bail.
         if self.is_init(range).is_err() {
-            // This inflates uninitialized bytes to the entire scalar, even if only a few
-            // bytes are uninitialized.
-            return Ok(ScalarMaybeUninit::Uninit);
+            return Err(AllocError::InvalidUninitBytes(None));
         }
 
         // If we are doing a pointer read, and there is a relocation exactly where we
@@ -471,7 +454,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
             let bytes = self.get_bytes_even_more_internal(range);
             let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
             let ptr = Pointer::new(prov, Size::from_bytes(bits));
-            return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
+            return Ok(Scalar::from_pointer(ptr, cx));
         }
 
         // If we are *not* reading a pointer, and we can just ignore relocations,
@@ -480,7 +463,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
             // We just strip provenance.
             let bytes = self.get_bytes_even_more_internal(range);
             let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
-            return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+            return Ok(Scalar::from_uint(bits, range.size));
         }
 
         // It's complicated. Better make sure there is no provenance anywhere.
@@ -492,7 +475,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         // underlying bits.
         let bytes = self.get_bytes(cx, range)?;
         let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
-        Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
+        Ok(Scalar::from_uint(bits, range.size))
     }
 
     /// Writes a *non-ZST* scalar.
@@ -507,17 +490,10 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
         &mut self,
         cx: &impl HasDataLayout,
         range: AllocRange,
-        val: ScalarMaybeUninit<Prov>,
+        val: Scalar<Prov>,
     ) -> AllocResult {
         assert!(self.mutability == Mutability::Mut);
 
-        let val = match val {
-            ScalarMaybeUninit::Scalar(scalar) => scalar,
-            ScalarMaybeUninit::Uninit => {
-                return self.write_uninit(cx, range);
-            }
-        };
-
         // `to_bits_or_ptr_internal` is the right method because we just want to store this data
         // as-is into memory.
         let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {

compiler/rustc_middle/src/mir/interpret/mod.rs

@@ -124,7 +124,7 @@ pub use self::error::{
     UninitBytesAccess, UnsupportedOpInfo,
 };
 
-pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
+pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar};
 
 pub use self::allocation::{
     alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,

compiler/rustc_middle/src/mir/interpret/value.rs

@@ -504,139 +504,6 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> {
     }
 }
 
-#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
-pub enum ScalarMaybeUninit<Prov = AllocId> {
-    Scalar(Scalar<Prov>),
-    Uninit,
-}
-
-#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
-static_assert_size!(ScalarMaybeUninit, 24);
-
-impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
-    #[inline(always)]
-    fn from(s: Scalar<Prov>) -> Self {
-        ScalarMaybeUninit::Scalar(s)
-    }
-}
-
-// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
-// all the Miri types.
-impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
-            ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
-        }
-    }
-}
-
-impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
-    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-        match self {
-            ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
-            ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s),
-        }
-    }
-}
-
-impl<Prov> ScalarMaybeUninit<Prov> {
-    #[inline]
-    pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
-        ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
-    }
-
-    #[inline]
-    pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
-        ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
-    }
-
-    #[inline]
-    pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
-        match self {
-            ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
-            ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
-        }
-    }
-}
-
-impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
-    #[inline(always)]
-    pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
-        self.check_init()?.to_pointer(cx)
-    }
-
-    #[inline(always)]
-    pub fn to_bool(self) -> InterpResult<'tcx, bool> {
-        self.check_init()?.to_bool()
-    }
-
-    #[inline(always)]
-    pub fn to_char(self) -> InterpResult<'tcx, char> {
-        self.check_init()?.to_char()
-    }
-
-    #[inline(always)]
-    pub fn to_f32(self) -> InterpResult<'tcx, Single> {
-        self.check_init()?.to_f32()
-    }
-
-    #[inline(always)]
-    pub fn to_f64(self) -> InterpResult<'tcx, Double> {
-        self.check_init()?.to_f64()
-    }
-
-    #[inline(always)]
-    pub fn to_u8(self) -> InterpResult<'tcx, u8> {
-        self.check_init()?.to_u8()
-    }
-
-    #[inline(always)]
-    pub fn to_u16(self) -> InterpResult<'tcx, u16> {
-        self.check_init()?.to_u16()
-    }
-
-    #[inline(always)]
-    pub fn to_u32(self) -> InterpResult<'tcx, u32> {
-        self.check_init()?.to_u32()
-    }
-
-    #[inline(always)]
-    pub fn to_u64(self) -> InterpResult<'tcx, u64> {
-        self.check_init()?.to_u64()
-    }
-
-    #[inline(always)]
-    pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
-        self.check_init()?.to_machine_usize(cx)
-    }
-
-    #[inline(always)]
-    pub fn to_i8(self) -> InterpResult<'tcx, i8> {
-        self.check_init()?.to_i8()
-    }
-
-    #[inline(always)]
-    pub fn to_i16(self) -> InterpResult<'tcx, i16> {
-        self.check_init()?.to_i16()
-    }
-
-    #[inline(always)]
-    pub fn to_i32(self) -> InterpResult<'tcx, i32> {
-        self.check_init()?.to_i32()
-    }
-
-    #[inline(always)]
-    pub fn to_i64(self) -> InterpResult<'tcx, i64> {
-        self.check_init()?.to_i64()
-    }
-
-    #[inline(always)]
-    pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
-        self.check_init()?.to_machine_isize(cx)
-    }
-}
 
 /// Gets the bytes of a constant slice value.
 pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
     if let ConstValue::Slice { data, start, end } = val {

compiler/rustc_middle/src/ty/vtable.rs

@@ -1,7 +1,7 @@
 use std::convert::TryFrom;
 use std::fmt;
 
-use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit};
+use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar};
 use crate::ty::{self, Instance, PolyTraitRef, Ty, TyCtxt};
 use rustc_ast::Mutability;
@@ -87,7 +87,7 @@ pub(super) fn vtable_allocation_provider<'tcx>(
                 let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
                 let fn_alloc_id = tcx.create_fn_alloc(instance);
                 let fn_ptr = Pointer::from(fn_alloc_id);
-                ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+                Scalar::from_pointer(fn_ptr, &tcx)
             }
             VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
             VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
@@ -97,14 +97,14 @@ pub(super) fn vtable_allocation_provider<'tcx>(
                 let instance = instance.polymorphize(tcx);
                 let fn_alloc_id = tcx.create_fn_alloc(instance);
                 let fn_ptr = Pointer::from(fn_alloc_id);
-                ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+                Scalar::from_pointer(fn_ptr, &tcx)
             }
             VtblEntry::TraitVPtr(trait_ref) => {
                 let super_trait_ref = trait_ref
                     .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
                 let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref)));
                 let vptr = Pointer::from(supertrait_alloc_id);
-                ScalarMaybeUninit::from_pointer(vptr, &tcx)
+                Scalar::from_pointer(vptr, &tcx)
             }
         };
         vtable