
get rid of incorrect erase_for_fmt

Ralf Jung 2021-07-16 09:39:35 +02:00
parent 4e28065618
commit 7c720ce612
12 changed files with 81 additions and 119 deletions
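
The pattern running through the whole diff: instead of calling the misleadingly named `erase_for_fmt` on a provenance tag, callers now either ask the tag for its allocation id via `get_alloc_id`, or build a `Pointer` from the parts they already have. A minimal sketch of the shape this assumes (toy types; the real rustc trait has more members and bounds):

    // Toy model, not rustc's actual definitions: a provenance tag knows
    // which allocation it points into, and that is all most callers need.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct AllocId(pub u64);

    pub trait Provenance: Copy {
        // Replaces `erase_for_fmt`: callers that only want the allocation
        // identity now ask for exactly that.
        fn get_alloc_id(self) -> AllocId;
    }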

View file

@@ -897,11 +897,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
     fn deallocate_local(&mut self, local: LocalValue<M::PointerTag>) -> InterpResult<'tcx> {
         if let LocalValue::Live(Operand::Indirect(MemPlace { ptr, .. })) = local {
             // All locals have a backing allocation, even if the allocation is empty
-            // due to the local having ZST type.
+            // due to the local having ZST type. Hence we can `unwrap`.
             trace!(
                 "deallocating local {:?}: {:?}",
                 local,
-                self.memory.dump_alloc(ptr.provenance.unwrap().erase_for_fmt())
+                self.memory.dump_alloc(ptr.provenance.unwrap().get_alloc_id())
             );
             self.memory.deallocate(ptr, None, MemoryKind::Stack)?;
         };
@@ -989,28 +989,28 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> std::fmt::Debug
                         },
                         mplace.ptr,
                     )?;
-                    allocs.extend(mplace.ptr.map_erase_for_fmt().provenance);
+                    allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id));
                 }
                 LocalValue::Live(Operand::Immediate(Immediate::Scalar(val))) => {
                     write!(fmt, " {:?}", val)?;
                     if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val {
-                        allocs.push(ptr.provenance.erase_for_fmt());
+                        allocs.push(ptr.provenance.get_alloc_id());
                     }
                 }
                 LocalValue::Live(Operand::Immediate(Immediate::ScalarPair(val1, val2))) => {
                     write!(fmt, " ({:?}, {:?})", val1, val2)?;
                     if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val1 {
-                        allocs.push(ptr.provenance.erase_for_fmt());
+                        allocs.push(ptr.provenance.get_alloc_id());
                     }
                     if let ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _size)) = val2 {
-                        allocs.push(ptr.provenance.erase_for_fmt());
+                        allocs.push(ptr.provenance.get_alloc_id());
                    }
                }
            }

            write!(fmt, ": {:?}", self.ecx.memory.dump_allocs(allocs))
        }
-        Place::Ptr(mplace) => match mplace.ptr.map_erase_for_fmt().provenance {
+        Place::Ptr(mplace) => match mplace.ptr.provenance.map(Provenance::get_alloc_id) {
            Some(alloc_id) => write!(
                fmt,
                "by align({}) ref {:?}: {:?}",

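Worth noting for the hunk above: `ptr.provenance` is an `Option<Tag>`, so mapping `get_alloc_id` over it yields an `Option<AllocId>`, and `Option` implements `IntoIterator`, which is why it can be passed straight to `Vec::extend`. A runnable sketch of that pattern with stand-in types:

    // Hypothetical stand-ins for the interpreter types used above.
    #[derive(Copy, Clone, Debug)]
    struct AllocId(u64);

    #[derive(Copy, Clone)]
    struct Tag {
        alloc: AllocId,
    }

    impl Tag {
        fn get_alloc_id(self) -> AllocId {
            self.alloc
        }
    }

    fn main() {
        let provenance: Option<Tag> = Some(Tag { alloc: AllocId(7) });
        let mut allocs: Vec<AllocId> = Vec::new();
        // `Option<AllocId>` is `IntoIterator`, so `extend` adds zero or one
        // ids -- the same shape as the diff's
        // `allocs.extend(mplace.ptr.provenance.map(Provenance::get_alloc_id))`.
        allocs.extend(provenance.map(Tag::get_alloc_id));
        println!("{:?}", allocs);
    }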
View file

@@ -362,7 +362,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         //
         // Control flow is weird because we cannot early-return (to reach the
         // `go_to_block` at the end).
-        let done = if let (Some(a), Some(b)) = (a.try_to_int(), b.try_to_int()) {
+        let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
             let a = a.try_to_machine_usize(*self.tcx).unwrap();
             let b = b.try_to_machine_usize(*self.tcx).unwrap();
             if a == b && a != 0 {

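The `Some`/`Ok` switch here reflects `try_to_int` changing from returning an `Option` to a `Result` whose `Err` hands the value back, so error paths can still print it. A toy version of that API shift (simplified payload types, not rustc's):

    // Toy model of the Option -> Result change on `try_to_int`: the `Err`
    // case now carries the value instead of silently dropping it.
    #[derive(Debug)]
    enum Scalar {
        Int(u128),
        Ptr(usize),
    }

    impl Scalar {
        fn try_to_int(self) -> Result<u128, Scalar> {
            match self {
                Scalar::Int(i) => Ok(i),
                ptr => Err(ptr), // keep the pointer around for error reporting
            }
        }
    }

    fn main() {
        let (a, b) = (Scalar::Int(3), Scalar::Int(3));
        // Mirrors the diff: patterns switch from (Some(a), Some(b)) to (Ok(a), Ok(b)).
        if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
            println!("both sides are integers: {} == {}", a, b);
        }
    }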
View file

@@ -757,12 +757,12 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         ptr: Pointer<Option<M::PointerTag>>,
     ) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
         trace!("get_fn({:?})", ptr);
-        let (alloc_id, offset, ptr) = self.ptr_get_alloc(ptr)?;
+        let (alloc_id, offset, _ptr) = self.ptr_get_alloc(ptr)?;
         if offset.bytes() != 0 {
-            throw_ub!(InvalidFunctionPointer(ptr.erase_for_fmt()))
+            throw_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset)))
         }
         self.get_fn_alloc(alloc_id)
-            .ok_or_else(|| err_ub!(InvalidFunctionPointer(ptr.erase_for_fmt())).into())
+            .ok_or_else(|| err_ub!(InvalidFunctionPointer(Pointer::new(alloc_id, offset))).into())
     }

     pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
@@ -801,7 +801,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             if reachable.insert(id) {
                 // This is a new allocation, add its relocations to `todo`.
                 if let Some((_, alloc)) = self.alloc_map.get(id) {
-                    todo.extend(alloc.relocations().values().map(|tag| tag.erase_for_fmt()));
+                    todo.extend(alloc.relocations().values().map(|tag| tag.get_alloc_id()));
                }
            }
        }
@@ -841,7 +841,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'mir, 'tcx>> std::fmt::Debug for DumpAllocs<'a,
             allocs_to_print: &mut VecDeque<AllocId>,
             alloc: &Allocation<Tag, Extra>,
         ) -> std::fmt::Result {
-            for alloc_id in alloc.relocations().values().map(|tag| tag.erase_for_fmt()) {
+            for alloc_id in alloc.relocations().values().map(|tag| tag.get_alloc_id()) {
                 allocs_to_print.push_back(alloc_id);
             }
             write!(fmt, "{}", pretty::display_allocation(tcx, alloc))
@@ -1129,7 +1129,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
 /// Machine pointer introspection.
 impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     pub fn scalar_to_ptr(&self, scalar: Scalar<M::PointerTag>) -> Pointer<Option<M::PointerTag>> {
-        match scalar.to_bits_or_ptr(self.pointer_size()) {
+        // We use `to_bits_or_ptr_internal` since we are just implementing the method people need to
+        // call to force getting out a pointer.
+        match scalar.to_bits_or_ptr_internal(self.pointer_size()) {
             Err(ptr) => ptr.into(),
             Ok(bits) => {
                 let addr = u64::try_from(bits).unwrap();

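`scalar_to_ptr` is the one place that deliberately forces a scalar into pointer form: real pointers keep their tag, and plain bits become a pointer with no provenance. A self-contained sketch of that logic (illustrative types, not the rustc ones):

    use std::convert::TryFrom;

    // Illustrative model of `scalar_to_ptr`: a real pointer keeps its
    // provenance tag (wrapped in `Some`), while plain bits become an
    // "integer pointer" with provenance `None`.
    #[derive(Copy, Clone, Debug)]
    struct Pointer<P> {
        provenance: P,
        addr: u64,
    }

    #[derive(Copy, Clone, Debug)]
    enum Scalar<Tag> {
        Bits(u128),
        Ptr(Pointer<Tag>),
    }

    fn scalar_to_ptr<Tag: Copy>(s: Scalar<Tag>) -> Pointer<Option<Tag>> {
        match s {
            Scalar::Ptr(p) => Pointer { provenance: Some(p.provenance), addr: p.addr },
            Scalar::Bits(b) => Pointer { provenance: None, addr: u64::try_from(b).unwrap() },
        }
    }

    fn main() {
        let p = scalar_to_ptr::<()>(Scalar::Bits(0x1000));
        println!("{:?}", p); // Pointer { provenance: None, addr: 4096 }
    }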
View file

@@ -118,8 +118,14 @@ impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
             ty: Ty<'tcx>,
         ) -> Result<FmtPrinter<'a, 'tcx, F>, std::fmt::Error> {
             match s {
-                ScalarMaybeUninit::Scalar(s) => {
-                    cx.pretty_print_const_scalar(s.erase_for_fmt(), ty, true)
+                ScalarMaybeUninit::Scalar(Scalar::Int(int)) => {
+                    cx.pretty_print_const_scalar_int(int, ty, true)
+                }
+                ScalarMaybeUninit::Scalar(Scalar::Ptr(ptr, _sz)) => {
+                    // Just print the ptr value. `pretty_print_const_scalar_ptr` would also try to
+                    // print what it points to, which would fail since it has no access to the local
+                    // memory.
+                    cx.pretty_print_const_pointer(ptr, ty, true)
                 }
                 ScalarMaybeUninit::Uninit => cx.typed_value(
                     |mut this| {
@@ -139,11 +145,11 @@ impl<Tag: Provenance> std::fmt::Display for ImmTy<'tcx, Tag> {
                     p(cx, s, ty)?;
                     return Ok(());
                 }
-                write!(f, "{}: {}", s.erase_for_fmt(), self.layout.ty)
+                write!(f, "{}: {}", s, self.layout.ty)
             }
             Immediate::ScalarPair(a, b) => {
                 // FIXME(oli-obk): at least print tuples and slices nicely
-                write!(f, "({}, {}): {}", a.erase_for_fmt(), b.erase_for_fmt(), self.layout.ty,)
+                write!(f, "({}, {}): {}", a, b, self.layout.ty,)
             }
         }
     })
@@ -693,8 +699,9 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         Ok(match *tag_encoding {
             TagEncoding::Direct => {
                 let tag_bits = tag_val
-                    .to_bits(tag_layout.size)
-                    .map_err(|_| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
+                    .try_to_int()
+                    .map_err(|dbg_val| err_ub!(InvalidTag(dbg_val)))?
+                    .assert_bits(tag_layout.size);
                 // Cast bits from tag layout to discriminant layout.
                 let discr_val = self.cast_from_scalar(tag_bits, tag_layout, discr_layout.ty);
                 let discr_bits = discr_val.assert_bits(discr_layout.size);
@@ -711,7 +718,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 }
                 _ => span_bug!(self.cur_span(), "tagged layout for non-adt non-generator"),
             }
-            .ok_or_else(|| err_ub!(InvalidTag(tag_val.erase_for_fmt())))?;
+            .ok_or_else(|| err_ub!(InvalidTag(Scalar::from_uint(tag_bits, tag_layout.size))))?;
             // Return the cast value, and the index.
             (discr_val, index.0)
         }
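
The `Direct` case above now reads as a pipeline: `try_to_int()` either yields an int or hands back a printable value for the `InvalidTag` error, and `assert_bits` then insists the int fits the tag layout. A toy version of that chain, with the error type simplified to a string wrapper:

    // Toy version of `try_to_int().map_err(..)?.assert_bits(size)`:
    // either get the raw bits, or surface the offending value.
    #[derive(Debug)]
    struct InvalidTag(String);

    fn read_tag_bits(tag: Result<u128, String>, size_bits: u32) -> Result<u128, InvalidTag> {
        let bits = tag.map_err(InvalidTag)?; // Err still carries a printable value
        // Poor man's `assert_bits`: the value must fit the tag's size.
        assert!(size_bits == 128 || bits < (1u128 << size_bits));
        Ok(bits)
    }

    fn main() {
        assert_eq!(read_tag_bits(Ok(5), 8).unwrap(), 5);
        assert!(read_tag_bits(Err("some pointer value".into()), 8).is_err());
        println!("ok");
    }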
@@ -720,18 +727,23 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
             // discriminant (encoded in niche/tag) and variant index are the same.
             let variants_start = niche_variants.start().as_u32();
             let variants_end = niche_variants.end().as_u32();
-            let variant = match tag_val.to_bits_or_ptr(tag_layout.size) {
-                Err(ptr) => {
-                    // The niche must be just 0 (which an inbounds pointer value never is)
+            let variant = match tag_val.try_to_int() {
+                Err(dbg_val) => {
+                    // So this is a pointer then, and casting to an int failed.
+                    // Can only happen during CTFE.
+                    let ptr = self.scalar_to_ptr(tag_val);
+                    // The niche must be just 0, and the ptr must not be null; then we know
+                    // this is okay. Everything else, we conservatively reject.
                     let ptr_valid = niche_start == 0
                         && variants_start == variants_end
-                        && !self.memory.ptr_may_be_null(ptr.into());
+                        && !self.memory.ptr_may_be_null(ptr);
                     if !ptr_valid {
-                        throw_ub!(InvalidTag(tag_val.erase_for_fmt()))
+                        throw_ub!(InvalidTag(dbg_val))
                     }
                     dataful_variant
                 }
                 Ok(tag_bits) => {
+                    let tag_bits = tag_bits.assert_bits(tag_layout.size);
                     // We need to use machine arithmetic to get the relative variant idx:
                     // variant_index_relative = tag_val - niche_start_val
                     let tag_val = ImmTy::from_uint(tag_bits, tag_layout);

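For the niche arm, the relative-index computation the comment refers to (`variant_index_relative = tag_val - niche_start_val`) is machine arithmetic, i.e. wrapping. A simplified, runnable model of the decoding with made-up numbers (the real code works through layouts and `ImmTy`s):

    // Tags inside the niche map to niche variants; everything else is
    // the dataful variant.
    fn niche_variant(tag_bits: u128, niche_start: u128, variants_start: u32, variants_end: u32) -> Option<u32> {
        let relative = tag_bits.wrapping_sub(niche_start);
        let count = u128::from(variants_end - variants_start + 1);
        if relative < count {
            // Tag lands inside the niche: recover the variant index.
            Some(variants_start + relative as u32)
        } else {
            // Outside the niche: this is the dataful variant.
            None
        }
    }

    fn main() {
        // Niche variants are indices 1..=3 and the niche starts at tag value 2:
        assert_eq!(niche_variant(3, 2, 1, 3), Some(2)); // tag 3 -> variant 2
        assert_eq!(niche_variant(9, 2, 1, 3), None);    // out of range -> dataful
        println!("ok");
    }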
View file

@@ -62,17 +62,6 @@ impl<Tag> MemPlaceMeta<Tag> {
             Self::None | Self::Poison => false,
         }
     }
-
-    pub fn erase_for_fmt(self) -> MemPlaceMeta
-    where
-        Tag: Provenance,
-    {
-        match self {
-            Self::Meta(s) => MemPlaceMeta::Meta(s.erase_for_fmt()),
-            Self::None => MemPlaceMeta::None,
-            Self::Poison => MemPlaceMeta::Poison,
-        }
-    }
 }

 #[derive(Copy, Clone, Hash, PartialEq, Eq, HashStable)]
@@ -182,18 +171,6 @@ impl<'tcx, Tag> From<MPlaceTy<'tcx, Tag>> for PlaceTy<'tcx, Tag> {
 }

 impl<Tag> MemPlace<Tag> {
-    #[inline]
-    pub fn erase_for_fmt(self) -> MemPlace
-    where
-        Tag: Provenance,
-    {
-        MemPlace {
-            ptr: self.ptr.map_erase_for_fmt(),
-            align: self.align,
-            meta: self.meta.erase_for_fmt(),
-        }
-    }
-
     #[inline(always)]
     pub fn from_ptr(ptr: Pointer<Option<Tag>>, align: Align) -> Self {
         MemPlace { ptr, align, meta: MemPlaceMeta::None }

View file

@@ -535,7 +535,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                 // types below!
                 if self.ctfe_mode.is_some() {
                     // Integers/floats in CTFE: Must be scalar bits, pointers are dangerous
-                    let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_some());
+                    let is_bits = value.check_init().map_or(false, |v| v.try_to_int().is_ok());
                     if !is_bits {
                         throw_validation_failure!(self.path,
                             { "{}", value } expected { "initialized plain (non-pointer) bytes" }
@@ -652,11 +652,14 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                     err_ub!(InvalidUninitBytes(None)) => { "{}", value }
                         expected { "something {}", wrapping_range_format(valid_range, max_hi) },
                 );
-                let bits = match value.to_bits_or_ptr(op.layout.size) {
-                    Err(ptr) => {
+                let bits = match value.try_to_int() {
+                    Err(_) => {
+                        // So this is a pointer then, and casting to an int failed.
+                        // Can only happen during CTFE.
+                        let ptr = self.ecx.scalar_to_ptr(value);
                         if lo == 1 && hi == max_hi {
                             // Only null is the niche. So make sure the ptr is NOT null.
-                            if self.ecx.memory.ptr_may_be_null(ptr.into()) {
+                            if self.ecx.memory.ptr_may_be_null(ptr) {
                                 throw_validation_failure!(self.path,
                                     { "a potentially null pointer" }
                                     expected {
@@ -678,7 +681,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
                         )
                     }
                 }
-                Ok(data) => data,
+                Ok(int) => int.assert_bits(op.layout.size),
             };
             // Now compare. This is slightly subtle because this is a special "wrap-around" range.
             if wrapping_range_contains(&valid_range, bits) {

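`wrapping_range_contains` is what makes the comment's "wrap-around" caveat concrete: a validity range whose start is greater than its end denotes the union of `start..=MAX` and `0..=end` (e.g. an enum tag range like `250..=5`). A small sketch under that assumption:

    // Sketch of the wrap-around containment check, assuming the usual
    // convention: start > end means start..=MAX union 0..=end.
    fn wrapping_range_contains(start: u128, end: u128, bits: u128) -> bool {
        if start <= end {
            start <= bits && bits <= end // ordinary range
        } else {
            bits >= start || bits <= end // wrapped range, e.g. 250..=5
        }
    }

    fn main() {
        assert!(wrapping_range_contains(250, 5, 252));  // in the high arm
        assert!(wrapping_range_contains(250, 5, 3));    // in the low arm
        assert!(!wrapping_range_contains(250, 5, 100)); // in the hole
        println!("ok");
    }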
View file

@@ -921,12 +921,12 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> {
         match **op {
             interpret::Operand::Immediate(Immediate::Scalar(ScalarMaybeUninit::Scalar(s))) => {
-                s.try_to_int().is_some()
+                s.try_to_int().is_ok()
             }
             interpret::Operand::Immediate(Immediate::ScalarPair(
                 ScalarMaybeUninit::Scalar(l),
                 ScalarMaybeUninit::Scalar(r),
-            )) => l.try_to_int().is_some() && r.try_to_int().is_some(),
+            )) => l.try_to_int().is_ok() && r.try_to_int().is_ok(),
             _ => false,
         }
     }