
Rollup merge of #66043 - RalfJung:memory-get-raw, r=cramertj

rename Memory::get methods to get_raw to indicate their unchecked nature

Some recent Miri PRs started using these methods when they should not; the more explicit names should discourage such use.

In fact we could make these methods private to the `interp` module as far as Miri is concerned -- with the exception of the `uninit` intrinsic, which will hopefully go away soon. @bjorn3 @oli-obk does priroda need these methods? It would be great to be able to seal them away.
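For readers outside the compiler, here is a self-contained sketch of the convention this rename encodes; `Memory`, `Allocation`, and the checked wrapper below are simplified stand-ins invented for the example, not rustc's actual interpreter types. The `_raw` accessors hand out an allocation with no bounds checks, so every safety-relevant check has to live in a higher-level wrapper:

```rust
// Illustrative model only, not rustc's real API.
struct Allocation {
    bytes: Vec<u8>,
}

struct Memory {
    allocs: Vec<Allocation>,
}

impl Memory {
    /// "Raw" access: hands out the allocation with no bounds checks.
    /// Callers must validate offsets themselves (hence the loud name).
    fn get_raw_mut(&mut self, id: usize) -> Result<&mut Allocation, String> {
        self.allocs.get_mut(id).ok_or_else(|| "dangling allocation id".to_string())
    }

    /// Higher-level entry point: performs the bounds check before touching
    /// the bytes, the way the `PlaceTy`/`OpTy`-based APIs wrap raw access.
    fn write_bytes(&mut self, id: usize, offset: usize, src: &[u8]) -> Result<(), String> {
        let alloc = self.get_raw_mut(id)?;
        let end = offset.checked_add(src.len()).ok_or("offset overflow")?;
        if end > alloc.bytes.len() {
            return Err("out-of-bounds write".to_string());
        }
        alloc.bytes[offset..end].copy_from_slice(src);
        Ok(())
    }
}

fn main() {
    let mut mem = Memory { allocs: vec![Allocation { bytes: vec![0; 8] }] };
    // The checked entry point rejects a write that runs past the allocation...
    assert!(mem.write_bytes(0, 6, &[1, 2, 3]).is_err());
    // ...and accepts one that fits.
    assert!(mem.write_bytes(0, 0, &[1, 2, 3]).is_ok());
    assert_eq!(mem.allocs[0].bytes[..3], [1, 2, 3]);
}
```

The point of the rename is exactly this split: the `_raw` suffix makes the unchecked step visible at every call site, so a checked wrapper is the obvious default.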
commit 3b0438aa7b by Mazdak Farrokhzad, 2019-11-08 16:50:38 +01:00 (committed by GitHub)
8 changed files with 45 additions and 42 deletions


@@ -37,7 +37,8 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         let col_out = self.force_ptr(self.mplace_field(location, 2)?.ptr)?;

         let layout = &self.tcx.data_layout;
-        let alloc = self.memory.get_mut(file_ptr_out.alloc_id)?;
+        // We just allocated this, so we can skip the bounds checks.
+        let alloc = self.memory.get_raw_mut(file_ptr_out.alloc_id)?;

         alloc.write_scalar(layout, file_ptr_out, file.into(), ptr_size)?;
         alloc.write_scalar(layout, file_len_out, file_len.into(), ptr_size)?;


@@ -210,7 +210,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         let new_ptr = self.allocate(new_size, new_align, kind);
         let old_size = match old_size_and_align {
             Some((size, _align)) => size,
-            None => self.get(ptr.alloc_id)?.size,
+            None => self.get_raw(ptr.alloc_id)?.size,
         };
         self.copy(
             ptr,
@@ -480,7 +480,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         ).0)
     }

-    pub fn get(
+    /// Gives raw access to the `Allocation`, without bounds or alignment checks.
+    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
+    pub fn get_raw(
         &self,
         id: AllocId,
     ) -> InterpResult<'tcx, &Allocation<M::PointerTag, M::AllocExtra>> {
@@ -513,7 +515,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         }
     }

-    pub fn get_mut(
+    /// Gives raw mutable access to the `Allocation`, without bounds or alignment checks.
+    /// Use the higher-level, `PlaceTy`- and `OpTy`-based APIs in `InterpCx` instead!
+    pub fn get_raw_mut(
         &mut self,
         id: AllocId,
     ) -> InterpResult<'tcx, &mut Allocation<M::PointerTag, M::AllocExtra>> {
@@ -555,7 +559,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         liveness: AllocCheck,
     ) -> InterpResult<'static, (Size, Align)> {
         // # Regular allocations
-        // Don't use `self.get` here as that will
+        // Don't use `self.get_raw` here as that will
         // a) cause cycles in case `id` refers to a static
         // b) duplicate a static's allocation in miri
         if let Some((_, alloc)) = self.alloc_map.get(id) {
@@ -627,7 +631,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     }

     pub fn mark_immutable(&mut self, id: AllocId) -> InterpResult<'tcx> {
-        self.get_mut(id)?.mutability = Mutability::Immutable;
+        self.get_raw_mut(id)?.mutability = Mutability::Immutable;
         Ok(())
     }
@@ -776,7 +780,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             Some(ptr) => ptr,
             None => return Ok(&[]), // zero-sized access
         };
-        self.get(ptr.alloc_id)?.get_bytes(self, ptr, size)
+        self.get_raw(ptr.alloc_id)?.get_bytes(self, ptr, size)
     }

     /// Reads a 0-terminated sequence of bytes from memory. Returns them as a slice.
@@ -784,7 +788,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
     /// Performs appropriate bounds checks.
     pub fn read_c_str(&self, ptr: Scalar<M::PointerTag>) -> InterpResult<'tcx, &[u8]> {
         let ptr = self.force_ptr(ptr)?; // We need to read at least 1 byte, so we *need* a ptr.
-        self.get(ptr.alloc_id)?.read_c_str(self, ptr)
+        self.get_raw(ptr.alloc_id)?.read_c_str(self, ptr)
     }

     /// Writes the given stream of bytes into memory.
@@ -804,7 +808,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
             None => return Ok(()), // zero-sized access
         };
         let tcx = self.tcx.tcx;
-        self.get_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
+        self.get_raw_mut(ptr.alloc_id)?.write_bytes(&tcx, ptr, src)
     }

     /// Expects the caller to have checked bounds and alignment.
@@ -832,16 +836,16 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // since we don't want to keep any relocations at the target.
         // (`get_bytes_with_undef_and_ptr` below checks that there are no
         // relocations overlapping the edges; those would not be handled correctly).
-        let relocations = self.get(src.alloc_id)?
+        let relocations = self.get_raw(src.alloc_id)?
             .prepare_relocation_copy(self, src, size, dest, length);

         let tcx = self.tcx.tcx;

         // This checks relocation edges on the src.
-        let src_bytes = self.get(src.alloc_id)?
+        let src_bytes = self.get_raw(src.alloc_id)?
             .get_bytes_with_undef_and_ptr(&tcx, src, size)?
             .as_ptr();
-        let dest_bytes = self.get_mut(dest.alloc_id)?
+        let dest_bytes = self.get_raw_mut(dest.alloc_id)?
             .get_bytes_mut(&tcx, dest, size * length)?
             .as_mut_ptr();
@@ -880,7 +884,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // copy definedness to the destination
         self.copy_undef_mask(src, dest, size, length)?;
         // copy the relocations to the destination
-        self.get_mut(dest.alloc_id)?.mark_relocation_range(relocations);
+        self.get_raw_mut(dest.alloc_id)?.mark_relocation_range(relocations);

         Ok(())
     }
@@ -899,11 +903,11 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
         // The bits have to be saved locally before writing to dest in case src and dest overlap.
         assert_eq!(size.bytes() as usize as u64, size.bytes());

-        let src_alloc = self.get(src.alloc_id)?;
+        let src_alloc = self.get_raw(src.alloc_id)?;
         let compressed = src_alloc.compress_undef_range(src, size);

         // now fill in all the data
-        let dest_allocation = self.get_mut(dest.alloc_id)?;
+        let dest_allocation = self.get_raw_mut(dest.alloc_id)?;
         dest_allocation.mark_compressed_undef_range(&compressed, dest, size, repeat);

         Ok(())


@@ -248,7 +248,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
         match mplace.layout.abi {
             layout::Abi::Scalar(..) => {
                 let scalar = self.memory
-                    .get(ptr.alloc_id)?
+                    .get_raw(ptr.alloc_id)?
                     .read_scalar(self, ptr, mplace.layout.size)?;
                 Ok(Some(ImmTy {
                     imm: scalar.into(),
@@ -266,10 +266,10 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert!(b_offset.bytes() > 0); // we later use the offset to tell apart the fields
                 let b_ptr = ptr.offset(b_offset, self)?;
                 let a_val = self.memory
-                    .get(ptr.alloc_id)?
+                    .get_raw(ptr.alloc_id)?
                     .read_scalar(self, a_ptr, a_size)?;
                 let b_val = self.memory
-                    .get(ptr.alloc_id)?
+                    .get_raw(ptr.alloc_id)?
                     .read_scalar(self, b_ptr, b_size)?;
                 Ok(Some(ImmTy {
                     imm: Immediate::ScalarPair(a_val, b_val),


@@ -808,7 +808,7 @@ where
            _ => bug!("write_immediate_to_mplace: invalid Scalar layout: {:#?}",
                      dest.layout)
        }
-        self.memory.get_mut(ptr.alloc_id)?.write_scalar(
+        self.memory.get_raw_mut(ptr.alloc_id)?.write_scalar(
            tcx, ptr, scalar, dest.layout.size
        )
    }
@@ -830,10 +830,10 @@ where
            // fields do not match the `ScalarPair` components.

            self.memory
-                .get_mut(ptr.alloc_id)?
+                .get_raw_mut(ptr.alloc_id)?
                .write_scalar(tcx, ptr, a_val, a_size)?;
            self.memory
-                .get_mut(b_ptr.alloc_id)?
+                .get_raw_mut(b_ptr.alloc_id)?
                .write_scalar(tcx, b_ptr, b_val, b_size)
        }
    }


@@ -392,7 +392,7 @@ impl<'b, 'mir, 'tcx> SnapshotContext<'b>
     for Memory<'mir, 'tcx, CompileTimeInterpreter<'mir, 'tcx>>
 {
     fn resolve(&'b self, id: &AllocId) -> Option<&'b Allocation> {
-        self.get(*id).ok()
+        self.get_raw(*id).ok()
     }
 }


@@ -445,7 +445,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            ptr_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
-        let fn_ptr = self.memory.get(vtable_slot.alloc_id)?
+        let fn_ptr = self.memory.get_raw(vtable_slot.alloc_id)?
            .read_ptr_sized(self, vtable_slot)?.not_undef()?;
        let drop_fn = self.memory.get_fn(fn_ptr)?;


@@ -63,35 +63,30 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
        let drop = Instance::resolve_drop_in_place(*tcx, ty);
        let drop = self.memory.create_fn_alloc(FnVal::Instance(drop));

-        // no need to do any alignment checks on the memory accesses below, because we know the
+        // No need to do any alignment checks on the memory accesses below, because we know the
        // allocation is correctly aligned as we created it above. Also we're only offsetting by
        // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
-        self.memory
-            .get_mut(vtable.alloc_id)?
-            .write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;
-
-        let size_ptr = vtable.offset(ptr_size, self)?;
-        self.memory
-            .get_mut(size_ptr.alloc_id)?
-            .write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
-        let align_ptr = vtable.offset(ptr_size * 2, self)?;
-        self.memory
-            .get_mut(align_ptr.alloc_id)?
-            .write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;
+        let vtable_alloc = self.memory.get_raw_mut(vtable.alloc_id)?;
+        vtable_alloc.write_ptr_sized(tcx, vtable, Scalar::Ptr(drop).into())?;
+
+        let size_ptr = vtable.offset(ptr_size, tcx)?;
+        vtable_alloc.write_ptr_sized(tcx, size_ptr, Scalar::from_uint(size, ptr_size).into())?;
+        let align_ptr = vtable.offset(ptr_size * 2, tcx)?;
+        vtable_alloc.write_ptr_sized(tcx, align_ptr, Scalar::from_uint(align, ptr_size).into())?;

        for (i, method) in methods.iter().enumerate() {
            if let Some((def_id, substs)) = *method {
                // resolve for vtable: insert shims where needed
                let instance = ty::Instance::resolve_for_vtable(
-                    *self.tcx,
+                    *tcx,
                    self.param_env,
                    def_id,
                    substs,
                ).ok_or_else(|| err_inval!(TooGeneric))?;
                let fn_ptr = self.memory.create_fn_alloc(FnVal::Instance(instance));
-                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), self)?;
-                self.memory
-                    .get_mut(method_ptr.alloc_id)?
-                    .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
+                // We cannot use `vtable_alloc` as we are creating fn ptrs in this loop.
+                let method_ptr = vtable.offset(ptr_size * (3 + i as u64), tcx)?;
+                self.memory.get_raw_mut(vtable.alloc_id)?
+                    .write_ptr_sized(tcx, method_ptr, Scalar::Ptr(fn_ptr).into())?;
            }
        }
@@ -114,7 +109,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
        let drop_fn = self.memory
-            .get(vtable.alloc_id)?
+            .get_raw(vtable.alloc_id)?
            .read_ptr_sized(self, vtable)?
            .not_undef()?;
        // We *need* an instance here, no other kind of function value, to be able
@@ -140,7 +135,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
            3*pointer_size,
            self.tcx.data_layout.pointer_align.abi,
        )?.expect("cannot be a ZST");
-        let alloc = self.memory.get(vtable.alloc_id)?;
+        let alloc = self.memory.get_raw(vtable.alloc_id)?;
        let size = alloc.read_ptr_sized(
            self,
            vtable.offset(pointer_size, self)?


@@ -586,6 +586,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
                _ => false,
            }
        } => {
+            // Optimized handling for arrays of integer/float type.
+
            // bailing out for zsts is ok, since the array element type can only be int/float
            if op.layout.is_zst() {
                return Ok(());
@@ -605,6 +607,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
            // Size is not 0, get a pointer.
            let ptr = self.ecx.force_ptr(mplace.ptr)?;

+            // This is the optimization: we just check the entire range at once.
            // NOTE: Keep this in sync with the handling of integer and float
            // types above, in `visit_primitive`.
            // In run-time mode, we accept pointers in here. This is actually more
@@ -614,7 +617,7 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
            // to reject those pointers, we just do not have the machinery to
            // talk about parts of a pointer.
            // We also accept undef, for consistency with the slow path.
-            match self.ecx.memory.get(ptr.alloc_id)?.check_bytes(
+            match self.ecx.memory.get_raw(ptr.alloc_id)?.check_bytes(
                self.ecx,
                ptr,
                size,