interpret: refactor projection handling code

Moves our projection handling code into a common file, and avoids the use of a
general mplace-based fallback function by having more specialized implementations.

mplace_index (and the other slice-related functions) could be more efficient by
copy-pasting the body of operand_index. Or we could do some trait magic to share
the code between them. But for now this is probably fine.
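
As a purely illustrative aside (not part of the commit), here is a minimal self-contained sketch of what the "trait magic" could look like: a shared trait that both the operand-based and the mplace-based representations implement, so the indexing logic is written once. The trait name `Indexable`, the toy types `OpLike`/`PlaceLike`, and every signature below are invented for this sketch and are not the interpreter's actual API.

// Hypothetical sketch only: none of these names exist in the interpreter.
trait Indexable {
    type Elem;
    // Number of elements in the array/slice.
    fn len(&self) -> u64;
    // Project to the element at `index`; fails on out-of-bounds access.
    fn project_index(&self, index: u64) -> Result<Self::Elem, String>;
}

// Toy analogue of an operand that holds its elements by value.
struct OpLike {
    elems: Vec<u64>,
}

// Toy analogue of a memory place: a base address plus an element count.
struct PlaceLike {
    base: u64,
    len: u64,
}

impl Indexable for OpLike {
    type Elem = u64;
    fn len(&self) -> u64 {
        self.elems.len() as u64
    }
    fn project_index(&self, index: u64) -> Result<u64, String> {
        self.elems
            .get(index as usize)
            .copied()
            .ok_or_else(|| format!("index {index} out of bounds"))
    }
}

impl Indexable for PlaceLike {
    type Elem = u64; // the "address" of the selected element
    fn len(&self) -> u64 {
        self.len
    }
    fn project_index(&self, index: u64) -> Result<u64, String> {
        if index < self.len {
            Ok(self.base + index)
        } else {
            Err(format!("index {index} out of bounds"))
        }
    }
}

// The slice-handling logic is written once and works for both representations.
fn last_element<P: Indexable>(v: &P) -> Result<P::Elem, String> {
    let len = v.len();
    if len == 0 {
        return Err("empty array/slice has no last element".to_string());
    }
    v.project_index(len - 1)
}

fn main() {
    let op = OpLike { elems: vec![1, 2, 3] };
    let place = PlaceLike { base: 0x100, len: 3 };
    assert_eq!(last_element(&op), Ok(3));
    assert_eq!(last_element(&place), Ok(0x102));
}

Whether such a trait is worth the indirection, versus simply duplicating the small body of operand_index into mplace_index, is exactly the trade-off the commit message leaves open.
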
Ralf Jung 2022-07-04 08:48:05 -04:00
parent f893495e3d
commit ab225ade1e
7 changed files with 531 additions and 421 deletions

@@ -847,6 +847,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
);
}
Abi::Scalar(scalar_layout) => {
// We use a 'forced' read because we always need an `Immediate` here
// and treating "partially uninit" as "fully uninit" is fine for us.
let scalar = self.read_immediate_forced(op)?.to_scalar_or_uninit();
self.visit_scalar(scalar, scalar_layout)?;
}
@@ -856,6 +858,8 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// is subtle due to enums having ScalarPair layout, where one field
// is the discriminant.
if cfg!(debug_assertions) {
// We use a 'forced' read because we always need an `Immediate` here
// and treating "partially uninit" as "fully uninit" is fine for us.
let (a, b) = self.read_immediate_forced(op)?.to_scalar_or_uninit_pair();
self.visit_scalar(a, a_layout)?;
self.visit_scalar(b, b_layout)?;
@@ -880,7 +884,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
) -> InterpResult<'tcx> {
match op.layout.ty.kind() {
ty::Str => {
let mplace = op.assert_mem_place(); // strings are never immediate
let mplace = op.assert_mem_place(); // strings are unsized and hence never immediate
let len = mplace.len(self.ecx)?;
try_validation!(
self.ecx.read_bytes_ptr(mplace.ptr, Size::from_bytes(len)),
@@ -900,14 +904,27 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
{
// Optimized handling for arrays of integer/float type.
// Arrays cannot be immediate, slices are never immediate.
let mplace = op.assert_mem_place();
// This is the length of the array/slice.
let len = mplace.len(self.ecx)?;
let len = op.len(self.ecx)?;
// This is the element type size.
let layout = self.ecx.layout_of(*tys)?;
// This is the size in bytes of the whole array. (This checks for overflow.)
let size = layout.size * len;
// If the size is 0, there is nothing to check.
// (`size` can only be 0 if `len` is 0, and empty arrays are always valid.)
if size == Size::ZERO {
return Ok(());
}
// Now that we definitely have a non-ZST array, we know it lives in memory.
let mplace = match op.try_as_mplace() {
Ok(mplace) => mplace,
Err(imm) => match *imm {
Immediate::Uninit =>
throw_validation_failure!(self.path, { "uninitialized bytes" }),
Immediate::Scalar(..) | Immediate::ScalarPair(..) =>
bug!("arrays/slices can never have Scalar/ScalarPair layout"),
}
};
// Optimization: we just check the entire range at once.
// NOTE: Keep this in sync with the handling of integer and float
@@ -919,10 +936,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
// to reject those pointers, we just do not have the machinery to
// talk about parts of a pointer.
// We also accept uninit, for consistency with the slow path.
let Some(alloc) = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)? else {
// Size 0, nothing more to check.
return Ok(());
};
let alloc = self.ecx.get_ptr_alloc(mplace.ptr, size, mplace.align)?.expect("we already excluded size 0");
match alloc.check_bytes(
alloc_range(Size::ZERO, size),