
go back to infix ops for Size

Ralf Jung 2020-03-24 16:43:50 +01:00
parent 1d67ca00a1
commit b7db7320ad
6 changed files with 30 additions and 33 deletions
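
The change itself is mechanical: every explicit `Size::add`, `Size::sub`, and `Size::mul` call goes back to the infix operator. Nothing is lost in safety, because `Size` overloads these operators with overflow-checked arithmetic, so `offset + size` panics on overflow exactly like the named method did (the diff's own comments note "This does overflow checking."). Below is a minimal sketch of how such checked operator impls can look; it uses an illustrative stand-in newtype, not the actual rustc definition of `Size`.

use std::ops::{Add, Mul};

/// Illustrative stand-in for rustc's `Size`: a byte count that refuses to overflow.
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
struct Size {
    raw: u64,
}

impl Size {
    fn from_bytes(bytes: u64) -> Size {
        Size { raw: bytes }
    }
    fn bytes(self) -> u64 {
        self.raw
    }
}

impl Add for Size {
    type Output = Size;
    fn add(self, other: Size) -> Size {
        // Infix `+` still checks: panic on overflow instead of silently wrapping.
        Size::from_bytes(self.raw.checked_add(other.raw).expect("Size addition overflowed"))
    }
}

impl Mul<u64> for Size {
    type Output = Size;
    fn mul(self, count: u64) -> Size {
        // Same for `*`: `size * count` is a checked multiplication.
        Size::from_bytes(self.raw.checked_mul(count).expect("Size multiplication overflowed"))
    }
}

fn main() {
    let offset = Size::from_bytes(8);
    let size = Size::from_bytes(24);
    assert_eq!((offset + size).bytes(), 32); // reads like the arithmetic it is
    assert_eq!((size * 3).bytes(), 72);
}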

View file

@@ -3,7 +3,7 @@
use std::borrow::Cow;
use std::convert::TryFrom;
use std::iter;
-use std::ops::{Add, Deref, DerefMut, Mul, Range, Sub};
+use std::ops::{Deref, DerefMut, Range};
use rustc_ast::ast::Mutability;
use rustc_data_structures::sorted_map::SortedMap;
@@ -183,7 +183,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
/// Returns the range of this allocation that was meant.
#[inline]
fn check_bounds(&self, offset: Size, size: Size) -> Range<usize> {
-let end = Size::add(offset, size); // This does overflow checking.
+let end = offset + size; // This does overflow checking.
let end = usize::try_from(end.bytes()).expect("access too big for this host architecture");
assert!(
end <= self.len(),
@@ -293,7 +293,7 @@ impl<'tcx, Tag: Copy, Extra: AllocationExtra<Tag>> Allocation<Tag, Extra> {
let offset = usize::try_from(ptr.offset.bytes()).unwrap();
Ok(match self.bytes[offset..].iter().position(|&c| c == 0) {
Some(size) => {
-let size_with_null = Size::add(Size::from_bytes(size), Size::from_bytes(1));
+let size_with_null = Size::from_bytes(size) + Size::from_bytes(1);
// Go through `get_bytes` for checks and AllocationExtra hooks.
// We read the null, so we include it in the request, but we want it removed
// from the result, so we do subslicing.
@@ -474,7 +474,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
// We have to go back `pointer_size - 1` bytes, as that one would still overlap with
// the beginning of this range.
let start = ptr.offset.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
-let end = Size::add(ptr.offset, size); // This does overflow checking.
+let end = ptr.offset + size; // This does overflow checking.
self.relocations.range(Size::from_bytes(start)..end)
}
@@ -519,7 +519,7 @@ impl<'tcx, Tag: Copy, Extra> Allocation<Tag, Extra> {
)
};
let start = ptr.offset;
-let end = Size::add(start, size);
+let end = start + size; // `Size` addition
// Mark parts of the outermost relocations as undefined if they partially fall outside the
// given range.
@@ -558,7 +558,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
#[inline]
fn check_defined(&self, ptr: Pointer<Tag>, size: Size) -> InterpResult<'tcx> {
self.undef_mask
-.is_range_defined(ptr.offset, Size::add(ptr.offset, size))
+.is_range_defined(ptr.offset, ptr.offset + size) // `Size` addition
.or_else(|idx| throw_ub!(InvalidUndefBytes(Some(Pointer::new(ptr.alloc_id, idx)))))
}
@@ -566,7 +566,7 @@ impl<'tcx, Tag, Extra> Allocation<Tag, Extra> {
if size.bytes() == 0 {
return;
}
-self.undef_mask.set_range(ptr.offset, Size::add(ptr.offset, size), new_state);
+self.undef_mask.set_range(ptr.offset, ptr.offset + size, new_state);
}
}
@@ -611,7 +611,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
for i in 1..size.bytes() {
// FIXME: optimize to bitshift the current undef block's bits and read the top bit.
-if self.undef_mask.get(Size::add(src.offset, Size::from_bytes(i))) == cur {
+if self.undef_mask.get(src.offset + Size::from_bytes(i)) == cur {
cur_len += 1;
} else {
ranges.push(cur_len);
@@ -638,7 +638,7 @@ impl<Tag, Extra> Allocation<Tag, Extra> {
if defined.ranges.len() <= 1 {
self.undef_mask.set_range_inbounds(
dest.offset,
-Size::add(dest.offset, Size::mul(size, repeat)),
+dest.offset + size * repeat, // `Size` operations
defined.initial,
);
return;
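
The two hunks above belong to the undef-mask copy fast path: the definedness mask of the source range is compressed into a run-length list once, and when it consists of a single run (`defined.ranges.len() <= 1`), the whole destination is stamped with one `set_range_inbounds` call. A sketch of that run-length compression over a plain bool slice, with hypothetical helper names rather than the actual rustc code:

/// Run-length encode a definedness mask: the value of the first run,
/// plus the lengths of the successive runs.
fn compress_mask(mask: &[bool]) -> (bool, Vec<u64>) {
    let initial = mask.first().copied().unwrap_or(false);
    let mut ranges = Vec::new();
    let mut cur = initial;
    let mut cur_len: u64 = 0;
    for &bit in mask {
        if bit == cur {
            cur_len += 1;
        } else {
            ranges.push(cur_len);
            cur = bit;
            cur_len = 1;
        }
    }
    ranges.push(cur_len);
    (initial, ranges)
}

fn main() {
    // Two defined bytes, three undefined, one defined: three runs.
    let (initial, ranges) = compress_mask(&[true, true, false, false, false, true]);
    assert_eq!(initial, true);
    assert_eq!(ranges, vec![2, 3, 1]);
}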
@@ -716,10 +716,10 @@ impl<Tag: Copy, Extra> Allocation<Tag, Extra> {
for i in 0..length {
new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
// compute offset for current repetition
-let dest_offset = Size::add(dest.offset, Size::mul(size, i));
+let dest_offset = dest.offset + size * i; // `Size` operations
(
// shift offsets from source allocation to destination allocation
-Size::sub(Size::add(offset, dest_offset), src.offset),
+(offset + dest_offset) - src.offset, // `Size` operations
reloc,
)
}));
@@ -867,7 +867,7 @@ impl UndefMask {
}
let start = self.len;
self.len += amount;
-self.set_range_inbounds(start, Size::add(start, amount), new_state);
+self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
}
}

View file

@@ -1,7 +1,6 @@
use std::cell::Cell;
use std::fmt::Write;
use std::mem;
-use std::ops::Add;
use rustc::ich::StableHashingContext;
use rustc::mir;
@@ -454,7 +453,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// here. But this is where the add would go.)
// Return the sum of sizes and max of aligns.
-let size = Size::add(sized_size, unsized_size);
+let size = sized_size + unsized_size; // `Size` addition
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).

View file

@@ -9,7 +9,6 @@
use std::borrow::Cow;
use std::collections::VecDeque;
use std::convert::TryFrom;
-use std::ops::{Add, Mul};
use std::ptr;
use rustc::ty::layout::{Align, HasDataLayout, Size, TargetDataLayout};
@@ -880,7 +879,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
let src_bytes =
self.get_raw(src.alloc_id)?.get_bytes_with_undef_and_ptr(&tcx, src, size)?.as_ptr();
let dest_bytes =
-self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, Size::mul(size, length))?;
+self.get_raw_mut(dest.alloc_id)?.get_bytes_mut(&tcx, dest, size * length)?; // `Size` multiplication
// If `dest_bytes` is empty we just optimize to not run anything for zsts.
// See #67539
@@ -901,7 +900,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
// touched if the bytes stay undef for the whole interpreter execution. On contemporary
// operating system this can avoid physically allocating the page.
let dest_alloc = self.get_raw_mut(dest.alloc_id)?;
-dest_alloc.mark_definedness(dest, Size::mul(size, length), false);
+dest_alloc.mark_definedness(dest, size * length, false); // `Size` multiplication
dest_alloc.mark_relocation_range(relocations);
return Ok(());
}
@@ -914,8 +913,9 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
unsafe {
if src.alloc_id == dest.alloc_id {
if nonoverlapping {
-if (src.offset <= dest.offset && Size::add(src.offset, size) > dest.offset)
-|| (dest.offset <= src.offset && Size::add(dest.offset, size) > src.offset)
+// `Size` additions
+if (src.offset <= dest.offset && src.offset + size > dest.offset)
+|| (dest.offset <= src.offset && dest.offset + size > src.offset)
{
throw_ub_format!("copy_nonoverlapping called on overlapping ranges")
}
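
The overlap test above is the standard check for half-open intervals: `[src, src + size)` and `[dest, dest + size)` intersect exactly when each range starts before the other one ends. The same predicate on plain `u64` offsets, as a self-contained sketch (the function name is illustrative):

/// Do the half-open byte ranges [src, src + size) and [dest, dest + size) overlap?
fn ranges_overlap(src: u64, dest: u64, size: u64) -> bool {
    (src <= dest && src + size > dest) || (dest <= src && dest + size > src)
}

fn main() {
    assert!(ranges_overlap(0, 4, 8)); // [0, 8) and [4, 12) overlap
    assert!(!ranges_overlap(0, 8, 8)); // [0, 8) and [8, 16) only touch
    assert!(!ranges_overlap(0, 4, 0)); // empty ranges never overlap
}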
@@ -924,7 +924,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
for i in 0..length {
ptr::copy(
src_bytes,
-dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
usize::try_from(size.bytes()).unwrap(),
);
}
@@ -932,7 +932,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> Memory<'mir, 'tcx, M> {
for i in 0..length {
ptr::copy_nonoverlapping(
src_bytes,
-dest_bytes.offset(isize::try_from(Size::mul(size, i).bytes()).unwrap()),
+dest_bytes.offset(isize::try_from((size * i).bytes()).unwrap()), // `Size` multiplication
usize::try_from(size.bytes()).unwrap(),
);
}

View file

@@ -4,7 +4,6 @@
use std::convert::TryFrom;
use std::hash::Hash;
-use std::ops::Mul;
use rustc::mir;
use rustc::mir::interpret::truncate;
@@ -444,7 +443,7 @@
// This can only be reached in ConstProp and non-rustc-MIR.
throw_ub!(BoundsCheckFailed { len, index });
}
-let offset = Size::mul(stride, index);
+let offset = stride * index; // `Size` multiplication
// All fields have the same layout.
let field_layout = base.layout.field(self, 0)?;
@@ -469,7 +468,8 @@
};
let layout = base.layout.field(self, 0)?;
let dl = &self.tcx.data_layout;
-Ok((0..len).map(move |i| base.offset(Size::mul(stride, i), MemPlaceMeta::None, layout, dl)))
+// `Size` multiplication
+Ok((0..len).map(move |i| base.offset(stride * i, MemPlaceMeta::None, layout, dl)))
}
fn mplace_subslice(
@@ -493,7 +493,7 @@
// Not using layout method because that works with usize, and does not work with slices
// (that have count 0 in their layout).
let from_offset = match base.layout.fields {
-layout::FieldPlacement::Array { stride, .. } => Size::mul(stride, from), // `Size` multiplication is checked
+layout::FieldPlacement::Array { stride, .. } => stride * from, // `Size` multiplication is checked
_ => bug!("Unexpected layout of index access: {:#?}", base.layout),
};

View file

@@ -1,5 +1,4 @@
use std::convert::TryFrom;
-use std::ops::Mul;
use rustc::mir::interpret::{InterpResult, Pointer, PointerArithmetic, Scalar};
use rustc::ty::layout::{Align, HasDataLayout, LayoutOf, Size};
@@ -57,7 +56,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// `get_vtable` in `rust_codegen_llvm/meth.rs`.
// /////////////////////////////////////////////////////////////////////////////////////////
let vtable = self.memory.allocate(
-Size::mul(ptr_size, u64::try_from(methods.len()).unwrap().checked_add(3).unwrap()),
+ptr_size * u64::try_from(methods.len()).unwrap().checked_add(3).unwrap(),
ptr_align,
MemoryKind::Vtable,
);
@@ -110,8 +109,7 @@ impl<'mir, 'tcx, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
) -> InterpResult<'tcx, FnVal<'tcx, M::ExtraFnVal>> {
let ptr_size = self.pointer_size();
// Skip over the 'drop_ptr', 'size', and 'align' fields.
-let vtable_slot =
-vtable.ptr_offset(Size::mul(ptr_size, idx.checked_add(3).unwrap()), self)?;
+let vtable_slot = vtable.ptr_offset(ptr_size * idx.checked_add(3).unwrap(), self)?;
let vtable_slot = self
.memory
.check_ptr_access(vtable_slot, ptr_size, self.tcx.data_layout.pointer_align.abi)?
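
Both hunks in this file encode the same vtable layout: three pointer-sized header fields (drop_ptr, size, and align, as the comment above says) followed by one pointer per method. So the allocation is `ptr_size * (methods.len() + 3)` bytes, and method `idx` lives at offset `ptr_size * (idx + 3)`. A sketch of that offset computation on plain integers, with a hypothetical helper name and the same checked arithmetic the `Size` operators provide:

/// Byte offset of method slot `idx` in a vtable whose first three
/// pointer-sized fields are drop_ptr, size, and align.
fn vtable_slot_offset(ptr_size: u64, idx: u64) -> u64 {
    ptr_size.checked_mul(idx.checked_add(3).unwrap()).unwrap()
}

fn main() {
    // On a 64-bit target, method #0 sits right after the three header fields.
    assert_eq!(vtable_slot_offset(8, 0), 24);
    assert_eq!(vtable_slot_offset(8, 2), 40);
}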

View file

@@ -6,10 +6,10 @@
use std::convert::TryFrom;
use std::fmt::Write;
-use std::ops::{Mul, RangeInclusive};
+use std::ops::RangeInclusive;
use rustc::ty;
-use rustc::ty::layout::{self, LayoutOf, Size, TyLayout, VariantIdx};
+use rustc::ty::layout::{self, LayoutOf, TyLayout, VariantIdx};
use rustc_data_structures::fx::FxHashSet;
use rustc_hir as hir;
use rustc_span::symbol::{sym, Symbol};
@@ -747,8 +747,8 @@ impl<'rt, 'mir, 'tcx, M: Machine<'mir, 'tcx>> ValueVisitor<'mir, 'tcx, M>
}
// This is the element type size.
let layout = self.ecx.layout_of(tys)?;
-// This is the size in bytes of the whole array.
-let size = Size::mul(layout.size, len);
+// This is the size in bytes of the whole array. (This checks for overflow.)
+let size = layout.size * len;
// Size is not 0, get a pointer.
let ptr = self.ecx.force_ptr(mplace.ptr)?;
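
The reworded comment spells out why this multiplication goes through the overloaded operator rather than raw integer math: for a huge element count, the byte size of the whole array can exceed `u64`, and the checked `Mul` impl catches that instead of wrapping. The failure mode it guards against, demonstrated with plain checked arithmetic:

fn main() {
    // 8-byte elements and an absurd element count: the total byte size
    // does not fit in u64, so an unchecked multiply would silently wrap.
    let elem_size: u64 = 8;
    let len: u64 = u64::MAX / 2;
    assert_eq!(elem_size.checked_mul(len), None); // checked arithmetic catches this
}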