1
Fork 0

Add methods for checking for full ranges to Scalar and WrappingRange

Move *_max methods back to util

change to inline instead of inline(always)

Remove valid_range_exclusive from scalar
Use WrappingRange instead

implement always_valid_for in a safer way

Fix accidental edit
This commit is contained in:
Andreas Liljeqvist 2021-08-25 15:21:45 +02:00
parent c5cbf7852a
commit 05cd48b008
7 changed files with 41 additions and 56 deletions

View file

@ -541,11 +541,8 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
// become 0..0 when the type becomes i1, which would be rejected // become 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier. // by the LLVM verifier.
if let Int(..) = scalar.value { if let Int(..) = scalar.value {
if !scalar.is_bool() { if !scalar.is_bool() && !scalar.is_always_valid_for(bx) {
let range = scalar.valid_range_exclusive(bx); bx.range_metadata(callsite, &scalar.valid_range);
if range.start != range.end {
bx.range_metadata(callsite, range);
}
} }
} }
} }

View file

@ -18,12 +18,12 @@ use rustc_hir::def_id::DefId;
use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout}; use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout};
use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_middle::ty::{self, Ty, TyCtxt};
use rustc_span::Span; use rustc_span::Span;
use rustc_target::abi::{self, Align, Size}; use rustc_target::abi::{self, Align, Size, WrappingRange};
use rustc_target::spec::{HasTargetSpec, Target}; use rustc_target::spec::{HasTargetSpec, Target};
use std::borrow::Cow; use std::borrow::Cow;
use std::ffi::CStr; use std::ffi::CStr;
use std::iter; use std::iter;
use std::ops::{Deref, Range}; use std::ops::Deref;
use std::ptr; use std::ptr;
use tracing::debug; use tracing::debug;
@ -464,9 +464,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
) { ) {
match scalar.value { match scalar.value {
abi::Int(..) => { abi::Int(..) => {
let range = scalar.valid_range_exclusive(bx); if !scalar.is_always_valid_for(bx) {
if range.start != range.end { bx.range_metadata(load, &scalar.valid_range);
bx.range_metadata(load, range);
} }
} }
abi::Pointer if !scalar.valid_range.contains_zero() => { abi::Pointer if !scalar.valid_range.contains_zero() => {
@ -555,7 +554,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
next_bx next_bx
} }
fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) { fn range_metadata(&mut self, load: &'ll Value, range: &WrappingRange) {
if self.sess().target.arch == "amdgpu" { if self.sess().target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks an i64 value is // amdgpu/LLVM does something weird and thinks an i64 value is
// split into a v2i32, halving the bitwidth LLVM expects, // split into a v2i32, halving the bitwidth LLVM expects,
@ -568,7 +567,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let llty = self.cx.val_ty(load); let llty = self.cx.val_ty(load);
let v = [ let v = [
self.cx.const_uint_big(llty, range.start), self.cx.const_uint_big(llty, range.start),
self.cx.const_uint_big(llty, range.end), self.cx.const_uint_big(llty, range.end.wrapping_add(1)),
]; ];
llvm::LLVMSetMetadata( llvm::LLVMSetMetadata(

View file

@ -20,7 +20,7 @@ use rustc_middle::ty::{self, Instance, Ty, TypeFoldable};
use rustc_span::source_map::Span; use rustc_span::source_map::Span;
use rustc_span::{sym, Symbol}; use rustc_span::{sym, Symbol};
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode}; use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::abi::{self, HasDataLayout}; use rustc_target::abi::{self, HasDataLayout, WrappingRange};
use rustc_target::spec::abi::Abi; use rustc_target::spec::abi::Abi;
/// Used by `FunctionCx::codegen_terminator` for emitting common patterns /// Used by `FunctionCx::codegen_terminator` for emitting common patterns
@ -1104,7 +1104,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
llval = bx.load(bx.backend_type(arg.layout), llval, align); llval = bx.load(bx.backend_type(arg.layout), llval, align);
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi { if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
if scalar.is_bool() { if scalar.is_bool() {
bx.range_metadata(llval, 0..2); bx.range_metadata(llval, &WrappingRange { start: 0, end: 1 });
} }
} }
// We store bools as `i8` so we need to truncate to `i1`. // We store bools as `i8` so we need to truncate to `i1`.

View file

@ -308,8 +308,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// then `i1 1` (i.e., E::B) is effectively `i8 -1`. // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
signed = !scalar.is_bool() && s; signed = !scalar.is_bool() && s;
let er = scalar.valid_range_exclusive(bx.cx()); if !scalar.is_always_valid_for(bx.cx())
if er.end != er.start
&& scalar.valid_range.end >= scalar.valid_range.start && scalar.valid_range.end >= scalar.valid_range.start
{ {
// We want `table[e as usize ± k]` to not // We want `table[e as usize ± k]` to not

View file

@ -16,11 +16,9 @@ use crate::MemFlags;
use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout}; use rustc_middle::ty::layout::{HasParamEnv, TyAndLayout};
use rustc_middle::ty::Ty; use rustc_middle::ty::Ty;
use rustc_span::Span; use rustc_span::Span;
use rustc_target::abi::{Abi, Align, Scalar, Size}; use rustc_target::abi::{Abi, Align, Scalar, Size, WrappingRange};
use rustc_target::spec::HasTargetSpec; use rustc_target::spec::HasTargetSpec;
use std::ops::Range;
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub enum OverflowOp { pub enum OverflowOp {
Add, Add,
@ -158,7 +156,7 @@ pub trait BuilderMethods<'a, 'tcx>:
dest: PlaceRef<'tcx, Self::Value>, dest: PlaceRef<'tcx, Self::Value>,
) -> Self; ) -> Self;
fn range_metadata(&mut self, load: Self::Value, range: Range<u128>); fn range_metadata(&mut self, load: Self::Value, range: &WrappingRange);
fn nonnull_metadata(&mut self, load: Self::Value); fn nonnull_metadata(&mut self, load: Self::Value);
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value; fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;

View file

@ -620,38 +620,36 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
op: &OpTy<'tcx, M::PointerTag>, op: &OpTy<'tcx, M::PointerTag>,
scalar_layout: &ScalarAbi, scalar_layout: &ScalarAbi,
) -> InterpResult<'tcx> { ) -> InterpResult<'tcx> {
let value = self.read_scalar(op)?; if scalar_layout.valid_range.is_full_for(op.layout.size) {
let valid_range = scalar_layout.valid_range.clone();
let WrappingRange { start: lo, end: hi } = valid_range;
// Determine the allowed range
// `max_hi` is as big as the size fits
let max_hi = u128::MAX >> (128 - op.layout.size.bits());
assert!(hi <= max_hi);
// We could also write `(hi + 1) % (max_hi + 1) == lo` but `max_hi + 1` overflows for `u128`
if (lo == 0 && hi == max_hi) || (hi + 1 == lo) {
// Nothing to check // Nothing to check
return Ok(()); return Ok(());
} }
// At least one value is excluded. Get the bits. // At least one value is excluded.
let valid_range = scalar_layout.valid_range.clone();
let WrappingRange { start, end } = valid_range;
let max_value = u128::MAX >> (128 - op.layout.size.bits());
assert!(end <= max_value);
// Determine the allowed range
let value = self.read_scalar(op)?;
let value = try_validation!( let value = try_validation!(
value.check_init(), value.check_init(),
self.path, self.path,
err_ub!(InvalidUninitBytes(None)) => { "{}", value } err_ub!(InvalidUninitBytes(None)) => { "{}", value }
expected { "something {}", wrapping_range_format(valid_range, max_hi) }, expected { "something {}", wrapping_range_format(valid_range, max_value) },
); );
let bits = match value.try_to_int() { let bits = match value.try_to_int() {
Err(_) => { Err(_) => {
// So this is a pointer then, and casting to an int failed. // So this is a pointer then, and casting to an int failed.
// Can only happen during CTFE. // Can only happen during CTFE.
let ptr = self.ecx.scalar_to_ptr(value); let ptr = self.ecx.scalar_to_ptr(value);
if lo == 1 && hi == max_hi { if start == 1 && end == max_value {
// Only null is the niche. So make sure the ptr is NOT null. // Only null is the niche. So make sure the ptr is NOT null.
if self.ecx.memory.ptr_may_be_null(ptr) { if self.ecx.memory.ptr_may_be_null(ptr) {
throw_validation_failure!(self.path, throw_validation_failure!(self.path,
{ "a potentially null pointer" } { "a potentially null pointer" }
expected { expected {
"something that cannot possibly fail to be {}", "something that cannot possibly fail to be {}",
wrapping_range_format(valid_range, max_hi) wrapping_range_format(valid_range, max_value)
} }
) )
} }
@ -663,7 +661,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
{ "a pointer" } { "a pointer" }
expected { expected {
"something that cannot possibly fail to be {}", "something that cannot possibly fail to be {}",
wrapping_range_format(valid_range, max_hi) wrapping_range_format(valid_range, max_value)
} }
) )
} }
@ -676,7 +674,7 @@ impl<'rt, 'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> ValidityVisitor<'rt, 'mir, '
} else { } else {
throw_validation_failure!(self.path, throw_validation_failure!(self.path,
{ "{}", bits } { "{}", bits }
expected { "something {}", wrapping_range_format(valid_range, max_hi) } expected { "something {}", wrapping_range_format(valid_range, max_value) }
) )
} }
} }

View file

@ -7,7 +7,7 @@ use std::convert::{TryFrom, TryInto};
use std::fmt; use std::fmt;
use std::iter::Step; use std::iter::Step;
use std::num::NonZeroUsize; use std::num::NonZeroUsize;
use std::ops::{Add, AddAssign, Deref, Mul, Range, RangeInclusive, Sub}; use std::ops::{Add, AddAssign, Deref, Mul, RangeInclusive, Sub};
use std::str::FromStr; use std::str::FromStr;
use rustc_index::vec::{Idx, IndexVec}; use rustc_index::vec::{Idx, IndexVec};
@ -779,6 +779,14 @@ impl WrappingRange {
self.end = end; self.end = end;
self self
} }
/// Returns `true` if the range covers every value representable in `size`, i.e. the range is full.
#[inline]
pub fn is_full_for(&self, size: Size) -> bool {
let max_value = u128::MAX >> (128 - size.bits());
debug_assert!(self.start <= max_value && self.end <= max_value);
(self.start == 0 && self.end == max_value) || (self.end + 1 == self.start)
}
} }
impl fmt::Debug for WrappingRange { impl fmt::Debug for WrappingRange {
@ -807,21 +815,10 @@ impl Scalar {
&& matches!(self.valid_range, WrappingRange { start: 0, end: 1 }) && matches!(self.valid_range, WrappingRange { start: 0, end: 1 })
} }
/// Returns the valid range as a `x..y` range. /// Returns `true` if all possible numbers are valid, i.e. `valid_range` covers the whole layout
/// #[inline]
/// If `x` and `y` are equal, the range is full, not empty. pub fn is_always_valid_for<C: HasDataLayout>(&self, cx: &C) -> bool {
pub fn valid_range_exclusive<C: HasDataLayout>(&self, cx: &C) -> Range<u128> { self.valid_range.is_full_for(self.value.size(cx))
// For a (max) value of -1, max will be `-1 as usize`, which overflows.
// However, that is fine here (it would still represent the full range),
// i.e., if the range is everything.
let bits = self.value.size(cx).bits();
assert!(bits <= 128);
let mask = !0u128 >> (128 - bits);
let start = self.valid_range.start;
let end = self.valid_range.end;
assert_eq!(start, start & mask);
assert_eq!(end, end & mask);
start..(end.wrapping_add(1) & mask)
} }
} }
@ -1269,11 +1266,8 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
// The range must contain 0. // The range must contain 0.
s.valid_range.contains_zero() s.valid_range.contains_zero()
} else { } else {
// The range must include all values. `valid_range_exclusive` handles // The range must include all values.
// the wrap-around using target arithmetic; with wrap-around then the full s.is_always_valid_for(cx)
// range is one where `start == end`.
let range = s.valid_range_exclusive(cx);
range.start == range.end
} }
}; };