From de3e581e29b1fd02fe4ef5cc415e5173f30e2ca7 Mon Sep 17 00:00:00 2001
From: Eduard-Mihai Burtescu
Date: Sat, 23 Sep 2017 15:04:37 +0300
Subject: [PATCH] rustc: support u128 discriminant ranges.

---
 src/librustc/lib.rs                |  1 +
 src/librustc/ty/layout.rs          | 34 +++++++-------
 src/librustc_trans/builder.rs      | 28 +++++-------
 src/librustc_trans/common.rs       |  4 +-
 src/librustc_trans/meth.rs         |  3 +-
 src/librustc_trans/mir/block.rs    | 11 ++---
 src/librustc_trans/mir/constant.rs |  8 ++--
 src/librustc_trans/mir/lvalue.rs   | 72 +++++++++++-------------------
 src/librustc_trans/mir/rvalue.rs   | 10 ++---
 9 files changed, 70 insertions(+), 101 deletions(-)

diff --git a/src/librustc/lib.rs b/src/librustc/lib.rs
index 44039817e72..b59f7480476 100644
--- a/src/librustc/lib.rs
+++ b/src/librustc/lib.rs
@@ -46,6 +46,7 @@
 #![feature(const_fn)]
 #![feature(core_intrinsics)]
 #![feature(drain_filter)]
+#![feature(i128)]
 #![feature(i128_type)]
 #![feature(inclusive_range)]
 #![feature(inclusive_range_syntax)]
diff --git a/src/librustc/ty/layout.rs b/src/librustc/ty/layout.rs
index bfde8a58e49..a97574681a2 100644
--- a/src/librustc/ty/layout.rs
+++ b/src/librustc/ty/layout.rs
@@ -20,7 +20,7 @@ use syntax_pos::DUMMY_SP;
 
 use std::cmp;
 use std::fmt;
-use std::i64;
+use std::i128;
 use std::iter;
 use std::mem;
 use std::ops::{Add, Sub, Mul, AddAssign, Deref, RangeInclusive};
@@ -467,7 +467,7 @@ impl<'a, 'tcx> Integer {
     }
 
     /// Find the smallest Integer type which can represent the signed value.
-    pub fn fit_signed(x: i64) -> Integer {
+    pub fn fit_signed(x: i128) -> Integer {
         match x {
             -0x0000_0000_0000_0001...0x0000_0000_0000_0000 => I1,
             -0x0000_0000_0000_0080...0x0000_0000_0000_007f => I8,
@@ -479,7 +479,7 @@
     }
 
     /// Find the smallest Integer type which can represent the unsigned value.
-    pub fn fit_unsigned(x: u64) -> Integer {
+    pub fn fit_unsigned(x: u128) -> Integer {
         match x {
             0...0x0000_0000_0000_0001 => I1,
             0...0x0000_0000_0000_00ff => I8,
@@ -495,7 +495,7 @@
         let dl = cx.data_layout();
         let wanted = align.abi();
 
-        for &candidate in &[I8, I16, I32, I64] {
+        for &candidate in &[I8, I16, I32, I64, I128] {
             let ty = Int(candidate, false);
             if wanted == ty.align(dl).abi() && wanted == ty.size(dl).bytes() {
                 return Some(candidate);
             }
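The hunks above only widen the value types: `fit_signed`/`fit_unsigned` keep the same "smallest type that can hold the value" behaviour, now over the full i128/u128 range, and I128 becomes a valid alignment candidate. A minimal standalone sketch of that selection logic, under the stated assumption that the widest candidate is 128 bits (the `Int` enum and helper names below are illustrative, not rustc's `Integer` API, and the 1-bit case is omitted):

    // Pick the smallest fixed-width integer that can represent a value.
    #[derive(Debug, PartialEq)]
    enum Int { I8, I16, I32, I64, I128 }

    fn fit_unsigned(x: u128) -> Int {
        match x {
            0..=0xff => Int::I8,
            0..=0xffff => Int::I16,
            0..=0xffff_ffff => Int::I32,
            0..=0xffff_ffff_ffff_ffff => Int::I64,
            _ => Int::I128,
        }
    }

    fn fit_signed(x: i128) -> Int {
        match x {
            -0x80..=0x7f => Int::I8,
            -0x8000..=0x7fff => Int::I16,
            -0x8000_0000..=0x7fff_ffff => Int::I32,
            -0x8000_0000_0000_0000..=0x7fff_ffff_ffff_ffff => Int::I64,
            _ => Int::I128,
        }
    }

    fn main() {
        // Values that used to be truncated to 64 bits now fit naturally.
        assert_eq!(fit_unsigned(u64::MAX as u128), Int::I64);
        assert_eq!(fit_unsigned(u64::MAX as u128 + 1), Int::I128);
        assert_eq!(fit_signed(i128::MIN), Int::I128);
    }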
@@ -522,19 +522,19 @@
     /// Find the appropriate Integer type and signedness for the given
     /// signed discriminant range and #[repr] attribute.
-    /// N.B.: u64 values above i64::MAX will be treated as signed, but
+    /// N.B.: u128 values above i128::MAX will be treated as signed, but
     /// that shouldn't affect anything, other than maybe debuginfo.
     fn repr_discr(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>, repr: &ReprOptions,
-                  min: i64,
-                  max: i64)
+                  min: i128,
+                  max: i128)
                   -> (Integer, bool) {
         // Theoretically, negative values could be larger in unsigned representation
         // than the unsigned representation of the signed minimum. However, if there
-        // are any negative values, the only valid unsigned representation is u64
-        // which can fit all i64 values, so the result remains unaffected.
-        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u64, max as u64));
+        // are any negative values, the only valid unsigned representation is u128
+        // which can fit all i128 values, so the result remains unaffected.
+        let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
         let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
 
         let mut min_from_extern = None;
@@ -782,11 +782,11 @@ pub enum Variants {
     Tagged {
         discr: Primitive,
         /// Inclusive wrap-around range of discriminant values, that is,
-        /// if min > max, it represents min..=u64::MAX followed by 0..=max.
+        /// if min > max, it represents min..=u128::MAX followed by 0..=max.
         // FIXME(eddyb) always use the shortest range, e.g. by finding
         // the largest space between two consecutive discriminants and
         // taking everything else as the (shortest) discriminant range.
-        discr_range: RangeInclusive<u64>,
+        discr_range: RangeInclusive<u128>,
         variants: Vec<CachedLayout>,
     },
 
@@ -1375,14 +1375,12 @@ impl<'a, 'tcx> CachedLayout {
                 }
             }
 
-            let (mut min, mut max) = (i64::max_value(), i64::min_value());
+            let (mut min, mut max) = (i128::max_value(), i128::min_value());
             for discr in def.discriminants(tcx) {
-                let x = discr.to_u128_unchecked() as i64;
+                let x = discr.to_u128_unchecked() as i128;
                 if x < min { min = x; }
                 if x > max { max = x; }
             }
-            // FIXME: should handle i128? signed-value based impl is weird and hard to
-            // grok.
             let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr, min, max);
 
             let mut align = dl.aggregate_align;
@@ -1479,9 +1477,7 @@ impl<'a, 'tcx> CachedLayout {
             tcx.intern_layout(CachedLayout {
                 variants: Variants::Tagged {
                     discr,
-
-                    // FIXME: should be u128?
-                    discr_range: (min as u64)..=(max as u64),
+                    discr_range: (min as u128)..=(max as u128),
                     variants
                 },
                 // FIXME(eddyb): using `FieldPlacement::Arbitrary` here results
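`discr_range` is stored as an inclusive range that is allowed to wrap: per the doc comment above, when start > end it means start..=u128::MAX followed by 0..=end, which is how a negative discriminant such as -1 ends up encoded once `min`/`max` are cast to u128. A self-contained sketch of that containment rule, assuming only what the doc comment states (the `contains` helper is illustrative, not part of rustc):

    use std::ops::RangeInclusive;

    // When start > end, the range covers start..=u128::MAX followed by 0..=end.
    fn contains(range: &RangeInclusive<u128>, x: u128) -> bool {
        let (start, end) = (*range.start(), *range.end());
        if start <= end {
            start <= x && x <= end
        } else {
            x >= start || x <= end
        }
    }

    fn main() {
        // e.g. discriminants -1 and 0, cast to u128, become u128::MAX..=0:
        let wrapped = u128::MAX..=0;
        assert!(contains(&wrapped, u128::MAX));
        assert!(contains(&wrapped, 0));
        assert!(!contains(&wrapped, 1));
    }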
diff --git a/src/librustc_trans/builder.rs b/src/librustc_trans/builder.rs
index 6ad12a13eca..9da3a479f0c 100644
--- a/src/librustc_trans/builder.rs
+++ b/src/librustc_trans/builder.rs
@@ -24,6 +24,7 @@ use rustc::session::{config, Session};
 
 use std::borrow::Cow;
 use std::ffi::CString;
+use std::ops::Range;
 use std::ptr;
 
 use syntax_pos::Span;
@@ -549,35 +550,26 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
     }
 
-    pub fn load_range_assert(&self, ptr: ValueRef, lo: u64,
-                             hi: u64, signed: llvm::Bool,
-                             align: Option<Align>) -> ValueRef {
-        let value = self.load(ptr, align);
-
+    pub fn range_metadata(&self, load: ValueRef, range: Range<u128>) {
         unsafe {
-            let t = llvm::LLVMGetElementType(llvm::LLVMTypeOf(ptr));
-            let min = llvm::LLVMConstInt(t, lo, signed);
-            let max = llvm::LLVMConstInt(t, hi, signed);
+            let llty = val_ty(load);
+            let v = [
+                C_uint_big(llty, range.start),
+                C_uint_big(llty, range.end)
+            ];
 
-            let v = [min, max];
-
-            llvm::LLVMSetMetadata(value, llvm::MD_range as c_uint,
+            llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
                                   llvm::LLVMMDNodeInContext(self.ccx.llcx(),
                                                             v.as_ptr(),
                                                             v.len() as c_uint));
         }
-
-        value
     }
 
-    pub fn load_nonnull(&self, ptr: ValueRef, align: Option<Align>) -> ValueRef {
-        let value = self.load(ptr, align);
+    pub fn nonnull_metadata(&self, load: ValueRef) {
         unsafe {
-            llvm::LLVMSetMetadata(value, llvm::MD_nonnull as c_uint,
+            llvm::LLVMSetMetadata(load, llvm::MD_nonnull as c_uint,
                                   llvm::LLVMMDNodeInContext(self.ccx.llcx(), ptr::null(), 0));
         }
-
-        value
     }
 
     pub fn store(&self, val: ValueRef, ptr: ValueRef, align: Option<Align>) -> ValueRef {
diff --git a/src/librustc_trans/common.rs b/src/librustc_trans/common.rs
index 7ccac606923..f476416619e 100644
--- a/src/librustc_trans/common.rs
+++ b/src/librustc_trans/common.rs
@@ -178,9 +178,9 @@ pub fn C_uint(t: Type, i: u64) -> ValueRef {
     }
 }
 
-pub fn C_big_integral(t: Type, u: u128) -> ValueRef {
+pub fn C_uint_big(t: Type, u: u128) -> ValueRef {
     unsafe {
-        let words = [u as u64, u.wrapping_shr(64) as u64];
+        let words = [u as u64, (u >> 64) as u64];
         llvm::LLVMConstIntOfArbitraryPrecision(t.to_ref(), 2, words.as_ptr())
     }
 }
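`C_uint_big` hands LLVM the 128-bit value as two 64-bit words, least significant word first, which is the word order `LLVMConstIntOfArbitraryPrecision` consumes; switching from `wrapping_shr(64)` to a plain `>> 64` is purely cosmetic, since the shift amount is a constant below 128. A standalone check of that word split (`to_words` is an illustrative helper, not the rustc function):

    // Split a u128 into two u64 words, least significant first.
    fn to_words(u: u128) -> [u64; 2] {
        [u as u64, (u >> 64) as u64]
    }

    fn main() {
        assert_eq!(to_words(1), [1, 0]);
        assert_eq!(to_words(1u128 << 64), [0, 1]);
        assert_eq!(to_words(u128::MAX), [u64::MAX, u64::MAX]);
        // `u.wrapping_shr(64)` computed the same value; for a u128 a shift by
        // the constant 64 can never overflow, so the plain shift is clearer.
    }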
C_big_integral(int_ty, cast_result.value) + C_uint_big(int_ty, cast_result.value) } unsafe fn cast_const_int_to_float(ccx: &CrateContext, diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs index 5a558e3652b..6da9c7a4657 100644 --- a/src/librustc_trans/mir/lvalue.rs +++ b/src/librustc_trans/mir/lvalue.rs @@ -14,10 +14,9 @@ use rustc::ty::layout::{self, Align, TyLayout, LayoutOf}; use rustc::mir; use rustc::mir::tcx::LvalueTy; use rustc_data_structures::indexed_vec::Idx; -use abi; use base; use builder::Builder; -use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty}; +use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null}; use consts; use type_of::LayoutLlvmExt; use type_::Type; @@ -140,30 +139,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> { return OperandRef::new_zst(bcx.ccx, self.layout); } - let val = if common::type_is_fat_ptr(bcx.ccx, self.layout.ty) { - let data = self.project_field(bcx, abi::FAT_PTR_ADDR); - let lldata = if self.layout.ty.is_region_ptr() || self.layout.ty.is_box() { - bcx.load_nonnull(data.llval, data.alignment.non_abi()) - } else { - bcx.load(data.llval, data.alignment.non_abi()) - }; - - let extra = self.project_field(bcx, abi::FAT_PTR_EXTRA); - let meta_ty = val_ty(extra.llval); - // If the 'extra' field is a pointer, it's a vtable, so use load_nonnull - // instead - let llextra = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer { - bcx.load_nonnull(extra.llval, extra.alignment.non_abi()) - } else { - bcx.load(extra.llval, extra.alignment.non_abi()) - }; - - OperandValue::Pair(lldata, llextra) - } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) { - OperandValue::Pair( - self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(), - self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate()) - } else if self.layout.is_llvm_immediate() { + let val = if self.layout.is_llvm_immediate() { let mut const_llval = ptr::null_mut(); unsafe { let global = llvm::LLVMIsAGlobalVariable(self.llval); @@ -174,22 +150,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> { let llval = if !const_llval.is_null() { const_llval - } else if self.layout.ty.is_bool() { - bcx.load_range_assert(self.llval, 0, 2, llvm::False, - self.alignment.non_abi()) - } else if self.layout.ty.is_char() { - // a char is a Unicode codepoint, and so takes values from 0 - // to 0x10FFFF inclusive only. - bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False, - self.alignment.non_abi()) - } else if self.layout.ty.is_region_ptr() || - self.layout.ty.is_box() || - self.layout.ty.is_fn() { - bcx.load_nonnull(self.llval, self.alignment.non_abi()) } else { - bcx.load(self.llval, self.alignment.non_abi()) + let load = bcx.load(self.llval, self.alignment.non_abi()); + if self.layout.ty.is_bool() { + bcx.range_metadata(load, 0..2); + } else if self.layout.ty.is_char() { + // a char is a Unicode codepoint, and so takes values from 0 + // to 0x10FFFF inclusive only. 
diff --git a/src/librustc_trans/mir/constant.rs b/src/librustc_trans/mir/constant.rs
index 5a2dcf4fb18..e8ff9ae3248 100644
--- a/src/librustc_trans/mir/constant.rs
+++ b/src/librustc_trans/mir/constant.rs
@@ -28,7 +28,7 @@ use abi::{self, Abi};
 use callee;
 use builder::Builder;
 use common::{self, CrateContext, const_get_elt, val_ty};
-use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_big_integral, C_u32, C_u64};
+use common::{C_array, C_bool, C_bytes, C_int, C_uint, C_uint_big, C_u32, C_u64};
 use common::{C_null, C_struct, C_str_slice, C_undef, C_usize, C_vector, C_fat_ptr};
 use common::const_to_opt_u128;
 use consts;
@@ -70,13 +70,13 @@ impl<'a, 'tcx> Const<'tcx> {
             I16(v) => (C_int(Type::i16(ccx), v as i64), tcx.types.i16),
             I32(v) => (C_int(Type::i32(ccx), v as i64), tcx.types.i32),
             I64(v) => (C_int(Type::i64(ccx), v as i64), tcx.types.i64),
-            I128(v) => (C_big_integral(Type::i128(ccx), v as u128), tcx.types.i128),
+            I128(v) => (C_uint_big(Type::i128(ccx), v as u128), tcx.types.i128),
             Isize(v) => (C_int(Type::isize(ccx), v.as_i64()), tcx.types.isize),
             U8(v) => (C_uint(Type::i8(ccx), v as u64), tcx.types.u8),
             U16(v) => (C_uint(Type::i16(ccx), v as u64), tcx.types.u16),
             U32(v) => (C_uint(Type::i32(ccx), v as u64), tcx.types.u32),
             U64(v) => (C_uint(Type::i64(ccx), v), tcx.types.u64),
-            U128(v) => (C_big_integral(Type::i128(ccx), v), tcx.types.u128),
+            U128(v) => (C_uint_big(Type::i128(ccx), v), tcx.types.u128),
             Usize(v) => (C_uint(Type::isize(ccx), v.as_u64()), tcx.types.usize),
         };
         Const { llval: llval, ty: ty }
@@ -994,7 +994,7 @@ unsafe fn cast_const_float_to_int(ccx: &CrateContext,
         let err = ConstEvalErr { span: span, kind: ErrKind::CannotCast };
         err.report(ccx.tcx(), span, "expression");
     }
-    C_big_integral(int_ty, cast_result.value)
+    C_uint_big(int_ty, cast_result.value)
 }
 
 unsafe fn cast_const_int_to_float(ccx: &CrateContext,
diff --git a/src/librustc_trans/mir/lvalue.rs b/src/librustc_trans/mir/lvalue.rs
index 5a558e3652b..6da9c7a4657 100644
--- a/src/librustc_trans/mir/lvalue.rs
+++ b/src/librustc_trans/mir/lvalue.rs
@@ -14,10 +14,9 @@ use rustc::ty::layout::{self, Align, TyLayout, LayoutOf};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;
-use abi;
 use base;
 use builder::Builder;
-use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null, val_ty};
+use common::{self, CrateContext, C_usize, C_u8, C_u32, C_uint, C_int, C_null};
 use consts;
 use type_of::LayoutLlvmExt;
 use type_::Type;
@@ -140,30 +139,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
             return OperandRef::new_zst(bcx.ccx, self.layout);
         }
 
-        let val = if common::type_is_fat_ptr(bcx.ccx, self.layout.ty) {
-            let data = self.project_field(bcx, abi::FAT_PTR_ADDR);
-            let lldata = if self.layout.ty.is_region_ptr() || self.layout.ty.is_box() {
-                bcx.load_nonnull(data.llval, data.alignment.non_abi())
-            } else {
-                bcx.load(data.llval, data.alignment.non_abi())
-            };
-
-            let extra = self.project_field(bcx, abi::FAT_PTR_EXTRA);
-            let meta_ty = val_ty(extra.llval);
-            // If the 'extra' field is a pointer, it's a vtable, so use load_nonnull
-            // instead
-            let llextra = if meta_ty.element_type().kind() == llvm::TypeKind::Pointer {
-                bcx.load_nonnull(extra.llval, extra.alignment.non_abi())
-            } else {
-                bcx.load(extra.llval, extra.alignment.non_abi())
-            };
-
-            OperandValue::Pair(lldata, llextra)
-        } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) {
-            OperandValue::Pair(
-                self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
-                self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
-        } else if self.layout.is_llvm_immediate() {
+        let val = if self.layout.is_llvm_immediate() {
            let mut const_llval = ptr::null_mut();
            unsafe {
                let global = llvm::LLVMIsAGlobalVariable(self.llval);
@@ -174,22 +150,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
 
             let llval = if !const_llval.is_null() {
                 const_llval
-            } else if self.layout.ty.is_bool() {
-                bcx.load_range_assert(self.llval, 0, 2, llvm::False,
-                                      self.alignment.non_abi())
-            } else if self.layout.ty.is_char() {
-                // a char is a Unicode codepoint, and so takes values from 0
-                // to 0x10FFFF inclusive only.
-                bcx.load_range_assert(self.llval, 0, 0x10FFFF + 1, llvm::False,
-                                      self.alignment.non_abi())
-            } else if self.layout.ty.is_region_ptr() ||
-                      self.layout.ty.is_box() ||
-                      self.layout.ty.is_fn() {
-                bcx.load_nonnull(self.llval, self.alignment.non_abi())
             } else {
-                bcx.load(self.llval, self.alignment.non_abi())
+                let load = bcx.load(self.llval, self.alignment.non_abi());
+                if self.layout.ty.is_bool() {
+                    bcx.range_metadata(load, 0..2);
+                } else if self.layout.ty.is_char() {
+                    // a char is a Unicode codepoint, and so takes values from 0
+                    // to 0x10FFFF inclusive only.
+                    bcx.range_metadata(load, 0..0x10FFFF+1);
+                } else if self.layout.ty.is_region_ptr() ||
+                          self.layout.ty.is_box() ||
+                          self.layout.ty.is_fn() {
+                    bcx.nonnull_metadata(load);
+                }
+                load
             };
             OperandValue::Immediate(base::to_immediate(bcx, llval, self.layout))
+        } else if common::type_is_imm_pair(bcx.ccx, self.layout.ty) {
+            OperandValue::Pair(
+                self.project_field(bcx, 0).load(bcx).pack_if_pair(bcx).immediate(),
+                self.project_field(bcx, 1).load(bcx).pack_if_pair(bcx).immediate())
         } else {
             OperandValue::Ref(self.llval, self.alignment)
         };
@@ -314,28 +294,26 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
             layout::Variants::Tagged { ref discr_range, .. } => {
                 (discr_range.start, discr_range.end)
             }
-            _ => (0, u64::max_value()),
+            _ => (0, !0),
         };
         let max_next = max.wrapping_add(1);
         let bits = discr_scalar.size(bcx.ccx).bits();
-        assert!(bits <= 64);
-        let mask = !0u64 >> (64 - bits);
-        let lldiscr = match discr_scalar {
+        assert!(bits <= 128);
+        let mask = !0u128 >> (128 - bits);
+        let lldiscr = bcx.load(discr.llval, discr.alignment.non_abi());
+        match discr_scalar {
             // For a (max) discr of -1, max will be `-1 as usize`, which overflows.
             // However, that is fine here (it would still represent the full range),
             layout::Int(..) if max_next & mask != min & mask => {
                 // llvm::ConstantRange can deal with ranges that wrap around,
                 // so an overflow on (max + 1) is fine.
-                bcx.load_range_assert(discr.llval, min, max_next,
-                                      /* signed: */ llvm::True,
-                                      discr.alignment.non_abi())
+                bcx.range_metadata(lldiscr, min..max_next);
             }
             _ => {
                 // i.e., if the range is everything. The lo==hi case would be
                 // rejected by the LLVM verifier (it would mean either an
                 // empty set, which is impossible, or the entire range of the
                 // type, which is pointless).
-                bcx.load(discr.llval, discr.alignment.non_abi())
             }
         };
         match self.layout.variants {
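In the discriminant load above, the decision is made modulo the discriminant's width: `mask` keeps only `bits` low bits, and `!range` metadata is attached only when the half-open range [min, max_next) does not already cover every representable value (the lo==hi encoding would be rejected by the LLVM verifier). A standalone sketch of that decision, assuming the same masking rule (the `emit_range` helper and its return shape are illustrative, not rustc's code):

    // Return the (possibly wrapping) half-open range to attach as metadata,
    // or None when the truncated range already spans the whole type.
    fn emit_range(min: u128, max: u128, bits: u32) -> Option<(u128, u128)> {
        let mask = !0u128 >> (128 - bits);
        let max_next = max.wrapping_add(1);
        if max_next & mask != min & mask {
            Some((min, max_next))
        } else {
            None
        }
    }

    fn main() {
        // Discriminants 0 and 1 stored in 8 bits: annotate with [0, 2).
        assert_eq!(emit_range(0, 1, 8), Some((0, 2)));
        // Discriminants already covering all 8-bit values: no metadata.
        assert_eq!(emit_range(0, 255, 8), None);
        // A -1..=0 range wraps around; LLVM's ConstantRange allows that.
        assert_eq!(emit_range(u128::MAX, 0, 8), Some((u128::MAX, 1)));
    }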
diff --git a/src/librustc_trans/mir/rvalue.rs b/src/librustc_trans/mir/rvalue.rs
index d3677e2eefd..f584c6a653e 100644
--- a/src/librustc_trans/mir/rvalue.rs
+++ b/src/librustc_trans/mir/rvalue.rs
@@ -23,7 +23,7 @@ use base;
 use builder::Builder;
 use callee;
 use common::{self, val_ty};
-use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_big_integral};
+use common::{C_bool, C_u8, C_i32, C_u32, C_u64, C_null, C_usize, C_uint, C_uint_big};
 use consts;
 use monomorphize;
 use type_::Type;
@@ -289,7 +289,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         base::call_assume(&bcx, bcx.icmp(
                             llvm::IntULE,
                             llval,
-                            C_uint(ll_t_in, discr_range.end)
+                            C_uint_big(ll_t_in, discr_range.end)
                         ));
                     }
                     _ => {}
@@ -807,7 +807,7 @@ fn cast_int_to_float(bcx: &Builder,
     if is_u128_to_f32 {
         // All inputs greater or equal to (f32::MAX + 0.5 ULP) are rounded to infinity,
        // and for everything else LLVM's uitofp works just fine.
-        let max = C_big_integral(int_ty, MAX_F32_PLUS_HALF_ULP);
+        let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
         let overflow = bcx.icmp(llvm::IntUGE, x, max);
         let infinity_bits = C_u32(bcx.ccx, ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
@@ -934,8 +934,8 @@ fn cast_float_to_int(bcx: &Builder,
     // performed is ultimately up to the backend, but at least x86 does perform them.
     let less_or_nan = bcx.fcmp(llvm::RealULT, x, f_min);
     let greater = bcx.fcmp(llvm::RealOGT, x, f_max);
-    let int_max = C_big_integral(int_ty, int_max(signed, int_ty));
-    let int_min = C_big_integral(int_ty, int_min(signed, int_ty) as u128);
+    let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
+    let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
     let s0 = bcx.select(less_or_nan, int_min, fptosui_result);
     let s1 = bcx.select(greater, int_max, s0);
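The two selects at the end clamp the conversion result: an input below `f_min` (or a NaN, since `RealULT` is an unordered comparison) selects `int_min`, and an input above `f_max` selects `int_max`. The rest of the function is cut off by the end of the excerpt, so the plain-Rust sketch below mirrors only these two selects (all names are illustrative, and f32/i64 stand in for the generic float and integer types):

    // Mirror of the s0/s1 selects shown above, under those assumptions.
    fn saturate(x: f32, f_min: f32, f_max: f32,
                int_min: i64, int_max: i64, fptosi: i64) -> i64 {
        let less_or_nan = !(x >= f_min); // RealULT: also true for NaN
        let greater = x > f_max;         // RealOGT: false for NaN
        let s0 = if less_or_nan { int_min } else { fptosi };
        if greater { int_max } else { s0 }
    }

    fn main() {
        // The last argument is a dummy stand-in for the raw fptosi result.
        assert_eq!(saturate(1e30, -128.0, 127.0, -128, 127, 0), 127);
        assert_eq!(saturate(-1e30, -128.0, 127.0, -128, 127, 0), -128);
        assert_eq!(saturate(f32::NAN, -128.0, 127.0, -128, 127, 0), -128);
    }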