
rustc: unpack scalar newtype layout ABIs.

Eduard-Mihai Burtescu 2017-10-09 02:31:06 +03:00
parent 0b8697241f
commit 37a7521ef9
7 changed files with 129 additions and 50 deletions

View file

@@ -1078,6 +1078,30 @@ impl<'a, 'tcx> CachedLayout {
                 packed
             };
 
+            // Unpack newtype ABIs.
+            if sized && optimize && size.bytes() > 0 {
+                // All but one field must be ZSTs, and so they all start at 0.
+                if offsets.iter().all(|o| o.bytes() == 0) {
+                    let mut non_zst_fields = fields.iter().filter(|f| !f.is_zst());
+
+                    // We have exactly one non-ZST field.
+                    match (non_zst_fields.next(), non_zst_fields.next()) {
+                        (Some(field), None) => {
+                            // Field size matches and it has a scalar ABI.
+                            if size == field.size {
+                                match field.abi {
+                                    Abi::Scalar(_) => {
+                                        abi = field.abi.clone();
+                                    }
+                                    _ => {}
+                                }
+                            }
+                        }
+                        _ => {}
+                    }
+                }
+            }
+
             // Look for a scalar pair, as an ABI optimization.
             // FIXME(eddyb) ignore extra ZST fields and field ordering.
             if sized && !packed && fields.len() == 2 {
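
A minimal sketch (not part of the commit) of the kind of type the new branch matches: every field starts at offset 0, all fields but one are ZSTs, and the struct's size equals the remaining field's size, so the struct can simply adopt that field's Abi::Scalar:

    use std::marker::PhantomData;
    use std::mem::size_of;

    // One non-ZST field plus a ZST marker: a candidate for the unpacked scalar ABI.
    struct Tagged(u32, PhantomData<String>);

    fn main() {
        // The ZST contributes no size, so the struct's size matches the
        // field's, one of the conditions checked in the hunk above.
        assert_eq!(size_of::<Tagged>(), size_of::<u32>());
    }

The observable payoff is in codegen rather than in size_of: such a struct is now passed and returned as a bare scalar, as the updated codegen tests at the end of this diff check.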
@@ -1424,6 +1448,18 @@ impl<'a, 'tcx> CachedLayout {
                     let mut st = univariant_uninterned(&variants[v], &def.repr, kind)?;
                     st.variants = Variants::Single { index: v };
+
+                    // Exclude 0 from the range of a newtype ABI NonZero<T>.
+                    if Some(def.did) == cx.tcx().lang_items().non_zero() {
+                        match st.abi {
+                            Abi::Scalar(ref mut scalar) |
+                            Abi::ScalarPair(ref mut scalar, _) => {
+                                if scalar.valid_range.start == 0 {
+                                    scalar.valid_range.start = 1;
+                                }
+                            }
+                            _ => {}
+                        }
+                    }
 
                     return Ok(tcx.intern_layout(st));
                 }
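
The excluded zero is what lets Option use 0 as a niche. A sketch using today's stable NonZeroU32 as a stand-in for the internal NonZero lang item this hunk targets:

    use std::mem::size_of;
    use std::num::NonZeroU32;

    fn main() {
        // 0 is outside the valid range, so `None` is encoded as 0 and the
        // Option costs no extra space over the bare integer.
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
    }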
@@ -2284,20 +2320,6 @@ impl<'a, 'tcx> TyLayout<'tcx> {
             };
         }
 
-        // Is this the NonZero lang item wrapping a pointer or integer type?
-        if let ty::TyAdt(def, _) = self.ty.sty {
-            if Some(def.did) == cx.tcx().lang_items().non_zero() {
-                let field = self.field(cx, 0)?;
-                let offset = self.fields.offset(0);
-                if let Abi::Scalar(Scalar { value, ref valid_range }) = field.abi {
-                    return Ok(Some((offset, Scalar {
-                        value,
-                        valid_range: 0..=valid_range.end
-                    }, 0)));
-                }
-            }
-        }
-
         // Perhaps one of the fields is non-zero, let's recurse and find out.
         if let FieldPlacement::Union(_) = self.fields {
             // Only Rust enums have safe-to-inspect fields
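
This special case can be deleted because the unpacked newtype ABI now carries the wrapped field's valid range (for example the non-null range of a reference), so the generic field recursion below finds the niche on its own:

    use std::mem::size_of;

    fn main() {
        // The non-null niche of &u8 is discovered without any
        // NonZero-specific handling.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    }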

View file

@@ -139,7 +139,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
         let ccx = self.cx.ccx;
 
         if let mir::Lvalue::Projection(ref proj) = *lvalue {
-            // Allow uses of projections that are ZSTs or from immediate scalar fields.
+            // Allow uses of projections that are ZSTs or from scalar fields.
            if let LvalueContext::Consume = context {
                 let base_ty = proj.base.ty(self.cx.mir, ccx.tcx());
                 let base_ty = self.cx.monomorphize(&base_ty);
@@ -153,7 +153,7 @@ impl<'mir, 'a, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'tcx> {
                 if let mir::ProjectionElem::Field(..) = proj.elem {
                     let layout = ccx.layout_of(base_ty.to_ty(ccx.tcx()));
-                    if layout.is_llvm_scalar_pair() {
+                    if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
                         // Recurse as a `Consume` instead of `Projection`,
                         // potentially stopping at non-operand projections,
                         // which would trigger `mark_as_lvalue` on locals.
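
A hypothetical (not from the commit) piece of user code whose MIR exercises the widened check: since scalar newtypes are now LLVM immediates, consuming a field projection of such a local no longer forces the local into memory:

    struct Id(u64);

    // `x` can stay in SSA form; the `.0` projection is consumed as an
    // operand instead of triggering `mark_as_lvalue`.
    fn unwrap_id(x: Id) -> u64 {
        x.0
    }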

View file

@@ -700,11 +700,12 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     let elem = if field.is_zst() {
                         C_undef(field.llvm_type(bcx.ccx))
                     } else {
-                        bcx.extract_value(llval, tuple.layout.llvm_field_index(i))
+                        // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                        bcx.bitcast(llval, field.immediate_llvm_type(bcx.ccx))
                     };
                     // If the tuple is immediate, the elements are as well
                     let op = OperandRef {
-                        val: Immediate(base::to_immediate(bcx, elem, field)),
+                        val: Immediate(elem),
                         layout: field,
                     };
                     self.trans_argument(bcx, op, llargs, &args[i]);
@@ -712,7 +713,8 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 }
                 Pair(a, b) => {
                     let elems = [a, b];
-                    for i in 0..tuple.layout.fields.count() {
+                    assert_eq!(tuple.layout.fields.count(), 2);
+                    for i in 0..2 {
                         // Pair is always made up of immediates
                         let op = OperandRef {
                             val: Immediate(elems[i]),
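
Both branches handle spreading a tuple into call arguments (the rust-call ABI used by Fn-family calls). A hedged sketch of code that reaches them: a 1-tuple of a scalar is now itself an immediate (the bitcast branch above), while a 2-tuple of scalars is a Pair:

    fn apply_one<F: Fn(u64) -> u64>(f: F) -> u64 { f(1) }
    fn apply_two<F: Fn(u32, u32) -> u32>(f: F) -> u32 { f(1, 2) }

    fn main() {
        assert_eq!(apply_one(|x| x + 1), 2);
        assert_eq!(apply_two(|x, y| x + y), 3);
    }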

View file

@@ -118,10 +118,27 @@ impl<'a, 'tcx> Const<'tcx> {
 
     fn get_field(&self, ccx: &CrateContext<'a, 'tcx>, i: usize) -> ValueRef {
         let layout = ccx.layout_of(self.ty);
-        if let layout::Abi::ScalarPair(..) = layout.abi {
-            const_get_elt(self.llval, i as u64)
-        } else {
-            const_get_elt(self.llval, layout.llvm_field_index(i))
+        let field = layout.field(ccx, i);
+        if field.is_zst() {
+            return C_undef(field.immediate_llvm_type(ccx));
+        }
+        match layout.abi {
+            layout::Abi::Scalar(_) => self.llval,
+            layout::Abi::ScalarPair(ref a, ref b) => {
+                let offset = layout.fields.offset(i);
+                if offset.bytes() == 0 {
+                    assert_eq!(field.size, a.value.size(ccx));
+                    const_get_elt(self.llval, 0)
+                } else {
+                    assert_eq!(offset, a.value.size(ccx)
+                        .abi_align(b.value.align(ccx)));
+                    assert_eq!(field.size, b.value.size(ccx));
+                    const_get_elt(self.llval, 1)
+                }
+            }
+            _ => {
+                const_get_elt(self.llval, layout.llvm_field_index(i))
+            }
         }
     }
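
The asserted offset encodes the ScalarPair rule: component b starts at a's size rounded up to b's alignment. A standalone sketch of that arithmetic (names ours, not the compiler's):

    // Round `size` up to a multiple of `align`, like `abi_align` above.
    fn align_to(size: u64, align: u64) -> u64 {
        (size + align - 1) / align * align
    }

    fn main() {
        assert_eq!(align_to(1, 4), 4); // pair like (u8, u32): b at offset 4
        assert_eq!(align_to(4, 8), 8); // pair like (u32, u64): b at offset 8
    }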
@@ -159,7 +176,8 @@ impl<'a, 'tcx> Const<'tcx> {
             // a constant LLVM global and cast its address if necessary.
             let align = ccx.align_of(self.ty);
             let ptr = consts::addr_of(ccx, self.llval, align, "const");
-            OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
+            OperandValue::Ref(consts::ptrcast(ptr, layout.llvm_type(ccx).ptr_to()),
+                              Alignment::AbiAligned)
         };
 
         OperandRef {
@@ -1179,12 +1197,26 @@ fn build_const_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 -> Const<'tcx> {
     assert_eq!(vals.len(), layout.fields.count());
 
-    if let layout::Abi::ScalarPair(..) = layout.abi {
-        assert_eq!(vals.len(), 2);
-        return Const::new(C_struct(ccx, &[
-            vals[0].llval,
-            vals[1].llval,
-        ], false), layout.ty);
+    match layout.abi {
+        layout::Abi::Scalar(_) |
+        layout::Abi::ScalarPair(..) if discr.is_none() => {
+            let mut non_zst_fields = vals.iter().enumerate().map(|(i, f)| {
+                (f, layout.fields.offset(i))
+            }).filter(|&(f, _)| !ccx.layout_of(f.ty).is_zst());
+            match (non_zst_fields.next(), non_zst_fields.next()) {
+                (Some((x, offset)), None) if offset.bytes() == 0 => {
+                    return Const::new(x.llval, layout.ty);
+                }
+                (Some((a, a_offset)), Some((b, _))) if a_offset.bytes() == 0 => {
+                    return Const::new(C_struct(ccx, &[a.llval, b.llval], false), layout.ty);
+                }
+                (Some((a, _)), Some((b, b_offset))) if b_offset.bytes() == 0 => {
+                    return Const::new(C_struct(ccx, &[b.llval, a.llval], false), layout.ty);
+                }
+                _ => {}
+            }
+        }
+        _ => {}
     }
 
     // offset of current value
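
A sketch (ours, not the commit's) of constants that now take the early returns above: a struct with a single non-ZST field collapses to that field's constant, and a scalar pair is emitted as a two-element LLVM struct whatever the source field order:

    use std::marker::PhantomData;

    struct Wrapped(u32, PhantomData<()>); // Scalar: reuses the field's constant
    struct PairLike(u32, u64);            // ScalarPair: two-element LLVM struct

    static A: Wrapped = Wrapped(7, PhantomData);
    static B: PairLike = PairLike(1, 2);

    fn main() {
        assert_eq!(A.0 + B.0, 8);
    }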

View file

@@ -10,12 +10,12 @@
 
 use llvm::ValueRef;
 use rustc::ty;
-use rustc::ty::layout::{LayoutOf, TyLayout};
+use rustc::ty::layout::{self, LayoutOf, TyLayout};
 use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 
 use base;
-use common::{CrateContext, C_undef};
+use common::{CrateContext, C_undef, C_usize};
 use builder::Builder;
 use value::Value;
 use type_of::LayoutLlvmExt;
@@ -207,24 +207,47 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             if let mir::ProjectionElem::Field(ref f, _) = proj.elem {
                 if let Some(o) = self.maybe_trans_consume_direct(bcx, &proj.base) {
                     let layout = o.layout.field(bcx.ccx, f.index());
+                    let offset = o.layout.fields.offset(f.index());
 
                     // Handled in `trans_consume`.
                     assert!(!layout.is_zst());
 
-                    match o.val {
-                        OperandValue::Pair(a, b) => {
-                            let llval = [a, b][f.index()];
-                            // HACK(eddyb) have to bitcast pointers
-                            // until LLVM removes pointee types.
-                            let llval = bcx.bitcast(llval,
-                                layout.immediate_llvm_type(bcx.ccx));
-                            return Some(OperandRef {
-                                val: OperandValue::Immediate(llval),
-                                layout
-                            });
+                    // Offset has to match a scalar component.
+                    let llval = match (o.val, &o.layout.abi) {
+                        (OperandValue::Immediate(llval),
+                         &layout::Abi::Scalar(ref scalar)) => {
+                            assert_eq!(offset.bytes(), 0);
+                            assert_eq!(layout.size, scalar.value.size(bcx.ccx));
+                            llval
                         }
-                        _ => {}
-                    }
+                        (OperandValue::Pair(a_llval, b_llval),
+                         &layout::Abi::ScalarPair(ref a, ref b)) => {
+                            if offset.bytes() == 0 {
+                                assert_eq!(layout.size, a.value.size(bcx.ccx));
+                                a_llval
+                            } else {
+                                assert_eq!(offset, a.value.size(bcx.ccx)
+                                    .abi_align(b.value.align(bcx.ccx)));
+                                assert_eq!(layout.size, b.value.size(bcx.ccx));
+                                b_llval
+                            }
+                        }
+                        // `#[repr(simd)]` types are also immediate.
+                        (OperandValue::Immediate(llval),
+                         &layout::Abi::Vector) => {
+                            bcx.extract_element(llval, C_usize(bcx.ccx, f.index() as u64))
+                        }
+                        _ => return None
+                    };
+
+                    // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
+                    let llval = bcx.bitcast(llval, layout.immediate_llvm_type(bcx.ccx));
+                    return Some(OperandRef {
+                        val: OperandValue::Immediate(llval),
+                        layout
+                    });
                 }
             }
         }
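
The new match maps a field projection straight onto the scalar components of an immediate operand. A hypothetical source-level view:

    fn first(p: (u32, u32)) -> u32 {
        // `p` is an OperandValue::Pair; the field at offset 0 resolves to
        // a_llval via the ScalarPair arm above.
        p.0
    }

    fn second(p: (u32, u32)) -> u32 {
        // The other field sits at a's size aligned to b's alignment, so it
        // resolves to b_llval.
        p.1
    }

    fn main() {
        assert_eq!(first((1, 2)) + second((1, 2)), 3);
    }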

View file

@@ -46,13 +46,13 @@ pub fn static_borrow(_: &'static i32) {
 pub fn named_borrow<'r>(_: &'r i32) {
 }
 
-// CHECK: @unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
+// CHECK: @unsafe_borrow(i16* dereferenceable(2) %arg0)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_borrow(_: &UnsafeInner) {
 }
 
-// CHECK: @mutable_unsafe_borrow(%UnsafeInner* dereferenceable(2) %arg0)
+// CHECK: @mutable_unsafe_borrow(i16* dereferenceable(2) %arg0)
 // ... unless this is a mutable borrow, those never alias
 // ... except that there's this LLVM bug that forces us to not use noalias, see #29485
 #[no_mangle]
@@ -110,7 +110,7 @@ pub fn slice(_: &[u8]) {
 pub fn mutable_slice(_: &mut [u8]) {
 }
 
-// CHECK: @unsafe_slice([0 x %UnsafeInner]* nonnull %arg0.0, [[USIZE]] %arg0.1)
+// CHECK: @unsafe_slice([0 x i16]* nonnull %arg0.0, [[USIZE]] %arg0.1)
 // unsafe interior means this isn't actually readonly and there may be aliases ...
 #[no_mangle]
 pub fn unsafe_slice(_: &[UnsafeInner]) {
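
These CHECK updates show the newtype unpacking at the LLVM level: the pointee type of &UnsafeInner is now the bare i16 instead of a wrapper struct. The test's UnsafeInner definition is not shown in these hunks; judging by the dereferenceable(2) and i16 in the CHECK lines, it is presumably something like:

    use std::cell::UnsafeCell;

    // Assumed shape (not in the diff): a newtype over a 2-byte
    // unsafe-interior payload.
    pub struct UnsafeInner(UnsafeCell<i16>);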

View file

@@ -15,7 +15,7 @@
 #[no_mangle]
 pub struct F32(f32);
 
-// CHECK: define float @add_newtype_f32(float, float)
+// CHECK: define float @add_newtype_f32(float %a, float %b)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
@@ -25,7 +25,7 @@ pub fn add_newtype_f32(a: F32, b: F32) -> F32 {
 #[no_mangle]
 pub struct F64(f64);
 
-// CHECK: define double @add_newtype_f64(double, double)
+// CHECK: define double @add_newtype_f64(double %a, double %b)
 #[inline(never)]
 #[no_mangle]
 pub fn add_newtype_f64(a: F64, b: F64) -> F64 {
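
The function bodies fall outside these hunks, but are presumably plain field-wise adds, e.g.:

    pub fn add_newtype_f64(a: F64, b: F64) -> F64 {
        F64(a.0 + b.0)
    }

The named %a/%b parameters in the new CHECK lines reflect the newtype arguments now being passed as bare scalar immediates, which keep their argument names in the IR.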