
Auto merge of #40658 - eddyb:lay-more-out, r=arielb1

Use ty::layout for ABI computation instead of LLVM types.

This is the first step in creating a backend-agnostic library for computing call ABI details from signatures.
I wanted to open the PR *before* attempting to move `cabi_*` from trans to avoid rebase churn in #39999.
**EDIT**: As I suspected, #39999 needs this PR to fully work (see https://github.com/rust-lang/rust/pull/39999#issuecomment-287723379).

The first 3 commits add more APIs to `ty::layout` and replace non-ABI uses of `sizing_type_of`.
These APIs are probably usable by other backends, and miri too (cc @stoklund @solson).
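As a rough illustration of the direction (a hypothetical consumer, not code from this PR; `BackendCx` and `size_of_pointer_pair` are made up, while the trait and types are the ones added in the diff below, with paths abbreviated):

```rust
// Any context that can hand out a &TargetDataLayout can drive the new
// layout queries, with no LLVM types involved.
#[derive(Copy, Clone)]
struct BackendCx<'a> {
    dl: &'a TargetDataLayout,
}

impl<'a> HasDataLayout for BackendCx<'a> {
    fn data_layout(&self) -> &TargetDataLayout {
        self.dl
    }
}

// Generic over C: HasDataLayout, so the same code can serve trans,
// miri, or any future backend.
fn size_of_pointer_pair<C: HasDataLayout>(cx: C) -> Option<Size> {
    Pointer.size(cx).checked_mul(2, cx)
}
```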

The last commit rewrites `rustc_trans::cabi_*` to use `ty::layout` and new `rustc_trans::abi` APIs.
Also, during the process, a couple of trivial bugs were identified and fixed (the first class of bug is sketched after this list):
* `msp430`, `nvptx`, `nvptx64`: type sizes *in bytes* were compared with `32` and `64`, which are bit widths
* `x86` (`fastcall`): `f64` was incorrectly not treated the same way as `f32`
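A minimal sketch of that first bug class (a schematic reconstruction, not the actual target code; both function names are hypothetical):

```rust
// The buggy check compared a size measured in *bytes* against 32/64,
// which are bit widths, so arguments well past the limit still "fit".
fn fits_in_32_bits_buggy(size_in_bytes: u64) -> bool {
    size_in_bytes <= 32 // wrong: mixes bytes with bits
}

fn fits_in_32_bits_fixed(size_in_bytes: u64) -> bool {
    size_in_bytes * 8 <= 32 // compare bits with bits
}
```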

Although not urgent, this PR also uses the more general "homogeneous aggregate" logic to fix #32045 (illustrated below).
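For background (an illustrative example, not code from this diff): a homogeneous aggregate is a composite type whose leaf fields all share one scalar kind, which ABIs such as the AArch64 AAPCS pass directly in registers:

```rust
// A homogeneous float aggregate (HFA): every leaf field is an f32.
// Under e.g. the AArch64 C ABI, an HFA of up to four units is passed
// in floating-point registers rather than indirectly through memory;
// this is what the new `homogenous_aggregate` logic detects.
#[repr(C)]
struct Rgba {
    r: f32,
    g: f32,
    b: f32,
    a: f32,
}
```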
Merged by bors on 2017-04-09 13:08:10 +00:00 as commit 2c48ae6f7f.
38 changed files with 1468 additions and 1845 deletions

View file

@@ -89,7 +89,7 @@ impl<'a, 'gcx, 'tcx> ExprVisitor<'a, 'gcx, 'tcx> {
         let from = unpack_option_like(self.infcx.tcx.global_tcx(), from);
         match (&from.sty, sk_to) {
             (&ty::TyFnDef(..), SizeSkeleton::Known(size_to))
-                if size_to == Pointer.size(&self.infcx.tcx.data_layout) => {
+                if size_to == Pointer.size(self.infcx) => {
                 struct_span_err!(self.infcx.tcx.sess, span, E0591,
                                  "`{}` is zero-sized and can't be transmuted to `{}`",
                                  from, to)

View file

@@ -25,6 +25,7 @@ use std::cmp;
 use std::fmt;
 use std::i64;
 use std::iter;
+use std::ops::Deref;

 /// Parsed [Data layout](http://llvm.org/docs/LangRef.html#data-layout)
 /// for a target, which contains everything needed to compute layouts.
@@ -201,6 +202,16 @@ impl TargetDataLayout {
     }
 }

+pub trait HasDataLayout: Copy {
+    fn data_layout(&self) -> &TargetDataLayout;
+}
+
+impl<'a> HasDataLayout for &'a TargetDataLayout {
+    fn data_layout(&self) -> &TargetDataLayout {
+        self
+    }
+}
+
 /// Endianness of the target, which must match cfg(target-endian).
 #[derive(Copy, Clone)]
 pub enum Endian {
@@ -241,7 +252,9 @@ impl Size {
         Size::from_bytes((self.bytes() + mask) & !mask)
     }

-    pub fn checked_add(self, offset: Size, dl: &TargetDataLayout) -> Option<Size> {
+    pub fn checked_add<C: HasDataLayout>(self, offset: Size, cx: C) -> Option<Size> {
+        let dl = cx.data_layout();
+
         // Each Size is less than dl.obj_size_bound(), so the sum is
         // also less than 1 << 62 (and therefore can't overflow).
         let bytes = self.bytes() + offset.bytes();
@@ -253,7 +266,9 @@ impl Size {
         }
     }

-    pub fn checked_mul(self, count: u64, dl: &TargetDataLayout) -> Option<Size> {
+    pub fn checked_mul<C: HasDataLayout>(self, count: u64, cx: C) -> Option<Size> {
+        let dl = cx.data_layout();
+
         // Each Size is less than dl.obj_size_bound(), so the sum is
         // also less than 1 << 62 (and therefore can't overflow).
         match self.bytes().checked_mul(count) {
@@ -353,7 +368,9 @@ impl Integer {
         }
     }

-    pub fn align(&self, dl: &TargetDataLayout)-> Align {
+    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match *self {
             I1 => dl.i1_align,
             I8 => dl.i8_align,
@@ -407,7 +424,9 @@ impl Integer {
     }

     /// Find the smallest integer with the given alignment.
-    pub fn for_abi_align(dl: &TargetDataLayout, align: Align) -> Option<Integer> {
+    pub fn for_abi_align<C: HasDataLayout>(cx: C, align: Align) -> Option<Integer> {
+        let dl = cx.data_layout();
+
         let wanted = align.abi();
         for &candidate in &[I8, I16, I32, I64] {
             let ty = Int(candidate);
@@ -419,7 +438,9 @@ impl Integer {
     }

     /// Get the Integer type from an attr::IntType.
-    pub fn from_attr(dl: &TargetDataLayout, ity: attr::IntType) -> Integer {
+    pub fn from_attr<C: HasDataLayout>(cx: C, ity: attr::IntType) -> Integer {
+        let dl = cx.data_layout();
+
         match ity {
             attr::SignedInt(IntTy::I8) | attr::UnsignedInt(UintTy::U8) => I8,
             attr::SignedInt(IntTy::I16) | attr::UnsignedInt(UintTy::U16) => I16,
@@ -449,7 +470,7 @@ impl Integer {
         let min_default = I8;

         if let Some(ity) = repr.int {
-            let discr = Integer::from_attr(&tcx.data_layout, ity);
+            let discr = Integer::from_attr(tcx, ity);
             let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
             if discr < fit {
                 bug!("Integer::repr_discr: `#[repr]` hint too small for \
@@ -490,7 +511,9 @@ pub enum Primitive {
 }

 impl Primitive {
-    pub fn size(self, dl: &TargetDataLayout) -> Size {
+    pub fn size<C: HasDataLayout>(self, cx: C) -> Size {
+        let dl = cx.data_layout();
+
         match self {
             Int(I1) | Int(I8) => Size::from_bits(8),
             Int(I16) => Size::from_bits(16),
@@ -501,7 +524,9 @@ impl Primitive {
         }
     }

-    pub fn align(self, dl: &TargetDataLayout) -> Align {
+    pub fn align<C: HasDataLayout>(self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match self {
             Int(I1) => dl.i1_align,
             Int(I8) => dl.i8_align,
@@ -681,8 +706,8 @@ impl<'a, 'gcx, 'tcx> Struct {
     }

     /// Determine whether a structure would be zero-sized, given its fields.
-    pub fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
-                                  -> Result<bool, LayoutError<'gcx>>
+    fn would_be_zero_sized<I>(dl: &TargetDataLayout, fields: I)
+                              -> Result<bool, LayoutError<'gcx>>
     where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
         for field in fields {
             let field = field?;
@@ -830,7 +855,7 @@ pub struct Union {
 }

 impl<'a, 'gcx, 'tcx> Union {
-    pub fn new(dl: &TargetDataLayout, packed: bool) -> Union {
+    fn new(dl: &TargetDataLayout, packed: bool) -> Union {
         Union {
             align: if packed { dl.i8_align } else { dl.aggregate_align },
             min_size: Size::from_bytes(0),
@@ -839,10 +864,10 @@ impl<'a, 'gcx, 'tcx> Union {
     }

     /// Extend the Struct with more fields.
-    pub fn extend<I>(&mut self, dl: &TargetDataLayout,
-                     fields: I,
-                     scapegoat: Ty<'gcx>)
-                     -> Result<(), LayoutError<'gcx>>
+    fn extend<I>(&mut self, dl: &TargetDataLayout,
+                 fields: I,
+                 scapegoat: Ty<'gcx>)
+                 -> Result<(), LayoutError<'gcx>>
     where I: Iterator<Item=Result<&'a Layout, LayoutError<'gcx>>> {
         for (index, field) in fields.enumerate() {
             let field = field?;
@@ -904,7 +929,8 @@ pub enum Layout {
         /// If true, the size is exact, otherwise it's only a lower bound.
         sized: bool,
         align: Align,
-        size: Size
+        element_size: Size,
+        count: u64
     },

     /// TyRawPtr or TyRef with a !Sized pointee.
@@ -1087,25 +1113,35 @@ impl<'a, 'gcx, 'tcx> Layout {
             // Arrays and slices.
             ty::TyArray(element, count) => {
                 let element = element.layout(infcx)?;
+                let element_size = element.size(dl);
+                // FIXME(eddyb) Don't use host `usize` for array lengths.
+                let usize_count: usize = count;
+                let count = usize_count as u64;
+                if element_size.checked_mul(count, dl).is_none() {
+                    return Err(LayoutError::SizeOverflow(ty));
+                }
                 Array {
                     sized: true,
                     align: element.align(dl),
-                    size: element.size(dl).checked_mul(count as u64, dl)
-                                  .map_or(Err(LayoutError::SizeOverflow(ty)), Ok)?
+                    element_size: element_size,
+                    count: count
                 }
             }
             ty::TySlice(element) => {
+                let element = element.layout(infcx)?;
                 Array {
                     sized: false,
-                    align: element.layout(infcx)?.align(dl),
-                    size: Size::from_bytes(0)
+                    align: element.align(dl),
+                    element_size: element.size(dl),
+                    count: 0
                 }
             }
             ty::TyStr => {
                 Array {
                     sized: false,
                     align: dl.i8_align,
-                    size: Size::from_bytes(0)
+                    element_size: Size::from_bytes(1),
+                    count: 0
                 }
             }
@@ -1440,22 +1476,32 @@ impl<'a, 'gcx, 'tcx> Layout {
         }
     }

-    pub fn size(&self, dl: &TargetDataLayout) -> Size {
+    pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
+        let dl = cx.data_layout();
+
         match *self {
             Scalar { value, .. } | RawNullablePointer { value, .. } => {
                 value.size(dl)
             }

             Vector { element, count } => {
-                let elem_size = element.size(dl);
-                let vec_size = match elem_size.checked_mul(count, dl) {
+                let element_size = element.size(dl);
+                let vec_size = match element_size.checked_mul(count, dl) {
                     Some(size) => size,
                     None => bug!("Layout::size({:?}): {} * {} overflowed",
-                                 self, elem_size.bytes(), count)
+                                 self, element_size.bytes(), count)
                 };
                 vec_size.abi_align(self.align(dl))
             }

+            Array { element_size, count, .. } => {
+                match element_size.checked_mul(count, dl) {
+                    Some(size) => size,
+                    None => bug!("Layout::size({:?}): {} * {} overflowed",
+                                 self, element_size.bytes(), count)
+                }
+            }
+
             FatPointer { metadata, .. } => {
                 // Effectively a (ptr, meta) tuple.
                 Pointer.size(dl).abi_align(metadata.align(dl))
@@ -1464,7 +1510,7 @@ impl<'a, 'gcx, 'tcx> Layout {
             }

             CEnum { discr, .. } => Int(discr).size(dl),
-            Array { size, .. } | General { size, .. } => size,
+            General { size, .. } => size,
             UntaggedUnion { ref variants } => variants.stride(),

             Univariant { ref variant, .. } |
@@ -1474,7 +1520,9 @@ impl<'a, 'gcx, 'tcx> Layout {
         }
     }

-    pub fn align(&self, dl: &TargetDataLayout) -> Align {
+    pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
+        let dl = cx.data_layout();
+
         match *self {
             Scalar { value, .. } | RawNullablePointer { value, .. } => {
                 value.align(dl)
@@ -1513,6 +1561,61 @@ impl<'a, 'gcx, 'tcx> Layout {
             }
         }
     }
+
+    pub fn field_offset<C: HasDataLayout>(&self,
+                                          cx: C,
+                                          i: usize,
+                                          variant_index: Option<usize>)
+                                          -> Size {
+        let dl = cx.data_layout();
+
+        match *self {
+            Scalar { .. } |
+            CEnum { .. } |
+            UntaggedUnion { .. } |
+            RawNullablePointer { .. } => {
+                Size::from_bytes(0)
+            }
+
+            Vector { element, count } => {
+                let element_size = element.size(dl);
+                let i = i as u64;
+                assert!(i < count);
+                Size::from_bytes(element_size.bytes() * count)
+            }
+
+            Array { element_size, count, .. } => {
+                let i = i as u64;
+                assert!(i < count);
+                Size::from_bytes(element_size.bytes() * count)
+            }
+
+            FatPointer { metadata, .. } => {
+                // Effectively a (ptr, meta) tuple.
+                assert!(i < 2);
+                if i == 0 {
+                    Size::from_bytes(0)
+                } else {
+                    Pointer.size(dl).abi_align(metadata.align(dl))
+                }
+            }
+
+            Univariant { ref variant, .. } => variant.offsets[i],
+
+            General { ref variants, .. } => {
+                let v = variant_index.expect("variant index required");
+                variants[v].offsets[i + 1]
+            }
+
+            StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
+                if Some(nndiscr as usize) == variant_index {
+                    nonnull.offsets[i]
+                } else {
+                    Size::from_bytes(0)
+                }
+            }
+        }
+    }
 }

 /// Type size "skeleton", i.e. the only information determining a type's size.
@@ -1544,7 +1647,7 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
         // First try computing a static layout.
         let err = match ty.layout(infcx) {
             Ok(layout) => {
-                return Ok(SizeSkeleton::Known(layout.size(&tcx.data_layout)));
+                return Ok(SizeSkeleton::Known(layout.size(tcx)));
             }
             Err(err) => err
         };
@@ -1658,3 +1761,192 @@ impl<'a, 'gcx, 'tcx> SizeSkeleton<'gcx> {
         }
     }
 }
/// A pair of a type and its layout. Implements various
/// type traversal APIs (e.g. recursing into fields).
#[derive(Copy, Clone, Debug)]
pub struct TyLayout<'tcx> {
pub ty: Ty<'tcx>,
pub layout: &'tcx Layout,
pub variant_index: Option<usize>,
}
impl<'tcx> Deref for TyLayout<'tcx> {
type Target = Layout;
fn deref(&self) -> &Layout {
self.layout
}
}
pub trait HasTyCtxt<'tcx>: HasDataLayout {
fn tcx<'a>(&'a self) -> TyCtxt<'a, 'tcx, 'tcx>;
}
impl<'a, 'gcx, 'tcx> HasDataLayout for TyCtxt<'a, 'gcx, 'tcx> {
fn data_layout(&self) -> &TargetDataLayout {
&self.data_layout
}
}
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for TyCtxt<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
self.global_tcx()
}
}
impl<'a, 'gcx, 'tcx> HasDataLayout for &'a InferCtxt<'a, 'gcx, 'tcx> {
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl<'a, 'gcx, 'tcx> HasTyCtxt<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
fn tcx<'b>(&'b self) -> TyCtxt<'b, 'gcx, 'gcx> {
self.tcx.global_tcx()
}
}
pub trait LayoutTyper<'tcx>: HasTyCtxt<'tcx> {
type TyLayout;
fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout;
}
impl<'a, 'gcx, 'tcx> LayoutTyper<'gcx> for &'a InferCtxt<'a, 'gcx, 'tcx> {
type TyLayout = Result<TyLayout<'gcx>, LayoutError<'gcx>>;
fn layout_of(self, ty: Ty<'gcx>) -> Self::TyLayout {
let ty = normalize_associated_type(self, ty);
Ok(TyLayout {
ty: ty,
layout: ty.layout(self)?,
variant_index: None
})
}
}
impl<'a, 'tcx> TyLayout<'tcx> {
pub fn for_variant(&self, variant_index: usize) -> Self {
TyLayout {
variant_index: Some(variant_index),
..*self
}
}
pub fn field_offset<C: HasDataLayout>(&self, cx: C, i: usize) -> Size {
self.layout.field_offset(cx, i, self.variant_index)
}
pub fn field_count(&self) -> usize {
// Handle enum/union through the type rather than Layout.
if let ty::TyAdt(def, _) = self.ty.sty {
let v = self.variant_index.unwrap_or(0);
if def.variants.is_empty() {
assert_eq!(v, 0);
return 0;
} else {
return def.variants[v].fields.len();
}
}
match *self.layout {
Scalar { .. } => {
bug!("TyLayout::field_count({:?}): not applicable", self)
}
// Handled above (the TyAdt case).
CEnum { .. } |
General { .. } |
UntaggedUnion { .. } |
RawNullablePointer { .. } |
StructWrappedNullablePointer { .. } => bug!(),
FatPointer { .. } => 2,
Vector { count, .. } |
Array { count, .. } => {
let usize_count = count as usize;
assert_eq!(usize_count as u64, count);
usize_count
}
Univariant { ref variant, .. } => variant.offsets.len(),
}
}
pub fn field_type<C: HasTyCtxt<'tcx>>(&self, cx: C, i: usize) -> Ty<'tcx> {
let tcx = cx.tcx();
let ptr_field_type = |pointee: Ty<'tcx>| {
let slice = |element: Ty<'tcx>| {
assert!(i < 2);
if i == 0 {
tcx.mk_mut_ptr(element)
} else {
tcx.types.usize
}
};
match tcx.struct_tail(pointee).sty {
ty::TySlice(element) => slice(element),
ty::TyStr => slice(tcx.types.u8),
ty::TyDynamic(..) => tcx.mk_mut_ptr(tcx.mk_nil()),
_ => bug!("TyLayout::field_type({:?}): not applicable", self)
}
};
match self.ty.sty {
ty::TyBool |
ty::TyChar |
ty::TyInt(_) |
ty::TyUint(_) |
ty::TyFloat(_) |
ty::TyFnPtr(_) |
ty::TyNever |
ty::TyFnDef(..) |
ty::TyDynamic(..) => {
bug!("TyLayout::field_type({:?}): not applicable", self)
}
// Potentially-fat pointers.
ty::TyRef(_, ty::TypeAndMut { ty: pointee, .. }) |
ty::TyRawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
ptr_field_type(pointee)
}
ty::TyAdt(def, _) if def.is_box() => {
ptr_field_type(self.ty.boxed_ty())
}
// Arrays and slices.
ty::TyArray(element, _) |
ty::TySlice(element) => element,
ty::TyStr => tcx.types.u8,
// Tuples and closures.
ty::TyClosure(def_id, ref substs) => {
substs.upvar_tys(def_id, tcx).nth(i).unwrap()
}
ty::TyTuple(tys, _) => tys[i],
// SIMD vector types.
ty::TyAdt(def, ..) if def.repr.simd => {
self.ty.simd_type(tcx)
}
// ADTs.
ty::TyAdt(def, substs) => {
def.variants[self.variant_index.unwrap_or(0)].fields[i].ty(tcx, substs)
}
ty::TyProjection(_) | ty::TyAnon(..) | ty::TyParam(_) |
ty::TyInfer(_) | ty::TyError => {
bug!("TyLayout::field_type: unexpected type `{}`", self.ty)
}
}
}
pub fn field<C: LayoutTyper<'tcx>>(&self, cx: C, i: usize) -> C::TyLayout {
cx.layout_of(self.field_type(cx, i))
}
}

View file

@@ -733,7 +733,7 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
         });

         if let Layout::General { ref variants, ref size, discr, .. } = *layout {
-            let discr_size = Primitive::Int(discr).size(&cx.tcx.data_layout).bytes();
+            let discr_size = Primitive::Int(discr).size(cx.tcx).bytes();
             debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
                    t, size.bytes(), layout);

View file

@@ -8,7 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use llvm::{self, ValueRef, Integer, Pointer, Float, Double, Struct, Array, Vector, AttributePlace};
+use llvm::{self, ValueRef, AttributePlace};
 use base;
 use builder::Builder;
 use common::{type_is_fat_ptr, C_uint};
@@ -29,19 +29,20 @@ use cabi_sparc;
 use cabi_sparc64;
 use cabi_nvptx;
 use cabi_nvptx64;
-use machine::{llalign_of_min, llsize_of, llsize_of_alloc};
+use machine::llalign_of_min;
 use type_::Type;
 use type_of;

 use rustc::hir;
 use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, Layout, LayoutTyper, TyLayout, Size};

 use libc::c_uint;
 use std::cmp;
+use std::iter;

 pub use syntax::abi::Abi;
 pub use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
-use rustc::ty::layout::Layout;
@@ -132,33 +133,293 @@ impl ArgAttributes {
         }
     }
 }
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum RegKind {
Integer,
Float,
Vector
}
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct Reg {
pub kind: RegKind,
pub size: Size,
}
macro_rules! reg_ctor {
($name:ident, $kind:ident, $bits:expr) => {
pub fn $name() -> Reg {
Reg {
kind: RegKind::$kind,
size: Size::from_bits($bits)
}
}
}
}
impl Reg {
reg_ctor!(i8, Integer, 8);
reg_ctor!(i16, Integer, 16);
reg_ctor!(i32, Integer, 32);
reg_ctor!(i64, Integer, 64);
reg_ctor!(f32, Float, 32);
reg_ctor!(f64, Float, 64);
}
impl Reg {
fn llvm_type(&self, ccx: &CrateContext) -> Type {
match self.kind {
RegKind::Integer => Type::ix(ccx, self.size.bits()),
RegKind::Float => {
match self.size.bits() {
32 => Type::f32(ccx),
64 => Type::f64(ccx),
_ => bug!("unsupported float: {:?}", self)
}
}
RegKind::Vector => {
Type::vector(&Type::i8(ccx), self.size.bytes())
}
}
}
}
/// An argument passed entirely registers with the
/// same kind (e.g. HFA / HVA on PPC64 and AArch64).
#[derive(Copy, Clone)]
pub struct Uniform {
pub unit: Reg,
/// The total size of the argument, which can be:
/// * equal to `unit.size` (one scalar/vector)
/// * a multiple of `unit.size` (an array of scalar/vectors)
/// * if `unit.kind` is `Integer`, the last element
/// can be shorter, i.e. `{ i64, i64, i32 }` for
/// 64-bit integers with a total size of 20 bytes
pub total: Size,
}
impl From<Reg> for Uniform {
fn from(unit: Reg) -> Uniform {
Uniform {
unit,
total: unit.size
}
}
}
impl Uniform {
fn llvm_type(&self, ccx: &CrateContext) -> Type {
let llunit = self.unit.llvm_type(ccx);
if self.total <= self.unit.size {
return llunit;
}
let count = self.total.bytes() / self.unit.size.bytes();
let rem_bytes = self.total.bytes() % self.unit.size.bytes();
if rem_bytes == 0 {
return Type::array(&llunit, count);
}
// Only integers can be really split further.
assert_eq!(self.unit.kind, RegKind::Integer);
let args: Vec<_> = (0..count).map(|_| llunit)
.chain(iter::once(Type::ix(ccx, rem_bytes * 8)))
.collect();
Type::struct_(ccx, &args, false)
}
}
pub trait LayoutExt<'tcx> {
fn is_aggregate(&self) -> bool;
fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg>;
}
impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
fn is_aggregate(&self) -> bool {
match *self.layout {
Layout::Scalar { .. } |
Layout::RawNullablePointer { .. } |
Layout::CEnum { .. } |
Layout::Vector { .. } => false,
Layout::Array { .. } |
Layout::FatPointer { .. } |
Layout::Univariant { .. } |
Layout::UntaggedUnion { .. } |
Layout::General { .. } |
Layout::StructWrappedNullablePointer { .. } => true
}
}
fn homogenous_aggregate<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Option<Reg> {
match *self.layout {
// The primitives for this algorithm.
Layout::Scalar { value, .. } |
Layout::RawNullablePointer { value, .. } => {
let kind = match value {
layout::Int(_) |
layout::Pointer => RegKind::Integer,
layout::F32 |
layout::F64 => RegKind::Float
};
Some(Reg {
kind,
size: self.size(ccx)
})
}
Layout::CEnum { .. } => {
Some(Reg {
kind: RegKind::Integer,
size: self.size(ccx)
})
}
Layout::Vector { .. } => {
Some(Reg {
kind: RegKind::Integer,
size: self.size(ccx)
})
}
Layout::Array { count, .. } => {
if count > 0 {
self.field(ccx, 0).homogenous_aggregate(ccx)
} else {
None
}
}
Layout::Univariant { ref variant, .. } => {
let mut unaligned_offset = Size::from_bytes(0);
let mut result = None;
for i in 0..self.field_count() {
if unaligned_offset != variant.offsets[i] {
return None;
}
let field = self.field(ccx, i);
match (result, field.homogenous_aggregate(ccx)) {
// The field itself must be a homogenous aggregate.
(_, None) => return None,
// If this is the first field, record the unit.
(None, Some(unit)) => {
result = Some(unit);
}
// For all following fields, the unit must be the same.
(Some(prev_unit), Some(unit)) => {
if prev_unit != unit {
return None;
}
}
}
// Keep track of the offset (without padding).
let size = field.size(ccx);
match unaligned_offset.checked_add(size, ccx) {
Some(offset) => unaligned_offset = offset,
None => return None
}
}
// There needs to be no padding.
if unaligned_offset != self.size(ccx) {
None
} else {
result
}
}
Layout::UntaggedUnion { .. } => {
let mut max = Size::from_bytes(0);
let mut result = None;
for i in 0..self.field_count() {
let field = self.field(ccx, i);
match (result, field.homogenous_aggregate(ccx)) {
// The field itself must be a homogenous aggregate.
(_, None) => return None,
// If this is the first field, record the unit.
(None, Some(unit)) => {
result = Some(unit);
}
// For all following fields, the unit must be the same.
(Some(prev_unit), Some(unit)) => {
if prev_unit != unit {
return None;
}
}
}
// Keep track of the offset (without padding).
let size = field.size(ccx);
if size > max {
max = size;
}
}
// There needs to be no padding.
if max != self.size(ccx) {
None
} else {
result
}
}
// Rust-specific types, which we can ignore for C ABIs.
Layout::FatPointer { .. } |
Layout::General { .. } |
Layout::StructWrappedNullablePointer { .. } => None
}
}
}
pub enum CastTarget {
Uniform(Uniform),
Pair(Reg, Reg)
}
impl From<Reg> for CastTarget {
fn from(unit: Reg) -> CastTarget {
CastTarget::Uniform(Uniform::from(unit))
}
}
impl From<Uniform> for CastTarget {
fn from(uniform: Uniform) -> CastTarget {
CastTarget::Uniform(uniform)
}
}
impl CastTarget {
fn llvm_type(&self, ccx: &CrateContext) -> Type {
match *self {
CastTarget::Uniform(u) => u.llvm_type(ccx),
CastTarget::Pair(a, b) => {
Type::struct_(ccx, &[
a.llvm_type(ccx),
b.llvm_type(ccx)
], false)
}
}
}
}
 /// Information about how a specific C type
 /// should be passed to or returned from a function
 ///
 /// This is borrowed from clang's ABIInfo.h
 #[derive(Clone, Copy, Debug)]
-pub struct ArgType {
+pub struct ArgType<'tcx> {
     kind: ArgKind,
-    /// Original LLVM type
-    pub original_ty: Type,
-    /// Sizing LLVM type (pointers are opaque).
-    /// Unlike original_ty, this is guaranteed to be complete.
-    ///
-    /// For example, while we're computing the function pointer type in
-    /// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`.
-    /// The field type will likely end up being `void(%Foo)*`, but we cannot
-    /// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`,
-    /// until `%Foo` is completed by having all of its field types inserted,
-    /// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers
-    /// with opaque ones, resulting in `{i8*}` for `Foo`.
-    /// ABI-specific logic can then look at the size, alignment and fields of
-    /// `{i8*}` in order to determine how the argument will be passed.
-    /// Only later will `original_ty` aka `%Foo` be used in the LLVM function
-    /// pointer type, without ever having introspected it.
-    pub ty: Type,
-    /// Signedness for integer types, None for other types
-    pub signedness: Option<bool>,
+    pub layout: TyLayout<'tcx>,
     /// Coerced LLVM Type
     pub cast: Option<Type>,
     /// Dummy argument, which is emitted before the real argument
@@ -167,26 +428,24 @@ pub struct ArgType {
     pub attrs: ArgAttributes
 }

-impl ArgType {
-    fn new(original_ty: Type, ty: Type) -> ArgType {
+impl<'a, 'tcx> ArgType<'tcx> {
+    fn new(layout: TyLayout<'tcx>) -> ArgType<'tcx> {
         ArgType {
             kind: ArgKind::Direct,
-            original_ty: original_ty,
-            ty: ty,
-            signedness: None,
+            layout: layout,
             cast: None,
             pad: None,
             attrs: ArgAttributes::default()
         }
     }

-    pub fn make_indirect(&mut self, ccx: &CrateContext) {
+    pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
         assert_eq!(self.kind, ArgKind::Direct);

         // Wipe old attributes, likely not valid through indirection.
         self.attrs = ArgAttributes::default();

-        let llarg_sz = llsize_of_alloc(ccx, self.ty);
+        let llarg_sz = self.layout.size(ccx).bytes();

         // For non-immediate arguments the callee gets its own copy of
         // the value on the stack, so there are no aliases. It's also
@@ -205,17 +464,44 @@ impl ArgType {
     pub fn extend_integer_width_to(&mut self, bits: u64) {
         // Only integers have signedness
-        if let Some(signed) = self.signedness {
-            if self.ty.int_width() < bits {
-                self.attrs.set(if signed {
-                    ArgAttribute::SExt
-                } else {
-                    ArgAttribute::ZExt
-                });
-            }
-        }
+        let (i, signed) = match *self.layout {
+            Layout::Scalar { value, .. } => {
+                match value {
+                    layout::Int(i) => {
+                        if self.layout.ty.is_integral() {
+                            (i, self.layout.ty.is_signed())
+                        } else {
+                            return;
+                        }
+                    }
+                    _ => return
+                }
+            }
+
+            // Rust enum types that map onto C enums also need to follow
+            // the target ABI zero-/sign-extension rules.
+            Layout::CEnum { discr, signed, .. } => (discr, signed),
+
+            _ => return
+        };
+
+        if i.size().bits() < bits {
+            self.attrs.set(if signed {
+                ArgAttribute::SExt
+            } else {
+                ArgAttribute::ZExt
+            });
+        }
     }

+    pub fn cast_to<T: Into<CastTarget>>(&mut self, ccx: &CrateContext, target: T) {
+        self.cast = Some(target.into().llvm_type(ccx));
+    }
+
+    pub fn pad_with(&mut self, ccx: &CrateContext, reg: Reg) {
+        self.pad = Some(reg.llvm_type(ccx));
+    }
+
     pub fn is_indirect(&self) -> bool {
         self.kind == ArgKind::Indirect
     }
@@ -224,18 +510,24 @@ impl ArgType {
         self.kind == ArgKind::Ignore
     }

+    /// Get the LLVM type for an lvalue of the original Rust type of
+    /// this argument/return, i.e. the result of `type_of::type_of`.
+    pub fn memory_ty(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
+        type_of::type_of(ccx, self.layout.ty)
+    }
+
     /// Store a direct/indirect value described by this ArgType into a
     /// lvalue for the original Rust type of this argument/return.
     /// Can be used for both storing formal arguments into Rust variables
     /// or results of call/invoke instructions into their destinations.
-    pub fn store(&self, bcx: &Builder, mut val: ValueRef, dst: ValueRef) {
+    pub fn store(&self, bcx: &Builder<'a, 'tcx>, mut val: ValueRef, dst: ValueRef) {
         if self.is_ignore() {
             return;
         }
         let ccx = bcx.ccx;
         if self.is_indirect() {
-            let llsz = llsize_of(ccx, self.ty);
-            let llalign = llalign_of_min(ccx, self.ty);
+            let llsz = C_uint(ccx, self.layout.size(ccx).bytes());
+            let llalign = self.layout.align(ccx).abi();
             base::call_memcpy(bcx, dst, val, llsz, llalign as u32);
         } else if let Some(ty) = self.cast {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
@@ -243,8 +535,8 @@ impl ArgType {
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
                 let cast_dst = bcx.pointercast(dst, ty.ptr_to());
-                let llalign = llalign_of_min(ccx, self.ty);
-                bcx.store(val, cast_dst, Some(llalign));
+                let llalign = self.layout.align(ccx).abi();
+                bcx.store(val, cast_dst, Some(llalign as u32));
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type. The
@@ -271,21 +563,21 @@ impl ArgType {
                 base::call_memcpy(bcx,
                                   bcx.pointercast(dst, Type::i8p(ccx)),
                                   bcx.pointercast(llscratch, Type::i8p(ccx)),
-                                  C_uint(ccx, llsize_of_alloc(ccx, self.ty)),
-                                  cmp::min(llalign_of_min(ccx, self.ty),
-                                           llalign_of_min(ccx, ty)) as u32);
+                                  C_uint(ccx, self.layout.size(ccx).bytes()),
+                                  cmp::min(self.layout.align(ccx).abi() as u32,
+                                           llalign_of_min(ccx, ty)));

                 base::Lifetime::End.call(bcx, llscratch);
             }
         } else {
-            if self.original_ty == Type::i1(ccx) {
+            if self.layout.ty == ccx.tcx().types.bool {
                 val = bcx.zext(val, Type::i8(ccx));
             }
             bcx.store(val, dst, None);
         }
     }

-    pub fn store_fn_arg(&self, bcx: &Builder, idx: &mut usize, dst: ValueRef) {
+    pub fn store_fn_arg(&self, bcx: &Builder<'a, 'tcx>, idx: &mut usize, dst: ValueRef) {
         if self.pad.is_some() {
             *idx += 1;
         }
@@ -304,30 +596,30 @@ impl ArgType {
 /// I will do my best to describe this structure, but these
 /// comments are reverse-engineered and may be inaccurate. -NDM
 #[derive(Clone, Debug)]
-pub struct FnType {
+pub struct FnType<'tcx> {
     /// The LLVM types of each argument.
-    pub args: Vec<ArgType>,
+    pub args: Vec<ArgType<'tcx>>,

     /// LLVM return type.
-    pub ret: ArgType,
+    pub ret: ArgType<'tcx>,

     pub variadic: bool,

     pub cconv: llvm::CallConv
 }

-impl FnType {
-    pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                         sig: ty::FnSig<'tcx>,
-                         extra_args: &[Ty<'tcx>]) -> FnType {
+impl<'a, 'tcx> FnType<'tcx> {
+    pub fn new(ccx: &CrateContext<'a, 'tcx>,
+               sig: ty::FnSig<'tcx>,
+               extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
         fn_ty.adjust_for_abi(ccx, sig);
         fn_ty
     }

-    pub fn new_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                                sig: ty::FnSig<'tcx>,
-                                extra_args: &[Ty<'tcx>]) -> FnType {
+    pub fn new_vtable(ccx: &CrateContext<'a, 'tcx>,
+                      sig: ty::FnSig<'tcx>,
+                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         let mut fn_ty = FnType::unadjusted(ccx, sig, extra_args);
         // Don't pass the vtable, it's not an argument of the virtual fn.
         fn_ty.args[1].ignore();
@@ -335,9 +627,9 @@ impl FnType {
         fn_ty
     }

-    fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-                            sig: ty::FnSig<'tcx>,
-                            extra_args: &[Ty<'tcx>]) -> FnType {
+    pub fn unadjusted(ccx: &CrateContext<'a, 'tcx>,
+                      sig: ty::FnSig<'tcx>,
+                      extra_args: &[Ty<'tcx>]) -> FnType<'tcx> {
         use self::Abi::*;
         let cconv = match ccx.sess().target.target.adjust_abi(sig.abi) {
             RustIntrinsic | PlatformIntrinsic |
@@ -394,23 +686,11 @@ impl FnType {
         };

         let arg_of = |ty: Ty<'tcx>, is_return: bool| {
+            let mut arg = ArgType::new(ccx.layout_of(ty));
             if ty.is_bool() {
-                let llty = Type::i1(ccx);
-                let mut arg = ArgType::new(llty, llty);
                 arg.attrs.set(ArgAttribute::ZExt);
-                arg
             } else {
-                let mut arg = ArgType::new(type_of::type_of(ccx, ty),
-                                           type_of::sizing_type_of(ccx, ty));
-                if ty.is_integral() {
-                    arg.signedness = Some(ty.is_signed());
-                }
-                // Rust enum types that map onto C enums also need to follow
-                // the target ABI zero-/sign-extension rules.
-                if let Layout::CEnum { signed, .. } = *ccx.layout_of(ty) {
-                    arg.signedness = Some(signed);
-                }
-                if llsize_of_alloc(ccx, arg.ty) == 0 {
+                if arg.layout.size(ccx).bytes() == 0 {
                     // For some forsaken reason, x86_64-pc-windows-gnu
                     // doesn't ignore zero-sized struct arguments.
                     // The same is true for s390x-unknown-linux-gnu.
@@ -419,8 +699,8 @@ impl FnType {
                     arg.ignore();
                 }
             }
-                arg
             }
+            arg
         };
let ret_ty = sig.output(); let ret_ty = sig.output();
@@ -439,14 +719,10 @@ impl FnType {
         match ret_ty.sty {
             // These are not really pointers but pairs, (pointer, len)
             ty::TyRef(_, ty::TypeAndMut { ty, .. }) => {
-                let llty = type_of::sizing_type_of(ccx, ty);
-                let llsz = llsize_of_alloc(ccx, llty);
-                ret.attrs.set_dereferenceable(llsz);
+                ret.attrs.set_dereferenceable(ccx.size_of(ty));
             }
             ty::TyAdt(def, _) if def.is_box() => {
-                let llty = type_of::sizing_type_of(ccx, ret_ty.boxed_ty());
-                let llsz = llsize_of_alloc(ccx, llty);
-                ret.attrs.set_dereferenceable(llsz);
+                ret.attrs.set_dereferenceable(ccx.size_of(ret_ty.boxed_ty()));
             }
             _ => {}
         }
@@ -495,13 +771,9 @@ impl FnType {
         for ty in inputs.iter().chain(extra_args.iter()) {
             let mut arg = arg_of(ty, false);

-            if type_is_fat_ptr(ccx, ty) {
-                let original_tys = arg.original_ty.field_types();
-                let sizing_tys = arg.ty.field_types();
-                assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
-                let mut data = ArgType::new(original_tys[0], sizing_tys[0]);
-                let mut info = ArgType::new(original_tys[1], sizing_tys[1]);
+            if let ty::layout::FatPointer { .. } = *arg.layout {
+                let mut data = ArgType::new(arg.layout.field(ccx, 0));
+                let mut info = ArgType::new(arg.layout.field(ccx, 1));

                 if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
                     data.attrs.set(ArgAttribute::NonNull);
@@ -517,9 +789,7 @@ impl FnType {
                 args.push(info);
             } else {
                 if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
-                    let llty = type_of::sizing_type_of(ccx, inner);
-                    let llsz = llsize_of_alloc(ccx, llty);
-                    arg.attrs.set_dereferenceable(llsz);
+                    arg.attrs.set_dereferenceable(ccx.size_of(inner));
                 }
                 args.push(arg);
             }
@@ -533,43 +803,51 @@ impl FnType {
         }
     }

-    fn adjust_for_abi<'a, 'tcx>(&mut self,
-                                ccx: &CrateContext<'a, 'tcx>,
-                                sig: ty::FnSig<'tcx>) {
+    fn adjust_for_abi(&mut self,
+                      ccx: &CrateContext<'a, 'tcx>,
+                      sig: ty::FnSig<'tcx>) {
         let abi = sig.abi;
         if abi == Abi::Unadjusted { return }

         if abi == Abi::Rust || abi == Abi::RustCall ||
            abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
-            let fixup = |arg: &mut ArgType| {
-                let mut llty = arg.ty;
-                // Replace newtypes with their inner-most type.
-                while llty.kind() == llvm::TypeKind::Struct {
-                    let inner = llty.field_types();
-                    if inner.len() != 1 {
-                        break;
-                    }
-                    llty = inner[0];
-                }
-                if !llty.is_aggregate() {
-                    // Scalars and vectors, always immediate.
-                    if llty != arg.ty {
-                        // Needs a cast as we've unpacked a newtype.
-                        arg.cast = Some(llty);
-                    }
+            let fixup = |arg: &mut ArgType<'tcx>| {
+                if !arg.layout.is_aggregate() {
                     return;
                 }
-                let size = llsize_of_alloc(ccx, llty);
-                if size > llsize_of_alloc(ccx, ccx.int_type()) {
+
+                let size = arg.layout.size(ccx);
+
+                if let Some(unit) = arg.layout.homogenous_aggregate(ccx) {
+                    // Replace newtypes with their inner-most type.
+                    if unit.size == size {
+                        // Needs a cast as we've unpacked a newtype.
+                        arg.cast_to(ccx, unit);
+                        return;
+                    }
+
+                    // Pairs of floats.
+                    if unit.kind == RegKind::Float {
+                        if unit.size.checked_mul(2, ccx) == Some(size) {
+                            // FIXME(eddyb) This should be using Uniform instead of a pair,
+                            // but the resulting [2 x float/double] breaks emscripten.
+                            // See https://github.com/kripken/emscripten-fastcomp/issues/178.
+                            arg.cast_to(ccx, CastTarget::Pair(unit, unit));
+                            return;
+                        }
+                    }
+                }
+
+                if size > layout::Pointer.size(ccx) {
                     arg.make_indirect(ccx);
-                } else if size > 0 {
+                } else {
                     // We want to pass small aggregates as immediates, but using
                     // a LLVM aggregate type for this leads to bad optimizations,
                     // so we pick an appropriately sized integer type instead.
-                    arg.cast = Some(Type::ix(ccx, size * 8));
+                    arg.cast_to(ccx, Reg {
+                        kind: RegKind::Integer,
+                        size
+                    });
                 }
             };

             // Fat pointers are returned by-value.
@@ -605,14 +883,7 @@ impl FnType {
                 cabi_x86_64::compute_abi_info(ccx, self);
             },
             "aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
-            "arm" => {
-                let flavor = if ccx.sess().target.target.target_os == "ios" {
-                    cabi_arm::Flavor::Ios
-                } else {
-                    cabi_arm::Flavor::General
-                };
-                cabi_arm::compute_abi_info(ccx, self, flavor);
-            },
+            "arm" => cabi_arm::compute_abi_info(ccx, self),
             "mips" => cabi_mips::compute_abi_info(ccx, self),
             "mips64" => cabi_mips64::compute_abi_info(ccx, self),
             "powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
@@ -633,16 +904,18 @@ impl FnType {
         }
     }

-    pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
+    pub fn llvm_type(&self, ccx: &CrateContext<'a, 'tcx>) -> Type {
         let mut llargument_tys = Vec::new();

         let llreturn_ty = if self.ret.is_ignore() {
             Type::void(ccx)
         } else if self.ret.is_indirect() {
-            llargument_tys.push(self.ret.original_ty.ptr_to());
+            llargument_tys.push(self.ret.memory_ty(ccx).ptr_to());
             Type::void(ccx)
         } else {
-            self.ret.cast.unwrap_or(self.ret.original_ty)
+            self.ret.cast.unwrap_or_else(|| {
+                type_of::immediate_type_of(ccx, self.ret.layout.ty)
+            })
         };

         for arg in &self.args {
@@ -655,9 +928,11 @@ impl FnType {
             }

             let llarg_ty = if arg.is_indirect() {
-                arg.original_ty.ptr_to()
+                arg.memory_ty(ccx).ptr_to()
             } else {
-                arg.cast.unwrap_or(arg.original_ty)
+                arg.cast.unwrap_or_else(|| {
+                    type_of::immediate_type_of(ccx, arg.layout.ty)
+                })
             };

             llargument_tys.push(llarg_ty);
@@ -705,72 +980,6 @@ impl FnType {
     }
 }

-pub fn align_up_to(off: usize, a: usize) -> usize {
-    return (off + a - 1) / a * a;
-}
-
-fn align(off: usize, ty: Type, pointer: usize) -> usize {
-    let a = ty_align(ty, pointer);
-    return align_up_to(off, a);
-}
-
-pub fn ty_align(ty: Type, pointer: usize) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => pointer,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t, pointer)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt, pointer)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ty_align(elt, pointer) * len
-        }
-        _ => bug!("ty_align: unhandled type")
-    }
-}
-
-pub fn ty_size(ty: Type, pointer: usize) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => pointer,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t, pointer))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| {
-                    align(s, *t, pointer) + ty_size(*t, pointer)
-                });
-                align(size, ty, pointer)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, pointer);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, pointer);
-            len * eltsz
-        },
-        _ => bug!("ty_size: unhandled type")
-    }
-}
+pub fn align_up_to(off: u64, a: u64) -> u64 {
+    (off + a - 1) / a * a
+}

View file

@@ -46,8 +46,8 @@ use super::Disr;
 use std;

 use llvm::{ValueRef, True, IntEQ, IntNE};
-use rustc::ty::layout;
-use rustc::ty::{self, Ty, AdtKind};
+use rustc::ty::{self, Ty};
+use rustc::ty::layout::{self, LayoutTyper};
 use common::*;
 use builder::Builder;
 use base;
@@ -95,15 +95,6 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
     generic_type_of(cx, t, None, false, false)
 }

-// Pass dst=true if the type you are passing is a DST. Yes, we could figure
-// this out, but if you call this on an unsized type without realising it, you
-// are going to get the wrong type (it will not include the unsized parts of it).
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
-                                t: Ty<'tcx>, dst: bool) -> Type {
-    generic_type_of(cx, t, None, true, dst)
-}
-
 pub fn incomplete_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                                     t: Ty<'tcx>, name: &str) -> Type {
     generic_type_of(cx, t, Some(name), false, false)
@@ -149,7 +140,11 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
             };
             let nnty = monomorphize::field_ty(cx.tcx(), substs,
                 &def.variants[nndiscr as usize].fields[0]);
-            type_of::sizing_type_of(cx, nnty)
+            if let layout::Scalar { value: layout::Pointer, .. } = *cx.layout_of(nnty) {
+                Type::i8p(cx)
+            } else {
+                type_of::type_of(cx, nnty)
+            }
         }
         layout::StructWrappedNullablePointer { nndiscr, ref nonnull, .. } => {
             let fields = compute_fields(cx, t, nndiscr as usize, false);
@@ -181,10 +176,6 @@ fn generic_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
                 }
             }
         }
-        layout::Vector { element, count } => {
-            let elem_ty = Type::from_primitive(cx, element);
-            Type::vector(&elem_ty, count)
-        }
         layout::UntaggedUnion { ref variants, .. }=> {
             // Use alignment-sized ints to fill all the union storage.
             let size = variants.stride().bytes();
@@ -246,9 +237,8 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
     assert_eq!(size%align, 0);
     assert_eq!(align.count_ones(), 1, "Alignment must be a power fof 2. Got {}", align);
     let align_units = size/align;
-    let dl = &cx.tcx().data_layout;
     let layout_align = layout::Align::from_bytes(align, align).unwrap();
-    if let Some(ity) = layout::Integer::for_abi_align(dl, layout_align) {
+    if let Some(ity) = layout::Integer::for_abi_align(cx, layout_align) {
         Type::array(&Type::from_integer(cx, ity), align_units)
     } else {
         Type::array(&Type::vector(&Type::i32(cx), align/4),
@@ -259,11 +249,10 @@ fn union_fill(cx: &CrateContext, size: u64, align: u64) -> Type {
 fn struct_llfields<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fields: &Vec<Ty<'tcx>>,
                              variant: &layout::Struct,
-                             sizing: bool, dst: bool) -> Vec<Type> {
+                             sizing: bool, _dst: bool) -> Vec<Type> {
     let fields = variant.field_index_by_increasing_offset().map(|i| fields[i as usize]);
     if sizing {
-        fields.filter(|ty| !dst || cx.shared().type_is_sized(*ty))
-            .map(|ty| type_of::sizing_type_of(cx, ty)).collect()
+        bug!()
     } else {
         fields.map(|ty| type_of::in_memory_type_of(cx, ty)).collect()
     }
@@ -285,11 +274,6 @@ pub fn trans_get_discr<'a, 'tcx>(
     cast_to: Option<Type>,
     range_assert: bool
 ) -> ValueRef {
-    let (def, substs) = match t.sty {
-        ty::TyAdt(ref def, substs) if def.adt_kind() == AdtKind::Enum => (def, substs),
-        _ => bug!("{} is not an enum", t)
-    };
-
     debug!("trans_get_discr t: {:?}", t);
     let l = bcx.ccx.layout_of(t);
@@ -297,19 +281,17 @@ pub fn trans_get_discr<'a, 'tcx>(
         layout::CEnum { discr, min, max, .. } => {
             load_discr(bcx, discr, scrutinee, alignment, min, max, range_assert)
         }
-        layout::General { discr, .. } => {
+        layout::General { discr, ref variants, .. } => {
             let ptr = bcx.struct_gep(scrutinee, 0);
             load_discr(bcx, discr, ptr, alignment,
-                       0, def.variants.len() as u64 - 1,
+                       0, variants.len() as u64 - 1,
                        range_assert)
         }
         layout::Univariant { .. } | layout::UntaggedUnion { .. } => C_u8(bcx.ccx, 0),
         layout::RawNullablePointer { nndiscr, .. } => {
             let cmp = if nndiscr == 0 { IntEQ } else { IntNE };
-            let llptrty = type_of::sizing_type_of(bcx.ccx,
-                monomorphize::field_ty(bcx.tcx(), substs,
-                &def.variants[nndiscr as usize].fields[0]));
-            bcx.icmp(cmp, bcx.load(scrutinee, alignment.to_align()), C_null(llptrty))
+            let discr = bcx.load(scrutinee, alignment.to_align());
+            bcx.icmp(cmp, discr, C_null(val_ty(discr)))
         }
         layout::StructWrappedNullablePointer { nndiscr, ref discrfield, .. } => {
             struct_wrapped_nullable_bitdiscr(bcx, nndiscr, discrfield, scrutinee, alignment)
@@ -383,9 +365,8 @@ pub fn trans_set_discr<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, val: Valu
             assert_eq!(to, Disr(0));
         }
         layout::RawNullablePointer { nndiscr, .. } => {
-            let nnty = compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
             if to.0 != nndiscr {
-                let llptrty = type_of::sizing_type_of(bcx.ccx, nnty);
+                let llptrty = val_ty(val).element_type();
                 bcx.store(C_null(llptrty), val, None);
             }
         }

View file

@@ -59,7 +59,6 @@ use context::{SharedCrateContext, CrateContextList};
 use debuginfo;
 use declare;
 use machine;
-use machine::llsize_of;
 use meth;
 use mir;
 use monomorphize::{self, Instance};
@@ -534,14 +533,13 @@ pub fn memcpy_ty<'a, 'tcx>(
 ) {
     let ccx = bcx.ccx;

-    if type_is_zero_size(ccx, t) {
+    let size = ccx.size_of(t);
+    if size == 0 {
         return;
     }

-    let llty = type_of::type_of(ccx, t);
-    let llsz = llsize_of(ccx, llty);
-    let llalign = align.unwrap_or_else(|| type_of::align_of(ccx, t));
-    call_memcpy(bcx, dst, src, llsz, llalign as u32);
+    let align = align.unwrap_or_else(|| ccx.align_of(t));
+    call_memcpy(bcx, dst, src, C_uint(ccx, size), align);
 }

 pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,
@@ -1297,8 +1295,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
         // (delay format until we actually need it)
         let record = |kind, opt_discr_size, variants| {
             let type_desc = format!("{:?}", ty);
-            let overall_size = layout.size(&tcx.data_layout);
-            let align = layout.align(&tcx.data_layout);
+            let overall_size = layout.size(tcx);
+            let align = layout.align(tcx);
             tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                               type_desc,
                                                               align,
@@ -1334,8 +1332,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
                 session::FieldInfo {
                     name: field_name.to_string(),
                     offset: offset.bytes(),
-                    size: field_layout.size(&tcx.data_layout).bytes(),
-                    align: field_layout.align(&tcx.data_layout).abi(),
+                    size: field_layout.size(tcx).bytes(),
+                    align: field_layout.align(tcx).abi(),
                 }
             }
         }
@@ -1345,8 +1343,8 @@ fn gather_type_sizes<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
             session::VariantInfo {
                 name: Some(name.to_string()),
                 kind: session::SizeKind::Exact,
-                align: value.align(&tcx.data_layout).abi(),
-                size: value.size(&tcx.data_layout).bytes(),
+                align: value.align(tcx).abi(),
+                size: value.size(tcx).bytes(),
                 fields: vec![],
             }
         };

View file

@@ -8,163 +8,99 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![allow(non_upper_case_globals)]
-
-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{self, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 8)
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
-
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    }
-
-    let homog_agg = match ty.kind() {
-        Float => Some((ty, 1)),
-        Double => Some((ty, 1)),
-        Array => check_array(ty),
-        Struct => check_struct(ty),
-        Vector => match ty_size(ty) {
-            4|8 => Some((ty, 1)),
-            _ => None
-        },
-        _ => None
-    };
-
-    // Ensure we have at most four uniquely addressable members
-    homog_agg.and_then(|(base_ty, members)| {
-        if members > 0 && members <= 4 {
-            Some((base_ty, members))
+fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+                                     -> Option<Uniform> {
+    arg.layout.homogenous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
+
+        // Ensure we have at most four uniquely addressable members.
+        if size > unit.size.checked_mul(4, ccx).unwrap() {
+            return None;
+        }
+
+        let valid_unit = match unit.kind {
+            RegKind::Integer => false,
+            RegKind::Float => true,
+            RegKind::Vector => size.bits() == 64 || size.bits() == 128
+        };
+
+        if valid_unit {
+            Some(Uniform {
+                unit,
+                total: size
+            })
         } else {
             None
         }
     })
 }

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
         return;
     }
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
-        ret.cast = Some(Type::array(&base_ty, members));
+    if let Some(uniform) = is_homogenous_aggregate(ccx, ret) {
+        ret.cast_to(ccx, uniform);
         return;
     }
-    let size = ty_size(ret.ty);
-    if size <= 16 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
+    let size = ret.layout.size(ccx);
+    let bits = size.bits();
+    if bits <= 128 {
+        let unit = if bits <= 8 {
+            Reg::i8()
+        } else if bits <= 16 {
+            Reg::i16()
+        } else if bits <= 32 {
+            Reg::i32()
         } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
+            Reg::i64()
         };
-        ret.cast = Some(llty);
+
+        ret.cast_to(ccx, Uniform {
+            unit,
+            total: size
+        });
         return;
     }
     ret.make_indirect(ccx);
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if is_reg_ty(arg.ty) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(32);
         return;
     }
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
-        arg.cast = Some(Type::array(&base_ty, members));
+    if let Some(uniform) = is_homogenous_aggregate(ccx, arg) {
+        arg.cast_to(ccx, uniform);
         return;
     }
-    let size = ty_size(arg.ty);
-    if size <= 16 {
-        let llty = if size == 0 {
-            Type::array(&Type::i64(ccx), 0)
-        } else if size == 1 {
-            Type::i8(ccx)
-        } else if size == 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
+    let size = arg.layout.size(ccx);
+    let bits = size.bits();
+    if bits <= 128 {
+        let unit = if bits <= 8 {
+            Reg::i8()
+        } else if bits <= 16 {
+            Reg::i16()
+        } else if bits <= 32 {
+            Reg::i32()
         } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
+            Reg::i64()
         };
-        arg.cast = Some(llty);
+
+        arg.cast_to(ccx, Uniform {
+            unit,
+            total: size
+        });
         return;
     }
     arg.make_indirect(ccx);
 }

-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }
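
The AArch64 rules above hinge on the homogeneous-aggregate check: an aggregate may travel in floating-point/SIMD registers only if it flattens to at most four members of one float or vector unit, with no padding. A standalone sketch of that rule, using hypothetical simplified types rather than the compiler's own layout API:

    // Standalone sketch of the homogeneous-aggregate rule; `Unit` and
    // `hfa` are hypothetical simplifications, not rustc APIs.
    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Unit { F32, F64 }

    fn unit_size(u: Unit) -> u64 {
        match u { Unit::F32 => 4, Unit::F64 => 8 }
    }

    // An aggregate is an HFA if every leaf field is the same float unit
    // and the total size covers at most four such units with no padding.
    fn hfa(fields: &[Unit], total_size: u64) -> Option<(Unit, u64)> {
        let first = *fields.first()?;
        if fields.iter().any(|&f| f != first) {
            return None;
        }
        let members = fields.len() as u64;
        if members <= 4 && total_size == unit_size(first) * members {
            Some((first, members))
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(hfa(&[Unit::F64; 3], 24), Some((Unit::F64, 3))); // three FP registers
        assert_eq!(hfa(&[Unit::F32, Unit::F64], 12), None);         // mixed units
        assert_eq!(hfa(&[Unit::F32; 5], 20), None);                 // too many members
    }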

View file

@@ -8,156 +8,53 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{self, align_up_to, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-use std::cmp;
-
-pub enum Flavor {
-    General,
-    Ios
-}
-
-type TyAlignFn = fn(ty: Type) -> usize;
-
-fn align(off: usize, ty: Type, align_fn: TyAlignFn) -> usize {
-    let a = align_fn(ty);
-    return align_up_to(off, a);
-}
-
-fn general_ty_align(ty: Type) -> usize {
-    abi::ty_align(ty, 4)
-}
-
-// For more information see:
-// ARMv7
-// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
-//    /iPhoneOSABIReference/Articles/ARMv7FunctionCallingConventions.html
-// ARMv6
-// https://developer.apple.com/library/ios/documentation/Xcode/Conceptual
-//    /iPhoneOSABIReference/Articles/ARMv6FunctionCallingConventions.html
-
-fn ios_ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => cmp::min(4, ((ty.int_width() as usize) + 7) / 8),
-        Pointer => 4,
-        Float => 4,
-        Double => 4,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ios_ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ios_ty_align(elt)
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            ios_ty_align(elt) * len
-        }
-        _ => bug!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 4,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t, align_fn))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter()
-                                  .fold(0, |s, t| {
-                                      align(s, *t, align_fn) + ty_size(*t, align_fn)
-                                  });
-                align(size, ty, align_fn)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, align_fn);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt, align_fn);
-            len * eltsz
-        }
-        _ => bug!("ty_size: unhandled type")
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
         return;
     }
-    let size = ty_size(ret.ty, align_fn);
-    if size <= 4 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
+    let size = ret.layout.size(ccx);
+    let bits = size.bits();
+    if bits <= 32 {
+        let unit = if bits <= 8 {
+            Reg::i8()
+        } else if bits <= 16 {
+            Reg::i16()
         } else {
-            Type::i32(ccx)
+            Reg::i32()
         };
-        ret.cast = Some(llty);
+        ret.cast_to(ccx, Uniform {
+            unit,
+            total: size
+        });
         return;
     }
     ret.make_indirect(ccx);
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) {
-    if is_reg_ty(arg.ty) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(32);
         return;
     }
-    let align = align_fn(arg.ty);
-    let size = ty_size(arg.ty, align_fn);
-    let llty = if align <= 4 {
-        Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
-    } else {
-        Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
-    };
-    arg.cast = Some(llty);
+    let align = arg.layout.align(ccx).abi();
+    let total = arg.layout.size(ccx);
+    arg.cast_to(ccx, Uniform {
+        unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
+        total
+    });
 }

-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
-    let align_fn = match flavor {
-        Flavor::General => general_ty_align as TyAlignFn,
-        Flavor::Ios => ios_ty_align as TyAlignFn,
-    };
-
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
-        classify_ret_ty(ccx, &mut fty.ret, align_fn);
+        classify_ret_ty(ccx, &mut fty.ret);
     }

     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        classify_arg_ty(ccx, arg, align_fn);
+        classify_arg_ty(ccx, arg);
     }
 }

View file

@@ -8,10 +8,7 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![allow(non_upper_case_globals)]
-
-use llvm::{Struct, Array};
-use abi::{FnType, ArgType, ArgAttribute};
+use abi::{FnType, ArgType, ArgAttribute, LayoutExt, Uniform};
 use context::CrateContext;

 // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
@@ -19,31 +16,31 @@ use context::CrateContext;
 // See the https://github.com/kripken/emscripten-fastcomp-clang repository.
 // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    match ret.ty.kind() {
-        Struct => {
-            let field_types = ret.ty.field_types();
-            if field_types.len() == 1 {
-                ret.cast = Some(field_types[0]);
-            } else {
-                ret.make_indirect(ccx);
-            }
-        }
-        Array => {
-            ret.make_indirect(ccx);
-        }
-        _ => {}
-    }
-}
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() {
+        if let Some(unit) = ret.layout.homogenous_aggregate(ccx) {
+            let size = ret.layout.size(ccx);
+            if unit.size == size {
+                ret.cast_to(ccx, Uniform {
+                    unit,
+                    total: size
+                });
+                return;
+            }
+        }
+
+        ret.make_indirect(ccx);
+    }
+}

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if arg.ty.is_aggregate() {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() {
         arg.make_indirect(ccx);
         arg.attrs.set(ArgAttribute::ByVal);
     }
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -8,94 +8,40 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![allow(non_upper_case_globals)]
-
-use libc::c_uint;
 use std::cmp;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Vector};
-use abi::{self, align_up_to, ArgType, FnType};
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_align(ty: Type) -> usize {
-    abi::ty_align(ty, 4)
-}
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 4)
-}

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect(ccx);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
-    let orig_offset = *offset;
-    let size = ty_size(arg.ty) * 8;
-    let mut align = ty_align(arg.ty);
-
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
     align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;

-    if !is_reg_ty(arg.ty) {
-        arg.cast = Some(struct_ty(ccx, arg.ty));
-        arg.pad = padding_ty(ccx, align, orig_offset);
+    if arg.layout.is_aggregate() {
+        arg.cast_to(ccx, Uniform {
+            unit: Reg::i32(),
+            total: size
+        });
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
+        }
     } else {
         arg.extend_integer_width_to(32);
     }
+
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }

-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i32(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i32(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 32;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 32;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }
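
The classification above also maintains a running stack offset, inserting an extra padding register whenever the preceding arguments leave the current one misaligned. A standalone sketch of that arithmetic, where `align_up_to` mirrors the helper of the same name and the argument sizes are hypothetical:

    // Standalone sketch of the running-offset logic used above.
    fn align_up_to(off: u64, align: u64) -> u64 {
        (off + align - 1) / align * align
    }

    fn main() {
        // An 8-byte-aligned argument arriving at offset 4 needs padding,
        // which is what the `((align - 1) & *offset) > 0` test detects.
        let mut offset = 4u64;
        let (size, align) = (16u64, 8u64);
        let needs_pad = ((align - 1) & offset) > 0;
        assert!(needs_pad);
        offset = align_up_to(offset, align);
        offset += align_up_to(size, align);
        assert_eq!(offset, 24);
    }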

View file

@@ -8,94 +8,40 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![allow(non_upper_case_globals)]
-
-use libc::c_uint;
 use std::cmp;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Vector};
-use abi::{self, align_up_to, ArgType, FnType};
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_align(ty: Type) -> usize {
-    abi::ty_align(ty, 8)
-}
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 8)
-}

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
     } else {
         ret.make_indirect(ccx);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
-    let orig_offset = *offset;
-    let size = ty_size(arg.ty) * 8;
-    let mut align = ty_align(arg.ty);
-
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
     align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;

-    if !is_reg_ty(arg.ty) {
-        arg.cast = Some(struct_ty(ccx, arg.ty));
-        arg.pad = padding_ty(ccx, align, orig_offset);
+    if arg.layout.is_aggregate() {
+        arg.cast_to(ccx, Uniform {
+            unit: Reg::i64(),
+            total: size
+        });
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i64());
+        }
     } else {
         arg.extend_integer_width_to(64);
     }
+
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }

-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i64(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i64(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 64;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 64;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -11,17 +11,8 @@
 // Reference: MSP430 Embedded Application Binary Interface
 // http://www.ti.com/lit/an/slaa534/slaa534.pdf

-#![allow(non_upper_case_globals)]
-
-use llvm::Struct;
-use abi::{self, ArgType, FnType};
+use abi::{ArgType, FnType, LayoutExt};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 2)
-}

 // 3.5 Structures or Unions Passed and Returned by Reference
 //
@@ -29,23 +20,23 @@ fn ty_size(ty: Type) -> usize {
 // returned by reference. To pass a structure or union by reference, the caller
 // places its address in the appropriate location: either in a register or on
 // the stack, according to its position in the argument list. (..)"
-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if ret.ty.kind() == Struct && ty_size(ret.ty) > 32 {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
         ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(16);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if arg.ty.kind() == Struct && ty_size(arg.ty) > 32 {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
         arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(16);
     }
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }
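
The MSP430 rule is a pure size test, and it is one of the trivial bugs this PR calls out: the old code compared `ty_size` (a byte count) against 32, while the ABI's by-reference threshold is 32 bits. A standalone sketch of the corrected predicate (a hypothetical helper, not the compiler's API):

    // Standalone sketch of the msp430 by-reference rule after the fix.
    fn pass_indirectly(is_aggregate: bool, size_in_bits: u64) -> bool {
        is_aggregate && size_in_bits > 32
    }

    fn main() {
        assert!(!pass_indirectly(true, 32));  // a 4-byte struct stays direct
        assert!(pass_indirectly(true, 40));   // a 5-byte struct goes by reference
        assert!(!pass_indirectly(false, 64)); // scalars are never forced indirect
    }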

View file

@@ -11,35 +11,26 @@
 // Reference: PTX Writer's Guide to Interoperability
 // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability

-#![allow(non_upper_case_globals)]
-
-use llvm::Struct;
-use abi::{self, ArgType, FnType};
+use abi::{ArgType, FnType, LayoutExt};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 4)
-}

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if ret.ty.kind() == Struct && ty_size(ret.ty) > 32 {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
         ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(32);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if arg.ty.kind() == Struct && ty_size(arg.ty) > 32 {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
         arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(32);
     }
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -11,35 +11,26 @@
 // Reference: PTX Writer's Guide to Interoperability
 // http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability

-#![allow(non_upper_case_globals)]
-
-use llvm::Struct;
-use abi::{self, ArgType, FnType};
+use abi::{ArgType, FnType, LayoutExt};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 8)
-}

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if ret.ty.kind() == Struct && ty_size(ret.ty) > 64 {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 {
         ret.make_indirect(ccx);
     } else {
         ret.extend_integer_width_to(64);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if arg.ty.kind() == Struct && ty_size(arg.ty) > 64 {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 {
         arg.make_indirect(ccx);
     } else {
         arg.extend_integer_width_to(64);
     }
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -8,100 +8,41 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use libc::c_uint;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Vector};
-use abi::{self, align_up_to, FnType, ArgType};
+use abi::{align_up_to, FnType, ArgType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
-use type_::Type;

 use std::cmp;

-fn ty_align(ty: Type) -> usize {
-    if ty.kind() == Vector {
-        bug!("ty_size: unhandled type")
-    } else {
-        abi::ty_align(ty, 4)
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    if ty.kind() == Vector {
-        bug!("ty_size: unhandled type")
-    } else {
-        abi::ty_size(ty, 4)
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect(ccx);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
-    let orig_offset = *offset;
-    let size = ty_size(arg.ty) * 8;
-    let mut align = ty_align(arg.ty);
-
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
     align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;

-    if !is_reg_ty(arg.ty) {
-        arg.cast = Some(struct_ty(ccx, arg.ty));
-        arg.pad = padding_ty(ccx, align, orig_offset);
+    if arg.layout.is_aggregate() {
+        arg.cast_to(ccx, Uniform {
+            unit: Reg::i32(),
+            total: size
+        });
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
+        }
     } else {
         arg.extend_integer_width_to(32);
     }
+
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }

-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i32(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i32(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 32;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 32;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -8,100 +8,42 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-// FIXME: The PowerPC64 ABI needs to zero or sign extend function
-// call parameters, but compute_abi_info() is passed LLVM types
-// which have no sign information.
-//
+// FIXME:
 // Alignment of 128 bit types is not currently handled, this will
 // need to be fixed when PowerPC vector support is added.

-use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array};
-use abi::{self, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    if ty.kind() == Vector {
-        bug!("ty_size: unhandled type")
-    } else {
-        abi::ty_size(ty, 8)
-    }
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
-
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    }
-
-    let homog_agg = match ty.kind() {
-        Float => Some((ty, 1)),
-        Double => Some((ty, 1)),
-        Array => check_array(ty),
-        Struct => check_struct(ty),
-        _ => None
-    };
-
-    // Ensure we have at most eight uniquely addressable members
-    homog_agg.and_then(|(base_ty, members)| {
-        if members > 0 && members <= 8 {
-            Some((base_ty, members))
+fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+                                     -> Option<Uniform> {
+    arg.layout.homogenous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
+
+        // Ensure we have at most eight uniquely addressable members.
+        if size > unit.size.checked_mul(8, ccx).unwrap() {
+            return None;
+        }
+
+        let valid_unit = match unit.kind {
+            RegKind::Integer => false,
+            RegKind::Float => true,
+            RegKind::Vector => size.bits() == 128
+        };
+
+        if valid_unit {
+            Some(Uniform {
+                unit,
+                total: size
+            })
         } else {
             None
         }
     })
 }

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
         return;
     }
@@ -111,78 +53,52 @@ fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
         ret.make_indirect(ccx);
     }

-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
-        ret.cast = Some(Type::array(&base_ty, members));
+    if let Some(uniform) = is_homogenous_aggregate(ccx, ret) {
+        ret.cast_to(ccx, uniform);
         return;
     }
-    let size = ty_size(ret.ty);
-    if size <= 16 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
+
+    let size = ret.layout.size(ccx);
+    let bits = size.bits();
+    if bits <= 128 {
+        let unit = if bits <= 8 {
+            Reg::i8()
+        } else if bits <= 16 {
+            Reg::i16()
+        } else if bits <= 32 {
+            Reg::i32()
         } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
+            Reg::i64()
         };
-        ret.cast = Some(llty);
+
+        ret.cast_to(ccx, Uniform {
+            unit,
+            total: size
+        });
         return;
     }

     ret.make_indirect(ccx);
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if is_reg_ty(arg.ty) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(64);
         return;
     }

-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
-        arg.cast = Some(Type::array(&base_ty, members));
+    if let Some(uniform) = is_homogenous_aggregate(ccx, arg) {
+        arg.cast_to(ccx, uniform);
         return;
     }

-    arg.cast = Some(struct_ty(ccx, arg.ty));
+    let total = arg.layout.size(ccx);
+    arg.cast_to(ccx, Uniform {
+        unit: Reg::i64(),
+        total
+    });
 }

-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => true,
-        _ => false
-    }
-}
-
-fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let long_ty = Type::i64(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 64;
-    while n > 0 {
-        args.push(long_ty);
-        n -= 1;
-    }
-
-    let r = size % 64;
-    if r > 0 {
-        args.push(Type::ix(ccx, r as u64));
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_long(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -11,130 +11,60 @@
 // FIXME: This assumes we're using the non-vector ABI, i.e. compiling
 // for a pre-z13 machine or using -mno-vx.

-use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
-use abi::{align_up_to, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg};
 use context::CrateContext;
-use type_::Type;

-use std::cmp;
+use rustc::ty::layout::{self, Layout, TyLayout};

-fn align(off: usize, ty: Type) -> usize {
-    let a = ty_align(ty);
-    return align_up_to(off, a);
-}
-
-fn ty_align(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                1
-            } else {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(1, |a, t| cmp::max(a, ty_align(*t)))
-            }
-        }
-        Array => {
-            let elt = ty.element_type();
-            ty_align(elt)
-        }
-        Vector => ty_size(ty),
-        _ => bug!("ty_align: unhandled type")
-    }
-}
-
-fn ty_size(ty: Type) -> usize {
-    match ty.kind() {
-        Integer => ((ty.int_width() as usize) + 7) / 8,
-        Pointer => 8,
-        Float => 4,
-        Double => 8,
-        Struct => {
-            if ty.is_packed() {
-                let str_tys = ty.field_types();
-                str_tys.iter().fold(0, |s, t| s + ty_size(*t))
-            } else {
-                let str_tys = ty.field_types();
-                let size = str_tys.iter().fold(0, |s, t| align(s, *t) + ty_size(*t));
-                align(size, ty)
-            }
-        }
-        Array => {
-            let len = ty.array_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        Vector => {
-            let len = ty.vector_length();
-            let elt = ty.element_type();
-            let eltsz = ty_size(elt);
-            len * eltsz
-        }
-        _ => bug!("ty_size: unhandled type")
-    }
-}
-
-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
         ret.extend_integer_width_to(64);
     } else {
         ret.make_indirect(ccx);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if arg.ty.kind() == Struct {
-        fn is_single_fp_element(tys: &[Type]) -> bool {
-            if tys.len() != 1 {
-                return false;
-            }
-            match tys[0].kind() {
-                Float | Double => true,
-                Struct => is_single_fp_element(&tys[0].field_types()),
-                _ => false
-            }
-        }
-
-        if is_single_fp_element(&arg.ty.field_types()) {
-            match ty_size(arg.ty) {
-                4 => arg.cast = Some(Type::f32(ccx)),
-                8 => arg.cast = Some(Type::f64(ccx)),
-                _ => arg.make_indirect(ccx)
-            }
-        } else {
-            match ty_size(arg.ty) {
-                1 => arg.cast = Some(Type::i8(ccx)),
-                2 => arg.cast = Some(Type::i16(ccx)),
-                4 => arg.cast = Some(Type::i32(ccx)),
-                8 => arg.cast = Some(Type::i64(ccx)),
-                _ => arg.make_indirect(ccx)
-            }
-        }
-        return;
-    }
-
-    if is_reg_ty(arg.ty) {
-        arg.extend_integer_width_to(64);
-    } else {
-        arg.make_indirect(ccx);
-    }
-}
-
-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => ty_size(ty) <= 8,
+fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                  layout: TyLayout<'tcx>) -> bool {
+    match *layout {
+        Layout::Scalar { value: layout::F32, .. } |
+        Layout::Scalar { value: layout::F64, .. } => true,
+        Layout::Univariant { .. } => {
+            if layout.field_count() == 1 {
+                is_single_fp_element(ccx, layout.field(ccx, 0))
+            } else {
+                false
+            }
+        }
         _ => false
     }
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    let size = arg.layout.size(ccx);
+    if !arg.layout.is_aggregate() && size.bits() <= 64 {
+        arg.extend_integer_width_to(64);
+        return;
+    }
+
+    if is_single_fp_element(ccx, arg.layout) {
+        match size.bytes() {
+            4 => arg.cast_to(ccx, Reg::f32()),
+            8 => arg.cast_to(ccx, Reg::f64()),
+            _ => arg.make_indirect(ccx)
+        }
+    } else {
+        match size.bytes() {
+            1 => arg.cast_to(ccx, Reg::i8()),
+            2 => arg.cast_to(ccx, Reg::i16()),
+            4 => arg.cast_to(ccx, Reg::i32()),
+            8 => arg.cast_to(ccx, Reg::i64()),
+            _ => arg.make_indirect(ccx)
+        }
+    }
+}
+
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }
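
The new `is_single_fp_element` recurses through single-field wrappers, so a newtype around an `f64` still travels in a floating-point register. A standalone sketch over a toy layout tree (`Toy` is hypothetical, not rustc's `Layout`):

    // Standalone sketch of the single-FP-element test used above.
    enum Toy {
        F32,
        F64,
        Int,
        Struct(Vec<Toy>),
    }

    // A struct qualifies only if it wraps exactly one field that is
    // itself (transitively) a lone f32/f64.
    fn is_single_fp_element(layout: &Toy) -> bool {
        match layout {
            Toy::F32 | Toy::F64 => true,
            Toy::Struct(fields) if fields.len() == 1 => is_single_fp_element(&fields[0]),
            _ => false,
        }
    }

    fn main() {
        let newtype = Toy::Struct(vec![Toy::Struct(vec![Toy::F64])]);
        assert!(is_single_fp_element(&newtype));
        assert!(!is_single_fp_element(&Toy::Struct(vec![Toy::F32, Toy::F32])));
        assert!(!is_single_fp_element(&Toy::Int));
    }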

View file

@@ -8,94 +8,40 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-#![allow(non_upper_case_globals)]
-
-use libc::c_uint;
 use std::cmp;
-use llvm;
-use llvm::{Integer, Pointer, Float, Double, Vector};
-use abi::{self, align_up_to, ArgType, FnType};
+use abi::{align_up_to, ArgType, FnType, LayoutExt, Reg, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_align(ty: Type) -> usize {
-    abi::ty_align(ty, 4)
-}
-
-fn ty_size(ty: Type) -> usize {
-    abi::ty_size(ty, 4)
-}

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(32);
     } else {
         ret.make_indirect(ccx);
     }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
-    let orig_offset = *offset;
-    let size = ty_size(arg.ty) * 8;
-    let mut align = ty_align(arg.ty);
-
+fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut u64) {
+    let size = arg.layout.size(ccx);
+    let mut align = arg.layout.align(ccx).abi();
     align = cmp::min(cmp::max(align, 4), 8);
-    *offset = align_up_to(*offset, align);
-    *offset += align_up_to(size, align * 8) / 8;

-    if !is_reg_ty(arg.ty) {
-        arg.cast = Some(struct_ty(ccx, arg.ty));
-        arg.pad = padding_ty(ccx, align, orig_offset);
+    if arg.layout.is_aggregate() {
+        arg.cast_to(ccx, Uniform {
+            unit: Reg::i32(),
+            total: size
+        });
+        if ((align - 1) & *offset) > 0 {
+            arg.pad_with(ccx, Reg::i32());
+        }
     } else {
-        arg.extend_integer_width_to(32);
+        arg.extend_integer_width_to(32)
     }
-}

-fn is_reg_ty(ty: Type) -> bool {
-    return match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double
-        | Vector => true,
-        _ => false
-    };
-}
-
-fn padding_ty(ccx: &CrateContext, align: usize, offset: usize) -> Option<Type> {
-    if ((align - 1 ) & offset) > 0 {
-        Some(Type::i32(ccx))
-    } else {
-        None
-    }
-}
-
-fn coerce_to_int(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let int_ty = Type::i32(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 32;
-    while n > 0 {
-        args.push(int_ty);
-        n -= 1;
-    }
-
-    let r = size % 32;
-    if r > 0 {
-        unsafe {
-            args.push(Type::from_ref(llvm::LLVMIntTypeInContext(ccx.llcx(), r as c_uint)));
-        }
-    }
-
-    args
+    *offset = align_up_to(*offset, align);
+    *offset += align_up_to(size.bytes(), align);
 }

-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_int(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -10,170 +10,89 @@

 // FIXME: This needs an audit for correctness and completeness.

-use llvm::{Integer, Pointer, Float, Double, Struct, Vector, Array};
-use abi::{self, FnType, ArgType};
+use abi::{FnType, ArgType, LayoutExt, Reg, RegKind, Uniform};
 use context::CrateContext;
-use type_::Type;
-
-fn ty_size(ty: Type) -> usize {
-    if ty.kind() == Vector {
-        bug!("ty_size: unhandled type")
-    } else {
-        abi::ty_size(ty, 8)
-    }
-}
-
-fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
-    fn check_array(ty: Type) -> Option<(Type, u64)> {
-        let len = ty.array_length() as u64;
-        if len == 0 {
-            return None
-        }
-        let elt = ty.element_type();
-
-        // if our element is an HFA/HVA, so are we; multiply members by our len
-        is_homogenous_aggregate_ty(elt).map(|(base_ty, members)| (base_ty, len * members))
-    }
-
-    fn check_struct(ty: Type) -> Option<(Type, u64)> {
-        let str_tys = ty.field_types();
-        if str_tys.len() == 0 {
-            return None
-        }
-
-        let mut prev_base_ty = None;
-        let mut members = 0;
-        for opt_homog_agg in str_tys.iter().map(|t| is_homogenous_aggregate_ty(*t)) {
-            match (prev_base_ty, opt_homog_agg) {
-                // field isn't itself an HFA, so we aren't either
-                (_, None) => return None,
-
-                // first field - store its type and number of members
-                (None, Some((field_ty, field_members))) => {
-                    prev_base_ty = Some(field_ty);
-                    members = field_members;
-                },
-
-                // 2nd or later field - give up if it's a different type; otherwise incr. members
-                (Some(prev_ty), Some((field_ty, field_members))) => {
-                    if prev_ty != field_ty {
-                        return None;
-                    }
-                    members += field_members;
-                }
-            }
-        }
-
-        // Because of previous checks, we know prev_base_ty is Some(...) because
-        //   1. str_tys has at least one element; and
-        //   2. prev_base_ty was filled in (or we would've returned early)
-        let (base_ty, members) = (prev_base_ty.unwrap(), members);
-
-        // Ensure there is no padding.
-        if ty_size(ty) == ty_size(base_ty) * (members as usize) {
-            Some((base_ty, members))
-        } else {
-            None
-        }
-    }
-
-    let homog_agg = match ty.kind() {
-        Float => Some((ty, 1)),
-        Double => Some((ty, 1)),
-        Array => check_array(ty),
-        Struct => check_struct(ty),
-        _ => None
-    };
-
-    // Ensure we have at most eight uniquely addressable members
-    homog_agg.and_then(|(base_ty, members)| {
-        if members > 0 && members <= 8 {
-            Some((base_ty, members))
+fn is_homogenous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
+                                     -> Option<Uniform> {
+    arg.layout.homogenous_aggregate(ccx).and_then(|unit| {
+        let size = arg.layout.size(ccx);
+
+        // Ensure we have at most eight uniquely addressable members.
+        if size > unit.size.checked_mul(8, ccx).unwrap() {
+            return None;
+        }
+
+        let valid_unit = match unit.kind {
+            RegKind::Integer => false,
+            RegKind::Float => true,
+            RegKind::Vector => size.bits() == 128
+        };
+
+        if valid_unit {
+            Some(Uniform {
+                unit,
+                total: size
+            })
         } else {
             None
         }
     })
 }

-fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
-    if is_reg_ty(ret.ty) {
+fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
+    if !ret.layout.is_aggregate() {
         ret.extend_integer_width_to(64);
         return;
     }

+    if let Some(uniform) = is_homogenous_aggregate(ccx, ret) {
+        ret.cast_to(ccx, uniform);
+        return;
+    }
+
+    let size = ret.layout.size(ccx);
+    let bits = size.bits();
+    if bits <= 128 {
+        let unit = if bits <= 8 {
+            Reg::i8()
+        } else if bits <= 16 {
+            Reg::i16()
+        } else if bits <= 32 {
+            Reg::i32()
+        } else {
+            Reg::i64()
+        };
+
+        ret.cast_to(ccx, Uniform {
+            unit,
+            total: size
+        });
+        return;
+    }
+
     // don't return aggregates in registers
     ret.make_indirect(ccx);
-
-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
-        ret.cast = Some(Type::array(&base_ty, members));
-        return;
-    }
-    let size = ty_size(ret.ty);
-    if size <= 16 {
-        let llty = if size <= 1 {
-            Type::i8(ccx)
-        } else if size <= 2 {
-            Type::i16(ccx)
-        } else if size <= 4 {
-            Type::i32(ccx)
-        } else if size <= 8 {
-            Type::i64(ccx)
-        } else {
-            Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
-        };
-        ret.cast = Some(llty);
-        return;
-    }
 }

-fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
-    if is_reg_ty(arg.ty) {
+fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
+    if !arg.layout.is_aggregate() {
         arg.extend_integer_width_to(64);
         return;
     }

-    if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
-        arg.cast = Some(Type::array(&base_ty, members));
+    if let Some(uniform) = is_homogenous_aggregate(ccx, arg) {
+        arg.cast_to(ccx, uniform);
         return;
     }

-    arg.cast = Some(struct_ty(ccx, arg.ty));
+    let total = arg.layout.size(ccx);
+    arg.cast_to(ccx, Uniform {
+        unit: Reg::i64(),
+        total
+    });
 }

-fn is_reg_ty(ty: Type) -> bool {
-    match ty.kind() {
-        Integer
-        | Pointer
-        | Float
-        | Double => true,
-        _ => false
-    }
-}
-
-fn coerce_to_long(ccx: &CrateContext, size: usize) -> Vec<Type> {
-    let long_ty = Type::i64(ccx);
-    let mut args = Vec::new();
-
-    let mut n = size / 64;
-    while n > 0 {
-        args.push(long_ty);
-        n -= 1;
-    }
-
-    let r = size % 64;
-    if r > 0 {
-        args.push(Type::ix(ccx, r as u64));
-    }
-
-    args
-}
-
-fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
-    let size = ty_size(ty) * 8;
-    Type::struct_(ccx, &coerce_to_long(ccx, size), false)
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     if !fty.ret.is_ignore() {
         classify_ret_ty(ccx, &mut fty.ret);
     }

View file

@@ -8,11 +8,8 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use llvm::*;
-use abi::{ArgAttribute, FnType};
-use type_::Type;
-use super::common::*;
-use super::machine::*;
+use abi::{ArgAttribute, FnType, LayoutExt, Reg, RegKind};
+use common::CrateContext;

 #[derive(PartialEq)]
 pub enum Flavor {
@@ -20,9 +17,11 @@ pub enum Flavor {
     Fastcall
 }

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
+                                  fty: &mut FnType<'tcx>,
+                                  flavor: Flavor) {
     if !fty.ret.is_ignore() {
-        if fty.ret.ty.kind() == Struct {
+        if fty.ret.layout.is_aggregate() {
             // Returning a structure. Most often, this will use
             // a hidden first argument. On some platforms, though,
             // small structs are returned as integers.
@@ -33,11 +32,12 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
             let t = &ccx.sess().target.target;
             if t.options.is_like_osx || t.options.is_like_windows
                 || t.options.is_like_openbsd {
-                match llsize_of_alloc(ccx, fty.ret.ty) {
-                    1 => fty.ret.cast = Some(Type::i8(ccx)),
-                    2 => fty.ret.cast = Some(Type::i16(ccx)),
-                    4 => fty.ret.cast = Some(Type::i32(ccx)),
-                    8 => fty.ret.cast = Some(Type::i64(ccx)),
+                let size = fty.ret.layout.size(ccx);
+                match size.bytes() {
+                    1 => fty.ret.cast_to(ccx, Reg::i8()),
+                    2 => fty.ret.cast_to(ccx, Reg::i16()),
+                    4 => fty.ret.cast_to(ccx, Reg::i32()),
+                    8 => fty.ret.cast_to(ccx, Reg::i64()),
                     _ => fty.ret.make_indirect(ccx)
                 }
             } else {
@@ -50,7 +50,7 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {

     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        if arg.ty.kind() == Struct {
+        if arg.layout.is_aggregate() {
             arg.make_indirect(ccx);
             arg.attrs.set(ArgAttribute::ByVal);
         } else {
@@ -73,12 +73,15 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
         for arg in &mut fty.args {
             if arg.is_ignore() || arg.is_indirect() { continue; }

-            if arg.ty.kind() == Float {
+            // At this point we know this must be a primitive of sorts.
+            let unit = arg.layout.homogenous_aggregate(ccx).unwrap();
+            let size = arg.layout.size(ccx);
+            assert_eq!(unit.size, size);
+            if unit.kind == RegKind::Float {
                 continue;
             }

-            let size = llbitsize_of_real(ccx, arg.ty);
-            let size_in_regs = (size + 31) / 32;
+            let size_in_regs = (size.bits() + 31) / 32;

             if size_in_regs == 0 {
                 continue;
@@ -90,7 +93,7 @@ pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {

             free_regs -= size_in_regs;

-            if size <= 32 && (arg.ty.kind() == Pointer || arg.ty.kind() == Integer) {
+            if size.bits() <= 32 && unit.kind == RegKind::Integer {
                 arg.attrs.set(ArgAttribute::InReg);
             }
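
For `fastcall`, this loop hands out the two 32-bit integer registers (ECX and EDX) left to right, marking arguments `InReg` until the registers run out; floating-point arguments never consume them. A standalone sketch of that accounting under a hypothetical argument list:

    // Standalone sketch of the fastcall register accounting above.
    fn main() {
        let mut free_regs: u64 = 2; // ECX, EDX
        // (size_in_bits, is_integer) per argument; a hypothetical signature.
        for &(bits, is_int) in &[(32u64, true), (32, false), (64, true)] {
            if !is_int {
                continue; // floats never occupy the fastcall registers
            }
            let size_in_regs = (bits + 31) / 32;
            if size_in_regs == 0 {
                continue;
            }
            if size_in_regs > free_regs {
                break;
            }
            free_regs -= size_in_regs;
            if bits <= 32 {
                println!("InReg argument, {} register(s) left", free_regs);
            }
            if free_regs == 0 {
                break;
            }
        }
    }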

View file

@ -11,388 +11,250 @@
// The classification code for the x86_64 ABI is taken from the clay language // The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp // https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
#![allow(non_upper_case_globals)] use abi::{ArgType, ArgAttribute, CastTarget, FnType, LayoutExt, Reg, RegKind};
use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Vector};
use abi::{self, ArgType, ArgAttribute, FnType};
use context::CrateContext; use context::CrateContext;
use type_::Type;
#[derive(Clone, Copy, PartialEq)] use rustc::ty::layout::{self, Layout, TyLayout, Size};
enum RegClass {
NoClass, #[derive(Clone, Copy, PartialEq, Debug)]
enum Class {
None,
Int, Int,
SSEFs, Sse,
SSEFv, SseUp
SSEDs,
SSEDv,
SSEInt(/* bitwidth */ u64),
/// Data that can appear in the upper half of an SSE register.
SSEUp,
X87,
X87Up,
ComplexX87,
Memory
} }
trait TypeMethods { #[derive(Clone, Copy, Debug)]
fn is_reg_ty(&self) -> bool; struct Memory;
}
impl TypeMethods for Type { // Currently supported vector size (AVX).
fn is_reg_ty(&self) -> bool { const LARGEST_VECTOR_SIZE: usize = 256;
match self.kind() { const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
Integer | Pointer | Float | Double => true,
_ => false
}
}
}
impl RegClass { fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
fn is_sse(&self) -> bool { -> Result<[Class; MAX_EIGHTBYTES], Memory> {
match *self { fn unify(cls: &mut [Class],
SSEFs | SSEFv | SSEDs | SSEDv | SSEInt(_) => true, off: u64,
_ => false c: Class) {
} let i = (off / 8) as usize;
} let to_write = match (cls[i], c) {
} (Class::None, _) => c,
(_, Class::None) => return,
trait ClassList { (Class::Int, _) |
fn is_pass_byval(&self) -> bool; (_, Class::Int) => Class::Int,
fn is_ret_bysret(&self) -> bool;
}
impl ClassList for [RegClass] { (Class::Sse, _) |
fn is_pass_byval(&self) -> bool { (_, Class::Sse) => Class::Sse,
if self.is_empty() { return false; }
let class = self[0]; (Class::SseUp, Class::SseUp) => Class::SseUp
class == Memory
|| class == X87
|| class == ComplexX87
}
fn is_ret_bysret(&self) -> bool {
if self.is_empty() { return false; }
self[0] == Memory
}
}
fn classify_ty(ty: Type) -> Vec<RegClass> {
fn align(off: usize, ty: Type) -> usize {
let a = ty_align(ty);
return (off + a - 1) / a * a;
}
fn ty_align(ty: Type) -> usize {
abi::ty_align(ty, 8)
}
fn ty_size(ty: Type) -> usize {
abi::ty_size(ty, 8)
}
fn all_mem(cls: &mut [RegClass]) {
for elt in cls {
*elt = Memory;
}
}
fn unify(cls: &mut [RegClass],
i: usize,
newv: RegClass) {
if cls[i] == newv { return }
let to_write = match (cls[i], newv) {
(NoClass, _) => newv,
(_, NoClass) => return,
(Memory, _) |
(_, Memory) => Memory,
(Int, _) |
(_, Int) => Int,
(X87, _) |
(X87Up, _) |
(ComplexX87, _) |
(_, X87) |
(_, X87Up) |
(_, ComplexX87) => Memory,
(SSEFv, SSEUp) |
(SSEFs, SSEUp) |
(SSEDv, SSEUp) |
(SSEDs, SSEUp) |
(SSEInt(_), SSEUp) => return,
(..) => newv
}; };
cls[i] = to_write; cls[i] = to_write;
} }
fn classify_struct(tys: &[Type], fn classify<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
cls: &mut [RegClass], layout: TyLayout<'tcx>,
i: usize, cls: &mut [Class],
off: usize, off: u64)
packed: bool) { -> Result<(), Memory> {
let mut field_off = off; if off % layout.align(ccx).abi() != 0 {
for ty in tys { if layout.size(ccx).bytes() > 0 {
if !packed { return Err(Memory);
field_off = align(field_off, *ty);
} }
classify(*ty, cls, i, field_off); return Ok(());
field_off += ty_size(*ty);
}
}
fn classify(ty: Type,
cls: &mut [RegClass], ix: usize,
off: usize) {
let t_align = ty_align(ty);
let t_size = ty_size(ty);
let misalign = off % t_align;
if misalign != 0 {
let mut i = off / 8;
let e = (off + t_size + 7) / 8;
while i < e {
unify(cls, ix + i, Memory);
i += 1;
}
return;
} }
match ty.kind() { match *layout {
Integer | Layout::Scalar { value, .. } |
Pointer => { Layout::RawNullablePointer { value, .. } => {
unify(cls, ix + off / 8, Int); let reg = match value {
} layout::Int(_) |
Float => { layout::Pointer => Class::Int,
if off % 8 == 4 { layout::F32 |
unify(cls, ix + off / 8, SSEFv); layout::F64 => Class::Sse
} else {
unify(cls, ix + off / 8, SSEFs);
}
}
Double => {
unify(cls, ix + off / 8, SSEDs);
}
Struct => {
classify_struct(&ty.field_types(), cls, ix, off, ty.is_packed());
}
Array => {
let len = ty.array_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut i = 0;
while i < len {
classify(elt, cls, ix, off + i * eltsz);
i += 1;
}
}
Vector => {
let len = ty.vector_length();
let elt = ty.element_type();
let eltsz = ty_size(elt);
let mut reg = match elt.kind() {
Integer => SSEInt(elt.int_width()),
Float => SSEFv,
Double => SSEDv,
_ => bug!("classify: unhandled vector element type")
}; };
unify(cls, off, reg);
}
let mut i = 0; Layout::CEnum { .. } => {
while i < len { unify(cls, off, Class::Int);
unify(cls, ix + (off + i * eltsz) / 8, reg); }
// everything after the first one is the upper Layout::Vector { element, count } => {
// half of a register. unify(cls, off, Class::Sse);
reg = SSEUp;
i += 1; // everything after the first one is the upper
// half of a register.
let eltsz = element.size(ccx).bytes();
for i in 1..count {
unify(cls, off + i * eltsz, Class::SseUp);
} }
} }
_ => bug!("classify: unhandled type")
}
}
fn fixup(ty: Type, cls: &mut [RegClass]) { Layout::Array { count, .. } => {
let mut i = 0; if count > 0 {
let ty_kind = ty.kind(); let elt = layout.field(ccx, 0);
let e = cls.len(); let eltsz = elt.size(ccx).bytes();
if cls.len() > 2 && (ty_kind == Struct || ty_kind == Array || ty_kind == Vector) { for i in 0..count {
if cls[i].is_sse() { classify(ccx, elt, cls, off + i * eltsz)?;
i += 1;
while i < e {
if cls[i] != SSEUp {
all_mem(cls);
return;
} }
i += 1;
}
} else {
all_mem(cls);
return
}
} else {
while i < e {
if cls[i] == Memory {
all_mem(cls);
return;
}
if cls[i] == X87Up {
// for darwin
// cls[i] = SSEDs;
all_mem(cls);
return;
}
if cls[i] == SSEUp {
cls[i] = SSEDv;
} else if cls[i].is_sse() {
i += 1;
while i != e && cls[i] == SSEUp { i += 1; }
} else if cls[i] == X87 {
i += 1;
while i != e && cls[i] == X87Up { i += 1; }
} else {
i += 1;
} }
} }
Layout::Univariant { ref variant, .. } => {
for i in 0..layout.field_count() {
let field_off = off + variant.offsets[i].bytes();
classify(ccx, layout.field(ccx, i), cls, field_off)?;
}
}
Layout::UntaggedUnion { .. } => {
for i in 0..layout.field_count() {
classify(ccx, layout.field(ccx, i), cls, off)?;
}
}
Layout::FatPointer { .. } |
Layout::General { .. } |
Layout::StructWrappedNullablePointer { .. } => return Err(Memory)
} }
Ok(())
} }
let words = (ty_size(ty) + 7) / 8; let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize;
let mut cls = vec![NoClass; words]; if n > MAX_EIGHTBYTES {
if words > 4 { return Err(Memory);
all_mem(&mut cls);
return cls;
}
classify(ty, &mut cls, 0, 0);
fixup(ty, &mut cls);
return cls;
}
fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
fn llvec_len(cls: &[RegClass]) -> usize {
let mut len = 1;
for c in cls {
if *c != SSEUp {
break;
}
len += 1;
}
return len;
} }
let mut tys = Vec::new(); let mut cls = [Class::None; MAX_EIGHTBYTES];
let mut i = 0; classify(ccx, arg.layout, &mut cls, 0)?;
let e = cls.len(); if n > 2 {
while i < e { if cls[0] != Class::Sse {
match cls[i] { return Err(Memory);
Int => { }
tys.push(Type::i64(ccx)); if cls[1..n].iter().any(|&c| c != Class::SseUp) {
} return Err(Memory);
SSEFv | SSEDv | SSEInt(_) => {
let (elts_per_word, elt_ty) = match cls[i] {
SSEFv => (2, Type::f32(ccx)),
SSEDv => (1, Type::f64(ccx)),
SSEInt(bits) => {
assert!(bits == 8 || bits == 16 || bits == 32 || bits == 64,
"llreg_ty: unsupported SSEInt width {}", bits);
(64 / bits, Type::ix(ccx, bits))
}
_ => bug!(),
};
let vec_len = llvec_len(&cls[i + 1..]);
let vec_ty = Type::vector(&elt_ty, vec_len as u64 * elts_per_word);
tys.push(vec_ty);
i += vec_len;
continue;
}
SSEFs => {
-                tys.push(Type::f32(ccx));
-            }
-            SSEDs => {
-                tys.push(Type::f64(ccx));
-            }
-            _ => bug!("llregtype: unhandled class")
-        }
-        i += 1;
-    }
-    if tys.len() == 1 && tys[0].kind() == Vector {
-        // if the type contains only a vector, pass it as that vector.
-        tys[0]
-    } else {
-        Type::struct_(ccx, &tys, false)
-    }
-}
-
-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
-    fn x86_64_ty<F>(ccx: &CrateContext,
-                    arg: &mut ArgType,
-                    is_mem_cls: F,
-                    ind_attr: Option<ArgAttribute>)
-        where F: FnOnce(&[RegClass]) -> bool
-    {
-        if !arg.ty.is_reg_ty() {
-            let cls = classify_ty(arg.ty);
-            if is_mem_cls(&cls) {
-                arg.make_indirect(ccx);
-                if let Some(attr) = ind_attr {
-                    arg.attrs.set(attr);
-                }
-            } else {
-                arg.cast = Some(llreg_ty(ccx, &cls));
-            }
-        } else {
-            arg.extend_integer_width_to(32);
-        }
-    }
+    } else {
+        let mut i = 0;
+        while i < n {
+            if cls[i] == Class::SseUp {
+                cls[i] = Class::Sse;
+            } else if cls[i] == Class::Sse {
+                i += 1;
+                while i != n && cls[i] == Class::SseUp { i += 1; }
+            } else {
+                i += 1;
+            }
+        }
+    }
+
+    Ok(cls)
+}
+
+fn reg_component(cls: &[Class], i: &mut usize, size: u64) -> Option<Reg> {
+    if *i >= cls.len() {
+        return None;
+    }
+
+    match cls[*i] {
+        Class::None => None,
+        Class::Int => {
+            *i += 1;
+            Some(match size {
+                1 => Reg::i8(),
+                2 => Reg::i16(),
+                3 |
+                4 => Reg::i32(),
+                _ => Reg::i64()
+            })
+        }
+        Class::Sse => {
+            let vec_len = 1 + cls[*i+1..].iter().take_while(|&&c| c == Class::SseUp).count();
+            *i += vec_len;
+            Some(match size {
+                4 => Reg::f32(),
+                8 => Reg::f64(),
+                _ => {
+                    Reg {
+                        kind: RegKind::Vector,
+                        size: Size::from_bytes(vec_len as u64 * 8)
+                    }
+                }
+            })
+        }
+        c => bug!("reg_component: unhandled class {:?}", c)
+    }
+}
+
+fn cast_target(cls: &[Class], size: u64) -> CastTarget {
+    let mut i = 0;
+    let lo = reg_component(cls, &mut i, size).unwrap();
+    let offset = i as u64 * 8;
+    let target = if size <= offset {
+        CastTarget::from(lo)
+    } else {
+        let hi = reg_component(cls, &mut i, size - offset).unwrap();
+        CastTarget::Pair(lo, hi)
+    };
+    assert_eq!(reg_component(cls, &mut i, 0), None);
+    target
+}
+
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
     let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
     let mut sse_regs = 8; // XMM0-7

+    let mut x86_64_ty = |arg: &mut ArgType<'tcx>, is_arg: bool| {
+        let cls = classify_arg(ccx, arg);
+
+        let mut needed_int = 0;
+        let mut needed_sse = 0;
+        let in_mem = match cls {
+            Err(Memory) => true,
+            Ok(ref cls) if is_arg => {
+                for &c in cls {
+                    match c {
+                        Class::Int => needed_int += 1,
+                        Class::Sse => needed_sse += 1,
+                        _ => {}
+                    }
+                }
+                arg.layout.is_aggregate() &&
+                    (int_regs < needed_int || sse_regs < needed_sse)
+            }
+            Ok(_) => false
+        };
+
+        if in_mem {
+            // `sret` / `byval` parameter thus one less integer register available
+            int_regs -= 1;
+
+            arg.make_indirect(ccx);
+            if is_arg {
+                arg.attrs.set(ArgAttribute::ByVal);
+            }
+        } else {
+            // split into sized chunks passed individually
+            int_regs -= needed_int;
+            sse_regs -= needed_sse;
+
+            if arg.layout.is_aggregate() {
+                let size = arg.layout.size(ccx).bytes();
+                arg.cast_to(ccx, cast_target(cls.as_ref().unwrap(), size))
+            } else {
+                arg.extend_integer_width_to(32);
+            }
+        }
+    };
+
     if !fty.ret.is_ignore() {
-        x86_64_ty(ccx, &mut fty.ret, |cls| {
-            if cls.is_ret_bysret() {
-                // `sret` parameter thus one less register available
-                int_regs -= 1;
-                true
-            } else {
-                false
-            }
-        }, None);
+        x86_64_ty(&mut fty.ret, false);
     }

     for arg in &mut fty.args {
         if arg.is_ignore() { continue; }
-        x86_64_ty(ccx, arg, |cls| {
-            let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize;
-            let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize;
-            let in_mem = cls.is_pass_byval() ||
-                         int_regs < needed_int ||
-                         sse_regs < needed_sse;
-            if in_mem {
-                // `byval` parameter thus one less integer register available
-                int_regs -= 1;
-            } else {
-                // split into sized chunks passed individually
-                int_regs -= needed_int;
-                sse_regs -= needed_sse;
-            }
-            in_mem
-        }, Some(ArgAttribute::ByVal));
-
-        // An integer, pointer, double or float parameter
-        // thus the above closure passed to `x86_64_ty` won't
-        // get called.
-        match arg.ty.kind() {
-            Integer | Pointer => int_regs -= 1,
-            Double | Float => sse_regs -= 1,
-            _ => {}
-        }
+        x86_64_ty(arg, true);
     }
 }
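Note: the rewritten `compute_abi_info` follows the System V x86-64 rule that an aggregate is split into 8-byte "eightbytes", each classified as INTEGER or SSE, and passed in registers only while enough of each kind remain. The standalone sketch below (not the compiler's code; `Class`, `classify` and the flat `(offset, size, is_float)` field descriptions are simplified stand-ins) illustrates how fields that share an eightbyte get merged, with INTEGER dominating SSE:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Class { None, Int, Sse }

// Classify a flat list of (offset, size, is_float) fields into per-eightbyte
// classes: every eightbyte a field overlaps is marked, and INTEGER wins over
// SSE whenever the two meet in the same eightbyte.
fn classify(fields: &[(u64, u64, bool)], total_size: u64) -> Vec<Class> {
    let mut cls = vec![Class::None; ((total_size + 7) / 8) as usize];
    for &(off, size, is_float) in fields {
        let (first, last) = (off / 8, (off + size - 1) / 8);
        for unit in first..=last {
            let new = if is_float { Class::Sse } else { Class::Int };
            cls[unit as usize] = match (cls[unit as usize], new) {
                (Class::None, c) => c,
                (Class::Sse, Class::Sse) => Class::Sse,
                _ => Class::Int, // INTEGER dominates
            };
        }
    }
    cls
}

fn main() {
    // struct { i32, f32 }: both fields share one eightbyte -> INTEGER
    assert_eq!(classify(&[(0, 4, false), (4, 4, true)], 8), [Class::Int]);
    // struct { f64, f64 }: two eightbytes, both SSE -> two XMM registers
    assert_eq!(classify(&[(0, 8, true), (8, 8, true)], 16),
               [Class::Sse, Class::Sse]);
}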

View file

@@ -8,30 +8,33 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.

-use llvm::*;
-use super::common::*;
-use super::machine::*;
-use abi::{ArgType, FnType};
-use type_::Type;
+use abi::{ArgType, FnType, LayoutExt, Reg};
+use common::CrateContext;
+
+use rustc::ty::layout::Layout;

 // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx

-pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
-    let fixup = |a: &mut ArgType| {
-        match a.ty.kind() {
-            Struct => match llsize_of_alloc(ccx, a.ty) {
-                1 => a.cast = Some(Type::i8(ccx)),
-                2 => a.cast = Some(Type::i16(ccx)),
-                4 => a.cast = Some(Type::i32(ccx)),
-                8 => a.cast = Some(Type::i64(ccx)),
-                _ => a.make_indirect(ccx)
-            },
-            Integer => match llsize_of_alloc(ccx, a.ty) {
-                1 ... 8 => a.extend_integer_width_to(32),
-                16 => a.make_indirect(ccx),
-                _ => bug!(),
-            },
-            _ => (),
+pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
+    let fixup = |a: &mut ArgType<'tcx>| {
+        let size = a.layout.size(ccx);
+        if a.layout.is_aggregate() {
+            match size.bits() {
+                8 => a.cast_to(ccx, Reg::i8()),
+                16 => a.cast_to(ccx, Reg::i16()),
+                32 => a.cast_to(ccx, Reg::i32()),
+                64 => a.cast_to(ccx, Reg::i64()),
+                _ => a.make_indirect(ccx)
+            };
+        } else {
+            if let Layout::Vector { .. } = *a.layout {
+                // FIXME(eddyb) there should be a size cap here
+                // (probably what clang calls "illegal vectors").
+            } else if size.bytes() > 8 {
+                a.make_indirect(ccx);
+            } else {
+                a.extend_integer_width_to(32);
+            }
         }
     };
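Note: the Win64 rule encoded above is far simpler than SysV: an aggregate is passed directly only if its size is exactly 1, 2, 4 or 8 bytes (bit-cast to a same-sized integer), and indirectly through a pointer otherwise. A minimal sketch of that decision, with hypothetical names that are not the compiler's API:

#[derive(Debug, PartialEq)]
enum Win64Pass { AsInt(u64), Indirect }

// Win64: aggregates of exactly 1, 2, 4 or 8 bytes are passed as a
// same-sized integer in a register; everything else goes by pointer.
fn pass_aggregate(size_in_bytes: u64) -> Win64Pass {
    match size_in_bytes {
        1 | 2 | 4 | 8 => Win64Pass::AsInt(size_in_bytes * 8),
        _ => Win64Pass::Indirect,
    }
}

fn main() {
    assert_eq!(pass_aggregate(4), Win64Pass::AsInt(32));  // e.g. struct { u16, u16 }
    assert_eq!(pass_aggregate(12), Win64Pass::Indirect);  // e.g. struct { u32, u32, u32 }
}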

View file

@@ -27,7 +27,7 @@ use monomorphize;
 use type_::Type;
 use value::Value;
 use rustc::ty::{self, Ty, TyCtxt};
-use rustc::ty::layout::Layout;
+use rustc::ty::layout::{Layout, LayoutTyper};
 use rustc::ty::subst::{Subst, Substs};
 use rustc::hir;

@@ -63,7 +63,7 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
         Layout::UntaggedUnion { .. } |
         Layout::RawNullablePointer { .. } |
         Layout::StructWrappedNullablePointer { .. } => {
-            !layout.is_unsized() && layout.size(&ccx.tcx().data_layout).bytes() == 0
+            !layout.is_unsized() && layout.size(ccx).bytes() == 0
         }
     }
 }

@@ -125,10 +125,8 @@ pub fn type_is_imm_pair<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>)
 /// Identify types which have size zero at runtime.
 pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
-    use machine::llsize_of_alloc;
-    use type_of::sizing_type_of;
-    let llty = sizing_type_of(ccx, ty);
-    llsize_of_alloc(ccx, llty) == 0
+    let layout = ccx.layout_of(ty);
+    !layout.is_unsized() && layout.size(ccx).bytes() == 0
 }

 /*
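Note: the new `type_is_zero_size` just asks the layout for a size of zero instead of building an LLVM sizing type. The same predicate is observable outside the compiler with `std::mem::size_of`; a quick illustration:

use std::mem::size_of;

fn main() {
    // Zero-sized types: unit, empty structs, zero-length arrays.
    struct Empty;
    assert_eq!(size_of::<()>(), 0);
    assert_eq!(size_of::<Empty>(), 0);
    assert_eq!(size_of::<[u64; 0]>(), 0);
    // Not zero-sized: bool occupies one byte.
    assert_eq!(size_of::<bool>(), 1);
}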

View file

@@ -255,7 +255,7 @@ pub fn trans_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
             ccx.statics_to_rauw().borrow_mut().push((g, new_g));
             new_g
         };
-        llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty));
+        llvm::LLVMSetAlignment(g, ccx.align_of(ty));
         llvm::LLVMSetInitializer(g, v);

         // As an optimization, all shared statics which do not have interior

View file

@@ -28,6 +28,7 @@ use type_::Type;
 use rustc_data_structures::base_n;
 use rustc::ty::subst::Substs;
 use rustc::ty::{self, Ty, TyCtxt};
+use rustc::ty::layout::{LayoutTyper, TyLayout};
 use session::config::NoDebugInfo;
 use session::Session;
 use session::config;

@@ -828,18 +829,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
         TypeOfDepthLock(self.local())
     }

-    pub fn layout_of(&self, ty: Ty<'tcx>) -> &'tcx ty::layout::Layout {
-        self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
-            ty.layout(&infcx).unwrap_or_else(|e| {
-                match e {
-                    ty::layout::LayoutError::SizeOverflow(_) =>
-                        self.sess().fatal(&e.to_string()),
-                    _ => bug!("failed to get layout for `{}`: {}", ty, e)
-                }
-            })
-        })
-    }
-
     pub fn check_overflow(&self) -> bool {
         self.shared.check_overflow
     }

@@ -951,6 +940,54 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
     }
 }

+impl<'a, 'tcx> ty::layout::HasDataLayout for &'a SharedCrateContext<'a, 'tcx> {
+    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
+        &self.tcx.data_layout
+    }
+}
+
+impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a SharedCrateContext<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
+        self.tcx
+    }
+}
+
+impl<'a, 'tcx> ty::layout::HasDataLayout for &'a CrateContext<'a, 'tcx> {
+    fn data_layout(&self) -> &ty::layout::TargetDataLayout {
+        &self.shared.tcx.data_layout
+    }
+}
+
+impl<'a, 'tcx> ty::layout::HasTyCtxt<'tcx> for &'a CrateContext<'a, 'tcx> {
+    fn tcx<'b>(&'b self) -> TyCtxt<'b, 'tcx, 'tcx> {
+        self.shared.tcx
+    }
+}
+
+impl<'a, 'tcx> LayoutTyper<'tcx> for &'a SharedCrateContext<'a, 'tcx> {
+    type TyLayout = TyLayout<'tcx>;
+
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+        self.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
+            infcx.layout_of(ty).unwrap_or_else(|e| {
+                match e {
+                    ty::layout::LayoutError::SizeOverflow(_) =>
+                        self.sess().fatal(&e.to_string()),
+                    _ => bug!("failed to get layout for `{}`: {}", ty, e)
+                }
+            })
+        })
+    }
+}
+
+impl<'a, 'tcx> LayoutTyper<'tcx> for &'a CrateContext<'a, 'tcx> {
+    type TyLayout = TyLayout<'tcx>;
+
+    fn layout_of(self, ty: Ty<'tcx>) -> Self::TyLayout {
+        self.shared.layout_of(ty)
+    }
+}
+
 pub struct TypeOfDepthLock<'a, 'tcx: 'a>(&'a LocalCrateContext<'tcx>);

 impl<'a, 'tcx> Drop for TypeOfDepthLock<'a, 'tcx> {
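Note: the pattern here is to implement the layout traits for *reference* types (`&SharedCrateContext`, `&CrateContext`), so a `C: HasDataLayout + Copy` bound can be satisfied by cheaply copying a reference. A minimal sketch of the same shape, with stand-in types rather than the compiler's:

// Stand-ins for TargetDataLayout and a context that owns one.
struct DataLayout { pointer_size: u64 }
struct Context { dl: DataLayout }

// Copyable handle trait: implemented for &Context rather than Context,
// so callers can hand the context out by value any number of times.
trait HasDataLayout: Copy {
    fn data_layout(&self) -> &DataLayout;
}

impl<'a> HasDataLayout for &'a Context {
    fn data_layout(&self) -> &DataLayout {
        &self.dl
    }
}

// A layout query generic over any such handle, mirroring how
// Size::checked_add takes `cx: C` by value.
fn pointer_size<C: HasDataLayout>(cx: C) -> u64 {
    cx.data_layout().pointer_size
}

fn main() {
    let ctx = Context { dl: DataLayout { pointer_size: 8 } };
    assert_eq!(pointer_size(&ctx), 8);
    assert_eq!(pointer_size(&ctx), 8); // the &Context handle is Copy
}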

View file

@@ -35,7 +35,8 @@ use rustc_data_structures::ToHex;
 use {type_of, machine, monomorphize};
 use common::{self, CrateContext};
 use type_::Type;
-use rustc::ty::{self, AdtKind, Ty, layout};
+use rustc::ty::{self, AdtKind, Ty};
+use rustc::ty::layout::{self, LayoutTyper};
 use session::config;
 use util::nodemap::FxHashMap;
 use util::common::path2cstr;

@@ -900,7 +901,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
         let offsets = match *layout {
             layout::Univariant { ref variant, .. } => &variant.offsets,
             layout::Vector { element, count } => {
-                let element_size = element.size(&cx.tcx().data_layout).bytes();
+                let element_size = element.size(cx).bytes();
                 tmp = (0..count).
                     map(|i| layout::Size::from_bytes(i*element_size))
                     .collect::<Vec<layout::Size>>();

@@ -1564,7 +1565,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
         enum_llvm_type,
         EnumMDF(EnumMemberDescriptionFactory {
             enum_type: enum_type,
-            type_rep: type_rep,
+            type_rep: type_rep.layout,
             discriminant_type_metadata: discriminant_type_metadata,
             containing_scope: containing_scope,
             file_metadata: file_metadata,

@@ -1772,7 +1773,7 @@ pub fn create_global_var_metadata(cx: &CrateContext,
     let var_name = CString::new(var_name).unwrap();
     let linkage_name = CString::new(linkage_name).unwrap();

-    let global_align = type_of::align_of(cx, variable_type);
+    let global_align = cx.align_of(variable_type);

     unsafe {
         llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx),

View file

@@ -449,7 +449,7 @@ pub fn declare_local<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         LocalVariable    |
         CapturedVariable => (0, DW_TAG_auto_variable)
     };

-    let align = ::type_of::align_of(cx, variable_type);
+    let align = cx.align_of(variable_type);

     let name = CString::new(variable_name.as_str().as_bytes()).unwrap();
     match (variable_access, &[][..]) {

View file

@@ -18,11 +18,10 @@ use llvm;
 use llvm::{ValueRef};
 use rustc::traits;
 use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::LayoutTyper;
 use common::*;
-use machine::*;
 use meth;
 use monomorphize;
-use type_of::{sizing_type_of, align_of};
 use value::Value;
 use builder::Builder;

@@ -49,7 +48,7 @@ pub fn needs_drop_glue<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>, t: Ty<'tcx>
             if !scx.type_needs_drop(typ) && scx.type_is_sized(typ) {
                 scx.tcx().infer_ctxt((), traits::Reveal::All).enter(|infcx| {
                     let layout = t.layout(&infcx).unwrap();
-                    if layout.size(&scx.tcx().data_layout).bytes() == 0 {
+                    if layout.size(scx).bytes() == 0 {
                         // `Box<ZeroSizeType>` does not allocate.
                         false
                     } else {

@@ -69,9 +68,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
     debug!("calculate size of DST: {}; with lost info: {:?}",
            t, Value(info));
     if bcx.ccx.shared().type_is_sized(t) {
-        let sizing_type = sizing_type_of(bcx.ccx, t);
-        let size = llsize_of_alloc(bcx.ccx, sizing_type);
-        let align = align_of(bcx.ccx, t);
+        let size = bcx.ccx.size_of(t);
+        let align = bcx.ccx.align_of(t);
         debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
                t, Value(info), size, align);
         let size = C_uint(bcx.ccx, size);

@@ -82,9 +80,8 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
         ty::TyAdt(def, substs) => {
             let ccx = bcx.ccx;
             // First get the size of all statically known fields.
-            // Don't use type_of::sizing_type_of because that expects t to be sized,
-            // and it also rounds up to alignment, which we want to avoid,
-            // as the unsized field's alignment could be smaller.
+            // Don't use size_of because it also rounds up to alignment, which we
+            // want to avoid, as the unsized field's alignment could be smaller.
             assert!(!t.is_simd());
             let layout = ccx.layout_of(t);
             debug!("DST {} layout: {:?}", t, layout);

@@ -154,14 +151,11 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
             (meth::SIZE.get_usize(bcx, info), meth::ALIGN.get_usize(bcx, info))
         }
         ty::TySlice(_) | ty::TyStr => {
-            let unit_ty = t.sequence_element_type(bcx.tcx());
+            let unit = t.sequence_element_type(bcx.tcx());
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            let llunit_ty = sizing_type_of(bcx.ccx, unit_ty);
-            let unit_align = llalign_of_min(bcx.ccx, llunit_ty);
-            let unit_size = llsize_of_alloc(bcx.ccx, llunit_ty);
-            (bcx.mul(info, C_uint(bcx.ccx, unit_size)),
-             C_uint(bcx.ccx, unit_align))
+            (bcx.mul(info, C_uint(bcx.ccx, bcx.ccx.size_of(unit))),
+             C_uint(bcx.ccx, bcx.ccx.align_of(unit)))
         }
         _ => bug!("Unexpected unsized type, found {}", t)
     }
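Note: for slices and `str`, the computation above reduces to `size = length * size_of(element)` with the element's alignment. The same arithmetic is observable on stable Rust through `size_of_val`/`align_of_val`; a small check:

use std::mem::{align_of_val, size_of_val};

fn main() {
    let xs: [u16; 5] = [1, 2, 3, 4, 5];
    let slice: &[u16] = &xs;
    // size = len * size_of::<u16>() = 5 * 2; align = align_of::<u16>() = 2
    assert_eq!(size_of_val(slice), 10);
    assert_eq!(align_of_val(slice), 2);
}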

View file

@@ -151,7 +151,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
         }
         "min_align_of" => {
             let tp_ty = substs.type_at(0);
-            C_uint(ccx, type_of::align_of(ccx, tp_ty))
+            C_uint(ccx, ccx.align_of(tp_ty))
         }
         "min_align_of_val" => {
             let tp_ty = substs.type_at(0);

@@ -160,7 +160,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                     glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]);
                 llalign
             } else {
-                C_uint(ccx, type_of::align_of(ccx, tp_ty))
+                C_uint(ccx, ccx.align_of(tp_ty))
             }
         }
         "pref_align_of" => {

@@ -234,7 +234,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
             }
             let load = bcx.volatile_load(ptr);
             unsafe {
-                llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
+                llvm::LLVMSetAlignment(load, ccx.align_of(tp_ty));
             }
             to_immediate(bcx, load, tp_ty)
         },

@@ -252,7 +252,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 let ptr = bcx.pointercast(llargs[0], val_ty(val).ptr_to());
                 let store = bcx.volatile_store(val, ptr);
                 unsafe {
-                    llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
+                    llvm::LLVMSetAlignment(store, ccx.align_of(tp_ty));
                 }
             }
             C_nil(ccx)

@@ -634,7 +634,7 @@ pub fn trans_intrinsic_call<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
     if val_ty(llval) != Type::void(ccx) && machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
         if let Some(ty) = fn_ty.ret.cast {
             let ptr = bcx.pointercast(llresult, ty.ptr_to());
-            bcx.store(llval, ptr, Some(type_of::align_of(ccx, ret_ty)));
+            bcx.store(llval, ptr, Some(ccx.align_of(ret_ty)));
         } else {
             store_ty(bcx, llval, llresult, Alignment::AbiAligned, ret_ty);
         }

@@ -651,7 +651,7 @@ fn copy_intrinsic<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                             -> ValueRef {
     let ccx = bcx.ccx;
     let lltp_ty = type_of::type_of(ccx, tp_ty);
-    let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
+    let align = C_i32(ccx, ccx.align_of(tp_ty) as i32);
     let size = machine::llsize_of(ccx, lltp_ty);
     let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());

@@ -685,7 +685,7 @@ fn memset_intrinsic<'a, 'tcx>(
     count: ValueRef
 ) -> ValueRef {
     let ccx = bcx.ccx;
-    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
+    let align = C_i32(ccx, ccx.align_of(ty) as i32);
     let lltp_ty = type_of::type_of(ccx, ty);
     let size = machine::llsize_of(ccx, lltp_ty);
     let dst = bcx.pointercast(dst, Type::i8p(ccx));
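Note: the volatile load/store intrinsics now take their alignment from the layout query instead of the LLVM type. On stable Rust the same operations surface as `std::ptr::read_volatile`/`write_volatile`, which require a pointer validly aligned for the type; a small sketch:

use std::ptr;

fn main() {
    let mut slot: u32 = 0;
    let p = &mut slot as *mut u32; // correctly aligned for u32
    unsafe {
        // These lower to the volatile load/store intrinsics handled above.
        ptr::write_volatile(p, 0xdead_beef);
        assert_eq!(ptr::read_volatile(p), 0xdead_beef);
    }
}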

View file

@@ -17,7 +17,6 @@ use consts;
 use machine;
 use monomorphize;
 use type_::Type;
-use type_of::*;
 use value::Value;
 use rustc::ty;

@@ -80,14 +79,10 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
     // Not in the cache. Build it.
     let nullptr = C_null(Type::nil(ccx).ptr_to());

-    let size_ty = sizing_type_of(ccx, ty);
-    let size = machine::llsize_of_alloc(ccx, size_ty);
-    let align = align_of(ccx, ty);
-
     let mut components: Vec<_> = [
         callee::get_fn(ccx, monomorphize::resolve_drop_in_place(ccx.shared(), ty)),
-        C_uint(ccx, size),
-        C_uint(ccx, align)
+        C_uint(ccx, ccx.size_of(ty)),
+        C_uint(ccx, ccx.align_of(ty))
     ].iter().cloned().collect();

     if let Some(trait_ref) = trait_ref {
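Note: the vtable built here starts with three fixed slots (drop glue, size, align), followed by the method pointers; `meth::SIZE` and `meth::ALIGN` read those slots back. One consequence is visible on stable Rust: `size_of_val`/`align_of_val` on a trait object are answered from the vtable, not from the static type:

use std::fmt::Debug;
use std::mem::{align_of, align_of_val, size_of_val};

fn main() {
    let concrete: u64 = 42;
    let object: &dyn Debug = &concrete;
    // Looked up through the vtable's size and align slots at runtime:
    assert_eq!(size_of_val(object), 8);
    assert_eq!(align_of_val(object), align_of::<u64>());
}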

View file

@@ -12,7 +12,8 @@ use llvm::{self, ValueRef, BasicBlockRef};
 use rustc_const_eval::{ErrKind, ConstEvalErr, note_const_eval_err};
 use rustc::middle::lang_items;
 use rustc::middle::const_val::ConstInt;
-use rustc::ty::{self, layout, TypeFoldable};
+use rustc::ty::{self, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::mir;
 use abi::{Abi, FnType, ArgType};
 use base::{self, Lifetime};

@@ -24,8 +25,8 @@ use consts;
 use machine::llalign_of_min;
 use meth;
 use monomorphize;
+use type_of;
 use tvec;
-use type_of::{self, align_of};
 use type_::Type;

 use rustc_data_structures::indexed_vec::IndexVec;

@@ -177,7 +178,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 };
                 let llslot = match op.val {
                     Immediate(_) | Pair(..) => {
-                        let llscratch = bcx.alloca(ret.original_ty, "ret");
+                        let llscratch = bcx.alloca(ret.memory_ty(bcx.ccx), "ret");
                         self.store_operand(&bcx, llscratch, None, op);
                         llscratch
                     }

@@ -189,7 +190,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 };
                 let load = bcx.load(
                     bcx.pointercast(llslot, cast_ty.ptr_to()),
-                    Some(llalign_of_min(bcx.ccx, ret.ty)));
+                    Some(ret.layout.align(bcx.ccx).abi() as u32));
                 load
             } else {
                 let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));

@@ -515,7 +516,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 (llargs[0], &llargs[1..])
             }
             ReturnDest::Nothing => {
-                (C_undef(fn_ty.ret.original_ty.ptr_to()), &llargs[..])
+                (C_undef(fn_ty.ret.memory_ty(bcx.ccx).ptr_to()), &llargs[..])
             }
             ReturnDest::IndirectOperand(dst, _) |
             ReturnDest::Store(dst) => (dst, &llargs[..]),

@@ -534,7 +535,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                         val: Ref(dst, Alignment::AbiAligned),
                         ty: sig.output(),
                     };
-                    self.store_return(&bcx, ret_dest, fn_ty.ret, op);
+                    self.store_return(&bcx, ret_dest, &fn_ty.ret, op);
                 }

                 if let Some((_, target)) = *destination {

@@ -573,7 +574,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             val: Immediate(invokeret),
                             ty: sig.output(),
                         };
-                        self.store_return(&ret_bcx, ret_dest, fn_ty.ret, op);
+                        self.store_return(&ret_bcx, ret_dest, &fn_ty.ret, op);
                     }
                 } else {
                     let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle);

@@ -583,7 +584,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                             val: Immediate(llret),
                             ty: sig.output(),
                         };
-                        self.store_return(&bcx, ret_dest, fn_ty.ret, op);
+                        self.store_return(&bcx, ret_dest, &fn_ty.ret, op);
                     }
                     funclet_br(self, bcx, target);
                 } else {
                     bcx.unreachable();

@@ -597,7 +598,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                   bcx: &Builder<'a, 'tcx>,
                   op: OperandRef<'tcx>,
                   llargs: &mut Vec<ValueRef>,
-                  fn_ty: &FnType,
+                  fn_ty: &FnType<'tcx>,
                   next_idx: &mut usize,
                   llfn: &mut Option<ValueRef>,
                   def: &Option<ty::InstanceDef<'tcx>>) {

@@ -640,7 +641,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         let (mut llval, align, by_ref) = match op.val {
             Immediate(_) | Pair(..) => {
                 if arg.is_indirect() || arg.cast.is_some() {
-                    let llscratch = bcx.alloca(arg.original_ty, "arg");
+                    let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
                     self.store_operand(bcx, llscratch, None, op);
                     (llscratch, Alignment::AbiAligned, true)
                 } else {

@@ -652,7 +653,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                     // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                     // have scary latent bugs around.

-                    let llscratch = bcx.alloca(arg.original_ty, "arg");
+                    let llscratch = bcx.alloca(arg.memory_ty(bcx.ccx), "arg");
                     base::memcpy_ty(bcx, llscratch, llval, op.ty, Some(1));
                     (llscratch, Alignment::AbiAligned, true)
                 }

@@ -661,13 +662,13 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
-            if arg.original_ty == Type::i1(bcx.ccx) {
+            if arg.layout.ty == bcx.tcx().types.bool {
                 // We store bools as i8 so we need to truncate to i1.
                 llval = bcx.load_range_assert(llval, 0, 2, llvm::False, None);
-                llval = bcx.trunc(llval, arg.original_ty);
+                llval = bcx.trunc(llval, Type::i1(bcx.ccx));
             } else if let Some(ty) = arg.cast {
                 llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()),
-                                 align.min_with(llalign_of_min(bcx.ccx, arg.ty)));
+                                 align.min_with(arg.layout.align(bcx.ccx).abi() as u32));
             } else {
                 llval = bcx.load(llval, align.to_align());
             }

@@ -680,7 +681,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                        bcx: &Builder<'a, 'tcx>,
                        operand: &mir::Operand<'tcx>,
                        llargs: &mut Vec<ValueRef>,
-                       fn_ty: &FnType,
+                       fn_ty: &FnType<'tcx>,
                        next_idx: &mut usize,
                        llfn: &mut Option<ValueRef>,
                        def: &Option<ty::InstanceDef<'tcx>>) {

@@ -910,7 +911,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
             let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
             let in_type = val.ty;
             let out_type = dst.ty.to_ty(bcx.tcx());
-            let llalign = cmp::min(align_of(bcx.ccx, in_type), align_of(bcx.ccx, out_type));
+            let llalign = cmp::min(bcx.ccx.align_of(in_type), bcx.ccx.align_of(out_type));
             self.store_operand(bcx, cast_ptr, Some(llalign), val);
         }

@@ -919,7 +920,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
     fn store_return(&mut self,
                     bcx: &Builder<'a, 'tcx>,
                     dest: ReturnDest,
-                    ret_ty: ArgType,
+                    ret_ty: &ArgType<'tcx>,
                     op: OperandRef<'tcx>) {
         use self::ReturnDest::*;
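Note: the `arg.layout.ty == bcx.tcx().types.bool` branch reflects the memory representation of `bool`: it occupies one byte (i8) in memory, holding 0 or 1, even though LLVM manipulates it as an i1 immediate, hence the load, the range assertion and the truncation above. The byte-level contract is checkable directly:

fn main() {
    // bool is one byte in memory...
    assert_eq!(std::mem::size_of::<bool>(), 1);
    // ...and the only valid bit patterns are 0 and 1, which is what
    // `load_range_assert(llval, 0, 2, ...)` above encodes for LLVM.
    assert_eq!(true as u8, 1);
    assert_eq!(false as u8, 0);
}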

View file

@@ -18,7 +18,8 @@ use rustc::hir::def_id::DefId;
 use rustc::infer::TransNormalize;
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
-use rustc::ty::{self, layout, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::ty::cast::{CastTy, IntTy};
 use rustc::ty::subst::{Kind, Substs, Subst};
 use rustc_data_structures::indexed_vec::{Idx, IndexVec};

@@ -148,7 +149,7 @@ impl<'tcx> Const<'tcx> {
         } else {
             // Otherwise, or if the value is not immediate, we create
             // a constant LLVM global and cast its address if necessary.
-            let align = type_of::align_of(ccx, self.ty);
+            let align = ccx.align_of(self.ty);
             let ptr = consts::addr_of(ccx, self.llval, align, "const");
             OperandValue::Ref(consts::ptrcast(ptr, llty.ptr_to()), Alignment::AbiAligned)
         };

@@ -717,7 +718,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
                     Base::Value(llval) => {
                         // FIXME: may be wrong for &*(&simd_vec as &fmt::Debug)
                         let align = if self.ccx.shared().type_is_sized(ty) {
-                            type_of::align_of(self.ccx, ty)
+                            self.ccx.align_of(ty)
                         } else {
                             self.ccx.tcx().data_layout.pointer_align.abi() as machine::llalign
                         };

@@ -979,7 +980,6 @@ fn trans_const<'a, 'tcx>(
     vals: &[ValueRef]
 ) -> ValueRef {
     let l = ccx.layout_of(t);
-    let dl = &ccx.tcx().data_layout;
     let variant_index = match *kind {
         mir::AggregateKind::Adt(_, index, _, _) => index,
         _ => 0,

@@ -1002,7 +1002,7 @@ fn trans_const<'a, 'tcx>(
             let mut vals_with_discr = vec![lldiscr];
             vals_with_discr.extend_from_slice(vals);
             let mut contents = build_const_struct(ccx, &variant, &vals_with_discr[..]);
-            let needed_padding = l.size(dl).bytes() - variant.stride().bytes();
+            let needed_padding = l.size(ccx).bytes() - variant.stride().bytes();
             if needed_padding > 0 {
                 contents.push(padding(ccx, needed_padding));
             }

@@ -1022,25 +1022,20 @@ fn trans_const<'a, 'tcx>(
             C_vector(vals)
         }
         layout::RawNullablePointer { nndiscr, .. } => {
-            let nnty = adt::compute_fields(ccx, t, nndiscr as usize, false)[0];
             if variant_index as u64 == nndiscr {
                 assert_eq!(vals.len(), 1);
                 vals[0]
             } else {
-                C_null(type_of::sizing_type_of(ccx, nnty))
+                C_null(type_of::type_of(ccx, t))
             }
         }
         layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
             if variant_index as u64 == nndiscr {
                 C_struct(ccx, &build_const_struct(ccx, &nonnull, vals), false)
             } else {
-                let fields = adt::compute_fields(ccx, t, nndiscr as usize, false);
-                let vals = fields.iter().map(|&ty| {
-                    // Always use null even if it's not the `discrfield`th
-                    // field; see #8506.
-                    C_null(type_of::sizing_type_of(ccx, ty))
-                }).collect::<Vec<ValueRef>>();
-                C_struct(ccx, &build_const_struct(ccx, &nonnull, &vals[..]), false)
+                // Always use null even if it's not the `discrfield`th
+                // field; see #8506.
+                C_null(type_of::type_of(ccx, t))
             }
         }
         _ => bug!("trans_const: cannot handle type {} repreented as {:#?}", t, l)
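Note: `RawNullablePointer` and `StructWrappedNullablePointer` are the layouts behind the null-pointer enum optimization: an enum with one pointer-carrying variant and one empty variant stores no separate discriminant and represents the empty variant as null (hence `C_null` above, and the all-null rule of #8506). The space saving is easy to confirm on stable Rust:

use std::mem::size_of;

fn main() {
    // Option<&T> is the RawNullablePointer case: None is represented as a
    // null pointer, so no separate discriminant is stored.
    assert_eq!(size_of::<Option<&u32>>(), size_of::<&u32>());
    // Box is non-nullable too, so Option<Box<T>> gets the same treatment.
    assert_eq!(size_of::<Option<Box<u8>>>(), size_of::<Box<u8>>());
}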

View file

@@ -9,7 +9,8 @@
 // except according to those terms.

 use llvm::ValueRef;
-use rustc::ty::{self, layout, Ty, TypeFoldable};
+use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;

View file

@@ -11,7 +11,8 @@

 use libc::c_uint;
 use llvm::{self, ValueRef, BasicBlockRef};
 use llvm::debuginfo::DIScope;
-use rustc::ty::{self, layout};
+use rustc::ty;
+use rustc::ty::layout::{self, LayoutTyper};
 use rustc::mir::{self, Mir};
 use rustc::mir::tcx::LvalueTy;
 use rustc::ty::subst::Substs;

@@ -52,7 +53,7 @@ pub struct MirContext<'a, 'tcx:'a> {

     ccx: &'a CrateContext<'a, 'tcx>,

-    fn_ty: FnType,
+    fn_ty: FnType<'tcx>,

     /// When unwinding is initiated, we have to store this personality
     /// value somewhere so that we can load it and re-use it in the

@@ -454,6 +455,23 @@ fn arg_local_refs<'a, 'tcx>(bcx: &Builder<'a, 'tcx>,
                 assert_eq!((meta.cast, meta.pad), (None, None));
                 let llmeta = llvm::get_param(bcx.llfn(), llarg_idx as c_uint);
                 llarg_idx += 1;
+
+                // FIXME(eddyb) As we can't perfectly represent the data and/or
+                // vtable pointer in a fat pointers in Rust's typesystem, and
+                // because we split fat pointers into two ArgType's, they're
+                // not the right type so we have to cast them for now.
+                let pointee = match arg_ty.sty {
+                    ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
+                    ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => ty,
+                    ty::TyAdt(def, _) if def.is_box() => arg_ty.boxed_ty(),
+                    _ => bug!()
+                };
+                let data_llty = type_of::in_memory_type_of(bcx.ccx, pointee);
+                let meta_llty = type_of::unsized_info_ty(bcx.ccx, pointee);
+
+                let llarg = bcx.pointercast(llarg, data_llty.ptr_to());
+                let llmeta = bcx.pointercast(llmeta, meta_llty);
+
                 OperandValue::Pair(llarg, llmeta)
             } else {
                 OperandValue::Immediate(llarg)
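Note: the cast added here exists because a fat pointer (`&[T]`, `&Trait`, `Box<Trait>`, ...) is split into two scalar arguments, a data pointer plus metadata (a length or a vtable pointer), which then have to be recombined into an `OperandValue::Pair`. That two-word shape is observable directly:

use std::mem::size_of;

fn main() {
    let word = size_of::<usize>();
    // Thin pointer: one word.
    assert_eq!(size_of::<&u8>(), word);
    // Fat pointers: data pointer + length, or data pointer + vtable pointer.
    assert_eq!(size_of::<&[u8]>(), 2 * word);
    assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * word);
}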

View file

@@ -10,7 +10,7 @@

 use llvm::ValueRef;
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::Layout;
+use rustc::ty::layout::{Layout, LayoutTyper};
 use rustc::mir;
 use rustc::mir::tcx::LvalueTy;
 use rustc_data_structures::indexed_vec::Idx;

View file

@@ -11,7 +11,7 @@

 use llvm::{self, ValueRef};
 use rustc::ty::{self, Ty};
 use rustc::ty::cast::{CastTy, IntTy};
-use rustc::ty::layout::Layout;
+use rustc::ty::layout::{Layout, LayoutTyper};
 use rustc::mir::tcx::LvalueTy;
 use rustc::mir;
 use middle::lang_items::ExchangeMallocFnLangItem;

@@ -438,7 +438,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                 let llty = type_of::type_of(bcx.ccx, content_ty);
                 let llsize = machine::llsize_of(bcx.ccx, llty);
-                let align = type_of::align_of(bcx.ccx, content_ty);
+                let align = bcx.ccx.align_of(content_ty);
                 let llalign = C_uint(bcx.ccx, align);
                 let llty_ptr = llty.ptr_to();
                 let box_ty = bcx.tcx().mk_box(content_ty);

View file

@@ -13,127 +13,12 @@ use adt;
 use common::*;
 use machine;
 use rustc::ty::{self, Ty, TypeFoldable};
+use rustc::ty::layout::LayoutTyper;
 use trans_item::DefPathBasedNames;
 use type_::Type;

 use syntax::ast;

-// A "sizing type" is an LLVM type, the size and alignment of which are
-// guaranteed to be equivalent to what you would get out of `type_of()`. It's
-// useful because:
-//
-// (1) It may be cheaper to compute the sizing type than the full type if all
-//     you're interested in is the size and/or alignment;
-//
-// (2) It won't make any recursive calls to determine the structure of the
-//     type behind pointers. This can help prevent infinite loops for
-//     recursive types. For example, enum types rely on this behavior.
-
-pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
-    if let Some(t) = cx.llsizingtypes().borrow().get(&t).cloned() {
-        return t;
-    }
-
-    debug!("sizing_type_of {:?}", t);
-    let _recursion_lock = cx.enter_type_of(t);
-
-    let ptr_sizing_ty = |ty: Ty<'tcx>| {
-        if cx.shared().type_is_sized(ty) {
-            Type::i8p(cx)
-        } else {
-            Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
-        }
-    };
-    let llsizingty = match t.sty {
-        _ if !cx.shared().type_is_sized(t) => {
-            Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
-        }
-
-        ty::TyBool => Type::bool(cx),
-        ty::TyChar => Type::char(cx),
-        ty::TyInt(t) => Type::int_from_ty(cx, t),
-        ty::TyUint(t) => Type::uint_from_ty(cx, t),
-        ty::TyFloat(t) => Type::float_from_ty(cx, t),
-        ty::TyNever => Type::nil(cx),
-
-        ty::TyRef(_, ty::TypeAndMut{ty, ..}) |
-        ty::TyRawPtr(ty::TypeAndMut{ty, ..}) => {
-            ptr_sizing_ty(ty)
-        }
-        ty::TyAdt(def, _) if def.is_box() => {
-            ptr_sizing_ty(t.boxed_ty())
-        }
-
-        ty::TyFnDef(..) => Type::nil(cx),
-        ty::TyFnPtr(_) => Type::i8p(cx),
-
-        ty::TyArray(ty, size) => {
-            let llty = sizing_type_of(cx, ty);
-            let size = size as u64;
-            Type::array(&llty, size)
-        }
-
-        ty::TyTuple(ref tys, _) if tys.is_empty() => {
-            Type::nil(cx)
-        }
-
-        ty::TyAdt(..) if t.is_simd() => {
-            let e = t.simd_type(cx.tcx());
-            if !e.is_machine() {
-                cx.sess().fatal(&format!("monomorphising SIMD type `{}` with \
-                                          a non-machine element type `{}`",
-                                         t, e))
-            }
-            let llet = type_of(cx, e);
-            let n = t.simd_size(cx.tcx()) as u64;
-            Type::vector(&llet, n)
-        }
-
-        ty::TyTuple(..) | ty::TyAdt(..) | ty::TyClosure(..) => {
-            adt::sizing_type_of(cx, t, false)
-        }
-
-        ty::TyProjection(..) | ty::TyInfer(..) | ty::TyParam(..) |
-        ty::TyAnon(..) | ty::TyError => {
-            bug!("fictitious type {:?} in sizing_type_of()", t)
-        }
-        ty::TySlice(_) | ty::TyDynamic(..) | ty::TyStr => bug!()
-    };
-
-    debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty);
-
-    cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
-
-    // FIXME(eddyb) Temporary sanity check for ty::layout.
-    let layout = cx.layout_of(t);
-    if !cx.shared().type_is_sized(t) {
-        if !layout.is_unsized() {
-            bug!("layout should be unsized for type `{}` / {:#?}",
-                 t, layout);
-        }
-
-        // Unsized types get turned into a fat pointer for LLVM.
-        return llsizingty;
-    }
-
-    let r = layout.size(&cx.tcx().data_layout).bytes();
-    let l = machine::llsize_of_alloc(cx, llsizingty);
-    if r != l {
-        bug!("size differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
-             r, l, t, layout);
-    }
-
-    let r = layout.align(&cx.tcx().data_layout).abi();
-    let l = machine::llalign_of_min(cx, llsizingty) as u64;
-    if r != l {
-        bug!("align differs (rustc: {}, llvm: {}) for type `{}` / {:#?}",
-             r, l, t, layout);
-    }
-
-    llsizingty
-}
-
 pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
     match ty.sty {
         ty::TyRef(_, ty::TypeAndMut { ty: t, .. }) |

@@ -147,7 +32,7 @@ pub fn fat_ptr_base_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) ->
     }
 }

-fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
+pub fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
     let unsized_part = ccx.tcx().struct_tail(ty);
     match unsized_part.sty {
         ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {

@@ -196,7 +81,6 @@ pub fn type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
 /// of that field's type - this is useful for taking the address of
 /// that field and ensuring the struct has the right alignment.
 /// For the LLVM type of a value as a whole, see `type_of`.
-/// NB: If you update this, be sure to update `sizing_type_of()` as well.
 pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
     // Check the cache.
     if let Some(&llty) = cx.lltypes().borrow().get(&t) {

@@ -322,10 +206,14 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
     llty
 }

-pub fn align_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>)
-                          -> machine::llalign {
-    let layout = cx.layout_of(t);
-    layout.align(&cx.tcx().data_layout).abi() as machine::llalign
+impl<'a, 'tcx> CrateContext<'a, 'tcx> {
+    pub fn align_of(&self, ty: Ty<'tcx>) -> machine::llalign {
+        self.layout_of(ty).align(self).abi() as machine::llalign
+    }
+
+    pub fn size_of(&self, ty: Ty<'tcx>) -> machine::llsize {
+        self.layout_of(ty).size(self).bytes() as machine::llsize
+    }
 }

 fn llvm_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> String {
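Note: with these helpers, callers ask `ccx.size_of(ty)` / `ccx.align_of(ty)` and the answer comes from `ty::layout` rather than from an LLVM sizing type; the agreement the deleted sanity check used to assert now holds by construction. For a monomorphic type the values are the ones `std::mem` reports, including the rounding of size up to a multiple of alignment:

use std::mem::{align_of, size_of};

#[allow(dead_code)]
struct S { a: u32, b: u8 } // 5 bytes of data

fn main() {
    // Size is rounded up to a multiple of the alignment (5 -> 8 here),
    // the same rounding ty::layout performs for sized types.
    assert_eq!(align_of::<S>(), 4);
    assert_eq!(size_of::<S>(), 8);
    assert_eq!(size_of::<S>() % align_of::<S>(), 0);
}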

View file

@@ -121,13 +121,13 @@ pub fn unsafe_slice(_: &[UnsafeInner]) {
 fn str(_: &[u8]) {
 }

-// CHECK: @trait_borrow(i8* nonnull, void (i8*)** noalias nonnull readonly)
+// CHECK: @trait_borrow({}* nonnull, {}* noalias nonnull readonly)
 // FIXME #25759 This should also have `nocapture`
 #[no_mangle]
 fn trait_borrow(_: &Drop) {
 }

-// CHECK: @trait_box(i8* noalias nonnull, void (i8*)** noalias nonnull readonly)
+// CHECK: @trait_box({}* noalias nonnull, {}* noalias nonnull readonly)
 #[no_mangle]
 fn trait_box(_: Box<Drop>) {
 }