
rustc: move size, align & primitive_align from Abi::Aggregate to layout.

Eduard-Mihai Burtescu 2017-09-22 22:44:40 +03:00
parent b723af284a
commit b28f668e26
30 changed files with 299 additions and 383 deletions
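Condensed, the patch reshapes the layout types as follows (a sketch assembled from the hunks below; other variants and unrelated fields elided):

    // Before: size and alignment lived inside Abi::Aggregate, so reading
    // them required a HasDataLayout context and a match on the variant.
    pub enum Abi {
        Aggregate {
            sized: bool,
            packed: bool,
            align: Align,
            primitive_align: Align,
            size: Size
        },
        // Scalar(..), Vector { .. }
    }

    // After: Abi::Aggregate keeps only its flags...
    pub enum Abi {
        Aggregate { sized: bool, packed: bool },
        // Scalar(..), Vector { .. }
    }

    // ...and every interned layout caches the values up front.
    pub struct CachedLayout {
        pub layout: Layout,
        pub fields: FieldPlacement,
        pub abi: Abi,
        pub align: Align,
        pub primitive_align: Align,
        pub size: Size
        // variant_index elided
    }

Callers switch from `layout.size(ccx)` / `layout.align(ccx)` to plain field reads `layout.size` / `layout.align`, which is why so many `ccx`/`dl`/`tcx` parameters disappear in the files below.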

View file

@ -747,10 +747,7 @@ pub enum Abi {
Aggregate {
/// If true, the size is exact, otherwise it's only a lower bound.
sized: bool,
packed: bool,
align: Align,
primitive_align: Align,
size: Size
packed: bool
}
}
@ -770,68 +767,6 @@ impl Abi {
Abi::Aggregate { packed, .. } => packed
}
}
/// Returns true if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
match *self {
Abi::Scalar(_) => false,
Abi::Vector { count, .. } => count == 0,
Abi::Aggregate { sized, size, .. } => sized && size.bytes() == 0
}
}
pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
let dl = cx.data_layout();
match *self {
Abi::Scalar(value) => value.size(dl),
Abi::Vector { element, count } => {
let element_size = element.size(dl);
let vec_size = match element_size.checked_mul(count, dl) {
Some(size) => size,
None => bug!("Layout::size({:?}): {} * {} overflowed",
self, element_size.bytes(), count)
};
vec_size.abi_align(self.align(dl))
}
Abi::Aggregate { size, .. } => size
}
}
pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
let dl = cx.data_layout();
match *self {
Abi::Scalar(value) => value.align(dl),
Abi::Vector { element, count } => {
let elem_size = element.size(dl);
let vec_size = match elem_size.checked_mul(count, dl) {
Some(size) => size,
None => bug!("Layout::align({:?}): {} * {} overflowed",
self, elem_size.bytes(), count)
};
dl.vector_align(vec_size)
}
Abi::Aggregate { align, .. } => align
}
}
pub fn size_and_align<C: HasDataLayout>(&self, cx: C) -> (Size, Align) {
(self.size(cx), self.align(cx))
}
/// Returns alignment before repr alignment is applied
pub fn primitive_align<C: HasDataLayout>(&self, cx: C) -> Align {
match *self {
Abi::Aggregate { primitive_align, .. } => primitive_align,
_ => self.align(cx.data_layout())
}
}
}
/// Type layout, from which size and alignment can be cheaply computed.
@ -911,6 +846,9 @@ pub struct CachedLayout {
pub layout: Layout,
pub fields: FieldPlacement,
pub abi: Abi,
pub align: Align,
pub primitive_align: Align,
pub size: Size
}
fn layout_raw<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
@ -947,12 +885,16 @@ impl<'a, 'tcx> Layout {
-> Result<&'tcx CachedLayout, LayoutError<'tcx>> {
let cx = (tcx, param_env);
let dl = cx.data_layout();
let scalar = |value| {
let scalar = |value: Primitive| {
let align = value.align(dl);
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Scalar,
fields: FieldPlacement::Union(0),
abi: Abi::Scalar(value)
abi: Abi::Scalar(value),
size: value.size(dl),
align,
primitive_align: align
})
};
#[derive(Copy, Clone, Debug)]
@ -1005,11 +947,11 @@ impl<'a, 'tcx> Layout {
if end > 0 {
let optimizing = &mut inverse_memory_index[..end];
if sort_ascending {
optimizing.sort_by_key(|&x| fields[x as usize].align(dl).abi());
optimizing.sort_by_key(|&x| fields[x as usize].align.abi());
} else {
optimizing.sort_by(| &a, &b | {
let a = fields[a as usize].align(dl).abi();
let b = fields[b as usize].align(dl).abi();
let a = fields[a as usize].align.abi();
let b = fields[b as usize].align.abi();
b.cmp(&a)
});
}
@ -1046,16 +988,15 @@ impl<'a, 'tcx> Layout {
// Invariant: offset < dl.obj_size_bound() <= 1<<61
if !packed {
let field_align = field.align(dl);
align = align.max(field_align);
primitive_align = primitive_align.max(field.primitive_align(dl));
offset = offset.abi_align(field_align);
offset = offset.abi_align(field.align);
align = align.max(field.align);
primitive_align = primitive_align.max(field.primitive_align);
}
debug!("univariant offset: {:?} field: {:?} {:?}", offset, field, field.size(dl));
debug!("univariant offset: {:?} field: {:#?}", offset, field);
offsets[*i as usize] = offset;
offset = offset.checked_add(field.size(dl), dl)
offset = offset.checked_add(field.size, dl)
.ok_or(LayoutError::SizeOverflow(ty))?;
}
@ -1095,11 +1036,11 @@ impl<'a, 'tcx> Layout {
},
abi: Abi::Aggregate {
sized,
packed,
align,
primitive_align,
size: min_size.abi_align(align)
}
packed
},
align,
primitive_align,
size: min_size.abi_align(align)
})
};
let univariant = |fields: &[TyLayout], repr: &ReprOptions, kind| {
@ -1137,11 +1078,11 @@ impl<'a, 'tcx> Layout {
fields,
abi: Abi::Aggregate {
sized: true,
packed: false,
align,
primitive_align: align,
size: (meta_offset + metadata.size(dl)).abi_align(align)
}
packed: false
},
align,
primitive_align: align,
size: (meta_offset + metadata.size(dl)).abi_align(align)
}))
};
@ -1183,25 +1124,24 @@ impl<'a, 'tcx> Layout {
}
let element = cx.layout_of(element)?;
let element_size = element.size(dl);
let count = count.val.to_const_int().unwrap().to_u64().unwrap();
let size = element_size.checked_mul(count, dl)
let size = element.size.checked_mul(count, dl)
.ok_or(LayoutError::SizeOverflow(ty))?;
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Array,
fields: FieldPlacement::Array {
stride: element_size,
stride: element.size,
count
},
abi: Abi::Aggregate {
sized: true,
packed: false,
align: element.align(dl),
primitive_align: element.primitive_align(dl),
size
}
packed: false
},
align: element.align,
primitive_align: element.primitive_align,
size
})
}
ty::TySlice(element) => {
@ -1210,16 +1150,16 @@ impl<'a, 'tcx> Layout {
variant_index: None,
layout: Layout::Array,
fields: FieldPlacement::Array {
stride: element.size(dl),
stride: element.size,
count: 0
},
abi: Abi::Aggregate {
sized: false,
packed: false,
align: element.align(dl),
primitive_align: element.primitive_align(dl),
size: Size::from_bytes(0)
}
packed: false
},
align: element.align,
primitive_align: element.primitive_align,
size: Size::from_bytes(0)
})
}
ty::TyStr => {
@ -1232,11 +1172,11 @@ impl<'a, 'tcx> Layout {
},
abi: Abi::Aggregate {
sized: false,
packed: false,
align: dl.i8_align,
primitive_align: dl.i8_align,
size: Size::from_bytes(0)
}
packed: false
},
align: dl.i8_align,
primitive_align: dl.i8_align,
size: Size::from_bytes(0)
})
}
@ -1283,23 +1223,34 @@ impl<'a, 'tcx> Layout {
// SIMD vector types.
ty::TyAdt(def, ..) if def.repr.simd() => {
let count = ty.simd_size(tcx) as u64;
let element = ty.simd_type(tcx);
let element = match cx.layout_of(element)?.abi {
let element = cx.layout_of(ty.simd_type(tcx))?;
let element_scalar = match element.abi {
Abi::Scalar(value) => value,
_ => {
tcx.sess.fatal(&format!("monomorphising SIMD type `{}` with \
a non-machine element type `{}`",
ty, element));
ty, element.ty));
}
};
let size = element.size.checked_mul(count, dl)
.ok_or(LayoutError::SizeOverflow(ty))?;
let align = dl.vector_align(size);
let size = size.abi_align(align);
tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::Vector,
fields: FieldPlacement::Array {
stride: element.size(tcx),
stride: element.size,
count
},
abi: Abi::Vector { element, count }
abi: Abi::Vector {
element: element_scalar,
count
},
size,
align,
primitive_align: align
})
}
@ -1344,10 +1295,10 @@ impl<'a, 'tcx> Layout {
assert!(!field.is_unsized());
if !packed {
align = align.max(field.align(dl));
primitive_align = primitive_align.max(field.primitive_align(dl));
align = align.max(field.align);
primitive_align = primitive_align.max(field.primitive_align);
}
size = cmp::max(size, field.size(dl));
size = cmp::max(size, field.size);
}
return Ok(tcx.intern_layout(CachedLayout {
@ -1356,11 +1307,11 @@ impl<'a, 'tcx> Layout {
fields: FieldPlacement::Union(variants[0].len()),
abi: Abi::Aggregate {
sized: true,
packed,
align,
primitive_align,
size: size.abi_align(align)
}
packed
},
align,
primitive_align,
size: size.abi_align(align)
}));
}
@ -1411,27 +1362,26 @@ impl<'a, 'tcx> Layout {
st[0].variant_index = Some(0);
st[1].variant_index = Some(1);
let offset = st[i].fields.offset(field_index) + offset;
let mut abi = st[i].abi;
if offset.bytes() == 0 && discr.size(dl) == abi.size(dl) {
abi = Abi::Scalar(discr);
}
let CachedLayout {
mut abi,
size,
mut align,
mut primitive_align,
..
} = st[i];
let mut discr_align = discr.align(dl);
match abi {
Abi::Aggregate {
ref mut align,
ref mut primitive_align,
ref mut packed,
..
} => {
if offset.abi_align(discr_align) != offset {
*packed = true;
discr_align = dl.i8_align;
}
*align = align.max(discr_align);
*primitive_align = primitive_align.max(discr_align);
if offset.bytes() == 0 && discr.size(dl) == size {
abi = Abi::Scalar(discr);
} else if let Abi::Aggregate { ref mut packed, .. } = abi {
if offset.abi_align(discr_align) != offset {
*packed = true;
discr_align = dl.i8_align;
}
_ => {}
}
align = align.max(discr_align);
primitive_align = primitive_align.max(discr_align);
return Ok(tcx.intern_layout(CachedLayout {
variant_index: None,
layout: Layout::NullablePointer {
@ -1444,7 +1394,10 @@ impl<'a, 'tcx> Layout {
offsets: vec![offset],
memory_index: vec![0]
},
abi
abi,
size,
align,
primitive_align
}));
}
}
@ -1477,15 +1430,14 @@ impl<'a, 'tcx> Layout {
// Find the first field we can't move later
// to make room for a larger discriminant.
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
let field_align = field.align(dl);
if !field.is_zst() || field_align.abi() != 1 {
start_align = start_align.min(field_align);
if !field.is_zst() || field.align.abi() != 1 {
start_align = start_align.min(field.align);
break;
}
}
size = cmp::max(size, st.abi.size(dl));
align = align.max(st.abi.align(dl));
primitive_align = primitive_align.max(st.abi.primitive_align(dl));
size = cmp::max(size, st.size);
align = align.max(st.align);
primitive_align = primitive_align.max(st.primitive_align);
Ok(st)
}).collect::<Result<Vec<_>, _>>()?;
@ -1534,9 +1486,8 @@ impl<'a, 'tcx> Layout {
let old_ity_size = min_ity.size();
let new_ity_size = ity.size();
for variant in &mut variants {
match (&mut variant.fields, &mut variant.abi) {
(&mut FieldPlacement::Arbitrary { ref mut offsets, .. },
&mut Abi::Aggregate { ref mut size, .. }) => {
match variant.fields {
FieldPlacement::Arbitrary { ref mut offsets, .. } => {
for i in offsets {
if *i <= old_ity_size {
assert_eq!(*i, old_ity_size);
@ -1544,8 +1495,8 @@ impl<'a, 'tcx> Layout {
}
}
// We might be making the struct larger.
if *size <= old_ity_size {
*size = new_ity_size;
if variant.size <= old_ity_size {
variant.size = new_ity_size;
}
}
_ => bug!()
@ -1572,12 +1523,12 @@ impl<'a, 'tcx> Layout {
} else {
Abi::Aggregate {
sized: true,
packed: false,
align,
primitive_align,
size
packed: false
}
}
},
align,
primitive_align,
size
})
}
@ -1629,12 +1580,10 @@ impl<'a, 'tcx> Layout {
// (delay format until we actually need it)
let record = |kind, opt_discr_size, variants| {
let type_desc = format!("{:?}", ty);
let overall_size = layout.size(tcx);
let align = layout.align(tcx);
tcx.sess.code_stats.borrow_mut().record_type_size(kind,
type_desc,
align,
overall_size,
layout.align,
layout.size,
opt_discr_size,
variants);
};
@ -1670,16 +1619,15 @@ impl<'a, 'tcx> Layout {
}
Ok(field_layout) => {
let offset = layout.fields.offset(i);
let field_size = field_layout.size(tcx);
let field_end = offset + field_size;
let field_end = offset + field_layout.size;
if min_size < field_end {
min_size = field_end;
}
session::FieldInfo {
name: name.to_string(),
offset: offset.bytes(),
size: field_size.bytes(),
align: field_layout.align(tcx).abi(),
size: field_layout.size.bytes(),
align: field_layout.align.abi(),
}
}
}
@ -1692,9 +1640,9 @@ impl<'a, 'tcx> Layout {
} else {
session::SizeKind::Exact
},
align: layout.align(tcx).abi(),
align: layout.align.abi(),
size: if min_size.bytes() == 0 {
layout.size(tcx).bytes()
layout.size.bytes()
} else {
min_size.bytes()
},
@ -1795,7 +1743,7 @@ impl<'a, 'tcx> SizeSkeleton<'tcx> {
// First try computing a static layout.
let err = match (tcx, param_env).layout_of(ty) {
Ok(layout) => {
return Ok(SizeSkeleton::Known(layout.size(tcx)));
return Ok(SizeSkeleton::Known(layout.size));
}
Err(err) => err
};
@ -2174,24 +2122,15 @@ impl<'a, 'tcx> TyLayout<'tcx> {
/// Returns true if the type is a ZST and not unsized.
pub fn is_zst(&self) -> bool {
self.abi.is_zst()
match self.abi {
Abi::Scalar(_) => false,
Abi::Vector { count, .. } => count == 0,
Abi::Aggregate { sized, .. } => sized && self.size.bytes() == 0
}
}
pub fn size<C: HasDataLayout>(&self, cx: C) -> Size {
self.abi.size(cx)
}
pub fn align<C: HasDataLayout>(&self, cx: C) -> Align {
self.abi.align(cx)
}
pub fn size_and_align<C: HasDataLayout>(&self, cx: C) -> (Size, Align) {
self.abi.size_and_align(cx)
}
/// Returns alignment before repr alignment is applied
pub fn primitive_align<C: HasDataLayout>(&self, cx: C) -> Align {
self.abi.primitive_align(cx)
pub fn size_and_align(&self) -> (Size, Align) {
(self.size, self.align)
}
/// Find the offset of a non-zero leaf field, starting from
@ -2331,12 +2270,9 @@ impl<'gcx> HashStable<StableHashingContext<'gcx>> for Abi {
element.hash_stable(hcx, hasher);
count.hash_stable(hcx, hasher);
}
Aggregate { packed, sized, size, align, primitive_align } => {
Aggregate { packed, sized } => {
packed.hash_stable(hcx, hasher);
sized.hash_stable(hcx, hasher);
size.hash_stable(hcx, hasher);
align.hash_stable(hcx, hasher);
primitive_align.hash_stable(hcx, hasher);
}
}
}
@ -2346,7 +2282,10 @@ impl_stable_hash_for!(struct ::ty::layout::CachedLayout {
variant_index,
layout,
fields,
abi
abi,
size,
align,
primitive_align
});
impl_stable_hash_for!(enum ::ty::layout::Integer {

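The `Abi::size`/`align`/`size_and_align`/`primitive_align` methods deleted above recomputed their results on every query (for vectors, a checked multiply per call); after this patch the values are computed once, when the layout is built, and stored on the interned `CachedLayout`. A condensed sketch of the new construction path for the SIMD case, assembled from the hunk above:

    // computed once, at layout-construction time
    let size = element.size.checked_mul(count, dl)
        .ok_or(LayoutError::SizeOverflow(ty))?;
    let align = dl.vector_align(size);
    let size = size.abi_align(align);
    tcx.intern_layout(CachedLayout {
        abi: Abi::Vector { element: element_scalar, count },
        size,
        align,
        primitive_align: align,
        // layout, fields, variant_index as in the hunk
    })

Every later `layout.size` is then a field read instead of a recomputation.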
View file

@ -320,12 +320,12 @@ fn eval_const_expr_partial<'a, 'tcx>(cx: &ConstContext<'a, 'tcx>,
};
match &tcx.item_name(def_id)[..] {
"size_of" => {
let size = layout_of(substs.type_at(0))?.size(tcx).bytes();
let size = layout_of(substs.type_at(0))?.size.bytes();
return Ok(mk_const(Integral(Usize(ConstUsize::new(size,
tcx.sess.target.usize_ty).unwrap()))));
}
"min_align_of" => {
let align = layout_of(substs.type_at(0))?.align(tcx).abi();
let align = layout_of(substs.type_at(0))?.align.abi();
return Ok(mk_const(Integral(Usize(ConstUsize::new(align,
tcx.sess.target.usize_ty).unwrap()))));
}

View file

@ -757,15 +757,14 @@ impl<'a, 'tcx> LateLintPass<'a, 'tcx> for VariantSizeDifferences {
let discr_size = discr.size(cx.tcx).bytes();
debug!("enum `{}` is {} bytes large with layout:\n{:#?}",
t, layout.size(cx.tcx).bytes(), layout);
t, layout.size.bytes(), layout);
let (largest, slargest, largest_index) = enum_definition.variants
.iter()
.zip(variants)
.map(|(variant, variant_layout)| {
// Subtract the size of the enum discriminant
let bytes = variant_layout.abi.size(cx.tcx)
.bytes()
let bytes = variant_layout.size.bytes()
.saturating_sub(discr_size);
debug!("- variant `{}` is {} bytes large", variant.node.name, bytes);

View file

@ -626,7 +626,7 @@ impl<'a, 'tcx> Inliner<'a, 'tcx> {
fn type_size_of<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
param_env: ty::ParamEnv<'tcx>,
ty: Ty<'tcx>) -> Option<u64> {
(tcx, param_env).layout_of(ty).ok().map(|layout| layout.size(tcx).bytes())
(tcx, param_env).layout_of(ty).ok().map(|layout| layout.size.bytes())
}
fn subst_and_normalize<'a, 'tcx: 'a>(

View file

@ -296,14 +296,14 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
};
Some(Reg {
kind,
size: self.size(ccx)
size: self.size
})
}
layout::Abi::Vector { .. } => {
Some(Reg {
kind: RegKind::Vector,
size: self.size(ccx)
size: self.size
})
}
@ -345,7 +345,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
}
// Keep track of the offset (without padding).
let size = field.size(ccx);
let size = field.size;
if is_union {
total = cmp::max(total, size);
} else {
@ -354,7 +354,7 @@ impl<'tcx> LayoutExt<'tcx> for TyLayout<'tcx> {
}
// There needs to be no padding.
if total != self.size(ccx) {
if total != self.size {
None
} else {
result
@ -446,7 +446,7 @@ impl<'a, 'tcx> ArgType<'tcx> {
}
}
pub fn make_indirect(&mut self, ccx: &CrateContext<'a, 'tcx>) {
pub fn make_indirect(&mut self) {
assert!(self.nested.is_empty());
assert_eq!(self.kind, ArgKind::Direct);
@ -458,7 +458,7 @@ impl<'a, 'tcx> ArgType<'tcx> {
// program-invisible so can't possibly capture
self.attrs.set(ArgAttribute::NoAlias)
.set(ArgAttribute::NoCapture)
.set_dereferenceable(self.layout.size(ccx));
.set_dereferenceable(self.layout.size);
self.kind = ArgKind::Indirect;
}
@ -520,15 +520,15 @@ impl<'a, 'tcx> ArgType<'tcx> {
}
let ccx = bcx.ccx;
if self.is_indirect() {
let llsz = C_usize(ccx, self.layout.size(ccx).bytes());
base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align(ccx));
let llsz = C_usize(ccx, self.layout.size.bytes());
base::call_memcpy(bcx, dst.llval, val, llsz, self.layout.align);
} else if let Some(ty) = self.cast {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_dst = bcx.pointercast(dst.llval, ty.llvm_type(ccx).ptr_to());
bcx.store(val, cast_dst, Some(self.layout.align(ccx)));
bcx.store(val, cast_dst, Some(self.layout.align));
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
@ -556,8 +556,8 @@ impl<'a, 'tcx> ArgType<'tcx> {
base::call_memcpy(bcx,
bcx.pointercast(dst.llval, Type::i8p(ccx)),
bcx.pointercast(llscratch, Type::i8p(ccx)),
C_usize(ccx, self.layout.size(ccx).bytes()),
self.layout.align(ccx).min(ty.align(ccx)));
C_usize(ccx, self.layout.size.bytes()),
self.layout.align.min(ty.align(ccx)));
bcx.lifetime_end(llscratch, scratch_size);
}
@ -828,7 +828,7 @@ impl<'a, 'tcx> FnType<'tcx> {
_ => return
}
let size = arg.layout.size(ccx);
let size = arg.layout.size;
if let Some(unit) = arg.layout.homogeneous_aggregate(ccx) {
// Replace newtypes with their inner-most type.
@ -851,7 +851,7 @@ impl<'a, 'tcx> FnType<'tcx> {
}
if size > layout::Pointer.size(ccx) {
arg.make_indirect(ccx);
arg.make_indirect();
} else {
// We want to pass small aggregates as immediates, but using
// a LLVM aggregate type for this leads to bad optimizations,
@ -897,7 +897,7 @@ impl<'a, 'tcx> FnType<'tcx> {
"x86_64" => if abi == Abi::SysV64 {
cabi_x86_64::compute_abi_info(ccx, self);
} else if abi == Abi::Win64 || ccx.sess().target.target.options.is_like_windows {
cabi_x86_win64::compute_abi_info(ccx, self);
cabi_x86_win64::compute_abi_info(self);
} else {
cabi_x86_64::compute_abi_info(ccx, self);
},
@ -910,12 +910,12 @@ impl<'a, 'tcx> FnType<'tcx> {
"s390x" => cabi_s390x::compute_abi_info(ccx, self),
"asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
"wasm32" => cabi_asmjs::compute_abi_info(ccx, self),
"msp430" => cabi_msp430::compute_abi_info(ccx, self),
"msp430" => cabi_msp430::compute_abi_info(self),
"sparc" => cabi_sparc::compute_abi_info(ccx, self),
"sparc64" => cabi_sparc64::compute_abi_info(ccx, self),
"nvptx" => cabi_nvptx::compute_abi_info(ccx, self),
"nvptx64" => cabi_nvptx64::compute_abi_info(ccx, self),
"hexagon" => cabi_hexagon::compute_abi_info(ccx, self),
"nvptx" => cabi_nvptx::compute_abi_info(self),
"nvptx64" => cabi_nvptx64::compute_abi_info(self),
"hexagon" => cabi_hexagon::compute_abi_info(self),
a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
}

View file

@ -406,15 +406,13 @@ pub fn memcpy_ty<'a, 'tcx>(
layout: TyLayout<'tcx>,
align: Option<Align>,
) {
let ccx = bcx.ccx;
let size = layout.size(ccx).bytes();
let size = layout.size.bytes();
if size == 0 {
return;
}
let align = align.unwrap_or_else(|| layout.align(ccx));
call_memcpy(bcx, dst, src, C_usize(ccx, size), align);
let align = align.unwrap_or(layout.align);
call_memcpy(bcx, dst, src, C_usize(bcx.ccx, size), align);
}
pub fn call_memset<'a, 'tcx>(b: &Builder<'a, 'tcx>,

View file

@ -14,7 +14,7 @@ use context::CrateContext;
fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
let size = arg.layout.size(ccx);
let size = arg.layout.size;
// Ensure we have at most four uniquely addressable members.
if size > unit.size.checked_mul(4, ccx).unwrap() {
@ -47,7 +47,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
ret.cast_to(uniform);
return;
}
let size = ret.layout.size(ccx);
let size = ret.layout.size;
let bits = size.bits();
if bits <= 128 {
let unit = if bits <= 8 {
@ -66,7 +66,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
});
return;
}
ret.make_indirect(ccx);
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
@ -78,7 +78,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
arg.cast_to(uniform);
return;
}
let size = arg.layout.size(ccx);
let size = arg.layout.size;
let bits = size.bits();
if bits <= 128 {
let unit = if bits <= 8 {
@ -97,7 +97,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
});
return;
}
arg.make_indirect(ccx);
arg.make_indirect();
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {

View file

@ -15,7 +15,7 @@ use llvm::CallConv;
fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
let size = arg.layout.size(ccx);
let size = arg.layout.size;
// Ensure we have at most four uniquely addressable members.
if size > unit.size.checked_mul(4, ccx).unwrap() {
@ -52,7 +52,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
}
}
let size = ret.layout.size(ccx);
let size = ret.layout.size;
let bits = size.bits();
if bits <= 32 {
let unit = if bits <= 8 {
@ -68,7 +68,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
});
return;
}
ret.make_indirect(ccx);
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, vfp: bool) {
@ -84,8 +84,8 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
}
}
let align = arg.layout.align(ccx).abi();
let total = arg.layout.size(ccx);
let align = arg.layout.align.abi();
let total = arg.layout.size;
arg.cast_to(Uniform {
unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
total

View file

@ -19,7 +19,7 @@ use context::CrateContext;
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if ret.layout.is_aggregate() {
if let Some(unit) = ret.layout.homogeneous_aggregate(ccx) {
let size = ret.layout.size(ccx);
let size = ret.layout.size;
if unit.size == size {
ret.cast_to(Uniform {
unit,
@ -29,13 +29,13 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
}
}
ret.make_indirect(ccx);
ret.make_indirect();
}
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
fn classify_arg_ty(arg: &mut ArgType) {
if arg.layout.is_aggregate() {
arg.make_indirect(ccx);
arg.make_indirect();
arg.attrs.set(ArgAttribute::ByVal);
}
}
@ -47,6 +47,6 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
classify_arg_ty(ccx, arg);
classify_arg_ty(arg);
}
}

View file

@ -11,33 +11,32 @@
#![allow(non_upper_case_globals)]
use abi::{FnType, ArgType, LayoutExt};
use context::CrateContext;
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 {
ret.make_indirect(ccx);
fn classify_ret_ty(ret: &mut ArgType) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
ret.extend_integer_width_to(32);
}
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 {
arg.make_indirect(ccx);
fn classify_arg_ty(arg: &mut ArgType) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
arg.extend_integer_width_to(32);
}
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info(fty: &mut FnType) {
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(ccx, arg);
classify_arg_ty(arg);
}
}

View file

@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect(ccx);
ret.make_indirect();
*offset += ccx.tcx().data_layout.pointer_size;
}
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
let dl = &ccx.tcx().data_layout;
let size = arg.layout.size(ccx);
let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align);
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {

View file

@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(64);
} else {
ret.make_indirect(ccx);
ret.make_indirect();
*offset += ccx.tcx().data_layout.pointer_size;
}
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
let dl = &ccx.tcx().data_layout;
let size = arg.layout.size(ccx);
let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align);
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {

View file

@ -12,7 +12,6 @@
// http://www.ti.com/lit/an/slaa534/slaa534.pdf
use abi::{ArgType, FnType, LayoutExt};
use context::CrateContext;
// 3.5 Structures or Unions Passed and Returned by Reference
//
@ -20,31 +19,31 @@ use context::CrateContext;
// returned by reference. To pass a structure or union by reference, the caller
// places its address in the appropriate location: either in a register or on
// the stack, according to its position in the argument list. (..)"
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
ret.make_indirect(ccx);
fn classify_ret_ty(ret: &mut ArgType) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
ret.extend_integer_width_to(16);
}
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
arg.make_indirect(ccx);
fn classify_arg_ty(arg: &mut ArgType) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
arg.extend_integer_width_to(16);
}
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info(fty: &mut FnType) {
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(ccx, arg);
classify_arg_ty(arg);
}
}

View file

@ -12,33 +12,32 @@
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use abi::{ArgType, FnType, LayoutExt};
use context::CrateContext;
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 32 {
ret.make_indirect(ccx);
fn classify_ret_ty(ret: &mut ArgType) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
ret.extend_integer_width_to(32);
}
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 32 {
arg.make_indirect(ccx);
fn classify_arg_ty(arg: &mut ArgType) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
arg.extend_integer_width_to(32);
}
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info(fty: &mut FnType) {
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(ccx, arg);
classify_arg_ty(arg);
}
}

View file

@ -12,33 +12,32 @@
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use abi::{ArgType, FnType, LayoutExt};
use context::CrateContext;
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if ret.layout.is_aggregate() && ret.layout.size(ccx).bits() > 64 {
ret.make_indirect(ccx);
fn classify_ret_ty(ret: &mut ArgType) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
ret.extend_integer_width_to(64);
}
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
if arg.layout.is_aggregate() && arg.layout.size(ccx).bits() > 64 {
arg.make_indirect(ccx);
fn classify_arg_ty(arg: &mut ArgType) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
arg.extend_integer_width_to(64);
}
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
pub fn compute_abi_info(fty: &mut FnType) {
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(ccx, arg);
classify_arg_ty(arg);
}
}

View file

@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect(ccx);
ret.make_indirect();
*offset += ccx.tcx().data_layout.pointer_size;
}
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
let dl = &ccx.tcx().data_layout;
let size = arg.layout.size(ccx);
let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align);
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {

View file

@ -28,25 +28,23 @@ fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
abi: ABI)
-> Option<Uniform> {
arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
let size = arg.layout.size(ccx);
// ELFv1 only passes one-member aggregates transparently.
// ELFv2 passes up to eight uniquely addressable members.
if (abi == ELFv1 && size > unit.size)
|| size > unit.size.checked_mul(8, ccx).unwrap() {
if (abi == ELFv1 && arg.layout.size > unit.size)
|| arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => size.bits() == 128
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: size
total: arg.layout.size
})
} else {
None
@ -62,7 +60,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
// The ELFv1 ABI doesn't return aggregates in registers
if abi == ELFv1 {
ret.make_indirect(ccx);
ret.make_indirect();
return;
}
@ -71,7 +69,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
return;
}
let size = ret.layout.size(ccx);
let size = ret.layout.size;
let bits = size.bits();
if bits <= 128 {
let unit = if bits <= 8 {
@ -91,7 +89,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
return;
}
ret.make_indirect(ccx);
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>, abi: ABI) {
@ -105,7 +103,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
return;
}
let size = arg.layout.size(ccx);
let size = arg.layout.size;
let (unit, total) = match abi {
ELFv1 => {
// In ELFv1, aggregates smaller than a doubleword should appear in

View file

@ -16,11 +16,11 @@ use context::CrateContext;
use rustc::ty::layout::{self, TyLayout};
fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tcx>) {
if !ret.layout.is_aggregate() && ret.layout.size(ccx).bits() <= 64 {
fn classify_ret_ty(ret: &mut ArgType) {
if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
ret.extend_integer_width_to(64);
} else {
ret.make_indirect(ccx);
ret.make_indirect();
}
}
@ -41,32 +41,31 @@ fn is_single_fp_element<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
let size = arg.layout.size(ccx);
if !arg.layout.is_aggregate() && size.bits() <= 64 {
if !arg.layout.is_aggregate() && arg.layout.size.bits() <= 64 {
arg.extend_integer_width_to(64);
return;
}
if is_single_fp_element(ccx, arg.layout) {
match size.bytes() {
match arg.layout.size.bytes() {
4 => arg.cast_to(Reg::f32()),
8 => arg.cast_to(Reg::f64()),
_ => arg.make_indirect(ccx)
_ => arg.make_indirect()
}
} else {
match size.bytes() {
match arg.layout.size.bytes() {
1 => arg.cast_to(Reg::i8()),
2 => arg.cast_to(Reg::i16()),
4 => arg.cast_to(Reg::i32()),
8 => arg.cast_to(Reg::i64()),
_ => arg.make_indirect(ccx)
_ => arg.make_indirect()
}
}
}
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
if !fty.ret.is_ignore() {
classify_ret_ty(ccx, &mut fty.ret);
classify_ret_ty(&mut fty.ret);
}
for arg in &mut fty.args {

View file

@ -19,15 +19,15 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
if !ret.layout.is_aggregate() {
ret.extend_integer_width_to(32);
} else {
ret.make_indirect(ccx);
ret.make_indirect();
*offset += ccx.tcx().data_layout.pointer_size;
}
}
fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut Size) {
let dl = &ccx.tcx().data_layout;
let size = arg.layout.size(ccx);
let align = arg.layout.align(ccx).max(dl.i32_align).min(dl.i64_align);
let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align);
if arg.layout.is_aggregate() {
arg.cast_to(Uniform {

View file

@ -16,23 +16,21 @@ use context::CrateContext;
fn is_homogeneous_aggregate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>)
-> Option<Uniform> {
arg.layout.homogeneous_aggregate(ccx).and_then(|unit| {
let size = arg.layout.size(ccx);
// Ensure we have at most eight uniquely addressable members.
if size > unit.size.checked_mul(8, ccx).unwrap() {
if arg.layout.size > unit.size.checked_mul(8, ccx).unwrap() {
return None;
}
let valid_unit = match unit.kind {
RegKind::Integer => false,
RegKind::Float => true,
RegKind::Vector => size.bits() == 128
RegKind::Vector => arg.layout.size.bits() == 128
};
if valid_unit {
Some(Uniform {
unit,
total: size
total: arg.layout.size
})
} else {
None
@ -50,7 +48,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
ret.cast_to(uniform);
return;
}
let size = ret.layout.size(ccx);
let size = ret.layout.size;
let bits = size.bits();
if bits <= 128 {
let unit = if bits <= 8 {
@ -71,7 +69,7 @@ fn classify_ret_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ret: &mut ArgType<'tc
}
// don't return aggregates in registers
ret.make_indirect(ccx);
ret.make_indirect();
}
fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tcx>) {
@ -85,7 +83,7 @@ fn classify_arg_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &mut ArgType<'tc
return;
}
let total = arg.layout.size(ccx);
let total = arg.layout.size;
arg.cast_to(Uniform {
unit: Reg::i64(),
total

View file

@ -50,27 +50,25 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let t = &ccx.sess().target.target;
if t.options.is_like_osx || t.options.is_like_windows
|| t.options.is_like_openbsd {
let size = fty.ret.layout.size(ccx);
// According to Clang, everyone but MSVC returns single-element
// float aggregates directly in a floating-point register.
if !t.options.is_like_msvc && is_single_fp_element(ccx, fty.ret.layout) {
match size.bytes() {
match fty.ret.layout.size.bytes() {
4 => fty.ret.cast_to(Reg::f32()),
8 => fty.ret.cast_to(Reg::f64()),
_ => fty.ret.make_indirect(ccx)
_ => fty.ret.make_indirect()
}
} else {
match size.bytes() {
match fty.ret.layout.size.bytes() {
1 => fty.ret.cast_to(Reg::i8()),
2 => fty.ret.cast_to(Reg::i16()),
4 => fty.ret.cast_to(Reg::i32()),
8 => fty.ret.cast_to(Reg::i64()),
_ => fty.ret.make_indirect(ccx)
_ => fty.ret.make_indirect()
}
}
} else {
fty.ret.make_indirect(ccx);
fty.ret.make_indirect();
}
} else {
fty.ret.extend_integer_width_to(32);
@ -80,7 +78,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
if arg.layout.is_aggregate() {
arg.make_indirect(ccx);
arg.make_indirect();
arg.attrs.set(ArgAttribute::ByVal);
} else {
arg.extend_integer_width_to(32);
@ -104,13 +102,12 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// At this point we know this must be a primitive of sorts.
let unit = arg.layout.homogeneous_aggregate(ccx).unwrap();
let size = arg.layout.size(ccx);
assert_eq!(unit.size, size);
assert_eq!(unit.size, arg.layout.size);
if unit.kind == RegKind::Float {
continue;
}
let size_in_regs = (size.bits() + 31) / 32;
let size_in_regs = (arg.layout.size.bits() + 31) / 32;
if size_in_regs == 0 {
continue;
@ -122,7 +119,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
free_regs -= size_in_regs;
if size.bits() <= 32 && unit.kind == RegKind::Integer {
if arg.layout.size.bits() <= 32 && unit.kind == RegKind::Integer {
arg.attrs.set(ArgAttribute::InReg);
}

View file

@ -57,7 +57,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
cls: &mut [Class],
off: Size)
-> Result<(), Memory> {
if !off.is_abi_aligned(layout.align(ccx)) {
if !off.is_abi_aligned(layout.align) {
if !layout.is_zst() {
return Err(Memory);
}
@ -106,7 +106,7 @@ fn classify_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, arg: &ArgType<'tcx>)
Ok(())
}
let n = ((arg.layout.size(ccx).bytes() + 7) / 8) as usize;
let n = ((arg.layout.size.bytes() + 7) / 8) as usize;
if n > MAX_EIGHTBYTES {
return Err(Memory);
}
@ -213,7 +213,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
};
if in_mem {
arg.make_indirect(ccx);
arg.make_indirect();
if is_arg {
arg.attrs.set(ArgAttribute::ByVal);
} else {
@ -226,7 +226,7 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
sse_regs -= needed_sse;
if arg.layout.is_aggregate() {
let size = arg.layout.size(ccx);
let size = arg.layout.size;
arg.cast_to(cast_target(cls.as_ref().unwrap(), size))
} else {
arg.extend_integer_width_to(32);

View file

@ -9,23 +9,21 @@
// except according to those terms.
use abi::{ArgType, FnType, Reg};
use common::CrateContext;
use rustc::ty::layout;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>) {
let fixup = |a: &mut ArgType<'tcx>| {
let size = a.layout.size(ccx);
pub fn compute_abi_info(fty: &mut FnType) {
let fixup = |a: &mut ArgType| {
match a.layout.abi {
layout::Abi::Aggregate { .. } => {
match size.bits() {
match a.layout.size.bits() {
8 => a.cast_to(Reg::i8()),
16 => a.cast_to(Reg::i16()),
32 => a.cast_to(Reg::i32()),
64 => a.cast_to(Reg::i64()),
_ => a.make_indirect(ccx)
_ => a.make_indirect()
}
}
layout::Abi::Vector { .. } => {
@ -33,8 +31,8 @@ pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType
// (probably what clang calls "illegal vectors").
}
layout::Abi::Scalar(_) => {
if size.bytes() > 8 {
a.make_indirect(ccx);
if a.layout.size.bytes() > 8 {
a.make_indirect();
} else {
a.extend_integer_width_to(32);
}
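A pattern worth noting across the target ABI files: once `ArgType::make_indirect` and the size/alignment reads stopped needing a `CrateContext`, the entry points that used the context for nothing else (hexagon, msp430, nvptx, nvptx64, and x86_win64 above) drop it from their signatures entirely:

    // before
    pub fn compute_abi_info<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fty: &mut FnType<'tcx>)
    // after
    pub fn compute_abi_info(fty: &mut FnType)

The dispatch in abi.rs is updated to match, while targets that still consult the data layout (mips, x86, x86_64, sparc64, ...) keep the parameter.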

View file

@ -430,16 +430,16 @@ fn trait_pointer_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
syntax_pos::DUMMY_SP),
offset: layout.fields.offset(0),
size: data_ptr_field.size(cx),
align: data_ptr_field.align(cx),
size: data_ptr_field.size,
align: data_ptr_field.align,
flags: DIFlags::FlagArtificial,
},
MemberDescription {
name: "vtable".to_string(),
type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
offset: layout.fields.offset(1),
size: vtable_field.size(cx),
align: vtable_field.align(cx),
size: vtable_field.size,
align: vtable_field.align,
flags: DIFlags::FlagArtificial,
},
];
@ -946,7 +946,7 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
f.name.to_string()
};
let field = layout.field(cx, i);
let (size, align) = field.size_and_align(cx);
let (size, align) = field.size_and_align();
MemberDescription {
name,
type_metadata: type_metadata(cx, field.ty, self.span),
@ -1062,7 +1062,7 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
-> Vec<MemberDescription> {
self.variant.fields.iter().enumerate().map(|(i, f)| {
let field = self.layout.field(cx, i);
let (size, align) = field.size_and_align(cx);
let (size, align) = field.size_and_align();
MemberDescription {
name: f.name.to_string(),
type_metadata: type_metadata(cx, field.ty, self.span),
@ -1153,8 +1153,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
name: "".to_string(),
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
size: variant.size(cx),
align: variant.align(cx),
size: variant.size,
align: variant.align,
flags: DIFlags::FlagZero
}
}).collect()
@ -1184,8 +1184,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
name: "".to_string(),
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
size: self.type_rep.size(cx),
align: self.type_rep.align(cx),
size: self.type_rep.size,
align: self.type_rep.align,
flags: DIFlags::FlagZero
}
]
@ -1230,7 +1230,7 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
}
let inner_offset = offset - field_offset;
let field = layout.field(ccx, i);
if inner_offset + size <= field.size(ccx) {
if inner_offset + size <= field.size {
write!(name, "{}$", i).unwrap();
compute_field_path(ccx, name, field, inner_offset, size);
}
@ -1248,8 +1248,8 @@ impl<'tcx> EnumMemberDescriptionFactory<'tcx> {
name,
type_metadata: variant_type_metadata,
offset: Size::from_bytes(0),
size: variant.size(cx),
align: variant.align(cx),
size: variant.size,
align: variant.align,
flags: DIFlags::FlagZero
}
]
@ -1443,7 +1443,7 @@ fn prepare_enum_metadata<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
_ => {}
}
let (enum_type_size, enum_type_align) = type_rep.size_and_align(cx);
let (enum_type_size, enum_type_align) = type_rep.size_and_align();
let enum_name = CString::new(enum_name).unwrap();
let unique_type_id_str = CString::new(

View file

@ -60,7 +60,7 @@ pub fn size_and_align_of_dst<'a, 'tcx>(bcx: &Builder<'a, 'tcx>, t: Ty<'tcx>, inf
let i = layout.fields.count() - 1;
let sized_size = layout.fields.offset(i).bytes();
let sized_align = layout.align(ccx).abi();
let sized_align = layout.align.abi();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_usize(ccx, sized_size);

View file

@ -245,7 +245,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
};
let load = bcx.load(
bcx.pointercast(llslot, cast_ty.llvm_type(bcx.ccx).ptr_to()),
Some(self.fn_ty.ret.layout.align(bcx.ccx)));
Some(self.fn_ty.ret.layout.align));
load
} else {
let op = self.trans_consume(&bcx, &mir::Lvalue::Local(mir::RETURN_POINTER));
@ -672,7 +672,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
llval = base::to_immediate(bcx, llval, arg.layout);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.llvm_type(bcx.ccx).ptr_to()),
(align | Alignment::Packed(arg.layout.align(bcx.ccx)))
(align | Alignment::Packed(arg.layout.align))
.non_abi());
} else {
llval = bcx.load(llval, align.non_abi());
@ -892,7 +892,7 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
let src = self.trans_operand(bcx, src);
let llty = src.layout.llvm_type(bcx.ccx);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
let align = src.layout.align(bcx.ccx).min(dst.layout.align(bcx.ccx));
let align = src.layout.align.min(dst.layout.align);
src.val.store(bcx,
LvalueRef::new_sized(cast_ptr, src.layout, Alignment::Packed(align)));
}

View file

@ -1116,7 +1116,7 @@ fn trans_const_adt<'a, 'tcx>(
assert_eq!(variant_index, 0);
let contents = [
vals[0].llval,
padding(ccx, l.size(ccx) - ccx.size_of(vals[0].ty))
padding(ccx, l.size - ccx.size_of(vals[0].ty))
];
Const::new(C_struct(ccx, &contents, l.is_packed()), t)

View file

@ -56,8 +56,8 @@ impl ops::BitOr for Alignment {
impl<'a> From<TyLayout<'a>> for Alignment {
fn from(layout: TyLayout) -> Self {
if let layout::Abi::Aggregate { packed: true, align, .. } = layout.abi {
Alignment::Packed(align)
if layout.is_packed() {
Alignment::Packed(layout.align)
} else {
Alignment::AbiAligned
}
@ -109,7 +109,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
-> LvalueRef<'tcx> {
debug!("alloca({:?}: {:?})", name, layout);
let tmp = bcx.alloca(
layout.llvm_type(bcx.ccx), name, layout.over_align(bcx.ccx));
layout.llvm_type(bcx.ccx), name, layout.over_align());
Self::new_sized(tmp, layout, Alignment::AbiAligned)
}
@ -374,7 +374,7 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
// than storing null to single target field.
let llptr = bcx.pointercast(self.llval, Type::i8(bcx.ccx).ptr_to());
let fill_byte = C_u8(bcx.ccx, 0);
let (size, align) = self.layout.size_and_align(bcx.ccx);
let (size, align) = self.layout.size_and_align();
let size = C_usize(bcx.ccx, size.bytes());
let align = C_u32(bcx.ccx, align.abi() as u32);
base::call_memset(bcx, llptr, fill_byte, size, align, false);
@ -414,11 +414,11 @@ impl<'a, 'tcx> LvalueRef<'tcx> {
}
pub fn storage_live(&self, bcx: &Builder<'a, 'tcx>) {
bcx.lifetime_start(self.llval, self.layout.size(bcx.ccx));
bcx.lifetime_start(self.llval, self.layout.size);
}
pub fn storage_dead(&self, bcx: &Builder<'a, 'tcx>) {
bcx.lifetime_end(self.llval, self.layout.size(bcx.ccx));
bcx.lifetime_end(self.llval, self.layout.size);
}
}

View file

@ -107,9 +107,9 @@ impl<'a, 'tcx> MirContext<'a, 'tcx> {
if let OperandValue::Immediate(v) = tr_elem.val {
let align = dest.alignment.non_abi()
.unwrap_or_else(|| tr_elem.layout.align(bcx.ccx));
.unwrap_or(tr_elem.layout.align);
let align = C_i32(bcx.ccx, align.abi() as i32);
let size = C_usize(bcx.ccx, dest.layout.size(bcx.ccx).bytes());
let size = C_usize(bcx.ccx, dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if common::is_const_integral(v) && common::const_to_uint(v) == 0 {

View file

@ -50,7 +50,7 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
match layout.fields {
layout::FieldPlacement::Union(_) => {
let size = layout.size(ccx).bytes();
let size = layout.size.bytes();
let fill = Type::array(&Type::i8(ccx), size);
match name {
None => {
@ -84,8 +84,6 @@ fn uncached_llvm_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
layout: TyLayout<'tcx>) -> Vec<Type> {
debug!("struct_llfields: {:#?}", layout);
let align = layout.align(ccx);
let size = layout.size(ccx);
let field_count = layout.fields.count();
let mut offset = Size::from_bytes(0);
@ -105,27 +103,26 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
if layout.is_packed() {
assert_eq!(padding.bytes(), 0);
} else {
let field_align = field.align(ccx);
assert!(field_align.abi() <= align.abi(),
assert!(field.align.abi() <= layout.align.abi(),
"non-packed type has field with larger align ({}): {:#?}",
field_align.abi(), layout);
field.align.abi(), layout);
}
offset = target_offset + field.size(ccx);
offset = target_offset + field.size;
}
if !layout.is_unsized() && field_count > 0 {
if offset > size {
if offset > layout.size {
bug!("layout: {:#?} stride: {:?} offset: {:?}",
layout, size, offset);
layout, layout.size, offset);
}
let padding = size - offset;
let padding = layout.size - offset;
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, size);
padding, offset, layout.size);
result.push(Type::array(&Type::i8(ccx), padding.bytes()));
assert!(result.len() == 1 + field_count * 2);
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}",
offset, size);
offset, layout.size);
}
result
@ -133,16 +130,15 @@ fn struct_llfields<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
impl<'a, 'tcx> CrateContext<'a, 'tcx> {
pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
self.layout_of(ty).align(self)
self.layout_of(ty).align
}
pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
self.layout_of(ty).size(self)
self.layout_of(ty).size
}
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
let layout = self.layout_of(ty);
(layout.size(self), layout.align(self))
self.layout_of(ty).size_and_align()
}
}
@ -150,7 +146,7 @@ pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_immediate(&self) -> bool;
fn llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn immediate_llvm_type<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> Type;
fn over_align(&self, ccx: &CrateContext) -> Option<Align>;
fn over_align(&self) -> Option<Align>;
fn llvm_field_index(&self, index: usize) -> u64;
}
@ -251,11 +247,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
}
}
fn over_align(&self, ccx: &CrateContext) -> Option<Align> {
let align = self.align(ccx);
let primitive_align = self.primitive_align(ccx);
if align != primitive_align {
Some(align)
fn over_align(&self) -> Option<Align> {
if self.align != self.primitive_align {
Some(self.align)
} else {
None
}
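As the final hunk shows, `over_align` now simply compares the two cached fields: a layout reports an explicit alignment only when `align` was raised above `primitive_align`, i.e. when repr alignment pushed it past the alignment "before repr alignment is applied" noted in the doc comment removed earlier.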