
Auto merge of #54071 - eddyb:alignsssss, r=oli-obk

rustc_target: separate out an individual alignment quantity type from Align.

Before this PR, `rustc_target::abi::Align` combined two separate concerns: it was a power-of-two alignment quantity, and it also carried the distinction between ABI (required) and preferred alignment, by holding two such quantities.

After this PR, `Align` is only *one* such quantity, and a new `AbiAndPrefAlign` type is introduced to hold the pair of ABI and preferred `Align` quantities.

`Align` is used everywhere a single quantity is necessary and sufficient, simplifying some of the code in codegen/miri, while `AbiAndPrefAlign` is used only in layout computation (to propagate preferred alignment).
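
To make the split concrete, here is a minimal sketch of the resulting shapes (illustrative only: field names and error handling are simplified, not the actual `rustc_target::abi` source):

```rust
// Sketch only: a single power-of-two alignment quantity, stored as its log2.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Align {
    pow2: u8,
}

impl Align {
    pub fn from_bytes(align: u64) -> Result<Align, String> {
        if !align.is_power_of_two() {
            return Err(format!("`{}` is not a power of 2", align));
        }
        Ok(Align { pow2: align.trailing_zeros() as u8 })
    }
    pub fn bytes(self) -> u64 { 1 << self.pow2 }
    pub fn bits(self) -> u64 { self.bytes() * 8 }
}

// Sketch only: the ABI/preferred pair now lives in its own type, used
// just by layout computation.
#[derive(Copy, Clone, PartialEq, Eq)]
pub struct AbiAndPrefAlign {
    pub abi: Align,
    pub pref: Align,
}

impl AbiAndPrefAlign {
    pub fn new(align: Align) -> AbiAndPrefAlign {
        AbiAndPrefAlign { abi: align, pref: align }
    }
}
```

With this shape, the old accessors `align.abi()`/`align.pref()` become field accesses plus a unit conversion, e.g. `align.abi.bytes()`, as the diff below shows throughout.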

r? @oli-obk cc @nagisa @RalfJung @nikomatsakis
bors 2018-11-23 01:02:21 +00:00
commit 6a2d1b4e15
41 changed files with 366 additions and 377 deletions
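
One rename that recurs throughout the diff: `Size::abi_align(align)` becomes `Size::align_to(align.abi)`, taking a single `Align`. As a reference for what that helper computes, a small self-contained sketch (a free function with illustrative names, not the rustc definition):

```rust
// Round `size` up to the next multiple of `align` (a power of two); this is
// the rounding that `Size::align_to` performs for an ABI alignment.
fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    let mask = align - 1;
    (size + mask) & !mask
}

fn main() {
    assert_eq!(align_to(5, 4), 8); // a 5-byte payload at 4-byte alignment occupies 8
    assert_eq!(align_to(8, 4), 8); // already-aligned sizes are unchanged
    assert_eq!(align_to(0, 8), 0); // zero-sized stays zero
}
```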


@@ -104,7 +104,7 @@ impl<Tag, Extra: Default> Allocation<Tag, Extra> {
     }

     pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self {
-        Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap())
+        Allocation::from_bytes(slice, Align::from_bytes(1).unwrap())
     }

     pub fn undef(size: Size, align: Align) -> Self {


@@ -527,7 +527,7 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> {
                 write!(f, "tried to interpret an invalid 32-bit value as a char: {}", c),
             AlignmentCheckFailed { required, has } =>
                 write!(f, "tried to access memory with alignment {}, but alignment {} is required",
-                       has.abi(), required.abi()),
+                       has.bytes(), required.bytes()),
             TypeNotPrimitive(ty) =>
                 write!(f, "expected primitive type, got {}", ty),
             Layout(ref err) =>
@@ -537,8 +537,9 @@ impl<'tcx, O: fmt::Debug> fmt::Debug for EvalErrorKind<'tcx, O> {
             MachineError(ref inner) =>
                 write!(f, "{}", inner),
             IncorrectAllocationInformation(size, size2, align, align2) =>
-                write!(f, "incorrect alloc info: expected size {} and align {}, got size {} and \
-                    align {}", size.bytes(), align.abi(), size2.bytes(), align2.abi()),
+                write!(f, "incorrect alloc info: expected size {} and align {}, \
+                           got size {} and align {}",
+                    size.bytes(), align.bytes(), size2.bytes(), align2.bytes()),
             Panic { ref msg, line, col, ref file } =>
                 write!(f, "the evaluated program panicked at '{}', {}:{}:{}", msg, file, line, col),
             InvalidDiscriminant(val) =>


@ -71,7 +71,7 @@ impl CodeStats {
let info = TypeSizeInfo { let info = TypeSizeInfo {
kind, kind,
type_description: type_desc.to_string(), type_description: type_desc.to_string(),
align: align.abi(), align: align.bytes(),
overall_size: overall_size.bytes(), overall_size: overall_size.bytes(),
packed: packed, packed: packed,
opt_discr_size: opt_discr_size.map(|s| s.bytes()), opt_discr_size: opt_discr_size.map(|s| s.bytes()),


@@ -226,9 +226,10 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             tcx.intern_layout(LayoutDetails::scalar(self, scalar_unit(value)))
         };
         let scalar_pair = |a: Scalar, b: Scalar| {
-            let align = a.value.align(dl).max(b.value.align(dl)).max(dl.aggregate_align);
-            let b_offset = a.value.size(dl).abi_align(b.value.align(dl));
-            let size = (b_offset + b.value.size(dl)).abi_align(align);
+            let b_align = b.value.align(dl);
+            let align = a.value.align(dl).max(b_align).max(dl.aggregate_align);
+            let b_offset = a.value.size(dl).align_to(b_align.abi);
+            let size = (b_offset + b.value.size(dl)).align_to(align.abi);
             LayoutDetails {
                 variants: Variants::Single { index: VariantIdx::new(0) },
                 fields: FieldPlacement::Arbitrary {
@@ -257,10 +258,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             bug!("struct cannot be packed and aligned");
         }

-        let pack = {
-            let pack = repr.pack as u64;
-            Align::from_bytes(pack, pack).unwrap()
-        };
+        let pack = Align::from_bytes(repr.pack as u64).unwrap();

         let mut align = if packed {
             dl.i8_align
@@ -274,7 +272,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {

         let mut optimize = !repr.inhibit_struct_field_reordering_opt();
         if let StructKind::Prefixed(_, align) = kind {
-            optimize &= align.abi() == 1;
+            optimize &= align.bytes() == 1;
         }

         if optimize {
@@ -285,7 +283,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
             };
             let optimizing = &mut inverse_memory_index[..end];
             let field_align = |f: &TyLayout<'_>| {
-                if packed { f.align.min(pack).abi() } else { f.align.abi() }
+                if packed { f.align.abi.min(pack) } else { f.align.abi }
             };
             match kind {
                 StructKind::AlwaysSized |
@@ -312,13 +310,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
         let mut offset = Size::ZERO;

         if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
-            if packed {
-                let prefix_align = prefix_align.min(pack);
-                align = align.max(prefix_align);
-            } else {
-                align = align.max(prefix_align);
-            }
-            offset = prefix_size.abi_align(prefix_align);
+            let prefix_align = if packed {
+                prefix_align.min(pack)
+            } else {
+                prefix_align
+            };
+            align = align.max(AbiAndPrefAlign::new(prefix_align));
+            offset = prefix_size.align_to(prefix_align);
         }

         for &i in &inverse_memory_index {
@@ -333,15 +331,13 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
            }

            // Invariant: offset < dl.obj_size_bound() <= 1<<61
-            if packed {
-                let field_pack = field.align.min(pack);
-                offset = offset.abi_align(field_pack);
-                align = align.max(field_pack);
-            }
-            else {
-                offset = offset.abi_align(field.align);
-                align = align.max(field.align);
-            }
+            let field_align = if packed {
+                field.align.min(AbiAndPrefAlign::new(pack))
+            } else {
+                field.align
+            };
+            offset = offset.align_to(field_align.abi);
+            align = align.max(field_align);

            debug!("univariant offset: {:?} field: {:#?}", offset, field);
            offsets[i as usize] = offset;
@@ -352,7 +348,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {

        if repr.align > 0 {
            let repr_align = repr.align as u64;
-            align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
+            align = align.max(AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
            debug!("univariant repr_align: {:?}", repr_align);
        }
@ -377,7 +373,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
memory_index = inverse_memory_index; memory_index = inverse_memory_index;
} }
let size = min_size.abi_align(align); let size = min_size.align_to(align.abi);
let mut abi = Abi::Aggregate { sized }; let mut abi = Abi::Aggregate { sized };
// Unpack newtype ABIs and find scalar pairs. // Unpack newtype ABIs and find scalar pairs.
@ -394,7 +390,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
(Some((i, field)), None, None) => { (Some((i, field)), None, None) => {
// Field fills the struct and it has a scalar or scalar pair ABI. // Field fills the struct and it has a scalar or scalar pair ABI.
if offsets[i].bytes() == 0 && if offsets[i].bytes() == 0 &&
align.abi() == field.align.abi() && align.abi == field.align.abi &&
size == field.size { size == field.size {
match field.abi { match field.abi {
// For plain scalars, or vectors of them, we can't unpack // For plain scalars, or vectors of them, we can't unpack
@ -648,7 +644,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let size = element.size.checked_mul(count, dl) let size = element.size.checked_mul(count, dl)
.ok_or(LayoutError::SizeOverflow(ty))?; .ok_or(LayoutError::SizeOverflow(ty))?;
let align = dl.vector_align(size); let align = dl.vector_align(size);
let size = size.abi_align(align); let size = size.align_to(align.abi);
tcx.intern_layout(LayoutDetails { tcx.intern_layout(LayoutDetails {
variants: Variants::Single { index: VariantIdx::new(0) }, variants: Variants::Single { index: VariantIdx::new(0) },
@@ -680,10 +676,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                    bug!("Union cannot be packed and aligned");
                }

-                let pack = {
-                    let pack = def.repr.pack as u64;
-                    Align::from_bytes(pack, pack).unwrap()
-                };
+                let pack = Align::from_bytes(def.repr.pack as u64).unwrap();

                let mut align = if packed {
                    dl.i8_align
@@ -694,7 +687,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                if def.repr.align > 0 {
                    let repr_align = def.repr.align as u64;
                    align = align.max(
-                        Align::from_bytes(repr_align, repr_align).unwrap());
+                        AbiAndPrefAlign::new(Align::from_bytes(repr_align).unwrap()));
                }

                let optimize = !def.repr.inhibit_union_abi_opt();
@@ -704,12 +697,12 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                for field in &variants[index] {
                    assert!(!field.is_unsized());

-                    if packed {
-                        let field_pack = field.align.min(pack);
-                        align = align.max(field_pack);
-                    } else {
-                        align = align.max(field.align);
-                    }
+                    let field_align = if packed {
+                        field.align.min(AbiAndPrefAlign::new(pack))
+                    } else {
+                        field.align
+                    };
+                    align = align.max(field_align);

                    // If all non-ZST fields have the same ABI, forward this ABI
                    if optimize && !field.is_zst() {
@ -749,7 +742,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
fields: FieldPlacement::Union(variants[index].len()), fields: FieldPlacement::Union(variants[index].len()),
abi, abi,
align, align,
size: size.abi_align(align) size: size.align_to(align.abi)
})); }));
} }
@@ -964,19 +957,19 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                let mut size = Size::ZERO;

                // We're interested in the smallest alignment, so start large.
-                let mut start_align = Align::from_bytes(256, 256).unwrap();
-                assert_eq!(Integer::for_abi_align(dl, start_align), None);
+                let mut start_align = Align::from_bytes(256).unwrap();
+                assert_eq!(Integer::for_align(dl, start_align), None);

                // repr(C) on an enum tells us to make a (tag, union) layout,
                // so we need to grow the prefix alignment to be at least
                // the alignment of the union. (This value is used both for
                // determining the alignment of the overall enum, and the
                // determining the alignment of the payload after the tag.)
-                let mut prefix_align = min_ity.align(dl);
+                let mut prefix_align = min_ity.align(dl).abi;
                if def.repr.c() {
                    for fields in &variants {
                        for field in fields {
-                            prefix_align = prefix_align.max(field.align);
+                            prefix_align = prefix_align.max(field.align.abi);
                        }
                    }
                }
@ -989,8 +982,8 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
// Find the first field we can't move later // Find the first field we can't move later
// to make room for a larger discriminant. // to make room for a larger discriminant.
for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) { for field in st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) {
if !field.is_zst() || field.align.abi() != 1 { if !field.is_zst() || field.align.abi.bytes() != 1 {
start_align = start_align.min(field.align); start_align = start_align.min(field.align.abi);
break; break;
} }
} }
@@ -1000,7 +993,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                }).collect::<Result<IndexVec<VariantIdx, _>, _>>()?;

                // Align the maximum variant size to the largest alignment.
-                size = size.abi_align(align);
+                size = size.align_to(align.abi);

                if size.bytes() >= dl.obj_size_bound() {
                    return Err(LayoutError::SizeOverflow(ty));
@@ -1036,7 +1029,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                let mut ity = if def.repr.c() || def.repr.int.is_some() {
                    min_ity
                } else {
-                    Integer::for_abi_align(dl, start_align).unwrap_or(min_ity)
+                    Integer::for_align(dl, start_align).unwrap_or(min_ity)
                };

                // If the alignment is not larger than the chosen discriminant size,
@@ -1204,7 +1197,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
        let type_desc = format!("{:?}", layout.ty);
        self.tcx.sess.code_stats.borrow_mut().record_type_size(kind,
                                                               type_desc,
-                                                               layout.align,
+                                                               layout.align.abi,
                                                               layout.size,
                                                               packed,
                                                               opt_discr_size,
@@ -1251,7 +1244,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
                    name: name.to_string(),
                    offset: offset.bytes(),
                    size: field_layout.size.bytes(),
-                    align: field_layout.align.abi(),
+                    align: field_layout.align.abi.bytes(),
                }
            }
        }
@@ -1264,7 +1257,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
            } else {
                session::SizeKind::Exact
            },
-            align: layout.align.abi(),
+            align: layout.align.abi.bytes(),
            size: if min_size.bytes() == 0 {
                layout.size.bytes()
            } else {
@@ -1823,7 +1816,9 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
            Abi::ScalarPair(ref a, ref b) => {
                // HACK(nox): We iter on `b` and then `a` because `max_by_key`
                // returns the last maximum.
-                let niche = iter::once((b, a.value.size(self).abi_align(b.value.align(self))))
+                let niche = iter::once(
+                    (b, a.value.size(self).align_to(b.value.align(self).abi))
+                )
                    .chain(iter::once((a, Size::ZERO)))
                    .filter_map(|(scalar, offset)| scalar_niche(scalar, offset))
                    .max_by_key(|niche| niche.available);
@@ -1994,12 +1989,16 @@ impl_stable_hash_for!(enum ::ty::layout::Primitive {
     Pointer
 });

+impl_stable_hash_for!(struct ::ty::layout::AbiAndPrefAlign {
+    abi,
+    pref
+});
+
 impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
     fn hash_stable<W: StableHasherResult>(&self,
                                           hcx: &mut StableHashingContext<'gcx>,
                                           hasher: &mut StableHasher<W>) {
-        self.abi().hash_stable(hcx, hasher);
-        self.pref().hash_stable(hcx, hasher);
+        self.bytes().hash_stable(hcx, hasher);
     }
 }


@@ -73,7 +73,7 @@ impl ArgAttributesExt for ArgAttributes {
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentAttr(llfn,
                                               idx.as_uint(),
-                                               align.abi() as u32);
+                                               align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
        }
@@ -98,7 +98,7 @@ impl ArgAttributesExt for ArgAttributes {
            if let Some(align) = self.pointee_align {
                llvm::LLVMRustAddAlignmentCallSiteAttr(callsite,
                                                       idx.as_uint(),
-                                                       align.abi() as u32);
+                                                       align.bytes() as u32);
            }
            regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
        }
@@ -204,7 +204,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
            return;
        }
        if self.is_sized_indirect() {
-            OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
+            OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
        } else if self.is_unsized_indirect() {
            bug!("unsized ArgType must be handled through store_fn_arg");
        } else if let PassMode::Cast(cast) = self.mode {
@@ -214,7 +214,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
            if can_store_through_cast_ptr {
                let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
-                bx.store(val, cast_dst, self.layout.align);
+                bx.store(val, cast_dst, self.layout.align.abi);
            } else {
                // The actual return type is a struct, but the ABI
                // adaptation code has cast it into some scalar type. The
@ -242,7 +242,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
// ...and then memcpy it to the intended destination. // ...and then memcpy it to the intended destination.
bx.memcpy( bx.memcpy(
dst.llval, dst.llval,
self.layout.align, self.layout.align.abi,
llscratch, llscratch,
scratch_align, scratch_align,
bx.cx().const_usize(self.layout.size.bytes()), bx.cx().const_usize(self.layout.size.bytes()),
@@ -273,7 +273,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
                OperandValue::Pair(next(), next()).store(bx, dst);
            }
            PassMode::Indirect(_, Some(_)) => {
-                OperandValue::Ref(next(), Some(next()), self.layout.align).store(bx, dst);
+                OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
            }
            PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
                self.store(bx, next(), dst);
@@ -545,7 +545,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
                    adjust_for_rust_scalar(&mut b_attrs,
                                           b,
                                           arg.layout,
-                                           a.value.size(cx).abi_align(b.value.align(cx)),
+                                           a.value.size(cx).align_to(b.value.align(cx).abi),
                                           false);
                    arg.mode = PassMode::Pair(a_attrs, b_attrs);
                    return arg;


@@ -475,7 +475,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                llvm::LLVMBuildAlloca(self.llbuilder, ty,
                                      name.as_ptr())
            };
-            llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }
@@ -494,7 +494,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                llvm::LLVMBuildArrayAlloca(self.llbuilder, ty, len,
                                           name.as_ptr())
            };
-            llvm::LLVMSetAlignment(alloca, align.abi() as c_uint);
+            llvm::LLVMSetAlignment(alloca, align.bytes() as c_uint);
            alloca
        }
    }
@@ -503,7 +503,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        self.count_insn("load");
        unsafe {
            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
-            llvm::LLVMSetAlignment(load, align.abi() as c_uint);
+            llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
            load
        }
    }
@@ -658,7 +658,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
            let align = if flags.contains(MemFlags::UNALIGNED) {
                1
            } else {
-                align.abi() as c_uint
+                align.bytes() as c_uint
            };
            llvm::LLVMSetAlignment(store, align);
            if flags.contains(MemFlags::VOLATILE) {
@@ -893,8 +893,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        let dst = self.pointercast(dst, self.cx().type_i8p());
        let src = self.pointercast(src, self.cx().type_i8p());
        unsafe {
-            llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.abi() as c_uint,
-                                      src, src_align.abi() as c_uint, size, is_volatile);
+            llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
+                                      src, src_align.bytes() as c_uint, size, is_volatile);
        }
    }
@@ -913,8 +913,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        let dst = self.pointercast(dst, self.cx().type_i8p());
        let src = self.pointercast(src, self.cx().type_i8p());
        unsafe {
-            llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.abi() as c_uint,
-                                       src, src_align.abi() as c_uint, size, is_volatile);
+            llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
+                                       src, src_align.bytes() as c_uint, size, is_volatile);
        }
    }
@@ -930,7 +930,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
        let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
        let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
        let ptr = self.pointercast(ptr, self.cx().type_i8p());
-        let align = self.cx().const_u32(align.abi() as u32);
+        let align = self.cx().const_u32(align.bytes() as u32);
        let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
        self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
    }


@@ -357,7 +357,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(self, alloc);
-        let base_addr = self.static_addr_of(init, layout.align, None);
+        let base_addr = self.static_addr_of(init, layout.align.abi, None);

        let llval = unsafe { llvm::LLVMConstInBoundsGEP(
            self.static_bitcast(base_addr, self.type_i8p()),


@@ -94,7 +94,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
    // Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
    // which can force it to be smaller. Rust doesn't support this yet.
    if let Some(min) = cx.sess().target.target.options.min_global_align {
-        match ty::layout::Align::from_bits(min, min) {
+        match Align::from_bits(min) {
            Ok(min) => align = align.max(min),
            Err(err) => {
                cx.sess().err(&format!("invalid minimum global alignment: {}", err));
@@ -102,7 +102,7 @@ fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
        }
    }
    unsafe {
-        llvm::LLVMSetAlignment(gv, align.abi() as u32);
+        llvm::LLVMSetAlignment(gv, align.bytes() as u32);
    }
}
@@ -219,7 +219,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
            unsafe {
                // Upgrade the alignment in cases where the same constant is used with different
                // alignment requirements
-                let llalign = align.abi() as u32;
+                let llalign = align.bytes() as u32;
                if llalign > llvm::LLVMGetAlignment(gv) {
                    llvm::LLVMSetAlignment(gv, llalign);
                }


@@ -323,7 +323,7 @@ fn fixed_vec_metadata(
        llvm::LLVMRustDIBuilderCreateArrayType(
            DIB(cx),
            size.bits(),
-            align.abi_bits() as u32,
+            align.bits() as u32,
            element_type_metadata,
            subscripts)
    };
@@ -465,7 +465,7 @@ fn trait_pointer_metadata(
                                        syntax_pos::DUMMY_SP),
            offset: layout.fields.offset(0),
            size: data_ptr_field.size,
-            align: data_ptr_field.align,
+            align: data_ptr_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
        },
@@ -474,7 +474,7 @@ fn trait_pointer_metadata(
            type_metadata: type_metadata(cx, vtable_field.ty, syntax_pos::DUMMY_SP),
            offset: layout.fields.offset(1),
            size: vtable_field.size,
-            align: vtable_field.align,
+            align: vtable_field.align.abi,
            flags: DIFlags::FlagArtificial,
            discriminant: None,
        },
@@ -787,7 +787,7 @@ fn basic_type_metadata(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll DIType {
            DIB(cx),
            name.as_ptr(),
            size.bits(),
-            align.abi_bits() as u32,
+            align.bits() as u32,
            encoding)
    };
@@ -818,7 +818,7 @@ fn pointer_type_metadata(
            DIB(cx),
            pointee_type_metadata,
            pointer_size.bits(),
-            pointer_align.abi_bits() as u32,
+            pointer_align.bits() as u32,
            name.as_ptr())
    }
}
@@ -985,13 +985,12 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
                f.ident.to_string()
            };
            let field = layout.field(cx, i);
-            let (size, align) = field.size_and_align();
            MemberDescription {
                name,
                type_metadata: type_metadata(cx, field.ty, self.span),
                offset: layout.fields.offset(i),
-                size,
-                align,
+                size: field.size,
+                align: field.align.abi,
                flags: DIFlags::FlagZero,
                discriminant: None,
            }
@ -1109,13 +1108,12 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
-> Vec<MemberDescription<'ll>> { -> Vec<MemberDescription<'ll>> {
self.variant.fields.iter().enumerate().map(|(i, f)| { self.variant.fields.iter().enumerate().map(|(i, f)| {
let field = self.layout.field(cx, i); let field = self.layout.field(cx, i);
let (size, align) = field.size_and_align();
MemberDescription { MemberDescription {
name: f.ident.to_string(), name: f.ident.to_string(),
type_metadata: type_metadata(cx, field.ty, self.span), type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::ZERO, offset: Size::ZERO,
size, size: field.size,
align, align: field.align.abi,
flags: DIFlags::FlagZero, flags: DIFlags::FlagZero,
discriminant: None, discriminant: None,
} }
@@ -1228,7 +1226,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: self.layout.size,
-                        align: self.layout.align,
+                        align: self.layout.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: None,
                    }
@@ -1267,7 +1265,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: self.layout.size,
-                        align: self.layout.align,
+                        align: self.layout.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: Some(self.layout.ty.ty_adt_def().unwrap()
                                               .discriminant_for_variant(cx.tcx, i)
@@ -1336,7 +1334,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: variant.size,
-                        align: variant.align,
+                        align: variant.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: None,
                    }
@@ -1374,7 +1372,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
                        type_metadata: variant_type_metadata,
                        offset: Size::ZERO,
                        size: self.layout.size,
-                        align: self.layout.align,
+                        align: self.layout.align.abi,
                        flags: DIFlags::FlagZero,
                        discriminant: niche_value,
                    }
@@ -1565,7 +1563,7 @@ fn prepare_enum_metadata(
            file_metadata,
            UNKNOWN_LINE_NUMBER,
            discriminant_size.bits(),
-            discriminant_align.abi_bits() as u32,
+            discriminant_align.abi.bits() as u32,
            create_DIArray(DIB(cx), &enumerators_metadata),
            discriminant_base_type_metadata, true)
    };
@@ -1587,8 +1585,6 @@ fn prepare_enum_metadata(
        _ => {}
    }

-    let (enum_type_size, enum_type_align) = layout.size_and_align();
-
    let enum_name = SmallCStr::new(&enum_name);
    let unique_type_id_str = SmallCStr::new(
        debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
@@ -1610,8 +1606,8 @@ fn prepare_enum_metadata(
                enum_name.as_ptr(),
                file_metadata,
                UNKNOWN_LINE_NUMBER,
-                enum_type_size.bits(),
-                enum_type_align.abi_bits() as u32,
+                layout.size.bits(),
+                layout.align.abi.bits() as u32,
                DIFlags::FlagZero,
                None,
                0, // RuntimeLang
@@ -1659,7 +1655,7 @@ fn prepare_enum_metadata(
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
-                    align.abi_bits() as u32,
+                    align.abi.bits() as u32,
                    layout.fields.offset(0).bits(),
                    DIFlags::FlagArtificial,
                    discr_metadata))
@@ -1679,7 +1675,7 @@ fn prepare_enum_metadata(
                    file_metadata,
                    UNKNOWN_LINE_NUMBER,
                    size.bits(),
-                    align.abi_bits() as u32,
+                    align.bits() as u32,
                    layout.fields.offset(0).bits(),
                    DIFlags::FlagArtificial,
                    discr_metadata))
@@ -1695,8 +1691,8 @@ fn prepare_enum_metadata(
                ptr::null_mut(),
                file_metadata,
                UNKNOWN_LINE_NUMBER,
-                enum_type_size.bits(),
-                enum_type_align.abi_bits() as u32,
+                layout.size.bits(),
+                layout.align.abi.bits() as u32,
                DIFlags::FlagZero,
                discriminator_metadata,
                empty_array,
@@ -1712,8 +1708,8 @@ fn prepare_enum_metadata(
            enum_name.as_ptr(),
            file_metadata,
            UNKNOWN_LINE_NUMBER,
-            enum_type_size.bits(),
-            enum_type_align.abi_bits() as u32,
+            layout.size.bits(),
+            layout.align.abi.bits() as u32,
            DIFlags::FlagZero,
            None,
            type_array,
@@ -1807,7 +1803,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
                    unknown_file_metadata(cx),
                    UNKNOWN_LINE_NUMBER,
                    member_description.size.bits(),
-                    member_description.align.abi_bits() as u32,
+                    member_description.align.bits() as u32,
                    member_description.offset.bits(),
                    match member_description.discriminant {
                        None => None,
@@ -1855,7 +1851,7 @@ fn create_struct_stub(
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            struct_size.bits(),
-            struct_align.abi_bits() as u32,
+            struct_align.bits() as u32,
            DIFlags::FlagZero,
            None,
            empty_array,
@@ -1893,7 +1889,7 @@ fn create_union_stub(
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            union_size.bits(),
-            union_align.abi_bits() as u32,
+            union_align.bits() as u32,
            DIFlags::FlagZero,
            Some(empty_array),
            0, // RuntimeLang
@@ -1962,7 +1958,7 @@ pub fn create_global_var_metadata(
                                                    is_local_to_unit,
                                                    global,
                                                    None,
-                                                    global_align.abi() as u32,
+                                                    global_align.bytes() as u32,
        );
    }
}
@@ -2000,7 +1996,7 @@ pub fn create_vtable_metadata(
            unknown_file_metadata(cx),
            UNKNOWN_LINE_NUMBER,
            Size::ZERO.bits(),
-            cx.tcx.data_layout.pointer_align.abi_bits() as u32,
+            cx.tcx.data_layout.pointer_align.abi.bits() as u32,
            DIFlags::FlagArtificial,
            None,
            empty_array,


@@ -201,7 +201,7 @@ impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                cx.sess().opts.optimize != config::OptLevel::No,
                DIFlags::FlagZero,
                argument_index,
-                align.abi() as u32,
+                align.bytes() as u32,
            )
        };
        source_loc::set_debug_location(self,


@@ -110,7 +110,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
        let name = &*tcx.item_name(def_id).as_str();

        let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
-        let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align);
+        let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);

        let simple = get_simple_intrinsic(self.cx(), name);
        let llval = match name {
@@ -158,7 +158,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
            }
            "min_align_of" => {
                let tp_ty = substs.type_at(0);
-                self.cx().const_usize(self.cx().align_of(tp_ty).abi())
+                self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
            }
            "min_align_of_val" => {
                let tp_ty = substs.type_at(0);
@@ -167,12 +167,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                        glue::size_and_align_of_dst(self, tp_ty, Some(meta));
                    llalign
                } else {
-                    self.cx().const_usize(self.cx().align_of(tp_ty).abi())
+                    self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
                }
            }
            "pref_align_of" => {
                let tp_ty = substs.type_at(0);
-                self.cx().const_usize(self.cx().align_of(tp_ty).pref())
+                self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes())
            }
            "type_name" => {
                let tp_ty = substs.type_at(0);
@@ -261,7 +261,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                let align = if name == "unaligned_volatile_load" {
                    1
                } else {
-                    self.cx().align_of(tp_ty).abi() as u32
+                    self.cx().align_of(tp_ty).bytes() as u32
                };
                unsafe {
                    llvm::LLVMSetAlignment(load, align);
@@ -815,7 +815,7 @@ fn try_intrinsic(
) {
    if bx.cx().sess().no_landing_pads() {
        bx.call(func, &[data], None);
-        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
    } else if wants_msvc_seh(bx.cx().sess()) {
        codegen_msvc_try(bx, func, data, local_ptr, dest);
@@ -890,7 +890,7 @@ fn codegen_msvc_try(
    //
    // More information can be found in libstd's seh.rs implementation.
    let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
-    let ptr_align = bx.tcx().data_layout.pointer_align;
+    let ptr_align = bx.tcx().data_layout.pointer_align.abi;
    let slot = bx.alloca(i64p, "slot", ptr_align);
    bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
@@ -906,7 +906,7 @@ fn codegen_msvc_try(
        let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
        let addr = catchpad.load(slot, ptr_align);

-        let i64_align = bx.tcx().data_layout.i64_align;
+        let i64_align = bx.tcx().data_layout.i64_align.abi;
        let arg1 = catchpad.load(addr, i64_align);
        let val1 = bx.cx().const_i32(1);
        let gep1 = catchpad.inbounds_gep(addr, &[val1]);
@@ -923,7 +923,7 @@ fn codegen_msvc_try(
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bx.tcx().data_layout.i32_align;
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
@@ -982,7 +982,7 @@ fn codegen_gnu_try(
        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
        catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
        let ptr = catch.extract_value(vals, 0);
-        let ptr_align = bx.tcx().data_layout.pointer_align;
+        let ptr_align = bx.tcx().data_layout.pointer_align.abi;
        let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
        catch.store(ptr, bitcast, ptr_align);
        catch.ret(bx.cx().const_i32(1));
@@ -991,7 +991,7 @@ fn codegen_gnu_try(
    // Note that no invoke is used here because by definition this function
    // can't panic (that's what it's catching).
    let ret = bx.call(llfn, &[func, data, local_ptr], None);
-    let i32_align = bx.tcx().data_layout.i32_align;
+    let i32_align = bx.tcx().data_layout.i32_align.abi;
    bx.store(ret, dest, i32_align);
}
@@ -1436,7 +1436,7 @@ fn generic_simd_intrinsic(
        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().type_i32();
-        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
+        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
@@ -1536,7 +1536,7 @@ fn generic_simd_intrinsic(
        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().type_i32();
-        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
+        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);

        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {


@@ -80,7 +80,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
    match layout.fields {
        layout::FieldPlacement::Union(_) => {
-            let fill = cx.type_padding_filler(layout.size, layout.align);
+            let fill = cx.type_padding_filler(layout.size, layout.align.abi);
            let packed = false;
            match name {
                None => {
@@ -120,23 +120,23 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
    let mut packed = false;
    let mut offset = Size::ZERO;
-    let mut prev_effective_align = layout.align;
+    let mut prev_effective_align = layout.align.abi;
    let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
    for i in layout.fields.index_by_increasing_offset() {
        let target_offset = layout.fields.offset(i as usize);
        let field = layout.field(cx, i);
-        let effective_field_align = layout.align
-            .min(field.align)
+        let effective_field_align = layout.align.abi
+            .min(field.align.abi)
            .restrict_for_offset(target_offset);
-        packed |= effective_field_align.abi() < field.align.abi();
+        packed |= effective_field_align < field.align.abi;

        debug!("struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
                effective_field_align: {}",
-               i, field, offset, target_offset, effective_field_align.abi());
+               i, field, offset, target_offset, effective_field_align.bytes());
        assert!(target_offset >= offset);
        let padding = target_offset - offset;
        let padding_align = prev_effective_align.min(effective_field_align);
-        assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
+        assert_eq!(offset.align_to(padding_align) + padding, target_offset);
        result.push(cx.type_padding_filler(padding, padding_align));
        debug!(" padding before: {:?}", padding);
@@ -151,7 +151,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
    }
    let padding = layout.size - offset;
    let padding_align = prev_effective_align;
-    assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
+    assert_eq!(offset.align_to(padding_align) + padding, layout.size);
    debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
           padding, offset, layout.size);
    result.push(cx.type_padding_filler(padding, padding_align));
@@ -166,7 +166,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,

impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
-        self.layout_of(ty).align
+        self.layout_of(ty).align.abi
    }

    pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
@@ -174,7 +174,8 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
    }

    pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
-        self.layout_of(ty).size_and_align()
+        let layout = self.layout_of(ty);
+        (layout.size, layout.align.abi)
    }
}
@@ -332,7 +333,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
            layout::Pointer => {
                // If we know the alignment, pick something better than i8.
                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.type_pointee_for_abi_align(pointee.align)
+                    cx.type_pointee_for_align(pointee.align)
                } else {
                    cx.type_i8()
                };
@@ -376,7 +377,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
        let offset = if index == 0 {
            Size::ZERO
        } else {
-            a.value.size(cx).abi_align(b.value.align(cx))
+            a.value.size(cx).align_to(b.value.align(cx).abi)
        };
        self.scalar_llvm_type_at(cx, scalar, offset)
    }


@@ -25,14 +25,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    t: Ty<'tcx>,
    info: Option<Bx::Value>
) -> (Bx::Value, Bx::Value) {
-    debug!("calculate size of DST: {}; with lost info: {:?}",
-           t, info);
-    if bx.cx().type_is_sized(t) {
-        let (size, align) = bx.cx().layout_of(t).size_and_align();
-        debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
-               t, info, size, align);
-        let size = bx.cx().const_usize(size.bytes());
-        let align = bx.cx().const_usize(align.abi());
+    let layout = bx.cx().layout_of(t);
+    debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
+           t, info, layout);
+    if !layout.is_unsized() {
+        let size = bx.cx().const_usize(layout.size.bytes());
+        let align = bx.cx().const_usize(layout.align.abi.bytes());
        return (size, align);
    }
    match t.sty {
@@ -42,24 +40,22 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
            (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
        }
        ty::Slice(_) | ty::Str => {
-            let unit = t.sequence_element_type(bx.tcx());
+            let unit = layout.field(bx.cx(), 0);
            // The info in this case is the length of the str, so the size is that
            // times the unit size.
-            let (size, align) = bx.cx().layout_of(unit).size_and_align();
-            (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
-             bx.cx().const_usize(align.abi()))
+            (bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())),
+             bx.cx().const_usize(unit.align.abi.bytes()))
        }
        _ => {
            // First get the size of all statically known fields.
            // Don't use size_of because it also rounds up to alignment, which we
            // want to avoid, as the unsized field's alignment could be smaller.
            assert!(!t.is_simd());
-            let layout = bx.cx().layout_of(t);
            debug!("DST {} layout: {:?}", t, layout);

            let i = layout.fields.count() - 1;
            let sized_size = layout.fields.offset(i).bytes();
-            let sized_align = layout.align.abi();
+            let sized_align = layout.align.abi.bytes();
            debug!("DST {} statically sized prefix size: {} align: {}",
                   t, sized_size, sized_align);
            let sized_size = bx.cx().const_usize(sized_size);


@ -41,7 +41,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
llvtable, llvtable,
bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty)) bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
); );
let ptr_align = bx.tcx().data_layout.pointer_align; let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]); let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
let ptr = bx.load(gep, ptr_align); let ptr = bx.load(gep, ptr_align);
bx.nonnull_metadata(ptr); bx.nonnull_metadata(ptr);
@@ -59,7 +59,7 @@ impl<'a, 'tcx: 'a> VirtualIndex {
        debug!("get_int({:?}, {:?})", llvtable, self);

        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
-        let usize_align = bx.tcx().data_layout.pointer_align;
+        let usize_align = bx.tcx().data_layout.pointer_align.abi;
        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
        let ptr = bx.load(gep, usize_align);
        // Vtable loads are invariant
@@ -100,19 +100,19 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
        })
    });

-    let (size, align) = cx.layout_of(ty).size_and_align();
+    let layout = cx.layout_of(ty);
    // /////////////////////////////////////////////////////////////////////////////////////////////
    // If you touch this code, be sure to also make the corresponding changes to
    // `get_vtable` in rust_mir/interpret/traits.rs
    // /////////////////////////////////////////////////////////////////////////////////////////////
    let components: Vec<_> = [
        cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)),
-        cx.const_usize(size.bytes()),
-        cx.const_usize(align.abi())
+        cx.const_usize(layout.size.bytes()),
+        cx.const_usize(layout.align.abi.bytes())
    ].iter().cloned().chain(methods).collect();

    let vtable_const = cx.const_struct(&components, false);
-    let align = cx.data_layout().pointer_align;
+    let align = cx.data_layout().pointer_align.abi;
    let vtable = cx.static_addr_of(vtable_const, align, Some("vtable"));

    cx.create_vtable_metadata(ty, vtable);


@@ -280,7 +280,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        scratch.llval
                    }
                    Ref(llval, _, align) => {
-                        assert_eq!(align.abi(), op.layout.align.abi(),
+                        assert_eq!(align, op.layout.align.abi,
                                   "return place is unaligned!");
                        llval
                    }
@@ -288,7 +288,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
                    bx.cx().cast_backend_type(&cast_ty)
                ));
-                bx.load(addr, self.fn_ty.ret.layout.align)
+                bx.load(addr, self.fn_ty.ret.layout.align.abi)
            }
        };
        bx.ret(llval);
@@ -386,9 +386,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
        let filename = bx.cx().const_str_slice(filename);
        let line = bx.cx().const_u32(loc.line as u32);
        let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
-        let align = tcx.data_layout.aggregate_align
-            .max(tcx.data_layout.i32_align)
-            .max(tcx.data_layout.pointer_align);
+        let align = tcx.data_layout.aggregate_align.abi
+            .max(tcx.data_layout.i32_align.abi)
+            .max(tcx.data_layout.pointer_align.abi);

        // Put together the arguments to the panic entry point.
        let (lang_item, args) = match *msg {
@@ -522,9 +522,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            let filename = bx.cx().const_str_slice(filename);
            let line = bx.cx().const_u32(loc.line as u32);
            let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
-            let align = tcx.data_layout.aggregate_align
-                .max(tcx.data_layout.i32_align)
-                .max(tcx.data_layout.pointer_align);
+            let align = tcx.data_layout.aggregate_align.abi
+                .max(tcx.data_layout.i32_align.abi)
+                .max(tcx.data_layout.pointer_align.abi);

            let str = format!(
                "Attempted to instantiate uninhabited type {} using mem::{}",
@@ -800,12 +800,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                        (scratch.llval, scratch.align, true)
                    }
                    _ => {
-                        (op.immediate_or_packed_pair(bx), arg.layout.align, false)
+                        (op.immediate_or_packed_pair(bx), arg.layout.align.abi, false)
                    }
                }
            }
            Ref(llval, _, align) => {
-                if arg.is_indirect() && align.abi() < arg.layout.align.abi() {
+                if arg.is_indirect() && align < arg.layout.align.abi {
                    // `foo(packed.large_field)`. We can't pass the (unaligned) field directly. I
                    // think that ATM (Rust 1.16) we only pass temporaries, but we shouldn't
                    // have scary latent bugs around.
@@ -826,7 +826,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
                    bx.cx().cast_backend_type(&ty))
                );
-                llval = bx.load(addr, align.min(arg.layout.align));
+                llval = bx.load(addr, align.min(arg.layout.align.abi));
            } else {
                // We can't use `PlaceRef::load` here because the argument
                // may have a type we don't treat as immediate, but the ABI
@@ -1006,7 +1006,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
            self.codegen_place(bx, dest)
        };
        if fn_ret.is_indirect() {
-            if dest.align.abi() < dest.layout.align.abi() {
+            if dest.align < dest.layout.align.abi {
                // Currently, MIR code generation does not create calls
                // that store directly to fields of packed structs (in
                // fact, the calls it creates write only to temps),
@ -1062,7 +1062,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let src = self.codegen_operand(bx, src); let src = self.codegen_operand(bx, src);
let llty = bx.cx().backend_type(src.layout); let llty = bx.cx().backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty)); let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
let align = src.layout.align.min(dst.layout.align); let align = src.layout.align.abi.min(dst.align);
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align)); src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
} }

View file

@ -304,7 +304,7 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() { if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local); debug!("alloc: {:?} (return place) -> place", local);
let llretptr = fx.cx.get_param(llfn, 0); let llretptr = fx.cx.get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align)) LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align.abi))
} else if memory_locals.contains(local) { } else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local); debug!("alloc: {:?} -> place", local);
if layout.is_unsized() { if layout.is_unsized() {
@ -555,7 +555,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint); let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name); bx.set_value_name(llarg, &name);
llarg_idx += 1; llarg_idx += 1;
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align) PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
} else if arg.is_unsized_indirect() { } else if arg.is_unsized_indirect() {
// As the storage for the indirect argument lives during // As the storage for the indirect argument lives during
// the whole function call, we just copy the fat pointer. // the whole function call, we just copy the fat pointer.

View file

@ -152,7 +152,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
llval: llptr, llval: llptr,
llextra, llextra,
layout, layout,
align: layout.align, align: layout.align.abi,
} }
} }
@ -228,7 +228,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
OperandValue::Immediate(a_llval) OperandValue::Immediate(a_llval)
} else { } else {
assert_eq!(offset, a.value.size(bx.cx()) assert_eq!(offset, a.value.size(bx.cx())
.abi_align(b.value.align(bx.cx()))); .align_to(b.value.align(bx.cx()).abi));
assert_eq!(field.size, b.value.size(bx.cx())); assert_eq!(field.size, b.value.size(bx.cx()));
OperandValue::Immediate(b_llval) OperandValue::Immediate(b_llval)
} }
@ -348,8 +348,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
}; };
// FIXME: choose an appropriate alignment, or use dynamic align somehow // FIXME: choose an appropriate alignment, or use dynamic align somehow
let max_align = Align::from_bits(128, 128).unwrap(); let max_align = Align::from_bits(128).unwrap();
let min_align = Align::from_bits(8, 8).unwrap(); let min_align = Align::from_bits(8).unwrap();
// Allocate an appropriate region on the stack, and copy the value into it // Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra)); let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
@ -470,7 +470,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx.load_operand(PlaceRef::new_sized( bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))), bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
layout, layout,
layout.align, layout.align.abi,
)) ))
}) })
} }

View file

@ -58,8 +58,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
) -> Self { ) -> Self {
debug!("alloca({:?}: {:?})", name, layout); debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place"); assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align); let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
Self::new_sized(tmp, layout, layout.align) Self::new_sized(tmp, layout, layout.align.abi)
} }
/// Returns a place for an indirect reference to an unsized place. /// Returns a place for an indirect reference to an unsized place.
@ -109,7 +109,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
self.llval self.llval
} else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi { } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
// Offsets have to match either first or second field. // Offsets have to match either first or second field.
assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx()))); assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
bx.struct_gep(self.llval, 1) bx.struct_gep(self.llval, 1)
} else { } else {
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix)) bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
@ -143,7 +143,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
if def.repr.packed() { if def.repr.packed() {
// FIXME(eddyb) generalize the adjustment when we // FIXME(eddyb) generalize the adjustment when we
// start supporting packing to larger alignments. // start supporting packing to larger alignments.
assert_eq!(self.layout.align.abi(), 1); assert_eq!(self.layout.align.abi.bytes(), 1);
return simple(); return simple();
} }
} }
@ -308,9 +308,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
// Issue #34427: As workaround for LLVM bug on ARM, // Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value. // use memset of 0 before assigning niche value.
let fill_byte = bx.cx().const_u8(0); let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align(); let size = bx.cx().const_usize(self.layout.size.bytes());
let size = bx.cx().const_usize(size.bytes()); bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
} }
let niche = self.project_field(bx, 0); let niche = self.project_field(bx, 0);
@ -419,13 +418,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let llval = bx.cx().const_undef( let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(bx.cx().backend_type(layout)) bx.cx().type_ptr_to(bx.cx().backend_type(layout))
); );
PlaceRef::new_sized(llval, layout, layout.align) PlaceRef::new_sized(llval, layout, layout.align.abi)
} }
} }
} }
mir::Place::Static(box mir::Static { def_id, ty }) => { mir::Place::Static(box mir::Static { def_id, ty }) => {
let layout = cx.layout_of(self.monomorphize(&ty)); let layout = cx.layout_of(self.monomorphize(&ty));
PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align) PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align.abi)
}, },
mir::Place::Projection(box mir::Projection { mir::Place::Projection(box mir::Projection {
ref base, ref base,

View file

@ -496,10 +496,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} }
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => { mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty); let content_ty = self.monomorphize(&content_ty);
let (size, align) = bx.cx().layout_of(content_ty).size_and_align(); let content_layout = bx.cx().layout_of(content_ty);
let llsize = bx.cx().const_usize(size.bytes()); let llsize = bx.cx().const_usize(content_layout.size.bytes());
let llalign = bx.cx().const_usize(align.abi()); let llalign = bx.cx().const_usize(content_layout.align.abi.bytes());
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty)); let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = bx.cx().backend_type(box_layout); let llty_ptr = bx.cx().backend_type(box_layout);

View file

@ -15,10 +15,10 @@ use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods; use super::type_::ArgTypeMethods;
use super::HasCodegen; use super::HasCodegen;
use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope}; use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use std::ffi::CStr;
use mir::operand::OperandRef; use mir::operand::OperandRef;
use mir::place::PlaceRef; use mir::place::PlaceRef;
use rustc::ty::layout::{Align, Size}; use rustc::ty::layout::{Align, Size};
use std::ffi::CStr;
use MemFlags; use MemFlags;
use std::borrow::Cow; use std::borrow::Cow;

View file

@ -120,16 +120,16 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
} }
} }
fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type { fn type_pointee_for_align(&self, align: Align) -> Self::Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align. // FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = layout::Integer::approximate_abi_align(self, align); let ity = layout::Integer::approximate_align(self, align);
self.type_from_integer(ity) self.type_from_integer(ity)
} }
/// Return a LLVM type that has at most the required alignment, /// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array. /// and exactly the required size, as a best-effort padding array.
fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type { fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type {
let unit = layout::Integer::approximate_abi_align(self, align); let unit = layout::Integer::approximate_align(self, align);
let size = size.bytes(); let size = size.bytes();
let unit_size = unit.size().bytes(); let unit_size = unit.size().bytes();
assert_eq!(size % unit_size, 0); assert_eq!(size % unit_size, 0);

View file

@ -129,7 +129,7 @@ pub fn op_to_const<'tcx>(
assert!(meta.is_none()); assert!(meta.is_none());
let ptr = ptr.to_ptr()?; let ptr = ptr.to_ptr()?;
let alloc = ecx.memory.get(ptr.alloc_id)?; let alloc = ecx.memory.get(ptr.alloc_id)?;
assert!(alloc.align.abi() >= align.abi()); assert!(alloc.align >= align);
assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes()); assert!(alloc.bytes.len() as u64 - ptr.offset.bytes() >= op.layout.size.bytes());
let mut alloc = alloc.clone(); let mut alloc = alloc.clone();
alloc.align = align; alloc.align = align;
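
Since `Align` now derives `Ord` on a single exponent, the assert above can compare alignments directly. A quick sketch with a simplified mirror type:

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Align { pow2: u8 }

fn main() {
    let alloc_align = Align { pow2: 3 }; // an 8-byte-aligned allocation
    let required = Align { pow2: 2 };    // a 4-byte requirement
    assert!(alloc_align >= required);    // derived Ord compares the exponents
}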

View file

@ -316,7 +316,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
layout: TyLayout<'tcx>, layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Size, Align)>> { ) -> EvalResult<'tcx, Option<(Size, Align)>> {
if !layout.is_unsized() { if !layout.is_unsized() {
return Ok(Some(layout.size_and_align())); return Ok(Some((layout.size, layout.align.abi)));
} }
match layout.ty.sty { match layout.ty.sty {
ty::Adt(..) | ty::Tuple(..) => { ty::Adt(..) | ty::Tuple(..) => {
@ -328,7 +328,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
trace!("DST layout: {:?}", layout); trace!("DST layout: {:?}", layout);
let sized_size = layout.fields.offset(layout.fields.count() - 1); let sized_size = layout.fields.offset(layout.fields.count() - 1);
let sized_align = layout.align; let sized_align = layout.align.abi;
trace!( trace!(
"DST {} statically sized prefix size: {:?} align: {:?}", "DST {} statically sized prefix size: {:?} align: {:?}",
layout.ty, layout.ty,
@ -381,7 +381,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
// //
// `(size + (align-1)) & -align` // `(size + (align-1)) & -align`
Ok(Some((size.abi_align(align), align))) Ok(Some((size.align_to(align), align)))
} }
ty::Dynamic(..) => { ty::Dynamic(..) => {
let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?; let vtable = metadata.expect("dyn trait fat ptr must have vtable").to_ptr()?;
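
The `(size + (align-1)) & -align` comment above is the usual round-up-to-alignment trick; for a power-of-two `align`, `-align` in two's complement equals `!(align - 1)`. A worked sketch with plain integers (not the compiler's `Size`/`Align` types):

fn align_to(size: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two());
    let mask = align - 1;
    (size + mask) & !mask
}

fn main() {
    assert_eq!(align_to(5, 4), 8); // 5 rounds up to the next multiple of 4
    assert_eq!(align_to(8, 4), 8); // already aligned: unchanged
    assert_eq!(align_to(0, 8), 0);
}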
@ -391,8 +391,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
ty::Slice(_) | ty::Str => { ty::Slice(_) | ty::Str => {
let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?; let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?;
let (elem_size, align) = layout.field(self, 0)?.size_and_align(); let elem = layout.field(self, 0)?;
Ok(Some((elem_size * len, align))) Ok(Some((elem.size * len, elem.align.abi)))
} }
ty::Foreign(_) => { ty::Foreign(_) => {
@ -636,7 +636,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
let (ptr, align) = mplace.to_scalar_ptr_align(); let (ptr, align) = mplace.to_scalar_ptr_align();
match ptr { match ptr {
Scalar::Ptr(ptr) => { Scalar::Ptr(ptr) => {
write!(msg, " by align({}) ref:", align.abi()).unwrap(); write!(msg, " by align({}) ref:", align.bytes()).unwrap();
allocs.push(ptr.alloc_id); allocs.push(ptr.alloc_id);
} }
ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(), ptr => write!(msg, " by integral ref: {:?}", ptr).unwrap(),
@ -665,7 +665,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
Place::Ptr(mplace) => { Place::Ptr(mplace) => {
match mplace.ptr { match mplace.ptr {
Scalar::Ptr(ptr) => { Scalar::Ptr(ptr) => {
trace!("by align({}) ref:", mplace.align.abi()); trace!("by align({}) ref:", mplace.align.bytes());
self.memory.dump_alloc(ptr.alloc_id); self.memory.dump_alloc(ptr.alloc_id);
} }
ptr => trace!(" integral by ref: {:?}", ptr), ptr => trace!(" integral by ref: {:?}", ptr),

View file

@ -60,7 +60,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
match intrinsic_name { match intrinsic_name {
"min_align_of" => { "min_align_of" => {
let elem_ty = substs.type_at(0); let elem_ty = substs.type_at(0);
let elem_align = self.layout_of(elem_ty)?.align.abi(); let elem_align = self.layout_of(elem_ty)?.align.abi.bytes();
let align_val = Scalar::from_uint(elem_align, dest.layout.size); let align_val = Scalar::from_uint(elem_align, dest.layout.size);
self.write_scalar(align_val, dest)?; self.write_scalar(align_val, dest)?;
} }
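
For reference, this intrinsic is what `mem::align_of` reports: the ABI (required) alignment, not the preferred one. A quick check, assuming typical target alignments:

use std::mem;

struct S { _a: u8, _b: u32 }

fn main() {
    assert_eq!(mem::align_of::<u32>(), 4); // ABI alignment of u32 on common targets
    // Absent repr attributes, a struct takes the max of its field alignments:
    assert_eq!(mem::align_of::<S>(), mem::align_of::<u32>());
}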

View file

@ -268,18 +268,18 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
} }
}; };
// Check alignment // Check alignment
if alloc_align.abi() < required_align.abi() { if alloc_align.bytes() < required_align.bytes() {
return err!(AlignmentCheckFailed { return err!(AlignmentCheckFailed {
has: alloc_align, has: alloc_align,
required: required_align, required: required_align,
}); });
} }
if offset % required_align.abi() == 0 { if offset % required_align.bytes() == 0 {
Ok(()) Ok(())
} else { } else {
let has = offset % required_align.abi(); let has = offset % required_align.bytes();
err!(AlignmentCheckFailed { err!(AlignmentCheckFailed {
has: Align::from_bytes(has, has).unwrap(), has: Align::from_bytes(has).unwrap(),
required: required_align, required: required_align,
}) })
} }
@ -449,14 +449,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
} }
// Could also be a fn ptr or extern static // Could also be a fn ptr or extern static
match self.tcx.alloc_map.lock().get(id) { match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()), Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1).unwrap()),
Some(AllocType::Static(did)) => { Some(AllocType::Static(did)) => {
// The only way `get` couldn't have worked here is if this is an extern static // The only way `get` couldn't have worked here is if this is an extern static
assert!(self.tcx.is_foreign_item(did)); assert!(self.tcx.is_foreign_item(did));
// Use size and align of the type // Use size and align of the type
let ty = self.tcx.type_of(did); let ty = self.tcx.type_of(did);
let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap(); let layout = self.tcx.layout_of(ParamEnv::empty().and(ty)).unwrap();
(layout.size, layout.align) (layout.size, layout.align.abi)
} }
_ => { _ => {
// Must be a deallocated pointer // Must be a deallocated pointer
@ -521,7 +521,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
"{}({} bytes, alignment {}){}", "{}({} bytes, alignment {}){}",
msg, msg,
alloc.bytes.len(), alloc.bytes.len(),
alloc.align.abi(), alloc.align.bytes(),
extra extra
); );
@ -863,7 +863,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
allow_ptr_and_undef: bool, allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap(); let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 { if size.bytes() == 0 {
self.check_align(ptr, align)?; self.check_align(ptr, align)?;
return Ok(()); return Ok(());
@ -881,7 +881,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> { pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap(); let align = Align::from_bytes(1).unwrap();
if size.bytes() == 0 { if size.bytes() == 0 {
self.check_align(ptr, align)?; self.check_align(ptr, align)?;
return Ok(&[]); return Ok(&[]);
@ -891,7 +891,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> { pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap(); let align = Align::from_bytes(1).unwrap();
if src.is_empty() { if src.is_empty() {
self.check_align(ptr, align)?; self.check_align(ptr, align)?;
return Ok(()); return Ok(());
@ -908,7 +908,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
count: Size count: Size
) -> EvalResult<'tcx> { ) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL // Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap(); let align = Align::from_bytes(1).unwrap();
if count.bytes() == 0 { if count.bytes() == 0 {
self.check_align(ptr, align)?; self.check_align(ptr, align)?;
return Ok(()); return Ok(());
@ -1035,7 +1035,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
16 => layout::I128, 16 => layout::I128,
_ => bug!("bad integer size: {}", size.bytes()), _ => bug!("bad integer size: {}", size.bytes()),
}; };
ity.align(self) ity.align(self).abi
} }
} }

View file

@ -285,7 +285,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let (a, b) = (&a.value, &b.value); let (a, b) = (&a.value, &b.value);
let (a_size, b_size) = (a.size(self), b.size(self)); let (a_size, b_size) = (a.size(self), b.size(self));
let a_ptr = ptr; let a_ptr = ptr;
let b_offset = a_size.abi_align(b.align(self)); let b_offset = a_size.align_to(b.align(self).abi);
assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use assert!(b_offset.bytes() > 0); // we later use the offset to test which field to use
let b_ptr = ptr.offset(b_offset, self)?.into(); let b_ptr = ptr.offset(b_offset, self)?.into();
let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?; let a_val = self.memory.read_scalar(a_ptr, ptr_align, a_size)?;
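
The second scalar of a pair sits at the first scalar's size rounded up to the second's ABI alignment, which is why the code can assert that `b_offset` is positive. A standalone sketch with hypothetical sizes:

fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) & !(align - 1) // align must be a power of two
}

fn main() {
    // e.g. a pair shaped like (u8, u32): first scalar 1 byte, second aligned to 4
    let (a_size, b_align) = (1u64, 4u64);
    let b_offset = align_to(a_size, b_align);
    assert_eq!(b_offset, 4);
}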

View file

@ -127,7 +127,7 @@ impl<Tag> MemPlace<Tag> {
/// Produces a Place that will error if attempted to be read from or written to /// Produces a Place that will error if attempted to be read from or written to
#[inline(always)] #[inline(always)]
pub fn null(cx: &impl HasDataLayout) -> Self { pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap()) Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1).unwrap())
} }
#[inline(always)] #[inline(always)]
@ -167,8 +167,8 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self { pub fn dangling(layout: TyLayout<'tcx>, cx: &impl HasDataLayout) -> Self {
MPlaceTy { MPlaceTy {
mplace: MemPlace::from_scalar_ptr( mplace: MemPlace::from_scalar_ptr(
Scalar::from_uint(layout.align.abi(), cx.pointer_size()), Scalar::from_uint(layout.align.abi.bytes(), cx.pointer_size()),
layout.align layout.align.abi
), ),
layout layout
} }
@ -176,7 +176,7 @@ impl<'tcx, Tag> MPlaceTy<'tcx, Tag> {
#[inline] #[inline]
fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self { fn from_aligned_ptr(ptr: Pointer<Tag>, layout: TyLayout<'tcx>) -> Self {
MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align), layout } MPlaceTy { mplace: MemPlace::from_ptr(ptr, layout.align.abi), layout }
} }
#[inline] #[inline]
@ -287,7 +287,7 @@ where
let mplace = MemPlace { let mplace = MemPlace {
ptr: val.to_scalar_ptr()?, ptr: val.to_scalar_ptr()?,
align: layout.align, align: layout.align.abi,
meta: val.to_meta()?, meta: val.to_meta()?,
}; };
Ok(MPlaceTy { mplace, layout }) Ok(MPlaceTy { mplace, layout })
@ -356,11 +356,11 @@ where
// FIXME: Once we have made decisions for how to handle size and alignment // FIXME: Once we have made decisions for how to handle size and alignment
// of `extern type`, this should be adapted. It is just a temporary hack // of `extern type`, this should be adapted. It is just a temporary hack
// to get some code to work that probably ought to work. // to get some code to work that probably ought to work.
field_layout.align, field_layout.align.abi,
None => None =>
bug!("Cannot compute offset for extern type field at non-0 offset"), bug!("Cannot compute offset for extern type field at non-0 offset"),
}; };
(base.meta, offset.abi_align(align)) (base.meta, offset.align_to(align))
} else { } else {
// base.meta could be present; we might be accessing a sized field of an unsized // base.meta could be present; we might be accessing a sized field of an unsized
// struct. // struct.
@ -730,7 +730,7 @@ where
} }
self.memory.write_scalar( self.memory.write_scalar(
ptr, ptr_align.min(dest.layout.align), scalar, dest.layout.size ptr, ptr_align.min(dest.layout.align.abi), scalar, dest.layout.size
) )
} }
Immediate::ScalarPair(a_val, b_val) => { Immediate::ScalarPair(a_val, b_val) => {
@ -740,8 +740,8 @@ where
dest.layout) dest.layout)
}; };
let (a_size, b_size) = (a.size(self), b.size(self)); let (a_size, b_size) = (a.size(self), b.size(self));
let (a_align, b_align) = (a.align(self), b.align(self)); let (a_align, b_align) = (a.align(self).abi, b.align(self).abi);
let b_offset = a_size.abi_align(b_align); let b_offset = a_size.align_to(b_align);
let b_ptr = ptr.offset(b_offset, self)?.into(); let b_ptr = ptr.offset(b_offset, self)?.into();
// It is tempting to verify `b_offset` against `layout.fields.offset(1)`, // It is tempting to verify `b_offset` against `layout.fields.offset(1)`,
@ -899,7 +899,7 @@ where
// FIXME: What should we do here? We should definitely also tag! // FIXME: What should we do here? We should definitely also tag!
Ok(MPlaceTy::dangling(layout, self)) Ok(MPlaceTy::dangling(layout, self))
} else { } else {
let ptr = self.memory.allocate(layout.size, layout.align, kind)?; let ptr = self.memory.allocate(layout.size, layout.align.abi, kind)?;
let ptr = M::tag_new_allocation(self, ptr, kind)?; let ptr = M::tag_new_allocation(self, ptr, kind)?;
Ok(MPlaceTy::from_aligned_ptr(ptr, layout)) Ok(MPlaceTy::from_aligned_ptr(ptr, layout))
} }
@ -998,7 +998,8 @@ where
if cfg!(debug_assertions) { if cfg!(debug_assertions) {
let (size, align) = self.read_size_and_align_from_vtable(vtable)?; let (size, align) = self.read_size_and_align_from_vtable(vtable)?;
assert_eq!(size, layout.size); assert_eq!(size, layout.size);
assert_eq!(align.abi(), layout.align.abi()); // only ABI alignment is preserved // only ABI alignment is preserved
assert_eq!(align, layout.align.abi);
} }
let mplace = MPlaceTy { let mplace = MPlaceTy {

View file

@ -401,7 +401,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
// cannot use the shim here, because that will only result in infinite recursion // cannot use the shim here, because that will only result in infinite recursion
ty::InstanceDef::Virtual(_, idx) => { ty::InstanceDef::Virtual(_, idx) => {
let ptr_size = self.pointer_size(); let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align; let ptr_align = self.tcx.data_layout.pointer_align.abi;
let ptr = self.deref_operand(args[0])?; let ptr = self.deref_operand(args[0])?;
let vtable = ptr.vtable()?; let vtable = ptr.vtable()?;
let fn_ptr = self.memory.read_ptr_sized( let fn_ptr = self.memory.read_ptr_sized(

View file

@ -42,10 +42,10 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
let layout = self.layout_of(ty)?; let layout = self.layout_of(ty)?;
assert!(!layout.is_unsized(), "can't create a vtable for an unsized type"); assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
let size = layout.size.bytes(); let size = layout.size.bytes();
let align = layout.align.abi(); let align = layout.align.abi.bytes();
let ptr_size = self.pointer_size(); let ptr_size = self.pointer_size();
let ptr_align = self.tcx.data_layout.pointer_align; let ptr_align = self.tcx.data_layout.pointer_align.abi;
// ///////////////////////////////////////////////////////////////////////////////////////// // /////////////////////////////////////////////////////////////////////////////////////////
// If you touch this code, be sure to also make the corresponding changes to // If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in rust_codegen_llvm/meth.rs // `get_vtable` in rust_codegen_llvm/meth.rs
@ -87,7 +87,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable: Pointer<M::PointerTag>, vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> { ) -> EvalResult<'tcx, (ty::Instance<'tcx>, ty::Ty<'tcx>)> {
// we don't care about the pointee type, we just want a pointer // we don't care about the pointee type, we just want a pointer
let pointer_align = self.tcx.data_layout.pointer_align; let pointer_align = self.tcx.data_layout.pointer_align.abi;
let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?; let drop_fn = self.memory.read_ptr_sized(vtable, pointer_align)?.to_ptr()?;
let drop_instance = self.memory.get_fn(drop_fn)?; let drop_instance = self.memory.get_fn(drop_fn)?;
trace!("Found drop fn: {:?}", drop_instance); trace!("Found drop fn: {:?}", drop_instance);
@ -103,13 +103,13 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable: Pointer<M::PointerTag>, vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, Align)> { ) -> EvalResult<'tcx, (Size, Align)> {
let pointer_size = self.pointer_size(); let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align; let pointer_align = self.tcx.data_layout.pointer_align.abi;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)? let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
.to_bits(pointer_size)? as u64; .to_bits(pointer_size)? as u64;
let align = self.memory.read_ptr_sized( let align = self.memory.read_ptr_sized(
vtable.offset(pointer_size * 2, self)?, vtable.offset(pointer_size * 2, self)?,
pointer_align pointer_align
)?.to_bits(pointer_size)? as u64; )?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap())) Ok((Size::from_bytes(size), Align::from_bytes(align).unwrap()))
} }
} }
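
The vtable header read here is three pointer-sized slots: the drop-in-place function, then size, then ABI alignment. A sketch of the offsets, with pointer size as a parameter (mirroring what this code reads; see also `get_vtable` in rust_codegen_llvm/meth.rs per the comment above):

fn vtable_header_offsets(ptr_size: u64) -> (u64, u64, u64) {
    // slot 0: drop_in_place fn pointer
    // slot 1: size of the concrete type
    // slot 2: ABI alignment of the concrete type, in bytes
    (0, ptr_size, 2 * ptr_size)
}

fn main() {
    let (drop_fn, size, align) = vtable_header_offsets(8); // a 64-bit target
    assert_eq!((drop_fn, size, align), (0, 8, 16));
}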

View file

@ -355,7 +355,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// for the purpose of validity, consider foreign types to have // for the purpose of validity, consider foreign types to have
// alignment and size determined by the layout (size will be 0, // alignment and size determined by the layout (size will be 0,
// alignment should take attributes into account). // alignment should take attributes into account).
.unwrap_or_else(|| layout.size_and_align()); .unwrap_or_else(|| (layout.size, layout.align.abi));
match self.ecx.memory.check_align(ptr, align) { match self.ecx.memory.check_align(ptr, align) {
Ok(_) => {}, Ok(_) => {},
Err(err) => { Err(err) => {
@ -463,7 +463,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// for function pointers. // for function pointers.
let non_null = let non_null =
self.ecx.memory.check_align( self.ecx.memory.check_align(
Scalar::Ptr(ptr), Align::from_bytes(1, 1).unwrap() Scalar::Ptr(ptr), Align::from_bytes(1).unwrap()
).is_ok() || ).is_ok() ||
self.ecx.memory.get_fn(ptr).is_ok(); self.ecx.memory.get_fn(ptr).is_ok();
if !non_null { if !non_null {

View file

@ -30,7 +30,7 @@ pub fn is_disaligned<'a, 'tcx, L>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
let ty = place.ty(local_decls, tcx).to_ty(tcx); let ty = place.ty(local_decls, tcx).to_ty(tcx);
match tcx.layout_raw(param_env.and(ty)) { match tcx.layout_raw(param_env.and(ty)) {
Ok(layout) if layout.align.abi() == 1 => { Ok(layout) if layout.align.abi.bytes() == 1 => {
// if the alignment is 1, the type can't be further // if the alignment is 1, the type can't be further
// disaligned. // disaligned.
debug!("is_disaligned({:?}) - align = 1", place); debug!("is_disaligned({:?}) - align = 1", place);

View file

@ -93,7 +93,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
} }
} }
let align = arg.layout.align.abi(); let align = arg.layout.align.abi.bytes();
let total = arg.layout.size; let total = arg.layout.size;
arg.cast_to(Uniform { arg.cast_to(Uniform {
unit: if align <= 4 { Reg::i32() } else { Reg::i64() }, unit: if align <= 4 { Reg::i32() } else { Reg::i64() },

View file

@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{ {
let dl = cx.data_layout(); let dl = cx.data_layout();
let size = arg.layout.size; let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() { if arg.layout.is_aggregate() {
arg.cast_to(Uniform { arg.cast_to(Uniform {
unit: Reg::i32(), unit: Reg::i32(),
total: size total: size
}); });
if !offset.is_abi_aligned(align) { if !offset.is_aligned(align) {
arg.pad_with(Reg::i32()); arg.pad_with(Reg::i32());
} }
} else { } else {
arg.extend_integer_width_to(32); arg.extend_integer_width_to(32);
} }
*offset = offset.abi_align(align) + size.abi_align(align); *offset = offset.align_to(align) + size.align_to(align);
} }
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>) pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)
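
The offset bookkeeping in `classify_arg_ty` pads the running stack offset to the clamped alignment before the argument, then advances by the rounded-up size. A worked sketch with hypothetical argument sizes (plain integers, not the real `ArgType` machinery):

fn align_to(x: u64, align: u64) -> u64 { (x + align - 1) & !(align - 1) }

fn main() {
    let mut offset = 0u64;
    // Hypothetical aggregate args as (size, align clamped to i32..i64) in bytes.
    for &(size, align) in &[(6u64, 4u64), (16, 8)] {
        if offset % align != 0 {
            // here the real code inserts padding via arg.pad_with(Reg::i32())
        }
        offset = align_to(offset, align) + align_to(size, align);
    }
    assert_eq!(offset, 24); // 6 rounds up to 8; the 16-byte arg starts at 8
}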

View file

@ -118,9 +118,9 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
// We only care about aligned doubles // We only care about aligned doubles
if let abi::Abi::Scalar(ref scalar) = field.abi { if let abi::Abi::Scalar(ref scalar) = field.abi {
if let abi::Float(abi::FloatTy::F64) = scalar.value { if let abi::Float(abi::FloatTy::F64) = scalar.value {
if offset.is_abi_aligned(dl.f64_align) { if offset.is_aligned(dl.f64_align.abi) {
// Insert enough integers to cover [last_offset, offset) // Insert enough integers to cover [last_offset, offset)
assert!(last_offset.is_abi_aligned(dl.f64_align)); assert!(last_offset.is_aligned(dl.f64_align.abi));
for _ in 0..((offset - last_offset).bits() / 64) for _ in 0..((offset - last_offset).bits() / 64)
.min((prefix.len() - prefix_index) as u64) { .min((prefix.len() - prefix_index) as u64) {

View file

@ -142,23 +142,23 @@ impl Reg {
match self.kind { match self.kind {
RegKind::Integer => { RegKind::Integer => {
match self.size.bits() { match self.size.bits() {
1 => dl.i1_align, 1 => dl.i1_align.abi,
2..=8 => dl.i8_align, 2..=8 => dl.i8_align.abi,
9..=16 => dl.i16_align, 9..=16 => dl.i16_align.abi,
17..=32 => dl.i32_align, 17..=32 => dl.i32_align.abi,
33..=64 => dl.i64_align, 33..=64 => dl.i64_align.abi,
65..=128 => dl.i128_align, 65..=128 => dl.i128_align.abi,
_ => panic!("unsupported integer: {:?}", self) _ => panic!("unsupported integer: {:?}", self)
} }
} }
RegKind::Float => { RegKind::Float => {
match self.size.bits() { match self.size.bits() {
32 => dl.f32_align, 32 => dl.f32_align.abi,
64 => dl.f64_align, 64 => dl.f64_align.abi,
_ => panic!("unsupported float: {:?}", self) _ => panic!("unsupported float: {:?}", self)
} }
} }
RegKind::Vector => dl.vector_align(self.size) RegKind::Vector => dl.vector_align(self.size).abi,
} }
} }
} }
@ -227,13 +227,13 @@ impl CastTarget {
pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size { pub fn size<C: HasDataLayout>(&self, cx: &C) -> Size {
(self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64) (self.prefix_chunk * self.prefix.iter().filter(|x| x.is_some()).count() as u64)
.abi_align(self.rest.align(cx)) + self.rest.total .align_to(self.rest.align(cx)) + self.rest.total
} }
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align { pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
self.prefix.iter() self.prefix.iter()
.filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx))) .filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)), .fold(cx.data_layout().aggregate_align.abi.max(self.rest.align(cx)),
|acc, align| acc.max(align)) |acc, align| acc.max(align))
} }
} }
@ -369,7 +369,7 @@ impl<'a, Ty> ArgType<'a, Ty> {
attrs.pointee_size = self.layout.size; attrs.pointee_size = self.layout.size;
// FIXME(eddyb) We should be doing this, but at least on // FIXME(eddyb) We should be doing this, but at least on
// i686-pc-windows-msvc, it results in wrong stack offsets. // i686-pc-windows-msvc, it results in wrong stack offsets.
// attrs.pointee_align = Some(self.layout.align); // attrs.pointee_align = Some(self.layout.align.abi);
let extra_attrs = if self.layout.is_unsized() { let extra_attrs = if self.layout.is_unsized() {
Some(ArgAttributes::new()) Some(ArgAttributes::new())

View file

@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{ {
let dl = cx.data_layout(); let dl = cx.data_layout();
let size = arg.layout.size; let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() { if arg.layout.is_aggregate() {
arg.cast_to(Uniform { arg.cast_to(Uniform {
unit: Reg::i32(), unit: Reg::i32(),
total: size total: size
}); });
if !offset.is_abi_aligned(align) { if !offset.is_aligned(align) {
arg.pad_with(Reg::i32()); arg.pad_with(Reg::i32());
} }
} else { } else {
arg.extend_integer_width_to(32); arg.extend_integer_width_to(32);
} }
*offset = offset.abi_align(align) + size.abi_align(align); *offset = offset.align_to(align) + size.align_to(align);
} }
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>) pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)

View file

@ -13,7 +13,7 @@
// need to be fixed when PowerPC vector support is added. // need to be fixed when PowerPC vector support is added.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform}; use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods}; use abi::{Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec; use spec::HasTargetSpec;
#[derive(Debug, Clone, Copy, PartialEq)] #[derive(Debug, Clone, Copy, PartialEq)]
@ -120,8 +120,8 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
} else { } else {
// Aggregates larger than a doubleword should be padded // Aggregates larger than a doubleword should be padded
// at the tail to fill out a whole number of doublewords. // at the tail to fill out a whole number of doublewords.
let align = Align::from_bits(64, 64).unwrap(); let reg_i64 = Reg::i64();
(Reg::i64(), size.abi_align(align)) (reg_i64, size.align_to(reg_i64.align(cx)))
}; };
arg.cast_to(Uniform { arg.cast_to(Uniform {

View file

@ -27,21 +27,21 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<Ty>, offset: &mut Size)
{ {
let dl = cx.data_layout(); let dl = cx.data_layout();
let size = arg.layout.size; let size = arg.layout.size;
let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align); let align = arg.layout.align.max(dl.i32_align).min(dl.i64_align).abi;
if arg.layout.is_aggregate() { if arg.layout.is_aggregate() {
arg.cast_to(Uniform { arg.cast_to(Uniform {
unit: Reg::i32(), unit: Reg::i32(),
total: size total: size
}); });
if !offset.is_abi_aligned(align) { if !offset.is_aligned(align) {
arg.pad_with(Reg::i32()); arg.pad_with(Reg::i32());
} }
} else { } else {
arg.extend_integer_width_to(32); arg.extend_integer_width_to(32);
} }
*offset = offset.abi_align(align) + size.abi_align(align); *offset = offset.align_to(align) + size.align_to(align);
} }
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>) pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<Ty>)

View file

@ -41,7 +41,7 @@ fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy, where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{ {
if !off.is_abi_aligned(layout.align) { if !off.is_aligned(layout.align.abi) {
if !layout.is_zst() { if !layout.is_zst() {
return Err(Memory); return Err(Memory);
} }

View file

@ -13,7 +13,7 @@ pub use self::Primitive::*;
use spec::Target; use spec::Target;
use std::{cmp, fmt}; use std::fmt;
use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive}; use std::ops::{Add, Deref, Sub, Mul, AddAssign, Range, RangeInclusive};
use rustc_data_structures::indexed_vec::{Idx, IndexVec}; use rustc_data_structures::indexed_vec::{Idx, IndexVec};
@ -24,42 +24,44 @@ pub mod call;
/// for a target, which contains everything needed to compute layouts. /// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout { pub struct TargetDataLayout {
pub endian: Endian, pub endian: Endian,
pub i1_align: Align, pub i1_align: AbiAndPrefAlign,
pub i8_align: Align, pub i8_align: AbiAndPrefAlign,
pub i16_align: Align, pub i16_align: AbiAndPrefAlign,
pub i32_align: Align, pub i32_align: AbiAndPrefAlign,
pub i64_align: Align, pub i64_align: AbiAndPrefAlign,
pub i128_align: Align, pub i128_align: AbiAndPrefAlign,
pub f32_align: Align, pub f32_align: AbiAndPrefAlign,
pub f64_align: Align, pub f64_align: AbiAndPrefAlign,
pub pointer_size: Size, pub pointer_size: Size,
pub pointer_align: Align, pub pointer_align: AbiAndPrefAlign,
pub aggregate_align: Align, pub aggregate_align: AbiAndPrefAlign,
/// Alignments for vector types. /// Alignments for vector types.
pub vector_align: Vec<(Size, Align)>, pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
pub instruction_address_space: u32, pub instruction_address_space: u32,
} }
impl Default for TargetDataLayout { impl Default for TargetDataLayout {
/// Creates an instance of `TargetDataLayout`. /// Creates an instance of `TargetDataLayout`.
fn default() -> TargetDataLayout { fn default() -> TargetDataLayout {
let align = |bits| Align::from_bits(bits).unwrap();
TargetDataLayout { TargetDataLayout {
endian: Endian::Big, endian: Endian::Big,
i1_align: Align::from_bits(8, 8).unwrap(), i1_align: AbiAndPrefAlign::new(align(8)),
i8_align: Align::from_bits(8, 8).unwrap(), i8_align: AbiAndPrefAlign::new(align(8)),
i16_align: Align::from_bits(16, 16).unwrap(), i16_align: AbiAndPrefAlign::new(align(16)),
i32_align: Align::from_bits(32, 32).unwrap(), i32_align: AbiAndPrefAlign::new(align(32)),
i64_align: Align::from_bits(32, 64).unwrap(), i64_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
i128_align: Align::from_bits(32, 64).unwrap(), i128_align: AbiAndPrefAlign { abi: align(32), pref: align(64) },
f32_align: Align::from_bits(32, 32).unwrap(), f32_align: AbiAndPrefAlign::new(align(32)),
f64_align: Align::from_bits(64, 64).unwrap(), f64_align: AbiAndPrefAlign::new(align(64)),
pointer_size: Size::from_bits(64), pointer_size: Size::from_bits(64),
pointer_align: Align::from_bits(64, 64).unwrap(), pointer_align: AbiAndPrefAlign::new(align(64)),
aggregate_align: Align::from_bits(0, 64).unwrap(), aggregate_align: AbiAndPrefAlign { abi: align(0), pref: align(64) },
vector_align: vec![ vector_align: vec![
(Size::from_bits(64), Align::from_bits(64, 64).unwrap()), (Size::from_bits(64), AbiAndPrefAlign::new(align(64))),
(Size::from_bits(128), Align::from_bits(128, 128).unwrap()) (Size::from_bits(128), AbiAndPrefAlign::new(align(128))),
], ],
instruction_address_space: 0, instruction_address_space: 0,
} }
@ -94,13 +96,19 @@ impl TargetDataLayout {
if s.is_empty() { if s.is_empty() {
return Err(format!("missing alignment for `{}` in \"data-layout\"", cause)); return Err(format!("missing alignment for `{}` in \"data-layout\"", cause));
} }
let abi = parse_bits(s[0], "alignment", cause)?; let align_from_bits = |bits| {
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?; Align::from_bits(bits).map_err(|err| {
Align::from_bits(abi, pref).map_err(|err| {
format!("invalid alignment for `{}` in \"data-layout\": {}", format!("invalid alignment for `{}` in \"data-layout\": {}",
cause, err) cause, err)
}) })
}; };
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
Ok(AbiAndPrefAlign {
abi: align_from_bits(abi)?,
pref: align_from_bits(pref)?,
})
};
let mut dl = TargetDataLayout::default(); let mut dl = TargetDataLayout::default();
let mut i128_align_src = 64; let mut i128_align_src = 64;
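
A hedged sketch of how an alignment token in an LLVM "data-layout" string maps onto the abi/pref pair; the token grammar here is an assumption based on common specs such as "i64:32:64" (abi 32 bits, pref 64 bits; pref defaults to abi when omitted):

fn parse_align_token(spec: &str) -> Result<(u64, u64), String> {
    let bits: Vec<u64> = spec
        .split(':')
        .skip(1) // skip the type part, e.g. "i64"
        .map(|s| s.parse().map_err(|e| format!("bad bits `{}`: {}", s, e)))
        .collect::<Result<_, _>>()?;
    match bits.as_slice() {
        [abi] => Ok((*abi, *abi)),
        [abi, pref] => Ok((*abi, *pref)),
        _ => Err(format!("bad alignment spec `{}`", spec)),
    }
}

fn main() {
    assert_eq!(parse_align_token("i64:32:64").unwrap(), (32, 64));
    assert_eq!(parse_align_token("f32:32").unwrap(), (32, 32));
}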
@ -205,7 +213,7 @@ impl TargetDataLayout {
} }
} }
pub fn vector_align(&self, vec_size: Size) -> Align { pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
for &(size, align) in &self.vector_align { for &(size, align) in &self.vector_align {
if size == vec_size { if size == vec_size {
return align; return align;
@ -213,8 +221,7 @@ impl TargetDataLayout {
} }
// Default to natural alignment, which is what LLVM does. // Default to natural alignment, which is what LLVM does.
// That is, use the size, rounded up to a power of 2. // That is, use the size, rounded up to a power of 2.
let align = vec_size.bytes().next_power_of_two(); AbiAndPrefAlign::new(Align::from_bytes(vec_size.bytes().next_power_of_two()).unwrap())
Align::from_bytes(align, align).unwrap()
} }
} }
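
So a vector size with no explicit entry defaults to its size rounded up to a power of two, matching LLVM. A quick sketch:

fn default_vector_align_bytes(vec_size_bytes: u64) -> u64 {
    vec_size_bytes.next_power_of_two()
}

fn main() {
    assert_eq!(default_vector_align_bytes(12), 16); // e.g. a 96-bit vector
    assert_eq!(default_vector_align_bytes(16), 16);
}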
@ -270,14 +277,14 @@ impl Size {
} }
#[inline] #[inline]
pub fn abi_align(self, align: Align) -> Size { pub fn align_to(self, align: Align) -> Size {
let mask = align.abi() - 1; let mask = align.bytes() - 1;
Size::from_bytes((self.bytes() + mask) & !mask) Size::from_bytes((self.bytes() + mask) & !mask)
} }
#[inline] #[inline]
pub fn is_abi_aligned(self, align: Align) -> bool { pub fn is_aligned(self, align: Align) -> bool {
let mask = align.abi() - 1; let mask = align.bytes() - 1;
self.bytes() & mask == 0 self.bytes() & mask == 0
} }
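
A sanity-check sketch for the mask arithmetic used by `align_to` and `is_aligned`: for power-of-two alignments, the mask form agrees with the naive divide-and-round-up:

fn main() {
    for align in [1u64, 2, 4, 8, 16] {
        let mask = align - 1;
        for x in 0..64u64 {
            let masked = (x + mask) & !mask;
            let naive = ((x + align - 1) / align) * align;
            assert_eq!(masked, naive);                 // align_to agreement
            assert_eq!(x & mask == 0, x % align == 0); // is_aligned agreement
        }
    }
}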
@ -358,78 +365,45 @@ impl AddAssign for Size {
} }
} }
/// Alignment of a type in bytes, both ABI-mandated and preferred. /// Alignment of a type in bytes (always a power of two).
/// Each field is a power of two, giving the alignment a maximum value #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, RustcEncodable, RustcDecodable)]
/// of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to a
/// maximum capacity of 2<sup>29</sup> or 536870912.
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Align { pub struct Align {
abi_pow2: u8, pow2: u8,
pref_pow2: u8,
} }
impl Align { impl Align {
pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> { pub fn from_bits(bits: u64) -> Result<Align, String> {
Align::from_bytes(Size::from_bits(abi).bytes(), Align::from_bytes(Size::from_bits(bits).bytes())
Size::from_bits(pref).bytes())
} }
pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> { pub fn from_bytes(align: u64) -> Result<Align, String> {
let log2 = |align: u64| {
// Treat an alignment of 0 bytes like 1-byte alignment. // Treat an alignment of 0 bytes like 1-byte alignment.
if align == 0 { if align == 0 {
return Ok(0); return Ok(Align { pow2: 0 });
} }
let mut bytes = align; let mut bytes = align;
let mut pow: u8 = 0; let mut pow2: u8 = 0;
while (bytes & 1) == 0 { while (bytes & 1) == 0 {
pow += 1; pow2 += 1;
bytes >>= 1; bytes >>= 1;
} }
if bytes != 1 { if bytes != 1 {
Err(format!("`{}` is not a power of 2", align)) return Err(format!("`{}` is not a power of 2", align));
} else if pow > 29 {
Err(format!("`{}` is too large", align))
} else {
Ok(pow)
} }
}; if pow2 > 29 {
return Err(format!("`{}` is too large", align));
Ok(Align {
abi_pow2: log2(abi)?,
pref_pow2: log2(pref)?,
})
} }
pub fn abi(self) -> u64 { Ok(Align { pow2 })
1 << self.abi_pow2
} }
pub fn pref(self) -> u64 { pub fn bytes(self) -> u64 {
1 << self.pref_pow2 1 << self.pow2
} }
pub fn abi_bits(self) -> u64 { pub fn bits(self) -> u64 {
self.abi() * 8 self.bytes() * 8
}
pub fn pref_bits(self) -> u64 {
self.pref() * 8
}
pub fn min(self, other: Align) -> Align {
Align {
abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2),
}
}
pub fn max(self, other: Align) -> Align {
Align {
abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2),
}
} }
/// Compute the best alignment possible for the given offset /// Compute the best alignment possible for the given offset
@ -437,10 +411,8 @@ impl Align {
/// ///
/// NB: for an offset of `0`, this happens to return `2^64`. /// NB: for an offset of `0`, this happens to return `2^64`.
pub fn max_for_offset(offset: Size) -> Align { pub fn max_for_offset(offset: Size) -> Align {
let pow2 = offset.bytes().trailing_zeros() as u8;
Align { Align {
abi_pow2: pow2, pow2: offset.bytes().trailing_zeros() as u8,
pref_pow2: pow2,
} }
} }
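
`max_for_offset` keys off the trailing zero bits: a value at offset `o` can be assumed aligned to the largest power of two dividing `o`. A sketch over plain bytes (sidestepping the offset-0 case noted in the doc comment, where the exponent would be 64):

fn max_align_for_offset_bytes(offset: u64) -> u64 {
    assert!(offset > 0); // offset 0 would mean 2^64, which overflows u64
    1u64 << offset.trailing_zeros()
}

fn main() {
    assert_eq!(max_align_for_offset_bytes(24), 8); // 24 = 8 * 3
    assert_eq!(max_align_for_offset_bytes(7), 1);
}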
@ -451,6 +423,36 @@ impl Align {
} }
} }
/// A pair of alignments, ABI-mandated and preferred.
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct AbiAndPrefAlign {
pub abi: Align,
pub pref: Align,
}
impl AbiAndPrefAlign {
pub fn new(align: Align) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: align,
pref: align,
}
}
pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: self.abi.min(other.abi),
pref: self.pref.min(other.pref),
}
}
pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi: self.abi.max(other.abi),
pref: self.pref.max(other.pref),
}
}
}
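
Layout computation threads the pair around; everything else picks one member and then works with plain `Align` operations, as in the codegen hunks earlier (`aggregate_align.abi.max(...)`). A standalone sketch of the split, with simplified mirror types rather than the compiler's own:

#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Debug)]
struct Align { pow2: u8 }

impl Align {
    fn from_bytes(b: u64) -> Align {
        assert!(b.is_power_of_two());
        Align { pow2: b.trailing_zeros() as u8 }
    }
    fn bytes(self) -> u64 { 1 << self.pow2 }
}

#[derive(Copy, Clone, Debug)]
struct AbiAndPrefAlign { abi: Align, pref: Align }

fn main() {
    // e.g. a 32-bit x86-like i64: required on 4 bytes, preferred on 8.
    let i64_align = AbiAndPrefAlign { abi: Align::from_bytes(4), pref: Align::from_bytes(8) };
    let i32_align = AbiAndPrefAlign { abi: Align::from_bytes(4), pref: Align::from_bytes(4) };
    let required = i32_align.abi.max(i64_align.abi); // one quantity, plain Ord::max
    assert_eq!(required.bytes(), 4);
    assert_eq!(i64_align.pref.bytes(), 8); // pref only matters during layout
}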
/// Integers, also used for enum discriminants. /// Integers, also used for enum discriminants.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub enum Integer { pub enum Integer {
@ -472,7 +474,7 @@ impl Integer {
} }
} }
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align { pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
let dl = cx.data_layout(); let dl = cx.data_layout();
match self { match self {
@ -507,12 +509,11 @@ impl Integer {
} }
/// Find the smallest integer with the given alignment. /// Find the smallest integer with the given alignment.
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Option<Integer> { pub fn for_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Option<Integer> {
let dl = cx.data_layout(); let dl = cx.data_layout();
let wanted = align.abi();
for &candidate in &[I8, I16, I32, I64, I128] { for &candidate in &[I8, I16, I32, I64, I128] {
if wanted == candidate.align(dl).abi() && wanted == candidate.size().bytes() { if wanted == candidate.align(dl).abi && wanted.bytes() == candidate.size().bytes() {
return Some(candidate); return Some(candidate);
} }
} }
@ -520,13 +521,12 @@ impl Integer {
} }
/// Find the largest integer with the given alignment or less. /// Find the largest integer with the given alignment or less.
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Integer { pub fn approximate_align<C: HasDataLayout>(cx: &C, wanted: Align) -> Integer {
let dl = cx.data_layout(); let dl = cx.data_layout();
let wanted = align.abi();
// FIXME(eddyb) maybe include I128 in the future, when it works everywhere. // FIXME(eddyb) maybe include I128 in the future, when it works everywhere.
for &candidate in &[I64, I32, I16] { for &candidate in &[I64, I32, I16] {
if wanted >= candidate.align(dl).abi() && wanted >= candidate.size().bytes() { if wanted >= candidate.align(dl).abi && wanted.bytes() >= candidate.size().bytes() {
return candidate; return candidate;
} }
} }
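
`approximate_align` scans from large to small, returning the first integer whose ABI alignment and size both fit within the wanted alignment. A sketch with hypothetical per-type values (the real ones come from the target data layout):

fn approximate_align(wanted_bytes: u64) -> &'static str {
    // (name, abi_align_bytes, size_bytes): hypothetical, data-layout dependent
    for &(name, align, size) in &[("I64", 8u64, 8u64), ("I32", 4, 4), ("I16", 2, 2)] {
        if wanted_bytes >= align && wanted_bytes >= size {
            return name;
        }
    }
    "I8"
}

fn main() {
    assert_eq!(approximate_align(8), "I64");
    assert_eq!(approximate_align(4), "I32");
    assert_eq!(approximate_align(1), "I8");
}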
@ -597,7 +597,7 @@ impl<'a, 'tcx> Primitive {
} }
} }
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align { pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
let dl = cx.data_layout(); let dl = cx.data_layout();
match self { match self {
@ -868,7 +868,7 @@ pub struct LayoutDetails {
pub variants: Variants, pub variants: Variants,
pub fields: FieldPlacement, pub fields: FieldPlacement,
pub abi: Abi, pub abi: Abi,
pub align: Align, pub align: AbiAndPrefAlign,
pub size: Size pub size: Size
} }
@ -949,8 +949,4 @@ impl<'a, Ty> TyLayout<'a, Ty> {
Abi::Aggregate { sized } => sized && self.size.bytes() == 0 Abi::Aggregate { sized } => sized && self.size.bytes() == 0
} }
} }
pub fn size_and_align(&self) -> (Size, Align) {
(self.size, self.align)
}
} }

View file

@ -1779,7 +1779,7 @@ fn check_transparent<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, sp: Span, def_id: De
// We are currently checking the type this field came from, so it must be local // We are currently checking the type this field came from, so it must be local
let span = tcx.hir.span_if_local(field.did).unwrap(); let span = tcx.hir.span_if_local(field.did).unwrap();
let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false); let zst = layout.map(|layout| layout.is_zst()).unwrap_or(false);
let align1 = layout.map(|layout| layout.align.abi() == 1).unwrap_or(false); let align1 = layout.map(|layout| layout.align.abi.bytes() == 1).unwrap_or(false);
(span, zst, align1) (span, zst, align1)
}); });