rustc_target: rename abi::Align to AbiAndPrefAlign.

Eduard-Mihai Burtescu 2018-09-08 22:14:55 +03:00
parent 780658a464
commit d56e892085
26 changed files with 220 additions and 218 deletions
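
In short: the alignment type in `rustc_target::abi` still carries both an ABI-mandated and a preferred alignment (each stored as a power-of-two exponent), but it is now named `AbiAndPrefAlign`, and every use site swaps the type name. A minimal standalone sketch of the renamed type, simplified from the hunks below (not the compiler's exact source; the 2^29 cap and full error handling are abbreviated):

// Standalone sketch of the renamed type; field and method names follow the
// diff below, but this is a simplified illustration, not rustc's code.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub struct AbiAndPrefAlign {
    abi_pow2: u8,
    pref_pow2: u8,
}

impl AbiAndPrefAlign {
    /// Build from byte alignments; each must be a power of two (0 is treated as 1).
    pub fn from_bytes(abi: u64, pref: u64) -> Result<AbiAndPrefAlign, String> {
        let log2 = |align: u64| -> Result<u8, String> {
            if align == 0 {
                return Ok(0);
            }
            if !align.is_power_of_two() {
                return Err(format!("`{}` is not a power of 2", align));
            }
            Ok(align.trailing_zeros() as u8)
        };
        Ok(AbiAndPrefAlign { abi_pow2: log2(abi)?, pref_pow2: log2(pref)? })
    }

    /// ABI-mandated alignment in bytes.
    pub fn abi(self) -> u64 { 1u64 << self.abi_pow2 }

    /// Preferred alignment in bytes.
    pub fn pref(self) -> u64 { 1u64 << self.pref_pow2 }
}

fn main() {
    // Call sites change only in the type name, e.g.
    // `Align::from_bytes(8, 8)` becomes `AbiAndPrefAlign::from_bytes(8, 8)`.
    let a = AbiAndPrefAlign::from_bytes(8, 16).unwrap();
    assert_eq!((a.abi(), a.pref()), (8, 16));
}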


@ -12,7 +12,7 @@
use super::{Pointer, EvalResult, AllocId};
use ty::layout::{Size, Align};
use ty::layout::{Size, AbiAndPrefAlign};
use syntax::ast::Mutability;
use std::iter;
use mir;
@ -40,7 +40,7 @@ pub struct Allocation<Tag=(),Extra=()> {
/// Denotes undefined memory. Reading from undefined memory is forbidden in miri
pub undef_mask: UndefMask,
/// The alignment of the allocation to detect unaligned reads.
pub align: Align,
pub align: AbiAndPrefAlign,
/// Whether the allocation is mutable.
/// Also used by codegen to determine if a static should be put into mutable memory,
/// which happens for `static mut` and `static` with interior mutability.
@ -90,7 +90,7 @@ impl AllocationExtra<()> for () {}
impl<Tag, Extra: Default> Allocation<Tag, Extra> {
/// Creates a read-only allocation initialized by the given bytes
pub fn from_bytes(slice: &[u8], align: Align) -> Self {
pub fn from_bytes(slice: &[u8], align: AbiAndPrefAlign) -> Self {
let mut undef_mask = UndefMask::new(Size::ZERO);
undef_mask.grow(Size::from_bytes(slice.len() as u64), true);
Self {
@ -104,10 +104,10 @@ impl<Tag, Extra: Default> Allocation<Tag, Extra> {
}
pub fn from_byte_aligned_bytes(slice: &[u8]) -> Self {
Allocation::from_bytes(slice, Align::from_bytes(1, 1).unwrap())
Allocation::from_bytes(slice, AbiAndPrefAlign::from_bytes(1, 1).unwrap())
}
pub fn undef(size: Size, align: Align) -> Self {
pub fn undef(size: Size, align: AbiAndPrefAlign) -> Self {
assert_eq!(size.bytes() as usize as u64, size.bytes());
Allocation {
bytes: vec![0; size.bytes() as usize],


@ -13,7 +13,7 @@ use std::{fmt, env};
use hir::map::definitions::DefPathData;
use mir;
use ty::{self, Ty, layout};
use ty::layout::{Size, Align, LayoutError};
use ty::layout::{Size, AbiAndPrefAlign, LayoutError};
use rustc_target::spec::abi::Abi;
use super::{RawConst, Pointer, InboundsCheck, ScalarMaybeUndef};
@ -301,8 +301,8 @@ pub enum EvalErrorKind<'tcx, O> {
TlsOutOfBounds,
AbiViolation(String),
AlignmentCheckFailed {
required: Align,
has: Align,
required: AbiAndPrefAlign,
has: AbiAndPrefAlign,
},
ValidationFailure(String),
CalledClosureAsFunction,
@ -315,7 +315,7 @@ pub enum EvalErrorKind<'tcx, O> {
DeallocatedWrongMemoryKind(String, String),
ReallocateNonBasePtr,
DeallocateNonBasePtr,
IncorrectAllocationInformation(Size, Size, Align, Align),
IncorrectAllocationInformation(Size, Size, AbiAndPrefAlign, AbiAndPrefAlign),
Layout(layout::LayoutError<'tcx>),
HeapAllocZeroBytes,
HeapAllocNonPowerOfTwoAlignment(u64),


@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc_target::abi::{Align, Size};
use rustc_target::abi::{AbiAndPrefAlign, Size};
use rustc_data_structures::fx::{FxHashSet};
use std::cmp::{self, Ordering};
@ -63,7 +63,7 @@ impl CodeStats {
pub fn record_type_size<S: ToString>(&mut self,
kind: DataTypeKind,
type_desc: S,
align: Align,
align: AbiAndPrefAlign,
overall_size: Size,
packed: bool,
opt_discr_size: Option<Size>,


@ -248,7 +248,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
/// A univariant, the last field of which may be coerced to unsized.
MaybeUnsized,
/// A univariant, but with a prefix of an arbitrary size & alignment (e.g. enum tag).
Prefixed(Size, Align),
Prefixed(Size, AbiAndPrefAlign),
}
let univariant_uninterned = |fields: &[TyLayout<'_>], repr: &ReprOptions, kind| {
@ -259,7 +259,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let pack = {
let pack = repr.pack as u64;
Align::from_bytes(pack, pack).unwrap()
AbiAndPrefAlign::from_bytes(pack, pack).unwrap()
};
let mut align = if packed {
@ -352,7 +352,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
if repr.align > 0 {
let repr_align = repr.align as u64;
align = align.max(Align::from_bytes(repr_align, repr_align).unwrap());
align = align.max(AbiAndPrefAlign::from_bytes(repr_align, repr_align).unwrap());
debug!("univariant repr_align: {:?}", repr_align);
}
@ -682,7 +682,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let pack = {
let pack = def.repr.pack as u64;
Align::from_bytes(pack, pack).unwrap()
AbiAndPrefAlign::from_bytes(pack, pack).unwrap()
};
let mut align = if packed {
@ -694,7 +694,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
if def.repr.align > 0 {
let repr_align = def.repr.align as u64;
align = align.max(
Align::from_bytes(repr_align, repr_align).unwrap());
AbiAndPrefAlign::from_bytes(repr_align, repr_align).unwrap());
}
let optimize = !def.repr.inhibit_union_abi_opt();
@ -964,7 +964,7 @@ impl<'a, 'tcx> LayoutCx<'tcx, TyCtxt<'a, 'tcx, 'tcx>> {
let mut size = Size::ZERO;
// We're interested in the smallest alignment, so start large.
let mut start_align = Align::from_bytes(256, 256).unwrap();
let mut start_align = AbiAndPrefAlign::from_bytes(256, 256).unwrap();
assert_eq!(Integer::for_abi_align(dl, start_align), None);
// repr(C) on an enum tells us to make a (tag, union) layout,
@ -1994,7 +1994,7 @@ impl_stable_hash_for!(enum ::ty::layout::Primitive {
Pointer
});
impl<'gcx> HashStable<StableHashingContext<'gcx>> for Align {
impl<'gcx> HashStable<StableHashingContext<'gcx>> for AbiAndPrefAlign {
fn hash_stable<W: StableHasherResult>(&self,
hcx: &mut StableHashingContext<'gcx>,
hasher: &mut StableHasher<W>) {


@ -19,7 +19,7 @@ use type_of::LayoutLlvmExt;
use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::layout::{self, AbiAndPrefAlign, Size, TyLayout};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
@ -457,7 +457,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
fn alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
let mut bx = Builder::with_cx(self.cx);
bx.position_at_start(unsafe {
llvm::LLVMGetFirstBasicBlock(self.llfn())
@ -465,7 +465,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
bx.dynamic_alloca(ty, name, align)
}
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: Align) -> &'ll Value {
fn dynamic_alloca(&mut self, ty: &'ll Type, name: &str, align: AbiAndPrefAlign) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
@ -484,7 +484,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ty: &'ll Type,
len: &'ll Value,
name: &str,
align: Align) -> &'ll Value {
align: AbiAndPrefAlign) -> &'ll Value {
self.count_insn("alloca");
unsafe {
let alloca = if name.is_empty() {
@ -499,7 +499,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
fn load(&mut self, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
self.count_insn("load");
unsafe {
let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, noname());
@ -639,7 +639,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: Align) -> &'ll Value {
fn store(&mut self, val: &'ll Value, ptr: &'ll Value, align: AbiAndPrefAlign) -> &'ll Value {
self.store_with_flags(val, ptr, align, MemFlags::empty())
}
@ -647,7 +647,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
&mut self,
val: &'ll Value,
ptr: &'ll Value,
align: Align,
align: AbiAndPrefAlign,
flags: MemFlags,
) -> &'ll Value {
debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
@ -878,8 +878,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn memcpy(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
fn memcpy(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
@ -898,8 +898,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn memmove(&mut self, dst: &'ll Value, dst_align: Align,
src: &'ll Value, src_align: Align,
fn memmove(&mut self, dst: &'ll Value, dst_align: AbiAndPrefAlign,
src: &'ll Value, src_align: AbiAndPrefAlign,
size: &'ll Value, flags: MemFlags) {
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memmove.
@ -923,7 +923,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
ptr: &'ll Value,
fill_byte: &'ll Value,
size: &'ll Value,
align: Align,
align: AbiAndPrefAlign,
flags: MemFlags,
) {
let ptr_width = &self.cx().sess().target.target.target_pointer_width;


@ -28,7 +28,7 @@ use value::Value;
use rustc::ty::{self, Ty};
use rustc_codegen_ssa::traits::*;
use rustc::ty::layout::{self, Size, Align, LayoutOf};
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, LayoutOf};
use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
@ -89,12 +89,12 @@ pub fn codegen_static_initializer(
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,
mut align: Align) {
mut align: AbiAndPrefAlign) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.target.options.min_global_align {
match ty::layout::Align::from_bits(min, min) {
match ty::layout::AbiAndPrefAlign::from_bits(min, min) {
Ok(min) => align = align.max(min),
Err(err) => {
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
@ -186,7 +186,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn static_addr_of_mut(
&self,
cv: &'ll Value,
align: Align,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> &'ll Value {
unsafe {
@ -212,7 +212,7 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn static_addr_of(
&self,
cv: &'ll Value,
align: Align,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> &'ll Value {
if let Some(&gv) = self.const_globals.borrow().get(&cv) {


@ -35,7 +35,7 @@ use rustc_data_structures::fingerprint::Fingerprint;
use rustc::ty::Instance;
use common::CodegenCx;
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf,
use rustc::ty::layout::{self, AbiAndPrefAlign, HasDataLayout, Integer, IntegerExt, LayoutOf,
PrimitiveExt, Size, TyLayout};
use rustc::session::config;
use rustc::util::nodemap::FxHashMap;
@ -923,7 +923,7 @@ struct MemberDescription<'ll> {
type_metadata: &'ll DIType,
offset: Size,
size: Size,
align: Align,
align: AbiAndPrefAlign,
flags: DIFlags,
discriminant: Option<u64>,
}
@ -985,13 +985,12 @@ impl<'tcx> StructMemberDescriptionFactory<'tcx> {
f.ident.to_string()
};
let field = layout.field(cx, i);
let (size, align) = field.size_and_align();
MemberDescription {
name,
type_metadata: type_metadata(cx, field.ty, self.span),
offset: layout.fields.offset(i),
size,
align,
size: field.size,
align: field.align,
flags: DIFlags::FlagZero,
discriminant: None,
}
@ -1109,13 +1108,12 @@ impl<'tcx> UnionMemberDescriptionFactory<'tcx> {
-> Vec<MemberDescription<'ll>> {
self.variant.fields.iter().enumerate().map(|(i, f)| {
let field = self.layout.field(cx, i);
let (size, align) = field.size_and_align();
MemberDescription {
name: f.ident.to_string(),
type_metadata: type_metadata(cx, field.ty, self.span),
offset: Size::ZERO,
size,
align,
size: field.size,
align: field.align,
flags: DIFlags::FlagZero,
discriminant: None,
}
@ -1587,8 +1585,6 @@ fn prepare_enum_metadata(
_ => {}
}
let (enum_type_size, enum_type_align) = layout.size_and_align();
let enum_name = SmallCStr::new(&enum_name);
let unique_type_id_str = SmallCStr::new(
debug_context(cx).type_map.borrow().get_unique_type_id_as_string(unique_type_id)
@ -1610,8 +1606,8 @@ fn prepare_enum_metadata(
enum_name.as_ptr(),
file_metadata,
UNKNOWN_LINE_NUMBER,
enum_type_size.bits(),
enum_type_align.abi_bits() as u32,
layout.size.bits(),
layout.align.abi_bits() as u32,
DIFlags::FlagZero,
None,
0, // RuntimeLang
@ -1695,8 +1691,8 @@ fn prepare_enum_metadata(
ptr::null_mut(),
file_metadata,
UNKNOWN_LINE_NUMBER,
enum_type_size.bits(),
enum_type_align.abi_bits() as u32,
layout.size.bits(),
layout.align.abi_bits() as u32,
DIFlags::FlagZero,
discriminator_metadata,
empty_array,
@ -1712,8 +1708,8 @@ fn prepare_enum_metadata(
enum_name.as_ptr(),
file_metadata,
UNKNOWN_LINE_NUMBER,
enum_type_size.bits(),
enum_type_align.abi_bits() as u32,
layout.size.bits(),
layout.align.abi_bits() as u32,
DIFlags::FlagZero,
None,
type_array,


@ -12,7 +12,7 @@ use abi::{FnType, FnTypeExt};
use common::*;
use rustc::hir;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc::ty::layout::{self, AbiAndPrefAlign, LayoutOf, Size, TyLayout};
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
@ -80,7 +80,7 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
match layout.fields {
layout::FieldPlacement::Union(_) => {
let fill = cx.type_padding_filler( layout.size, layout.align);
let fill = cx.type_padding_filler(layout.size, layout.align);
let packed = false;
match name {
None => {
@ -165,7 +165,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
pub fn align_of(&self, ty: Ty<'tcx>) -> AbiAndPrefAlign {
self.layout_of(ty).align
}
@ -173,8 +173,9 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
self.layout_of(ty).size
}
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
self.layout_of(ty).size_and_align()
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, AbiAndPrefAlign) {
let layout = self.layout_of(ty);
(layout.size, layout.align)
}
}
@ -196,7 +197,7 @@ pub enum PointerKind {
#[derive(Copy, Clone)]
pub struct PointeeInfo {
pub size: Size,
pub align: Align,
pub align: AbiAndPrefAlign,
pub safe: Option<PointerKind>,
}


@ -31,7 +31,7 @@ use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Stats, CodegenUnitNameBuilder};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
@ -410,9 +410,9 @@ pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &mut Bx,
dst: Bx::Value,
dst_align: Align,
dst_align: AbiAndPrefAlign,
src: Bx::Value,
src_align: Align,
src_align: AbiAndPrefAlign,
layout: TyLayout<'tcx>,
flags: MemFlags,
) {


@ -25,14 +25,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
t: Ty<'tcx>,
info: Option<Bx::Value>
) -> (Bx::Value, Bx::Value) {
debug!("calculate size of DST: {}; with lost info: {:?}",
t, info);
if bx.cx().type_is_sized(t) {
let (size, align) = bx.cx().layout_of(t).size_and_align();
debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
t, info, size, align);
let size = bx.cx().const_usize(size.bytes());
let align = bx.cx().const_usize(align.abi());
let layout = bx.cx().layout_of(t);
debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
t, info, layout);
if !layout.is_unsized() {
let size = bx.cx().const_usize(layout.size.bytes());
let align = bx.cx().const_usize(layout.align.abi());
return (size, align);
}
match t.sty {
@ -42,19 +40,17 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
(meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
}
ty::Slice(_) | ty::Str => {
let unit = t.sequence_element_type(bx.tcx());
let unit = layout.field(bx.cx(), 0);
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bx.cx().layout_of(unit).size_and_align();
(bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
bx.cx().const_usize(align.abi()))
(bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())),
bx.cx().const_usize(unit.align.abi()))
}
_ => {
// First get the size of all statically known fields.
// Don't use size_of because it also rounds up to alignment, which we
// want to avoid, as the unsized field's alignment could be smaller.
assert!(!t.is_simd());
let layout = bx.cx().layout_of(t);
debug!("DST {} layout: {:?}", t, layout);
let i = layout.fields.count() - 1;


@ -100,15 +100,15 @@ pub fn get_vtable<'tcx, Cx: CodegenMethods<'tcx>>(
})
});
let (size, align) = cx.layout_of(ty).size_and_align();
let layout = cx.layout_of(ty);
// /////////////////////////////////////////////////////////////////////////////////////////////
// If you touch this code, be sure to also make the corresponding changes to
// `get_vtable` in rust_mir/interpret/traits.rs
// /////////////////////////////////////////////////////////////////////////////////////////////
let components: Vec<_> = [
cx.get_fn(monomorphize::resolve_drop_in_place(cx.tcx(), ty)),
cx.const_usize(size.bytes()),
cx.const_usize(align.abi())
cx.const_usize(layout.size.bytes()),
cx.const_usize(layout.align.abi())
].iter().cloned().chain(methods).collect();
let vtable_const = cx.const_struct(&components, false);


@ -11,7 +11,7 @@
use rustc::mir::interpret::{ConstValue, ErrorHandled};
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use rustc::ty::layout::{self, AbiAndPrefAlign, LayoutOf, TyLayout};
use base;
use MemFlags;
@ -33,7 +33,7 @@ pub enum OperandValue<V> {
/// to be valid for the operand's lifetime.
/// The second value, if any, is the extra data (vtable or length)
/// which indicates that it refers to an unsized rvalue.
Ref(V, Option<V>, Align),
Ref(V, Option<V>, AbiAndPrefAlign),
/// A single LLVM value.
Immediate(V),
/// A pair of immediate LLVM values. Used by fat pointers too.
@ -348,8 +348,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
};
// FIXME: choose an appropriate alignment, or use dynamic align somehow
let max_align = Align::from_bits(128, 128).unwrap();
let min_align = Align::from_bits(8, 8).unwrap();
let max_align = AbiAndPrefAlign::from_bits(128, 128).unwrap();
let min_align = AbiAndPrefAlign::from_bits(8, 8).unwrap();
// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));


@ -9,7 +9,7 @@
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use MemFlags;
@ -33,14 +33,14 @@ pub struct PlaceRef<'tcx, V> {
pub layout: TyLayout<'tcx>,
/// What alignment we know for this place
pub align: Align,
pub align: AbiAndPrefAlign,
}
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(
llval: V,
layout: TyLayout<'tcx>,
align: Align,
align: AbiAndPrefAlign,
) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
@ -308,9 +308,8 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align();
let size = bx.cx().const_usize(size.bytes());
bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
let size = bx.cx().const_usize(self.layout.size.bytes());
bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
}
let niche = self.project_field(bx, 0);


@ -496,10 +496,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
let (size, align) = bx.cx().layout_of(content_ty).size_and_align();
let llsize = bx.cx().const_usize(size.bytes());
let llalign = bx.cx().const_usize(align.abi());
let content_ty = self.monomorphize(&content_ty);
let content_layout = bx.cx().layout_of(content_ty);
let llsize = bx.cx().const_usize(content_layout.size.bytes());
let llalign = bx.cx().const_usize(content_layout.align.abi());
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = bx.cx().backend_type(box_layout);


@ -15,10 +15,10 @@ use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods;
use super::HasCodegen;
use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use std::ffi::CStr;
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::ty::layout::{Align, Size};
use rustc::ty::layout::{AbiAndPrefAlign, Size};
use std::ffi::CStr;
use MemFlags;
use std::borrow::Cow;
@ -97,17 +97,18 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
fn fneg(&mut self, v: Self::Value) -> Self::Value;
fn not(&mut self, v: Self::Value) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: Align) -> Self::Value;
fn alloca(&mut self, ty: Self::Type, name: &str, align: AbiAndPrefAlign) -> Self::Value;
fn dynamic_alloca(&mut self, ty: Self::Type, name: &str, align: AbiAndPrefAlign)
-> Self::Value;
fn array_alloca(
&mut self,
ty: Self::Type,
len: Self::Value,
name: &str,
align: Align,
align: AbiAndPrefAlign,
) -> Self::Value;
fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
fn load(&mut self, ptr: Self::Value, align: AbiAndPrefAlign) -> Self::Value;
fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
@ -116,12 +117,12 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
fn range_metadata(&mut self, load: Self::Value, range: Range<u128>);
fn nonnull_metadata(&mut self, load: Self::Value);
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
fn store(&mut self, val: Self::Value, ptr: Self::Value, align: AbiAndPrefAlign) -> Self::Value;
fn store_with_flags(
&mut self,
val: Self::Value,
ptr: Self::Value,
align: Align,
align: AbiAndPrefAlign,
flags: MemFlags,
) -> Self::Value;
fn atomic_store(
@ -174,18 +175,18 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
fn memcpy(
&mut self,
dst: Self::Value,
dst_align: Align,
dst_align: AbiAndPrefAlign,
src: Self::Value,
src_align: Align,
src_align: AbiAndPrefAlign,
size: Self::Value,
flags: MemFlags,
);
fn memmove(
&mut self,
dst: Self::Value,
dst_align: Align,
dst_align: AbiAndPrefAlign,
src: Self::Value,
src_align: Align,
src_align: AbiAndPrefAlign,
size: Self::Value,
flags: MemFlags,
);
@ -194,7 +195,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>:
ptr: Self::Value,
fill_byte: Self::Value,
size: Self::Value,
align: Align,
align: AbiAndPrefAlign,
flags: MemFlags,
);


@ -10,13 +10,23 @@
use super::Backend;
use rustc::hir::def_id::DefId;
use rustc::ty::layout::Align;
use rustc::ty::layout::AbiAndPrefAlign;
pub trait StaticMethods<'tcx>: Backend<'tcx> {
fn static_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn static_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
fn static_addr_of_mut(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
fn static_addr_of(&self, cv: Self::Value, align: Align, kind: Option<&str>) -> Self::Value;
fn static_addr_of_mut(
&self,
cv: Self::Value,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> Self::Value;
fn static_addr_of(
&self,
cv: Self::Value,
align: AbiAndPrefAlign,
kind: Option<&str>,
) -> Self::Value;
fn get_static(&self, def_id: DefId) -> Self::Value;
fn codegen_static(&self, def_id: DefId, is_mutable: bool);
unsafe fn static_replace_all_uses(&self, old_g: Self::Value, new_g: Self::Value);


@ -13,7 +13,7 @@ use super::Backend;
use super::HasCodegen;
use common::{self, TypeKind};
use mir::place::PlaceRef;
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::ty::layout::{self, AbiAndPrefAlign, Size, TyLayout};
use rustc::ty::{self, Ty};
use rustc::util::nodemap::FxHashMap;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
@ -120,7 +120,7 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
}
}
fn type_pointee_for_abi_align(&self, align: Align) -> Self::Type {
fn type_pointee_for_abi_align(&self, align: AbiAndPrefAlign) -> Self::Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = layout::Integer::approximate_abi_align(self, align);
self.type_from_integer(ity)
@ -128,7 +128,7 @@ pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
/// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
fn type_padding_filler(&self, size: Size, align: Align) -> Self::Type {
fn type_padding_filler(&self, size: Size, align: AbiAndPrefAlign) -> Self::Type {
let unit = layout::Integer::approximate_abi_align(self, align);
let size = size.bytes();
let unit_size = unit.size().bytes();


@ -16,7 +16,7 @@ use rustc::hir::def_id::DefId;
use rustc::hir::def::Def;
use rustc::mir;
use rustc::ty::layout::{
self, Size, Align, HasDataLayout, LayoutOf, TyLayout
self, Size, AbiAndPrefAlign, HasDataLayout, LayoutOf, TyLayout
};
use rustc::ty::subst::{Subst, Substs};
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
@ -314,9 +314,9 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
&self,
metadata: Option<Scalar<M::PointerTag>>,
layout: TyLayout<'tcx>,
) -> EvalResult<'tcx, Option<(Size, Align)>> {
) -> EvalResult<'tcx, Option<(Size, AbiAndPrefAlign)>> {
if !layout.is_unsized() {
return Ok(Some(layout.size_and_align()));
return Ok(Some((layout.size, layout.align)));
}
match layout.ty.sty {
ty::Adt(..) | ty::Tuple(..) => {
@ -391,8 +391,8 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
ty::Slice(_) | ty::Str => {
let len = metadata.expect("slice fat ptr must have vtable").to_usize(self)?;
let (elem_size, align) = layout.field(self, 0)?.size_and_align();
Ok(Some((elem_size * len, align)))
let elem = layout.field(self, 0)?;
Ok(Some((elem.size * len, elem.align)))
}
ty::Foreign(_) => {
@ -406,7 +406,7 @@ impl<'a, 'mir, 'tcx: 'mir, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tc
pub fn size_and_align_of_mplace(
&self,
mplace: MPlaceTy<'tcx, M::PointerTag>
) -> EvalResult<'tcx, Option<(Size, Align)>> {
) -> EvalResult<'tcx, Option<(Size, AbiAndPrefAlign)>> {
self.size_and_align_of(mplace.meta, mplace.layout)
}


@ -21,7 +21,7 @@ use std::ptr;
use std::borrow::Cow;
use rustc::ty::{self, Instance, ParamEnv, query::TyCtxtAt};
use rustc::ty::layout::{self, Align, TargetDataLayout, Size, HasDataLayout};
use rustc::ty::layout::{self, AbiAndPrefAlign, TargetDataLayout, Size, HasDataLayout};
pub use rustc::mir::interpret::{truncate, write_target_uint, read_target_uint};
use rustc_data_structures::fx::{FxHashSet, FxHashMap};
@ -71,7 +71,7 @@ pub struct Memory<'a, 'mir, 'tcx: 'a + 'mir, M: Machine<'a, 'mir, 'tcx>> {
/// To be able to compare pointers with NULL, and to check alignment for accesses
/// to ZSTs (where pointers may dangle), we keep track of the size even for allocations
/// that do not exist any more.
dead_alloc_map: FxHashMap<AllocId, (Size, Align)>,
dead_alloc_map: FxHashMap<AllocId, (Size, AbiAndPrefAlign)>,
/// Lets us implement `HasDataLayout`, which is awfully convenient.
pub(super) tcx: TyCtxtAt<'a, 'tcx, 'tcx>,
@ -130,7 +130,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn allocate(
&mut self,
size: Size,
align: Align,
align: AbiAndPrefAlign,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
Ok(Pointer::from(self.allocate_with(Allocation::undef(size, align), kind)?))
@ -140,9 +140,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
&mut self,
ptr: Pointer<M::PointerTag>,
old_size: Size,
old_align: Align,
old_align: AbiAndPrefAlign,
new_size: Size,
new_align: Align,
new_align: AbiAndPrefAlign,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx, Pointer> {
if ptr.offset.bytes() != 0 {
@ -179,7 +179,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn deallocate(
&mut self,
ptr: Pointer<M::PointerTag>,
size_and_align: Option<(Size, Align)>,
size_and_align: Option<(Size, AbiAndPrefAlign)>,
kind: MemoryKind<M::MemoryKinds>,
) -> EvalResult<'tcx> {
trace!("deallocating: {}", ptr.alloc_id);
@ -244,7 +244,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn check_align(
&self,
ptr: Scalar<M::PointerTag>,
required_align: Align
required_align: AbiAndPrefAlign
) -> EvalResult<'tcx> {
// Check non-NULL/Undef, extract offset
let (offset, alloc_align) = match ptr {
@ -279,7 +279,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
} else {
let has = offset % required_align.abi();
err!(AlignmentCheckFailed {
has: Align::from_bytes(has, has).unwrap(),
has: AbiAndPrefAlign::from_bytes(has, has).unwrap(),
required: required_align,
})
}
@ -443,13 +443,15 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
}
}
pub fn get_size_and_align(&self, id: AllocId) -> (Size, Align) {
pub fn get_size_and_align(&self, id: AllocId) -> (Size, AbiAndPrefAlign) {
if let Ok(alloc) = self.get(id) {
return (Size::from_bytes(alloc.bytes.len() as u64), alloc.align);
}
// Could also be a fn ptr or extern static
match self.tcx.alloc_map.lock().get(id) {
Some(AllocType::Function(..)) => (Size::ZERO, Align::from_bytes(1, 1).unwrap()),
Some(AllocType::Function(..)) => {
(Size::ZERO, AbiAndPrefAlign::from_bytes(1, 1).unwrap())
}
Some(AllocType::Static(did)) => {
// The only way `get` couldn't have worked here is if this is an extern static
assert!(self.tcx.is_foreign_item(did));
@ -622,7 +624,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align,
align: AbiAndPrefAlign,
check_defined_and_ptr: bool,
) -> EvalResult<'tcx, &[u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
@ -651,7 +653,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
align: AbiAndPrefAlign
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, true)
}
@ -663,7 +665,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
&self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align
align: AbiAndPrefAlign
) -> EvalResult<'tcx, &[u8]> {
self.get_bytes_internal(ptr, size, align, false)
}
@ -674,7 +676,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
&mut self,
ptr: Pointer<M::PointerTag>,
size: Size,
align: Align,
align: AbiAndPrefAlign,
) -> EvalResult<'tcx, &mut [u8]> {
assert_ne!(size.bytes(), 0, "0-sized accesses should never even get a `Pointer`");
self.check_align(ptr.into(), align)?;
@ -747,9 +749,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn copy(
&mut self,
src: Scalar<M::PointerTag>,
src_align: Align,
src_align: AbiAndPrefAlign,
dest: Scalar<M::PointerTag>,
dest_align: Align,
dest_align: AbiAndPrefAlign,
size: Size,
nonoverlapping: bool,
) -> EvalResult<'tcx> {
@ -759,9 +761,9 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn copy_repeatedly(
&mut self,
src: Scalar<M::PointerTag>,
src_align: Align,
src_align: AbiAndPrefAlign,
dest: Scalar<M::PointerTag>,
dest_align: Align,
dest_align: AbiAndPrefAlign,
size: Size,
length: u64,
nonoverlapping: bool,
@ -863,7 +865,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
allow_ptr_and_undef: bool,
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
@ -881,7 +883,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn read_bytes(&self, ptr: Scalar<M::PointerTag>, size: Size) -> EvalResult<'tcx, &[u8]> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
if size.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(&[]);
@ -891,7 +893,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn write_bytes(&mut self, ptr: Scalar<M::PointerTag>, src: &[u8]) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
if src.is_empty() {
self.check_align(ptr, align)?;
return Ok(());
@ -908,7 +910,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
count: Size
) -> EvalResult<'tcx> {
// Empty accesses don't need to be valid pointers, but they should still be non-NULL
let align = Align::from_bytes(1, 1).unwrap();
let align = AbiAndPrefAlign::from_bytes(1, 1).unwrap();
if count.bytes() == 0 {
self.check_align(ptr, align)?;
return Ok(());
@ -924,7 +926,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn read_scalar(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
ptr_align: AbiAndPrefAlign,
size: Size
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
// get_bytes_unchecked tests alignment and relocation edges
@ -961,7 +963,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn read_ptr_sized(
&self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align
ptr_align: AbiAndPrefAlign
) -> EvalResult<'tcx, ScalarMaybeUndef<M::PointerTag>> {
self.read_scalar(ptr, ptr_align, self.pointer_size())
}
@ -970,7 +972,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn write_scalar(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
ptr_align: AbiAndPrefAlign,
val: ScalarMaybeUndef<M::PointerTag>,
type_size: Size,
) -> EvalResult<'tcx> {
@ -1017,14 +1019,14 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> Memory<'a, 'mir, 'tcx, M> {
pub fn write_ptr_sized(
&mut self,
ptr: Pointer<M::PointerTag>,
ptr_align: Align,
ptr_align: AbiAndPrefAlign,
val: ScalarMaybeUndef<M::PointerTag>
) -> EvalResult<'tcx> {
let ptr_size = self.pointer_size();
self.write_scalar(ptr.into(), ptr_align, val, ptr_size)
}
fn int_align(&self, size: Size) -> Align {
fn int_align(&self, size: Size) -> AbiAndPrefAlign {
// We assume pointer-sized integers have the same alignment as pointers.
// We also assume signed and unsigned integers of the same size have the same alignment.
let ity = match size.bytes() {


@ -18,7 +18,7 @@ use std::hash::Hash;
use rustc::hir;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Size, Align, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, LayoutOf, TyLayout, HasDataLayout, VariantIdx};
use super::{
GlobalId, AllocId, Allocation, Scalar, EvalResult, Pointer, PointerArithmetic,
@ -32,7 +32,7 @@ pub struct MemPlace<Tag=(), Id=AllocId> {
/// be turned back into a reference before ever being dereferenced.
/// However, it may never be undef.
pub ptr: Scalar<Tag, Id>,
pub align: Align,
pub align: AbiAndPrefAlign,
/// Metadata for unsized places. Interpretation is up to the type.
/// Must not be present for sized types, but can be missing for unsized types
/// (e.g. `extern type`).
@ -116,7 +116,7 @@ impl<Tag> MemPlace<Tag> {
}
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: AbiAndPrefAlign) -> Self {
MemPlace {
ptr,
align,
@ -127,16 +127,16 @@ impl<Tag> MemPlace<Tag> {
/// Produces a Place that will error if attempted to be read from or written to
#[inline(always)]
pub fn null(cx: &impl HasDataLayout) -> Self {
Self::from_scalar_ptr(Scalar::ptr_null(cx), Align::from_bytes(1, 1).unwrap())
Self::from_scalar_ptr(Scalar::ptr_null(cx), AbiAndPrefAlign::from_bytes(1, 1).unwrap())
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
pub fn from_ptr(ptr: Pointer<Tag>, align: AbiAndPrefAlign) -> Self {
Self::from_scalar_ptr(ptr.into(), align)
}
#[inline(always)]
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, AbiAndPrefAlign) {
assert!(self.meta.is_none());
(self.ptr, self.align)
}
@ -230,12 +230,12 @@ impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
}
#[inline(always)]
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: Align) -> Self {
pub fn from_scalar_ptr(ptr: Scalar<Tag>, align: AbiAndPrefAlign) -> Self {
Place::Ptr(MemPlace::from_scalar_ptr(ptr, align))
}
#[inline(always)]
pub fn from_ptr(ptr: Pointer<Tag>, align: Align) -> Self {
pub fn from_ptr(ptr: Pointer<Tag>, align: AbiAndPrefAlign) -> Self {
Place::Ptr(MemPlace::from_ptr(ptr, align))
}
@ -249,7 +249,7 @@ impl<'tcx, Tag: ::std::fmt::Debug> Place<Tag> {
}
#[inline]
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, Align) {
pub fn to_scalar_ptr_align(self) -> (Scalar<Tag>, AbiAndPrefAlign) {
self.to_mem_place().to_scalar_ptr_align()
}


@ -16,7 +16,7 @@ use rustc::mir::interpret::{
};
use rustc::ty::{self, TyCtxt};
use rustc::ty::layout::Align;
use rustc::ty::layout::AbiAndPrefAlign;
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@ -276,7 +276,7 @@ struct AllocationSnapshot<'a> {
bytes: &'a [u8],
relocations: Relocations<(), AllocIdSnapshot<'a>>,
undef_mask: &'a UndefMask,
align: &'a Align,
align: &'a AbiAndPrefAlign,
mutability: &'a Mutability,
}


@ -9,7 +9,7 @@
// except according to those terms.
use rustc::ty::{self, Ty};
use rustc::ty::layout::{Size, Align, LayoutOf};
use rustc::ty::layout::{Size, AbiAndPrefAlign, LayoutOf};
use rustc::mir::interpret::{Scalar, Pointer, EvalResult, PointerArithmetic};
use super::{EvalContext, Machine, MemoryKind};
@ -101,7 +101,7 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
pub fn read_size_and_align_from_vtable(
&self,
vtable: Pointer<M::PointerTag>,
) -> EvalResult<'tcx, (Size, Align)> {
) -> EvalResult<'tcx, (Size, AbiAndPrefAlign)> {
let pointer_size = self.pointer_size();
let pointer_align = self.tcx.data_layout.pointer_align;
let size = self.memory.read_ptr_sized(vtable.offset(pointer_size, self)?,pointer_align)?
@ -110,6 +110,6 @@ impl<'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>> EvalContext<'a, 'mir, 'tcx, M>
vtable.offset(pointer_size * 2, self)?,
pointer_align
)?.to_bits(pointer_size)? as u64;
Ok((Size::from_bytes(size), Align::from_bytes(align, align).unwrap()))
Ok((Size::from_bytes(size), AbiAndPrefAlign::from_bytes(align, align).unwrap()))
}
}


@ -13,7 +13,7 @@ use std::hash::Hash;
use std::ops::RangeInclusive;
use syntax_pos::symbol::Symbol;
use rustc::ty::layout::{self, Size, Align, TyLayout, LayoutOf, VariantIdx};
use rustc::ty::layout::{self, Size, AbiAndPrefAlign, TyLayout, LayoutOf, VariantIdx};
use rustc::ty;
use rustc_data_structures::fx::FxHashSet;
use rustc::mir::interpret::{
@ -355,7 +355,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// for the purpose of validity, consider foreign types to have
// alignment and size determined by the layout (size will be 0,
// alignment should take attributes into account).
.unwrap_or_else(|| layout.size_and_align());
.unwrap_or_else(|| (layout.size, layout.align));
match self.ecx.memory.check_align(ptr, align) {
Ok(_) => {},
Err(err) => {
@ -463,7 +463,7 @@ impl<'rt, 'a, 'mir, 'tcx, M: Machine<'a, 'mir, 'tcx>>
// for function pointers.
let non_null =
self.ecx.memory.check_align(
Scalar::Ptr(ptr), Align::from_bytes(1, 1).unwrap()
Scalar::Ptr(ptr), AbiAndPrefAlign::from_bytes(1, 1).unwrap()
).is_ok() ||
self.ecx.memory.get_fn(ptr).is_ok();
if !non_null {


@ -8,7 +8,7 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{self, Abi, Align, FieldPlacement, Size};
use abi::{self, Abi, AbiAndPrefAlign, FieldPlacement, Size};
use abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
@ -80,7 +80,7 @@ mod attr_impl {
pub struct ArgAttributes {
pub regular: ArgAttribute,
pub pointee_size: Size,
pub pointee_align: Option<Align>
pub pointee_align: Option<AbiAndPrefAlign>
}
impl ArgAttributes {
@ -137,7 +137,7 @@ impl Reg {
}
impl Reg {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
let dl = cx.data_layout();
match self.kind {
RegKind::Integer => {
@ -188,7 +188,7 @@ impl From<Reg> for Uniform {
}
impl Uniform {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
self.unit.align(cx)
}
}
@ -230,7 +230,7 @@ impl CastTarget {
.abi_align(self.rest.align(cx)) + self.rest.total
}
pub fn align<C: HasDataLayout>(&self, cx: &C) -> Align {
pub fn align<C: HasDataLayout>(&self, cx: &C) -> AbiAndPrefAlign {
self.prefix.iter()
.filter_map(|x| x.map(|kind| Reg { kind, size: self.prefix_chunk }.align(cx)))
.fold(cx.data_layout().aggregate_align.max(self.rest.align(cx)),


@ -13,7 +13,7 @@
// need to be fixed when PowerPC vector support is added.
use abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use abi::{Align, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use abi::{AbiAndPrefAlign, Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use spec::HasTargetSpec;
#[derive(Debug, Clone, Copy, PartialEq)]
@ -120,7 +120,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
} else {
// Aggregates larger than a doubleword should be padded
// at the tail to fill out a whole number of doublewords.
let align = Align::from_bits(64, 64).unwrap();
let align = AbiAndPrefAlign::from_bits(64, 64).unwrap();
(Reg::i64(), size.abi_align(align))
};


@ -24,20 +24,21 @@ pub mod call;
/// for a target, which contains everything needed to compute layouts.
pub struct TargetDataLayout {
pub endian: Endian,
pub i1_align: Align,
pub i8_align: Align,
pub i16_align: Align,
pub i32_align: Align,
pub i64_align: Align,
pub i128_align: Align,
pub f32_align: Align,
pub f64_align: Align,
pub i1_align: AbiAndPrefAlign,
pub i8_align: AbiAndPrefAlign,
pub i16_align: AbiAndPrefAlign,
pub i32_align: AbiAndPrefAlign,
pub i64_align: AbiAndPrefAlign,
pub i128_align: AbiAndPrefAlign,
pub f32_align: AbiAndPrefAlign,
pub f64_align: AbiAndPrefAlign,
pub pointer_size: Size,
pub pointer_align: Align,
pub aggregate_align: Align,
pub pointer_align: AbiAndPrefAlign,
pub aggregate_align: AbiAndPrefAlign,
/// Alignments for vector types.
pub vector_align: Vec<(Size, Align)>,
pub vector_align: Vec<(Size, AbiAndPrefAlign)>,
pub instruction_address_space: u32,
}
@ -46,20 +47,20 @@ impl Default for TargetDataLayout {
fn default() -> TargetDataLayout {
TargetDataLayout {
endian: Endian::Big,
i1_align: Align::from_bits(8, 8).unwrap(),
i8_align: Align::from_bits(8, 8).unwrap(),
i16_align: Align::from_bits(16, 16).unwrap(),
i32_align: Align::from_bits(32, 32).unwrap(),
i64_align: Align::from_bits(32, 64).unwrap(),
i128_align: Align::from_bits(32, 64).unwrap(),
f32_align: Align::from_bits(32, 32).unwrap(),
f64_align: Align::from_bits(64, 64).unwrap(),
i1_align: AbiAndPrefAlign::from_bits(8, 8).unwrap(),
i8_align: AbiAndPrefAlign::from_bits(8, 8).unwrap(),
i16_align: AbiAndPrefAlign::from_bits(16, 16).unwrap(),
i32_align: AbiAndPrefAlign::from_bits(32, 32).unwrap(),
i64_align: AbiAndPrefAlign::from_bits(32, 64).unwrap(),
i128_align: AbiAndPrefAlign::from_bits(32, 64).unwrap(),
f32_align: AbiAndPrefAlign::from_bits(32, 32).unwrap(),
f64_align: AbiAndPrefAlign::from_bits(64, 64).unwrap(),
pointer_size: Size::from_bits(64),
pointer_align: Align::from_bits(64, 64).unwrap(),
aggregate_align: Align::from_bits(0, 64).unwrap(),
pointer_align: AbiAndPrefAlign::from_bits(64, 64).unwrap(),
aggregate_align: AbiAndPrefAlign::from_bits(0, 64).unwrap(),
vector_align: vec![
(Size::from_bits(64), Align::from_bits(64, 64).unwrap()),
(Size::from_bits(128), Align::from_bits(128, 128).unwrap())
(Size::from_bits(64), AbiAndPrefAlign::from_bits(64, 64).unwrap()),
(Size::from_bits(128), AbiAndPrefAlign::from_bits(128, 128).unwrap())
],
instruction_address_space: 0,
}
@ -96,7 +97,7 @@ impl TargetDataLayout {
}
let abi = parse_bits(s[0], "alignment", cause)?;
let pref = s.get(1).map_or(Ok(abi), |pref| parse_bits(pref, "alignment", cause))?;
Align::from_bits(abi, pref).map_err(|err| {
AbiAndPrefAlign::from_bits(abi, pref).map_err(|err| {
format!("invalid alignment for `{}` in \"data-layout\": {}",
cause, err)
})
@ -205,7 +206,7 @@ impl TargetDataLayout {
}
}
pub fn vector_align(&self, vec_size: Size) -> Align {
pub fn vector_align(&self, vec_size: Size) -> AbiAndPrefAlign {
for &(size, align) in &self.vector_align {
if size == vec_size {
return align;
@ -214,7 +215,7 @@ impl TargetDataLayout {
// Default to natural alignment, which is what LLVM does.
// That is, use the size, rounded up to a power of 2.
let align = vec_size.bytes().next_power_of_two();
Align::from_bytes(align, align).unwrap()
AbiAndPrefAlign::from_bytes(align, align).unwrap()
}
}
@ -270,13 +271,13 @@ impl Size {
}
#[inline]
pub fn abi_align(self, align: Align) -> Size {
pub fn abi_align(self, align: AbiAndPrefAlign) -> Size {
let mask = align.abi() - 1;
Size::from_bytes((self.bytes() + mask) & !mask)
}
#[inline]
pub fn is_abi_aligned(self, align: Align) -> bool {
pub fn is_abi_aligned(self, align: AbiAndPrefAlign) -> bool {
let mask = align.abi() - 1;
self.bytes() & mask == 0
}
@ -358,23 +359,23 @@ impl AddAssign for Size {
}
}
/// Alignment of a type in bytes, both ABI-mandated and preferred.
/// Alignments of a type in bytes, both ABI-mandated and preferred.
/// Each field is a power of two, giving the alignment a maximum value
/// of 2<sup>(2<sup>8</sup> - 1)</sup>, which is limited by LLVM to a
/// maximum capacity of 2<sup>29</sup> or 536870912.
#[derive(Copy, Clone, PartialEq, Eq, Ord, PartialOrd, Hash, Debug, RustcEncodable, RustcDecodable)]
pub struct Align {
pub struct AbiAndPrefAlign {
abi_pow2: u8,
pref_pow2: u8,
}
impl Align {
pub fn from_bits(abi: u64, pref: u64) -> Result<Align, String> {
Align::from_bytes(Size::from_bits(abi).bytes(),
impl AbiAndPrefAlign {
pub fn from_bits(abi: u64, pref: u64) -> Result<AbiAndPrefAlign, String> {
AbiAndPrefAlign::from_bytes(Size::from_bits(abi).bytes(),
Size::from_bits(pref).bytes())
}
pub fn from_bytes(abi: u64, pref: u64) -> Result<Align, String> {
pub fn from_bytes(abi: u64, pref: u64) -> Result<AbiAndPrefAlign, String> {
let log2 = |align: u64| {
// Treat an alignment of 0 bytes like 1-byte alignment.
if align == 0 {
@ -396,7 +397,7 @@ impl Align {
}
};
Ok(Align {
Ok(AbiAndPrefAlign {
abi_pow2: log2(abi)?,
pref_pow2: log2(pref)?,
})
@ -418,15 +419,15 @@ impl Align {
self.pref() * 8
}
pub fn min(self, other: Align) -> Align {
Align {
pub fn min(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi_pow2: cmp::min(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::min(self.pref_pow2, other.pref_pow2),
}
}
pub fn max(self, other: Align) -> Align {
Align {
pub fn max(self, other: AbiAndPrefAlign) -> AbiAndPrefAlign {
AbiAndPrefAlign {
abi_pow2: cmp::max(self.abi_pow2, other.abi_pow2),
pref_pow2: cmp::max(self.pref_pow2, other.pref_pow2),
}
@ -436,9 +437,9 @@ impl Align {
/// (the largest power of two that the offset is a multiple of).
///
/// NB: for an offset of `0`, this happens to return `2^64`.
pub fn max_for_offset(offset: Size) -> Align {
pub fn max_for_offset(offset: Size) -> AbiAndPrefAlign {
let pow2 = offset.bytes().trailing_zeros() as u8;
Align {
AbiAndPrefAlign {
abi_pow2: pow2,
pref_pow2: pow2,
}
@ -446,8 +447,8 @@ impl Align {
/// Lower the alignment, if necessary, such that the given offset
/// is aligned to it (the offset is a multiple of the alignment).
pub fn restrict_for_offset(self, offset: Size) -> Align {
self.min(Align::max_for_offset(offset))
pub fn restrict_for_offset(self, offset: Size) -> AbiAndPrefAlign {
self.min(AbiAndPrefAlign::max_for_offset(offset))
}
}
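
The two offset helpers renamed in the hunk above are easy to skim past in a rename-only diff, so here is a hedged standalone illustration of what they compute, written as free functions over plain byte counts (the function names are illustration names, not rustc's methods): the largest power of two dividing an offset, and an alignment lowered to respect that offset.

// Simplified illustration (free functions over byte counts, not rustc's
// methods): the best alignment guaranteed at byte `offset` is
// 2^(trailing zeros of `offset`).
fn max_align_for_offset(offset: u64) -> u64 {
    // The real code stores the exponent, so offset 0 can represent 2^64;
    // here the shift is clamped just to keep the sketch runnable.
    1u64 << offset.trailing_zeros().min(63)
}

// Lower `align` if necessary so that `offset` is a multiple of it.
fn restrict_align_for_offset(align: u64, offset: u64) -> u64 {
    align.min(max_align_for_offset(offset))
}

fn main() {
    // A field at offset 12 (0b1100) is only guaranteed 4-byte alignment,
    // even inside an 8-byte-aligned parent.
    assert_eq!(max_align_for_offset(12), 4);
    assert_eq!(restrict_align_for_offset(8, 12), 4);
}
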
@ -472,7 +473,7 @@ impl Integer {
}
}
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
let dl = cx.data_layout();
match self {
@ -507,7 +508,7 @@ impl Integer {
}
/// Find the smallest integer with the given alignment.
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Option<Integer> {
pub fn for_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Option<Integer> {
let dl = cx.data_layout();
let wanted = align.abi();
@ -520,7 +521,7 @@ impl Integer {
}
/// Find the largest integer with the given alignment or less.
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: Align) -> Integer {
pub fn approximate_abi_align<C: HasDataLayout>(cx: &C, align: AbiAndPrefAlign) -> Integer {
let dl = cx.data_layout();
let wanted = align.abi();
@ -597,7 +598,7 @@ impl<'a, 'tcx> Primitive {
}
}
pub fn align<C: HasDataLayout>(self, cx: &C) -> Align {
pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
let dl = cx.data_layout();
match self {
@ -868,7 +869,7 @@ pub struct LayoutDetails {
pub variants: Variants,
pub fields: FieldPlacement,
pub abi: Abi,
pub align: Align,
pub align: AbiAndPrefAlign,
pub size: Size
}
@ -949,8 +950,4 @@ impl<'a, Ty> TyLayout<'a, Ty> {
Abi::Aggregate { sized } => sized && self.size.bytes() == 0
}
}
pub fn size_and_align(&self) -> (Size, Align) {
(self.size, self.align)
}
}
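
Finally, the hunk above removes the `size_and_align()` convenience method from `TyLayout`; the codegen and interpreter hunks earlier in the diff show its callers reading the `size` and `align` fields directly instead. A simplified sketch of that caller-side change, using stand-in types and an illustration-only helper name rather than rustc's definitions:

// Stand-in types, just to show the pattern; not rustc's definitions.
#[derive(Copy, Clone)]
struct Size(u64);
#[derive(Copy, Clone)]
struct AbiAndPrefAlign { abi_pow2: u8 }

struct TyLayout {
    size: Size,
    align: AbiAndPrefAlign,
}

fn size_and_align_bytes(layout: &TyLayout) -> (u64, u64) {
    // Before this commit: let (size, align) = layout.size_and_align();
    // After it, callers read the two fields directly.
    (layout.size.0, 1u64 << layout.align.abi_pow2)
}

fn main() {
    let layout = TyLayout { size: Size(16), align: AbiAndPrefAlign { abi_pow2: 3 } };
    assert_eq!(size_and_align_bytes(&layout), (16, 8));
}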