Make abi::Abi Copy and remove a *lot* of refs

fix
fix
Remove more refs and clones
fix
more fix

commit 5b2f757dae (parent 86ff6aeb82)
33 changed files with 139 additions and 163 deletions
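Why this removes so many refs, in a minimal sketch (the types below are simplified, hypothetical stand-ins for rustc's abi::Abi and abi::Scalar, not the real definitions): once a type is Copy, `match` and `if let` can bind its fields by value, so the `ref` patterns, `&` borrows, and `*` derefs seen in the hunks below become unnecessary.

// Hypothetical, simplified stand-ins for illustration only.
#[derive(Clone, Copy)]
pub struct Scalar {
    pub start: u128,
    pub end: u128,
}

#[derive(Clone, Copy)]
pub enum Abi {
    Scalar(Scalar),
    Vector { element: Scalar, count: u64 },
}

// `Abi` is `Copy`, so this match consumes a cheap bitwise copy and
// binds `count` by value: no `ref` pattern, no later `*count` deref.
fn vector_len(abi: Abi) -> u64 {
    match abi {
        Abi::Scalar(_) => 1,
        Abi::Vector { count, .. } => count,
    }
}

Before the change, the equivalent code had to match on a reference (e.g. `match (reg, &layout.abi)`) and bind with `ref`, because moving a field out of a non-Copy value behind a reference is not allowed; that is exactly the pattern the hunks below delete.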
@@ -536,13 +536,13 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             _ => {}
         }
-        if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
+        if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
             if let Int(..) = scalar.value {
                 if !scalar.is_bool() && !scalar.is_always_valid_for(bx) {
-                    bx.range_metadata(callsite, &scalar.valid_range);
+                    bx.range_metadata(callsite, scalar.valid_range);
                 }
             }
         }
@@ -792,7 +792,7 @@ fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll
 
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
-fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
+fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: Scalar) -> &'ll Type {
     match scalar.value {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -812,7 +812,7 @@ fn llvm_fixup_input(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
@@ -835,7 +835,7 @@ fn llvm_fixup_input(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count);
+            let vec_ty = bx.cx.type_vector(elem_ty, count);
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
@@ -890,7 +890,7 @@ fn llvm_fixup_output(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 bx.extract_element(value, bx.const_i32(0))
@@ -910,8 +910,8 @@ fn llvm_fixup_output(
             Abi::Vector { element, count },
         ) if layout.size.bytes() == 8 => {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
-            let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
-            let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
+            let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
+            let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
@@ -965,7 +965,7 @@ fn llvm_fixup_output_type(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Type {
-    match (reg, &layout.abi) {
+    match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.value {
                 cx.type_vector(cx.type_i8(), 8)
@@ -382,7 +382,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             val
         }
     }
-    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: &abi::Scalar) -> Self::Value {
+    fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
         if scalar.is_bool() {
             return self.trunc(val, self.cx().type_i1());
         }
@@ -460,12 +460,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         fn scalar_load_metadata<'a, 'll, 'tcx>(
             bx: &mut Builder<'a, 'll, 'tcx>,
             load: &'ll Value,
-            scalar: &abi::Scalar,
+            scalar: abi::Scalar,
         ) {
             match scalar.value {
                 abi::Int(..) => {
                     if !scalar.is_always_valid_for(bx) {
-                        bx.range_metadata(load, &scalar.valid_range);
+                        bx.range_metadata(load, scalar.valid_range);
                     }
                 }
                 abi::Pointer if !scalar.valid_range.contains(0) => {
@@ -488,17 +488,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
-                if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
+                if let abi::Abi::Scalar(scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(ref a, ref b) = place.layout.abi {
+        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
             let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
-            let mut load = |i, scalar: &abi::Scalar, align| {
+            let mut load = |i, scalar: abi::Scalar, align| {
                 let llptr = self.struct_gep(pair_ty, place.llval, i as u64);
                 let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
                 let load = self.load(llty, llptr, align);
@@ -554,7 +554,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         next_bx
     }
 
-    fn range_metadata(&mut self, load: &'ll Value, range: &WrappingRange) {
+    fn range_metadata(&mut self, load: &'ll Value, range: WrappingRange) {
         if self.sess().target.arch == "amdgpu" {
             // amdgpu/LLVM does something weird and thinks an i64 value is
             // split into a v2i32, halving the bitwidth LLVM expects,
@@ -228,7 +228,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         })
     }
 
-    fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
+    fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
         let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
@@ -111,7 +111,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                     Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                     &cx.tcx,
                 ),
-                &Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+                Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
                 cx.type_i8p_ext(address_space),
             ));
             next_offset = offset + pointer_size;
@@ -1656,7 +1656,7 @@ impl EnumMemberDescriptionFactory<'ll, 'tcx> {
             Variants::Multiple {
                 tag_encoding:
                     TagEncoding::Niche { ref niche_variants, niche_start, dataful_variant },
-                ref tag,
+                tag,
                 ref variants,
                 tag_field,
             } => {
@@ -2082,10 +2082,8 @@ fn prepare_enum_metadata(
 
     let layout = cx.layout_of(enum_type);
 
-    if let (
-        &Abi::Scalar(_),
-        &Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. },
-    ) = (&layout.abi, &layout.variants)
+    if let (Abi::Scalar(_), Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. }) =
+        (layout.abi, &layout.variants)
     {
         return FinalMetadata(discriminant_type_metadata(tag.value));
     }
@@ -2093,8 +2091,8 @@ fn prepare_enum_metadata(
     if use_enum_fallback(cx) {
         let discriminant_type_metadata = match layout.variants {
             Variants::Single { .. } => None,
-            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, ref tag, .. }
-            | Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, .. } => {
+            Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. }
+            | Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
                 Some(discriminant_type_metadata(tag.value))
             }
         };
@@ -2146,9 +2144,7 @@ fn prepare_enum_metadata(
         // A single-variant enum has no discriminant.
         Variants::Single { .. } => None,
 
-        Variants::Multiple {
-            tag_encoding: TagEncoding::Niche { .. }, ref tag, tag_field, ..
-        } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, tag_field, .. } => {
             // Find the integer type of the correct size.
             let size = tag.value.size(cx);
             let align = tag.value.align(cx);
@@ -2179,7 +2175,7 @@ fn prepare_enum_metadata(
             }
         }
 
-        Variants::Multiple { tag_encoding: TagEncoding::Direct, ref tag, tag_field, .. } => {
+        Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, tag_field, .. } => {
            let discr_type = tag.value.to_ty(cx.tcx);
            let (size, align) = cx.size_and_align_of(discr_type);
 
@@ -133,7 +133,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             }
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(ref scalar) => {
+                    abi::Abi::Scalar(scalar) => {
                         match scalar.value {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
 ) -> &'a Type {
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { ref element, count } => {
+        Abi::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
             return cx.type_vector(element, count);
         }
@@ -177,7 +177,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
@@ -218,7 +218,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -286,7 +286,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }
 
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        if let Abi::Scalar(ref scalar) = self.abi {
+        if let Abi::Scalar(scalar) = self.abi {
             if scalar.is_bool() {
                 return cx.type_i1();
             }
@@ -297,7 +297,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn scalar_llvm_type_at<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
-        scalar: &Scalar,
+        scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
         match scalar.value {
@@ -337,7 +337,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         }
 
         let (a, b) = match self.abi {
-            Abi::ScalarPair(ref a, ref b) => (a, b),
+            Abi::ScalarPair(a, b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
         };
         let scalar = [a, b][index];