Mark scalar layout unions so that backends that do not support partially initialized scalars can special case them.
parent 2ed6786404 · commit d32ce37a17
37 changed files with 356 additions and 288 deletions
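For context: the heart of this change lives in `rustc_target::abi` (not shown among the hunks below), where `Scalar` goes from a struct with public `value` and `valid_range` fields to an enum, so that a backend which cannot represent partially initialized scalars can detect the union case and handle it specially. Below is a minimal, self-contained sketch of the new shape and of the `primitive()` accessor the call sites in this diff migrate to; the simplified types are assumptions inferred from the diff, not a verbatim copy of the compiler source:

```rust
/// Stand-in for the primitive kinds a scalar can have (simplified:
/// the real `Int` variant carries an `Integer` size and a signedness flag).
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Primitive {
    Int,
    F32,
    F64,
    Pointer,
}

/// Inclusive, possibly wrapping range of valid bit patterns,
/// as constructed in the `const_alloc_to_llvm` hunk below.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub struct WrappingRange {
    pub start: u128,
    pub end: u128,
}

/// Before: `pub struct Scalar { pub value: Primitive, pub valid_range: WrappingRange }`.
/// After: an enum distinguishing fully initialized scalars from union scalars.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
pub enum Scalar {
    /// Every byte is initialized; `valid_range` constrains the value.
    Initialized { value: Primitive, valid_range: WrappingRange },
    /// A scalar read out of a union: bytes may be uninitialized, so no
    /// validity range can be assumed. This is the case backends special-case.
    Union { value: Primitive },
}

impl Scalar {
    /// Replaces the direct `scalar.value` field accesses seen in the diff.
    pub fn primitive(self) -> Primitive {
        match self {
            Scalar::Initialized { value, .. } | Scalar::Union { value } => value,
        }
    }
}

fn main() {
    let s = Scalar::Initialized {
        value: Primitive::Pointer,
        valid_range: WrappingRange { start: 0, end: !0 },
    };
    assert_eq!(s.primitive(), Primitive::Pointer);
}
```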
@@ -510,9 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
             // by the LLVM verifier.
-            if let Int(..) = scalar.value {
+            if let Int(..) = scalar.primitive() {
                 if !scalar.is_bool() && !scalar.is_always_valid(bx) {
-                    bx.range_metadata(callsite, scalar.valid_range);
+                    bx.range_metadata(callsite, scalar.valid_range(bx));
                 }
             }
         }
@@ -753,7 +753,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
-    match scalar.value {
+    match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
         Primitive::Int(Integer::I32, _) => cx.type_i32(),
@@ -774,7 +774,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
             } else {
@@ -785,7 +785,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.ptrtoint(value, bx.cx.type_isize());
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -800,7 +800,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
@@ -812,7 +812,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
                 value
@@ -826,19 +826,21 @@ fn llvm_fixup_input<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_i32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_i64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -852,7 +854,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
 ) -> &'ll Value {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
@@ -860,7 +862,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.value {
+            if let Primitive::Pointer = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value
@@ -875,7 +877,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
@@ -887,7 +889,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
                 value
@@ -901,20 +903,22 @@ fn llvm_fixup_output<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
             } else {
                 value
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
-            Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
-            Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
-            Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
-            _ => value,
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
+                Primitive::Int(Integer::I16, _) => bx.trunc(value, bx.cx.type_i16()),
+                Primitive::F32 => bx.bitcast(value, bx.cx.type_f32()),
+                Primitive::F64 => bx.bitcast(value, bx.cx.type_f64()),
+                _ => value,
+            }
+        }
         _ => value,
     }
 }
@@ -927,7 +931,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
 ) -> &'ll Type {
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
-            if let Primitive::Int(Integer::I8, _) = s.value {
+            if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
@@ -946,7 +950,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             cx.type_vector(elem_ty, count * 2)
         }
         (InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
-            if s.value == Primitive::F64 =>
+            if s.primitive() == Primitive::F64 =>
         {
             cx.type_i64()
         }
@@ -958,7 +962,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I32, _) = s.value {
+            if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
                 layout.llvm_type(cx)
@@ -972,19 +976,21 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
             ),
             Abi::Scalar(s),
         ) => {
-            if let Primitive::Int(Integer::I64, _) = s.value {
+            if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => match s.value {
-            // MIPS only supports register-length arithmetics.
-            Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
-            Primitive::F32 => cx.type_i32(),
-            Primitive::F64 => cx.type_i64(),
-            _ => layout.llvm_type(cx),
-        },
+        (InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+            match s.primitive() {
+                // MIPS only supports register-length arithmetics.
+                Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
+                Primitive::F32 => cx.type_i32(),
+                Primitive::F64 => cx.type_i64(),
+                _ => layout.llvm_type(cx),
+            }
+        }
         _ => layout.llvm_type(cx),
     }
 }
@@ -484,14 +484,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     bx.noundef_metadata(load);
                 }
 
-                match scalar.value {
+                match scalar.primitive() {
                     abi::Int(..) => {
                         if !scalar.is_always_valid(bx) {
-                            bx.range_metadata(load, scalar.valid_range);
+                            bx.range_metadata(load, scalar.valid_range(bx));
                        }
                     }
                     abi::Pointer => {
-                        if !scalar.valid_range.contains(0) {
+                        if !scalar.valid_range(bx).contains(0) {
                             bx.nonnull_metadata(load);
                         }
 
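Note the signature change threaded through the hunk above: `valid_range` is now a method taking the codegen context rather than a plain field. A plausible reading (an assumption about the `rustc_target` side, which this diff does not show) is that the `Union` variant stores no range and must synthesize "every bit pattern is valid" from the primitive's size, which requires the target data layout. A hypothetical continuation of the earlier sketch:

```rust
// Hypothetical sketch, not compiler source: why `valid_range` needs `cx`.
impl Scalar {
    pub fn valid_range<C: HasDataLayout>(&self, cx: &C) -> WrappingRange {
        match *self {
            Scalar::Initialized { valid_range, .. } => valid_range,
            // A union scalar admits any bit pattern of its size, which is
            // only known once the target data layout is available.
            Scalar::Union { value } => WrappingRange::full(value.size(cx)),
        }
    }
}
```

Under that reading, `is_always_valid(bx)` presumably holds for every `Union`, so the `range_metadata`/`nonnull_metadata` hints above are only ever emitted for `Initialized` scalars with a genuinely restricted range.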
@@ -525,7 +525,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
         } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
-            let b_offset = a.value.size(self).align_to(b.value.align(self).abi);
+            let b_offset = a.size(self).align_to(b.align(self).abi);
             let pair_ty = place.layout.llvm_type(self);
 
             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
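The `a.size(self)` and `b.align(self)` calls above suggest `Scalar` also grew convenience forwarders so call sites no longer reach through to the primitive. A sketch of what such forwarders would look like (assumed shape, inferred from how the call sites read; not compiler source):

```rust
// Hypothetical continuation of the earlier sketch.
impl Scalar {
    pub fn size<C: HasDataLayout>(self, cx: &C) -> Size {
        // Size depends only on the primitive, not on initializedness.
        self.primitive().size(cx)
    }

    pub fn align<C: HasDataLayout>(self, cx: &C) -> AbiAndPrefAlign {
        self.primitive().align(cx)
    }
}
```

This keeps the `ScalarPair` offset computation (`b_offset`) identical for `Initialized` and `Union` scalars alike.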
@@ -221,16 +221,16 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
     }
 
     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: &'ll Type) -> &'ll Value {
-        let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
+        let bitsize = if layout.is_bool() { 1 } else { layout.size(self).bits() };
         match cv {
             Scalar::Int(ScalarInt::ZST) => {
-                assert_eq!(0, layout.value.size(self).bytes());
+                assert_eq!(0, layout.size(self).bytes());
                 self.const_undef(self.type_ix(0))
             }
             Scalar::Int(int) => {
-                let data = int.assert_bits(layout.value.size(self));
+                let data = int.assert_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.value == Pointer {
+                if layout.primitive() == Pointer {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -269,7 +269,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         1,
                     )
                 };
-                if layout.value != Pointer {
+                if layout.primitive() != Pointer {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -109,7 +109,10 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                     Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                     &cx.tcx,
                 ),
-                Scalar { value: Primitive::Pointer, valid_range: WrappingRange { start: 0, end: !0 } },
+                Scalar::Initialized {
+                    value: Primitive::Pointer,
+                    valid_range: WrappingRange { start: 0, end: !0 },
+                },
                 cx.type_i8p_ext(address_space),
             ));
             next_offset = offset + pointer_size;
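A small aside on the range spelled above (reusing the `WrappingRange` stand-in from the sketch at the top; illustrative only): `end: !0` is `u128::MAX`, and with wrapping semantics `0..=u128::MAX` admits every bit pattern, so this pointer scalar never receives range or nonnull metadata.

```rust
// Reusing the WrappingRange stand-in from the earlier sketch.
let any_value = WrappingRange { start: 0, end: !0 };
assert_eq!(any_value.end, u128::MAX); // !0 == all bits set
```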
@@ -118,7 +118,7 @@ fn tag_base_type<'ll, 'tcx>(
 
         Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, tag, .. } => {
             // Niche tags are always normalized to unsized integers of the correct size.
-            match tag.value {
+            match tag.primitive() {
                 Primitive::Int(t, _) => t,
                 Primitive::F32 => Integer::I32,
                 Primitive::F64 => Integer::I64,
@@ -136,7 +136,7 @@ fn tag_base_type<'ll, 'tcx>(
 
         Variants::Multiple { tag_encoding: TagEncoding::Direct, tag, .. } => {
             // Direct tags preserve the sign.
-            tag.value.to_ty(cx.tcx)
+            tag.primitive().to_ty(cx.tcx)
         }
     }
 }
@@ -425,7 +425,7 @@ fn compute_discriminant_value<'ll, 'tcx>(
             let value = (variant_index.as_u32() as u128)
                 .wrapping_sub(niche_variants.start().as_u32() as u128)
                 .wrapping_add(niche_start);
-            let value = tag.value.size(cx).truncate(value);
+            let value = tag.size(cx).truncate(value);
             // NOTE(eddyb) do *NOT* remove this assert, until
             // we pass the full 128-bit value to LLVM, otherwise
             // truncation will be silent and remain undetected.
@@ -134,7 +134,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             sym::va_arg => {
                 match fn_abi.ret.layout.abi {
                     abi::Abi::Scalar(scalar) => {
-                        match scalar.value {
+                        match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
                                     // `va_arg` should not be called on an integer type
@@ -309,7 +309,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         scalar: Scalar,
         offset: Size,
     ) -> &'a Type {
-        match scalar.value {
+        match scalar.primitive() {
            Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
@@ -362,8 +362,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }
 
-        let offset =
-            if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
+        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
         self.scalar_llvm_type_at(cx, scalar, offset)
     }
 
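Taken together, the mechanical `.value` → `.primitive()` migration across these files is what makes the commit title possible: because every access now goes through a method on the enum, a backend that cannot model partially initialized scalars can branch on the variant. A hedged sketch of the kind of special-casing this enables, reusing the stand-in types from the sketch at the top (illustrative only; no backend in this diff does this yet):

```rust
// Hypothetical backend logic enabled by the new enum (not from this diff).
fn emit_validity_hints(scalar: Scalar) {
    match scalar {
        // Fully initialized: range/nonnull metadata is sound to emit.
        Scalar::Initialized { valid_range, .. } => {
            println!("emit range metadata for {:?}", valid_range);
        }
        // Union: bytes may be uninitialized; emit no validity assumptions.
        Scalar::Union { .. } => {
            println!("treat as opaque bytes, no metadata");
        }
    }
}
```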