Rollup merge of #132246 - workingjubilee:campaign-on-irform, r=compiler-errors
Rename `rustc_abi::Abi` to `BackendRepr`

Remove the confabulation of `rustc_abi::Abi` with what "ABI" actually means by renaming it to `BackendRepr`, and rename `Abi::Aggregate` to `BackendRepr::Memory`. The type never actually represented how things are passed — that requires at least `PassMode` to be considered — rather, it is just how we represent certain things to the backend. This conflation arose because LLVM, the primary backend at the time, would lower certain IR forms using certain ABIs. Even that was only somewhat true, and it broke down as soon as one ventured significantly afield of what is described by the System V AMD64 ABI, whether by using different architectures, ABI-modifying IR annotations, the same architecture **with different ISA extensions enabled**, or other... unexpected delights.

Unfortunately, both names are still somewhat of a misnomer right now, as people have written code for years based on this misunderstanding. Still, the original names are even more so, and for better or worse this backend code has not received as much maintenance as the rest of the compiler lately. Actually arriving at a correct end state will require disentangling a lot of code, much of it pointlessly repeated in several places. Thus this is not an "actual fix", just a way to deflect further misunderstandings.
commit 847b6fe6b0
98 changed files with 873 additions and 643 deletions
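For orientation, here is a condensed sketch of the renamed type, consistent with the variants the hunks below match on (`Uninhabited`, `Scalar`, `ScalarPair`, `Vector`, and the renamed `Memory`). `Scalar` is stubbed out; the real `rustc_abi` definition carries more detail:

```rust
// Condensed sketch (simplified from rustc_abi; not the exact definition).
pub struct Scalar; // stand-in for rustc_abi::Scalar, which describes an
                   // immediate's size and valid range

// How a value is represented when handed to the backend. This is *not* the
// calling convention: argument passing additionally consults `PassMode`.
pub enum BackendRepr {
    Uninhabited,
    Scalar(Scalar),
    ScalarPair(Scalar, Scalar),
    Vector { element: Scalar, count: u64 },
    Memory { sized: bool }, // formerly `Abi::Aggregate { sized }`
}
```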
```diff
@@ -458,7 +458,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match &self.ret.mode {
             PassMode::Direct(attrs) => {
                 attrs.apply_attrs_to_llfn(llvm::AttributePlace::ReturnValue, cx, llfn);
-                if let abi::Abi::Scalar(scalar) = self.ret.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::ReturnValue, scalar);
                 }
             }
@@ -495,7 +495,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             }
             PassMode::Direct(attrs) => {
                 let i = apply(attrs);
-                if let abi::Abi::Scalar(scalar) = arg.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = arg.layout.backend_repr {
                     apply_range_attr(llvm::AttributePlace::Argument(i), scalar);
                 }
             }
@@ -510,7 +510,9 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(a, b) => {
                 let i = apply(a);
                 let ii = apply(b);
-                if let abi::Abi::ScalarPair(scalar_a, scalar_b) = arg.layout.abi {
+                if let abi::BackendRepr::ScalarPair(scalar_a, scalar_b) =
+                    arg.layout.backend_repr
+                {
                     apply_range_attr(llvm::AttributePlace::Argument(i), scalar_a);
                     apply_range_attr(llvm::AttributePlace::Argument(ii), scalar_b);
                 }
@@ -570,7 +572,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         }
         if bx.cx.sess().opts.optimize != config::OptLevel::No
             && llvm_util::get_version() < (19, 0, 0)
-            && let abi::Abi::Scalar(scalar) = self.ret.layout.abi
+            && let abi::BackendRepr::Scalar(scalar) = self.ret.layout.backend_repr
             && matches!(scalar.primitive(), Int(..))
             // If the value is a boolean, the range is 0..2 and that ultimately
             // become 0..0 when the type becomes i1, which would be rejected
```
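The hunks above only rename the pattern match, but they illustrate the distinction the rename is about: a `range` attribute is meaningful only when the value is a single backend-level immediate, so the `PassMode` check (how the value crosses the call) is paired with a `BackendRepr` check (what the backend-level value looks like). A self-contained analog of that dispatch, with simplified stand-in types rather than the real rustc ones:

```rust
// Illustrative analog only; the real types live in rustc_abi/rustc_target.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum BackendRepr { Scalar(u32), ScalarPair(u32, u32), Memory }

#[allow(dead_code)]
enum PassMode { Direct, Pair, Indirect }

// Range metadata applies only to a directly passed single immediate:
// the decision consults both the pass mode and the backend representation.
fn wants_range_attr(mode: PassMode, repr: BackendRepr) -> bool {
    matches!((mode, repr), (PassMode::Direct, BackendRepr::Scalar(_)))
}

fn main() {
    assert!(wants_range_attr(PassMode::Direct, BackendRepr::Scalar(32)));
    assert!(!wants_range_attr(PassMode::Indirect, BackendRepr::Memory));
    println!("range attrs: Direct + Scalar only");
}
```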
```diff
@@ -880,8 +880,8 @@ fn llvm_fixup_input<'ll, 'tcx>(
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
     let dl = &bx.tcx.data_layout;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
                 bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@@ -889,7 +889,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@@ -902,7 +902,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -910,14 +910,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_i64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
         (
             X86(
@@ -925,7 +925,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -937,7 +937,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.insert_element(
                 bx.const_undef(bx.type_vector(bx.type_f16(), 8)),
@@ -952,11 +952,14 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f32())
             } else {
@@ -969,7 +972,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_f64())
@@ -986,11 +989,11 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@@ -999,7 +1002,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1022,15 +1025,15 @@ fn llvm_fixup_output<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Value {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 bx.extract_element(value, bx.const_i32(0))
             } else {
                 value
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             value = bx.extract_element(value, bx.const_i32(0));
@@ -1039,7 +1042,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
             }
             value
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(bx.cx, element);
@@ -1047,14 +1050,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
             let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
             bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             bx.bitcast(value, bx.cx.type_f64())
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
         (
             X86(
@@ -1062,7 +1065,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if bx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1074,7 +1077,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => {
             let value = bx.bitcast(value, bx.type_vector(bx.type_f16(), 8));
             bx.extract_element(value, bx.const_usize(0))
@@ -1085,11 +1088,14 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i32())
             } else {
@@ -1102,7 +1108,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 bx.bitcast(value, bx.cx.type_i64())
@@ -1119,11 +1125,11 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@@ -1133,7 +1139,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
                 _ => value,
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
         {
@@ -1153,35 +1159,35 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
     instance: Instance<'_>,
 ) -> &'ll Type {
     use InlineAsmRegClass::*;
-    match (reg, layout.abi) {
-        (AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
+    match (reg, layout.backend_repr) {
+        (AArch64(AArch64InlineAsmRegClass::vreg), BackendRepr::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
                 cx.type_vector(cx.type_i8(), 8)
             } else {
                 layout.llvm_type(cx)
             }
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Scalar(s))
             if s.primitive() != Primitive::Float(Float::F128) =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, s);
             let count = 16 / layout.size.bytes();
             cx.type_vector(elem_ty, count)
         }
-        (AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
+        (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count })
             if layout.size.bytes() == 8 =>
         {
             let elem_ty = llvm_asm_scalar_type(cx, element);
             cx.type_vector(elem_ty, count * 2)
         }
-        (X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
+        (X86(X86InlineAsmRegClass::reg_abcd), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F64) =>
         {
             cx.type_i64()
         }
         (
             X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
-            Abi::Vector { .. },
+            BackendRepr::Vector { .. },
         ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
         (
             X86(
@@ -1189,7 +1195,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if cx.sess().asm_arch == Some(InlineAsmArch::X86)
             && s.primitive() == Primitive::Float(Float::F128) =>
         {
@@ -1201,7 +1207,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
         (
             X86(
@@ -1209,11 +1215,14 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | X86InlineAsmRegClass::ymm_reg
                 | X86InlineAsmRegClass::zmm_reg,
             ),
-            Abi::Vector { element, count: count @ (8 | 16) },
+            BackendRepr::Vector { element, count: count @ (8 | 16) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
+        (
+            Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
+            BackendRepr::Scalar(s),
+        ) => {
             if let Primitive::Int(Integer::I32, _) = s.primitive() {
                 cx.type_f32()
             } else {
@@ -1226,7 +1235,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::dreg_low8
                 | ArmInlineAsmRegClass::dreg_low16,
             ),
-            Abi::Scalar(s),
+            BackendRepr::Scalar(s),
         ) => {
             if let Primitive::Int(Integer::I64, _) = s.primitive() {
                 cx.type_f64()
@@ -1243,11 +1252,11 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 | ArmInlineAsmRegClass::qreg_low4
                 | ArmInlineAsmRegClass::qreg_low8,
             ),
-            Abi::Vector { element, count: count @ (4 | 8) },
+            BackendRepr::Vector { element, count: count @ (4 | 8) },
         ) if element.primitive() == Primitive::Float(Float::F16) => {
             cx.type_vector(cx.type_i16(), count)
         }
-        (Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
+        (Mips(MipsInlineAsmRegClass::reg), BackendRepr::Scalar(s)) => {
             match s.primitive() {
                 // MIPS only supports register-length arithmetics.
                 Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@@ -1256,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
                 _ => layout.llvm_type(cx),
             }
         }
-        (RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
+        (RiscV(RiscVInlineAsmRegClass::freg), BackendRepr::Scalar(s))
             if s.primitive() == Primitive::Float(Float::F16)
                 && !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
        {
```
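The three `llvm_fixup_*` functions above work as a matched set: `llvm_fixup_input` massages a value into the shape a register class expects, `llvm_fixup_output` undoes that on the result, and `llvm_fixup_output_type` names the intermediate LLVM type. A self-contained analog of the MIPS arms (which zero-extend sub-register integers on the way in and truncate on the way out, since "MIPS only supports register-length arithmetics"):

```rust
// Illustrative analog only, mirroring the MIPS arms above.
fn fixup_input_i8(v: i8) -> i32 {
    v as u8 as i32 // zero-extend, as in bx.zext(value, bx.cx.type_i32())
}

fn fixup_output_i8(v: i32) -> i8 {
    v as i8 // truncate, as in bx.trunc(value, bx.cx.type_i8())
}

fn main() {
    let x: i8 = -5;
    let widened = fixup_input_i8(x); // 251: the i32 register value the asm sees
    // ... the inline asm block operates on the register-width value ...
    assert_eq!(fixup_output_i8(widened), x); // the round trip is lossless
}
```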
```diff
@@ -543,13 +543,13 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             }
             let llval = const_llval.unwrap_or_else(|| {
                 let load = self.load(llty, place.val.llval, place.val.align);
-                if let abi::Abi::Scalar(scalar) = place.layout.abi {
+                if let abi::BackendRepr::Scalar(scalar) = place.layout.backend_repr {
                     scalar_load_metadata(self, load, scalar, place.layout, Size::ZERO);
                 }
                 load
             });
             OperandValue::Immediate(self.to_immediate(llval, place.layout))
-        } else if let abi::Abi::ScalarPair(a, b) = place.layout.abi {
+        } else if let abi::BackendRepr::ScalarPair(a, b) = place.layout.backend_repr {
             let b_offset = a.size(self).align_to(b.align(self).abi);

             let mut load = |i, scalar: abi::Scalar, layout, align, offset| {
```
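The `ScalarPair` branch above computes where the second immediate sits: `a.size(self).align_to(b.align(self).abi)`, i.e. the first element's size rounded up to the second element's alignment. A self-contained sketch of that rule on plain integers (rustc uses the `Size`/`Align` newtypes from `rustc_abi`):

```rust
// Illustrative sketch of the ScalarPair field-offset rule.
fn b_offset(a_size: u64, b_align: u64) -> u64 {
    // round a_size up to the next multiple of b_align, like Size::align_to
    (a_size + b_align - 1) / b_align * b_align
}

fn main() {
    // e.g. a pair of (i8, i32): the i32 half begins at offset 4
    assert_eq!(b_offset(1, 4), 4);
    // e.g. a pair of (u64, u8): the u8 begins right after the u64
    assert_eq!(b_offset(8, 1), 8);
}
```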
```diff
@@ -258,8 +258,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 self.call_intrinsic("llvm.va_copy", &[args[0].immediate(), args[1].immediate()])
             }
             sym::va_arg => {
-                match fn_abi.ret.layout.abi {
-                    abi::Abi::Scalar(scalar) => {
+                match fn_abi.ret.layout.backend_repr {
+                    abi::BackendRepr::Scalar(scalar) => {
                         match scalar.primitive() {
                             Primitive::Int(..) => {
                                 if self.cx().size_of(ret_ty).bytes() < 4 {
@@ -436,13 +436,13 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }

             sym::raw_eq => {
-                use abi::Abi::*;
+                use abi::BackendRepr::*;
                 let tp_ty = fn_args.type_at(0);
                 let layout = self.layout_of(tp_ty).layout;
-                let use_integer_compare = match layout.abi() {
+                let use_integer_compare = match layout.backend_repr() {
                     Scalar(_) | ScalarPair(_, _) => true,
                     Uninhabited | Vector { .. } => false,
-                    Aggregate { .. } => {
+                    Memory { .. } => {
                         // For rusty ABIs, small aggregates are actually passed
                         // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`),
                         // so we re-use that same threshold here.
@@ -549,7 +549,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
             }

             let llret_ty = if ret_ty.is_simd()
-                && let abi::Abi::Aggregate { .. } = self.layout_of(ret_ty).layout.abi
+                && let abi::BackendRepr::Memory { .. } =
+                    self.layout_of(ret_ty).layout.backend_repr
             {
                 let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx());
                 let elem_ll_ty = match elem_ty.kind() {
```
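The `raw_eq` hunk is the one place the diff matches every variant, including the renamed `Memory`: scalars and scalar pairs compare as integers, uninhabited and vector representations do not, and `Memory` aggregates fall back to a size threshold (the actual threshold check is elided by the hunk boundary above). A self-contained analog, with the threshold left as a hypothetical parameter rather than rustc's real value:

```rust
// Illustrative analog of the raw_eq decision; `threshold` is a stand-in
// for the size cutoff elided in the hunk above.
#[allow(dead_code)]
enum Repr { Scalar, ScalarPair, Uninhabited, Vector, Memory { size: u64 } }

fn use_integer_compare(repr: Repr, threshold: u64) -> bool {
    match repr {
        Repr::Scalar | Repr::ScalarPair => true,
        Repr::Uninhabited | Repr::Vector => false,
        // small aggregates are passed as integers, so compare them as such
        Repr::Memory { size } => size <= threshold,
    }
}

fn main() {
    assert!(use_integer_compare(Repr::Memory { size: 8 }, 16));
    assert!(!use_integer_compare(Repr::Memory { size: 64 }, 16));
}
```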
```diff
@@ -1,7 +1,7 @@
 use std::fmt::Write;

 use rustc_abi::Primitive::{Float, Int, Pointer};
-use rustc_abi::{Abi, Align, FieldsShape, Scalar, Size, Variants};
+use rustc_abi::{Align, BackendRepr, FieldsShape, Scalar, Size, Variants};
 use rustc_codegen_ssa::traits::*;
 use rustc_middle::bug;
 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
@@ -17,13 +17,13 @@ fn uncached_llvm_type<'a, 'tcx>(
     layout: TyAndLayout<'tcx>,
     defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
 ) -> &'a Type {
-    match layout.abi {
-        Abi::Scalar(_) => bug!("handled elsewhere"),
-        Abi::Vector { element, count } => {
+    match layout.backend_repr {
+        BackendRepr::Scalar(_) => bug!("handled elsewhere"),
+        BackendRepr::Vector { element, count } => {
             let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
-        Abi::Uninhabited | Abi::Aggregate { .. } | Abi::ScalarPair(..) => {}
+        BackendRepr::Uninhabited | BackendRepr::Memory { .. } | BackendRepr::ScalarPair(..) => {}
     }

     let name = match layout.ty.kind() {
@@ -170,16 +170,21 @@ pub(crate) trait LayoutLlvmExt<'tcx> {

 impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     fn is_llvm_immediate(&self) -> bool {
-        match self.abi {
-            Abi::Scalar(_) | Abi::Vector { .. } => true,
-            Abi::ScalarPair(..) | Abi::Uninhabited | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true,
+            BackendRepr::ScalarPair(..) | BackendRepr::Uninhabited | BackendRepr::Memory { .. } => {
+                false
+            }
         }
     }

     fn is_llvm_scalar_pair(&self) -> bool {
-        match self.abi {
-            Abi::ScalarPair(..) => true,
-            Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
+        match self.backend_repr {
+            BackendRepr::ScalarPair(..) => true,
+            BackendRepr::Uninhabited
+            | BackendRepr::Scalar(_)
+            | BackendRepr::Vector { .. }
+            | BackendRepr::Memory { .. } => false,
         }
     }

@@ -198,7 +203,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        if let Abi::Scalar(scalar) = self.abi {
+        if let BackendRepr::Scalar(scalar) = self.backend_repr {
             // Use a different cache for scalars because pointers to DSTs
             // can be either wide or thin (data pointers of wide pointers).
             if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
@@ -248,13 +253,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
     }

     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
-        match self.abi {
-            Abi::Scalar(scalar) => {
+        match self.backend_repr {
+            BackendRepr::Scalar(scalar) => {
                 if scalar.is_bool() {
                     return cx.type_i1();
                 }
             }
-            Abi::ScalarPair(..) => {
+            BackendRepr::ScalarPair(..) => {
                 // An immediate pair always contains just the two elements, without any padding
                 // filler, as it should never be stored to memory.
                 return cx.type_struct(
@@ -287,7 +292,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
         // In other words, this should generally not look at the type at all, but only at the
         // layout.
-        let Abi::ScalarPair(a, b) = self.abi else {
+        let BackendRepr::ScalarPair(a, b) = self.backend_repr else {
             bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self);
         };
         let scalar = [a, b][index];
```
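The two predicates renamed above are the backend's basic classification: only `Scalar` and `Vector` representations are single LLVM immediates, only `ScalarPair` is an immediate pair, and everything else goes through memory. A self-contained analog of that partition:

```rust
// Illustrative analog of is_llvm_immediate / is_llvm_scalar_pair.
#[allow(dead_code)]
#[derive(Clone, Copy)]
enum Repr { Uninhabited, Scalar, ScalarPair, Vector, Memory }

fn is_llvm_immediate(r: Repr) -> bool {
    matches!(r, Repr::Scalar | Repr::Vector)
}

fn is_llvm_scalar_pair(r: Repr) -> bool {
    matches!(r, Repr::ScalarPair)
}

fn main() {
    assert!(is_llvm_immediate(Repr::Scalar));
    assert!(is_llvm_scalar_pair(Repr::ScalarPair));
    assert!(!is_llvm_immediate(Repr::Memory)); // Memory never stays in registers
}
```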