
Auto merge of #130506 - nnethercote:rustc_codegen_llvm-cleanups, r=jieyouxu

`rustc_codegen_llvm` cleanups

Some improvements I found while reading through this crate's code.

r? `@michaelwoerister`
bors 2024-09-20 11:55:32 +00:00
commit 1a5a2240bc
21 changed files with 515 additions and 648 deletions

View file

@ -80,22 +80,14 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
self.const_undef(typ)
}
fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
self.gcc_int(typ, int)
}
fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
self.gcc_uint(typ, int)
}
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
self.gcc_uint_big(typ, num)
}
fn const_bool(&self, val: bool) -> RValue<'gcc> {
self.const_uint(self.type_i1(), val as u64)
}
fn const_i8(&self, i: i8) -> RValue<'gcc> {
self.const_int(self.type_i8(), i as i64)
}
fn const_i16(&self, i: i16) -> RValue<'gcc> {
self.const_int(self.type_i16(), i as i64)
}
@ -104,8 +96,12 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
self.const_int(self.type_i32(), i as i64)
}
fn const_i8(&self, i: i8) -> RValue<'gcc> {
self.const_int(self.type_i8(), i as i64)
fn const_int(&self, typ: Type<'gcc>, int: i64) -> RValue<'gcc> {
self.gcc_int(typ, int)
}
fn const_u8(&self, i: u8) -> RValue<'gcc> {
self.const_uint(self.type_u8(), i as u64)
}
fn const_u32(&self, i: u32) -> RValue<'gcc> {
@ -130,8 +126,12 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
self.const_uint(self.usize_type, i)
}
fn const_u8(&self, i: u8) -> RValue<'gcc> {
self.const_uint(self.type_u8(), i as u64)
fn const_uint(&self, typ: Type<'gcc>, int: u64) -> RValue<'gcc> {
self.gcc_uint(typ, int)
}
fn const_uint_big(&self, typ: Type<'gcc>, num: u128) -> RValue<'gcc> {
self.gcc_uint_big(typ, num)
}
fn const_real(&self, typ: Type<'gcc>, val: f64) -> RValue<'gcc> {

View file

@ -520,24 +520,16 @@ pub(crate) fn inline_asm_call<'ll>(
/// If the register is an xmm/ymm/zmm register then return its index.
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
use X86InlineAsmReg::*;
match reg {
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::xmm0 as u32
&& reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
InlineAsmReg::X86(reg) if reg as u32 >= xmm0 as u32 && reg as u32 <= xmm15 as u32 => {
Some(reg as u32 - xmm0 as u32)
}
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::ymm0 as u32
&& reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
InlineAsmReg::X86(reg) if reg as u32 >= ymm0 as u32 && reg as u32 <= ymm15 as u32 => {
Some(reg as u32 - ymm0 as u32)
}
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::zmm0 as u32
&& reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
InlineAsmReg::X86(reg) if reg as u32 >= zmm0 as u32 && reg as u32 <= zmm31 as u32 => {
Some(reg as u32 - zmm0 as u32)
}
_ => None,
}
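
Aside: a minimal sketch (not part of this commit) of the index-by-subtraction pattern used above. It works because the xmm0..xmm15 variants are declared contiguously, so their enum discriminants are consecutive; the hypothetical X86Reg enum below stands in for X86InlineAsmReg.

#[allow(dead_code)]
enum X86Reg {
    Al,   // some non-xmm register, discriminant 0
    Xmm0, // discriminants 1..=3 are consecutive because the
    Xmm1, // variants are declared back to back
    Xmm2,
}

fn xmm_index(reg: X86Reg) -> Option<u32> {
    let n = reg as u32;
    // Range check against the first and last xmm variant, then subtract
    // the base discriminant to recover the register index.
    if n >= X86Reg::Xmm0 as u32 && n <= X86Reg::Xmm2 as u32 {
        Some(n - X86Reg::Xmm0 as u32)
    } else {
        None
    }
}

fn main() {
    assert_eq!(xmm_index(X86Reg::Xmm2), Some(2));
    assert!(xmm_index(X86Reg::Al).is_none());
}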
@ -545,50 +537,56 @@ fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
/// If the register is an AArch64 integer register then return its index.
fn a64_reg_index(reg: InlineAsmReg) -> Option<u32> {
match reg {
InlineAsmReg::AArch64(AArch64InlineAsmReg::x0) => Some(0),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x1) => Some(1),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x2) => Some(2),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x3) => Some(3),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x4) => Some(4),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x5) => Some(5),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x6) => Some(6),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x7) => Some(7),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x8) => Some(8),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x9) => Some(9),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x10) => Some(10),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x11) => Some(11),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x12) => Some(12),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x13) => Some(13),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x14) => Some(14),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x15) => Some(15),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x16) => Some(16),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x17) => Some(17),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x18) => Some(18),
// x19 is reserved
InlineAsmReg::AArch64(AArch64InlineAsmReg::x20) => Some(20),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x21) => Some(21),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x22) => Some(22),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x23) => Some(23),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x24) => Some(24),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x25) => Some(25),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x26) => Some(26),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x27) => Some(27),
InlineAsmReg::AArch64(AArch64InlineAsmReg::x28) => Some(28),
// x29 is reserved
InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) => Some(30),
_ => None,
}
use AArch64InlineAsmReg::*;
// Unlike `a64_vreg_index`, we can't subtract `x0` to get the u32 because
// `x19` and `x29` are missing and the integer constants for the
// `x0`..`x30` enum variants don't all match the register number. E.g. the
// integer constant for `x18` is 18, but the constant for `x20` is 19.
Some(match reg {
InlineAsmReg::AArch64(r) => match r {
x0 => 0,
x1 => 1,
x2 => 2,
x3 => 3,
x4 => 4,
x5 => 5,
x6 => 6,
x7 => 7,
x8 => 8,
x9 => 9,
x10 => 10,
x11 => 11,
x12 => 12,
x13 => 13,
x14 => 14,
x15 => 15,
x16 => 16,
x17 => 17,
x18 => 18,
// x19 is reserved
x20 => 20,
x21 => 21,
x22 => 22,
x23 => 23,
x24 => 24,
x25 => 25,
x26 => 26,
x27 => 27,
x28 => 28,
// x29 is reserved
x30 => 30,
_ => return None,
},
_ => return None,
})
}
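
Aside: the comment above is the crux of why this function cannot use the same subtraction trick as `a64_vreg_index`. With implicit discriminants, omitting a variant shifts every later discriminant down by one, so discriminant and register number diverge. A tiny sketch (hypothetical enum, not part of this commit):

enum Reg {
    X18, // discriminant 0
    // x19 is reserved, so there is no variant for it...
    X20, // ...and this discriminant is 1, not 2
}

fn main() {
    // The discriminants differ by 1 even though the register numbers
    // differ by 2, so index-by-subtraction would be off by one.
    assert_eq!(Reg::X20 as u32 - Reg::X18 as u32, 1);
}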
/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
use AArch64InlineAsmReg::*;
match reg {
InlineAsmReg::AArch64(reg)
if reg as u32 >= AArch64InlineAsmReg::v0 as u32
&& reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
{
Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
InlineAsmReg::AArch64(reg) if reg as u32 >= v0 as u32 && reg as u32 <= v31 as u32 => {
Some(reg as u32 - v0 as u32)
}
_ => None,
}
@ -596,6 +594,7 @@ fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
/// Converts a register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) -> String {
use InlineAsmRegClass::*;
match reg {
// For vector registers LLVM wants the register name to match the type size.
InlineAsmRegOrRegClass::Reg(reg) => {
@ -652,75 +651,66 @@ fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'_>>) ->
// The constraints can be retrieved from
// https://llvm.org/docs/LangRef.html#supported-constraint-code-list
InlineAsmRegOrRegClass::RegClass(reg) => match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
AArch64(AArch64InlineAsmRegClass::reg) => "r",
AArch64(AArch64InlineAsmRegClass::vreg) => "w",
AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
Arm(ArmInlineAsmRegClass::reg) => "r",
Arm(ArmInlineAsmRegClass::sreg)
| Arm(ArmInlineAsmRegClass::dreg_low16)
| Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
Arm(ArmInlineAsmRegClass::sreg_low16)
| Arm(ArmInlineAsmRegClass::dreg_low8)
| Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
Arm(ArmInlineAsmRegClass::dreg) | Arm(ArmInlineAsmRegClass::qreg) => "w",
Hexagon(HexagonInlineAsmRegClass::reg) => "r",
LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
Mips(MipsInlineAsmRegClass::reg) => "r",
Mips(MipsInlineAsmRegClass::freg) => "f",
Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => "b",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
InlineAsmRegClass::X86(
RiscV(RiscVInlineAsmRegClass::reg) => "r",
RiscV(RiscVInlineAsmRegClass::freg) => "f",
RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
X86(X86InlineAsmRegClass::reg) => "r",
X86(X86InlineAsmRegClass::reg_abcd) => "Q",
X86(X86InlineAsmRegClass::reg_byte) => "q",
X86(X86InlineAsmRegClass::xmm_reg) | X86(X86InlineAsmRegClass::ymm_reg) => "x",
X86(X86InlineAsmRegClass::zmm_reg) => "v",
X86(X86InlineAsmRegClass::kreg) => "^Yk",
X86(
X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg
| X86InlineAsmRegClass::kreg0
| X86InlineAsmRegClass::tmm_reg,
) => unreachable!("clobber-only"),
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => "r",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => "w",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => "d",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => "r",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => "w",
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
InlineAsmRegClass::Err => unreachable!(),
Wasm(WasmInlineAsmRegClass::local) => "r",
Bpf(BpfInlineAsmRegClass::reg) => "r",
Bpf(BpfInlineAsmRegClass::wreg) => "w",
Avr(AvrInlineAsmRegClass::reg) => "r",
Avr(AvrInlineAsmRegClass::reg_upper) => "d",
Avr(AvrInlineAsmRegClass::reg_pair) => "r",
Avr(AvrInlineAsmRegClass::reg_iw) => "w",
Avr(AvrInlineAsmRegClass::reg_ptr) => "e",
S390x(S390xInlineAsmRegClass::reg) => "r",
S390x(S390xInlineAsmRegClass::reg_addr) => "a",
S390x(S390xInlineAsmRegClass::freg) => "f",
Msp430(Msp430InlineAsmRegClass::reg) => "r",
M68k(M68kInlineAsmRegClass::reg) => "r",
M68k(M68kInlineAsmRegClass::reg_addr) => "a",
M68k(M68kInlineAsmRegClass::reg_data) => "d",
CSKY(CSKYInlineAsmRegClass::reg) => "r",
CSKY(CSKYInlineAsmRegClass::freg) => "f",
SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
Err => unreachable!(),
}
.to_string(),
}
@ -732,44 +722,41 @@ fn modifier_to_llvm(
reg: InlineAsmRegClass,
modifier: Option<char>,
) -> Option<char> {
use InlineAsmRegClass::*;
// The modifiers can be retrieved from
// https://llvm.org/docs/LangRef.html#asm-template-argument-modifiers
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
if modifier == Some('v') { None } else { modifier }
AArch64(AArch64InlineAsmRegClass::reg) => modifier,
AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
if modifier == Some('v') {
None
} else {
modifier
}
}
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => None,
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
Arm(ArmInlineAsmRegClass::reg) => None,
Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => None,
Arm(ArmInlineAsmRegClass::dreg)
| Arm(ArmInlineAsmRegClass::dreg_low16)
| Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
Arm(ArmInlineAsmRegClass::qreg)
| Arm(ArmInlineAsmRegClass::qreg_low8)
| Arm(ArmInlineAsmRegClass::qreg_low4) => {
if modifier.is_none() {
Some('q')
} else {
modifier
}
}
InlineAsmRegClass::Hexagon(_) => None,
InlineAsmRegClass::LoongArch(_) => None,
InlineAsmRegClass::Mips(_) => None,
InlineAsmRegClass::Nvptx(_) => None,
InlineAsmRegClass::PowerPC(_) => None,
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
| InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
Hexagon(_) => None,
LoongArch(_) => None,
Mips(_) => None,
Nvptx(_) => None,
PowerPC(_) => None,
RiscV(RiscVInlineAsmRegClass::reg) | RiscV(RiscVInlineAsmRegClass::freg) => None,
RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
None if arch == InlineAsmArch::X86_64 => Some('q'),
None => Some('k'),
Some('l') => Some('b'),
@ -779,10 +766,10 @@ fn modifier_to_llvm(
Some('r') => Some('q'),
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
X86(X86InlineAsmRegClass::reg_byte) => None,
X86(reg @ X86InlineAsmRegClass::xmm_reg)
| X86(reg @ X86InlineAsmRegClass::ymm_reg)
| X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
(X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
(X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
(X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
@ -791,116 +778,97 @@ fn modifier_to_llvm(
(_, Some('z')) => Some('g'),
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
InlineAsmRegClass::X86(
X86(X86InlineAsmRegClass::kreg) => None,
X86(
X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg
| X86InlineAsmRegClass::kreg0
| X86InlineAsmRegClass::tmm_reg,
) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => None,
InlineAsmRegClass::Bpf(_) => None,
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair)
| InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw)
| InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
) => unreachable!("clobber-only"),
Wasm(WasmInlineAsmRegClass::local) => None,
Bpf(_) => None,
Avr(AvrInlineAsmRegClass::reg_pair)
| Avr(AvrInlineAsmRegClass::reg_iw)
| Avr(AvrInlineAsmRegClass::reg_ptr) => match modifier {
Some('h') => Some('B'),
Some('l') => Some('A'),
_ => None,
},
InlineAsmRegClass::Avr(_) => None,
InlineAsmRegClass::S390x(_) => None,
InlineAsmRegClass::Msp430(_) => None,
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
InlineAsmRegClass::M68k(_) => None,
InlineAsmRegClass::CSKY(_) => None,
InlineAsmRegClass::Err => unreachable!(),
Avr(_) => None,
S390x(_) => None,
Msp430(_) => None,
SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
M68k(_) => None,
CSKY(_) => None,
Err => unreachable!(),
}
}
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'ll Type {
use InlineAsmRegClass::*;
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
AArch64(AArch64InlineAsmRegClass::vreg) | AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
cx.type_vector(cx.type_i64(), 2)
}
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::preg) => {
AArch64(AArch64InlineAsmRegClass::preg) => unreachable!("clobber-only"),
Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
Arm(ArmInlineAsmRegClass::sreg) | Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
Arm(ArmInlineAsmRegClass::dreg)
| Arm(ArmInlineAsmRegClass::dreg_low16)
| Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
Arm(ArmInlineAsmRegClass::qreg)
| Arm(ArmInlineAsmRegClass::qreg_low8)
| Arm(ArmInlineAsmRegClass::qreg_low4) => cx.type_vector(cx.type_i64(), 2),
Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
PowerPC(PowerPCInlineAsmRegClass::cr) | PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
cx.type_vector(cx.type_i64(), 2)
}
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::LoongArch(LoongArchInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::reg_nonzero) => cx.type_i32(),
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::freg) => cx.type_f64(),
InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::cr)
| InlineAsmRegClass::PowerPC(PowerPCInlineAsmRegClass::xer) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::vreg) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
InlineAsmRegClass::X86(
RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
RiscV(RiscVInlineAsmRegClass::vreg) => unreachable!("clobber-only"),
X86(X86InlineAsmRegClass::reg) | X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
X86(X86InlineAsmRegClass::xmm_reg)
| X86(X86InlineAsmRegClass::ymm_reg)
| X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
X86(
X86InlineAsmRegClass::x87_reg
| X86InlineAsmRegClass::mmx_reg
| X86InlineAsmRegClass::kreg0
| X86InlineAsmRegClass::tmm_reg,
) => {
unreachable!("clobber-only")
}
InlineAsmRegClass::Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
InlineAsmRegClass::Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
InlineAsmRegClass::Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
InlineAsmRegClass::S390x(
S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr,
) => cx.type_i32(),
InlineAsmRegClass::S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
bug!("LLVM backend does not support SPIR-V")
}
InlineAsmRegClass::Err => unreachable!(),
) => unreachable!("clobber-only"),
Wasm(WasmInlineAsmRegClass::local) => cx.type_i32(),
Bpf(BpfInlineAsmRegClass::reg) => cx.type_i64(),
Bpf(BpfInlineAsmRegClass::wreg) => cx.type_i32(),
Avr(AvrInlineAsmRegClass::reg) => cx.type_i8(),
Avr(AvrInlineAsmRegClass::reg_upper) => cx.type_i8(),
Avr(AvrInlineAsmRegClass::reg_pair) => cx.type_i16(),
Avr(AvrInlineAsmRegClass::reg_iw) => cx.type_i16(),
Avr(AvrInlineAsmRegClass::reg_ptr) => cx.type_i16(),
S390x(S390xInlineAsmRegClass::reg | S390xInlineAsmRegClass::reg_addr) => cx.type_i32(),
S390x(S390xInlineAsmRegClass::freg) => cx.type_f64(),
Msp430(Msp430InlineAsmRegClass::reg) => cx.type_i16(),
M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
SpirV(SpirVInlineAsmRegClass::reg) => bug!("LLVM backend does not support SPIR-V"),
Err => unreachable!(),
}
}
@ -940,9 +908,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
layout: &TyAndLayout<'tcx>,
instance: Instance<'_>,
) -> &'ll Value {
use InlineAsmRegClass::*;
let dl = &bx.tcx.data_layout;
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
@ -950,7 +919,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
value
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
@ -963,26 +932,25 @@ fn llvm_fixup_input<'ll, 'tcx>(
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, count);
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
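
Aside: a sketch (not part of this commit) of the shufflevector semantics this arm relies on: index i selects lane i of the first operand when i is below its lane count, otherwise lane i minus that count from the second operand. Passing indices 0..count*2 therefore widens the 8-byte vector to 16 bytes, with the upper lanes taken from undef.

fn shuffle<T: Copy>(a: &[T], b: &[T], idx: &[usize]) -> Vec<T> {
    // Model of LLVM shufflevector: index i < a.len() picks a[i],
    // otherwise it picks b[i - a.len()].
    idx.iter()
        .map(|&i| if i < a.len() { a[i] } else { b[i - a.len()] })
        .collect()
}

fn main() {
    let value = [1, 2]; // a 2-lane vector
    let undef = [0, 0]; // stand-in for the undef operand
    // Indices 0..4 double the lane count; the upper half comes from undef.
    assert_eq!(shuffle(&value, &undef, &[0, 1, 2, 3]), vec![1, 2, 0, 0]);
}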
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_i64())
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -994,7 +962,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
bx.bitcast(value, bx.type_vector(bx.type_i32(), 4))
}
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1009,7 +977,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
bx.bitcast(value, bx.type_vector(bx.type_i16(), 8))
}
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1018,10 +986,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_f32())
} else {
@ -1029,7 +994,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
@ -1043,7 +1008,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16
@ -1055,7 +1020,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_i16(), count))
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => bx.zext(value, bx.cx.type_i32()),
@ -1064,7 +1029,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
_ => value,
}
}
(InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@ -1086,15 +1051,16 @@ fn llvm_fixup_output<'ll, 'tcx>(
layout: &TyAndLayout<'tcx>,
instance: Instance<'_>,
) -> &'ll Value {
use InlineAsmRegClass::*;
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
bx.extract_element(value, bx.const_i32(0))
} else {
value
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
value = bx.extract_element(value, bx.const_i32(0));
@ -1103,26 +1069,25 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
value
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, count * 2);
let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
bx.bitcast(value, bx.cx.type_f64())
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1134,7 +1099,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
bx.bitcast(value, bx.type_f128())
}
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1145,7 +1110,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
bx.extract_element(value, bx.const_usize(0))
}
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1154,10 +1119,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
bx.bitcast(value, bx.cx.type_i32())
} else {
@ -1165,7 +1127,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
@ -1179,7 +1141,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16
@ -1191,7 +1153,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
bx.bitcast(value, bx.type_vector(bx.type_f16(), count))
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8, _) => bx.trunc(value, bx.cx.type_i8()),
@ -1201,7 +1163,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
_ => value,
}
}
(InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(bx, instance, &[sym::zfhmin, sym::zfh]) =>
{
@ -1220,39 +1182,39 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
layout: &TyAndLayout<'tcx>,
instance: Instance<'_>,
) -> &'ll Type {
use InlineAsmRegClass::*;
match (reg, layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
(AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.primitive() {
cx.type_vector(cx.type_i8(), 8)
} else {
layout.llvm_type(cx)
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s))
if s.primitive() != Primitive::Float(Float::F128) =>
{
let elem_ty = llvm_asm_scalar_type(cx, s);
let count = 16 / layout.size.bytes();
cx.type_vector(elem_ty, count)
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
(AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Vector { element, count })
if layout.size.bytes() == 8 =>
{
let elem_ty = llvm_asm_scalar_type(cx, element);
cx.type_vector(elem_ty, count * 2)
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
(X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F64) =>
{
cx.type_i64()
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1264,7 +1226,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
cx.type_vector(cx.type_i32(), 4)
}
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1272,7 +1234,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
Abi::Scalar(s),
) if s.primitive() == Primitive::Float(Float::F16) => cx.type_vector(cx.type_i16(), 8),
(
InlineAsmRegClass::X86(
X86(
X86InlineAsmRegClass::xmm_reg
| X86InlineAsmRegClass::ymm_reg
| X86InlineAsmRegClass::zmm_reg,
@ -1281,10 +1243,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
(Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I32, _) = s.primitive() {
cx.type_f32()
} else {
@ -1292,7 +1251,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
@ -1306,7 +1265,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
}
}
(
InlineAsmRegClass::Arm(
Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16
@ -1318,7 +1277,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
) if element.primitive() == Primitive::Float(Float::F16) => {
cx.type_vector(cx.type_i16(), count)
}
(InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
(Mips(MipsInlineAsmRegClass::reg), Abi::Scalar(s)) => {
match s.primitive() {
// MIPS only supports register-length arithmetics.
Primitive::Int(Integer::I8 | Integer::I16, _) => cx.type_i32(),
@ -1327,7 +1286,7 @@ fn llvm_fixup_output_type<'ll, 'tcx>(
_ => layout.llvm_type(cx),
}
}
(InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
(RiscV(RiscVInlineAsmRegClass::freg), Abi::Scalar(s))
if s.primitive() == Primitive::Float(Float::F16)
&& !any_target_feature_enabled(cx, instance, &[sym::zfhmin, sym::zfh]) =>
{

View file

@ -403,8 +403,9 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
to_add.push(AttributeKind::Naked.create_attr(cx.llcx));
// HACK(jubilee): "indirect branch tracking" works by attaching prologues to functions.
// And it is a module-level attribute, so the alternative is pulling naked functions into new LLVM modules.
// Otherwise LLVM's "naked" functions come with endbr prefixes per https://github.com/rust-lang/rust/issues/98768
// And it is a module-level attribute, so the alternative is pulling naked functions into
// new LLVM modules. Otherwise LLVM's "naked" functions come with endbr prefixes per
// https://github.com/rust-lang/rust/issues/98768
to_add.push(AttributeKind::NoCfCheck.create_attr(cx.llcx));
if llvm_util::get_version() < (19, 0, 0) {
// Prior to LLVM 19, branch-target-enforcement was disabled by setting the attribute to
@ -454,7 +455,8 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>(
flags |= AllocKindFlags::Zeroed;
}
to_add.push(llvm::CreateAllocKindAttr(cx.llcx, flags));
// apply to return place instead of function (unlike all other attributes applied in this function)
// apply to return place instead of function (unlike all other attributes applied in this
// function)
let no_alias = AttributeKind::NoAlias.create_attr(cx.llcx);
attributes::apply_to_llfn(llfn, AttributePlace::ReturnValue, &[no_alias]);
}

View file

@ -156,15 +156,15 @@ fn get_bitcode_slice_from_object_data<'a>(
obj: &'a [u8],
cgcx: &CodegenContext<LlvmCodegenBackend>,
) -> Result<&'a [u8], LtoBitcodeFromRlib> {
// We're about to assume the data here is an object file with sections, but if it's raw LLVM IR that
// won't work. Fortunately, if that's what we have we can just return the object directly, so we sniff
// the relevant magic strings here and return.
// We're about to assume the data here is an object file with sections, but if it's raw LLVM IR
// that won't work. Fortunately, if that's what we have we can just return the object directly,
// so we sniff the relevant magic strings here and return.
if obj.starts_with(b"\xDE\xC0\x17\x0B") || obj.starts_with(b"BC\xC0\xDE") {
return Ok(obj);
}
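
Aside: the two prefixes tested above are the LLVM bitcode wrapper magic 0x0B17C0DE (stored little-endian, hence the reversed byte order) and the raw bitcode magic "BC\xC0\xDE". A self-contained sniffing helper along the same lines (a sketch, not part of this commit):

fn looks_like_llvm_bitcode(data: &[u8]) -> bool {
    // Wrapper magic 0x0B17C0DE in little-endian order, or raw "BC\xC0\xDE".
    data.starts_with(b"\xDE\xC0\x17\x0B") || data.starts_with(b"BC\xC0\xDE")
}

fn main() {
    assert!(looks_like_llvm_bitcode(b"BC\xC0\xDE...module..."));
    assert!(!looks_like_llvm_bitcode(b"\x7fELF"));
}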
// We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment name"
// which in the public API for sections gets treated as part of the section name, but internally
// in MachOObjectFile.cpp gets treated separately.
// We drop the "__LLVM," prefix here because on Apple platforms there's a notion of "segment
// name" which in the public API for sections gets treated as part of the section name, but
// internally in MachOObjectFile.cpp gets treated separately.
let section_name = bitcode_section_name(cgcx).trim_start_matches("__LLVM,");
let mut len = 0;
let data = unsafe {

View file

@ -30,7 +30,7 @@ impl OwnedTargetMachine {
data_sections: bool,
unique_section_names: bool,
trap_unreachable: bool,
singletree: bool,
singlethread: bool,
verbose_asm: bool,
emit_stack_size_section: bool,
relax_elf_relocations: bool,
@ -62,7 +62,7 @@ impl OwnedTargetMachine {
data_sections,
unique_section_names,
trap_unreachable,
singletree,
singlethread,
verbose_asm,
emit_stack_size_section,
relax_elf_relocations,
@ -86,15 +86,17 @@ impl Deref for OwnedTargetMachine {
type Target = llvm::TargetMachine;
fn deref(&self) -> &Self::Target {
// SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
// SAFETY: constructing ensures we have a valid pointer created by
// llvm::LLVMRustCreateTargetMachine.
unsafe { self.tm_unique.as_ref() }
}
}
impl Drop for OwnedTargetMachine {
fn drop(&mut self) {
// SAFETY: constructing ensures we have a valid pointer created by llvm::LLVMRustCreateTargetMachine
// OwnedTargetMachine is not copyable so there is no double free or use after free
// SAFETY: constructing ensures we have a valid pointer created by
// llvm::LLVMRustCreateTargetMachine OwnedTargetMachine is not copyable so there is no
// double free or use after free.
unsafe {
llvm::LLVMRustDisposeTargetMachine(self.tm_unique.as_mut());
}
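
Aside: both safety comments lean on the standard owned-pointer pattern: the wrapper can only be constructed from a valid pointer, and because it is neither Copy nor Clone, Drop runs exactly once per pointer. A generic sketch of that shape (hypothetical names, with Box standing in for the FFI allocator; not part of this commit):

use std::ptr::NonNull;

struct Owned {
    raw: NonNull<i32>,
}

impl Owned {
    fn new(v: i32) -> Owned {
        // Stand-in for the FFI create call; always yields a valid pointer.
        Owned { raw: NonNull::new(Box::into_raw(Box::new(v))).unwrap() }
    }
}

impl Drop for Owned {
    fn drop(&mut self) {
        // SAFETY: `raw` came from Box::into_raw in `new`, and `Owned` is
        // neither Copy nor Clone, so this frees exactly once.
        unsafe { drop(Box::from_raw(self.raw.as_ptr())) }
    }
}

fn main() {
    let _tm = Owned::new(7); // disposed when `_tm` goes out of scope
}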

View file

@ -38,7 +38,7 @@ use crate::errors::{
CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, UnknownCompression,
WithLlvmError, WriteBytecode,
};
use crate::llvm::diagnostic::OptimizationDiagnosticKind;
use crate::llvm::diagnostic::OptimizationDiagnosticKind::*;
use crate::llvm::{self, DiagnosticInfo, PassManager};
use crate::type_::Type;
use crate::{base, common, llvm_util, LlvmCodegenBackend, ModuleLlvm};
@ -157,7 +157,8 @@ fn to_pass_builder_opt_level(cfg: config::OptLevel) -> llvm::PassBuilderOptLevel
fn to_llvm_relocation_model(relocation_model: RelocModel) -> llvm::RelocModel {
match relocation_model {
RelocModel::Static => llvm::RelocModel::Static,
// LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra attribute.
// LLVM doesn't have a PIE relocation model, it represents PIE as PIC with an extra
// attribute.
RelocModel::Pic | RelocModel::Pie => llvm::RelocModel::PIC,
RelocModel::DynamicNoPic => llvm::RelocModel::DynamicNoPic,
RelocModel::Ropi => llvm::RelocModel::ROPI,
@ -188,8 +189,8 @@ pub(crate) fn target_machine_factory(
let use_softfp = if sess.target.arch == "arm" && sess.target.abi == "eabihf" {
sess.opts.cg.soft_float
} else {
// `validate_commandline_args_with_session_available` has already warned about this being ignored.
// Let's make sure LLVM doesn't suddenly start using this flag on more targets.
// `validate_commandline_args_with_session_available` has already warned about this being
// ignored. Let's make sure LLVM doesn't suddenly start using this flag on more targets.
false
};
@ -446,13 +447,12 @@ unsafe extern "C" fn diagnostic_handler(info: &DiagnosticInfo, user: *mut c_void
column: opt.column,
pass_name: &opt.pass_name,
kind: match opt.kind {
OptimizationDiagnosticKind::OptimizationRemark => "success",
OptimizationDiagnosticKind::OptimizationMissed
| OptimizationDiagnosticKind::OptimizationFailure => "missed",
OptimizationDiagnosticKind::OptimizationAnalysis
| OptimizationDiagnosticKind::OptimizationAnalysisFPCommute
| OptimizationDiagnosticKind::OptimizationAnalysisAliasing => "analysis",
OptimizationDiagnosticKind::OptimizationRemarkOther => "other",
OptimizationRemark => "success",
OptimizationMissed | OptimizationFailure => "missed",
OptimizationAnalysis
| OptimizationAnalysisFPCommute
| OptimizationAnalysisAliasing => "analysis",
OptimizationRemarkOther => "other",
},
message: &opt.message,
});
@ -945,11 +945,12 @@ fn create_section_with_flags_asm(section_name: &str, section_flags: &str, data:
}
fn target_is_apple(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {
cgcx.opts.target_triple.triple().contains("-ios")
|| cgcx.opts.target_triple.triple().contains("-darwin")
|| cgcx.opts.target_triple.triple().contains("-tvos")
|| cgcx.opts.target_triple.triple().contains("-watchos")
|| cgcx.opts.target_triple.triple().contains("-visionos")
let triple = cgcx.opts.target_triple.triple();
triple.contains("-ios")
|| triple.contains("-darwin")
|| triple.contains("-tvos")
|| triple.contains("-watchos")
|| triple.contains("-visionos")
}
fn target_is_aix(cgcx: &CodegenContext<LlvmCodegenBackend>) -> bool {

View file

@ -120,7 +120,7 @@ impl<'ll, 'tcx> Deref for Builder<'_, 'll, 'tcx> {
}
}
macro_rules! builder_methods_for_value_instructions {
macro_rules! math_builder_methods {
($($name:ident($($arg:ident),*) => $llvm_capi:ident),+ $(,)?) => {
$(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
unsafe {
@ -130,6 +130,18 @@ macro_rules! builder_methods_for_value_instructions {
}
}
macro_rules! set_math_builder_methods {
($($name:ident($($arg:ident),*) => ($llvm_capi:ident, $llvm_set_math:ident)),+ $(,)?) => {
$(fn $name(&mut self, $($arg: &'ll Value),*) -> &'ll Value {
unsafe {
let instr = llvm::$llvm_capi(self.llbuilder, $($arg,)* UNNAMED);
llvm::$llvm_set_math(instr);
instr
}
})+
}
}
impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
type CodegenCx = CodegenCx<'ll, 'tcx>;
@ -267,7 +279,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
builder_methods_for_value_instructions! {
math_builder_methods! {
add(a, b) => LLVMBuildAdd,
fadd(a, b) => LLVMBuildFAdd,
sub(a, b) => LLVMBuildSub,
@ -299,84 +311,17 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
unchecked_umul(x, y) => LLVMBuildNUWMul,
}
fn fadd_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetFastMath(instr);
instr
}
}
fn fsub_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetFastMath(instr);
instr
}
}
fn fmul_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetFastMath(instr);
instr
}
}
fn fdiv_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetFastMath(instr);
instr
}
}
fn frem_fast(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetFastMath(instr);
instr
}
}
fn fadd_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFAdd(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetAlgebraicMath(instr);
instr
}
}
fn fsub_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFSub(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetAlgebraicMath(instr);
instr
}
}
fn fmul_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFMul(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetAlgebraicMath(instr);
instr
}
}
fn fdiv_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFDiv(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetAlgebraicMath(instr);
instr
}
}
fn frem_algebraic(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value {
unsafe {
let instr = llvm::LLVMBuildFRem(self.llbuilder, lhs, rhs, UNNAMED);
llvm::LLVMRustSetAlgebraicMath(instr);
instr
}
set_math_builder_methods! {
fadd_fast(x, y) => (LLVMBuildFAdd, LLVMRustSetFastMath),
fsub_fast(x, y) => (LLVMBuildFSub, LLVMRustSetFastMath),
fmul_fast(x, y) => (LLVMBuildFMul, LLVMRustSetFastMath),
fdiv_fast(x, y) => (LLVMBuildFDiv, LLVMRustSetFastMath),
frem_fast(x, y) => (LLVMBuildFRem, LLVMRustSetFastMath),
fadd_algebraic(x, y) => (LLVMBuildFAdd, LLVMRustSetAlgebraicMath),
fsub_algebraic(x, y) => (LLVMBuildFSub, LLVMRustSetAlgebraicMath),
fmul_algebraic(x, y) => (LLVMBuildFMul, LLVMRustSetAlgebraicMath),
fdiv_algebraic(x, y) => (LLVMBuildFDiv, LLVMRustSetAlgebraicMath),
frem_algebraic(x, y) => (LLVMBuildFRem, LLVMRustSetAlgebraicMath),
}
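
Aside: a self-contained miniature (not part of this commit) of the macro pattern behind math_builder_methods! and set_math_builder_methods!: one declarative macro stamps out a family of near-identical methods, here with a post-processing hook playing the role of LLVMRustSetFastMath / LLVMRustSetAlgebraicMath.

macro_rules! wrapped_ops {
    ($($name:ident => ($op:tt, $post:ident)),+ $(,)?) => {
        $(fn $name(a: f64, b: f64) -> f64 {
            let r = a $op b;
            $post(r) // the hook applied to every generated method
        })+
    }
}

fn flag(x: f64) -> f64 {
    println!("flagged {x}");
    x
}

wrapped_ops! {
    add_flagged => (+, flag),
    mul_flagged => (*, flag),
}

fn main() {
    assert_eq!(add_flagged(2.0, 3.0), 5.0);
    assert_eq!(mul_flagged(2.0, 3.0), 6.0);
}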
fn checked_binop(
@ -459,6 +404,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
val
}
}
fn to_immediate_scalar(&mut self, val: Self::Value, scalar: abi::Scalar) -> Self::Value {
if scalar.is_bool() {
return self.trunc(val, self.cx().type_i1());
@ -727,11 +673,11 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
// for performance. LLVM doesn't seem to care about this, and will happily treat
// `!nontemporal` stores as-if they were normal stores (for reordering optimizations
// etc) even on x86, despite later lowering them to MOVNT which do *not* behave like
// regular stores but require special fences.
// So we keep a list of architectures where `!nontemporal` is known to be truly just
// a hint, and use regular stores everywhere else.
// (In the future, we could alternatively ensure that an sfence gets emitted after a sequence of movnt
// before any kind of synchronizing operation. But it's not clear how to do that with LLVM.)
// regular stores but require special fences. So we keep a list of architectures
// where `!nontemporal` is known to be truly just a hint, and use regular stores
// everywhere else. (In the future, we could alternatively ensure that an sfence
// gets emitted after a sequence of movnt before any kind of synchronizing
// operation. But it's not clear how to do that with LLVM.)
// For more context, see <https://github.com/rust-lang/rust/issues/114582> and
// <https://github.com/llvm/llvm-project/issues/64521>.
const WELL_BEHAVED_NONTEMPORAL_ARCHS: &[&str] =
@ -1160,6 +1106,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
(val, success)
}
}
fn atomic_rmw(
&mut self,
op: rustc_codegen_ssa::common::AtomicRmwBinOp,

View file

@ -15,11 +15,6 @@ use crate::value::Value;
/// Codegens a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
///
/// # Parameters
///
/// - `cx`: the crate context
/// - `instance`: the instance to be instantiated
pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
let tcx = cx.tcx();
@ -106,62 +101,42 @@ pub(crate) fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'t
let is_generic =
instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
// on whether we are in share-generics mode.
if cx.tcx.sess.opts.share_generics() {
// We are in share_generics mode.
let is_hidden = if is_generic {
// This is a monomorphization of a generic function.
if !cx.tcx.sess.opts.share_generics() {
// When not sharing generics, all instances are in the same
// crate and have hidden visibility.
true
} else {
if let Some(instance_def_id) = instance_def_id.as_local() {
// This is a definition from the current crate. If the
// definition is unreachable for downstream crates or
// the current crate does not re-export generics, the
// definition of the instance will have been declared
// as `hidden`.
if cx.tcx.is_unreachable_local_definition(instance_def_id)
// This is a monomorphization of a generic function
// defined in the current crate. It is hidden if:
// - the definition is unreachable for downstream
// crates, or
// - the current crate does not re-export generics
// (because the crate is a C library or executable)
cx.tcx.is_unreachable_local_definition(instance_def_id)
|| !cx.tcx.local_crate_exports_generics()
{
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a monomorphization of a generic function
// defined in an upstream crate.
if instance.upstream_monomorphization(tcx).is_some() {
// This is instantiated in another crate. It cannot
// be `hidden`.
} else {
// This is a local instantiation of an upstream definition.
// If the current crate does not re-export it
// (because it is a C library or an executable), it
// will have been declared `hidden`.
if !cx.tcx.local_crate_exports_generics() {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
}
// defined in an upstream crate. It is hidden if:
// - it is instantiated in this crate, and
// - the current crate does not re-export generics
instance.upstream_monomorphization(tcx).is_none()
&& !cx.tcx.local_crate_exports_generics()
}
} else {
// When not sharing generics, all instances are in the same
// crate and have hidden visibility
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a non-generic function
if cx.tcx.is_codegened_item(instance_def_id) {
// This is a function that is instantiated in the local crate
if instance_def_id.is_local() {
// This is function that is defined in the local crate.
// If it is not reachable, it is hidden.
if !cx.tcx.is_reachable_non_generic(instance_def_id) {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a function from an upstream crate that has
// been instantiated here. These are always hidden.
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
}
// This is a non-generic function. It is hidden if:
// - it is instantiated in the local crate, and
// - it is defined an upstream crate (non-local), or
// - it is not reachable
cx.tcx.is_codegened_item(instance_def_id)
&& (!instance_def_id.is_local()
|| !cx.tcx.is_reachable_non_generic(instance_def_id))
};
if is_hidden {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
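
Aside: the shape of this refactor in miniature (hypothetical conditions, not part of this commit): instead of calling the side-effecting function from several branches, compute one boolean and keep a single call site.

fn set_hidden() {
    println!("visibility = hidden");
}

// Before: the side effect is scattered across the branches.
fn before(is_generic: bool, share_generics: bool, exports_generics: bool) {
    if is_generic {
        if !share_generics {
            set_hidden();
        } else if !exports_generics {
            set_hidden();
        }
    } else {
        set_hidden();
    }
}

// After: one decision, one call site.
fn after(is_generic: bool, share_generics: bool, exports_generics: bool) {
    let hidden = if is_generic { !share_generics || !exports_generics } else { true };
    if hidden {
        set_hidden();
    }
}

fn main() {
    before(true, true, false);
    after(true, true, false); // same observable behavior
}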
// MinGW: For backward compatibility we rely on the linker to decide whether it

View file

@ -126,25 +126,14 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
unsafe { llvm::LLVMGetPoison(t) }
}
fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i as u64, True) }
}
fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i, False) }
}
fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
fn const_bool(&self, val: bool) -> &'ll Value {
self.const_uint(self.type_i1(), val as u64)
}
fn const_i8(&self, i: i8) -> &'ll Value {
self.const_int(self.type_i8(), i as i64)
}
fn const_i16(&self, i: i16) -> &'ll Value {
self.const_int(self.type_i16(), i as i64)
}
@ -153,8 +142,12 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.const_int(self.type_i32(), i as i64)
}
fn const_i8(&self, i: i8) -> &'ll Value {
self.const_int(self.type_i8(), i as i64)
fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i as u64, True) }
}
fn const_u8(&self, i: u8) -> &'ll Value {
self.const_uint(self.type_i8(), i as u64)
}
fn const_u32(&self, i: u32) -> &'ll Value {
@ -179,8 +172,15 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> {
self.const_uint(self.isize_ty, i)
}
fn const_u8(&self, i: u8) -> &'ll Value {
self.const_uint(self.type_i8(), i as u64)
fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i, False) }
}
fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
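
Aside: const_uint_big splits the u128 into two 64-bit words, least-significant word first, which is the order the code passes to LLVMConstIntOfArbitraryPrecision. The split in isolation (a sketch, not part of this commit):

fn split_u128(u: u128) -> [u64; 2] {
    // Low 64 bits first, then the high 64 bits.
    [u as u64, (u >> 64) as u64]
}

fn main() {
    let v = (1u128 << 64) + 2; // high word 1, low word 2
    assert_eq!(split_u128(v), [2, 1]);
}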
fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {

View file

@ -73,8 +73,8 @@ pub(crate) fn const_alloc_to_llvm<'ll>(
// Generating partially-uninit consts is limited to small numbers of chunks,
// to avoid the cost of generating large complex const expressions.
// For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element,
// and would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
// For example, `[(u32, u8); 1024 * 1024]` contains uninit padding in each element, and
// would result in `{ [5 x i8] zeroinitializer, [3 x i8] undef, ...repeat 1M times... }`.
let max = cx.sess().opts.unstable_opts.uninit_const_chunk_threshold;
let allow_uninit_chunks = chunks.clone().take(max.saturating_add(1)).count() <= max;
@ -249,8 +249,8 @@ impl<'ll> CodegenCx<'ll, '_> {
trace!(?instance);
let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
// Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
// the llvm type from the actual evaluated initializer.
// Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure
// out the llvm type from the actual evaluated initializer.
let llty = if nested {
self.type_i8()
} else {
@ -262,7 +262,7 @@ impl<'ll> CodegenCx<'ll, '_> {
}
#[instrument(level = "debug", skip(self, llty))]
pub(crate) fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
fn get_static_inner(&self, def_id: DefId, llty: &'ll Type) -> &'ll Value {
let instance = Instance::mono(self.tcx, def_id);
if let Some(&g) = self.instances.borrow().get(&instance) {
trace!("used cached value");
@ -320,15 +320,16 @@ impl<'ll> CodegenCx<'ll, '_> {
}
if !def_id.is_local() {
let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
let needs_dll_storage_attr = self.use_dll_storage_attrs
&& !self.tcx.is_foreign_item(def_id)
// Local definitions can never be imported, so we must not apply
// the DLLImport annotation.
!dso_local &&
&& !dso_local
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the attrs. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin based LTO is enabled.
!self.tcx.sess.opts.cg.linker_plugin_lto.enabled() &&
self.tcx.sess.lto() != Lto::Thin;
&& !self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
&& self.tcx.sess.lto() != Lto::Thin;
// If this assertion triggers, there's something wrong with commandline
// argument validation.
@ -551,8 +552,8 @@ impl<'ll> CodegenCx<'ll, '_> {
// `#[used(compiler)]` is explicitly requested. This is to avoid similar breakage
// on other targets, in particular MachO targets have *their* static constructor
// lists broken if `llvm.compiler.used` is emitted rather than `llvm.used`. However,
// that check happens when assigning the `CodegenFnAttrFlags` in `rustc_hir_analysis`,
// so we don't need to take care of it here.
// that check happens when assigning the `CodegenFnAttrFlags` in
// `rustc_hir_analysis`, so we don't need to take care of it here.
self.add_compiler_used_global(g);
}
if attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) {

View file

@ -35,8 +35,8 @@ use crate::type_::Type;
use crate::value::Value;
use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util};
/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
/// There is one `CodegenCx` per codegen unit. Each one has its own LLVM
/// `llvm::Context` so that several codegen units may be processed in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub(crate) struct CodegenCx<'ll, 'tcx> {
pub tcx: TyCtxt<'tcx>,
@ -231,7 +231,8 @@ pub(crate) unsafe fn create_module<'ll>(
}
}
// Enable LTO unit splitting if specified or if CFI is enabled. (See https://reviews.llvm.org/D53891.)
// Enable LTO unit splitting if specified or if CFI is enabled. (See
// https://reviews.llvm.org/D53891.)
if sess.is_split_lto_unit_enabled() || sess.is_sanitizer_cfi_enabled() {
let enable_split_lto_unit = c"EnableSplitLTOUnit".as_ptr();
unsafe {

View file

@ -121,7 +121,8 @@ mod mcdc {
num_conditions: u16,
}
// ConditionId in llvm is `unsigned int` at 18 while `int16_t` at [19](https://github.com/llvm/llvm-project/pull/81257)
// ConditionId in LLVM is `unsigned int` in LLVM 18 but `int16_t` in
// [LLVM 19](https://github.com/llvm/llvm-project/pull/81257).
type LLVMConditionId = i16;
/// Must match the layout of `LLVMRustMCDCBranchParameters`.

View file

@ -48,11 +48,10 @@ impl<'ll, 'tcx> CrateCoverageContext<'ll, 'tcx> {
self.function_coverage_map.replace(FxIndexMap::default())
}
/// LLVM use a temp value to record evaluated mcdc test vector of each decision, which is called condition bitmap.
/// In order to handle nested decisions, several condition bitmaps can be
/// allocated for a function body.
/// These values are named `mcdc.addr.{i}` and are a 32-bit integers.
/// They respectively hold the condition bitmaps for decisions with a depth of `i`.
/// LLVM uses a temp value to record the evaluated MC/DC test vector of each decision, which
/// is called the condition bitmap. In order to handle nested decisions, several condition
/// bitmaps can be allocated for a function body. These values are named `mcdc.addr.{i}` and
/// are 32-bit integers; each holds the condition bitmap for decisions at depth `i`.
fn try_get_mcdc_condition_bitmap(
&self,
instance: &Instance<'tcx>,
@ -157,8 +156,8 @@ impl<'tcx> CoverageInfoBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> {
),
CoverageKind::CounterIncrement { id } => {
func_coverage.mark_counter_id_seen(id);
// We need to explicitly drop the `RefMut` before calling into `instrprof_increment`,
// as that needs an exclusive borrow.
// We need to explicitly drop the `RefMut` before calling into
// `instrprof_increment`, as that needs an exclusive borrow.
drop(coverage_map);
// The number of counters passed to `llvm.instrprof.increment` might

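As a loose illustration of what a "condition bitmap" holds (this simplifies LLVM's real MC/DC encoding, which goes through the `llvm.instrprof.mcdc.*` intrinsics; all names here are hypothetical):

    // Record one condition's outcome in the 32-bit bitmap for its decision.
    // A decision nested at depth `i` would use its own `mcdc.addr.{i}` slot.
    fn record_condition(bitmap: &mut u32, condition_id: u32, value: bool) {
        if value {
            *bitmap |= 1 << condition_id;
        }
    }

    fn main() {
        let mut bitmap = 0u32; // would live in `mcdc.addr.0` for depth 0
        record_condition(&mut bitmap, 0, true);
        record_condition(&mut bitmap, 1, false);
        assert_eq!(bitmap, 0b01);
    }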
View file

@ -44,7 +44,8 @@ pub(crate) fn get_or_insert_gdb_debug_scripts_section_global<'ll>(
// Add the pretty printers for the standard library first.
section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
// Next, add the pretty printers that were specified via the `#[debugger_visualizer]` attribute.
// Next, add the pretty printers that were specified via the `#[debugger_visualizer]`
// attribute.
let visualizers = collect_debugger_visualizers_transitive(
cx.tcx,
DebuggerVisualizerType::GdbPrettyPrinter,

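For context on the byte string above: entries in the `.debug_gdb_scripts` section are a one-byte kind tag followed by a NUL-terminated name, with `\x01` marking a script file for GDB to load. A sketch of assembling such a payload:

    fn main() {
        let mut section_contents = Vec::new();
        // 0x01 tag + NUL-terminated script file name.
        section_contents.extend_from_slice(b"\x01gdb_load_rust_pretty_printers.py\0");
        // Entries from `#[debugger_visualizer]` would be appended in the same form.
        assert_eq!(section_contents[0], 0x01);
        assert_eq!(*section_contents.last().unwrap(), 0x00);
    }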
View file

@ -216,8 +216,9 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
// need to make sure that we don't break existing debuginfo consumers
// by doing that (at least not without a warning period).
let layout_type = if ptr_type.is_box() {
// The assertion at the start of this function ensures we have a ZST allocator.
// We'll make debuginfo "skip" all ZST allocators, not just the default allocator.
// The assertion at the start of this function ensures we have a ZST
// allocator. We'll make debuginfo "skip" all ZST allocators, not just the
// default allocator.
Ty::new_mut_ptr(cx.tcx, pointee_type)
} else {
ptr_type
@ -280,8 +281,7 @@ fn build_subroutine_type_di_node<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
unique_type_id: UniqueTypeId<'tcx>,
) -> DINodeCreationResult<'ll> {
// It's possible to create a self-referential
// type in Rust by using 'impl trait':
// It's possible to create a self-referential type in Rust by using 'impl trait':
//
// fn foo() -> impl Copy { foo }
//
@ -573,14 +573,14 @@ pub(crate) fn file_metadata<'ll>(cx: &CodegenCx<'ll, '_>, source_file: &SourceFi
{
// If the compiler's working directory (which also is the DW_AT_comp_dir of
// the compilation unit) is a prefix of the path we are about to emit, then
// only emit the part relative to the working directory.
// Because of path remapping we sometimes see strange things here: `abs_path`
// might actually look like a relative path
// (e.g. `<crate-name-and-version>/src/lib.rs`), so if we emit it without
// taking the working directory into account, downstream tooling will
// interpret it as `<working-directory>/<crate-name-and-version>/src/lib.rs`,
// which makes no sense. Usually in such cases the working directory will also
// be remapped to `<crate-name-and-version>` or some other prefix of the path
// only emit the part relative to the working directory. Because of path
// remapping we sometimes see strange things here: `abs_path` might
// actually look like a relative path (e.g.
// `<crate-name-and-version>/src/lib.rs`), so if we emit it without taking
// the working directory into account, downstream tooling will interpret it
// as `<working-directory>/<crate-name-and-version>/src/lib.rs`, which
// makes no sense. Usually in such cases the working directory will also be
// remapped to `<crate-name-and-version>` or some other prefix of the path
// we are remapping, so we end up with
// `<crate-name-and-version>/<crate-name-and-version>/src/lib.rs`.
// By moving the working directory portion into the `directory` part of the

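The fix described above boils down to a prefix split: when the working directory (`DW_AT_comp_dir`) is a prefix of `abs_path`, emit only the relative remainder and let the directory attribute supply the rest. A minimal sketch (helper name hypothetical):

    use std::path::Path;

    // If `comp_dir` is a prefix of the path, keep just the relative part;
    // otherwise emit the path unchanged.
    fn strip_comp_dir<'a>(abs_path: &'a Path, comp_dir: &Path) -> &'a Path {
        abs_path.strip_prefix(comp_dir).unwrap_or(abs_path)
    }

    fn main() {
        let p = Path::new("/work/crate-1.0/src/lib.rs");
        assert_eq!(strip_comp_dir(p, Path::new("/work")), Path::new("crate-1.0/src/lib.rs"));
    }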
View file

@ -13,8 +13,7 @@ pub(crate) fn mangled_name_of_instance<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
instance: Instance<'tcx>,
) -> ty::SymbolName<'tcx> {
let tcx = cx.tcx;
tcx.symbol_name(instance)
cx.tcx.symbol_name(instance)
}
pub(crate) fn item_namespace<'ll>(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {

View file

@ -404,7 +404,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
let llvm_name =
&format!("llvm.fsh{}.i{}", if is_left { 'l' } else { 'r' }, width);
// llvm expects shift to be the same type as the values, but rust always uses `u32`
// llvm expects shift to be the same type as the values, but rust
// always uses `u32`.
let raw_shift = self.intcast(raw_shift, self.val_ty(val), false);
self.call_intrinsic(llvm_name, &[val, val, raw_shift])
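The `intcast` is needed because `llvm.fshl`/`llvm.fshr` require all three operands to share one type, while Rust's rotate methods always take a `u32` shift. Since a funnel shift with both data operands equal is a rotate, here is a rough model of the widened call for `u64` values (my sketch, not the compiler's code):

    // Models `llvm.fshl.i64(val, val, shift)`, i.e. rotate left: the shift
    // is first cast to the value's width, then taken modulo the bit width.
    fn fshl_same_operands(val: u64, raw_shift: u32) -> u64 {
        let shift = (raw_shift as u64) % 64;
        (val << shift) | val.checked_shr(64 - shift as u32).unwrap_or(0)
    }

    fn main() {
        let v = 0x8000_0000_0000_0001_u64;
        assert_eq!(fshl_same_operands(v, 1), v.rotate_left(1));
        assert_eq!(fshl_same_operands(v, 0), v);
    }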
@ -573,8 +574,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> {
span,
) {
Ok(llval) => llval,
// If there was an error, just skip this invocation... we'll abort compilation anyway,
// but we can keep codegen'ing to find more errors.
// If there was an error, just skip this invocation... we'll abort compilation
// anyway, but we can keep codegen'ing to find more errors.
Err(()) => return Ok(()),
}
}
@ -1847,7 +1848,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
require!(
matches!(
*pointer_ty.kind(),
ty::RawPtr(p_ty, p_mutbl) if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
ty::RawPtr(p_ty, p_mutbl)
if p_ty == values_elem && p_ty.kind() == values_elem.kind() && p_mutbl.is_mut()
),
InvalidMonomorphization::ExpectedElementType {
span,

View file

@ -220,17 +220,18 @@ pub enum IntPredicate {
impl IntPredicate {
pub fn from_generic(intpre: rustc_codegen_ssa::common::IntPredicate) -> Self {
use rustc_codegen_ssa::common::IntPredicate as Common;
match intpre {
rustc_codegen_ssa::common::IntPredicate::IntEQ => IntPredicate::IntEQ,
rustc_codegen_ssa::common::IntPredicate::IntNE => IntPredicate::IntNE,
rustc_codegen_ssa::common::IntPredicate::IntUGT => IntPredicate::IntUGT,
rustc_codegen_ssa::common::IntPredicate::IntUGE => IntPredicate::IntUGE,
rustc_codegen_ssa::common::IntPredicate::IntULT => IntPredicate::IntULT,
rustc_codegen_ssa::common::IntPredicate::IntULE => IntPredicate::IntULE,
rustc_codegen_ssa::common::IntPredicate::IntSGT => IntPredicate::IntSGT,
rustc_codegen_ssa::common::IntPredicate::IntSGE => IntPredicate::IntSGE,
rustc_codegen_ssa::common::IntPredicate::IntSLT => IntPredicate::IntSLT,
rustc_codegen_ssa::common::IntPredicate::IntSLE => IntPredicate::IntSLE,
Common::IntEQ => Self::IntEQ,
Common::IntNE => Self::IntNE,
Common::IntUGT => Self::IntUGT,
Common::IntUGE => Self::IntUGE,
Common::IntULT => Self::IntULT,
Common::IntULE => Self::IntULE,
Common::IntSGT => Self::IntSGT,
Common::IntSGE => Self::IntSGE,
Common::IntSLT => Self::IntSLT,
Common::IntSLE => Self::IntSLE,
}
}
}
@ -259,27 +260,24 @@ pub enum RealPredicate {
impl RealPredicate {
pub fn from_generic(realp: rustc_codegen_ssa::common::RealPredicate) -> Self {
use rustc_codegen_ssa::common::RealPredicate as Common;
match realp {
rustc_codegen_ssa::common::RealPredicate::RealPredicateFalse => {
RealPredicate::RealPredicateFalse
}
rustc_codegen_ssa::common::RealPredicate::RealOEQ => RealPredicate::RealOEQ,
rustc_codegen_ssa::common::RealPredicate::RealOGT => RealPredicate::RealOGT,
rustc_codegen_ssa::common::RealPredicate::RealOGE => RealPredicate::RealOGE,
rustc_codegen_ssa::common::RealPredicate::RealOLT => RealPredicate::RealOLT,
rustc_codegen_ssa::common::RealPredicate::RealOLE => RealPredicate::RealOLE,
rustc_codegen_ssa::common::RealPredicate::RealONE => RealPredicate::RealONE,
rustc_codegen_ssa::common::RealPredicate::RealORD => RealPredicate::RealORD,
rustc_codegen_ssa::common::RealPredicate::RealUNO => RealPredicate::RealUNO,
rustc_codegen_ssa::common::RealPredicate::RealUEQ => RealPredicate::RealUEQ,
rustc_codegen_ssa::common::RealPredicate::RealUGT => RealPredicate::RealUGT,
rustc_codegen_ssa::common::RealPredicate::RealUGE => RealPredicate::RealUGE,
rustc_codegen_ssa::common::RealPredicate::RealULT => RealPredicate::RealULT,
rustc_codegen_ssa::common::RealPredicate::RealULE => RealPredicate::RealULE,
rustc_codegen_ssa::common::RealPredicate::RealUNE => RealPredicate::RealUNE,
rustc_codegen_ssa::common::RealPredicate::RealPredicateTrue => {
RealPredicate::RealPredicateTrue
}
Common::RealPredicateFalse => Self::RealPredicateFalse,
Common::RealOEQ => Self::RealOEQ,
Common::RealOGT => Self::RealOGT,
Common::RealOGE => Self::RealOGE,
Common::RealOLT => Self::RealOLT,
Common::RealOLE => Self::RealOLE,
Common::RealONE => Self::RealONE,
Common::RealORD => Self::RealORD,
Common::RealUNO => Self::RealUNO,
Common::RealUEQ => Self::RealUEQ,
Common::RealUGT => Self::RealUGT,
Common::RealUGE => Self::RealUGE,
Common::RealULT => Self::RealULT,
Common::RealULE => Self::RealULE,
Common::RealUNE => Self::RealUNE,
Common::RealPredicateTrue => Self::RealPredicateTrue,
}
}
}
@ -311,26 +309,27 @@ pub enum TypeKind {
impl TypeKind {
pub fn to_generic(self) -> rustc_codegen_ssa::common::TypeKind {
use rustc_codegen_ssa::common::TypeKind as Common;
match self {
TypeKind::Void => rustc_codegen_ssa::common::TypeKind::Void,
TypeKind::Half => rustc_codegen_ssa::common::TypeKind::Half,
TypeKind::Float => rustc_codegen_ssa::common::TypeKind::Float,
TypeKind::Double => rustc_codegen_ssa::common::TypeKind::Double,
TypeKind::X86_FP80 => rustc_codegen_ssa::common::TypeKind::X86_FP80,
TypeKind::FP128 => rustc_codegen_ssa::common::TypeKind::FP128,
TypeKind::PPC_FP128 => rustc_codegen_ssa::common::TypeKind::PPC_FP128,
TypeKind::Label => rustc_codegen_ssa::common::TypeKind::Label,
TypeKind::Integer => rustc_codegen_ssa::common::TypeKind::Integer,
TypeKind::Function => rustc_codegen_ssa::common::TypeKind::Function,
TypeKind::Struct => rustc_codegen_ssa::common::TypeKind::Struct,
TypeKind::Array => rustc_codegen_ssa::common::TypeKind::Array,
TypeKind::Pointer => rustc_codegen_ssa::common::TypeKind::Pointer,
TypeKind::Vector => rustc_codegen_ssa::common::TypeKind::Vector,
TypeKind::Metadata => rustc_codegen_ssa::common::TypeKind::Metadata,
TypeKind::Token => rustc_codegen_ssa::common::TypeKind::Token,
TypeKind::ScalableVector => rustc_codegen_ssa::common::TypeKind::ScalableVector,
TypeKind::BFloat => rustc_codegen_ssa::common::TypeKind::BFloat,
TypeKind::X86_AMX => rustc_codegen_ssa::common::TypeKind::X86_AMX,
Self::Void => Common::Void,
Self::Half => Common::Half,
Self::Float => Common::Float,
Self::Double => Common::Double,
Self::X86_FP80 => Common::X86_FP80,
Self::FP128 => Common::FP128,
Self::PPC_FP128 => Common::PPC_FP128,
Self::Label => Common::Label,
Self::Integer => Common::Integer,
Self::Function => Common::Function,
Self::Struct => Common::Struct,
Self::Array => Common::Array,
Self::Pointer => Common::Pointer,
Self::Vector => Common::Vector,
Self::Metadata => Common::Metadata,
Self::Token => Common::Token,
Self::ScalableVector => Common::ScalableVector,
Self::BFloat => Common::BFloat,
Self::X86_AMX => Common::X86_AMX,
}
}
}
@ -354,18 +353,19 @@ pub enum AtomicRmwBinOp {
impl AtomicRmwBinOp {
pub fn from_generic(op: rustc_codegen_ssa::common::AtomicRmwBinOp) -> Self {
use rustc_codegen_ssa::common::AtomicRmwBinOp as Common;
match op {
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXchg => AtomicRmwBinOp::AtomicXchg,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAdd => AtomicRmwBinOp::AtomicAdd,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicSub => AtomicRmwBinOp::AtomicSub,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicAnd => AtomicRmwBinOp::AtomicAnd,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicNand => AtomicRmwBinOp::AtomicNand,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicOr => AtomicRmwBinOp::AtomicOr,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicXor => AtomicRmwBinOp::AtomicXor,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMax => AtomicRmwBinOp::AtomicMax,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicMin => AtomicRmwBinOp::AtomicMin,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMax => AtomicRmwBinOp::AtomicUMax,
rustc_codegen_ssa::common::AtomicRmwBinOp::AtomicUMin => AtomicRmwBinOp::AtomicUMin,
Common::AtomicXchg => Self::AtomicXchg,
Common::AtomicAdd => Self::AtomicAdd,
Common::AtomicSub => Self::AtomicSub,
Common::AtomicAnd => Self::AtomicAnd,
Common::AtomicNand => Self::AtomicNand,
Common::AtomicOr => Self::AtomicOr,
Common::AtomicXor => Self::AtomicXor,
Common::AtomicMax => Self::AtomicMax,
Common::AtomicMin => Self::AtomicMin,
Common::AtomicUMax => Self::AtomicUMax,
Common::AtomicUMin => Self::AtomicUMin,
}
}
}
@ -387,17 +387,14 @@ pub enum AtomicOrdering {
impl AtomicOrdering {
pub fn from_generic(ao: rustc_codegen_ssa::common::AtomicOrdering) -> Self {
use rustc_codegen_ssa::common::AtomicOrdering as Common;
match ao {
rustc_codegen_ssa::common::AtomicOrdering::Unordered => AtomicOrdering::Unordered,
rustc_codegen_ssa::common::AtomicOrdering::Relaxed => AtomicOrdering::Monotonic,
rustc_codegen_ssa::common::AtomicOrdering::Acquire => AtomicOrdering::Acquire,
rustc_codegen_ssa::common::AtomicOrdering::Release => AtomicOrdering::Release,
rustc_codegen_ssa::common::AtomicOrdering::AcquireRelease => {
AtomicOrdering::AcquireRelease
}
rustc_codegen_ssa::common::AtomicOrdering::SequentiallyConsistent => {
AtomicOrdering::SequentiallyConsistent
}
Common::Unordered => Self::Unordered,
Common::Relaxed => Self::Monotonic,
Common::Acquire => Self::Acquire,
Common::Release => Self::Release,
Common::AcquireRelease => Self::AcquireRelease,
Common::SequentiallyConsistent => Self::SequentiallyConsistent,
}
}
}
@ -563,13 +560,11 @@ pub enum ArchiveKind {
K_AIXBIG,
}
// LLVMRustThinLTOData
unsafe extern "C" {
// LLVMRustThinLTOData
pub type ThinLTOData;
}
// LLVMRustThinLTOBuffer
unsafe extern "C" {
// LLVMRustThinLTOBuffer
pub type ThinLTOBuffer;
}
@ -633,26 +628,12 @@ struct InvariantOpaque<'a> {
// Opaque pointer types
unsafe extern "C" {
pub type Module;
}
unsafe extern "C" {
pub type Context;
}
unsafe extern "C" {
pub type Type;
}
unsafe extern "C" {
pub type Value;
}
unsafe extern "C" {
pub type ConstantInt;
}
unsafe extern "C" {
pub type Attribute;
}
unsafe extern "C" {
pub type Metadata;
}
unsafe extern "C" {
pub type BasicBlock;
}
#[repr(C)]
@ -661,11 +642,7 @@ pub struct Builder<'a>(InvariantOpaque<'a>);
pub struct PassManager<'a>(InvariantOpaque<'a>);
unsafe extern "C" {
pub type Pass;
}
unsafe extern "C" {
pub type TargetMachine;
}
unsafe extern "C" {
pub type Archive;
}
#[repr(C)]
@ -674,11 +651,7 @@ pub struct ArchiveIterator<'a>(InvariantOpaque<'a>);
pub struct ArchiveChild<'a>(InvariantOpaque<'a>);
unsafe extern "C" {
pub type Twine;
}
unsafe extern "C" {
pub type DiagnosticInfo;
}
unsafe extern "C" {
pub type SMDiagnostic;
}
#[repr(C)]
@ -2177,7 +2150,8 @@ unsafe extern "C" {
pub fn LLVMRustGetHostCPUName(len: *mut usize) -> *const c_char;
// This function makes copies of pointed to data, so the data's lifetime may end after this function returns
// This function makes copies of the pointed-to data, so the data's lifetime may end after
// this function returns.
pub fn LLVMRustCreateTargetMachine(
Triple: *const c_char,
CPU: *const c_char,

View file

@ -217,10 +217,10 @@ impl<'a> IntoIterator for LLVMFeature<'a> {
// where `{ARCH}` is the architecture name. Look for instances of `SubtargetFeature`.
//
// Check the current rustc fork of LLVM in the repo at https://github.com/rust-lang/llvm-project/.
// The commit in use can be found via the `llvm-project` submodule in https://github.com/rust-lang/rust/tree/master/src
// Though note that Rust can also be build with an external precompiled version of LLVM
// which might lead to failures if the oldest tested / supported LLVM version
// doesn't yet support the relevant intrinsics
// The commit in use can be found via the `llvm-project` submodule in
// https://github.com/rust-lang/rust/tree/master/src. Note that Rust can also be built with an
// external precompiled version of LLVM, which might lead to failures if the oldest tested /
// supported LLVM version doesn't yet support the relevant intrinsics.
pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFeature<'a>> {
let arch = if sess.target.arch == "x86_64" {
"x86"
@ -259,8 +259,8 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea
("aarch64", "fp16") => Some(LLVMFeature::new("fullfp16")),
// Filter out features that are not supported by the current LLVM version
("aarch64", "fpmr") if get_version().0 != 18 => None,
// In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single feature called
// `fast-unaligned-access`. In LLVM 19, it was split back out.
// In LLVM 18, `unaligned-scalar-mem` was merged with `unaligned-vector-mem` into a single
// feature called `fast-unaligned-access`. In LLVM 19, it was split back out.
("riscv32" | "riscv64", "unaligned-scalar-mem") if get_version().0 == 18 => {
Some(LLVMFeature::new("fast-unaligned-access"))
}
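A miniature of the mapping shape used by `to_llvm_features`, folding in the arms shown above (a hypothetical standalone function; the real one returns an `LLVMFeature`, which can expand one Rust feature into several LLVM features):

    // Map a Rust target-feature name to its LLVM counterpart; `None` drops
    // features the detected LLVM major version cannot handle.
    fn map_feature<'a>(arch: &str, feature: &'a str, llvm_major: u32) -> Option<&'a str> {
        match (arch, feature) {
            ("aarch64", "fp16") => Some("fullfp16"),
            ("aarch64", "fpmr") if llvm_major != 18 => None,
            ("riscv32" | "riscv64", "unaligned-scalar-mem") if llvm_major == 18 => {
                Some("fast-unaligned-access")
            }
            _ => Some(feature),
        }
    }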
@ -406,7 +406,8 @@ fn print_target_features(out: &mut String, sess: &Session, tm: &llvm::TargetMach
.supported_target_features()
.iter()
.filter_map(|(feature, _gate, _implied)| {
// LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these strings.
// LLVM asserts that these are sorted. LLVM and Rust both use byte comparison for these
// strings.
let llvm_feature = to_llvm_features(sess, *feature)?.llvm_feature_name;
let desc =
match llvm_target_features.binary_search_by_key(&llvm_feature, |(f, _d)| f).ok() {

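The comment documents an invariant rather than a mechanism: `binary_search_by_key` is only correct if the slice really is sorted by the key, and LLVM asserts the same byte-wise order on its side. A small sketch of the lookup pattern:

    fn main() {
        // (feature, description) pairs, sorted byte-wise by feature name.
        let llvm_target_features = [("neon", "NEON SIMD"), ("sve", "Scalable vectors")];
        let llvm_feature = "sve";
        let desc = llvm_target_features
            .binary_search_by_key(&llvm_feature, |(f, _d)| *f)
            .ok()
            .map(|i| llvm_target_features[i].1);
        assert_eq!(desc, Some("Scalable vectors"));
    }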
View file

@ -24,8 +24,8 @@ impl<'tcx> PreDefineCodegenMethods<'tcx> for CodegenCx<'_, 'tcx> {
) {
let instance = Instance::mono(self.tcx, def_id);
let DefKind::Static { nested, .. } = self.tcx.def_kind(def_id) else { bug!() };
// Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure out
// the llvm type from the actual evaluated initializer.
// Nested statics do not have a type, so pick a dummy type and let `codegen_static` figure
// out the llvm type from the actual evaluated initializer.
let ty = if nested {
self.tcx.types.unit
} else {

View file

@ -14,18 +14,20 @@ pub trait ConstCodegenMethods<'tcx>: BackendTypes {
/// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a
/// poison value.
fn const_poison(&self, t: Self::Type) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
fn const_bool(&self, val: bool) -> Self::Value;
fn const_i8(&self, i: i8) -> Self::Value;
fn const_i16(&self, i: i16) -> Self::Value;
fn const_i32(&self, i: i32) -> Self::Value;
fn const_i8(&self, i: i8) -> Self::Value;
fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
fn const_u8(&self, i: u8) -> Self::Value;
fn const_u32(&self, i: u32) -> Self::Value;
fn const_u64(&self, i: u64) -> Self::Value;
fn const_u128(&self, i: u128) -> Self::Value;
fn const_usize(&self, i: u64) -> Self::Value;
fn const_u8(&self, i: u8) -> Self::Value;
fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
fn const_real(&self, t: Self::Type, val: f64) -> Self::Value;
fn const_str(&self, s: &str) -> (Self::Value, Self::Value);