
mv compiler to compiler/

mark 2020-08-27 22:58:48 -05:00 committed by Vadim Petrochenkov
parent db534b3ac2
commit 9e5f7d5631
1686 changed files with 941 additions and 1051 deletions


@@ -0,0 +1,502 @@
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm::{self, AttributePlace};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::MemFlags;
use rustc_middle::bug;
pub use rustc_middle::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use rustc_middle::ty::Ty;
use rustc_target::abi::call::ArgAbi;
pub use rustc_target::abi::call::*;
use rustc_target::abi::{self, HasDataLayout, Int, LayoutOf};
pub use rustc_target::spec::abi::Abi;
use libc::c_uint;
macro_rules! for_each_kind {
($flags: ident, $f: ident, $($kind: ident),+) => ({
$(if $flags.contains(ArgAttribute::$kind) { $f(llvm::Attribute::$kind) })+
})
}
trait ArgAttributeExt {
fn for_each_kind<F>(&self, f: F)
where
F: FnMut(llvm::Attribute);
}
impl ArgAttributeExt for ArgAttribute {
fn for_each_kind<F>(&self, mut f: F)
where
F: FnMut(llvm::Attribute),
{
for_each_kind!(self, f, NoAlias, NoCapture, NonNull, ReadOnly, SExt, StructRet, ZExt, InReg)
}
}
pub trait ArgAttributesExt {
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>);
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>);
}
impl ArgAttributesExt for ArgAttributes {
fn apply_llfn(&self, idx: AttributePlace, llfn: &Value, ty: Option<&Type>) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableAttr(llfn, idx.as_uint(), deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullAttr(llfn, idx.as_uint(), deref);
}
regular -= ArgAttribute::NonNull;
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentAttr(llfn, idx.as_uint(), align.bytes() as u32);
}
if regular.contains(ArgAttribute::ByVal) {
llvm::LLVMRustAddByValAttr(llfn, idx.as_uint(), ty.unwrap());
}
regular.for_each_kind(|attr| attr.apply_llfn(idx, llfn));
}
}
fn apply_callsite(&self, idx: AttributePlace, callsite: &Value, ty: Option<&Type>) {
let mut regular = self.regular;
unsafe {
let deref = self.pointee_size.bytes();
if deref != 0 {
if regular.contains(ArgAttribute::NonNull) {
llvm::LLVMRustAddDereferenceableCallSiteAttr(callsite, idx.as_uint(), deref);
} else {
llvm::LLVMRustAddDereferenceableOrNullCallSiteAttr(
callsite,
idx.as_uint(),
deref,
);
}
regular -= ArgAttribute::NonNull;
}
if let Some(align) = self.pointee_align {
llvm::LLVMRustAddAlignmentCallSiteAttr(
callsite,
idx.as_uint(),
align.bytes() as u32,
);
}
if regular.contains(ArgAttribute::ByVal) {
llvm::LLVMRustAddByValCallSiteAttr(callsite, idx.as_uint(), ty.unwrap());
}
regular.for_each_kind(|attr| attr.apply_callsite(idx, callsite));
}
}
}
pub trait LlvmType {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type;
}
impl LlvmType for Reg {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
match self.kind {
RegKind::Integer => cx.type_ix(self.size.bits()),
RegKind::Float => match self.size.bits() {
32 => cx.type_f32(),
64 => cx.type_f64(),
_ => bug!("unsupported float: {:?}", self),
},
RegKind::Vector => cx.type_vector(cx.type_i8(), self.size.bytes()),
}
}
}
impl LlvmType for CastTarget {
fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
let rest_ll_unit = self.rest.unit.llvm_type(cx);
let (rest_count, rem_bytes) = if self.rest.unit.size.bytes() == 0 {
(0, 0)
} else {
(
self.rest.total.bytes() / self.rest.unit.size.bytes(),
self.rest.total.bytes() % self.rest.unit.size.bytes(),
)
};
if self.prefix.iter().all(|x| x.is_none()) {
// Simplify to a single unit when there is no prefix and size <= unit size
if self.rest.total <= self.rest.unit.size {
return rest_ll_unit;
}
// Simplify to array when all chunks are the same size and type
if rem_bytes == 0 {
return cx.type_array(rest_ll_unit, rest_count);
}
}
// Create list of fields in the main structure
let mut args: Vec<_> = self
.prefix
.iter()
.flat_map(|option_kind| {
option_kind.map(|kind| Reg { kind, size: self.prefix_chunk }.llvm_type(cx))
})
.chain((0..rest_count).map(|_| rest_ll_unit))
.collect();
// Append final integer
if rem_bytes != 0 {
// Only integers can be really split further.
assert_eq!(self.rest.unit.kind, RegKind::Integer);
args.push(cx.type_ix(rem_bytes * 8));
}
cx.type_struct(&args, false)
}
}
pub trait ArgAbiExt<'ll, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn store(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>,
);
fn store_fn_arg(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
idx: &mut usize,
dst: PlaceRef<'tcx, &'ll Value>,
);
}
impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
/// Gets the LLVM type for a place of the original Rust type of
/// this argument/return, i.e., the result of `type_of::type_of`.
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
self.layout.llvm_type(cx)
}
/// Stores a direct/indirect value described by this ArgAbi into a
/// place for the original Rust type of this argument/return.
/// Can be used both for storing formal arguments into Rust variables
/// and for storing the results of call/invoke instructions into their destinations.
fn store(
&self,
bx: &mut Builder<'_, 'll, 'tcx>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>,
) {
if self.is_ignore() {
return;
}
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
} else if let PassMode::Cast(cast) = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
let can_store_through_cast_ptr = false;
if can_store_through_cast_ptr {
let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
bx.store(val, cast_dst, self.layout.align.abi);
} else {
// The actual return type is a struct, but the ABI
// adaptation code has cast it into some scalar type. The
// code that follows is the only reliable way I have
// found to do a transform like i64 -> {i32,i32}.
// Basically we dump the data onto the stack then memcpy it.
//
// Other approaches I tried:
// - Casting rust ret pointer to the foreign type and using Store
// is (a) unsafe if size of foreign type > size of rust type and
// (b) runs afoul of strict aliasing rules, yielding invalid
// assembly under -O (specifically, the store gets removed).
// - Truncating foreign type to correct integral type and then
// bitcasting to the struct type yields invalid cast errors.
// Thus, we instead allocate some scratch space...
let scratch_size = cast.size(bx);
let scratch_align = cast.align(bx);
let llscratch = bx.alloca(cast.llvm_type(bx), scratch_align);
bx.lifetime_start(llscratch, scratch_size);
// ... where we first store the value...
bx.store(val, llscratch, scratch_align);
// ... and then memcpy it to the intended destination.
bx.memcpy(
dst.llval,
self.layout.align.abi,
llscratch,
scratch_align,
bx.const_usize(self.layout.size.bytes()),
MemFlags::empty(),
);
bx.lifetime_end(llscratch, scratch_size);
}
} else {
OperandValue::Immediate(val).store(bx, dst);
}
}
fn store_fn_arg(
&self,
bx: &mut Builder<'a, 'll, 'tcx>,
idx: &mut usize,
dst: PlaceRef<'tcx, &'ll Value>,
) {
let mut next = || {
let val = llvm::get_param(bx.llfn(), *idx as c_uint);
*idx += 1;
val
};
match self.mode {
PassMode::Ignore => {}
PassMode::Pair(..) => {
OperandValue::Pair(next(), next()).store(bx, dst);
}
PassMode::Indirect(_, Some(_)) => {
OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
}
PassMode::Direct(_) | PassMode::Indirect(_, None) | PassMode::Cast(_) => {
let next_arg = next();
self.store(bx, next_arg, dst);
}
}
}
}
impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn store_fn_arg(
&mut self,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
idx: &mut usize,
dst: PlaceRef<'tcx, Self::Value>,
) {
arg_abi.store_fn_arg(self, idx, dst)
}
fn store_arg(
&mut self,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>,
) {
arg_abi.store(self, val, dst)
}
fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
arg_abi.memory_ty(self)
}
}
pub trait FnAbiLlvmExt<'tcx> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn llvm_cconv(&self) -> llvm::CallConv;
fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value);
fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}
impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
if let PassMode::Pair(_, _) = arg.mode { 2 } else { 1 }
).sum();
let mut llargument_tys = Vec::with_capacity(
if let PassMode::Indirect(..) = self.ret.mode { 1 } else { 0 } + args_capacity,
);
let llreturn_ty = match self.ret.mode {
PassMode::Ignore => cx.type_void(),
PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(..) => {
llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
cx.type_void()
}
};
for arg in &self.args {
// add padding
if let Some(ty) = arg.pad {
llargument_tys.push(ty.llvm_type(cx));
}
let llarg_ty = match arg.mode {
PassMode::Ignore => continue,
PassMode::Direct(_) => arg.layout.immediate_llvm_type(cx),
PassMode::Pair(..) => {
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(arg.layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
PassMode::Indirect(_, Some(_)) => {
let ptr_ty = cx.tcx.mk_mut_ptr(arg.layout.ty);
let ptr_layout = cx.layout_of(ptr_ty);
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 0, true));
llargument_tys.push(ptr_layout.scalar_pair_element_llvm_type(cx, 1, true));
continue;
}
PassMode::Cast(cast) => cast.llvm_type(cx),
PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
};
llargument_tys.push(llarg_ty);
}
if self.c_variadic {
cx.type_variadic_func(&llargument_tys, llreturn_ty)
} else {
cx.type_func(&llargument_tys, llreturn_ty)
}
}
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
unsafe {
llvm::LLVMPointerType(
self.llvm_type(cx),
cx.data_layout().instruction_address_space.0 as c_uint,
)
}
}
fn llvm_cconv(&self) -> llvm::CallConv {
match self.conv {
Conv::C | Conv::Rust => llvm::CCallConv,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::AvrInterrupt => llvm::AvrInterrupt,
Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt,
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
Conv::Msp430Intr => llvm::Msp430Intr,
Conv::PtxKernel => llvm::PtxKernel,
Conv::X86Fastcall => llvm::X86FastcallCallConv,
Conv::X86Intr => llvm::X86_Intr,
Conv::X86Stdcall => llvm::X86StdcallCallConv,
Conv::X86ThisCall => llvm::X86_ThisCall,
Conv::X86VectorCall => llvm::X86_VectorCall,
Conv::X86_64SysV => llvm::X86_64_SysV,
Conv::X86_64Win64 => llvm::X86_64_Win64,
}
}
fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
// FIXME(eddyb) can this also be applied to callsites?
if self.ret.layout.abi.is_uninhabited() {
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
}
// FIXME(eddyb, wesleywiser): apply this to callsites as well?
if !self.can_unwind {
llvm::Attribute::NoUnwind.apply_llfn(llvm::AttributePlace::Function, llfn);
}
let mut i = 0;
let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
i += 1;
};
match self.ret.mode {
PassMode::Direct(ref attrs) => {
attrs.apply_llfn(llvm::AttributePlace::ReturnValue, llfn, None);
}
PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(cx))),
_ => {}
}
for arg in &self.args {
if arg.pad.is_some() {
apply(&ArgAttributes::new(), None);
}
match arg.mode {
PassMode::Ignore => {}
PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
apply(attrs, Some(arg.layout.llvm_type(cx)))
}
PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
apply(attrs, None);
apply(extra_attrs, None);
}
PassMode::Pair(ref a, ref b) => {
apply(a, None);
apply(b, None);
}
PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
}
}
}
fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value) {
// FIXME(wesleywiser, eddyb): We should apply `nounwind` and `noreturn` as appropriate to this callsite.
let mut i = 0;
let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
attrs.apply_callsite(llvm::AttributePlace::Argument(i), callsite, ty);
i += 1;
};
match self.ret.mode {
PassMode::Direct(ref attrs) => {
attrs.apply_callsite(llvm::AttributePlace::ReturnValue, callsite, None);
}
PassMode::Indirect(ref attrs, _) => apply(attrs, Some(self.ret.layout.llvm_type(bx))),
_ => {}
}
if let abi::Abi::Scalar(ref scalar) = self.ret.layout.abi {
// If the value is a boolean, the range is 0..2 and that ultimately
// becomes 0..0 when the type becomes i1, which would be rejected
// by the LLVM verifier.
if let Int(..) = scalar.value {
if !scalar.is_bool() {
let range = scalar.valid_range_exclusive(bx);
if range.start != range.end {
bx.range_metadata(callsite, range);
}
}
}
}
for arg in &self.args {
if arg.pad.is_some() {
apply(&ArgAttributes::new(), None);
}
match arg.mode {
PassMode::Ignore => {}
PassMode::Direct(ref attrs) | PassMode::Indirect(ref attrs, None) => {
apply(attrs, Some(arg.layout.llvm_type(bx)))
}
PassMode::Indirect(ref attrs, Some(ref extra_attrs)) => {
apply(attrs, None);
apply(extra_attrs, None);
}
PassMode::Pair(ref a, ref b) => {
apply(a, None);
apply(b, None);
}
PassMode::Cast(_) => apply(&ArgAttributes::new(), None),
}
}
let cconv = self.llvm_cconv();
if cconv != llvm::CCallConv {
llvm::SetInstructionCallConv(callsite, cconv);
}
}
}
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value) {
fn_abi.apply_attrs_callsite(self, callsite)
}
fn get_param(&self, index: usize) -> Self::Value {
llvm::get_param(self.llfn(), index as c_uint)
}
}


@@ -0,0 +1,85 @@
use crate::attributes;
use libc::c_uint;
use rustc_ast::expand::allocator::{AllocatorKind, AllocatorTy, ALLOCATOR_METHODS};
use rustc_middle::bug;
use rustc_middle::ty::TyCtxt;
use crate::llvm::{self, False, True};
use crate::ModuleLlvm;
pub(crate) unsafe fn codegen(tcx: TyCtxt<'_>, mods: &mut ModuleLlvm, kind: AllocatorKind) {
let llcx = &*mods.llcx;
let llmod = mods.llmod();
let usize = match &tcx.sess.target.target.target_pointer_width[..] {
"16" => llvm::LLVMInt16TypeInContext(llcx),
"32" => llvm::LLVMInt32TypeInContext(llcx),
"64" => llvm::LLVMInt64TypeInContext(llcx),
tws => bug!("Unsupported target word size for int: {}", tws),
};
let i8 = llvm::LLVMInt8TypeInContext(llcx);
let i8p = llvm::LLVMPointerType(i8, 0);
let void = llvm::LLVMVoidTypeInContext(llcx);
for method in ALLOCATOR_METHODS {
let mut args = Vec::with_capacity(method.inputs.len());
for ty in method.inputs.iter() {
match *ty {
AllocatorTy::Layout => {
args.push(usize); // size
args.push(usize); // align
}
AllocatorTy::Ptr => args.push(i8p),
AllocatorTy::Usize => args.push(usize),
AllocatorTy::ResultPtr | AllocatorTy::Unit => panic!("invalid allocator arg"),
}
}
let output = match method.output {
AllocatorTy::ResultPtr => Some(i8p),
AllocatorTy::Unit => None,
AllocatorTy::Layout | AllocatorTy::Usize | AllocatorTy::Ptr => {
panic!("invalid allocator output")
}
};
let ty = llvm::LLVMFunctionType(
output.unwrap_or(void),
args.as_ptr(),
args.len() as c_uint,
False,
);
let name = format!("__rust_{}", method.name);
let llfn = llvm::LLVMRustGetOrInsertFunction(llmod, name.as_ptr().cast(), name.len(), ty);
if tcx.sess.target.target.options.default_hidden_visibility {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
if tcx.sess.must_emit_unwind_tables() {
attributes::emit_uwtable(llfn, true);
}
let callee = kind.fn_name(method.name);
let callee =
llvm::LLVMRustGetOrInsertFunction(llmod, callee.as_ptr().cast(), callee.len(), ty);
llvm::LLVMRustSetVisibility(callee, llvm::Visibility::Hidden);
let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, "entry\0".as_ptr().cast());
let llbuilder = llvm::LLVMCreateBuilderInContext(llcx);
llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb);
let args = args
.iter()
.enumerate()
.map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint))
.collect::<Vec<_>>();
let ret =
llvm::LLVMRustBuildCall(llbuilder, callee, args.as_ptr(), args.len() as c_uint, None);
llvm::LLVMSetTailCall(ret, True);
if output.is_some() {
llvm::LLVMBuildRet(llbuilder, ret);
} else {
llvm::LLVMBuildRetVoid(llbuilder);
}
llvm::LLVMDisposeBuilder(llbuilder);
}
}


@@ -0,0 +1,836 @@
use crate::builder::Builder;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_ast::LlvmAsmDialect;
use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
use rustc_middle::span_bug;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::{Pos, Span};
use rustc_target::abi::*;
use rustc_target::asm::*;
use libc::{c_char, c_uint};
use tracing::debug;
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn codegen_llvm_inline_asm(
&mut self,
ia: &hir::LlvmInlineAsmInner,
outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
mut inputs: Vec<&'ll Value>,
span: Span,
) -> bool {
let mut ext_constraints = vec![];
let mut output_types = vec![];
// Prepare the output operands
let mut indirect_outputs = vec![];
for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
let operand = self.load_operand(place);
if let OperandValue::Immediate(_) = operand.val {
inputs.push(operand.immediate());
}
ext_constraints.push(i.to_string());
}
if out.is_indirect {
let operand = self.load_operand(place);
if let OperandValue::Immediate(_) = operand.val {
indirect_outputs.push(operand.immediate());
}
} else {
output_types.push(place.layout.llvm_type(self.cx));
}
}
if !indirect_outputs.is_empty() {
indirect_outputs.extend_from_slice(&inputs);
inputs = indirect_outputs;
}
let clobbers = ia.clobbers.iter().map(|s| format!("~{{{}}}", &s));
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &self.sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
"mips" | "mips64" => vec!["~{$1}"],
_ => Vec::new(),
};
let all_constraints = ia
.outputs
.iter()
.map(|out| out.constraint.to_string())
.chain(ia.inputs.iter().map(|s| s.to_string()))
.chain(ext_constraints)
.chain(clobbers)
.chain(arch_clobbers.iter().map(|s| (*s).to_string()))
.collect::<Vec<String>>()
.join(",");
debug!("Asm Constraints: {}", &all_constraints);
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => self.type_void(),
1 => output_types[0],
_ => self.type_struct(&output_types, false),
};
let asm = ia.asm.as_str();
let r = inline_asm_call(
self,
&asm,
&all_constraints,
&inputs,
output_type,
ia.volatile,
ia.alignstack,
ia.dialect,
&[span],
);
if r.is_none() {
return false;
}
let r = r.unwrap();
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(self, place);
}
true
}
fn codegen_inline_asm(
&mut self,
template: &[InlineAsmTemplatePiece],
operands: &[InlineAsmOperandRef<'tcx, Self>],
options: InlineAsmOptions,
line_spans: &[Span],
) {
let asm_arch = self.tcx.sess.asm_arch.unwrap();
// Collect the types of output operands
let mut constraints = vec![];
let mut output_types = vec![];
let mut op_idx = FxHashMap::default();
for (idx, op) in operands.iter().enumerate() {
match *op {
InlineAsmOperandRef::Out { reg, late, place } => {
let mut layout = None;
let ty = if let Some(ref place) = place {
layout = Some(&place.layout);
llvm_fixup_output_type(self.cx, reg.reg_class(), &place.layout)
} else {
// If the output is discarded, we don't really care what
// type is used. We're just using this to tell LLVM to
// reserve the register.
dummy_output_type(self.cx, reg.reg_class())
};
output_types.push(ty);
op_idx.insert(idx, constraints.len());
let prefix = if late { "=" } else { "=&" };
constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, layout)));
}
InlineAsmOperandRef::InOut { reg, late, in_value, out_place } => {
let layout = if let Some(ref out_place) = out_place {
&out_place.layout
} else {
// LLVM requires tied operands to have the same type,
// so we just use the type of the input.
&in_value.layout
};
let ty = llvm_fixup_output_type(self.cx, reg.reg_class(), layout);
output_types.push(ty);
op_idx.insert(idx, constraints.len());
let prefix = if late { "=" } else { "=&" };
constraints.push(format!("{}{}", prefix, reg_to_llvm(reg, Some(layout))));
}
_ => {}
}
}
// Collect input operands
let mut inputs = vec![];
for (idx, op) in operands.iter().enumerate() {
match *op {
InlineAsmOperandRef::In { reg, value } => {
let llval =
llvm_fixup_input(self, value.immediate(), reg.reg_class(), &value.layout);
inputs.push(llval);
op_idx.insert(idx, constraints.len());
constraints.push(reg_to_llvm(reg, Some(&value.layout)));
}
InlineAsmOperandRef::InOut { reg, late: _, in_value, out_place: _ } => {
let value = llvm_fixup_input(
self,
in_value.immediate(),
reg.reg_class(),
&in_value.layout,
);
inputs.push(value);
constraints.push(format!("{}", op_idx[&idx]));
}
InlineAsmOperandRef::SymFn { instance } => {
inputs.push(self.cx.get_fn(instance));
op_idx.insert(idx, constraints.len());
constraints.push("s".to_string());
}
InlineAsmOperandRef::SymStatic { def_id } => {
inputs.push(self.cx.get_static(def_id));
op_idx.insert(idx, constraints.len());
constraints.push("s".to_string());
}
_ => {}
}
}
// Build the template string
let mut template_str = String::new();
for piece in template {
match *piece {
InlineAsmTemplatePiece::String(ref s) => {
if s.contains('$') {
for c in s.chars() {
if c == '$' {
template_str.push_str("$$");
} else {
template_str.push(c);
}
}
} else {
template_str.push_str(s)
}
}
InlineAsmTemplatePiece::Placeholder { operand_idx, modifier, span: _ } => {
match operands[operand_idx] {
InlineAsmOperandRef::In { reg, .. }
| InlineAsmOperandRef::Out { reg, .. }
| InlineAsmOperandRef::InOut { reg, .. } => {
let modifier = modifier_to_llvm(asm_arch, reg.reg_class(), modifier);
if let Some(modifier) = modifier {
template_str.push_str(&format!(
"${{{}:{}}}",
op_idx[&operand_idx], modifier
));
} else {
template_str.push_str(&format!("${{{}}}", op_idx[&operand_idx]));
}
}
InlineAsmOperandRef::Const { ref string } => {
// Const operands get injected directly into the template
template_str.push_str(string);
}
InlineAsmOperandRef::SymFn { .. }
| InlineAsmOperandRef::SymStatic { .. } => {
// Only emit the raw symbol name
template_str.push_str(&format!("${{{}:c}}", op_idx[&operand_idx]));
}
}
}
}
}
if !options.contains(InlineAsmOptions::PRESERVES_FLAGS) {
match asm_arch {
InlineAsmArch::AArch64 | InlineAsmArch::Arm => {
constraints.push("~{cc}".to_string());
}
InlineAsmArch::X86 | InlineAsmArch::X86_64 => {
constraints.extend_from_slice(&[
"~{dirflag}".to_string(),
"~{fpsr}".to_string(),
"~{flags}".to_string(),
]);
}
InlineAsmArch::RiscV32 | InlineAsmArch::RiscV64 => {}
InlineAsmArch::Nvptx64 => {}
InlineAsmArch::Hexagon => {}
}
}
if !options.contains(InlineAsmOptions::NOMEM) {
// This is actually ignored by LLVM, but it's probably best to keep
// it just in case. LLVM instead uses the ReadOnly/ReadNone
// attributes on the call instruction to optimize.
constraints.push("~{memory}".to_string());
}
let volatile = !options.contains(InlineAsmOptions::PURE);
let alignstack = !options.contains(InlineAsmOptions::NOSTACK);
let output_type = match &output_types[..] {
[] => self.type_void(),
[ty] => ty,
tys => self.type_struct(&tys, false),
};
let dialect = match asm_arch {
InlineAsmArch::X86 | InlineAsmArch::X86_64
if !options.contains(InlineAsmOptions::ATT_SYNTAX) =>
{
LlvmAsmDialect::Intel
}
_ => LlvmAsmDialect::Att,
};
let result = inline_asm_call(
self,
&template_str,
&constraints.join(","),
&inputs,
output_type,
volatile,
alignstack,
dialect,
line_spans,
)
.unwrap_or_else(|| span_bug!(line_spans[0], "LLVM asm constraint validation failed"));
if options.contains(InlineAsmOptions::PURE) {
if options.contains(InlineAsmOptions::NOMEM) {
llvm::Attribute::ReadNone.apply_callsite(llvm::AttributePlace::Function, result);
} else if options.contains(InlineAsmOptions::READONLY) {
llvm::Attribute::ReadOnly.apply_callsite(llvm::AttributePlace::Function, result);
}
} else {
if options.contains(InlineAsmOptions::NOMEM) {
llvm::Attribute::InaccessibleMemOnly
.apply_callsite(llvm::AttributePlace::Function, result);
} else {
// LLVM doesn't have an attribute to represent ReadOnly + SideEffect
}
}
// Write results to outputs
for (idx, op) in operands.iter().enumerate() {
if let InlineAsmOperandRef::Out { reg, place: Some(place), .. }
| InlineAsmOperandRef::InOut { reg, out_place: Some(place), .. } = *op
{
let value = if output_types.len() == 1 {
result
} else {
self.extract_value(result, op_idx[&idx] as u64)
};
let value = llvm_fixup_output(self, value, reg.reg_class(), &place.layout);
OperandValue::Immediate(value).store(self, place);
}
}
}
}
impl AsmMethods for CodegenCx<'ll, 'tcx> {
fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
let asm = ga.asm.as_str();
unsafe {
llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr().cast(), asm.len());
}
}
}
fn inline_asm_call(
bx: &mut Builder<'a, 'll, 'tcx>,
asm: &str,
cons: &str,
inputs: &[&'ll Value],
output: &'ll llvm::Type,
volatile: bool,
alignstack: bool,
dia: LlvmAsmDialect,
line_spans: &[Span],
) -> Option<&'ll Value> {
let volatile = if volatile { llvm::True } else { llvm::False };
let alignstack = if alignstack { llvm::True } else { llvm::False };
let argtys = inputs
.iter()
.map(|v| {
debug!("Asm Input Type: {:?}", *v);
bx.cx.val_ty(*v)
})
.collect::<Vec<_>>();
debug!("Asm Output Type: {:?}", output);
let fty = bx.cx.type_func(&argtys[..], output);
unsafe {
// Ask LLVM to verify that the constraints are well-formed.
let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr().cast(), cons.len());
debug!("constraint verification result: {:?}", constraints_ok);
if constraints_ok {
let v = llvm::LLVMRustInlineAsm(
fty,
asm.as_ptr().cast(),
asm.len(),
cons.as_ptr().cast(),
cons.len(),
volatile,
alignstack,
llvm::AsmDialect::from_generic(dia),
);
let call = bx.call(v, inputs, None);
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(
bx.llcx,
key.as_ptr() as *const c_char,
key.len() as c_uint,
);
// srcloc contains one integer for each line of assembly code.
// Unfortunately this isn't enough to encode a full span so instead
// we just encode the start position of each line.
// FIXME: Figure out a way to pass the entire line spans.
let mut srcloc = vec![];
if dia == LlvmAsmDialect::Intel && line_spans.len() > 1 {
// LLVM inserts an extra line to add the ".intel_syntax", so add
// a dummy srcloc entry for it.
//
// Don't do this if we only have 1 line span since that may be
// due to the asm template string coming from a macro. LLVM will
// default to the first srcloc for lines that don't have an
// associated srcloc.
srcloc.push(bx.const_i32(0));
}
srcloc.extend(line_spans.iter().map(|span| bx.const_i32(span.lo().to_u32() as i32)));
let md = llvm::LLVMMDNodeInContext(bx.llcx, srcloc.as_ptr(), srcloc.len() as u32);
llvm::LLVMSetMetadata(call, kind, md);
Some(call)
} else {
// LLVM has detected an issue with our constraints, bail out
None
}
}
}
/// If the register is an xmm/ymm/zmm register then return its index.
fn xmm_reg_index(reg: InlineAsmReg) -> Option<u32> {
match reg {
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::xmm0 as u32
&& reg as u32 <= X86InlineAsmReg::xmm15 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::xmm0 as u32)
}
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::ymm0 as u32
&& reg as u32 <= X86InlineAsmReg::ymm15 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::ymm0 as u32)
}
InlineAsmReg::X86(reg)
if reg as u32 >= X86InlineAsmReg::zmm0 as u32
&& reg as u32 <= X86InlineAsmReg::zmm31 as u32 =>
{
Some(reg as u32 - X86InlineAsmReg::zmm0 as u32)
}
_ => None,
}
}
/// If the register is an AArch64 vector register then return its index.
fn a64_vreg_index(reg: InlineAsmReg) -> Option<u32> {
match reg {
InlineAsmReg::AArch64(reg)
if reg as u32 >= AArch64InlineAsmReg::v0 as u32
&& reg as u32 <= AArch64InlineAsmReg::v31 as u32 =>
{
Some(reg as u32 - AArch64InlineAsmReg::v0 as u32)
}
_ => None,
}
}
/// Converts a register class to an LLVM constraint code.
fn reg_to_llvm(reg: InlineAsmRegOrRegClass, layout: Option<&TyAndLayout<'tcx>>) -> String {
match reg {
// For vector registers LLVM wants the register name to match the type size.
InlineAsmRegOrRegClass::Reg(reg) => {
if let Some(idx) = xmm_reg_index(reg) {
let class = if let Some(layout) = layout {
match layout.size.bytes() {
64 => 'z',
32 => 'y',
_ => 'x',
}
} else {
// We use f32 as the type for discarded outputs
'x'
};
format!("{{{}mm{}}}", class, idx)
} else if let Some(idx) = a64_vreg_index(reg) {
let class = if let Some(layout) = layout {
match layout.size.bytes() {
16 => 'q',
8 => 'd',
4 => 's',
2 => 'h',
1 => 'd', // We fixup i8 to i8x8
_ => unreachable!(),
}
} else {
// We use i64x2 as the type for discarded outputs
'q'
};
format!("{{{}{}}}", class, idx)
} else if reg == InlineAsmReg::AArch64(AArch64InlineAsmReg::x30) {
// LLVM doesn't recognize x30
"{lr}".to_string()
} else {
format!("{{{}}}", reg.name())
}
}
InlineAsmRegOrRegClass::RegClass(reg) => match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg) => "w",
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => "x",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => "l",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8) => "t",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => "x",
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg) => "w",
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => "h",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => "r",
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => "l",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => "r",
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => "f",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg) => "r",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => "Q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => "q",
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg) => "x",
InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => "v",
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => "^Yk",
}
.to_string(),
}
}
/// Converts a modifier into LLVM's equivalent modifier.
fn modifier_to_llvm(
arch: InlineAsmArch,
reg: InlineAsmRegClass,
modifier: Option<char>,
) -> Option<char> {
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => modifier,
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
if modifier == Some('v') { None } else { modifier }
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => None,
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => None,
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => Some('P'),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
if modifier.is_none() {
Some('q')
} else {
modifier
}
}
InlineAsmRegClass::Hexagon(_) => None,
InlineAsmRegClass::Nvptx(_) => None,
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg)
| InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => None,
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => match modifier {
None if arch == InlineAsmArch::X86_64 => Some('q'),
None => Some('k'),
Some('l') => Some('b'),
Some('h') => Some('h'),
Some('x') => Some('w'),
Some('e') => Some('k'),
Some('r') => Some('q'),
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => None,
InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(reg @ X86InlineAsmRegClass::zmm_reg) => match (reg, modifier) {
(X86InlineAsmRegClass::xmm_reg, None) => Some('x'),
(X86InlineAsmRegClass::ymm_reg, None) => Some('t'),
(X86InlineAsmRegClass::zmm_reg, None) => Some('g'),
(_, Some('x')) => Some('x'),
(_, Some('y')) => Some('t'),
(_, Some('z')) => Some('g'),
_ => unreachable!(),
},
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => None,
}
}
/// Type to use for outputs that are discarded. It doesn't really matter what
/// the type is, as long as it is valid for the constraint code.
fn dummy_output_type(cx: &CodegenCx<'ll, 'tcx>, reg: InlineAsmRegClass) -> &'ll Type {
match reg {
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg)
| InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16) => {
cx.type_vector(cx.type_i64(), 2)
}
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::reg_thumb) => cx.type_i32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg_low16) => cx.type_f32(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low16)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::dreg_low8) => cx.type_f64(),
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low8)
| InlineAsmRegClass::Arm(ArmInlineAsmRegClass::qreg_low4) => {
cx.type_vector(cx.type_i64(), 2)
}
InlineAsmRegClass::Hexagon(HexagonInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg16) => cx.type_i16(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg32) => cx.type_i32(),
InlineAsmRegClass::Nvptx(NvptxInlineAsmRegClass::reg64) => cx.type_i64(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::reg) => cx.type_i32(),
InlineAsmRegClass::RiscV(RiscVInlineAsmRegClass::freg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd) => cx.type_i32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_byte) => cx.type_i8(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::ymm_reg)
| InlineAsmRegClass::X86(X86InlineAsmRegClass::zmm_reg) => cx.type_f32(),
InlineAsmRegClass::X86(X86InlineAsmRegClass::kreg) => cx.type_i16(),
}
}
/// Helper function to get the LLVM type for a Scalar. Pointers are returned as
/// the equivalent integer type.
fn llvm_asm_scalar_type(cx: &CodegenCx<'ll, 'tcx>, scalar: &Scalar) -> &'ll Type {
match scalar.value {
Primitive::Int(Integer::I8, _) => cx.type_i8(),
Primitive::Int(Integer::I16, _) => cx.type_i16(),
Primitive::Int(Integer::I32, _) => cx.type_i32(),
Primitive::Int(Integer::I64, _) => cx.type_i64(),
Primitive::F32 => cx.type_f32(),
Primitive::F64 => cx.type_f64(),
Primitive::Pointer => cx.type_isize(),
_ => unreachable!(),
}
}
/// Fix up an input value to work around LLVM bugs.
fn llvm_fixup_input(
bx: &mut Builder<'a, 'll, 'tcx>,
mut value: &'ll Value,
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
match (reg, &layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
let vec_ty = bx.cx.type_vector(bx.cx.type_i8(), 8);
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
} else {
value
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
let elem_ty = llvm_asm_scalar_type(bx.cx, s);
let count = 16 / layout.size.bytes();
let vec_ty = bx.cx.type_vector(elem_ty, count);
if let Primitive::Pointer = s.value {
value = bx.ptrtoint(value, bx.cx.type_isize());
}
bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, *count);
let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
{
bx.bitcast(value, bx.cx.type_i64())
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)),
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
bx.bitcast(value, bx.cx.type_f32())
} else {
value
}
}
(
InlineAsmRegClass::Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
bx.bitcast(value, bx.cx.type_f64())
} else {
value
}
}
_ => value,
}
}
/// Fix up an output value to work around LLVM bugs.
fn llvm_fixup_output(
bx: &mut Builder<'a, 'll, 'tcx>,
mut value: &'ll Value,
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Value {
match (reg, &layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
bx.extract_element(value, bx.const_i32(0))
} else {
value
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
value = bx.extract_element(value, bx.const_i32(0));
if let Primitive::Pointer = s.value {
value = bx.inttoptr(value, layout.llvm_type(bx.cx));
}
value
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(bx.cx, element);
let vec_ty = bx.cx.type_vector(elem_ty, *count * 2);
let indices: Vec<_> = (0..*count).map(|x| bx.const_i32(x as i32)).collect();
bx.shuffle_vector(value, bx.const_undef(vec_ty), bx.const_vector(&indices))
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
{
bx.bitcast(value, bx.cx.type_f64())
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)),
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
bx.bitcast(value, bx.cx.type_i32())
} else {
value
}
}
(
InlineAsmRegClass::Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
bx.bitcast(value, bx.cx.type_i64())
} else {
value
}
}
_ => value,
}
}
/// Output type to use for llvm_fixup_output.
fn llvm_fixup_output_type(
cx: &CodegenCx<'ll, 'tcx>,
reg: InlineAsmRegClass,
layout: &TyAndLayout<'tcx>,
) -> &'ll Type {
match (reg, &layout.abi) {
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
if let Primitive::Int(Integer::I8, _) = s.value {
cx.type_vector(cx.type_i8(), 8)
} else {
layout.llvm_type(cx)
}
}
(InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
let elem_ty = llvm_asm_scalar_type(cx, s);
let count = 16 / layout.size.bytes();
cx.type_vector(elem_ty, count)
}
(
InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16),
Abi::Vector { element, count },
) if layout.size.bytes() == 8 => {
let elem_ty = llvm_asm_scalar_type(cx, element);
cx.type_vector(elem_ty, count * 2)
}
(InlineAsmRegClass::X86(X86InlineAsmRegClass::reg_abcd), Abi::Scalar(s))
if s.value == Primitive::F64 =>
{
cx.type_i64()
}
(
InlineAsmRegClass::X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg),
Abi::Vector { .. },
) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8),
(
InlineAsmRegClass::Arm(ArmInlineAsmRegClass::sreg | ArmInlineAsmRegClass::sreg_low16),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I32, _) = s.value {
cx.type_f32()
} else {
layout.llvm_type(cx)
}
}
(
InlineAsmRegClass::Arm(
ArmInlineAsmRegClass::dreg
| ArmInlineAsmRegClass::dreg_low8
| ArmInlineAsmRegClass::dreg_low16,
),
Abi::Scalar(s),
) => {
if let Primitive::Int(Integer::I64, _) = s.value {
cx.type_f64()
} else {
layout.llvm_type(cx)
}
}
_ => layout.llvm_type(cx),
}
}


@@ -0,0 +1,397 @@
//! Set and unset common attributes on LLVM values.
use std::ffi::CString;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::const_cstr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::query::Providers;
use rustc_middle::ty::{self, TyCtxt};
use rustc_session::config::{OptLevel, SanitizerSet};
use rustc_session::Session;
use crate::attributes;
use crate::llvm::AttributePlace::Function;
use crate::llvm::{self, Attribute};
use crate::llvm_util;
pub use rustc_attr::{InlineAttr, OptimizeAttr};
use crate::context::CodegenCx;
use crate::value::Value;
/// Mark LLVM function to use provided inline heuristic.
#[inline]
fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
use self::InlineAttr::*;
match inline {
Hint => Attribute::InlineHint.apply_llfn(Function, val),
Always => Attribute::AlwaysInline.apply_llfn(Function, val),
Never => {
if cx.tcx().sess.target.target.arch != "amdgpu" {
Attribute::NoInline.apply_llfn(Function, val);
}
}
None => {
Attribute::InlineHint.unapply_llfn(Function, val);
Attribute::AlwaysInline.unapply_llfn(Function, val);
Attribute::NoInline.unapply_llfn(Function, val);
}
};
}
/// Apply LLVM sanitize attributes.
#[inline]
pub fn sanitize(cx: &CodegenCx<'ll, '_>, no_sanitize: SanitizerSet, llfn: &'ll Value) {
let enabled = cx.tcx.sess.opts.debugging_opts.sanitizer - no_sanitize;
if enabled.contains(SanitizerSet::ADDRESS) {
llvm::Attribute::SanitizeAddress.apply_llfn(Function, llfn);
}
if enabled.contains(SanitizerSet::MEMORY) {
llvm::Attribute::SanitizeMemory.apply_llfn(Function, llfn);
}
if enabled.contains(SanitizerSet::THREAD) {
llvm::Attribute::SanitizeThread.apply_llfn(Function, llfn);
}
}
/// Tell LLVM to emit or not emit the information necessary to unwind the stack for the function.
#[inline]
pub fn emit_uwtable(val: &'ll Value, emit: bool) {
Attribute::UWTable.toggle_llfn(Function, val, emit);
}
/// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue.
#[inline]
fn naked(val: &'ll Value, is_naked: bool) {
Attribute::Naked.toggle_llfn(Function, val, is_naked);
}
pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
if cx.sess().must_not_eliminate_frame_pointers() {
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("frame-pointer"),
const_cstr!("all"),
);
}
}
/// Tell LLVM which instrumentation function to insert.
#[inline]
fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
if cx.sess().instrument_mcount() {
// Similar to `clang -pg` behavior. Handled by the
// `post-inline-ee-instrument` LLVM pass.
// The function name varies on platforms.
// See test/CodeGen/mcount.c in clang.
let mcount_name =
CString::new(cx.sess().target.target.options.target_mcount.as_str().as_bytes())
.unwrap();
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("instrument-function-entry-inlined"),
&mcount_name,
);
}
}
fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
// Only use stack probes if the target specification indicates that we
// should be using stack probes
if !cx.sess().target.target.options.stack_probes {
return;
}
// Currently stack probes seem somewhat incompatible with the address
// sanitizer and thread sanitizer. With asan we're already protected from
// stack overflow anyway so we don't really need stack probes regardless.
if cx
.sess()
.opts
.debugging_opts
.sanitizer
.intersects(SanitizerSet::ADDRESS | SanitizerSet::THREAD)
{
return;
}
// probestack doesn't play nice either with `-C profile-generate`.
if cx.sess().opts.cg.profile_generate.enabled() {
return;
}
// probestack doesn't play nice either with gcov profiling.
if cx.sess().opts.debugging_opts.profile {
return;
}
// FIXME(richkadel): Make sure probestack plays nice with `-Z instrument-coverage`
// or disable it if not, similar to above early exits.
// Flag our internal `__rust_probestack` function as the stack probe symbol.
// This is defined in the `compiler-builtins` crate for each architecture.
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("probe-stack"),
const_cstr!("__rust_probestack"),
);
}
fn translate_obsolete_target_features(feature: &str) -> &str {
const LLVM9_FEATURE_CHANGES: &[(&str, &str)] =
&[("+fp-only-sp", "-fp64"), ("-fp-only-sp", "+fp64"), ("+d16", "-d32"), ("-d16", "+d32")];
if llvm_util::get_major_version() >= 9 {
for &(old, new) in LLVM9_FEATURE_CHANGES {
if feature == old {
return new;
}
}
} else {
for &(old, new) in LLVM9_FEATURE_CHANGES {
if feature == new {
return old;
}
}
}
feature
}
pub fn llvm_target_features(sess: &Session) -> impl Iterator<Item = &str> {
const RUSTC_SPECIFIC_FEATURES: &[&str] = &["crt-static"];
let cmdline = sess
.opts
.cg
.target_feature
.split(',')
.filter(|f| !RUSTC_SPECIFIC_FEATURES.iter().any(|s| f.contains(s)));
sess.target
.target
.options
.features
.split(',')
.chain(cmdline)
.filter(|l| !l.is_empty())
.map(translate_obsolete_target_features)
}
pub fn apply_target_cpu_attr(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
let target_cpu = SmallCStr::new(llvm_util::target_cpu(cx.tcx.sess));
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("target-cpu"),
target_cpu.as_c_str(),
);
}
/// Sets the `NonLazyBind` LLVM attribute on a given function,
/// assuming the codegen options allow skipping the PLT.
pub fn non_lazy_bind(sess: &Session, llfn: &'ll Value) {
// Don't generate calls through PLT if it's not necessary
if !sess.needs_plt() {
Attribute::NonLazyBind.apply_llfn(Function, llfn);
}
}
pub(crate) fn default_optimisation_attrs(sess: &Session, llfn: &'ll Value) {
match sess.opts.optimize {
OptLevel::Size => {
llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
}
OptLevel::SizeMin => {
llvm::Attribute::MinSize.apply_llfn(Function, llfn);
llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
}
OptLevel::No => {
llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
}
_ => {}
}
}
/// Composite function which sets LLVM attributes for a function depending on its AST (`#[attribute]`)
/// attributes.
pub fn from_fn_attrs(cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value, instance: ty::Instance<'tcx>) {
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
match codegen_fn_attrs.optimize {
OptimizeAttr::None => {
default_optimisation_attrs(cx.tcx.sess, llfn);
}
OptimizeAttr::Speed => {
llvm::Attribute::MinSize.unapply_llfn(Function, llfn);
llvm::Attribute::OptimizeForSize.unapply_llfn(Function, llfn);
llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
}
OptimizeAttr::Size => {
llvm::Attribute::MinSize.apply_llfn(Function, llfn);
llvm::Attribute::OptimizeForSize.apply_llfn(Function, llfn);
llvm::Attribute::OptimizeNone.unapply_llfn(Function, llfn);
}
}
// FIXME(eddyb) consolidate these two `inline` calls (and avoid overwrites).
if instance.def.requires_inline(cx.tcx) {
inline(cx, llfn, attributes::InlineAttr::Hint);
}
inline(cx, llfn, codegen_fn_attrs.inline.clone());
// The `uwtable` attribute according to LLVM is:
//
// This attribute indicates that the ABI being targeted requires that an
// unwind table entry be produced for this function even if we can show
// that no exceptions passes by it. This is normally the case for the
// ELF x86-64 abi, but it can be disabled for some compilation units.
//
// Typically when we're compiling with `-C panic=abort` (which implies this
// `no_landing_pads` check) we don't need `uwtable` because we can't
// generate any exceptions! On Windows, however, exceptions include other
// events such as illegal instructions, segfaults, etc. This means that on
// Windows we end up still needing the `uwtable` attribute even if the `-C
// panic=abort` flag is passed.
//
// You can also find more info on why Windows always requires uwtables here:
// https://bugzilla.mozilla.org/show_bug.cgi?id=1302078
if cx.sess().must_emit_unwind_tables() {
attributes::emit_uwtable(llfn, true);
}
set_frame_pointer_elimination(cx, llfn);
set_instrument_function(cx, llfn);
set_probestack(cx, llfn);
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::COLD) {
Attribute::Cold.apply_llfn(Function, llfn);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_RETURNS_TWICE) {
Attribute::ReturnsTwice.apply_llfn(Function, llfn);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_PURE) {
Attribute::ReadOnly.apply_llfn(Function, llfn);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::FFI_CONST) {
Attribute::ReadNone.apply_llfn(Function, llfn);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) {
naked(llfn, true);
}
if codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::ALLOCATOR) {
Attribute::NoAlias.apply_llfn(llvm::AttributePlace::ReturnValue, llfn);
}
sanitize(cx, codegen_fn_attrs.no_sanitize, llfn);
// Always annotate functions with the target-cpu they are compiled for.
// Without this, ThinLTO won't inline Rust functions into Clang generated
// functions (because Clang annotates functions this way too).
apply_target_cpu_attr(cx, llfn);
let features = llvm_target_features(cx.tcx.sess)
.map(|s| s.to_string())
.chain(codegen_fn_attrs.target_features.iter().map(|f| {
let feature = &f.as_str();
format!("+{}", llvm_util::to_llvm_feature(cx.tcx.sess, feature))
}))
.collect::<Vec<String>>()
.join(",");
if !features.is_empty() {
let val = CString::new(features).unwrap();
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("target-features"),
&val,
);
}
// Note that currently the `wasm-import-module` doesn't do anything, but
// eventually LLVM 7 should read this and ferry the appropriate import
// module to the output file.
if cx.tcx.sess.target.target.arch == "wasm32" {
if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("wasm-import-module"),
&module,
);
let name =
codegen_fn_attrs.link_name.unwrap_or_else(|| cx.tcx.item_name(instance.def_id()));
let name = CString::new(&name.as_str()[..]).unwrap();
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("wasm-import-name"),
&name,
);
}
}
}
pub fn provide(providers: &mut Providers) {
providers.supported_target_features = |tcx, cnum| {
assert_eq!(cnum, LOCAL_CRATE);
if tcx.sess.opts.actually_rustdoc {
// rustdoc needs to be able to document functions that use all the features, so
// provide them all.
llvm_util::all_known_features().map(|(a, b)| (a.to_string(), b)).collect()
} else {
llvm_util::supported_target_features(tcx.sess)
.iter()
.map(|&(a, b)| (a.to_string(), b))
.collect()
}
};
provide_extern(providers);
}
pub fn provide_extern(providers: &mut Providers) {
providers.wasm_import_module_map = |tcx, cnum| {
// Build up a map from DefId to a `NativeLib` structure, where
// `NativeLib` internally contains information about
// `#[link(wasm_import_module = "...")]` for example.
let native_libs = tcx.native_libraries(cnum);
let def_id_to_native_lib = native_libs
.iter()
.filter_map(|lib| lib.foreign_module.map(|id| (id, lib)))
.collect::<FxHashMap<_, _>>();
let mut ret = FxHashMap::default();
for lib in tcx.foreign_modules(cnum).iter() {
let module = def_id_to_native_lib.get(&lib.def_id).and_then(|s| s.wasm_import_module);
let module = match module {
Some(s) => s,
None => continue,
};
ret.extend(lib.foreign_items.iter().map(|id| {
assert_eq!(id.krate, cnum);
(*id, module.to_string())
}));
}
ret
};
}
fn wasm_import_module(tcx: TyCtxt<'_>, id: DefId) -> Option<CString> {
tcx.wasm_import_module_map(id.krate).get(&id).map(|s| CString::new(&s[..]).unwrap())
}


@@ -0,0 +1,316 @@
//! A helper class for dealing with static archives
use std::ffi::{CStr, CString};
use std::io;
use std::mem;
use std::path::{Path, PathBuf};
use std::ptr;
use std::str;
use crate::llvm::archive_ro::{ArchiveRO, Child};
use crate::llvm::{self, ArchiveKind};
use rustc_codegen_ssa::back::archive::{find_library, ArchiveBuilder};
use rustc_codegen_ssa::{looks_like_rust_object_file, METADATA_FILENAME};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
struct ArchiveConfig<'a> {
pub sess: &'a Session,
pub dst: PathBuf,
pub src: Option<PathBuf>,
pub lib_search_paths: Vec<PathBuf>,
}
/// Helper for adding many files to an archive.
#[must_use = "must call build() to finish building the archive"]
pub struct LlvmArchiveBuilder<'a> {
config: ArchiveConfig<'a>,
removals: Vec<String>,
additions: Vec<Addition>,
should_update_symbols: bool,
src_archive: Option<Option<ArchiveRO>>,
}
enum Addition {
File { path: PathBuf, name_in_archive: String },
Archive { path: PathBuf, archive: ArchiveRO, skip: Box<dyn FnMut(&str) -> bool> },
}
impl Addition {
fn path(&self) -> &Path {
match self {
Addition::File { path, .. } | Addition::Archive { path, .. } => path,
}
}
}
fn is_relevant_child(c: &Child<'_>) -> bool {
match c.name() {
Some(name) => !name.contains("SYMDEF"),
None => false,
}
}
fn archive_config<'a>(sess: &'a Session, output: &Path, input: Option<&Path>) -> ArchiveConfig<'a> {
use rustc_codegen_ssa::back::link::archive_search_paths;
ArchiveConfig {
sess,
dst: output.to_path_buf(),
src: input.map(|p| p.to_path_buf()),
lib_search_paths: archive_search_paths(sess),
}
}
impl<'a> ArchiveBuilder<'a> for LlvmArchiveBuilder<'a> {
/// Creates a new static archive, ready for modifying the archive specified
/// by `config`.
fn new(sess: &'a Session, output: &Path, input: Option<&Path>) -> LlvmArchiveBuilder<'a> {
let config = archive_config(sess, output, input);
LlvmArchiveBuilder {
config,
removals: Vec::new(),
additions: Vec::new(),
should_update_symbols: false,
src_archive: None,
}
}
/// Removes a file from this archive
fn remove_file(&mut self, file: &str) {
self.removals.push(file.to_string());
}
/// Lists all files in an archive
fn src_files(&mut self) -> Vec<String> {
if self.src_archive().is_none() {
return Vec::new();
}
let archive = self.src_archive.as_ref().unwrap().as_ref().unwrap();
archive
.iter()
.filter_map(|child| child.ok())
.filter(is_relevant_child)
.filter_map(|child| child.name())
.filter(|name| !self.removals.iter().any(|x| x == name))
.map(|name| name.to_owned())
.collect()
}
/// Adds all of the contents of a native library to this archive. This will
/// search in the relevant locations for a library named `name`.
fn add_native_library(&mut self, name: Symbol) {
let location = find_library(name, &self.config.lib_search_paths, self.config.sess);
self.add_archive(&location, |_| false).unwrap_or_else(|e| {
self.config.sess.fatal(&format!(
"failed to add native library {}: {}",
location.to_string_lossy(),
e
));
});
}
/// Adds all of the contents of the rlib at the specified path to this
/// archive.
///
/// This ignores adding the bytecode from the rlib, and if LTO is enabled
/// then the object file also isn't added.
fn add_rlib(
&mut self,
rlib: &Path,
name: &str,
lto: bool,
skip_objects: bool,
) -> io::Result<()> {
        // Ignoring obj files starting with the crate name,
        // as a simple comparison is not enough - there
        // might also be an extra name suffix
let obj_start = name.to_owned();
self.add_archive(rlib, move |fname: &str| {
// Ignore metadata files, no matter the name.
if fname == METADATA_FILENAME {
return true;
}
// Don't include Rust objects if LTO is enabled
if lto && looks_like_rust_object_file(fname) {
return true;
}
            // Otherwise, if this is *not* a Rust object and we're skipping
            // objects, then skip this file
if skip_objects && (!fname.starts_with(&obj_start) || !fname.ends_with(".o")) {
return true;
}
// ok, don't skip this
false
})
}
/// Adds an arbitrary file to this archive
fn add_file(&mut self, file: &Path) {
let name = file.file_name().unwrap().to_str().unwrap();
self.additions
.push(Addition::File { path: file.to_path_buf(), name_in_archive: name.to_owned() });
}
/// Indicate that the next call to `build` should update all symbols in
/// the archive (equivalent to running 'ar s' over it).
fn update_symbols(&mut self) {
self.should_update_symbols = true;
}
/// Combine the provided files, rlibs, and native libraries into a single
/// `Archive`.
fn build(mut self) {
let kind = self.llvm_archive_kind().unwrap_or_else(|kind| {
self.config.sess.fatal(&format!("Don't know how to build archive of type: {}", kind))
});
if let Err(e) = self.build_with_llvm(kind) {
self.config.sess.fatal(&format!("failed to build archive: {}", e));
}
}
}
impl<'a> LlvmArchiveBuilder<'a> {
fn src_archive(&mut self) -> Option<&ArchiveRO> {
if let Some(ref a) = self.src_archive {
return a.as_ref();
}
let src = self.config.src.as_ref()?;
self.src_archive = Some(ArchiveRO::open(src).ok());
self.src_archive.as_ref().unwrap().as_ref()
}
fn add_archive<F>(&mut self, archive: &Path, skip: F) -> io::Result<()>
where
F: FnMut(&str) -> bool + 'static,
{
let archive_ro = match ArchiveRO::open(archive) {
Ok(ar) => ar,
Err(e) => return Err(io::Error::new(io::ErrorKind::Other, e)),
};
if self.additions.iter().any(|ar| ar.path() == archive) {
return Ok(());
}
self.additions.push(Addition::Archive {
path: archive.to_path_buf(),
archive: archive_ro,
skip: Box::new(skip),
});
Ok(())
}
fn llvm_archive_kind(&self) -> Result<ArchiveKind, &str> {
let kind = &*self.config.sess.target.target.options.archive_format;
kind.parse().map_err(|_| kind)
}
fn build_with_llvm(&mut self, kind: ArchiveKind) -> io::Result<()> {
let removals = mem::take(&mut self.removals);
let mut additions = mem::take(&mut self.additions);
let mut strings = Vec::new();
let mut members = Vec::new();
let dst = CString::new(self.config.dst.to_str().unwrap())?;
let should_update_symbols = self.should_update_symbols;
unsafe {
if let Some(archive) = self.src_archive() {
for child in archive.iter() {
let child = child.map_err(string_to_io_error)?;
let child_name = match child.name() {
Some(s) => s,
None => continue,
};
if removals.iter().any(|r| r == child_name) {
continue;
}
let name = CString::new(child_name)?;
members.push(llvm::LLVMRustArchiveMemberNew(
ptr::null(),
name.as_ptr(),
Some(child.raw),
));
strings.push(name);
}
}
for addition in &mut additions {
match addition {
Addition::File { path, name_in_archive } => {
let path = CString::new(path.to_str().unwrap())?;
let name = CString::new(name_in_archive.clone())?;
members.push(llvm::LLVMRustArchiveMemberNew(
path.as_ptr(),
name.as_ptr(),
None,
));
strings.push(path);
strings.push(name);
}
Addition::Archive { archive, skip, .. } => {
for child in archive.iter() {
let child = child.map_err(string_to_io_error)?;
if !is_relevant_child(&child) {
continue;
}
let child_name = child.name().unwrap();
if skip(child_name) {
continue;
}
// It appears that LLVM's archive writer is a little
// buggy if the name we pass down isn't just the
// filename component, so chop that off here and
// pass it in.
//
// See LLVM bug 25877 for more info.
let child_name =
Path::new(child_name).file_name().unwrap().to_str().unwrap();
let name = CString::new(child_name)?;
let m = llvm::LLVMRustArchiveMemberNew(
ptr::null(),
name.as_ptr(),
Some(child.raw),
);
members.push(m);
strings.push(name);
}
}
}
}
let r = llvm::LLVMRustWriteArchive(
dst.as_ptr(),
members.len() as libc::size_t,
members.as_ptr() as *const &_,
should_update_symbols,
kind,
);
let ret = if r.into_result().is_err() {
let err = llvm::LLVMRustGetLastError();
let msg = if err.is_null() {
"failed to write archive".into()
} else {
String::from_utf8_lossy(CStr::from_ptr(err).to_bytes())
};
Err(io::Error::new(io::ErrorKind::Other, msg))
} else {
Ok(())
};
for member in members {
llvm::LLVMRustArchiveMemberFree(member);
}
ret
}
}
}
fn string_to_io_error(s: String) -> io::Error {
io::Error::new(io::ErrorKind::Other, format!("bad archive: {}", s))
}
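
A rough usage sketch of the builder above, with hypothetical paths and crate name. It assumes the `ArchiveBuilder` trait is in scope and only illustrates the intended call order (construct, add inputs, update symbols, build):

// Hypothetical caller; `sess`, `out`, and `rlib` are assumed to come from the
// surrounding driver code and are not defined in this diff.
fn build_example_archive(sess: &Session, out: &Path, rlib: &Path) -> io::Result<()> {
    let mut builder = LlvmArchiveBuilder::new(sess, out, None);
    builder.add_rlib(rlib, "example_crate", /* lto */ false, /* skip_objects */ false)?;
    builder.update_symbols();
    builder.build(); // consumes the builder and writes the archive to `out`
    Ok(())
}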

File diff suppressed because it is too large


@ -0,0 +1,58 @@
use measureme::{event_id::SEPARATOR_BYTE, EventId, StringComponent, StringId};
use rustc_data_structures::profiling::{SelfProfiler, TimingGuard};
use std::ffi::{c_void, CStr};
use std::os::raw::c_char;
use std::sync::Arc;
fn llvm_args_to_string_id(profiler: &SelfProfiler, pass_name: &str, ir_name: &str) -> EventId {
let pass_name = profiler.get_or_alloc_cached_string(pass_name);
let mut components = vec![StringComponent::Ref(pass_name)];
    // Handle the fact that a LazyCallGraph::SCC name is a comma-separated list within parentheses.
let parentheses: &[_] = &['(', ')'];
let trimmed = ir_name.trim_matches(parentheses);
for part in trimmed.split(", ") {
let demangled_ir_name = rustc_demangle::demangle(part).to_string();
let ir_name = profiler.get_or_alloc_cached_string(demangled_ir_name);
components.push(StringComponent::Value(SEPARATOR_BYTE));
components.push(StringComponent::Ref(ir_name));
}
EventId::from_label(profiler.alloc_string(components.as_slice()))
}
pub struct LlvmSelfProfiler<'a> {
profiler: Arc<SelfProfiler>,
stack: Vec<TimingGuard<'a>>,
llvm_pass_event_kind: StringId,
}
impl<'a> LlvmSelfProfiler<'a> {
pub fn new(profiler: Arc<SelfProfiler>) -> Self {
let llvm_pass_event_kind = profiler.alloc_string("LLVM Pass");
Self { profiler, stack: Vec::default(), llvm_pass_event_kind }
}
fn before_pass_callback(&'a mut self, pass_name: &str, ir_name: &str) {
let event_id = llvm_args_to_string_id(&self.profiler, pass_name, ir_name);
self.stack.push(TimingGuard::start(&self.profiler, self.llvm_pass_event_kind, event_id));
}
fn after_pass_callback(&mut self) {
self.stack.pop();
}
}
pub unsafe extern "C" fn selfprofile_before_pass_callback(
llvm_self_profiler: *mut c_void,
pass_name: *const c_char,
ir_name: *const c_char,
) {
let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
let pass_name = CStr::from_ptr(pass_name).to_str().expect("valid UTF-8");
let ir_name = CStr::from_ptr(ir_name).to_str().expect("valid UTF-8");
llvm_self_profiler.before_pass_callback(pass_name, ir_name);
}
pub unsafe extern "C" fn selfprofile_after_pass_callback(llvm_self_profiler: *mut c_void) {
let llvm_self_profiler = &mut *(llvm_self_profiler as *mut LlvmSelfProfiler<'_>);
llvm_self_profiler.after_pass_callback();
}
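
As a standalone sketch (assuming the `rustc_demangle` crate), this is just the name-handling step from `llvm_args_to_string_id` above: strip the parentheses that `LazyCallGraph::SCC` adds and demangle each comma-separated member.

// Illustrative helper, not part of this diff: split an SCC-style IR name,
// e.g. "(sym_a, sym_b)", into its demangled member names.
fn demangled_members(ir_name: &str) -> Vec<String> {
    let parentheses: &[_] = &['(', ')'];
    ir_name
        .trim_matches(parentheses)
        .split(", ")
        .map(|part| rustc_demangle::demangle(part).to_string())
        .collect()
}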

File diff suppressed because it is too large


@ -0,0 +1,205 @@
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the `Ty` type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its `llvm::Type` by calling `val_ty`,
//! but one `llvm::Type` corresponds to many `Ty`s; for instance, `tup(int, int,
//! int)` and `rec(x=int, y=int, z=int)` will have the same `llvm::Type`.
use super::ModuleLlvm;
use crate::attributes;
use crate::builder::Builder;
use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::metadata;
use crate::value::Value;
use rustc_codegen_ssa::base::maybe_create_entry_wrapper;
use rustc_codegen_ssa::mono_item::MonoItemExt;
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_middle::dep_graph;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs;
use rustc_middle::middle::cstore::EncodedMetadata;
use rustc_middle::middle::exported_symbols;
use rustc_middle::mir::mono::{Linkage, Visibility};
use rustc_middle::ty::TyCtxt;
use rustc_session::config::{DebugInfo, SanitizerSet};
use rustc_span::symbol::Symbol;
use std::ffi::CString;
use std::time::Instant;
pub fn write_compressed_metadata<'tcx>(
tcx: TyCtxt<'tcx>,
metadata: &EncodedMetadata,
llvm_module: &mut ModuleLlvm,
) {
use snap::write::FrameEncoder;
use std::io::Write;
let (metadata_llcx, metadata_llmod) = (&*llvm_module.llcx, llvm_module.llmod());
let mut compressed = tcx.metadata_encoding_version();
FrameEncoder::new(&mut compressed).write_all(&metadata.raw_data).unwrap();
let llmeta = common::bytes_in_context(metadata_llcx, &compressed);
let llconst = common::struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal =
unsafe { llvm::LLVMAddGlobal(metadata_llmod, common::val_ty(llconst), buf.as_ptr()) };
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
let section_name = metadata::metadata_section_name(&tcx.sess.target.target);
let name = SmallCStr::new(section_name);
llvm::LLVMSetSection(llglobal, name.as_ptr());
// Also generate a .section directive to force no
// flags, at least for ELF outputs, so that the
// metadata doesn't get loaded into memory.
let directive = format!(".section {}", section_name);
llvm::LLVMSetModuleInlineAsm2(metadata_llmod, directive.as_ptr().cast(), directive.len())
}
}
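
A minimal sketch, assuming the `snap` crate, of the compression step used in `write_compressed_metadata` above: the metadata bytes are written through a `FrameEncoder` into a plain `Vec<u8>`.

// Illustrative only: compress a byte buffer the same way as above. The frame
// is flushed when the temporary FrameEncoder is dropped at the end of the
// write_all statement.
fn compress_metadata(raw: &[u8]) -> Vec<u8> {
    use snap::write::FrameEncoder;
    use std::io::Write;
    let mut compressed = Vec::new();
    FrameEncoder::new(&mut compressed).write_all(raw).unwrap();
    compressed
}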
pub struct ValueIter<'ll> {
cur: Option<&'ll Value>,
step: unsafe extern "C" fn(&'ll Value) -> Option<&'ll Value>,
}
impl Iterator for ValueIter<'ll> {
type Item = &'ll Value;
fn next(&mut self) -> Option<&'ll Value> {
let old = self.cur;
if let Some(old) = old {
self.cur = unsafe { (self.step)(old) };
}
old
}
}
pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
unsafe { ValueIter { cur: llvm::LLVMGetFirstGlobal(llmod), step: llvm::LLVMGetNextGlobal } }
}
pub fn compile_codegen_unit(
tcx: TyCtxt<'tcx>,
cgu_name: Symbol,
) -> (ModuleCodegen<ModuleLlvm>, u64) {
let prof_timer = tcx.prof.generic_activity_with_arg("codegen_module", cgu_name.to_string());
let start_time = Instant::now();
let dep_node = tcx.codegen_unit(cgu_name).codegen_dep_node(tcx);
let (module, _) =
tcx.dep_graph.with_task(dep_node, tcx, cgu_name, module_codegen, dep_graph::hash_result);
let time_to_codegen = start_time.elapsed();
drop(prof_timer);
// We assume that the cost to run LLVM on a CGU is proportional to
// the time we needed for codegenning it.
let cost = time_to_codegen.as_secs() * 1_000_000_000 + time_to_codegen.subsec_nanos() as u64;
fn module_codegen(tcx: TyCtxt<'_>, cgu_name: Symbol) -> ModuleCodegen<ModuleLlvm> {
let cgu = tcx.codegen_unit(cgu_name);
// Instantiate monomorphizations without filling out definitions yet...
let llvm_module = ModuleLlvm::new(tcx, &cgu_name.as_str());
{
let cx = CodegenCx::new(tcx, cgu, &llvm_module);
let mono_items = cx.codegen_unit.items_in_deterministic_order(cx.tcx);
for &(mono_item, (linkage, visibility)) in &mono_items {
mono_item.predefine::<Builder<'_, '_, '_>>(&cx, linkage, visibility);
}
// ... and now that we have everything pre-defined, fill out those definitions.
for &(mono_item, _) in &mono_items {
mono_item.define::<Builder<'_, '_, '_>>(&cx);
}
// If this codegen unit contains the main function, also create the
// wrapper here
if let Some(entry) = maybe_create_entry_wrapper::<Builder<'_, '_, '_>>(&cx) {
attributes::sanitize(&cx, SanitizerSet::empty(), entry);
}
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
}
// Finalize code coverage by injecting the coverage map. Note, the coverage map will
// also be added to the `llvm.used` variable, created next.
if cx.sess().opts.debugging_opts.instrument_coverage {
cx.coverageinfo_finalize();
}
// Create the llvm.used variable
// This variable has type [N x i8*] and is stored in the llvm.metadata section
if !cx.used_statics().borrow().is_empty() {
cx.create_used_variable()
}
// Finalize debuginfo
if cx.sess().opts.debuginfo != DebugInfo::None {
cx.debuginfo_finalize();
}
}
ModuleCodegen {
name: cgu_name.to_string(),
module_llvm: llvm_module,
kind: ModuleKind::Regular,
}
}
(module, cost)
}
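
As a small illustrative check, the `cost` computed above is simply the elapsed codegen time expressed in nanoseconds:

// Illustrative only: the same formula as above, applied to a known Duration.
use std::time::Duration;

fn cost_in_nanos(d: Duration) -> u64 {
    d.as_secs() * 1_000_000_000 + d.subsec_nanos() as u64
}

fn main() {
    assert_eq!(cost_in_nanos(Duration::new(2, 500)), 2_000_000_500);
}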
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {
match linkage {
Linkage::External => llvm::Linkage::ExternalLinkage,
Linkage::AvailableExternally => llvm::Linkage::AvailableExternallyLinkage,
Linkage::LinkOnceAny => llvm::Linkage::LinkOnceAnyLinkage,
Linkage::LinkOnceODR => llvm::Linkage::LinkOnceODRLinkage,
Linkage::WeakAny => llvm::Linkage::WeakAnyLinkage,
Linkage::WeakODR => llvm::Linkage::WeakODRLinkage,
Linkage::Appending => llvm::Linkage::AppendingLinkage,
Linkage::Internal => llvm::Linkage::InternalLinkage,
Linkage::Private => llvm::Linkage::PrivateLinkage,
Linkage::ExternalWeak => llvm::Linkage::ExternalWeakLinkage,
Linkage::Common => llvm::Linkage::CommonLinkage,
}
}
pub fn visibility_to_llvm(linkage: Visibility) -> llvm::Visibility {
match linkage {
Visibility::Default => llvm::Visibility::Default,
Visibility::Hidden => llvm::Visibility::Hidden,
Visibility::Protected => llvm::Visibility::Protected,
}
}

File diff suppressed because it is too large


@ -0,0 +1,192 @@
//! Handles codegen of callees as well as other call-related
//! things. Callees are a superset of normal rust values and sometimes
//! have different representations. In particular, top-level fn items
//! and methods are represented as just a fn ptr and not a full
//! closure.
use crate::abi::{FnAbi, FnAbiLlvmExt};
use crate::attributes;
use crate::context::CodegenCx;
use crate::llvm;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use tracing::debug;
use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
use rustc_middle::ty::{self, Instance, TypeFoldable};
/// Codegens a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
///
/// # Parameters
///
/// - `cx`: the crate context
/// - `instance`: the instance to be instantiated
pub fn get_fn(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) -> &'ll Value {
let tcx = cx.tcx();
debug!("get_fn(instance={:?})", instance);
assert!(!instance.substs.needs_infer());
assert!(!instance.substs.has_escaping_bound_vars());
if let Some(&llfn) = cx.instances.borrow().get(&instance) {
return llfn;
}
let sym = tcx.symbol_name(instance).name;
debug!(
"get_fn({:?}: {:?}) => {}",
instance,
instance.ty(cx.tcx(), ty::ParamEnv::reveal_all()),
sym
);
let fn_abi = FnAbi::of_instance(cx, instance, &[]);
let llfn = if let Some(llfn) = cx.get_declared_value(&sym) {
// Create a fn pointer with the new signature.
let llptrty = fn_abi.ptr_to_llvm_type(cx);
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
// library, they may not use precisely the same types: for
// example, they will probably each declare their own structs,
// which are distinct types from LLVM's point of view (nominal
// types).
//
// Now, if those two crates are linked into an application, and
// they contain inlined code, you can wind up with a situation
// where both of those functions wind up being loaded into this
// application simultaneously. In that case, the same function
// (from LLVM's point of view) requires two types. But of course
// LLVM won't allow one function to have two types.
//
// What we currently do, therefore, is declare the function with
// one of the two types (whichever happens to come first) and then
// bitcast as needed when the function is referenced to make sure
// it has the type we expect.
//
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if cx.val_ty(llfn) != llptrty {
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
cx.const_ptrcast(llfn, llptrty)
} else {
debug!("get_fn: not casting pointer!");
llfn
}
} else {
let llfn = cx.declare_fn(&sym, &fn_abi);
debug!("get_fn: not casting pointer!");
attributes::from_fn_attrs(cx, llfn, instance);
let instance_def_id = instance.def_id();
// Apply an appropriate linkage/visibility value to our item that we
// just declared.
//
// This is sort of subtle. Inside our codegen unit we started off
// compilation by predefining all our own `MonoItem` instances. That
// is, everything we're codegenning ourselves is already defined. That
// means that anything we're actually codegenning in this codegen unit
// will have hit the above branch in `get_declared_value`. As a result,
// we're guaranteed here that we're declaring a symbol that won't get
// defined, or in other words we're referencing a value from another
// codegen unit or even another crate.
//
// So because this is a foreign value we blanket apply an external
// linkage directive because it's coming from a different object file.
// The visibility here is where it gets tricky. This symbol could be
// referencing some foreign crate or foreign library (an `extern`
// block) in which case we want to leave the default visibility. We may
// also, though, have multiple codegen units. It could be a
// monomorphization, in which case its expected visibility depends on
// whether we are sharing generics or not. The important thing here is
// that the visibility we apply to the declaration is the same one that
// has been applied to the definition (wherever that definition may be).
unsafe {
llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::ExternalLinkage);
let is_generic = instance.substs.non_erasable_generics().next().is_some();
if is_generic {
// This is a monomorphization. Its expected visibility depends
// on whether we are in share-generics mode.
if cx.tcx.sess.opts.share_generics() {
// We are in share_generics mode.
if let Some(instance_def_id) = instance_def_id.as_local() {
// This is a definition from the current crate. If the
// definition is unreachable for downstream crates or
// the current crate does not re-export generics, the
// definition of the instance will have been declared
// as `hidden`.
if cx.tcx.is_unreachable_local_definition(instance_def_id)
|| !cx.tcx.local_crate_exports_generics()
{
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a monomorphization of a generic function
// defined in an upstream crate.
if instance.upstream_monomorphization(tcx).is_some() {
// This is instantiated in another crate. It cannot
// be `hidden`.
} else {
// This is a local instantiation of an upstream definition.
// If the current crate does not re-export it
// (because it is a C library or an executable), it
// will have been declared `hidden`.
if !cx.tcx.local_crate_exports_generics() {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
}
}
} else {
// When not sharing generics, all instances are in the same
// crate and have hidden visibility
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a non-generic function
if cx.tcx.is_codegened_item(instance_def_id) {
// This is a function that is instantiated in the local crate
if instance_def_id.is_local() {
                        // This is a function that is defined in the local crate.
// If it is not reachable, it is hidden.
if !cx.tcx.is_reachable_non_generic(instance_def_id) {
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
} else {
// This is a function from an upstream crate that has
// been instantiated here. These are always hidden.
llvm::LLVMRustSetVisibility(llfn, llvm::Visibility::Hidden);
}
}
}
}
// MinGW: For backward compatibility we rely on the linker to decide whether it
// should use dllimport for functions.
if cx.use_dll_storage_attrs
&& tcx.is_dllimport_foreign_item(instance_def_id)
&& tcx.sess.target.target.target_env != "gnu"
{
unsafe {
llvm::LLVMSetDLLStorageClass(llfn, llvm::DLLStorageClass::DllImport);
}
}
llfn
};
cx.instances.borrow_mut().insert(instance, llfn);
llfn
}


@ -0,0 +1,341 @@
#![allow(non_camel_case_types, non_snake_case)]
//! Code that is useful in various codegen modules.
use crate::consts::{self, const_alloc_to_llvm};
pub use crate::context::CodegenCx;
use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_ast::Mutability;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::mir::interpret::{Allocation, GlobalAlloc, Scalar};
use rustc_middle::ty::layout::TyAndLayout;
use rustc_span::symbol::Symbol;
use rustc_target::abi::{self, AddressSpace, HasDataLayout, LayoutOf, Pointer, Size};
use libc::{c_char, c_uint};
use tracing::debug;
/*
* A note on nomenclature of linking: "extern", "foreign", and "upcall".
*
* An "extern" is an LLVM symbol we wind up emitting an undefined external
* reference to. This means "we don't have the thing in this compilation unit,
* please make sure you link it in at runtime". This could be a reference to
* C code found in a C library, or rust code found in a rust crate.
*
* Most "externs" are implicitly declared (automatically) as a result of a
* user declaring an extern _module_ dependency; this causes the rust driver
* to locate an extern crate, scan its compilation metadata, and emit extern
* declarations for any symbols used by the declaring crate.
*
* A "foreign" is an extern that references C (or other non-rust ABI) code.
* There is no metadata to scan for extern references so in these cases either
* a header-digester like bindgen, or manual function prototypes, have to
* serve as declarators. So these are usually given explicitly as prototype
* declarations, in rust code, with ABI attributes on them noting which ABI to
* link via.
*
* An "upcall" is a foreign call generated by the compiler (not corresponding
* to any user-written call in the code) into the runtime library, to perform
* some helper task such as bringing a task to life, allocating memory, etc.
*
*/
/// A structure representing an active landing pad for the duration of a basic
/// block.
///
/// Each `Block` may contain an instance of this, indicating whether the block
/// is part of a landing pad or not. This is used to make decisions about whether
/// to emit `invoke` instructions (e.g., in a landing pad we don't continue to
/// use `invoke`) and also about various function call metadata.
///
/// For GNU exceptions (`landingpad` + `resume` instructions) this structure is
/// just a bunch of `None` instances (not too interesting), but for MSVC
/// exceptions (`cleanuppad` + `cleanupret` instructions) this contains data.
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef<'ll>,
}
impl Funclet<'ll> {
pub fn new(cleanuppad: &'ll Value) -> Self {
Funclet { cleanuppad, operand: OperandBundleDef::new("funclet", &[cleanuppad]) }
}
pub fn cleanuppad(&self) -> &'ll Value {
self.cleanuppad
}
pub fn bundle(&self) -> &OperandBundleDef<'ll> {
&self.operand
}
}
impl BackendTypes for CodegenCx<'ll, 'tcx> {
type Value = &'ll Value;
type Function = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Funclet = Funclet<'ll>;
type DIScope = &'ll llvm::debuginfo::DIScope;
type DIVariable = &'ll llvm::debuginfo::DIVariable;
}
impl CodegenCx<'ll, 'tcx> {
pub fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe { llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint) }
}
pub fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
unsafe { llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint) }
}
pub fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
bytes_in_context(self.llcx, bytes)
}
fn const_cstr(&self, s: Symbol, null_terminated: bool) -> &'ll Value {
unsafe {
if let Some(&llval) = self.const_cstr_cache.borrow().get(&s) {
return llval;
}
let s_str = s.as_str();
let sc = llvm::LLVMConstStringInContext(
self.llcx,
s_str.as_ptr() as *const c_char,
s_str.len() as c_uint,
!null_terminated as Bool,
);
let sym = self.generate_local_symbol_name("str");
let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
self.const_cstr_cache.borrow_mut().insert(s, g);
g
}
}
pub fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
unsafe {
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, idx={}, r={:?})", v, idx, r);
r
}
}
}
impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn const_null(&self, t: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstNull(t) }
}
fn const_undef(&self, t: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMGetUndef(t) }
}
fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i as u64, True) }
}
fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
unsafe { llvm::LLVMConstInt(t, i, False) }
}
fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
fn const_bool(&self, val: bool) -> &'ll Value {
self.const_uint(self.type_i1(), val as u64)
}
fn const_i32(&self, i: i32) -> &'ll Value {
self.const_int(self.type_i32(), i as i64)
}
fn const_u32(&self, i: u32) -> &'ll Value {
self.const_uint(self.type_i32(), i as u64)
}
fn const_u64(&self, i: u64) -> &'ll Value {
self.const_uint(self.type_i64(), i)
}
fn const_usize(&self, i: u64) -> &'ll Value {
let bit_size = self.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
assert!(i < (1 << bit_size));
}
self.const_uint(self.isize_ty, i)
}
fn const_u8(&self, i: u8) -> &'ll Value {
self.const_uint(self.type_i8(), i as u64)
}
fn const_real(&self, t: &'ll Type, val: f64) -> &'ll Value {
unsafe { llvm::LLVMConstReal(t, val) }
}
fn const_str(&self, s: Symbol) -> (&'ll Value, &'ll Value) {
let len = s.as_str().len();
let cs = consts::ptrcast(
self.const_cstr(s, false),
self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
);
(cs, self.const_usize(len as u64))
}
fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
struct_in_context(self.llcx, elts, packed)
}
fn const_to_opt_uint(&self, v: &'ll Value) -> Option<u64> {
try_as_const_integral(v).map(|v| unsafe { llvm::LLVMConstIntGetZExtValue(v) })
}
fn const_to_opt_u128(&self, v: &'ll Value, sign_ext: bool) -> Option<u128> {
try_as_const_integral(v).and_then(|v| unsafe {
let (mut lo, mut hi) = (0u64, 0u64);
let success = llvm::LLVMRustConstInt128Get(v, sign_ext, &mut hi, &mut lo);
success.then_some(hi_lo_to_u128(lo, hi))
})
}
fn scalar_to_backend(&self, cv: Scalar, layout: &abi::Scalar, llty: &'ll Type) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
Scalar::Raw { size: 0, .. } => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
}
Scalar::Raw { data, size } => {
assert_eq!(size as u64, layout.value.size(self).bytes());
let llval = self.const_uint_big(self.type_ix(bitsize), data);
if layout.value == Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.const_bitcast(llval, llty)
}
}
Scalar::Ptr(ptr) => {
let (base_addr, base_addr_space) = match self.tcx.global_alloc(ptr.alloc_id) {
GlobalAlloc::Memory(alloc) => {
let init = const_alloc_to_llvm(self, alloc);
let value = match alloc.mutability {
Mutability::Mut => self.static_addr_of_mut(init, alloc.align, None),
_ => self.static_addr_of(init, alloc.align, None),
};
if !self.sess().fewer_names() {
llvm::set_value_name(value, format!("{:?}", ptr.alloc_id).as_bytes());
}
(value, AddressSpace::DATA)
}
GlobalAlloc::Function(fn_instance) => (
self.get_fn_addr(fn_instance.polymorphize(self.tcx)),
self.data_layout().instruction_address_space,
),
GlobalAlloc::Static(def_id) => {
assert!(self.tcx.is_static(def_id));
assert!(!self.tcx.is_thread_local_static(def_id));
(self.get_static(def_id), AddressSpace::DATA)
}
};
let llval = unsafe {
llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
&self.const_usize(ptr.offset.bytes()),
1,
)
};
if layout.value != Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.const_bitcast(llval, llty)
}
}
}
}
fn from_const_alloc(
&self,
layout: TyAndLayout<'tcx>,
alloc: &Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
assert_eq!(alloc.align, layout.align.abi);
let llty = self.type_ptr_to(layout.llvm_type(self));
let llval = if layout.size == Size::ZERO {
let llval = self.const_usize(alloc.align.bytes());
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, alloc.align, None);
let llval = unsafe {
llvm::LLVMConstInBoundsGEP(
self.const_bitcast(base_addr, self.type_i8p()),
&self.const_usize(offset.bytes()),
1,
)
};
self.const_bitcast(llval, llty)
};
PlaceRef::new_sized(llval, layout)
}
fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
consts::ptrcast(val, ty)
}
}
pub fn val_ty(v: &Value) -> &Type {
unsafe { llvm::LLVMTypeOf(v) }
}
pub fn bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True)
}
}
pub fn struct_in_context(llcx: &'a llvm::Context, elts: &[&'a Value], packed: bool) -> &'a Value {
unsafe {
llvm::LLVMConstStructInContext(llcx, elts.as_ptr(), elts.len() as c_uint, packed as Bool)
}
}
#[inline]
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
((hi as u128) << 64) | (lo as u128)
}
fn try_as_const_integral(v: &Value) -> Option<&ConstantInt> {
unsafe { llvm::LLVMIsAConstantInt(v) }
}
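
An illustrative round-trip check of the two-word `u128` representation shared by `const_uint_big` and `hi_lo_to_u128` above:

// Illustrative only: split a u128 into the [lo, hi] u64 words used above and
// reassemble it with the same shift/or as hi_lo_to_u128.
fn main() {
    let u: u128 = 0x0123_4567_89ab_cdef_0fed_cba9_8765_4321;
    let words = [u as u64, (u >> 64) as u64]; // [lo, hi]
    assert_eq!(((words[1] as u128) << 64) | (words[0] as u128), u);
}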


@ -0,0 +1,512 @@
use crate::base;
use crate::common::CodegenCx;
use crate::debuginfo;
use crate::llvm::{self, True};
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use libc::c_uint;
use rustc_codegen_ssa::traits::*;
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_hir::Node;
use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc_middle::mir::interpret::{
read_target_uint, Allocation, ConstValue, ErrorHandled, GlobalAlloc, Pointer,
};
use rustc_middle::mir::mono::MonoItem;
use rustc_middle::ty::{self, Instance, Ty};
use rustc_middle::{bug, span_bug};
use rustc_span::symbol::sym;
use rustc_span::Span;
use rustc_target::abi::{AddressSpace, Align, HasDataLayout, LayoutOf, Primitive, Scalar, Size};
use tracing::debug;
use std::ffi::CStr;
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations().len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations().iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
// This `inspect` is okay since we have checked that it is not within a relocation, it
// is within the bounds of the allocation, and it doesn't affect interpreter execution
// (we inspect the result after interpreter execution). Any undef byte is replaced with
// some arbitrary byte value.
//
// FIXME: relay undef bytes to codegen as undef const bytes
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(next_offset..offset);
llvals.push(cx.const_bytes(bytes));
}
let ptr_offset = read_target_uint(
dl.endian,
// This `inspect` is okay since it is within the bounds of the allocation, it doesn't
// affect interpreter execution (we inspect the result after interpreter execution),
// and we properly interpret the relocation as a relocation pointer offset.
alloc.inspect_with_uninit_and_ptr_outside_interpreter(offset..(offset + pointer_size)),
)
.expect("const_alloc_to_llvm: could not read relocation pointer")
as u64;
let address_space = match cx.tcx.global_alloc(alloc_id) {
GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) => AddressSpace::DATA,
};
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&Scalar { value: Primitive::Pointer, valid_range: 0..=!0 },
cx.type_i8p_ext(address_space),
));
next_offset = offset + pointer_size;
}
if alloc.len() >= next_offset {
let range = next_offset..alloc.len();
    // This `inspect` is okay since we have checked that it is after all relocations, it is
// within the bounds of the allocation, and it doesn't affect interpreter execution (we
// inspect the result after interpreter execution). Any undef byte is replaced with some
// arbitrary byte value.
//
// FIXME: relay undef bytes to codegen as undef const bytes
let bytes = alloc.inspect_with_uninit_and_ptr_outside_interpreter(range);
llvals.push(cx.const_bytes(bytes));
}
cx.const_struct(&llvals, true)
}
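
A simplified, illustrative model of the interleaving performed by `const_alloc_to_llvm` above: plain byte runs and pointer-sized relocations are emitted in increasing offset order, so the emitted pieces cover the whole allocation. This standalone sketch uses plain slices and offsets instead of the real `Allocation` type.

// Illustrative model only: alternate raw byte runs with pointer-sized
// relocation slots; `reloc_offsets` is assumed sorted and non-overlapping.
enum Piece<'a> {
    Bytes(&'a [u8]),
    Relocation { offset: usize },
}

fn split_alloc<'a>(bytes: &'a [u8], reloc_offsets: &[usize], ptr_size: usize) -> Vec<Piece<'a>> {
    let mut pieces = Vec::new();
    let mut next = 0;
    for &off in reloc_offsets {
        if off > next {
            pieces.push(Piece::Bytes(&bytes[next..off]));
        }
        pieces.push(Piece::Relocation { offset: off });
        next = off + ptr_size;
    }
    if bytes.len() >= next {
        // Mirrors the `>=` check above: a trailing (possibly empty) run of raw bytes.
        pieces.push(Piece::Bytes(&bytes[next..]));
    }
    pieces
}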
pub fn codegen_static_initializer(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
let alloc = match cx.tcx.const_eval_poly(def_id)? {
ConstValue::ByRef { alloc, offset } if offset.bytes() == 0 => alloc,
val => bug!("static const eval returned {:#?}", val),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
fn set_global_alignment(cx: &CodegenCx<'ll, '_>, gv: &'ll Value, mut align: Align) {
// The target may require greater alignment for globals than the type does.
// Note: GCC and Clang also allow `__attribute__((aligned))` on variables,
// which can force it to be smaller. Rust doesn't support this yet.
if let Some(min) = cx.sess().target.target.options.min_global_align {
match Align::from_bits(min) {
Ok(min) => align = align.max(min),
Err(err) => {
cx.sess().err(&format!("invalid minimum global alignment: {}", err));
}
}
}
unsafe {
llvm::LLVMSetAlignment(gv, align.bytes() as u32);
}
}
fn check_and_apply_linkage(
cx: &CodegenCx<'ll, 'tcx>,
attrs: &CodegenFnAttrs,
ty: Ty<'tcx>,
sym: &str,
span: Span,
) -> &'ll Value {
let llty = cx.layout_of(ty).llvm_type(cx);
if let Some(linkage) = attrs.linkage {
debug!("get_static: sym={} linkage={:?}", sym, linkage);
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
        // extern "C" fn() from being null, so we can't just declare a
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
let llty2 = if let ty::RawPtr(ref mt) = ty.kind {
cx.layout_of(mt.ty).llvm_type(cx)
} else {
cx.sess().span_fatal(
span,
"must have type `*const T` or `*mut T` due to `#[linkage]` attribute",
)
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = cx.declare_global(&sym, llty2);
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(|| {
cx.sess().span_fatal(span, &format!("symbol `{}` is already defined", &sym))
});
llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
g2
}
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
cx.declare_global(&sym, llty)
}
}
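
A hedged, user-side example (hypothetical names, requires the unstable `linkage` feature) of the case handled above; the raw-pointer type requirement exists because a weakly linked symbol may end up null:

// Hypothetical user code exercising the `#[linkage]` branch above.
#![feature(linkage)]

extern "C" {
    #[linkage = "extern_weak"]
    static OPTIONAL_SYMBOL: *const u8;
}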
pub fn ptrcast(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstPointerCast(val, ty) }
}
impl CodegenCx<'ll, 'tcx> {
crate fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMConstBitCast(val, ty) }
}
crate fn static_addr_of_mut(
&self,
cv: &'ll Value,
align: Align,
kind: Option<&str>,
) -> &'ll Value {
unsafe {
let gv = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = self.define_global(&name[..], self.val_ty(cv)).unwrap_or_else(|| {
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
}
_ => self.define_private_global(self.val_ty(cv)),
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(&self, gv, align);
llvm::SetUnnamedAddress(gv, llvm::UnnamedAddr::Global);
gv
}
}
crate fn get_static(&self, def_id: DefId) -> &'ll Value {
let instance = Instance::mono(self.tcx, def_id);
if let Some(&g) = self.instances.borrow().get(&instance) {
return g;
}
let defined_in_current_codegen_unit =
self.codegen_unit.items().contains_key(&MonoItem::Static(def_id));
assert!(
!defined_in_current_codegen_unit,
"consts::get_static() should always hit the cache for \
statics defined in the same CGU, but did not for `{:?}`",
def_id
);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let sym = self.tcx.symbol_name(instance).name;
debug!("get_static: sym={} instance={:?}", sym, instance);
let g = if let Some(def_id) = def_id.as_local() {
let id = self.tcx.hir().local_def_id_to_hir_id(def_id);
let llty = self.layout_of(ty).llvm_type(self);
// FIXME: refactor this to work without accessing the HIR
let (g, attrs) = match self.tcx.hir().get(id) {
Node::Item(&hir::Item { attrs, span, kind: hir::ItemKind::Static(..), .. }) => {
if let Some(g) = self.get_declared_value(sym) {
if self.val_ty(g) != self.type_ptr_to(llty) {
span_bug!(span, "Conflicting types for static");
}
}
let g = self.declare_global(sym, llty);
if !self.tcx.is_reachable_non_generic(def_id) {
unsafe {
llvm::LLVMRustSetVisibility(g, llvm::Visibility::Hidden);
}
}
(g, attrs)
}
Node::ForeignItem(&hir::ForeignItem {
ref attrs,
span,
kind: hir::ForeignItemKind::Static(..),
..
}) => {
let fn_attrs = self.tcx.codegen_fn_attrs(def_id);
(check_and_apply_linkage(&self, &fn_attrs, ty, sym, span), &**attrs)
}
item => bug!("get_static: expected static, found {:?}", item),
};
debug!("get_static: sym={} attrs={:?}", sym, attrs);
for attr in attrs {
if self.tcx.sess.check_name(attr, sym::thread_local) {
llvm::set_thread_local_mode(g, self.tls_model);
}
}
g
} else {
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
debug!("get_static: sym={} item_attr={:?}", sym, self.tcx.item_attrs(def_id));
let attrs = self.tcx.codegen_fn_attrs(def_id);
let span = self.tcx.def_span(def_id);
let g = check_and_apply_linkage(&self, &attrs, ty, sym, span);
// Thread-local statics in some other crate need to *always* be linked
// against in a thread-local fashion, so we need to be sure to apply the
// thread-local attribute locally if it was present remotely. If we
// don't do this then linker errors can be generated where the linker
            // complains that one object file has a thread-local version of the
// symbol and another one doesn't.
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
llvm::set_thread_local_mode(g, self.tls_model);
}
let needs_dll_storage_attr = self.use_dll_storage_attrs && !self.tcx.is_foreign_item(def_id) &&
// ThinLTO can't handle this workaround in all cases, so we don't
// emit the attrs. Instead we make them unnecessary by disallowing
// dynamic linking when linker plugin based LTO is enabled.
!self.tcx.sess.opts.cg.linker_plugin_lto.enabled();
// If this assertion triggers, there's something wrong with commandline
// argument validation.
debug_assert!(
!(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
&& self.tcx.sess.target.target.options.is_like_windows
&& self.tcx.sess.opts.cg.prefer_dynamic)
);
if needs_dll_storage_attr {
// This item is external but not foreign, i.e., it originates from an external Rust
// crate. Since we don't know whether this crate will be linked dynamically or
// statically in the final application, we always mark such symbols as 'dllimport'.
// If final linkage happens to be static, we rely on compiler-emitted __imp_ stubs
// to make things work.
//
// However, in some scenarios we defer emission of statics to downstream
// crates, so there are cases where a static with an upstream DefId
// is actually present in the current crate. We can find out via the
// is_codegened_item query.
if !self.tcx.is_codegened_item(def_id) {
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
}
}
}
g
};
if self.use_dll_storage_attrs && self.tcx.is_dllimport_foreign_item(def_id) {
// For foreign (native) libs we know the exact storage type to use.
unsafe {
llvm::LLVMSetDLLStorageClass(g, llvm::DLLStorageClass::DllImport);
}
}
self.instances.borrow_mut().insert(instance, g);
g
}
}
impl StaticMethods for CodegenCx<'ll, 'tcx> {
fn static_addr_of(&self, cv: &'ll Value, align: Align, kind: Option<&str>) -> &'ll Value {
if let Some(&gv) = self.const_globals.borrow().get(&cv) {
unsafe {
// Upgrade the alignment in cases where the same constant is used with different
// alignment requirements
let llalign = align.bytes() as u32;
if llalign > llvm::LLVMGetAlignment(gv) {
llvm::LLVMSetAlignment(gv, llalign);
}
}
return gv;
}
let gv = self.static_addr_of_mut(cv, align, kind);
unsafe {
llvm::LLVMSetGlobalConstant(gv, True);
}
self.const_globals.borrow_mut().insert(cv, gv);
gv
}
fn codegen_static(&self, def_id: DefId, is_mutable: bool) {
unsafe {
let attrs = self.tcx.codegen_fn_attrs(def_id);
let (v, alloc) = match codegen_static_initializer(&self, def_id) {
Ok(v) => v,
// Error has already been reported
Err(_) => return,
};
let g = self.get_static(def_id);
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let mut val_llty = self.val_ty(v);
let v = if val_llty == self.type_i1() {
val_llty = self.type_i8();
llvm::LLVMConstZExt(v, val_llty)
} else {
v
};
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let llty = self.layout_of(ty).llvm_type(self);
let g = if val_llty == llty {
g
} else {
// If we created the global with the wrong type,
// correct the type.
let name = llvm::get_value_name(g).to_vec();
llvm::set_value_name(g, b"");
let linkage = llvm::LLVMRustGetLinkage(g);
let visibility = llvm::LLVMRustGetVisibility(g);
let new_g = llvm::LLVMRustGetOrInsertGlobal(
self.llmod,
name.as_ptr().cast(),
name.len(),
val_llty,
);
llvm::LLVMRustSetLinkage(new_g, linkage);
llvm::LLVMRustSetVisibility(new_g, visibility);
// To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it
// with the new global later. (See base::codegen_backend.)
self.statics_to_rauw.borrow_mut().push((g, new_g));
new_g
};
set_global_alignment(&self, g, self.align_of(ty));
llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory.
if !is_mutable {
if self.type_is_freeze(ty) {
llvm::LLVMSetGlobalConstant(g, llvm::True);
}
}
debuginfo::create_global_var_metadata(&self, def_id, g);
if attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL) {
llvm::set_thread_local_mode(g, self.tls_model);
// Do not allow LLVM to change the alignment of a TLS on macOS.
//
// By default a global's alignment can be freely increased.
// This allows LLVM to generate more performant instructions
// e.g., using load-aligned into a SIMD register.
//
// However, on macOS 10.10 or below, the dynamic linker does not
// respect any alignment given on the TLS (radar 24221680).
                // This would violate the alignment assumption and cause segfaults at runtime.
//
// This bug is very easy to trigger. In `println!` and `panic!`,
// the `LOCAL_STDOUT`/`LOCAL_STDERR` handles are stored in a TLS,
                // whose values are `mem::replace`d on initialization.
// The implementation of `mem::replace` will use SIMD
// whenever the size is 32 bytes or higher. LLVM notices SIMD is used
// and tries to align `LOCAL_STDOUT`/`LOCAL_STDERR` to a 32-byte boundary,
                // which macOS's dyld disregards, causing crashes
// (see issues #51794, #51758, #50867, #48866 and #44056).
//
                // To work around the bug, we trick LLVM into not increasing
// the global's alignment by explicitly assigning a section to it
// (equivalent to automatically generating a `#[link_section]` attribute).
// See the comment in the `GlobalValue::canIncreaseAlignment()` function
// of `lib/IR/Globals.cpp` for why this works.
//
// When the alignment is not increased, the optimized `mem::replace`
                // will use load-unaligned instructions instead, thus avoiding the crash.
//
// We could remove this hack whenever we decide to drop macOS 10.10 support.
if self.tcx.sess.target.target.options.is_like_osx {
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state
// (not as part of the interpreter execution).
//
// FIXME: This check requires that the (arbitrary) value of undefined bytes
// happens to be zero. Instead, we should only check the value of defined bytes
// and set all undefined bytes to zero if this allocation is headed for the
// BSS.
let all_bytes_are_zero = alloc.relocations().is_empty()
&& alloc
.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len())
.iter()
.all(|&byte| byte == 0);
let sect_name = if all_bytes_are_zero {
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_bss\0")
} else {
CStr::from_bytes_with_nul_unchecked(b"__DATA,__thread_data\0")
};
llvm::LLVMSetSection(g, sect_name.as_ptr());
}
}
// Wasm statics with custom link sections get special treatment as they
// go into custom sections of the wasm executable.
if self.tcx.sess.opts.target_triple.triple().starts_with("wasm32") {
if let Some(section) = attrs.link_section {
let section = llvm::LLVMMDStringInContext(
self.llcx,
section.as_str().as_ptr().cast(),
section.as_str().len() as c_uint,
);
assert!(alloc.relocations().is_empty());
// The `inspect` method is okay here because we checked relocations, and
// because we are doing this access to inspect the final interpreter state (not
// as part of the interpreter execution).
let bytes =
alloc.inspect_with_uninit_and_ptr_outside_interpreter(0..alloc.len());
let alloc = llvm::LLVMMDStringInContext(
self.llcx,
bytes.as_ptr().cast(),
bytes.len() as c_uint,
);
let data = [section, alloc];
let meta = llvm::LLVMMDNodeInContext(self.llcx, data.as_ptr(), 2);
llvm::LLVMAddNamedMetadataOperand(
self.llmod,
"wasm.custom_sections\0".as_ptr().cast(),
meta,
);
}
} else {
base::set_link_section(g, &attrs);
}
if attrs.flags.contains(CodegenFnAttrFlags::USED) {
self.add_used_global(g);
}
}
}
/// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
fn add_used_global(&self, global: &'ll Value) {
let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
self.used_statics.borrow_mut().push(cast);
}
}
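
For orientation, a hedged example (hypothetical user code, not from this commit) of attributes that the tail of `codegen_static` above reacts to: `#[used]` routes the static into `llvm.used` via `add_used_global`, and `#[link_section]` drives `set_link_section` (or the wasm custom-section path):

// Hypothetical user code: a static kept alive in llvm.used and placed in a
// made-up custom section.
#[used]
#[link_section = ".note.example"]
static MARKER: [u8; 4] = *b"mark";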


@ -0,0 +1,902 @@
use crate::attributes;
use crate::callee::get_fn;
use crate::coverageinfo;
use crate::debuginfo;
use crate::llvm;
use crate::llvm_util;
use crate::type_::Type;
use crate::value::Value;
use rustc_codegen_ssa::base::wants_msvc_seh;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::base_n;
use rustc_data_structures::const_cstr;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_middle::bug;
use rustc_middle::mir::mono::CodegenUnit;
use rustc_middle::ty::layout::{HasParamEnv, LayoutError, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt};
use rustc_session::config::{CFGuard, CrateType, DebugInfo};
use rustc_session::Session;
use rustc_span::source_map::{Span, DUMMY_SP};
use rustc_span::symbol::Symbol;
use rustc_target::abi::{HasDataLayout, LayoutOf, PointeeInfo, Size, TargetDataLayout, VariantIdx};
use rustc_target::spec::{HasTargetSpec, RelocModel, Target, TlsModel};
use std::cell::{Cell, RefCell};
use std::ffi::CStr;
use std::str;
/// There is one `CodegenCx` per compilation unit. Each one has its own LLVM
/// `llvm::Context` so that several compilation units may be optimized in parallel.
/// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`.
pub struct CodegenCx<'ll, 'tcx> {
pub tcx: TyCtxt<'tcx>,
pub check_overflow: bool,
pub use_dll_storage_attrs: bool,
pub tls_model: llvm::ThreadLocalMode,
pub llmod: &'ll llvm::Module,
pub llcx: &'ll llvm::Context,
pub codegen_unit: &'tcx CodegenUnit<'tcx>,
/// Cache instances of monomorphic and polymorphic items
pub instances: RefCell<FxHashMap<Instance<'tcx>, &'ll Value>>,
/// Cache generated vtables
pub vtables:
RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>,
    /// Cache of constant strings.
pub const_cstr_cache: RefCell<FxHashMap<Symbol, &'ll Value>>,
/// Reverse-direction for const ptrs cast from globals.
///
/// Key is a Value holding a `*T`,
/// Val is a Value holding a `*[T]`.
///
/// Needed because LLVM loses pointer->pointee association
/// when we ptrcast, and we have to ptrcast during codegen
/// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
/// a pointer to an LLVM array type. Similar for trait objects.
pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
/// Cache of emitted const globals (value -> global)
pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
/// List of globals for static variables which need to be passed to the
/// LLVM function ReplaceAllUsesWith (RAUW) when codegen is complete.
/// (We have to make sure we don't invalidate any Values referring
/// to constants.)
pub statics_to_rauw: RefCell<Vec<(&'ll Value, &'ll Value)>>,
/// Statics that will be placed in the llvm.used variable
/// See <http://llvm.org/docs/LangRef.html#the-llvm-used-global-variable> for details
pub used_statics: RefCell<Vec<&'ll Value>>,
pub lltypes: RefCell<FxHashMap<(Ty<'tcx>, Option<VariantIdx>), &'ll Type>>,
pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>,
pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
pub isize_ty: &'ll Type,
pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'tcx>>,
pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
eh_personality: Cell<Option<&'ll Value>>,
eh_catch_typeinfo: Cell<Option<&'ll Value>>,
pub rust_try_fn: Cell<Option<&'ll Value>>,
intrinsics: RefCell<FxHashMap<&'static str, &'ll Value>>,
/// A counter that is used for generating local symbol names
local_gen_sym_counter: Cell<usize>,
}
fn to_llvm_tls_model(tls_model: TlsModel) -> llvm::ThreadLocalMode {
match tls_model {
TlsModel::GeneralDynamic => llvm::ThreadLocalMode::GeneralDynamic,
TlsModel::LocalDynamic => llvm::ThreadLocalMode::LocalDynamic,
TlsModel::InitialExec => llvm::ThreadLocalMode::InitialExec,
TlsModel::LocalExec => llvm::ThreadLocalMode::LocalExec,
}
}
fn strip_function_ptr_alignment(data_layout: String) -> String {
// FIXME: Make this more general.
data_layout.replace("-Fi8-", "-")
}
fn strip_x86_address_spaces(data_layout: String) -> String {
data_layout.replace("-p270:32:32-p271:32:32-p272:64:64-", "-")
}
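
A hedged illustration of what `strip_x86_address_spaces` above does to a data-layout string; the input below is a representative x86-style layout written out for the example, not copied from a target spec in this commit:

// Illustrative only: the x86 pointer-address-space entries added by newer
// LLVMs are stripped so that older LLVMs accept the layout string.
fn main() {
    let dl = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128".to_string();
    let stripped = dl.replace("-p270:32:32-p271:32:32-p272:64:64-", "-");
    assert_eq!(stripped, "e-m:e-i64:64-f80:128-n8:16:32:64-S128");
}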
pub unsafe fn create_module(
tcx: TyCtxt<'_>,
llcx: &'ll llvm::Context,
mod_name: &str,
) -> &'ll llvm::Module {
let sess = tcx.sess;
let mod_name = SmallCStr::new(mod_name);
let llmod = llvm::LLVMModuleCreateWithNameInContext(mod_name.as_ptr(), llcx);
let mut target_data_layout = sess.target.target.data_layout.clone();
if llvm_util::get_major_version() < 9 {
target_data_layout = strip_function_ptr_alignment(target_data_layout);
}
if llvm_util::get_major_version() < 10 {
if sess.target.target.arch == "x86" || sess.target.target.arch == "x86_64" {
target_data_layout = strip_x86_address_spaces(target_data_layout);
}
}
    // Ensure that the hardcoded data-layout values remain the defaults.
if sess.target.target.options.is_builtin {
let tm = crate::back::write::create_informational_target_machine(tcx.sess);
llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm);
llvm::LLVMRustDisposeTargetMachine(tm);
let llvm_data_layout = llvm::LLVMGetDataLayoutStr(llmod);
let llvm_data_layout = str::from_utf8(CStr::from_ptr(llvm_data_layout).to_bytes())
.expect("got a non-UTF8 data-layout from LLVM");
        // Unfortunately LLVM target specs change over time, and right now we
        // don't have proper support for working with any `data_layout` other
        // than the one that is in the rust-lang/rust repo. If this compiler is
        // configured against a custom LLVM, we may have a differing data
        // layout, even though we should really update our own to use that one.
//
// As an interim hack, if CFG_LLVM_ROOT is not an empty string then we
// disable this check entirely as we may be configured with something
// that has a different target layout.
//
// Unsure if this will actually cause breakage when rustc is configured
// as such.
//
// FIXME(#34960)
let cfg_llvm_root = option_env!("CFG_LLVM_ROOT").unwrap_or("");
let custom_llvm_used = cfg_llvm_root.trim() != "";
if !custom_llvm_used && target_data_layout != llvm_data_layout {
bug!(
"data-layout for builtin `{}` target, `{}`, \
differs from LLVM default, `{}`",
sess.target.target.llvm_target,
target_data_layout,
llvm_data_layout
);
}
}
let data_layout = SmallCStr::new(&target_data_layout);
llvm::LLVMSetDataLayout(llmod, data_layout.as_ptr());
let llvm_target = SmallCStr::new(&sess.target.target.llvm_target);
llvm::LLVMRustSetNormalizedTarget(llmod, llvm_target.as_ptr());
if sess.relocation_model() == RelocModel::Pic {
llvm::LLVMRustSetModulePICLevel(llmod);
// PIE is potentially more effective than PIC, but can only be used in executables.
// If all our outputs are executables, then we can relax PIC to PIE.
if sess.crate_types().iter().all(|ty| *ty == CrateType::Executable) {
llvm::LLVMRustSetModulePIELevel(llmod);
}
}
// If skipping the PLT is enabled, we need to add some module metadata
// to ensure intrinsic calls don't use it.
if !sess.needs_plt() {
let avoid_plt = "RtLibUseGOT\0".as_ptr().cast();
llvm::LLVMRustAddModuleFlag(llmod, avoid_plt, 1);
}
// Control Flow Guard is currently only supported by the MSVC linker on Windows.
if sess.target.target.options.is_like_msvc {
match sess.opts.cg.control_flow_guard {
CFGuard::Disabled => {}
CFGuard::NoChecks => {
// Set `cfguard=1` module flag to emit metadata only.
llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 1)
}
CFGuard::Checks => {
// Set `cfguard=2` module flag to emit metadata and checks.
llvm::LLVMRustAddModuleFlag(llmod, "cfguard\0".as_ptr() as *const _, 2)
}
}
}
llmod
}
impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
crate fn new(
tcx: TyCtxt<'tcx>,
codegen_unit: &'tcx CodegenUnit<'tcx>,
llvm_module: &'ll crate::ModuleLlvm,
) -> Self {
// An interesting part of Windows which MSVC forces our hand on (and
// apparently MinGW didn't) is the usage of `dllimport` and `dllexport`
// attributes in LLVM IR as well as native dependencies (in C these
// correspond to `__declspec(dllimport)`).
//
// LD (BFD) in MinGW mode can often correctly guess `dllexport` but
// relying on that can result in issues like #50176.
// LLD won't support that and expects symbols with proper attributes.
// Because of that we make MinGW target emit dllexport just like MSVC.
// When it comes to dllimport we use it for constants but for functions
// rely on the linker to do the right thing. Opposed to dllexport this
// task is easy for them (both LD and LLD) and allows us to easily use
// symbols from static libraries in shared libraries.
//
// Whenever a dynamic library is built on Windows it must have its public
// interface specified by functions tagged with `dllexport` or otherwise
// they're not available to be linked against. This poses a few problems
// for the compiler, some of which are somewhat fundamental, but we use
// the `use_dll_storage_attrs` variable below to attach the `dllexport`
// attribute to all LLVM functions that are exported (e.g., they're
// already tagged with external linkage). This is suboptimal for a few
// reasons:
//
// * If an object file will never be included in a dynamic library,
// there's no need to attach the dllexport attribute. Most object
// files in Rust are not destined to become part of a dll as binaries
// are statically linked by default.
// * If the compiler is emitting both an rlib and a dylib, the same
// source object file is currently used but with MSVC this may be less
// feasible. The compiler may be able to get around this, but it may
// involve some invasive changes to deal with this.
//
// The flipside of this situation is that whenever you link to a dll and
// you import a function from it, the import should be tagged with
// `dllimport`. At this time, however, the compiler does not emit
// `dllimport` for any declarations other than constants (where it is
// required), which is again suboptimal for even more reasons!
//
// * Calling a function imported from another dll without using
// `dllimport` causes the linker/compiler to have extra overhead (one
// `jmp` instruction on x86) when calling the function.
// * The same object file may be used in different circumstances, so a
// function may be imported from a dll if the object is linked into a
// dll, but it may be just linked against if linked into an rlib.
// * The compiler has no knowledge about whether native functions should
// be tagged dllimport or not.
//
// For now the compiler takes the perf hit (I do not have any numbers to
// this effect) by marking very little as `dllimport` and praying the
// linker will take care of everything. Fixing this problem will likely
// require adding a few attributes to Rust itself (feature gated at the
// start) and then strongly recommending static linkage on Windows!
let use_dll_storage_attrs = tcx.sess.target.target.options.is_like_windows;
let check_overflow = tcx.sess.overflow_checks();
let tls_model = to_llvm_tls_model(tcx.sess.tls_model());
let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
let coverage_cx = if tcx.sess.opts.debugging_opts.instrument_coverage {
let covctx = coverageinfo::CrateCoverageContext::new();
Some(covctx)
} else {
None
};
let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
let dctx = debuginfo::CrateDebugContext::new(llmod);
debuginfo::metadata::compile_unit_metadata(tcx, &codegen_unit.name().as_str(), &dctx);
Some(dctx)
} else {
None
};
let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits());
CodegenCx {
tcx,
check_overflow,
use_dll_storage_attrs,
tls_model,
llmod,
llcx,
codegen_unit,
instances: Default::default(),
vtables: Default::default(),
const_cstr_cache: Default::default(),
const_unsized: Default::default(),
const_globals: Default::default(),
statics_to_rauw: RefCell::new(Vec::new()),
used_statics: RefCell::new(Vec::new()),
lltypes: Default::default(),
scalar_lltypes: Default::default(),
pointee_infos: Default::default(),
isize_ty,
coverage_cx,
dbg_cx,
eh_personality: Cell::new(None),
eh_catch_typeinfo: Cell::new(None),
rust_try_fn: Cell::new(None),
intrinsics: Default::default(),
local_gen_sym_counter: Cell::new(0),
}
}
crate fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
&self.statics_to_rauw
}
#[inline]
pub fn coverage_context(&'a self) -> &'a coverageinfo::CrateCoverageContext<'tcx> {
self.coverage_cx.as_ref().unwrap()
}
}
impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn vtables(
&self,
) -> &RefCell<FxHashMap<(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>), &'ll Value>>
{
&self.vtables
}
fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
get_fn(self, instance)
}
fn get_fn_addr(&self, instance: Instance<'tcx>) -> &'ll Value {
get_fn(self, instance)
}
fn eh_personality(&self) -> &'ll Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality;
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !wants_msvc_seh(self.sess()) => self.get_fn_addr(
ty::Instance::resolve(
tcx,
ty::ParamEnv::reveal_all(),
def_id,
tcx.intern_substs(&[]),
)
.unwrap()
.unwrap(),
),
_ => {
let name = if wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
self.declare_cfn(name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
fn sess(&self) -> &Session {
&self.tcx.sess
}
fn check_overflow(&self) -> bool {
self.check_overflow
}
fn codegen_unit(&self) -> &'tcx CodegenUnit<'tcx> {
self.codegen_unit
}
fn used_statics(&self) -> &RefCell<Vec<&'ll Value>> {
&self.used_statics
}
fn set_frame_pointer_elimination(&self, llfn: &'ll Value) {
attributes::set_frame_pointer_elimination(self, llfn)
}
fn apply_target_cpu_attr(&self, llfn: &'ll Value) {
attributes::apply_target_cpu_attr(self, llfn)
}
fn create_used_variable(&self) {
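// Symbols placed in the appending `llvm.used` array (in the `llvm.metadata`
// section) are treated as referenced, so LLVM and the linker keep them alive
// even if nothing in the IR appears to use them.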
let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
let array =
self.const_array(&self.type_ptr_to(self.type_i8()), &*self.used_statics.borrow());
unsafe {
let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
llvm::LLVMSetInitializer(g, array);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);
llvm::LLVMSetSection(g, section.as_ptr());
}
}
}
impl CodegenCx<'b, 'tcx> {
crate fn get_intrinsic(&self, key: &str) -> &'b Value {
if let Some(v) = self.intrinsics.borrow().get(key).cloned() {
return v;
}
self.declare_intrinsic(key).unwrap_or_else(|| bug!("unknown intrinsic '{}'", key))
}
fn insert_intrinsic(
&self,
name: &'static str,
args: Option<&[&'b llvm::Type]>,
ret: &'b llvm::Type,
) -> &'b llvm::Value {
let fn_ty = if let Some(args) = args {
self.type_func(args, ret)
} else {
self.type_variadic_func(&[], ret)
};
let f = self.declare_cfn(name, fn_ty);
llvm::SetUnnamedAddress(f, llvm::UnnamedAddr::No);
self.intrinsics.borrow_mut().insert(name, f);
f
}
fn declare_intrinsic(&self, key: &str) -> Option<&'b Value> {
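// `ifn!` declares a single LLVM intrinsic if `key` matches its name; the
// three arms cover nullary, variadic, and fixed-arity signatures, while
// `mk_struct!` builds the aggregate return types used by e.g. the
// `*.with.overflow` intrinsics below.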
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, Some(&[]), $ret));
}
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, None, $ret));
}
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
return Some(self.insert_intrinsic($name, Some(&[$($arg),*]), $ret));
}
);
}
macro_rules! mk_struct {
($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
}
let i8p = self.type_i8p();
let void = self.type_void();
let i1 = self.type_i1();
let t_i8 = self.type_i8();
let t_i16 = self.type_i16();
let t_i32 = self.type_i32();
let t_i64 = self.type_i64();
let t_i128 = self.type_i128();
let t_f32 = self.type_f32();
let t_f64 = self.type_f64();
macro_rules! vector_types {
($id_out:ident: $elem_ty:ident, $len:expr) => {
let $id_out = self.type_vector($elem_ty, $len);
};
($($id_out:ident: $elem_ty:ident, $len:expr;)*) => {
$(vector_types!($id_out: $elem_ty, $len);)*
}
}
vector_types! {
t_v2f32: t_f32, 2;
t_v4f32: t_f32, 4;
t_v8f32: t_f32, 8;
t_v16f32: t_f32, 16;
t_v2f64: t_f64, 2;
t_v4f64: t_f64, 4;
t_v8f64: t_f64, 8;
}
ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.unsigned.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.saturate.unsigned.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.wasm.trunc.saturate.signed.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.signed.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.saturate.signed.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.saturate.signed.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.unsigned.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.unsigned.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.wasm.trunc.signed.i32.f32", fn(t_f32) -> t_i32);
ifn!("llvm.wasm.trunc.signed.i32.f64", fn(t_f64) -> t_i32);
ifn!("llvm.wasm.trunc.signed.i64.f32", fn(t_f32) -> t_i64);
ifn!("llvm.wasm.trunc.signed.i64.f64", fn(t_f64) -> t_i64);
ifn!("llvm.trap", fn() -> void);
ifn!("llvm.debugtrap", fn() -> void);
ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
ifn!("llvm.sideeffect", fn() -> void);
ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
ifn!("llvm.powi.v2f32", fn(t_v2f32, t_i32) -> t_v2f32);
ifn!("llvm.powi.v4f32", fn(t_v4f32, t_i32) -> t_v4f32);
ifn!("llvm.powi.v8f32", fn(t_v8f32, t_i32) -> t_v8f32);
ifn!("llvm.powi.v16f32", fn(t_v16f32, t_i32) -> t_v16f32);
ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
ifn!("llvm.powi.v2f64", fn(t_v2f64, t_i32) -> t_v2f64);
ifn!("llvm.powi.v4f64", fn(t_v4f64, t_i32) -> t_v4f64);
ifn!("llvm.powi.v8f64", fn(t_v8f64, t_i32) -> t_v8f64);
ifn!("llvm.pow.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.pow.v2f32", fn(t_v2f32, t_v2f32) -> t_v2f32);
ifn!("llvm.pow.v4f32", fn(t_v4f32, t_v4f32) -> t_v4f32);
ifn!("llvm.pow.v8f32", fn(t_v8f32, t_v8f32) -> t_v8f32);
ifn!("llvm.pow.v16f32", fn(t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.pow.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.pow.v2f64", fn(t_v2f64, t_v2f64) -> t_v2f64);
ifn!("llvm.pow.v4f64", fn(t_v4f64, t_v4f64) -> t_v4f64);
ifn!("llvm.pow.v8f64", fn(t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.sqrt.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sqrt.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.sqrt.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.sqrt.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.sqrt.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sqrt.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sqrt.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.sqrt.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.sqrt.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.sin.f32", fn(t_f32) -> t_f32);
ifn!("llvm.sin.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.sin.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.sin.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.sin.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.sin.f64", fn(t_f64) -> t_f64);
ifn!("llvm.sin.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.sin.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.sin.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.cos.f32", fn(t_f32) -> t_f32);
ifn!("llvm.cos.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.cos.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.cos.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.cos.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.cos.f64", fn(t_f64) -> t_f64);
ifn!("llvm.cos.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.cos.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.cos.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.exp.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.exp.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.exp.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.exp.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.exp.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.exp2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.exp2.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.exp2.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.exp2.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.exp2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.exp2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.exp2.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.exp2.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.exp2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log10.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log10.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log10.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log10.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log10.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log10.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log10.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log10.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log10.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.log2.f32", fn(t_f32) -> t_f32);
ifn!("llvm.log2.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.log2.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.log2.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.log2.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.log2.f64", fn(t_f64) -> t_f64);
ifn!("llvm.log2.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.log2.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.log2.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.fma.f32", fn(t_f32, t_f32, t_f32) -> t_f32);
ifn!("llvm.fma.v2f32", fn(t_v2f32, t_v2f32, t_v2f32) -> t_v2f32);
ifn!("llvm.fma.v4f32", fn(t_v4f32, t_v4f32, t_v4f32) -> t_v4f32);
ifn!("llvm.fma.v8f32", fn(t_v8f32, t_v8f32, t_v8f32) -> t_v8f32);
ifn!("llvm.fma.v16f32", fn(t_v16f32, t_v16f32, t_v16f32) -> t_v16f32);
ifn!("llvm.fma.f64", fn(t_f64, t_f64, t_f64) -> t_f64);
ifn!("llvm.fma.v2f64", fn(t_v2f64, t_v2f64, t_v2f64) -> t_v2f64);
ifn!("llvm.fma.v4f64", fn(t_v4f64, t_v4f64, t_v4f64) -> t_v4f64);
ifn!("llvm.fma.v8f64", fn(t_v8f64, t_v8f64, t_v8f64) -> t_v8f64);
ifn!("llvm.fabs.f32", fn(t_f32) -> t_f32);
ifn!("llvm.fabs.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.fabs.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.fabs.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.fabs.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.fabs.f64", fn(t_f64) -> t_f64);
ifn!("llvm.fabs.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.fabs.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.fabs.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.minnum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.minnum.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.maxnum.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.maxnum.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.floor.f32", fn(t_f32) -> t_f32);
ifn!("llvm.floor.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.floor.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.floor.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.floor.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.floor.f64", fn(t_f64) -> t_f64);
ifn!("llvm.floor.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.floor.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.floor.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.ceil.f32", fn(t_f32) -> t_f32);
ifn!("llvm.ceil.v2f32", fn(t_v2f32) -> t_v2f32);
ifn!("llvm.ceil.v4f32", fn(t_v4f32) -> t_v4f32);
ifn!("llvm.ceil.v8f32", fn(t_v8f32) -> t_v8f32);
ifn!("llvm.ceil.v16f32", fn(t_v16f32) -> t_v16f32);
ifn!("llvm.ceil.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ceil.v2f64", fn(t_v2f64) -> t_v2f64);
ifn!("llvm.ceil.v4f64", fn(t_v4f64) -> t_v4f64);
ifn!("llvm.ceil.v8f64", fn(t_v8f64) -> t_v8f64);
ifn!("llvm.trunc.f32", fn(t_f32) -> t_f32);
ifn!("llvm.trunc.f64", fn(t_f64) -> t_f64);
ifn!("llvm.copysign.f32", fn(t_f32, t_f32) -> t_f32);
ifn!("llvm.copysign.f64", fn(t_f64, t_f64) -> t_f64);
ifn!("llvm.round.f32", fn(t_f32) -> t_f32);
ifn!("llvm.round.f64", fn(t_f64) -> t_f64);
ifn!("llvm.rint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.rint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.nearbyint.f32", fn(t_f32) -> t_f32);
ifn!("llvm.nearbyint.f64", fn(t_f64) -> t_f64);
ifn!("llvm.ctpop.i8", fn(t_i8) -> t_i8);
ifn!("llvm.ctpop.i16", fn(t_i16) -> t_i16);
ifn!("llvm.ctpop.i32", fn(t_i32) -> t_i32);
ifn!("llvm.ctpop.i64", fn(t_i64) -> t_i64);
ifn!("llvm.ctpop.i128", fn(t_i128) -> t_i128);
ifn!("llvm.ctlz.i8", fn(t_i8, i1) -> t_i8);
ifn!("llvm.ctlz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.ctlz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.ctlz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.ctlz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.cttz.i8", fn(t_i8, i1) -> t_i8);
ifn!("llvm.cttz.i16", fn(t_i16, i1) -> t_i16);
ifn!("llvm.cttz.i32", fn(t_i32, i1) -> t_i32);
ifn!("llvm.cttz.i64", fn(t_i64, i1) -> t_i64);
ifn!("llvm.cttz.i128", fn(t_i128, i1) -> t_i128);
ifn!("llvm.bswap.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bswap.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bswap.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bswap.i128", fn(t_i128) -> t_i128);
ifn!("llvm.bitreverse.i8", fn(t_i8) -> t_i8);
ifn!("llvm.bitreverse.i16", fn(t_i16) -> t_i16);
ifn!("llvm.bitreverse.i32", fn(t_i32) -> t_i32);
ifn!("llvm.bitreverse.i64", fn(t_i64) -> t_i64);
ifn!("llvm.bitreverse.i128", fn(t_i128) -> t_i128);
ifn!("llvm.fshl.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshl.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshl.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshl.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshl.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.fshr.i8", fn(t_i8, t_i8, t_i8) -> t_i8);
ifn!("llvm.fshr.i16", fn(t_i16, t_i16, t_i16) -> t_i16);
ifn!("llvm.fshr.i32", fn(t_i32, t_i32, t_i32) -> t_i32);
ifn!("llvm.fshr.i64", fn(t_i64, t_i64, t_i64) -> t_i64);
ifn!("llvm.fshr.i128", fn(t_i128, t_i128, t_i128) -> t_i128);
ifn!("llvm.sadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.sadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.sadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.sadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.sadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.uadd.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.uadd.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.uadd.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.uadd.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.uadd.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.ssub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.ssub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.ssub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.ssub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.ssub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.usub.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.usub.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.usub.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.usub.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.usub.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.smul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.smul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.smul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.smul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.smul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.umul.with.overflow.i8", fn(t_i8, t_i8) -> mk_struct! {t_i8, i1});
ifn!("llvm.umul.with.overflow.i16", fn(t_i16, t_i16) -> mk_struct! {t_i16, i1});
ifn!("llvm.umul.with.overflow.i32", fn(t_i32, t_i32) -> mk_struct! {t_i32, i1});
ifn!("llvm.umul.with.overflow.i64", fn(t_i64, t_i64) -> mk_struct! {t_i64, i1});
ifn!("llvm.umul.with.overflow.i128", fn(t_i128, t_i128) -> mk_struct! {t_i128, i1});
ifn!("llvm.sadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.sadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.sadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.sadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.sadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.uadd.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.uadd.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.uadd.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.uadd.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.uadd.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.ssub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.ssub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.ssub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.ssub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.ssub.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.usub.sat.i8", fn(t_i8, t_i8) -> t_i8);
ifn!("llvm.usub.sat.i16", fn(t_i16, t_i16) -> t_i16);
ifn!("llvm.usub.sat.i32", fn(t_i32, t_i32) -> t_i32);
ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);
ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
ifn!("llvm.localescape", fn(...) -> void);
ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
ifn!("llvm.assume", fn(i1) -> void);
ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
// variadic intrinsics
ifn!("llvm.va_start", fn(i8p) -> void);
ifn!("llvm.va_end", fn(i8p) -> void);
ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
if self.sess().opts.debugging_opts.instrument_coverage {
ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
}
if self.sess().opts.debuginfo != DebugInfo::None {
ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
}
None
}
crate fn eh_catch_typeinfo(&self) -> &'b Value {
if let Some(eh_catch_typeinfo) = self.eh_catch_typeinfo.get() {
return eh_catch_typeinfo;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.is_like_emscripten);
let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
Some(def_id) => self.get_static(def_id),
_ => {
let ty = self
.type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
self.declare_global("rust_eh_catch_typeinfo", ty)
}
};
let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
eh_catch_typeinfo
}
}
impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
/// Generates a new symbol name with the given prefix. This symbol name must
/// only be used for definitions with `internal` or `private` linkage.
pub fn generate_local_symbol_name(&self, prefix: &str) -> String {
let idx = self.local_gen_sym_counter.get();
self.local_gen_sym_counter.set(idx + 1);
// Include a '.' character, so there can be no accidental conflicts with
// user-defined names.
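// For example, repeated calls with the prefix "str" yield "str.0", "str.1",
// and so on, with the counter rendered in base 62 (ALPHANUMERIC_ONLY).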
let mut name = String::with_capacity(prefix.len() + 6);
name.push_str(prefix);
name.push_str(".");
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
}
impl HasDataLayout for CodegenCx<'ll, 'tcx> {
fn data_layout(&self) -> &TargetDataLayout {
&self.tcx.data_layout
}
}
impl HasTargetSpec for CodegenCx<'ll, 'tcx> {
fn target_spec(&self) -> &Target {
&self.tcx.sess.target.target
}
}
impl ty::layout::HasTyCtxt<'tcx> for CodegenCx<'ll, 'tcx> {
fn tcx(&self) -> TyCtxt<'tcx> {
self.tcx
}
}
impl LayoutOf for CodegenCx<'ll, 'tcx> {
type Ty = Ty<'tcx>;
type TyAndLayout = TyAndLayout<'tcx>;
fn layout_of(&self, ty: Ty<'tcx>) -> Self::TyAndLayout {
self.spanned_layout_of(ty, DUMMY_SP)
}
fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::TyAndLayout {
self.tcx.layout_of(ty::ParamEnv::reveal_all().and(ty)).unwrap_or_else(|e| {
if let LayoutError::SizeOverflow(_) = e {
self.sess().span_fatal(span, &e.to_string())
} else {
bug!("failed to get layout for `{}`: {}", ty, e)
}
})
}
}
impl<'tcx, 'll> HasParamEnv<'tcx> for CodegenCx<'ll, 'tcx> {
fn param_env(&self) -> ty::ParamEnv<'tcx> {
ty::ParamEnv::reveal_all()
}
}


@ -0,0 +1,229 @@
use crate::common::CodegenCx;
use crate::coverageinfo;
use crate::llvm;
use llvm::coverageinfo::CounterMappingRegion;
use rustc_codegen_ssa::coverageinfo::map::{Counter, CounterExpression};
use rustc_codegen_ssa::traits::{BaseTypeMethods, ConstMethods};
use rustc_data_structures::fx::FxIndexSet;
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::CodeRegion;
use std::ffi::CString;
use tracing::debug;
/// Generates and exports the Coverage Map.
///
/// This Coverage Map complies with Coverage Mapping Format version 3 (zero-based encoded as 2),
/// as defined at [LLVM Code Coverage Mapping Format](https://github.com/rust-lang/llvm-project/blob/llvmorg-8.0.0/llvm/docs/CoverageMappingFormat.rst#llvm-code-coverage-mapping-format)
/// and published in Rust's current (July 2020) fork of LLVM. This version is supported by the
/// LLVM coverage tools (`llvm-profdata` and `llvm-cov`) bundled with Rust's fork of LLVM.
///
/// Consequently, Rust's bundled version of Clang also generates Coverage Maps compliant with
/// version 3. Clang's implementation of Coverage Map generation was referenced when implementing
/// this Rust version, and though the format documentation is very explicit and detailed, some
/// undocumented details in Clang's implementation (that may or may not be important) were also
/// replicated for Rust's Coverage Map.
pub fn finalize<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>) {
let function_coverage_map = cx.coverage_context().take_function_coverage_map();
if function_coverage_map.is_empty() {
// This module has no functions with coverage instrumentation
return;
}
let mut mapgen = CoverageMapGenerator::new();
// Encode coverage mappings and generate function records
let mut function_records = Vec::<&'ll llvm::Value>::new();
let coverage_mappings_buffer = llvm::build_byte_buffer(|coverage_mappings_buffer| {
for (instance, function_coverage) in function_coverage_map.into_iter() {
debug!("Generate coverage map for: {:?}", instance);
let mangled_function_name = cx.tcx.symbol_name(instance).to_string();
let function_source_hash = function_coverage.source_hash();
let (expressions, counter_regions) =
function_coverage.get_expressions_and_counter_regions();
let old_len = coverage_mappings_buffer.len();
mapgen.write_coverage_mappings(expressions, counter_regions, coverage_mappings_buffer);
let mapping_data_size = coverage_mappings_buffer.len() - old_len;
debug_assert!(
mapping_data_size > 0,
"Every `FunctionCoverage` should have at least one counter"
);
let function_record = mapgen.make_function_record(
cx,
mangled_function_name,
function_source_hash,
mapping_data_size,
);
function_records.push(function_record);
}
});
// Encode all filenames referenced by counters/expressions in this module
let filenames_buffer = llvm::build_byte_buffer(|filenames_buffer| {
coverageinfo::write_filenames_section_to_buffer(&mapgen.filenames, filenames_buffer);
});
// Generate the LLVM IR representation of the coverage map and store it in a well-known global
mapgen.save_generated_coverage_map(
cx,
function_records,
filenames_buffer,
coverage_mappings_buffer,
);
}
struct CoverageMapGenerator {
filenames: FxIndexSet<CString>,
}
impl CoverageMapGenerator {
fn new() -> Self {
Self { filenames: FxIndexSet::default() }
}
/// Using the `expressions` and `counter_regions` collected for the current function, generate
/// the `mapping_regions` and `virtual_file_mapping`, and capture any new filenames. Then use
/// LLVM APIs to encode the `virtual_file_mapping`, `expressions`, and `mapping_regions` into
/// the given `coverage_mappings` byte buffer, compliant with the LLVM Coverage Mapping format.
fn write_coverage_mappings(
&mut self,
expressions: Vec<CounterExpression>,
counter_regions: impl Iterator<Item = (Counter, &'a CodeRegion)>,
coverage_mappings_buffer: &RustString,
) {
let mut counter_regions = counter_regions.collect::<Vec<_>>();
if counter_regions.is_empty() {
return;
}
let mut virtual_file_mapping = Vec::new();
let mut mapping_regions = Vec::new();
let mut current_file_name = None;
let mut current_file_id = 0;
// Convert the list of (Counter, CodeRegion) pairs to an array of `CounterMappingRegion`, sorted
// by filename and position. Capture any new files to compute the `CounterMappingRegion`s
// `file_id` (indexing files referenced by the current function), and construct the
// function-specific `virtual_file_mapping` from `file_id` to its index in the module's
// `filenames` array.
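// Roughly: if a function only references a file that already sits at index 3
// of the module-wide `filenames` set, its `virtual_file_mapping` becomes `[3]`
// and all of its regions use the local `file_id` 0.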
counter_regions.sort_unstable_by_key(|(_counter, region)| *region);
for (counter, region) in counter_regions {
let CodeRegion { file_name, start_line, start_col, end_line, end_col } = *region;
let same_file = current_file_name.as_ref().map_or(false, |p| *p == file_name);
if !same_file {
if current_file_name.is_some() {
current_file_id += 1;
}
current_file_name = Some(file_name);
let c_filename = CString::new(file_name.to_string())
.expect("null error converting filename to C string");
debug!(" file_id: {} = '{:?}'", current_file_id, c_filename);
let (filenames_index, _) = self.filenames.insert_full(c_filename);
virtual_file_mapping.push(filenames_index as u32);
}
mapping_regions.push(CounterMappingRegion::code_region(
counter,
current_file_id,
start_line,
start_col,
end_line,
end_col,
));
}
// Encode and append the current function's coverage mapping data
coverageinfo::write_mapping_to_buffer(
virtual_file_mapping,
expressions,
mapping_regions,
coverage_mappings_buffer,
);
}
/// Generate and return the function record `Value`
fn make_function_record(
&mut self,
cx: &CodegenCx<'ll, 'tcx>,
mangled_function_name: String,
function_source_hash: u64,
mapping_data_size: usize,
) -> &'ll llvm::Value {
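// This packed `{ i64 name hash, i32 mapping data size, i64 source hash }`
// layout is expected to match the per-function record of the coverage
// mapping format version used here; `llvm-cov` reads it back in this shape.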
let name_ref = coverageinfo::compute_hash(&mangled_function_name);
let name_ref_val = cx.const_u64(name_ref);
let mapping_data_size_val = cx.const_u32(mapping_data_size as u32);
let func_hash_val = cx.const_u64(function_source_hash);
cx.const_struct(
&[name_ref_val, mapping_data_size_val, func_hash_val],
/*packed=*/ true,
)
}
/// Combine the filenames and coverage mappings buffers, construct the coverage map header and the
/// array of function records, and combine everything into the complete coverage map. Save the
/// coverage map data into the LLVM IR as a static global using a specific, well-known section
/// and name.
fn save_generated_coverage_map(
self,
cx: &CodegenCx<'ll, 'tcx>,
function_records: Vec<&'ll llvm::Value>,
filenames_buffer: Vec<u8>,
mut coverage_mappings_buffer: Vec<u8>,
) {
// Concatenate the encoded filenames and encoded coverage mappings, and add additional zero
// bytes as-needed to ensure 8-byte alignment.
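// For example, filenames_size = 5 and coverage_size = 10 leave
// remaining_bytes = 15 % 8 = 7, so a single zero byte is appended.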
let mut coverage_size = coverage_mappings_buffer.len();
let filenames_size = filenames_buffer.len();
let remaining_bytes =
(filenames_size + coverage_size) % coverageinfo::COVMAP_VAR_ALIGN_BYTES;
if remaining_bytes > 0 {
let pad = coverageinfo::COVMAP_VAR_ALIGN_BYTES - remaining_bytes;
coverage_mappings_buffer.append(&mut [0].repeat(pad));
coverage_size += pad;
}
let filenames_and_coverage_mappings = [filenames_buffer, coverage_mappings_buffer].concat();
let filenames_and_coverage_mappings_val =
cx.const_bytes(&filenames_and_coverage_mappings[..]);
debug!(
"cov map: n_records = {}, filenames_size = {}, coverage_size = {}, 0-based version = {}",
function_records.len(),
filenames_size,
coverage_size,
coverageinfo::mapping_version()
);
// Create the coverage data header
let n_records_val = cx.const_u32(function_records.len() as u32);
let filenames_size_val = cx.const_u32(filenames_size as u32);
let coverage_size_val = cx.const_u32(coverage_size as u32);
let version_val = cx.const_u32(coverageinfo::mapping_version());
let cov_data_header_val = cx.const_struct(
&[n_records_val, filenames_size_val, coverage_size_val, version_val],
/*packed=*/ false,
);
// Create the function records array
let name_ref_from_u64 = cx.type_i64();
let mapping_data_size_from_u32 = cx.type_i32();
let func_hash_from_u64 = cx.type_i64();
let function_record_ty = cx.type_struct(
&[name_ref_from_u64, mapping_data_size_from_u32, func_hash_from_u64],
/*packed=*/ true,
);
let function_records_val = cx.const_array(function_record_ty, &function_records[..]);
// Create the complete LLVM coverage data value to add to the LLVM IR
let cov_data_val = cx.const_struct(
&[cov_data_header_val, function_records_val, filenames_and_coverage_mappings_val],
/*packed=*/ false,
);
// Save the coverage data value to LLVM IR
coverageinfo::save_map_to_mod(cx, cov_data_val);
}
}


@ -0,0 +1,179 @@
use crate::llvm;
use crate::builder::Builder;
use crate::common::CodegenCx;
use libc::c_uint;
use llvm::coverageinfo::CounterMappingRegion;
use rustc_codegen_ssa::coverageinfo::map::{CounterExpression, FunctionCoverage};
use rustc_codegen_ssa::traits::{
BaseTypeMethods, CoverageInfoBuilderMethods, CoverageInfoMethods, MiscMethods, StaticMethods,
};
use rustc_data_structures::fx::FxHashMap;
use rustc_llvm::RustString;
use rustc_middle::mir::coverage::{
CodeRegion, CounterValueReference, ExpressionOperandId, InjectedExpressionIndex, Op,
};
use rustc_middle::ty::Instance;
use std::cell::RefCell;
use std::ffi::CString;
use tracing::debug;
pub mod mapgen;
const COVMAP_VAR_ALIGN_BYTES: usize = 8;
/// A context object for maintaining all state needed by the coverageinfo module.
pub struct CrateCoverageContext<'tcx> {
// Coverage region data for each instrumented function, keyed by its `Instance`.
pub(crate) function_coverage_map: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverage>>,
}
impl<'tcx> CrateCoverageContext<'tcx> {
pub fn new() -> Self {
Self { function_coverage_map: Default::default() }
}
pub fn take_function_coverage_map(&self) -> FxHashMap<Instance<'tcx>, FunctionCoverage> {
self.function_coverage_map.replace(FxHashMap::default())
}
}
impl CoverageInfoMethods for CodegenCx<'ll, 'tcx> {
fn coverageinfo_finalize(&self) {
mapgen::finalize(self)
}
}
impl CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
/// Calls llvm::createPGOFuncNameVar() with the given function instance's mangled function name.
/// The LLVM API returns an llvm::GlobalVariable containing the function name, with the specific
/// variable name and linkage required by LLVM InstrProf source-based coverage instrumentation.
fn create_pgo_func_name_var(&self, instance: Instance<'tcx>) -> Self::Value {
let llfn = self.cx.get_fn(instance);
let mangled_fn_name = CString::new(self.tcx.symbol_name(instance).name)
.expect("error converting function name to C string");
unsafe { llvm::LLVMRustCoverageCreatePGOFuncNameVar(llfn, mangled_fn_name.as_ptr()) }
}
fn add_counter_region(
&mut self,
instance: Instance<'tcx>,
function_source_hash: u64,
id: CounterValueReference,
region: CodeRegion,
) {
debug!(
"adding counter to coverage_regions: instance={:?}, function_source_hash={}, id={:?}, \
at {:?}",
instance, function_source_hash, id, region,
);
let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter(function_source_hash, id, region);
}
fn add_counter_expression_region(
&mut self,
instance: Instance<'tcx>,
id: InjectedExpressionIndex,
lhs: ExpressionOperandId,
op: Op,
rhs: ExpressionOperandId,
region: CodeRegion,
) {
debug!(
"adding counter expression to coverage_regions: instance={:?}, id={:?}, {:?} {:?} {:?}, \
at {:?}",
instance, id, lhs, op, rhs, region,
);
let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_counter_expression(id, lhs, op, rhs, region);
}
fn add_unreachable_region(&mut self, instance: Instance<'tcx>, region: CodeRegion) {
debug!(
"adding unreachable code to coverage_regions: instance={:?}, at {:?}",
instance, region,
);
let mut coverage_regions = self.coverage_context().function_coverage_map.borrow_mut();
coverage_regions
.entry(instance)
.or_insert_with(|| FunctionCoverage::new(self.tcx, instance))
.add_unreachable_region(region);
}
}
pub(crate) fn write_filenames_section_to_buffer<'a>(
filenames: impl IntoIterator<Item = &'a CString>,
buffer: &RustString,
) {
let c_str_vec = filenames.into_iter().map(|cstring| cstring.as_ptr()).collect::<Vec<_>>();
unsafe {
llvm::LLVMRustCoverageWriteFilenamesSectionToBuffer(
c_str_vec.as_ptr(),
c_str_vec.len(),
buffer,
);
}
}
pub(crate) fn write_mapping_to_buffer(
virtual_file_mapping: Vec<u32>,
expressions: Vec<CounterExpression>,
mut mapping_regions: Vec<CounterMappingRegion>,
buffer: &RustString,
) {
unsafe {
llvm::LLVMRustCoverageWriteMappingToBuffer(
virtual_file_mapping.as_ptr(),
virtual_file_mapping.len() as c_uint,
expressions.as_ptr(),
expressions.len() as c_uint,
mapping_regions.as_mut_ptr(),
mapping_regions.len() as c_uint,
buffer,
);
}
}
pub(crate) fn compute_hash(name: &str) -> u64 {
let name = CString::new(name).expect("null error converting hashable name to C string");
unsafe { llvm::LLVMRustCoverageComputeHash(name.as_ptr()) }
}
pub(crate) fn mapping_version() -> u32 {
unsafe { llvm::LLVMRustCoverageMappingVersion() }
}
pub(crate) fn save_map_to_mod<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
cov_data_val: &'ll llvm::Value,
) {
let covmap_var_name = llvm::build_string(|s| unsafe {
llvm::LLVMRustCoverageWriteMappingVarNameToString(s);
})
.expect("Rust Coverage Mapping var name failed UTF-8 conversion");
debug!("covmap var name: {:?}", covmap_var_name);
let covmap_section_name = llvm::build_string(|s| unsafe {
llvm::LLVMRustCoverageWriteSectionNameToString(cx.llmod, s);
})
.expect("Rust Coverage section name failed UTF-8 conversion");
debug!("covmap section name: {:?}", covmap_section_name);
let llglobal = llvm::add_global(cx.llmod, cx.val_ty(cov_data_val), &covmap_var_name);
llvm::set_initializer(llglobal, cov_data_val);
llvm::set_global_constant(llglobal, true);
llvm::set_linkage(llglobal, llvm::Linkage::InternalLinkage);
llvm::set_section(llglobal, &covmap_section_name);
llvm::set_alignment(llglobal, COVMAP_VAR_ALIGN_BYTES);
cx.add_used_global(llglobal);
}


@ -0,0 +1,95 @@
use super::metadata::{file_metadata, UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
use super::utils::DIB;
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext};
use rustc_codegen_ssa::traits::*;
use crate::common::CodegenCx;
use crate::llvm;
use crate::llvm::debuginfo::{DIScope, DISubprogram};
use rustc_middle::mir::{Body, SourceScope};
use rustc_session::config::DebugInfo;
use rustc_index::bit_set::BitSet;
use rustc_index::vec::Idx;
/// Produces DIScope DIEs for each MIR Scope which has variables defined in it.
pub fn compute_mir_scopes(
cx: &CodegenCx<'ll, '_>,
mir: &Body<'_>,
fn_metadata: &'ll DISubprogram,
debug_context: &mut FunctionDebugContext<&'ll DIScope>,
) {
// Find all the scopes with variables defined in them.
let mut has_variables = BitSet::new_empty(mir.source_scopes.len());
// Only consider variables when they're going to be emitted.
// FIXME(eddyb) don't even allocate `has_variables` otherwise.
if cx.sess().opts.debuginfo == DebugInfo::Full {
// FIXME(eddyb) take into account that arguments always have debuginfo,
// irrespective of their name (assuming full debuginfo is enabled).
// NOTE(eddyb) actually, on second thought, those are always in the
// function scope, which always exists.
for var_debug_info in &mir.var_debug_info {
has_variables.insert(var_debug_info.source_info.scope);
}
}
// Instantiate all scopes.
for idx in 0..mir.source_scopes.len() {
let scope = SourceScope::new(idx);
make_mir_scope(cx, &mir, fn_metadata, &has_variables, debug_context, scope);
}
}
fn make_mir_scope(
cx: &CodegenCx<'ll, '_>,
mir: &Body<'_>,
fn_metadata: &'ll DISubprogram,
has_variables: &BitSet<SourceScope>,
debug_context: &mut FunctionDebugContext<&'ll DISubprogram>,
scope: SourceScope,
) {
if debug_context.scopes[scope].is_valid() {
return;
}
let scope_data = &mir.source_scopes[scope];
let parent_scope = if let Some(parent) = scope_data.parent_scope {
make_mir_scope(cx, mir, fn_metadata, has_variables, debug_context, parent);
debug_context.scopes[parent]
} else {
// The root is the function itself.
let loc = cx.lookup_debug_loc(mir.span.lo());
debug_context.scopes[scope] = DebugScope {
scope_metadata: Some(fn_metadata),
file_start_pos: loc.file.start_pos,
file_end_pos: loc.file.end_pos,
};
return;
};
if !has_variables.contains(scope) {
// Do not create a DIScope if there are no variables
// defined in this MIR Scope, to avoid debuginfo bloat.
debug_context.scopes[scope] = parent_scope;
return;
}
let loc = cx.lookup_debug_loc(scope_data.span.lo());
let file_metadata = file_metadata(cx, &loc.file, debug_context.defining_crate);
let scope_metadata = unsafe {
Some(llvm::LLVMRustDIBuilderCreateLexicalBlock(
DIB(cx),
parent_scope.scope_metadata.unwrap(),
file_metadata,
loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
loc.col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
))
};
debug_context.scopes[scope] = DebugScope {
scope_metadata,
file_start_pos: loc.file.start_pos,
file_end_pos: loc.file.end_pos,
};
}


@ -0,0 +1,179 @@
//! # Debug Info Module
//!
//! This module serves the purpose of generating debug symbols. We use LLVM's
//! [source level debugging](https://llvm.org/docs/SourceLevelDebugging.html)
//! features for generating the debug information. The general principle is
//! this:
//!
//! Given the right metadata in the LLVM IR, the LLVM code generator is able to
//! create DWARF debug symbols for the given code. The
//! [metadata](https://llvm.org/docs/LangRef.html#metadata-type) is structured
//! much like DWARF *debugging information entries* (DIE), representing type
//! information such as datatype layout, function signatures, block layout,
//! variable location and scope information, etc. It is the purpose of this
//! module to generate correct metadata and insert it into the LLVM IR.
//!
//! As the exact format of metadata trees may change between different LLVM
//! versions, we now use LLVM
//! [DIBuilder](https://llvm.org/docs/doxygen/html/classllvm_1_1DIBuilder.html)
//! to create metadata where possible. This will hopefully ease the adaptation of
//! this module to future LLVM versions.
//!
//! The public API of the module is a set of functions that will insert the
//! correct metadata into the LLVM IR when called with the right parameters.
//! The module is thus driven from an outside client with functions like
//! `debuginfo::create_local_var_metadata(bx: block, local: &ast::local)`.
//!
//! Internally the module will try to reuse already created metadata by
//! utilizing a cache. The way to get a shared metadata node when needed is
//! thus to just call the corresponding function in this module:
//!
//! let file_metadata = file_metadata(crate_context, path);
//!
//! The function will take care of probing the cache for an existing node for
//! that exact file path.
//!
//! All private state used by the module is stored within either the
//! CrateDebugContext struct (owned by the CodegenCx) or the
//! FunctionDebugContext (owned by the FunctionCx).
//!
//! This file consists of three conceptual sections:
//! 1. The public interface of the module
//! 2. Module-internal metadata creation functions
//! 3. Minor utility functions
//!
//!
//! ## Recursive Types
//!
//! Some kinds of types, such as structs and enums, can be recursive. That means
//! that the type definition of some type X refers to some other type which in
//! turn (transitively) refers to X. This introduces cycles into the type
//! referral graph. A naive algorithm doing an on-demand, depth-first traversal
//! of this graph when describing types, can get trapped in an endless loop
//! when it reaches such a cycle.
//!
//! For example, the following simple type for a singly-linked list...
//!
//! ```
//! struct List {
//! value: i32,
//! tail: Option<Box<List>>,
//! }
//! ```
//!
//! will generate the following callstack with a naive DFS algorithm:
//!
//! ```
//! describe(t = List)
//! describe(t = i32)
//! describe(t = Option<Box<List>>)
//! describe(t = Box<List>)
//! describe(t = List) // at the beginning again...
//! ...
//! ```
//!
//! To break cycles like these, we use "forward declarations". That is, when
//! the algorithm encounters a possibly recursive type (any struct or enum), it
//! immediately creates a type description node and inserts it into the cache
//! *before* describing the members of the type. This type description is just
//! a stub (as type members are not described and added to it yet) but it
//! allows the algorithm to already refer to the type. After the stub is
//! inserted into the cache, the algorithm continues as before. If it now
//! encounters a recursive reference, it will hit the cache and does not try to
//! describe the type anew.
//!
//! This behavior is encapsulated in the 'RecursiveTypeDescription' enum,
//! which represents a kind of continuation, storing all state needed to
//! continue traversal at the type members after the type has been registered
//! with the cache. (This implementation approach might be a tad over-
//! engineered and may change in the future)
//!
//!
//! ## Source Locations and Line Information
//!
//! In addition to data type descriptions, the debugging information must also
//! allow mapping machine code locations back to source code locations in order
//! to be useful. This functionality is also handled in this module. The
//! following functions allow controlling source mappings:
//!
//! + set_source_location()
//! + clear_source_location()
//! + start_emitting_source_locations()
//!
//! `set_source_location()` allows setting the current source location. All IR
//! instructions created after a call to this function will be linked to the
//! given source location, until another location is specified with
//! `set_source_location()` or the source location is cleared with
//! `clear_source_location()`. In the latter case, subsequent IR instructions
//! will not be linked to any source location. As you can see, this is a
//! stateful API (mimicking the one in LLVM), so be careful with source
//! locations set by previous calls. It's probably best to not rely on any
//! specific state being present at a given point in code.
//!
//! One topic that deserves some extra attention is *function prologues*. At
//! the beginning of a function's machine code there are typically a few
//! instructions for loading argument values into allocas and checking if
//! there's enough stack space for the function to execute. This *prologue* is
//! not visible in the source code and LLVM puts a special PROLOGUE END marker
//! into the line table at the first non-prologue instruction of the function.
//! In order to find out where the prologue ends, LLVM looks for the first
//! instruction in the function body that is linked to a source location. So,
//! when generating prologue instructions we have to make sure that we don't
//! emit source location information until the 'real' function body begins. For
//! this reason, source location emission is disabled by default for any new
//! function being codegened and is only activated after a call to the third
//! function from the list above, `start_emitting_source_locations()`. This
//! function should be called right before regularly starting to codegen the
//! top-level block of the given function.
//!
//! There is one exception to the above rule: `llvm.dbg.declare` instruction
//! must be linked to the source location of the variable being declared. For
//! function parameters these `llvm.dbg.declare` instructions typically occur
//! in the middle of the prologue, however, they are ignored by LLVM's prologue
//! detection. The `create_argument_metadata()` and related functions take care
//! of linking the `llvm.dbg.declare` instructions to the correct source
//! locations even while source location emission is still disabled, so there
//! is no need to do anything special with source location handling here.
//!
//! ## Unique Type Identification
//!
//! In order for link-time optimization to work properly, LLVM needs a unique
//! type identifier that tells it across compilation units which types are the
//! same as others. This type identifier is created by
//! `TypeMap::get_unique_type_id_of_type()` using the following algorithm:
//!
//! (1) Primitive types have their name as ID
//! (2) Structs, enums and traits have a multipart identifier
//!
//! (1) The first part is the SVH (strict version hash) of the crate they
//! were originally defined in
//!
//! (2) The second part is the ast::NodeId of the definition in their
//! original crate
//!
//! (3) The final part is a concatenation of the type IDs of their concrete
//! type arguments if they are generic types.
//!
//! (3) Tuple, pointer and function types are structurally identified, which
//! means that they are equivalent if their component types are equivalent
//! (i.e., `(i32, i32)` is the same regardless of which crate it is used in).
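//!
//! As a purely illustrative example (the concrete encoding is an internal
//! detail of `TypeMap`), a struct `Foo<u32>` defined in crate `mycrate`
//! would get an ID assembled from mycrate's SVH, the NodeId of `Foo`'s
//! definition, and the unique ID of `u32`, whereas `(u32, u32)` is
//! identified purely by the IDs of its components.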
//!
//! This algorithm also provides a stable ID for types that are defined in one
//! crate but instantiated from metadata within another crate. We just have to
//! take care to always map crate and `NodeId`s back to the original crate
//! context.
//!
//! As a side-effect these unique type IDs also help to solve a problem arising
//! from lifetime parameters. Since lifetime parameters are completely omitted
//! in debuginfo, more than one `Ty` instance may map to the same debuginfo
//! type metadata, that is, some struct `Struct<'a>` may have N instantiations
//! with different concrete substitutions for `'a`, and thus there will be N
//! `Ty` instances for the type `Struct<'a>` even though it is not generic
//! otherwise. Unfortunately this means that we cannot use `ty::type_id()` as a
//! cheap identifier for type metadata -- we have done this in the past, but it
//! led to unnecessary metadata duplication in the best case and LLVM
//! assertions in the worst. However, the unique type ID as described above
//! *can* be used as identifier. Since it is comparatively expensive to
//! construct, though, `ty::type_id()` is still used additionally as an
//! optimization for cases where the exact same type has been seen before
//! (which is most of the time).


@ -0,0 +1,71 @@
// .debug_gdb_scripts binary section.
use crate::llvm;
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_session::config::DebugInfo;
use rustc_span::symbol::sym;
/// Inserts a side-effect free instruction sequence that makes sure that the
/// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
if needs_gdb_debug_scripts_section(bx) {
let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [bx.const_i32(0), bx.const_i32(0)];
let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
let volatile_load_instruction = bx.volatile_load(element);
unsafe {
llvm::LLVMSetAlignment(volatile_load_instruction, 1);
}
}
}
/// Allocates the global variable responsible for the .debug_gdb_scripts binary
/// section.
pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>) -> &'ll Value {
let c_section_var_name = "__rustc_debug_gdb_scripts_section__\0";
let section_var_name = &c_section_var_name[..c_section_var_name.len() - 1];
let section_var =
unsafe { llvm::LLVMGetNamedGlobal(cx.llmod, c_section_var_name.as_ptr().cast()) };
section_var.unwrap_or_else(|| {
let section_name = b".debug_gdb_scripts\0";
let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
unsafe {
let llvm_type = cx.type_array(cx.type_i8(), section_contents.len() as u64);
let section_var = cx
.define_global(section_var_name, llvm_type)
.unwrap_or_else(|| bug!("symbol `{}` is already defined", section_var_name));
llvm::LLVMSetSection(section_var, section_name.as_ptr().cast());
llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddress(section_var, llvm::UnnamedAddr::Global);
llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);
// This should make sure that the whole section is not larger than
// the string it contains. Otherwise we get a warning from GDB.
llvm::LLVMSetAlignment(section_var, 1);
section_var
}
})
}
pub fn needs_gdb_debug_scripts_section(cx: &CodegenCx<'_, '_>) -> bool {
let omit_gdb_pretty_printer_section = cx
.tcx
.sess
.contains_name(&cx.tcx.hir().krate_attrs(), sym::omit_gdb_pretty_printer_section);
!omit_gdb_pretty_printer_section
&& cx.sess().opts.debuginfo != DebugInfo::None
&& cx.sess().target.target.options.emit_debug_gdb_scripts
}

File diff suppressed because it is too large


@ -0,0 +1,563 @@
// See doc.rs for documentation.
mod doc;
use rustc_codegen_ssa::mir::debuginfo::VariableKind::*;
use self::metadata::{file_metadata, type_metadata, TypeMap, UNKNOWN_LINE_NUMBER};
use self::namespace::mangled_name_of_instance;
use self::type_names::compute_debuginfo_type_name;
use self::utils::{create_DIArray, is_node_local_to_unit, DIB};
use crate::abi::FnAbi;
use crate::builder::Builder;
use crate::common::CodegenCx;
use crate::llvm;
use crate::llvm::debuginfo::{
DIArray, DIBuilder, DIFile, DIFlags, DILexicalBlock, DISPFlags, DIScope, DIType, DIVariable,
};
use crate::value::Value;
use rustc_codegen_ssa::debuginfo::type_names;
use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind};
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LOCAL_CRATE};
use rustc_index::vec::IndexVec;
use rustc_middle::mir;
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::subst::{GenericArgKind, SubstsRef};
use rustc_middle::ty::{self, Instance, ParamEnv, Ty, TypeFoldable};
use rustc_session::config::{self, DebugInfo};
use rustc_span::symbol::Symbol;
use rustc_span::{self, BytePos, Span};
use rustc_target::abi::{LayoutOf, Primitive, Size};
use libc::c_uint;
use smallvec::SmallVec;
use std::cell::RefCell;
use tracing::debug;
mod create_scope_map;
pub mod gdb;
pub mod metadata;
mod namespace;
mod source_loc;
mod utils;
pub use self::create_scope_map::compute_mir_scopes;
pub use self::metadata::create_global_var_metadata;
pub use self::metadata::extend_scope_to_file;
#[allow(non_upper_case_globals)]
const DW_TAG_auto_variable: c_uint = 0x100;
#[allow(non_upper_case_globals)]
const DW_TAG_arg_variable: c_uint = 0x101;
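// These are LLVM-internal tag values rather than standard DWARF tags; they are
// only used to tell `LLVMRustDIBuilderCreateVariable` (further down) whether a
// variable is a local (`auto`) or a formal parameter (`arg`).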
/// A context object for maintaining all state needed by the debuginfo module.
pub struct CrateDebugContext<'a, 'tcx> {
llcontext: &'a llvm::Context,
llmod: &'a llvm::Module,
builder: &'a mut DIBuilder<'a>,
created_files: RefCell<FxHashMap<(Option<String>, Option<String>), &'a DIFile>>,
created_enum_disr_types: RefCell<FxHashMap<(DefId, Primitive), &'a DIType>>,
type_map: RefCell<TypeMap<'a, 'tcx>>,
namespace_map: RefCell<DefIdMap<&'a DIScope>>,
// This collection is used to assert that composite types (structs, enums,
// ...) have their members only set once:
composite_types_completed: RefCell<FxHashSet<&'a DIType>>,
}
impl Drop for CrateDebugContext<'a, 'tcx> {
fn drop(&mut self) {
unsafe {
llvm::LLVMRustDIBuilderDispose(&mut *(self.builder as *mut _));
}
}
}
impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
pub fn new(llmod: &'a llvm::Module) -> Self {
debug!("CrateDebugContext::new");
let builder = unsafe { llvm::LLVMRustDIBuilderCreate(llmod) };
// DIBuilder inherits context from the module, so we'd better use the same one
let llcontext = unsafe { llvm::LLVMGetModuleContext(llmod) };
CrateDebugContext {
llcontext,
llmod,
builder,
created_files: Default::default(),
created_enum_disr_types: Default::default(),
type_map: Default::default(),
namespace_map: RefCell::new(Default::default()),
composite_types_completed: Default::default(),
}
}
}
/// Creates any deferred debug metadata nodes
pub fn finalize(cx: &CodegenCx<'_, '_>) {
if cx.dbg_cx.is_none() {
return;
}
debug!("finalize");
if gdb::needs_gdb_debug_scripts_section(cx) {
// Add a .debug_gdb_scripts section to this compile-unit. This will
// cause GDB to try and load the gdb_load_rust_pretty_printers.py file,
// which activates the Rust pretty printers for the binary this section is
// contained in.
gdb::get_or_insert_gdb_debug_scripts_section_global(cx);
}
unsafe {
llvm::LLVMRustDIBuilderFinalize(DIB(cx));
// Debuginfo generation in LLVM by default uses a higher
// version of dwarf than macOS currently understands. We can
// instruct LLVM to emit an older version of dwarf, however,
// for macOS to understand. For more info see #11352
// This can be overridden using --llvm-opts -dwarf-version,N.
// Android has the same issue (#22398)
if cx.sess().target.target.options.is_like_osx
|| cx.sess().target.target.options.is_like_android
{
llvm::LLVMRustAddModuleFlag(cx.llmod, "Dwarf Version\0".as_ptr().cast(), 2)
}
// Indicate that we want CodeView debug information on MSVC
if cx.sess().target.target.options.is_like_msvc {
llvm::LLVMRustAddModuleFlag(cx.llmod, "CodeView\0".as_ptr().cast(), 1)
}
// Prevent bitcode readers from deleting the debug info.
let ptr = "Debug Info Version\0".as_ptr();
llvm::LLVMRustAddModuleFlag(cx.llmod, ptr.cast(), llvm::LLVMRustDebugMetadataVersion());
};
}
impl DebugInfoBuilderMethods for Builder<'a, 'll, 'tcx> {
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn dbg_var_addr(
&mut self,
dbg_var: &'ll DIVariable,
scope_metadata: &'ll DIScope,
variable_alloca: Self::Value,
direct_offset: Size,
indirect_offsets: &[Size],
span: Span,
) {
let cx = self.cx();
// Convert the direct and indirect offsets to address ops.
// FIXME(eddyb) use `const`s instead of getting the values via FFI,
// the values should match the ones in the DWARF standard anyway.
let op_deref = || unsafe { llvm::LLVMRustDIBuilderCreateOpDeref() };
let op_plus_uconst = || unsafe { llvm::LLVMRustDIBuilderCreateOpPlusUconst() };
let mut addr_ops = SmallVec::<[_; 8]>::new();
if direct_offset.bytes() > 0 {
addr_ops.push(op_plus_uconst());
addr_ops.push(direct_offset.bytes() as i64);
}
for &offset in indirect_offsets {
addr_ops.push(op_deref());
if offset.bytes() > 0 {
addr_ops.push(op_plus_uconst());
addr_ops.push(offset.bytes() as i64);
}
}
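// Illustrative example (hypothetical values): with `direct_offset == 8` and
// `indirect_offsets == [0, 4]`, the expression built above is
// `[DW_OP_plus_uconst, 8, DW_OP_deref, DW_OP_deref, DW_OP_plus_uconst, 4]`,
// i.e. "add 8 to the alloca address, dereference twice, then add 4".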
// FIXME(eddyb) maybe this information could be extracted from `dbg_var`,
// to avoid having to pass it down in both places?
// NB: `var` doesn't seem to know about the column, so that's a limitation.
let dbg_loc = cx.create_debug_loc(scope_metadata, span);
unsafe {
// FIXME(eddyb) replace `llvm.dbg.declare` with `llvm.dbg.addr`.
llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(cx),
variable_alloca,
dbg_var,
addr_ops.as_ptr(),
addr_ops.len() as c_uint,
dbg_loc,
self.llbb(),
);
}
}
fn set_source_location(&mut self, scope: &'ll DIScope, span: Span) {
debug!("set_source_location: {}", self.sess().source_map().span_to_string(span));
let dbg_loc = self.cx().create_debug_loc(scope, span);
unsafe {
llvm::LLVMSetCurrentDebugLocation(self.llbuilder, dbg_loc);
}
}
fn insert_reference_to_gdb_debug_scripts_section_global(&mut self) {
gdb::insert_reference_to_gdb_debug_scripts_section_global(self)
}
fn set_var_name(&mut self, value: &'ll Value, name: &str) {
// Avoid wasting time if LLVM value names aren't even enabled.
if self.sess().fewer_names() {
return;
}
// Only function parameters and instructions are local to a function,
// don't change the name of anything else (e.g. globals).
let param_or_inst = unsafe {
llvm::LLVMIsAArgument(value).is_some() || llvm::LLVMIsAInstruction(value).is_some()
};
if !param_or_inst {
return;
}
// Avoid replacing the name if it already exists.
// While we could combine the names somehow, it'd
// get noisy quick, and the usefulness is dubious.
if llvm::get_value_name(value).is_empty() {
llvm::set_value_name(value, name.as_bytes());
}
}
}
impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: &'ll Value,
mir: &mir::Body<'_>,
) -> Option<FunctionDebugContext<&'ll DIScope>> {
if self.sess().opts.debuginfo == DebugInfo::None {
return None;
}
let span = mir.span;
// This can be the case for functions inlined from another crate
if span.is_dummy() {
// FIXME(simulacrum): Probably can't happen; remove.
return None;
}
let def_id = instance.def_id();
let containing_scope = get_containing_scope(self, instance);
let loc = self.lookup_debug_loc(span.lo());
let file_metadata = file_metadata(self, &loc.file, def_id.krate);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(self, fn_abi);
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), fn_signature)
};
// Find the enclosing function, in case this is a closure.
let def_key = self.tcx().def_key(def_id);
let mut name = def_key.disambiguated_data.data.to_string();
let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
// get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
let generics = self.tcx().generics_of(enclosing_fn_def_id);
let substs = instance.substs.truncate_to(self.tcx(), generics);
let template_parameters = get_template_parameters(self, &generics, substs, &mut name);
let linkage_name = &mangled_name_of_instance(self, instance).name;
// Omit the linkage_name if it is the same as subprogram name.
let linkage_name = if &name == linkage_name { "" } else { linkage_name };
// FIXME(eddyb) does this need to be separate from `loc.line` for some reason?
let scope_line = loc.line;
let mut flags = DIFlags::FlagPrototyped;
if fn_abi.ret.layout.abi.is_uninhabited() {
flags |= DIFlags::FlagNoReturn;
}
let mut spflags = DISPFlags::SPFlagDefinition;
if is_node_local_to_unit(self, def_id) {
spflags |= DISPFlags::SPFlagLocalToUnit;
}
if self.sess().opts.optimize != config::OptLevel::No {
spflags |= DISPFlags::SPFlagOptimized;
}
if let Some((id, _)) = self.tcx.entry_fn(LOCAL_CRATE) {
if id.to_def_id() == def_id {
spflags |= DISPFlags::SPFlagMainSubprogram;
}
}
let fn_metadata = unsafe {
llvm::LLVMRustDIBuilderCreateFunction(
DIB(self),
containing_scope,
name.as_ptr().cast(),
name.len(),
linkage_name.as_ptr().cast(),
linkage_name.len(),
file_metadata,
loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
function_type_metadata,
scope_line.unwrap_or(UNKNOWN_LINE_NUMBER),
flags,
spflags,
llfn,
template_parameters,
None,
)
};
// Initialize fn debug context (including scopes).
// FIXME(eddyb) figure out a way to not need `Option` for `scope_metadata`.
let null_scope = DebugScope {
scope_metadata: None,
file_start_pos: BytePos(0),
file_end_pos: BytePos(0),
};
let mut fn_debug_context = FunctionDebugContext {
scopes: IndexVec::from_elem(null_scope, &mir.source_scopes),
defining_crate: def_id.krate,
};
// Fill in all the scopes, with the information from the MIR body.
compute_mir_scopes(self, mir, fn_metadata, &mut fn_debug_context);
return Some(fn_debug_context);
fn get_function_signature<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> &'ll DIArray {
if cx.sess().opts.debuginfo == DebugInfo::Limited {
return create_DIArray(DIB(cx), &[]);
}
let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(if fn_abi.ret.is_ignore() {
None
} else {
Some(type_metadata(cx, fn_abi.ret.layout.ty, rustc_span::DUMMY_SP))
});
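// Illustrative sketch: for `fn f(a: i32, b: bool) -> u8` (a made-up signature),
// this vector ends up as `[Some(<u8>), Some(<i32>), Some(<bool>)]` once the
// argument handling below has run, with the return type always at index 0.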
// Argument types
if cx.sess().target.target.options.is_like_msvc {
// FIXME(#42800):
// There is a bug in MSDIA that leads to a crash when it encounters
// a fixed-size array of `u8` or something zero-sized in a
// function-type (see #40477).
// As a workaround, we replace those fixed-size arrays with a
// pointer-type. So a function `fn foo(a: u8, b: [u8; 4])` would
// appear as `fn foo(a: u8, b: *const u8)` in debuginfo,
// and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
// This transformed type is wrong, but these function types are
// already inaccurate due to ABI adjustments (see #42800).
signature.extend(fn_abi.args.iter().map(|arg| {
let t = arg.layout.ty;
let t = match t.kind {
ty::Array(ct, _)
if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() =>
{
cx.tcx.mk_imm_ptr(ct)
}
_ => t,
};
Some(type_metadata(cx, t, rustc_span::DUMMY_SP))
}));
} else {
signature.extend(
fn_abi
.args
.iter()
.map(|arg| Some(type_metadata(cx, arg.layout.ty, rustc_span::DUMMY_SP))),
);
}
create_DIArray(DIB(cx), &signature[..])
}
fn get_template_parameters<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
generics: &ty::Generics,
substs: SubstsRef<'tcx>,
name_to_append_suffix_to: &mut String,
) -> &'ll DIArray {
if substs.types().next().is_none() {
return create_DIArray(DIB(cx), &[]);
}
name_to_append_suffix_to.push('<');
for (i, actual_type) in substs.types().enumerate() {
if i != 0 {
name_to_append_suffix_to.push_str(",");
}
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx.tcx(), actual_type, true);
name_to_append_suffix_to.push_str(&actual_type_name[..]);
}
name_to_append_suffix_to.push('>');
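// Illustrative example: an instance of `fn foo<T, U>()` monomorphized as
// `foo::<u32, f64>` leaves `name_to_append_suffix_to` ending in "<u32,f64>".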
// Again, only create type information if full debuginfo is enabled
let template_params: Vec<_> = if cx.sess().opts.debuginfo == DebugInfo::Full {
let names = get_parameter_names(cx, generics);
substs
.iter()
.zip(names)
.filter_map(|(kind, name)| {
if let GenericArgKind::Type(ty) = kind.unpack() {
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
let actual_type_metadata =
type_metadata(cx, actual_type, rustc_span::DUMMY_SP);
let name = name.as_str();
Some(unsafe {
Some(llvm::LLVMRustDIBuilderCreateTemplateTypeParameter(
DIB(cx),
None,
name.as_ptr().cast(),
name.len(),
actual_type_metadata,
))
})
} else {
None
}
})
.collect()
} else {
vec![]
};
create_DIArray(DIB(cx), &template_params[..])
}
fn get_parameter_names(cx: &CodegenCx<'_, '_>, generics: &ty::Generics) -> Vec<Symbol> {
let mut names = generics
.parent
.map_or(vec![], |def_id| get_parameter_names(cx, cx.tcx.generics_of(def_id)));
names.extend(generics.params.iter().map(|param| param.name));
names
}
fn get_containing_scope<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll DIScope {
// First, let's see if this is a method within an inherent impl. Because
// if yes, we want to make the result subroutine DIE a child of the
// subroutine's self-type.
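// Illustrative example: for `impl Foo { fn bar(&self) {} }` the subprogram for
// `bar` becomes a child of `Foo`'s type DIE (given full debuginfo), whereas a
// method from `impl SomeTrait for Foo` falls through to the namespace-based
// fallback below.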
let self_type = cx.tcx.impl_of_method(instance.def_id()).and_then(|impl_def_id| {
// If the method does *not* belong to a trait, proceed
if cx.tcx.trait_id_of_impl(impl_def_id).is_none() {
let impl_self_ty = cx.tcx.subst_and_normalize_erasing_regions(
instance.substs,
ty::ParamEnv::reveal_all(),
&cx.tcx.type_of(impl_def_id),
);
// Only "class" methods are generally understood by LLVM,
// so avoid methods on other types (e.g., `<*mut T>::null`).
match impl_self_ty.kind {
ty::Adt(def, ..) if !def.is_box() => {
// Again, only create type information if full debuginfo is enabled
if cx.sess().opts.debuginfo == DebugInfo::Full
&& !impl_self_ty.needs_subst()
{
Some(type_metadata(cx, impl_self_ty, rustc_span::DUMMY_SP))
} else {
Some(namespace::item_namespace(cx, def.did))
}
}
_ => None,
}
} else {
// For trait method impls we still use the "parallel namespace"
// strategy
None
}
});
self_type.unwrap_or_else(|| {
namespace::item_namespace(
cx,
DefId {
krate: instance.def_id().krate,
index: cx
.tcx
.def_key(instance.def_id())
.parent
.expect("get_containing_scope: missing parent?"),
},
)
})
}
}
fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value) {
metadata::create_vtable_metadata(self, ty, vtable)
}
fn extend_scope_to_file(
&self,
scope_metadata: &'ll DIScope,
file: &rustc_span::SourceFile,
defining_crate: CrateNum,
) -> &'ll DILexicalBlock {
metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
}
fn debuginfo_finalize(&self) {
finalize(self)
}
// FIXME(eddyb) find a common convention for all of the debuginfo-related
// names (choose between `dbg`, `debug`, `debuginfo`, `debug_info` etc.).
fn create_dbg_var(
&self,
dbg_context: &FunctionDebugContext<&'ll DIScope>,
variable_name: Symbol,
variable_type: Ty<'tcx>,
scope_metadata: &'ll DIScope,
variable_kind: VariableKind,
span: Span,
) -> &'ll DIVariable {
let loc = self.lookup_debug_loc(span.lo());
let file_metadata = file_metadata(self, &loc.file, dbg_context.defining_crate);
let type_metadata = type_metadata(self, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable => (0, DW_TAG_auto_variable),
};
let align = self.align_of(variable_type);
let name = variable_name.as_str();
unsafe {
llvm::LLVMRustDIBuilderCreateVariable(
DIB(self),
dwarf_tag,
scope_metadata,
name.as_ptr().cast(),
name.len(),
file_metadata,
loc.line.unwrap_or(UNKNOWN_LINE_NUMBER),
type_metadata,
true,
DIFlags::FlagZero,
argument_index,
align.bytes() as u32,
)
}
}
}

View file

@ -0,0 +1,48 @@
// Namespace Handling.
use super::utils::{debug_context, DIB};
use rustc_middle::ty::{self, Instance};
use crate::common::CodegenCx;
use crate::llvm;
use crate::llvm::debuginfo::DIScope;
use rustc_hir::def_id::DefId;
use rustc_hir::definitions::DefPathData;
pub fn mangled_name_of_instance<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
instance: Instance<'tcx>,
) -> ty::SymbolName<'tcx> {
let tcx = cx.tcx;
tcx.symbol_name(instance)
}
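/// Returns the debuginfo namespace scope for `def_id`, creating and caching it
/// (and its parents) on first use. Illustrative example: an item at path
/// `my_crate::module::thing` gets a `thing` namespace nested inside `module`,
/// which in turn is nested inside the `my_crate` namespace.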
pub fn item_namespace(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
if let Some(&scope) = debug_context(cx).namespace_map.borrow().get(&def_id) {
return scope;
}
let def_key = cx.tcx.def_key(def_id);
let parent_scope = def_key
.parent
.map(|parent| item_namespace(cx, DefId { krate: def_id.krate, index: parent }));
let namespace_name = match def_key.disambiguated_data.data {
DefPathData::CrateRoot => cx.tcx.crate_name(def_id.krate),
data => data.as_symbol(),
};
let namespace_name = namespace_name.as_str();
let scope = unsafe {
llvm::LLVMRustDIBuilderCreateNameSpace(
DIB(cx),
parent_scope,
namespace_name.as_ptr().cast(),
namespace_name.len(),
false, // ExportSymbols (only relevant for C++ anonymous namespaces)
)
};
debug_context(cx).namespace_map.borrow_mut().insert(def_id, scope);
scope
}

View file

@ -0,0 +1,61 @@
use super::metadata::{UNKNOWN_COLUMN_NUMBER, UNKNOWN_LINE_NUMBER};
use super::utils::debug_context;
use crate::common::CodegenCx;
use crate::llvm::debuginfo::DIScope;
use crate::llvm::{self, Value};
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::sync::Lrc;
use rustc_span::{BytePos, Pos, SourceFile, SourceFileAndLine, Span};
/// A source code location used to generate debug information.
pub struct DebugLoc {
/// Information about the original source file.
pub file: Lrc<SourceFile>,
/// The (1-based) line number.
pub line: Option<u32>,
/// The (1-based) column number.
pub col: Option<u32>,
}
impl CodegenCx<'ll, '_> {
/// Looks up debug source information about a `BytePos`.
pub fn lookup_debug_loc(&self, pos: BytePos) -> DebugLoc {
let (file, line, col) = match self.sess().source_map().lookup_line(pos) {
Ok(SourceFileAndLine { sf: file, line }) => {
let line_pos = file.line_begin_pos(pos);
// Use 1-based indexing.
let line = (line + 1) as u32;
let col = (pos - line_pos).to_u32() + 1;
(file, Some(line), Some(col))
}
Err(file) => (file, None, None),
};
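// Illustrative example: if `pos` is the first byte of the third line of the
// file, this yields `line == Some(3)` and `col == Some(1)` (both 1-based).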
// For MSVC, omit the column number.
// Otherwise, emit it. This mimics clang behaviour.
// See discussion in https://github.com/rust-lang/rust/issues/42921
if self.sess().target.target.options.is_like_msvc {
DebugLoc { file, line, col: None }
} else {
DebugLoc { file, line, col }
}
}
pub fn create_debug_loc(&self, scope: &'ll DIScope, span: Span) -> &'ll Value {
let DebugLoc { line, col, .. } = self.lookup_debug_loc(span.lo());
unsafe {
llvm::LLVMRustDIBuilderCreateDebugLocation(
debug_context(self).llcontext,
line.unwrap_or(UNKNOWN_LINE_NUMBER),
col.unwrap_or(UNKNOWN_COLUMN_NUMBER),
scope,
None,
)
}
}
}

View file

@ -0,0 +1,43 @@
// Utility Functions.
use super::namespace::item_namespace;
use super::CrateDebugContext;
use rustc_hir::def_id::DefId;
use rustc_middle::ty::DefIdTree;
use crate::common::CodegenCx;
use crate::llvm;
use crate::llvm::debuginfo::{DIArray, DIBuilder, DIDescriptor, DIScope};
pub fn is_node_local_to_unit(cx: &CodegenCx<'_, '_>, def_id: DefId) -> bool {
// The is_local_to_unit flag indicates whether a function is local to the
// current compilation unit (i.e., if it is *static* in the C-sense). The
// *reachable* set should provide a good approximation of this, as it
// contains everything that might leak out of the current crate (by being
// externally visible or by being inlined into something externally
// visible). It might be better to use the `exported_items` set from
// `driver::CrateAnalysis` in the future, but (atm) this set is not
// available in the codegen pass.
!cx.tcx.is_reachable_non_generic(def_id)
}
#[allow(non_snake_case)]
pub fn create_DIArray(builder: &DIBuilder<'ll>, arr: &[Option<&'ll DIDescriptor>]) -> &'ll DIArray {
unsafe { llvm::LLVMRustDIBuilderGetOrCreateArray(builder, arr.as_ptr(), arr.len() as u32) }
}
#[inline]
pub fn debug_context(cx: &'a CodegenCx<'ll, 'tcx>) -> &'a CrateDebugContext<'ll, 'tcx> {
cx.dbg_cx.as_ref().unwrap()
}
#[inline]
#[allow(non_snake_case)]
pub fn DIB(cx: &'a CodegenCx<'ll, '_>) -> &'a DIBuilder<'ll> {
cx.dbg_cx.as_ref().unwrap().builder
}
pub fn get_namespace_for_item(cx: &CodegenCx<'ll, '_>, def_id: DefId) -> &'ll DIScope {
item_namespace(cx, cx.tcx.parent(def_id).expect("get_namespace_for_item: missing parent?"))
}

View file

@ -0,0 +1,95 @@
//! Declare various LLVM values.
//!
//! Prefer using functions and methods from this module rather than calling LLVM
//! functions directly. These functions do some additional work to ensure we do
//! the right thing given the preconceptions of codegen.
//!
//! Some useful guidelines:
//!
//! * Use declare_* family of methods if you are declaring, but are not
//! interested in defining the Value they return.
//! * Use define_* family of methods when you might be defining the Value.
//! * When in doubt, define.
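//!
//! A hedged sketch of the distinction (symbol names are made up): declaring an
//! external C function would go through `declare_cfn("some_c_fn", ty)`, while
//! emitting a static would use `define_global("MY_STATIC", ty)`, which returns
//! `None` if "MY_STATIC" is already defined in the module.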
use crate::abi::{FnAbi, FnAbiLlvmExt};
use crate::attributes;
use crate::context::CodegenCx;
use crate::llvm;
use crate::llvm::AttributePlace::Function;
use crate::type_::Type;
use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use rustc_middle::ty::Ty;
use tracing::debug;
/// Declare a function.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return the existing Value instead.
fn declare_raw_fn(
cx: &CodegenCx<'ll, '_>,
name: &str,
callconv: llvm::CallConv,
ty: &'ll Type,
) -> &'ll Value {
debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
let llfn = unsafe {
llvm::LLVMRustGetOrInsertFunction(cx.llmod, name.as_ptr().cast(), name.len(), ty)
};
llvm::SetFunctionCallConv(llfn, callconv);
// Function addresses in Rust are never significant, allowing functions to
// be merged.
llvm::SetUnnamedAddress(llfn, llvm::UnnamedAddr::Global);
if cx.tcx.sess.opts.cg.no_redzone.unwrap_or(cx.tcx.sess.target.target.options.disable_redzone) {
llvm::Attribute::NoRedZone.apply_llfn(Function, llfn);
}
attributes::default_optimisation_attrs(cx.tcx.sess, llfn);
attributes::non_lazy_bind(cx.sess(), llfn);
llfn
}
impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value {
debug!("declare_global(name={:?})", name);
unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_ptr().cast(), name.len(), ty) }
}
fn declare_cfn(&self, name: &str, fn_type: &'ll Type) -> &'ll Value {
declare_raw_fn(self, name, llvm::CCallConv, fn_type)
}
fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
fn_abi.apply_attrs_llfn(self, llfn);
llfn
}
fn define_global(&self, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
if self.get_defined_value(name).is_some() {
None
} else {
Some(self.declare_global(name, ty))
}
}
fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
unsafe { llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty) }
}
fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, name.as_ptr().cast(), name.len()) }
}
fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
self.get_declared_value(name).and_then(|val| {
let declaration = unsafe { llvm::LLVMIsDeclaration(val) != 0 };
if !declaration { Some(val) } else { None }
})
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,400 @@
//! The Rust compiler.
//!
//! # Note
//!
//! This API is completely unstable and subject to change.
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/")]
#![feature(bool_to_option)]
#![feature(const_cstr_unchecked)]
#![feature(crate_visibility_modifier)]
#![feature(extern_types)]
#![feature(in_band_lifetimes)]
#![feature(nll)]
#![feature(or_patterns)]
#![feature(trusted_len)]
#![recursion_limit = "256"]
use back::write::{create_informational_target_machine, create_target_machine};
pub use llvm_util::target_features;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig};
use rustc_codegen_ssa::traits::*;
use rustc_codegen_ssa::ModuleCodegen;
use rustc_codegen_ssa::{CodegenResults, CompiledModule};
use rustc_errors::{ErrorReported, FatalError, Handler};
use rustc_middle::dep_graph::{DepGraph, WorkProduct};
use rustc_middle::middle::cstore::{EncodedMetadata, MetadataLoaderDyn};
use rustc_middle::ty::{self, TyCtxt};
use rustc_serialize::json;
use rustc_session::config::{self, OptLevel, OutputFilenames, PrintRequest};
use rustc_session::Session;
use rustc_span::symbol::Symbol;
use std::any::Any;
use std::ffi::CStr;
use std::fs;
use std::sync::Arc;
mod back {
pub mod archive;
pub mod lto;
mod profiling;
pub mod write;
}
mod abi;
mod allocator;
mod asm;
mod attributes;
mod base;
mod builder;
mod callee;
mod common;
mod consts;
mod context;
mod coverageinfo;
mod debuginfo;
mod declare;
mod intrinsic;
// The following is a workaround that replaces `pub mod llvm;` and fixes issue 53912.
#[path = "llvm/mod.rs"]
mod llvm_;
pub mod llvm {
pub use super::llvm_::*;
}
mod llvm_util;
mod metadata;
mod mono_item;
mod type_;
mod type_of;
mod va_arg;
mod value;
#[derive(Clone)]
pub struct LlvmCodegenBackend(());
impl ExtraBackendMethods for LlvmCodegenBackend {
fn new_metadata(&self, tcx: TyCtxt<'_>, mod_name: &str) -> ModuleLlvm {
ModuleLlvm::new_metadata(tcx, mod_name)
}
fn write_compressed_metadata<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
metadata: &EncodedMetadata,
llvm_module: &mut ModuleLlvm,
) {
base::write_compressed_metadata(tcx, metadata, llvm_module)
}
fn codegen_allocator<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
mods: &mut ModuleLlvm,
kind: AllocatorKind,
) {
unsafe { allocator::codegen(tcx, mods, kind) }
}
fn compile_codegen_unit(
&self,
tcx: TyCtxt<'_>,
cgu_name: Symbol,
) -> (ModuleCodegen<ModuleLlvm>, u64) {
base::compile_codegen_unit(tcx, cgu_name)
}
fn target_machine_factory(
&self,
sess: &Session,
optlvl: OptLevel,
) -> Arc<dyn Fn() -> Result<&'static mut llvm::TargetMachine, String> + Send + Sync> {
back::write::target_machine_factory(sess, optlvl)
}
fn target_cpu<'b>(&self, sess: &'b Session) -> &'b str {
llvm_util::target_cpu(sess)
}
}
impl WriteBackendMethods for LlvmCodegenBackend {
type Module = ModuleLlvm;
type ModuleBuffer = back::lto::ModuleBuffer;
type Context = llvm::Context;
type TargetMachine = &'static mut llvm::TargetMachine;
type ThinData = back::lto::ThinData;
type ThinBuffer = back::lto::ThinBuffer;
fn print_pass_timings(&self) {
unsafe {
llvm::LLVMRustPrintPassTimings();
}
}
fn run_fat_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<FatLTOInput<Self>>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<LtoModuleCodegen<Self>, FatalError> {
back::lto::run_fat(cgcx, modules, cached_modules)
}
fn run_thin_lto(
cgcx: &CodegenContext<Self>,
modules: Vec<(String, Self::ThinBuffer)>,
cached_modules: Vec<(SerializedModule<Self::ModuleBuffer>, WorkProduct)>,
) -> Result<(Vec<LtoModuleCodegen<Self>>, Vec<WorkProduct>), FatalError> {
back::lto::run_thin(cgcx, modules, cached_modules)
}
unsafe fn optimize(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<(), FatalError> {
back::write::optimize(cgcx, diag_handler, module, config)
}
unsafe fn optimize_thin(
cgcx: &CodegenContext<Self>,
thin: &mut ThinModule<Self>,
) -> Result<ModuleCodegen<Self::Module>, FatalError> {
back::lto::optimize_thin_module(thin, cgcx)
}
unsafe fn codegen(
cgcx: &CodegenContext<Self>,
diag_handler: &Handler,
module: ModuleCodegen<Self::Module>,
config: &ModuleConfig,
) -> Result<CompiledModule, FatalError> {
back::write::codegen(cgcx, diag_handler, module, config)
}
fn prepare_thin(module: ModuleCodegen<Self::Module>) -> (String, Self::ThinBuffer) {
back::lto::prepare_thin(module)
}
fn serialize_module(module: ModuleCodegen<Self::Module>) -> (String, Self::ModuleBuffer) {
(module.name, back::lto::ModuleBuffer::new(module.module_llvm.llmod()))
}
fn run_lto_pass_manager(
cgcx: &CodegenContext<Self>,
module: &ModuleCodegen<Self::Module>,
config: &ModuleConfig,
thin: bool,
) {
back::lto::run_pass_manager(cgcx, module, config, thin)
}
}
unsafe impl Send for LlvmCodegenBackend {} // Llvm is on a per-thread basis
unsafe impl Sync for LlvmCodegenBackend {}
impl LlvmCodegenBackend {
pub fn new() -> Box<dyn CodegenBackend> {
Box::new(LlvmCodegenBackend(()))
}
}
impl CodegenBackend for LlvmCodegenBackend {
fn init(&self, sess: &Session) {
llvm_util::init(sess); // Make sure llvm is inited
}
fn print(&self, req: PrintRequest, sess: &Session) {
match req {
PrintRequest::RelocationModels => {
println!("Available relocation models:");
for name in
&["static", "pic", "dynamic-no-pic", "ropi", "rwpi", "ropi-rwpi", "default"]
{
println!(" {}", name);
}
println!();
}
PrintRequest::CodeModels => {
println!("Available code models:");
for name in &["tiny", "small", "kernel", "medium", "large"] {
println!(" {}", name);
}
println!();
}
PrintRequest::TlsModels => {
println!("Available TLS models:");
for name in &["global-dynamic", "local-dynamic", "initial-exec", "local-exec"] {
println!(" {}", name);
}
println!();
}
req => llvm_util::print(req, sess),
}
}
fn print_passes(&self) {
llvm_util::print_passes();
}
fn print_version(&self) {
llvm_util::print_version();
}
fn target_features(&self, sess: &Session) -> Vec<Symbol> {
target_features(sess)
}
fn metadata_loader(&self) -> Box<MetadataLoaderDyn> {
Box::new(metadata::LlvmMetadataLoader)
}
fn provide(&self, providers: &mut ty::query::Providers) {
attributes::provide(providers);
}
fn provide_extern(&self, providers: &mut ty::query::Providers) {
attributes::provide_extern(providers);
}
fn codegen_crate<'tcx>(
&self,
tcx: TyCtxt<'tcx>,
metadata: EncodedMetadata,
need_metadata_module: bool,
) -> Box<dyn Any> {
Box::new(rustc_codegen_ssa::base::codegen_crate(
LlvmCodegenBackend(()),
tcx,
metadata,
need_metadata_module,
))
}
fn join_codegen(
&self,
ongoing_codegen: Box<dyn Any>,
sess: &Session,
dep_graph: &DepGraph,
) -> Result<Box<dyn Any>, ErrorReported> {
let (codegen_results, work_products) = ongoing_codegen
.downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
.expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
.join(sess);
if sess.opts.debugging_opts.incremental_info {
rustc_codegen_ssa::back::write::dump_incremental_data(&codegen_results);
}
sess.time("serialize_work_products", move || {
rustc_incremental::save_work_product_index(sess, &dep_graph, work_products)
});
sess.compile_status()?;
Ok(Box::new(codegen_results))
}
fn link(
&self,
sess: &Session,
codegen_results: Box<dyn Any>,
outputs: &OutputFilenames,
) -> Result<(), ErrorReported> {
let codegen_results = codegen_results
.downcast::<CodegenResults>()
.expect("Expected CodegenResults, found Box<Any>");
if sess.opts.debugging_opts.no_link {
// FIXME: use a binary format to encode the `.rlink` file
let rlink_data = json::encode(&codegen_results).map_err(|err| {
sess.fatal(&format!("failed to encode rlink: {}", err));
})?;
let rlink_file = outputs.with_extension(config::RLINK_EXT);
fs::write(&rlink_file, rlink_data).map_err(|err| {
sess.fatal(&format!("failed to write file {}: {}", rlink_file.display(), err));
})?;
return Ok(());
}
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
sess.time("link_crate", || {
use crate::back::archive::LlvmArchiveBuilder;
use rustc_codegen_ssa::back::link::link_binary;
let target_cpu = crate::llvm_util::target_cpu(sess);
link_binary::<LlvmArchiveBuilder<'_>>(
sess,
&codegen_results,
outputs,
&codegen_results.crate_name.as_str(),
target_cpu,
);
});
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
rustc_incremental::finalize_session_directory(sess, codegen_results.crate_hash);
sess.time("llvm_dump_timing_file", || {
if sess.opts.debugging_opts.llvm_time_trace {
llvm_util::time_trace_profiler_finish("llvm_timings.json");
}
});
Ok(())
}
}
pub struct ModuleLlvm {
llcx: &'static mut llvm::Context,
llmod_raw: *const llvm::Module,
tm: &'static mut llvm::TargetMachine,
}
unsafe impl Send for ModuleLlvm {}
unsafe impl Sync for ModuleLlvm {}
impl ModuleLlvm {
fn new(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
ModuleLlvm { llmod_raw, llcx, tm: create_target_machine(tcx) }
}
}
fn new_metadata(tcx: TyCtxt<'_>, mod_name: &str) -> Self {
unsafe {
let llcx = llvm::LLVMRustContextCreate(tcx.sess.fewer_names());
let llmod_raw = context::create_module(tcx, llcx, mod_name) as *const _;
ModuleLlvm { llmod_raw, llcx, tm: create_informational_target_machine(tcx.sess) }
}
}
fn parse(
cgcx: &CodegenContext<LlvmCodegenBackend>,
name: &CStr,
buffer: &[u8],
handler: &Handler,
) -> Result<Self, FatalError> {
unsafe {
let llcx = llvm::LLVMRustContextCreate(cgcx.fewer_names);
let llmod_raw = back::lto::parse_module(llcx, name, buffer, handler)?;
let tm = match (cgcx.tm_factory.0)() {
Ok(m) => m,
Err(e) => {
handler.struct_err(&e).emit();
return Err(FatalError);
}
};
Ok(ModuleLlvm { llmod_raw, llcx, tm })
}
}
fn llmod(&self) -> &llvm::Module {
unsafe { &*self.llmod_raw }
}
}
impl Drop for ModuleLlvm {
fn drop(&mut self) {
unsafe {
llvm::LLVMContextDispose(&mut *(self.llcx as *mut _));
llvm::LLVMRustDisposeTargetMachine(&mut *(self.tm as *mut _));
}
}
}

View file

@ -0,0 +1,105 @@
//! A wrapper around LLVM's archive (.a) code
use rustc_fs_util::path_to_c_string;
use std::path::Path;
use std::slice;
use std::str;
pub struct ArchiveRO {
pub raw: &'static mut super::Archive,
}
unsafe impl Send for ArchiveRO {}
pub struct Iter<'a> {
raw: &'a mut super::ArchiveIterator<'a>,
}
pub struct Child<'a> {
pub raw: &'a mut super::ArchiveChild<'a>,
}
impl ArchiveRO {
/// Opens a static archive for read-only purposes. This is faster than shelling
/// out to `ar` for everything because it uses LLVM's internal `Archive` class,
/// which memory-maps the file.
///
/// If this archive is used with a mutable method, then an error will be
/// raised.
pub fn open(dst: &Path) -> Result<ArchiveRO, String> {
unsafe {
let s = path_to_c_string(dst);
let ar = super::LLVMRustOpenArchive(s.as_ptr()).ok_or_else(|| {
super::last_error().unwrap_or_else(|| "failed to open archive".to_owned())
})?;
Ok(ArchiveRO { raw: ar })
}
}
pub fn iter(&self) -> Iter<'_> {
unsafe { Iter { raw: super::LLVMRustArchiveIteratorNew(self.raw) } }
}
}
impl Drop for ArchiveRO {
fn drop(&mut self) {
unsafe {
super::LLVMRustDestroyArchive(&mut *(self.raw as *mut _));
}
}
}
impl<'a> Iterator for Iter<'a> {
type Item = Result<Child<'a>, String>;
fn next(&mut self) -> Option<Result<Child<'a>, String>> {
unsafe {
match super::LLVMRustArchiveIteratorNext(self.raw) {
Some(raw) => Some(Ok(Child { raw })),
None => super::last_error().map(Err),
}
}
}
}
impl<'a> Drop for Iter<'a> {
fn drop(&mut self) {
unsafe {
super::LLVMRustArchiveIteratorFree(&mut *(self.raw as *mut _));
}
}
}
impl<'a> Child<'a> {
pub fn name(&self) -> Option<&'a str> {
unsafe {
let mut name_len = 0;
let name_ptr = super::LLVMRustArchiveChildName(self.raw, &mut name_len);
if name_ptr.is_null() {
None
} else {
let name = slice::from_raw_parts(name_ptr as *const u8, name_len as usize);
str::from_utf8(name).ok().map(|s| s.trim())
}
}
}
pub fn data(&self) -> &'a [u8] {
unsafe {
let mut data_len = 0;
let data_ptr = super::LLVMRustArchiveChildData(self.raw, &mut data_len);
if data_ptr.is_null() {
panic!("failed to read data from archive child");
}
slice::from_raw_parts(data_ptr as *const u8, data_len as usize)
}
}
}
impl<'a> Drop for Child<'a> {
fn drop(&mut self) {
unsafe {
super::LLVMRustArchiveChildFree(&mut *(self.raw as *mut _));
}
}
}

View file

@ -0,0 +1,166 @@
//! LLVM diagnostic reports.
pub use self::Diagnostic::*;
pub use self::OptimizationDiagnosticKind::*;
use crate::value::Value;
use libc::c_uint;
use super::{DiagnosticInfo, Twine};
#[derive(Copy, Clone)]
pub enum OptimizationDiagnosticKind {
OptimizationRemark,
OptimizationMissed,
OptimizationAnalysis,
OptimizationAnalysisFPCommute,
OptimizationAnalysisAliasing,
OptimizationFailure,
OptimizationRemarkOther,
}
impl OptimizationDiagnosticKind {
pub fn describe(self) -> &'static str {
match self {
OptimizationRemark | OptimizationRemarkOther => "remark",
OptimizationMissed => "missed",
OptimizationAnalysis => "analysis",
OptimizationAnalysisFPCommute => "floating-point",
OptimizationAnalysisAliasing => "aliasing",
OptimizationFailure => "failure",
}
}
}
pub struct OptimizationDiagnostic<'ll> {
pub kind: OptimizationDiagnosticKind,
pub pass_name: String,
pub function: &'ll Value,
pub line: c_uint,
pub column: c_uint,
pub filename: String,
pub message: String,
}
impl OptimizationDiagnostic<'ll> {
unsafe fn unpack(kind: OptimizationDiagnosticKind, di: &'ll DiagnosticInfo) -> Self {
let mut function = None;
let mut line = 0;
let mut column = 0;
let mut message = None;
let mut filename = None;
let pass_name = super::build_string(|pass_name| {
message = super::build_string(|message| {
filename = super::build_string(|filename| {
super::LLVMRustUnpackOptimizationDiagnostic(
di,
pass_name,
&mut function,
&mut line,
&mut column,
filename,
message,
)
})
.ok()
})
.ok()
})
.ok();
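// The `build_string` calls are nested because each one only hands out its
// `&RustString` buffer inside its closure, and the single FFI call above needs
// all three buffers (pass name, message, filename) at the same time.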
let mut filename = filename.unwrap_or_default();
if filename.is_empty() {
filename.push_str("<unknown file>");
}
OptimizationDiagnostic {
kind,
pass_name: pass_name.expect("got a non-UTF8 pass name from LLVM"),
function: function.unwrap(),
line,
column,
filename,
message: message.expect("got a non-UTF8 OptimizationDiagnostic message from LLVM"),
}
}
}
#[derive(Copy, Clone)]
pub struct InlineAsmDiagnostic<'ll> {
pub level: super::DiagnosticLevel,
pub cookie: c_uint,
pub message: &'ll Twine,
pub instruction: Option<&'ll Value>,
}
impl InlineAsmDiagnostic<'ll> {
unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
let mut cookie = 0;
let mut message = None;
let mut instruction = None;
let mut level = super::DiagnosticLevel::Error;
super::LLVMRustUnpackInlineAsmDiagnostic(
di,
&mut level,
&mut cookie,
&mut message,
&mut instruction,
);
InlineAsmDiagnostic { level, cookie, message: message.unwrap(), instruction }
}
}
pub enum Diagnostic<'ll> {
Optimization(OptimizationDiagnostic<'ll>),
InlineAsm(InlineAsmDiagnostic<'ll>),
PGO(&'ll DiagnosticInfo),
Linker(&'ll DiagnosticInfo),
/// LLVM has other types that we do not wrap here.
UnknownDiagnostic(&'ll DiagnosticInfo),
}
impl Diagnostic<'ll> {
pub unsafe fn unpack(di: &'ll DiagnosticInfo) -> Self {
use super::DiagnosticKind as Dk;
let kind = super::LLVMRustGetDiagInfoKind(di);
match kind {
Dk::InlineAsm => InlineAsm(InlineAsmDiagnostic::unpack(di)),
Dk::OptimizationRemark => {
Optimization(OptimizationDiagnostic::unpack(OptimizationRemark, di))
}
Dk::OptimizationRemarkOther => {
Optimization(OptimizationDiagnostic::unpack(OptimizationRemarkOther, di))
}
Dk::OptimizationRemarkMissed => {
Optimization(OptimizationDiagnostic::unpack(OptimizationMissed, di))
}
Dk::OptimizationRemarkAnalysis => {
Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysis, di))
}
Dk::OptimizationRemarkAnalysisFPCommute => {
Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisFPCommute, di))
}
Dk::OptimizationRemarkAnalysisAliasing => {
Optimization(OptimizationDiagnostic::unpack(OptimizationAnalysisAliasing, di))
}
Dk::OptimizationFailure => {
Optimization(OptimizationDiagnostic::unpack(OptimizationFailure, di))
}
Dk::PGOProfile => PGO(di),
Dk::Linker => Linker(di),
_ => UnknownDiagnostic(di),
}
}
}

File diff suppressed because it is too large

View file

@ -0,0 +1,310 @@
#![allow(non_snake_case)]
pub use self::AtomicRmwBinOp::*;
pub use self::CallConv::*;
pub use self::CodeGenOptSize::*;
pub use self::IntPredicate::*;
pub use self::Linkage::*;
pub use self::MetadataType::*;
pub use self::RealPredicate::*;
use libc::c_uint;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_llvm::RustString;
use std::cell::RefCell;
use std::ffi::{CStr, CString};
use std::str::FromStr;
use std::string::FromUtf8Error;
pub mod archive_ro;
pub mod diagnostic;
mod ffi;
pub use self::ffi::*;
impl LLVMRustResult {
pub fn into_result(self) -> Result<(), ()> {
match self {
LLVMRustResult::Success => Ok(()),
LLVMRustResult::Failure => Err(()),
}
}
}
pub fn AddFunctionAttrStringValue(llfn: &'a Value, idx: AttributePlace, attr: &CStr, value: &CStr) {
unsafe {
LLVMRustAddFunctionAttrStringValue(llfn, idx.as_uint(), attr.as_ptr(), value.as_ptr())
}
}
#[derive(Copy, Clone)]
pub enum AttributePlace {
ReturnValue,
Argument(u32),
Function,
}
impl AttributePlace {
pub fn as_uint(self) -> c_uint {
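// LLVM attribute indices: 0 denotes the return value, parameters are numbered
// starting at 1, and !0 (all bits set) denotes the function itself.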
match self {
AttributePlace::ReturnValue => 0,
AttributePlace::Argument(i) => 1 + i,
AttributePlace::Function => !0,
}
}
}
#[derive(Copy, Clone, PartialEq)]
#[repr(C)]
pub enum CodeGenOptSize {
CodeGenOptSizeNone = 0,
CodeGenOptSizeDefault = 1,
CodeGenOptSizeAggressive = 2,
}
impl FromStr for ArchiveKind {
type Err = ();
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"gnu" => Ok(ArchiveKind::K_GNU),
"bsd" => Ok(ArchiveKind::K_BSD),
"darwin" => Ok(ArchiveKind::K_DARWIN),
"coff" => Ok(ArchiveKind::K_COFF),
_ => Err(()),
}
}
}
pub fn SetInstructionCallConv(instr: &'a Value, cc: CallConv) {
unsafe {
LLVMSetInstructionCallConv(instr, cc as c_uint);
}
}
pub fn SetFunctionCallConv(fn_: &'a Value, cc: CallConv) {
unsafe {
LLVMSetFunctionCallConv(fn_, cc as c_uint);
}
}
// Externally visible symbols that might appear in multiple codegen units need to appear in
// their own comdat section so that the duplicates can be discarded at link time. This can for
// example happen for generics when using multiple codegen units. This function simply uses the
// value's name as the comdat value to make sure that it is in a 1-to-1 relationship to the
// function.
// For more details on COMDAT sections see e.g., http://www.airs.com/blog/archives/52
pub fn SetUniqueComdat(llmod: &Module, val: &'a Value) {
unsafe {
let name = get_value_name(val);
LLVMRustSetComdat(llmod, val, name.as_ptr().cast(), name.len());
}
}
pub fn UnsetComdat(val: &'a Value) {
unsafe {
LLVMRustUnsetComdat(val);
}
}
pub fn SetUnnamedAddress(global: &'a Value, unnamed: UnnamedAddr) {
unsafe {
LLVMSetUnnamedAddress(global, unnamed);
}
}
pub fn set_thread_local(global: &'a Value, is_thread_local: bool) {
unsafe {
LLVMSetThreadLocal(global, is_thread_local as Bool);
}
}
pub fn set_thread_local_mode(global: &'a Value, mode: ThreadLocalMode) {
unsafe {
LLVMSetThreadLocalMode(global, mode);
}
}
impl Attribute {
pub fn apply_llfn(&self, idx: AttributePlace, llfn: &Value) {
unsafe { LLVMRustAddFunctionAttribute(llfn, idx.as_uint(), *self) }
}
pub fn apply_callsite(&self, idx: AttributePlace, callsite: &Value) {
unsafe { LLVMRustAddCallSiteAttribute(callsite, idx.as_uint(), *self) }
}
pub fn unapply_llfn(&self, idx: AttributePlace, llfn: &Value) {
unsafe { LLVMRustRemoveFunctionAttributes(llfn, idx.as_uint(), *self) }
}
pub fn toggle_llfn(&self, idx: AttributePlace, llfn: &Value, set: bool) {
if set {
self.apply_llfn(idx, llfn);
} else {
self.unapply_llfn(idx, llfn);
}
}
}
// Memory-managed interface to object files.
pub struct ObjectFile {
pub llof: &'static mut ffi::ObjectFile,
}
unsafe impl Send for ObjectFile {}
impl ObjectFile {
// This will take ownership of llmb
pub fn new(llmb: &'static mut MemoryBuffer) -> Option<ObjectFile> {
unsafe {
let llof = LLVMCreateObjectFile(llmb)?;
Some(ObjectFile { llof })
}
}
}
impl Drop for ObjectFile {
fn drop(&mut self) {
unsafe {
LLVMDisposeObjectFile(&mut *(self.llof as *mut _));
}
}
}
// Memory-managed interface to section iterators.
pub struct SectionIter<'a> {
pub llsi: &'a mut SectionIterator<'a>,
}
impl Drop for SectionIter<'a> {
fn drop(&mut self) {
unsafe {
LLVMDisposeSectionIterator(&mut *(self.llsi as *mut _));
}
}
}
pub fn mk_section_iter(llof: &ffi::ObjectFile) -> SectionIter<'_> {
unsafe { SectionIter { llsi: LLVMGetSections(llof) } }
}
pub fn set_section(llglobal: &Value, section_name: &str) {
let section_name_cstr = CString::new(section_name).expect("unexpected CString error");
unsafe {
LLVMSetSection(llglobal, section_name_cstr.as_ptr());
}
}
pub fn add_global<'a>(llmod: &'a Module, ty: &'a Type, name: &str) -> &'a Value {
let name_cstr = CString::new(name).expect("unexpected CString error");
unsafe { LLVMAddGlobal(llmod, ty, name_cstr.as_ptr()) }
}
pub fn set_initializer(llglobal: &Value, constant_val: &Value) {
unsafe {
LLVMSetInitializer(llglobal, constant_val);
}
}
pub fn set_global_constant(llglobal: &Value, is_constant: bool) {
unsafe {
LLVMSetGlobalConstant(llglobal, if is_constant { ffi::True } else { ffi::False });
}
}
pub fn set_linkage(llglobal: &Value, linkage: Linkage) {
unsafe {
LLVMRustSetLinkage(llglobal, linkage);
}
}
pub fn set_alignment(llglobal: &Value, bytes: usize) {
unsafe {
ffi::LLVMSetAlignment(llglobal, bytes as c_uint);
}
}
/// Safe wrapper around `LLVMGetParam`, because segfaults are no fun.
pub fn get_param(llfn: &Value, index: c_uint) -> &Value {
unsafe {
assert!(
index < LLVMCountParams(llfn),
"out of bounds argument access: {} out of {} arguments",
index,
LLVMCountParams(llfn)
);
LLVMGetParam(llfn, index)
}
}
/// Safe wrapper for `LLVMGetValueName2` into a byte slice
pub fn get_value_name(value: &Value) -> &[u8] {
unsafe {
let mut len = 0;
let data = LLVMGetValueName2(value, &mut len);
std::slice::from_raw_parts(data.cast(), len)
}
}
/// Safe wrapper for `LLVMSetValueName2` from a byte slice
pub fn set_value_name(value: &Value, name: &[u8]) {
unsafe {
let data = name.as_ptr().cast();
LLVMSetValueName2(value, data, name.len());
}
}
pub fn build_string(f: impl FnOnce(&RustString)) -> Result<String, FromUtf8Error> {
let sr = RustString { bytes: RefCell::new(Vec::new()) };
f(&sr);
String::from_utf8(sr.bytes.into_inner())
}
pub fn build_byte_buffer(f: impl FnOnce(&RustString)) -> Vec<u8> {
let sr = RustString { bytes: RefCell::new(Vec::new()) };
f(&sr);
sr.bytes.into_inner()
}
pub fn twine_to_string(tr: &Twine) -> String {
unsafe {
build_string(|s| LLVMRustWriteTwineToString(tr, s)).expect("got a non-UTF8 Twine from LLVM")
}
}
pub fn last_error() -> Option<String> {
unsafe {
let cstr = LLVMRustGetLastError();
if cstr.is_null() {
None
} else {
let err = CStr::from_ptr(cstr).to_bytes();
let err = String::from_utf8_lossy(err).to_string();
libc::free(cstr as *mut _);
Some(err)
}
}
}
pub struct OperandBundleDef<'a> {
pub raw: &'a mut ffi::OperandBundleDef<'a>,
}
impl OperandBundleDef<'a> {
pub fn new(name: &str, vals: &[&'a Value]) -> Self {
let name = SmallCStr::new(name);
let def = unsafe {
LLVMRustBuildOperandBundleDef(name.as_ptr(), vals.as_ptr(), vals.len() as c_uint)
};
OperandBundleDef { raw: def }
}
}
impl Drop for OperandBundleDef<'a> {
fn drop(&mut self) {
unsafe {
LLVMRustFreeOperandBundleDef(&mut *(self.raw as *mut _));
}
}
}

View file

@ -0,0 +1,368 @@
use crate::back::write::create_informational_target_machine;
use crate::llvm;
use libc::c_int;
use rustc_data_structures::fx::FxHashSet;
use rustc_feature::UnstableFeatures;
use rustc_middle::bug;
use rustc_session::config::PrintRequest;
use rustc_session::Session;
use rustc_span::symbol::sym;
use rustc_span::symbol::Symbol;
use rustc_target::spec::{MergeFunctions, PanicStrategy};
use std::ffi::CString;
use std::slice;
use std::str;
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Once;
static POISONED: AtomicBool = AtomicBool::new(false);
static INIT: Once = Once::new();
pub(crate) fn init(sess: &Session) {
unsafe {
// Before we touch LLVM, make sure that multithreading is enabled.
INIT.call_once(|| {
if llvm::LLVMStartMultithreaded() != 1 {
// use an extra bool so that all future usage of LLVM fails, since the
// `Once` will not run this initialization again.
POISONED.store(true, Ordering::SeqCst);
}
configure_llvm(sess);
});
if POISONED.load(Ordering::SeqCst) {
bug!("couldn't enable multi-threaded LLVM");
}
}
}
fn require_inited() {
INIT.call_once(|| bug!("llvm is not initialized"));
if POISONED.load(Ordering::SeqCst) {
bug!("couldn't enable multi-threaded LLVM");
}
}
unsafe fn configure_llvm(sess: &Session) {
let n_args = sess.opts.cg.llvm_args.len() + sess.target.target.options.llvm_args.len();
let mut llvm_c_strs = Vec::with_capacity(n_args + 1);
let mut llvm_args = Vec::with_capacity(n_args + 1);
llvm::LLVMRustInstallFatalErrorHandler();
fn llvm_arg_to_arg_name(full_arg: &str) -> &str {
full_arg.trim().split(|c: char| c == '=' || c.is_whitespace()).next().unwrap_or("")
}
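// Illustrative example: `llvm_arg_to_arg_name("-debug-pass=Structure")` yields
// "-debug-pass", so the defaults added below with `force == false` are skipped
// whenever the user already passed a flag with the same name.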
let cg_opts = sess.opts.cg.llvm_args.iter();
let tg_opts = sess.target.target.options.llvm_args.iter();
let sess_args = cg_opts.chain(tg_opts);
let user_specified_args: FxHashSet<_> =
sess_args.clone().map(|s| llvm_arg_to_arg_name(s)).filter(|s| !s.is_empty()).collect();
{
// This adds the given argument to LLVM. Unless `force` is true,
// user-specified arguments are *not* overridden.
let mut add = |arg: &str, force: bool| {
if force || !user_specified_args.contains(llvm_arg_to_arg_name(arg)) {
let s = CString::new(arg).unwrap();
llvm_args.push(s.as_ptr());
llvm_c_strs.push(s);
}
};
// Set the llvm "program name" to make usage and invalid argument messages more clear.
add("rustc -Cllvm-args=\"...\" with", true);
if sess.time_llvm_passes() {
add("-time-passes", false);
}
if sess.print_llvm_passes() {
add("-debug-pass=Structure", false);
}
if !sess.opts.debugging_opts.no_generate_arange_section {
add("-generate-arange-section", false);
}
match sess
.opts
.debugging_opts
.merge_functions
.unwrap_or(sess.target.target.options.merge_functions)
{
MergeFunctions::Disabled | MergeFunctions::Trampolines => {}
MergeFunctions::Aliases => {
add("-mergefunc-use-aliases", false);
}
}
if sess.target.target.target_os == "emscripten"
&& sess.panic_strategy() == PanicStrategy::Unwind
{
add("-enable-emscripten-cxx-exceptions", false);
}
// HACK(eddyb) LLVM inserts `llvm.assume` calls to preserve align attributes
// during inlining. Unfortunately these may block other optimizations.
add("-preserve-alignment-assumptions-during-inlining=false", false);
for arg in sess_args {
add(&(*arg), true);
}
}
if sess.opts.debugging_opts.llvm_time_trace && get_major_version() >= 9 {
// time-trace is not thread safe and running it in parallel will cause seg faults.
if !sess.opts.debugging_opts.no_parallel_llvm {
bug!("`-Z llvm-time-trace` requires `-Z no-parallel-llvm")
}
llvm::LLVMTimeTraceProfilerInitialize();
}
llvm::LLVMInitializePasses();
::rustc_llvm::initialize_available_targets();
llvm::LLVMRustSetLLVMOptions(llvm_args.len() as c_int, llvm_args.as_ptr());
}
pub fn time_trace_profiler_finish(file_name: &str) {
unsafe {
if get_major_version() >= 9 {
let file_name = CString::new(file_name).unwrap();
llvm::LLVMTimeTraceProfilerFinish(file_name.as_ptr());
}
}
}
// WARNING: the features after applying `to_llvm_feature` must be known
// to LLVM or the feature detection code will walk past the end of the feature
// array, leading to crashes.
const ARM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("aclass", Some(sym::arm_target_feature)),
("mclass", Some(sym::arm_target_feature)),
("rclass", Some(sym::arm_target_feature)),
("dsp", Some(sym::arm_target_feature)),
("neon", Some(sym::arm_target_feature)),
("crc", Some(sym::arm_target_feature)),
("crypto", Some(sym::arm_target_feature)),
("v5te", Some(sym::arm_target_feature)),
("v6", Some(sym::arm_target_feature)),
("v6k", Some(sym::arm_target_feature)),
("v6t2", Some(sym::arm_target_feature)),
("v7", Some(sym::arm_target_feature)),
("v8", Some(sym::arm_target_feature)),
("vfp2", Some(sym::arm_target_feature)),
("vfp3", Some(sym::arm_target_feature)),
("vfp4", Some(sym::arm_target_feature)),
// This is needed for inline assembly, but shouldn't be stabilized as-is
// since it should be enabled per-function using #[instruction_set], not
// #[target_feature].
("thumb-mode", Some(sym::arm_target_feature)),
];
const AARCH64_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("fp", Some(sym::aarch64_target_feature)),
("neon", Some(sym::aarch64_target_feature)),
("sve", Some(sym::aarch64_target_feature)),
("crc", Some(sym::aarch64_target_feature)),
("crypto", Some(sym::aarch64_target_feature)),
("ras", Some(sym::aarch64_target_feature)),
("lse", Some(sym::aarch64_target_feature)),
("rdm", Some(sym::aarch64_target_feature)),
("fp16", Some(sym::aarch64_target_feature)),
("rcpc", Some(sym::aarch64_target_feature)),
("dotprod", Some(sym::aarch64_target_feature)),
("tme", Some(sym::aarch64_target_feature)),
("v8.1a", Some(sym::aarch64_target_feature)),
("v8.2a", Some(sym::aarch64_target_feature)),
("v8.3a", Some(sym::aarch64_target_feature)),
];
const X86_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("adx", Some(sym::adx_target_feature)),
("aes", None),
("avx", None),
("avx2", None),
("avx512bw", Some(sym::avx512_target_feature)),
("avx512cd", Some(sym::avx512_target_feature)),
("avx512dq", Some(sym::avx512_target_feature)),
("avx512er", Some(sym::avx512_target_feature)),
("avx512f", Some(sym::avx512_target_feature)),
("avx512ifma", Some(sym::avx512_target_feature)),
("avx512pf", Some(sym::avx512_target_feature)),
("avx512vbmi", Some(sym::avx512_target_feature)),
("avx512vl", Some(sym::avx512_target_feature)),
("avx512vpopcntdq", Some(sym::avx512_target_feature)),
("bmi1", None),
("bmi2", None),
("cmpxchg16b", Some(sym::cmpxchg16b_target_feature)),
("f16c", Some(sym::f16c_target_feature)),
("fma", None),
("fxsr", None),
("lzcnt", None),
("mmx", Some(sym::mmx_target_feature)),
("movbe", Some(sym::movbe_target_feature)),
("pclmulqdq", None),
("popcnt", None),
("rdrand", None),
("rdseed", None),
("rtm", Some(sym::rtm_target_feature)),
("sha", None),
("sse", None),
("sse2", None),
("sse3", None),
("sse4.1", None),
("sse4.2", None),
("sse4a", Some(sym::sse4a_target_feature)),
("ssse3", None),
("tbm", Some(sym::tbm_target_feature)),
("xsave", None),
("xsavec", None),
("xsaveopt", None),
("xsaves", None),
];
const HEXAGON_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("hvx", Some(sym::hexagon_target_feature)),
("hvx-length128b", Some(sym::hexagon_target_feature)),
];
const POWERPC_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("altivec", Some(sym::powerpc_target_feature)),
("power8-altivec", Some(sym::powerpc_target_feature)),
("power9-altivec", Some(sym::powerpc_target_feature)),
("power8-vector", Some(sym::powerpc_target_feature)),
("power9-vector", Some(sym::powerpc_target_feature)),
("vsx", Some(sym::powerpc_target_feature)),
];
const MIPS_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] =
&[("fp64", Some(sym::mips_target_feature)), ("msa", Some(sym::mips_target_feature))];
const RISCV_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("m", Some(sym::riscv_target_feature)),
("a", Some(sym::riscv_target_feature)),
("c", Some(sym::riscv_target_feature)),
("f", Some(sym::riscv_target_feature)),
("d", Some(sym::riscv_target_feature)),
("e", Some(sym::riscv_target_feature)),
];
const WASM_ALLOWED_FEATURES: &[(&str, Option<Symbol>)] = &[
("simd128", Some(sym::wasm_target_feature)),
("atomics", Some(sym::wasm_target_feature)),
("nontrapping-fptoint", Some(sym::wasm_target_feature)),
];
/// When rustdoc is running, provide a list of all known features so that all their respective
/// primitives may be documented.
///
/// IMPORTANT: If you're adding another feature list above, make sure to add it to this iterator!
pub fn all_known_features() -> impl Iterator<Item = (&'static str, Option<Symbol>)> {
std::iter::empty()
.chain(ARM_ALLOWED_FEATURES.iter())
.chain(AARCH64_ALLOWED_FEATURES.iter())
.chain(X86_ALLOWED_FEATURES.iter())
.chain(HEXAGON_ALLOWED_FEATURES.iter())
.chain(POWERPC_ALLOWED_FEATURES.iter())
.chain(MIPS_ALLOWED_FEATURES.iter())
.chain(RISCV_ALLOWED_FEATURES.iter())
.chain(WASM_ALLOWED_FEATURES.iter())
.cloned()
}
pub fn to_llvm_feature<'a>(sess: &Session, s: &'a str) -> &'a str {
let arch = if sess.target.target.arch == "x86_64" { "x86" } else { &*sess.target.target.arch };
match (arch, s) {
("x86", "pclmulqdq") => "pclmul",
("x86", "rdrand") => "rdrnd",
("x86", "bmi1") => "bmi",
("x86", "cmpxchg16b") => "cx16",
("aarch64", "fp") => "fp-armv8",
("aarch64", "fp16") => "fullfp16",
(_, s) => s,
}
}
pub fn target_features(sess: &Session) -> Vec<Symbol> {
let target_machine = create_informational_target_machine(sess);
supported_target_features(sess)
.iter()
.filter_map(|&(feature, gate)| {
if UnstableFeatures::from_environment().is_nightly_build() || gate.is_none() {
Some(feature)
} else {
None
}
})
.filter(|feature| {
let llvm_feature = to_llvm_feature(sess, feature);
let cstr = CString::new(llvm_feature).unwrap();
unsafe { llvm::LLVMRustHasFeature(target_machine, cstr.as_ptr()) }
})
.map(|feature| Symbol::intern(feature))
.collect()
}
pub fn supported_target_features(sess: &Session) -> &'static [(&'static str, Option<Symbol>)] {
match &*sess.target.target.arch {
"arm" => ARM_ALLOWED_FEATURES,
"aarch64" => AARCH64_ALLOWED_FEATURES,
"x86" | "x86_64" => X86_ALLOWED_FEATURES,
"hexagon" => HEXAGON_ALLOWED_FEATURES,
"mips" | "mips64" => MIPS_ALLOWED_FEATURES,
"powerpc" | "powerpc64" => POWERPC_ALLOWED_FEATURES,
"riscv32" | "riscv64" => RISCV_ALLOWED_FEATURES,
"wasm32" => WASM_ALLOWED_FEATURES,
_ => &[],
}
}
pub fn print_version() {
// Can be called without initializing LLVM
unsafe {
println!("LLVM version: {}.{}", llvm::LLVMRustVersionMajor(), llvm::LLVMRustVersionMinor());
}
}
pub fn get_major_version() -> u32 {
unsafe { llvm::LLVMRustVersionMajor() }
}
pub fn print_passes() {
// Can be called without initializing LLVM
unsafe {
llvm::LLVMRustPrintPasses();
}
}
pub(crate) fn print(req: PrintRequest, sess: &Session) {
require_inited();
let tm = create_informational_target_machine(sess);
unsafe {
match req {
PrintRequest::TargetCPUs => llvm::LLVMRustPrintTargetCPUs(tm),
PrintRequest::TargetFeatures => llvm::LLVMRustPrintTargetFeatures(tm),
_ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req),
}
}
}
pub fn target_cpu(sess: &Session) -> &str {
let name = match sess.opts.cg.target_cpu {
Some(ref s) => &**s,
None => &*sess.target.target.options.cpu,
};
if name != "native" {
return name;
}
unsafe {
let mut len = 0;
let ptr = llvm::LLVMRustGetHostCPUName(&mut len);
str::from_utf8(slice::from_raw_parts(ptr as *const u8, len)).unwrap()
}
}


@ -0,0 +1,112 @@
use crate::llvm;
use crate::llvm::archive_ro::ArchiveRO;
use crate::llvm::{mk_section_iter, False, ObjectFile};
use rustc_middle::middle::cstore::MetadataLoader;
use rustc_target::spec::Target;
use rustc_codegen_ssa::METADATA_FILENAME;
use rustc_data_structures::owning_ref::OwningRef;
use rustc_data_structures::rustc_erase_owner;
use tracing::debug;
use rustc_fs_util::path_to_c_string;
use std::path::Path;
use std::slice;
pub use rustc_data_structures::sync::MetadataRef;
pub struct LlvmMetadataLoader;
impl MetadataLoader for LlvmMetadataLoader {
fn get_rlib_metadata(&self, _: &Target, filename: &Path) -> Result<MetadataRef, String> {
// Use ArchiveRO for speed here; it's backed by LLVM and uses mmap
// internally to read the file. We also avoid even using a memcpy by
// just keeping the archive around while the metadata is in use.
let archive =
ArchiveRO::open(filename).map(|ar| OwningRef::new(Box::new(ar))).map_err(|e| {
debug!("llvm didn't like `{}`: {}", filename.display(), e);
format!("failed to read rlib metadata in '{}': {}", filename.display(), e)
})?;
let buf: OwningRef<_, [u8]> = archive.try_map(|ar| {
ar.iter()
.filter_map(|s| s.ok())
.find(|sect| sect.name() == Some(METADATA_FILENAME))
.map(|s| s.data())
.ok_or_else(|| {
debug!("didn't find '{}' in the archive", METADATA_FILENAME);
format!("failed to read rlib metadata: '{}'", filename.display())
})
})?;
Ok(rustc_erase_owner!(buf))
}
fn get_dylib_metadata(&self, target: &Target, filename: &Path) -> Result<MetadataRef, String> {
unsafe {
let buf = path_to_c_string(filename);
let mb = llvm::LLVMRustCreateMemoryBufferWithContentsOfFile(buf.as_ptr())
.ok_or_else(|| format!("error reading library: '{}'", filename.display()))?;
let of =
ObjectFile::new(mb).map(|of| OwningRef::new(Box::new(of))).ok_or_else(|| {
format!("provided path not an object file: '{}'", filename.display())
})?;
let buf = of.try_map(|of| search_meta_section(of, target, filename))?;
Ok(rustc_erase_owner!(buf))
}
}
}
fn search_meta_section<'a>(
of: &'a ObjectFile,
target: &Target,
filename: &Path,
) -> Result<&'a [u8], String> {
unsafe {
let si = mk_section_iter(of.llof);
while llvm::LLVMIsSectionIteratorAtEnd(of.llof, si.llsi) == False {
let mut name_buf = None;
let name_len = llvm::LLVMRustGetSectionName(si.llsi, &mut name_buf);
let name = name_buf.map_or(
String::new(), // We got a NULL ptr, ignore `name_len`.
|buf| {
String::from_utf8(
slice::from_raw_parts(buf.as_ptr() as *const u8, name_len as usize)
.to_vec(),
)
.unwrap()
},
);
debug!("get_metadata_section: name {}", name);
if read_metadata_section_name(target) == name {
let cbuf = llvm::LLVMGetSectionContents(si.llsi);
let csz = llvm::LLVMGetSectionSize(si.llsi) as usize;
// The buffer is valid while the object file is around
let buf: &'a [u8] = slice::from_raw_parts(cbuf as *const u8, csz);
return Ok(buf);
}
llvm::LLVMMoveToNextSection(si.llsi);
}
}
Err(format!("metadata not found: '{}'", filename.display()))
}
pub fn metadata_section_name(target: &Target) -> &'static str {
// Historical note:
//
// When using link.exe it was seen that the section name `.note.rustc`
// was getting shortened to `.note.ru`, and according to the PE and COFF
// specification:
//
// > Executable images do not use a string table and do not support
// > section names longer than 8 characters
//
// https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
//
// As a result, we choose a slightly shorter name! As to why
// `.note.rustc` works on MinGW, that's another good question...
if target.options.is_like_osx { "__DATA,.rustc" } else { ".rustc" }
}
fn read_metadata_section_name(_target: &Target) -> &'static str {
".rustc"
}


@ -0,0 +1,84 @@
use crate::abi::FnAbi;
use crate::attributes;
use crate::base;
use crate::context::CodegenCx;
use crate::llvm;
use crate::type_of::LayoutLlvmExt;
use rustc_codegen_ssa::traits::*;
use rustc_hir::def_id::{DefId, LOCAL_CRATE};
pub use rustc_middle::mir::mono::MonoItem;
use rustc_middle::mir::mono::{Linkage, Visibility};
use rustc_middle::ty::layout::FnAbiExt;
use rustc_middle::ty::{self, Instance, TypeFoldable};
use rustc_target::abi::LayoutOf;
use tracing::debug;
impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn predefine_static(
&self,
def_id: DefId,
linkage: Linkage,
visibility: Visibility,
symbol_name: &str,
) {
let instance = Instance::mono(self.tcx, def_id);
let ty = instance.ty(self.tcx, ty::ParamEnv::reveal_all());
let llty = self.layout_of(ty).llvm_type(self);
let g = self.define_global(symbol_name, llty).unwrap_or_else(|| {
self.sess().span_fatal(
self.tcx.def_span(def_id),
&format!("symbol `{}` is already defined", symbol_name),
)
});
unsafe {
llvm::LLVMRustSetLinkage(g, base::linkage_to_llvm(linkage));
llvm::LLVMRustSetVisibility(g, base::visibility_to_llvm(visibility));
}
self.instances.borrow_mut().insert(instance, g);
}
fn predefine_fn(
&self,
instance: Instance<'tcx>,
linkage: Linkage,
visibility: Visibility,
symbol_name: &str,
) {
assert!(!instance.substs.needs_infer());
let fn_abi = FnAbi::of_instance(self, instance, &[]);
let lldecl = self.declare_fn(symbol_name, &fn_abi);
unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
base::set_link_section(lldecl, &attrs);
if linkage == Linkage::LinkOnceODR || linkage == Linkage::WeakODR {
llvm::SetUniqueComdat(self.llmod, lldecl);
}
// If we're compiling the compiler-builtins crate, e.g., the equivalent of
// compiler-rt, then we want to implicitly compile everything with hidden
// visibility as we're going to link this object all over the place but
// don't want the symbols to get exported.
if linkage != Linkage::Internal
&& linkage != Linkage::Private
&& self.tcx.is_compiler_builtins(LOCAL_CRATE)
{
unsafe {
llvm::LLVMRustSetVisibility(lldecl, llvm::Visibility::Hidden);
}
} else {
unsafe {
llvm::LLVMRustSetVisibility(lldecl, base::visibility_to_llvm(visibility));
}
}
debug!("predefine_fn: instance = {:?}", instance);
attributes::from_fn_attrs(self, lldecl, instance);
self.instances.borrow_mut().insert(instance, lldecl);
}
}


@ -0,0 +1,289 @@
pub use crate::llvm::Type;
use crate::abi::{FnAbiLlvmExt, LlvmType};
use crate::common;
use crate::context::CodegenCx;
use crate::llvm;
use crate::llvm::{Bool, False, True};
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_ast as ast;
use rustc_codegen_ssa::common::TypeKind;
use rustc_codegen_ssa::traits::*;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_middle::bug;
use rustc_middle::ty::layout::TyAndLayout;
use rustc_middle::ty::Ty;
use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
use rustc_target::abi::{AddressSpace, Align, Integer, Size};
use std::fmt;
use std::ptr;
use libc::c_uint;
impl PartialEq for Type {
fn eq(&self, other: &Self) -> bool {
ptr::eq(self, other)
}
}
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(
&llvm::build_string(|s| unsafe {
llvm::LLVMRustWriteTypeToString(self, s);
})
.expect("non-UTF8 type description from LLVM"),
)
}
}
impl CodegenCx<'ll, 'tcx> {
crate fn type_named_struct(&self, name: &str) -> &'ll Type {
let name = SmallCStr::new(name);
unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) }
}
crate fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) {
unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) }
}
crate fn type_void(&self) -> &'ll Type {
unsafe { llvm::LLVMVoidTypeInContext(self.llcx) }
}
crate fn type_metadata(&self) -> &'ll Type {
unsafe { llvm::LLVMRustMetadataTypeInContext(self.llcx) }
}
/// Creates an integer type with the given number of bits, e.g., i24
crate fn type_ix(&self, num_bits: u64) -> &'ll Type {
unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) }
}
crate fn type_x86_mmx(&self) -> &'ll Type {
unsafe { llvm::LLVMX86MMXTypeInContext(self.llcx) }
}
crate fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe { llvm::LLVMVectorType(ty, len as c_uint) }
}
crate fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(ty) as usize;
let mut args = Vec::with_capacity(n_args);
llvm::LLVMGetParamTypes(ty, args.as_mut_ptr());
args.set_len(n_args);
args
}
}
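// `bool` is stored as `i8` in memory; only immediate values use `i1`
// (see `immediate_llvm_type` in `type_of.rs`).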
crate fn type_bool(&self) -> &'ll Type {
self.type_i8()
}
crate fn type_int_from_ty(&self, t: ast::IntTy) -> &'ll Type {
match t {
ast::IntTy::Isize => self.type_isize(),
ast::IntTy::I8 => self.type_i8(),
ast::IntTy::I16 => self.type_i16(),
ast::IntTy::I32 => self.type_i32(),
ast::IntTy::I64 => self.type_i64(),
ast::IntTy::I128 => self.type_i128(),
}
}
crate fn type_uint_from_ty(&self, t: ast::UintTy) -> &'ll Type {
match t {
ast::UintTy::Usize => self.type_isize(),
ast::UintTy::U8 => self.type_i8(),
ast::UintTy::U16 => self.type_i16(),
ast::UintTy::U32 => self.type_i32(),
ast::UintTy::U64 => self.type_i64(),
ast::UintTy::U128 => self.type_i128(),
}
}
crate fn type_float_from_ty(&self, t: ast::FloatTy) -> &'ll Type {
match t {
ast::FloatTy::F32 => self.type_f32(),
ast::FloatTy::F64 => self.type_f64(),
}
}
crate fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = Integer::approximate_align(self, align);
self.type_from_integer(ity)
}
/// Returns an LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
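/// For example, 12 bytes of padding at 4-byte alignment becomes `[3 x i32]`.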
crate fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
let unit = Integer::approximate_align(self, align);
let size = size.bytes();
let unit_size = unit.size().bytes();
assert_eq!(size % unit_size, 0);
self.type_array(self.type_from_integer(unit), size / unit_size)
}
crate fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) }
}
crate fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe { llvm::LLVMRustArrayType(ty, len) }
}
}
impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn type_i1(&self) -> &'ll Type {
unsafe { llvm::LLVMInt1TypeInContext(self.llcx) }
}
fn type_i8(&self) -> &'ll Type {
unsafe { llvm::LLVMInt8TypeInContext(self.llcx) }
}
fn type_i16(&self) -> &'ll Type {
unsafe { llvm::LLVMInt16TypeInContext(self.llcx) }
}
fn type_i32(&self) -> &'ll Type {
unsafe { llvm::LLVMInt32TypeInContext(self.llcx) }
}
fn type_i64(&self) -> &'ll Type {
unsafe { llvm::LLVMInt64TypeInContext(self.llcx) }
}
fn type_i128(&self) -> &'ll Type {
unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) }
}
fn type_isize(&self) -> &'ll Type {
self.isize_ty
}
fn type_f32(&self) -> &'ll Type {
unsafe { llvm::LLVMFloatTypeInContext(self.llcx) }
}
fn type_f64(&self) -> &'ll Type {
unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) }
}
fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, False) }
}
fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type {
unsafe {
llvm::LLVMStructTypeInContext(
self.llcx,
els.as_ptr(),
els.len() as c_uint,
packed as Bool,
)
}
}
fn type_kind(&self, ty: &'ll Type) -> TypeKind {
unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
}
fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
assert_ne!(
self.type_kind(ty),
TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
);
ty.ptr_to(AddressSpace::DATA)
}
fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
ty.ptr_to(address_space)
}
fn element_type(&self, ty: &'ll Type) -> &'ll Type {
unsafe { llvm::LLVMGetElementType(ty) }
}
fn vector_length(&self, ty: &'ll Type) -> usize {
unsafe { llvm::LLVMGetVectorSize(ty) as usize }
}
fn float_width(&self, ty: &'ll Type) -> usize {
match self.type_kind(ty) {
TypeKind::Float => 32,
TypeKind::Double => 64,
TypeKind::X86_FP80 => 80,
TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
_ => bug!("llvm_float_width called on a non-float type"),
}
}
fn int_width(&self, ty: &'ll Type) -> u64 {
unsafe { llvm::LLVMGetIntTypeWidth(ty) as u64 }
}
fn val_ty(&self, v: &'ll Value) -> &'ll Type {
common::val_ty(v)
}
}
impl Type {
pub fn i8_llcx(llcx: &llvm::Context) -> &Type {
unsafe { llvm::LLVMInt8TypeInContext(llcx) }
}
// Creates an integer type with the given number of bits, e.g., i24
pub fn ix_llcx(llcx: &llvm::Context, num_bits: u64) -> &Type {
unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
}
pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
}
fn ptr_to(&self, address_space: AddressSpace) -> &Type {
unsafe { llvm::LLVMPointerType(&self, address_space.0) }
}
}
impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
layout.llvm_type(self)
}
fn immediate_backend_type(&self, layout: TyAndLayout<'tcx>) -> &'ll Type {
layout.immediate_llvm_type(self)
}
fn is_backend_immediate(&self, layout: TyAndLayout<'tcx>) -> bool {
layout.is_llvm_immediate()
}
fn is_backend_scalar_pair(&self, layout: TyAndLayout<'tcx>) -> bool {
layout.is_llvm_scalar_pair()
}
fn backend_field_index(&self, layout: TyAndLayout<'tcx>, index: usize) -> u64 {
layout.llvm_field_index(index)
}
fn scalar_pair_element_backend_type(
&self,
layout: TyAndLayout<'tcx>,
index: usize,
immediate: bool,
) -> &'ll Type {
layout.scalar_pair_element_llvm_type(self, index, immediate)
}
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
fn_abi.ptr_to_llvm_type(self)
}
fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
ty.llvm_type(self)
}
}


@ -0,0 +1,392 @@
use crate::abi::FnAbi;
use crate::common::*;
use crate::type_::Type;
use rustc_codegen_ssa::traits::*;
use rustc_middle::bug;
use rustc_middle::ty::layout::{FnAbiExt, TyAndLayout};
use rustc_middle::ty::print::obsolete::DefPathBasedNames;
use rustc_middle::ty::{self, Ty, TypeFoldable};
use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
use rustc_target::abi::{Int, Pointer, F32, F64};
use rustc_target::abi::{LayoutOf, PointeeInfo, Scalar, Size, TyAndLayoutMethods, Variants};
use tracing::debug;
use std::fmt::Write;
fn uncached_llvm_type<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
defer: &mut Option<(&'a Type, TyAndLayout<'tcx>)>,
) -> &'a Type {
match layout.abi {
Abi::Scalar(_) => bug!("handled elsewhere"),
Abi::Vector { ref element, count } => {
// LLVM has a separate type for 64-bit SIMD vectors on X86 called
// `x86_mmx` which is needed for some SIMD operations. As a bit of a
// hack (all SIMD definitions are super unstable anyway) we
// recognize any one-element SIMD vector as "this should be an
// x86_mmx" type. In general there shouldn't be a need for other
// one-element SIMD vectors, so it's assumed this won't clash with
// much else.
let use_x86_mmx = count == 1
&& layout.size.bits() == 64
&& (cx.sess().target.target.arch == "x86"
|| cx.sess().target.target.arch == "x86_64");
if use_x86_mmx {
return cx.type_x86_mmx();
} else {
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
return cx.type_vector(element, count);
}
}
Abi::ScalarPair(..) => {
return cx.type_struct(
&[
layout.scalar_pair_element_llvm_type(cx, 0, false),
layout.scalar_pair_element_llvm_type(cx, 1, false),
],
false,
);
}
Abi::Uninhabited | Abi::Aggregate { .. } => {}
}
let name = match layout.ty.kind {
ty::Closure(..) |
ty::Generator(..) |
ty::Adt(..) |
// FIXME(eddyb) producing readable type names for trait objects can result
// in problematically distinct types due to HRTB and subtyping (see #47638).
// ty::Dynamic(..) |
ty::Foreign(..) |
ty::Str => {
let mut name = String::with_capacity(32);
let printer = DefPathBasedNames::new(cx.tcx, true, true);
printer.push_type_name(layout.ty, &mut name, false);
if let (&ty::Adt(def, _), &Variants::Single { index })
= (&layout.ty.kind, &layout.variants)
{
if def.is_enum() && !def.variants.is_empty() {
write!(&mut name, "::{}", def.variants[index].ident).unwrap();
}
}
if let (&ty::Generator(_, _, _), &Variants::Single { index })
= (&layout.ty.kind, &layout.variants)
{
write!(&mut name, "::{}", ty::GeneratorSubsts::variant_name(index)).unwrap();
}
Some(name)
}
_ => None
};
match layout.fields {
FieldsShape::Primitive | FieldsShape::Union(_) => {
let fill = cx.type_padding_filler(layout.size, layout.align.abi);
let packed = false;
match name {
None => cx.type_struct(&[fill], packed),
Some(ref name) => {
let llty = cx.type_named_struct(name);
cx.set_struct_body(llty, &[fill], packed);
llty
}
}
}
FieldsShape::Array { count, .. } => cx.type_array(layout.field(cx, 0).llvm_type(cx), count),
FieldsShape::Arbitrary { .. } => match name {
None => {
let (llfields, packed) = struct_llfields(cx, layout);
cx.type_struct(&llfields, packed)
}
Some(ref name) => {
let llty = cx.type_named_struct(name);
*defer = Some((llty, layout));
llty
}
},
}
}
fn struct_llfields<'a, 'tcx>(
cx: &CodegenCx<'a, 'tcx>,
layout: TyAndLayout<'tcx>,
) -> (Vec<&'a Type>, bool) {
debug!("struct_llfields: {:#?}", layout);
let field_count = layout.fields.count();
let mut packed = false;
let mut offset = Size::ZERO;
let mut prev_effective_align = layout.align.abi;
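// Each field may be preceded by a padding filler, and a single trailing
// filler may follow the last field, hence the `1 + field_count * 2` capacity.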
let mut result: Vec<_> = Vec::with_capacity(1 + field_count * 2);
for i in layout.fields.index_by_increasing_offset() {
let target_offset = layout.fields.offset(i as usize);
let field = layout.field(cx, i);
let effective_field_align =
layout.align.abi.min(field.align.abi).restrict_for_offset(target_offset);
packed |= effective_field_align < field.align.abi;
debug!(
"struct_llfields: {}: {:?} offset: {:?} target_offset: {:?} \
effective_field_align: {}",
i,
field,
offset,
target_offset,
effective_field_align.bytes()
);
assert!(target_offset >= offset);
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.align_to(padding_align) + padding, target_offset);
result.push(cx.type_padding_filler(padding, padding_align));
debug!(" padding before: {:?}", padding);
result.push(field.llvm_type(cx));
offset = target_offset + field.size;
prev_effective_align = effective_field_align;
}
if !layout.is_unsized() && field_count > 0 {
if offset > layout.size {
bug!("layout: {:#?} stride: {:?} offset: {:?}", layout, layout.size, offset);
}
let padding = layout.size - offset;
let padding_align = prev_effective_align;
assert_eq!(offset.align_to(padding_align) + padding, layout.size);
debug!(
"struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size
);
result.push(cx.type_padding_filler(padding, padding_align));
assert_eq!(result.len(), 1 + field_count * 2);
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}", offset, layout.size);
}
(result, packed)
}
impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
pub fn align_of(&self, ty: Ty<'tcx>) -> Align {
self.layout_of(ty).align.abi
}
pub fn size_of(&self, ty: Ty<'tcx>) -> Size {
self.layout_of(ty).size
}
pub fn size_and_align_of(&self, ty: Ty<'tcx>) -> (Size, Align) {
let layout = self.layout_of(ty);
(layout.size, layout.align.abi)
}
}
pub trait LayoutLlvmExt<'tcx> {
fn is_llvm_immediate(&self) -> bool;
fn is_llvm_scalar_pair(&self) -> bool;
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
fn scalar_llvm_type_at<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
scalar: &Scalar,
offset: Size,
) -> &'a Type;
fn scalar_pair_element_llvm_type<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
index: usize,
immediate: bool,
) -> &'a Type;
fn llvm_field_index(&self, index: usize) -> u64;
fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo>;
}
impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
fn is_llvm_immediate(&self) -> bool {
match self.abi {
Abi::Scalar(_) | Abi::Vector { .. } => true,
Abi::ScalarPair(..) => false,
Abi::Uninhabited | Abi::Aggregate { .. } => self.is_zst(),
}
}
fn is_llvm_scalar_pair(&self) -> bool {
match self.abi {
Abi::ScalarPair(..) => true,
Abi::Uninhabited | Abi::Scalar(_) | Abi::Vector { .. } | Abi::Aggregate { .. } => false,
}
}
/// Gets the LLVM type corresponding to a Rust type, i.e., `rustc_middle::ty::Ty`.
/// The pointee type of the pointer in `PlaceRef` is always this type.
/// For sized types, it is also the right LLVM type for an `alloca`
/// containing a value of that type, and most immediates (except `bool`).
/// Unsized types, however, are represented by a "minimal unit", e.g.
/// `[T]` becomes `T`, while `str` and `Trait` turn into `i8` - this
/// is useful for indexing slices, as `&[T]`'s data pointer is `T*`.
/// If the type is an unsized struct, the regular layout is generated,
/// with the inner-most trailing unsized field using the "minimal unit"
/// of that field's type - this is useful for taking the address of
/// that field and ensuring the struct has the right alignment.
fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let Abi::Scalar(ref scalar) = self.abi {
// Use a different cache for scalars because pointers to DSTs
// can be either fat or thin (data pointers of fat pointers).
if let Some(&llty) = cx.scalar_lltypes.borrow().get(&self.ty) {
return llty;
}
let llty = match self.ty.kind {
ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
}
ty::Adt(def, _) if def.is_box() => {
cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
}
ty::FnPtr(sig) => cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[])),
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
};
cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
return llty;
}
// Check the cache.
let variant_index = match self.variants {
Variants::Single { index } => Some(index),
_ => None,
};
if let Some(&llty) = cx.lltypes.borrow().get(&(self.ty, variant_index)) {
return llty;
}
debug!("llvm_type({:#?})", self);
assert!(!self.ty.has_escaping_bound_vars(), "{:?} has escaping bound vars", self.ty);
// Make sure lifetimes are erased, to avoid generating distinct LLVM
// types for Rust types that only differ in the choice of lifetimes.
let normal_ty = cx.tcx.erase_regions(&self.ty);
let mut defer = None;
let llty = if self.ty != normal_ty {
let mut layout = cx.layout_of(normal_ty);
if let Some(v) = variant_index {
layout = layout.for_variant(cx, v);
}
layout.llvm_type(cx)
} else {
uncached_llvm_type(cx, *self, &mut defer)
};
debug!("--> mapped {:#?} to llty={:?}", self, llty);
cx.lltypes.borrow_mut().insert((self.ty, variant_index), llty);
if let Some((llty, layout)) = defer {
let (llfields, packed) = struct_llfields(cx, layout);
cx.set_struct_body(llty, &llfields, packed)
}
llty
}
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let Abi::Scalar(ref scalar) = self.abi {
if scalar.is_bool() {
return cx.type_i1();
}
}
self.llvm_type(cx)
}
fn scalar_llvm_type_at<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
scalar: &Scalar,
offset: Size,
) -> &'a Type {
match scalar.value {
Int(i, _) => cx.type_from_integer(i),
F32 => cx.type_f32(),
F64 => cx.type_f64(),
Pointer => {
// If we know the alignment, pick something better than i8.
let (pointee, address_space) =
if let Some(pointee) = self.pointee_info_at(cx, offset) {
(cx.type_pointee_for_align(pointee.align), pointee.address_space)
} else {
(cx.type_i8(), AddressSpace::DATA)
};
cx.type_ptr_to_ext(pointee, address_space)
}
}
}
fn scalar_pair_element_llvm_type<'a>(
&self,
cx: &CodegenCx<'a, 'tcx>,
index: usize,
immediate: bool,
) -> &'a Type {
// HACK(eddyb) special-case fat pointers until LLVM removes
// pointee types, to avoid bitcasting every `OperandRef::deref`.
match self.ty.kind {
ty::Ref(..) | ty::RawPtr(_) => {
return self.field(cx, index).llvm_type(cx);
}
ty::Adt(def, _) if def.is_box() => {
let ptr_ty = cx.tcx.mk_mut_ptr(self.ty.boxed_ty());
return cx.layout_of(ptr_ty).scalar_pair_element_llvm_type(cx, index, immediate);
}
_ => {}
}
let (a, b) = match self.abi {
Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
};
let scalar = [a, b][index];
// Make sure to return the same type `immediate_llvm_type` would when
// dealing with an immediate pair. This means that `(bool, bool)` is
// effectively represented as `{i8, i8}` in memory and two `i1`s as an
// immediate, just like `bool` is typically `i8` in memory and only `i1`
// when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
if immediate && scalar.is_bool() {
return cx.type_i1();
}
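// The second element starts at the first element's size rounded up to the
// second element's alignment.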
let offset =
if index == 0 { Size::ZERO } else { a.value.size(cx).align_to(b.value.align(cx).abi) };
self.scalar_llvm_type_at(cx, scalar, offset)
}
fn llvm_field_index(&self, index: usize) -> u64 {
match self.abi {
Abi::Scalar(_) | Abi::ScalarPair(..) => {
bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
}
_ => {}
}
match self.fields {
FieldsShape::Primitive | FieldsShape::Union(_) => {
bug!("TyAndLayout::llvm_field_index({:?}): not applicable", self)
}
FieldsShape::Array { .. } => index as u64,
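// `struct_llfields` interleaves padding fillers with the real fields, so the
// field at memory index `i` lives at LLVM field index `1 + 2 * i`.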
FieldsShape::Arbitrary { .. } => 1 + (self.fields.memory_index(index) as u64) * 2,
}
}
fn pointee_info_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, offset: Size) -> Option<PointeeInfo> {
if let Some(&pointee) = cx.pointee_infos.borrow().get(&(self.ty, offset)) {
return pointee;
}
let result = Ty::pointee_info_at(*self, cx, offset);
cx.pointee_infos.borrow_mut().insert((self.ty, offset), result);
result
}
}


@ -0,0 +1,206 @@
use crate::builder::Builder;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
use crate::value::Value;
use rustc_codegen_ssa::mir::operand::OperandRef;
use rustc_codegen_ssa::{
common::IntPredicate,
traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
};
use rustc_middle::ty::layout::HasTyCtxt;
use rustc_middle::ty::Ty;
use rustc_target::abi::{Align, HasDataLayout, LayoutOf, Size};
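/// Rounds `addr` up to the next multiple of `align`, i.e. computes
/// `(addr + align - 1) & -align`; e.g. an address of 0x1003 with 8-byte
/// alignment rounds up to 0x1008.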
#[allow(dead_code)]
fn round_pointer_up_to_alignment(
bx: &mut Builder<'a, 'll, 'tcx>,
addr: &'ll Value,
align: Align,
ptr_ty: &'ll Type,
) -> &'ll Value {
let mut ptr_as_int = bx.ptrtoint(addr, bx.cx().type_isize());
ptr_as_int = bx.add(ptr_as_int, bx.cx().const_i32(align.bytes() as i32 - 1));
ptr_as_int = bx.and(ptr_as_int, bx.cx().const_i32(-(align.bytes() as i32)));
bx.inttoptr(ptr_as_int, ptr_ty)
}
fn emit_direct_ptr_va_arg(
bx: &mut Builder<'a, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
llty: &'ll Type,
size: Size,
align: Align,
slot_size: Align,
allow_higher_align: bool,
) -> (&'ll Value, Align) {
let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
bx.bitcast(list.immediate(), va_list_ptr_ty)
} else {
list.immediate()
};
let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
let (addr, addr_align) = if allow_higher_align && align > slot_size {
(round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
} else {
(ptr, slot_size)
};
let aligned_size = size.align_to(slot_size).bytes() as i32;
let full_direct_size = bx.cx().const_i32(aligned_size);
let next = bx.inbounds_gep(addr, &[full_direct_size]);
bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
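// On big-endian targets a value narrower than its slot occupies the slot's
// last `size` bytes (the leading bytes hold the widened value's high-order
// part), so bump the pointer past those leading bytes.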
if size.bytes() < slot_size.bytes() && &*bx.tcx().sess.target.target.target_endian == "big" {
let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
let adjusted = bx.inbounds_gep(addr, &[adjusted_size]);
(bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
} else {
(bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
}
}
fn emit_ptr_va_arg(
bx: &mut Builder<'a, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
target_ty: Ty<'tcx>,
indirect: bool,
slot_size: Align,
allow_higher_align: bool,
) -> &'ll Value {
let layout = bx.cx.layout_of(target_ty);
let (llty, size, align) = if indirect {
(
bx.cx.layout_of(bx.cx.tcx.mk_imm_ptr(target_ty)).llvm_type(bx.cx),
bx.cx.data_layout().pointer_size,
bx.cx.data_layout().pointer_align,
)
} else {
(layout.llvm_type(bx.cx), layout.size, layout.align)
};
let (addr, addr_align) =
emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
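// When the argument is passed indirectly, the va slot holds a pointer to the
// value, so load the pointer first and then the value it points to.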
if indirect {
let tmp_ret = bx.load(addr, addr_align);
bx.load(tmp_ret, align.abi)
} else {
bx.load(addr, addr_align)
}
}
fn emit_aapcs_va_arg(
bx: &mut Builder<'a, 'll, 'tcx>,
list: OperandRef<'tcx, &'ll Value>,
target_ty: Ty<'tcx>,
) -> &'ll Value {
// Implementation of the AAPCS64 calling convention for va_args see
// https://github.com/ARM-software/abi-aa/blob/master/aapcs64/aapcs64.rst
let va_list_addr = list.immediate();
let layout = bx.cx.layout_of(target_ty);
let mut maybe_reg = bx.build_sibling_block("va_arg.maybe_reg");
let mut in_reg = bx.build_sibling_block("va_arg.in_reg");
let mut on_stack = bx.build_sibling_block("va_arg.on_stack");
let mut end = bx.build_sibling_block("va_arg.end");
let zero = bx.const_i32(0);
let offset_align = Align::from_bytes(4).unwrap();
assert!(&*bx.tcx().sess.target.target.target_endian == "little");
let gr_type = target_ty.is_any_ptr() || target_ty.is_integral();
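// The AAPCS64 `va_list` is `{ __stack, __gr_top, __vr_top, __gr_offs, __vr_offs }`;
// with the padding fillers rustc interleaves into the struct, those fields land at
// LLVM field indices 1, 3, 5, 7 and 9, which is what the `struct_gep` indices
// below refer to.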
let (reg_off, reg_top_index, slot_size) = if gr_type {
let gr_offs = bx.struct_gep(va_list_addr, 7);
let nreg = (layout.size.bytes() + 7) / 8;
(gr_offs, 3, nreg * 8)
} else {
let vr_off = bx.struct_gep(va_list_addr, 9);
let nreg = (layout.size.bytes() + 15) / 16;
(vr_off, 5, nreg * 16)
};
// if the offset >= 0 then the value will be on the stack
let mut reg_off_v = bx.load(reg_off, offset_align);
let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
// The value at this point might be in a register, but there is a chance that
// it could be on the stack so we have to update the offset and then check
// the offset again.
if gr_type && layout.align.abi.bytes() > 8 {
reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(15));
reg_off_v = maybe_reg.and(reg_off_v, bx.const_i32(-16));
}
let new_reg_off_v = maybe_reg.add(reg_off_v, bx.const_i32(slot_size as i32));
maybe_reg.store(new_reg_off_v, reg_off, offset_align);
// Check to see if we have overflowed the registers as a result of this.
// If we have then we need to use the stack for this value
let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
let top = in_reg.struct_gep(va_list_addr, reg_top_index);
let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
// reg_value = *(@top + reg_off_v);
let top = in_reg.gep(top, &[reg_off_v]);
let top = in_reg.bitcast(top, bx.cx.type_ptr_to(layout.llvm_type(bx)));
let reg_value = in_reg.load(top, layout.align.abi);
in_reg.br(&end.llbb());
// On Stack block
let stack_value =
emit_ptr_va_arg(&mut on_stack, list, target_ty, false, Align::from_bytes(8).unwrap(), true);
on_stack.br(&end.llbb());
let val = end.phi(
layout.immediate_llvm_type(bx),
&[reg_value, stack_value],
&[&in_reg.llbb(), &on_stack.llbb()],
);
*bx = end;
val
}
pub(super) fn emit_va_arg(
bx: &mut Builder<'a, 'll, 'tcx>,
addr: OperandRef<'tcx, &'ll Value>,
target_ty: Ty<'tcx>,
) -> &'ll Value {
// Determine the va_arg implementation to use. The LLVM va_arg instruction
// is lacking in some instances, so we should only use it as a fallback.
let target = &bx.cx.tcx.sess.target.target;
let arch = &bx.cx.tcx.sess.target.target.arch;
match (&**arch, target.options.is_like_windows) {
// Windows x86
("x86", true) => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), false)
}
// Generic x86
("x86", _) => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(4).unwrap(), true)
}
// Windows AArch64
("aarch64", true) => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), false)
}
// iOS AArch64
("aarch64", _) if target.target_os == "ios" => {
emit_ptr_va_arg(bx, addr, target_ty, false, Align::from_bytes(8).unwrap(), true)
}
("aarch64", _) => emit_aapcs_va_arg(bx, addr, target_ty),
// Windows x86_64
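// The Win64 ABI passes by-value arguments larger than 8 bytes, or whose size
// is not a power of two, by reference, which is what `indirect` mirrors here.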
("x86_64", true) => {
let target_ty_size = bx.cx.size_of(target_ty).bytes();
let indirect: bool = target_ty_size > 8 || !target_ty_size.is_power_of_two();
emit_ptr_va_arg(bx, addr, target_ty, indirect, Align::from_bytes(8).unwrap(), false)
}
// For all other architecture/OS combinations fall back to using
// the LLVM va_arg instruction.
// https://llvm.org/docs/LangRef.html#va-arg-instruction
_ => bx.va_arg(addr.immediate(), bx.cx.layout_of(target_ty).llvm_type(bx.cx)),
}
}


@ -0,0 +1,32 @@
pub use crate::llvm::Value;
use crate::llvm;
use std::fmt;
use std::hash::{Hash, Hasher};
use std::ptr;
impl PartialEq for Value {
fn eq(&self, other: &Self) -> bool {
ptr::eq(self, other)
}
}
impl Eq for Value {}
impl Hash for Value {
fn hash<H: Hasher>(&self, hasher: &mut H) {
(self as *const Self).hash(hasher);
}
}
impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(
&llvm::build_string(|s| unsafe {
llvm::LLVMRustWriteValueToString(self, s);
})
.expect("non-UTF8 value description from LLVM"),
)
}
}