
Generalized mir::codegen_mir (and all subsequent functions)

Denis Merigoux 2018-09-20 15:47:22 +02:00 committed by Eduard-Mihai Burtescu
parent cbe31a4229
commit 6a993fe353
41 changed files with 1778 additions and 1390 deletions
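The pattern repeated across the files below: concrete LLVM handles (`&'ll Value`, `&'ll BasicBlock`, `&'ll DIScope`) become associated types on backend traits, and free functions that took `&Builder<'a, 'll, 'tcx>` become either generic over any `Bx: BuilderMethods` or trait methods with a `&self` receiver. A minimal self-contained sketch of that shape, with toy trait bodies standing in for the real ones (all names here are illustrative, not the commit's exact definitions):

    // Toy stand-ins: backend handles become associated types, so no LLVM
    // type is named outside the backend.
    trait BackendTypes {
        type Value: Copy;
    }

    trait BuilderMethods<'a, 'tcx: 'a>: BackendTypes {
        fn is_bool(&self, v: Self::Value) -> bool;
        fn zext_to_byte(&self, v: Self::Value) -> Self::Value;
    }

    // Mirrors the `from_immediate` rewrite in base.rs below: one generic
    // definition instead of one hard-wired to `&'ll Value`.
    fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
        bx: &Bx,
        val: Bx::Value,
    ) -> Bx::Value {
        if bx.is_bool(val) { bx.zext_to_byte(val) } else { val }
    }

    // A throwaway backend whose values are plain integers, to exercise it.
    struct ToyBuilder;

    impl BackendTypes for ToyBuilder {
        type Value = i64;
    }

    impl<'a, 'tcx: 'a> BuilderMethods<'a, 'tcx> for ToyBuilder {
        fn is_bool(&self, v: i64) -> bool { v == 0 || v == 1 }
        fn zext_to_byte(&self, v: i64) -> i64 { v & 0xff }
    }

    fn main() {
        let bx = ToyBuilder;
        assert_eq!(from_immediate(&bx, 1), 1);
        assert_eq!(from_immediate(&bx, 300), 300); // non-bool passes through
    }

With this shape, `mir::codegen_mir` and its helpers only ever name `Bx::Value`, so a second backend can reuse them by supplying its own impls.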

View file

@@ -16,11 +16,12 @@ use mir::operand::OperandValue;
use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use rustc_target::abi::call::ArgType;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods};
use interfaces::*;
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
use rustc::ty::{self, Ty, Instance};
use rustc::ty::layout;
use libc::c_uint;
@@ -280,6 +281,27 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}
}
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn store_fn_arg(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
) {
ty.store_fn_arg(self, idx, dst)
}
fn store_arg_ty(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
) {
ty.store(self, val, dst)
}
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.memory_ty(self.cx())
}
}
pub trait FnTypeExt<'tcx> {
fn of_instance(cx: &CodegenCx<'ll, 'tcx>, instance: &ty::Instance<'tcx>) -> Self;
fn new(cx: &CodegenCx<'ll, 'tcx>,
@@ -790,3 +812,29 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
}
}
}
impl AbiMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn new_fn_type(&self, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>> {
FnType::new(&self, sig, extra_args)
}
fn new_vtable(
&self,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]
) -> FnType<'tcx, Ty<'tcx>> {
FnType::new_vtable(&self, sig, extra_args)
}
fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>> {
FnType::of_instance(&self, instance)
}
}
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn apply_attrs_callsite(
&self,
ty: &FnType<'tcx, Ty<'tcx>>,
callsite: Self::Value
) {
ty.apply_attrs_callsite(self, callsite)
}
}
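The `ArgTypeMethods`/`AbiMethods` impls above are pure delegation: the LLVM-specific logic stays in the existing `FnType`/`ArgType` extension methods, and the new trait methods just forward to them from the context or builder. The shape in miniature (a hypothetical toy `FnType`, not the real one):

    // Stand-in for the real ABI classification data.
    struct FnType {
        arg_count: usize,
    }

    impl FnType {
        fn new(arg_count: usize) -> FnType {
            FnType { arg_count }
        }
    }

    trait AbiMethods {
        fn new_fn_type(&self, arg_count: usize) -> FnType;
    }

    struct CodegenCx;

    impl AbiMethods for CodegenCx {
        // Pure forwarding, like `AbiMethods::new_fn_type` above: the logic
        // stays in `FnType::new`; the trait only re-exposes it.
        fn new_fn_type(&self, arg_count: usize) -> FnType {
            FnType::new(arg_count)
        }
    }

    fn main() {
        let cx = CodegenCx;
        assert_eq!(cx.new_fn_type(2).arg_count, 2);
    }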

View file

@@ -15,7 +15,7 @@ use builder::Builder;
use value::Value;
use rustc::hir;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
@@ -23,9 +23,11 @@ use mir::operand::OperandValue;
use std::ffi::CString;
use libc::{c_uint, c_char};
impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
// Take an inline assembly expression and splat it out via LLVM
pub fn codegen_inline_asm(
bx: &Builder<'a, 'll, 'tcx>,
fn codegen_inline_asm(
&self,
ia: &hir::InlineAsm,
outputs: Vec<PlaceRef<'tcx, &'ll Value>>,
mut inputs: Vec<&'ll Value>
@@ -37,13 +39,13 @@ pub fn codegen_inline_asm(
let mut indirect_outputs = vec![];
for (i, (out, &place)) in ia.outputs.iter().zip(&outputs).enumerate() {
if out.is_rw {
inputs.push(bx.load_operand(place).immediate());
inputs.push(self.load_operand(place).immediate());
ext_constraints.push(i.to_string());
}
if out.is_indirect {
indirect_outputs.push(bx.load_operand(place).immediate());
indirect_outputs.push(self.load_operand(place).immediate());
} else {
output_types.push(place.layout.llvm_type(bx.cx()));
output_types.push(place.layout.llvm_type(self.cx()));
}
}
if !indirect_outputs.is_empty() {
@@ -56,7 +58,7 @@ pub fn codegen_inline_asm(
// Default per-arch clobbers
// Basically what clang does
let arch_clobbers = match &bx.sess().target.target.arch[..] {
let arch_clobbers = match &self.cx().sess().target.target.arch[..] {
"x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
"mips" | "mips64" => vec!["~{$1}"],
_ => Vec::new()
@@ -75,14 +77,14 @@ pub fn codegen_inline_asm(
// Depending on how many outputs we have, the return type is different
let num_outputs = output_types.len();
let output_type = match num_outputs {
0 => bx.cx().type_void(),
0 => self.cx().type_void(),
1 => output_types[0],
_ => bx.cx().type_struct(&output_types, false)
_ => self.cx().type_struct(&output_types, false)
};
let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
let constraint_cstr = CString::new(all_constraints).unwrap();
let r = bx.inline_asm_call(
let r = self.inline_asm_call(
asm.as_ptr(),
constraint_cstr.as_ptr(),
&inputs,
@@ -99,30 +101,32 @@ pub fn codegen_inline_asm(
// Again, based on how many outputs we have
let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
for (i, (_, &place)) in outputs.enumerate() {
let v = if num_outputs == 1 { r } else { bx.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(bx, place);
let v = if num_outputs == 1 { r } else { self.extract_value(r, i as u64) };
OperandValue::Immediate(v).store(self, place);
}
// Store mark in a metadata node so we can map LLVM errors
// back to source locations. See #17552.
unsafe {
let key = "srcloc";
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1));
}
return true;
true
}
}
pub fn codegen_global_asm<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
ga: &hir::GlobalAsm) {
impl AsmMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn codegen_global_asm(&self, ga: &hir::GlobalAsm) {
let asm = CString::new(ga.asm.as_str().as_bytes()).unwrap();
unsafe {
llvm::LLVMRustAppendModuleInlineAsm(cx.llmod, asm.as_ptr());
llvm::LLVMRustAppendModuleInlineAsm(self.llmod, asm.as_ptr());
}
}
}

View file

@@ -21,6 +21,7 @@ use rustc::ty::query::Providers;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::fx::FxHashMap;
use rustc_target::spec::PanicStrategy;
use interfaces::*;
use attributes;
use llvm::{self, Attribute};

View file

@@ -57,7 +57,6 @@ use rustc_mir::monomorphize::item::DefPathBasedNames;
use common::{self, IntPredicate, RealPredicate, TypeKind};
use context::CodegenCx;
use debuginfo;
use declare;
use meth;
use mir;
use monomorphize::Instance;
@@ -392,15 +391,18 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc
}
pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) {
pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) {
let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
bx.call(assume_intrinsic, &[val], None);
}
pub fn from_immediate<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx: &Builder,
val: Builder::Value
) -> Builder::Value {
pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
bx.zext(val, bx.cx().type_i8())
} else {
@@ -447,7 +449,7 @@ pub fn memcpy_ty<'a, 'tcx: 'a, Builder: BuilderMethods<'a, 'tcx>>(
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
pub fn codegen_instance(cx: &CodegenCx<'_, 'tcx>, instance: Instance<'tcx>) {
let _s = if cx.sess().codegen_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(cx.tcx, true, true)
@@ -471,7 +473,7 @@ pub fn codegen_instance<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>, instance: Instance<'tcx>) {
cx.stats.borrow_mut().n_closures += 1;
let mir = cx.tcx.instance_mir(instance.def);
mir::codegen_mir(cx, lldecl, &mir, instance, sig);
mir::codegen_mir::<Builder>(cx, lldecl, &mir, instance, sig);
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
@@ -532,7 +534,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
&main_ret_ty.no_bound_vars().unwrap(),
);
if declare::get_defined_value(cx, "main").is_some() {
if cx.get_defined_value("main").is_some() {
// FIXME: We should be smart and show a better diagnostic here.
cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
@@ -540,7 +542,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
cx.sess().abort_if_errors();
bug!();
}
let llfn = declare::declare_cfn(cx, "main", llfty);
let llfn = cx.declare_cfn("main", llfty);
// `main` should respect same config for frame pointer elimination as rest of code
attributes::set_frame_pointer_elimination(cx, llfn);

View file

@@ -18,7 +18,7 @@ use value::Value;
use libc::{c_uint, c_char};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc::session::{config, Session};
use rustc::session::config;
use rustc_data_structures::small_c_str::SmallCStr;
use interfaces::*;
use syntax;
@@ -59,11 +59,13 @@ bitflags! {
}
}
impl BackendTypes for Builder<'_, 'll, '_> {
type Value = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Context = &'ll llvm::Context;
impl BackendTypes for Builder<'_, 'll, 'tcx> {
type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;
type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type;
type Context = <CodegenCx<'ll, 'tcx> as BackendTypes>::Context;
type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope;
}
impl ty::layout::HasDataLayout for Builder<'_, '_, '_> {
@@ -126,10 +128,6 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
Builder::new_block(self.cx, self.llfn(), name)
}
fn sess(&self) -> &Session {
self.cx.sess()
}
fn llfn(&self) -> &'ll Value {
unsafe {
llvm::LLVMGetBasicBlockParent(self.llbb())
@@ -223,7 +221,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args: &[&'ll Value],
then: &'ll BasicBlock,
catch: &'ll BasicBlock,
bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
funclet: Option<&common::Funclet<&'ll Value>>) -> &'ll Value {
self.count_insn("invoke");
debug!("Invoke {:?} with args ({:?})",
@@ -231,6 +229,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args);
let args = self.check_call("invoke", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.map(OperandBundleDef::from_generic);
let bundle = bundle.as_ref().map(|b| &*b.raw);
@@ -610,7 +609,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
fn range_metadata(&self, load: &'ll Value, range: Range<u128>) {
if self.sess().target.target.arch == "amdgpu" {
if self.cx().sess().target.target.arch == "amdgpu" {
// amdgpu/LLVM does something weird and thinks a i64 value is
// split into a v2i32, halving the bitwidth LLVM expects,
// tripping an assertion. So, for now, just disable this
@@ -920,7 +919,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
align: Align,
flags: MemFlags,
) {
let ptr_width = &self.sess().target.target.target_pointer_width;
let ptr_width = &self.cx().sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
let ptr = self.pointercast(ptr, self.cx().type_i8p());
@@ -1362,7 +1361,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
fn call(&self, llfn: &'ll Value, args: &[&'ll Value],
bundle: Option<&common::OperandBundleDef<&'ll Value>>) -> &'ll Value {
funclet: Option<&common::Funclet<&'ll Value>>) -> &'ll Value {
self.count_insn("call");
debug!("Call {:?} with args ({:?})",
@@ -1370,6 +1369,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
args);
let args = self.check_call("call", llfn, args);
let bundle = funclet.map(|funclet| funclet.bundle());
let bundle = bundle.map(OperandBundleDef::from_generic);
let bundle = bundle.as_ref().map(|b| &*b.raw);
@@ -1399,7 +1399,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
}
}
fn cx(&self) -> &'a CodegenCx<'ll, 'tcx> {
fn cx(&self) -> &CodegenCx<'ll, 'tcx> {
self.cx
}
fn delete_basic_block(&self, bb: &'ll BasicBlock) {
unsafe {
llvm::LLVMDeleteBasicBlock(bb);
}
}
fn do_not_inline(&self, llret: &'ll Value) {
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
}
}
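Note the `call`/`invoke` signature change in this file: callers now pass `Option<&Funclet<V>>` and the builder derives the LLVM operand bundle internally, so the backend-specific conversion happens in exactly one place instead of at every call site. A simplified sketch of that flow (integer stand-ins for LLVM values):

    #[derive(Clone, Copy, Debug)]
    struct OperandBundleDef<V> {
        name: &'static str,
        val: V,
    }

    struct Funclet<V> {
        cleanuppad: V,
        operand: OperandBundleDef<V>,
    }

    impl<V: Copy> Funclet<V> {
        fn new(cleanuppad: V) -> Self {
            Funclet { cleanuppad, operand: OperandBundleDef { name: "funclet", val: cleanuppad } }
        }
        fn bundle(&self) -> &OperandBundleDef<V> {
            &self.operand
        }
    }

    // Like the new `Builder::call`: take the funclet and derive the bundle
    // here, instead of making every caller do the conversion.
    fn call<V: Copy + std::fmt::Debug>(llfn: V, funclet: Option<&Funclet<V>>) {
        let bundle = funclet.map(|funclet| funclet.bundle());
        println!("call {:?}, bundle = {:?}", llfn, bundle);
    }

    fn main() {
        let funclet = Funclet::new(7_i32);
        println!("cleanuppad = {}", funclet.cleanuppad);
        call(1_i32, Some(&funclet));
        call(2_i32, None);
    }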

View file

@@ -15,18 +15,15 @@
//! closure.
use attributes;
use common::{CodegenCx};
use consts;
use declare;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
use context::CodegenCx;
use value::Value;
use interfaces::*;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, TypeFoldable};
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc::ty::subst::Substs;
/// Codegens a reference to a fn/method item, monomorphizing and
@@ -40,7 +37,7 @@ pub fn get_fn(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll Value {
let tcx = cx.tcx;
let tcx = cx.tcx();
debug!("get_fn(instance={:?})", instance);
@@ -48,8 +45,8 @@ pub fn get_fn(
assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types());
let sig = instance.fn_sig(cx.tcx);
if let Some(&llfn) = cx.instances.borrow().get(&instance) {
let sig = instance.fn_sig(cx.tcx());
if let Some(&llfn) = cx.instances().borrow().get(&instance) {
return llfn;
}
@@ -58,9 +55,9 @@ pub fn get_fn(
// Create a fn pointer with the substituted signature.
let fn_ptr_ty = tcx.mk_fn_ptr(sig);
let llptrty = cx.layout_of(fn_ptr_ty).llvm_type(cx);
let llptrty = cx.backend_type(cx.layout_of(fn_ptr_ty));
let llfn = if let Some(llfn) = declare::get_declared_value(cx, &sym) {
let llfn = if let Some(llfn) = cx.get_declared_value(&sym) {
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
@@ -86,13 +83,13 @@ pub fn get_fn(
// other weird situations. Annoying.
if cx.val_ty(llfn) != llptrty {
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
consts::ptrcast(llfn, llptrty)
cx.static_ptrcast(llfn, llptrty)
} else {
debug!("get_fn: not casting pointer!");
llfn
}
} else {
let llfn = declare::declare_fn(cx, &sym, sig);
let llfn = cx.declare_fn(&sym, sig);
assert_eq!(cx.val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
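Beyond the renames, `get_fn` now reads context state only through trait getters (`cx.tcx()`, `cx.instances()`) rather than fields, which is what lets it keep working once the concrete `CodegenCx` is hidden behind a trait bound. A toy version of that access pattern (assumed simplified types; the real cache maps `Instance<'tcx>` to backend values):

    use std::cell::RefCell;
    use std::collections::HashMap;

    trait MiscMethods {
        // Trait getter replacing direct field access
        // (`cx.instances` -> `cx.instances()`).
        fn instances(&self) -> &RefCell<HashMap<String, u32>>;
    }

    struct CodegenCx {
        instances: RefCell<HashMap<String, u32>>,
    }

    impl MiscMethods for CodegenCx {
        fn instances(&self) -> &RefCell<HashMap<String, u32>> {
            &self.instances
        }
    }

    // Analogous to the rewritten `get_fn`: generic code consults the cache
    // through the trait, never touching concrete fields.
    fn cached_fn<Cx: MiscMethods>(cx: &Cx, sym: &str) -> Option<u32> {
        cx.instances().borrow().get(sym).copied()
    }

    fn main() {
        let cx = CodegenCx { instances: RefCell::new(HashMap::new()) };
        cx.instances().borrow_mut().insert("main".to_string(), 1);
        assert_eq!(cached_fn(&cx, "main"), Some(1));
    }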

View file

@@ -17,21 +17,23 @@ use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
use abi;
use base;
use builder::Builder;
use consts;
use declare;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::{BackendTypes, BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
use rustc::ty::layout::{HasDataLayout, LayoutOf, self, TyLayout, Size};
use rustc::mir::interpret::{Scalar, AllocType, Allocation};
use rustc::hir;
use mir::constant::const_alloc_to_llvm;
use mir::place::PlaceRef;
use libc::{c_uint, c_char};
use syntax::symbol::LocalInternedString;
use syntax::ast::Mutability;
use syntax_pos::{Span, DUMMY_SP};
pub use context::CodegenCx;
@@ -48,13 +50,13 @@ pub fn type_is_freeze<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.is_freeze(tcx, ty::ParamEnv::reveal_all(), DUMMY_SP)
}
pub struct OperandBundleDef<'a, Value> {
pub struct OperandBundleDef<'a, V> {
pub name: &'a str,
pub val: Value
pub val: V
}
impl<'a, Value> OperandBundleDef<'a, Value> {
pub fn new(name: &'a str, val: Value) -> Self {
impl<'a, V> OperandBundleDef<'a, V> {
pub fn new(name: &'a str, val: V) -> Self {
OperandBundleDef {
name,
val
@@ -190,24 +192,24 @@ pub enum TypeKind {
/// When inside of a landing pad, each function call in LLVM IR needs to be
/// annotated with which landing pad it's a part of. This is accomplished via
/// the `OperandBundleDef` value created for MSVC landing pads.
pub struct Funclet<'ll> {
cleanuppad: &'ll Value,
operand: OperandBundleDef<'ll, &'ll Value>,
pub struct Funclet<'a, V> {
cleanuppad: V,
operand: OperandBundleDef<'a, V>,
}
impl Funclet<'ll> {
pub fn new(cleanuppad: &'ll Value) -> Self {
impl<'a, V: CodegenObject> Funclet<'a, V> {
pub fn new(cleanuppad: V) -> Self {
Funclet {
cleanuppad,
operand: OperandBundleDef::new("funclet", cleanuppad),
}
}
pub fn cleanuppad(&self) -> &'ll Value {
pub fn cleanuppad(&self) -> V {
self.cleanuppad
}
pub fn bundle(&self) -> &OperandBundleDef<'ll, &'ll Value> {
pub fn bundle(&self) -> &OperandBundleDef<'a, V> {
&self.operand
}
}
@@ -217,6 +219,8 @@ impl BackendTypes for CodegenCx<'ll, 'tcx> {
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Context = &'ll llvm::Context;
type DIScope = &'ll llvm::debuginfo::DIScope;
}
impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -300,7 +304,7 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
s.len() as c_uint,
!null_terminated as Bool);
let sym = self.generate_local_symbol_name("str");
let g = declare::define_global(&self, &sym[..], self.val_ty(sc)).unwrap_or_else(||{
let g = self.define_global(&sym[..], self.val_ty(sc)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
@@ -415,6 +419,79 @@ impl ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}
}
fn scalar_to_backend(
&self,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(self).bits() };
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(self).bytes());
self.const_undef(self.type_ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(self).bytes());
let llval = self.const_uint_big(self.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
self.static_bitcast(llval, llty)
}
},
Scalar::Ptr(ptr) => {
let alloc_type = self.tcx.alloc_map.lock().get(ptr.alloc_id);
let base_addr = match alloc_type {
Some(AllocType::Memory(alloc)) => {
let init = const_alloc_to_llvm(self, alloc);
if alloc.mutability == Mutability::Mutable {
self.static_addr_of_mut(init, alloc.align, None)
} else {
self.static_addr_of(init, alloc.align, None)
}
}
Some(AllocType::Function(fn_instance)) => {
self.get_fn(fn_instance)
}
Some(AllocType::Static(def_id)) => {
assert!(self.tcx.is_static(def_id).is_some());
self.get_static(def_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),
&self.const_usize(ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
self.static_bitcast(llval, llty)
}
}
}
}
fn from_const_alloc(
&self,
layout: TyLayout<'tcx>,
alloc: &Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(self, alloc);
let base_addr = self.static_addr_of(init, layout.align, None);
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
self.static_bitcast(base_addr, self.type_i8p()),
&self.const_usize(offset.bytes()),
1,
)};
let llval = self.static_bitcast(llval, self.type_ptr_to(layout.llvm_type(self)));
PlaceRef::new_sized(llval, layout, alloc.align)
}
}
pub fn val_ty(v: &'ll Value) -> &'ll Type {
@@ -466,20 +543,23 @@ pub fn langcall(tcx: TyCtxt,
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift(
bx: &Builder<'a, 'll, 'tcx>,
lhs: &'ll Value,
rhs: &'ll Value
) -> &'ll Value {
pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
}
pub fn build_unchecked_rshift(
bx: &Builder<'a, 'll, 'tcx>, lhs_t: Ty<'tcx>, lhs: &'ll Value, rhs: &'ll Value
) -> &'ll Value {
pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
@@ -491,26 +571,29 @@ pub fn build_unchecked_rshift(
}
}
fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value {
fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
rhs: Bx::Value
) -> Bx::Value {
let rhs_llty = bx.cx().val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
pub fn shift_mask_val(
bx: &Builder<'a, 'll, 'tcx>,
llty: &'ll Type,
mask_llty: &'ll Type,
pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
llty: Bx::Type,
mask_llty: Bx::Type,
invert: bool
) -> &'ll Value {
) -> Bx::Value {
let kind = bx.cx().type_kind(llty);
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = bx.cx().int_width(llty) - 1;
if invert {
bx.cx.const_int(mask_llty, !val as i64)
bx.cx().const_int(mask_llty, !val as i64)
} else {
bx.cx.const_uint(mask_llty, val)
bx.cx().const_uint(mask_llty, val)
}
},
TypeKind::Vector => {

View file

@@ -16,7 +16,6 @@ use debuginfo;
use base;
use monomorphize::MonoItem;
use common::CodegenCx;
use declare;
use monomorphize::Instance;
use syntax_pos::Span;
use syntax_pos::symbol::LocalInternedString;
@ -24,7 +23,7 @@ use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty};
use interfaces::{BaseTypeMethods, DerivedTypeMethods, StaticMethods};
use interfaces::*;
use rustc::ty::layout::{Align, LayoutOf};
@@ -79,7 +78,7 @@ fn check_and_apply_linkage(
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = declare::declare_global(cx, &sym, llty2);
let g1 = cx.declare_global(&sym, llty2);
llvm::LLVMRustSetLinkage(g1, base::linkage_to_llvm(linkage));
// Declare an internal global `extern_with_linkage_foo` which
@@ -90,7 +89,7 @@ fn check_and_apply_linkage(
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&sym);
let g2 = declare::define_global(cx, &real_name, llty).unwrap_or_else(||{
let g2 = cx.define_global(&real_name, llty).unwrap_or_else(||{
if let Some(span) = span {
cx.sess().span_fatal(
span,
@@ -107,7 +106,7 @@ fn check_and_apply_linkage(
} else {
// Generate an external declaration.
// FIXME(nagisa): investigate whether it can be changed into define_global
declare::declare_global(cx, &sym, llty)
cx.declare_global(&sym, llty)
}
}
@@ -139,14 +138,14 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let gv = match kind {
Some(kind) if !self.tcx.sess.fewer_names() => {
let name = self.generate_local_symbol_name(kind);
let gv = declare::define_global(&self, &name[..],
let gv = self.define_global(&name[..],
self.val_ty(cv)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
_ => declare::define_private_global(&self, self.val_ty(cv)),
_ => self.define_private_global(self.val_ty(cv)),
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(&self, gv, align);
@@ -206,11 +205,11 @@ impl StaticMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Node::Item(&hir::Item {
ref attrs, span, node: hir::ItemKind::Static(..), ..
}) => {
if declare::get_declared_value(&self, &sym[..]).is_some() {
if self.get_declared_value(&sym[..]).is_some() {
span_bug!(span, "Conflicting symbol names for static?");
}
let g = declare::define_global(&self, &sym[..], llty).unwrap();
let g = self.define_global(&sym[..], llty).unwrap();
if !self.tcx.is_reachable_non_generic(def_id) {
unsafe {

View file

@@ -15,7 +15,6 @@ use rustc::hir;
use debuginfo;
use callee;
use base;
use declare;
use monomorphize::Instance;
use value::Value;
@@ -23,6 +22,7 @@ use monomorphize::partitioning::CodegenUnit;
use type_::Type;
use type_of::PointeeInfo;
use interfaces::*;
use libc::c_uint;
use rustc_data_structures::base_n;
use rustc_data_structures::small_c_str::SmallCStr;
@@ -315,21 +315,108 @@ impl<'a, 'tcx> CodegenCx<'a, 'tcx> {
}
}
impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
pub fn sess<'a>(&'a self) -> &'a Session {
&self.tcx.sess
}
}
impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn vtables(&self) -> &RefCell<FxHashMap<(Ty<'tcx>,
ty::PolyExistentialTraitRef<'tcx>), &'ll Value>>
{
&self.vtables
}
fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, &'ll Value>> {
&self.instances
}
fn get_fn(&self, instance: Instance<'tcx>) -> &'ll Value {
callee::get_fn(self, instance)
}
fn get_param(&self, llfn: &'ll Value, index: c_uint) -> &'ll Value {
llvm::get_param(llfn, index)
}
fn eh_personality(&self) -> &'ll Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
self.declare_cfn(name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
fn eh_unwind_resume(&self) -> &'ll Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
let sig = ty::Binder::bind(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
));
let llfn = self.declare_fn("rust_eh_unwind_resume", sig);
attributes::unwind(llfn, true);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn
}
fn sess(&self) -> &Session {
&self.tcx.sess
}
fn check_overflow(&self) -> bool {
self.check_overflow
}
}
impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
@@ -349,7 +436,7 @@ impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_func(&[], $ret));
let f = self.declare_cfn($name, self.type_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
@@ -357,7 +444,7 @@ impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
);
($name:expr, fn(...) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_variadic_func(&[], $ret));
let f = self.declare_cfn($name, self.type_variadic_func(&[], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
@@ -365,7 +452,7 @@ impl IntrinsicDeclarationMethods<'tcx> for CodegenCx<'b, 'tcx> {
);
($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name {
let f = declare::declare_cfn(&self, $name, self.type_func(&[$($arg),*], $ret));
let f = self.declare_cfn($name, self.type_func(&[$($arg),*], $ret));
llvm::SetUnnamedAddr(f, false);
self.intrinsics.borrow_mut().insert($name, f.clone());
return Some(f);
@@ -668,83 +755,6 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
base_n::push_str(idx as u128, base_n::ALPHANUMERIC_ONLY, &mut name);
name
}
pub fn eh_personality(&self) -> &'b Value {
// The exception handling personality function.
//
// If our compilation unit has the `eh_personality` lang item somewhere
// within it, then we just need to codegen that. Otherwise, we're
// building an rlib which will depend on some upstream implementation of
// this function, so we just codegen a generic reference to it. We don't
// specify any of the types for the function, we just make it a symbol
// that LLVM can later use.
//
// Note that MSVC is a little special here in that we don't use the
// `eh_personality` lang item at all. Currently LLVM has support for
// both Dwarf and SEH unwind mechanisms for MSVC targets and uses the
// *name of the personality function* to decide what kind of unwind side
// tables/landing pads to emit. It looks like Dwarf is used by default,
// injecting a dependency on the `_Unwind_Resume` symbol for resuming
// an "exception", but for MSVC we want to force SEH. This means that we
// can't actually have the personality function be our standard
// `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH".
if let Some(llpersonality) = self.eh_personality.get() {
return llpersonality
}
let tcx = self.tcx;
let llfn = match tcx.lang_items().eh_personality() {
Some(def_id) if !base::wants_msvc_seh(self.sess()) => {
callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]))
}
_ => {
let name = if base::wants_msvc_seh(self.sess()) {
"__CxxFrameHandler3"
} else {
"rust_eh_personality"
};
let fty = self.type_variadic_func(&[], self.type_i32());
declare::declare_cfn(self, name, fty)
}
};
attributes::apply_target_cpu_attr(self, llfn);
self.eh_personality.set(Some(llfn));
llfn
}
// Returns a Value of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function.
pub fn eh_unwind_resume(&self) -> &'b Value {
use attributes;
let unwresume = &self.eh_unwind_resume;
if let Some(llfn) = unwresume.get() {
return llfn;
}
let tcx = self.tcx;
assert!(self.sess().target.target.options.custom_unwind_resume);
if let Some(def_id) = tcx.lang_items().eh_unwind_resume() {
let llfn = callee::resolve_and_get_fn(self, def_id, tcx.intern_substs(&[]));
unwresume.set(Some(llfn));
return llfn;
}
let sig = ty::Binder::bind(tcx.mk_fn_sig(
iter::once(tcx.mk_mut_ptr(tcx.types.u8)),
tcx.types.never,
false,
hir::Unsafety::Unsafe,
Abi::C
));
let llfn = declare::declare_fn(self, "rust_eh_unwind_resume", sig);
attributes::unwind(llfn, true);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn
}
}
impl ty::layout::HasDataLayout for CodegenCx<'ll, 'tcx> {
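context.rs shows the other recurring move: whole inherent methods (`eh_personality`, `eh_unwind_resume`, `sess`, `check_overflow`) migrate bodily from `impl CodegenCx` into a `MiscMethods` trait impl, unchanged except for the receiver. A miniature of the lazy-init-and-cache logic that moved (a `u32` standing in for `&'ll Value`):

    use std::cell::Cell;

    trait MiscMethods {
        type Value: Copy;
        fn eh_personality(&self) -> Self::Value;
    }

    struct CodegenCx {
        // Stand-in for the real context's `Cell<Option<&'ll Value>>`.
        eh_personality: Cell<Option<u32>>,
    }

    impl MiscMethods for CodegenCx {
        type Value = u32;
        // Same lazy-init-and-cache shape as above, now reachable through a
        // trait bound instead of an inherent method.
        fn eh_personality(&self) -> u32 {
            if let Some(llfn) = self.eh_personality.get() {
                return llfn;
            }
            let llfn = 42; // stand-in for declaring the personality function
            self.eh_personality.set(Some(llfn));
            llfn
        }
    }

    fn main() {
        let cx = CodegenCx { eh_personality: Cell::new(None) };
        assert_eq!(cx.eh_personality(), 42);
        assert_eq!(cx.eh_personality(), 42); // second call hits the cache
    }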

View file

@@ -13,7 +13,7 @@ use super::metadata::file_metadata;
use super::utils::{DIB, span_start};
use llvm;
use llvm::debuginfo::DIScope;
use llvm::debuginfo::{DIScope, DISubprogram};
use common::CodegenCx;
use rustc::mir::{Mir, SourceScope};
@@ -27,15 +27,15 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use syntax_pos::BytePos;
#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<'ll> {
pub scope_metadata: Option<&'ll DIScope>,
pub struct MirDebugScope<D> {
pub scope_metadata: Option<D>,
// Start and end offsets of the file to which this DIScope belongs.
// These are used to quickly determine whether some span refers to the same file.
pub file_start_pos: BytePos,
pub file_end_pos: BytePos,
}
impl MirDebugScope<'ll> {
impl<D> MirDebugScope<D> {
pub fn is_valid(&self) -> bool {
self.scope_metadata.is_some()
}
@@ -46,8 +46,8 @@ impl MirDebugScope<'ll> {
pub fn create_mir_scopes(
cx: &CodegenCx<'ll, '_>,
mir: &Mir,
debug_context: &FunctionDebugContext<'ll>,
) -> IndexVec<SourceScope, MirDebugScope<'ll>> {
debug_context: &FunctionDebugContext<&'ll DISubprogram>,
) -> IndexVec<SourceScope, MirDebugScope<&'ll DIScope>> {
let null_scope = MirDebugScope {
scope_metadata: None,
file_start_pos: BytePos(0),
@@ -82,9 +82,9 @@ pub fn create_mir_scopes(
fn make_mir_scope(cx: &CodegenCx<'ll, '_>,
mir: &Mir,
has_variables: &BitSet<SourceScope>,
debug_context: &FunctionDebugContextData<'ll>,
debug_context: &FunctionDebugContextData<&'ll DISubprogram>,
scope: SourceScope,
scopes: &mut IndexVec<SourceScope, MirDebugScope<'ll>>) {
scopes: &mut IndexVec<SourceScope, MirDebugScope<&'ll DIScope>>) {
if scopes[scope].is_valid() {
return;
}

View file

@@ -14,10 +14,9 @@ use llvm;
use common::CodegenCx;
use builder::Builder;
use declare;
use rustc::session::config::DebugInfo;
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods};
use interfaces::*;
use syntax::attr;
@@ -58,7 +57,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
let llvm_type = cx.type_array(cx.type_i8(),
section_contents.len() as u64);
let section_var = declare::define_global(cx, section_var_name,
let section_var = cx.define_global(section_var_name,
llvm_type).unwrap_or_else(||{
bug!("symbol `{}` is already defined", section_var_name)
});

View file

@@ -1968,6 +1968,68 @@ pub fn create_global_var_metadata(
}
}
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
pub fn create_vtable_metadata(
cx: &CodegenCx<'ll, 'tcx>,
ty: ty::Ty<'tcx>,
vtable: &'ll Value,
) {
if cx.dbg_cx.is_none() {
return;
}
let type_metadata = type_metadata(cx, ty, syntax_pos::DUMMY_SP);
unsafe {
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(cx), &[]);
let name = const_cstr!("vtable");
// Create a new one each time. We don't want metadata caching
// here, because each vtable will refer to a unique containing
// type.
let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
DIB(cx),
NO_SCOPE_METADATA,
name.as_ptr(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
Size::ZERO.bits(),
cx.tcx.data_layout.pointer_align.abi_bits() as u32,
DIFlags::FlagArtificial,
None,
empty_array,
0,
Some(type_metadata),
name.as_ptr()
);
llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(cx),
NO_SCOPE_METADATA,
name.as_ptr(),
// LLVM 3.9
// doesn't accept
// null here, so
// pass the name
// as the linkage
// name.
name.as_ptr(),
unknown_file_metadata(cx),
UNKNOWN_LINE_NUMBER,
vtable_type,
true,
vtable,
None,
0);
}
}
// Creates an "extension" of an existing DIScope into another file.
pub fn extend_scope_to_file(
cx: &CodegenCx<'ll, '_>,
@@ -1983,61 +2045,3 @@ pub fn extend_scope_to_file(
file_metadata)
}
}
impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
/// Creates debug information for the given vtable, which is for the
/// given type.
///
/// Adds the created metadata nodes directly to the crate's IR.
fn create_vtable_metadata(
&self,
ty: ty::Ty<'tcx>,
vtable: &'ll Value,
) {
if self.dbg_cx.is_none() {
return;
}
let type_metadata = type_metadata(&self, ty, syntax_pos::DUMMY_SP);
unsafe {
// LLVMRustDIBuilderCreateStructType() wants an empty array. A null
// pointer will lead to hard to trace and debug LLVM assertions
// later on in llvm/lib/IR/Value.cpp.
let empty_array = create_DIArray(DIB(&self), &[]);
let name = const_cstr!("vtable");
// Create a new one each time. We don't want metadata caching
// here, because each vtable will refer to a unique containing
// type.
let vtable_type = llvm::LLVMRustDIBuilderCreateStructType(
DIB(&self),
NO_SCOPE_METADATA,
name.as_ptr(),
unknown_file_metadata(&self),
UNKNOWN_LINE_NUMBER,
Size::ZERO.bits(),
self.tcx.data_layout.pointer_align.abi_bits() as u32,
DIFlags::FlagArtificial,
None,
empty_array,
0,
Some(type_metadata),
name.as_ptr()
);
llvm::LLVMRustDIBuilderCreateStaticVariable(DIB(&self),
NO_SCOPE_METADATA,
name.as_ptr(),
ptr::null(),
unknown_file_metadata(&self),
UNKNOWN_LINE_NUMBER,
vtable_type,
true,
vtable,
None,
0);
}
}
}

View file

@@ -21,7 +21,8 @@ use self::metadata::{type_metadata, file_metadata, TypeMap};
use self::source_loc::InternalDebugLocation::{self, UnknownLocation};
use llvm;
use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags};
use llvm::debuginfo::{DIFile, DIType, DIScope, DIBuilder, DISubprogram, DIArray, DIFlags,
DILexicalBlock};
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{DefId, CrateNum};
use rustc::ty::subst::{Substs, UnpackedKind};
@@ -35,6 +36,7 @@ use rustc::mir;
use rustc::session::config::{self, DebugInfo};
use rustc::util::nodemap::{DefIdMap, FxHashMap, FxHashSet};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::indexed_vec::IndexVec;
use value::Value;
use libc::c_uint;
@@ -44,8 +46,8 @@ use std::ffi::CString;
use syntax_pos::{self, Span, Pos};
use syntax::ast;
use syntax::symbol::{Symbol, InternedString};
use rustc::ty::layout::{self, LayoutOf};
use interfaces::BuilderMethods;
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
use interfaces::*;
pub mod gdb;
mod utils;
@@ -109,21 +111,21 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
}
}
pub enum FunctionDebugContext<'ll> {
RegularContext(FunctionDebugContextData<'ll>),
pub enum FunctionDebugContext<D> {
RegularContext(FunctionDebugContextData<D>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
impl FunctionDebugContext<'ll> {
pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<'ll> {
impl<D> FunctionDebugContext<D> {
pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<D> {
match *self {
FunctionDebugContext::RegularContext(ref data) => data,
FunctionDebugContext::DebugInfoDisabled => {
span_bug!(span, "{}", FunctionDebugContext::debuginfo_disabled_message());
span_bug!(span, "{}", Self::debuginfo_disabled_message());
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
span_bug!(span, "{}", FunctionDebugContext::should_be_ignored_message());
span_bug!(span, "{}", Self::should_be_ignored_message());
}
}
}
@@ -138,18 +140,18 @@ impl FunctionDebugContext<'ll> {
}
}
pub struct FunctionDebugContextData<'ll> {
fn_metadata: &'ll DISubprogram,
pub struct FunctionDebugContextData<D> {
fn_metadata: D,
source_locations_enabled: Cell<bool>,
pub defining_crate: CrateNum,
}
pub enum VariableAccess<'a, 'll> {
pub enum VariableAccess<'a, V> {
// The llptr given is an alloca containing the variable's value
DirectVariable { alloca: &'ll Value },
DirectVariable { alloca: V },
// The llptr given is an alloca containing the start of some pointer chain
// leading to the variable's content.
IndirectVariable { alloca: &'ll Value, address_operations: &'a [i64] }
IndirectVariable { alloca: V, address_operations: &'a [i64] }
}
pub enum VariableKind {
@@ -202,25 +204,103 @@ pub fn finalize(cx: &CodegenCx) {
};
}
impl DebugInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn declare_local(
&self,
dbg_context: &FunctionDebugContext<&'ll DISubprogram>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: &'ll DIScope,
variable_access: VariableAccess<'_, &'ll Value>,
variable_kind: VariableKind,
span: Span,
) {
assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
let cx = self.cx();
let file = span_start(cx, span).file;
let file_metadata = file_metadata(cx,
&file.name,
dbg_context.get_ref(span).defining_crate);
let loc = span_start(cx, span);
let type_metadata = type_metadata(cx, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable => (0, DW_TAG_auto_variable)
};
let align = cx.align_of(variable_type);
let name = SmallCStr::new(&variable_name.as_str());
match (variable_access, &[][..]) {
(DirectVariable { alloca }, address_operations) |
(IndirectVariable {alloca, address_operations}, _) => {
let metadata = unsafe {
llvm::LLVMRustDIBuilderCreateVariable(
DIB(cx),
dwarf_tag,
scope_metadata,
name.as_ptr(),
file_metadata,
loc.line as c_uint,
type_metadata,
cx.sess().opts.optimize != config::OptLevel::No,
DIFlags::FlagZero,
argument_index,
align.abi() as u32,
)
};
source_loc::set_debug_location(self,
InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize()));
unsafe {
let debug_loc = llvm::LLVMGetCurrentDebugLocation(self.llbuilder);
let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(cx),
alloca,
metadata,
address_operations.as_ptr(),
address_operations.len() as c_uint,
debug_loc,
self.llbb());
llvm::LLVMSetInstDebugLocation(self.llbuilder, instr);
}
source_loc::set_debug_location(self, UnknownLocation);
}
}
}
fn set_source_location(
&self,
debug_context: &FunctionDebugContext<&'ll DISubprogram>,
scope: Option<&'ll DIScope>,
span: Span,
) {
set_source_location(debug_context, &self, scope, span)
}
}
impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
/// Creates the function-specific debug context.
///
/// Returns the FunctionDebugContext for the function which holds state needed
/// for debug info creation. The function may also return another variant of the
/// FunctionDebugContext enum which indicates why no debuginfo should be created
/// for the function.
pub fn create_function_debug_context(
cx: &CodegenCx<'ll, 'tcx>,
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
llfn: &'ll Value,
mir: &mir::Mir,
) -> FunctionDebugContext<'ll> {
if cx.sess().opts.debuginfo == DebugInfo::None {
) -> FunctionDebugContext<&'ll DISubprogram> {
if self.sess().opts.debuginfo == DebugInfo::None {
return FunctionDebugContext::DebugInfoDisabled;
}
if let InstanceDef::Item(def_id) = instance.def {
if cx.tcx.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) {
if self.tcx().codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::NO_DEBUG) {
return FunctionDebugContext::FunctionWithoutDebugInfo;
}
}
@@ -234,56 +314,56 @@ pub fn create_function_debug_context(
}
let def_id = instance.def_id();
let containing_scope = get_containing_scope(cx, instance);
let loc = span_start(cx, span);
let file_metadata = file_metadata(cx, &loc.file.name, def_id.krate);
let containing_scope = get_containing_scope(self, instance);
let loc = span_start(self, span);
let file_metadata = file_metadata(self, &loc.file.name, def_id.krate);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(cx, sig);
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(cx), file_metadata, fn_signature)
let fn_signature = get_function_signature(self, sig);
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), file_metadata, fn_signature)
};
// Find the enclosing function, in case this is a closure.
let def_key = cx.tcx.def_key(def_id);
let def_key = self.tcx().def_key(def_id);
let mut name = def_key.disambiguated_data.data.to_string();
let enclosing_fn_def_id = cx.tcx.closure_base_def_id(def_id);
let enclosing_fn_def_id = self.tcx().closure_base_def_id(def_id);
// Get_template_parameters() will append a `<...>` clause to the function
// name if necessary.
let generics = cx.tcx.generics_of(enclosing_fn_def_id);
let substs = instance.substs.truncate_to(cx.tcx, generics);
let template_parameters = get_template_parameters(cx,
let generics = self.tcx().generics_of(enclosing_fn_def_id);
let substs = instance.substs.truncate_to(self.tcx(), generics);
let template_parameters = get_template_parameters(self,
&generics,
substs,
file_metadata,
&mut name);
// Get the linkage_name, which is just the symbol name
let linkage_name = mangled_name_of_instance(cx, instance);
let linkage_name = mangled_name_of_instance(self, instance);
let scope_line = span_start(cx, span).line;
let is_local_to_unit = is_node_local_to_unit(cx, def_id);
let scope_line = span_start(self, span).line;
let is_local_to_unit = is_node_local_to_unit(self, def_id);
let function_name = CString::new(name).unwrap();
let linkage_name = SmallCStr::new(&linkage_name.as_str());
let mut flags = DIFlags::FlagPrototyped;
let local_id = cx.tcx.hir.as_local_node_id(def_id);
if let Some((id, _, _)) = *cx.sess().entry_fn.borrow() {
let local_id = self.tcx().hir.as_local_node_id(def_id);
if let Some((id, _, _)) = *self.sess().entry_fn.borrow() {
if local_id == Some(id) {
flags |= DIFlags::FlagMainSubprogram;
}
}
if cx.layout_of(sig.output()).abi.is_uninhabited() {
if self.layout_of(sig.output()).abi.is_uninhabited() {
flags |= DIFlags::FlagNoReturn;
}
let fn_metadata = unsafe {
llvm::LLVMRustDIBuilderCreateFunction(
DIB(cx),
DIB(self),
containing_scope,
function_name.as_ptr(),
linkage_name.as_ptr(),
@@ -294,7 +374,7 @@ pub fn create_function_debug_context(
true,
scope_line as c_uint,
flags,
cx.sess().opts.optimize != config::OptLevel::No,
self.sess().opts.optimize != config::OptLevel::No,
llfn,
template_parameters,
None)
@@ -309,7 +389,7 @@ pub fn create_function_debug_context(
return FunctionDebugContext::RegularContext(fn_debug_context);
fn get_function_signature(
fn get_function_signature<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
) -> &'ll DIArray {
@@ -372,7 +452,7 @@ pub fn create_function_debug_context(
create_DIArray(DIB(cx), &signature[..])
}
fn get_template_parameters(
fn get_template_parameters<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
generics: &ty::Generics,
substs: &Substs<'tcx>,
@@ -389,7 +469,8 @@ pub fn create_function_debug_context(
name_to_append_suffix_to.push_str(",");
}
let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), actual_type);
// Add actual type name to <...> clause of function name
let actual_type_name = compute_debuginfo_type_name(cx,
actual_type,
@@ -403,7 +484,8 @@ pub fn create_function_debug_context(
let names = get_parameter_names(cx, generics);
substs.iter().zip(names).filter_map(|(kind, name)| {
if let UnpackedKind::Type(ty) = kind.unpack() {
let actual_type = cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
let actual_type =
cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), ty);
let actual_type_metadata =
type_metadata(cx, actual_type, syntax_pos::DUMMY_SP);
let name = SmallCStr::new(&name.as_str());
@@ -426,7 +508,7 @@ pub fn create_function_debug_context(
vec![]
};
create_DIArray(DIB(cx), &template_params[..])
return create_DIArray(DIB(cx), &template_params[..]);
}
fn get_parameter_names(cx: &CodegenCx,
@@ -439,7 +521,7 @@ pub fn create_function_debug_context(
names
}
fn get_containing_scope(
fn get_containing_scope<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
instance: Instance<'tcx>,
) -> &'ll DIScope {
@@ -482,68 +564,28 @@ pub fn create_function_debug_context(
}
}
pub fn declare_local(
bx: &Builder<'a, 'll, 'tcx>,
dbg_context: &FunctionDebugContext<'ll>,
variable_name: ast::Name,
variable_type: Ty<'tcx>,
scope_metadata: &'ll DIScope,
variable_access: VariableAccess<'_, 'll>,
variable_kind: VariableKind,
span: Span,
fn create_vtable_metadata(
&self,
ty: Ty<'tcx>,
vtable: Self::Value,
) {
assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
let cx = bx.cx();
let file = span_start(cx, span).file;
let file_metadata = file_metadata(cx,
&file.name,
dbg_context.get_ref(span).defining_crate);
let loc = span_start(cx, span);
let type_metadata = type_metadata(cx, variable_type, span);
let (argument_index, dwarf_tag) = match variable_kind {
ArgumentVariable(index) => (index as c_uint, DW_TAG_arg_variable),
LocalVariable => (0, DW_TAG_auto_variable)
};
let align = cx.align_of(variable_type);
let name = SmallCStr::new(&variable_name.as_str());
match (variable_access, &[][..]) {
(DirectVariable { alloca }, address_operations) |
(IndirectVariable {alloca, address_operations}, _) => {
let metadata = unsafe {
llvm::LLVMRustDIBuilderCreateVariable(
DIB(cx),
dwarf_tag,
scope_metadata,
name.as_ptr(),
file_metadata,
loc.line as c_uint,
type_metadata,
cx.sess().opts.optimize != config::OptLevel::No,
DIFlags::FlagZero,
argument_index,
align.abi() as u32,
)
};
source_loc::set_debug_location(bx,
InternalDebugLocation::new(scope_metadata, loc.line, loc.col.to_usize()));
unsafe {
let debug_loc = llvm::LLVMGetCurrentDebugLocation(bx.llbuilder);
let instr = llvm::LLVMRustDIBuilderInsertDeclareAtEnd(
DIB(cx),
alloca,
metadata,
address_operations.as_ptr(),
address_operations.len() as c_uint,
debug_loc,
bx.llbb());
llvm::LLVMSetInstDebugLocation(bx.llbuilder, instr);
}
source_loc::set_debug_location(bx, UnknownLocation);
metadata::create_vtable_metadata(self, ty, vtable)
}
fn create_mir_scopes(
&self,
mir: &mir::Mir,
debug_context: &FunctionDebugContext<&'ll DISubprogram>,
) -> IndexVec<mir::SourceScope, MirDebugScope<&'ll DIScope>> {
create_scope_map::create_mir_scopes(self, mir, debug_context)
}
fn extend_scope_to_file(
&self,
scope_metadata: &'ll DIScope,
file: &syntax_pos::SourceFile,
defining_crate: CrateNum,
) -> &'ll DILexicalBlock {
metadata::extend_scope_to_file(&self, scope_metadata, file, defining_crate)
}
}
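The debuginfo types in this file trade their hard-coded LLVM parameters for type parameters: `FunctionDebugContext<'ll>` becomes `FunctionDebugContext<D>` and `MirDebugScope<'ll>` becomes `MirDebugScope<D>`, with only the LLVM backend instantiating `D = &'ll DISubprogram` or `&'ll DIScope`. The same genericization in a few lines (a string standing in for the DI handle):

    // `FunctionDebugContext<'ll>` -> `FunctionDebugContext<D>`: the enum no
    // longer names an LLVM type; each backend picks its own `D`.
    enum FunctionDebugContext<D> {
        RegularContext(D),
        DebugInfoDisabled,
    }

    impl<D> FunctionDebugContext<D> {
        fn get_ref(&self) -> &D {
            match self {
                FunctionDebugContext::RegularContext(data) => data,
                FunctionDebugContext::DebugInfoDisabled => panic!("debuginfo disabled"),
            }
        }
    }

    fn main() {
        // A backend whose "DISubprogram" handle is just a string.
        let ctx: FunctionDebugContext<&str> = FunctionDebugContext::RegularContext("fn main()");
        assert_eq!(*ctx.get_ref(), "fn main()");

        let _disabled: FunctionDebugContext<&str> = FunctionDebugContext::DebugInfoDisabled;
    }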

View file

@@ -17,7 +17,7 @@ use super::FunctionDebugContext;
use llvm;
use llvm::debuginfo::DIScope;
use builder::Builder;
use interfaces::BuilderMethods;
use interfaces::*;
use libc::c_uint;
use syntax_pos::{Span, Pos};
@@ -25,8 +25,8 @@ use syntax_pos::{Span, Pos};
/// Sets the current debug location at the beginning of the span.
///
/// Maps to a call to llvm::LLVMSetCurrentDebugLocation(...).
pub fn set_source_location(
debug_context: &FunctionDebugContext<'ll>,
pub fn set_source_location<D>(
debug_context: &FunctionDebugContext<D>,
bx: &Builder<'_, 'll, '_>,
scope: Option<&'ll DIScope>,
span: Span,
@@ -41,7 +41,7 @@ pub fn set_source_location(
};
let dbg_loc = if function_debug_context.source_locations_enabled.get() {
debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span));
let loc = span_start(bx.cx(), span);
InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
} else {
@@ -56,7 +56,7 @@ pub fn set_source_location(
/// they are disabled when beginning to codegen a new function. This functions
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is codegened.
pub fn start_emitting_source_locations(dbg_context: &FunctionDebugContext<'ll>) {
pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
if let FunctionDebugContext::RegularContext(ref data) = *dbg_context {
data.source_locations_enabled.set(true);
}

View file

@@ -14,6 +14,7 @@ use common::CodegenCx;
use rustc::hir::def_id::DefId;
use rustc::ty::subst::Substs;
use rustc::ty::{self, Ty};
use interfaces::*;
use rustc::hir;

View file

@@ -19,6 +19,7 @@ use rustc::ty::DefIdTree;
use llvm;
use llvm::debuginfo::{DIScope, DIBuilder, DIDescriptor, DIArray};
use common::{CodegenCx};
use interfaces::*;
use syntax_pos::{self, Span};

View file

@@ -23,7 +23,7 @@
use llvm;
use llvm::AttributePlace::Function;
use rustc::ty::{self, PolyFnSig};
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{self, LayoutOf};
use rustc::session::config::Sanitizer;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_target::spec::PanicStrategy;
@@ -31,22 +31,9 @@ use abi::{Abi, FnType, FnTypeExt};
use attributes;
use context::CodegenCx;
use type_::Type;
use interfaces::*;
use value::Value;
/// Declare a global value.
///
/// If there's a value with the same name already declared, the function will
/// return its Value instead.
pub fn declare_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe {
llvm::LLVMRustGetOrInsertGlobal(cx.llmod, namebuf.as_ptr(), ty)
}
}
/// Declare a function.
///
/// If there's a value with the same name already declared, the function will
@@ -108,6 +95,22 @@ fn declare_raw_fn(
llfn
}
impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
/// Declare a global value.
///
/// If there's a value with the same name already declared, the function will
/// return its Value instead.
fn declare_global(
&self,
name: &str, ty: &'ll Type
) -> &'ll Value {
debug!("declare_global(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe {
llvm::LLVMRustGetOrInsertGlobal(self.llmod, namebuf.as_ptr(), ty)
}
}
/// Declare a C ABI function.
///
@@ -116,12 +119,12 @@ fn declare_raw_fn(
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
pub fn declare_cfn(
cx: &CodegenCx<'ll, '_>,
fn declare_cfn(
&self,
name: &str,
fn_type: &'ll Type
) -> &'ll Value {
declare_raw_fn(cx, name, llvm::CCallConv, fn_type)
declare_raw_fn(self, name, llvm::CCallConv, fn_type)
}
@@ -129,19 +132,19 @@ pub fn declare_cfn(
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
pub fn declare_fn(
cx: &CodegenCx<'ll, 'tcx>,
fn declare_fn(
&self,
name: &str,
sig: PolyFnSig<'tcx>,
) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig);
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(cx, sig, &[]);
let llfn = declare_raw_fn(cx, name, fty.llvm_cconv(), fty.llvm_type(cx));
let fty = FnType::new(self, sig, &[]);
let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self));
if cx.layout_of(sig.output()).abi.is_uninhabited() {
if self.layout_of(sig.output()).abi == layout::Abi::Uninhabited {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
@@ -161,20 +164,24 @@ pub fn declare_fn(
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to users fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
pub fn define_global(cx: &CodegenCx<'ll, '_>, name: &str, ty: &'ll Type) -> Option<&'ll Value> {
if get_defined_value(cx, name).is_some() {
fn define_global(
&self,
name: &str,
ty: &'ll Type
) -> Option<&'ll Value> {
if self.get_defined_value(name).is_some() {
None
} else {
Some(declare_global(cx, name, ty))
Some(self.declare_global(name, ty))
}
}
/// Declare a private global
///
/// Use this function when you intend to define a global without a name.
pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value {
fn define_private_global(&self, ty: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMRustInsertPrivateGlobal(cx.llmod, ty)
llvm::LLVMRustInsertPrivateGlobal(self.llmod, ty)
}
}
@@ -183,15 +190,15 @@ pub fn define_private_global(cx: &CodegenCx<'ll, '_>, ty: &'ll Type) -> &'ll Value {
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_fn(
cx: &CodegenCx<'ll, 'tcx>,
fn define_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
if get_defined_value(cx, name).is_some() {
cx.sess().fatal(&format!("symbol `{}` already defined", name))
if self.get_defined_value(name).is_some() {
self.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
declare_fn(cx, name, fn_sig)
self.declare_fn(name, fn_sig)
}
}
@@ -200,28 +207,28 @@ pub fn define_fn(
/// Use this function when you intend to define a function. This function will
/// panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_internal_fn(
cx: &CodegenCx<'ll, 'tcx>,
fn define_internal_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
let llfn = define_fn(cx, name, fn_sig);
let llfn = self.define_fn(name, fn_sig);
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
llfn
}
/// Get declared value by name.
pub fn get_declared_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
let namebuf = SmallCStr::new(name);
unsafe { llvm::LLVMRustGetNamedValue(cx.llmod, namebuf.as_ptr()) }
unsafe { llvm::LLVMRustGetNamedValue(self.llmod, namebuf.as_ptr()) }
}
/// Get defined or externally defined (AvailableExternally linkage) value by
/// name.
pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Value> {
get_declared_value(cx, name).and_then(|val|{
fn get_defined_value(&self, name: &str) -> Option<&'ll Value> {
self.get_declared_value(name).and_then(|val|{
let declaration = unsafe {
llvm::LLVMIsDeclaration(val) != 0
};
@ -232,3 +239,4 @@ pub fn get_defined_value(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Val
}
})
}
}
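To make the declare/define contract above concrete, here is a small self-contained sketch (not part of the commit; the Value newtype and name table are stand-ins for LLVM module state): declare_* is idempotent and may hand back an already-bound value, while define_* insists on being the first definition and signals a clash to its caller.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct Value(u32);

#[derive(Default)]
struct Module {
    names: HashMap<String, Value>,
    next: u32,
}

impl Module {
    // Idempotent: re-declaring a name returns the value already bound to it.
    fn declare_global(&mut self, name: &str) -> Value {
        let next = &mut self.next;
        *self.names.entry(name.to_string()).or_insert_with(|| {
            let v = Value(*next);
            *next += 1;
            v
        })
    }

    // First definition wins; `None` lets the caller report e.g. a duplicate
    // #[no_mangle] symbol instead of silently reusing the old binding.
    fn define_global(&mut self, name: &str) -> Option<Value> {
        if self.names.contains_key(name) {
            None
        } else {
            Some(self.declare_global(name))
        }
    }
}

fn main() {
    let mut m = Module::default();
    assert!(m.define_global("foo").is_some());
    assert!(m.define_global("foo").is_none()); // second definition rejected
    assert_eq!(m.declare_global("foo"), m.declare_global("foo")); // idempotent
}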

View file

@ -0,0 +1,23 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::HasCodegen;
use abi::FnType;
use rustc::ty::{FnSig, Instance, Ty};
pub trait AbiMethods<'tcx> {
fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
fn new_vtable(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;
fn fn_type_of_instance(&self, instance: &Instance<'tcx>) -> FnType<'tcx, Ty<'tcx>>;
}
pub trait AbiBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn apply_attrs_callsite(&self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
}
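As a hedged sketch of the intended call pattern (the helper below is illustrative, not from this commit, and assumes this file's imports), generic codegen now asks the context for ABI data through the trait instead of naming a backend-specific constructor:

// Virtual calls need the vtable-adjusted ABI; both variants now come from
// the context rather than from FnType's inherent constructors.
fn vtable_fn_type<'tcx, Cx: AbiMethods<'tcx>>(
    cx: &Cx,
    sig: FnSig<'tcx>,
) -> FnType<'tcx, Ty<'tcx>> {
    cx.new_vtable(sig, &[]) // no extra (untupled closure) arguments
}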

View file

@ -0,0 +1,27 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::backend::Backend;
use super::HasCodegen;
use mir::place::PlaceRef;
use rustc::hir::{GlobalAsm, InlineAsm};
pub trait AsmBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn codegen_inline_asm(
&self,
ia: &InlineAsm,
outputs: Vec<PlaceRef<'tcx, Self::Value>>,
inputs: Vec<Self::Value>,
) -> bool;
}
pub trait AsmMethods<'tcx>: Backend<'tcx> {
fn codegen_global_asm(&self, ga: &GlobalAsm);
}
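A minimal delegation sketch (illustrative, assuming the imports above): shared MIR lowering reaches inline assembly through the builder trait, and the bool, per the declaration here, leaves diagnostics to the caller:

fn lower_inline_asm<'tcx, Bx: AsmBuilderMethods<'tcx>>(
    bx: &Bx,
    ia: &InlineAsm,
    outputs: Vec<PlaceRef<'tcx, Bx::Value>>,
    inputs: Vec<Bx::Value>,
) -> bool {
    // `false` presumably signals a rejected asm expression; the caller
    // owns the error reporting.
    bx.codegen_inline_asm(ia, outputs, inputs)
}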

View file

@ -15,9 +15,11 @@ use super::CodegenObject;
pub trait BackendTypes {
type Value: CodegenObject;
type BasicBlock;
type BasicBlock: Copy;
type Type: CodegenObject;
type Context;
type DIScope: Copy;
}
pub trait Backend<'tcx>:
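For concreteness, a self-contained sketch of a backend satisfying the strengthened bounds (the CodegenObject bounds below are an assumption, not quoted from the commit): making BasicBlock, and now DIScope, Copy lets generic code cache and hand them out by value, as the LLVM backend does with its &'ll references.

use std::fmt::Debug;

trait CodegenObject: Copy + Debug {}
impl<T: Copy + Debug> CodegenObject for T {}

trait BackendTypes {
    type Value: CodegenObject;
    type BasicBlock: Copy;
    type Type: CodegenObject;
    type Context;
    type DIScope: Copy;
}

// A dummy backend: plain indices stand in for the LLVM &'ll pointers.
struct DummyBackend;

impl BackendTypes for DummyBackend {
    type Value = u32;
    type BasicBlock = u32;
    type Type = u32;
    type Context = ();
    type DIScope = u32;
}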

View file

@ -8,25 +8,35 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods;
use super::HasCodegen;
use builder::MemFlags;
use common::*;
use libc::c_char;
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::session::Session;
use rustc::ty::layout::{Align, Size};
use std::borrow::Cow;
use std::ops::Range;
use syntax::ast::AsmDialect;
pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
pub trait BuilderMethods<'a, 'tcx: 'a>:
HasCodegen<'tcx>
+ DebugInfoBuilderMethods<'tcx>
+ ArgTypeMethods<'tcx>
+ AbiBuilderMethods<'tcx>
+ IntrinsicCallMethods<'tcx>
+ AsmBuilderMethods<'tcx>
{
fn new_block<'b>(cx: &'a Self::CodegenCx, llfn: Self::Value, name: &'b str) -> Self;
fn with_cx(cx: &'a Self::CodegenCx) -> Self;
fn build_sibling_block<'b>(&self, name: &'b str) -> Self;
fn sess(&self) -> &Session;
fn cx(&self) -> &'a Self::CodegenCx; // FIXME(eddyb) remove 'a
fn cx(&self) -> &Self::CodegenCx;
fn llfn(&self) -> Self::Value;
fn llbb(&self) -> Self::BasicBlock;
fn count_insn(&self, category: &str);
@ -45,7 +55,7 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
args: &[Self::Value],
then: Self::BasicBlock,
catch: Self::BasicBlock,
bundle: Option<&OperandBundleDef<Self::Value>>,
funclet: Option<&Funclet<Self::Value>>,
) -> Self::Value;
fn unreachable(&self);
fn add(&self, lhs: Self::Value, rhs: Self::Value) -> Self::Value;
@ -252,7 +262,10 @@ pub trait BuilderMethods<'a, 'tcx: 'a>: HasCodegen<'tcx> {
&self,
llfn: Self::Value,
args: &[Self::Value],
bundle: Option<&OperandBundleDef<Self::Value>>,
funclet: Option<&Funclet<Self::Value>>,
) -> Self::Value;
fn zext(&self, val: Self::Value, dest_ty: Self::Type) -> Self::Value;
fn delete_basic_block(&self, bb: Self::BasicBlock);
fn do_not_inline(&self, llret: Self::Value);
}
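A hedged fragment (the helper is illustrative and assumes the imports above): with call and invoke now taking the whole Funclet rather than a pre-extracted OperandBundleDef, shared callers stay agnostic about how a backend encodes funclet bundles at the call site:

fn call_in_funclet<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &Bx,
    llfn: Bx::Value,
    args: &[Bx::Value],
    funclet: Option<&Funclet<Bx::Value>>,
) -> Bx::Value {
    // The builder, not the caller, decides what (if anything) the funclet
    // contributes to the emitted call.
    bx.call(llfn, args, funclet)
}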

View file

@ -9,6 +9,10 @@
// except according to those terms.
use super::Backend;
use mir::place::PlaceRef;
use rustc::mir::interpret::Allocation;
use rustc::mir::interpret::Scalar;
use rustc::ty::layout;
use syntax::symbol::LocalInternedString;
pub trait ConstMethods<'tcx>: Backend<'tcx> {
@ -39,4 +43,17 @@ pub trait ConstMethods<'tcx>: Backend<'tcx> {
fn is_const_integral(&self, v: Self::Value) -> bool;
fn is_const_real(&self, v: Self::Value) -> bool;
fn scalar_to_backend(
&self,
cv: Scalar,
layout: &layout::Scalar,
llty: Self::Type,
) -> Self::Value;
fn from_const_alloc(
&self,
layout: layout::TyLayout<'tcx>,
alloc: &Allocation,
offset: layout::Size,
) -> PlaceRef<'tcx, Self::Value>;
}
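One hedged usage fragment, mirroring the simd_shuffle_indices change later in this commit (the variable names are assumed from that context): an immediate constant is built from a Scalar plus its layout description, with the backend choosing the concrete representation:

let llval = bx.cx().scalar_to_backend(
    prim,                                   // the rustc Scalar constant
    scalar,                                 // its layout::Scalar description
    bx.cx().immediate_backend_type(layout), // backend type to produce
);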

View file

@ -9,8 +9,53 @@
// except according to those terms.
use super::backend::Backend;
use rustc::ty::Ty;
use super::HasCodegen;
use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind};
use monomorphize::Instance;
use rustc::hir::def_id::CrateNum;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc_data_structures::indexed_vec::IndexVec;
use syntax::ast::Name;
use syntax_pos::{SourceFile, Span};
pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
fn create_vtable_metadata(&self, ty: Ty<'tcx>, vtable: Self::Value);
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
llfn: Self::Value,
mir: &mir::Mir,
) -> FunctionDebugContext<Self::DIScope>;
fn create_mir_scopes(
&self,
mir: &mir::Mir,
debug_context: &FunctionDebugContext<Self::DIScope>,
) -> IndexVec<mir::SourceScope, MirDebugScope<Self::DIScope>>;
fn extend_scope_to_file(
&self,
scope_metadata: Self::DIScope,
file: &SourceFile,
defining_crate: CrateNum,
) -> Self::DIScope;
}
pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {
fn declare_local(
&self,
dbg_context: &FunctionDebugContext<Self::DIScope>,
variable_name: Name,
variable_type: Ty<'tcx>,
scope_metadata: Self::DIScope,
variable_access: VariableAccess<'_, Self::Value>,
variable_kind: VariableKind,
span: Span,
);
fn set_source_location(
&self,
debug_context: &FunctionDebugContext<Self::DIScope>,
scope: Option<Self::DIScope>,
span: Span,
);
}

View file

@ -0,0 +1,24 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::backend::Backend;
use rustc::ty;
pub trait DeclareMethods<'tcx>: Backend<'tcx> {
fn declare_global(&self, name: &str, ty: Self::Type) -> Self::Value;
fn declare_cfn(&self, name: &str, fn_type: Self::Type) -> Self::Value;
fn declare_fn(&self, name: &str, sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn define_global(&self, name: &str, ty: Self::Type) -> Option<Self::Value>;
fn define_private_global(&self, ty: Self::Type) -> Self::Value;
fn define_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
fn get_declared_value(&self, name: &str) -> Option<Self::Value>;
fn get_defined_value(&self, name: &str) -> Option<Self::Value>;
}

View file

@ -9,13 +9,13 @@
// except according to those terms.
use super::backend::Backend;
use super::builder::BuilderMethods;
use super::HasCodegen;
use abi::FnType;
use mir::operand::OperandRef;
use rustc::ty::Ty;
use syntax_pos::Span;
pub trait IntrinsicCallMethods<'a, 'tcx: 'a>: BuilderMethods<'a, 'tcx> {
pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {
fn codegen_intrinsic_call(
&self,
callee_ty: Ty<'tcx>,

View file

@ -9,6 +9,8 @@
// except according to those terms.
use super::backend::Backend;
use libc::c_uint;
use rustc::session::Session;
use rustc::ty::{self, Instance, Ty};
use rustc::util::nodemap::FxHashMap;
use std::cell::RefCell;
@ -17,5 +19,11 @@ pub trait MiscMethods<'tcx>: Backend<'tcx> {
fn vtables(
&self,
) -> &RefCell<FxHashMap<(Ty<'tcx>, ty::PolyExistentialTraitRef<'tcx>), Self::Value>>;
fn check_overflow(&self) -> bool;
fn instances(&self) -> &RefCell<FxHashMap<Instance<'tcx>, Self::Value>>;
fn get_fn(&self, instance: Instance<'tcx>) -> Self::Value;
fn get_param(&self, llfn: Self::Value, index: c_uint) -> Self::Value;
fn eh_personality(&self) -> Self::Value;
fn eh_unwind_resume(&self) -> Self::Value;
fn sess(&self) -> &Session;
}

View file

@ -8,23 +8,31 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod abi;
mod asm;
mod backend;
mod builder;
mod consts;
mod debuginfo;
mod declare;
mod intrinsic;
mod misc;
mod statics;
mod type_;
pub use self::abi::{AbiBuilderMethods, AbiMethods};
pub use self::asm::{AsmBuilderMethods, AsmMethods};
pub use self::backend::{Backend, BackendTypes};
pub use self::builder::BuilderMethods;
pub use self::consts::ConstMethods;
pub use self::debuginfo::DebugInfoMethods;
pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
pub use self::declare::DeclareMethods;
pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
pub use self::misc::MiscMethods;
pub use self::statics::StaticMethods;
pub use self::type_::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods};
pub use self::type_::{
ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
use std::fmt;
@ -35,6 +43,10 @@ pub trait CodegenMethods<'tcx>:
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
{
}
@ -45,6 +57,10 @@ impl<'tcx, T> CodegenMethods<'tcx> for T where
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
{}
pub trait HasCodegen<'tcx>: Backend<'tcx> {
@ -54,6 +70,7 @@ pub trait HasCodegen<'tcx>: Backend<'tcx> {
BasicBlock = Self::BasicBlock,
Type = Self::Type,
Context = Self::Context,
DIScope = Self::DIScope,
>;
}
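To illustrate why HasCodegen repeats every associated type as an equality constraint (now including DIScope), a self-contained analogue with illustrative names: the constraints are what let generic code pass values between a builder and its context without casts.

trait BackendTypes {
    type Value: Copy;
    type DIScope: Copy;
}

trait HasCodegen: BackendTypes {
    // Without `Value = Self::Value` (and likewise for DIScope), the builder's
    // and the context's associated types would be unrelated opaque types.
    type CodegenCx: BackendTypes<Value = Self::Value, DIScope = Self::DIScope>;
}

fn set_scope<Bx: HasCodegen>(_cx: &Bx::CodegenCx, scope: Bx::DIScope) {
    // `Bx::DIScope` and `<Bx::CodegenCx as BackendTypes>::DIScope` are the
    // same type here, so either can be used where the other is expected.
    let _also_a_cx_scope: <Bx::CodegenCx as BackendTypes>::DIScope = scope;
}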

View file

@ -9,11 +9,14 @@
// except according to those terms.
use super::backend::Backend;
use super::HasCodegen;
use common::TypeKind;
use mir::place::PlaceRef;
use rustc::ty::layout::TyLayout;
use rustc::ty::layout::{self, Align, Size};
use rustc::ty::Ty;
use rustc::util::nodemap::FxHashMap;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
use std::cell::RefCell;
use syntax::ast;
@ -70,6 +73,10 @@ pub trait DerivedTypeMethods<'tcx>: Backend<'tcx> {
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
fn scalar_pair_element_backend_type<'a>(
@ -80,6 +87,22 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
) -> Self::Type;
}
pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
fn store_fn_arg(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
idx: &mut usize,
dst: PlaceRef<'tcx, Self::Value>,
);
fn store_arg_ty(
&self,
ty: &ArgType<'tcx, Ty<'tcx>>,
val: Self::Value,
dst: PlaceRef<'tcx, Self::Value>,
);
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type;
}
pub trait TypeMethods<'tcx>:
BaseTypeMethods<'tcx> + DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx>
{
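A hedged fragment (the helper is illustrative, assuming the imports above): storing a call's return value now routes through the builder, so each backend supplies its own cast and extension logic instead of the caller reaching for the LLVM-specific ArgTypeExt::store:

fn store_ret<'tcx, Bx: ArgTypeMethods<'tcx>>(
    bx: &Bx,
    ret: &ArgType<'tcx, Ty<'tcx>>,
    val: Bx::Value,
    dst: PlaceRef<'tcx, Bx::Value>,
) {
    // Mirrors how FunctionCx::store_return is rewritten later in this commit.
    bx.store_arg_ty(ret, val, dst)
}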

View file

@ -20,7 +20,6 @@ use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use context::CodegenCx;
use declare;
use glue;
use type_::Type;
use type_of::LayoutLlvmExt;
@ -87,7 +86,7 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu
Some(cx.get_intrinsic(&llvm_name))
}
impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_codegen_llvm/context.rs
@ -274,12 +273,12 @@ impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
},
"volatile_store" => {
let dst = args[0].deref(cx);
args[1].val.volatile_store(&self, dst);
args[1].val.volatile_store(self, dst);
return;
},
"unaligned_volatile_store" => {
let dst = args[0].deref(cx);
args[1].val.unaligned_volatile_store(&self, dst);
args[1].val.unaligned_volatile_store(self, dst);
return;
},
"prefetch_read_data" | "prefetch_write_data" |
@ -451,7 +450,7 @@ impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
},
"discriminant_value" => {
args[0].deref(cx).codegen_get_discr(&self, ret_ty)
args[0].deref(cx).codegen_get_discr(self, ret_ty)
}
name if name.starts_with("simd_") => {
@ -600,7 +599,7 @@ impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
"nontemporal_store" => {
let dst = args[0].deref(cx);
args[1].val.nontemporal_store(&self, dst);
args[1].val.nontemporal_store(self, dst);
return;
}
@ -716,9 +715,10 @@ impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let val = match intr.definition {
intrinsics::IntrinsicDef::Named(name) => {
let f = declare::declare_cfn(cx,
let f = cx.declare_cfn(
name,
cx.type_func(&inputs, outputs));
cx.type_func(&inputs, outputs),
);
self.call(f, &llargs, None)
}
};
@ -745,7 +745,7 @@ impl IntrinsicCallMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
let ptr = self.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
self.store(llval, ptr, result.align);
} else {
OperandRef::from_immediate_or_packed_pair(&self, llval, result.layout)
OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
.val.store(self, result);
}
}
@ -801,11 +801,11 @@ fn try_intrinsic(
local_ptr: &'ll Value,
dest: &'ll Value,
) {
if bx.sess().no_landing_pads() {
if bx.cx().sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
} else if wants_msvc_seh(bx.cx().sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
codegen_gnu_try(bx, cx, func, data, local_ptr, dest);
@ -1003,7 +1003,7 @@ fn gen_fn<'ll, 'tcx>(
hir::Unsafety::Unsafe,
Abi::Rust
));
let llfn = declare::define_internal_fn(cx, name, rust_fn_sig);
let llfn = cx.define_internal_fn(name, rust_fn_sig);
attributes::from_fn_attrs(cx, llfn, None);
let bx = Builder::new_block(cx, llfn, "entry-block");
codegen(bx);
@ -1058,7 +1058,7 @@ fn generic_simd_intrinsic(
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
bx.cx().sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
@ -1229,7 +1229,7 @@ fn generic_simd_intrinsic(
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bx.sess(), span,
bx.cx().sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
name, $($fmt)*));
}
@ -1447,7 +1447,7 @@ fn generic_simd_intrinsic(
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
let f = bx.cx().declare_cfn(&llvm_intrinsic,
bx.cx().type_func(&[
llvm_pointer_vec_ty,
alignment_ty,
@ -1549,7 +1549,7 @@ fn generic_simd_intrinsic(
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
llvm_elem_vec_str, llvm_pointer_vec_str);
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
let f = bx.cx().declare_cfn(&llvm_intrinsic,
bx.cx().type_func(&[llvm_elem_vec_ty,
llvm_pointer_vec_ty,
alignment_ty,

View file

@ -8,16 +8,13 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::{FnType, FnTypeExt};
use abi::FnType;
use callee;
use builder::Builder;
use monomorphize;
use value::Value;
use interfaces::*;
use rustc::ty::{self, Ty};
use rustc::ty::layout::HasTyCtxt;
#[derive(Copy, Clone, Debug)]
pub struct VirtualIndex(u64);
@ -31,15 +28,18 @@ impl<'a, 'tcx: 'a> VirtualIndex {
VirtualIndex(index as u64 + 3)
}
pub fn get_fn(self, bx: &Builder<'a, 'll, 'tcx>,
llvtable: &'ll Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Value {
pub fn get_fn<Bx: BuilderMethods<'a, 'tcx>>(
self,
bx: &Bx,
llvtable: Bx::Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(
llvtable,
bx.cx().type_ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
);
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(

View file

@ -18,13 +18,14 @@ use rustc::mir::{self, Location, TerminatorKind};
use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUseContext};
use rustc::mir::traversal;
use rustc::ty;
use rustc::ty::layout::LayoutOf;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use type_of::LayoutLlvmExt;
use super::FunctionCx;
use value::Value;
use interfaces::*;
pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet<mir::Local> {
pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
fx: &FunctionCx<'a, 'tcx, Bx>
) -> BitSet<mir::Local> {
let mir = fx.mir;
let mut analyzer = LocalAnalyzer::new(fx);
@ -53,8 +54,8 @@ pub fn non_ssa_locals(fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> BitSet<mir:
analyzer.non_ssa_locals
}
struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> {
fx: &'mir FunctionCx<'a, 'll, 'tcx, V>,
struct LocalAnalyzer<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
fx: &'mir FunctionCx<'a, 'tcx, Bx>,
dominators: Dominators<mir::BasicBlock>,
non_ssa_locals: BitSet<mir::Local>,
// The location of the first visited direct assignment to each
@ -62,8 +63,8 @@ struct LocalAnalyzer<'mir, 'a: 'mir, 'll: 'a, 'tcx: 'll, V: 'll> {
first_assignment: IndexVec<mir::Local, Location>
}
impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
fn new(fx: &'mir FunctionCx<'a, 'll, 'tcx, &'ll Value>) -> Self {
impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
fn new(fx: &'mir FunctionCx<'a, 'tcx, Bx>) -> Self {
let invalid_location =
mir::BasicBlock::new(fx.mir.basic_blocks().len()).start_location();
let mut analyzer = LocalAnalyzer {
@ -104,7 +105,8 @@ impl LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
}
}
impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
for LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
fn visit_assign(&mut self,
block: mir::BasicBlock,
place: &mir::Place<'tcx>,
@ -143,7 +145,7 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
_ => None,
};
if let Some((def_id, args)) = check {
if Some(def_id) == self.fx.cx.tcx.lang_items().box_free_fn() {
if Some(def_id) == self.fx.cx.tcx().lang_items().box_free_fn() {
// box_free(x) shares with `drop x` the property that it
// is not guaranteed to be statically dominated by the
// definition of x, so x must always be in an alloca.
@ -175,20 +177,20 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
_ => false
};
if is_consume {
let base_ty = proj.base.ty(self.fx.mir, cx.tcx);
let base_ty = proj.base.ty(self.fx.mir, cx.tcx());
let base_ty = self.fx.monomorphize(&base_ty);
// ZSTs don't require any actual memory access.
let elem_ty = base_ty
.projection_ty(cx.tcx, &proj.elem)
.to_ty(cx.tcx);
.projection_ty(cx.tcx(), &proj.elem)
.to_ty(cx.tcx());
let elem_ty = self.fx.monomorphize(&elem_ty);
if cx.layout_of(elem_ty).is_zst() {
return;
}
if let mir::ProjectionElem::Field(..) = proj.elem {
let layout = cx.layout_of(base_ty.to_ty(cx.tcx));
let layout = cx.layout_of(base_ty.to_ty(cx.tcx()));
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
@ -254,8 +256,8 @@ impl Visitor<'tcx> for LocalAnalyzer<'mir, 'a, 'll, 'tcx, &'ll Value> {
}
PlaceContext::MutatingUse(MutatingUseContext::Drop) => {
let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx);
let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx));
let ty = mir::Place::Local(local).ty(self.fx.mir, self.fx.cx.tcx());
let ty = self.fx.monomorphize(&ty.to_ty(self.fx.cx.tcx()));
// Only need the place if we're actually dropping it.
if self.fx.cx.type_needs_drop(ty) {

View file

@ -8,22 +8,18 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, BasicBlock};
use rustc::middle::lang_items;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
use rustc::mir;
use rustc::mir::interpret::EvalErrorKind;
use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode};
use abi::{Abi, FnType, PassMode};
use rustc_target::abi::call::ArgType;
use base;
use callee;
use builder::{Builder, MemFlags};
use common::{self, IntPredicate};
use builder::MemFlags;
use common::{self, Funclet, IntPredicate};
use meth;
use monomorphize;
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use interfaces::*;
@ -35,8 +31,11 @@ use super::place::PlaceRef;
use super::operand::OperandRef;
use super::operand::OperandValue::{Pair, Ref, Immediate};
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_block(&mut self, bb: mir::BasicBlock) {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_block(
&mut self,
bb: mir::BasicBlock,
) {
let mut bx = self.build_block(bb);
let data = &self.mir[bb];
@ -49,21 +48,35 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
self.codegen_terminator(bx, bb, data.terminator());
}
fn codegen_terminator(&mut self,
mut bx: Builder<'a, 'll, 'tcx>,
fn codegen_terminator(
&mut self,
mut bx: Bx,
bb: mir::BasicBlock,
terminator: &mir::Terminator<'tcx>)
{
terminator: &mir::Terminator<'tcx>
) {
debug!("codegen_terminator: {:?}", terminator);
// Create the cleanup bundle, if needed.
let tcx = self.cx.tcx;
let tcx = self.cx.tcx();
let span = terminator.source_info.span;
let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
let funclet = funclet_bb.and_then(|funclet_bb| self.funclets[funclet_bb].as_ref());
let cleanup_pad = funclet.map(|lp| lp.cleanuppad());
let cleanup_bundle = funclet.map(|l| l.bundle());
// HACK(eddyb) force the right lifetimes, NLL can't figure them out.
fn funclet_closure_factory<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
funclet_bb: Option<mir::BasicBlock>
) -> impl for<'b> Fn(
&'b FunctionCx<'a, 'tcx, Bx>,
) -> Option<&'b Funclet<'static, Bx::Value>> {
move |this| {
match funclet_bb {
Some(funclet_bb) => this.funclets[funclet_bb].as_ref(),
None => None,
}
}
}
let funclet = funclet_closure_factory(funclet_bb);
let cleanup_pad = |this: &Self| funclet(this).map(|lp| lp.cleanuppad());
let lltarget = |this: &mut Self, target: mir::BasicBlock| {
let lltarget = this.blocks[target];
@ -92,7 +105,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
debug!("llblock: creating cleanup trampoline for {:?}", target);
let name = &format!("{:?}_cleanup_trampoline_{:?}", bb, target);
let trampoline = this.new_block(name);
trampoline.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
trampoline.cleanup_ret(cleanup_pad(this).unwrap(), Some(lltarget));
trampoline.llbb()
} else {
lltarget
@ -100,12 +113,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
};
let funclet_br =
|this: &mut Self, bx: Builder<'_, 'll, '_>, target: mir::BasicBlock| {
|this: &mut Self, bx: &Bx, target: mir::BasicBlock| {
let (lltarget, is_cleanupret) = lltarget(this, target);
if is_cleanupret {
// micro-optimization: generate a `ret` rather than a jump
// to a trampoline.
bx.cleanup_ret(cleanup_pad.unwrap(), Some(lltarget));
bx.cleanup_ret(cleanup_pad(this).unwrap(), Some(lltarget));
} else {
bx.br(lltarget);
}
@ -113,11 +126,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let do_call = |
this: &mut Self,
bx: Builder<'a, 'll, 'tcx>,
bx: &Bx,
fn_ty: FnType<'tcx, Ty<'tcx>>,
fn_ptr: &'ll Value,
llargs: &[&'ll Value],
destination: Option<(ReturnDest<'tcx, &'ll Value>, mir::BasicBlock)>,
fn_ptr: Bx::Value,
llargs: &[Bx::Value],
destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
cleanup: Option<mir::BasicBlock>
| {
if let Some(cleanup) = cleanup {
@ -130,8 +143,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
&llargs,
ret_bx,
llblock(this, cleanup),
cleanup_bundle);
fn_ty.apply_attrs_callsite(&bx, invokeret);
funclet(this));
bx.apply_attrs_callsite(&fn_ty, invokeret);
if let Some((ret_dest, target)) = destination {
let ret_bx = this.build_block(target);
@ -139,18 +152,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
this.store_return(&ret_bx, ret_dest, &fn_ty.ret, invokeret);
}
} else {
let llret = bx.call(fn_ptr, &llargs, cleanup_bundle);
fn_ty.apply_attrs_callsite(&bx, llret);
let llret = bx.call(fn_ptr, &llargs, funclet(this));
bx.apply_attrs_callsite(&fn_ty, llret);
if this.mir[bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
// struct, there are "symmetry" issues that cause
// exponential inlining - see issue #41696.
llvm::Attribute::NoInline.apply_callsite(llvm::AttributePlace::Function, llret);
bx.do_not_inline(llret);
}
if let Some((ret_dest, target)) = destination {
this.store_return(&bx, ret_dest, &fn_ty.ret, llret);
this.store_return(bx, ret_dest, &fn_ty.ret, llret);
funclet_br(this, bx, target);
} else {
bx.unreachable();
@ -161,7 +174,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
self.set_debug_loc(&bx, terminator.source_info);
match terminator.kind {
mir::TerminatorKind::Resume => {
if let Some(cleanup_pad) = cleanup_pad {
if let Some(cleanup_pad) = cleanup_pad(self) {
bx.cleanup_ret(cleanup_pad, None);
} else {
let slot = self.get_personality_slot(&bx);
@ -169,13 +182,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let lp1 = bx.load_operand(slot.project_field(&bx, 1)).immediate();
slot.storage_dead(&bx);
if !bx.sess().target.target.options.custom_unwind_resume {
if !bx.cx().sess().target.target.options.custom_unwind_resume {
let mut lp = bx.cx().const_undef(self.landing_pad_type());
lp = bx.insert_value(lp, lp0, 0);
lp = bx.insert_value(lp, lp1, 1);
bx.resume(lp);
} else {
bx.call(bx.cx().eh_unwind_resume(), &[lp0], cleanup_bundle);
bx.call(bx.cx().eh_unwind_resume(), &[lp0], funclet(self));
bx.unreachable();
}
}
@ -189,7 +202,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
mir::TerminatorKind::Goto { target } => {
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
}
mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
@ -207,7 +220,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
bx.cond_br(discr.immediate(), lltrue, llfalse);
}
} else {
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
let switch_llty = bx.cx().immediate_backend_type(
bx.cx().layout_of(switch_ty)
);
let llval = bx.cx().const_uint_big(switch_llty, values[0]);
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
bx.cond_br(cmp, lltrue, llfalse);
@ -217,7 +232,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let switch = bx.switch(discr.immediate(),
llblock(self, *otherwise),
values.len());
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
let switch_llty = bx.cx().immediate_backend_type(
bx.cx().layout_of(switch_ty)
);
for (&value, target) in values.iter().zip(targets) {
let llval = bx.cx().const_uint_big(switch_llty, value);
let llbb = llblock(self, *target);
@ -267,7 +284,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
};
bx.load(
bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))),
bx.pointercast(llslot, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&cast_ty)
)),
self.fn_ty.ret.layout.align)
}
};
@ -281,11 +300,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::TerminatorKind::Drop { ref location, target, unwind } => {
let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
let ty = self.monomorphize(&ty);
let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx, ty);
let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx(), ty);
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
// we don't actually need to drop anything.
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
return
}
@ -300,22 +319,22 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
};
let (drop_fn, fn_ty) = match ty.sty {
ty::Dynamic(..) => {
let sig = drop_fn.fn_sig(bx.tcx());
let sig = bx.tcx().normalize_erasing_late_bound_regions(
let sig = drop_fn.fn_sig(tcx);
let sig = tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
);
let fn_ty = FnType::new_vtable(bx.cx(), sig, &[]);
let fn_ty = bx.cx().new_vtable(sig, &[]);
let vtable = args[1];
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
}
_ => {
(callee::get_fn(bx.cx(), drop_fn),
FnType::of_instance(bx.cx(), &drop_fn))
(bx.cx().get_fn(drop_fn),
bx.cx().fn_type_of_instance(&drop_fn))
}
};
do_call(self, bx, fn_ty, drop_fn, args,
do_call(self, &bx, fn_ty, drop_fn, args,
Some((ReturnDest::Nothing, target)),
unwind);
}
@ -331,7 +350,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// NOTE: Unlike binops, negation doesn't have its own
// checked operation, just a comparison with the minimum
// value, so we have to check for the assert message.
if !bx.cx().check_overflow {
if !bx.cx().check_overflow() {
if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
const_cond = Some(expected);
}
@ -339,7 +358,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Don't codegen the panic block if success if known.
if const_cond == Some(expected) {
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
return;
}
@ -361,7 +380,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
self.set_debug_loc(&bx, terminator.source_info);
// Get the location information.
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
let filename = bx.cx().const_str_slice(filename);
let line = bx.cx().const_u32(loc.line as u32);
@ -373,8 +392,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Put together the arguments to the panic entry point.
let (lang_item, args) = match *msg {
EvalErrorKind::BoundsCheck { ref len, ref index } => {
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();
let len = self.codegen_operand(&bx, len).immediate();
let index = self.codegen_operand(&bx, index).immediate();
let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
let file_line_col = bx.cx().static_addr_of(
@ -406,11 +425,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Obtain the panic entry point.
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_ty = FnType::of_instance(bx.cx(), &instance);
let llfn = callee::get_fn(bx.cx(), instance);
let fn_ty = bx.cx().fn_type_of_instance(&instance);
let llfn = bx.cx().get_fn(instance);
// Codegen the actual panic invoke/call.
do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
do_call(self, &bx, fn_ty, llfn, &args, None, cleanup);
}
mir::TerminatorKind::DropAndReplace { .. } => {
@ -429,7 +448,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let (instance, mut llfn) = match callee.layout.ty.sty {
ty::FnDef(def_id, substs) => {
(Some(ty::Instance::resolve(bx.cx().tcx,
(Some(ty::Instance::resolve(bx.cx().tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs).unwrap()),
@ -460,7 +479,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if let Some(destination_ref) = destination.as_ref() {
let &(ref dest, target) = destination_ref;
self.codegen_transmute(&bx, &args[0], dest);
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
} else {
// If we are trying to transmute to an uninhabited type,
// it is likely there is no allotted destination. In fact,
@ -482,26 +501,26 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let fn_ty = match def {
Some(ty::InstanceDef::Virtual(..)) => {
FnType::new_vtable(bx.cx(), sig, &extra_args)
bx.cx().new_vtable(sig, &extra_args)
}
Some(ty::InstanceDef::DropGlue(_, None)) => {
// empty drop glue - a nop.
let &(_, target) = destination.as_ref().unwrap();
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
return;
}
_ => FnType::new(bx.cx(), sig, &extra_args)
_ => bx.cx().new_fn_type(sig, &extra_args)
};
// emit a panic instead of instantiating an uninhabited type
if (intrinsic == Some("init") || intrinsic == Some("uninit")) &&
fn_ty.ret.layout.abi.is_uninhabited()
{
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
let filename = bx.cx.const_str_slice(filename);
let line = bx.cx.const_u32(loc.line as u32);
let col = bx.cx.const_u32(loc.col.to_usize() as u32 + 1);
let filename = bx.cx().const_str_slice(filename);
let line = bx.cx().const_u32(loc.line as u32);
let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
@ -512,12 +531,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
);
let msg_str = Symbol::intern(&str).as_str();
let msg_str = bx.cx.const_str_slice(msg_str);
let msg_file_line_col = bx.cx.const_struct(
let msg_str = bx.cx().const_str_slice(msg_str);
let msg_file_line_col = bx.cx().const_struct(
&[msg_str, filename, line, col],
false,
);
let msg_file_line_col = bx.cx.static_addr_of(
let msg_file_line_col = bx.cx().static_addr_of(
msg_file_line_col,
align,
Some("panic_loc"),
@ -527,13 +546,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let def_id =
common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_ty = FnType::of_instance(bx.cx, &instance);
let llfn = callee::get_fn(bx.cx, instance);
let fn_ty = bx.cx().fn_type_of_instance(&instance);
let llfn = bx.cx().get_fn(instance);
// Codegen the actual panic invoke/call.
do_call(
self,
bx,
&bx,
fn_ty,
llfn,
&[msg_file_line_col],
@ -560,7 +579,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => {
bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx())))
bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret)))
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => dst.llval,
@ -622,8 +641,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}).collect();
let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx);
&bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx());
bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
@ -631,7 +650,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
if let Some((_, target)) = *destination {
funclet_br(self, bx, target);
funclet_br(self, &bx, target);
} else {
bx.unreachable();
}
@ -719,11 +738,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let fn_ptr = match (llfn, instance) {
(Some(llfn), _) => llfn,
(None, Some(instance)) => callee::get_fn(bx.cx(), instance),
(None, Some(instance)) => bx.cx().get_fn(instance),
_ => span_bug!(span, "no llfn for call"),
};
do_call(self, bx, fn_ty, fn_ptr, &llargs,
do_call(self, &bx, fn_ty, fn_ptr, &llargs,
destination.as_ref().map(|&(_, target)| (ret_dest, target)),
cleanup);
}
@ -734,14 +753,16 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
}
fn codegen_argument(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
op: OperandRef<'tcx, &'ll Value>,
llargs: &mut Vec<&'ll Value>,
arg: &ArgType<'tcx, Ty<'tcx>>) {
fn codegen_argument(
&mut self,
bx: &Bx,
op: OperandRef<'tcx, Bx::Value>,
llargs: &mut Vec<Bx::Value>,
arg: &ArgType<'tcx, Ty<'tcx>>
) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(bx.cx().const_undef(ty.llvm_type(bx.cx())));
llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty)))
}
if arg.is_ignore() {
@ -801,8 +822,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if let PassMode::Cast(ty) = arg.mode {
llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))),
align.min(arg.layout.align));
llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(
bx.cx().cast_backend_type(&ty))
), align.min(arg.layout.align));
} else {
// We can't use `PlaceRef::load` here because the argument
// may have a type we don't treat as immediate, but the ABI
@ -823,11 +845,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
llargs.push(llval);
}
fn codegen_arguments_untupled(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
fn codegen_arguments_untupled(
&mut self,
bx: &Bx,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<&'ll Value>,
args: &[ArgType<'tcx, Ty<'tcx>>]) {
llargs: &mut Vec<Bx::Value>,
args: &[ArgType<'tcx, Ty<'tcx>>]
) {
let tuple = self.codegen_operand(bx, operand);
// Handle both by-ref and immediate tuples.
@ -850,15 +874,15 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
fn get_personality_slot(
&mut self,
bx: &Builder<'a, 'll, 'tcx>
) -> PlaceRef<'tcx, &'ll Value> {
bx: &Bx
) -> PlaceRef<'tcx, Bx::Value> {
let cx = bx.cx();
if let Some(slot) = self.personality_slot {
slot
} else {
let layout = cx.layout_of(cx.tcx.intern_tup(&[
cx.tcx.mk_mut_ptr(cx.tcx.types.u8),
cx.tcx.types.i32
let layout = cx.layout_of(cx.tcx().intern_tup(&[
cx.tcx().mk_mut_ptr(cx.tcx().types.u8),
cx.tcx().types.i32
]));
let slot = PlaceRef::alloca(bx, layout, "personalityslot");
self.personality_slot = Some(slot);
@ -869,7 +893,10 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
/// Return the landingpad wrapper around the given basic block
///
/// No-op in MSVC SEH scheme.
fn landing_pad_to(&mut self, target_bb: mir::BasicBlock) -> &'ll BasicBlock {
fn landing_pad_to(
&mut self,
target_bb: mir::BasicBlock
) -> Bx::BasicBlock {
if let Some(block) = self.landing_pads[target_bb] {
return block;
}
@ -880,7 +907,10 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
landing_pad
}
fn landing_pad_uncached(&mut self, target_bb: &'ll BasicBlock) -> &'ll BasicBlock {
fn landing_pad_uncached(
&mut self,
target_bb: Bx::BasicBlock
) -> Bx::BasicBlock {
if base::wants_msvc_seh(self.cx.sess()) {
span_bug!(self.mir.span, "landing pad was not inserted?")
}
@ -900,34 +930,42 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
bx.llbb()
}
fn landing_pad_type(&self) -> &'ll Type {
fn landing_pad_type(&self) -> Bx::Type {
let cx = self.cx;
cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false)
}
fn unreachable_block(&mut self) -> &'ll BasicBlock {
fn unreachable_block(
&mut self
) -> Bx::BasicBlock {
self.unreachable_block.unwrap_or_else(|| {
let bl = self.new_block("unreachable");
bl.unreachable();
self.unreachable_block = Some(bl.llbb());
bl.llbb()
let bx = self.new_block("unreachable");
bx.unreachable();
self.unreachable_block = Some(bx.llbb());
bx.llbb()
})
}
pub fn new_block(&self, name: &str) -> Builder<'a, 'll, 'tcx> {
Builder::new_block(self.cx, self.llfn, name)
pub fn new_block(&self, name: &str) -> Bx {
Bx::new_block(self.cx, self.llfn, name)
}
pub fn build_block(&self, bb: mir::BasicBlock) -> Builder<'a, 'll, 'tcx> {
let bx = Builder::with_cx(self.cx);
pub fn build_block(
&self,
bb: mir::BasicBlock
) -> Bx {
let bx = Bx::with_cx(self.cx);
bx.position_at_end(self.blocks[bb]);
bx
}
fn make_return_dest(&mut self, bx: &Builder<'a, 'll, 'tcx>,
dest: &mir::Place<'tcx>, fn_ret: &ArgType<'tcx, Ty<'tcx>>,
llargs: &mut Vec<&'ll Value>, is_intrinsic: bool)
-> ReturnDest<'tcx, &'ll Value> {
fn make_return_dest(
&mut self,
bx: &Bx,
dest: &mir::Place<'tcx>,
fn_ret: &ArgType<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
) -> ReturnDest<'tcx, Bx::Value> {
// If the return is ignored, we can just return a do-nothing ReturnDest
if fn_ret.is_ignore() {
return ReturnDest::Nothing;
@ -981,9 +1019,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
}
fn codegen_transmute(&mut self, bx: &Builder<'a, 'll, 'tcx>,
fn codegen_transmute(
&mut self,
bx: &Bx,
src: &mir::Operand<'tcx>,
dst: &mir::Place<'tcx>) {
dst: &mir::Place<'tcx>
) {
if let mir::Place::Local(index) = *dst {
match self.locals[index] {
LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
@ -1009,11 +1050,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
}
fn codegen_transmute_into(&mut self, bx: &Builder<'a, 'll, 'tcx>,
fn codegen_transmute_into(
&mut self,
bx: &Bx,
src: &mir::Operand<'tcx>,
dst: PlaceRef<'tcx, &'ll Value>) {
dst: PlaceRef<'tcx, Bx::Value>
) {
let src = self.codegen_operand(bx, src);
let llty = src.layout.llvm_type(bx.cx());
let llty = bx.cx().backend_type(src.layout);
let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
let align = src.layout.align.min(dst.layout.align);
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
@ -1021,16 +1065,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Stores the return value of a function call into its final location.
fn store_return(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
dest: ReturnDest<'tcx, &'ll Value>,
fn store_return(
&mut self,
bx: &Bx,
dest: ReturnDest<'tcx, Bx::Value>,
ret_ty: &ArgType<'tcx, Ty<'tcx>>,
llval: &'ll Value) {
llval: Bx::Value
) {
use self::ReturnDest::*;
match dest {
Nothing => (),
Store(dst) => ret_ty.store(bx, llval, dst),
Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst),
IndirectOperand(tmp, index) => {
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
@ -1041,7 +1087,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let op = if let PassMode::Cast(_) = ret_ty.mode {
let tmp = PlaceRef::alloca(bx, ret_ty.layout, "tmp_ret");
tmp.storage_live(bx);
ret_ty.store(bx, llval, tmp);
bx.store_arg_ty(&ret_ty, llval, tmp);
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
op

View file

@ -8,82 +8,21 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm;
use rustc::mir::interpret::{ErrorHandled, read_target_uint};
use rustc_mir::const_eval::const_field;
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, Pointer, Scalar, Allocation, ConstValue, AllocType};
use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size, HasTyCtxt};
use builder::Builder;
use common::{CodegenCx};
use type_of::LayoutLlvmExt;
use type_::Type;
use syntax::ast::Mutability;
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size};
use common::CodegenCx;
use syntax::source_map::Span;
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, StaticMethods};
use interfaces::*;
use super::super::callee;
use super::FunctionCx;
pub fn scalar_to_llvm(
cx: &CodegenCx<'ll, '_>,
cv: Scalar,
layout: &layout::Scalar,
llty: &'ll Type,
) -> &'ll Value {
let bitsize = if layout.is_bool() { 1 } else { layout.value.size(cx).bits() };
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(cx).bytes());
cx.const_undef(cx.type_ix(0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(cx).bytes());
let llval = cx.const_uint_big(cx.type_ix(bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
cx.static_bitcast(llval, llty)
}
},
Scalar::Ptr(ptr) => {
let alloc_type = cx.tcx.alloc_map.lock().get(ptr.alloc_id);
let base_addr = match alloc_type {
Some(AllocType::Memory(alloc)) => {
let init = const_alloc_to_llvm(cx, alloc);
if alloc.mutability == Mutability::Mutable {
cx.static_addr_of_mut(init, alloc.align, None)
} else {
cx.static_addr_of(init, alloc.align, None)
}
}
Some(AllocType::Function(fn_instance)) => {
callee::get_fn(cx, fn_instance)
}
Some(AllocType::Static(def_id)) => {
assert!(cx.tcx.is_static(def_id).is_some());
cx.get_static(def_id)
}
None => bug!("missing allocation {:?}", ptr.alloc_id),
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
cx.static_bitcast(base_addr, cx.type_i8p()),
&cx.const_usize(ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
} else {
cx.static_bitcast(llval, llty)
}
}
}
}
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let dl = cx.data_layout();
@ -101,8 +40,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(scalar_to_llvm(
cx,
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
@ -138,10 +76,10 @@ pub fn codegen_static_initializer(
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn fully_evaluate(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
constant: &'tcx ty::Const<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
match constant.val {
@ -161,7 +99,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn eval_mir_constant(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
constant: &mir::Constant<'tcx>,
) -> Result<&'tcx ty::Const<'tcx>, ErrorHandled> {
let c = self.monomorphize(&constant.literal);
@ -171,11 +109,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
/// process constant containing SIMD shuffle indices
pub fn simd_shuffle_indices(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
span: Span,
ty: Ty<'tcx>,
constant: Result<&'tcx ty::Const<'tcx>, ErrorHandled>,
) -> (&'ll Value, Ty<'tcx>) {
) -> (Bx::Value, Ty<'tcx>) {
constant
.and_then(|c| {
let field_ty = c.ty.builtin_index().unwrap();
@ -198,9 +136,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
layout::Abi::Scalar(ref x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
};
Ok(scalar_to_llvm(
bx.cx(), prim, scalar,
layout.immediate_llvm_type(bx.cx()),
Ok(bx.cx().scalar_to_backend(
prim, scalar,
bx.cx().immediate_backend_type(layout),
))
} else {
bug!("simd shuffle field {:?}", field)
@ -216,7 +154,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
);
// We've errored, so we don't have to produce working code.
let ty = self.monomorphize(&ty);
let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
let llty = bx.cx().backend_type(bx.cx().layout_of(ty));
(bx.cx().const_undef(llty), ty)
})
}

View file

@ -9,8 +9,7 @@
// except according to those terms.
use libc::c_uint;
use llvm::{self, BasicBlock};
use llvm::debuginfo::DIScope;
use llvm;
use llvm_util;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
@ -18,13 +17,11 @@ use rustc::mir::{self, Mir};
use rustc::ty::subst::Substs;
use rustc::session::config::DebugInfo;
use base;
use builder::Builder;
use common::{CodegenCx, Funclet};
use debuginfo::{self, declare_local, VariableAccess, VariableKind, FunctionDebugContext};
use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext};
use common::Funclet;
use monomorphize::Instance;
use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode};
use value::Value;
use interfaces::{BuilderMethods, ConstMethods, DerivedTypeMethods};
use abi::{FnType, PassMode};
use interfaces::*;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;
@ -43,16 +40,16 @@ use rustc::mir::traversal;
use self::operand::{OperandRef, OperandValue};
/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
pub struct FunctionCx<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
mir: &'a mir::Mir<'tcx>,
debug_context: FunctionDebugContext<'ll>,
debug_context: FunctionDebugContext<Bx::DIScope>,
llfn: V,
llfn: Bx::Value,
cx: &'a CodegenCx<'ll, 'tcx>,
cx: &'a Bx::CodegenCx,
fn_ty: FnType<'tcx, Ty<'tcx>>,
@ -63,25 +60,24 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
/// don't really care about it very much. Anyway, this value
/// contains an alloca into which the personality is stored and
/// then later loaded when generating the DIVERGE_BLOCK.
personality_slot: Option<PlaceRef<'tcx, V>>,
personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,
/// A `Block` for each MIR `BasicBlock`
blocks: IndexVec<mir::BasicBlock, &'ll BasicBlock>,
blocks: IndexVec<mir::BasicBlock, Bx::BasicBlock>,
/// The funclet status of each basic block
cleanup_kinds: IndexVec<mir::BasicBlock, analyze::CleanupKind>,
/// When targeting MSVC, this stores the cleanup info for each funclet
/// BB. Thisrustup component add rustfmt-preview is initialized as we compute the funclets'
/// head block in RPO.
funclets: &'a IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>,
/// BB. This is initialized as we compute the funclets' head block in RPO.
funclets: IndexVec<mir::BasicBlock, Option<Funclet<'static, Bx::Value>>>,
/// This stores the landing-pad block for a given BB, computed lazily on GNU
/// and eagerly on MSVC.
landing_pads: IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,
landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
/// Cached unreachable block
unreachable_block: Option<&'ll BasicBlock>,
unreachable_block: Option<Bx::BasicBlock>,
/// The location where each MIR arg/var/tmp/ret is stored. This is
/// usually an `PlaceRef` representing an alloca, but not always:
@ -98,20 +94,20 @@ pub struct FunctionCx<'a, 'll: 'a, 'tcx: 'll, V> {
///
/// Avoiding allocs can also be important for certain intrinsics,
/// notably `expect`.
locals: IndexVec<mir::Local, LocalRef<'tcx, V>>,
locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,
/// Debug information for MIR scopes.
scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
scopes: IndexVec<mir::SourceScope, debuginfo::MirDebugScope<Bx::DIScope>>,
/// If this function is being monomorphized, this contains the type substitutions used.
param_substs: &'tcx Substs<'tcx>,
}
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn monomorphize<T>(&self, value: &T) -> T
where T: TypeFoldable<'tcx>
{
self.cx.tcx.subst_and_normalize_erasing_regions(
self.cx.tcx().subst_and_normalize_erasing_regions(
self.param_substs,
ty::ParamEnv::reveal_all(),
value,
@ -120,14 +116,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn set_debug_loc(
&mut self,
bx: &Builder<'_, 'll, '_>,
bx: &Bx,
source_info: mir::SourceInfo
) {
let (scope, span) = self.debug_loc(source_info);
debuginfo::set_source_location(&self.debug_context, bx, scope, span);
bx.set_source_location(&self.debug_context, scope, span);
}
pub fn debug_loc(&mut self, source_info: mir::SourceInfo) -> (Option<&'ll DIScope>, Span) {
pub fn debug_loc(&self, source_info: mir::SourceInfo) -> (Option<Bx::DIScope>, Span) {
// Bail out if debug info emission is not enabled.
match self.debug_context {
FunctionDebugContext::DebugInfoDisabled |
@ -167,16 +163,17 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// corresponding to span's containing source scope. If so, we need to create a DIScope
// "extension" into that file.
fn scope_metadata_for_loc(&self, scope_id: mir::SourceScope, pos: BytePos)
-> Option<&'ll DIScope> {
-> Option<Bx::DIScope> {
let scope_metadata = self.scopes[scope_id].scope_metadata;
if pos < self.scopes[scope_id].file_start_pos ||
pos >= self.scopes[scope_id].file_end_pos {
let cm = self.cx.sess().source_map();
let sm = self.cx.sess().source_map();
let defining_crate = self.debug_context.get_ref(DUMMY_SP).defining_crate;
Some(debuginfo::extend_scope_to_file(self.cx,
Some(self.cx.extend_scope_to_file(
scope_metadata.unwrap(),
&cm.lookup_char_pos(pos).file,
defining_crate))
&sm.lookup_char_pos(pos).file,
defining_crate,
))
} else {
scope_metadata
}
@ -193,11 +190,11 @@ enum LocalRef<'tcx, V> {
Operand(Option<OperandRef<'tcx, V>>),
}
impl LocalRef<'tcx, &'ll Value> {
fn new_operand(
cx: &CodegenCx<'ll, 'tcx>,
impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
fn new_operand<Cx: CodegenMethods<'tcx, Value = V>>(
cx: &Cx,
layout: TyLayout<'tcx>,
) -> LocalRef<'tcx, &'ll Value> {
) -> LocalRef<'tcx, V> {
if layout.is_zst() {
// Zero-size temporaries aren't always initialized, which
// doesn't matter because they don't contain data, but
@ -211,18 +208,18 @@ impl LocalRef<'tcx, &'ll Value> {
///////////////////////////////////////////////////////////////////////////
pub fn codegen_mir(
cx: &'a CodegenCx<'ll, 'tcx>,
llfn: &'ll Value,
pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
llfn: Bx::Value,
mir: &'a Mir<'tcx>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
) {
let fn_ty = FnType::new(cx, sig, &[]);
let fn_ty = cx.new_fn_type(sig, &[]);
debug!("fn_ty: {:?}", fn_ty);
let debug_context =
debuginfo::create_function_debug_context(cx, instance, sig, llfn, mir);
let bx = Builder::new_block(cx, llfn, "start");
cx.create_function_debug_context(instance, sig, llfn, mir);
let bx = Bx::new_block(cx, llfn, "start");
if mir.basic_blocks().iter().any(|bb| bb.is_cleanup) {
bx.set_personality_fn(cx.eh_personality());
@ -232,7 +229,7 @@ pub fn codegen_mir(
// Allocate a `Block` for every basic block, except
// the start block, if nothing loops back to it.
let reentrant_start_block = !mir.predecessors_for(mir::START_BLOCK).is_empty();
let block_bxs: IndexVec<mir::BasicBlock, &'ll BasicBlock> =
let block_bxs: IndexVec<mir::BasicBlock, Bx::BasicBlock> =
mir.basic_blocks().indices().map(|bb| {
if bb == mir::START_BLOCK && !reentrant_start_block {
bx.llbb()
@ -242,7 +239,7 @@ pub fn codegen_mir(
}).collect();
// Compute debuginfo scopes from MIR scopes.
let scopes = debuginfo::create_mir_scopes(cx, mir, &debug_context);
let scopes = cx.create_mir_scopes(mir, &debug_context);
let (landing_pads, funclets) = create_funclets(mir, &bx, &cleanup_kinds, &block_bxs);
let mut fx = FunctionCx {
@ -256,7 +253,7 @@ pub fn codegen_mir(
unreachable_block: None,
cleanup_kinds,
landing_pads,
funclets: &funclets,
funclets,
scopes,
locals: IndexVec::new(),
debug_context,
@ -272,7 +269,7 @@ pub fn codegen_mir(
fx.locals = {
let args = arg_local_refs(&bx, &fx, &fx.scopes, &memory_locals);
let mut allocate_local = |local| {
let allocate_local = |local| {
let decl = &mir.local_decls[local];
let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
@ -280,7 +277,8 @@ pub fn codegen_mir(
if let Some(name) = decl.name {
// User variable
let debug_scope = fx.scopes[decl.visibility_scope];
let dbg = debug_scope.is_valid() && bx.sess().opts.debuginfo == DebugInfo::Full;
let dbg = debug_scope.is_valid() &&
bx.cx().sess().opts.debuginfo == DebugInfo::Full;
if !memory_locals.contains(local) && !dbg {
debug!("alloc: {:?} ({}) -> operand", local, name);
@ -300,7 +298,7 @@ pub fn codegen_mir(
span: decl.source_info.span,
scope: decl.visibility_scope,
});
declare_local(&bx, &fx.debug_context, name, layout.ty, scope.unwrap(),
bx.declare_local(&fx.debug_context, name, layout.ty, scope.unwrap(),
VariableAccess::DirectVariable { alloca: place.llval },
VariableKind::LocalVariable, span);
}
@ -310,7 +308,7 @@ pub fn codegen_mir(
// Temporary or return place
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = llvm::get_param(llfn, 0);
let llretptr = fx.cx.get_param(llfn, 0);
LocalRef::Place(PlaceRef::new_sized(llretptr, layout, layout.align))
} else if memory_locals.contains(local) {
debug!("alloc: {:?} -> place", local);
@ -363,24 +361,22 @@ pub fn codegen_mir(
// Unreachable block
if !visited.contains(bb.index()) {
debug!("codegen_mir: block {:?} was not visited", bb);
unsafe {
llvm::LLVMDeleteBasicBlock(fx.blocks[bb]);
}
bx.delete_basic_block(fx.blocks[bb]);
}
}
}
fn create_funclets(
fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
mir: &'a Mir<'tcx>,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
block_bxs: &IndexVec<mir::BasicBlock, &'ll BasicBlock>)
-> (IndexVec<mir::BasicBlock, Option<&'ll BasicBlock>>,
IndexVec<mir::BasicBlock, Option<Funclet<'ll>>>)
block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>)
-> (IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,
IndexVec<mir::BasicBlock, Option<Funclet<'static, Bx::Value>>>)
{
block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
match *cleanup_kind {
CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {}
_ => return (None, None)
}
@ -439,12 +435,15 @@ fn create_funclets(
/// Produce, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs(
bx: &Builder<'a, 'll, 'tcx>,
fx: &FunctionCx<'a, 'll, 'tcx, &'ll Value>,
scopes: &IndexVec<mir::SourceScope, debuginfo::MirDebugScope<'ll>>,
fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
fx: &FunctionCx<'a, 'tcx, Bx>,
scopes: &IndexVec<
mir::SourceScope,
debuginfo::MirDebugScope<Bx::DIScope>
>,
memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, &'ll Value>> {
) -> Vec<LocalRef<'tcx, Bx::Value>> {
let mir = fx.mir;
let tcx = bx.tcx();
let mut idx = 0;
@ -452,7 +451,7 @@ fn arg_local_refs(
// Get the argument scope, if it exists and if we need it.
let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full {
arg_scope.scope_metadata
} else {
None
@ -486,7 +485,7 @@ fn arg_local_refs(
if arg.pad.is_some() {
llarg_idx += 1;
}
arg.store_fn_arg(bx, &mut llarg_idx, place.project_field(bx, i));
bx.store_fn_arg(arg, &mut llarg_idx, place.project_field(bx, i));
}
// Now that we have one alloca that contains the aggregate value,
@ -495,8 +494,7 @@ fn arg_local_refs(
let variable_access = VariableAccess::DirectVariable {
alloca: place.llval
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg_ty, scope,
@ -525,18 +523,18 @@ fn arg_local_refs(
return local(OperandRef::new_zst(bx.cx(), arg.layout));
}
PassMode::Direct(_) => {
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
return local(
OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
}
PassMode::Pair(..) => {
let a = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(a, &(name.clone() + ".0"));
llarg_idx += 1;
let b = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(b, &(name + ".1"));
llarg_idx += 1;
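For orientation, a sketch of how many backend parameters each argument consumes as `llarg_idx` advances (simplified: the real PassMode has further variants such as Cast, and padding occupies one extra slot).

enum PassModeSketch {
    Ignore,
    Direct,
    Pair,                        // a scalar pair is split into two parameters
    Indirect { unsized_: bool }, // by-ref; unsized adds an extra (len or vtable)
}

fn params_consumed(mode: &PassModeSketch) -> usize {
    match mode {
        PassModeSketch::Ignore => 0,
        PassModeSketch::Direct => 1,
        PassModeSketch::Pair => 2,
        PassModeSketch::Indirect { unsized_: false } => 1,
        PassModeSketch::Indirect { unsized_: true } => 2,
    }
}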
@ -553,16 +551,16 @@ fn arg_local_refs(
// Don't copy an indirect argument to an alloca; the caller
// has already put it in a temporary alloca and given it up.
// FIXME: lifetimes
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
bx.set_value_name(llarg, &name);
llarg_idx += 1;
PlaceRef::new_sized(llarg, arg.layout, arg.layout.align)
} else if arg.is_unsized_indirect() {
// As the storage for the indirect argument lives for the
// whole duration of the call, we just copy the fat pointer.
let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
let llextra = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
llarg_idx += 1;
let indirect_operand = OperandValue::Pair(llarg, llextra);
@ -571,7 +569,7 @@ fn arg_local_refs(
tmp
} else {
let tmp = PlaceRef::alloca(bx, arg.layout, &name);
arg.store_fn_arg(bx, &mut llarg_idx, tmp);
bx.store_fn_arg(arg, &mut llarg_idx, tmp);
tmp
};
arg_scope.map(|scope| {
@ -585,8 +583,7 @@ fn arg_local_refs(
alloca: place.llval
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
arg_decl.name.unwrap_or(keywords::Invalid.name()),
arg.layout.ty,
@ -658,8 +655,7 @@ fn arg_local_refs(
alloca: env_ptr,
address_operations: &ops
};
declare_local(
bx,
bx.declare_local(
&fx.debug_context,
decl.debug_name,
ty,
@ -680,7 +676,7 @@ fn arg_local_refs(
mod analyze;
mod block;
mod constant;
pub mod constant;
pub mod place;
pub mod operand;
mod rvalue;
View file
@ -14,10 +14,7 @@ use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use base;
use common::CodegenCx;
use builder::{Builder, MemFlags};
use value::Value;
use type_of::LayoutLlvmExt;
use builder::MemFlags;
use glue;
use interfaces::*;
@ -25,7 +22,6 @@ use interfaces::*;
use std::fmt;
use super::{FunctionCx, LocalRef};
use super::constant::scalar_to_llvm;
use super::place::PlaceRef;
/// The representation of a Rust value. The enum variant is in fact
@ -61,13 +57,13 @@ pub struct OperandRef<'tcx, V> {
pub layout: TyLayout<'tcx>,
}
impl fmt::Debug for OperandRef<'tcx, &'ll Value> {
impl<V: CodegenObject> fmt::Debug for OperandRef<'tcx, V> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "OperandRef({:?} @ {:?})", self.val, self.layout)
}
}
impl<'tcx, V: CodegenObject> OperandRef<'tcx, V> {
impl<'a, 'tcx: 'a, V: CodegenObject> OperandRef<'tcx, V> {
pub fn new_zst<Cx: CodegenMethods<'tcx, Value = V>>(
cx: &Cx,
layout: TyLayout<'tcx>
@ -78,12 +74,11 @@ impl<'tcx, V: CodegenObject> OperandRef<'tcx, V> {
layout
}
}
}
impl OperandRef<'tcx, &'ll Value> {
pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
val: &'tcx ty::Const<'tcx>)
-> Result<OperandRef<'tcx, &'ll Value>, ErrorHandled> {
pub fn from_const<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
val: &'tcx ty::Const<'tcx>
) -> Result<Self, ErrorHandled> {
let layout = bx.cx().layout_of(val.ty);
if layout.is_zst() {
@ -97,11 +92,10 @@ impl OperandRef<'tcx, &'ll Value> {
layout::Abi::Scalar(ref x) => x,
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
};
let llval = scalar_to_llvm(
bx.cx(),
let llval = bx.cx().scalar_to_backend(
x,
scalar,
layout.immediate_llvm_type(bx.cx()),
bx.cx().immediate_backend_type(layout),
);
OperandValue::Immediate(llval)
},
@ -110,23 +104,20 @@ impl OperandRef<'tcx, &'ll Value> {
layout::Abi::ScalarPair(ref a, ref b) => (a, b),
_ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
};
let a_llval = scalar_to_llvm(
bx.cx(),
let a_llval = bx.cx().scalar_to_backend(
a,
a_scalar,
layout.scalar_pair_element_llvm_type(bx.cx(), 0, true),
bx.cx().scalar_pair_element_backend_type(layout, 0, true),
);
let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true);
let b_llval = scalar_to_llvm(
bx.cx(),
let b_llval = bx.cx().scalar_to_backend(
b,
b_scalar,
b_layout,
bx.cx().scalar_pair_element_backend_type(layout, 1, true),
);
OperandValue::Pair(a_llval, b_llval)
},
ConstValue::ByRef(_, alloc, offset) => {
return Ok(bx.load_operand(PlaceRef::from_const_alloc(bx, layout, alloc, offset)));
return Ok(bx.load_operand(bx.cx().from_const_alloc(layout, alloc, offset)));
},
};
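A toy version of this dispatch (hypothetical types standing in for ConstValue and OperandValue): scalar constants become immediates, scalar pairs become value pairs, and by-ref constants are turned into a memory place and loaded.

enum ConstShape { Scalar(u128), ScalarPair(u128, u128), ByRef }
enum OperandSketch { Immediate(u128), Pair(u128, u128), LoadedFromPlace }

fn lower_const(c: ConstShape) -> OperandSketch {
    match c {
        ConstShape::Scalar(x) => OperandSketch::Immediate(x),
        ConstShape::ScalarPair(a, b) => OperandSketch::Pair(a, b),
        ConstShape::ByRef => OperandSketch::LoadedFromPlace, // via from_const_alloc + load
    }
}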
@ -138,14 +129,17 @@ impl OperandRef<'tcx, &'ll Value> {
/// Asserts that this operand refers to a scalar and returns
/// a reference to its value.
pub fn immediate(self) -> &'ll Value {
pub fn immediate(self) -> V {
match self.val {
OperandValue::Immediate(s) => s,
_ => bug!("not immediate: {:?}", self)
}
}
pub fn deref(self, cx: &CodegenCx<'ll, 'tcx>) -> PlaceRef<'tcx, &'ll Value> {
pub fn deref<Cx: CodegenMethods<'tcx, Value = V>>(
self,
cx: &Cx
) -> PlaceRef<'tcx, V> {
let projected_ty = self.layout.ty.builtin_deref(true)
.unwrap_or_else(|| bug!("deref of non-pointer {:?}", self)).ty;
let (llptr, llextra) = match self.val {
@ -164,9 +158,12 @@ impl OperandRef<'tcx, &'ll Value> {
/// If this operand is a `Pair`, we return an aggregate with the two values.
/// For other cases, see `immediate`.
pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value {
pub fn immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx
) -> V {
if let OperandValue::Pair(a, b) = self.val {
let llty = self.layout.llvm_type(bx.cx());
let llty = bx.cx().backend_type(self.layout);
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
self, llty);
// Reconstruct the immediate aggregate.
@ -180,10 +177,11 @@ impl OperandRef<'tcx, &'ll Value> {
}
/// If the type is a pair, we return a `Pair`, otherwise, an `Immediate`.
pub fn from_immediate_or_packed_pair(bx: &Builder<'a, 'll, 'tcx>,
llval: &'ll Value,
layout: TyLayout<'tcx>)
-> OperandRef<'tcx, &'ll Value> {
pub fn from_immediate_or_packed_pair<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
llval: V,
layout: TyLayout<'tcx>
) -> Self {
let val = if let layout::Abi::ScalarPair(ref a, ref b) = layout.abi {
debug!("Operand::from_immediate_or_packed_pair: unpacking {:?} @ {:?}",
llval, layout);
@ -198,11 +196,11 @@ impl OperandRef<'tcx, &'ll Value> {
OperandRef { val, layout }
}
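Illustrative only (a Rust tuple stands in for the backend's two-field aggregate): the round-trip that immediate_or_packed_pair and from_immediate_or_packed_pair implement with insert_value/extract_value.

enum PairOperand<V> { Immediate(V), Pair(V, V) }

fn to_packed<V>(op: PairOperand<V>) -> Result<(V, V), V> {
    match op {
        PairOperand::Pair(a, b) => Ok((a, b)), // insert_value at 0, then at 1
        PairOperand::Immediate(v) => Err(v),   // already a single immediate
    }
}

fn from_packed<V>(agg: (V, V)) -> PairOperand<V> {
    PairOperand::Pair(agg.0, agg.1)            // extract_value at 0 and 1
}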
pub fn extract_field(
pub fn extract_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Builder<'a, 'll, 'tcx>,
i: usize,
) -> OperandRef<'tcx, &'ll Value> {
bx: &Bx,
i: usize
) -> Self {
let field = self.layout.field(bx.cx(), i);
let offset = self.layout.fields.offset(i);
@ -244,11 +242,11 @@ impl OperandRef<'tcx, &'ll Value> {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
match val {
OperandValue::Immediate(ref mut llval) => {
*llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx()));
*llval = bx.bitcast(*llval, bx.cx().immediate_backend_type(field));
}
OperandValue::Pair(ref mut a, ref mut b) => {
*a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true));
*b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true));
*a = bx.bitcast(*a, bx.cx().scalar_pair_element_backend_type(field, 0, true));
*b = bx.bitcast(*b, bx.cx().scalar_pair_element_backend_type(field, 1, true));
}
OperandValue::Ref(..) => bug!()
}
@ -264,46 +262,39 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::empty());
}
}
impl OperandValue<&'ll Value> {
pub fn volatile_store(
pub fn volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE);
}
pub fn unaligned_volatile_store(
pub fn unaligned_volatile_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>,
bx: &Bx,
dest: PlaceRef<'tcx, V>,
) {
self.store_with_flags(bx, dest, MemFlags::VOLATILE | MemFlags::UNALIGNED);
}
}
impl<'a, 'll: 'a, 'tcx: 'll> OperandValue<&'ll Value> {
pub fn nontemporal_store(
pub fn nontemporal_store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
dest: PlaceRef<'tcx, V>
) {
self.store_with_flags(bx, dest, MemFlags::NONTEMPORAL);
}
}
impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
fn store_with_flags<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Bx,
dest: PlaceRef<'tcx, Bx::Value>,
dest: PlaceRef<'tcx, V>,
flags: MemFlags,
) {
debug!("OperandRef::store: operand={:?}, dest={:?}", self, dest);
@ -333,13 +324,10 @@ impl<'a, 'tcx: 'a, V: CodegenObject> OperandValue<V> {
}
}
}
}
impl OperandValue<&'ll Value> {
pub fn store_unsized(
pub fn store_unsized<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
indirect_dest: PlaceRef<'tcx, &'ll Value>
bx: &Bx,
indirect_dest: PlaceRef<'tcx, V>
) {
debug!("OperandRef::store_unsized: operand={:?}, indirect_dest={:?}", self, indirect_dest);
let flags = MemFlags::empty();
@ -370,12 +358,12 @@ impl OperandValue<&'ll Value> {
}
}
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
fn maybe_codegen_consume_direct(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> Option<OperandRef<'tcx, &'ll Value>>
{
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn maybe_codegen_consume_direct(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> Option<OperandRef<'tcx, Bx::Value>> {
debug!("maybe_codegen_consume_direct(place={:?})", place);
// watch out for locals that do not have an
@ -419,11 +407,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
None
}
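The strategy codegen_consume implements below, in miniature: prefer reading the local directly as an operand, and only fall back to materializing a place and loading from it.

fn consume<V>(direct: Option<V>, load_from_place: impl FnOnce() -> V) -> V {
    direct.unwrap_or_else(load_from_place) // the load is the slow path
}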
pub fn codegen_consume(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> OperandRef<'tcx, &'ll Value>
{
pub fn codegen_consume(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_consume(place={:?})", place);
let ty = self.monomorphized_place_ty(place);
@ -443,11 +431,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
bx.load_operand(self.codegen_place(bx, place))
}
pub fn codegen_operand(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
operand: &mir::Operand<'tcx>)
-> OperandRef<'tcx, &'ll Value>
{
pub fn codegen_operand(
&mut self,
bx: &Bx,
operand: &mir::Operand<'tcx>
) -> OperandRef<'tcx, Bx::Value> {
debug!("codegen_operand(operand={:?})", operand);
match *operand {
@ -475,7 +463,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
bx.load_operand(PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
bx.cx().const_undef(bx.cx().type_ptr_to(bx.cx().backend_type(layout))),
layout,
layout.align,
))
View file
@ -8,17 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::LLVMConstInBoundsGEP;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use builder::{Builder, MemFlags};
use common::{CodegenCx, IntPredicate};
use builder::MemFlags;
use common::IntPredicate;
use type_of::LayoutLlvmExt;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;
use interfaces::*;
@ -40,12 +37,12 @@ pub struct PlaceRef<'tcx, V> {
pub align: Align,
}
impl PlaceRef<'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn new_sized(
llval: &'ll Value,
llval: V,
layout: TyLayout<'tcx>,
align: Align,
) -> PlaceRef<'tcx, &'ll Value> {
) -> PlaceRef<'tcx, V> {
assert!(!layout.is_unsized());
PlaceRef {
llval,
@ -55,46 +52,34 @@ impl PlaceRef<'tcx, &'ll Value> {
}
}
pub fn from_const_alloc(
bx: &Builder<'a, 'll, 'tcx>,
pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
layout: TyLayout<'tcx>,
alloc: &mir::interpret::Allocation,
offset: Size,
) -> PlaceRef<'tcx, &'ll Value> {
let init = const_alloc_to_llvm(bx.cx(), alloc);
let base_addr = bx.cx().static_addr_of(init, layout.align, None);
let llval = unsafe { LLVMConstInBoundsGEP(
bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()),
&bx.cx().const_usize(offset.bytes()),
1,
)};
let llval = bx.cx().static_bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
PlaceRef::new_sized(llval, layout, alloc.align)
}
pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
-> PlaceRef<'tcx, &'ll Value> {
name: &str
) -> Self {
debug!("alloca({:?}: {:?})", name, layout);
assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
Self::new_sized(tmp, layout, layout.align)
}
/// Returns a place for an indirect reference to an unsized place.
pub fn alloca_unsized_indirect(
bx: &Builder<'a, 'll, 'tcx>,
pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
bx: &Bx,
layout: TyLayout<'tcx>,
name: &str,
) -> PlaceRef<'tcx, &'ll Value> {
) -> Self {
debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
let ptr_layout = bx.cx().layout_of(ptr_ty);
Self::alloca(bx, ptr_layout, name)
}
pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
&self,
cx: &Cx
) -> V {
if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
if self.layout.is_unsized() {
assert_eq!(count, 0);
@ -114,7 +99,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self, bx: &Bx,
ix: usize,
) -> PlaceRef<'tcx, Bx::Value> {
) -> Self {
let cx = bx.cx();
let field = self.layout.field(cx, ix);
let offset = self.layout.fields.offset(ix);
@ -216,17 +201,14 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
align: effective_field_align,
}
}
}
impl PlaceRef<'tcx, &'ll Value> {
/// Obtain the actual discriminant of a value.
pub fn codegen_get_discr(
pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
cast_to: Ty<'tcx>
) -> &'ll Value {
let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
) -> V {
let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
if self.layout.abi.is_uninhabited() {
return bx.cx().const_undef(cast_to);
}
@ -234,7 +216,7 @@ impl PlaceRef<'tcx, &'ll Value> {
layout::Variants::Single { index } => {
let discr_val = self.layout.ty.ty_adt_def().map_or(
index.as_u32() as u128,
|def| def.discriminant_for_variant(bx.cx().tcx, index).val);
|def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
return bx.cx().const_uint_big(cast_to, discr_val);
}
layout::Variants::Tagged { .. } |
@ -262,7 +244,7 @@ impl PlaceRef<'tcx, &'ll Value> {
niche_start,
..
} => {
let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
let niche_llty = bx.cx().immediate_backend_type(discr.layout);
if niche_variants.start() == niche_variants.end() {
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_start == 0 {
@ -290,7 +272,11 @@ impl PlaceRef<'tcx, &'ll Value> {
/// Set the discriminant for a new value of the given case of the given
/// representation.
pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
variant_index: VariantIdx
) {
if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
return;
}
@ -304,7 +290,7 @@ impl PlaceRef<'tcx, &'ll Value> {
.discriminant_for_variant(bx.tcx(), variant_index)
.val;
bx.store(
bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
ptr.llval,
ptr.align);
}
@ -315,8 +301,8 @@ impl PlaceRef<'tcx, &'ll Value> {
..
} => {
if variant_index != dataful_variant {
if bx.sess().target.target.arch == "arm" ||
bx.sess().target.target.arch == "aarch64" {
if bx.cx().sess().target.target.arch == "arm" ||
bx.cx().sess().target.target.arch == "aarch64" {
// Issue #34427: as a workaround for an LLVM bug on ARM,
// zero the memory with memset before writing the niche value.
let fill_byte = bx.cx().const_u8(0);
@ -326,7 +312,7 @@ impl PlaceRef<'tcx, &'ll Value> {
}
let niche = self.project_field(bx, 0);
let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
let niche_llty = bx.cx().immediate_backend_type(niche.layout);
let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
let niche_value = (niche_value as u128)
.wrapping_add(niche_start);
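The niche encoding written out as plain arithmetic (the same expression as above; the wrapping add is what lets a niche near the top of the value range wrap around into low values):

fn encode_niche(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u128 {
    debug_assert!(variant_index >= niche_variants_start);
    ((variant_index - niche_variants_start) as u128).wrapping_add(niche_start)
}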
@ -343,8 +329,11 @@ impl PlaceRef<'tcx, &'ll Value> {
}
}
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-> PlaceRef<'tcx, &'ll Value> {
pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
llindex: V
) -> Self {
PlaceRef {
llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
llextra: None,
@ -353,36 +342,40 @@ impl PlaceRef<'tcx, &'ll Value> {
}
}
pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
-> PlaceRef<'tcx, &'ll Value> {
pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
&self,
bx: &Bx,
variant_index: VariantIdx
) -> Self {
let mut downcast = *self;
downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
// Cast to the appropriate variant struct type.
let variant_ty = downcast.layout.llvm_type(bx.cx());
let variant_ty = bx.cx().backend_type(downcast.layout);
downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
downcast
}
pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
bx.lifetime_start(self.llval, self.layout.size);
}
pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
bx.lifetime_end(self.llval, self.layout.size);
}
}
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_place(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
place: &mir::Place<'tcx>)
-> PlaceRef<'tcx, &'ll Value> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_place(
&mut self,
bx: &Bx,
place: &mir::Place<'tcx>
) -> PlaceRef<'tcx, Bx::Value> {
debug!("codegen_place(place={:?})", place);
let cx = bx.cx();
let tcx = cx.tcx;
let tcx = cx.tcx();
if let mir::Place::Local(index) = *place {
match self.locals[index] {
@ -390,7 +383,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
return place;
}
LocalRef::UnsizedPlace(place) => {
return bx.load_operand(place).deref(&cx);
return bx.load_operand(place).deref(cx);
}
LocalRef::Operand(..) => {
bug!("using operand local {:?} as place", place);
@ -410,7 +403,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
match bx.tcx().const_eval(param_env.and(cid)) {
Ok(val) => match val.val {
mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
PlaceRef::from_const_alloc(bx, layout, alloc, offset)
bx.cx().from_const_alloc(layout, alloc, offset)
}
_ => bug!("promoteds should have an allocation: {:?}", val),
},
@ -422,7 +415,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
bx.cx().type_ptr_to(bx.cx().backend_type(layout))
);
PlaceRef::new_sized(llval, layout, layout.align)
}
@ -471,8 +464,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let mut subslice = cg_base.project_index(bx,
bx.cx().const_usize(from as u64));
let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
.projection_ty(tcx, &projection.elem)
.to_ty(bx.tcx());
.projection_ty(tcx, &projection.elem).to_ty(tcx);
subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
if subslice.layout.is_unsized() {
@ -483,7 +475,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Cast the place pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bx.pointercast(subslice.llval,
bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));
bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
subslice
}
@ -498,7 +490,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
let tcx = self.cx.tcx;
let tcx = self.cx.tcx();
let place_ty = place.ty(self.mir, tcx);
self.monomorphize(&place_ty.to_ty(tcx))
}
View file
@ -17,13 +17,11 @@ use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};
use base;
use builder::{Builder, MemFlags};
use builder::MemFlags;
use callee;
use common::{self, IntPredicate, RealPredicate};
use monomorphize;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::*;
@ -31,13 +29,13 @@ use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
use super::place::PlaceRef;
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_rvalue(&mut self,
bx: Builder<'a, 'll, 'tcx>,
dest: PlaceRef<'tcx, &'ll Value>,
rvalue: &mir::Rvalue<'tcx>)
-> Builder<'a, 'll, 'tcx>
{
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_rvalue(
&mut self,
bx: Bx,
dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>
) -> Bx {
debug!("codegen_rvalue(dest.llval={:?}, rvalue={:?})",
dest.llval, rvalue);
@ -176,12 +174,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
}
pub fn codegen_rvalue_unsized(&mut self,
bx: Builder<'a, 'll, 'tcx>,
indirect_dest: PlaceRef<'tcx, &'ll Value>,
rvalue: &mir::Rvalue<'tcx>)
-> Builder<'a, 'll, 'tcx>
{
pub fn codegen_rvalue_unsized(
&mut self,
bx: Bx,
indirect_dest: PlaceRef<'tcx, Bx::Value>,
rvalue: &mir::Rvalue<'tcx>,
) -> Bx {
debug!("codegen_rvalue_unsized(indirect_dest.llval={:?}, rvalue={:?})",
indirect_dest.llval, rvalue);
@ -198,9 +196,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_rvalue_operand(
&mut self,
bx: Builder<'a, 'll, 'tcx>,
bx: Bx,
rvalue: &mir::Rvalue<'tcx>
) -> (Builder<'a, 'll, 'tcx>, OperandRef<'tcx, &'ll Value>) {
) -> (Bx, OperandRef<'tcx, Bx::Value>) {
assert!(self.rvalue_creates_operand(rvalue), "cannot codegen {:?} to operand", rvalue);
match *rvalue {
@ -213,7 +211,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::CastKind::ReifyFnPointer => {
match operand.layout.ty.sty {
ty::FnDef(def_id, substs) => {
if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") {
if bx.cx().tcx().has_attr(def_id, "rustc_args_required_const") {
bug!("reifying a fn ptr that requires \
const arguments");
}
@ -229,8 +227,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
match operand.layout.ty.sty {
ty::Closure(def_id, substs) => {
let instance = monomorphize::resolve_closure(
bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(callee::get_fn(bx.cx(), instance))
bx.cx().tcx(), def_id, substs, ty::ClosureKind::FnOnce);
OperandValue::Immediate(bx.cx().get_fn(instance))
}
_ => {
bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@ -253,7 +251,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// HACK(eddyb) have to bitcast pointers
// until LLVM removes pointee types.
let lldata = bx.pointercast(lldata,
cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
bx.cx().scalar_pair_element_backend_type(cast, 0, true));
OperandValue::Pair(lldata, llextra)
}
OperandValue::Immediate(lldata) => {
@ -272,12 +270,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if cast.is_llvm_scalar_pair() {
let data_cast = bx.pointercast(data_ptr,
cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
bx.cx().scalar_pair_element_backend_type(cast, 0, true));
OperandValue::Pair(data_cast, meta)
} else { // cast to thin-ptr
// Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
// pointer-cast of that pointer to desired pointer type.
let llcast_ty = cast.immediate_llvm_type(bx.cx());
let llcast_ty = bx.cx().immediate_backend_type(cast);
let llval = bx.pointercast(data_ptr, llcast_ty);
OperandValue::Immediate(llval)
}
@ -287,7 +285,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
mir::CastKind::Misc => {
assert!(cast.is_llvm_immediate());
let ll_t_out = cast.immediate_llvm_type(bx.cx());
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
return (bx, OperandRef {
@ -298,12 +296,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let r_t_in = CastTy::from_ty(operand.layout.ty)
.expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
let ll_t_in = operand.layout.immediate_llvm_type(bx.cx());
let ll_t_in = bx.cx().immediate_backend_type(operand.layout);
match operand.layout.variants {
layout::Variants::Single { index } => {
if let Some(def) = operand.layout.ty.ty_adt_def() {
let discr_val = def
.discriminant_for_variant(bx.cx().tcx, index)
.discriminant_for_variant(bx.cx().tcx(), index)
.val;
let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
return (bx, OperandRef {
@ -365,7 +363,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
(CastTy::FnPtr, CastTy::Int(_)) =>
bx.ptrtoint(llval, ll_t_out),
(CastTy::Int(_), CastTy::Ptr(_)) => {
let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed);
let usize_llval = bx.intcast(llval, bx.cx().type_isize(), signed);
bx.inttoptr(usize_llval, ll_t_out)
}
(CastTy::Int(_), CastTy::Float) =>
@ -399,8 +397,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
};
(bx, OperandRef {
val,
layout: self.cx.layout_of(self.cx.tcx.mk_ref(
self.cx.tcx.types.re_erased,
layout: self.cx.layout_of(self.cx.tcx().mk_ref(
self.cx.tcx().types.re_erased,
ty::TypeAndMut { ty, mutbl: bk.to_mutbl_lossy() }
)),
})
@ -487,8 +485,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
assert!(bx.cx().type_is_sized(ty));
let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes());
let tcx = self.cx.tcx;
let val = bx.cx().const_usize(bx.cx().layout_of(ty).size.bytes());
let tcx = self.cx.tcx();
(bx, OperandRef {
val: OperandValue::Immediate(val),
layout: self.cx.layout_of(tcx.types.usize),
@ -497,21 +495,21 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
let (size, align) = bx.cx().size_and_align_of(content_ty);
let (size, align) = bx.cx().layout_of(content_ty).size_and_align();
let llsize = bx.cx().const_usize(size.bytes());
let llalign = bx.cx().const_usize(align.abi());
let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = box_layout.llvm_type(bx.cx());
let llty_ptr = bx.cx().backend_type(box_layout);
// Allocate space:
let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
Ok(id) => id,
Err(s) => {
bx.sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
bx.cx().sess().fatal(&format!("allocation of `{}` {}", box_layout.ty, s));
}
};
let instance = ty::Instance::mono(bx.tcx(), def_id);
let r = callee::get_fn(bx.cx(), instance);
let r = bx.cx().get_fn(instance);
let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);
let operand = OperandRef {
@ -528,7 +526,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(self.mir, self.cx.tcx);
let ty = rvalue.ty(self.mir, self.cx.tcx());
(bx, OperandRef::new_zst(self.cx,
self.cx.layout_of(self.monomorphize(&ty))))
}
@ -537,32 +535,32 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
fn evaluate_array_len(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
place: &mir::Place<'tcx>,
) -> &'ll Value {
) -> Bx::Value {
// ZSTs are passed as operands and require special handling
// because codegen_place() panics if a Local is an operand.
if let mir::Place::Local(index) = *place {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.sty {
let n = n.unwrap_usize(bx.cx().tcx);
let n = n.unwrap_usize(bx.cx().tcx());
return bx.cx().const_usize(n);
}
}
}
// Use the common length calculation for non-zero-sized types.
let cg_value = self.codegen_place(&bx, place);
let cg_value = self.codegen_place(bx, place);
return cg_value.len(bx.cx());
}
pub fn codegen_scalar_binop(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
op: mir::BinOp,
lhs: &'ll Value,
rhs: &'ll Value,
lhs: Bx::Value,
rhs: Bx::Value,
input_ty: Ty<'tcx>,
) -> &'ll Value {
) -> Bx::Value {
let is_float = input_ty.is_fp();
let is_signed = input_ty.is_signed();
let is_unit = input_ty.is_unit();
@ -625,14 +623,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_fat_ptr_binop(
&mut self,
bx: &Builder<'a, 'll, 'tcx>,
bx: &Bx,
op: mir::BinOp,
lhs_addr: &'ll Value,
lhs_extra: &'ll Value,
rhs_addr: &'ll Value,
rhs_extra: &'ll Value,
lhs_addr: Bx::Value,
lhs_extra: Bx::Value,
rhs_addr: Bx::Value,
rhs_extra: Bx::Value,
_input_ty: Ty<'tcx>,
) -> &'ll Value {
) -> Bx::Value {
match op {
mir::BinOp::Eq => {
bx.and(
@ -671,17 +669,19 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
}
pub fn codegen_scalar_checked_binop(&mut self,
bx: &Builder<'a, 'll, 'tcx>,
pub fn codegen_scalar_checked_binop(
&mut self,
bx: &Bx,
op: mir::BinOp,
lhs: &'ll Value,
rhs: &'ll Value,
input_ty: Ty<'tcx>) -> OperandValue<&'ll Value> {
lhs: Bx::Value,
rhs: Bx::Value,
input_ty: Ty<'tcx>
) -> OperandValue<Bx::Value> {
// This case can currently arise only from functions marked
// with #[rustc_inherit_overflow_checks] and inlined from
// another crate (mostly core::num generic/#[inline] fns),
// when the current crate doesn't use overflow checks.
if !bx.cx().check_overflow {
if !bx.cx().check_overflow() {
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
return OperandValue::Pair(val, bx.cx().const_bool(false));
}
@ -704,7 +704,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::BinOp::Shl | mir::BinOp::Shr => {
let lhs_llty = bx.cx().val_ty(lhs);
let rhs_llty = bx.cx().val_ty(rhs);
let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
let invert_mask = common::shift_mask_val(bx, lhs_llty, rhs_llty, true);
let outer_bits = bx.and(rhs, invert_mask);
let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
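The mask trick in plain Rust, assuming (as the shift_mask_val helper does) that the left-hand side's bit width is a power of two: a shift amount overflows exactly when it has bits set outside width - 1.

fn shift_overflows(rhs: u32, lhs_width: u32) -> bool {
    debug_assert!(lhs_width.is_power_of_two());
    rhs & !(lhs_width - 1) != 0 // any "outer" bit set means the shift overflows
}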
@ -719,7 +719,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
OperandValue::Pair(val, of)
}
}
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn rvalue_creates_operand(&self, rvalue: &mir::Rvalue<'tcx>) -> bool {
match *rvalue {
mir::Rvalue::Ref(..) |
@ -734,7 +736,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(self.mir, self.cx.tcx);
let ty = rvalue.ty(self.mir, self.cx.tcx());
let ty = self.monomorphize(&ty);
self.cx.layout_of(ty).is_zst()
}
@ -749,11 +751,11 @@ enum OverflowOp {
Add, Sub, Mul
}
fn get_overflow_intrinsic(
fn get_overflow_intrinsic<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
oop: OverflowOp,
bx: &Builder<'_, 'll, '_>,
bx: &Bx,
ty: Ty
) -> &'ll Value {
) -> Bx::Value {
use syntax::ast::IntTy::*;
use syntax::ast::UintTy::*;
use rustc::ty::{Int, Uint};
@ -818,11 +820,13 @@ fn get_overflow_intrinsic(
bx.cx().get_intrinsic(&name)
}
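The match arms elided here all follow one naming scheme, which the snippet below reconstructs (e.g. it yields "llvm.sadd.with.overflow.i32" for signed Add on i32):

fn overflow_intrinsic_name(signed: bool, op: &str, bits: u32) -> String {
    // op is "add", "sub", or "mul"; signedness selects the s/u prefix.
    format!("llvm.{}{}.with.overflow.i{}", if signed { "s" } else { "u" }, op, bits)
}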
fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
fn cast_int_to_float<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
signed: bool,
x: &'ll Value,
int_ty: &'ll Type,
float_ty: &'ll Type) -> &'ll Value {
x: Bx::Value,
int_ty: Bx::Type,
float_ty: Bx::Type
) -> Bx::Value {
// Most integer types, even i128, fit into [-f32::MAX, f32::MAX] after rounding.
// It's only u128 -> f32 that can cause overflows (i.e., should yield infinity).
// LLVM's uitofp produces undef in that case, so we check for it manually.
@ -850,18 +854,20 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
}
}
fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
fn cast_float_to_int<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
signed: bool,
x: &'ll Value,
float_ty: &'ll Type,
int_ty: &'ll Type) -> &'ll Value {
x: Bx::Value,
float_ty: Bx::Type,
int_ty: Bx::Type
) -> Bx::Value {
let fptosui_result = if signed {
bx.fptosi(x, int_ty)
} else {
bx.fptoui(x, int_ty)
};
if !bx.sess().opts.debugging_opts.saturating_float_casts {
if !bx.cx().sess().opts.debugging_opts.saturating_float_casts {
return fptosui_result;
}
// LLVM's fpto[su]i returns undef when the input x is infinite, NaN, or does not fit into the
@ -883,7 +889,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
// On the other hand, f_max works even if int_ty::MAX is greater than float_ty::MAX. Because
// we're rounding towards zero, we just get float_ty::MAX (which is always an integer).
// This already happens today with u128::MAX = 2^128 - 1 > f32::MAX.
let int_max = |signed: bool, int_ty: &'ll Type| -> u128 {
let int_max = |signed: bool, int_ty: Bx::Type| -> u128 {
let shift_amount = 128 - bx.cx().int_width(int_ty);
if signed {
i128::MAX as u128 >> shift_amount
@ -891,7 +897,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
u128::MAX >> shift_amount
}
};
let int_min = |signed: bool, int_ty: &'ll Type| -> i128 {
let int_min = |signed: bool, int_ty: Bx::Type| -> i128 {
if signed {
i128::MIN >> (128 - bx.cx().int_width(int_ty))
} else {
@ -899,14 +905,16 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
}
};
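The same bound computations as the two closures, written against a plain bit width rather than a backend type (e.g. int_max(true, 32) is i32::MAX):

fn int_max(signed: bool, bits: u32) -> u128 {
    let shift = 128 - bits;
    if signed { i128::MAX as u128 >> shift } else { u128::MAX >> shift }
}

fn int_min(signed: bool, bits: u32) -> i128 {
    if signed { i128::MIN >> (128 - bits) } else { 0 }
}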
let compute_clamp_bounds_single = |signed: bool, int_ty: &'ll Type| -> (u128, u128) {
let compute_clamp_bounds_single =
|signed: bool, int_ty: Bx::Type| -> (u128, u128) {
let rounded_min = ieee::Single::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = ieee::Single::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
assert!(rounded_max.value.is_finite());
(rounded_min.value.to_bits(), rounded_max.value.to_bits())
};
let compute_clamp_bounds_double = |signed: bool, int_ty: &'ll Type| -> (u128, u128) {
let compute_clamp_bounds_double =
|signed: bool, int_ty: Bx::Type| -> (u128, u128) {
let rounded_min = ieee::Double::from_i128_r(int_min(signed, int_ty), Round::TowardZero);
assert_eq!(rounded_min.status, Status::OK);
let rounded_max = ieee::Double::from_u128_r(int_max(signed, int_ty), Round::TowardZero);
View file
@ -10,20 +10,18 @@
use rustc::mir;
use asm;
use builder::Builder;
use interfaces::BuilderMethods;
use super::FunctionCx;
use super::LocalRef;
use super::OperandValue;
use value::Value;
use interfaces::*;
impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
pub fn codegen_statement(&mut self,
bx: Builder<'a, 'll, 'tcx>,
statement: &mir::Statement<'tcx>)
-> Builder<'a, 'll, 'tcx> {
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
pub fn codegen_statement(
&mut self,
bx: Bx,
statement: &mir::Statement<'tcx>
) -> Bx {
debug!("codegen_statement(statement={:?})", statement);
self.set_debug_loc(&bx, statement.source_info);
@ -91,16 +89,16 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if let OperandValue::Immediate(_) = op.val {
acc.push(op.immediate());
} else {
span_err!(bx.sess(), span.to_owned(), E0669,
span_err!(bx.cx().sess(), span.to_owned(), E0669,
"invalid value for constraint in inline assembly");
}
acc
});
if input_vals.len() == inputs.len() {
let res = asm::codegen_inline_asm(&bx, asm, outputs, input_vals);
let res = bx.codegen_inline_asm(asm, outputs, input_vals);
if !res {
span_err!(bx.sess(), statement.source_info.span, E0668,
span_err!(bx.cx().sess(), statement.source_info.span, E0668,
"malformed inline assembly");
}
}
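The shape of that fold, standalone: keep the immediate inputs, and only proceed when none were rejected (an error has already been reported for each rejected one).

fn collect_immediates<V: Copy>(inputs: &[Option<V>]) -> Option<Vec<V>> {
    let vals: Vec<V> = inputs.iter().filter_map(|v| *v).collect();
    if vals.len() == inputs.len() { Some(vals) } else { None }
}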
View file
@ -14,11 +14,9 @@
//! item-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
use asm;
use attributes;
use base;
use context::CodegenCx;
use declare;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
@ -29,7 +27,7 @@ use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::TypeFoldable;
use rustc::ty::layout::LayoutOf;
use std::fmt;
use interfaces::StaticMethods;
use interfaces::*;
pub use rustc::mir::mono::MonoItem;
@ -59,7 +57,7 @@ pub trait MonoItemExt<'a, 'tcx>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
MonoItem::GlobalAsm(node_id) => {
let item = cx.tcx.hir.expect_item(node_id);
if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
asm::codegen_global_asm(cx, ga);
cx.codegen_global_asm(ga);
} else {
span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
}
@ -132,7 +130,7 @@ fn predefine_static<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let ty = instance.ty(cx.tcx);
let llty = cx.layout_of(ty).llvm_type(cx);
let g = declare::define_global(cx, symbol_name, llty).unwrap_or_else(|| {
let g = cx.define_global(symbol_name, llty).unwrap_or_else(|| {
cx.sess().span_fatal(cx.tcx.def_span(def_id),
&format!("symbol `{}` is already defined", symbol_name))
});
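A sketch of the define-or-fail contract relied on here: define_global yields None when the symbol already exists, which predefine_static turns into a fatal "already defined" error.

use std::collections::HashSet;

fn define_global_once(defined: &mut HashSet<String>, symbol: &str) -> Option<String> {
    if defined.insert(symbol.to_string()) {
        Some(symbol.to_string()) // freshly defined
    } else {
        None                     // caller reports: symbol is already defined
    }
}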
@ -155,7 +153,7 @@ fn predefine_fn<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let mono_sig = instance.fn_sig(cx.tcx);
let attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
let lldecl = declare::declare_fn(cx, symbol_name, mono_sig);
let lldecl = cx.declare_fn(symbol_name, mono_sig);
unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
base::set_link_section(lldecl, &attrs);
View file

@ -24,9 +24,11 @@ use rustc::ty::layout::{self, Align, Size, HasTyCtxt};
use rustc::util::nodemap::FxHashMap;
use rustc::ty::{self, Ty};
use rustc::ty::layout::TyLayout;
use rustc_target::abi::call::{CastTarget, FnType, Reg};
use rustc_data_structures::small_c_str::SmallCStr;
use common::{self, TypeKind};
use type_of::LayoutLlvmExt;
use abi::{LlvmType, FnTypeExt};
use std::fmt;
use std::cell::RefCell;
@ -395,7 +397,7 @@ impl DerivedTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.llvm_type(&self)
layout.llvm_type(self)
}
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> &'ll Type {
layout.immediate_llvm_type(self)
@ -411,4 +413,16 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
) -> &'ll Type {
layout.scalar_pair_element_llvm_type(self, index, immediate)
}
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.ptr_to_llvm_type(self)
}
fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
ty.llvm_type(self)
}
}
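The delegation pattern these impls follow, in miniature (toy types, not the real API): a backend-agnostic trait method forwards to the LLVM-specific helper, so generic callers never name LLVM types.

trait LayoutTypeMethodsSketch {
    type Type;
    fn backend_type(&self, bits: u32) -> Self::Type;
}

struct LlvmCxSketch;
impl LayoutTypeMethodsSketch for LlvmCxSketch {
    type Type = String; // stands in for &'ll Type
    fn backend_type(&self, bits: u32) -> String {
        format!("i{}", bits) // in reality: layout.llvm_type(self)
    }
}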
View file
@ -16,7 +16,7 @@ use rustc::ty::layout::{self, Align, LayoutOf, Size, TyLayout};
use rustc_target::abi::FloatTy;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use type_::Type;
use interfaces::{BaseTypeMethods, DerivedTypeMethods};
use interfaces::*;
use std::fmt::Write;
@ -266,7 +266,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
ty::ParamEnv::reveal_all(),
&sig,
);
FnType::new(cx, sig, &[]).ptr_to_llvm_type(cx)
cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[]))
}
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
};