
Great separation of librustc_codegen_llvm: librustc_codegen_ssa compiles

Commit c0a428ee70 (parent 915382f730)
Authored by Denis Merigoux, 2018-10-03 13:49:57 +02:00; committed by Eduard-Mihai Burtescu
41 changed files with 1634 additions and 1470 deletions


@@ -38,7 +38,7 @@ use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Linkage, Visibility, Stats, CodegenUnitNameBuilder};
use rustc::middle::cstore::{EncodedMetadata};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, HasTyCtxt};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::middle::exported_symbols;
@@ -66,7 +66,6 @@ use rustc::util::nodemap::FxHashMap;
use CrateInfo;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::indexed_vec::Idx;
use interfaces::*;
@@ -88,500 +87,6 @@ use mir::operand::OperandValue;
use rustc_codegen_utils::check_for_rustc_errors_attr;
pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> {
cx: &'a Cx,
name: Option<String>,
istart: usize,
_marker: marker::PhantomData<&'tcx ()>,
}
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> {
pub fn new(cx: &'a Cx, name: String) -> Self {
let istart = cx.stats().borrow().n_llvm_insns;
StatRecorder {
cx,
name: Some(name),
istart,
_marker: marker::PhantomData,
}
}
}
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> {
fn drop(&mut self) {
if self.cx.sess().codegen_stats() {
let mut stats = self.cx.stats().borrow_mut();
let iend = stats.n_llvm_insns;
stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
stats.n_fns += 1;
// Reset LLVM insn count to avoid compound costs.
stats.n_llvm_insns = self.istart;
}
}
}
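StatRecorder is a guard-on-Drop (RAII) value: the stats are committed when the recorder falls out of scope, even on early returns. A minimal standalone sketch of the same pattern (illustrative names, not compiler code):

```
use std::time::Instant;

// Illustrative drop guard: reports elapsed time when it leaves scope.
struct Recorder {
    name: String,
    start: Instant,
}

impl Drop for Recorder {
    fn drop(&mut self) {
        println!("{} finished in {:?}", self.name, self.start.elapsed());
    }
}

fn main() {
    let _guard = Recorder { name: "codegen".into(), start: Instant::now() };
    // ... work happens here; the guard reports on scope exit ...
}
```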
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
signed: bool)
-> IntPredicate {
match op {
hir::BinOpKind::Eq => IntPredicate::IntEQ,
hir::BinOpKind::Ne => IntPredicate::IntNE,
hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
op => {
bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
found {:?}",
op)
}
}
}
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
match op {
hir::BinOpKind::Eq => RealPredicate::RealOEQ,
hir::BinOpKind::Ne => RealPredicate::RealUNE,
hir::BinOpKind::Lt => RealPredicate::RealOLT,
hir::BinOpKind::Le => RealPredicate::RealOLE,
hir::BinOpKind::Gt => RealPredicate::RealOGT,
hir::BinOpKind::Ge => RealPredicate::RealOGE,
op => {
bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
found {:?}",
op);
}
}
}
pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value,
t: Ty<'tcx>,
ret_ty: Bx::Type,
op: hir::BinOpKind
) -> Bx::Value {
let signed = match t.sty {
ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
},
ty::Uint(_) => false,
ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
};
let cmp = bin_op_to_icmp_predicate(op, signed);
// LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
}
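The sign-extension step described in the comment above can be modeled in plain Rust (a sketch, not backend code): each one-bit comparison result widens to an all-ones or all-zeros lane, the usual SIMD mask convention.

```
// Sketch: sign-extending `i1` comparison results to 32-bit lanes.
// true becomes -1 (all bits set), false becomes 0.
fn sext_mask(lanes: &[bool]) -> Vec<i32> {
    lanes.iter().map(|&b| if b { -1 } else { 0 }).collect()
}

fn main() {
    assert_eq!(sext_mask(&[true, false, true]), vec![-1, 0, -1]);
}
```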
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<Cx::Value>,
) -> Cx::Value {
let (source, target) = cx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::Array(_, len), &ty::Slice(_)) => {
cx.const_usize(len.unwrap_usize(cx.tcx()))
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never require an actual change to
// the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
.field(cx, abi::FAT_PTR_EXTRA);
cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()),
cx.backend_type(vtable_ptr))
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target),
}
}
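The metadata this function computes is observable from safe Rust (a sketch): unsizing an array reference to a slice attaches the compile-time length, while unsizing to a trait object attaches a vtable pointer, making the pointer fat.

```
use std::fmt::Debug;

fn main() {
    // Array -> slice: the metadata is the constant length 4.
    let arr = [1, 2, 3, 4];
    let slice: &[i32] = &arr;
    assert_eq!(slice.len(), 4);

    // Sized -> trait object: the metadata is a vtable pointer, so the
    // fat pointer is twice the size of a thin one.
    let obj: &dyn Debug = &arr;
    assert_eq!(std::mem::size_of_val(&obj), 2 * std::mem::size_of::<usize>());
}
```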
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
src: Bx::Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>
) -> (Bx::Value, Bx::Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
(&ty::Ref(_, a, _),
&ty::Ref(_, b, _)) |
(&ty::Ref(_, a, _),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = bx.cx().layout_of(src_ty);
let dst_layout = bx.cx().layout_of(dst_ty);
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(bx.cx(), i);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
if src_f.is_zst() {
continue;
}
assert_eq!(src_layout.size, src_f.size);
let dst_f = dst_layout.field(bx.cx(), i);
assert_ne!(src_f.ty, dst_f.ty);
assert_eq!(result, None);
result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
}
let (lldata, llextra) = result.unwrap();
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
(bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
}
_ => bug!("unsize_thin_ptr: called on bad types"),
}
}
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
src: PlaceRef<'tcx, Bx::Value>,
dst: PlaceRef<'tcx, Bx::Value>
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
let coerce_ptr = || {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR);
(bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bx, base, src_ty, dst_ty)
}
OperandValue::Ref(..) => bug!()
};
OperandValue::Pair(base, info).store(bx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
(&ty::Ref(..), &ty::Ref(..)) |
(&ty::Ref(..), &ty::RawPtr(..)) |
(&ty::RawPtr(..), &ty::RawPtr(..)) => {
coerce_ptr()
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
coerce_ptr()
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
let src_f = src.project_field(bx, i);
let dst_f = dst.project_field(bx, i);
if dst_f.layout.is_zst() {
continue;
}
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align,
src_f.layout, MemFlags::empty());
} else {
coerce_unsized_into(bx, src_f, dst_f);
}
}
}
_ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty),
}
}
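The Adt arm above handles structs whose last field is unsized; the simplest stable illustration of the coercion it implements is a smart-pointer unsize (a sketch):

```
fn main() {
    // Box<[u8; 3]> coerces to Box<[u8]>: the pointer is kept and the
    // length 3 is attached as metadata, exactly the Pair case above.
    let boxed: Box<[u8]> = Box::new([1u8, 2, 3]);
    assert_eq!(boxed.len(), 3);
}
```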
pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b))
}
fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
trunc: F,
zext: G
) -> Bx::Value
where F: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value,
G: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value
{
// Shifts may have any size int on the rhs
if op.is_shift() {
let mut rhs_llty = bx.cx().val_ty(rhs);
let mut lhs_llty = bx.cx().val_ty(lhs);
if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
rhs_llty = bx.cx().element_type(rhs_llty)
}
if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
lhs_llty = bx.cx().element_type(lhs_llty)
}
let rhs_sz = bx.cx().int_width(rhs_llty);
let lhs_sz = bx.cx().int_width(lhs_llty);
if lhs_sz < rhs_sz {
trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
// FIXME (#1877): If in the future shifting by negative
// values is no longer undefined then this is wrong.
zext(rhs, lhs_llty)
} else {
rhs
}
} else {
rhs
}
}
/// Returns whether this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc
}
pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) {
let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
bx.call(assume_intrinsic, &[val], None);
}
pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
bx.zext(val, bx.cx().type_i8())
} else {
val
}
}
pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value,
layout: layout::TyLayout,
) -> Bx::Value {
if let layout::Abi::Scalar(ref scalar) = layout.abi {
return to_immediate_scalar(bx, val, scalar);
}
val
}
pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value,
scalar: &layout::Scalar,
) -> Bx::Value {
if scalar.is_bool() {
return bx.trunc(val, bx.cx().type_i1());
}
val
}
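`from_immediate` and `to_immediate_scalar` exist because `bool` has two representations: an `i1` in SSA values and an `i8` holding 0 or 1 in memory. A plain-Rust sketch of the memory side:

```
fn main() {
    // In memory a bool occupies one byte holding 0 or 1, which is what
    // the zext in `from_immediate` produces and the trunc in
    // `to_immediate_scalar` undoes.
    assert_eq!(std::mem::size_of::<bool>(), 1);
    assert_eq!(true as u8, 1);
    assert_eq!(false as u8, 0);
}
```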
pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
dst: Bx::Value,
dst_align: Align,
src: Bx::Value,
src_align: Align,
layout: TyLayout<'tcx>,
flags: MemFlags,
) {
let size = layout.size.bytes();
if size == 0 {
return;
}
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
let _s = if cx.sess().codegen_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(cx.tcx(), true, true)
.push_def_path(instance.def_id(), &mut instance_name);
Some(StatRecorder::new(cx, instance_name))
} else {
None
};
// this is an info! to allow collecting monomorphization statistics
// and to allow finding the last function before LLVM aborts from
// release builds.
info!("codegen_instance({})", instance);
let sig = instance.fn_sig(cx.tcx());
let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(||
bug!("Instance `{:?}` not already declared", instance));
cx.stats().borrow_mut().n_closures += 1;
let mir = cx.tcx().instance_mir(instance.def);
mir::codegen_mir::<Bx>(cx, lldecl, &mir, instance, sig);
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
/// Create the `main` function which will initialize the Rust runtime and call
/// the user's main function.
fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx
) {
let (main_def_id, span) = match *cx.sess().entry_fn.borrow() {
Some((id, span, _)) => {
(cx.tcx().hir.local_def_id(id), span)
}
None => return,
};
let instance = Instance::mono(cx.tcx(), main_def_id);
if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
// We want to create the wrapper in the same codegen unit as Rust's main
// function.
return;
}
let main_llfn = cx.get_fn(instance);
let et = cx.sess().entry_fn.get().map(|e| e.2);
match et {
Some(EntryFnType::Main) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, true),
Some(EntryFnType::Start) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, false),
None => {} // Do nothing.
}
fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
sp: Span,
rust_main: Bx::Value,
rust_main_def_id: DefId,
use_start_lang_item: bool,
) {
let llfty =
cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
// Given that `main()` has no arguments,
// then its return type cannot have
// late-bound regions, since late-bound
// regions must appear in the argument
// listing.
let main_ret_ty = cx.tcx().erase_regions(
&main_ret_ty.no_bound_vars().unwrap(),
);
if cx.get_defined_value("main").is_some() {
// FIXME: We should be smart and show a better diagnostic here.
cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
.emit();
cx.sess().abort_if_errors();
bug!();
}
let llfn = cx.declare_cfn("main", llfty);
// `main` should respect same config for frame pointer elimination as rest of code
cx.set_frame_pointer_elimination(llfn);
cx.apply_target_cpu_attr(llfn);
let bx = Bx::new_block(&cx, llfn, "top");
bx.insert_reference_to_gdb_debug_scripts_section_global();
// Params from native main() used as args for rust start function
let param_argc = cx.get_param(llfn, 0);
let param_argv = cx.get_param(llfn, 1);
let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
let arg_argv = param_argv;
let (start_fn, args) = if use_start_lang_item {
let start_def_id = cx.tcx().require_lang_item(StartFnLangItem);
let start_fn = callee::resolve_and_get_fn(
cx,
start_def_id,
cx.tcx().intern_substs(&[main_ret_ty.into()]),
);
(start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
(rust_main, vec![arg_argc, arg_argv])
};
let result = bx.call(start_fn, &args, None);
bx.ret(bx.intcast(result, cx.type_int(), true));
}
}
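What `create_entry_fn` emits corresponds roughly to the following plain-Rust model (a sketch under the assumption of a simplified `lang_start`; the real `start` lang item also installs runtime setup such as panic and stack-overflow handling):

```
fn rust_main() {
    println!("user code runs here");
}

// Simplified stand-in for the `start` lang item.
fn lang_start(main: fn(), _argc: isize, _argv: *const *const u8) -> isize {
    main();
    0
}

fn main() {
    // The generated wrapper forwards C's (argc, argv) to the start function
    // and returns its result as the process exit code.
    std::process::exit(lang_start(rust_main, 0, std::ptr::null()) as i32);
}
```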
pub(crate) fn write_metadata<'a, 'gcx>(
tcx: TyCtxt<'a, 'gcx, 'gcx>,
llvm_module: &ModuleLlvm
@@ -675,397 +180,6 @@ pub fn iter_globals(llmod: &'ll llvm::Module) -> ValueIter<'ll> {
}
}
fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu: &CodegenUnit<'tcx>)
-> CguReuse {
if !tcx.dep_graph.is_fully_enabled() {
return CguReuse::No
}
let work_product_id = &cgu.work_product_id();
if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No
}
// Try to mark the CGU as green. If we can do so, it means that nothing
// affecting the LLVM module has changed and we can re-use a cached version.
// If we compile with any kind of LTO, this means we can re-use the bitcode
// of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
// know that later). If we are not doing LTO, there is only one optimized
// version of each module, so we re-use that.
let dep_node = cgu.codegen_dep_node(tcx);
assert!(!tcx.dep_graph.dep_node_exists(&dep_node),
"CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
cgu.name());
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
// We can re-use either the pre- or the post-thinlto state
if tcx.sess.lto() != Lto::No {
CguReuse::PreLto
} else {
CguReuse::PostLto
}
} else {
CguReuse::No
}
}
pub fn codegen_crate<B: BackendMethods>(
backend: B,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> B::OngoingCodegen {
check_for_rustc_errors_attr(tcx);
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Codegen the metadata.
tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen));
let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("metadata")).as_str()
.to_string();
let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name);
let metadata = time(tcx.sess, "write metadata", || {
backend.write_metadata(tcx, &metadata_llvm_module)
});
tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen));
let metadata_module = ModuleCodegen {
name: metadata_cgu_name,
module_llvm: metadata_llvm_module,
kind: ModuleKind::Metadata,
};
let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
Some(time_graph::TimeGraph::new())
} else {
None
};
// Skip crate items and just output metadata in -Z no-codegen mode.
if tcx.sess.opts.debugging_opts.no_codegen ||
!tcx.sess.opts.output_types.should_codegen() {
let ongoing_codegen = backend.start_async_codegen(
tcx,
time_graph,
metadata,
rx,
1);
backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module);
backend.codegen_finished(&ongoing_codegen, tcx);
assert_and_save_dep_graph(tcx);
backend.check_for_errors(&ongoing_codegen, tcx.sess);
return ongoing_codegen;
}
// Run the monomorphization collector and partition the collected items into
// codegen units.
let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
let codegen_units = (*codegen_units).clone();
// Force all codegen_unit queries so they are already either red or green
// when compile_codegen_unit accesses them. We are not able to re-execute
// the codegen_unit query from just the DepNode, so an unknown color would
// lead to having to re-execute compile_codegen_unit, possibly
// unnecessarily.
if tcx.dep_graph.is_fully_enabled() {
for cgu in &codegen_units {
tcx.codegen_unit(cgu.name().clone());
}
}
let ongoing_codegen = backend.start_async_codegen(
tcx,
time_graph.clone(),
metadata,
rx,
codegen_units.len());
let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
// Codegen an allocator shim, if necessary.
//
// If the crate doesn't have an `allocator_kind` set then there's definitely
// no shim to generate. Otherwise we also check our dependency graph for all
// our output crate types. If anything there looks like it's a `Dynamic`
// linkage, then it's already got an allocator shim and we'll be using that
// one instead. If nothing exists then it's our job to generate the
// allocator!
let any_dynamic_crate = tcx.sess.dependency_formats.borrow()
.iter()
.any(|(_, list)| {
use rustc::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let allocator_module = if any_dynamic_crate {
None
} else if let Some(kind) = *tcx.sess.allocator_kind.get() {
let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("allocator")).as_str()
.to_string();
let modules = backend.new_metadata(tcx.sess, &llmod_id);
time(tcx.sess, "write allocator module", || {
backend.codegen_allocator(tcx, &modules, kind)
});
Some(ModuleCodegen {
name: llmod_id,
module_llvm: modules,
kind: ModuleKind::Allocator,
})
} else {
None
};
if let Some(allocator_module) = allocator_module {
backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, allocator_module);
}
backend.submit_pre_codegened_module_to_llvm(&ongoing_codegen, tcx, metadata_module);
// We sort the codegen units by size. This way we can schedule work for LLVM
// a bit more efficiently.
let codegen_units = {
let mut codegen_units = codegen_units;
codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
codegen_units
};
let mut total_codegen_time = Duration::new(0, 0);
let mut all_stats = Stats::default();
for cgu in codegen_units.into_iter() {
backend.wait_for_signal_to_codegen_item(&ongoing_codegen);
backend.check_for_errors(&ongoing_codegen, tcx.sess);
let cgu_reuse = determine_cgu_reuse(tcx, &cgu);
tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
CguReuse::No => {
let _timing_guard = time_graph.as_ref().map(|time_graph| {
time_graph.start(write::CODEGEN_WORKER_TIMELINE,
write::CODEGEN_WORK_PACKAGE_KIND,
&format!("codegen {}", cgu.name()))
});
let start_time = Instant::now();
let stats = backend.compile_codegen_unit(tcx, *cgu.name());
all_stats.extend(stats);
total_codegen_time += start_time.elapsed();
false
}
CguReuse::PreLto => {
write::submit_pre_lto_module_to_llvm(tcx, CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
true
}
CguReuse::PostLto => {
write::submit_post_lto_module_to_llvm(tcx, CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
true
}
};
}
backend.codegen_finished(&ongoing_codegen, tcx);
// Since the main thread is sometimes blocked during codegen, we keep track
// of -Ztime-passes output manually.
print_time_passes_entry(tcx.sess.time_passes(),
"codegen to LLVM IR",
total_codegen_time);
rustc_incremental::assert_module_sources::assert_module_sources(tcx);
symbol_names_test::report_symbol_names(tcx);
if tcx.sess.codegen_stats() {
println!("--- codegen stats ---");
println!("n_glues_created: {}", all_stats.n_glues_created);
println!("n_null_glues: {}", all_stats.n_null_glues);
println!("n_real_glues: {}", all_stats.n_real_glues);
println!("n_fns: {}", all_stats.n_fns);
println!("n_inlines: {}", all_stats.n_inlines);
println!("n_closures: {}", all_stats.n_closures);
println!("fn stats:");
all_stats.fn_stats.sort_by_key(|&(_, insns)| insns);
for &(ref name, insns) in all_stats.fn_stats.iter() {
println!("{} insns, {}", insns, *name);
}
}
if tcx.sess.count_llvm_insns() {
for (k, v) in all_stats.llvm_insns.iter() {
println!("{:7} {}", *v, *k);
}
}
backend.check_for_errors(&ongoing_codegen, tcx.sess);
assert_and_save_dep_graph(tcx);
ongoing_codegen.into_inner()
}
/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: BackendMethods>(Option<B::OngoingCodegen>);
impl<B: BackendMethods> AbortCodegenOnDrop<B> {
fn into_inner(mut self) -> B::OngoingCodegen {
self.0.take().unwrap()
}
}
impl<B: BackendMethods> Deref for AbortCodegenOnDrop<B> {
type Target = B::OngoingCodegen;
fn deref(&self) -> &B::OngoingCodegen {
self.0.as_ref().unwrap()
}
}
impl<B: BackendMethods> DerefMut for AbortCodegenOnDrop<B> {
fn deref_mut(&mut self) -> &mut B::OngoingCodegen {
self.0.as_mut().unwrap()
}
}
impl<B: BackendMethods> Drop for AbortCodegenOnDrop<B> {
fn drop(&mut self) {
if let Some(codegen) = self.0.take() {
B::codegen_aborted(codegen);
}
}
}
fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) {
time(tcx.sess,
"assert dep graph",
|| rustc_incremental::assert_dep_graph(tcx));
time(tcx.sess,
"serialize dep graph",
|| rustc_incremental::save_dep_graph(tcx));
}
impl CrateInfo {
pub fn new(tcx: TyCtxt) -> CrateInfo {
let mut info = CrateInfo {
panic_runtime: None,
compiler_builtins: None,
profiler_runtime: None,
sanitizer_runtime: None,
is_no_builtins: Default::default(),
native_libraries: Default::default(),
used_libraries: tcx.native_libraries(LOCAL_CRATE),
link_args: tcx.link_args(LOCAL_CRATE),
crate_name: Default::default(),
used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
used_crate_source: Default::default(),
wasm_imports: Default::default(),
lang_item_to_crate: Default::default(),
missing_lang_items: Default::default(),
};
let lang_items = tcx.lang_items();
let load_wasm_items = tcx.sess.crate_types.borrow()
.iter()
.any(|c| *c != config::CrateType::Rlib) &&
tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown";
if load_wasm_items {
info.load_wasm_imports(tcx, LOCAL_CRATE);
}
let crates = tcx.crates();
let n_crates = crates.len();
info.native_libraries.reserve(n_crates);
info.crate_name.reserve(n_crates);
info.used_crate_source.reserve(n_crates);
info.missing_lang_items.reserve(n_crates);
for &cnum in crates.iter() {
info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
if tcx.is_panic_runtime(cnum) {
info.panic_runtime = Some(cnum);
}
if tcx.is_compiler_builtins(cnum) {
info.compiler_builtins = Some(cnum);
}
if tcx.is_profiler_runtime(cnum) {
info.profiler_runtime = Some(cnum);
}
if tcx.is_sanitizer_runtime(cnum) {
info.sanitizer_runtime = Some(cnum);
}
if tcx.is_no_builtins(cnum) {
info.is_no_builtins.insert(cnum);
}
if load_wasm_items {
info.load_wasm_imports(tcx, cnum);
}
let missing = tcx.missing_lang_items(cnum);
for &item in missing.iter() {
if let Ok(id) = lang_items.require(item) {
info.lang_item_to_crate.insert(item, id.krate);
}
}
// No need to look for lang items that are whitelisted and don't
// actually need to exist.
let missing = missing.iter()
.cloned()
.filter(|&l| !weak_lang_items::whitelisted(tcx, l))
.collect();
info.missing_lang_items.insert(cnum, missing);
}
return info
}
fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) {
self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| {
let instance = Instance::mono(tcx, id);
let import_name = tcx.symbol_name(instance);
(import_name.to_string(), module.clone())
}));
}
}
pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
cgu_name: InternedString)
-> Stats {
@@ -1141,35 +255,15 @@ pub fn compile_codegen_unit<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>,
}
}
pub fn provide_both(providers: &mut Providers) {
providers.dllimport_foreign_items = |tcx, krate| {
let module_map = tcx.foreign_modules(krate);
let module_map = module_map.iter()
.map(|lib| (lib.def_id, lib))
.collect::<FxHashMap<_, _>>();
let dllimports = tcx.native_libraries(krate)
.iter()
.filter(|lib| {
if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
return false
}
let cfg = match lib.cfg {
Some(ref cfg) => cfg,
None => return true,
};
attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
})
.filter_map(|lib| lib.foreign_module)
.map(|id| &module_map[&id])
.flat_map(|module| module.foreign_items.iter().cloned())
.collect();
Lrc::new(dllimports)
};
providers.is_dllimport_foreign_item = |tcx, def_id| {
tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
};
}
pub fn set_link_section(llval: &Value, attrs: &CodegenFnAttrs) {
let sect = match attrs.link_section {
Some(name) => name,
None => return,
};
unsafe {
let buf = SmallCStr::new(&sect.as_str());
llvm::LLVMSetSection(llval, buf.as_ptr());
}
}
pub fn linkage_to_llvm(linkage: Linkage) -> llvm::Linkage {


@@ -53,14 +53,6 @@ fn noname() -> *const c_char {
&CNULL
}
bitflags! {
pub struct MemFlags: u8 {
const VOLATILE = 1 << 0;
const NONTEMPORAL = 1 << 1;
const UNALIGNED = 1 << 2;
}
}
impl BackendTypes for Builder<'_, 'll, 'tcx> {
type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value;
type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock;


@@ -202,35 +202,3 @@ pub fn get_fn(
llfn
}
pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> Cx::Value {
cx.get_fn(
ty::Instance::resolve(
cx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs
).unwrap()
)
}
pub fn resolve_and_get_fn_for_vtable<'tcx,
Cx: Backend<'tcx> + MiscMethods<'tcx> + TypeMethods<'tcx>
>(
cx: &Cx,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> Cx::Value {
cx.get_fn(
ty::Instance::resolve_for_vtable(
cx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs
).unwrap()
)
}


@@ -405,88 +405,3 @@ pub fn struct_in_context(
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
((hi as u128) << 64) | (lo as u128)
}
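A quick usage check of `hi_lo_to_u128`'s bit layout (illustrative only):

```
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
    ((hi as u128) << 64) | (lo as u128)
}

fn main() {
    // `hi` lands in the upper 64 bits, `lo` in the lower 64 bits.
    assert_eq!(hi_lo_to_u128(1, 2), (2u128 << 64) | 1);
    assert_eq!(hi_lo_to_u128(u64::max_value(), 0), u64::max_value() as u128);
}
```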
pub fn langcall(tcx: TyCtxt,
span: Option<Span>,
msg: &str,
li: LangItem)
-> DefId {
tcx.lang_items().require(li).unwrap_or_else(|s| {
let msg = format!("{} {}", msg, s);
match span {
Some(span) => tcx.sess.span_fatal(span, &msg[..]),
None => tcx.sess.fatal(&msg[..]),
}
})
}
// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e. the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
}
pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
let is_signed = lhs_t.is_signed();
if is_signed {
bx.ashr(lhs, rhs)
} else {
bx.lshr(lhs, rhs)
}
}
fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
rhs: Bx::Value
) -> Bx::Value {
let rhs_llty = bx.cx().val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
llty: Bx::Type,
mask_llty: Bx::Type,
invert: bool
) -> Bx::Value {
let kind = bx.cx().type_kind(llty);
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = bx.cx().int_width(llty) - 1;
if invert {
bx.cx().const_int(mask_llty, !val as i64)
} else {
bx.cx().const_uint(mask_llty, val)
}
},
TypeKind::Vector => {
let mask = shift_mask_val(
bx,
bx.cx().element_type(llty),
bx.cx().element_type(mask_llty),
invert
);
bx.vector_splat(bx.cx().vector_length(mask_llty), mask)
},
_ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
}
}
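The masking rule these helpers implement can be shown on plain integers (a sketch): the RHS is ANDed with `bit_width - 1`, so an oversized shift amount wraps instead of triggering LLVM undefined behavior, matching Java semantics as the comment above notes.

```
// Sketch of shift_mask_rhs on a 32-bit integer: mask the shift amount
// with 31 so e.g. a shift by 37 behaves like a shift by 5.
fn masked_shl(lhs: u32, rhs: u32) -> u32 {
    let mask = 32 - 1; // int_width(llty) - 1, as in shift_mask_val
    lhs << (rhs & mask)
}

fn main() {
    assert_eq!(masked_shl(1, 5), 32);
    assert_eq!(masked_shl(1, 37), 32); // 37 & 31 == 5
}
```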


@@ -31,6 +31,58 @@ use rustc::hir::{self, CodegenFnAttrs, CodegenFnAttrFlags};
use std::ffi::{CStr, CString};
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0
},
cx.type_i8p()
));
next_offset = offset + pointer_size;
}
if alloc.bytes.len() >= next_offset {
llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
}
cx.const_struct(&llvals, true)
}
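The loop above interleaves runs of raw bytes with relocated pointers. A simplified model of just the range-splitting logic (plain Rust; the helper name and signature are hypothetical, not the compiler's types):

```
// Hypothetical sketch: compute the byte ranges const_alloc_to_llvm visits,
// alternating plain-byte runs with pointer-sized relocation slots.
fn split_ranges(len: usize, relocations: &[usize], ptr_size: usize) -> Vec<(usize, usize)> {
    let mut ranges = Vec::new();
    let mut next = 0;
    for &offset in relocations {
        if offset > next {
            ranges.push((next, offset)); // plain bytes before the pointer
        }
        ranges.push((offset, offset + ptr_size)); // the relocated pointer
        next = offset + ptr_size;
    }
    if len >= next {
        ranges.push((next, len)); // trailing plain bytes (possibly empty)
    }
    ranges
}

fn main() {
    // A 16-byte allocation with one 8-byte pointer at offset 4.
    assert_eq!(split_ranges(16, &[4], 8), vec![(0, 4), (4, 12), (12, 16)]);
}
```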
pub fn codegen_static_initializer(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
let instance = ty::Instance::mono(cx.tcx, def_id);
let cid = GlobalId {
instance,
promoted: None,
};
let param_env = ty::ParamEnv::reveal_all();
let static_ = cx.tcx.const_eval(param_env.and(cid))?;
let alloc = match static_.val {
ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
_ => bug!("static const eval returned {:#?}", static_),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
fn set_global_alignment(cx: &CodegenCx<'ll, '_>,
gv: &'ll Value,


@@ -10,6 +10,7 @@
use attributes;
use llvm;
use llvm_util;
use rustc::dep_graph::DepGraphSafe;
use rustc::hir;
use debuginfo;
@@ -445,6 +446,9 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
attributes::apply_target_cpu_attr(self, llfn)
}
fn closure_env_needs_indirect_debuginfo(&self) -> bool {
llvm_util::get_major_version() < 6
}
fn create_used_variable(&self) {
let name = const_cstr!("llvm.used");


@@ -26,21 +26,6 @@ use rustc_data_structures::indexed_vec::{Idx, IndexVec};
use syntax_pos::BytePos;
#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<D> {
pub scope_metadata: Option<D>,
// Start and end offsets of the file to which this DIScope belongs.
// These are used to quickly determine whether some span refers to the same file.
pub file_start_pos: BytePos,
pub file_end_pos: BytePos,
}
impl<D> MirDebugScope<D> {
pub fn is_valid(&self) -> bool {
self.scope_metadata.is_some()
}
}
/// Produce DIScope DIEs for each MIR Scope which has variables defined in it.
/// If debuginfo is disabled, the returned vector is empty.
pub fn create_mir_scopes(


@@ -111,54 +111,6 @@ impl<'a, 'tcx> CrateDebugContext<'a, 'tcx> {
}
}
pub enum FunctionDebugContext<D> {
RegularContext(FunctionDebugContextData<D>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
impl<D> FunctionDebugContext<D> {
pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<D> {
match *self {
FunctionDebugContext::RegularContext(ref data) => data,
FunctionDebugContext::DebugInfoDisabled => {
span_bug!(span, "{}", Self::debuginfo_disabled_message());
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
span_bug!(span, "{}", Self::should_be_ignored_message());
}
}
}
fn debuginfo_disabled_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
}
fn should_be_ignored_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext for function that should be \
ignored by debug info!"
}
}
pub struct FunctionDebugContextData<D> {
fn_metadata: D,
source_locations_enabled: Cell<bool>,
pub defining_crate: CrateNum,
}
pub enum VariableAccess<'a, V> {
// The llptr given is an alloca containing the variable's value
DirectVariable { alloca: V },
// The llptr given is an alloca containing the start of some pointer chain
// leading to the variable's content.
IndirectVariable { alloca: V, address_operations: &'a [i64] }
}
pub enum VariableKind {
ArgumentVariable(usize /*index*/),
LocalVariable,
}
/// Create any deferred debug metadata nodes
pub fn finalize(cx: &CodegenCx) {
if cx.dbg_cx.is_none() {
@@ -589,4 +541,13 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn debuginfo_finalize(&self) {
finalize(self)
}
fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> [i64; 4] {
unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref(),
llvm::LLVMRustDIBuilderCreateOpPlusUconst(),
byte_offset_of_var_in_env as i64,
llvm::LLVMRustDIBuilderCreateOpDeref()]
}
}
}


@@ -50,18 +50,6 @@ pub fn set_source_location<D>(
set_debug_location(bx, dbg_loc);
}
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,
/// they are disabled when beginning to codegen a new function. This functions
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is codegened.
pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
if let FunctionDebugContext::RegularContext(ref data) = *dbg_context {
data.source_locations_enabled.set(true);
}
}
#[derive(Copy, Clone, PartialEq)]
pub enum InternalDebugLocation<'ll> {


@@ -47,37 +47,4 @@ unsafe { simd_add(i32x2(0, 0), i32x2(1, 2)); } // ok!
```
"##,
E0668: r##"
Malformed inline assembly rejected by LLVM.
LLVM checks the validity of the constraints and the assembly string passed to
it. This error implies that LLVM sees something wrong with the inline
assembly call.
In particular, it can happen if you forgot the closing bracket of a register
constraint (see issue #51430):
```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail)
#![feature(asm)]
fn main() {
let rax: u64;
unsafe {
asm!("" :"={rax"(rax));
println!("Accumulator is: {}", rax);
}
}
```
"##,
E0669: r##"
Cannot convert inline assembly operand to a single LLVM value.
This error usually happens when trying to pass in a value to an input inline
assembly operand that is actually a pair of values. In particular, this can
happen when trying to pass in a slice, for instance a `&str`. In Rust, these
values are represented internally as a pair of values, the pointer and its
length. When passed as an input operand, this pair of values cannot be
coerced into a register and thus we must fail with an error.
"##,
}
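The pair-of-values representation E0669 describes is easy to observe in ordinary Rust (a sketch): a `&str` carries both a data pointer and a length, which cannot fit into one register.

```
fn main() {
    // A &str is a (pointer, length) scalar pair, twice a thin pointer's
    // size, which is why it cannot be a single inline-asm register operand.
    let s: &str = "hello";
    assert_eq!(std::mem::size_of_val(&s), 2 * std::mem::size_of::<usize>());
    let (_ptr, len) = (s.as_ptr(), s.len());
    assert_eq!(len, 5);
}
```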


@@ -1,72 +0,0 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod abi;
mod asm;
mod builder;
mod consts;
mod debuginfo;
mod intrinsic;
mod type_;
pub use self::abi::{AbiBuilderMethods, AbiMethods};
pub use self::asm::{AsmBuilderMethods, AsmMethods};
pub use self::builder::BuilderMethods;
pub use self::consts::ConstMethods;
pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
pub use self::type_::{
ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
pub use rustc_codegen_ssa::interfaces::{
Backend, BackendMethods, BackendTypes, CodegenObject, DeclareMethods, MiscMethods,
PreDefineMethods, StaticMethods,
};
pub trait CodegenMethods<'tcx>:
Backend<'tcx>
+ TypeMethods<'tcx>
+ MiscMethods<'tcx>
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
+ PreDefineMethods<'tcx>
{
}
impl<'tcx, T> CodegenMethods<'tcx> for T where
Self: Backend<'tcx>
+ TypeMethods<'tcx>
+ MiscMethods<'tcx>
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
+ PreDefineMethods<'tcx>
{}
pub trait HasCodegen<'tcx>: Backend<'tcx> {
type CodegenCx: CodegenMethods<'tcx>
+ BackendTypes<
Value = Self::Value,
BasicBlock = Self::BasicBlock,
Type = Self::Type,
Context = Self::Context,
Funclet = Self::Funclet,
DIScope = Self::DIScope,
>;
}


@@ -39,7 +39,6 @@
use back::write::create_target_machine;
use syntax_pos::symbol::Symbol;
#[macro_use] extern crate bitflags;
extern crate flate2;
extern crate libc;
#[macro_use] extern crate rustc;
@@ -92,7 +91,7 @@ use rustc::util::time_graph;
use rustc::util::nodemap::{FxHashSet, FxHashMap};
use rustc::util::profiling::ProfileCategory;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::{ModuleCodegen, CompiledModule};
use rustc_codegen_ssa::{interfaces, ModuleCodegen, CompiledModule};
use rustc_codegen_utils::codegen_backend::CodegenBackend;
use rustc_data_structures::svh::Svh;
@@ -108,8 +107,6 @@ mod back {
pub mod wasm;
}
mod interfaces;
mod abi;
mod allocator;
mod asm;
@@ -122,7 +119,6 @@ mod consts;
mod context;
mod debuginfo;
mod declare;
mod glue;
mod intrinsic;
// The following is a work around that replaces `pub mod llvm;` and that fixes issue 53912.
@@ -130,8 +126,6 @@ mod intrinsic;
mod llvm_util;
mod metadata;
mod meth;
mod mir;
mod mono_item;
mod type_;
mod type_of;
@@ -171,6 +165,12 @@ impl BackendMethods for LlvmCodegenBackend {
) {
codegen.submit_pre_codegened_module_to_llvm(tcx, module)
}
fn submit_pre_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) {
write::submit_pre_lto_module_to_llvm(tcx, module)
}
fn submit_post_lto_module_to_llvm(&self, tcx: TyCtxt, module: CachedModuleCodegen) {
write::submit_post_lto_module_to_llvm(tcx, module)
}
fn codegen_aborted(codegen: OngoingCodegen) {
codegen.codegen_aborted();
}
@@ -378,24 +378,4 @@ struct CodegenResults {
linker_info: rustc_codegen_utils::linker::LinkerInfo,
crate_info: CrateInfo,
}
/// Misc info we load from metadata to persist beyond the tcx
struct CrateInfo {
panic_runtime: Option<CrateNum>,
compiler_builtins: Option<CrateNum>,
profiler_runtime: Option<CrateNum>,
sanitizer_runtime: Option<CrateNum>,
is_no_builtins: FxHashSet<CrateNum>,
native_libraries: FxHashMap<CrateNum, Lrc<Vec<NativeLibrary>>>,
crate_name: FxHashMap<CrateNum, String>,
used_libraries: Lrc<Vec<NativeLibrary>>,
link_args: Lrc<Vec<String>>,
used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
used_crates_static: Vec<(CrateNum, LibSource)>,
used_crates_dynamic: Vec<(CrateNum, LibSource)>,
wasm_imports: FxHashMap<String, String>,
lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
}
__build_diagnostic_array! { librustc_codegen_llvm, DIAGNOSTICS }


@@ -8,12 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Walks the crate looking for items/impl-items/trait-items that have
//! either a `rustc_symbol_name` or `rustc_item_path` attribute and
//! generates an error giving, respectively, the symbol name or
//! item-path. This is used for unit testing the code that generates
//! paths etc in all kinds of annoying scenarios.
use attributes;
use base;
use context::CodegenCx;
@@ -31,98 +25,6 @@ use interfaces::*;
pub use rustc::mir::mono::MonoItem;
pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt;
pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
match *self.as_mono_item() {
MonoItem::Static(def_id) => {
let tcx = cx.tcx();
let is_mutable = match tcx.describe_def(def_id) {
Some(Def::Static(_, is_mutable)) => is_mutable,
Some(other) => {
bug!("Expected Def::Static, found {:?}", other)
}
None => {
bug!("Expected Def::Static for {:?}, found nothing", def_id)
}
};
cx.codegen_static(def_id, is_mutable);
}
MonoItem::GlobalAsm(node_id) => {
let item = cx.tcx().hir.expect_item(node_id);
if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
cx.codegen_global_asm(ga);
} else {
span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
}
}
MonoItem::Fn(instance) => {
base::codegen_instance::<Bx>(&cx, instance);
}
}
debug!("END IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
}
fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
&self,
cx: &'a Bx::CodegenCx,
linkage: Linkage,
visibility: Visibility
) {
debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
let symbol_name = self.symbol_name(cx.tcx()).as_str();
debug!("symbol {}", &symbol_name);
match *self.as_mono_item() {
MonoItem::Static(def_id) => {
cx.predefine_static(def_id, linkage, visibility, &symbol_name);
}
MonoItem::Fn(instance) => {
cx.predefine_fn(instance, linkage, visibility, &symbol_name);
}
MonoItem::GlobalAsm(..) => {}
}
debug!("END PREDEFINING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
}
fn to_raw_string(&self) -> String {
match *self.as_mono_item() {
MonoItem::Fn(instance) => {
format!("Fn({:?}, {})",
instance.def,
instance.substs.as_ptr() as usize)
}
MonoItem::Static(id) => {
format!("Static({:?})", id)
}
MonoItem::GlobalAsm(id) => {
format!("GlobalAsm({:?})", id)
}
}
}
}
impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {}
impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn predefine_static(&self,
def_id: DefId,


@@ -400,6 +400,12 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool {
layout.is_llvm_immediate()
}
fn is_backend_scalar_pair(&self, ty: &TyLayout<'tcx>) -> bool {
ty.is_llvm_scalar_pair()
}
fn backend_field_index(&self, ty: &TyLayout<'tcx>, index: usize) -> u64 {
ty.llvm_field_index(index)
}
fn scalar_pair_element_backend_type<'a>(
&self,
layout: TyLayout<'tcx>,


@@ -0,0 +1,989 @@
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Codegen the completed AST to the LLVM IR.
//!
//! Some functions here, such as codegen_block and codegen_expr, return a value --
//! the result of the codegen to LLVM -- while others, such as codegen_fn
//! and mono_item, are called only for the side effect of adding a
//! particular definition to the LLVM IR output we're producing.
//!
//! Hopefully useful general knowledge about codegen:
//!
//! * There's no way to find out the Ty type of a Value. Doing so
//! would be "trying to get the eggs out of an omelette" (credit:
//! pcwalton). You can, instead, find out its llvm::Type by calling val_ty,
//! but one llvm::Type corresponds to many `Ty`s; for instance, tup(int, int,
//! int) and rec(x=int, y=int, z=int) will have the same llvm::Type.
use {ModuleCodegen, ModuleKind, CachedModuleCodegen};
use rustc::dep_graph::cgu_reuse_tracker::CguReuse;
use rustc::hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
use rustc::middle::lang_items::StartFnLangItem;
use rustc::middle::weak_lang_items;
use rustc::mir::mono::{Stats, CodegenUnitNameBuilder};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
use rustc::util::profiling::ProfileCategory;
use rustc::session::config::{self, EntryFnType, Lto};
use rustc::session::Session;
use mir::place::PlaceRef;
use {MemFlags, CrateInfo};
use callee;
use rustc_mir::monomorphize::item::DefPathBasedNames;
use common::{RealPredicate, TypeKind, IntPredicate};
use meth;
use mir;
use rustc::util::time_graph;
use rustc_mir::monomorphize::Instance;
use rustc_mir::monomorphize::partitioning::{CodegenUnit, CodegenUnitExt};
use mono_item::MonoItem;
use rustc::util::nodemap::FxHashMap;
use rustc_data_structures::indexed_vec::Idx;
use rustc_data_structures::sync::Lrc;
use rustc_codegen_utils::{symbol_names_test, check_for_rustc_errors_attr};
use rustc::ty::layout::{FAT_PTR_ADDR, FAT_PTR_EXTRA};
use interfaces::*;
use std::any::Any;
use std::cmp;
use std::ops::{Deref, DerefMut};
use std::time::{Instant, Duration};
use std::sync::mpsc;
use syntax_pos::Span;
use syntax::attr;
use rustc::hir;
use mir::operand::OperandValue;
use std::marker::PhantomData;
pub struct StatRecorder<'a, 'tcx, Cx: 'a + CodegenMethods<'tcx>> {
cx: &'a Cx,
name: Option<String>,
istart: usize,
_marker: PhantomData<&'tcx ()>,
}
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> StatRecorder<'a, 'tcx, Cx> {
pub fn new(cx: &'a Cx, name: String) -> Self {
let istart = cx.stats().borrow().n_llvm_insns;
StatRecorder {
cx,
name: Some(name),
istart,
_marker: PhantomData,
}
}
}
impl<'a, 'tcx, Cx: CodegenMethods<'tcx>> Drop for StatRecorder<'a, 'tcx, Cx> {
fn drop(&mut self) {
if self.cx.sess().codegen_stats() {
let mut stats = self.cx.stats().borrow_mut();
let iend = stats.n_llvm_insns;
stats.fn_stats.push((self.name.take().unwrap(), iend - self.istart));
stats.n_fns += 1;
// Reset LLVM insn count to avoid compound costs.
stats.n_llvm_insns = self.istart;
}
}
}
pub fn bin_op_to_icmp_predicate(op: hir::BinOpKind,
signed: bool)
-> IntPredicate {
match op {
hir::BinOpKind::Eq => IntPredicate::IntEQ,
hir::BinOpKind::Ne => IntPredicate::IntNE,
hir::BinOpKind::Lt => if signed { IntPredicate::IntSLT } else { IntPredicate::IntULT },
hir::BinOpKind::Le => if signed { IntPredicate::IntSLE } else { IntPredicate::IntULE },
hir::BinOpKind::Gt => if signed { IntPredicate::IntSGT } else { IntPredicate::IntUGT },
hir::BinOpKind::Ge => if signed { IntPredicate::IntSGE } else { IntPredicate::IntUGE },
op => {
bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
found {:?}",
op)
}
}
}
pub fn bin_op_to_fcmp_predicate(op: hir::BinOpKind) -> RealPredicate {
match op {
hir::BinOpKind::Eq => RealPredicate::RealOEQ,
hir::BinOpKind::Ne => RealPredicate::RealUNE,
hir::BinOpKind::Lt => RealPredicate::RealOLT,
hir::BinOpKind::Le => RealPredicate::RealOLE,
hir::BinOpKind::Gt => RealPredicate::RealOGT,
hir::BinOpKind::Ge => RealPredicate::RealOGE,
op => {
bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
found {:?}",
op);
}
}
}
pub fn compare_simd_types<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value,
t: Ty<'tcx>,
ret_ty: Bx::Type,
op: hir::BinOpKind
) -> Bx::Value {
let signed = match t.sty {
ty::Float(_) => {
let cmp = bin_op_to_fcmp_predicate(op);
return bx.sext(bx.fcmp(cmp, lhs, rhs), ret_ty);
},
ty::Uint(_) => false,
ty::Int(_) => true,
_ => bug!("compare_simd_types: invalid SIMD type"),
};
let cmp = bin_op_to_icmp_predicate(op, signed);
// LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
// to get the correctly sized type. This will compile to a single instruction
// once the IR is converted to assembly if the SIMD instruction is supported
// by the target architecture.
bx.sext(bx.icmp(cmp, lhs, rhs), ret_ty)
}
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
source: Ty<'tcx>,
target: Ty<'tcx>,
old_info: Option<Cx::Value>,
) -> Cx::Value {
let (source, target) = cx.tcx().struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::Array(_, len), &ty::Slice(_)) => {
cx.const_usize(len.unwrap_usize(cx.tcx()))
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
// traits, and hence never require an actual change to
// the vtable.
old_info.expect("unsized_info: missing old info for trait upcast")
}
(_, &ty::Dynamic(ref data, ..)) => {
let vtable_ptr = cx.layout_of(cx.tcx().mk_mut_ptr(target))
.field(cx, FAT_PTR_EXTRA);
cx.static_ptrcast(meth::get_vtable(cx, source, data.principal()),
cx.backend_type(vtable_ptr))
}
_ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
source,
target),
}
}
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
pub fn unsize_thin_ptr<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
src: Bx::Value,
src_ty: Ty<'tcx>,
dst_ty: Ty<'tcx>
) -> (Bx::Value, Bx::Value) {
debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
match (&src_ty.sty, &dst_ty.sty) {
(&ty::Ref(_, a, _),
&ty::Ref(_, b, _)) |
(&ty::Ref(_, a, _),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
(&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
&ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
assert!(bx.cx().type_is_sized(a));
let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
let src_layout = bx.cx().layout_of(src_ty);
let dst_layout = bx.cx().layout_of(dst_ty);
let mut result = None;
for i in 0..src_layout.fields.count() {
let src_f = src_layout.field(bx.cx(), i);
assert_eq!(src_layout.fields.offset(i).bytes(), 0);
assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
if src_f.is_zst() {
continue;
}
assert_eq!(src_layout.size, src_f.size);
let dst_f = dst_layout.field(bx.cx(), i);
assert_ne!(src_f.ty, dst_f.ty);
assert_eq!(result, None);
result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
}
let (lldata, llextra) = result.unwrap();
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
(bx.bitcast(lldata, bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true)),
bx.bitcast(llextra, bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true)))
}
_ => bug!("unsize_thin_ptr: called on bad types"),
}
}
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
pub fn coerce_unsized_into<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
src: PlaceRef<'tcx, Bx::Value>,
dst: PlaceRef<'tcx, Bx::Value>
) {
let src_ty = src.layout.ty;
let dst_ty = dst.layout.ty;
let coerce_ptr = || {
let (base, info) = match bx.load_operand(src).val {
OperandValue::Pair(base, info) => {
// fat-ptr to fat-ptr unsize preserves the vtable
// i.e. &'a fmt::Debug+Send => &'a fmt::Debug
// So we need to pointercast the base to ensure
// the types match up.
let thin_ptr = dst.layout.field(bx.cx(), FAT_PTR_ADDR);
(bx.pointercast(base, bx.cx().backend_type(thin_ptr)), info)
}
OperandValue::Immediate(base) => {
unsize_thin_ptr(bx, base, src_ty, dst_ty)
}
OperandValue::Ref(..) => bug!()
};
OperandValue::Pair(base, info).store(bx, dst);
};
match (&src_ty.sty, &dst_ty.sty) {
(&ty::Ref(..), &ty::Ref(..)) |
(&ty::Ref(..), &ty::RawPtr(..)) |
(&ty::RawPtr(..), &ty::RawPtr(..)) => {
coerce_ptr()
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
coerce_ptr()
}
(&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
assert_eq!(def_a, def_b);
for i in 0..def_a.variants[VariantIdx::new(0)].fields.len() {
let src_f = src.project_field(bx, i);
let dst_f = dst.project_field(bx, i);
if dst_f.layout.is_zst() {
continue;
}
if src_f.layout.ty == dst_f.layout.ty {
memcpy_ty(bx, dst_f.llval, dst_f.align, src_f.llval, src_f.align,
src_f.layout, MemFlags::empty());
} else {
coerce_unsized_into(bx, src_f, dst_f);
}
}
}
_ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
src_ty,
dst_ty),
}
}
pub fn cast_shift_expr_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
cast_shift_rhs(bx, op, lhs, rhs, |a, b| bx.trunc(a, b), |a, b| bx.zext(a, b))
}
fn cast_shift_rhs<'a, 'tcx: 'a, F, G, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
op: hir::BinOpKind,
lhs: Bx::Value,
rhs: Bx::Value,
trunc: F,
zext: G
) -> Bx::Value
where F: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value,
G: FnOnce(
Bx::Value,
Bx::Type
) -> Bx::Value
{
// Shifts may have any size int on the rhs
if op.is_shift() {
let mut rhs_llty = bx.cx().val_ty(rhs);
let mut lhs_llty = bx.cx().val_ty(lhs);
if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
rhs_llty = bx.cx().element_type(rhs_llty)
}
if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
lhs_llty = bx.cx().element_type(lhs_llty)
}
let rhs_sz = bx.cx().int_width(rhs_llty);
let lhs_sz = bx.cx().int_width(lhs_llty);
if lhs_sz < rhs_sz {
trunc(rhs, lhs_llty)
} else if lhs_sz > rhs_sz {
            // FIXME (#1877): If in the future shifting by negative
            // values is no longer undefined then this is wrong.
zext(rhs, lhs_llty)
} else {
rhs
}
} else {
rhs
}
}
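// Worked example (sketch): LLVM requires both shift operands to have the same
// width, while Rust allows e.g. `x: u64 >> n: u8`. Here the `u8` RHS is
// zero-extended to i64; for `x: u8 << n: u32` the RHS is truncated to i8
// instead. Zero- rather than sign-extension is why the FIXME above matters:
// it assumes shift amounts are never negative.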
/// Returns whether this session's target will use SEH-based unwinding.
///
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
/// 64-bit MinGW) instead of "full SEH".
pub fn wants_msvc_seh(sess: &Session) -> bool {
sess.target.target.options.is_like_msvc
}
pub fn call_assume<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) {
let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
bx.call(assume_intrinsic, &[val], None);
}
pub fn from_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value
) -> Bx::Value {
if bx.cx().val_ty(val) == bx.cx().type_i1() {
bx.zext(val, bx.cx().type_i8())
} else {
val
}
}
pub fn to_immediate<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value,
layout: layout::TyLayout,
) -> Bx::Value {
if let layout::Abi::Scalar(ref scalar) = layout.abi {
return to_immediate_scalar(bx, val, scalar);
}
val
}
pub fn to_immediate_scalar<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
val: Bx::Value,
scalar: &layout::Scalar,
) -> Bx::Value {
if scalar.is_bool() {
return bx.trunc(val, bx.cx().type_i1());
}
val
}
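// Sketch of the i1/i8 round-trip the two helpers above implement: LLVM's
// natural boolean is i1, but Rust stores `bool` in memory as one byte
// (hypothetical builder calls, for illustration only):
//
//     let byte = from_immediate(bx, cond);         // zext i1 -> i8 for the store
//     bx.store(byte, ptr, align);
//     let loaded = bx.load(ptr, align);
//     let cond = to_immediate(bx, loaded, layout); // trunc i8 -> i1 for SSA uses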
pub fn memcpy_ty<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
dst: Bx::Value,
dst_align: Align,
src: Bx::Value,
src_align: Align,
layout: TyLayout<'tcx>,
flags: MemFlags,
) {
let size = layout.size.bytes();
if size == 0 {
return;
}
bx.memcpy(dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
}
pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
instance: Instance<'tcx>,
) {
let _s = if cx.sess().codegen_stats() {
let mut instance_name = String::new();
DefPathBasedNames::new(cx.tcx(), true, true)
.push_def_path(instance.def_id(), &mut instance_name);
Some(StatRecorder::new(cx, instance_name))
} else {
None
};
    // This is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function codegened before LLVM
    // aborts in release builds.
info!("codegen_instance({})", instance);
let sig = instance.fn_sig(cx.tcx());
let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let lldecl = cx.instances().borrow().get(&instance).cloned().unwrap_or_else(||
bug!("Instance `{:?}` not already declared", instance));
cx.stats().borrow_mut().n_closures += 1;
let mir = cx.tcx().instance_mir(instance.def);
mir::codegen_mir::<Bx>(cx, lldecl, &mir, instance, sig);
}
/// Create the `main` function which will initialize the Rust runtime and call
/// the user's main function.
fn maybe_create_entry_wrapper<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx
) {
let (main_def_id, span) = match *cx.sess().entry_fn.borrow() {
Some((id, span, _)) => {
(cx.tcx().hir.local_def_id(id), span)
}
None => return,
};
let instance = Instance::mono(cx.tcx(), main_def_id);
if !cx.codegen_unit().contains_item(&MonoItem::Fn(instance)) {
// We want to create the wrapper in the same codegen unit as Rust's main
// function.
return;
}
let main_llfn = cx.get_fn(instance);
let et = cx.sess().entry_fn.get().map(|e| e.2);
match et {
Some(EntryFnType::Main) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, true),
Some(EntryFnType::Start) => create_entry_fn::<Bx>(cx, span, main_llfn, main_def_id, false),
None => {} // Do nothing.
}
fn create_entry_fn<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
sp: Span,
rust_main: Bx::Value,
rust_main_def_id: DefId,
use_start_lang_item: bool,
) {
let llfty =
cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
let main_ret_ty = cx.tcx().fn_sig(rust_main_def_id).output();
        // Given that `main()` has no arguments,
        // its return type cannot have late-bound
        // regions, since late-bound regions must
        // appear in the argument listing.
let main_ret_ty = cx.tcx().erase_regions(
&main_ret_ty.no_bound_vars().unwrap(),
);
if cx.get_defined_value("main").is_some() {
// FIXME: We should be smart and show a better diagnostic here.
cx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
.help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
.emit();
cx.sess().abort_if_errors();
bug!();
}
let llfn = cx.declare_cfn("main", llfty);
        // `main` should respect the same config for frame pointer elimination
        // as the rest of the code.
cx.set_frame_pointer_elimination(llfn);
cx.apply_target_cpu_attr(llfn);
let bx = Bx::new_block(&cx, llfn, "top");
bx.insert_reference_to_gdb_debug_scripts_section_global();
// Params from native main() used as args for rust start function
let param_argc = cx.get_param(llfn, 0);
let param_argv = cx.get_param(llfn, 1);
let arg_argc = bx.intcast(param_argc, cx.type_isize(), true);
let arg_argv = param_argv;
let (start_fn, args) = if use_start_lang_item {
let start_def_id = cx.tcx().require_lang_item(StartFnLangItem);
let start_fn = callee::resolve_and_get_fn(
cx,
start_def_id,
cx.tcx().intern_substs(&[main_ret_ty.into()]),
);
(start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
arg_argc, arg_argv])
} else {
debug!("using user-defined start fn");
(rust_main, vec![arg_argc, arg_argv])
};
let result = bx.call(start_fn, &args, None);
bx.ret(bx.intcast(result, cx.type_int(), true));
}
}
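// Conceptually (sketch only, not the actual IR), the wrapper built above is:
//
//     #[no_mangle]
//     extern "C" fn main(argc: isize, argv: *const *const u8) -> isize {
//         // with the `start` lang item (a normal `fn main`):
//         lang_start(rust_main, argc, argv)
//         // with `#[start]`, `rust_main` is called directly instead
//     }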
pub const CODEGEN_WORKER_ID: usize = ::std::usize::MAX;
pub const CODEGEN_WORKER_TIMELINE: time_graph::TimelineId =
time_graph::TimelineId(CODEGEN_WORKER_ID);
pub const CODEGEN_WORK_PACKAGE_KIND: time_graph::WorkPackageKind =
time_graph::WorkPackageKind(&["#DE9597", "#FED1D3", "#FDC5C7", "#B46668", "#88494B"]);
pub fn codegen_crate<B: BackendMethods>(
backend: B,
tcx: TyCtxt<'a, 'tcx, 'tcx>,
rx: mpsc::Receiver<Box<dyn Any + Send>>
) -> B::OngoingCodegen {
check_for_rustc_errors_attr(tcx);
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Codegen the metadata.
tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen));
let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("metadata")).as_str()
.to_string();
let metadata_llvm_module = backend.new_metadata(tcx.sess, &metadata_cgu_name);
let metadata = time(tcx.sess, "write metadata", || {
backend.write_metadata(tcx, &metadata_llvm_module)
});
tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen));
let metadata_module = ModuleCodegen {
name: metadata_cgu_name,
module_llvm: metadata_llvm_module,
kind: ModuleKind::Metadata,
};
let time_graph = if tcx.sess.opts.debugging_opts.codegen_time_graph {
Some(time_graph::TimeGraph::new())
} else {
None
};
// Skip crate items and just output metadata in -Z no-codegen mode.
if tcx.sess.opts.debugging_opts.no_codegen ||
!tcx.sess.opts.output_types.should_codegen() {
let ongoing_codegen = backend.start_async_codegen(
tcx,
time_graph,
metadata,
rx,
1);
backend.submit_pre_codegened_module_to_backend(&ongoing_codegen, tcx, metadata_module);
backend.codegen_finished(&ongoing_codegen, tcx);
assert_and_save_dep_graph(tcx);
backend.check_for_errors(&ongoing_codegen, tcx.sess);
return ongoing_codegen;
}
// Run the monomorphization collector and partition the collected items into
// codegen units.
let codegen_units = tcx.collect_and_partition_mono_items(LOCAL_CRATE).1;
let codegen_units = (*codegen_units).clone();
// Force all codegen_unit queries so they are already either red or green
// when compile_codegen_unit accesses them. We are not able to re-execute
// the codegen_unit query from just the DepNode, so an unknown color would
// lead to having to re-execute compile_codegen_unit, possibly
// unnecessarily.
if tcx.dep_graph.is_fully_enabled() {
for cgu in &codegen_units {
tcx.codegen_unit(cgu.name().clone());
}
}
let ongoing_codegen = backend.start_async_codegen(
tcx,
time_graph.clone(),
metadata,
rx,
codegen_units.len());
let ongoing_codegen = AbortCodegenOnDrop::<B>(Some(ongoing_codegen));
// Codegen an allocator shim, if necessary.
//
// If the crate doesn't have an `allocator_kind` set then there's definitely
// no shim to generate. Otherwise we also check our dependency graph for all
    // our output crate types. If anything there looks like it's a `Dynamic`
// linkage, then it's already got an allocator shim and we'll be using that
// one instead. If nothing exists then it's our job to generate the
// allocator!
let any_dynamic_crate = tcx.sess.dependency_formats.borrow()
.iter()
.any(|(_, list)| {
use rustc::middle::dependency_format::Linkage;
list.iter().any(|&linkage| linkage == Linkage::Dynamic)
});
let allocator_module = if any_dynamic_crate {
None
} else if let Some(kind) = *tcx.sess.allocator_kind.get() {
let llmod_id = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
Some("allocator")).as_str()
.to_string();
let modules = backend.new_metadata(tcx.sess, &llmod_id);
time(tcx.sess, "write allocator module", || {
backend.codegen_allocator(tcx, &modules, kind)
});
Some(ModuleCodegen {
name: llmod_id,
module_llvm: modules,
kind: ModuleKind::Allocator,
})
} else {
None
};
if let Some(allocator_module) = allocator_module {
backend.submit_pre_codegened_module_to_backend(&ongoing_codegen, tcx, allocator_module);
}
backend.submit_pre_codegened_module_to_backend(&ongoing_codegen, tcx, metadata_module);
// We sort the codegen units by size. This way we can schedule work for LLVM
// a bit more efficiently.
let codegen_units = {
let mut codegen_units = codegen_units;
codegen_units.sort_by_cached_key(|cgu| cmp::Reverse(cgu.size_estimate()));
codegen_units
};
let mut total_codegen_time = Duration::new(0, 0);
let mut all_stats = Stats::default();
for cgu in codegen_units.into_iter() {
backend.wait_for_signal_to_codegen_item(&ongoing_codegen);
backend.check_for_errors(&ongoing_codegen, tcx.sess);
let cgu_reuse = determine_cgu_reuse(tcx, &cgu);
tcx.sess.cgu_reuse_tracker.set_actual_reuse(&cgu.name().as_str(), cgu_reuse);
match cgu_reuse {
CguReuse::No => {
let _timing_guard = time_graph.as_ref().map(|time_graph| {
time_graph.start(CODEGEN_WORKER_TIMELINE,
CODEGEN_WORK_PACKAGE_KIND,
&format!("codegen {}", cgu.name()))
});
let start_time = Instant::now();
let stats = backend.compile_codegen_unit(tcx, *cgu.name());
all_stats.extend(stats);
total_codegen_time += start_time.elapsed();
false
}
CguReuse::PreLto => {
backend.submit_pre_lto_module_to_backend(tcx, CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
true
}
CguReuse::PostLto => {
backend.submit_post_lto_module_to_backend(tcx, CachedModuleCodegen {
name: cgu.name().to_string(),
source: cgu.work_product(tcx),
});
true
}
};
}
backend.codegen_finished(&ongoing_codegen, tcx);
    // Since the main thread is sometimes blocked during codegen, we keep track
    // of -Ztime-passes output manually.
print_time_passes_entry(tcx.sess.time_passes(),
"codegen to LLVM IR",
total_codegen_time);
::rustc_incremental::assert_module_sources::assert_module_sources(tcx);
symbol_names_test::report_symbol_names(tcx);
if tcx.sess.codegen_stats() {
println!("--- codegen stats ---");
println!("n_glues_created: {}", all_stats.n_glues_created);
println!("n_null_glues: {}", all_stats.n_null_glues);
println!("n_real_glues: {}", all_stats.n_real_glues);
println!("n_fns: {}", all_stats.n_fns);
println!("n_inlines: {}", all_stats.n_inlines);
println!("n_closures: {}", all_stats.n_closures);
println!("fn stats:");
all_stats.fn_stats.sort_by_key(|&(_, insns)| insns);
for &(ref name, insns) in all_stats.fn_stats.iter() {
println!("{} insns, {}", insns, *name);
}
}
if tcx.sess.count_llvm_insns() {
for (k, v) in all_stats.llvm_insns.iter() {
println!("{:7} {}", *v, *k);
}
}
backend.check_for_errors(&ongoing_codegen, tcx.sess);
assert_and_save_dep_graph(tcx);
ongoing_codegen.into_inner()
}
/// A curious wrapper structure whose only purpose is to call `codegen_aborted`
/// when it's dropped abnormally.
///
/// In the process of working on rust-lang/rust#55238 a mysterious segfault was
/// stumbled upon. The segfault was never reproduced locally, but it was
/// suspected to be related to the fact that codegen worker threads were
/// sticking around by the time the main thread was exiting, causing issues.
///
/// This structure is an attempt to fix that issue where the `codegen_aborted`
/// message will block until all workers have finished. This should ensure that
/// even if the main codegen thread panics we'll wait for pending work to
/// complete before returning from the main thread, hopefully avoiding
/// segfaults.
///
/// If you see this comment in the code, then it means that this workaround
/// worked! We may yet one day track down the mysterious cause of that
/// segfault...
struct AbortCodegenOnDrop<B: BackendMethods>(Option<B::OngoingCodegen>);
impl<B: BackendMethods> AbortCodegenOnDrop<B> {
fn into_inner(mut self) -> B::OngoingCodegen {
self.0.take().unwrap()
}
}
impl<B: BackendMethods> Deref for AbortCodegenOnDrop<B> {
type Target = B::OngoingCodegen;
fn deref(&self) -> &B::OngoingCodegen {
self.0.as_ref().unwrap()
}
}
impl<B: BackendMethods> DerefMut for AbortCodegenOnDrop<B> {
fn deref_mut(&mut self) -> &mut B::OngoingCodegen {
self.0.as_mut().unwrap()
}
}
impl<B: BackendMethods> Drop for AbortCodegenOnDrop<B> {
fn drop(&mut self) {
if let Some(codegen) = self.0.take() {
B::codegen_aborted(codegen);
}
}
}
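// Usage sketch: `codegen_crate` arms this guard right after
// `start_async_codegen` and defuses it with `into_inner()` on the normal
// return path; if a panic unwinds past it instead, `drop` runs and
// `codegen_aborted` blocks until the worker threads have shut down.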
fn assert_and_save_dep_graph<'ll, 'tcx>(tcx: TyCtxt<'ll, 'tcx, 'tcx>) {
time(tcx.sess,
"assert dep graph",
|| ::rustc_incremental::assert_dep_graph(tcx));
time(tcx.sess,
"serialize dep graph",
|| ::rustc_incremental::save_dep_graph(tcx));
}
impl CrateInfo {
pub fn new(tcx: TyCtxt) -> CrateInfo {
let mut info = CrateInfo {
panic_runtime: None,
compiler_builtins: None,
profiler_runtime: None,
sanitizer_runtime: None,
is_no_builtins: Default::default(),
native_libraries: Default::default(),
used_libraries: tcx.native_libraries(LOCAL_CRATE),
link_args: tcx.link_args(LOCAL_CRATE),
crate_name: Default::default(),
used_crates_dynamic: cstore::used_crates(tcx, LinkagePreference::RequireDynamic),
used_crates_static: cstore::used_crates(tcx, LinkagePreference::RequireStatic),
used_crate_source: Default::default(),
wasm_imports: Default::default(),
lang_item_to_crate: Default::default(),
missing_lang_items: Default::default(),
};
let lang_items = tcx.lang_items();
let load_wasm_items = tcx.sess.crate_types.borrow()
.iter()
.any(|c| *c != config::CrateType::Rlib) &&
tcx.sess.opts.target_triple.triple() == "wasm32-unknown-unknown";
if load_wasm_items {
info.load_wasm_imports(tcx, LOCAL_CRATE);
}
let crates = tcx.crates();
let n_crates = crates.len();
info.native_libraries.reserve(n_crates);
info.crate_name.reserve(n_crates);
info.used_crate_source.reserve(n_crates);
info.missing_lang_items.reserve(n_crates);
for &cnum in crates.iter() {
info.native_libraries.insert(cnum, tcx.native_libraries(cnum));
info.crate_name.insert(cnum, tcx.crate_name(cnum).to_string());
info.used_crate_source.insert(cnum, tcx.used_crate_source(cnum));
if tcx.is_panic_runtime(cnum) {
info.panic_runtime = Some(cnum);
}
if tcx.is_compiler_builtins(cnum) {
info.compiler_builtins = Some(cnum);
}
if tcx.is_profiler_runtime(cnum) {
info.profiler_runtime = Some(cnum);
}
if tcx.is_sanitizer_runtime(cnum) {
info.sanitizer_runtime = Some(cnum);
}
if tcx.is_no_builtins(cnum) {
info.is_no_builtins.insert(cnum);
}
if load_wasm_items {
info.load_wasm_imports(tcx, cnum);
}
let missing = tcx.missing_lang_items(cnum);
for &item in missing.iter() {
if let Ok(id) = lang_items.require(item) {
info.lang_item_to_crate.insert(item, id.krate);
}
}
// No need to look for lang items that are whitelisted and don't
// actually need to exist.
let missing = missing.iter()
.cloned()
.filter(|&l| !weak_lang_items::whitelisted(tcx, l))
.collect();
info.missing_lang_items.insert(cnum, missing);
}
        info
}
fn load_wasm_imports(&mut self, tcx: TyCtxt, cnum: CrateNum) {
self.wasm_imports.extend(tcx.wasm_import_module_map(cnum).iter().map(|(&id, module)| {
let instance = Instance::mono(tcx, id);
let import_name = tcx.symbol_name(instance);
(import_name.to_string(), module.clone())
}));
}
}
fn is_codegened_item(tcx: TyCtxt, id: DefId) -> bool {
let (all_mono_items, _) =
tcx.collect_and_partition_mono_items(LOCAL_CRATE);
all_mono_items.contains(&id)
}
pub fn provide_both(providers: &mut Providers) {
providers.dllimport_foreign_items = |tcx, krate| {
let module_map = tcx.foreign_modules(krate);
let module_map = module_map.iter()
.map(|lib| (lib.def_id, lib))
.collect::<FxHashMap<_, _>>();
let dllimports = tcx.native_libraries(krate)
.iter()
.filter(|lib| {
if lib.kind != cstore::NativeLibraryKind::NativeUnknown {
return false
}
let cfg = match lib.cfg {
Some(ref cfg) => cfg,
None => return true,
};
attr::cfg_matches(cfg, &tcx.sess.parse_sess, None)
})
.filter_map(|lib| lib.foreign_module)
.map(|id| &module_map[&id])
.flat_map(|module| module.foreign_items.iter().cloned())
.collect();
Lrc::new(dllimports)
};
providers.is_dllimport_foreign_item = |tcx, def_id| {
tcx.dllimport_foreign_items(def_id.krate).contains(&def_id)
};
}
fn determine_cgu_reuse<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
cgu: &CodegenUnit<'tcx>)
-> CguReuse {
if !tcx.dep_graph.is_fully_enabled() {
return CguReuse::No
}
let work_product_id = &cgu.work_product_id();
if tcx.dep_graph.previous_work_product(work_product_id).is_none() {
// We don't have anything cached for this CGU. This can happen
// if the CGU did not exist in the previous session.
return CguReuse::No
}
    // Try to mark the CGU as green. If we can do so, it means that nothing
// affecting the LLVM module has changed and we can re-use a cached version.
// If we compile with any kind of LTO, this means we can re-use the bitcode
// of the Pre-LTO stage (possibly also the Post-LTO version but we'll only
// know that later). If we are not doing LTO, there is only one optimized
// version of each module, so we re-use that.
let dep_node = cgu.codegen_dep_node(tcx);
assert!(!tcx.dep_graph.dep_node_exists(&dep_node),
"CompileCodegenUnit dep-node for CGU `{}` already exists before marking.",
cgu.name());
if tcx.dep_graph.try_mark_green(tcx, &dep_node).is_some() {
// We can re-use either the pre- or the post-thinlto state
if tcx.sess.lto() != Lto::No {
CguReuse::PreLto
} else {
CguReuse::PostLto
}
} else {
CguReuse::No
}
}

View file

@ -0,0 +1,46 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use interfaces::*;
use rustc::ty;
use rustc::ty::subst::Substs;
use rustc::hir::def_id::DefId;
pub fn resolve_and_get_fn<'tcx, Cx: CodegenMethods<'tcx>>(
cx: &Cx,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> Cx::Value {
cx.get_fn(
ty::Instance::resolve(
cx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs
).unwrap()
)
}
pub fn resolve_and_get_fn_for_vtable<'tcx,
Cx: Backend<'tcx> + MiscMethods<'tcx> + TypeMethods<'tcx>
>(
cx: &Cx,
def_id: DefId,
substs: &'tcx Substs<'tcx>,
) -> Cx::Value {
cx.get_fn(
ty::Instance::resolve_for_vtable(
cx.tcx(),
ty::ParamEnv::reveal_all(),
def_id,
substs
).unwrap()
)
}

View file

@ -10,8 +10,15 @@
#![allow(non_camel_case_types, non_snake_case)]
use rustc::ty::{self, Ty, TyCtxt};
use syntax_pos::DUMMY_SP;
use syntax_pos::{DUMMY_SP, Span};
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
use base;
use interfaces::*;
use rustc::hir;
use interfaces::BuilderMethods;
pub fn type_needs_drop<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.needs_drop(tcx, ty::ParamEnv::reveal_all())
@ -135,3 +142,88 @@ mod temp_stable_hash_impls {
}
}
}
pub fn langcall(tcx: TyCtxt,
span: Option<Span>,
msg: &str,
li: LangItem)
-> DefId {
tcx.lang_items().require(li).unwrap_or_else(|s| {
let msg = format!("{} {}", msg, s);
match span {
Some(span) => tcx.sess.span_fatal(span, &msg[..]),
None => tcx.sess.fatal(&msg[..]),
}
})
}
// To avoid UB from LLVM, these two functions mask RHS with an
// appropriate mask unconditionally (i.e. the fallback behavior for
// all shifts). For 32- and 64-bit types, this matches the semantics
// of Java. (See related discussion on #1877 and #10183.)
pub fn build_unchecked_lshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shl, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
bx.shl(lhs, rhs)
}
pub fn build_unchecked_rshift<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
lhs_t: Ty<'tcx>,
lhs: Bx::Value,
rhs: Bx::Value
) -> Bx::Value {
let rhs = base::cast_shift_expr_rhs(bx, hir::BinOpKind::Shr, lhs, rhs);
// #1877, #10183: Ensure that input is always valid
let rhs = shift_mask_rhs(bx, rhs);
let is_signed = lhs_t.is_signed();
if is_signed {
bx.ashr(lhs, rhs)
} else {
bx.lshr(lhs, rhs)
}
}
fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
rhs: Bx::Value
) -> Bx::Value {
let rhs_llty = bx.cx().val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
bx: &Bx,
llty: Bx::Type,
mask_llty: Bx::Type,
invert: bool
) -> Bx::Value {
let kind = bx.cx().type_kind(llty);
match kind {
TypeKind::Integer => {
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = bx.cx().int_width(llty) - 1;
if invert {
bx.cx().const_int(mask_llty, !val as i64)
} else {
bx.cx().const_uint(mask_llty, val)
}
},
TypeKind::Vector => {
let mask = shift_mask_val(
bx,
bx.cx().element_type(llty),
bx.cx().element_type(mask_llty),
invert
);
bx.vector_splat(bx.cx().vector_length(mask_llty), mask)
},
_ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
}
}
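// Worked example (sketch): for a 32-bit integer the mask is 31, so the
// unchecked shifts above compile down to the equivalent of:
//
//     fn masked_shl(x: u32, n: u32) -> u32 {
//         x << (n & 31) // a shift amount of 37 wraps to 5, as in Java
//     }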

View file

@ -0,0 +1,92 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use syntax_pos::{BytePos, Span};
use rustc::hir::def_id::CrateNum;
use std::cell::Cell;
pub enum FunctionDebugContext<D> {
RegularContext(FunctionDebugContextData<D>),
DebugInfoDisabled,
FunctionWithoutDebugInfo,
}
impl<D> FunctionDebugContext<D> {
pub fn get_ref<'a>(&'a self, span: Span) -> &'a FunctionDebugContextData<D> {
match *self {
FunctionDebugContext::RegularContext(ref data) => data,
FunctionDebugContext::DebugInfoDisabled => {
span_bug!(span, "{}", FunctionDebugContext::<D>::debuginfo_disabled_message());
}
FunctionDebugContext::FunctionWithoutDebugInfo => {
span_bug!(span, "{}", FunctionDebugContext::<D>::should_be_ignored_message());
}
}
}
fn debuginfo_disabled_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext although debug info is disabled!"
}
fn should_be_ignored_message() -> &'static str {
"debuginfo: Error trying to access FunctionDebugContext for function that should be \
ignored by debug info!"
}
}
/// Enables emitting source locations for the given functions.
///
/// Since we don't want source locations to be emitted for the function prelude,
/// they are disabled when beginning to codegen a new function. This function
/// switches source location emitting on and must therefore be called before the
/// first real statement/expression of the function is codegened.
pub fn start_emitting_source_locations<D>(dbg_context: &FunctionDebugContext<D>) {
match *dbg_context {
FunctionDebugContext::RegularContext(ref data) => {
data.source_locations_enabled.set(true)
},
_ => { /* safe to ignore */ }
}
}
pub struct FunctionDebugContextData<D> {
fn_metadata: D,
source_locations_enabled: Cell<bool>,
pub defining_crate: CrateNum,
}
pub enum VariableAccess<'a, V> {
// The llptr given is an alloca containing the variable's value
DirectVariable { alloca: V },
// The llptr given is an alloca containing the start of some pointer chain
// leading to the variable's content.
IndirectVariable { alloca: V, address_operations: &'a [i64] }
}
pub enum VariableKind {
ArgumentVariable(usize /*index*/),
LocalVariable,
}
#[derive(Clone, Copy, Debug)]
pub struct MirDebugScope<D> {
pub scope_metadata: Option<D>,
// Start and end offsets of the file to which this DIScope belongs.
// These are used to quickly determine whether some span refers to the same file.
pub file_start_pos: BytePos,
pub file_end_pos: BytePos,
}
impl<D> MirDebugScope<D> {
pub fn is_valid(&self) -> bool {
        self.scope_metadata.is_some()
}
}

View file

@ -0,0 +1,48 @@
// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_snake_case)]
register_long_diagnostics! {
E0668: r##"
Malformed inline assembly rejected by LLVM.
LLVM checks the validity of the constraints and the assembly string passed to
it. This error implies that LLVM found something wrong with the inline
assembly call.
In particular, it can happen if you forgot the closing bracket of a register
constraint (see issue #51430):
```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail)
#![feature(asm)]
fn main() {
let rax: u64;
unsafe {
asm!("" :"={rax"(rax));
println!("Accumulator is: {}", rax);
}
}
```
"##,
E0669: r##"
Cannot convert inline assembly operand to a single LLVM value.
This error usually happens when trying to pass in a value to an input inline
assembly operand that is actually a pair of values. In particular, this can
happen when trying to pass in a slice, for instance a `&str`. In Rust, these
values are represented internally as a pair of values, the pointer and its
length. When passed as an input operand, this pair of values cannot be
coerced into a register and thus we must fail with an error.
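For example (illustrative sketch), passing a `&str` as a register input
triggers this error, because the slice cannot fit in a single register:

```ignore (error-emitted-at-codegen-which-cannot-be-handled-by-compile_fail)
#![feature(asm)]

fn main() {
    let s = "hello";
    unsafe {
        asm!("" : : "r"(s));
    }
}
```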
"##,
}

View file

@ -14,7 +14,7 @@
use std;
use rustc_codegen_ssa::common::IntPredicate;
use common::IntPredicate;
use meth;
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, Ty};

View file

@ -9,8 +9,8 @@
// except according to those terms.
use super::HasCodegen;
use abi::FnType;
use rustc::ty::{FnSig, Instance, Ty};
use rustc_target::abi::call::FnType;
pub trait AbiMethods<'tcx> {
fn new_fn_type(&self, sig: FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> FnType<'tcx, Ty<'tcx>>;

View file

@ -21,7 +21,7 @@ use rustc::util::time_graph::TimeGraph;
use std::any::Any;
use std::sync::mpsc::Receiver;
use syntax_pos::symbol::InternedString;
use ModuleCodegen;
use {CachedModuleCodegen, ModuleCodegen};
pub trait BackendTypes {
type Value: CodegenObject;
@ -62,12 +62,14 @@ pub trait BackendMethods {
coordinator_receive: Receiver<Box<dyn Any + Send>>,
total_cgus: usize,
) -> Self::OngoingCodegen;
fn submit_pre_codegened_module_to_llvm(
fn submit_pre_codegened_module_to_backend(
&self,
codegen: &Self::OngoingCodegen,
tcx: TyCtxt,
module: ModuleCodegen<Self::Module>,
);
fn submit_pre_lto_module_to_backend(&self, tcx: TyCtxt, module: CachedModuleCodegen);
fn submit_post_lto_module_to_backend(&self, tcx: TyCtxt, module: CachedModuleCodegen);
fn codegen_aborted(codegen: Self::OngoingCodegen);
fn codegen_finished(&self, codegen: &Self::OngoingCodegen, tcx: TyCtxt);
fn check_for_errors(&self, codegen: &Self::OngoingCodegen, sess: &Session);

View file

@ -14,14 +14,12 @@ use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods;
use super::HasCodegen;
use builder::MemFlags;
use common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope};
use libc::c_char;
use mir::operand::OperandRef;
use mir::place::PlaceRef;
use rustc::ty::layout::{Align, Size};
use rustc_codegen_ssa::common::{
AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate, SynchronizationScope,
};
use MemFlags;
use std::borrow::Cow;
use std::ops::Range;

View file

@ -11,11 +11,11 @@
use super::Backend;
use super::HasCodegen;
use debuginfo::{FunctionDebugContext, MirDebugScope, VariableAccess, VariableKind};
use monomorphize::Instance;
use rustc::hir::def_id::CrateNum;
use rustc::mir;
use rustc::ty::{self, Ty};
use rustc_data_structures::indexed_vec::IndexVec;
use rustc_mir::monomorphize::Instance;
use syntax::ast::Name;
use syntax_pos::{SourceFile, Span};
@ -48,6 +48,7 @@ pub trait DebugInfoMethods<'tcx>: Backend<'tcx> {
defining_crate: CrateNum,
) -> Self::DIScope;
fn debuginfo_finalize(&self);
fn debuginfo_upvar_decls_ops_sequence(&self, byte_offset_of_var_in_env: u64) -> &[i64];
}
pub trait DebugInfoBuilderMethods<'tcx>: HasCodegen<'tcx> {

View file

@ -10,9 +10,9 @@
use super::Backend;
use super::HasCodegen;
use abi::FnType;
use mir::operand::OperandRef;
use rustc::ty::Ty;
use rustc_target::abi::call::FnType;
use syntax_pos::Span;
pub trait IntrinsicCallMethods<'tcx>: HasCodegen<'tcx> {

View file

@ -33,6 +33,7 @@ pub trait MiscMethods<'tcx>: Backend<'tcx> {
fn consume_stats(self) -> RefCell<Stats>;
fn codegen_unit(&self) -> &Arc<CodegenUnit<'tcx>>;
fn statics_to_rauw(&self) -> &RefCell<Vec<(Self::Value, Self::Value)>>;
fn closure_env_needs_indirect_debuginfo(&self) -> bool;
fn used_statics(&self) -> &RefCell<Vec<Self::Value>>;
fn set_frame_pointer_elimination(&self, llfn: Self::Value);
fn apply_target_cpu_attr(&self, llfn: Self::Value);

View file

@ -8,17 +8,74 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
mod abi;
mod asm;
mod backend;
mod builder;
mod consts;
mod debuginfo;
mod declare;
mod intrinsic;
mod misc;
mod statics;
mod type_;
pub use self::abi::{AbiBuilderMethods, AbiMethods};
pub use self::asm::{AsmBuilderMethods, AsmMethods};
pub use self::backend::{Backend, BackendMethods, BackendTypes};
pub use self::builder::BuilderMethods;
pub use self::consts::ConstMethods;
pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
pub use self::declare::{DeclareMethods, PreDefineMethods};
pub use self::intrinsic::{IntrinsicCallMethods, IntrinsicDeclarationMethods};
pub use self::misc::MiscMethods;
pub use self::statics::StaticMethods;
pub use self::type_::{
ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
use std::fmt;
pub trait CodegenObject: Copy + PartialEq + fmt::Debug {}
impl<T: Copy + PartialEq + fmt::Debug> CodegenObject for T {}
pub trait CodegenMethods<'tcx>:
Backend<'tcx>
+ TypeMethods<'tcx>
+ MiscMethods<'tcx>
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
+ PreDefineMethods<'tcx>
{
}
impl<'tcx, T> CodegenMethods<'tcx> for T where
Self: Backend<'tcx>
+ TypeMethods<'tcx>
+ MiscMethods<'tcx>
+ ConstMethods<'tcx>
+ StaticMethods<'tcx>
+ DebugInfoMethods<'tcx>
+ AbiMethods<'tcx>
+ IntrinsicDeclarationMethods<'tcx>
+ DeclareMethods<'tcx>
+ AsmMethods<'tcx>
+ PreDefineMethods<'tcx>
{}
pub trait HasCodegen<'tcx>: Backend<'tcx> {
type CodegenCx: CodegenMethods<'tcx>
+ BackendTypes<
Value = Self::Value,
BasicBlock = Self::BasicBlock,
Type = Self::Type,
Context = Self::Context,
Funclet = Self::Funclet,
DIScope = Self::DIScope,
>;
}

View file

@ -10,12 +10,12 @@
use super::Backend;
use super::HasCodegen;
use common::TypeKind;
use mir::place::PlaceRef;
use rustc::ty::layout::TyLayout;
use rustc::ty::layout::{self, Align, Size};
use rustc::ty::Ty;
use rustc::util::nodemap::FxHashMap;
use rustc_codegen_ssa::common::TypeKind;
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
use std::cell::RefCell;
use syntax::ast;
@ -93,6 +93,8 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
fn is_backend_scalar_pair(&self, layout: TyLayout<'tcx>) -> bool;
fn backend_field_index(&self, layout: TyLayout<'tcx>, index: usize) -> u64;
fn scalar_pair_element_backend_type<'a>(
&self,
layout: TyLayout<'tcx>,

View file

@ -20,28 +20,49 @@
#![feature(box_syntax)]
#![feature(custom_attribute)]
#![feature(libc)]
#![feature(rustc_diagnostic_macros)]
#![feature(in_band_lifetimes)]
#![feature(slice_sort_by_cached_key)]
#![feature(nll)]
#![allow(unused_attributes)]
#![allow(dead_code)]
#![feature(quote)]
#![feature(rustc_diagnostic_macros)]
#![recursion_limit="256"]
extern crate rustc;
#[macro_use] extern crate bitflags;
#[macro_use] extern crate log;
extern crate rustc_apfloat;
#[macro_use] extern crate rustc;
extern crate rustc_target;
extern crate rustc_mir;
extern crate syntax;
#[macro_use] extern crate syntax;
extern crate syntax_pos;
extern crate rustc_incremental;
extern crate rustc_codegen_utils;
extern crate rustc_data_structures;
extern crate libc;
use std::path::PathBuf;
use rustc::dep_graph::WorkProduct;
use rustc::session::config::{OutputFilenames, OutputType};
use rustc::middle::lang_items::LangItem;
use rustc::hir::def_id::CrateNum;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
use rustc_data_structures::sync::Lrc;
use rustc::middle::cstore::{LibSource, CrateSource, NativeLibrary};
// NB: This module needs to be declared first so diagnostics are
// registered before they are used.
mod diagnostics;
pub mod common;
pub mod interfaces;
pub mod mir;
pub mod debuginfo;
pub mod base;
pub mod callee;
pub mod glue;
pub mod meth;
pub mod mono_item;
pub struct ModuleCodegen<M> {
/// The name of the module. When the crate may be saved between
@ -111,5 +132,31 @@ pub enum ModuleKind {
Allocator,
}
bitflags! {
pub struct MemFlags: u8 {
const VOLATILE = 1 << 0;
const NONTEMPORAL = 1 << 1;
const UNALIGNED = 1 << 2;
}
}
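// Usage sketch for the flags above: they combine bitwise and are queried
// with `contains`, e.g.
//
//     let flags = MemFlags::VOLATILE | MemFlags::UNALIGNED;
//     assert!(flags.contains(MemFlags::VOLATILE));
//     assert!(!flags.contains(MemFlags::NONTEMPORAL));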
/// Misc info we load from metadata to persist beyond the tcx
struct CrateInfo {
panic_runtime: Option<CrateNum>,
compiler_builtins: Option<CrateNum>,
profiler_runtime: Option<CrateNum>,
sanitizer_runtime: Option<CrateNum>,
is_no_builtins: FxHashSet<CrateNum>,
native_libraries: FxHashMap<CrateNum, Lrc<Vec<NativeLibrary>>>,
crate_name: FxHashMap<CrateNum, String>,
used_libraries: Lrc<Vec<NativeLibrary>>,
link_args: Lrc<Vec<String>>,
used_crate_source: FxHashMap<CrateNum, Lrc<CrateSource>>,
used_crates_static: Vec<(CrateNum, LibSource)>,
used_crates_dynamic: Vec<(CrateNum, LibSource)>,
wasm_imports: FxHashMap<String, String>,
lang_item_to_crate: FxHashMap<LangItem, CrateNum>,
missing_lang_items: FxHashMap<CrateNum, Vec<LangItem>>,
}
__build_diagnostic_array! { librustc_codegen_ssa, DIAGNOSTICS }

View file

@ -8,9 +8,9 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use abi::FnType;
use rustc_target::abi::call::FnType;
use callee;
use monomorphize;
use rustc_mir::monomorphize;
use interfaces::*;

View file

@ -19,7 +19,6 @@ use rustc::mir::visit::{Visitor, PlaceContext, MutatingUseContext, NonMutatingUs
use rustc::mir::traversal;
use rustc::ty;
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use type_of::LayoutLlvmExt;
use super::FunctionCx;
use interfaces::*;
@ -35,10 +34,10 @@ pub fn non_ssa_locals<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
let ty = fx.monomorphize(&ty);
debug!("local {} has type {:?}", index, ty);
let layout = fx.cx.layout_of(ty);
if layout.is_llvm_immediate() {
if fx.cx.is_backend_immediate(layout) {
// These sorts of types are immediates that we can store
// in an Value without an alloca.
} else if layout.is_llvm_scalar_pair() {
} else if fx.cx.is_backend_scalar_pair(layout) {
// We allow pairs and uses of any of their 2 fields.
} else {
// These sorts of types require an alloca. Note that
@ -191,7 +190,7 @@ impl<'mir, 'a: 'mir, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> Visitor<'tcx>
if let mir::ProjectionElem::Field(..) = proj.elem {
let layout = cx.layout_of(base_ty.to_ty(cx.tcx()));
if layout.is_llvm_immediate() || layout.is_llvm_scalar_pair() {
if cx.is_backend_immediate(layout) || cx.is_backend_scalar_pair(layout) {
// Recurse with the same context, instead of `Projection`,
// potentially stopping at non-operand projections,
// which would trigger `not_ssa` on locals.

View file

@ -13,14 +13,13 @@ use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt};
use rustc::mir;
use rustc::mir::interpret::EvalErrorKind;
use abi::{Abi, FnType, PassMode};
use rustc_target::abi::call::ArgType;
use rustc_target::abi::call::{ArgType, FnType, PassMode};
use rustc_target::spec::abi::Abi;
use base;
use builder::MemFlags;
use common;
use rustc_codegen_ssa::common::IntPredicate;
use MemFlags;
use common::{self, IntPredicate};
use meth;
use monomorphize;
use rustc_mir::monomorphize;
use interfaces::*;

View file

@ -8,74 +8,18 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use rustc::mir::interpret::{ErrorHandled, read_target_uint};
use rustc::mir::interpret::ErrorHandled;
use rustc_mir::const_eval::const_field;
use rustc::hir::def_id::DefId;
use rustc::mir;
use rustc_data_structures::indexed_vec::Idx;
use rustc::mir::interpret::{GlobalId, Pointer, Allocation, ConstValue};
use rustc::mir::interpret::{GlobalId, ConstValue};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size};
use common::CodegenCx;
use rustc::ty::layout::{self, LayoutOf};
use syntax::source_map::Span;
use value::Value;
use interfaces::*;
use super::FunctionCx;
pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll Value {
let mut llvals = Vec::with_capacity(alloc.relocations.len() + 1);
let dl = cx.data_layout();
let pointer_size = dl.pointer_size.bytes() as usize;
let mut next_offset = 0;
for &(offset, ((), alloc_id)) in alloc.relocations.iter() {
let offset = offset.bytes();
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
dl.endian,
&alloc.bytes[offset..(offset + pointer_size)],
).expect("const_alloc_to_llvm: could not read relocation pointer") as u64;
llvals.push(cx.scalar_to_backend(
Pointer::new(alloc_id, Size::from_bytes(ptr_offset)).into(),
&layout::Scalar {
value: layout::Primitive::Pointer,
valid_range: 0..=!0
},
cx.type_i8p()
));
next_offset = offset + pointer_size;
}
if alloc.bytes.len() >= next_offset {
llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
}
cx.const_struct(&llvals, true)
}
pub fn codegen_static_initializer(
cx: &CodegenCx<'ll, 'tcx>,
def_id: DefId,
) -> Result<(&'ll Value, &'tcx Allocation), ErrorHandled> {
let instance = ty::Instance::mono(cx.tcx, def_id);
let cid = GlobalId {
instance,
promoted: None,
};
let param_env = ty::ParamEnv::reveal_all();
let static_ = cx.tcx.const_eval(param_env.and(cid))?;
let alloc = match static_.val {
ConstValue::ByRef(_, alloc, n) if n.bytes() == 0 => alloc,
_ => bug!("static const eval returned {:#?}", static_),
};
Ok((const_alloc_to_llvm(cx, alloc), alloc))
}
impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn fully_evaluate(
&mut self,

View file

@ -9,8 +9,6 @@
// except according to those terms.
use libc::c_uint;
use llvm;
use llvm_util;
use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
use rustc::mir::{self, Mir};
@ -18,8 +16,8 @@ use rustc::ty::subst::Substs;
use rustc::session::config::DebugInfo;
use base;
use debuginfo::{self, VariableAccess, VariableKind, FunctionDebugContext};
use monomorphize::Instance;
use abi::{FnType, PassMode};
use rustc_mir::monomorphize::Instance;
use rustc_target::abi::call::{FnType, PassMode};
use interfaces::*;
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
@ -30,8 +28,6 @@ use std::iter;
use rustc_data_structures::bit_set::BitSet;
use rustc_data_structures::indexed_vec::IndexVec;
pub use self::constant::codegen_static_initializer;
use self::analyze::CleanupKind;
use self::place::PlaceRef;
use rustc::mir::traversal;
@ -171,7 +167,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
Some(self.cx.extend_scope_to_file(
scope_metadata.unwrap(),
&sm.lookup_char_pos(pos).file,
defining_crate,
defining_crate
))
} else {
scope_metadata
@ -616,7 +612,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// doesn't actually strip the offset when splitting the closure
// environment into its components so it ends up out of bounds.
// (cuviper) It seems to be fine without the alloca on LLVM 6 and later.
let env_alloca = !env_ref && llvm_util::get_major_version() < 6;
let env_alloca = !env_ref && bx.cx().closure_env_needs_indirect_debuginfo();
let env_ptr = if env_alloca {
let scratch = PlaceRef::alloca(bx,
bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
@ -630,12 +626,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();
let ops = unsafe {
[llvm::LLVMRustDIBuilderCreateOpDeref(),
llvm::LLVMRustDIBuilderCreateOpPlusUconst(),
byte_offset_of_var_in_env as i64,
llvm::LLVMRustDIBuilderCreateOpDeref()]
};
let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);
// The environment and the capture can each be indirect.

View file

@ -14,7 +14,7 @@ use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use base;
use builder::MemFlags;
use MemFlags;
use glue;
use interfaces::*;

View file

@ -12,9 +12,8 @@ use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use builder::MemFlags;
use rustc_codegen_ssa::common::IntPredicate;
use type_of::LayoutLlvmExt;
use MemFlags;
use common::IntPredicate;
use glue;
use interfaces::*;
@ -114,7 +113,7 @@ impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
bx.struct_gep(self.llval, 1)
} else {
bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
};
PlaceRef {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.

View file

@ -17,12 +17,10 @@ use rustc_apfloat::{ieee, Float, Status, Round};
use std::{u128, i128};
use base;
use builder::MemFlags;
use MemFlags;
use callee;
use common;
use rustc_codegen_ssa::common::{RealPredicate, IntPredicate};
use monomorphize;
use type_of::LayoutLlvmExt;
use common::{self, RealPredicate, IntPredicate};
use rustc_mir::monomorphize;
use interfaces::*;
@ -52,7 +50,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, _) => {
// The destination necessarily contains a fat pointer, so if
// it's a scalar pair, it's a fat pointer or newtype thereof.
if dest.layout.is_llvm_scalar_pair() {
if bx.cx().is_backend_scalar_pair(dest.layout) {
// into-coerce of a thin pointer to a fat pointer - just
// use the operand path.
let (bx, temp) = self.codegen_rvalue_operand(bx, rvalue);
@ -241,7 +239,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
operand.val
}
mir::CastKind::Unsize => {
assert!(cast.is_llvm_scalar_pair());
assert!(bx.cx().is_backend_scalar_pair(cast));
match operand.val {
OperandValue::Pair(lldata, llextra) => {
// unsize from a fat pointer - this is a
@ -267,9 +265,9 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
}
mir::CastKind::Misc if operand.layout.is_llvm_scalar_pair() => {
mir::CastKind::Misc if bx.cx().is_backend_scalar_pair(operand.layout) => {
if let OperandValue::Pair(data_ptr, meta) = operand.val {
if cast.is_llvm_scalar_pair() {
if bx.cx().is_backend_scalar_pair(cast) {
let data_cast = bx.pointercast(data_ptr,
bx.cx().scalar_pair_element_backend_type(cast, 0, true));
OperandValue::Pair(data_cast, meta)
@ -285,7 +283,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
mir::CastKind::Misc => {
assert!(cast.is_llvm_immediate());
assert!(bx.cx().is_backend_immediate(cast));
let ll_t_out = bx.cx().immediate_backend_type(cast);
if operand.layout.abi.is_uninhabited() {
let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));

View file

@ -0,0 +1,117 @@
// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Walks the crate looking for items/impl-items/trait-items that have
//! either a `rustc_symbol_name` or `rustc_item_path` attribute and
//! generates an error giving, respectively, the symbol name or
//! item-path. This is used for unit testing the code that generates
//! paths etc. in all kinds of annoying scenarios.
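//!
//! A test exercising this pass looks roughly like (illustrative sketch):
//!
//! ```ignore (checked-by-the-ui-test-harness)
//! #![feature(rustc_attrs)]
//!
//! #[rustc_symbol_name] //~ ERROR symbol-name(_ZN...)
//! #[rustc_item_path]   //~ ERROR item-path(foo)
//! fn foo() {}
//! ```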
use base;
use rustc::hir;
use rustc::hir::def::Def;
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::layout::HasTyCtxt;
use std::fmt;
use interfaces::*;
pub use rustc::mir::mono::MonoItem;
pub use rustc_mir::monomorphize::item::MonoItemExt as BaseMonoItemExt;
pub trait MonoItemExt<'a, 'tcx: 'a>: fmt::Debug + BaseMonoItemExt<'a, 'tcx> {
fn define<Bx: BuilderMethods<'a, 'tcx>>(&self, cx: &'a Bx::CodegenCx) {
debug!("BEGIN IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
match *self.as_mono_item() {
MonoItem::Static(def_id) => {
let tcx = cx.tcx();
let is_mutable = match tcx.describe_def(def_id) {
Some(Def::Static(_, is_mutable)) => is_mutable,
Some(other) => {
bug!("Expected Def::Static, found {:?}", other)
}
None => {
bug!("Expected Def::Static for {:?}, found nothing", def_id)
}
};
cx.codegen_static(def_id, is_mutable);
}
MonoItem::GlobalAsm(node_id) => {
let item = cx.tcx().hir.expect_item(node_id);
if let hir::ItemKind::GlobalAsm(ref ga) = item.node {
cx.codegen_global_asm(ga);
} else {
span_bug!(item.span, "Mismatch between hir::Item type and MonoItem type")
}
}
MonoItem::Fn(instance) => {
base::codegen_instance::<Bx>(&cx, instance);
}
}
debug!("END IMPLEMENTING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
}
fn predefine<Bx: BuilderMethods<'a, 'tcx>>(
&self,
cx: &'a Bx::CodegenCx,
linkage: Linkage,
visibility: Visibility
) {
debug!("BEGIN PREDEFINING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
let symbol_name = self.symbol_name(cx.tcx()).as_str();
debug!("symbol {}", &symbol_name);
match *self.as_mono_item() {
MonoItem::Static(def_id) => {
cx.predefine_static(def_id, linkage, visibility, &symbol_name);
}
MonoItem::Fn(instance) => {
cx.predefine_fn(instance, linkage, visibility, &symbol_name);
}
MonoItem::GlobalAsm(..) => {}
}
debug!("END PREDEFINING '{} ({})' in cgu {}",
self.to_string(cx.tcx()),
self.to_raw_string(),
cx.codegen_unit().name());
}
fn to_raw_string(&self) -> String {
match *self.as_mono_item() {
MonoItem::Fn(instance) => {
format!("Fn({:?}, {})",
instance.def,
instance.substs.as_ptr() as usize)
}
MonoItem::Static(id) => {
format!("Static({:?})", id)
}
MonoItem::GlobalAsm(id) => {
format!("GlobalAsm({:?})", id)
}
}
}
}
impl<'a, 'tcx: 'a> MonoItemExt<'a, 'tcx> for MonoItem<'tcx> {}