2015-02-28 23:53:12 +02:00
|
|
|
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
|
2012-12-03 16:48:01 -08:00
|
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
|
//
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
|
// except according to those terms.
|
2016-02-15 15:41:16 -05:00
|
|
|
|
|
2015-02-28 23:53:12 +02:00
|
|
|
|
//! Translate the completed AST to the LLVM IR.
|
|
|
|
|
//!
|
|
|
|
|
//! Some functions here, such as trans_block and trans_expr, return a value --
|
2016-02-23 21:39:35 +02:00
|
|
|
|
//! the result of the translation to LLVM -- while others, such as trans_fn
|
|
|
|
|
//! and trans_item, are called only for the side effect of adding a
|
2015-02-28 23:53:12 +02:00
|
|
|
|
//! particular definition to the LLVM IR output we're producing.
|
|
|
|
|
//!
|
|
|
|
|
//! Hopefully useful general knowledge about trans:
|
|
|
|
|
//!
|
|
|
|
|
//! * There's no way to find out the Ty type of a ValueRef. Doing so
|
|
|
|
|
//! would be "trying to get the eggs out of an omelette" (credit:
|
|
|
|
|
//! pcwalton). You can, instead, find out its TypeRef by calling val_ty,
|
|
|
|
|
//! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
|
|
|
|
|
//! int) and rec(x=int, y=int, z=int) will have the same TypeRef.
|
2011-12-13 16:25:51 -08:00
|
|
|
|
|
2014-03-21 18:05:05 -07:00
|
|
|
|
#![allow(non_camel_case_types)]
|
2013-05-17 15:28:44 -07:00
|
|
|
|
|
2014-11-27 07:21:26 -05:00
|
|
|
|
use super::CrateTranslation;
|
2016-07-21 12:49:59 -04:00
|
|
|
|
use super::ModuleLlvm;
|
|
|
|
|
use super::ModuleSource;
|
2014-11-27 07:21:26 -05:00
|
|
|
|
use super::ModuleTranslation;
|
|
|
|
|
|
2016-07-21 12:50:15 -04:00
|
|
|
|
use assert_module_sources;
|
2016-05-12 19:52:38 +03:00
|
|
|
|
use back::link;
|
2016-05-25 01:45:25 +03:00
|
|
|
|
use back::linker::LinkerInfo;
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use llvm::{Linkage, ValueRef, Vector, get_param};
|
2014-07-07 17:58:01 -07:00
|
|
|
|
use llvm;
|
2016-10-25 18:18:17 +03:00
|
|
|
|
use rustc::hir::def::Def;
|
2016-03-29 12:54:26 +03:00
|
|
|
|
use rustc::hir::def_id::DefId;
|
2013-07-15 20:42:13 -07:00
|
|
|
|
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
|
2016-06-15 01:40:09 +03:00
|
|
|
|
use rustc::ty::subst::Substs;
|
2016-03-22 17:30:57 +02:00
|
|
|
|
use rustc::traits;
|
|
|
|
|
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
|
|
|
|
|
use rustc::ty::adjustment::CustomCoerceUnsized;
|
2016-07-25 10:51:14 -04:00
|
|
|
|
use rustc::dep_graph::{DepNode, WorkProduct};
|
2016-03-29 08:50:44 +03:00
|
|
|
|
use rustc::hir::map as hir_map;
|
2015-12-25 13:59:02 -05:00
|
|
|
|
use rustc::util::common::time;
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use session::config::{self, NoDebugInfo};
|
2016-08-23 07:47:14 -04:00
|
|
|
|
use rustc_incremental::IncrementalHashesMap;
|
2014-11-15 20:30:33 -05:00
|
|
|
|
use session::Session;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use abi::{self, Abi, FnType};
|
|
|
|
|
use adt;
|
|
|
|
|
use attributes;
|
|
|
|
|
use build::*;
|
|
|
|
|
use builder::{Builder, noname};
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use callee::{Callee};
|
|
|
|
|
use common::{Block, C_bool, C_bytes_in_context, C_i32, C_uint};
|
2016-06-16 18:56:14 -04:00
|
|
|
|
use collector::{self, TransItemCollectionMode};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
|
2016-08-23 10:39:30 +03:00
|
|
|
|
use common::{CrateContext, FunctionContext};
|
|
|
|
|
use common::{Result};
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use common::{fulfill_obligation};
|
|
|
|
|
use common::{type_is_zero_size, val_ty};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use common;
|
|
|
|
|
use consts;
|
2016-05-05 14:14:41 -04:00
|
|
|
|
use context::{SharedCrateContext, CrateContextList};
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use debuginfo::{self, DebugLoc};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use declare;
|
|
|
|
|
use machine;
|
2016-06-02 23:43:16 +02:00
|
|
|
|
use machine::{llalign_of_min, llsize_of};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use meth;
|
|
|
|
|
use mir;
|
|
|
|
|
use monomorphize::{self, Instance};
|
2016-05-09 14:26:15 -04:00
|
|
|
|
use partitioning::{self, PartitioningStrategy, CodegenUnit};
|
2016-05-26 08:59:58 -04:00
|
|
|
|
use symbol_map::SymbolMap;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use symbol_names_test;
|
2016-11-04 17:37:42 -04:00
|
|
|
|
use trans_item::{TransItem, DefPathBasedNames};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use type_::Type;
|
|
|
|
|
use type_of;
|
|
|
|
|
use value::Value;
|
|
|
|
|
use Disr;
|
2016-11-08 14:02:55 +11:00
|
|
|
|
use util::nodemap::{NodeSet, FxHashMap, FxHashSet};
|
2013-06-16 22:52:44 +12:00
|
|
|
|
|
2014-01-29 13:50:05 +11:00
|
|
|
|
use arena::TypedArena;
|
2015-02-28 23:55:50 +02:00
|
|
|
|
use libc::c_uint;
|
2015-02-17 22:47:40 -08:00
|
|
|
|
use std::ffi::{CStr, CString};
|
2016-07-20 07:55:45 -04:00
|
|
|
|
use std::borrow::Cow;
|
2013-12-20 20:33:22 -08:00
|
|
|
|
use std::cell::{Cell, RefCell};
|
2016-06-24 20:54:52 +02:00
|
|
|
|
use std::ptr;
|
2016-05-26 08:59:58 -04:00
|
|
|
|
use std::rc::Rc;
|
2014-11-25 13:28:35 -08:00
|
|
|
|
use std::str;
|
2016-08-16 17:41:38 +03:00
|
|
|
|
use std::i32;
|
2016-06-21 18:08:13 -04:00
|
|
|
|
use syntax_pos::{Span, DUMMY_SP};
|
2015-09-14 21:58:20 +12:00
|
|
|
|
use syntax::attr;
|
2016-03-29 08:50:44 +03:00
|
|
|
|
use rustc::hir;
|
2015-07-31 00:04:06 -07:00
|
|
|
|
use syntax::ast;
|
2012-03-03 17:49:23 -08:00
|
|
|
|
|
2014-11-14 09:18:10 -08:00
|
|
|
|
thread_local! {
    // Per-thread stack of instruction-context labels used for debugging the
    // translation pass. `None` until `init_insn_ctxt` is called on the thread.
    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
        RefCell::new(None)
    }
}
|
2013-02-27 19:13:53 -05:00
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Runs `blk` with the current thread's instruction-context stack, if one
/// has been initialized via `init_insn_ctxt`; otherwise does nothing.
pub fn with_insn_ctxt<F>(blk: F)
    where F: FnOnce(&[&'static str])
{
    TASK_LOCAL_INSN_KEY.with(move |slot| {
        // Only invoke `blk` when the slot holds Some(stack); the immutable
        // borrow is held only for the duration of the call.
        slot.borrow().as_ref().map(move |s| blk(s));
    })
}
|
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
/// Initializes the current thread's instruction-context stack to an empty
/// vector. Note: replaces any existing stack on this thread.
pub fn init_insn_ctxt() {
    TASK_LOCAL_INSN_KEY.with(|slot| {
        *slot.borrow_mut() = Some(Vec::new());
    });
}
|
|
|
|
|
|
2014-06-06 15:51:42 +02:00
|
|
|
|
/// RAII guard returned by `push_ctxt`; popping of the pushed label happens
/// in its `Drop` impl. The private unit field prevents construction outside
/// this module.
pub struct _InsnCtxt {
    _cannot_construct_outside_of_this_module: (),
}
|
2012-07-11 15:00:40 -07:00
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
impl Drop for _InsnCtxt {
    // Pops the most recently pushed label from the thread-local context
    // stack; a no-op if the stack was never initialized.
    fn drop(&mut self) {
        TASK_LOCAL_INSN_KEY.with(|slot| {
            if let Some(ctx) = slot.borrow_mut().as_mut() {
                ctx.pop();
            }
        })
    }
}
|
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
/// Pushes label `s` onto the thread-local instruction-context stack (if it
/// is initialized) and returns a guard that pops it again when dropped.
pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
    debug!("new InsnCtxt: {}", s);
    TASK_LOCAL_INSN_KEY.with(|slot| {
        if let Some(ctx) = slot.borrow_mut().as_mut() {
            ctx.push(s)
        }
    });
    _InsnCtxt {
        _cannot_construct_outside_of_this_module: (),
    }
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
/// RAII recorder of per-function LLVM instruction counts; see the `Drop`
/// impl for where the delta is pushed into `ccx.stats()`.
pub struct StatRecorder<'a, 'tcx: 'a> {
    ccx: &'a CrateContext<'a, 'tcx>,
    // Function name; taken (set to None) when stats are recorded on drop.
    name: Option<String>,
    // Instruction count snapshot at construction time.
    istart: usize,
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    /// Starts recording for the function `name`, snapshotting the current
    /// `n_llvm_insns` counter.
    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
        let istart = ccx.stats().n_llvm_insns.get();
        StatRecorder {
            ccx: ccx,
            name: Some(name),
            istart: istart,
        }
    }
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    // When `-Z trans-stats` is enabled, records (name, insn-count delta)
    // for the function and bumps the translated-function counter.
    fn drop(&mut self) {
        if self.ccx.sess().trans_stats() {
            let iend = self.ccx.stats().n_llvm_insns.get();
            self.ccx
                .stats()
                .fn_stats
                .borrow_mut()
                .push((self.name.take().unwrap(), iend - self.istart));
            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
            // Reset LLVM insn count to avoid compound costs.
            self.ccx.stats().n_llvm_insns.set(self.istart);
        }
    }
}
|
|
|
|
|
|
2016-08-16 17:41:38 +03:00
|
|
|
|
/// Returns a pointer to the metadata (extra) field of a fat pointer.
pub fn get_meta(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_EXTRA)
}
|
|
|
|
|
|
|
|
|
|
/// Returns a pointer to the data-address field of a fat pointer.
pub fn get_dataptr(bcx: Block, fat_ptr: ValueRef) -> ValueRef {
    StructGEP(bcx, fat_ptr, abi::FAT_PTR_ADDR)
}
|
|
|
|
|
|
2016-10-04 17:44:31 +03:00
|
|
|
|
/// `Builder`-based variant of `get_meta`: pointer to a fat pointer's
/// metadata field.
pub fn get_meta_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
    b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
}
|
|
|
|
|
|
|
|
|
|
/// `Builder`-based variant of `get_dataptr`: pointer to a fat pointer's
/// data-address field.
pub fn get_dataptr_builder(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
    b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Looks up the `DefId` of the lang item `it`, aborting compilation with a
/// fatal error (mentioning the type being allocated) if it is missing.
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
    match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
        Err(s) => {
            // `fatal` diverges, so no value is needed from this arm.
            bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s));
        }
    }
}
|
2013-07-15 20:42:13 -07:00
|
|
|
|
|
2014-04-06 13:54:41 +03:00
|
|
|
|
// The following malloc_raw_dyn* functions allocate a box to contain
|
|
|
|
|
// a given type, but with a potentially dynamic size.
|
2012-06-06 18:22:49 -07:00
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
/// Emits a call to the exchange-malloc lang item to allocate `size` bytes
/// with alignment `align`, casting the returned pointer to `llty_ptr`.
/// `info_ty` is only used for the error message if the lang item is missing.
pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
    let r = Callee::def(bcx.ccx(), def_id, bcx.tcx().intern_substs(&[]))
        .call(bcx, debug_loc, &[size, align], None);

    // Cast the raw allocation to the caller's requested pointer type.
    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
|
|
|
|
|
|
2014-04-19 10:33:46 -07:00
|
|
|
|
|
2016-03-29 01:46:02 +02:00
|
|
|
|
/// Maps a HIR comparison operator to the LLVM integer-compare predicate,
/// choosing signed vs. unsigned variants based on `signed`.
/// Panics (via `bug!`) on non-comparison operators.
pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
                                signed: bool)
                                -> llvm::IntPredicate {
    match op {
        hir::BiEq => llvm::IntEQ,
        hir::BiNe => llvm::IntNE,
        hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
        hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
        hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
        hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
        op => {
            bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
                  found {:?}",
                 op)
        }
    }
}
|
2011-06-15 11:19:50 -07:00
|
|
|
|
|
2016-03-29 01:46:02 +02:00
|
|
|
|
/// Maps a HIR comparison operator to the LLVM floating-point compare
/// predicate. Note `BiNe` uses the unordered predicate (`RealUNE`) so that
/// comparisons involving NaN yield "not equal"; the others are ordered.
/// Panics (via `bug!`) on non-comparison operators.
pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
    match op {
        hir::BiEq => llvm::RealOEQ,
        hir::BiNe => llvm::RealUNE,
        hir::BiLt => llvm::RealOLT,
        hir::BiLe => llvm::RealOLE,
        hir::BiGt => llvm::RealOGT,
        hir::BiGe => llvm::RealOGE,
        op => {
            bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
                  found {:?}",
                 op);
        }
    }
}
|
|
|
|
|
|
|
|
|
|
/// Emits an element-wise SIMD comparison of `lhs` and `rhs` (vectors whose
/// element type is `t`), sign-extending the `< N x i1 >` result to `ret_ty`.
pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
                                      ret_ty: Type,
                                      op: hir::BinOp_,
                                      debug_loc: DebugLoc)
                                      -> ValueRef {
    let signed = match t.sty {
        ty::TyFloat(_) => {
            // Floats use fcmp and return directly.
            let cmp = bin_op_to_fcmp_predicate(op);
            return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
        },
        ty::TyUint(_) => false,
        ty::TyInt(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
}
|
|
|
|
|
|
2015-11-11 22:02:51 +02:00
|
|
|
|
/// Retrieve the information we are losing (making dynamic) in an unsizing
|
|
|
|
|
/// adjustment.
|
|
|
|
|
///
|
|
|
|
|
/// The `old_info` argument is a bit funny. It is intended for use
|
|
|
|
|
/// in an upcast, where the new vtable for an object will be drived
|
|
|
|
|
/// from the old one.
|
|
|
|
|
/// Computes the metadata value (length or vtable) produced when unsizing
/// `source` to `target`. For array-to-slice this is the length constant;
/// for trait-object upcasts it reuses `old_info`; for concrete-to-trait it
/// builds (and casts) the vtable. Panics on any other type pair.
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
                                source: Ty<'tcx>,
                                target: Ty<'tcx>,
                                old_info: Option<ValueRef>)
                                -> ValueRef {
    // Peel through struct wrappers until the actually-unsized tails differ.
    let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
    match (&source.sty, &target.sty) {
        (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
        (&ty::TyTrait(_), &ty::TyTrait(_)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never actually require an actual
            // change to the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        }
        (_, &ty::TyTrait(ref data)) => {
            let trait_ref = data.principal.with_self_ty(ccx.tcx(), source);
            let trait_ref = ccx.tcx().erase_regions(&trait_ref);
            consts::ptrcast(meth::get_vtable(ccx, trait_ref),
                            Type::vtable_ptr(ccx))
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                  source,
                  target),
    }
}
|
|
|
|
|
|
|
|
|
|
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
|
|
|
|
|
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
/// Returns the (data pointer, metadata) pair of the resulting fat pointer.
pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   src: ValueRef,
                                   src_ty: Ty<'tcx>,
                                   dst_ty: Ty<'tcx>)
                                   -> (ValueRef, ValueRef) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.sty, &dst_ty.sty) {
        // All pointer-like (Box/&/&mut/*) to pointer-like unsizings are
        // handled uniformly: cast the data pointer and compute metadata.
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            // The source pointee must be sized, since src is a thin pointer.
            assert!(common::type_is_sized(bcx.tcx(), a));
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
            (PointerCast(bcx, src, ptr_ty),
             unsized_info(bcx.ccx(), a, b, None))
        }
        _ => bug!("unsize_thin_ptr: called on bad types"),
    }
}
|
|
|
|
|
|
|
|
|
|
/// Coerce `src`, which is a reference to a value of type `src_ty`,
|
|
|
|
|
/// to a value of type `dst_ty` and store the result in `dst`
|
|
|
|
|
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`.
/// Pointer-like types are unsized directly; ADTs are coerced field by
/// field, recursing into the (single) field whose type actually changes.
pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       src: ValueRef,
                                       src_ty: Ty<'tcx>,
                                       dst: ValueRef,
                                       dst_ty: Ty<'tcx>) {
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::TyBox(..), &ty::TyBox(..)) |
        (&ty::TyRef(..), &ty::TyRef(..)) |
        (&ty::TyRef(..), &ty::TyRawPtr(..)) |
        (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
            let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
                // fat-ptr to fat-ptr unsize preserves the vtable
                // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                // So we need to pointercast the base to ensure
                // the types match up.
                let (base, info) = load_fat_ptr(bcx, src, src_ty);
                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty);
                let base = PointerCast(bcx, base, llcast_ty);
                (base, info)
            } else {
                // Thin pointer: load it and compute the new metadata.
                let base = load_ty(bcx, src, src_ty);
                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
            };
            store_fat_ptr(bcx, base, info, dst, dst_ty);
        }

        (&ty::TyAdt(def_a, substs_a), &ty::TyAdt(def_b, substs_b)) => {
            // CoerceUnsized on ADTs only relates two instantiations of the
            // same definition.
            assert_eq!(def_a, def_b);

            let src_fields = def_a.variants[0].fields.iter().map(|f| {
                monomorphize::field_ty(bcx.tcx(), substs_a, f)
            });
            let dst_fields = def_b.variants[0].fields.iter().map(|f| {
                monomorphize::field_ty(bcx.tcx(), substs_b, f)
            });

            let src = adt::MaybeSizedValue::sized(src);
            let dst = adt::MaybeSizedValue::sized(dst);

            let iter = src_fields.zip(dst_fields).enumerate();
            for (i, (src_fty, dst_fty)) in iter {
                // Zero-sized destination fields need no data movement.
                if type_is_zero_size(bcx.ccx(), dst_fty) {
                    continue;
                }

                let src_f = adt::trans_field_ptr(bcx, src_ty, src, Disr(0), i);
                let dst_f = adt::trans_field_ptr(bcx, dst_ty, dst, Disr(0), i);
                if src_fty == dst_fty {
                    // Unchanged field: plain memory copy.
                    memcpy_ty(bcx, dst_f, src_f, src_fty);
                } else {
                    // The coerced field: recurse.
                    coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
                  src_ty,
                  dst_ty),
    }
}
|
|
|
|
|
|
2016-05-06 17:07:36 -04:00
|
|
|
|
/// Determines which kind of custom `CoerceUnsized` implementation applies
/// when coercing `source_ty` to `target_ty`, by fulfilling the
/// `CoerceUnsized` trait obligation and inspecting the chosen impl.
pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
                                             source_ty: Ty<'tcx>,
                                             target_ty: Ty<'tcx>)
                                             -> CustomCoerceUnsized {
    // Build the obligation `source_ty: CoerceUnsized<target_ty>`.
    let trait_ref = ty::Binder(ty::TraitRef {
        def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
        substs: scx.tcx().mk_substs_trait(source_ty, &[target_ty])
    });

    match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
        traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
            scx.tcx().custom_coerce_unsized_kind(impl_def_id)
        }
        vtable => {
            bug!("invalid CoerceUnsized vtable: {:?}", vtable);
        }
    }
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Adjusts the RHS of a (non-constant) shift to the LHS's integer width,
/// emitting trunc/zext instructions in `cx` as needed.
pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Constant-expression variant of `cast_shift_expr_rhs`: adjusts the RHS of
/// a shift using LLVM constant trunc/zext (no instructions emitted).
pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op,
                   lhs,
                   rhs,
                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}
|
|
|
|
|
|
2015-07-31 00:04:06 -07:00
|
|
|
|
fn cast_shift_rhs<F, G>(op: hir::BinOp_,
|
2015-03-19 19:52:08 +01:00
|
|
|
|
lhs: ValueRef,
|
|
|
|
|
rhs: ValueRef,
|
|
|
|
|
trunc: F,
|
|
|
|
|
zext: G)
|
2015-11-19 12:36:31 +01:00
|
|
|
|
-> ValueRef
|
|
|
|
|
where F: FnOnce(ValueRef, Type) -> ValueRef,
|
|
|
|
|
G: FnOnce(ValueRef, Type) -> ValueRef
|
2014-12-09 13:44:51 -05:00
|
|
|
|
{
|
2012-02-21 21:01:33 -08:00
|
|
|
|
// Shifts may have any size int on the rhs
|
2016-03-29 09:32:58 +03:00
|
|
|
|
if op.is_shift() {
|
2015-01-15 01:08:22 +11:00
|
|
|
|
let mut rhs_llty = val_ty(rhs);
|
|
|
|
|
let mut lhs_llty = val_ty(lhs);
|
2015-11-19 12:36:31 +01:00
|
|
|
|
if rhs_llty.kind() == Vector {
|
|
|
|
|
rhs_llty = rhs_llty.element_type()
|
|
|
|
|
}
|
|
|
|
|
if lhs_llty.kind() == Vector {
|
|
|
|
|
lhs_llty = lhs_llty.element_type()
|
|
|
|
|
}
|
2015-01-15 01:08:22 +11:00
|
|
|
|
let rhs_sz = rhs_llty.int_width();
|
|
|
|
|
let lhs_sz = lhs_llty.int_width();
|
|
|
|
|
if lhs_sz < rhs_sz {
|
|
|
|
|
trunc(rhs, lhs_llty)
|
|
|
|
|
} else if lhs_sz > rhs_sz {
|
|
|
|
|
// FIXME (#1877: If shifting by negative
|
|
|
|
|
// values becomes not undefined then this is wrong.
|
|
|
|
|
zext(rhs, lhs_llty)
|
2012-02-21 21:01:33 -08:00
|
|
|
|
} else {
|
|
|
|
|
rhs
|
|
|
|
|
}
|
2015-01-15 01:08:22 +11:00
|
|
|
|
} else {
|
|
|
|
|
rhs
|
2012-02-21 21:01:33 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
/// Emits a call to `llfn` with `llargs`, using an LLVM `invoke` (with a
/// landing pad and a fresh normal-return block) when unwinding must be
/// caught, or a plain `call` otherwise. Returns the call result together
/// with the block in which translation should continue.
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          debug_loc: DebugLoc)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        // Dead code: produce a placeholder value without emitting anything.
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    if need_invoke(bcx) {
        debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {:?}", Value(llarg));
        }
        let normal_bcx = bcx.fcx.new_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        let llresult = Invoke(bcx,
                              llfn,
                              &llargs[..],
                              normal_bcx.llbb,
                              landing_pad,
                              debug_loc);
        // Control resumes in the normal-return block after an invoke.
        return (llresult, normal_bcx);
    } else {
        debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {:?}", Value(llarg));
        }

        let llresult = Call(bcx, llfn, &llargs[..], debug_loc);
        return (llresult, bcx);
    }
}
|
|
|
|
|
|
2015-08-11 11:48:43 -07:00
|
|
|
|
/// Returns whether this session's target will use SEH-based unwinding.
|
|
|
|
|
///
|
|
|
|
|
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
|
|
|
|
|
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
|
|
|
|
|
/// 64-bit MinGW) instead of "full SEH".
|
|
|
|
|
pub fn wants_msvc_seh(sess: &Session) -> bool {
    // Determined entirely by the target spec's is_like_msvc flag.
    sess.target.target.options.is_like_msvc
}
|
|
|
|
|
|
2015-12-20 15:30:09 +02:00
|
|
|
|
/// True when an `invoke` should not be emitted: either landing pads are
/// disabled session-wide, or this block already sits inside a landing pad.
pub fn avoid_invoke(bcx: Block) -> bool {
    bcx.sess().no_landing_pads() || bcx.lpad().is_some()
}
|
2012-06-29 12:31:23 -07:00
|
|
|
|
|
2015-12-20 15:30:09 +02:00
|
|
|
|
pub fn need_invoke(bcx: Block) -> bool {
|
|
|
|
|
if avoid_invoke(bcx) {
|
|
|
|
|
false
|
|
|
|
|
} else {
|
|
|
|
|
bcx.fcx.needs_invoke()
|
2012-07-23 16:00:19 -07:00
|
|
|
|
}
|
2012-03-26 13:30:56 -07:00
|
|
|
|
}
|
|
|
|
|
|
2016-10-04 19:24:49 +03:00
|
|
|
|
/// Emits a call to the `llvm.assume` intrinsic with the i1 condition `val`,
/// informing the optimizer that `val` holds at this point.
pub fn call_assume<'a, 'tcx>(b: &Builder<'a, 'tcx>, val: ValueRef) {
    let assume_intrinsic = b.ccx.get_intrinsic("llvm.assume");
    b.call(assume_intrinsic, &[val], None);
}
|
|
|
|
|
|
2014-11-25 21:17:11 -05:00
|
|
|
|
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
|
|
|
|
|
/// differs from the type used for SSA values. Also handles various special cases where the type
|
|
|
|
|
/// gives us better information about what we are loading.
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Block-based wrapper around `load_ty_builder`; in unreachable code it
/// returns an `undef` of the appropriate type without emitting a load.
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    if cx.unreachable.get() {
        return C_undef(type_of::type_of(cx.ccx(), t));
    }
    load_ty_builder(&B(cx), ptr, t)
}
|
|
|
|
|
|
|
|
|
|
/// Loads a value of type `t` from `ptr`, converting from the in-memory
/// representation to the SSA-value representation (e.g. i8 -> i1 for bool)
/// and attaching range/nonnull assertions where the type permits.
pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let ccx = b.ccx;
    if t.is_zero_size(ccx, t) {
        return C_undef(type_of::type_of(ccx, t));
    }
|
|
|
|
|
|
2014-11-25 21:17:11 -05:00
|
|
|
|
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
|
|
|
|
|
/// differs from the type used for SSA values.
|
2015-01-08 16:28:07 +01:00
|
|
|
|
/// Helper for storing values in memory. Does the necessary conversion if
/// the in-memory type differs from the type used for SSA values: fat
/// pointers are split into data/metadata stores, and immediates are
/// converted via `from_immediate` (e.g. i1 -> i8 for bool).
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() {
        return;
    }

    debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));

    if common::type_is_fat_ptr(cx.tcx(), t) {
        let lladdr = ExtractValue(cx, v, abi::FAT_PTR_ADDR);
        let llextra = ExtractValue(cx, v, abi::FAT_PTR_EXTRA);
        store_fat_ptr(cx, lladdr, llextra, dst, t);
    } else {
        Store(cx, from_immediate(cx, v), dst);
    }
}
|
|
|
|
|
|
2015-11-10 22:05:11 +02:00
|
|
|
|
/// Stores a fat pointer as two components: `data` into the address slot
/// and `extra` (length or vtable) into the metadata slot of `dst`.
pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                 data: ValueRef,
                                 extra: ValueRef,
                                 dst: ValueRef,
                                 _ty: Ty<'tcx>) {
    // FIXME: emit metadata
    Store(cx, data, get_dataptr(cx, dst));
    Store(cx, extra, get_meta(cx, dst));
}
|
|
|
|
|
|
|
|
|
|
/// Loads the (data, metadata) pair of a fat pointer stored at `src`.
/// Block-based wrapper around `load_fat_ptr_builder`.
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                src: ValueRef,
                                ty: Ty<'tcx>)
                                -> (ValueRef, ValueRef)
{
    if cx.unreachable.get() {
        // FIXME: remove me
        return (Load(cx, get_dataptr(cx, src)),
                Load(cx, get_meta(cx, src)));
    }

    load_fat_ptr_builder(&B(cx), src, ty)
}
|
|
|
|
|
|
|
|
|
|
/// Loads the (data, metadata) pair of a fat pointer stored at `src`,
/// asserting non-null on the data pointer for `&`/`Box` types.
pub fn load_fat_ptr_builder<'a, 'tcx>(
    b: &Builder<'a, 'tcx>,
    src: ValueRef,
    t: Ty<'tcx>)
    -> (ValueRef, ValueRef)
{

    let ptr = get_dataptr_builder(b, src);
    let ptr = if t.is_region_ptr() || t.is_unique() {
        // References and Box are never null; tell LLVM so.
        b.load_nonnull(ptr)
    } else {
        b.load(ptr)
    };

    // FIXME: emit metadata on `meta`.
    let meta = b.load(get_meta_builder(b, src));

    (ptr, meta)
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
|
|
|
|
|
if val_ty(val) == Type::i1(bcx.ccx()) {
|
2015-03-21 00:21:38 +01:00
|
|
|
|
ZExt(bcx, val, Type::i8(bcx.ccx()))
|
|
|
|
|
} else {
|
|
|
|
|
val
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
|
2015-06-24 08:24:13 +03:00
|
|
|
|
if ty.is_bool() {
|
2015-03-21 00:21:38 +01:00
|
|
|
|
Trunc(bcx, val, Type::i1(bcx.ccx()))
|
|
|
|
|
} else {
|
|
|
|
|
val
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
|
|
|
|
|
where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
|
2014-12-09 13:44:51 -05:00
|
|
|
|
{
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("with_cond");
|
2014-09-04 13:36:39 +02:00
|
|
|
|
|
2015-05-19 17:38:55 +03:00
|
|
|
|
if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
|
2014-09-04 13:36:39 +02:00
|
|
|
|
return bcx;
|
|
|
|
|
}
|
|
|
|
|
|
2014-01-15 14:39:08 -05:00
|
|
|
|
let fcx = bcx.fcx;
|
2016-08-16 17:41:38 +03:00
|
|
|
|
let next_cx = fcx.new_block("next");
|
|
|
|
|
let cond_cx = fcx.new_block("cond");
|
2014-12-11 13:53:30 +01:00
|
|
|
|
CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
let after_cx = f(cond_cx);
|
2013-12-18 14:54:42 -08:00
|
|
|
|
if !after_cx.terminated.get() {
|
2014-12-11 13:53:30 +01:00
|
|
|
|
Br(after_cx, next_cx.llbb, DebugLoc::None);
|
2013-12-18 14:54:42 -08:00
|
|
|
|
}
|
2012-08-28 15:54:45 -07:00
|
|
|
|
next_cx
|
|
|
|
|
}
|
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
/// Which end of an alloca's live range an `llvm.lifetime.*` intrinsic marks.
pub enum Lifetime { Start, End }
|
2016-01-12 17:17:50 +01:00
|
|
|
|
|
|
|
|
|
// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
|
|
|
|
|
// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
|
|
|
|
|
// and the intrinsic for `lt` and passes them to `emit`, which is in
|
|
|
|
|
// charge of generating code to call the passed intrinsic on whatever
|
|
|
|
|
// block of generated code is targetted for the intrinsic.
|
|
|
|
|
//
|
|
|
|
|
// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
|
|
|
|
|
// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
|
|
|
|
|
fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>,
|
|
|
|
|
ptr: ValueRef,
|
|
|
|
|
lt: Lifetime,
|
|
|
|
|
emit: F)
|
|
|
|
|
where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef)
|
|
|
|
|
{
|
2015-12-31 16:50:06 +13:00
|
|
|
|
if ccx.sess().opts.optimize == config::OptLevel::No {
|
Emit LLVM lifetime intrinsics to improve stack usage and codegen in general
Lifetime intrinsics help to reduce stack usage, because LLVM can apply
stack coloring to reuse the stack slots of dead allocas for new ones.
For example these functions now both use the same amount of stack, while
previous `bar()` used five times as much as `foo()`:
````rust
fn foo() {
println("{}", 5);
}
fn bar() {
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
}
````
On top of that, LLVM can also optimize out certain operations when it
knows that memory is dead after a certain point. For example, it can
sometimes remove the zeroing used to cancel the drop glue. This is
possible when the glue drop itself was already removed because the
zeroing dominated the drop glue call. For example in:
````rust
pub fn bar(x: (Box<int>, int)) -> (Box<int>, int) {
x
}
````
With optimizations, this currently results in:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 16, i32 8, i1 false)
ret void
}
````
But with lifetime intrinsics we get:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.lifetime.end(i64 16, i8* %2)
ret void
}
````
Fixes #15665
2014-05-01 19:32:07 +02:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-12 17:17:50 +01:00
|
|
|
|
let _icx = push_ctxt(match lt {
|
|
|
|
|
Lifetime::Start => "lifetime_start",
|
|
|
|
|
Lifetime::End => "lifetime_end"
|
|
|
|
|
});
|
Emit LLVM lifetime intrinsics to improve stack usage and codegen in general
Lifetime intrinsics help to reduce stack usage, because LLVM can apply
stack coloring to reuse the stack slots of dead allocas for new ones.
For example these functions now both use the same amount of stack, while
previous `bar()` used five times as much as `foo()`:
````rust
fn foo() {
println("{}", 5);
}
fn bar() {
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
}
````
On top of that, LLVM can also optimize out certain operations when it
knows that memory is dead after a certain point. For example, it can
sometimes remove the zeroing used to cancel the drop glue. This is
possible when the glue drop itself was already removed because the
zeroing dominated the drop glue call. For example in:
````rust
pub fn bar(x: (Box<int>, int)) -> (Box<int>, int) {
x
}
````
With optimizations, this currently results in:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 16, i32 8, i1 false)
ret void
}
````
But with lifetime intrinsics we get:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.lifetime.end(i64 16, i8* %2)
ret void
}
````
Fixes #15665
2014-05-01 19:32:07 +02:00
|
|
|
|
|
2015-08-23 14:38:42 +02:00
|
|
|
|
let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
|
|
|
|
|
if size == 0 {
|
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-12 17:17:50 +01:00
|
|
|
|
let lifetime_intrinsic = ccx.get_intrinsic(match lt {
|
|
|
|
|
Lifetime::Start => "llvm.lifetime.start",
|
|
|
|
|
Lifetime::End => "llvm.lifetime.end"
|
|
|
|
|
});
|
|
|
|
|
emit(ccx, size, lifetime_intrinsic)
|
Emit LLVM lifetime intrinsics to improve stack usage and codegen in general
Lifetime intrinsics help to reduce stack usage, because LLVM can apply
stack coloring to reuse the stack slots of dead allocas for new ones.
For example these functions now both use the same amount of stack, while
previous `bar()` used five times as much as `foo()`:
````rust
fn foo() {
println("{}", 5);
}
fn bar() {
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
}
````
On top of that, LLVM can also optimize out certain operations when it
knows that memory is dead after a certain point. For example, it can
sometimes remove the zeroing used to cancel the drop glue. This is
possible when the glue drop itself was already removed because the
zeroing dominated the drop glue call. For example in:
````rust
pub fn bar(x: (Box<int>, int)) -> (Box<int>, int) {
x
}
````
With optimizations, this currently results in:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 16, i32 8, i1 false)
ret void
}
````
But with lifetime intrinsics we get:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.lifetime.end(i64 16, i8* %2)
ret void
}
````
Fixes #15665
2014-05-01 19:32:07 +02:00
|
|
|
|
}
|
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
impl Lifetime {
|
|
|
|
|
pub fn call(self, b: &Builder, ptr: ValueRef) {
|
|
|
|
|
core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| {
|
|
|
|
|
let ptr = b.pointercast(ptr, Type::i8p(ccx));
|
|
|
|
|
b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None);
|
|
|
|
|
});
|
|
|
|
|
}
|
2016-01-12 17:17:50 +01:00
|
|
|
|
}
|
2015-08-23 14:38:42 +02:00
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) {
|
|
|
|
|
if !bcx.unreachable.get() {
|
|
|
|
|
Lifetime::Start.call(&bcx.build(), ptr);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) {
|
|
|
|
|
if !bcx.unreachable.get() {
|
|
|
|
|
Lifetime::End.call(&bcx.build(), ptr);
|
|
|
|
|
}
|
Emit LLVM lifetime intrinsics to improve stack usage and codegen in general
Lifetime intrinsics help to reduce stack usage, because LLVM can apply
stack coloring to reuse the stack slots of dead allocas for new ones.
For example these functions now both use the same amount of stack, while
previous `bar()` used five times as much as `foo()`:
````rust
fn foo() {
println("{}", 5);
}
fn bar() {
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
}
````
On top of that, LLVM can also optimize out certain operations when it
knows that memory is dead after a certain point. For example, it can
sometimes remove the zeroing used to cancel the drop glue. This is
possible when the glue drop itself was already removed because the
zeroing dominated the drop glue call. For example in:
````rust
pub fn bar(x: (Box<int>, int)) -> (Box<int>, int) {
x
}
````
With optimizations, this currently results in:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 16, i32 8, i1 false)
ret void
}
````
But with lifetime intrinsics we get:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.lifetime.end(i64 16, i8* %2)
ret void
}
````
Fixes #15665
2014-05-01 19:32:07 +02:00
|
|
|
|
}
|
|
|
|
|
|
2015-10-18 14:17:34 -07:00
|
|
|
|
// Generates code for resumption of unwind at the end of a landing pad.
|
|
|
|
|
pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
|
|
|
|
|
if !bcx.sess().target.target.options.custom_unwind_resume {
|
|
|
|
|
Resume(bcx, lpval);
|
|
|
|
|
} else {
|
|
|
|
|
let exc_ptr = ExtractValue(bcx, lpval, 0);
|
2016-02-23 21:21:50 +02:00
|
|
|
|
bcx.fcx.eh_unwind_resume()
|
2016-08-16 17:41:38 +03:00
|
|
|
|
.call(bcx, DebugLoc::None, &[exc_ptr], None);
|
2015-10-18 14:17:34 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-08 14:29:46 +02:00
|
|
|
|
pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
|
|
|
|
|
dst: ValueRef,
|
|
|
|
|
src: ValueRef,
|
|
|
|
|
n_bytes: ValueRef,
|
|
|
|
|
align: u32) {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("call_memcpy");
|
2016-03-08 14:29:46 +02:00
|
|
|
|
let ccx = b.ccx;
|
2015-08-15 18:43:39 +12:00
|
|
|
|
let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
|
|
|
|
|
let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
|
2014-04-09 19:56:31 -04:00
|
|
|
|
let memcpy = ccx.get_intrinsic(&key);
|
2016-03-08 14:29:46 +02:00
|
|
|
|
let src_ptr = b.pointercast(src, Type::i8p(ccx));
|
|
|
|
|
let dst_ptr = b.pointercast(dst, Type::i8p(ccx));
|
|
|
|
|
let size = b.intcast(n_bytes, ccx.int_type());
|
2014-03-15 22:29:34 +02:00
|
|
|
|
let align = C_i32(ccx, align as i32);
|
2014-07-05 21:43:47 +02:00
|
|
|
|
let volatile = C_bool(ccx, false);
|
2016-03-08 14:29:46 +02:00
|
|
|
|
b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("memcpy_ty");
|
2012-08-28 15:54:45 -07:00
|
|
|
|
let ccx = bcx.ccx();
|
2015-08-22 17:07:37 +02:00
|
|
|
|
|
2016-03-08 14:29:46 +02:00
|
|
|
|
if type_is_zero_size(ccx, t) || bcx.unreachable.get() {
|
2015-08-22 17:07:37 +02:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-24 08:24:13 +03:00
|
|
|
|
if t.is_structural() {
|
2013-05-25 09:52:25 -04:00
|
|
|
|
let llty = type_of::type_of(ccx, t);
|
|
|
|
|
let llsz = llsize_of(ccx, llty);
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let llalign = type_of::align_of(ccx, t);
|
2016-03-08 14:29:46 +02:00
|
|
|
|
call_memcpy(&B(bcx), dst, src, llsz, llalign as u32);
|
2015-11-20 21:42:13 +01:00
|
|
|
|
} else if common::type_is_fat_ptr(bcx.tcx(), t) {
|
|
|
|
|
let (data, extra) = load_fat_ptr(bcx, src, t);
|
|
|
|
|
store_fat_ptr(bcx, data, extra, dst, t);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
} else {
|
2015-01-29 14:03:34 +02:00
|
|
|
|
store_ty(bcx, load_ty(bcx, src, t), dst, t);
|
2011-07-27 14:19:39 +02:00
|
|
|
|
}
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-02-10 10:04:39 +01:00
|
|
|
|
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
|
2015-11-19 12:36:31 +01:00
|
|
|
|
if cx.unreachable.get() {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2015-02-10 10:04:39 +01:00
|
|
|
|
let _icx = push_ctxt("init_zero_mem");
|
|
|
|
|
let bcx = cx;
|
|
|
|
|
memfill(&B(bcx), llptr, t, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Always use this function instead of storing a constant byte to the memory
|
|
|
|
|
// in question. e.g. if you store a zero constant, LLVM will drown in vreg
|
2012-08-28 15:54:45 -07:00
|
|
|
|
// allocation for large data structures, and the generated code will be
|
|
|
|
|
// awful. (A telltale sign of this is large quantities of
|
|
|
|
|
// `mov [byte ptr foo],0` in the generated code.)
|
2015-02-10 10:04:39 +01:00
|
|
|
|
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
|
|
|
|
|
let _icx = push_ctxt("memfill");
|
2013-07-21 16:19:34 +02:00
|
|
|
|
let ccx = b.ccx;
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let llty = type_of::type_of(ccx, ty);
|
2014-03-15 22:29:34 +02:00
|
|
|
|
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
|
2015-08-05 09:46:59 +02:00
|
|
|
|
let llzeroval = C_u8(ccx, byte);
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let size = machine::llsize_of(ccx, llty);
|
|
|
|
|
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
|
2016-02-04 19:40:28 +02:00
|
|
|
|
call_memset(b, llptr, llzeroval, size, align, false);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2016-02-04 19:40:28 +02:00
|
|
|
|
pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
|
|
|
|
|
ptr: ValueRef,
|
|
|
|
|
fill_byte: ValueRef,
|
|
|
|
|
size: ValueRef,
|
|
|
|
|
align: ValueRef,
|
|
|
|
|
volatile: bool) {
|
|
|
|
|
let ccx = b.ccx;
|
|
|
|
|
let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
|
|
|
|
|
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
|
|
|
|
|
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
|
|
|
|
|
let volatile = C_bool(ccx, volatile);
|
2016-02-26 01:10:40 +02:00
|
|
|
|
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
|
2016-02-04 19:40:28 +02:00
|
|
|
|
}
|
|
|
|
|
|
2016-01-08 20:40:52 +01:00
|
|
|
|
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
2016-08-16 17:41:38 +03:00
|
|
|
|
ty: Ty<'tcx>,
|
2016-01-08 20:40:52 +01:00
|
|
|
|
name: &str) -> ValueRef {
|
2016-08-16 17:41:38 +03:00
|
|
|
|
assert!(!ty.has_param_types());
|
|
|
|
|
alloca(bcx, type_of::type_of(bcx.ccx(), ty), name)
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("alloca");
|
2013-12-18 14:54:42 -08:00
|
|
|
|
if cx.unreachable.get() {
|
2013-01-10 21:23:07 -08:00
|
|
|
|
unsafe {
|
2013-07-17 03:13:23 -05:00
|
|
|
|
return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
|
2013-01-10 21:23:07 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-07 22:35:11 +03:00
|
|
|
|
DebugLoc::None.apply(cx.fcx);
|
Improve usage of lifetime intrinsics in match expressions
The allocas used in match expression currently don't get good lifetime
markers, in fact they only get lifetime start markers, because their
lifetimes don't match to cleanup scopes.
While the bindings themselves are bog standard and just need a matching
pair of start and end markers, they might need them twice, once for a
guard clause and once for the match body.
The __llmatch alloca OTOH needs a single lifetime start marker, but
when there's a guard clause, it needs two end markers, because its
lifetime ends either when the guard doesn't match or after the match
body.
With these intrinsics in place, LLVM can now, for example, optimize
code like this:
````rust
enum E {
A1(int),
A2(int),
A3(int),
A4(int),
}
pub fn variants(x: E) {
match x {
A1(m) => bar(&m),
A2(m) => bar(&m),
A3(m) => bar(&m),
A4(m) => bar(&m),
}
}
````
To a single call to bar, using only a single stack slot. It still fails
to eliminate some of checks.
````gas
.Ltmp5:
.cfi_def_cfa_offset 16
movb (%rdi), %al
testb %al, %al
je .LBB3_5
movzbl %al, %eax
cmpl $1, %eax
je .LBB3_5
cmpl $2, %eax
.LBB3_5:
movq 8(%rdi), %rax
movq %rax, (%rsp)
leaq (%rsp), %rdi
callq _ZN3bar20hcb7a0d8be8e17e37daaE@PLT
popq %rax
retq
````
2014-07-23 17:39:13 +02:00
|
|
|
|
Alloca(cx, ty, name)
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
    /// Create a function context for the given function.
    /// Beware that you must call `fcx.init` or `fcx.bind_args`
    /// before doing anything with the returned function context.
    pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
               llfndecl: ValueRef,
               fn_ty: FnType,
               definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi)>,
               block_arena: &'blk TypedArena<common::BlockS<'blk, 'tcx>>)
               -> FunctionContext<'blk, 'tcx> {
        // With a concrete instance we translate under its substitutions;
        // without one, fall back to the empty (interned) substs.
        let (param_substs, def_id) = match definition {
            Some((instance, ..)) => {
                common::validate_substs(instance.substs);
                (instance.substs, Some(instance.def))
            }
            None => (ccx.tcx().intern_substs(&[]), None)
        };

        let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id));

        debug!("FunctionContext::new({})",
               definition.map_or(String::new(), |d| d.0.to_string()));

        // A `no_debug` attribute can come from the local HIR map or, for
        // non-local definitions, from the crate store's recorded attributes.
        let no_debug = if let Some(id) = local_id {
            ccx.tcx().map.attrs(id)
               .iter().any(|item| item.check_name("no_debug"))
        } else if let Some(def_id) = def_id {
            ccx.sess().cstore.item_attrs(def_id)
               .iter().any(|item| item.check_name("no_debug"))
        } else {
            false
        };

        let mir = def_id.map(|id| ccx.tcx().item_mir(id));

        // Build full debuginfo only when it is not suppressed and we have
        // both a definition (instance/sig/abi) and its MIR.
        let debug_context = if let (false, Some((instance, sig, abi)), &Some(ref mir)) =
                (no_debug, definition, &mir) {
            debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl, mir)
        } else {
            debuginfo::empty_function_debug_context(ccx)
        };

        FunctionContext {
            mir: mir,
            llfn: llfndecl,
            llretslotptr: Cell::new(None),
            param_env: ccx.tcx().empty_parameter_environment(),
            alloca_insert_pt: Cell::new(None),
            landingpad_alloca: Cell::new(None),
            fn_ty: fn_ty,
            param_substs: param_substs,
            span: None,
            block_arena: block_arena,
            lpad_arena: TypedArena::new(),
            ccx: ccx,
            debug_context: debug_context,
            scopes: RefCell::new(Vec::new()),
        }
    }

    /// Performs setup on a newly created function, creating the entry
    /// scope block and allocating space for the return pointer.
    pub fn init(&'blk self, skip_retptr: bool) -> Block<'blk, 'tcx> {
        let entry_bcx = self.new_block("entry-block");

        // Use a dummy instruction as the insertion point for all allocas.
        // This is later removed in FunctionContext::cleanup.
        self.alloca_insert_pt.set(Some(unsafe {
            Load(entry_bcx, C_null(Type::i8p(self.ccx)));
            llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
        }));

        if !self.fn_ty.ret.is_ignore() && !skip_retptr {
            // We normally allocate the llretslotptr, unless we
            // have been instructed to skip it for immediate return
            // values, or there is nothing to return at all.

            // We create an alloca to hold a pointer of type `ret.original_ty`
            // which will hold the pointer to the right alloca which has the
            // final ret value
            let llty = self.fn_ty.ret.memory_ty(self.ccx);
            // But if there are no nested returns, we skip the indirection
            // and have a single retslot
            let slot = if self.fn_ty.ret.is_indirect() {
                // Indirect (sret) return: the caller passes the return slot
                // as the first LLVM parameter.
                get_param(self.llfn, 0)
            } else {
                AllocaFcx(self, llty, "sret_slot")
            };

            self.llretslotptr.set(Some(slot));
        }

        entry_bcx
    }

    /// Ties up the llstaticallocas -> llloadenv -> lltop edges,
    /// and builds the return block.
    pub fn finish(&'blk self, ret_cx: Block<'blk, 'tcx>,
                  ret_debug_loc: DebugLoc) {
        let _icx = push_ctxt("FunctionContext::finish");

        self.build_return_block(ret_cx, ret_debug_loc);

        // Clear the debug location before cleanup instructions are emitted.
        DebugLoc::None.apply(self);
        self.cleanup();
    }

    // Builds the return block for a function.
    pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
                              ret_debug_location: DebugLoc) {
        // Return void when there is no ret slot, the block is dead, or the
        // value is returned through the sret pointer (written elsewhere).
        if self.llretslotptr.get().is_none() ||
           ret_cx.unreachable.get() ||
           self.fn_ty.ret.is_indirect() {
            return RetVoid(ret_cx, ret_debug_location);
        }

        let retslot = self.llretslotptr.get().unwrap();
        let retptr = Value(retslot);
        let llty = self.fn_ty.ret.original_ty;
        match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) {
            // If there's only a single store to the ret slot, we can directly return
            // the value that was stored and omit the store and the alloca.
            // However, we only want to do this when there is no cast needed.
            (Some(s), None) => {
                let mut retval = s.get_operand(0).unwrap().get();
                s.erase_from_parent();

                // Drop the now-unused ret slot alloca entirely if possible.
                if retptr.has_no_uses() {
                    retptr.erase_from_parent();
                }

                // NOTE(review): `is_indirect()` was already handled by the
                // early return above, so this branch looks unreachable here
                // -- confirm before relying on it.
                if self.fn_ty.ret.is_indirect() {
                    Store(ret_cx, retval, get_param(self.llfn, 0));
                    RetVoid(ret_cx, ret_debug_location)
                } else {
                    // bools live in memory as i8 but are returned as i1.
                    if llty == Type::i1(self.ccx) {
                        retval = Trunc(ret_cx, retval, llty);
                    }
                    Ret(ret_cx, retval, ret_debug_location)
                }
            }
            // NOTE(review): guarded on `is_indirect()`, which the early
            // return above already filtered out -- this arm also looks
            // unreachable; confirm.
            (_, cast_ty) if self.fn_ty.ret.is_indirect() => {
                // Otherwise, copy the return value to the ret slot.
                assert_eq!(cast_ty, None);
                let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty);
                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
                call_memcpy(&B(ret_cx), get_param(self.llfn, 0),
                            retslot, llsz, llalign as u32);
                RetVoid(ret_cx, ret_debug_location)
            }
            (_, Some(cast_ty)) => {
                // An ABI cast applies: reload the slot through a pointer of
                // the cast type so the returned value has the expected
                // representation, preserving the original alignment.
                let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to()));
                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
                unsafe {
                    llvm::LLVMSetAlignment(load, llalign);
                }
                Ret(ret_cx, load, ret_debug_location)
            }
            (_, None) => {
                // General case: load the ret slot and return it directly.
                let retval = if llty == Type::i1(self.ccx) {
                    // bools are stored as i8 in [0, 2); assert the range so
                    // LLVM can exploit it, then truncate back to i1.
                    let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False);
                    Trunc(ret_cx, val, llty)
                } else {
                    Load(ret_cx, retslot)
                };
                Ret(ret_cx, retval, ret_debug_location)
            }
        }
    }
}
|
|
|
|
|
|
2016-11-09 23:09:28 +02:00
|
|
|
|
/// Translates a single monomorphized function instance into LLVM IR.
///
/// The LLVM declaration for `instance` must already exist in the crate
/// context's instance map (it is created during the declaration phase);
/// this function fills in the body by translating the instance's MIR.
pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
    // Optionally record per-function translation statistics (-Z trans-stats);
    // the recorder reports on drop at the end of this function.
    let _s = if ccx.sess().trans_stats() {
        let mut instance_name = String::new();
        DefPathBasedNames::new(ccx.tcx(), true, true)
            .push_def_path(instance.def, &mut instance_name);
        Some(StatRecorder::new(ccx, instance_name))
    } else {
        None
    };

    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // release builds.
    info!("trans_instance({})", instance);

    let _icx = push_ctxt("trans_instance");

    // Compute the fully monomorphized function type: erase regions, then
    // apply the instance's substitutions.
    let fn_ty = ccx.tcx().item_type(instance.def);
    let fn_ty = ccx.tcx().erase_regions(&fn_ty);
    let fn_ty = monomorphize::apply_param_substs(ccx.shared(), instance.substs, &fn_ty);

    let ty::BareFnTy { abi, ref sig, .. } = *common::ty_fn_ty(ccx, fn_ty);
    let sig = ccx.tcx().erase_late_bound_regions_and_normalize(sig);

    // The declaration must have been created earlier; translating an
    // undeclared instance is a compiler bug.
    let lldecl = match ccx.instances().borrow().get(&instance) {
        Some(&val) => val,
        None => bug!("Instance `{:?}` not already declared", instance)
    };

    ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);

    // Unwinding requires unwind tables so the unwinder can walk the stack;
    // they are skipped entirely when landing pads are disabled
    // (e.g. -C panic=abort).
    if !ccx.sess().no_landing_pads() {
        attributes::emit_uwtable(lldecl, true);
    }

    // Lower the Rust signature to an ABI-aware LLVM function type.
    let fn_ty = FnType::new(ccx, abi, &sig, &[]);

    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = FunctionContext::new(ccx,
                               lldecl,
                               fn_ty,
                               Some((instance, &sig, abi)),
                               &arena);

    // At this stage of the compiler, every function body is expected to
    // have MIR available.
    if fcx.mir.is_none() {
        bug!("attempted translation of `{}` w/o MIR", instance);
    }

    mir::trans_mir(&fcx);
}
|
|
|
|
|
|
2016-02-23 22:04:51 +02:00
|
|
|
|
/// Translates the shim function for a tuple-struct or enum-variant
/// constructor: it copies the constructor's arguments into the fields of
/// the return slot and then sets the discriminant to `disr`.
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 def_id: DefId,
                                 substs: &'tcx Substs<'tcx>,
                                 disr: Disr,
                                 llfndecl: ValueRef) {
    // Constructors are trivial; hint that they should be inlined.
    attributes::inline(llfndecl, attributes::InlineAttr::Hint);
    attributes::set_frame_pointer_elimination(ccx, llfndecl);

    // Monomorphize the constructor's function type with the given substs.
    let ctor_ty = ccx.tcx().item_type(def_id);
    let ctor_ty = monomorphize::apply_param_substs(ccx.shared(), substs, &ctor_ty);

    let sig = ccx.tcx().erase_late_bound_regions_and_normalize(&ctor_ty.fn_sig());
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena);
    let bcx = fcx.init(false);

    // Nothing to store when the return type is ignored (e.g. zero-sized).
    if !fcx.fn_ty.ret.is_ignore() {
        let dest = fcx.llretslotptr.get().unwrap();
        let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
        // `llarg_idx` walks the LLVM-level parameters; when the return value
        // is passed indirectly, parameter 0 is the return pointer, so the
        // real arguments start at index 1.
        let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
        let mut arg_idx = 0;
        for (i, arg_ty) in sig.inputs.into_iter().enumerate() {
            // Pointer to the i-th field of the variant being constructed.
            let lldestptr = adt::trans_field_ptr(bcx, sig.output, dest_val, Disr::from(disr), i);
            let arg = &fcx.fn_ty.args[arg_idx];
            arg_idx += 1;
            let b = &bcx.build();
            if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                // A fat pointer occupies two ABI arguments: the data pointer
                // and its metadata (length or vtable); store each half into
                // the corresponding part of the field.
                let meta = &fcx.fn_ty.args[arg_idx];
                arg_idx += 1;
                arg.store_fn_arg(b, &mut llarg_idx, get_dataptr(bcx, lldestptr));
                meta.store_fn_arg(b, &mut llarg_idx, get_meta(bcx, lldestptr));
            } else {
                arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
            }
        }
        // Tag the result with the variant's discriminant.
        adt::trans_set_discr(bcx, sig.output, dest, disr);
    }

    fcx.finish(bcx, DebugLoc::None);
}
|
|
|
|
|
|
2014-11-11 20:22:41 -05:00
|
|
|
|
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
|
|
|
|
|
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
|
|
|
|
|
// applicable to variable declarations and may not really make sense for
|
|
|
|
|
// Rust code in the first place but whitelist them anyway and trust that
|
|
|
|
|
// the user knows what s/he's doing. Who knows, unanticipated use cases
|
|
|
|
|
// may pop up in the future.
|
|
|
|
|
//
|
|
|
|
|
// ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
|
|
|
|
|
// and don't have to be, LLVM treats them as no-ops.
|
|
|
|
|
match name {
|
2016-09-01 13:52:33 -05:00
|
|
|
|
"appending" => Some(llvm::Linkage::AppendingLinkage),
|
|
|
|
|
"available_externally" => Some(llvm::Linkage::AvailableExternallyLinkage),
|
|
|
|
|
"common" => Some(llvm::Linkage::CommonLinkage),
|
|
|
|
|
"extern_weak" => Some(llvm::Linkage::ExternalWeakLinkage),
|
|
|
|
|
"external" => Some(llvm::Linkage::ExternalLinkage),
|
|
|
|
|
"internal" => Some(llvm::Linkage::InternalLinkage),
|
|
|
|
|
"linkonce" => Some(llvm::Linkage::LinkOnceAnyLinkage),
|
|
|
|
|
"linkonce_odr" => Some(llvm::Linkage::LinkOnceODRLinkage),
|
|
|
|
|
"private" => Some(llvm::Linkage::PrivateLinkage),
|
|
|
|
|
"weak" => Some(llvm::Linkage::WeakAnyLinkage),
|
|
|
|
|
"weak_odr" => Some(llvm::Linkage::WeakODRLinkage),
|
2014-11-11 20:22:41 -05:00
|
|
|
|
_ => None,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 20:02:09 -04:00
|
|
|
|
pub fn set_link_section(ccx: &CrateContext,
|
|
|
|
|
llval: ValueRef,
|
|
|
|
|
attrs: &[ast::Attribute]) {
|
|
|
|
|
if let Some(sect) = attr::first_attr_value_str_by_name(attrs, "link_section") {
|
2016-06-13 22:43:30 -07:00
|
|
|
|
if contains_null(§) {
|
|
|
|
|
ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", §));
|
|
|
|
|
}
|
|
|
|
|
unsafe {
|
|
|
|
|
let buf = CString::new(sect.as_bytes()).unwrap();
|
|
|
|
|
llvm::LLVMSetSection(llval, buf.as_ptr());
|
|
|
|
|
}
|
2015-08-03 15:38:06 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-26 12:18:39 -04:00
|
|
|
|
/// Create the `main` function which will initialise the rust runtime and call
|
|
|
|
|
/// users’ main function.
|
|
|
|
|
/// Create the `main` function which will initialise the rust runtime and call
/// users’ main function.
pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
    // No entry function (e.g. when compiling a library): nothing to do.
    let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() {
        Some((id, span)) => {
            (ccx.tcx().map.local_def_id(id), span)
        }
        None => return,
    };

    // check for the #[rustc_error] annotation, which forces an
    // error in trans. This is used to write compile-fail tests
    // that actually test that compilation succeeds without
    // reporting an error.
    if ccx.tcx().has_attr(main_def_id, "rustc_error") {
        ccx.tcx().sess.span_fatal(span, "compilation successful");
    }

    let instance = Instance::mono(ccx.shared(), main_def_id);

    if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        return;
    }

    // Get a concrete function pointer for the user's main.
    let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx);

    let et = ccx.sess().entry_type.get().unwrap();
    match et {
        config::EntryMain => {
            create_entry_fn(ccx, span, main_llfn, true);
        }
        config::EntryStart => create_entry_fn(ccx, span, main_llfn, false),
        config::EntryNone => {} // Do nothing.
    }

    /// Emits the C-ABI `main(argc, argv)` wrapper. When
    /// `use_start_lang_item` is true, it calls the `start` lang item with
    /// the user's main as an opaque pointer; otherwise it calls
    /// `rust_main` (a user-defined `#[start]` fn) directly.
    fn create_entry_fn(ccx: &CrateContext,
                       sp: Span,
                       rust_main: ValueRef,
                       use_start_lang_item: bool) {
        // C signature: int main(int argc, char **argv)
        let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type());

        // Refuse to clobber an already-defined `main` symbol (typically a
        // user function marked #[no_mangle]).
        if declare::get_defined_value(ccx, "main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
                      .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
                      .emit();
            ccx.sess().abort_if_errors();
            bug!();
        }
        let llfn = declare::declare_cfn(ccx, "main", llfty);

        // `main` should respect same config for frame pointer elimination as rest of code
        attributes::set_frame_pointer_elimination(ccx, llfn);

        let llbb = unsafe {
            llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _)
        };
        let bld = ccx.raw_builder();
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bld, llbb);

            debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);

            let (start_fn, args) = if use_start_lang_item {
                let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
                    Ok(id) => id,
                    Err(s) => ccx.sess().fatal(&s)
                };
                let empty_substs = ccx.tcx().intern_substs(&[]);
                let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx);
                let args = {
                    // `start` takes the user main as an opaque i8*, followed
                    // by argc and argv forwarded from this wrapper.
                    let opaque_rust_main =
                        llvm::LLVMBuildPointerCast(bld,
                                                   rust_main,
                                                   Type::i8p(ccx).to_ref(),
                                                   "rust_main\0".as_ptr() as *const _);

                    vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)]
                };
                (start_fn, args)
            } else {
                debug!("using user-defined start fn");
                // #[start] fn receives (argc, argv) directly.
                let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)];

                (rust_main, args)
            };

            let result = llvm::LLVMRustBuildCall(bld,
                                                 start_fn,
                                                 args.as_ptr(),
                                                 args.len() as c_uint,
                                                 ptr::null_mut(),
                                                 noname());

            // The start/main return value becomes the process exit code.
            llvm::LLVMBuildRet(bld, result);
        }
    }
}
|
|
|
|
|
|
2014-09-23 00:14:46 -07:00
|
|
|
|
/// Returns `true` if `s` contains an embedded NUL (`\0`) byte anywhere in
/// its UTF-8 representation.
fn contains_null(s: &str) -> bool {
    s.as_bytes().contains(&0)
}
|
|
|
|
|
|
2016-05-25 08:46:36 +03:00
|
|
|
|
/// Encodes the crate metadata and, depending on the crate types being
/// produced, embeds a (compressed) copy of it into the metadata LLVM
/// module as a global in a dedicated section. Returns the raw
/// (uncompressed) metadata bytes, or an empty vector when no crate type
/// requires metadata.
fn write_metadata(cx: &SharedCrateContext,
                  reachable_ids: &NodeSet) -> Vec<u8> {
    use flate;

    // Ordered so that `max()` over all requested crate types picks the
    // strongest requirement: Compressed > Uncompressed > None.
    #[derive(PartialEq, Eq, PartialOrd, Ord)]
    enum MetadataKind {
        None,
        Uncompressed,
        Compressed
    }

    let kind = cx.sess().crate_types.borrow().iter().map(|ty| {
        match *ty {
            // Final artifacts don't carry metadata.
            config::CrateTypeExecutable |
            config::CrateTypeStaticlib |
            config::CrateTypeCdylib => MetadataKind::None,

            // rlibs store metadata as a separate archive member, so no
            // compression is needed.
            config::CrateTypeRlib => MetadataKind::Uncompressed,

            // dylibs embed metadata in an object-file section, where size
            // matters, so it is compressed.
            config::CrateTypeDylib |
            config::CrateTypeProcMacro => MetadataKind::Compressed,
        }
    }).max().unwrap();

    if kind == MetadataKind::None {
        return Vec::new();
    }

    let cstore = &cx.tcx().sess.cstore;
    let metadata = cstore.encode_metadata(cx.tcx(),
                                          cx.export_map(),
                                          cx.link_meta(),
                                          reachable_ids);
    if kind == MetadataKind::Uncompressed {
        return metadata;
    }

    assert!(kind == MetadataKind::Compressed);
    // Compressed payload is prefixed with the encoding-version bytes so
    // readers can validate compatibility before inflating.
    let mut compressed = cstore.metadata_encoding_version().to_vec();
    compressed.extend_from_slice(&flate::deflate_bytes(&metadata));

    // Embed the compressed blob as a constant global in the dedicated
    // metadata LLVM module.
    let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
    let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
    let name = cx.metadata_symbol_name();
    let buf = CString::new(name).unwrap();
    let llglobal = unsafe {
        llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
    };
    unsafe {
        llvm::LLVMSetInitializer(llglobal, llconst);
        let section_name =
            cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target);
        let name = CString::new(section_name).unwrap();
        llvm::LLVMSetSection(llglobal, name.as_ptr());

        // Also generate a .section directive to force no
        // flags, at least for ELF outputs, so that the
        // metadata doesn't get loaded into memory.
        let directive = format!(".section {}", section_name);
        let directive = CString::new(directive).unwrap();
        llvm::LLVMSetModuleInlineAsm(cx.metadata_llmod(), directive.as_ptr())
    }

    // Callers receive the uncompressed bytes regardless of what was embedded.
    return metadata;
}
|
|
|
|
|
|
2014-08-01 10:29:44 -07:00
|
|
|
|
/// Find any symbols that are defined in one compilation unit, but not declared
|
|
|
|
|
/// in any other compilation unit. Give these symbols internal linkage.
|
2016-07-22 10:39:30 -04:00
|
|
|
|
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
fn internalize_symbols<'a, 'tcx>(sess: &Session,
                                 ccxs: &CrateContextList<'a, 'tcx>,
                                 symbol_map: &SymbolMap<'tcx>,
                                 reachable: &FxHashSet<&str>) {
    let scx = ccxs.shared();
    let tcx = scx.tcx();

    // In incr. comp. mode, we can't necessarily see all refs since we
    // don't generate LLVM IR for reused modules, so skip this
    // step. Later we should get smarter.
    if sess.opts.debugging_opts.incremental.is_some() {
        return;
    }

    // 'unsafe' because we are holding on to CStr's from the LLVM module within
    // this block.
    unsafe {
        // Symbol names (as CStrs borrowed from LLVM) that some codegen unit
        // references via a declaration; these must stay externally visible.
        let mut referenced_somewhere = FxHashSet();

        // Collect all symbols that need to stay externally visible because they
        // are referenced via a declaration in some other codegen unit.
        for ccx in ccxs.iter_need_trans() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMRustGetLinkage(val);
                // We only care about external declarations (not definitions)
                // and available_externally definitions.
                let is_available_externally = linkage == llvm::Linkage::AvailableExternallyLinkage;
                let is_decl = llvm::LLVMIsDeclaration(val) != 0;

                if is_decl || is_available_externally {
                    let symbol_name = CStr::from_ptr(llvm::LLVMGetValueName(val));
                    referenced_somewhere.insert(symbol_name);
                }
            }
        }

        // Also collect all symbols for which we cannot adjust linkage, because
        // it is fixed by some directive in the source code (e.g. #[no_mangle]).
        let linkage_fixed_explicitly: FxHashSet<_> = scx
            .translation_items()
            .borrow()
            .iter()
            .cloned()
            .filter(|trans_item|{
                trans_item.explicit_linkage(tcx).is_some()
            })
            .map(|trans_item| symbol_map.get_or_compute(scx, trans_item))
            .collect();

        // Examine each external definition. If the definition is not used in
        // any other compilation unit, and is not reachable from other crates,
        // then give it internal linkage.
        for ccx in ccxs.iter_need_trans() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMRustGetLinkage(val);

                // The three linkage kinds that are candidates for demotion
                // to internal linkage.
                let is_externally_visible = (linkage == llvm::Linkage::ExternalLinkage) ||
                                            (linkage == llvm::Linkage::LinkOnceODRLinkage) ||
                                            (linkage == llvm::Linkage::WeakODRLinkage);
                let is_definition = llvm::LLVMIsDeclaration(val) == 0;

                // If this is a definition (as opposed to just a declaration)
                // and externally visible, check if we can internalize it
                if is_definition && is_externally_visible {
                    let name_cstr = CStr::from_ptr(llvm::LLVMGetValueName(val));
                    let name_str = name_cstr.to_str().unwrap();
                    // Borrowed Cow so it can be compared against the
                    // (possibly owned) names in `linkage_fixed_explicitly`.
                    let name_cow = Cow::Borrowed(name_str);

                    let is_referenced_somewhere = referenced_somewhere.contains(&name_cstr);
                    let is_reachable = reachable.contains(&name_str);
                    let has_fixed_linkage = linkage_fixed_explicitly.contains(&name_cow);

                    if !is_referenced_somewhere && !is_reachable && !has_fixed_linkage {
                        llvm::LLVMRustSetLinkage(val, llvm::Linkage::InternalLinkage);
                        // Internal symbols can't be dllexported/dllimported
                        // and don't need a COMDAT group.
                        llvm::LLVMSetDLLStorageClass(val,
                                                     llvm::DLLStorageClass::Default);
                        llvm::UnsetComdat(val);
                    }
                }
            }
        }
    }
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
|
|
|
|
|
// This is required to satisfy `dllimport` references to static data in .rlibs
|
|
|
|
|
// when using MSVC linker. We do this only for data, as linker can fix up
|
|
|
|
|
// code references on its own.
|
|
|
|
|
// See #26591, #27438
|
2016-05-05 14:14:41 -04:00
|
|
|
|
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
fn create_imps(cx: &CrateContextList) {
    // The x86 ABI seems to require that leading underscores are added to symbol
    // names, so we need an extra underscore on 32-bit. There's also a leading
    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
    // underscores added in front).
    let prefix = if cx.shared().sess().target.target.target_pointer_width == "32" {
        "\x01__imp__"
    } else {
        "\x01__imp_"
    };
    unsafe {
        for ccx in cx.iter_need_trans() {
            // Only externally-visible *definitions* of globals get an
            // __imp_ stub; declarations are satisfied elsewhere.
            let exported: Vec<_> = iter_globals(ccx.llmod())
                                       .filter(|&val| {
                                           llvm::LLVMRustGetLinkage(val) ==
                                           llvm::Linkage::ExternalLinkage &&
                                           llvm::LLVMIsDeclaration(val) == 0
                                       })
                                       .collect();

            let i8p_ty = Type::i8p(&ccx);
            for val in exported {
                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
                // Build "<prefix><symbol>" as raw bytes, then wrap in a
                // CString for the LLVM C API.
                let mut imp_name = prefix.as_bytes().to_vec();
                imp_name.extend(name.to_bytes());
                let imp_name = CString::new(imp_name).unwrap();
                let imp = llvm::LLVMAddGlobal(ccx.llmod(),
                                              i8p_ty.to_ref(),
                                              imp_name.as_ptr() as *const _);
                // Initialize the stub to point at the original symbol.
                let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
                llvm::LLVMSetInitializer(imp, init);
                llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
            }
        }
    }
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
/// Iterator over the values (globals or functions) of an LLVM module,
/// driven by one of LLVM's paired "get first" / "get next" accessors.
struct ValueIter {
    // The value to yield next; null once the list is exhausted.
    cur: ValueRef,
    // LLVM C-API accessor that advances to the following value
    // (e.g. LLVMGetNextGlobal or LLVMGetNextFunction).
    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
impl Iterator for ValueIter {
|
|
|
|
|
type Item = ValueRef;
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn next(&mut self) -> Option<ValueRef> {
|
|
|
|
|
let old = self.cur;
|
|
|
|
|
if !old.is_null() {
|
2015-10-17 20:15:26 -04:00
|
|
|
|
self.cur = unsafe { (self.step)(old) };
|
2015-08-21 00:41:07 -07:00
|
|
|
|
Some(old)
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
}
|
2015-08-21 00:41:07 -07:00
|
|
|
|
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
|
|
|
|
|
unsafe {
|
|
|
|
|
ValueIter {
|
|
|
|
|
cur: llvm::LLVMGetFirstGlobal(llmod),
|
|
|
|
|
step: llvm::LLVMGetNextGlobal,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-01-01 23:54:03 -05:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter {
|
|
|
|
|
unsafe {
|
|
|
|
|
ValueIter {
|
|
|
|
|
cur: llvm::LLVMGetFirstFunction(llmod),
|
|
|
|
|
step: llvm::LLVMGetNextFunction,
|
2014-08-01 10:29:44 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-28 17:19:08 -07:00
|
|
|
|
/// The context provided lists a set of reachable ids as calculated by
/// middle::reachable, but this contains far more ids and symbols than we're
/// actually exposing from the object file. This function will filter the set in
/// the context to the set of ids which correspond to symbols that are exposed
/// from the object file being generated.
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
pub fn filter_reachable_ids(tcx: TyCtxt, reachable: NodeSet) -> NodeSet {
    reachable.into_iter().filter(|&id| {
        // Next, we want to ignore some FFI functions that are not exposed from
        // this crate. Reachable FFI functions can be lumped into two
        // categories:
        //
        // 1. Those that are included statically via a static library
        // 2. Those included otherwise (e.g. dynamically or via a framework)
        //
        // Although our LLVM module is not literally emitting code for the
        // statically included symbols, it's an export of our library which
        // needs to be passed on to the linker and encoded in the metadata.
        //
        // As a result, if this id is an FFI item (foreign item) then we only
        // let it through if it's included statically.
        match tcx.map.get(id) {
            hir_map::NodeForeignItem(..) => {
                // Foreign items survive only when the crate store says they
                // come from a statically included native library.
                tcx.sess.cstore.is_statically_included_foreign_item(id)
            }

            // Only consider nodes that actually have exported symbols.
            hir_map::NodeItem(&hir::Item {
                node: hir::ItemStatic(..), .. }) |
            hir_map::NodeItem(&hir::Item {
                node: hir::ItemFn(..), .. }) |
            hir_map::NodeImplItem(&hir::ImplItem {
                node: hir::ImplItemKind::Method(..), .. }) => {
                let def_id = tcx.map.local_def_id(id);
                let generics = tcx.item_generics(def_id);
                let attributes = tcx.get_attrs(def_id);
                // Generic items get monomorphized per use and therefore
                // have no single exported symbol; only fully monomorphic
                // items (no own or inherited type parameters) qualify.
                (generics.parent_types == 0 && generics.types.is_empty()) &&
                // Functions marked with #[inline] are only ever translated
                // with "internal" linkage and are never exported.
                !attr::requests_inline(&attributes[..])
            }

            // Everything else (consts, traits, modules, ...) produces no
            // exported symbol of its own.
            _ => false
        }
    }).collect()
}
|
|
|
|
|
|
2016-05-03 05:23:22 +03:00
|
|
|
|
/// Top-level driver for translating the whole crate to LLVM IR.
///
/// Collects and partitions translation items into codegen units, predefines
/// and then defines each item per unit, computes the final set of reachable
/// symbol names, internalizes everything else, and returns the resulting
/// `CrateTranslation` (modules + metadata + linker info).
pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                             analysis: ty::CrateAnalysis,
                             incremental_hashes_map: &IncrementalHashesMap)
                             -> CrateTranslation {
    let _task = tcx.dep_graph.in_task(DepNode::TransCrate);

    // Be careful with this krate: obviously it gives access to the
    // entire contents of the krate. So if you push any subtasks of
    // `TransCrate`, you need to be careful to register "reads" of the
    // particular items that will be processed.
    let krate = tcx.map.krate();

    let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
    // Narrow the reachable set down to ids that actually produce exported
    // symbols (see `filter_reachable_ids`).
    let reachable = filter_reachable_ids(tcx, reachable);

    // Overflow checks default to the debug-assertions setting unless
    // explicitly forced via -Z force-overflow-checks.
    let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
        v
    } else {
        tcx.sess.opts.debug_assertions
    };

    let link_meta = link::build_link_meta(incremental_hashes_map, name);

    let shared_ccx = SharedCrateContext::new(tcx,
                                             export_map,
                                             link_meta.clone(),
                                             reachable,
                                             check_overflow);
    // Translate the metadata.
    let metadata = time(tcx.sess.time_passes(), "write metadata", || {
        write_metadata(&shared_ccx, shared_ccx.reachable())
    });

    // Metadata lives in its own dedicated LLVM module/context.
    let metadata_module = ModuleTranslation {
        name: "metadata".to_string(),
        symbol_name_hash: 0, // we always rebuild metadata, at least for now
        source: ModuleSource::Translated(ModuleLlvm {
            llcx: shared_ccx.metadata_llcx(),
            llmod: shared_ccx.metadata_llmod(),
        }),
    };
    let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");

    // Run the translation item collector and partition the collected items into
    // codegen units.
    let (codegen_units, symbol_map) = collect_and_partition_translation_items(&shared_ccx);

    let symbol_map = Rc::new(symbol_map);

    // For incremental compilation: figure out which CGUs can reuse the
    // object file produced by a previous compilation session.
    let previous_work_products = trans_reuse_previous_work_products(tcx,
                                                                    &codegen_units,
                                                                    &symbol_map);

    let crate_context_list = CrateContextList::new(&shared_ccx,
                                                   codegen_units,
                                                   previous_work_products,
                                                   symbol_map.clone());
    // One ModuleTranslation per codegen unit; reused units reference their
    // previous work product instead of a fresh LLVM module.
    let modules: Vec<_> = crate_context_list.iter_all()
        .map(|ccx| {
            let source = match ccx.previous_work_product() {
                Some(buf) => ModuleSource::Preexisting(buf.clone()),
                None => ModuleSource::Translated(ModuleLlvm {
                    llcx: ccx.llcx(),
                    llmod: ccx.llmod(),
                }),
            };

            ModuleTranslation {
                name: String::from(ccx.codegen_unit().name()),
                symbol_name_hash: ccx.codegen_unit().compute_symbol_name_hash(tcx, &symbol_map),
                source: source,
            }
        })
        .collect();

    // Testing hook: verify #[rustc_partition_*] expectations about module
    // reuse (used by incremental-compilation tests).
    assert_module_sources::assert_module_sources(tcx, &modules);

    // Skip crate items and just output metadata in -Z no-trans mode.
    if tcx.sess.opts.debugging_opts.no_trans {
        let linker_info = LinkerInfo::new(&shared_ccx, &[]);
        return CrateTranslation {
            modules: modules,
            metadata_module: metadata_module,
            link: link_meta,
            metadata: metadata,
            reachable: vec![],
            no_builtins: no_builtins,
            linker_info: linker_info,
            windows_subsystem: None,
        };
    }

    // Instantiate translation items without filling out definitions yet...
    for ccx in crate_context_list.iter_need_trans() {
        let cgu = ccx.codegen_unit();
        let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);

        // Each CGU's translation is a dep-graph task keyed by its work
        // product, so incremental compilation can track it.
        tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
            for (trans_item, linkage) in trans_items {
                trans_item.predefine(&ccx, linkage);
            }
        });
    }

    // ... and now that we have everything pre-defined, fill out those definitions.
    for ccx in crate_context_list.iter_need_trans() {
        let cgu = ccx.codegen_unit();
        let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);
        tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
            for (trans_item, _) in trans_items {
                trans_item.define(&ccx);
            }

            // If this codegen unit contains the main function, also create the
            // wrapper here
            maybe_create_entry_wrapper(&ccx);

            // Run replace-all-uses-with for statics that need it
            for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
                unsafe {
                    // Bitcast the new global to the old one's type before
                    // RAUW so the replacement type-checks in LLVM.
                    let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
                    llvm::LLVMDeleteGlobal(old_g);
                }
            }

            // Finalize debuginfo
            if ccx.sess().opts.debuginfo != NoDebugInfo {
                debuginfo::finalize(&ccx);
            }
        });
    }

    // Testing hook: print symbol names for #[rustc_symbol_name] items.
    symbol_names_test::report_symbol_names(&shared_ccx);

    // Optional diagnostics requested via -Z trans-stats.
    if shared_ccx.sess().trans_stats() {
        let stats = shared_ccx.stats();
        println!("--- trans stats ---");
        println!("n_glues_created: {}", stats.n_glues_created.get());
        println!("n_null_glues: {}", stats.n_null_glues.get());
        println!("n_real_glues: {}", stats.n_real_glues.get());

        println!("n_fns: {}", stats.n_fns.get());
        println!("n_inlines: {}", stats.n_inlines.get());
        println!("n_closures: {}", stats.n_closures.get());
        println!("fn stats:");
        // Sort functions by instruction count, largest first.
        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
            insns_b.cmp(&insns_a)
        });
        for tuple in stats.fn_stats.borrow().iter() {
            match *tuple {
                (ref name, insns) => {
                    println!("{} insns, {}", insns, *name);
                }
            }
        }
    }

    // Optional diagnostics requested via -Z count-llvm-insns.
    if shared_ccx.sess().count_llvm_insns() {
        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
            println!("{:7} {}", *v, *k);
        }
    }

    // Map each reachable local id to its mangled symbol name.
    let sess = shared_ccx.sess();
    let mut reachable_symbols = shared_ccx.reachable().iter().map(|&id| {
        let def_id = shared_ccx.tcx().map.local_def_id(id);
        symbol_for_def_id(def_id, &shared_ccx, &symbol_map)
    }).collect::<Vec<_>>();

    // The entry wrapper is emitted as a literal "main" symbol.
    if sess.entry_fn.borrow().is_some() {
        reachable_symbols.push("main".to_string());
    }

    // Dylibs embed crate metadata under a well-known symbol; keep it alive.
    if sess.crate_types.borrow().contains(&config::CrateTypeDylib) {
        reachable_symbols.push(shared_ccx.metadata_symbol_name());
    }

    // For the purposes of LTO or when creating a cdylib, we add to the
    // reachable set all of the upstream reachable extern fns. These functions
    // are all part of the public ABI of the final product, so we need to
    // preserve them.
    //
    // Note that this happens even if LTO isn't requested or we're not creating
    // a cdylib. In those cases, though, we're not even reading the
    // `reachable_symbols` list later on so it should be ok.
    for cnum in sess.cstore.crates() {
        let syms = sess.cstore.reachable_ids(cnum);
        reachable_symbols.extend(syms.into_iter().filter(|&def_id| {
            // Only non-generic statics and fns can carry an exported symbol.
            let applicable = match sess.cstore.describe_def(def_id) {
                Some(Def::Static(..)) => true,
                Some(Def::Fn(_)) => {
                    shared_ccx.tcx().item_generics(def_id).types.is_empty()
                }
                _ => false
            };

            if applicable {
                // ... and only if the item is explicitly marked as an
                // extern indicator (e.g. #[no_mangle] / #[export_name]).
                let attrs = shared_ccx.tcx().get_attrs(def_id);
                attr::contains_extern_indicator(sess.diagnostic(), &attrs)
            } else {
                false
            }
        }).map(|did| {
            symbol_for_def_id(did, &shared_ccx, &symbol_map)
        }));
    }

    // Demote every symbol not in the reachable set to internal linkage so
    // LLVM can optimize/eliminate it.
    time(shared_ccx.sess().time_passes(), "internalize symbols", || {
        internalize_symbols(sess,
                            &crate_context_list,
                            &symbol_map,
                            &reachable_symbols.iter()
                                              .map(|s| &s[..])
                                              .collect())
    });

    // MSVC rlibs need `__imp_` forwarding globals (see `create_imps`).
    if sess.target.target.options.is_like_msvc &&
       sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
        create_imps(&crate_context_list);
    }

    let linker_info = LinkerInfo::new(&shared_ccx, &reachable_symbols);

    // Validate the optional #![windows_subsystem = "..."] crate attribute;
    // only "windows" and "console" are accepted.
    let subsystem = attr::first_attr_value_str_by_name(&krate.attrs,
                                                       "windows_subsystem");
    let windows_subsystem = subsystem.map(|subsystem| {
        if subsystem != "windows" && subsystem != "console" {
            tcx.sess.fatal(&format!("invalid windows subsystem `{}`, only \
                                     `windows` and `console` are allowed",
                                    subsystem));
        }
        subsystem.to_string()
    });

    CrateTranslation {
        modules: modules,
        metadata_module: metadata_module,
        link: link_meta,
        metadata: metadata,
        reachable: reachable_symbols,
        no_builtins: no_builtins,
        linker_info: linker_info,
        windows_subsystem: windows_subsystem,
    }
}
|
2015-11-18 05:38:50 -05:00
|
|
|
|
|
2016-07-21 12:49:59 -04:00
|
|
|
|
/// For each CGU, identify if we can reuse an existing object file (or
|
|
|
|
|
/// maybe other context).
|
|
|
|
|
fn trans_reuse_previous_work_products(tcx: TyCtxt,
|
|
|
|
|
codegen_units: &[CodegenUnit],
|
|
|
|
|
symbol_map: &SymbolMap)
|
2016-07-25 10:51:14 -04:00
|
|
|
|
-> Vec<Option<WorkProduct>> {
|
2016-07-21 12:49:59 -04:00
|
|
|
|
debug!("trans_reuse_previous_work_products()");
|
|
|
|
|
codegen_units
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|cgu| {
|
|
|
|
|
let id = cgu.work_product_id();
|
|
|
|
|
|
|
|
|
|
let hash = cgu.compute_symbol_name_hash(tcx, symbol_map);
|
|
|
|
|
|
|
|
|
|
debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash);
|
|
|
|
|
|
|
|
|
|
if let Some(work_product) = tcx.dep_graph.previous_work_product(&id) {
|
|
|
|
|
if work_product.input_hash == hash {
|
|
|
|
|
debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
|
2016-07-25 10:51:14 -04:00
|
|
|
|
return Some(work_product);
|
2016-07-21 12:49:59 -04:00
|
|
|
|
} else {
|
|
|
|
|
debug!("trans_reuse_previous_work_products: \
|
|
|
|
|
not reusing {:?} because hash changed to {:?}",
|
|
|
|
|
work_product, hash);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
None
|
|
|
|
|
})
|
|
|
|
|
.collect()
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 14:27:34 -04:00
|
|
|
|
/// Run the translation-item collector over the crate and partition the
/// collected items into codegen units.
///
/// Returns the list of codegen units plus the `SymbolMap` holding the
/// precomputed symbol name of every collected item. When
/// `-Z print-trans-items=<mode>` is set, also prints one `TRANS_ITEM` line
/// per item (with its codegen units and linkage) for test purposes.
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
                                                     -> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
    let time_passes = scx.sess().time_passes();

    // -Z print-trans-items doubles as the collection-mode selector:
    // "eager" collects everything, anything else falls back to "lazy"
    // (with a warning for unrecognized values).
    let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items {
        Some(ref s) => {
            let mode_string = s.to_lowercase();
            let mode_string = mode_string.trim();
            if mode_string == "eager" {
                TransItemCollectionMode::Eager
            } else {
                if mode_string != "lazy" {
                    let message = format!("Unknown codegen-item collection mode '{}'. \
                                           Falling back to 'lazy' mode.",
                                          mode_string);
                    scx.sess().warn(&message);
                }

                TransItemCollectionMode::Lazy
            }
        }
        None => TransItemCollectionMode::Lazy
    };

    let (items, inlining_map) =
        time(time_passes, "translation item collection", || {
            collector::collect_crate_translation_items(&scx, collection_mode)
        });

    let symbol_map = SymbolMap::build(scx, items.iter().cloned());

    // Incremental compilation partitions one unit per module; otherwise
    // use the fixed unit count from -C codegen-units.
    let strategy = if scx.sess().opts.debugging_opts.incremental.is_some() {
        PartitioningStrategy::PerModule
    } else {
        PartitioningStrategy::FixedUnitCount(scx.sess().opts.cg.codegen_units)
    };

    let codegen_units = time(time_passes, "codegen unit partitioning", || {
        partitioning::partition(scx,
                                items.iter().cloned(),
                                strategy,
                                &inlining_map)
    });

    // Sanity check: with fixed-count partitioning, the requested number of
    // units must be produced exactly.
    assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() ||
            scx.tcx().sess.opts.debugging_opts.incremental.is_some());

    // Record every collected item in the shared context's item set.
    {
        let mut ccx_map = scx.translation_items().borrow_mut();

        for trans_item in items.iter().cloned() {
            ccx_map.insert(trans_item);
        }
    }

    // Test-support output for -Z print-trans-items: one sorted line per
    // item, listing each CGU it landed in with its linkage abbreviation.
    if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
        let mut item_to_cgus = FxHashMap();

        // Invert the cgu -> items mapping to item -> (cgu, linkage) pairs.
        for cgu in &codegen_units {
            for (&trans_item, &linkage) in cgu.items() {
                item_to_cgus.entry(trans_item)
                            .or_insert(Vec::new())
                            .push((cgu.name().clone(), linkage));
            }
        }

        let mut item_keys: Vec<_> = items
            .iter()
            .map(|i| {
                let mut output = i.to_string(scx.tcx());
                output.push_str(" @@");
                let mut empty = Vec::new();
                let mut cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
                // Deterministic output: sort by CGU name and drop dups.
                cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone());
                cgus.dedup();
                for &(ref cgu_name, linkage) in cgus.iter() {
                    output.push_str(" ");
                    output.push_str(&cgu_name[..]);

                    let linkage_abbrev = match linkage {
                        llvm::Linkage::ExternalLinkage => "External",
                        llvm::Linkage::AvailableExternallyLinkage => "Available",
                        llvm::Linkage::LinkOnceAnyLinkage => "OnceAny",
                        llvm::Linkage::LinkOnceODRLinkage => "OnceODR",
                        llvm::Linkage::WeakAnyLinkage => "WeakAny",
                        llvm::Linkage::WeakODRLinkage => "WeakODR",
                        llvm::Linkage::AppendingLinkage => "Appending",
                        llvm::Linkage::InternalLinkage => "Internal",
                        llvm::Linkage::PrivateLinkage => "Private",
                        llvm::Linkage::ExternalWeakLinkage => "ExternalWeak",
                        llvm::Linkage::CommonLinkage => "Common",
                    };

                    output.push_str("[");
                    output.push_str(linkage_abbrev);
                    output.push_str("]");
                }
                output
            })
            .collect();

        item_keys.sort();

        for item in item_keys {
            println!("TRANS_ITEM {}", item);
        }
    }

    (codegen_units, symbol_map)
}
|
|
|
|
|
|
|
|
|
|
fn symbol_for_def_id<'a, 'tcx>(def_id: DefId,
|
|
|
|
|
scx: &SharedCrateContext<'a, 'tcx>,
|
|
|
|
|
symbol_map: &SymbolMap<'tcx>)
|
|
|
|
|
-> String {
|
|
|
|
|
// Just try to look things up in the symbol map. If nothing's there, we
|
|
|
|
|
// recompute.
|
|
|
|
|
if let Some(node_id) = scx.tcx().map.as_local_node_id(def_id) {
|
|
|
|
|
if let Some(sym) = symbol_map.get(TransItem::Static(node_id)) {
|
|
|
|
|
return sym.to_owned();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let instance = Instance::mono(scx, def_id);
|
|
|
|
|
|
|
|
|
|
symbol_map.get(TransItem::Fn(instance))
|
|
|
|
|
.map(str::to_owned)
|
|
|
|
|
.unwrap_or_else(|| instance.symbol_name(scx))
|
2015-11-02 14:46:39 +01:00
|
|
|
|
}
|