2015-02-28 23:53:12 +02:00
|
|
|
|
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
|
2012-12-03 16:48:01 -08:00
|
|
|
|
// file at the top-level directory of this distribution and at
|
|
|
|
|
// http://rust-lang.org/COPYRIGHT.
|
|
|
|
|
//
|
|
|
|
|
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
|
|
|
|
|
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
|
|
|
|
|
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
|
|
|
|
|
// option. This file may not be copied, modified, or distributed
|
|
|
|
|
// except according to those terms.
|
2016-02-15 15:41:16 -05:00
|
|
|
|
|
2015-02-28 23:53:12 +02:00
|
|
|
|
//! Translate the completed AST to the LLVM IR.
|
|
|
|
|
//!
|
|
|
|
|
//! Some functions here, such as trans_block and trans_expr, return a value --
|
2016-02-23 21:39:35 +02:00
|
|
|
|
//! the result of the translation to LLVM -- while others, such as trans_fn
|
|
|
|
|
//! and trans_item, are called only for the side effect of adding a
|
2015-02-28 23:53:12 +02:00
|
|
|
|
//! particular definition to the LLVM IR output we're producing.
|
|
|
|
|
//!
|
|
|
|
|
//! Hopefully useful general knowledge about trans:
|
|
|
|
|
//!
|
|
|
|
|
//! * There's no way to find out the Ty type of a ValueRef. Doing so
|
|
|
|
|
//! would be "trying to get the eggs out of an omelette" (credit:
|
|
|
|
|
//! pcwalton). You can, instead, find out its TypeRef by calling val_ty,
|
|
|
|
|
//! but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
|
|
|
|
|
//! int) and rec(x=int, y=int, z=int) will have the same TypeRef.
|
2011-12-13 16:25:51 -08:00
|
|
|
|
|
2014-03-21 18:05:05 -07:00
|
|
|
|
#![allow(non_camel_case_types)]
|
2013-05-17 15:28:44 -07:00
|
|
|
|
|
2014-11-27 07:21:26 -05:00
|
|
|
|
use super::CrateTranslation;
|
2016-07-21 12:49:59 -04:00
|
|
|
|
use super::ModuleLlvm;
|
|
|
|
|
use super::ModuleSource;
|
2014-11-27 07:21:26 -05:00
|
|
|
|
use super::ModuleTranslation;
|
|
|
|
|
|
2016-07-21 12:50:15 -04:00
|
|
|
|
use assert_module_sources;
|
2016-05-12 19:52:38 +03:00
|
|
|
|
use back::link;
|
2016-05-25 01:45:25 +03:00
|
|
|
|
use back::linker::LinkerInfo;
|
2015-02-28 23:53:12 +02:00
|
|
|
|
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
|
2014-07-07 17:58:01 -07:00
|
|
|
|
use llvm;
|
2016-03-22 17:30:57 +02:00
|
|
|
|
use rustc::cfg;
|
2016-03-29 12:54:26 +03:00
|
|
|
|
use rustc::hir::def_id::DefId;
|
2013-07-15 20:42:13 -07:00
|
|
|
|
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
|
2016-03-29 12:54:26 +03:00
|
|
|
|
use rustc::hir::pat_util::simple_name;
|
2016-06-15 01:40:09 +03:00
|
|
|
|
use rustc::ty::subst::Substs;
|
2016-03-22 17:30:57 +02:00
|
|
|
|
use rustc::traits;
|
|
|
|
|
use rustc::ty::{self, Ty, TyCtxt, TypeFoldable};
|
|
|
|
|
use rustc::ty::adjustment::CustomCoerceUnsized;
|
2016-07-25 10:51:14 -04:00
|
|
|
|
use rustc::dep_graph::{DepNode, WorkProduct};
|
2016-03-29 08:50:44 +03:00
|
|
|
|
use rustc::hir::map as hir_map;
|
2015-12-25 13:59:02 -05:00
|
|
|
|
use rustc::util::common::time;
|
2016-02-05 09:32:33 +01:00
|
|
|
|
use rustc::mir::mir_map::MirMap;
|
2016-04-06 17:28:59 -04:00
|
|
|
|
use rustc_data_structures::graph::OUTGOING;
|
2016-08-23 07:47:14 -04:00
|
|
|
|
use rustc_incremental::IncrementalHashesMap;
|
2015-07-02 18:18:22 +02:00
|
|
|
|
use session::config::{self, NoDebugInfo, FullDebugInfo};
|
2014-11-15 20:30:33 -05:00
|
|
|
|
use session::Session;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use _match;
|
|
|
|
|
use abi::{self, Abi, FnType};
|
|
|
|
|
use adt;
|
|
|
|
|
use attributes;
|
|
|
|
|
use build::*;
|
|
|
|
|
use builder::{Builder, noname};
|
|
|
|
|
use callee::{Callee, CallArgs, ArgExprs, ArgVals};
|
|
|
|
|
use cleanup::{self, CleanupMethods, DropHint};
|
|
|
|
|
use closure;
|
|
|
|
|
use common::{Block, C_bool, C_bytes_in_context, C_i32, C_int, C_uint, C_integral};
|
2016-06-16 18:56:14 -04:00
|
|
|
|
use collector::{self, TransItemCollectionMode};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use common::{C_null, C_struct_in_context, C_u64, C_u8, C_undef};
|
|
|
|
|
use common::{CrateContext, DropFlagHintsMap, Field, FunctionContext};
|
|
|
|
|
use common::{Result, NodeIdAndSpan, VariantInfo};
|
|
|
|
|
use common::{node_id_type, fulfill_obligation};
|
|
|
|
|
use common::{type_is_immediate, type_is_zero_size, val_ty};
|
|
|
|
|
use common;
|
|
|
|
|
use consts;
|
2016-05-05 14:14:41 -04:00
|
|
|
|
use context::{SharedCrateContext, CrateContextList};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use controlflow;
|
|
|
|
|
use datum;
|
|
|
|
|
use debuginfo::{self, DebugLoc, ToDebugLoc};
|
|
|
|
|
use declare;
|
|
|
|
|
use expr;
|
|
|
|
|
use glue;
|
2016-05-14 05:41:42 +12:00
|
|
|
|
use inline;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use machine;
|
2016-06-02 23:43:16 +02:00
|
|
|
|
use machine::{llalign_of_min, llsize_of};
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use meth;
|
|
|
|
|
use mir;
|
|
|
|
|
use monomorphize::{self, Instance};
|
2016-05-09 14:26:15 -04:00
|
|
|
|
use partitioning::{self, PartitioningStrategy, CodegenUnit};
|
2016-05-26 08:59:58 -04:00
|
|
|
|
use symbol_map::SymbolMap;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use symbol_names_test;
|
2016-05-09 13:37:14 -04:00
|
|
|
|
use trans_item::TransItem;
|
2016-03-22 19:23:36 +02:00
|
|
|
|
use tvec;
|
|
|
|
|
use type_::Type;
|
|
|
|
|
use type_of;
|
|
|
|
|
use value::Value;
|
|
|
|
|
use Disr;
|
2012-08-28 15:54:45 -07:00
|
|
|
|
use util::common::indenter;
|
2013-12-09 14:56:53 -07:00
|
|
|
|
use util::sha2::Sha256;
|
2016-07-20 07:55:45 -04:00
|
|
|
|
use util::nodemap::{NodeMap, NodeSet, FnvHashSet};
|
2013-06-16 22:52:44 +12:00
|
|
|
|
|
2014-01-29 13:50:05 +11:00
|
|
|
|
use arena::TypedArena;
|
2015-02-28 23:55:50 +02:00
|
|
|
|
use libc::c_uint;
|
2015-02-17 22:47:40 -08:00
|
|
|
|
use std::ffi::{CStr, CString};
|
2016-07-20 07:55:45 -04:00
|
|
|
|
use std::borrow::Cow;
|
2013-12-20 20:33:22 -08:00
|
|
|
|
use std::cell::{Cell, RefCell};
|
2016-07-20 07:55:45 -04:00
|
|
|
|
use std::collections::HashMap;
|
2016-06-24 20:54:52 +02:00
|
|
|
|
use std::ptr;
|
2016-05-26 08:59:58 -04:00
|
|
|
|
use std::rc::Rc;
|
2014-11-25 13:28:35 -08:00
|
|
|
|
use std::str;
|
2014-06-05 12:23:34 -07:00
|
|
|
|
use std::{i8, i16, i32, i64};
|
2016-06-21 18:08:13 -04:00
|
|
|
|
use syntax_pos::{Span, DUMMY_SP};
|
2014-01-21 10:08:10 -08:00
|
|
|
|
use syntax::parse::token::InternedString;
|
2015-09-14 21:58:20 +12:00
|
|
|
|
use syntax::attr::AttrMetaMethods;
|
|
|
|
|
use syntax::attr;
|
2016-03-29 08:50:44 +03:00
|
|
|
|
use rustc::hir::intravisit::{self, Visitor};
|
|
|
|
|
use rustc::hir;
|
2015-07-31 00:04:06 -07:00
|
|
|
|
use syntax::ast;
|
2012-03-03 17:49:23 -08:00
|
|
|
|
|
2014-11-14 09:18:10 -08:00
|
|
|
|
// Per-thread stack of instruction-context labels. It starts out as `None`;
// `init_insn_ctxt` sets it to an empty `Vec`, `push_ctxt` pushes labels onto
// it, and dropping the `_InsnCtxt` guard pops them again.
thread_local! {
    static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
        RefCell::new(None)
    }
}
|
2013-02-27 19:13:53 -05:00
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn with_insn_ctxt<F>(blk: F)
|
|
|
|
|
where F: FnOnce(&[&'static str])
|
2014-12-09 13:44:51 -05:00
|
|
|
|
{
|
|
|
|
|
TASK_LOCAL_INSN_KEY.with(move |slot| {
|
2015-02-01 21:53:25 -05:00
|
|
|
|
slot.borrow().as_ref().map(move |s| blk(s));
|
2014-11-14 14:20:57 -08:00
|
|
|
|
})
|
2012-03-22 13:44:20 -07:00
|
|
|
|
}
|
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
pub fn init_insn_ctxt() {
|
2014-11-14 14:20:57 -08:00
|
|
|
|
TASK_LOCAL_INSN_KEY.with(|slot| {
|
|
|
|
|
*slot.borrow_mut() = Some(Vec::new());
|
|
|
|
|
});
|
2012-09-05 15:58:43 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-06-06 15:51:42 +02:00
|
|
|
|
/// Guard returned by `push_ctxt`: its `Drop` impl pops the label that was
/// pushed onto the per-thread instruction-context stack.
pub struct _InsnCtxt {
    // Private unit field so the guard can only be constructed inside this
    // module (by `push_ctxt`).
    _cannot_construct_outside_of_this_module: (),
}
|
2012-07-11 15:00:40 -07:00
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
impl Drop for _InsnCtxt {
|
2013-09-16 21:18:07 -04:00
|
|
|
|
fn drop(&mut self) {
|
2014-11-14 14:20:57 -08:00
|
|
|
|
TASK_LOCAL_INSN_KEY.with(|slot| {
|
2016-07-03 14:38:37 -07:00
|
|
|
|
if let Some(ctx) = slot.borrow_mut().as_mut() {
|
|
|
|
|
ctx.pop();
|
2014-11-14 14:20:57 -08:00
|
|
|
|
}
|
|
|
|
|
})
|
2012-03-22 13:44:20 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
|
pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
|
2013-10-21 13:08:31 -07:00
|
|
|
|
debug!("new InsnCtxt: {}", s);
|
2014-11-14 14:20:57 -08:00
|
|
|
|
TASK_LOCAL_INSN_KEY.with(|slot| {
|
2016-02-23 21:18:07 +05:30
|
|
|
|
if let Some(ctx) = slot.borrow_mut().as_mut() {
|
|
|
|
|
ctx.push(s)
|
2014-11-14 14:20:57 -08:00
|
|
|
|
}
|
|
|
|
|
});
|
2015-11-19 12:36:31 +01:00
|
|
|
|
_InsnCtxt {
|
|
|
|
|
_cannot_construct_outside_of_this_module: (),
|
|
|
|
|
}
|
2012-03-22 13:44:20 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
/// Records how many LLVM instructions were emitted while translating one
/// function; the delta is flushed into `ccx.stats()` by the `Drop` impl
/// below when `sess().trans_stats()` is enabled.
pub struct StatRecorder<'a, 'tcx: 'a> {
    // Crate context whose `stats()` counters are sampled and updated.
    ccx: &'a CrateContext<'a, 'tcx>,
    // Function name; `take()`n (set to `None`) when stats are flushed on drop.
    name: Option<String>,
    // Value of `n_llvm_insns` at construction time (the "start" sample).
    istart: usize,
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String) -> StatRecorder<'a, 'tcx> {
|
2014-09-05 09:18:53 -07:00
|
|
|
|
let istart = ccx.stats().n_llvm_insns.get();
|
2013-06-28 11:15:34 -07:00
|
|
|
|
StatRecorder {
|
|
|
|
|
ccx: ccx,
|
2014-02-14 07:07:09 +02:00
|
|
|
|
name: Some(name),
|
2013-06-28 11:15:34 -07:00
|
|
|
|
istart: istart,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
|
impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
|
2013-09-16 21:18:07 -04:00
|
|
|
|
fn drop(&mut self) {
|
2014-03-05 16:36:01 +02:00
|
|
|
|
if self.ccx.sess().trans_stats() {
|
2014-09-05 09:18:53 -07:00
|
|
|
|
let iend = self.ccx.stats().n_llvm_insns.get();
|
2015-11-19 12:36:31 +01:00
|
|
|
|
self.ccx
|
|
|
|
|
.stats()
|
|
|
|
|
.fn_stats
|
|
|
|
|
.borrow_mut()
|
|
|
|
|
.push((self.name.take().unwrap(), iend - self.istart));
|
2014-09-05 09:18:53 -07:00
|
|
|
|
self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
|
2013-06-28 11:15:34 -07:00
|
|
|
|
// Reset LLVM insn count to avoid compound costs.
|
2014-09-05 09:18:53 -07:00
|
|
|
|
self.ccx.stats().n_llvm_insns.set(self.istart);
|
2013-06-28 11:15:34 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-08-16 06:32:28 -04:00
|
|
|
|
/// Look up the `ty::ClosureKind` recorded for `closure_id` in the typeck
/// tables. Panics (`unwrap`) if no kind was recorded for this `DefId`.
pub fn kind_for_closure(ccx: &CrateContext, closure_id: DefId) -> ty::ClosureKind {
    *ccx.tcx().tables.borrow().closure_kinds.get(&closure_id).unwrap()
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Resolve the allocation lang item `it` to its `DefId`, aborting
/// compilation with a fatal error (mentioning the type being allocated)
/// when the lang item is missing.
fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, info_ty: Ty<'tcx>, it: LangItem) -> DefId {
    bcx.tcx().lang_items.require(it).unwrap_or_else(|s| {
        // `fatal` diverges, so this closure still "returns" a `DefId`.
        bcx.sess().fatal(&format!("allocation of `{}` {}", info_ty, s))
    })
}
|
2013-07-15 20:42:13 -07:00
|
|
|
|
|
2014-04-06 13:54:41 +03:00
|
|
|
|
// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.

/// Emit a call to the exchange-malloc lang item to allocate `size` bytes
/// with alignment `align`, then cast the returned pointer to `llty_ptr`.
///
/// `info_ty` is only used for the error message if the lang item is
/// missing. Returns the (possibly new) block plus the cast pointer.
pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef,
                                  debug_loc: DebugLoc)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let def_id = require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem);
    let r = Callee::def(bcx.ccx(), def_id, Substs::empty(bcx.tcx()))
        .call(bcx, debug_loc, ArgVals(&[size, align]), None);

    // The allocator returns an untyped pointer; cast it to the requested
    // pointer type before handing it back.
    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}
|
|
|
|
|
|
2014-04-19 10:33:46 -07:00
|
|
|
|
|
2016-03-29 01:46:02 +02:00
|
|
|
|
pub fn bin_op_to_icmp_predicate(op: hir::BinOp_,
|
2015-11-19 12:36:31 +01:00
|
|
|
|
signed: bool)
|
2015-01-29 14:03:34 +02:00
|
|
|
|
-> llvm::IntPredicate {
|
|
|
|
|
match op {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
hir::BiEq => llvm::IntEQ,
|
|
|
|
|
hir::BiNe => llvm::IntNE,
|
|
|
|
|
hir::BiLt => if signed { llvm::IntSLT } else { llvm::IntULT },
|
|
|
|
|
hir::BiLe => if signed { llvm::IntSLE } else { llvm::IntULE },
|
|
|
|
|
hir::BiGt => if signed { llvm::IntSGT } else { llvm::IntUGT },
|
|
|
|
|
hir::BiGe => if signed { llvm::IntSGE } else { llvm::IntUGE },
|
2015-01-29 14:03:34 +02:00
|
|
|
|
op => {
|
2016-03-29 01:46:02 +02:00
|
|
|
|
bug!("comparison_op_to_icmp_predicate: expected comparison operator, \
|
|
|
|
|
found {:?}",
|
|
|
|
|
op)
|
2015-01-29 14:03:34 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2011-06-15 11:19:50 -07:00
|
|
|
|
|
2016-03-29 01:46:02 +02:00
|
|
|
|
/// Map a HIR comparison operator to the matching LLVM floating-point
/// compare predicate. Note the asymmetry: `==`, `<`, `<=`, `>`, `>=` use
/// *ordered* predicates (false when either operand is NaN), while `!=`
/// uses the *unordered* `RealUNE` (true when either operand is NaN),
/// matching IEEE-754 semantics. Bugs out on non-comparison operators.
pub fn bin_op_to_fcmp_predicate(op: hir::BinOp_) -> llvm::RealPredicate {
    match op {
        hir::BiEq => llvm::RealOEQ,
        hir::BiNe => llvm::RealUNE,
        hir::BiLt => llvm::RealOLT,
        hir::BiLe => llvm::RealOLE,
        hir::BiGt => llvm::RealOGT,
        hir::BiGe => llvm::RealOGE,
        op => {
            bug!("comparison_op_to_fcmp_predicate: expected comparison operator, \
                  found {:?}",
                 op);
        }
    }
}
|
|
|
|
|
|
2015-11-10 22:05:11 +02:00
|
|
|
|
/// Compare two fat pointers, each given as an (address, extra) pair, and
/// return an i1 `ValueRef` holding the result.
///
/// Equality compares both components; the ordered comparisons treat the
/// pair lexicographically (address first, then the extra word). The
/// component comparisons are always *unsigned*.
pub fn compare_fat_ptrs<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                    lhs_addr: ValueRef,
                                    lhs_extra: ValueRef,
                                    rhs_addr: ValueRef,
                                    rhs_extra: ValueRef,
                                    _t: Ty<'tcx>,
                                    op: hir::BinOp_,
                                    debug_loc: DebugLoc)
                                    -> ValueRef {
    match op {
        hir::BiEq => {
            // Equal iff both components are equal.
            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
            let extra_eq = ICmp(bcx, llvm::IntEQ, lhs_extra, rhs_extra, debug_loc);
            And(bcx, addr_eq, extra_eq, debug_loc)
        }
        hir::BiNe => {
            // Unequal iff either component differs. NOTE(review): despite
            // the `_eq` names these locals hold *inequality* results
            // (IntNE), which is why they are combined with Or.
            let addr_eq = ICmp(bcx, llvm::IntNE, lhs_addr, rhs_addr, debug_loc);
            let extra_eq = ICmp(bcx, llvm::IntNE, lhs_extra, rhs_extra, debug_loc);
            Or(bcx, addr_eq, extra_eq, debug_loc)
        }
        hir::BiLe | hir::BiLt | hir::BiGe | hir::BiGt => {
            // a OP b ~ a.0 STRICT(OP) b.0 | (a.0 == b.0 && a.1 OP b.1)
            // i.e. lexicographic order: strictly ordered on the address, or
            // equal addresses and the extra word decides.
            let (op, strict_op) = match op {
                hir::BiLt => (llvm::IntULT, llvm::IntULT),
                hir::BiLe => (llvm::IntULE, llvm::IntULT),
                hir::BiGt => (llvm::IntUGT, llvm::IntUGT),
                hir::BiGe => (llvm::IntUGE, llvm::IntUGT),
                _ => bug!(),
            };

            let addr_eq = ICmp(bcx, llvm::IntEQ, lhs_addr, rhs_addr, debug_loc);
            let extra_op = ICmp(bcx, op, lhs_extra, rhs_extra, debug_loc);
            let addr_eq_extra_op = And(bcx, addr_eq, extra_op, debug_loc);

            let addr_strict = ICmp(bcx, strict_op, lhs_addr, rhs_addr, debug_loc);
            Or(bcx, addr_strict, addr_eq_extra_op, debug_loc)
        }
        _ => {
            bug!("unexpected fat ptr binop");
        }
    }
}
|
|
|
|
|
|
2015-01-29 14:03:34 +02:00
|
|
|
|
/// Emit a comparison `lhs op rhs` for a scalar type `t`, returning an i1
/// `ValueRef`. Dispatches on the type: constant-folds nil tuples, uses
/// unsigned integer compares for fn pointers/bool/uint/char and thin raw
/// pointers, signed compares for int, float compares for floats, and
/// defers fat raw pointers to `compare_fat_ptrs`. Bugs out on non-scalar
/// types and non-comparison operators.
pub fn compare_scalar_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
                                        op: hir::BinOp_,
                                        debug_loc: DebugLoc)
                                        -> ValueRef {
    match t.sty {
        ty::TyTuple(ref tys) if tys.is_empty() => {
            // We don't need to do actual comparisons for nil.
            // () == () holds but () < () does not.
            match op {
                hir::BiEq | hir::BiLe | hir::BiGe => return C_bool(bcx.ccx(), true),
                hir::BiNe | hir::BiLt | hir::BiGt => return C_bool(bcx.ccx(), false),
                // refinements would be nice
                _ => bug!("compare_scalar_types: must be a comparison operator"),
            }
        }
        ty::TyFnDef(..) | ty::TyFnPtr(_) | ty::TyBool | ty::TyUint(_) | ty::TyChar => {
            // All of these compare as unsigned integers.
            ICmp(bcx,
                 bin_op_to_icmp_predicate(op, false),
                 lhs,
                 rhs,
                 debug_loc)
        }
        ty::TyRawPtr(mt) if common::type_is_sized(bcx.tcx(), mt.ty) => {
            // Thin raw pointers: plain unsigned address comparison.
            ICmp(bcx,
                 bin_op_to_icmp_predicate(op, false),
                 lhs,
                 rhs,
                 debug_loc)
        }
        ty::TyRawPtr(_) => {
            // Fat raw pointers: `lhs`/`rhs` point at (addr, extra) pairs
            // in memory; load both components of each and compare them
            // pairwise.
            let lhs_addr = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_ADDR]));
            let lhs_extra = Load(bcx, GEPi(bcx, lhs, &[0, abi::FAT_PTR_EXTRA]));

            let rhs_addr = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_ADDR]));
            let rhs_extra = Load(bcx, GEPi(bcx, rhs, &[0, abi::FAT_PTR_EXTRA]));
            compare_fat_ptrs(bcx,
                             lhs_addr,
                             lhs_extra,
                             rhs_addr,
                             rhs_extra,
                             t,
                             op,
                             debug_loc)
        }
        ty::TyInt(_) => {
            ICmp(bcx,
                 bin_op_to_icmp_predicate(op, true),
                 lhs,
                 rhs,
                 debug_loc)
        }
        ty::TyFloat(_) => {
            FCmp(bcx,
                 bin_op_to_fcmp_predicate(op),
                 lhs,
                 rhs,
                 debug_loc)
        }
        // Should never get here, because t is scalar.
        _ => bug!("non-scalar type passed to compare_scalar_types"),
    }
}
|
|
|
|
|
|
2015-01-29 14:03:34 +02:00
|
|
|
|
/// Emit an element-wise SIMD comparison of `lhs` and `rhs` (vectors whose
/// element type is `t`), sign-extending the `< N x i1 >` mask LLVM
/// produces up to `ret_ty`. Floats use fcmp, ints use (signed or
/// unsigned) icmp; any other element type is a bug.
pub fn compare_simd_types<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                      lhs: ValueRef,
                                      rhs: ValueRef,
                                      t: Ty<'tcx>,
                                      ret_ty: Type,
                                      op: hir::BinOp_,
                                      debug_loc: DebugLoc)
                                      -> ValueRef {
    let signed = match t.sty {
        ty::TyFloat(_) => {
            // Floating-point vectors take the fcmp path and return early.
            let cmp = bin_op_to_fcmp_predicate(op);
            return SExt(bcx, FCmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty);
        },
        ty::TyUint(_) => false,
        ty::TyInt(_) => true,
        _ => bug!("compare_simd_types: invalid SIMD type"),
    };

    let cmp = bin_op_to_icmp_predicate(op, signed);
    // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
    // to get the correctly sized type. This will compile to a single instruction
    // once the IR is converted to assembly if the SIMD instruction is supported
    // by the target architecture.
    SExt(bcx, ICmp(bcx, cmp, lhs, rhs, debug_loc), ret_ty)
}
|
|
|
|
|
|
2011-08-30 13:10:10 +02:00
|
|
|
|
// Iterates through the elements of a structural type.
/// Walk every element (field, upvar, array element, enum payload) of the
/// value `av` of structural type `t`, calling `f` on each
/// `(block, element-pointer, element-type)` triple and threading the
/// returned block through. Returns the final block.
///
/// Unsized values are split into a (data, meta) `MaybeSizedValue` first;
/// for enums the discriminant is visited before any variant fields.
pub fn iter_structural_ty<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                         av: ValueRef,
                                         t: Ty<'tcx>,
                                         mut f: F)
                                         -> Block<'blk, 'tcx>
    where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
{
    let _icx = push_ctxt("iter_structural_ty");

    // Visit each field of a single enum variant, monomorphizing the
    // declared field types with `substs` before handing them to `f`.
    fn iter_variant<'blk, 'tcx, F>(cx: Block<'blk, 'tcx>,
                                   repr: &adt::Repr<'tcx>,
                                   av: adt::MaybeSizedValue,
                                   variant: ty::VariantDef<'tcx>,
                                   substs: &Substs<'tcx>,
                                   f: &mut F)
                                   -> Block<'blk, 'tcx>
        where F: FnMut(Block<'blk, 'tcx>, ValueRef, Ty<'tcx>) -> Block<'blk, 'tcx>
    {
        let _icx = push_ctxt("iter_variant");
        let tcx = cx.tcx();
        let mut cx = cx;

        for (i, field) in variant.fields.iter().enumerate() {
            let arg = monomorphize::field_ty(tcx, substs, field);
            cx = f(cx,
                   adt::trans_field_ptr(cx, repr, av, Disr::from(variant.disr_val), i),
                   arg);
        }
        return cx;
    }

    // Split unsized values into their data pointer and metadata up front;
    // sized values carry no metadata.
    let value = if common::type_is_sized(cx.tcx(), t) {
        adt::MaybeSizedValue::sized(av)
    } else {
        let data = Load(cx, expr::get_dataptr(cx, av));
        let info = Load(cx, expr::get_meta(cx, av));
        adt::MaybeSizedValue::unsized_(data, info)
    };

    let mut cx = cx;
    match t.sty {
        ty::TyStruct(..) => {
            let repr = adt::represent_type(cx.ccx(), t);
            let VariantInfo { fields, discr } = VariantInfo::from_ty(cx.tcx(), t, None);
            for (i, &Field(_, field_ty)) in fields.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr::from(discr), i);

                // An unsized trailing field needs to be recombined with the
                // parent's metadata into a scratch fat pointer before `f`
                // can see it.
                let val = if common::type_is_sized(cx.tcx(), field_ty) {
                    llfld_a
                } else {
                    let scratch = datum::rvalue_scratch_datum(cx, field_ty, "__fat_ptr_iter");
                    Store(cx, llfld_a, expr::get_dataptr(cx, scratch.val));
                    Store(cx, value.meta, expr::get_meta(cx, scratch.val));
                    scratch.val
                };
                cx = f(cx, val, field_ty);
            }
        }
        ty::TyClosure(_, ref substs) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, upvar_ty) in substs.upvar_tys.iter().enumerate() {
                let llupvar = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
                cx = f(cx, llupvar, upvar_ty);
            }
        }
        ty::TyArray(_, n) => {
            let (base, len) = tvec::get_fixed_base_and_len(cx, value.value, n);
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
        }
        ty::TySlice(_) | ty::TyStr => {
            // For slices/str the element count lives in the metadata word.
            let unit_ty = t.sequence_element_type(cx.tcx());
            cx = tvec::iter_vec_raw(cx, value.value, unit_ty, value.meta, f);
        }
        ty::TyTuple(ref args) => {
            let repr = adt::represent_type(cx.ccx(), t);
            for (i, arg) in args.iter().enumerate() {
                let llfld_a = adt::trans_field_ptr(cx, &repr, value, Disr(0), i);
                cx = f(cx, llfld_a, *arg);
            }
        }
        ty::TyEnum(en, substs) => {
            let fcx = cx.fcx;
            let ccx = fcx.ccx;

            let repr = adt::represent_type(ccx, t);
            let n_variants = en.variants.len();

            // NB: we must hit the discriminant first so that structural
            // comparisons know not to proceed when the discriminants differ.

            match adt::trans_switch(cx, &repr, av, false) {
                (_match::Single, None) => {
                    // Univariant enum: no discriminant to inspect, just
                    // walk the single variant's fields (if any).
                    if n_variants != 0 {
                        assert!(n_variants == 1);
                        cx = iter_variant(cx, &repr, adt::MaybeSizedValue::sized(av),
                                          &en.variants[0], substs, &mut f);
                    }
                }
                (_match::Switch, Some(lldiscrim_a)) => {
                    cx = f(cx, lldiscrim_a, cx.tcx().types.isize);

                    // Create a fall-through basic block for the "else" case of
                    // the switch instruction we're about to generate. Note that
                    // we do **not** use an Unreachable instruction here, even
                    // though most of the time this basic block will never be hit.
                    //
                    // When an enum is dropped its contents are currently
                    // overwritten to DTOR_DONE, which means the discriminant
                    // could have changed value to something not within the actual
                    // range of the discriminant. Currently this function is only
                    // used for drop glue so in this case we just return quickly
                    // from the outer function, and any other use case will only
                    // call this for an already-valid enum in which case the `ret
                    // void` will never be hit.
                    let ret_void_cx = fcx.new_temp_block("enum-iter-ret-void");
                    RetVoid(ret_void_cx, DebugLoc::None);
                    let llswitch = Switch(cx, lldiscrim_a, ret_void_cx.llbb, n_variants);
                    let next_cx = fcx.new_temp_block("enum-iter-next");

                    // One switch case (and basic block) per variant; each
                    // case walks its fields and then branches to `next_cx`.
                    for variant in &en.variants {
                        let variant_cx = fcx.new_temp_block(&format!("enum-iter-variant-{}",
                                                                     &variant.disr_val
                                                                             .to_string()));
                        let case_val = adt::trans_case(cx, &repr, Disr::from(variant.disr_val));
                        AddCase(llswitch, case_val, variant_cx.llbb);
                        let variant_cx = iter_variant(variant_cx,
                                                      &repr,
                                                      value,
                                                      variant,
                                                      substs,
                                                      &mut f);
                        Br(variant_cx, next_cx.llbb, DebugLoc::None);
                    }
                    cx = next_cx;
                }
                _ => ccx.sess().unimpl("value from adt::trans_switch in iter_structural_ty"),
            }
        }
        _ => {
            cx.sess().unimpl(&format!("type in iter_structural_ty: {}", t))
        }
    }
    return cx;
}
|
|
|
|
|
|
2015-11-11 22:02:51 +02:00
|
|
|
|
|
|
|
|
|
/// Retrieve the information we are losing (making dynamic) in an unsizing
/// adjustment.
///
/// The `old_info` argument is a bit funny. It is intended for use
/// in an upcast, where the new vtable for an object will be derived
/// from the old one.
pub fn unsized_info<'ccx, 'tcx>(ccx: &CrateContext<'ccx, 'tcx>,
                                source: Ty<'tcx>,
                                target: Ty<'tcx>,
                                old_info: Option<ValueRef>)
                                -> ValueRef {
    // Strip any matching wrapper layers so we compare the actual tails
    // being unsized (e.g. the field types inside nested structs).
    let (source, target) = ccx.tcx().struct_lockstep_tails(source, target);
    match (&source.sty, &target.sty) {
        // Array -> slice: the lost info is the (constant) length.
        (&ty::TyArray(_, len), &ty::TySlice(_)) => C_uint(ccx, len),
        (&ty::TyTrait(_), &ty::TyTrait(_)) => {
            // For now, upcasts are limited to changes in marker
            // traits, and hence never actually require an actual
            // change to the vtable.
            old_info.expect("unsized_info: missing old info for trait upcast")
        }
        // Concrete type -> trait object: build (and cast) the vtable.
        (_, &ty::TyTrait(ref data)) => {
            let trait_ref = data.principal.with_self_ty(ccx.tcx(), source);
            let trait_ref = ccx.tcx().erase_regions(&trait_ref);
            consts::ptrcast(meth::get_vtable(ccx, trait_ref),
                            Type::vtable_ptr(ccx))
        }
        _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}",
                  source,
                  target),
    }
}
|
|
|
|
|
|
|
|
|
|
/// Coerce `src` to `dst_ty`. `src_ty` must be a thin pointer.
///
/// Returns the (data pointer, metadata) pair of the resulting fat
/// pointer: the data pointer is `src` cast to the destination's pointee
/// type, the metadata comes from `unsized_info`. Only pointer-to-pointer
/// unsizings (`Box`/`&`/`*`) are accepted; anything else is a bug.
pub fn unsize_thin_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                   src: ValueRef,
                                   src_ty: Ty<'tcx>,
                                   dst_ty: Ty<'tcx>)
                                   -> (ValueRef, ValueRef) {
    debug!("unsize_thin_ptr: {:?} => {:?}", src_ty, dst_ty);
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::TyBox(a), &ty::TyBox(b)) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRef(_, ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRef(_, ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) |
        (&ty::TyRawPtr(ty::TypeAndMut { ty: a, .. }),
         &ty::TyRawPtr(ty::TypeAndMut { ty: b, .. })) => {
            // The source pointee must be sized (it's a thin pointer).
            assert!(common::type_is_sized(bcx.tcx(), a));
            let ptr_ty = type_of::in_memory_type_of(bcx.ccx(), b).ptr_to();
            (PointerCast(bcx, src, ptr_ty),
             unsized_info(bcx.ccx(), a, b, None))
        }
        _ => bug!("unsize_thin_ptr: called on bad types"),
    }
}
|
|
|
|
|
|
|
|
|
|
/// Coerce `src`, which is a reference to a value of type `src_ty`,
/// to a value of type `dst_ty` and store the result in `dst`
///
/// Handles two shapes of unsizing coercion:
/// * pointer-to-pointer (`Box`/`&`/`*`): build or forward a fat pointer;
/// * struct-to-struct (`CoerceUnsized` on a user struct): copy each field,
///   recursing into the single field whose type actually changes.
pub fn coerce_unsized_into<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                       src: ValueRef,
                                       src_ty: Ty<'tcx>,
                                       dst: ValueRef,
                                       dst_ty: Ty<'tcx>) {
    match (&src_ty.sty, &dst_ty.sty) {
        (&ty::TyBox(..), &ty::TyBox(..)) |
        (&ty::TyRef(..), &ty::TyRef(..)) |
        (&ty::TyRef(..), &ty::TyRawPtr(..)) |
        (&ty::TyRawPtr(..), &ty::TyRawPtr(..)) => {
            let (base, info) = if common::type_is_fat_ptr(bcx.tcx(), src_ty) {
                // fat-ptr to fat-ptr unsize preserves the vtable
                // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
                // So we need to pointercast the base to ensure
                // the types match up.
                let (base, info) = load_fat_ptr(bcx, src, src_ty);
                let llcast_ty = type_of::fat_ptr_base_ty(bcx.ccx(), dst_ty);
                let base = PointerCast(bcx, base, llcast_ty);
                (base, info)
            } else {
                // Thin source pointer: synthesize the metadata.
                let base = load_ty(bcx, src, src_ty);
                unsize_thin_ptr(bcx, base, src_ty, dst_ty)
            };
            store_fat_ptr(bcx, base, info, dst, dst_ty);
        }

        // This can be extended to enums and tuples in the future.
        // (&ty::TyEnum(def_id_a, _), &ty::TyEnum(def_id_b, _)) |
        (&ty::TyStruct(def_a, _), &ty::TyStruct(def_b, _)) => {
            // CoerceUnsized between two instantiations of the same struct.
            assert_eq!(def_a, def_b);

            let src_repr = adt::represent_type(bcx.ccx(), src_ty);
            let src_fields = match &*src_repr {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bug!("struct has non-univariant repr"),
            };
            let dst_repr = adt::represent_type(bcx.ccx(), dst_ty);
            let dst_fields = match &*dst_repr {
                &adt::Repr::Univariant(ref s, _) => &s.fields,
                _ => bug!("struct has non-univariant repr"),
            };

            // Both sides are behind thin pointers here; the unsized tail (if
            // any) is reached through the coerced field, not through `dst`.
            let src = adt::MaybeSizedValue::sized(src);
            let dst = adt::MaybeSizedValue::sized(dst);

            let iter = src_fields.iter().zip(dst_fields).enumerate();
            for (i, (src_fty, dst_fty)) in iter {
                if type_is_zero_size(bcx.ccx(), dst_fty) {
                    continue;
                }

                let src_f = adt::trans_field_ptr(bcx, &src_repr, src, Disr(0), i);
                let dst_f = adt::trans_field_ptr(bcx, &dst_repr, dst, Disr(0), i);
                if src_fty == dst_fty {
                    // Field type unchanged: plain byte copy.
                    memcpy_ty(bcx, dst_f, src_f, src_fty);
                } else {
                    // This is the field being coerced: recurse.
                    coerce_unsized_into(bcx, src_f, src_fty, dst_f, dst_fty);
                }
            }
        }
        _ => bug!("coerce_unsized_into: invalid coercion {:?} -> {:?}",
                  src_ty,
                  dst_ty),
    }
}
|
|
|
|
|
|
2016-05-06 17:07:36 -04:00
|
|
|
|
/// Determine which kind of custom `CoerceUnsized` coercion applies when
/// going from `source_ty` to `target_ty`, by running trait selection on
/// `source_ty: CoerceUnsized<target_ty>` and reading the winning impl's
/// recorded coercion kind.
///
/// Panics (`bug!`) if selection yields anything other than a concrete impl;
/// callers only invoke this after typeck has proven the obligation holds.
pub fn custom_coerce_unsize_info<'scx, 'tcx>(scx: &SharedCrateContext<'scx, 'tcx>,
                                             source_ty: Ty<'tcx>,
                                             target_ty: Ty<'tcx>)
                                             -> CustomCoerceUnsized {
    let trait_ref = ty::Binder(ty::TraitRef {
        // `unwrap` is fine: this path is only reached for coercions, which
        // require the CoerceUnsized lang item to exist.
        def_id: scx.tcx().lang_items.coerce_unsized_trait().unwrap(),
        substs: Substs::new_trait(scx.tcx(), vec![target_ty], vec![], source_ty)
    });

    match fulfill_obligation(scx, DUMMY_SP, trait_ref) {
        traits::VtableImpl(traits::VtableImplData { impl_def_id, .. }) => {
            scx.tcx().custom_coerce_unsized_kind(impl_def_id)
        }
        vtable => {
            bug!("invalid CoerceUnsized vtable: {:?}", vtable);
        }
    }
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn cast_shift_expr_rhs(cx: Block, op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
|
|
|
|
|
cast_shift_rhs(op, lhs, rhs, |a, b| Trunc(cx, a, b), |a, b| ZExt(cx, a, b))
|
2012-02-21 21:01:33 -08:00
|
|
|
|
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn cast_shift_const_rhs(op: hir::BinOp_, lhs: ValueRef, rhs: ValueRef) -> ValueRef {
|
|
|
|
|
cast_shift_rhs(op,
|
|
|
|
|
lhs,
|
|
|
|
|
rhs,
|
2013-06-16 22:52:44 +12:00
|
|
|
|
|a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
|
|
|
|
|
|a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
|
2012-02-21 21:01:33 -08:00
|
|
|
|
}
|
|
|
|
|
|
2015-07-31 00:04:06 -07:00
|
|
|
|
/// Shared implementation for `cast_shift_expr_rhs` / `cast_shift_const_rhs`:
/// if `op` is a shift, widen or narrow `rhs` (via the supplied `zext` /
/// `trunc` callbacks) so it has the same integer width as `lhs`, as LLVM's
/// shift instructions require. Non-shift ops return `rhs` unchanged.
///
/// For vector shifts the element type's width is compared instead.
fn cast_shift_rhs<F, G>(op: hir::BinOp_,
                        lhs: ValueRef,
                        rhs: ValueRef,
                        trunc: F,
                        zext: G)
                        -> ValueRef
    where F: FnOnce(ValueRef, Type) -> ValueRef,
          G: FnOnce(ValueRef, Type) -> ValueRef
{
    // Shifts may have any size int on the rhs
    if op.is_shift() {
        let mut rhs_llty = val_ty(rhs);
        let mut lhs_llty = val_ty(lhs);
        if rhs_llty.kind() == Vector {
            rhs_llty = rhs_llty.element_type()
        }
        if lhs_llty.kind() == Vector {
            lhs_llty = lhs_llty.element_type()
        }
        let rhs_sz = rhs_llty.int_width();
        let lhs_sz = lhs_llty.int_width();
        if lhs_sz < rhs_sz {
            trunc(rhs, lhs_llty)
        } else if lhs_sz > rhs_sz {
            // FIXME (#1877): If shifting by negative
            // values becomes not undefined then this is wrong.
            // (zext — not sext — is correct today because a negative shift
            // amount is already UB.)
            zext(rhs, lhs_llty)
        } else {
            rhs
        }
    } else {
        rhs
    }
}
|
|
|
|
|
|
2015-04-15 02:19:26 +02:00
|
|
|
|
/// For a signed integer type `val_t`, return its LLVM type together with the
/// bit pattern of its minimum value (as a `u64`, sign-extended by the `as`
/// casts). Used by the division-overflow check to detect `MIN / -1`.
///
/// Calling this with a non-`TyInt` type is a compiler bug.
pub fn llty_and_min_for_signed_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                              val_t: Ty<'tcx>)
                                              -> (Type, u64) {
    match val_t.sty {
        ty::TyInt(t) => {
            let llty = Type::int_from_ty(cx.ccx(), t);
            let min = match t {
                // `isize` width depends on the target: compare against the
                // already-computed LLVM type to pick 32- vs 64-bit MIN.
                ast::IntTy::Is if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
                ast::IntTy::Is => i64::MIN as u64,
                ast::IntTy::I8 => i8::MIN as u64,
                ast::IntTy::I16 => i16::MIN as u64,
                ast::IntTy::I32 => i32::MIN as u64,
                ast::IntTy::I64 => i64::MIN as u64,
            };
            (llty, min)
        }
        _ => bug!(),
    }
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Emit the runtime guards required before a division or remainder:
/// a panic on a zero divisor, and — for signed integers only — a panic on
/// the overflowing `MIN / -1` (resp. `MIN % -1`) case. Returns the block in
/// which the now-safe div/rem instruction should be emitted.
///
/// `divrem` distinguishes `/` from `%` only for the panic message text.
pub fn fail_if_zero_or_overflows<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                             call_info: NodeIdAndSpan,
                                             divrem: hir::BinOp,
                                             lhs: ValueRef,
                                             rhs: ValueRef,
                                             rhs_t: Ty<'tcx>)
                                             -> Block<'blk, 'tcx> {
    use rustc_const_math::{ConstMathErr, Op};

    // Pick the panic messages matching the operator being guarded.
    let (zero_err, overflow_err) = if divrem.node == hir::BiDiv {
        (ConstMathErr::DivisionByZero, ConstMathErr::Overflow(Op::Div))
    } else {
        (ConstMathErr::RemainderByZero, ConstMathErr::Overflow(Op::Rem))
    };
    let debug_loc = call_info.debug_loc();

    // Build the "divisor is zero" condition; also learn whether the type is
    // signed (only signed div/rem can overflow).
    let (is_zero, is_signed) = match rhs_t.sty {
        ty::TyInt(t) => {
            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), true)
        }
        ty::TyUint(t) => {
            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero, debug_loc), false)
        }
        ty::TyStruct(def, _) if def.is_simd() => {
            // SIMD divisor: panic if ANY lane is zero; OR the per-lane tests.
            let mut res = C_bool(cx.ccx(), false);
            for i in 0..rhs_t.simd_size(cx.tcx()) {
                res = Or(cx,
                         res,
                         IsNull(cx, ExtractElement(cx, rhs, C_int(cx.ccx(), i as i64))),
                         debug_loc);
            }
            (res, false)
        }
        _ => {
            bug!("fail-if-zero on unexpected type: {}", rhs_t);
        }
    };
    let bcx = with_cond(cx, is_zero, |bcx| {
        controlflow::trans_fail(bcx, call_info, InternedString::new(zero_err.description()))
    });

    // To quote LLVM's documentation for the sdiv instruction:
    //
    // Division by zero leads to undefined behavior. Overflow also leads
    // to undefined behavior; this is a rare case, but can occur, for
    // example, by doing a 32-bit division of -2147483648 by -1.
    //
    // In order to avoid undefined behavior, we perform runtime checks for
    // signed division/remainder which would trigger overflow. For unsigned
    // integers, no action beyond checking for zero need be taken.
    if is_signed {
        let (llty, min) = llty_and_min_for_signed_ty(cx, rhs_t);
        // Check `rhs == -1` first; only then test `lhs == MIN`, so the
        // common case pays one comparison.
        let minus_one = ICmp(bcx,
                             llvm::IntEQ,
                             rhs,
                             C_integral(llty, !0, false),
                             debug_loc);
        with_cond(bcx, minus_one, |bcx| {
            let is_min = ICmp(bcx,
                              llvm::IntEQ,
                              lhs,
                              C_integral(llty, min, true),
                              debug_loc);
            with_cond(bcx, is_min, |bcx| {
                controlflow::trans_fail(bcx, call_info,
                                        InternedString::new(overflow_err.description()))
            })
        })
    } else {
        bcx
    }
}
|
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
/// Call `llfn` with `llargs`, emitting either an LLVM `invoke` (when a
/// landing pad is needed for unwinding — see `need_invoke`) or a plain
/// `call`. Returns the call's result value and the block in which
/// translation should continue (the invoke's normal-return successor, or
/// `bcx` itself for a plain call).
///
/// In an unreachable block nothing is emitted and a null placeholder value
/// is returned.
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          debug_loc: DebugLoc)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    // Purely diagnostic: report where (which HIR node) this call comes from.
    match bcx.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
        Some(id) => {
            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
        }
    }

    if need_invoke(bcx) {
        debug!("invoking {:?} at {:?}", Value(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {:?}", Value(llarg));
        }
        // `invoke` needs an explicit normal-return block and a landing pad
        // for the unwind edge.
        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        let llresult = Invoke(bcx,
                              llfn,
                              &llargs[..],
                              normal_bcx.llbb,
                              landing_pad,
                              debug_loc);
        return (llresult, normal_bcx);
    } else {
        debug!("calling {:?} at {:?}", Value(llfn), bcx.llbb);
        for &llarg in llargs {
            debug!("arg: {:?}", Value(llarg));
        }

        let llresult = Call(bcx, llfn, &llargs[..], debug_loc);
        return (llresult, bcx);
    }
}
|
|
|
|
|
|
2015-08-11 11:48:43 -07:00
|
|
|
|
/// Returns whether this session's target will use SEH-based unwinding.
|
|
|
|
|
///
|
|
|
|
|
/// This is only true for MSVC targets, and even then the 64-bit MSVC target
|
|
|
|
|
/// currently uses SEH-ish unwinding with DWARF info tables to the side (same as
|
|
|
|
|
/// 64-bit MinGW) instead of "full SEH".
|
|
|
|
|
pub fn wants_msvc_seh(sess: &Session) -> bool {
|
2015-10-23 18:18:44 -07:00
|
|
|
|
sess.target.target.options.is_like_msvc
|
2015-08-11 11:48:43 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-12-20 15:30:09 +02:00
|
|
|
|
pub fn avoid_invoke(bcx: Block) -> bool {
|
2016-02-08 11:53:06 +01:00
|
|
|
|
bcx.sess().no_landing_pads() || bcx.lpad().is_some()
|
2015-12-20 15:30:09 +02:00
|
|
|
|
}
|
2012-06-29 12:31:23 -07:00
|
|
|
|
|
2015-12-20 15:30:09 +02:00
|
|
|
|
pub fn need_invoke(bcx: Block) -> bool {
|
|
|
|
|
if avoid_invoke(bcx) {
|
|
|
|
|
false
|
|
|
|
|
} else {
|
|
|
|
|
bcx.fcx.needs_invoke()
|
2012-07-23 16:00:19 -07:00
|
|
|
|
}
|
2012-03-26 13:30:56 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, t: Ty<'tcx>) -> ValueRef {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("load_if_immediate");
|
2015-11-19 12:36:31 +01:00
|
|
|
|
if type_is_immediate(cx.ccx(), t) {
|
|
|
|
|
return load_ty(cx, v, t);
|
|
|
|
|
}
|
2012-08-01 17:30:05 -07:00
|
|
|
|
return v;
|
2010-12-03 13:03:07 -08:00
|
|
|
|
}
|
|
|
|
|
|
2014-11-25 21:17:11 -05:00
|
|
|
|
/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
|
|
|
|
|
/// differs from the type used for SSA values. Also handles various special cases where the type
|
|
|
|
|
/// gives us better information about what we are loading.
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
|
2016-03-09 14:20:22 +02:00
|
|
|
|
if cx.unreachable.get() {
|
2015-03-21 00:21:38 +01:00
|
|
|
|
return C_undef(type_of::type_of(cx.ccx(), t));
|
|
|
|
|
}
|
2016-03-09 14:20:22 +02:00
|
|
|
|
load_ty_builder(&B(cx), ptr, t)
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
/// Builder-level body of `load_ty`: load a value of type `t` from `ptr`,
/// applying representation conversions (i8 -> i1 for bool) and attaching
/// value-range / nonnull metadata where the Rust type guarantees it.
///
/// If `ptr` is a constant global, its initializer is returned directly
/// without emitting a load.
pub fn load_ty_builder<'a, 'tcx>(b: &Builder<'a, 'tcx>, ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let ccx = b.ccx;
    // ZSTs carry no data: no load needed, undef is as good as anything.
    if type_is_zero_size(ccx, t) {
        return C_undef(type_of::type_of(ccx, t));
    }

    unsafe {
        // Constant-fold loads from constant globals by reading the
        // initializer (when one is present in this module).
        let global = llvm::LLVMIsAGlobalVariable(ptr);
        if !global.is_null() && llvm::LLVMIsGlobalConstant(global) == llvm::True {
            let val = llvm::LLVMGetInitializer(global);
            if !val.is_null() {
                if t.is_bool() {
                    // bool is stored as i8 but used as i1 in SSA form.
                    return llvm::LLVMConstTrunc(val, Type::i1(ccx).to_ref());
                }
                return val;
            }
        }
    }

    if t.is_bool() {
        // Stored as i8 with a guaranteed value of 0 or 1; assert the range
        // for the optimizer, then truncate to i1.
        b.trunc(b.load_range_assert(ptr, 0, 2, llvm::False), Type::i1(ccx))
    } else if t.is_char() {
        // a char is a Unicode codepoint, and so takes values from 0
        // to 0x10FFFF inclusive only.
        b.load_range_assert(ptr, 0, 0x10FFFF + 1, llvm::False)
    } else if (t.is_region_ptr() || t.is_unique()) &&
              !common::type_is_fat_ptr(ccx.tcx(), t) {
        // Thin references and Box are never null.
        b.load_nonnull(ptr)
    } else {
        b.load(ptr)
    }
}
|
|
|
|
|
|
2014-11-25 21:17:11 -05:00
|
|
|
|
/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
///
/// Fat pointers arrive as a two-element aggregate and are stored
/// component-wise; everything else goes through `from_immediate` (which
/// widens i1 bools to their in-memory i8 form) and a single store.
pub fn store_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>, v: ValueRef, dst: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() {
        return;
    }

    debug!("store_ty: {:?} : {:?} <- {:?}", Value(dst), t, Value(v));

    if common::type_is_fat_ptr(cx.tcx(), t) {
        // Split the fat pointer into (data, extra) and store each half in
        // its slot of the destination.
        Store(cx,
              ExtractValue(cx, v, abi::FAT_PTR_ADDR),
              expr::get_dataptr(cx, dst));
        Store(cx,
              ExtractValue(cx, v, abi::FAT_PTR_EXTRA),
              expr::get_meta(cx, dst));
    } else {
        Store(cx, from_immediate(cx, v), dst);
    }
}
|
|
|
|
|
|
2015-11-10 22:05:11 +02:00
|
|
|
|
pub fn store_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
|
|
|
|
data: ValueRef,
|
|
|
|
|
extra: ValueRef,
|
|
|
|
|
dst: ValueRef,
|
|
|
|
|
_ty: Ty<'tcx>) {
|
|
|
|
|
// FIXME: emit metadata
|
|
|
|
|
Store(cx, data, expr::get_dataptr(cx, dst));
|
|
|
|
|
Store(cx, extra, expr::get_meta(cx, dst));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn load_fat_ptr<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
|
|
|
|
src: ValueRef,
|
2015-11-19 12:36:31 +01:00
|
|
|
|
_ty: Ty<'tcx>)
|
|
|
|
|
-> (ValueRef, ValueRef) {
|
2015-11-10 22:05:11 +02:00
|
|
|
|
// FIXME: emit metadata
|
|
|
|
|
(Load(cx, expr::get_dataptr(cx, src)),
|
|
|
|
|
Load(cx, expr::get_meta(cx, src)))
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
pub fn from_immediate(bcx: Block, val: ValueRef) -> ValueRef {
|
|
|
|
|
if val_ty(val) == Type::i1(bcx.ccx()) {
|
2015-03-21 00:21:38 +01:00
|
|
|
|
ZExt(bcx, val, Type::i8(bcx.ccx()))
|
|
|
|
|
} else {
|
|
|
|
|
val
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
pub fn to_immediate(bcx: Block, val: ValueRef, ty: Ty) -> ValueRef {
|
2015-06-24 08:24:13 +03:00
|
|
|
|
if ty.is_bool() {
|
2015-03-21 00:21:38 +01:00
|
|
|
|
Trunc(bcx, val, Type::i1(bcx.ccx()))
|
|
|
|
|
} else {
|
|
|
|
|
val
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Translate a `let` binding: delegates to pattern-match translation, which
/// allocates storage for the bound names and stores the initializer.
/// Returns the block in which translation continues.
pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &hir::Local) -> Block<'blk, 'tcx> {
    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
    // Debug-logging guards; their creation order shapes the log indentation.
    let _indenter = indenter();
    let _icx = push_ctxt("init_local");
    _match::store_local(bcx, local)
}
|
2010-09-29 17:22:07 -07:00
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
/// Wrap a raw LLVM basic block `llbb` in a `Block` belonging to `fcx`,
/// with no associated HIR node id.
pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             llbb: BasicBlockRef)
                             -> Block<'blk, 'tcx> {
    common::BlockS::new(llbb, None, fcx)
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
/// Emit an `if val { f(...) }` shape: branch on `val` into a fresh "cond"
/// block whose contents are produced by `f`, then fall through to a fresh
/// "next" block, which is returned as the continuation point.
///
/// If the current block is unreachable, or `val` is the constant `false`,
/// nothing is emitted and `bcx` is returned unchanged.
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, val: ValueRef, f: F) -> Block<'blk, 'tcx>
    where F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>
{
    let _icx = push_ctxt("with_cond");

    // Constant-false condition: skip emitting the dead branch entirely.
    if bcx.unreachable.get() || common::const_to_opt_uint(val) == Some(0) {
        return bcx;
    }

    let fcx = bcx.fcx;
    let next_cx = fcx.new_temp_block("next");
    let cond_cx = fcx.new_temp_block("cond");
    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb, DebugLoc::None);
    let after_cx = f(cond_cx);
    // Only add the fall-through edge if `f` didn't already terminate the
    // block (e.g. with a panic/return).
    if !after_cx.terminated.get() {
        Br(after_cx, next_cx.llbb, DebugLoc::None);
    }
    next_cx
}
|
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
/// Which LLVM lifetime intrinsic (`llvm.lifetime.start` / `llvm.lifetime.end`)
/// an alloca-lifetime marker refers to.
pub enum Lifetime { Start, End }
|
2016-01-12 17:17:50 +01:00
|
|
|
|
|
|
|
|
|
// If LLVM lifetime intrinsic support is enabled (i.e. optimizations
// on), and `ptr` is nonzero-sized, then extracts the size of `ptr`
// and the intrinsic for `lt` and passes them to `emit`, which is in
// charge of generating code to call the passed intrinsic on whatever
// block of generated code is targetted for the intrinsic.
//
// If LLVM lifetime intrinsic support is disabled (i.e. optimizations
// off) or `ptr` is zero-sized, then no-op (does not call `emit`).
fn core_lifetime_emit<'blk, 'tcx, F>(ccx: &'blk CrateContext<'blk, 'tcx>,
                                     ptr: ValueRef,
                                     lt: Lifetime,
                                     emit: F)
    where F: FnOnce(&'blk CrateContext<'blk, 'tcx>, machine::llsize, ValueRef)
{
    // Lifetime markers only pay off when the optimizer (stack coloring)
    // runs; skip them entirely at -O0.
    if ccx.sess().opts.optimize == config::OptLevel::No {
        return;
    }

    let _icx = push_ctxt(match lt {
        Lifetime::Start => "lifetime_start",
        Lifetime::End => "lifetime_end"
    });

    // Size of the pointee, taken from the alloca's LLVM element type;
    // zero-sized allocas get no markers.
    let size = machine::llsize_of_alloc(ccx, val_ty(ptr).element_type());
    if size == 0 {
        return;
    }

    let lifetime_intrinsic = ccx.get_intrinsic(match lt {
        Lifetime::Start => "llvm.lifetime.start",
        Lifetime::End => "llvm.lifetime.end"
    });
    emit(ccx, size, lifetime_intrinsic)
}
|
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
impl Lifetime {
    /// Emit the corresponding `llvm.lifetime.*` intrinsic call for the
    /// alloca `ptr` via builder `b` (a no-op at -O0 or for zero-sized
    /// allocas — see `core_lifetime_emit`).
    pub fn call(self, b: &Builder, ptr: ValueRef) {
        core_lifetime_emit(b.ccx, ptr, self, |ccx, size, lifetime_intrinsic| {
            // The intrinsics take (i64 size, i8* ptr).
            let ptr = b.pointercast(ptr, Type::i8p(ccx));
            b.call(lifetime_intrinsic, &[C_u64(ccx, size), ptr], None);
        });
    }
}
|
2015-08-23 14:38:42 +02:00
|
|
|
|
|
2016-06-08 00:35:01 +03:00
|
|
|
|
pub fn call_lifetime_start(bcx: Block, ptr: ValueRef) {
|
|
|
|
|
if !bcx.unreachable.get() {
|
|
|
|
|
Lifetime::Start.call(&bcx.build(), ptr);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn call_lifetime_end(bcx: Block, ptr: ValueRef) {
|
|
|
|
|
if !bcx.unreachable.get() {
|
|
|
|
|
Lifetime::End.call(&bcx.build(), ptr);
|
|
|
|
|
}
|
Emit LLVM lifetime intrinsics to improve stack usage and codegen in general
Lifetime intrinsics help to reduce stack usage, because LLVM can apply
stack coloring to reuse the stack slots of dead allocas for new ones.
For example these functions now both use the same amount of stack, while
previous `bar()` used five times as much as `foo()`:
````rust
fn foo() {
println("{}", 5);
}
fn bar() {
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
println("{}", 5);
}
````
On top of that, LLVM can also optimize out certain operations when it
knows that memory is dead after a certain point. For example, it can
sometimes remove the zeroing used to cancel the drop glue. This is
possible when the glue drop itself was already removed because the
zeroing dominated the drop glue call. For example in:
````rust
pub fn bar(x: (Box<int>, int)) -> (Box<int>, int) {
x
}
````
With optimizations, this currently results in:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.memset.p0i8.i64(i8* %2, i8 0, i64 16, i32 8, i1 false)
ret void
}
````
But with lifetime intrinsics we get:
````llvm
define void @_ZN3bar20h330fa42547df8179niaE({ i64*, i64 }* noalias nocapture nonnull sret, { i64*, i64 }* noalias nocapture nonnull) unnamed_addr #0 {
"_ZN29_$LP$Box$LT$int$GT$$C$int$RP$39glue_drop.$x22glue_drop$x22$LP$1347$RP$17h88cf42702e5a322aE.exit":
%2 = bitcast { i64*, i64 }* %1 to i8*
%3 = bitcast { i64*, i64 }* %0 to i8*
tail call void @llvm.memcpy.p0i8.p0i8.i64(i8* %3, i8* %2, i64 16, i32 8, i1 false)
tail call void @llvm.lifetime.end(i64 16, i8* %2)
ret void
}
````
Fixes #15665
2014-05-01 19:32:07 +02:00
|
|
|
|
}
|
|
|
|
|
|
2015-10-18 14:17:34 -07:00
|
|
|
|
// Generates code for resumption of unwind at the end of a landing pad.
//
// Most targets use the LLVM `resume` instruction; targets flagged with
// `custom_unwind_resume` (e.g. some GNU/EH setups) instead call the
// function's `eh_unwind_resume` lang-item with the exception pointer
// extracted from the landing-pad value.
pub fn trans_unwind_resume(bcx: Block, lpval: ValueRef) {
    if !bcx.sess().target.target.options.custom_unwind_resume {
        Resume(bcx, lpval);
    } else {
        // Field 0 of the landing-pad aggregate is the exception pointer.
        let exc_ptr = ExtractValue(bcx, lpval, 0);
        bcx.fcx.eh_unwind_resume()
           .call(bcx, DebugLoc::None, ArgVals(&[exc_ptr]), None);
    }
}
|
|
|
|
|
|
2016-03-08 14:29:46 +02:00
|
|
|
|
/// Emit a call to the pointer-width-appropriate `llvm.memcpy` intrinsic,
/// copying `n_bytes` from `src` to `dst` with the given `align`ment.
/// Both pointers are cast to `i8*` as the intrinsic requires; the copy is
/// non-volatile.
pub fn call_memcpy<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
                               dst: ValueRef,
                               src: ValueRef,
                               n_bytes: ValueRef,
                               align: u32) {
    let _icx = push_ctxt("call_memcpy");
    let ccx = b.ccx;
    // The intrinsic name encodes the target's pointer width, e.g.
    // "llvm.memcpy.p0i8.p0i8.i64" on 64-bit targets.
    let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
    let key = format!("llvm.memcpy.p0i8.p0i8.i{}", ptr_width);
    let memcpy = ccx.get_intrinsic(&key);
    let src_ptr = b.pointercast(src, Type::i8p(ccx));
    let dst_ptr = b.pointercast(dst, Type::i8p(ccx));
    let size = b.intcast(n_bytes, ccx.int_type());
    let align = C_i32(ccx, align as i32);
    let volatile = C_bool(ccx, false);
    b.call(memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
}
|
|
|
|
|
|
2015-11-19 12:36:31 +01:00
|
|
|
|
pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, dst: ValueRef, src: ValueRef, t: Ty<'tcx>) {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("memcpy_ty");
|
2012-08-28 15:54:45 -07:00
|
|
|
|
let ccx = bcx.ccx();
|
2015-08-22 17:07:37 +02:00
|
|
|
|
|
2016-03-08 14:29:46 +02:00
|
|
|
|
if type_is_zero_size(ccx, t) || bcx.unreachable.get() {
|
2015-08-22 17:07:37 +02:00
|
|
|
|
return;
|
|
|
|
|
}
|
|
|
|
|
|
2015-06-24 08:24:13 +03:00
|
|
|
|
if t.is_structural() {
|
2013-05-25 09:52:25 -04:00
|
|
|
|
let llty = type_of::type_of(ccx, t);
|
|
|
|
|
let llsz = llsize_of(ccx, llty);
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let llalign = type_of::align_of(ccx, t);
|
2016-03-08 14:29:46 +02:00
|
|
|
|
call_memcpy(&B(bcx), dst, src, llsz, llalign as u32);
|
2015-11-20 21:42:13 +01:00
|
|
|
|
} else if common::type_is_fat_ptr(bcx.tcx(), t) {
|
|
|
|
|
let (data, extra) = load_fat_ptr(bcx, src, t);
|
|
|
|
|
store_fat_ptr(bcx, data, extra, dst, t);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
} else {
|
2015-01-29 14:03:34 +02:00
|
|
|
|
store_ty(bcx, load_ty(bcx, src, t), dst, t);
|
2011-07-27 14:19:39 +02:00
|
|
|
|
}
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-02-10 10:04:39 +01:00
|
|
|
|
pub fn drop_done_fill_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
|
2015-11-19 12:36:31 +01:00
|
|
|
|
if cx.unreachable.get() {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2015-02-10 10:04:39 +01:00
|
|
|
|
let _icx = push_ctxt("drop_done_fill_mem");
|
2012-08-28 15:54:45 -07:00
|
|
|
|
let bcx = cx;
|
2015-02-10 10:04:39 +01:00
|
|
|
|
memfill(&B(bcx), llptr, t, adt::DTOR_DONE);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2015-02-10 10:04:39 +01:00
|
|
|
|
pub fn init_zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
|
2015-11-19 12:36:31 +01:00
|
|
|
|
if cx.unreachable.get() {
|
|
|
|
|
return;
|
|
|
|
|
}
|
2015-02-10 10:04:39 +01:00
|
|
|
|
let _icx = push_ctxt("init_zero_mem");
|
|
|
|
|
let bcx = cx;
|
|
|
|
|
memfill(&B(bcx), llptr, t, 0);
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Always use this function instead of storing a constant byte to the memory
|
|
|
|
|
// in question. e.g. if you store a zero constant, LLVM will drown in vreg
|
2012-08-28 15:54:45 -07:00
|
|
|
|
// allocation for large data structures, and the generated code will be
|
|
|
|
|
// awful. (A telltale sign of this is large quantities of
|
|
|
|
|
// `mov [byte ptr foo],0` in the generated code.)
|
2015-02-10 10:04:39 +01:00
|
|
|
|
fn memfill<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>, byte: u8) {
|
|
|
|
|
let _icx = push_ctxt("memfill");
|
2013-07-21 16:19:34 +02:00
|
|
|
|
let ccx = b.ccx;
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let llty = type_of::type_of(ccx, ty);
|
2014-03-15 22:29:34 +02:00
|
|
|
|
let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
|
2015-08-05 09:46:59 +02:00
|
|
|
|
let llzeroval = C_u8(ccx, byte);
|
2014-08-06 11:59:40 +02:00
|
|
|
|
let size = machine::llsize_of(ccx, llty);
|
|
|
|
|
let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
|
2016-02-04 19:40:28 +02:00
|
|
|
|
call_memset(b, llptr, llzeroval, size, align, false);
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2016-02-04 19:40:28 +02:00
|
|
|
|
pub fn call_memset<'bcx, 'tcx>(b: &Builder<'bcx, 'tcx>,
|
|
|
|
|
ptr: ValueRef,
|
|
|
|
|
fill_byte: ValueRef,
|
|
|
|
|
size: ValueRef,
|
|
|
|
|
align: ValueRef,
|
|
|
|
|
volatile: bool) {
|
|
|
|
|
let ccx = b.ccx;
|
|
|
|
|
let ptr_width = &ccx.sess().target.target.target_pointer_width[..];
|
|
|
|
|
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
|
|
|
|
|
let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
|
|
|
|
|
let volatile = C_bool(ccx, volatile);
|
2016-02-26 01:10:40 +02:00
|
|
|
|
b.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
|
2016-02-04 19:40:28 +02:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
|
2016-01-08 20:40:52 +01:00
|
|
|
|
/// In general, when we create a scratch value in an alloca, the
/// creator may not know if the block (that initializes the scratch
/// with the desired value) actually dominates the cleanup associated
/// with the scratch value.
///
/// To deal with this, when we do an alloca (at the *start* of the whole
/// function body), we optionally can also set the associated
/// dropped-flag state of the alloca to "dropped."
#[derive(Copy, Clone, Debug)]
pub enum InitAlloca {
    /// Indicates that the state should have its associated drop flag
    /// set to "dropped" at the point of allocation.
    Dropped,
    /// Indicates the value of the associated drop flag is irrelevant.
    /// The embedded string literal is a programmer provided argument
    /// for why. This is a safeguard forcing compiler devs to
    /// document; it might be a good idea to also emit this as a
    /// comment with the alloca itself when emitting LLVM output.ll.
    Uninit(&'static str),
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
|
|
|
|
t: Ty<'tcx>,
|
|
|
|
|
name: &str) -> ValueRef {
|
|
|
|
|
// pnkfelix: I do not know why alloc_ty meets the assumptions for
|
|
|
|
|
// passing Uninit, but it was never needed (even back when we had
|
|
|
|
|
// the original boolean `zero` flag on `lvalue_scratch_datum`).
|
|
|
|
|
alloc_ty_init(bcx, t, InitAlloca::Uninit("all alloc_ty are uninit"), name)
|
|
|
|
|
}
|
|
|
|
|
|
2016-01-12 17:24:00 +01:00
|
|
|
|
/// This variant of `fn alloc_ty` does not necessarily assume that the
|
|
|
|
|
/// alloca should be created with no initial value. Instead the caller
|
|
|
|
|
/// controls that assumption via the `init` flag.
|
|
|
|
|
///
|
|
|
|
|
/// Note that if the alloca *is* initialized via `init`, then we will
|
|
|
|
|
/// also inject an `llvm.lifetime.start` before that initialization
|
|
|
|
|
/// occurs, and thus callers should not call_lifetime_start
|
|
|
|
|
/// themselves. But if `init` says "uninitialized", then callers are
|
|
|
|
|
/// in charge of choosing where to call_lifetime_start and
|
|
|
|
|
/// subsequently populate the alloca.
|
|
|
|
|
///
|
|
|
|
|
/// (See related discussion on PR #30823.)
|
2016-01-08 20:40:52 +01:00
|
|
|
|
pub fn alloc_ty_init<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
|
|
|
|
|
t: Ty<'tcx>,
|
|
|
|
|
init: InitAlloca,
|
|
|
|
|
name: &str) -> ValueRef {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("alloc_ty");
|
2012-08-28 15:54:45 -07:00
|
|
|
|
let ccx = bcx.ccx();
|
2013-06-16 02:29:52 +12:00
|
|
|
|
let ty = type_of::type_of(ccx, t);
|
2015-06-24 02:54:32 +03:00
|
|
|
|
assert!(!t.has_param_types());
|
2016-01-08 20:40:52 +01:00
|
|
|
|
match init {
|
|
|
|
|
InitAlloca::Dropped => alloca_dropped(bcx, t, name),
|
|
|
|
|
InitAlloca::Uninit(_) => alloca(bcx, ty, name),
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
pub fn alloca_dropped<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>, name: &str) -> ValueRef {
|
|
|
|
|
let _icx = push_ctxt("alloca_dropped");
|
|
|
|
|
let llty = type_of::type_of(cx.ccx(), ty);
|
|
|
|
|
if cx.unreachable.get() {
|
|
|
|
|
unsafe { return llvm::LLVMGetUndef(llty.ptr_to().to_ref()); }
|
|
|
|
|
}
|
|
|
|
|
let p = alloca(cx, llty, name);
|
|
|
|
|
let b = cx.fcx.ccx.builder();
|
|
|
|
|
b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
|
2016-01-12 17:21:11 +01:00
|
|
|
|
|
|
|
|
|
// This is just like `call_lifetime_start` (but latter expects a
|
|
|
|
|
// Block, which we do not have for `alloca_insert_pt`).
|
|
|
|
|
core_lifetime_emit(cx.ccx(), p, Lifetime::Start, |ccx, size, lifetime_start| {
|
|
|
|
|
let ptr = b.pointercast(p, Type::i8p(ccx));
|
2016-02-26 01:10:40 +02:00
|
|
|
|
b.call(lifetime_start, &[C_u64(ccx, size), ptr], None);
|
2016-01-12 17:21:11 +01:00
|
|
|
|
});
|
2016-01-08 20:40:52 +01:00
|
|
|
|
memfill(&b, p, ty, adt::DTOR_DONE);
|
|
|
|
|
p
|
2012-08-28 15:54:45 -07:00
|
|
|
|
}
|
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
|
2013-06-17 16:23:24 +12:00
|
|
|
|
let _icx = push_ctxt("alloca");
|
2013-12-18 14:54:42 -08:00
|
|
|
|
if cx.unreachable.get() {
|
2013-01-10 21:23:07 -08:00
|
|
|
|
unsafe {
|
2013-07-17 03:13:23 -05:00
|
|
|
|
return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
|
2013-01-10 21:23:07 -08:00
|
|
|
|
}
|
|
|
|
|
}
|
2016-04-07 22:35:11 +03:00
|
|
|
|
DebugLoc::None.apply(cx.fcx);
|
Improve usage of lifetime intrinsics in match expressions
The allocas used in match expression currently don't get good lifetime
markers, in fact they only get lifetime start markers, because their
lifetimes don't match to cleanup scopes.
While the bindings themselves are bog standard and just need a matching
pair of start and end markers, they might need them twice, once for a
guard clause and once for the match body.
The __llmatch alloca OTOH needs a single lifetime start marker, but
when there's a guard clause, it needs two end markers, because its
lifetime ends either when the guard doesn't match or after the match
body.
With these intrinsics in place, LLVM can now, for example, optimize
code like this:
````rust
enum E {
A1(int),
A2(int),
A3(int),
A4(int),
}
pub fn variants(x: E) {
match x {
A1(m) => bar(&m),
A2(m) => bar(&m),
A3(m) => bar(&m),
A4(m) => bar(&m),
}
}
````
To a single call to bar, using only a single stack slot. It still fails
to eliminate some of checks.
````gas
.Ltmp5:
.cfi_def_cfa_offset 16
movb (%rdi), %al
testb %al, %al
je .LBB3_5
movzbl %al, %eax
cmpl $1, %eax
je .LBB3_5
cmpl $2, %eax
.LBB3_5:
movq 8(%rdi), %rax
movq %rax, (%rsp)
leaq (%rsp), %rdi
callq _ZN3bar20hcb7a0d8be8e17e37daaE@PLT
popq %rax
retq
````
2014-07-23 17:39:13 +02:00
|
|
|
|
Alloca(cx, ty, name)
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-02 18:18:22 +02:00
|
|
|
|
pub fn set_value_name(val: ValueRef, name: &str) {
|
|
|
|
|
unsafe {
|
|
|
|
|
let name = CString::new(name).unwrap();
|
|
|
|
|
llvm::LLVMSetValueName(val, name.as_ptr());
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-16 12:21:08 +13:00
|
|
|
|
/// HIR visitor state: records whether any `return` expression was seen
/// while walking an expression tree.
struct FindNestedReturn {
    found: bool,
}
|
2014-07-29 12:25:06 -07:00
|
|
|
|
|
2014-12-16 12:21:08 +13:00
|
|
|
|
impl FindNestedReturn {
|
|
|
|
|
fn new() -> FindNestedReturn {
|
2015-11-19 12:36:31 +01:00
|
|
|
|
FindNestedReturn {
|
|
|
|
|
found: false,
|
|
|
|
|
}
|
2014-09-12 13:10:30 +03:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2014-12-16 12:21:08 +13:00
|
|
|
|
impl<'v> Visitor<'v> for FindNestedReturn {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
fn visit_expr(&mut self, e: &hir::Expr) {
|
2014-08-11 19:16:00 -07:00
|
|
|
|
match e.node {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
hir::ExprRet(..) => {
|
2014-12-16 12:21:08 +13:00
|
|
|
|
self.found = true;
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
2015-11-19 12:36:31 +01:00
|
|
|
|
_ => intravisit::walk_expr(self, e),
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
2013-04-18 15:53:29 -07:00
|
|
|
|
}
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
|
|
|
|
|
2016-05-03 05:23:22 +03:00
|
|
|
|
fn build_cfg<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
|
|
|
|
|
id: ast::NodeId)
|
|
|
|
|
-> (ast::NodeId, Option<cfg::CFG>) {
|
2014-12-16 12:21:08 +13:00
|
|
|
|
let blk = match tcx.map.find(id) {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeItem(i)) => {
|
2014-08-11 19:16:00 -07:00
|
|
|
|
match i.node {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
hir::ItemFn(_, _, _, _, _, ref blk) => {
|
2015-03-11 23:38:58 +02:00
|
|
|
|
blk
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
2016-03-29 01:46:02 +02:00
|
|
|
|
_ => bug!("unexpected item variant in has_nested_returns"),
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeTraitItem(trait_item)) => {
|
2015-03-10 12:28:44 +02:00
|
|
|
|
match trait_item.node {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
hir::MethodTraitItem(_, Some(ref body)) => body,
|
2015-03-14 12:05:00 -06:00
|
|
|
|
_ => {
|
2016-03-29 01:46:02 +02:00
|
|
|
|
bug!("unexpected variant: trait item other than a provided method in \
|
|
|
|
|
has_nested_returns")
|
2014-08-05 19:44:21 -07:00
|
|
|
|
}
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeImplItem(impl_item)) => {
|
2015-03-10 12:28:44 +02:00
|
|
|
|
match impl_item.node {
|
2015-11-12 15:57:51 +01:00
|
|
|
|
hir::ImplItemKind::Method(_, ref body) => body,
|
2015-03-14 12:05:00 -06:00
|
|
|
|
_ => {
|
2016-03-29 01:46:02 +02:00
|
|
|
|
bug!("unexpected variant: non-method impl item in has_nested_returns")
|
2015-03-11 23:38:58 +02:00
|
|
|
|
}
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeExpr(e)) => {
|
2014-08-11 19:16:00 -07:00
|
|
|
|
match e.node {
|
2016-04-20 14:44:07 -04:00
|
|
|
|
hir::ExprClosure(_, _, ref blk, _) => blk,
|
2016-03-29 01:46:02 +02:00
|
|
|
|
_ => bug!("unexpected expr variant in has_nested_returns"),
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeVariant(..)) |
|
|
|
|
|
Some(hir_map::NodeStructCtor(..)) => return (ast::DUMMY_NODE_ID, None),
|
2014-08-11 19:16:00 -07:00
|
|
|
|
|
|
|
|
|
// glue, shims, etc
|
2014-12-16 12:21:08 +13:00
|
|
|
|
None if id == ast::DUMMY_NODE_ID => return (ast::DUMMY_NODE_ID, None),
|
2014-07-29 12:25:06 -07:00
|
|
|
|
|
2016-03-29 01:46:02 +02:00
|
|
|
|
_ => bug!("unexpected variant in has_nested_returns: {}",
|
2016-04-06 13:51:55 +03:00
|
|
|
|
tcx.node_path_str(id)),
|
2014-12-16 12:21:08 +13:00
|
|
|
|
};
|
|
|
|
|
|
2015-03-10 12:28:44 +02:00
|
|
|
|
(blk.id, Some(cfg::CFG::new(tcx, blk)))
|
2014-12-16 12:21:08 +13:00
|
|
|
|
}
|
|
|
|
|
|
2014-12-16 12:35:59 +13:00
|
|
|
|
// Checks for the presence of "nested returns" in a function.
|
|
|
|
|
// Nested returns are when the inner expression of a return expression
|
|
|
|
|
// (the 'expr' in 'return expr') contains a return expression. Only cases
|
|
|
|
|
// where the outer return is actually reachable are considered. Implicit
|
|
|
|
|
// returns from the end of blocks are considered as well.
|
|
|
|
|
//
|
|
|
|
|
// This check is needed to handle the case where the inner expression is
|
|
|
|
|
// part of a larger expression that may have already partially-filled the
|
|
|
|
|
// return slot alloca. This can cause errors related to clean-up due to
|
|
|
|
|
// the clobbering of the existing value in the return slot.
|
2016-05-03 04:56:42 +03:00
|
|
|
|
fn has_nested_returns(tcx: TyCtxt, cfg: &cfg::CFG, blk_id: ast::NodeId) -> bool {
|
2016-04-06 17:28:59 -04:00
|
|
|
|
for index in cfg.graph.depth_traverse(cfg.entry, OUTGOING) {
|
2015-04-17 10:01:45 -04:00
|
|
|
|
let n = cfg.graph.node_data(index);
|
2015-02-19 15:27:25 +01:00
|
|
|
|
match tcx.map.find(n.id()) {
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeExpr(ex)) => {
|
|
|
|
|
if let hir::ExprRet(Some(ref ret_expr)) = ex.node {
|
2014-12-16 12:21:08 +13:00
|
|
|
|
let mut visitor = FindNestedReturn::new();
|
2016-02-09 21:24:11 +01:00
|
|
|
|
intravisit::walk_expr(&mut visitor, &ret_expr);
|
2014-12-16 12:21:08 +13:00
|
|
|
|
if visitor.found {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-07-31 00:04:06 -07:00
|
|
|
|
Some(hir_map::NodeBlock(blk)) if blk.id == blk_id => {
|
2014-12-16 12:21:08 +13:00
|
|
|
|
let mut visitor = FindNestedReturn::new();
|
2015-09-29 00:23:54 +03:00
|
|
|
|
walk_list!(&mut visitor, visit_expr, &blk.expr);
|
2014-12-16 12:21:08 +13:00
|
|
|
|
if visitor.found {
|
|
|
|
|
return true;
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
_ => {}
|
|
|
|
|
}
|
2014-08-11 19:16:00 -07:00
|
|
|
|
}
|
2014-12-16 12:21:08 +13:00
|
|
|
|
|
|
|
|
|
return false;
|
2013-04-18 15:53:29 -07:00
|
|
|
|
}
|
2011-06-15 11:19:50 -07:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
impl<'blk, 'tcx> FunctionContext<'blk, 'tcx> {
|
|
|
|
|
/// Create a function context for the given function.
|
|
|
|
|
/// Beware that you must call `fcx.init` or `fcx.bind_args`
|
|
|
|
|
/// before doing anything with the returned function context.
|
|
|
|
|
pub fn new(ccx: &'blk CrateContext<'blk, 'tcx>,
|
|
|
|
|
llfndecl: ValueRef,
|
|
|
|
|
fn_ty: FnType,
|
2016-07-05 03:44:26 -04:00
|
|
|
|
definition: Option<(Instance<'tcx>, &ty::FnSig<'tcx>, Abi, ast::NodeId)>,
|
2016-03-06 16:30:21 +02:00
|
|
|
|
block_arena: &'blk TypedArena<common::BlockS<'blk, 'tcx>>)
|
|
|
|
|
-> FunctionContext<'blk, 'tcx> {
|
2016-07-05 03:44:26 -04:00
|
|
|
|
let (param_substs, def_id, inlined_id) = match definition {
|
|
|
|
|
Some((instance, _, _, inlined_id)) => {
|
2016-04-06 08:34:03 +03:00
|
|
|
|
common::validate_substs(instance.substs);
|
2016-07-05 03:44:26 -04:00
|
|
|
|
(instance.substs, Some(instance.def), Some(inlined_id))
|
2016-04-06 08:34:03 +03:00
|
|
|
|
}
|
2016-08-08 23:39:49 +03:00
|
|
|
|
None => (Substs::empty(ccx.tcx()), None, None)
|
2016-04-06 08:34:03 +03:00
|
|
|
|
};
|
2016-03-06 16:30:21 +02:00
|
|
|
|
|
2016-03-08 14:38:13 +02:00
|
|
|
|
let local_id = def_id.and_then(|id| ccx.tcx().map.as_local_node_id(id));
|
|
|
|
|
|
2016-04-06 08:34:03 +03:00
|
|
|
|
debug!("FunctionContext::new({})",
|
2016-04-06 10:49:50 +03:00
|
|
|
|
definition.map_or(String::new(), |d| d.0.to_string()));
|
2016-03-08 14:38:13 +02:00
|
|
|
|
|
|
|
|
|
let cfg = inlined_id.map(|id| build_cfg(ccx.tcx(), id));
|
|
|
|
|
let nested_returns = if let Some((blk_id, Some(ref cfg))) = cfg {
|
2016-03-06 16:30:21 +02:00
|
|
|
|
has_nested_returns(ccx.tcx(), cfg, blk_id)
|
|
|
|
|
} else {
|
|
|
|
|
false
|
|
|
|
|
};
|
2014-01-27 14:18:36 +02:00
|
|
|
|
|
2016-08-24 06:36:37 +03:00
|
|
|
|
let no_debug = if let Some(id) = local_id {
|
|
|
|
|
ccx.tcx().map.attrs(id)
|
|
|
|
|
.iter().any(|item| item.check_name("no_debug"))
|
2016-03-08 14:38:13 +02:00
|
|
|
|
} else if let Some(def_id) = def_id {
|
2016-08-24 06:36:37 +03:00
|
|
|
|
ccx.sess().cstore.item_attrs(def_id)
|
|
|
|
|
.iter().any(|item| item.check_name("no_debug"))
|
2016-03-08 14:38:13 +02:00
|
|
|
|
} else {
|
2016-08-24 06:36:37 +03:00
|
|
|
|
false
|
2016-03-08 14:38:13 +02:00
|
|
|
|
};
|
|
|
|
|
|
2016-08-24 06:36:37 +03:00
|
|
|
|
let mir = def_id.and_then(|id| ccx.get_mir(id));
|
2016-03-08 14:38:13 +02:00
|
|
|
|
|
2016-04-06 10:49:50 +03:00
|
|
|
|
let debug_context = if let (false, Some(definition)) = (no_debug, definition) {
|
2016-07-05 03:44:26 -04:00
|
|
|
|
let (instance, sig, abi, _) = definition;
|
2016-04-06 15:37:19 +03:00
|
|
|
|
debuginfo::create_function_debug_context(ccx, instance, sig, abi, llfndecl)
|
2016-04-06 10:49:50 +03:00
|
|
|
|
} else {
|
|
|
|
|
debuginfo::empty_function_debug_context(ccx)
|
|
|
|
|
};
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
FunctionContext {
|
2016-03-09 22:46:00 +02:00
|
|
|
|
needs_ret_allocas: nested_returns && mir.is_none(),
|
2016-03-08 14:38:13 +02:00
|
|
|
|
mir: mir,
|
2016-03-06 16:30:21 +02:00
|
|
|
|
llfn: llfndecl,
|
|
|
|
|
llretslotptr: Cell::new(None),
|
|
|
|
|
param_env: ccx.tcx().empty_parameter_environment(),
|
|
|
|
|
alloca_insert_pt: Cell::new(None),
|
|
|
|
|
llreturn: Cell::new(None),
|
|
|
|
|
landingpad_alloca: Cell::new(None),
|
|
|
|
|
lllocals: RefCell::new(NodeMap()),
|
|
|
|
|
llupvars: RefCell::new(NodeMap()),
|
|
|
|
|
lldropflag_hints: RefCell::new(DropFlagHintsMap::new()),
|
|
|
|
|
fn_ty: fn_ty,
|
|
|
|
|
param_substs: param_substs,
|
2016-04-06 15:37:19 +03:00
|
|
|
|
span: inlined_id.and_then(|id| ccx.tcx().map.opt_span(id)),
|
2016-03-06 16:30:21 +02:00
|
|
|
|
block_arena: block_arena,
|
|
|
|
|
lpad_arena: TypedArena::new(),
|
|
|
|
|
ccx: ccx,
|
|
|
|
|
debug_context: debug_context,
|
|
|
|
|
scopes: RefCell::new(Vec::new()),
|
2016-03-08 14:38:13 +02:00
|
|
|
|
cfg: cfg.and_then(|(_, cfg)| cfg)
|
2013-05-21 15:25:44 -04:00
|
|
|
|
}
|
2013-06-20 16:42:44 +02:00
|
|
|
|
}
|
2014-07-05 01:52:12 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
/// Performs setup on a newly created function, creating the entry
|
|
|
|
|
/// scope block and allocating space for the return pointer.
|
2016-03-08 14:38:13 +02:00
|
|
|
|
pub fn init(&'blk self, skip_retptr: bool, fn_did: Option<DefId>)
|
|
|
|
|
-> Block<'blk, 'tcx> {
|
2016-03-06 16:30:21 +02:00
|
|
|
|
let entry_bcx = self.new_temp_block("entry-block");
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 09:25:14 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
// Use a dummy instruction as the insertion point for all allocas.
|
|
|
|
|
// This is later removed in FunctionContext::cleanup.
|
|
|
|
|
self.alloca_insert_pt.set(Some(unsafe {
|
|
|
|
|
Load(entry_bcx, C_null(Type::i8p(self.ccx)));
|
|
|
|
|
llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
|
|
|
|
|
}));
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 09:25:14 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
if !self.fn_ty.ret.is_ignore() && !skip_retptr {
|
|
|
|
|
// We normally allocate the llretslotptr, unless we
|
|
|
|
|
// have been instructed to skip it for immediate return
|
|
|
|
|
// values, or there is nothing to return at all.
|
|
|
|
|
|
|
|
|
|
// We create an alloca to hold a pointer of type `ret.original_ty`
|
|
|
|
|
// which will hold the pointer to the right alloca which has the
|
|
|
|
|
// final ret value
|
|
|
|
|
let llty = self.fn_ty.ret.memory_ty(self.ccx);
|
|
|
|
|
let slot = if self.needs_ret_allocas {
|
|
|
|
|
// Let's create the stack slot
|
|
|
|
|
let slot = AllocaFcx(self, llty.ptr_to(), "llretslotptr");
|
|
|
|
|
|
|
|
|
|
// and if we're using an out pointer, then store that in our newly made slot
|
|
|
|
|
if self.fn_ty.ret.is_indirect() {
|
|
|
|
|
let outptr = get_param(self.llfn, 0);
|
|
|
|
|
|
|
|
|
|
let b = self.ccx.builder();
|
|
|
|
|
b.position_before(self.alloca_insert_pt.get().unwrap());
|
|
|
|
|
b.store(outptr, slot);
|
|
|
|
|
}
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 09:25:14 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
slot
|
|
|
|
|
} else {
|
|
|
|
|
// But if there are no nested returns, we skip the indirection
|
|
|
|
|
// and have a single retslot
|
|
|
|
|
if self.fn_ty.ret.is_indirect() {
|
|
|
|
|
get_param(self.llfn, 0)
|
|
|
|
|
} else {
|
|
|
|
|
AllocaFcx(self, llty, "sret_slot")
|
|
|
|
|
}
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 09:25:14 +02:00
|
|
|
|
};
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
self.llretslotptr.set(Some(slot));
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// Create the drop-flag hints for every unfragmented path in the function.
|
|
|
|
|
let tcx = self.ccx.tcx();
|
|
|
|
|
let tables = tcx.tables.borrow();
|
|
|
|
|
let mut hints = self.lldropflag_hints.borrow_mut();
|
|
|
|
|
let fragment_infos = tcx.fragment_infos.borrow();
|
|
|
|
|
|
|
|
|
|
// Intern table for drop-flag hint datums.
|
|
|
|
|
let mut seen = HashMap::new();
|
|
|
|
|
|
2016-03-08 14:38:13 +02:00
|
|
|
|
let fragment_infos = fn_did.and_then(|did| fragment_infos.get(&did));
|
|
|
|
|
if let Some(fragment_infos) = fragment_infos {
|
2016-03-06 16:30:21 +02:00
|
|
|
|
for &info in fragment_infos {
|
|
|
|
|
|
|
|
|
|
let make_datum = |id| {
|
|
|
|
|
let init_val = C_u8(self.ccx, adt::DTOR_NEEDED_HINT);
|
|
|
|
|
let llname = &format!("dropflag_hint_{}", id);
|
|
|
|
|
debug!("adding hint {}", llname);
|
|
|
|
|
let ty = tcx.types.u8;
|
|
|
|
|
let ptr = alloc_ty(entry_bcx, ty, llname);
|
|
|
|
|
Store(entry_bcx, init_val, ptr);
|
|
|
|
|
let flag = datum::Lvalue::new_dropflag_hint("FunctionContext::init");
|
|
|
|
|
datum::Datum::new(ptr, ty, flag)
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
let (var, datum) = match info {
|
|
|
|
|
ty::FragmentInfo::Moved { var, .. } |
|
|
|
|
|
ty::FragmentInfo::Assigned { var, .. } => {
|
|
|
|
|
let opt_datum = seen.get(&var).cloned().unwrap_or_else(|| {
|
|
|
|
|
let ty = tables.node_types[&var];
|
|
|
|
|
if self.type_needs_drop(ty) {
|
|
|
|
|
let datum = make_datum(var);
|
|
|
|
|
seen.insert(var, Some(datum.clone()));
|
|
|
|
|
Some(datum)
|
|
|
|
|
} else {
|
|
|
|
|
// No drop call needed, so we don't need a dropflag hint
|
|
|
|
|
None
|
|
|
|
|
}
|
|
|
|
|
});
|
|
|
|
|
if let Some(datum) = opt_datum {
|
|
|
|
|
(var, datum)
|
2016-01-12 18:04:21 +01:00
|
|
|
|
} else {
|
2016-03-06 16:30:21 +02:00
|
|
|
|
continue
|
2016-01-12 18:04:21 +01:00
|
|
|
|
}
|
|
|
|
|
}
|
2016-03-06 16:30:21 +02:00
|
|
|
|
};
|
|
|
|
|
match info {
|
|
|
|
|
ty::FragmentInfo::Moved { move_expr: expr_id, .. } => {
|
|
|
|
|
debug!("FragmentInfo::Moved insert drop hint for {}", expr_id);
|
|
|
|
|
hints.insert(expr_id, DropHint::new(var, datum));
|
|
|
|
|
}
|
|
|
|
|
ty::FragmentInfo::Assigned { assignee_id: expr_id, .. } => {
|
|
|
|
|
debug!("FragmentInfo::Assigned insert drop hint for {}", expr_id);
|
|
|
|
|
hints.insert(expr_id, DropHint::new(var, datum));
|
|
|
|
|
}
|
Add dropflag hints (stack-local booleans) for unfragmented paths in trans.
Added code to maintain these hints at runtime, and to conditionalize
drop-filling and calls to destructors.
In this early stage, we are using hints, so we are always free to
leave out a flag for a path -- then we just pass `None` as the
dropflag hint in the corresponding schedule cleanup call. But, once a
path has a hint, we must at least maintain it: i.e. if the hint
exists, we must ensure it is never set to "moved" if the data in
question might actually have been initialized. It remains sound to
conservatively set the hint to "initialized" as long as the true
drop-flag embedded in the value itself is up-to-date.
----
Here are some high-level details I want to point out:
* We maintain the hint in Lvalue::post_store, marking the lvalue as
moved. (But also continue drop-filling if necessary.)
* We update the hint on ExprAssign.
* We pass along the hint in once closures that capture-by-move.
* You only call `drop_ty` for state that does not have an associated hint.
If you have a hint, you must call `drop_ty_core` instead.
(Originally I passed the hint into `drop_ty` as well, to make the
connection to a hint more apparent, but the vast majority of
current calls to `drop_ty` are in contexts where no hint is
available, so it just seemed like noise in the resulting diff.)
2015-06-07 09:25:14 +02:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2010-09-27 15:38:34 -07:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
entry_bcx
|
2014-01-15 14:39:08 -05:00
|
|
|
|
}
|
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
    /// Creates lvalue datums for each of the incoming function arguments,
    /// matches all argument patterns against them to produce bindings,
    /// and returns the entry block (see FunctionContext::init).
    ///
    /// Two parallel counters are maintained throughout:
    /// * `idx` indexes into `self.fn_ty.args` (one ABI descriptor per Rust-level
    ///   argument component, with fat pointers contributing an extra meta entry);
    /// * `llarg_idx` indexes the raw LLVM function parameters, which may be
    ///   offset by an indirect-return pointer and padding slots.
    fn bind_args(&'blk self,
                 args: &[hir::Arg],
                 abi: Abi,
                 id: ast::NodeId,
                 closure_env: closure::ClosureEnv,
                 arg_scope: cleanup::CustomScopeIndex)
                 -> Block<'blk, 'tcx> {
        let _icx = push_ctxt("FunctionContext::bind_args");
        let fn_did = self.ccx.tcx().map.local_def_id(id);
        let mut bcx = self.init(false, Some(fn_did));
        let arg_scope_id = cleanup::CustomScope(arg_scope);

        let mut idx = 0;
        // If the return value is passed indirectly (sret), LLVM param 0 is the
        // return slot pointer, so real arguments start at LLVM param 1.
        let mut llarg_idx = self.fn_ty.ret.is_indirect() as usize;

        // A `rust-call` ABI function (and only a non-closure one) receives its
        // trailing arguments packed into a single tuple that must be unpacked
        // below. For closures, the environment is the first ABI argument and is
        // consumed here, advancing both counters past it.
        let has_tupled_arg = match closure_env {
            closure::ClosureEnv::NotClosure => abi == Abi::RustCall,
            closure::ClosureEnv::Closure(..) => {
                closure_env.load(bcx, arg_scope_id);
                let env_arg = &self.fn_ty.args[idx];
                idx += 1;
                if env_arg.pad.is_some() {
                    llarg_idx += 1;
                }
                if !env_arg.is_ignore() {
                    llarg_idx += 1;
                }
                false
            }
        };

        // The tupled argument, when present, is always the last declared one;
        // DUMMY_NODE_ID never matches a real arg id, disabling the tuple path.
        let tupled_arg_id = if has_tupled_arg {
            args[args.len() - 1].id
        } else {
            ast::DUMMY_NODE_ID
        };

        // Return an array wrapping the ValueRefs that we get from `get_param` for
        // each argument into datums.
        //
        // For certain mode/type combinations, the raw llarg values are passed
        // by value. However, within the fn body itself, we want to always
        // have all locals and arguments be by-ref so that we can cancel the
        // cleanup and for better interaction with LLVM's debug info. So, if
        // the argument would be passed by value, we store it into an alloca.
        // This alloca should be optimized away by LLVM's mem-to-reg pass in
        // the event it's not truly needed.
        let uninit_reason = InitAlloca::Uninit("fn_arg populate dominates dtor");
        for hir_arg in args {
            let arg_ty = node_id_type(bcx, hir_arg.id);
            let arg_datum = if hir_arg.id != tupled_arg_id {
                let arg = &self.fn_ty.args[idx];
                idx += 1;
                if arg.is_indirect() && bcx.sess().opts.debuginfo != FullDebugInfo {
                    // Don't copy an indirect argument to an alloca, the caller
                    // already put it in a temporary alloca and gave it up, unless
                    // we emit extra-debug-info, which requires local allocas :(.
                    let llarg = get_param(self.llfn, llarg_idx as c_uint);
                    llarg_idx += 1;
                    // This function owns the caller-provided slot now, so it is
                    // responsible for its lifetime-end and its drop.
                    self.schedule_lifetime_end(arg_scope_id, llarg);
                    self.schedule_drop_mem(arg_scope_id, llarg, arg_ty, None);

                    datum::Datum::new(llarg,
                                      arg_ty,
                                      datum::Lvalue::new("FunctionContext::bind_args"))
                } else {
                    // By-value argument: spill it into a fresh alloca so the
                    // body sees it by-ref. Fat pointers occupy two ABI slots
                    // (data pointer + metadata), hence the extra `idx` bump.
                    unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx, arg_ty, "",
                                                                   uninit_reason,
                                                                   arg_scope_id, |bcx, dst| {
                        debug!("FunctionContext::bind_args: {:?}: {:?}", hir_arg, arg_ty);
                        let b = &bcx.build();
                        if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                            let meta = &self.fn_ty.args[idx];
                            idx += 1;
                            arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
                            meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
                        } else {
                            arg.store_fn_arg(b, &mut llarg_idx, dst);
                        }
                        bcx
                    }))
                }
            } else {
                // FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
                let tupled_arg_tys = match arg_ty.sty {
                    ty::TyTuple(ref tys) => tys,
                    _ => bug!("last argument of `rust-call` fn isn't a tuple?!")
                };

                // Unpack the `rust-call` tuple: each element arrives as its own
                // ABI argument and is stored into the matching field of one
                // scratch alloca holding the whole tuple.
                unpack_datum!(bcx, datum::lvalue_scratch_datum(bcx,
                                                               arg_ty,
                                                               "tupled_args",
                                                               uninit_reason,
                                                               arg_scope_id,
                                                               |bcx, llval| {
                    debug!("FunctionContext::bind_args: tupled {:?}: {:?}", hir_arg, arg_ty);
                    for (j, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
                        let dst = StructGEP(bcx, llval, j);
                        let arg = &self.fn_ty.args[idx];
                        idx += 1;
                        let b = &bcx.build();
                        if common::type_is_fat_ptr(bcx.tcx(), tupled_arg_ty) {
                            let meta = &self.fn_ty.args[idx];
                            idx += 1;
                            arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, dst));
                            meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, dst));
                        } else {
                            arg.store_fn_arg(b, &mut llarg_idx, dst);
                        }
                    }
                    bcx
                }))
            };

            let pat = &hir_arg.pat;
            bcx = if let Some(name) = simple_name(pat) {
                // Generate nicer LLVM for the common case of fn a pattern
                // like `x: T`
                set_value_name(arg_datum.val, &bcx.name(name));
                self.lllocals.borrow_mut().insert(pat.id, arg_datum);
                bcx
            } else {
                // General path. Copy out the values that are used in the
                // pattern.
                _match::bind_irrefutable_pat(bcx, pat, arg_datum.match_input(), arg_scope_id)
            };
            debuginfo::create_argument_metadata(bcx, hir_arg);
        }

        bcx
    }
|
2014-10-24 21:14:37 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
/// Ties up the llstaticallocas -> llloadenv -> lltop edges,
|
|
|
|
|
/// and builds the return block.
|
|
|
|
|
pub fn finish(&'blk self, last_bcx: Block<'blk, 'tcx>,
|
|
|
|
|
ret_debug_loc: DebugLoc) {
|
|
|
|
|
let _icx = push_ctxt("FunctionContext::finish");
|
2013-04-18 15:53:29 -07:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
let ret_cx = match self.llreturn.get() {
|
|
|
|
|
Some(llreturn) => {
|
|
|
|
|
if !last_bcx.terminated.get() {
|
|
|
|
|
Br(last_bcx, llreturn, DebugLoc::None);
|
|
|
|
|
}
|
|
|
|
|
raw_block(self, llreturn)
|
|
|
|
|
}
|
|
|
|
|
None => last_bcx,
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
self.build_return_block(ret_cx, ret_debug_loc);
|
|
|
|
|
|
2016-04-07 22:35:11 +03:00
|
|
|
|
DebugLoc::None.apply(self);
|
2016-03-06 16:30:21 +02:00
|
|
|
|
self.cleanup();
|
2013-04-18 15:53:29 -07:00
|
|
|
|
}
|
2013-07-28 16:40:35 +02:00
|
|
|
|
|
2016-03-06 16:30:21 +02:00
|
|
|
|
    // Builds the return block for a function.
    //
    // Emits the terminator for `ret_cx`: either `ret void` (indirect/ignored
    // returns, or unreachable end) or a `ret <value>` loaded/forwarded from the
    // return slot, applying the return-type cast or i1 truncation when needed.
    pub fn build_return_block(&self, ret_cx: Block<'blk, 'tcx>,
                              ret_debug_location: DebugLoc) {
        // No return slot at all, an unreachable tail, or an indirect return
        // written in place: nothing to load, just `ret void`.
        if self.llretslotptr.get().is_none() ||
           ret_cx.unreachable.get() ||
           (!self.needs_ret_allocas && self.fn_ty.ret.is_indirect()) {
            return RetVoid(ret_cx, ret_debug_location);
        }

        // With ret allocas, `llretslotptr` holds a pointer-to-pointer and must
        // be loaded once to reach the actual slot.
        let retslot = if self.needs_ret_allocas {
            Load(ret_cx, self.llretslotptr.get().unwrap())
        } else {
            self.llretslotptr.get().unwrap()
        };
        let retptr = Value(retslot);
        let llty = self.fn_ty.ret.original_ty;
        match (retptr.get_dominating_store(ret_cx), self.fn_ty.ret.cast) {
            // If there's only a single store to the ret slot, we can directly return
            // the value that was stored and omit the store and the alloca.
            // However, we only want to do this when there is no cast needed.
            (Some(s), None) => {
                let mut retval = s.get_operand(0).unwrap().get();
                s.erase_from_parent();

                // Drop the now-dead alloca too, if nothing else touches it.
                if retptr.has_no_uses() {
                    retptr.erase_from_parent();
                }

                if self.fn_ty.ret.is_indirect() {
                    // Forward the value into the caller-provided sret pointer
                    // (LLVM param 0) instead of returning it directly.
                    Store(ret_cx, retval, get_param(self.llfn, 0));
                    RetVoid(ret_cx, ret_debug_location)
                } else {
                    // bool is stored as i8 in memory but returned as i1.
                    if llty == Type::i1(self.ccx) {
                        retval = Trunc(ret_cx, retval, llty);
                    }
                    Ret(ret_cx, retval, ret_debug_location)
                }
            }
            (_, cast_ty) if self.fn_ty.ret.is_indirect() => {
                // Otherwise, copy the return value to the ret slot.
                assert_eq!(cast_ty, None);
                let llsz = llsize_of(self.ccx, self.fn_ty.ret.ty);
                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
                call_memcpy(&B(ret_cx), get_param(self.llfn, 0),
                            retslot, llsz, llalign as u32);
                RetVoid(ret_cx, ret_debug_location)
            }
            (_, Some(cast_ty)) => {
                // An ABI cast is required: reinterpret the slot as the cast
                // type and load through that pointer. The explicit alignment
                // keeps the load aligned to the *Rust* type, which may be less
                // aligned than the cast type assumes.
                let load = Load(ret_cx, PointerCast(ret_cx, retslot, cast_ty.ptr_to()));
                let llalign = llalign_of_min(self.ccx, self.fn_ty.ret.ty);
                unsafe {
                    llvm::LLVMSetAlignment(load, llalign);
                }
                Ret(ret_cx, load, ret_debug_location)
            }
            (_, None) => {
                // Plain load-and-return; for bool, assert the in-memory i8 is
                // in [0, 2) so LLVM can optimize, then truncate to i1.
                let retval = if llty == Type::i1(self.ccx) {
                    let val = LoadRangeAssert(ret_cx, retslot, 0, 2, llvm::False);
                    Trunc(ret_cx, val, llty)
                } else {
                    Load(ret_cx, retslot)
                };
                Ret(ret_cx, retval, ret_debug_location)
            }
        }
    }
|
2012-02-13 16:06:56 -08:00
|
|
|
|
}
|
|
|
|
|
|
2015-02-27 13:34:33 +02:00
|
|
|
|
/// Builds an LLVM function out of a source function.
///
/// If the function closes over its environment a closure will be returned.
pub fn trans_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                               decl: &hir::FnDecl,
                               body: &hir::Block,
                               llfndecl: ValueRef,
                               instance: Instance<'tcx>,
                               inlined_id: ast::NodeId,
                               sig: &ty::FnSig<'tcx>,
                               abi: Abi,
                               closure_env: closure::ClosureEnv) {
    ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);

    let _icx = push_ctxt("trans_closure");
    // Unwinding enabled: mark the function so the unwinder can walk through it.
    if !ccx.sess().no_landing_pads() {
        attributes::emit_uwtable(llfndecl, true);
    }

    // this is an info! to allow collecting monomorphization statistics
    // and to allow finding the last function before LLVM aborts from
    // release builds.
    info!("trans_closure(..., {})", instance);

    let fn_ty = FnType::new(ccx, abi, sig, &[]);

    // The arena must outlive the FunctionContext allocated into it; declaring
    // both up front ties their lifetimes together for the whole function.
    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = FunctionContext::new(ccx,
                               llfndecl,
                               fn_ty,
                               Some((instance, sig, abi, inlined_id)),
                               &arena);

    // MIR is now the only supported translation path: either hand off to the
    // MIR translator or ICE.
    if fcx.mir.is_some() {
        return mir::trans_mir(&fcx);
    } else {
        span_bug!(body.span, "attempted translation of `{}` w/o MIR", instance);
    }

    // NOTE(review): both branches above diverge (return / panic), so everything
    // from here down is unreachable legacy non-MIR translation, presumably kept
    // during the MIR transition — confirm before relying on or removing it.
    debuginfo::fill_scope_map_for_function(&fcx, decl, body, inlined_id);

    // cleanup scope for the incoming arguments
    let fn_cleanup_debug_loc = debuginfo::get_cleanup_debug_loc_for_ast_node(
        ccx, inlined_id, body.span, true);
    let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);

    // Set up arguments to the function.
    debug!("trans_closure: function: {:?}", Value(fcx.llfn));
    let bcx = fcx.bind_args(&decl.inputs, abi, inlined_id, closure_env, arg_scope);

    // Up until here, IR instructions for this function have explicitly not been annotated with
    // source code location, so we don't step into call setup code. From here on, source location
    // emitting should be enabled.
    debuginfo::start_emitting_source_locations(&fcx);

    let dest = if fcx.fn_ty.ret.is_ignore() {
        expr::Ignore
    } else {
        expr::SaveIn(fcx.get_ret_slot(bcx, "iret_slot"))
    };

    // This call to trans_block is the place where we bridge between
    // translation calls that don't have a return value (trans_crate,
    // trans_mod, trans_item, et cetera) and those that do
    // (trans_block, trans_expr, et cetera).
    let mut bcx = controlflow::trans_block(bcx, body, dest);

    // When ret allocas are in use, record the slot holding the body's result.
    match dest {
        expr::SaveIn(slot) if fcx.needs_ret_allocas => {
            Store(bcx, slot, fcx.llretslotptr.get().unwrap());
        }
        _ => {}
    }

    match fcx.llreturn.get() {
        Some(_) => {
            Br(bcx, fcx.return_exit_block(), DebugLoc::None);
            fcx.pop_custom_cleanup_scope(arg_scope);
        }
        None => {
            // Microoptimization writ large: avoid creating a separate
            // llreturn basic block
            bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
        }
    };

    // Put return block after all other blocks.
    // This somewhat improves single-stepping experience in debugger.
    unsafe {
        let llreturn = fcx.llreturn.get();
        if let Some(llreturn) = llreturn {
            llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
        }
    }

    // Insert the mandatory first few basic blocks before lltop.
    fcx.finish(bcx, fn_cleanup_debug_loc.debug_loc());
}
|
|
|
|
|
|
2016-05-14 05:41:42 +12:00
|
|
|
|
/// Translates a single monomorphized `Instance` into its (already declared)
/// LLVM function, dispatching on the kind of HIR item that defines it.
///
/// Panics (`bug!`) if the instance was never declared in `ccx.instances()` or
/// if its HIR node is not a function-like item.
pub fn trans_instance<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, instance: Instance<'tcx>) {
    // Cross-crate instances may have been inlined into this crate; resolve to
    // the local copy before looking anything up in the local maps.
    let local_instance = inline::maybe_inline_instance(ccx, instance);

    let fn_node_id = ccx.tcx().map.as_local_node_id(local_instance.def).unwrap();

    let _s = StatRecorder::new(ccx, ccx.tcx().node_path_str(fn_node_id));
    debug!("trans_instance(instance={:?})", instance);
    let _icx = push_ctxt("trans_instance");

    let item = ccx.tcx().map.find(fn_node_id).unwrap();

    // Monomorphize the function type: erase regions, then substitute the
    // instance's type parameters.
    let fn_ty = ccx.tcx().lookup_item_type(instance.def).ty;
    let fn_ty = ccx.tcx().erase_regions(&fn_ty);
    let fn_ty = monomorphize::apply_param_substs(ccx.tcx(), instance.substs, &fn_ty);

    // Normalize the signature so associated types are fully resolved before
    // computing the ABI-level FnType in trans_closure.
    let sig = ccx.tcx().erase_late_bound_regions(fn_ty.fn_sig());
    let sig = ccx.tcx().normalize_associated_type(&sig);
    let abi = fn_ty.fn_abi();

    // The LLVM declaration must already exist; translation only fills in the body.
    let lldecl = match ccx.instances().borrow().get(&local_instance) {
        Some(&val) => val,
        None => bug!("Instance `{:?}` not already declared", instance)
    };

    // Free functions, trait default methods (with a body), and impl methods
    // all funnel into the same body-translation entry point.
    match item {
        hir_map::NodeItem(&hir::Item {
            node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
        }) |
        hir_map::NodeTraitItem(&hir::TraitItem {
            node: hir::MethodTraitItem(
                hir::MethodSig { ref decl, .. }, Some(ref body)), ..
        }) |
        hir_map::NodeImplItem(&hir::ImplItem {
            node: hir::ImplItemKind::Method(
                hir::MethodSig { ref decl, .. }, ref body), ..
        }) => {
            trans_closure(ccx, decl, body, lldecl, instance,
                          fn_node_id, &sig, abi, closure::ClosureEnv::NotClosure);
        }
        _ => bug!("Instance is a {:?}?", item)
    }
}
|
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
|
pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
|
ctor_ty: Ty<'tcx>,
|
2016-01-16 16:03:09 +01:00
|
|
|
|
disr: Disr,
|
2016-02-23 21:21:50 +02:00
|
|
|
|
args: CallArgs,
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
dest: expr::Dest,
|
2014-12-11 13:53:30 +01:00
|
|
|
|
debug_loc: DebugLoc)
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
-> Result<'blk, 'tcx> {
|
2014-07-09 23:42:08 -07:00
|
|
|
|
|
|
|
|
|
let ccx = bcx.fcx.ccx;
|
|
|
|
|
|
2015-11-14 12:12:12 -08:00
|
|
|
|
let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
|
2016-03-11 02:33:20 +02:00
|
|
|
|
let sig = ccx.tcx().normalize_associated_type(&sig);
|
2016-07-31 22:33:41 +08:00
|
|
|
|
let result_ty = sig.output;
|
2014-07-09 23:42:08 -07:00
|
|
|
|
|
|
|
|
|
// Get location to store the result. If the user does not care about
|
|
|
|
|
// the result, just make a stack slot
|
|
|
|
|
let llresult = match dest {
|
|
|
|
|
expr::SaveIn(d) => d,
|
|
|
|
|
expr::Ignore => {
|
|
|
|
|
if !type_is_zero_size(ccx, result_ty) {
|
2015-08-25 18:24:16 +02:00
|
|
|
|
let llresult = alloc_ty(bcx, result_ty, "constructor_result");
|
|
|
|
|
call_lifetime_start(bcx, llresult);
|
|
|
|
|
llresult
|
2014-07-09 23:42:08 -07:00
|
|
|
|
} else {
|
2015-06-10 00:07:47 +02:00
|
|
|
|
C_undef(type_of::type_of(ccx, result_ty).ptr_to())
|
2014-07-09 23:42:08 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
if !type_is_zero_size(ccx, result_ty) {
|
|
|
|
|
match args {
|
2016-02-23 21:21:50 +02:00
|
|
|
|
ArgExprs(exprs) => {
|
2014-09-07 20:09:06 +03:00
|
|
|
|
let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
bcx = expr::trans_adt(bcx,
|
|
|
|
|
result_ty,
|
|
|
|
|
disr,
|
2015-02-18 14:48:57 -05:00
|
|
|
|
&fields[..],
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
None,
|
|
|
|
|
expr::SaveIn(llresult),
|
2014-12-11 13:53:30 +01:00
|
|
|
|
debug_loc);
|
2014-07-09 23:42:08 -07:00
|
|
|
|
}
|
2016-03-29 01:46:02 +02:00
|
|
|
|
_ => bug!("expected expr as arguments for variant/struct tuple constructor"),
|
2014-07-09 23:42:08 -07:00
|
|
|
|
}
|
2015-08-31 23:57:41 +12:00
|
|
|
|
} else {
|
|
|
|
|
// Just eval all the expressions (if any). Since expressions in Rust can have arbitrary
|
|
|
|
|
// contents, there could be side-effects we need from them.
|
|
|
|
|
match args {
|
2016-02-23 21:21:50 +02:00
|
|
|
|
ArgExprs(exprs) => {
|
2015-08-31 23:57:41 +12:00
|
|
|
|
for expr in exprs {
|
|
|
|
|
bcx = expr::trans_into(bcx, expr, expr::Ignore);
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-11-19 12:36:31 +01:00
|
|
|
|
_ => (),
|
2015-08-31 23:57:41 +12:00
|
|
|
|
}
|
2014-07-09 23:42:08 -07:00
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
// If the caller doesn't care about the result
|
|
|
|
|
// drop the temporary we made
|
|
|
|
|
let bcx = match dest {
|
|
|
|
|
expr::SaveIn(_) => bcx,
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
expr::Ignore => {
|
2015-01-30 17:14:17 +01:00
|
|
|
|
let bcx = glue::drop_ty(bcx, llresult, result_ty, debug_loc);
|
|
|
|
|
if !type_is_zero_size(ccx, result_ty) {
|
|
|
|
|
call_lifetime_end(bcx, llresult);
|
|
|
|
|
}
|
|
|
|
|
bcx
|
debuginfo: Make sure that all calls to drop glue are associated with debug locations.
This commit makes rustc emit debug locations for all call
and invoke statements in LLVM IR, if they are contained
within a function that debuginfo is enabled for. This is
important because LLVM does not handle the case where a
function body containing debuginfo is inlined into another
function with debuginfo, but the inlined call statement
does not have a debug location. In this case, LLVM will
not know where (in terms of source code coordinates) the
function was inlined to and we end up with some statements
still linked to the source locations in there original,
non-inlined function without any indication that they are
indeed an inline-copy. Later, when generating DWARF from
the IR, LLVM will interpret this as corrupt IR and abort.
Unfortunately, the undesirable case described above can
still occur when using LTO. If there is a crate compiled
without debuginfo calling into a crate compiled with
debuginfo, we again end up with the conditions triggering
the error. This is why some LTO tests still fail with the
dreaded assertion, if the standard library was built with
debuginfo enabled.
That is, `RUSTFLAGS_STAGE2=-g make rustc-stage2` will
succeed but `RUSTFLAGS_STAGE2=-g make check` will still
fail after this commit has been merged. This is a problem
that has to be dealt with separately.
Fixes #17201
Fixes #15816
Fixes #15156
2014-09-24 08:49:38 +02:00
|
|
|
|
}
|
2014-07-09 23:42:08 -07:00
|
|
|
|
};
|
|
|
|
|
|
|
|
|
|
Result::new(bcx, llresult)
|
|
|
|
|
}
|
|
|
|
|
|
2016-02-23 22:04:51 +02:00
|
|
|
|
/// Translate the body of a tuple-struct or tuple-variant constructor shim.
///
/// `llfndecl` is the already-declared LLVM function for the constructor;
/// this fills in its body: each argument is stored into the corresponding
/// field of the return slot, and the discriminant `disr` is written last.
///
/// * `ctor_id` — HIR node of the constructor; its fn type is looked up and
///   monomorphized with `param_substs`.
/// * `disr` — discriminant of the variant being constructed (0 for structs).
pub fn trans_ctor_shim<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                 ctor_id: ast::NodeId,
                                 disr: Disr,
                                 param_substs: &'tcx Substs<'tcx>,
                                 llfndecl: ValueRef) {
    let ctor_ty = ccx.tcx().node_id_to_type(ctor_id);
    let ctor_ty = monomorphize::apply_param_substs(ccx.tcx(), param_substs, &ctor_ty);

    // Erase late-bound regions and normalize associated types so the
    // signature is fully concrete before computing the ABI-level FnType.
    let sig = ccx.tcx().erase_late_bound_regions(&ctor_ty.fn_sig());
    let sig = ccx.tcx().normalize_associated_type(&sig);
    let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);

    // The arena must outlive the FunctionContext borrowing from it, hence
    // the two-step declaration/initialization.
    let (arena, fcx): (TypedArena<_>, FunctionContext);
    arena = TypedArena::new();
    fcx = FunctionContext::new(ccx, llfndecl, fn_ty, None, &arena);
    let bcx = fcx.init(false, None);

    assert!(!fcx.needs_ret_allocas);

    // If the return value is ignored by the ABI (e.g. zero-sized), there is
    // nothing to build.
    if !fcx.fn_ty.ret.is_ignore() {
        let dest = fcx.get_ret_slot(bcx, "eret_slot");
        let dest_val = adt::MaybeSizedValue::sized(dest); // Can return unsized value
        let repr = adt::represent_type(ccx, sig.output);
        // Skip the hidden out-pointer parameter when the return is indirect.
        let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
        let mut arg_idx = 0;
        for (i, arg_ty) in sig.inputs.into_iter().enumerate() {
            let lldestptr = adt::trans_field_ptr(bcx, &repr, dest_val, Disr::from(disr), i);
            let arg = &fcx.fn_ty.args[arg_idx];
            arg_idx += 1;
            let b = &bcx.build();
            if common::type_is_fat_ptr(bcx.tcx(), arg_ty) {
                // Fat pointers arrive as two ABI arguments: data pointer and
                // metadata; store them into the two halves of the field.
                let meta = &fcx.fn_ty.args[arg_idx];
                arg_idx += 1;
                arg.store_fn_arg(b, &mut llarg_idx, expr::get_dataptr(bcx, lldestptr));
                meta.store_fn_arg(b, &mut llarg_idx, expr::get_meta(bcx, lldestptr));
            } else {
                arg.store_fn_arg(b, &mut llarg_idx, lldestptr);
            }
        }
        // Write the discriminant after all fields are initialized.
        adt::trans_set_discr(bcx, &repr, dest, disr);
    }

    fcx.finish(bcx, DebugLoc::None);
}
|
|
|
|
|
|
2014-11-11 20:22:41 -05:00
|
|
|
|
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
|
|
|
|
|
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
|
|
|
|
|
// applicable to variable declarations and may not really make sense for
|
|
|
|
|
// Rust code in the first place but whitelist them anyway and trust that
|
|
|
|
|
// the user knows what s/he's doing. Who knows, unanticipated use cases
|
|
|
|
|
// may pop up in the future.
|
|
|
|
|
//
|
|
|
|
|
// ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
|
|
|
|
|
// and don't have to be, LLVM treats them as no-ops.
|
|
|
|
|
match name {
|
|
|
|
|
"appending" => Some(llvm::AppendingLinkage),
|
|
|
|
|
"available_externally" => Some(llvm::AvailableExternallyLinkage),
|
|
|
|
|
"common" => Some(llvm::CommonLinkage),
|
|
|
|
|
"extern_weak" => Some(llvm::ExternalWeakLinkage),
|
|
|
|
|
"external" => Some(llvm::ExternalLinkage),
|
|
|
|
|
"internal" => Some(llvm::InternalLinkage),
|
|
|
|
|
"linkonce" => Some(llvm::LinkOnceAnyLinkage),
|
|
|
|
|
"linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
|
|
|
|
|
"private" => Some(llvm::PrivateLinkage),
|
|
|
|
|
"weak" => Some(llvm::WeakAnyLinkage),
|
|
|
|
|
"weak_odr" => Some(llvm::WeakODRLinkage),
|
|
|
|
|
_ => None,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 20:02:09 -04:00
|
|
|
|
pub fn set_link_section(ccx: &CrateContext,
|
|
|
|
|
llval: ValueRef,
|
|
|
|
|
attrs: &[ast::Attribute]) {
|
|
|
|
|
if let Some(sect) = attr::first_attr_value_str_by_name(attrs, "link_section") {
|
2016-06-13 22:43:30 -07:00
|
|
|
|
if contains_null(§) {
|
|
|
|
|
ccx.sess().fatal(&format!("Illegal null byte in link_section value: `{}`", §));
|
|
|
|
|
}
|
|
|
|
|
unsafe {
|
|
|
|
|
let buf = CString::new(sect.as_bytes()).unwrap();
|
|
|
|
|
llvm::LLVMSetSection(llval, buf.as_ptr());
|
|
|
|
|
}
|
2015-08-03 15:38:06 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-26 12:18:39 -04:00
|
|
|
|
/// Create the `main` function which will initialise the rust runtime and call
/// users’ main function.
///
/// Does nothing if the crate has no entry function, or if the entry function
/// lives in a different codegen unit than the one being translated.
pub fn maybe_create_entry_wrapper(ccx: &CrateContext) {
    let (main_def_id, span) = match *ccx.sess().entry_fn.borrow() {
        Some((id, span)) => {
            (ccx.tcx().map.local_def_id(id), span)
        }
        None => return,
    };

    // check for the #[rustc_error] annotation, which forces an
    // error in trans. This is used to write compile-fail tests
    // that actually test that compilation succeeds without
    // reporting an error.
    if ccx.tcx().has_attr(main_def_id, "rustc_error") {
        ccx.tcx().sess.span_fatal(span, "compilation successful");
    }

    let instance = Instance::mono(ccx.shared(), main_def_id);

    if !ccx.codegen_unit().contains_item(&TransItem::Fn(instance)) {
        // We want to create the wrapper in the same codegen unit as Rust's main
        // function.
        return;
    }

    // Concrete LLVM function for the user's entry point.
    let main_llfn = Callee::def(ccx, main_def_id, instance.substs).reify(ccx).val;

    let et = ccx.sess().entry_type.get().unwrap();
    match et {
        config::EntryMain => {
            create_entry_fn(ccx, span, main_llfn, true);
        }
        config::EntryStart => create_entry_fn(ccx, span, main_llfn, false),
        config::EntryNone => {} // Do nothing.
    }

    /// Emit the C-ABI `main(argc, argv)` symbol. When `use_start_lang_item`
    /// is true, it forwards to the `start` lang item with the user's main as
    /// an opaque pointer argument; otherwise it tail-calls `rust_main`
    /// (a user-provided `#[start]` function) directly.
    fn create_entry_fn(ccx: &CrateContext,
                       sp: Span,
                       rust_main: ValueRef,
                       use_start_lang_item: bool) {
        // C signature: int main(int argc, char **argv)
        let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()], &ccx.int_type());

        if declare::get_defined_value(ccx, "main").is_some() {
            // FIXME: We should be smart and show a better diagnostic here.
            ccx.sess().struct_span_err(sp, "entry symbol `main` defined multiple times")
                      .help("did you use #[no_mangle] on `fn main`? Use #[start] instead")
                      .emit();
            ccx.sess().abort_if_errors();
            bug!();
        }
        let llfn = declare::declare_cfn(ccx, "main", llfty);

        // Build the single basic block of the wrapper.
        let llbb = unsafe {
            llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, "top\0".as_ptr() as *const _)
        };
        let bld = ccx.raw_builder();
        unsafe {
            llvm::LLVMPositionBuilderAtEnd(bld, llbb);

            debuginfo::gdb::insert_reference_to_gdb_debug_scripts_section_global(ccx);

            let (start_fn, args) = if use_start_lang_item {
                let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
                    Ok(id) => id,
                    Err(s) => ccx.sess().fatal(&s)
                };
                let empty_substs = Substs::empty(ccx.tcx());
                let start_fn = Callee::def(ccx, start_def_id, empty_substs).reify(ccx).val;
                let args = {
                    // `start` takes the user's main as an opaque *u8, so cast
                    // away the concrete fn type.
                    let opaque_rust_main =
                        llvm::LLVMBuildPointerCast(bld,
                                                   rust_main,
                                                   Type::i8p(ccx).to_ref(),
                                                   "rust_main\0".as_ptr() as *const _);

                    vec![opaque_rust_main, get_param(llfn, 0), get_param(llfn, 1)]
                };
                (start_fn, args)
            } else {
                debug!("using user-defined start fn");
                // Forward argc/argv straight through to the #[start] fn.
                let args = vec![get_param(llfn, 0 as c_uint), get_param(llfn, 1 as c_uint)];

                (rust_main, args)
            };

            let result = llvm::LLVMRustBuildCall(bld,
                                                 start_fn,
                                                 args.as_ptr(),
                                                 args.len() as c_uint,
                                                 ptr::null_mut(),
                                                 noname());

            // Return the runtime's exit code from `main`.
            llvm::LLVMBuildRet(bld, result);
        }
    }
}
|
|
|
|
|
|
2014-09-23 00:14:46 -07:00
|
|
|
|
/// Returns true if the string contains an interior null byte, which would
/// make it unrepresentable as a C string.
fn contains_null(s: &str) -> bool {
    s.as_bytes().contains(&0)
}
|
|
|
|
|
|
2016-05-25 08:46:36 +03:00
|
|
|
|
/// Encode crate metadata, embed its compressed form as a global in the
/// dedicated metadata LLVM module, and return the *uncompressed* bytes.
///
/// Returns an empty vector for pure executables, which carry no metadata.
fn write_metadata(cx: &SharedCrateContext,
                  reachable_ids: &NodeSet) -> Vec<u8> {
    use flate;

    // Metadata is only needed when producing some kind of library.
    let any_library = cx.sess()
                        .crate_types
                        .borrow()
                        .iter()
                        .any(|ty| *ty != config::CrateTypeExecutable);
    if !any_library {
        return Vec::new();
    }

    let cstore = &cx.tcx().sess.cstore;
    let metadata = cstore.encode_metadata(cx.tcx(),
                                          cx.export_map(),
                                          cx.link_meta(),
                                          reachable_ids,
                                          cx.mir_map(),
                                          cx.tcx().map.krate());
    // The embedded blob is the encoding-version header followed by the
    // deflate-compressed metadata.
    let mut compressed = cstore.metadata_encoding_version().to_vec();
    compressed.extend_from_slice(&flate::deflate_bytes(&metadata));

    let llmeta = C_bytes_in_context(cx.metadata_llcx(), &compressed[..]);
    let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
    let name = cx.metadata_symbol_name();
    let buf = CString::new(name).unwrap();
    let llglobal = unsafe {
        llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf.as_ptr())
    };
    unsafe {
        llvm::LLVMSetInitializer(llglobal, llconst);
        let section_name =
            cx.tcx().sess.cstore.metadata_section_name(&cx.sess().target.target);
        let name = CString::new(section_name).unwrap();
        llvm::LLVMSetSection(llglobal, name.as_ptr());

        // Also generate a .section directive to force no
        // flags, at least for ELF outputs, so that the
        // metadata doesn't get loaded into memory.
        let directive = format!(".section {}", section_name);
        let directive = CString::new(directive).unwrap();
        llvm::LLVMSetModuleInlineAsm(cx.metadata_llmod(), directive.as_ptr())
    }
    return metadata;
}
|
|
|
|
|
|
2014-08-01 10:29:44 -07:00
|
|
|
|
/// Find any symbols that are defined in one compilation unit, but not declared
/// in any other compilation unit. Give these symbols internal linkage.
///
/// Works in two passes over all translated codegen units: first collect the
/// names of symbols referenced across unit boundaries, then demote every
/// externally-visible definition that is neither referenced elsewhere, nor
/// reachable from other crates, nor pinned by an explicit linkage directive.
fn internalize_symbols<'a, 'tcx>(sess: &Session,
                                 ccxs: &CrateContextList<'a, 'tcx>,
                                 symbol_map: &SymbolMap<'tcx>,
                                 reachable: &FnvHashSet<&str>) {
    let scx = ccxs.shared();
    let tcx = scx.tcx();

    // In incr. comp. mode, we can't necessarily see all refs since we
    // don't generate LLVM IR for reused modules, so skip this
    // step. Later we should get smarter.
    if sess.opts.debugging_opts.incremental.is_some() {
        return;
    }

    // 'unsafe' because we are holding on to CStr's from the LLVM module within
    // this block.
    unsafe {
        let mut referenced_somewhere = FnvHashSet();

        // Collect all symbols that need to stay externally visible because they
        // are referenced via a declaration in some other codegen unit.
        for ccx in ccxs.iter_need_trans() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMGetLinkage(val);
                // We only care about external declarations (not definitions)
                // and available_externally definitions.
                let is_available_externally = linkage == llvm::AvailableExternallyLinkage as c_uint;
                let is_decl = llvm::LLVMIsDeclaration(val) != 0;

                if is_decl || is_available_externally {
                    let symbol_name = CStr::from_ptr(llvm::LLVMGetValueName(val));
                    referenced_somewhere.insert(symbol_name);
                }
            }
        }

        // Also collect all symbols for which we cannot adjust linkage, because
        // it is fixed by some directive in the source code (e.g. #[no_mangle]).
        let linkage_fixed_explicitly: FnvHashSet<_> = scx
            .translation_items()
            .borrow()
            .iter()
            .cloned()
            .filter(|trans_item|{
                let def_id = match *trans_item {
                    // Drop glue has no user-visible def-id and never has
                    // explicitly fixed linkage.
                    TransItem::DropGlue(..) => {
                        return false
                    },
                    TransItem::Fn(ref instance) => {
                        instance.def
                    }
                    TransItem::Static(node_id) => {
                        tcx.map.local_def_id(node_id)
                    }
                };

                trans_item.explicit_linkage(tcx).is_some() ||
                attr::contains_extern_indicator(tcx.sess.diagnostic(),
                                                &tcx.get_attrs(def_id))
            })
            .map(|trans_item| symbol_map.get_or_compute(scx, trans_item))
            .collect();

        // Examine each external definition. If the definition is not used in
        // any other compilation unit, and is not reachable from other crates,
        // then give it internal linkage.
        for ccx in ccxs.iter_need_trans() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMGetLinkage(val);

                let is_externally_visible = (linkage == llvm::ExternalLinkage as c_uint) ||
                                            (linkage == llvm::LinkOnceODRLinkage as c_uint) ||
                                            (linkage == llvm::WeakODRLinkage as c_uint);
                let is_definition = llvm::LLVMIsDeclaration(val) == 0;

                // If this is a definition (as opposed to just a declaration)
                // and externally visible, check if we can internalize it
                if is_definition && is_externally_visible {
                    let name_cstr = CStr::from_ptr(llvm::LLVMGetValueName(val));
                    let name_str = name_cstr.to_str().unwrap();
                    let name_cow = Cow::Borrowed(name_str);

                    let is_referenced_somewhere = referenced_somewhere.contains(&name_cstr);
                    let is_reachable = reachable.contains(&name_str);
                    let has_fixed_linkage = linkage_fixed_explicitly.contains(&name_cow);

                    if !is_referenced_somewhere && !is_reachable && !has_fixed_linkage {
                        llvm::LLVMSetLinkage(val, llvm::InternalLinkage);
                        // Clear the DLL storage class and any COMDAT group,
                        // which are meaningless on internal symbols.
                        llvm::LLVMSetDLLStorageClass(val,
                                                     llvm::DLLStorageClass::Default);
                        llvm::UnsetComdat(val);
                    }
                }
            }
        }
    }
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
// Create a `__imp_<symbol> = &symbol` global for every public static `symbol`.
// This is required to satisfy `dllimport` references to static data in .rlibs
// when using MSVC linker. We do this only for data, as linker can fix up
// code references on its own.
// See #26591, #27438
fn create_imps(cx: &CrateContextList) {
    // The x86 ABI seems to require that leading underscores are added to symbol
    // names, so we need an extra underscore on 32-bit. There's also a leading
    // '\x01' here which disables LLVM's symbol mangling (e.g. no extra
    // underscores added in front).
    let prefix = if cx.shared().sess().target.target.target_pointer_width == "32" {
        "\x01__imp__"
    } else {
        "\x01__imp_"
    };
    unsafe {
        for ccx in cx.iter_need_trans() {
            // Externally-visible global *definitions* are the candidates that
            // may be referenced through `dllimport`.
            let exported: Vec<_> = iter_globals(ccx.llmod())
                                       .filter(|&val| {
                                           llvm::LLVMGetLinkage(val) ==
                                           llvm::ExternalLinkage as c_uint &&
                                           llvm::LLVMIsDeclaration(val) == 0
                                       })
                                       .collect();

            let i8p_ty = Type::i8p(&ccx);
            for val in exported {
                // Synthesize `__imp_<name>` as an i8* pointing at the symbol.
                let name = CStr::from_ptr(llvm::LLVMGetValueName(val));
                let mut imp_name = prefix.as_bytes().to_vec();
                imp_name.extend(name.to_bytes());
                let imp_name = CString::new(imp_name).unwrap();
                let imp = llvm::LLVMAddGlobal(ccx.llmod(),
                                              i8p_ty.to_ref(),
                                              imp_name.as_ptr() as *const _);
                let init = llvm::LLVMConstBitCast(val, i8p_ty.to_ref());
                llvm::LLVMSetInitializer(imp, init);
                llvm::LLVMSetLinkage(imp, llvm::ExternalLinkage);
            }
        }
    }
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
/// Iterator over the values (globals or functions) of an LLVM module,
/// advanced via a raw LLVM "get next" entry point.
struct ValueIter {
    // Current value; a null pointer marks the end of iteration.
    cur: ValueRef,
    // LLVM function mapping a value to its successor in the module
    // (e.g. LLVMGetNextGlobal / LLVMGetNextFunction).
    step: unsafe extern "C" fn(ValueRef) -> ValueRef,
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
impl Iterator for ValueIter {
|
|
|
|
|
type Item = ValueRef;
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn next(&mut self) -> Option<ValueRef> {
|
|
|
|
|
let old = self.cur;
|
|
|
|
|
if !old.is_null() {
|
2015-10-17 20:15:26 -04:00
|
|
|
|
self.cur = unsafe { (self.step)(old) };
|
2015-08-21 00:41:07 -07:00
|
|
|
|
Some(old)
|
|
|
|
|
} else {
|
|
|
|
|
None
|
|
|
|
|
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
}
|
2015-08-21 00:41:07 -07:00
|
|
|
|
}
|
2014-08-01 10:29:44 -07:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn iter_globals(llmod: llvm::ModuleRef) -> ValueIter {
|
|
|
|
|
unsafe {
|
|
|
|
|
ValueIter {
|
|
|
|
|
cur: llvm::LLVMGetFirstGlobal(llmod),
|
|
|
|
|
step: llvm::LLVMGetNextGlobal,
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
2015-01-01 23:54:03 -05:00
|
|
|
|
|
2015-08-21 00:41:07 -07:00
|
|
|
|
fn iter_functions(llmod: llvm::ModuleRef) -> ValueIter {
|
|
|
|
|
unsafe {
|
|
|
|
|
ValueIter {
|
|
|
|
|
cur: llvm::LLVMGetFirstFunction(llmod),
|
|
|
|
|
step: llvm::LLVMGetNextFunction,
|
2014-08-01 10:29:44 -07:00
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
2015-07-28 17:19:08 -07:00
|
|
|
|
/// The context provided lists a set of reachable ids as calculated by
/// middle::reachable, but this contains far more ids and symbols than we're
/// actually exposing from the object file. This function will filter the set in
/// the context to the set of ids which correspond to symbols that are exposed
/// from the object file being generated.
///
/// This list is later used by linkers to determine the set of symbols needed to
/// be exposed from a dynamic library and it's also encoded into the metadata.
pub fn filter_reachable_ids(tcx: TyCtxt, reachable: NodeSet) -> NodeSet {
    reachable.into_iter().filter(|&id| {
        // Next, we want to ignore some FFI functions that are not exposed from
        // this crate. Reachable FFI functions can be lumped into two
        // categories:
        //
        // 1. Those that are included statically via a static library
        // 2. Those included otherwise (e.g. dynamically or via a framework)
        //
        // Although our LLVM module is not literally emitting code for the
        // statically included symbols, it's an export of our library which
        // needs to be passed on to the linker and encoded in the metadata.
        //
        // As a result, if this id is an FFI item (foreign item) then we only
        // let it through if it's included statically.
        match tcx.map.get(id) {
            hir_map::NodeForeignItem(..) => {
                tcx.sess.cstore.is_statically_included_foreign_item(id)
            }

            // Only consider nodes that actually have exported symbols.
            hir_map::NodeItem(&hir::Item {
                node: hir::ItemStatic(..), .. }) |
            hir_map::NodeItem(&hir::Item {
                node: hir::ItemFn(..), .. }) |
            hir_map::NodeImplItem(&hir::ImplItem {
                node: hir::ImplItemKind::Method(..), .. }) => {
                let def_id = tcx.map.local_def_id(id);
                let generics = tcx.lookup_generics(def_id);
                // Generic items are monomorphized per use and have no single
                // exported symbol, so only keep fully-monomorphic items.
                generics.parent_types == 0 && generics.types.is_empty()
            }

            _ => false
        }
    }).collect()
}
|
|
|
|
|
|
2016-05-03 05:23:22 +03:00
|
|
|
|
/// Top-level entry point for translating a whole crate to LLVM IR.
///
/// Collects and partitions translation items into codegen units, predefines
/// and then defines every item per unit, computes the set of reachable
/// symbol names, internalizes everything else, and packages the result as a
/// `CrateTranslation` for the later linking passes.
pub fn trans_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                             mir_map: &MirMap<'tcx>,
                             analysis: ty::CrateAnalysis,
                             incremental_hashes_map: &IncrementalHashesMap)
                             -> CrateTranslation {
    // Register everything below as one big dep-graph task.
    let _task = tcx.dep_graph.in_task(DepNode::TransCrate);

    // Be careful with this krate: obviously it gives access to the
    // entire contents of the krate. So if you push any subtasks of
    // `TransCrate`, you need to be careful to register "reads" of the
    // particular items that will be processed.
    let krate = tcx.map.krate();

    let ty::CrateAnalysis { export_map, reachable, name, .. } = analysis;
    let reachable = filter_reachable_ids(tcx, reachable);

    // Overflow checks: the -Z force-overflow-checks flag wins; otherwise
    // follow whether debug assertions are enabled.
    let check_overflow = if let Some(v) = tcx.sess.opts.debugging_opts.force_overflow_checks {
        v
    } else {
        tcx.sess.opts.debug_assertions
    };

    // Drop-flag checks: same override-then-default scheme as overflow checks.
    let check_dropflag = if let Some(v) = tcx.sess.opts.debugging_opts.force_dropflag_checks {
        v
    } else {
        tcx.sess.opts.debug_assertions
    };

    let link_meta = link::build_link_meta(incremental_hashes_map, name);

    // Context shared by all codegen units (type caches, metadata module, ...).
    let shared_ccx = SharedCrateContext::new(tcx,
                                             &mir_map,
                                             export_map,
                                             Sha256::new(),
                                             link_meta.clone(),
                                             reachable,
                                             check_overflow,
                                             check_dropflag);
    // Translate the metadata.
    let metadata = time(tcx.sess.time_passes(), "write metadata", || {
        write_metadata(&shared_ccx, shared_ccx.reachable())
    });

    // Metadata lives in its own LLVM module, separate from all codegen units.
    let metadata_module = ModuleTranslation {
        name: "metadata".to_string(),
        symbol_name_hash: 0, // we always rebuild metadata, at least for now
        source: ModuleSource::Translated(ModuleLlvm {
            llcx: shared_ccx.metadata_llcx(),
            llmod: shared_ccx.metadata_llmod(),
        }),
    };
    let no_builtins = attr::contains_name(&krate.attrs, "no_builtins");

    // Run the translation item collector and partition the collected items into
    // codegen units.
    let (codegen_units, symbol_map) = collect_and_partition_translation_items(&shared_ccx);

    let symbol_map = Rc::new(symbol_map);

    // Incremental compilation: see which codegen units can reuse the object
    // files produced by a previous compilation session.
    let previous_work_products = trans_reuse_previous_work_products(tcx,
                                                                    &codegen_units,
                                                                    &symbol_map);

    let crate_context_list = CrateContextList::new(&shared_ccx,
                                                   codegen_units,
                                                   previous_work_products,
                                                   symbol_map.clone());
    // Describe every codegen unit for the output: either a reused
    // (preexisting) work product or a freshly translated LLVM module.
    let modules: Vec<_> = crate_context_list.iter_all()
        .map(|ccx| {
            let source = match ccx.previous_work_product() {
                Some(buf) => ModuleSource::Preexisting(buf.clone()),
                None => ModuleSource::Translated(ModuleLlvm {
                    llcx: ccx.llcx(),
                    llmod: ccx.llmod(),
                }),
            };

            ModuleTranslation {
                name: String::from(ccx.codegen_unit().name()),
                symbol_name_hash: ccx.codegen_unit().compute_symbol_name_hash(tcx, &symbol_map),
                source: source,
            }
        })
        .collect();

    // Testing hook: check expected reuse/translation against #[rustc_*] attrs.
    assert_module_sources::assert_module_sources(tcx, &modules);

    // Skip crate items and just output metadata in -Z no-trans mode.
    if tcx.sess.opts.debugging_opts.no_trans {
        let linker_info = LinkerInfo::new(&shared_ccx, &[]);
        return CrateTranslation {
            modules: modules,
            metadata_module: metadata_module,
            link: link_meta,
            metadata: metadata,
            reachable: vec![],
            no_builtins: no_builtins,
            linker_info: linker_info
        };
    }

    // Instantiate translation items without filling out definitions yet...
    for ccx in crate_context_list.iter_need_trans() {
        let cgu = ccx.codegen_unit();
        let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);

        // Each CGU gets its own dep-graph task so its work product can be
        // tracked independently.
        tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
            for (trans_item, linkage) in trans_items {
                trans_item.predefine(&ccx, linkage);
            }
        });
    }

    // ... and now that we have everything pre-defined, fill out those definitions.
    for ccx in crate_context_list.iter_need_trans() {
        let cgu = ccx.codegen_unit();
        let trans_items = cgu.items_in_deterministic_order(tcx, &symbol_map);
        tcx.dep_graph.with_task(cgu.work_product_dep_node(), || {
            for (trans_item, _) in trans_items {
                trans_item.define(&ccx);
            }

            // If this codegen unit contains the main function, also create the
            // wrapper here
            maybe_create_entry_wrapper(&ccx);

            // Run replace-all-uses-with for statics that need it
            for &(old_g, new_g) in ccx.statics_to_rauw().borrow().iter() {
                unsafe {
                    // SAFETY: raw LLVM FFI; old_g is replaced by new_g cast
                    // to old_g's type, then the stale global is deleted.
                    let bitcast = llvm::LLVMConstPointerCast(new_g, llvm::LLVMTypeOf(old_g));
                    llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
                    llvm::LLVMDeleteGlobal(old_g);
                }
            }

            // Finalize debuginfo
            if ccx.sess().opts.debuginfo != NoDebugInfo {
                debuginfo::finalize(&ccx);
            }
        });
    }

    // Testing hook for #[rustc_symbol_name]/#[rustc_item_path] attributes.
    symbol_names_test::report_symbol_names(&shared_ccx);

    // Optional statistics dump (-Z trans-stats).
    if shared_ccx.sess().trans_stats() {
        let stats = shared_ccx.stats();
        println!("--- trans stats ---");
        println!("n_glues_created: {}", stats.n_glues_created.get());
        println!("n_null_glues: {}", stats.n_null_glues.get());
        println!("n_real_glues: {}", stats.n_real_glues.get());

        println!("n_fallback_instantiations: {}", stats.n_fallback_instantiations.get());

        println!("n_fns: {}", stats.n_fns.get());
        println!("n_monos: {}", stats.n_monos.get());
        println!("n_inlines: {}", stats.n_inlines.get());
        println!("n_closures: {}", stats.n_closures.get());
        println!("fn stats:");
        // Sort per-function stats by instruction count, descending.
        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
            insns_b.cmp(&insns_a)
        });
        for tuple in stats.fn_stats.borrow().iter() {
            match *tuple {
                (ref name, insns) => {
                    println!("{} insns, {}", insns, *name);
                }
            }
        }
    }

    // Optional per-instruction-kind counts (-Z count-llvm-insns).
    if shared_ccx.sess().count_llvm_insns() {
        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
            println!("{:7} {}", *v, *k);
        }
    }

    let sess = shared_ccx.sess();
    // Map every locally reachable item to its linker-visible symbol name.
    let mut reachable_symbols = shared_ccx.reachable().iter().map(|&id| {
        let def_id = shared_ccx.tcx().map.local_def_id(id);
        symbol_for_def_id(def_id, &shared_ccx, &symbol_map)
    }).collect::<Vec<_>>();

    // The entry wrapper is always called `main` at the symbol level.
    if sess.entry_fn.borrow().is_some() {
        reachable_symbols.push("main".to_string());
    }

    // Dylibs embed their metadata as a symbol that must stay visible.
    if sess.crate_types.borrow().contains(&config::CrateTypeDylib) {
        reachable_symbols.push(shared_ccx.metadata_symbol_name());
    }

    // For the purposes of LTO or when creating a cdylib, we add to the
    // reachable set all of the upstream reachable extern fns. These functions
    // are all part of the public ABI of the final product, so we need to
    // preserve them.
    //
    // Note that this happens even if LTO isn't requested or we're not creating
    // a cdylib. In those cases, though, we're not even reading the
    // `reachable_symbols` list later on so it should be ok.
    for cnum in sess.cstore.crates() {
        let syms = sess.cstore.reachable_ids(cnum);
        reachable_symbols.extend(syms.into_iter().filter(|did| {
            sess.cstore.is_extern_item(shared_ccx.tcx(), *did)
        }).map(|did| {
            symbol_for_def_id(did, &shared_ccx, &symbol_map)
        }));
    }

    // Demote everything not in `reachable_symbols` to internal linkage.
    time(shared_ccx.sess().time_passes(), "internalize symbols", || {
        internalize_symbols(sess,
                            &crate_context_list,
                            &symbol_map,
                            &reachable_symbols.iter()
                                              .map(|s| &s[..])
                                              .collect())
    });

    // MSVC rlibs need `__imp_` stub symbols; see create_imps for details.
    if sess.target.target.options.is_like_msvc &&
       sess.crate_types.borrow().iter().any(|ct| *ct == config::CrateTypeRlib) {
        create_imps(&crate_context_list);
    }

    let linker_info = LinkerInfo::new(&shared_ccx, &reachable_symbols);

    CrateTranslation {
        modules: modules,
        metadata_module: metadata_module,
        link: link_meta,
        metadata: metadata,
        reachable: reachable_symbols,
        no_builtins: no_builtins,
        linker_info: linker_info
    }
}
|
2015-11-18 05:38:50 -05:00
|
|
|
|
|
2016-07-21 12:49:59 -04:00
|
|
|
|
/// For each CGU, identify if we can reuse an existing object file (or
|
|
|
|
|
/// maybe other context).
|
|
|
|
|
fn trans_reuse_previous_work_products(tcx: TyCtxt,
|
|
|
|
|
codegen_units: &[CodegenUnit],
|
|
|
|
|
symbol_map: &SymbolMap)
|
2016-07-25 10:51:14 -04:00
|
|
|
|
-> Vec<Option<WorkProduct>> {
|
2016-07-21 12:49:59 -04:00
|
|
|
|
debug!("trans_reuse_previous_work_products()");
|
|
|
|
|
codegen_units
|
|
|
|
|
.iter()
|
|
|
|
|
.map(|cgu| {
|
|
|
|
|
let id = cgu.work_product_id();
|
|
|
|
|
|
|
|
|
|
let hash = cgu.compute_symbol_name_hash(tcx, symbol_map);
|
|
|
|
|
|
|
|
|
|
debug!("trans_reuse_previous_work_products: id={:?} hash={}", id, hash);
|
|
|
|
|
|
|
|
|
|
if let Some(work_product) = tcx.dep_graph.previous_work_product(&id) {
|
|
|
|
|
if work_product.input_hash == hash {
|
|
|
|
|
debug!("trans_reuse_previous_work_products: reusing {:?}", work_product);
|
2016-07-25 10:51:14 -04:00
|
|
|
|
return Some(work_product);
|
2016-07-21 12:49:59 -04:00
|
|
|
|
} else {
|
|
|
|
|
debug!("trans_reuse_previous_work_products: \
|
|
|
|
|
not reusing {:?} because hash changed to {:?}",
|
|
|
|
|
work_product, hash);
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
None
|
|
|
|
|
})
|
|
|
|
|
.collect()
|
|
|
|
|
}
|
|
|
|
|
|
2016-05-06 14:27:34 -04:00
|
|
|
|
/// Collect every translation item in the crate, build the symbol map for
/// them, and partition them into codegen units.
///
/// Also honors `-Z print-trans-items=<mode>`: `eager`/`lazy` select the
/// collection mode, and when the flag is set the final item-to-CGU mapping
/// (with per-CGU linkage) is printed to stdout.
fn collect_and_partition_translation_items<'a, 'tcx>(scx: &SharedCrateContext<'a, 'tcx>)
                                                     -> (Vec<CodegenUnit<'tcx>>, SymbolMap<'tcx>) {
    let time_passes = scx.sess().time_passes();

    // The -Z print-trans-items value doubles as the collection-mode switch;
    // anything other than "eager" falls back to lazy (with a warning for
    // unrecognized strings).
    let collection_mode = match scx.sess().opts.debugging_opts.print_trans_items {
        Some(ref s) => {
            let mode_string = s.to_lowercase();
            let mode_string = mode_string.trim();
            if mode_string == "eager" {
                TransItemCollectionMode::Eager
            } else {
                if mode_string != "lazy" {
                    let message = format!("Unknown codegen-item collection mode '{}'. \
                                           Falling back to 'lazy' mode.",
                                          mode_string);
                    scx.sess().warn(&message);
                }

                TransItemCollectionMode::Lazy
            }
        }
        None => TransItemCollectionMode::Lazy
    };

    // Walk the crate and gather all items that need to be translated, plus
    // the map of which items were pulled in by inlining.
    let (items, inlining_map) =
        time(time_passes, "translation item collection", || {
            collector::collect_crate_translation_items(&scx, collection_mode)
        });

    let symbol_map = SymbolMap::build(scx, items.iter().cloned());

    // Incremental builds partition per source-level module; otherwise use the
    // fixed unit count from -C codegen-units.
    let strategy = if scx.sess().opts.debugging_opts.incremental.is_some() {
        PartitioningStrategy::PerModule
    } else {
        PartitioningStrategy::FixedUnitCount(scx.sess().opts.cg.codegen_units)
    };

    let codegen_units = time(time_passes, "codegen unit partitioning", || {
        partitioning::partition(scx.tcx(),
                                items.iter().cloned(),
                                strategy,
                                &inlining_map,
                                scx.reachable())
    });

    // Sanity check: with fixed-count partitioning we must get exactly the
    // requested number of units.
    assert!(scx.tcx().sess.opts.cg.codegen_units == codegen_units.len() ||
            scx.tcx().sess.opts.debugging_opts.incremental.is_some());

    // Record the full item set on the shared context for later lookups.
    {
        let mut ccx_map = scx.translation_items().borrow_mut();

        for trans_item in items.iter().cloned() {
            ccx_map.insert(trans_item);
        }
    }

    // Debug dump for -Z print-trans-items: one sorted line per item, listing
    // every CGU it landed in together with an abbreviated linkage.
    if scx.sess().opts.debugging_opts.print_trans_items.is_some() {
        let mut item_to_cgus = HashMap::new();

        // Invert the CGU -> items mapping into item -> [(cgu, linkage)].
        for cgu in &codegen_units {
            for (&trans_item, &linkage) in cgu.items() {
                item_to_cgus.entry(trans_item)
                            .or_insert(Vec::new())
                            .push((cgu.name().clone(), linkage));
            }
        }

        let mut item_keys: Vec<_> = items
            .iter()
            .map(|i| {
                let mut output = i.to_string(scx.tcx());
                output.push_str(" @@");
                let mut empty = Vec::new();
                let mut cgus = item_to_cgus.get_mut(i).unwrap_or(&mut empty);
                // Sort + dedup so the output is deterministic.
                cgus.as_mut_slice().sort_by_key(|&(ref name, _)| name.clone());
                cgus.dedup();
                for &(ref cgu_name, linkage) in cgus.iter() {
                    output.push_str(" ");
                    output.push_str(&cgu_name[..]);

                    let linkage_abbrev = match linkage {
                        llvm::ExternalLinkage => "External",
                        llvm::AvailableExternallyLinkage => "Available",
                        llvm::LinkOnceAnyLinkage => "OnceAny",
                        llvm::LinkOnceODRLinkage => "OnceODR",
                        llvm::WeakAnyLinkage => "WeakAny",
                        llvm::WeakODRLinkage => "WeakODR",
                        llvm::AppendingLinkage => "Appending",
                        llvm::InternalLinkage => "Internal",
                        llvm::PrivateLinkage => "Private",
                        llvm::ExternalWeakLinkage => "ExternalWeak",
                        llvm::CommonLinkage => "Common",
                    };

                    output.push_str("[");
                    output.push_str(linkage_abbrev);
                    output.push_str("]");
                }
                output
            })
            .collect();

        item_keys.sort();

        for item in item_keys {
            println!("TRANS_ITEM {}", item);
        }
    }

    (codegen_units, symbol_map)
}
|
|
|
|
|
|
|
|
|
|
fn symbol_for_def_id<'a, 'tcx>(def_id: DefId,
|
|
|
|
|
scx: &SharedCrateContext<'a, 'tcx>,
|
|
|
|
|
symbol_map: &SymbolMap<'tcx>)
|
|
|
|
|
-> String {
|
|
|
|
|
// Just try to look things up in the symbol map. If nothing's there, we
|
|
|
|
|
// recompute.
|
|
|
|
|
if let Some(node_id) = scx.tcx().map.as_local_node_id(def_id) {
|
|
|
|
|
if let Some(sym) = symbol_map.get(TransItem::Static(node_id)) {
|
|
|
|
|
return sym.to_owned();
|
|
|
|
|
}
|
|
|
|
|
}
|
|
|
|
|
|
|
|
|
|
let instance = Instance::mono(scx, def_id);
|
|
|
|
|
|
|
|
|
|
symbol_map.get(TransItem::Fn(instance))
|
|
|
|
|
.map(str::to_owned)
|
|
|
|
|
.unwrap_or_else(|| instance.symbol_name(scx))
|
2015-11-02 14:46:39 +01:00
|
|
|
|
}
|