// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// trans.rs: Translate the completed AST to the LLVM IR.
//
// Some functions here, such as trans_block and trans_expr, return a value --
// the result of the translation to LLVM -- while others, such as trans_fn,
// trans_impl, and trans_item, are called only for the side effect of adding a
// particular definition to the LLVM IR output we're producing.
//
// Hopefully useful general knowledge about trans:
//
//   * There's no way to find out the Ty type of a ValueRef. Doing so
//     would be "trying to get the eggs out of an omelette" (credit:
//     pcwalton). You can, instead, find out its TypeRef by calling val_ty,
//     but one TypeRef corresponds to many `Ty`s; for instance, tup(int, int,
//     int) and rec(x=int, y=int, z=int) will have the same TypeRef.

#![allow(non_camel_case_types)]

pub use self::ValueOrigin::*;
pub use self::scalar_type::*;

use super::CrateTranslation;
use super::ModuleTranslation;

use back::link::{mangle_exported_name};
use back::{link, abi};
use lint;
use llvm::{BasicBlockRef, Linkage, ValueRef, Vector, get_param};
use llvm;
use metadata::{csearch, encoder, loader};
use middle::astencode;
use middle::lang_items::{LangItem, ExchangeMallocFnLangItem, StartFnLangItem};
use middle::subst;
use middle::weak_lang_items;
use middle::subst::{Subst, Substs};
use middle::ty::{mod, Ty};
use session::config::{mod, NoDebugInfo, FullDebugInfo};
use session::Session;
use trans::_match;
use trans::adt;
use trans::build::*;
use trans::builder::{Builder, noname};
use trans::callee;
use trans::cleanup::CleanupMethods;
use trans::cleanup;
use trans::closure;
use trans::common::{Block, C_bool, C_bytes_in_context, C_i32, C_integral};
use trans::common::{C_null, C_struct_in_context, C_u64, C_u8, C_uint, C_undef};
use trans::common::{CrateContext, ExternMap, FunctionContext};
use trans::common::{NodeInfo, Result};
use trans::common::{node_id_type, return_type_is_void};
use trans::common::{tydesc_info, type_is_immediate};
use trans::common::{type_is_zero_size, val_ty};
use trans::common;
use trans::consts;
use trans::context::SharedCrateContext;
use trans::controlflow;
use trans::datum;
use trans::debuginfo;
use trans::expr;
use trans::foreign;
use trans::glue;
use trans::inline;
use trans::intrinsic;
use trans::machine;
use trans::machine::{llsize_of, llsize_of_real, llalign_of_min};
use trans::meth;
use trans::monomorphize;
use trans::tvec;
use trans::type_::Type;
use trans::type_of;
use trans::type_of::*;
use trans::value::Value;
use util::common::indenter;
use util::ppaux::{Repr, ty_to_string};
use util::sha2::Sha256;
use util::nodemap::NodeMap;

use arena::TypedArena;
use libc::{c_uint, uint64_t};
use std::c_str::ToCStr;
use std::cell::{Cell, RefCell};
use std::collections::HashSet;
use std::mem;
use std::rc::Rc;
use std::{i8, i16, i32, i64};
use syntax::abi::{Rust, RustCall, RustIntrinsic, Abi};
use syntax::ast_util::local_def;
use syntax::attr::AttrMetaMethods;
use syntax::attr;
use syntax::codemap::Span;
use syntax::parse::token::InternedString;
use syntax::visit::Visitor;
use syntax::visit;
use syntax::{ast, ast_util, ast_map};
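
// Translation keeps a per-thread stack of short context names (the
// TASK_LOCAL_INSN_KEY below). push_ctxt pushes a name and returns an
// _InsnCtxt guard that pops it again on drop, and with_insn_ctxt gives read
// access to the current stack; it is purely a debugging aid for figuring out
// which part of trans emitted a given instruction. The typical use, seen
// throughout this file, is:
//
//     let _icx = push_ctxt("iter_structural_ty");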
thread_local!(static TASK_LOCAL_INSN_KEY: RefCell<Option<Vec<&'static str>>> = {
    RefCell::new(None)
})

pub fn with_insn_ctxt<F>(blk: F) where
    F: FnOnce(&[&'static str]),
{
    TASK_LOCAL_INSN_KEY.with(move |slot| {
        slot.borrow().as_ref().map(move |s| blk(s.as_slice()));
    })
}

pub fn init_insn_ctxt() {
    TASK_LOCAL_INSN_KEY.with(|slot| {
        *slot.borrow_mut() = Some(Vec::new());
    });
}

pub struct _InsnCtxt {
    _cannot_construct_outside_of_this_module: ()
}

#[unsafe_destructor]
impl Drop for _InsnCtxt {
    fn drop(&mut self) {
        TASK_LOCAL_INSN_KEY.with(|slot| {
            match slot.borrow_mut().as_mut() {
                Some(ctx) => { ctx.pop(); }
                None => {}
            }
        })
    }
}

pub fn push_ctxt(s: &'static str) -> _InsnCtxt {
    debug!("new InsnCtxt: {}", s);
    TASK_LOCAL_INSN_KEY.with(|slot| {
        match slot.borrow_mut().as_mut() {
            Some(ctx) => ctx.push(s),
            None => {}
        }
    });
    _InsnCtxt { _cannot_construct_outside_of_this_module: () }
}
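
// When the session's trans_stats() flag is set, a StatRecorder tracks how
// many LLVM instructions were emitted while translating one function: it
// snapshots ccx.stats().n_llvm_insns on creation and, on drop, records
// (name, delta) in fn_stats, bumps n_fns, and resets the counter so nested
// recordings do not compound.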
pub struct StatRecorder<'a, 'tcx: 'a> {
    ccx: &'a CrateContext<'a, 'tcx>,
    name: Option<String>,
    istart: uint,
}

impl<'a, 'tcx> StatRecorder<'a, 'tcx> {
    pub fn new(ccx: &'a CrateContext<'a, 'tcx>, name: String)
               -> StatRecorder<'a, 'tcx> {
        let istart = ccx.stats().n_llvm_insns.get();
        StatRecorder {
            ccx: ccx,
            name: Some(name),
            istart: istart,
        }
    }
}

#[unsafe_destructor]
impl<'a, 'tcx> Drop for StatRecorder<'a, 'tcx> {
    fn drop(&mut self) {
        if self.ccx.sess().trans_stats() {
            let iend = self.ccx.stats().n_llvm_insns.get();
            self.ccx.stats().fn_stats.borrow_mut().push((self.name.take().unwrap(),
                                                         iend - self.istart));
            self.ccx.stats().n_fns.set(self.ccx.stats().n_fns.get() + 1);
            // Reset LLVM insn count to avoid compound costs.
            self.ccx.stats().n_llvm_insns.set(self.istart);
        }
    }
}

// only use this for foreign function ABIs and glue, use `decl_rust_fn` for Rust functions
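// Declares (or fetches, if already declared) an LLVM function with the given
// name, LLVM type, and calling convention, and applies the attributes every
// declaration in this crate gets: noreturn for diverging functions,
// noredzone when the target or the `no_redzone` codegen option asks for it,
// unnamed_addr, and the "split-stack" attribute unless stack checking is
// disabled.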
pub fn decl_fn(ccx: &CrateContext, name: &str, cc: llvm::CallConv,
               ty: Type, output: ty::FnOutput) -> ValueRef {

    let llfn: ValueRef = name.with_c_str(|buf| {
        unsafe {
            llvm::LLVMGetOrInsertFunction(ccx.llmod(), buf, ty.to_ref())
        }
    });

    // diverging functions may unwind, but can never return normally
    if output == ty::FnDiverging {
        llvm::SetFunctionAttribute(llfn, llvm::NoReturnAttribute);
    }

    if ccx.tcx().sess.opts.cg.no_redzone
           .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) {
        llvm::SetFunctionAttribute(llfn, llvm::NoRedZoneAttribute)
    }

    llvm::SetFunctionCallConv(llfn, cc);
    // Function addresses in Rust are never significant, allowing functions to be merged.
    llvm::SetUnnamedAddr(llfn, true);

    if ccx.is_split_stack_supported() && !ccx.sess().opts.cg.no_stack_check {
        set_split_stack(llfn);
    }

    llfn
}

// only use this for foreign function ABIs and glue, use `decl_rust_fn` for Rust functions
pub fn decl_cdecl_fn(ccx: &CrateContext,
                     name: &str,
                     ty: Type,
                     output: Ty) -> ValueRef {
    decl_fn(ccx, name, llvm::CCallConv, ty, ty::FnConverging(output))
}

// only use this for foreign function ABIs and glue, use `get_extern_rust_fn` for Rust functions
pub fn get_extern_fn(ccx: &CrateContext,
                     externs: &mut ExternMap,
                     name: &str,
                     cc: llvm::CallConv,
                     ty: Type,
                     output: Ty)
                     -> ValueRef {
    match externs.get(name) {
        Some(n) => return *n,
        None => {}
    }
    let f = decl_fn(ccx, name, cc, ty, ty::FnConverging(output));
    externs.insert(name.to_string(), f);
    f
}
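
// Looks up (declaring and caching on first use) the LLVM declaration for a
// Rust function defined in another crate, copying onto it the LLVM
// attributes recorded for the item in that crate's metadata.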
fn get_extern_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>,
                                name: &str, did: ast::DefId) -> ValueRef {
    match ccx.externs().borrow().get(name) {
        Some(n) => return *n,
        None => ()
    }

    let f = decl_rust_fn(ccx, fn_ty, name);

    csearch::get_item_attrs(&ccx.sess().cstore, did, |attrs| {
        set_llvm_fn_attrs(ccx, attrs.as_slice(), f)
    });

    ccx.externs().borrow_mut().insert(name.to_string(), f);
    f
}
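
// The type of `self` seen by an unboxed closure's body depends on which call
// trait the closure implements: Fn takes `&self`, FnMut takes `&mut self`,
// and FnOnce takes `self` by value.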
pub fn self_type_for_unboxed_closure<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                               closure_id: ast::DefId,
                                               fn_ty: Ty<'tcx>)
                                               -> Ty<'tcx> {
    let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
    let unboxed_closure = &(*unboxed_closures)[closure_id];
    match unboxed_closure.kind {
        ty::FnUnboxedClosureKind => {
            ty::mk_imm_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
        }
        ty::FnMutUnboxedClosureKind => {
            ty::mk_mut_rptr(ccx.tcx(), ty::ReStatic, fn_ty)
        }
        ty::FnOnceUnboxedClosureKind => fn_ty
    }
}

pub fn kind_for_unboxed_closure(ccx: &CrateContext, closure_id: ast::DefId)
                                -> ty::UnboxedClosureKind {
    let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
    (*unboxed_closures)[closure_id].kind
}
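
// Declares an LLVM function whose signature matches the given Rust function
// or closure type: bare fns get no environment parameter, boxed closures
// take an opaque `i8*` environment, and unboxed closures pass their self
// type (by reference, mutable reference, or value, depending on their kind)
// under the RustCall ABI.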
pub fn decl_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                              fn_ty: Ty<'tcx>, name: &str) -> ValueRef {
    let (inputs, output, abi, env) = match fn_ty.sty {
        ty::ty_bare_fn(ref f) => {
            (f.sig.inputs.clone(), f.sig.output, f.abi, None)
        }
        ty::ty_closure(ref f) => {
            (f.sig.inputs.clone(), f.sig.output, f.abi, Some(Type::i8p(ccx)))
        }
        ty::ty_unboxed_closure(closure_did, _, ref substs) => {
            let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
            let unboxed_closure = &(*unboxed_closures)[closure_did];
            let function_type = unboxed_closure.closure_type.clone();
            let self_type = self_type_for_unboxed_closure(ccx, closure_did, fn_ty);
            let llenvironment_type = type_of_explicit_arg(ccx, self_type);
            (function_type.sig.inputs.iter().map(|t| t.subst(ccx.tcx(), substs)).collect(),
             function_type.sig.output.subst(ccx.tcx(), substs),
             RustCall,
             Some(llenvironment_type))
        }
        _ => panic!("expected closure or fn")
    };

    let llfty = type_of_rust_fn(ccx, env, inputs.as_slice(), output, abi);
    debug!("decl_rust_fn(input count={},type={})",
           inputs.len(),
           ccx.tn().type_to_string(llfty));

    let llfn = decl_fn(ccx, name, llvm::CCallConv, llfty, output);
    let attrs = get_fn_llvm_attributes(ccx, fn_ty);
    attrs.apply_llfn(llfn);

    llfn
}

pub fn decl_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                       fn_ty: Ty<'tcx>, name: &str) -> ValueRef {
    let llfn = decl_rust_fn(ccx, fn_ty, name);
    llvm::SetLinkage(llfn, llvm::InternalLinkage);
    llfn
}
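
// Declares (and caches) the LLVM global for a static defined in another
// crate, marking it thread-local here if the original definition carried the
// #[thread_local] attribute.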
pub fn get_extern_const<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, did: ast::DefId,
                                  t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    let ty = type_of(ccx, t);
    match ccx.externs().borrow_mut().get(&name) {
        Some(n) => return *n,
        None => ()
    }
    unsafe {
        let c = name.with_c_str(|buf| {
            llvm::LLVMAddGlobal(ccx.llmod(), ty.to_ref(), buf)
        });
        // Thread-local statics in some other crate need to *always* be linked
        // against in a thread-local fashion, so we need to be sure to apply the
        // thread-local attribute locally if it was present remotely. If we
        // don't do this then linker errors can be generated where the linker
        // complains that one object file has a thread local version of the
        // symbol and another one doesn't.
        ty::each_attr(ccx.tcx(), did, |attr| {
            if attr.check_name("thread_local") {
                llvm::set_thread_local(c, true);
            }
            true
        });
        ccx.externs().borrow_mut().insert(name.to_string(), c);
        return c;
    }
}

// Returns a pointer to the body for the box. The box may be an opaque
// box. The result will be cast to the type of body_t, if it is statically
// known.
pub fn at_box_body<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                               body_t: Ty<'tcx>, boxptr: ValueRef) -> ValueRef {
    let _icx = push_ctxt("at_box_body");
    let ccx = bcx.ccx();
    let ty = Type::at_box(ccx, type_of(ccx, body_t));
    let boxptr = PointerCast(bcx, boxptr, ty.ptr_to());
    GEPi(bcx, boxptr, &[0u, abi::BOX_FIELD_BODY])
}

fn require_alloc_fn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                info_ty: Ty<'tcx>, it: LangItem) -> ast::DefId {
    match bcx.tcx().lang_items.require(it) {
        Ok(id) => id,
        Err(s) => {
            bcx.sess().fatal(format!("allocation of `{}` {}",
                                     bcx.ty_to_string(info_ty),
                                     s).as_slice());
        }
    }
}

// The following malloc_raw_dyn* functions allocate a box to contain
// a given type, but with a potentially dynamic size.
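//
// malloc_raw_dyn calls the exchange_malloc lang item with the requested size
// and alignment and casts the returned pointer to llty_ptr;
// malloc_raw_dyn_proc additionally stores a pointer to the contents' drop
// glue into the box header (BOX_FIELD_DROP_GLUE).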
pub fn malloc_raw_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                  llty_ptr: Type,
                                  info_ty: Ty<'tcx>,
                                  size: ValueRef,
                                  align: ValueRef)
                                  -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_exchange");

    // Allocate space:
    let r = callee::trans_lang_call(bcx,
                                    require_alloc_fn(bcx, info_ty, ExchangeMallocFnLangItem),
                                    &[size, align],
                                    None);

    Result::new(r.bcx, PointerCast(r.bcx, r.val, llty_ptr))
}

pub fn malloc_raw_dyn_proc<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>)
                                       -> Result<'blk, 'tcx> {
    let _icx = push_ctxt("malloc_raw_dyn_proc");
    let ccx = bcx.ccx();

    // Grab the TypeRef type of ptr_ty.
    let ptr_ty = ty::mk_uniq(bcx.tcx(), t);
    let ptr_llty = type_of(ccx, ptr_ty);

    let llty = type_of(bcx.ccx(), t);
    let size = llsize_of(bcx.ccx(), llty);
    let llalign = C_uint(ccx, llalign_of_min(bcx.ccx(), llty));

    // Allocate space and store the destructor pointer:
    let Result {bcx, val: llbox} = malloc_raw_dyn(bcx, ptr_llty, t, size, llalign);
    let dtor_ptr = GEPi(bcx, llbox, &[0u, abi::BOX_FIELD_DROP_GLUE]);
    let drop_glue_field_ty = type_of(ccx, ty::mk_nil_ptr(bcx.tcx()));
    let drop_glue = PointerCast(bcx, glue::get_drop_glue(ccx, ty::mk_uniq(bcx.tcx(), t)),
                                drop_glue_field_ty);
    Store(bcx, drop_glue, dtor_ptr);

    Result::new(bcx, llbox)
}

// Type descriptor and type glue stuff
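
// Returns the type descriptor for t, declaring it on first use and caching
// the result in ccx.tydescs() so every use of t shares one tydesc.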
pub fn get_tydesc<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                            t: Ty<'tcx>) -> Rc<tydesc_info<'tcx>> {
    match ccx.tydescs().borrow().get(&t) {
        Some(inf) => return inf.clone(),
        _ => { }
    }

    ccx.stats().n_static_tydescs.set(ccx.stats().n_static_tydescs.get() + 1u);
    let inf = Rc::new(glue::declare_tydesc(ccx, t));

    ccx.tydescs().borrow_mut().insert(t, inf.clone());
    inf
}

#[allow(dead_code)] // useful
pub fn set_optimize_for_size(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::OptimizeForSizeAttribute)
}

pub fn set_no_inline(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::NoInlineAttribute)
}

#[allow(dead_code)] // useful
pub fn set_no_unwind(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::NoUnwindAttribute)
}

// Tell LLVM to emit the information necessary to unwind the stack for the
// function f.
pub fn set_uwtable(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::UWTableAttribute)
}

pub fn set_inline_hint(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::InlineHintAttribute)
}
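
// Translates Rust-level attributes on an item into LLVM function attributes:
// the #[inline] variants map to the corresponding inline hints,
// no_stack_check (and its deprecated synonym no_split_stack) removes the
// "split-stack" attribute, and #[cold] adds LLVM's cold attribute.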
pub fn set_llvm_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRef) {
    use syntax::attr::*;
    // Set the inline hint if there is one
    match find_inline_attr(attrs) {
        InlineHint => set_inline_hint(llfn),
        InlineAlways => set_always_inline(llfn),
        InlineNever => set_no_inline(llfn),
        InlineNone => { /* fallthrough */ }
    }

    for attr in attrs.iter() {
        let mut used = true;
        match attr.name().get() {
            "no_stack_check" => unset_split_stack(llfn),
            "no_split_stack" => {
                unset_split_stack(llfn);
                ccx.sess().span_warn(attr.span,
                                     "no_split_stack is a deprecated synonym for no_stack_check");
            }
            "cold" => unsafe {
                llvm::LLVMAddFunctionAttribute(llfn,
                                               llvm::FunctionIndex as c_uint,
                                               llvm::ColdAttribute as uint64_t)
            },
            _ => used = false,
        }
        if used {
            attr::mark_used(attr);
        }
    }
}

pub fn set_always_inline(f: ValueRef) {
    llvm::SetFunctionAttribute(f, llvm::AlwaysInlineAttribute)
}

pub fn set_split_stack(f: ValueRef) {
    "split-stack".with_c_str(|buf| {
        unsafe { llvm::LLVMAddFunctionAttrString(f, llvm::FunctionIndex as c_uint, buf); }
    })
}

pub fn unset_split_stack(f: ValueRef) {
    "split-stack".with_c_str(|buf| {
        unsafe { llvm::LLVMRemoveFunctionAttrString(f, llvm::FunctionIndex as c_uint, buf); }
    })
}

// Double-check that we never ask LLVM to declare the same symbol twice. It
// silently mangles such symbols, breaking our linkage model.
pub fn note_unique_llvm_symbol(ccx: &CrateContext, sym: String) {
    if ccx.all_llvm_symbols().borrow().contains(&sym) {
        ccx.sess().bug(format!("duplicate LLVM symbol: {}", sym).as_slice());
    }
    ccx.all_llvm_symbols().borrow_mut().insert(sym);
}
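
// Returns the destructor to call for a resource type: when type parameters
// are present the local definition is monomorphized, a local non-generic
// dtor is fetched through get_item_val, and a dtor defined in another crate
// is declared as an external function with the C calling convention.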
pub fn get_res_dtor<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                              did: ast::DefId,
                              t: Ty<'tcx>,
                              parent_id: ast::DefId,
                              substs: &subst::Substs<'tcx>)
                              -> ValueRef {
    let _icx = push_ctxt("trans_res_dtor");
    let did = inline::maybe_instantiate_inline(ccx, did);

    if !substs.types.is_empty() {
        assert_eq!(did.krate, ast::LOCAL_CRATE);

        // Since we're in trans we don't care for any region parameters
        let ref substs = subst::Substs::erased(substs.types.clone());

        let (val, _) = monomorphize::monomorphic_fn(ccx, did, substs, None);

        val
    } else if did.krate == ast::LOCAL_CRATE {
        get_item_val(ccx, did.node)
    } else {
        let tcx = ccx.tcx();
        let name = csearch::get_symbol(&ccx.sess().cstore, did);
        let class_ty = ty::lookup_item_type(tcx, parent_id).ty.subst(tcx, substs);
        let llty = type_of_dtor(ccx, class_ty);
        let dtor_ty = ty::mk_ctor_fn(ccx.tcx(),
                                     &[glue::get_drop_glue_type(ccx, t)],
                                     ty::mk_nil(ccx.tcx()));
        get_extern_fn(ccx,
                      &mut *ccx.externs().borrow_mut(),
                      name.as_slice(),
                      llvm::CCallConv,
                      llty,
                      dtor_ty)
    }
}

// Structural comparison: a rather involved form of glue.
pub fn maybe_name_value(cx: &CrateContext, v: ValueRef, s: &str) {
    if cx.sess().opts.cg.save_temps {
        s.with_c_str(|buf| {
            unsafe {
                llvm::LLVMSetValueName(v, buf)
            }
        })
    }
}

// Used only for creating scalar comparison glue.
pub enum scalar_type { nil_type, signed_int, unsigned_int, floating_point, }

impl Copy for scalar_type {}
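
// Classifies t into one of the scalar_type categories above and emits the
// matching comparison via compare_scalar_values.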
pub fn compare_scalar_types<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                        lhs: ValueRef,
                                        rhs: ValueRef,
                                        t: Ty<'tcx>,
                                        op: ast::BinOp)
                                        -> Result<'blk, 'tcx> {
    let f = |a| Result::new(cx, compare_scalar_values(cx, lhs, rhs, a, op));

    match t.sty {
        ty::ty_tup(ref tys) if tys.is_empty() => f(nil_type),
        ty::ty_bool | ty::ty_uint(_) | ty::ty_char => f(unsigned_int),
        ty::ty_ptr(mt) if ty::type_is_sized(cx.tcx(), mt.ty) => f(unsigned_int),
        ty::ty_int(_) => f(signed_int),
        ty::ty_float(_) => f(floating_point),
        // Should never get here, because t is scalar.
        _ => cx.sess().bug("non-scalar type passed to compare_scalar_types")
    }
}

// A helper function to do the actual comparison of scalar values.
pub fn compare_scalar_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                         lhs: ValueRef,
                                         rhs: ValueRef,
                                         nt: scalar_type,
                                         op: ast::BinOp)
                                         -> ValueRef {
    let _icx = push_ctxt("compare_scalar_values");
    fn die(cx: Block) -> ! {
        cx.sess().bug("compare_scalar_values: must be a comparison operator");
    }
    match nt {
        nil_type => {
            // We don't need to do actual comparisons for nil.
            // () == () holds but () < () does not.
            match op {
                ast::BiEq | ast::BiLe | ast::BiGe => return C_bool(cx.ccx(), true),
                ast::BiNe | ast::BiLt | ast::BiGt => return C_bool(cx.ccx(), false),
                // refinements would be nice
                _ => die(cx)
            }
        }
        floating_point => {
            let cmp = match op {
                ast::BiEq => llvm::RealOEQ,
                ast::BiNe => llvm::RealUNE,
                ast::BiLt => llvm::RealOLT,
                ast::BiLe => llvm::RealOLE,
                ast::BiGt => llvm::RealOGT,
                ast::BiGe => llvm::RealOGE,
                _ => die(cx)
            };
            return FCmp(cx, cmp, lhs, rhs);
        }
        signed_int => {
            let cmp = match op {
                ast::BiEq => llvm::IntEQ,
                ast::BiNe => llvm::IntNE,
                ast::BiLt => llvm::IntSLT,
                ast::BiLe => llvm::IntSLE,
                ast::BiGt => llvm::IntSGT,
                ast::BiGe => llvm::IntSGE,
                _ => die(cx)
            };
            return ICmp(cx, cmp, lhs, rhs);
        }
        unsigned_int => {
            let cmp = match op {
                ast::BiEq => llvm::IntEQ,
                ast::BiNe => llvm::IntNE,
                ast::BiLt => llvm::IntULT,
                ast::BiLe => llvm::IntULE,
                ast::BiGt => llvm::IntUGT,
                ast::BiGe => llvm::IntUGE,
                _ => die(cx)
            };
            return ICmp(cx, cmp, lhs, rhs);
        }
    }
}
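
// Element-wise comparison of two SIMD vectors. Integer vectors are compared
// with ICmp and the resulting <n x i1> mask is sign-extended back to the
// element width; floating point vector comparisons are rejected (see the
// comment inside).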
pub fn compare_simd_types<'blk, 'tcx>(
                    cx: Block<'blk, 'tcx>,
                    lhs: ValueRef,
                    rhs: ValueRef,
                    t: Ty<'tcx>,
                    size: uint,
                    op: ast::BinOp)
                    -> ValueRef {
    match t.sty {
        ty::ty_float(_) => {
            // The comparison operators for floating point vectors are challenging.
            // LLVM outputs a `< size x i1 >`, but if we perform a sign extension
            // then bitcast to a floating point vector, the result will be `-NaN`
            // for each truth value. Because of this they are unsupported.
            cx.sess().bug("compare_simd_types: comparison operators \
                           not supported for floating point SIMD types")
        },
        ty::ty_uint(_) | ty::ty_int(_) => {
            let cmp = match op {
                ast::BiEq => llvm::IntEQ,
                ast::BiNe => llvm::IntNE,
                ast::BiLt => llvm::IntSLT,
                ast::BiLe => llvm::IntSLE,
                ast::BiGt => llvm::IntSGT,
                ast::BiGe => llvm::IntSGE,
                _ => cx.sess().bug("compare_simd_types: must be a comparison operator"),
            };
            let return_ty = Type::vector(&type_of(cx.ccx(), t), size as u64);
            // LLVM outputs an `< size x i1 >`, so we need to perform a sign extension
            // to get the correctly sized type. This will compile to a single instruction
            // once the IR is converted to assembly if the SIMD instruction is supported
            // by the target architecture.
            SExt(cx, ICmp(cx, cmp, lhs, rhs), return_ty)
        },
        _ => cx.sess().bug("compare_simd_types: invalid SIMD type"),
    }
}

pub type val_and_ty_fn<'a, 'blk, 'tcx> =
    |Block<'blk, 'tcx>, ValueRef, Ty<'tcx>|: 'a -> Block<'blk, 'tcx>;

// Iterates through the elements of a structural type.
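// For unsized types the incoming av is a fat pointer; its data and extra
// words are split apart up front, and unsized tail fields are handed to the
// callback as freshly built scratch fat pointers.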
pub fn iter_structural_ty<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
|
|
|
av: ValueRef,
|
2014-09-29 22:11:30 +03:00
|
|
|
t: Ty<'tcx>,
|
2014-09-06 19:13:04 +03:00
|
|
|
f: val_and_ty_fn<'a, 'blk, 'tcx>)
|
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("iter_structural_ty");
|
2012-03-22 13:44:20 -07:00
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
fn iter_variant<'a, 'blk, 'tcx>(cx: Block<'blk, 'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
repr: &adt::Repr<'tcx>,
|
2014-09-06 19:13:04 +03:00
|
|
|
av: ValueRef,
|
2014-09-29 22:11:30 +03:00
|
|
|
variant: &ty::VariantInfo<'tcx>,
|
|
|
|
substs: &subst::Substs<'tcx>,
|
2014-09-06 19:13:04 +03:00
|
|
|
f: val_and_ty_fn<'a, 'blk, 'tcx>)
|
|
|
|
-> Block<'blk, 'tcx> {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("iter_variant");
|
2013-02-24 18:41:39 -08:00
|
|
|
let tcx = cx.tcx();
|
2012-03-15 09:47:03 -04:00
|
|
|
let mut cx = cx;
|
2013-02-24 18:41:39 -08:00
|
|
|
|
2013-08-03 12:45:23 -04:00
|
|
|
for (i, &arg) in variant.args.iter().enumerate() {
|
2013-03-02 16:08:49 -08:00
|
|
|
cx = f(cx,
|
|
|
|
adt::trans_field_ptr(cx, repr, av, variant.disr_val, i),
|
2014-05-13 11:35:42 -04:00
|
|
|
arg.subst(tcx, substs));
|
2011-07-01 12:36:49 +02:00
|
|
|
}
|
2012-08-01 17:30:05 -07:00
|
|
|
return cx;
|
2011-07-01 12:36:49 +02:00
|
|
|
}
|
2011-07-13 15:44:09 -07:00
|
|
|
|
2014-08-06 11:59:40 +02:00
|
|
|
let (data_ptr, info) = if ty::type_is_sized(cx.tcx(), t) {
|
|
|
|
(av, None)
|
|
|
|
} else {
|
2014-11-10 18:14:31 +01:00
|
|
|
let data = GEPi(cx, av, &[0, abi::FAT_PTR_ADDR]);
|
|
|
|
let info = GEPi(cx, av, &[0, abi::FAT_PTR_EXTRA]);
|
2014-08-06 11:59:40 +02:00
|
|
|
(Load(cx, data), Some(Load(cx, info)))
|
|
|
|
};
|
|
|
|
|
2012-03-15 09:47:03 -04:00
|
|
|
let mut cx = cx;
|
2014-10-31 10:51:16 +02:00
|
|
|
match t.sty {
|
2013-11-28 12:22:53 -08:00
|
|
|
ty::ty_struct(..) => {
|
2013-02-24 17:34:32 -08:00
|
|
|
let repr = adt::represent_type(cx.ccx(), t);
|
2013-11-21 15:42:55 -08:00
|
|
|
expr::with_field_tys(cx.tcx(), t, None, |discr, field_tys| {
|
2013-08-03 12:45:23 -04:00
|
|
|
for (i, field_ty) in field_tys.iter().enumerate() {
|
2014-08-06 11:59:40 +02:00
|
|
|
let field_ty = field_ty.mt.ty;
|
|
|
|
let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, discr, i);
|
|
|
|
|
|
|
|
let val = if ty::type_is_sized(cx.tcx(), field_ty) {
|
|
|
|
llfld_a
|
|
|
|
} else {
|
|
|
|
let boxed_ty = ty::mk_open(cx.tcx(), field_ty);
|
|
|
|
let scratch = datum::rvalue_scratch_datum(cx, boxed_ty, "__fat_ptr_iter");
|
2014-11-10 18:14:31 +01:00
|
|
|
Store(cx, llfld_a, GEPi(cx, scratch.val, &[0, abi::FAT_PTR_ADDR]));
|
|
|
|
Store(cx, info.unwrap(), GEPi(cx, scratch.val, &[0, abi::FAT_PTR_EXTRA]));
|
2014-08-06 11:59:40 +02:00
|
|
|
scratch.val
|
|
|
|
};
|
|
|
|
cx = f(cx, val, field_ty);
|
2012-08-28 15:54:45 -07:00
|
|
|
}
|
2013-11-21 15:42:55 -08:00
|
|
|
})
|
2011-07-27 14:19:39 +02:00
|
|
|
}
|
2014-10-18 10:46:57 -07:00
|
|
|
ty::ty_unboxed_closure(def_id, _, ref substs) => {
|
2014-05-28 22:26:56 -07:00
|
|
|
let repr = adt::represent_type(cx.ccx(), t);
|
2014-10-18 10:46:57 -07:00
|
|
|
let upvars = ty::unboxed_closure_upvars(cx.tcx(), def_id, substs);
|
2014-05-28 22:26:56 -07:00
|
|
|
for (i, upvar) in upvars.iter().enumerate() {
|
2014-08-06 11:59:40 +02:00
|
|
|
let llupvar = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
|
2014-05-28 22:26:56 -07:00
|
|
|
cx = f(cx, llupvar, upvar.ty);
|
|
|
|
}
|
|
|
|
}
|
2014-04-09 19:15:31 +12:00
|
|
|
ty::ty_vec(_, Some(n)) => {
|
2014-08-06 11:59:40 +02:00
|
|
|
let (base, len) = tvec::get_fixed_base_and_len(cx, data_ptr, n);
|
2014-04-06 13:54:41 +03:00
|
|
|
let unit_ty = ty::sequence_element_type(cx.tcx(), t);
|
|
|
|
cx = tvec::iter_vec_raw(cx, base, unit_ty, len, f);
|
2012-04-09 17:32:49 -07:00
|
|
|
}
|
2013-03-20 01:17:42 -04:00
|
|
|
ty::ty_tup(ref args) => {
|
2013-02-24 17:32:09 -08:00
|
|
|
let repr = adt::represent_type(cx.ccx(), t);
|
2013-08-03 12:45:23 -04:00
|
|
|
for (i, arg) in args.iter().enumerate() {
|
2014-08-06 11:59:40 +02:00
|
|
|
let llfld_a = adt::trans_field_ptr(cx, &*repr, data_ptr, 0, i);
|
2013-02-24 17:32:09 -08:00
|
|
|
cx = f(cx, llfld_a, *arg);
|
|
|
|
}
|
2011-08-15 11:40:26 +02:00
|
|
|
}
|
2012-12-04 10:50:00 -08:00
|
|
|
ty::ty_enum(tid, ref substs) => {
|
2014-01-15 14:39:08 -05:00
|
|
|
let fcx = cx.fcx;
|
|
|
|
let ccx = fcx.ccx;
|
2011-07-27 14:19:39 +02:00
|
|
|
|
2013-02-24 18:41:39 -08:00
|
|
|
let repr = adt::represent_type(ccx, t);
|
2014-03-15 22:29:34 +02:00
|
|
|
let variants = ty::enum_variants(ccx.tcx(), tid);
|
2013-02-24 18:41:39 -08:00
|
|
|
let n_variants = (*variants).len();
|
|
|
|
|
|
|
|
// NB: we must hit the discriminant first so that structural
|
|
|
|
// comparison know not to proceed when the discriminants differ.
|
|
|
|
|
2014-04-22 03:03:02 +03:00
|
|
|
match adt::trans_switch(cx, &*repr, av) {
|
2014-08-30 16:22:19 +02:00
|
|
|
(_match::Single, None) => {
|
2014-10-14 23:05:01 -07:00
|
|
|
cx = iter_variant(cx, &*repr, av, &*(*variants)[0],
|
2014-05-07 07:20:15 -04:00
|
|
|
substs, f);
|
2013-02-24 18:41:39 -08:00
|
|
|
}
|
2014-08-30 16:22:19 +02:00
|
|
|
(_match::Switch, Some(lldiscrim_a)) => {
|
2013-04-22 20:19:05 -07:00
|
|
|
cx = f(cx, lldiscrim_a, ty::mk_int());
|
2014-01-15 14:39:08 -05:00
|
|
|
let unr_cx = fcx.new_temp_block("enum-iter-unr");
|
2013-02-24 18:41:39 -08:00
|
|
|
Unreachable(unr_cx);
|
|
|
|
let llswitch = Switch(cx, lldiscrim_a, unr_cx.llbb,
|
|
|
|
n_variants);
|
2014-01-15 14:39:08 -05:00
|
|
|
let next_cx = fcx.new_temp_block("enum-iter-next");
|
2013-02-24 18:41:39 -08:00
|
|
|
|
2013-08-03 12:45:23 -04:00
|
|
|
for variant in (*variants).iter() {
|
2013-02-24 18:41:39 -08:00
|
|
|
let variant_cx =
|
2014-05-16 10:45:16 -07:00
|
|
|
fcx.new_temp_block(
|
2014-05-27 20:44:58 -07:00
|
|
|
format!("enum-iter-variant-{}",
|
2014-06-21 03:39:03 -07:00
|
|
|
variant.disr_val.to_string().as_slice())
|
2014-05-27 20:44:58 -07:00
|
|
|
.as_slice());
|
2014-04-22 03:03:02 +03:00
|
|
|
match adt::trans_case(cx, &*repr, variant.disr_val) {
|
2014-08-30 16:22:19 +02:00
|
|
|
_match::SingleResult(r) => {
|
2013-02-24 18:41:39 -08:00
|
|
|
AddCase(llswitch, r.val, variant_cx.llbb)
|
|
|
|
}
|
2014-03-05 16:36:01 +02:00
|
|
|
_ => ccx.sess().unimpl("value from adt::trans_case \
|
|
|
|
in iter_structural_ty")
|
2013-02-24 18:41:39 -08:00
|
|
|
}
|
2014-01-23 16:51:54 +02:00
|
|
|
let variant_cx =
|
2014-03-08 21:36:22 +01:00
|
|
|
iter_variant(variant_cx,
|
2014-04-22 03:03:02 +03:00
|
|
|
&*repr,
|
2014-08-06 11:59:40 +02:00
|
|
|
data_ptr,
|
2014-04-22 02:21:52 +03:00
|
|
|
&**variant,
|
2014-05-07 07:20:15 -04:00
|
|
|
substs,
|
2014-03-08 21:36:22 +01:00
|
|
|
|x,y,z| f(x,y,z));
|
2013-02-24 18:41:39 -08:00
|
|
|
Br(variant_cx, next_cx.llbb);
|
|
|
|
}
|
|
|
|
cx = next_cx;
|
|
|
|
}
|
2014-03-05 16:36:01 +02:00
|
|
|
_ => ccx.sess().unimpl("value from adt::trans_switch \
|
|
|
|
in iter_structural_ty")
|
2013-02-24 18:41:39 -08:00
|
|
|
}
|
2011-07-27 14:19:39 +02:00
|
|
|
}
      _ => {
          cx.sess().unimpl(format!("type in iter_structural_ty: {}",
                                   ty_to_string(cx.tcx(), t)).as_slice())
      }
    }
    return cx;
}

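// Casts the right-hand side of a shift expression so that it matches the
// width of the left-hand side, truncating or zero-extending as needed.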
pub fn cast_shift_expr_rhs(cx: Block,
                           op: ast::BinOp,
                           lhs: ValueRef,
                           rhs: ValueRef)
                           -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a,b| Trunc(cx, a, b),
                   |a,b| ZExt(cx, a, b))
}

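// Same as cast_shift_expr_rhs, but for constant operands: the cast is built
// with LLVM constant expressions rather than instructions.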
pub fn cast_shift_const_rhs(op: ast::BinOp,
                            lhs: ValueRef, rhs: ValueRef) -> ValueRef {
    cast_shift_rhs(op, lhs, rhs,
                   |a, b| unsafe { llvm::LLVMConstTrunc(a, b.to_ref()) },
                   |a, b| unsafe { llvm::LLVMConstZExt(a, b.to_ref()) })
}

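// LLVM requires both operands of a shift to have the same width, while the
// language allows the RHS to be a different integer type; `trunc` and `zext`
// supply the conversions used to reconcile the two.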
pub fn cast_shift_rhs<F, G>(op: ast::BinOp,
                            lhs: ValueRef,
                            rhs: ValueRef,
                            trunc: F,
                            zext: G)
                            -> ValueRef where
    F: FnOnce(ValueRef, Type) -> ValueRef,
    G: FnOnce(ValueRef, Type) -> ValueRef,
{
    // Shifts may have any size int on the rhs
    unsafe {
        if ast_util::is_shift_binop(op) {
            let mut rhs_llty = val_ty(rhs);
            let mut lhs_llty = val_ty(lhs);
            if rhs_llty.kind() == Vector { rhs_llty = rhs_llty.element_type() }
            if lhs_llty.kind() == Vector { lhs_llty = lhs_llty.element_type() }
            let rhs_sz = llvm::LLVMGetIntTypeWidth(rhs_llty.to_ref());
            let lhs_sz = llvm::LLVMGetIntTypeWidth(lhs_llty.to_ref());
            if lhs_sz < rhs_sz {
                trunc(rhs, lhs_llty)
            } else if lhs_sz > rhs_sz {
                // FIXME (#1877): If shifting by negative values becomes
                // defined behavior then this is wrong.
                zext(rhs, lhs_llty)
            } else {
                rhs
            }
        } else {
            rhs
        }
    }
}

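// Emits the runtime checks for integer division and remainder: a zero
// divisor always fails, and for signed operands the MIN / -1 case (which
// would overflow, and is undefined behaviour for LLVM's sdiv/srem) fails too.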
pub fn fail_if_zero_or_overflows<'blk, 'tcx>(
                                cx: Block<'blk, 'tcx>,
                                span: Span,
                                divrem: ast::BinOp,
                                lhs: ValueRef,
                                rhs: ValueRef,
                                rhs_t: Ty<'tcx>)
                                -> Block<'blk, 'tcx> {
    let (zero_text, overflow_text) = if divrem == ast::BiDiv {
        ("attempted to divide by zero",
         "attempted to divide with overflow")
    } else {
        ("attempted remainder with a divisor of zero",
         "attempted remainder with overflow")
    };
    let (is_zero, is_signed) = match rhs_t.sty {
        ty::ty_int(t) => {
            let zero = C_integral(Type::int_from_ty(cx.ccx(), t), 0u64, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero), true)
        }
        ty::ty_uint(t) => {
            let zero = C_integral(Type::uint_from_ty(cx.ccx(), t), 0u64, false);
            (ICmp(cx, llvm::IntEQ, rhs, zero), false)
        }
        _ => {
            cx.sess().bug(format!("fail-if-zero on unexpected type: {}",
                                  ty_to_string(cx.tcx(), rhs_t)).as_slice());
        }
    };
    let bcx = with_cond(cx, is_zero, |bcx| {
        controlflow::trans_fail(bcx, span, InternedString::new(zero_text))
    });

    // To quote LLVM's documentation for the sdiv instruction:
    //
    // Division by zero leads to undefined behavior. Overflow also leads
    // to undefined behavior; this is a rare case, but can occur, for
    // example, by doing a 32-bit division of -2147483648 by -1.
    //
    // In order to avoid undefined behavior, we perform runtime checks for
    // signed division/remainder which would trigger overflow. For unsigned
    // integers, no action beyond checking for zero need be taken.
    if is_signed {
        let (llty, min) = match rhs_t.sty {
            ty::ty_int(t) => {
                let llty = Type::int_from_ty(cx.ccx(), t);
                let min = match t {
                    ast::TyI if llty == Type::i32(cx.ccx()) => i32::MIN as u64,
                    ast::TyI => i64::MIN as u64,
                    ast::TyI8 => i8::MIN as u64,
                    ast::TyI16 => i16::MIN as u64,
                    ast::TyI32 => i32::MIN as u64,
                    ast::TyI64 => i64::MIN as u64,
                };
                (llty, min)
            }
            _ => unreachable!(),
        };
        let minus_one = ICmp(bcx, llvm::IntEQ, rhs,
                             C_integral(llty, -1, false));
        with_cond(bcx, minus_one, |bcx| {
            let is_min = ICmp(bcx, llvm::IntEQ, lhs,
                              C_integral(llty, min, true));
            with_cond(bcx, is_min, |bcx| {
                controlflow::trans_fail(bcx, span,
                                        InternedString::new(overflow_text))
            })
        })
    } else {
        bcx
    }
}

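// Returns a ValueRef for an item defined in another crate, declaring it
// locally as a Rust function, a foreign function, or a constant depending on
// its type.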
pub fn trans_external_path<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
                                     did: ast::DefId, t: Ty<'tcx>) -> ValueRef {
    let name = csearch::get_symbol(&ccx.sess().cstore, did);
    match t.sty {
        ty::ty_bare_fn(ref fn_ty) => {
            match ccx.sess().target.target.adjust_abi(fn_ty.abi) {
                Rust | RustCall => {
                    get_extern_rust_fn(ccx, t, name.as_slice(), did)
                }
                RustIntrinsic => {
                    ccx.sess().bug("unexpected intrinsic in trans_external_path")
                }
                _ => {
                    foreign::register_foreign_item_fn(ccx, fn_ty.abi, t,
                                                      name.as_slice())
                }
            }
        }
        ty::ty_closure(_) => {
            get_extern_rust_fn(ccx, t, name.as_slice(), did)
        }
        _ => {
            get_extern_const(ccx, did, t)
        }
    }
}

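// Emits a call to `llfn`, using the LLVM `invoke` instruction (with a
// landing pad) when unwinding must be caught here, and a plain `call`
// otherwise.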
pub fn invoke<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                          llfn: ValueRef,
                          llargs: &[ValueRef],
                          fn_ty: Ty<'tcx>,
                          call_info: Option<NodeInfo>,
                          // FIXME(15064) is_lang_item is a horrible hack, please remove it
                          // at the soonest opportunity.
                          is_lang_item: bool)
                          -> (ValueRef, Block<'blk, 'tcx>) {
    let _icx = push_ctxt("invoke_");
    if bcx.unreachable.get() {
        return (C_null(Type::i8(bcx.ccx())), bcx);
    }

    // FIXME(15064) Lang item methods may (in the reflect case) not have proper
    // types, so doing an attribute lookup will fail.
    let attributes = if is_lang_item {
        llvm::AttrBuilder::new()
    } else {
        get_fn_llvm_attributes(bcx.ccx(), fn_ty)
    };

    match bcx.opt_node_id {
        None => {
            debug!("invoke at ???");
        }
        Some(id) => {
            debug!("invoke at {}", bcx.tcx().map.node_to_string(id));
        }
    }

    if need_invoke(bcx) {
        debug!("invoking {} at {}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs.iter() {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }
        let normal_bcx = bcx.fcx.new_temp_block("normal-return");
        let landing_pad = bcx.fcx.get_landing_pad();

        match call_info {
            Some(info) => debuginfo::set_source_location(bcx.fcx, info.id, info.span),
            None => debuginfo::clear_source_location(bcx.fcx)
        };

        let llresult = Invoke(bcx,
                              llfn,
                              llargs.as_slice(),
                              normal_bcx.llbb,
                              landing_pad,
                              Some(attributes));
        return (llresult, normal_bcx);
    } else {
        debug!("calling {} at {}", bcx.val_to_string(llfn), bcx.llbb);
        for &llarg in llargs.iter() {
            debug!("arg: {}", bcx.val_to_string(llarg));
        }

        match call_info {
            Some(info) => debuginfo::set_source_location(bcx.fcx, info.id, info.span),
            None => debuginfo::clear_source_location(bcx.fcx)
        };

        let llresult = Call(bcx, llfn, llargs.as_slice(), Some(attributes));
        return (llresult, bcx);
    }
}

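// A call needs the `invoke` form only when landing pads are enabled, we are
// not already inside one, and the enclosing function has cleanups to run on
// unwind.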
pub fn need_invoke(bcx: Block) -> bool {
    if bcx.sess().no_landing_pads() {
        return false;
    }

    // Avoid using invoke if we are already inside a landing pad.
    if bcx.is_lpad {
        return false;
    }

    bcx.fcx.needs_invoke()
}

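// Loads the value behind `v` if `t` is an immediate type; otherwise the
// pointer itself is returned unchanged.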
pub fn load_if_immediate<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                                     v: ValueRef, t: Ty<'tcx>) -> ValueRef {
    let _icx = push_ctxt("load_if_immediate");
    if type_is_immediate(cx.ccx(), t) { return load_ty(cx, v, t); }
    return v;
}

/// Helper for loading values from memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values. Also handles various special cases where the type
/// gives us better information about what we are loading.
pub fn load_ty<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
                           ptr: ValueRef, t: Ty<'tcx>) -> ValueRef {
    if type_is_zero_size(cx.ccx(), t) {
        C_undef(type_of::type_of(cx.ccx(), t))
    } else if ty::type_is_bool(t) {
        Trunc(cx, LoadRangeAssert(cx, ptr, 0, 2, llvm::False), Type::i1(cx.ccx()))
    } else if ty::type_is_char(t) {
        // a char is a Unicode codepoint, and so takes values from 0
        // to 0x10FFFF inclusive only.
        LoadRangeAssert(cx, ptr, 0, 0x10FFFF + 1, llvm::False)
    } else {
        Load(cx, ptr)
    }
}

/// Helper for storing values in memory. Does the necessary conversion if the in-memory type
/// differs from the type used for SSA values.
pub fn store_ty(cx: Block, v: ValueRef, dst: ValueRef, t: Ty) {
    if ty::type_is_bool(t) {
        Store(cx, ZExt(cx, v, Type::i8(cx.ccx())), dst);
    } else {
        Store(cx, v, dst);
    };
}

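// Translates a `let` binding by handing it to the pattern-matching code,
// which allocates and initializes the locals bound by the pattern.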
pub fn init_local<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, local: &ast::Local)
                              -> Block<'blk, 'tcx> {
    debug!("init_local(bcx={}, local.id={})", bcx.to_str(), local.id);
    let _indenter = indenter();
    let _icx = push_ctxt("init_local");
    _match::store_local(bcx, local)
}

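// Wraps a raw LLVM basic block in a `Block` with no associated AST node.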
pub fn raw_block<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
                             is_lpad: bool,
                             llbb: BasicBlockRef)
                             -> Block<'blk, 'tcx> {
    common::BlockS::new(llbb, is_lpad, None, fcx)
}

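// Emits an if-without-else: when `val` is true control branches into a fresh
// block that `f` fills in; both paths join in the block that is returned.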
pub fn with_cond<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
                                val: ValueRef,
                                f: F)
                                -> Block<'blk, 'tcx> where
    F: FnOnce(Block<'blk, 'tcx>) -> Block<'blk, 'tcx>,
{
    let _icx = push_ctxt("with_cond");
    let fcx = bcx.fcx;
    let next_cx = fcx.new_temp_block("next");
    let cond_cx = fcx.new_temp_block("cond");
    CondBr(bcx, val, cond_cx.llbb, next_cx.llbb);
    let after_cx = f(cond_cx);
    if !after_cx.terminated.get() {
        Br(after_cx, next_cx.llbb);
    }
    next_cx
}

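// Marks the start of `ptr`'s live range with llvm.lifetime.start so that
// LLVM's stack coloring can reuse dead stack slots (skipped at -O0).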
pub fn call_lifetime_start(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_start");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_start = ccx.get_intrinsic(&"llvm.lifetime.start");
    Call(cx, lifetime_start, &[llsize, ptr], None);
}

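// Marks the end of `ptr`'s live range with llvm.lifetime.end (skipped at
// -O0).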
pub fn call_lifetime_end(cx: Block, ptr: ValueRef) {
    if cx.sess().opts.optimize == config::No {
        return;
    }

    let _icx = push_ctxt("lifetime_end");
    let ccx = cx.ccx();

    let llsize = C_u64(ccx, machine::llsize_of_alloc(ccx, val_ty(ptr).element_type()));
    let ptr = PointerCast(cx, ptr, Type::i8p(ccx));
    let lifetime_end = ccx.get_intrinsic(&"llvm.lifetime.end");
    Call(cx, lifetime_end, &[llsize, ptr], None);
}

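// Copies `n_bytes` from `src` to `dst` through the target-sized llvm.memcpy
// intrinsic, with the given alignment and no volatility.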
pub fn call_memcpy(cx: Block, dst: ValueRef, src: ValueRef, n_bytes: ValueRef, align: u32) {
    let _icx = push_ctxt("call_memcpy");
    let ccx = cx.ccx();
    let key = match ccx.sess().target.target.target_word_size.as_slice() {
        "32" => "llvm.memcpy.p0i8.p0i8.i32",
        "64" => "llvm.memcpy.p0i8.p0i8.i64",
        tws => panic!("Unsupported target word size for memcpy: {}", tws),
    };
    let memcpy = ccx.get_intrinsic(&key);
    let src_ptr = PointerCast(cx, src, Type::i8p(ccx));
    let dst_ptr = PointerCast(cx, dst, Type::i8p(ccx));
    let size = IntCast(cx, n_bytes, ccx.int_type());
    let align = C_i32(ccx, align as i32);
    let volatile = C_bool(ccx, false);
    Call(cx, memcpy, &[dst_ptr, src_ptr, size, align, volatile], None);
}

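// Copies a value of type `t` from `src` to `dst`: structural types go
// through memcpy with the type's size and alignment, scalars through a load
// and store.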
pub fn memcpy_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                             dst: ValueRef, src: ValueRef,
                             t: Ty<'tcx>) {
    let _icx = push_ctxt("memcpy_ty");
    let ccx = bcx.ccx();
    if ty::type_is_structural(t) {
        let llty = type_of::type_of(ccx, t);
        let llsz = llsize_of(ccx, llty);
        let llalign = type_of::align_of(ccx, t);
        call_memcpy(bcx, dst, src, llsz, llalign as u32);
    } else {
        store_ty(bcx, Load(bcx, src), dst, t);
    }
}

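// Zeroes the memory that `llptr` points at, sized according to `t`.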
pub fn zero_mem<'blk, 'tcx>(cx: Block<'blk, 'tcx>, llptr: ValueRef, t: Ty<'tcx>) {
    if cx.unreachable.get() { return; }
    let _icx = push_ctxt("zero_mem");
    let bcx = cx;
    memzero(&B(bcx), llptr, t);
}

// Always use this function instead of storing a zero constant to the memory
// in question. If you store a zero constant, LLVM will drown in vreg
// allocation for large data structures, and the generated code will be
// awful. (A telltale sign of this is large quantities of
// `mov [byte ptr foo],0` in the generated code.)
fn memzero<'a, 'tcx>(b: &Builder<'a, 'tcx>, llptr: ValueRef, ty: Ty<'tcx>) {
    let _icx = push_ctxt("memzero");
    let ccx = b.ccx;

    let llty = type_of::type_of(ccx, ty);

    let intrinsic_key = match ccx.sess().target.target.target_word_size.as_slice() {
        "32" => "llvm.memset.p0i8.i32",
        "64" => "llvm.memset.p0i8.i64",
        tws => panic!("Unsupported target word size for memset: {}", tws),
    };

    let llintrinsicfn = ccx.get_intrinsic(&intrinsic_key);
    let llptr = b.pointercast(llptr, Type::i8(ccx).ptr_to());
    let llzeroval = C_u8(ccx, 0);
    let size = machine::llsize_of(ccx, llty);
    let align = C_i32(ccx, type_of::align_of(ccx, ty) as i32);
    let volatile = C_bool(ccx, false);
    b.call(llintrinsicfn, &[llptr, llzeroval, size, align, volatile], None);
}

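// Allocates a stack slot for the (fully monomorphized) Rust type `t` and
// returns a pointer to it.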
pub fn alloc_ty<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloc_ty");
    let ccx = bcx.ccx();
    let ty = type_of::type_of(ccx, t);
    assert!(!ty::type_has_params(t));
    let val = alloca(bcx, ty, name);
    return val;
}

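// Allocates a stack slot for the LLVM type `ty` and emits a matching
// lifetime-start marker for it.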
pub fn alloca(cx: Block, ty: Type, name: &str) -> ValueRef {
    let p = alloca_no_lifetime(cx, ty, name);
    call_lifetime_start(cx, p);
    p
}

pub fn alloca_no_lifetime(cx: Block, ty: Type, name: &str) -> ValueRef {
    let _icx = push_ctxt("alloca");
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(ty.ptr_to().to_ref());
        }
    }
    debuginfo::clear_source_location(cx.fcx);
    Alloca(cx, ty, name)
}

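// Allocates a stack slot for the Rust type `ty` and zero-fills it; the
// memset is emitted at the alloca insertion point, i.e. on function entry.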
pub fn alloca_zeroed<'blk, 'tcx>(cx: Block<'blk, 'tcx>, ty: Ty<'tcx>,
                                 name: &str) -> ValueRef {
    let llty = type_of::type_of(cx.ccx(), ty);
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(llty.ptr_to().to_ref());
        }
    }
    let p = alloca_no_lifetime(cx, llty, name);
    let b = cx.fcx.ccx.builder();
    b.position_before(cx.fcx.alloca_insert_pt.get().unwrap());
    memzero(&b, p, ty);
    p
}

pub fn arrayalloca(cx: Block, ty: Type, v: ValueRef) -> ValueRef {
    let _icx = push_ctxt("arrayalloca");
    if cx.unreachable.get() {
        unsafe {
            return llvm::LLVMGetUndef(ty.to_ref());
        }
    }
    debuginfo::clear_source_location(cx.fcx);
    let p = ArrayAlloca(cx, ty, v);
    call_lifetime_start(cx, p);
    p
}

// Creates the alloca slot which holds the pointer to the slot for the final return value
pub fn make_return_slot_pointer<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
                                          output_type: Ty<'tcx>) -> ValueRef {
    let lloutputtype = type_of::type_of(fcx.ccx, output_type);

    // We create an alloca to hold a pointer of type `output_type`
    // which will hold the pointer to the right alloca which has the
    // final ret value
    if fcx.needs_ret_allocas {
        // Let's create the stack slot
        let slot = AllocaFcx(fcx, lloutputtype.ptr_to(), "llretslotptr");

        // and if we're using an out pointer, then store that in our newly made slot
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            let outptr = get_param(fcx.llfn, 0);

            let b = fcx.ccx.builder();
            b.position_before(fcx.alloca_insert_pt.get().unwrap());
            b.store(outptr, slot);
        }

        slot

    // But if there are no nested returns, we skip the indirection and have a single
    // retslot
    } else {
        if type_of::return_uses_outptr(fcx.ccx, output_type) {
            get_param(fcx.llfn, 0)
        } else {
            AllocaFcx(fcx, lloutputtype, "sret_slot")
        }
    }
}

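// AST visitor that looks for a `return` expression nested inside another
// return position. `explicit` starts outside any return; `implicit` treats
// the body it walks as the function's implicit return expression.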
struct CheckForNestedReturnsVisitor {
    found: bool,
    in_return: bool
}

impl CheckForNestedReturnsVisitor {
    fn explicit() -> CheckForNestedReturnsVisitor {
        CheckForNestedReturnsVisitor { found: false, in_return: false }
    }
    fn implicit() -> CheckForNestedReturnsVisitor {
        CheckForNestedReturnsVisitor { found: false, in_return: true }
    }
}

impl<'v> Visitor<'v> for CheckForNestedReturnsVisitor {
    fn visit_expr(&mut self, e: &ast::Expr) {
        match e.node {
            ast::ExprRet(..) => {
                if self.in_return {
                    self.found = true;
                } else {
                    self.in_return = true;
                    visit::walk_expr(self, e);
                    self.in_return = false;
                }
            }
            _ => visit::walk_expr(self, e)
        }
    }
}

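// Determines whether the fn, method or closure with the given id contains a
// `return` nested inside another return position; such functions use the
// indirect return-slot scheme (see `needs_ret_allocas`).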
fn has_nested_returns(tcx: &ty::ctxt, id: ast::NodeId) -> bool {
    match tcx.map.find(id) {
        Some(ast_map::NodeItem(i)) => {
            match i.node {
                ast::ItemFn(_, _, _, _, ref blk) => {
                    let mut explicit = CheckForNestedReturnsVisitor::explicit();
                    let mut implicit = CheckForNestedReturnsVisitor::implicit();
                    visit::walk_item(&mut explicit, &*i);
                    visit::walk_expr_opt(&mut implicit, &blk.expr);
                    explicit.found || implicit.found
                }
                _ => tcx.sess.bug("unexpected item variant in has_nested_returns")
            }
        }
        Some(ast_map::NodeTraitItem(trait_method)) => {
            match *trait_method {
                ast::ProvidedMethod(ref m) => {
                    match m.node {
                        ast::MethDecl(_, _, _, _, _, _, ref blk, _) => {
                            let mut explicit = CheckForNestedReturnsVisitor::explicit();
                            let mut implicit = CheckForNestedReturnsVisitor::implicit();
                            visit::walk_method_helper(&mut explicit, &**m);
                            visit::walk_expr_opt(&mut implicit, &blk.expr);
                            explicit.found || implicit.found
                        }
                        ast::MethMac(_) => tcx.sess.bug("unexpanded macro")
                    }
                }
                ast::RequiredMethod(_) => {
                    tcx.sess.bug("unexpected variant: required trait method \
                                  in has_nested_returns")
                }
                ast::TypeTraitItem(_) => {
                    tcx.sess.bug("unexpected variant: type trait item in \
                                  has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeImplItem(ii)) => {
            match *ii {
                ast::MethodImplItem(ref m) => {
                    match m.node {
                        ast::MethDecl(_, _, _, _, _, _, ref blk, _) => {
                            let mut explicit = CheckForNestedReturnsVisitor::explicit();
                            let mut implicit = CheckForNestedReturnsVisitor::implicit();
                            visit::walk_method_helper(&mut explicit, &**m);
                            visit::walk_expr_opt(&mut implicit, &blk.expr);
                            explicit.found || implicit.found
                        }
                        ast::MethMac(_) => tcx.sess.bug("unexpanded macro")
                    }
                }
                ast::TypeImplItem(_) => {
                    tcx.sess.bug("unexpected variant: type impl item in \
                                  has_nested_returns")
                }
            }
        }
        Some(ast_map::NodeExpr(e)) => {
            match e.node {
                ast::ExprClosure(_, _, _, ref blk) => {
                    let mut explicit = CheckForNestedReturnsVisitor::explicit();
                    let mut implicit = CheckForNestedReturnsVisitor::implicit();
                    visit::walk_expr(&mut explicit, e);
                    visit::walk_expr_opt(&mut implicit, &blk.expr);
                    explicit.found || implicit.found
                }
                _ => tcx.sess.bug("unexpected expr variant in has_nested_returns")
            }
        }

        Some(ast_map::NodeVariant(..)) | Some(ast_map::NodeStructCtor(..)) => false,

        // glue, shims, etc
        None if id == ast::DUMMY_NODE_ID => false,

        _ => tcx.sess.bug(format!("unexpected variant in has_nested_returns: {}",
                                  tcx.map.path_to_string(id)).as_slice())
    }
}

// NB: must keep 4 fns in sync:
//
// - type_of_fn
// - create_datums_for_fn_args.
// - new_fn_ctxt
// - trans_args
//
// Be warned! You must call `init_function` before doing anything with the
// returned function context.
pub fn new_fn_ctxt<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
                             llfndecl: ValueRef,
                             id: ast::NodeId,
                             has_env: bool,
                             output_type: ty::FnOutput<'tcx>,
                             param_substs: &'a Substs<'tcx>,
                             sp: Option<Span>,
                             block_arena: &'a TypedArena<common::BlockS<'a, 'tcx>>)
                             -> FunctionContext<'a, 'tcx> {
    common::validate_substs(param_substs);

    debug!("new_fn_ctxt(path={}, id={}, param_substs={})",
           if id == -1 {
               "".to_string()
           } else {
               ccx.tcx().map.path_to_string(id).to_string()
           },
           id, param_substs.repr(ccx.tcx()));

    let uses_outptr = match output_type {
        ty::FnConverging(output_type) => {
            let substd_output_type = output_type.subst(ccx.tcx(), param_substs);
            type_of::return_uses_outptr(ccx, substd_output_type)
        }
        ty::FnDiverging => false
    };
    let debug_context = debuginfo::create_function_debug_context(ccx, id, param_substs, llfndecl);
    let nested_returns = has_nested_returns(ccx.tcx(), id);

    let mut fcx = FunctionContext {
        llfn: llfndecl,
        llenv: None,
        llretslotptr: Cell::new(None),
        alloca_insert_pt: Cell::new(None),
        llreturn: Cell::new(None),
        needs_ret_allocas: nested_returns,
        personality: Cell::new(None),
        caller_expects_out_pointer: uses_outptr,
        lllocals: RefCell::new(NodeMap::new()),
        llupvars: RefCell::new(NodeMap::new()),
        id: id,
        param_substs: param_substs,
        span: sp,
        block_arena: block_arena,
        ccx: ccx,
        debug_context: debug_context,
        scopes: RefCell::new(Vec::new())
    };

    if has_env {
        fcx.llenv = Some(get_param(fcx.llfn, fcx.env_arg_pos() as c_uint))
    }

    fcx
}

/// Performs setup on a newly created function, creating the entry scope block
/// and allocating space for the return pointer.
pub fn init_function<'a, 'tcx>(fcx: &'a FunctionContext<'a, 'tcx>,
                               skip_retptr: bool,
                               output: ty::FnOutput<'tcx>)
                               -> Block<'a, 'tcx> {
    let entry_bcx = fcx.new_temp_block("entry-block");

    // Use a dummy instruction as the insertion point for all allocas.
    // This is later removed in FunctionContext::cleanup.
    fcx.alloca_insert_pt.set(Some(unsafe {
        Load(entry_bcx, C_null(Type::i8p(fcx.ccx)));
        llvm::LLVMGetFirstInstruction(entry_bcx.llbb)
    }));

    if let ty::FnConverging(output_type) = output {
        // This shouldn't need to recompute the return type,
        // as new_fn_ctxt did it already.
        let substd_output_type = output_type.subst(fcx.ccx.tcx(), fcx.param_substs);
        if !return_type_is_void(fcx.ccx, substd_output_type) {
            // If the function returns nil/bot, there is no real return
            // value, so do not set `llretslotptr`.
            if !skip_retptr || fcx.caller_expects_out_pointer {
                // Otherwise, we normally allocate the llretslotptr, unless we
                // have been instructed to skip it for immediate return
                // values.
                fcx.llretslotptr.set(Some(make_return_slot_pointer(fcx, substd_output_type)));
            }
        }
    }

    entry_bcx
}

|
|
|
|
|
2011-02-08 11:47:53 -08:00
|
|
|
// NB: must keep 4 fns in sync:
|
|
|
|
//
|
2011-09-14 14:34:50 +02:00
|
|
|
// - type_of_fn
|
2014-01-15 14:39:08 -05:00
|
|
|
// - create_datums_for_fn_args.
|
2011-02-08 11:47:53 -08:00
|
|
|
// - new_fn_ctxt
|
|
|
|
// - trans_args
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn arg_kind<'a, 'tcx>(cx: &FunctionContext<'a, 'tcx>, t: Ty<'tcx>)
|
|
|
|
-> datum::Rvalue {
|
2014-11-15 20:30:33 -05:00
|
|
|
use trans::datum::{ByRef, ByValue};
|
2014-01-15 14:39:08 -05:00
|
|
|
|
|
|
|
datum::Rvalue {
|
|
|
|
mode: if arg_is_indirect(cx.ccx, t) { ByRef } else { ByValue }
|
|
|
|
}
|
|
|
|
}
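// Standalone sketch (not part of the original source): the ByValue/ByRef
// decision that `arg_kind` makes above, modelled with a hypothetical size
// threshold in place of the real `arg_is_indirect` query. Only the shape of
// the logic is the point; the 16-byte cut-off and all names are made up.
enum PassMode {
    ByValue,
    ByRef,
}

fn pass_mode_for(size_in_bytes: u64) -> PassMode {
    // Small values travel directly; larger aggregates are handed over behind
    // a pointer, mirroring the ByRef/ByValue split in the rvalue datum above.
    if size_in_bytes <= 16 { PassMode::ByValue } else { PassMode::ByRef }
}

fn main() {
    let big_goes_by_ref = match pass_mode_for(64) {
        PassMode::ByRef => true,
        PassMode::ByValue => false,
    };
    assert!(big_goes_by_ref);
    println!("pass-mode sketch ok");
}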
|
|
|
|
|
|
|
|
// work around bizarre resolve errors
|
2014-09-29 22:11:30 +03:00
|
|
|
pub type RvalueDatum<'tcx> = datum::Datum<'tcx, datum::Rvalue>;
|
|
|
|
pub type LvalueDatum<'tcx> = datum::Datum<'tcx, datum::Lvalue>;
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-01-27 14:18:36 +02:00
|
|
|
// create_datums_for_fn_args: creates rvalue datums for each of the
|
2014-01-15 14:39:08 -05:00
|
|
|
// incoming function arguments. These will later be stored into
|
|
|
|
// appropriate lvalue datums.
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn create_datums_for_fn_args<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
|
|
|
|
arg_tys: &[Ty<'tcx>])
|
|
|
|
-> Vec<RvalueDatum<'tcx>> {
|
2014-01-15 14:39:08 -05:00
|
|
|
let _icx = push_ctxt("create_datums_for_fn_args");
|
|
|
|
|
2014-05-28 22:26:56 -07:00
|
|
|
// Return an array wrapping the ValueRefs that we get from `get_param` for
|
|
|
|
// each argument into datums.
|
2014-01-27 14:18:36 +02:00
|
|
|
arg_tys.iter().enumerate().map(|(i, &arg_ty)| {
|
2014-05-28 22:26:56 -07:00
|
|
|
let llarg = get_param(fcx.llfn, fcx.arg_pos(i) as c_uint);
|
2014-05-28 20:36:05 +01:00
|
|
|
datum::Datum::new(llarg, arg_ty, arg_kind(fcx, arg_ty))
|
2014-01-27 14:18:36 +02:00
|
|
|
}).collect()
|
2010-12-09 17:38:17 -08:00
|
|
|
}
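// Standalone sketch (not part of the original source): the "datum" notion
// used throughout this file -- an LLVM value paired with the Rust type it
// carries and with how the value is held. The fields are hypothetical
// stand-ins for ValueRef and Ty, and the mode choice is made up.
enum Mode {
    ByValue,
    ByRef,
}

struct Datum {
    llval: u64,        // stand-in for the LLVM ValueRef of the argument
    ty: &'static str,  // stand-in for the monomorphized argument type
    mode: Mode,        // how the value is held within the function
}

fn main() {
    // One datum per incoming argument, exactly as the `map` above builds.
    let arg_tys = ["u32", "String", "[u8; 64]"];
    let datums: Vec<Datum> = arg_tys.iter().enumerate().map(|(i, &ty)| {
        Datum {
            llval: 100 + i as u64, // pretend this came from get_param(i)
            ty: ty,
            mode: if i == 2 { Mode::ByRef } else { Mode::ByValue },
        }
    }).collect();
    assert_eq!(datums.len(), arg_tys.len());
    let held_by_ref = match datums[2].mode { Mode::ByRef => true, _ => false };
    assert!(held_by_ref && datums[2].ty == "[u8; 64]");
    println!("datum sketch ok");
}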
|
|
|
|
|
2014-05-28 22:26:56 -07:00
|
|
|
/// Creates rvalue datums for each of the incoming function arguments and
|
|
|
|
/// tuples the arguments. These will later be stored into appropriate lvalue
|
|
|
|
/// datums.
|
2014-07-29 22:08:39 -07:00
|
|
|
///
|
|
|
|
/// FIXME(pcwalton): Reduce the amount of code bloat this is responsible for.
|
2014-09-29 22:11:30 +03:00
|
|
|
fn create_datums_for_fn_args_under_call_abi<'blk, 'tcx>(
|
|
|
|
mut bcx: Block<'blk, 'tcx>,
|
2014-05-28 22:26:56 -07:00
|
|
|
arg_scope: cleanup::CustomScopeIndex,
|
2014-09-29 22:11:30 +03:00
|
|
|
arg_tys: &[Ty<'tcx>])
|
|
|
|
-> Vec<RvalueDatum<'tcx>> {
|
2014-05-28 22:26:56 -07:00
|
|
|
let mut result = Vec::new();
|
|
|
|
for (i, &arg_ty) in arg_tys.iter().enumerate() {
|
|
|
|
if i < arg_tys.len() - 1 {
|
|
|
|
// Regular argument.
|
|
|
|
let llarg = get_param(bcx.fcx.llfn, bcx.fcx.arg_pos(i) as c_uint);
|
|
|
|
result.push(datum::Datum::new(llarg, arg_ty, arg_kind(bcx.fcx,
|
|
|
|
arg_ty)));
|
|
|
|
continue
|
|
|
|
}
|
|
|
|
|
|
|
|
// This is the last argument. Tuple it.
|
2014-10-31 10:51:16 +02:00
|
|
|
match arg_ty.sty {
|
2014-05-28 22:26:56 -07:00
|
|
|
ty::ty_tup(ref tupled_arg_tys) => {
|
|
|
|
let tuple_args_scope_id = cleanup::CustomScope(arg_scope);
|
|
|
|
let tuple =
|
|
|
|
unpack_datum!(bcx,
|
|
|
|
datum::lvalue_scratch_datum(bcx,
|
|
|
|
arg_ty,
|
|
|
|
"tupled_args",
|
|
|
|
false,
|
|
|
|
tuple_args_scope_id,
|
|
|
|
(),
|
|
|
|
|(),
|
|
|
|
mut bcx,
|
|
|
|
llval| {
|
|
|
|
for (j, &tupled_arg_ty) in
|
|
|
|
tupled_arg_tys.iter().enumerate() {
|
|
|
|
let llarg =
|
|
|
|
get_param(bcx.fcx.llfn,
|
|
|
|
bcx.fcx.arg_pos(i + j) as c_uint);
|
2014-11-17 21:39:01 +13:00
|
|
|
let lldest = GEPi(bcx, llval, &[0, j]);
|
2014-05-28 22:26:56 -07:00
|
|
|
let datum = datum::Datum::new(
|
|
|
|
llarg,
|
|
|
|
tupled_arg_ty,
|
|
|
|
arg_kind(bcx.fcx, tupled_arg_ty));
|
|
|
|
bcx = datum.store_to(bcx, lldest);
|
|
|
|
}
|
|
|
|
bcx
|
|
|
|
}));
|
|
|
|
let tuple = unpack_datum!(bcx,
|
|
|
|
tuple.to_expr_datum()
|
|
|
|
.to_rvalue_datum(bcx,
|
|
|
|
"argtuple"));
|
|
|
|
result.push(tuple);
|
|
|
|
}
|
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.bug("last argument of a function with \
|
|
|
|
`rust-call` ABI isn't a tuple?!")
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
}
|
|
|
|
|
|
|
|
result
|
|
|
|
}
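// Standalone sketch (not part of the original source): the "rust-call" ABI
// shape that the function above reconstructs at the LLVM level. The callee's
// trailing arguments arrive bundled in one tuple, and the prologue unpacks
// that tuple into individual slots. All names here are hypothetical.
fn rust_call_style(regular: u32, tupled_args: (u32, u32)) -> u32 {
    // The tuple is taken apart immediately, just as the generated prologue
    // stores each tuple element into its own argument slot.
    let (a, b) = tupled_args;
    regular + a + b
}

fn main() {
    assert_eq!(rust_call_style(1, (2, 3)), 6);
    println!("rust-call tupling sketch ok");
}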
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
fn copy_args_to_allocas<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
|
|
|
|
arg_scope: cleanup::CustomScopeIndex,
|
|
|
|
bcx: Block<'blk, 'tcx>,
|
|
|
|
args: &[ast::Arg],
|
2014-09-29 22:11:30 +03:00
|
|
|
arg_datums: Vec<RvalueDatum<'tcx>>)
|
2014-09-06 19:13:04 +03:00
|
|
|
-> Block<'blk, 'tcx> {
|
2014-01-15 14:39:08 -05:00
|
|
|
debug!("copy_args_to_allocas");
|
2013-08-21 09:27:48 -04:00
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("copy_args_to_allocas");
|
2012-09-20 12:29:15 -07:00
|
|
|
let mut bcx = bcx;
|
2012-08-16 16:44:22 -07:00
|
|
|
|
2014-01-15 14:39:08 -05:00
|
|
|
let arg_scope_id = cleanup::CustomScope(arg_scope);
|
2012-08-16 16:44:22 -07:00
|
|
|
|
2014-09-14 20:27:36 -07:00
|
|
|
for (i, arg_datum) in arg_datums.into_iter().enumerate() {
|
2012-09-20 12:29:15 -07:00
|
|
|
// For certain mode/type combinations, the raw llarg values are passed
|
|
|
|
// by value. However, within the fn body itself, we want to always
|
2012-09-26 19:40:05 -07:00
|
|
|
// have all locals and arguments be by-ref so that we can cancel the
|
2012-09-20 12:29:15 -07:00
|
|
|
// cleanup and for better interaction with LLVM's debug info. So, if
|
|
|
|
// the argument would be passed by value, we store it into an alloca.
|
|
|
|
// This alloca should be optimized away by LLVM's mem-to-reg pass in
|
|
|
|
// the event it's not truly needed.
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-09-07 20:09:06 +03:00
|
|
|
bcx = _match::store_arg(bcx, &*args[i].pat, arg_datum, arg_scope_id);
|
2012-09-20 12:29:15 -07:00
|
|
|
|
2014-03-05 16:36:01 +02:00
|
|
|
if fcx.ccx.sess().opts.debuginfo == FullDebugInfo {
|
2014-01-11 16:39:32 +02:00
|
|
|
debuginfo::create_argument_metadata(bcx, &args[i]);
|
2011-12-06 00:05:22 -05:00
|
|
|
}
|
2010-11-26 17:47:27 -08:00
|
|
|
}
|
2012-09-20 12:29:15 -07:00
|
|
|
|
2014-01-27 14:18:36 +02:00
|
|
|
bcx
|
2010-11-26 17:47:27 -08:00
|
|
|
}
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
fn copy_unboxed_closure_args_to_allocas<'blk, 'tcx>(
|
|
|
|
mut bcx: Block<'blk, 'tcx>,
|
2014-05-28 22:26:56 -07:00
|
|
|
arg_scope: cleanup::CustomScopeIndex,
|
|
|
|
args: &[ast::Arg],
|
2014-09-29 22:11:30 +03:00
|
|
|
arg_datums: Vec<RvalueDatum<'tcx>>,
|
|
|
|
monomorphized_arg_types: &[Ty<'tcx>])
|
2014-09-06 19:13:04 +03:00
|
|
|
-> Block<'blk, 'tcx> {
|
2014-05-28 22:26:56 -07:00
|
|
|
let _icx = push_ctxt("copy_unboxed_closure_args_to_allocas");
|
|
|
|
let arg_scope_id = cleanup::CustomScope(arg_scope);
|
|
|
|
|
|
|
|
assert_eq!(arg_datums.len(), 1);
|
|
|
|
|
2014-09-14 20:27:36 -07:00
|
|
|
let arg_datum = arg_datums.into_iter().next().unwrap();
|
2014-05-28 22:26:56 -07:00
|
|
|
|
|
|
|
// Untuple the rest of the arguments.
|
|
|
|
let tuple_datum =
|
|
|
|
unpack_datum!(bcx,
|
|
|
|
arg_datum.to_lvalue_datum_in_scope(bcx,
|
|
|
|
"argtuple",
|
|
|
|
arg_scope_id));
|
2014-10-31 10:51:16 +02:00
|
|
|
let untupled_arg_types = match monomorphized_arg_types[0].sty {
|
2014-05-28 22:26:56 -07:00
|
|
|
ty::ty_tup(ref types) => types.as_slice(),
|
|
|
|
_ => {
|
|
|
|
bcx.tcx().sess.span_bug(args[0].pat.span,
|
|
|
|
"first arg to `rust-call` ABI function \
|
|
|
|
wasn't a tuple?!")
|
|
|
|
}
|
|
|
|
};
|
|
|
|
for j in range(0, args.len()) {
|
|
|
|
let tuple_element_type = untupled_arg_types[j];
|
|
|
|
let tuple_element_datum =
|
2014-08-06 11:59:40 +02:00
|
|
|
tuple_datum.get_element(bcx,
|
|
|
|
tuple_element_type,
|
2014-11-17 21:39:01 +13:00
|
|
|
|llval| GEPi(bcx, llval, &[0, j]));
|
2014-05-28 22:26:56 -07:00
|
|
|
let tuple_element_datum = tuple_element_datum.to_expr_datum();
|
|
|
|
let tuple_element_datum =
|
|
|
|
unpack_datum!(bcx,
|
|
|
|
tuple_element_datum.to_rvalue_datum(bcx,
|
|
|
|
"arg"));
|
|
|
|
bcx = _match::store_arg(bcx,
|
2014-09-07 20:09:06 +03:00
|
|
|
&*args[j].pat,
|
2014-05-28 22:26:56 -07:00
|
|
|
tuple_element_datum,
|
|
|
|
arg_scope_id);
|
|
|
|
|
|
|
|
if bcx.fcx.ccx.sess().opts.debuginfo == FullDebugInfo {
|
|
|
|
debuginfo::create_argument_metadata(bcx, &args[j]);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
bcx
|
|
|
|
}
|
|
|
|
|
2012-03-12 10:05:15 +01:00
|
|
|
// Ties up the llstaticallocas -> llloadenv -> lltop edges,
|
|
|
|
// and builds the return block.
|
2014-09-06 19:13:04 +03:00
|
|
|
pub fn finish_fn<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>,
|
|
|
|
last_bcx: Block<'blk, 'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
retty: ty::FnOutput<'tcx>) {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("finish_fn");
|
2013-07-13 03:25:46 +02:00
|
|
|
|
2013-12-20 20:41:54 -08:00
|
|
|
let ret_cx = match fcx.llreturn.get() {
|
2013-07-13 03:25:46 +02:00
|
|
|
Some(llreturn) => {
|
2013-12-18 14:54:42 -08:00
|
|
|
if !last_bcx.terminated.get() {
|
2013-07-13 03:25:46 +02:00
|
|
|
Br(last_bcx, llreturn);
|
|
|
|
}
|
|
|
|
raw_block(fcx, false, llreturn)
|
|
|
|
}
|
|
|
|
None => last_bcx
|
|
|
|
};
|
2014-10-24 21:14:37 +02:00
|
|
|
|
|
|
|
// This shouldn't need to recompute the return type,
|
|
|
|
// as new_fn_ctxt did it already.
|
2014-11-06 09:24:44 +02:00
|
|
|
let substd_retty = retty.subst(fcx.ccx.tcx(), fcx.param_substs);
|
2014-07-05 21:47:14 +02:00
|
|
|
build_return_block(fcx, ret_cx, substd_retty);
|
2014-10-24 21:14:37 +02:00
|
|
|
|
2013-12-13 12:27:22 +01:00
|
|
|
debuginfo::clear_source_location(fcx);
|
2013-07-21 16:19:34 +02:00
|
|
|
fcx.cleanup();
|
2013-04-18 15:53:29 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
// Builds the return block for a function.
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn build_return_block<'blk, 'tcx>(fcx: &FunctionContext<'blk, 'tcx>,
|
|
|
|
ret_cx: Block<'blk, 'tcx>,
|
|
|
|
retty: ty::FnOutput<'tcx>) {
|
2014-08-11 19:16:00 -07:00
|
|
|
if fcx.llretslotptr.get().is_none() ||
|
|
|
|
(!fcx.needs_ret_allocas && fcx.caller_expects_out_pointer) {
|
2013-07-28 16:40:35 +02:00
|
|
|
return RetVoid(ret_cx);
|
2013-04-18 15:53:29 -07:00
|
|
|
}
|
2013-07-28 16:40:35 +02:00
|
|
|
|
2014-08-11 19:16:00 -07:00
|
|
|
let retslot = if fcx.needs_ret_allocas {
|
|
|
|
Load(ret_cx, fcx.llretslotptr.get().unwrap())
|
|
|
|
} else {
|
|
|
|
fcx.llretslotptr.get().unwrap()
|
|
|
|
};
|
2014-07-29 12:25:06 -07:00
|
|
|
let retptr = Value(retslot);
|
2014-08-11 15:58:46 -07:00
|
|
|
match retptr.get_dominating_store(ret_cx) {
|
2013-07-28 16:40:35 +02:00
|
|
|
// If there's only a single store to the ret slot, we can directly return
|
|
|
|
// the value that was stored and omit the store and the alloca
|
|
|
|
Some(s) => {
|
2013-11-01 18:06:31 -07:00
|
|
|
let retval = s.get_operand(0).unwrap().get();
|
2013-07-28 16:40:35 +02:00
|
|
|
s.erase_from_parent();
|
|
|
|
|
|
|
|
if retptr.has_no_uses() {
|
|
|
|
retptr.erase_from_parent();
|
|
|
|
}
|
|
|
|
|
2014-10-24 21:14:37 +02:00
|
|
|
let retval = if retty == ty::FnConverging(ty::mk_bool()) {
|
2014-07-05 21:47:14 +02:00
|
|
|
Trunc(ret_cx, retval, Type::i1(fcx.ccx))
|
|
|
|
} else {
|
|
|
|
retval
|
2014-08-11 15:58:46 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
if fcx.caller_expects_out_pointer {
|
2014-10-24 21:14:37 +02:00
|
|
|
if let ty::FnConverging(retty) = retty {
|
|
|
|
store_ty(ret_cx, retval, get_param(fcx.llfn, 0), retty);
|
|
|
|
}
|
|
|
|
RetVoid(ret_cx)
|
2014-08-11 15:58:46 -07:00
|
|
|
} else {
|
2014-10-24 21:14:37 +02:00
|
|
|
Ret(ret_cx, retval)
|
2014-08-11 15:58:46 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
// Otherwise, copy the return value to the ret slot
|
2014-10-24 21:14:37 +02:00
|
|
|
None => match retty {
|
|
|
|
ty::FnConverging(retty) => {
|
|
|
|
if fcx.caller_expects_out_pointer {
|
|
|
|
memcpy_ty(ret_cx, get_param(fcx.llfn, 0), retslot, retty);
|
|
|
|
RetVoid(ret_cx)
|
|
|
|
} else {
|
|
|
|
Ret(ret_cx, load_ty(ret_cx, retslot, retty))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ty::FnDiverging => {
|
|
|
|
if fcx.caller_expects_out_pointer {
|
|
|
|
RetVoid(ret_cx)
|
|
|
|
} else {
|
|
|
|
Ret(ret_cx, C_undef(Type::nil(fcx.ccx)))
|
|
|
|
}
|
2014-07-05 21:47:14 +02:00
|
|
|
}
|
2013-07-28 16:40:35 +02:00
|
|
|
}
|
2014-07-29 12:25:06 -07:00
|
|
|
}
|
2012-02-13 16:06:56 -08:00
|
|
|
}
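// Standalone sketch (not part of the original source): the two return
// conventions that `build_return_block` has to cope with. A value either
// comes back directly from the call, or the caller pre-allocates storage and
// passes its address as a hidden first parameter (the "out pointer" that
// `caller_expects_out_pointer` models), and the callee returns void after
// writing through it. The struct and names below are made up.
struct Big {
    words: [u64; 8],
}

// Direct return: the result travels back in the call's return slot.
fn ret_direct(x: u64) -> u64 {
    x * 2
}

// Out-pointer style: the caller owns the storage, the callee fills it in.
fn ret_via_outptr(dest: &mut Big, seed: u64) {
    for (i, w) in dest.words.iter_mut().enumerate() {
        *w = seed + i as u64;
    }
}

fn main() {
    assert_eq!(ret_direct(21), 42);
    let mut slot = Big { words: [0; 8] };
    ret_via_outptr(&mut slot, 10);
    assert_eq!(slot.words[7], 17);
    println!("return-convention sketch ok");
}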
|
|
|
|
|
|
|
|
#[deriving(Clone, Eq, PartialEq)]
|
|
|
|
pub enum IsUnboxedClosureFlag {
|
|
|
|
NotUnboxedClosure,
|
|
|
|
IsUnboxedClosure,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Copy for IsUnboxedClosureFlag {}
|
|
|
|
|
2011-06-29 19:50:50 -07:00
|
|
|
// trans_closure: Builds an LLVM function out of a source function.
|
|
|
|
// If the function closes over its environment a closure will be
|
|
|
|
// returned.
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn trans_closure<'a, 'b, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
decl: &ast::FnDecl,
|
|
|
|
body: &ast::Block,
|
|
|
|
llfndecl: ValueRef,
|
2014-11-06 09:24:44 +02:00
|
|
|
param_substs: &Substs<'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
fn_ast_id: ast::NodeId,
|
|
|
|
_attributes: &[ast::Attribute],
|
|
|
|
output_type: ty::FnOutput<'tcx>,
|
|
|
|
abi: Abi,
|
|
|
|
closure_env: closure::ClosureEnv<'b, 'tcx>) {
|
2014-09-05 09:18:53 -07:00
|
|
|
ccx.stats().n_closures.set(ccx.stats().n_closures.get() + 1);
|
2013-12-22 13:50:04 -08:00
|
|
|
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_closure");
|
2011-05-24 13:47:27 -04:00
|
|
|
set_uwtable(llfndecl);
|
2010-11-26 17:47:27 -08:00
|
|
|
|
2013-10-21 13:08:31 -07:00
|
|
|
debug!("trans_closure(..., param_substs={})",
|
2014-05-14 20:53:48 -04:00
|
|
|
param_substs.repr(ccx.tcx()));
|
|
|
|
|
2014-01-22 14:03:02 -05:00
|
|
|
let arena = TypedArena::new();
|
|
|
|
let fcx = new_fn_ctxt(ccx,
|
|
|
|
llfndecl,
|
|
|
|
fn_ast_id,
|
2014-09-29 22:11:30 +03:00
|
|
|
closure_env.kind != closure::NotClosure,
|
2014-01-22 14:03:02 -05:00
|
|
|
output_type,
|
2014-05-14 20:53:48 -04:00
|
|
|
param_substs,
|
2014-01-22 14:03:02 -05:00
|
|
|
Some(body.span),
|
2014-08-11 16:55:13 -07:00
|
|
|
&arena);
|
2014-07-05 01:52:12 +02:00
|
|
|
let mut bcx = init_function(&fcx, false, output_type);
|
2014-01-15 14:39:08 -05:00
|
|
|
|
|
|
|
// cleanup scope for the incoming arguments
|
|
|
|
let fn_cleanup_debug_loc =
|
2014-11-27 13:54:01 +01:00
|
|
|
debuginfo::get_cleanup_debug_loc_for_ast_node(ccx, fn_ast_id, body.span, true);
|
|
|
|
let arg_scope = fcx.push_custom_cleanup_scope_with_debug_loc(fn_cleanup_debug_loc);
|
2013-08-19 18:23:43 +02:00
|
|
|
|
2013-07-16 20:08:35 +02:00
|
|
|
let block_ty = node_id_type(bcx, body.id);
|
2011-08-19 14:34:45 +02:00
|
|
|
|
2013-08-22 17:00:12 -07:00
|
|
|
// Set up arguments to the function.
|
2014-05-28 22:26:56 -07:00
|
|
|
let monomorphized_arg_types =
|
2014-10-31 16:20:25 +02:00
|
|
|
decl.inputs.iter()
|
|
|
|
.map(|arg| node_id_type(bcx, arg.id))
|
|
|
|
.collect::<Vec<_>>();
|
2014-09-29 22:11:30 +03:00
|
|
|
let monomorphized_arg_types = match closure_env.kind {
|
|
|
|
closure::NotClosure | closure::BoxedClosure(..) => {
|
|
|
|
monomorphized_arg_types
|
|
|
|
}
|
2014-10-31 16:20:25 +02:00
|
|
|
|
|
|
|
// Tuple up closure argument types for the "rust-call" ABI.
|
2014-09-29 22:11:30 +03:00
|
|
|
closure::UnboxedClosure(..) => {
|
|
|
|
vec![ty::mk_tup(ccx.tcx(), monomorphized_arg_types)]
|
|
|
|
}
|
2014-10-31 16:20:25 +02:00
|
|
|
};
|
2014-05-28 22:26:56 -07:00
|
|
|
for monomorphized_arg_type in monomorphized_arg_types.iter() {
|
|
|
|
debug!("trans_closure: monomorphized_arg_type: {}",
|
|
|
|
ty_to_string(ccx.tcx(), *monomorphized_arg_type));
|
|
|
|
}
|
|
|
|
debug!("trans_closure: function lltype: {}",
|
2014-09-05 09:18:53 -07:00
|
|
|
bcx.fcx.ccx.tn().val_to_string(bcx.fcx.llfn));
|
2014-05-28 22:26:56 -07:00
|
|
|
|
|
|
|
let arg_datums = if abi != RustCall {
|
|
|
|
create_datums_for_fn_args(&fcx,
|
|
|
|
monomorphized_arg_types.as_slice())
|
|
|
|
} else {
|
|
|
|
create_datums_for_fn_args_under_call_abi(
|
|
|
|
bcx,
|
|
|
|
arg_scope,
|
|
|
|
monomorphized_arg_types.as_slice())
|
|
|
|
};
|
2013-08-22 17:00:12 -07:00
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
bcx = match closure_env.kind {
|
|
|
|
closure::NotClosure | closure::BoxedClosure(..) => {
|
2014-05-28 22:26:56 -07:00
|
|
|
copy_args_to_allocas(&fcx,
|
|
|
|
arg_scope,
|
|
|
|
bcx,
|
|
|
|
decl.inputs.as_slice(),
|
|
|
|
arg_datums)
|
|
|
|
}
|
2014-09-29 22:11:30 +03:00
|
|
|
closure::UnboxedClosure(..) => {
|
2014-05-28 22:26:56 -07:00
|
|
|
copy_unboxed_closure_args_to_allocas(
|
|
|
|
bcx,
|
|
|
|
arg_scope,
|
|
|
|
decl.inputs.as_slice(),
|
|
|
|
arg_datums,
|
|
|
|
monomorphized_arg_types.as_slice())
|
|
|
|
}
|
|
|
|
};
|
2011-06-28 18:54:05 -07:00
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
bcx = closure_env.load(bcx, cleanup::CustomScope(arg_scope));
|
2011-06-28 18:54:05 -07:00
|
|
|
|
2013-08-19 18:23:43 +02:00
|
|
|
// Up until here, IR instructions for this function have explicitly not been annotated with
|
|
|
|
// source code location, so we don't step into call setup code. From here on, source location
|
|
|
|
// emitting should be enabled.
|
2014-01-07 08:54:58 -08:00
|
|
|
debuginfo::start_emitting_source_locations(&fcx);
|
2013-08-19 18:23:43 +02:00
|
|
|
|
2014-07-29 12:25:06 -07:00
|
|
|
let dest = match fcx.llretslotptr.get() {
|
2014-10-24 21:14:37 +02:00
|
|
|
Some(_) => expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(block_ty), "iret_slot")),
|
2014-02-07 21:00:31 +01:00
|
|
|
None => {
|
2014-07-29 12:25:06 -07:00
|
|
|
assert!(type_is_zero_size(bcx.ccx(), block_ty));
|
2014-02-07 21:00:31 +01:00
|
|
|
expr::Ignore
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
2011-08-16 21:34:52 +02:00
|
|
|
// This call to trans_block is the place where we bridge between
|
|
|
|
// translation calls that don't have a return value (trans_crate,
|
2012-01-13 10:58:31 +01:00
|
|
|
// trans_mod, trans_item, et cetera) and those that do
|
2011-08-16 21:34:52 +02:00
|
|
|
// (trans_block, trans_expr, et cetera).
|
2014-02-07 21:00:31 +01:00
|
|
|
bcx = controlflow::trans_block(bcx, body, dest);
|
2012-10-08 11:49:01 -07:00
|
|
|
|
2014-07-29 12:25:06 -07:00
|
|
|
match dest {
|
2014-08-11 19:16:00 -07:00
|
|
|
expr::SaveIn(slot) if fcx.needs_ret_allocas => {
|
2014-07-29 12:25:06 -07:00
|
|
|
Store(bcx, slot, fcx.llretslotptr.get().unwrap());
|
|
|
|
}
|
|
|
|
_ => {}
|
|
|
|
}
|
|
|
|
|
2013-12-20 20:41:54 -08:00
|
|
|
match fcx.llreturn.get() {
|
2014-01-15 14:39:08 -05:00
|
|
|
Some(_) => {
|
|
|
|
Br(bcx, fcx.return_exit_block());
|
|
|
|
fcx.pop_custom_cleanup_scope(arg_scope);
|
|
|
|
}
|
|
|
|
None => {
|
|
|
|
// Microoptimization writ large: avoid creating a separate
|
|
|
|
// llreturn basic block
|
|
|
|
bcx = fcx.pop_and_trans_custom_cleanup_scope(bcx, arg_scope);
|
|
|
|
}
|
2013-07-13 03:25:46 +02:00
|
|
|
};
|
2013-06-11 12:41:09 -07:00
|
|
|
|
2013-06-14 11:59:49 -07:00
|
|
|
// Put return block after all other blocks.
|
|
|
|
// This somewhat improves single-stepping experience in debugger.
|
|
|
|
unsafe {
|
2013-12-20 20:41:54 -08:00
|
|
|
let llreturn = fcx.llreturn.get();
|
|
|
|
for &llreturn in llreturn.iter() {
|
2013-07-13 03:25:46 +02:00
|
|
|
llvm::LLVMMoveBasicBlockAfter(llreturn, bcx.llbb);
|
|
|
|
}
|
2013-06-14 11:59:49 -07:00
|
|
|
}
|
2011-07-15 11:38:16 -07:00
|
|
|
|
2011-06-29 17:29:24 -07:00
|
|
|
// Insert the mandatory first few basic blocks before lltop.
|
2014-07-05 21:47:14 +02:00
|
|
|
finish_fn(&fcx, bcx, output_type);
|
2011-06-29 19:50:50 -07:00
|
|
|
}
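// Standalone sketch (not part of the original source): what `trans_closure`
// amounts to, expressed in plain Rust. A function that closes over its
// environment is translated as an ordinary function that receives that
// environment through an extra parameter (the `llenv` slot that
// `closure_env.load` wires up above). The struct and names are hypothetical.
struct Env {
    base: u32,
}

// The closure body with its captured environment made explicit.
fn closure_body(env: &Env, x: u32) -> u32 {
    env.base + x
}

fn main() {
    let env = Env { base: 40 };
    // Calling the closure is just calling the function with its environment.
    assert_eq!(closure_body(&env, 2), 42);
    println!("closure-env sketch ok");
}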
|
|
|
|
|
2011-07-19 11:56:46 -07:00
|
|
|
// trans_fn: creates an LLVM function corresponding to a source language
|
|
|
|
// function.
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn trans_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
decl: &ast::FnDecl,
|
|
|
|
body: &ast::Block,
|
|
|
|
llfndecl: ValueRef,
|
2014-11-06 09:24:44 +02:00
|
|
|
param_substs: &Substs<'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
id: ast::NodeId,
|
|
|
|
attrs: &[ast::Attribute]) {
|
2014-09-05 09:18:53 -07:00
|
|
|
let _s = StatRecorder::new(ccx, ccx.tcx().map.path_to_string(id).to_string());
|
2014-05-14 20:53:48 -04:00
|
|
|
debug!("trans_fn(param_substs={})", param_substs.repr(ccx.tcx()));
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_fn");
|
2014-05-28 22:26:56 -07:00
|
|
|
let fn_ty = ty::node_id_to_type(ccx.tcx(), id);
|
|
|
|
let output_type = ty::ty_fn_ret(fn_ty);
|
|
|
|
let abi = ty::ty_fn_abi(fn_ty);
|
|
|
|
trans_closure(ccx,
|
|
|
|
decl,
|
|
|
|
body,
|
|
|
|
llfndecl,
|
|
|
|
param_substs,
|
|
|
|
id,
|
|
|
|
attrs,
|
|
|
|
output_type,
|
|
|
|
abi,
|
2014-09-29 22:11:30 +03:00
|
|
|
closure::ClosureEnv::new(&[], closure::NotClosure));
|
|
|
|
}
|
|
|
|
|
|
|
|
pub fn trans_enum_variant<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
_enum_id: ast::NodeId,
|
|
|
|
variant: &ast::Variant,
|
|
|
|
_args: &[ast::VariantArg],
|
|
|
|
disr: ty::Disr,
|
2014-11-06 09:24:44 +02:00
|
|
|
param_substs: &Substs<'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
llfndecl: ValueRef) {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_enum_variant");
|
2013-04-18 15:53:29 -07:00
|
|
|
|
2013-06-20 15:23:52 -04:00
|
|
|
trans_enum_variant_or_tuple_like_struct(
|
|
|
|
ccx,
|
|
|
|
variant.node.id,
|
|
|
|
disr,
|
|
|
|
param_substs,
|
|
|
|
llfndecl);
|
2010-12-01 19:03:47 -08:00
|
|
|
}
|
|
|
|
|
2014-09-06 19:13:04 +03:00
|
|
|
pub fn trans_named_tuple_constructor<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
ctor_ty: Ty<'tcx>,
|
2014-09-06 19:13:04 +03:00
|
|
|
disr: ty::Disr,
|
|
|
|
args: callee::CallArgs,
|
|
|
|
dest: expr::Dest,
|
|
|
|
call_info: Option<NodeInfo>)
|
|
|
|
-> Result<'blk, 'tcx> {
|
2014-07-09 23:42:08 -07:00
|
|
|
|
|
|
|
let ccx = bcx.fcx.ccx;
|
2014-09-05 09:18:53 -07:00
|
|
|
let tcx = ccx.tcx();
|
2014-07-09 23:42:08 -07:00
|
|
|
|
2014-10-31 10:51:16 +02:00
|
|
|
let result_ty = match ctor_ty.sty {
|
2014-10-24 21:14:37 +02:00
|
|
|
ty::ty_bare_fn(ref bft) => bft.sig.output.unwrap(),
|
2014-07-09 23:42:08 -07:00
|
|
|
_ => ccx.sess().bug(
|
|
|
|
format!("trans_enum_variant_constructor: \
|
|
|
|
unexpected ctor return type {}",
|
|
|
|
ctor_ty.repr(tcx)).as_slice())
|
|
|
|
};
|
|
|
|
|
|
|
|
// Get location to store the result. If the user does not care about
|
|
|
|
// the result, just make a stack slot
|
|
|
|
let llresult = match dest {
|
|
|
|
expr::SaveIn(d) => d,
|
|
|
|
expr::Ignore => {
|
|
|
|
if !type_is_zero_size(ccx, result_ty) {
|
|
|
|
alloc_ty(bcx, result_ty, "constructor_result")
|
|
|
|
} else {
|
|
|
|
C_undef(type_of::type_of(ccx, result_ty))
|
|
|
|
}
|
|
|
|
}
|
|
|
|
};
|
|
|
|
|
|
|
|
if !type_is_zero_size(ccx, result_ty) {
|
|
|
|
match args {
|
|
|
|
callee::ArgExprs(exprs) => {
|
2014-09-07 20:09:06 +03:00
|
|
|
let fields = exprs.iter().map(|x| &**x).enumerate().collect::<Vec<_>>();
|
|
|
|
bcx = expr::trans_adt(bcx,
|
|
|
|
result_ty,
|
|
|
|
disr,
|
|
|
|
fields.as_slice(),
|
|
|
|
None,
|
|
|
|
expr::SaveIn(llresult),
|
|
|
|
call_info);
|
2014-07-09 23:42:08 -07:00
|
|
|
}
|
|
|
|
_ => ccx.sess().bug("expected expr as arguments for variant/struct tuple constructor")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
// If the caller doesn't care about the result
|
|
|
|
// drop the temporary we made
|
|
|
|
let bcx = match dest {
|
|
|
|
expr::SaveIn(_) => bcx,
|
|
|
|
expr::Ignore => {
|
|
|
|
glue::drop_ty(bcx, llresult, result_ty, call_info)
|
|
|
|
}
|
2014-07-09 23:42:08 -07:00
|
|
|
};
|
|
|
|
|
|
|
|
Result::new(bcx, llresult)
|
|
|
|
}
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn trans_tuple_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
_fields: &[ast::StructField],
|
|
|
|
ctor_id: ast::NodeId,
|
2014-11-06 09:24:44 +02:00
|
|
|
param_substs: &Substs<'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
llfndecl: ValueRef) {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_tuple_struct");
|
2012-10-24 14:36:00 -07:00
|
|
|
|
2013-06-20 15:23:52 -04:00
|
|
|
trans_enum_variant_or_tuple_like_struct(
|
|
|
|
ccx,
|
|
|
|
ctor_id,
|
|
|
|
0,
|
|
|
|
param_substs,
|
|
|
|
llfndecl);
|
|
|
|
}
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
fn trans_enum_variant_or_tuple_like_struct<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
ctor_id: ast::NodeId,
|
|
|
|
disr: ty::Disr,
|
2014-11-06 09:24:44 +02:00
|
|
|
param_substs: &Substs<'tcx>,
|
2014-09-29 22:11:30 +03:00
|
|
|
llfndecl: ValueRef) {
|
2014-05-07 07:20:15 -04:00
|
|
|
let ctor_ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
|
2014-11-06 09:24:44 +02:00
|
|
|
let ctor_ty = ctor_ty.subst(ccx.tcx(), param_substs);
|
2013-06-20 15:23:52 -04:00
|
|
|
|
2014-10-31 10:51:16 +02:00
|
|
|
let result_ty = match ctor_ty.sty {
|
2013-04-18 15:53:29 -07:00
|
|
|
ty::ty_bare_fn(ref bft) => bft.sig.output,
|
2014-03-05 16:36:01 +02:00
|
|
|
_ => ccx.sess().bug(
|
2013-09-27 22:38:08 -07:00
|
|
|
format!("trans_enum_variant_or_tuple_like_struct: \
|
2014-05-16 10:45:16 -07:00
|
|
|
unexpected ctor return type {}",
|
2014-06-21 03:39:03 -07:00
|
|
|
ty_to_string(ccx.tcx(), ctor_ty)).as_slice())
|
2013-04-18 15:53:29 -07:00
|
|
|
};
|
|
|
|
|
2014-01-22 14:03:02 -05:00
|
|
|
let arena = TypedArena::new();
|
2014-02-14 07:07:09 +02:00
|
|
|
let fcx = new_fn_ctxt(ccx, llfndecl, ctor_id, false, result_ty,
|
2014-08-11 16:55:13 -07:00
|
|
|
param_substs, None, &arena);
|
2014-07-05 01:52:12 +02:00
|
|
|
let bcx = init_function(&fcx, false, result_ty);
|
2013-01-07 14:16:52 -08:00
|
|
|
|
2014-08-11 19:16:00 -07:00
|
|
|
assert!(!fcx.needs_ret_allocas);
|
|
|
|
|
2013-08-22 17:00:12 -07:00
|
|
|
let arg_tys = ty::ty_fn_args(ctor_ty);
|
|
|
|
|
2014-03-08 21:36:22 +01:00
|
|
|
let arg_datums = create_datums_for_fn_args(&fcx, arg_tys.as_slice());
|
2012-10-24 14:36:00 -07:00
|
|
|
|
2014-10-24 21:14:37 +02:00
|
|
|
if !type_is_zero_size(fcx.ccx, result_ty.unwrap()) {
|
2014-08-11 19:16:00 -07:00
|
|
|
let dest = fcx.get_ret_slot(bcx, result_ty, "eret_slot");
|
2014-10-24 21:14:37 +02:00
|
|
|
let repr = adt::represent_type(ccx, result_ty.unwrap());
|
2014-09-14 20:27:36 -07:00
|
|
|
for (i, arg_datum) in arg_datums.into_iter().enumerate() {
|
2014-01-16 15:11:22 -05:00
|
|
|
let lldestptr = adt::trans_field_ptr(bcx,
|
2014-04-22 03:03:02 +03:00
|
|
|
&*repr,
|
2014-07-29 12:25:06 -07:00
|
|
|
dest,
|
2014-01-16 15:11:22 -05:00
|
|
|
disr,
|
|
|
|
i);
|
|
|
|
arg_datum.store_to(bcx, lldestptr);
|
|
|
|
}
|
2014-07-29 12:25:06 -07:00
|
|
|
adt::trans_set_discr(bcx, &*repr, dest, disr);
|
2012-10-24 14:36:00 -07:00
|
|
|
}
|
2014-01-15 14:39:08 -05:00
|
|
|
|
2014-07-05 21:47:14 +02:00
|
|
|
finish_fn(&fcx, bcx, result_ty);
|
2012-10-24 14:36:00 -07:00
|
|
|
}
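// Standalone sketch (not part of the original source): the constructors that
// `trans_enum_variant_or_tuple_like_struct` emits behave like ordinary
// functions that move each argument into its field and then record which
// variant was built (the discriminant written by `trans_set_discr` above).
// The enum below is made up for illustration.
enum Shape {
    Circle(f64),
    Rect(f64, f64),
}

fn main() {
    // `Shape::Rect` is itself such a generated constructor: it can be stored
    // in a variable and called like any other function value.
    let make_rect = Shape::Rect;
    let shapes = vec![make_rect(2.0, 3.0), Shape::Circle(1.0)];
    let mut total = 0.0;
    for s in shapes.iter() {
        total += match *s {
            Shape::Circle(r) => 3.0 * r * r, // rough pi; precision is not the point
            Shape::Rect(w, h) => w * h,
        };
    }
    assert_eq!(total, 9.0);
    println!("constructor sketch ok");
}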
|
|
|
|
|
2014-05-19 14:57:24 -07:00
|
|
|
fn enum_variant_size_lint(ccx: &CrateContext, enum_def: &ast::EnumDef, sp: Span, id: ast::NodeId) {
|
|
|
|
let mut sizes = Vec::new(); // does no allocation if no pushes, thankfully
|
|
|
|
|
2014-09-05 09:18:53 -07:00
|
|
|
let levels = ccx.tcx().node_lint_levels.borrow();
|
2014-10-14 11:37:16 -07:00
|
|
|
let lint_id = lint::LintId::of(lint::builtin::VARIANT_SIZE_DIFFERENCES);
|
2014-11-06 12:25:16 -05:00
|
|
|
let lvlsrc = match levels.get(&(id, lint_id)) {
|
2014-06-06 15:49:48 -07:00
|
|
|
None | Some(&(lint::Allow, _)) => return,
|
|
|
|
Some(&lvlsrc) => lvlsrc,
|
|
|
|
};
|
|
|
|
|
|
|
|
let avar = adt::represent_type(ccx, ty::node_id_to_type(ccx.tcx(), id));
|
|
|
|
match *avar {
|
2014-06-14 15:55:55 +02:00
|
|
|
adt::General(_, ref variants, _) => {
|
2014-06-06 15:49:48 -07:00
|
|
|
for var in variants.iter() {
|
|
|
|
let mut size = 0;
|
|
|
|
for field in var.fields.iter().skip(1) {
|
|
|
|
// skip the discriminant
|
|
|
|
size += llsize_of_real(ccx, sizing_type_of(ccx, *field));
|
|
|
|
}
|
|
|
|
sizes.push(size);
|
2014-06-04 14:35:58 -07:00
|
|
|
}
|
2014-06-06 15:49:48 -07:00
|
|
|
},
|
|
|
|
_ => { /* its size is either constant or unimportant */ }
|
|
|
|
}
|
2014-05-19 14:57:24 -07:00
|
|
|
|
2014-06-06 15:49:48 -07:00
|
|
|
let (largest, slargest, largest_index) = sizes.iter().enumerate().fold((0, 0, 0),
|
|
|
|
|(l, s, li), (idx, &size)|
|
|
|
|
if size > l {
|
|
|
|
(size, l, idx)
|
|
|
|
} else if size > s {
|
|
|
|
(l, size, li)
|
|
|
|
} else {
|
|
|
|
(l, s, li)
|
2014-06-04 14:35:58 -07:00
|
|
|
}
|
2014-06-06 15:49:48 -07:00
|
|
|
);
|
|
|
|
|
|
|
|
// we only warn if the largest variant is at least thrice as large as
|
|
|
|
// the second-largest.
|
|
|
|
if largest > slargest * 3 && slargest > 0 {
|
|
|
|
// Use lint::raw_emit_lint rather than sess.add_lint because the lint-printing
|
|
|
|
// pass for the latter already ran.
|
2014-10-14 11:37:16 -07:00
|
|
|
lint::raw_emit_lint(&ccx.tcx().sess, lint::builtin::VARIANT_SIZE_DIFFERENCES,
|
2014-06-17 17:41:50 -07:00
|
|
|
lvlsrc, Some(sp),
|
|
|
|
format!("enum variant is more than three times larger \
|
|
|
|
({} bytes) than the next largest (ignoring padding)",
|
|
|
|
largest).as_slice());
|
2014-06-06 15:49:48 -07:00
|
|
|
|
2014-10-14 23:05:01 -07:00
|
|
|
ccx.sess().span_note(enum_def.variants[largest_index].span,
|
2014-06-06 15:49:48 -07:00
|
|
|
"this variant is the largest");
|
2014-05-19 14:57:24 -07:00
|
|
|
}
|
2012-08-08 14:17:52 -07:00
|
|
|
}
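// Standalone sketch (not part of the original source): the single-pass fold
// used above to find the largest and second-largest variant sizes, run on
// plain numbers. The sizes are made up.
fn largest_two(sizes: &[u64]) -> (u64, u64, usize) {
    sizes.iter().enumerate().fold((0, 0, 0), |(l, s, li), (idx, &size)| {
        if size > l {
            (size, l, idx)   // new largest; the old largest becomes runner-up
        } else if size > s {
            (l, size, li)    // new runner-up, largest unchanged
        } else {
            (l, s, li)
        }
    })
}

fn main() {
    let sizes = [8u64, 100, 16, 24];
    let (largest, slargest, largest_index) = largest_two(&sizes);
    assert_eq!((largest, slargest, largest_index), (100, 24, 1));
    // The lint above warns when the largest variant is more than three times
    // the size of the runner-up (and the runner-up is non-empty).
    let would_warn = largest > slargest * 3 && slargest > 0;
    assert!(would_warn);
    println!("variant-size sketch ok");
}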
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
pub struct TransItemVisitor<'a, 'tcx: 'a> {
|
|
|
|
pub ccx: &'a CrateContext<'a, 'tcx>,
|
2013-09-25 10:58:40 +02:00
|
|
|
}
|
2013-08-28 23:28:06 -07:00
|
|
|
|
2014-09-10 01:54:36 +03:00
|
|
|
impl<'a, 'tcx, 'v> Visitor<'v> for TransItemVisitor<'a, 'tcx> {
|
2014-09-12 13:10:30 +03:00
|
|
|
fn visit_item(&mut self, i: &ast::Item) {
|
2013-09-25 10:58:40 +02:00
|
|
|
trans_item(self.ccx, i);
|
2013-08-28 23:28:06 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-11-11 20:22:41 -05:00
|
|
|
pub fn llvm_linkage_by_name(name: &str) -> Option<Linkage> {
|
|
|
|
// Use the names from src/llvm/docs/LangRef.rst here. Most types are only
|
|
|
|
// applicable to variable declarations and may not really make sense for
|
|
|
|
// Rust code in the first place but whitelist them anyway and trust that
|
|
|
|
// the user knows what s/he's doing. Who knows, unanticipated use cases
|
|
|
|
// may pop up in the future.
|
|
|
|
//
|
|
|
|
// ghost, dllimport, dllexport and linkonce_odr_autohide are not supported
|
|
|
|
// and don't have to be, LLVM treats them as no-ops.
|
|
|
|
match name {
|
|
|
|
"appending" => Some(llvm::AppendingLinkage),
|
|
|
|
"available_externally" => Some(llvm::AvailableExternallyLinkage),
|
|
|
|
"common" => Some(llvm::CommonLinkage),
|
|
|
|
"extern_weak" => Some(llvm::ExternalWeakLinkage),
|
|
|
|
"external" => Some(llvm::ExternalLinkage),
|
|
|
|
"internal" => Some(llvm::InternalLinkage),
|
|
|
|
"linkonce" => Some(llvm::LinkOnceAnyLinkage),
|
|
|
|
"linkonce_odr" => Some(llvm::LinkOnceODRLinkage),
|
|
|
|
"private" => Some(llvm::PrivateLinkage),
|
|
|
|
"weak" => Some(llvm::WeakAnyLinkage),
|
|
|
|
"weak_odr" => Some(llvm::WeakODRLinkage),
|
|
|
|
_ => None,
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-08-01 12:27:12 -07:00
|
|
|
/// Enum describing the origin of an LLVM `Value`, for linkage purposes.
|
|
|
|
pub enum ValueOrigin {
|
|
|
|
/// The LLVM `Value` is in this context because the corresponding item was
|
|
|
|
/// assigned to the current compilation unit.
|
|
|
|
OriginalTranslation,
|
|
|
|
/// The `Value`'s corresponding item was assigned to some other compilation
|
|
|
|
/// unit, but the `Value` was translated in this context anyway because the
|
|
|
|
/// item is marked `#[inline]`.
|
|
|
|
InlinedCopy,
|
|
|
|
}
|
|
|
|
|
|
|
|
impl Copy for ValueOrigin {}
|
|
|
|
|
2014-07-31 16:45:29 -07:00
|
|
|
/// Set the appropriate linkage for an LLVM `ValueRef` (function or global).
|
|
|
|
/// If the `llval` is the direct translation of a specific Rust item, `id`
|
|
|
|
/// should be set to the `NodeId` of that item. (This mapping should be
|
|
|
|
/// 1-to-1, so monomorphizations and drop/visit glue should have `id` set to
|
2014-08-01 12:27:12 -07:00
|
|
|
/// `None`.) `llval_origin` indicates whether `llval` is the translation of an
|
|
|
|
/// item assigned to `ccx`'s compilation unit or an inlined copy of an item
|
|
|
|
/// assigned to a different compilation unit.
|
|
|
|
pub fn update_linkage(ccx: &CrateContext,
|
|
|
|
llval: ValueRef,
|
|
|
|
id: Option<ast::NodeId>,
|
|
|
|
llval_origin: ValueOrigin) {
|
|
|
|
match llval_origin {
|
|
|
|
InlinedCopy => {
|
|
|
|
// `llval` is a translation of an item defined in a separate
|
|
|
|
// compilation unit. This only makes sense if there are at least
|
|
|
|
// two compilation units.
|
|
|
|
assert!(ccx.sess().opts.cg.codegen_units > 1);
|
|
|
|
// `llval` is a copy of something defined elsewhere, so use
|
|
|
|
// `AvailableExternallyLinkage` to avoid duplicating code in the
|
|
|
|
// output.
|
|
|
|
llvm::SetLinkage(llval, llvm::AvailableExternallyLinkage);
|
|
|
|
return;
|
|
|
|
},
|
|
|
|
OriginalTranslation => {},
|
|
|
|
}
|
|
|
|
|
2014-11-29 16:41:21 -05:00
|
|
|
if let Some(id) = id {
|
|
|
|
let item = ccx.tcx().map.get(id);
|
|
|
|
if let ast_map::NodeItem(i) = item {
|
|
|
|
if let Some(name) = attr::first_attr_value_str_by_name(i.attrs[], "linkage") {
|
|
|
|
if let Some(linkage) = llvm_linkage_by_name(name.get()) {
|
|
|
|
llvm::SetLinkage(llval, linkage);
|
|
|
|
} else {
|
|
|
|
ccx.sess().span_fatal(i.span, "invalid linkage specified");
|
2014-11-11 20:22:41 -05:00
|
|
|
}
|
2014-11-29 16:41:21 -05:00
|
|
|
return;
|
2014-11-11 20:22:41 -05:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-07-31 16:45:29 -07:00
|
|
|
match id {
|
|
|
|
Some(id) if ccx.reachable().contains(&id) => {
|
|
|
|
llvm::SetLinkage(llval, llvm::ExternalLinkage);
|
|
|
|
},
|
|
|
|
_ => {
|
|
|
|
// `id` does not refer to an item in `ccx.reachable`.
|
|
|
|
if ccx.sess().opts.cg.codegen_units > 1 {
|
|
|
|
llvm::SetLinkage(llval, llvm::ExternalLinkage);
|
|
|
|
} else {
|
|
|
|
llvm::SetLinkage(llval, llvm::InternalLinkage);
|
|
|
|
}
|
|
|
|
},
|
2014-07-21 16:42:34 -07:00
|
|
|
}
|
|
|
|
}
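// Standalone sketch (not part of the original source): the linkage decision
// that `update_linkage` makes, reduced to plain data. An inlined copy always
// becomes available_externally; an explicit `#[linkage = "..."]` attribute
// wins next; otherwise reachable items (and everything when compiling with
// several codegen units) stay external and the rest can be internalized.
// The enum and inputs are hypothetical.
#[derive(Clone, Copy, PartialEq, Debug)]
enum SketchLinkage {
    External,
    Internal,
    AvailableExternally,
}

fn choose_linkage(explicit: Option<SketchLinkage>,
                  is_inlined_copy: bool,
                  is_reachable: bool,
                  codegen_units: u32) -> SketchLinkage {
    if is_inlined_copy {
        // A copy of an item owned by another compilation unit.
        return SketchLinkage::AvailableExternally;
    }
    if let Some(linkage) = explicit {
        return linkage;
    }
    if is_reachable || codegen_units > 1 {
        SketchLinkage::External
    } else {
        SketchLinkage::Internal
    }
}

fn main() {
    assert_eq!(choose_linkage(None, false, false, 1), SketchLinkage::Internal);
    assert_eq!(choose_linkage(None, false, true, 1), SketchLinkage::External);
    assert_eq!(choose_linkage(None, true, false, 4),
               SketchLinkage::AvailableExternally);
    println!("linkage sketch ok");
}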
|
|
|
|
|
2014-03-06 18:47:24 +02:00
|
|
|
pub fn trans_item(ccx: &CrateContext, item: &ast::Item) {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_item");
|
2014-07-21 16:42:34 -07:00
|
|
|
|
2014-08-01 12:27:12 -07:00
|
|
|
let from_external = ccx.external_srcs().borrow().contains_key(&item.id);
|
|
|
|
|
2013-03-20 01:17:42 -04:00
|
|
|
match item.node {
|
2014-05-16 10:15:33 -07:00
|
|
|
ast::ItemFn(ref decl, _fn_style, abi, ref generics, ref body) => {
|
2014-08-01 22:25:41 -06:00
|
|
|
if !generics.is_type_parameterized() {
|
2014-08-01 12:27:12 -07:00
|
|
|
let trans_everywhere = attr::requests_inline(item.attrs.as_slice());
|
|
|
|
// Ignore `trans_everywhere` for cross-crate inlined items
|
|
|
|
// (`from_external`). `trans_item` will be called once for each
|
|
|
|
// compilation unit that references the item, so it will still get
|
|
|
|
// translated everywhere it's needed.
|
|
|
|
for (ref ccx, is_origin) in ccx.maybe_iter(!from_external && trans_everywhere) {
|
|
|
|
let llfn = get_item_val(ccx, item.id);
|
|
|
|
if abi != Rust {
|
|
|
|
foreign::trans_rust_fn_with_foreign_abi(ccx,
|
|
|
|
&**decl,
|
|
|
|
&**body,
|
|
|
|
item.attrs.as_slice(),
|
|
|
|
llfn,
|
2014-11-06 09:24:44 +02:00
|
|
|
&Substs::trans_empty(),
|
2014-08-01 12:27:12 -07:00
|
|
|
item.id,
|
|
|
|
None);
|
|
|
|
} else {
|
|
|
|
trans_fn(ccx,
|
|
|
|
&**decl,
|
|
|
|
&**body,
|
|
|
|
llfn,
|
2014-11-06 09:24:44 +02:00
|
|
|
&Substs::trans_empty(),
|
2014-08-01 12:27:12 -07:00
|
|
|
item.id,
|
|
|
|
item.attrs.as_slice());
|
|
|
|
}
|
|
|
|
update_linkage(ccx,
|
|
|
|
llfn,
|
|
|
|
Some(item.id),
|
|
|
|
if is_origin { OriginalTranslation } else { InlinedCopy });
|
2014-08-01 22:25:41 -06:00
|
|
|
}
|
2010-09-22 17:05:38 -07:00
|
|
|
}
|
2014-08-12 09:19:47 -07:00
|
|
|
|
|
|
|
// Be sure to travel more than just one layer deep to catch nested
|
|
|
|
// items in blocks and such.
|
|
|
|
let mut v = TransItemVisitor{ ccx: ccx };
|
2014-09-12 13:10:30 +03:00
|
|
|
v.visit_block(&**body);
|
2011-07-27 14:19:39 +02:00
|
|
|
}
|
2014-12-10 06:15:06 -05:00
|
|
|
ast::ItemImpl(_, ref generics, _, _, ref impl_items) => {
|
2014-08-04 13:56:56 -07:00
|
|
|
meth::trans_impl(ccx,
|
|
|
|
item.ident,
|
|
|
|
impl_items.as_slice(),
|
|
|
|
generics,
|
|
|
|
item.id);
|
2011-12-16 11:37:38 +01:00
|
|
|
}
|
2014-01-09 15:05:33 +02:00
|
|
|
ast::ItemMod(ref m) => {
|
2014-07-21 16:42:34 -07:00
|
|
|
trans_mod(&ccx.rotate(), m);
|
2011-07-27 14:19:39 +02:00
|
|
|
}
|
2014-07-10 10:59:52 -07:00
|
|
|
ast::ItemEnum(ref enum_definition, _) => {
|
|
|
|
enum_variant_size_lint(ccx, enum_definition, item.span, item.id);
|
2011-07-27 14:19:39 +02:00
|
|
|
}
|
|
|
|
ast::ItemConst(_, ref expr) => {
|
|
|
|
// Recurse on the expression to catch items in blocks
|
|
|
|
let mut v = TransItemVisitor{ ccx: ccx };
|
|
|
|
v.visit_expr(&**expr);
|
|
|
|
}
|
2014-05-16 10:15:33 -07:00
|
|
|
ast::ItemStatic(_, m, ref expr) => {
|
2014-05-04 10:39:11 +02:00
|
|
|
// Recurse on the expression to catch items in blocks
|
|
|
|
let mut v = TransItemVisitor{ ccx: ccx };
|
2014-09-12 13:10:30 +03:00
|
|
|
v.visit_expr(&**expr);
|
2014-07-21 16:42:34 -07:00
|
|
|
|
|
|
|
consts::trans_static(ccx, m, item.id);
|
|
|
|
let g = get_item_val(ccx, item.id);
|
|
|
|
update_linkage(ccx, g, Some(item.id), OriginalTranslation);
|
2014-07-21 16:42:34 -07:00
|
|
|
|
2013-08-09 13:47:00 -07:00
|
|
|
// Do static_assert checking. It can't really be done much earlier
|
|
|
|
// because we need to get the value of the bool out of LLVM
|
2014-02-28 15:25:15 -08:00
|
|
|
if attr::contains_name(item.attrs.as_slice(), "static_assert") {
|
2013-09-02 03:45:37 +02:00
|
|
|
if m == ast::MutMutable {
|
2014-03-05 16:36:01 +02:00
|
|
|
ccx.sess().span_fatal(expr.span,
|
|
|
|
"cannot have static_assert on a mutable \
|
|
|
|
static");
|
2013-08-09 13:47:00 -07:00
|
|
|
}
|
2013-12-18 17:08:56 -08:00
|
|
|
|
2014-11-07 14:35:18 -05:00
|
|
|
let v = ccx.static_values().borrow()[item.id].clone();
|
2013-08-09 13:47:00 -07:00
|
|
|
unsafe {
|
2013-09-04 17:05:31 -04:00
|
|
|
if llvm::LLVMConstIntGetZExtValue(v) == 0 {
|
2014-03-05 16:36:01 +02:00
|
|
|
ccx.sess().span_fatal(expr.span, "static assertion failed");
|
2013-07-19 21:51:37 +10:00
|
|
|
}
|
2013-05-21 20:26:45 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
},
|
2014-01-09 15:05:33 +02:00
|
|
|
ast::ItemForeignMod(ref foreign_mod) => {
|
2013-05-21 15:25:44 -04:00
|
|
|
foreign::trans_foreign_mod(ccx, foreign_mod);
|
2011-11-10 09:14:53 -08:00
|
|
|
}
|
2014-01-09 15:05:33 +02:00
|
|
|
ast::ItemTrait(..) => {
|
2013-09-13 01:42:44 -07:00
|
|
|
// Inside of this trait definition, we won't be actually translating any
|
|
|
|
// functions, but the trait still needs to be walked. Otherwise default
|
|
|
|
// methods with items will not get translated and will cause ICEs when
|
|
|
|
// metadata time comes around.
|
2013-09-25 10:58:40 +02:00
|
|
|
let mut v = TransItemVisitor{ ccx: ccx };
|
2014-09-12 13:10:30 +03:00
|
|
|
visit::walk_item(&mut v, item);
|
2013-09-13 01:42:44 -07:00
|
|
|
}
|
2012-08-03 19:59:04 -07:00
|
|
|
_ => {/* fall through */ }
|
2010-09-22 17:05:38 -07:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-01-18 22:37:22 -08:00
|
|
|
// Translate a module. Doing this amounts to translating the items in the
|
2011-06-15 12:18:02 -07:00
|
|
|
// module; there ends up being no artifact (aside from linkage names) of
|
|
|
|
// separate modules in the compiled program. That's because modules exist
|
|
|
|
// only as a convenience for humans working with the code, to organize names
|
|
|
|
// and control visibility.
|
2014-03-06 18:47:24 +02:00
|
|
|
pub fn trans_mod(ccx: &CrateContext, m: &ast::Mod) {
|
2013-06-17 16:23:24 +12:00
|
|
|
let _icx = push_ctxt("trans_mod");
|
2013-08-03 12:45:23 -04:00
|
|
|
for item in m.items.iter() {
|
2014-05-16 10:15:33 -07:00
|
|
|
trans_item(ccx, &**item);
|
2012-09-18 21:41:37 -07:00
|
|
|
}
|
2010-09-22 17:05:38 -07:00
|
|
|
}
|
|
|
|
|
2014-05-22 16:57:53 -07:00
|
|
|
fn finish_register_fn(ccx: &CrateContext, sp: Span, sym: String, node_id: ast::NodeId,
|
2013-09-24 18:12:06 -04:00
|
|
|
llfn: ValueRef) {
|
2014-09-05 09:18:53 -07:00
|
|
|
ccx.item_symbols().borrow_mut().insert(node_id, sym);
|
2013-09-24 18:12:06 -04:00
|
|
|
|
2014-05-19 09:30:09 -07:00
|
|
|
// The stack exhaustion lang item shouldn't have a split stack because
|
|
|
|
// otherwise it would continue to be exhausted (bad), and both it and the
|
|
|
|
// eh_personality functions need to be externally linkable.
|
|
|
|
let def = ast_util::local_def(node_id);
|
2014-09-05 09:18:53 -07:00
|
|
|
if ccx.tcx().lang_items.stack_exhausted() == Some(def) {
|
2014-05-19 09:30:09 -07:00
|
|
|
unset_split_stack(llfn);
|
2014-07-07 17:58:01 -07:00
|
|
|
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
|
2014-05-19 09:30:09 -07:00
|
|
|
}
|
2014-09-05 09:18:53 -07:00
|
|
|
if ccx.tcx().lang_items.eh_personality() == Some(def) {
|
2014-07-07 17:58:01 -07:00
|
|
|
llvm::SetLinkage(llfn, llvm::ExternalLinkage);
|
2014-05-19 09:30:09 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
|
2014-05-02 15:26:45 -07:00
|
|
|
if is_entry_fn(ccx.sess(), node_id) {
|
2013-09-24 18:12:06 -04:00
|
|
|
create_entry_wrapper(ccx, sp, llfn);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
fn register_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
|
|
|
|
sp: Span,
|
|
|
|
sym: String,
|
|
|
|
node_id: ast::NodeId,
|
|
|
|
node_type: Ty<'tcx>)
|
|
|
|
-> ValueRef {
|
2014-10-31 10:51:16 +02:00
|
|
|
match node_type.sty {
|
2013-09-10 18:42:01 -04:00
|
|
|
ty::ty_bare_fn(ref f) => {
|
2014-05-28 22:26:56 -07:00
|
|
|
assert!(f.abi == Rust || f.abi == RustCall);
|
2013-09-10 18:42:01 -04:00
|
|
|
}
|
2014-10-09 15:17:22 -04:00
|
|
|
_ => panic!("expected bare rust fn")
|
2013-09-10 18:42:01 -04:00
|
|
|
};
|
|
|
|
|
2014-05-21 15:07:48 -04:00
|
|
|
let llfn = decl_rust_fn(ccx, node_type, sym.as_slice());
|
2013-09-24 18:12:06 -04:00
|
|
|
finish_register_fn(ccx, sp, sym, node_id, llfn);
|
2013-09-10 18:42:01 -04:00
|
|
|
llfn
|
2012-02-13 16:06:56 -08:00
|
|
|
}
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
pub fn get_fn_llvm_attributes<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_ty: Ty<'tcx>)
|
|
|
|
-> llvm::AttrBuilder {
|
2014-05-21 15:07:48 -04:00
|
|
|
use middle::ty::{BrAnon, ReLateBound};
|
|
|
|
|
2014-10-31 10:51:16 +02:00
|
|
|
let (fn_sig, abi, has_env) = match fn_ty.sty {
|
2014-05-28 22:26:56 -07:00
|
|
|
ty::ty_closure(ref f) => (f.sig.clone(), f.abi, true),
|
|
|
|
ty::ty_bare_fn(ref f) => (f.sig.clone(), f.abi, false),
|
2014-10-18 10:46:57 -07:00
|
|
|
ty::ty_unboxed_closure(closure_did, _, ref substs) => {
|
2014-09-05 09:18:53 -07:00
|
|
|
let unboxed_closures = ccx.tcx().unboxed_closures.borrow();
|
2014-10-14 23:05:01 -07:00
|
|
|
let ref function_type = (*unboxed_closures)[closure_did]
|
2014-08-21 11:25:47 -07:00
|
|
|
.closure_type;
|
|
|
|
|
2014-10-18 10:46:57 -07:00
|
|
|
(function_type.sig.subst(ccx.tcx(), substs), RustCall, true)
|
2014-05-28 22:26:56 -07:00
|
|
|
}
|
2014-08-21 11:25:47 -07:00
|
|
|
_ => ccx.sess().bug("expected closure or function.")
|
2014-05-21 15:07:48 -04:00
|
|
|
};
|
|
|
|
|
2014-08-21 11:25:47 -07:00
|
|
|
|
2014-07-25 16:06:44 -07:00
|
|
|
// Since index 0 is the return value of the llvm func, we start
|
|
|
|
// at either 1 or 2 depending on whether there's an env slot or not
|
|
|
|
let mut first_arg_offset = if has_env { 2 } else { 1 };
|
|
|
|
let mut attrs = llvm::AttrBuilder::new();
|
|
|
|
let ret_ty = fn_sig.output;
|
|
|
|
|
2014-08-21 11:25:47 -07:00
|
|
|
// These have an odd calling convention, so we need to manually
|
|
|
|
// unpack the input ty's
|
2014-10-31 10:51:16 +02:00
|
|
|
let input_tys = match fn_ty.sty {
|
2014-10-18 10:46:57 -07:00
|
|
|
ty::ty_unboxed_closure(_, _, _) => {
|
2014-08-21 11:25:47 -07:00
|
|
|
assert!(abi == RustCall);
|
|
|
|
|
2014-10-31 10:51:16 +02:00
|
|
|
match fn_sig.inputs[0].sty {
|
2014-08-21 11:25:47 -07:00
|
|
|
ty::ty_tup(ref inputs) => inputs.clone(),
|
|
|
|
_ => ccx.sess().bug("expected tuple'd inputs")
|
|
|
|
}
|
|
|
|
},
|
|
|
|
ty::ty_bare_fn(_) if abi == RustCall => {
|
2014-10-14 23:05:01 -07:00
|
|
|
let mut inputs = vec![fn_sig.inputs[0]];
|
2014-08-21 11:25:47 -07:00
|
|
|
|
2014-10-31 10:51:16 +02:00
|
|
|
match fn_sig.inputs[1].sty {
|
2014-10-14 23:05:01 -07:00
|
|
|
ty::ty_tup(ref t_in) => {
|
|
|
|
inputs.push_all(t_in.as_slice());
|
|
|
|
inputs
|
|
|
|
}
|
2014-08-21 11:25:47 -07:00
|
|
|
_ => ccx.sess().bug("expected tuple'd inputs")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
_ => fn_sig.inputs.clone()
|
|
|
|
};
|
2014-05-28 22:26:56 -07:00
|
|
|
|
2014-10-24 21:14:37 +02:00
|
|
|
if let ty::FnConverging(ret_ty) = ret_ty {
|
|
|
|
// A function pointer is called without the declaration
|
|
|
|
// available, so we have to apply any attributes with ABI
|
|
|
|
// implications directly to the call instruction. Right now,
|
|
|
|
// the only attribute we need to worry about is `sret`.
|
|
|
|
if type_of::return_uses_outptr(ccx, ret_ty) {
|
|
|
|
let llret_sz = llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));
|
|
|
|
|
|
|
|
// The outptr can be noalias and nocapture because it's entirely
|
|
|
|
// invisible to the program. We also know it's nonnull as well
|
|
|
|
// as how many bytes we can dereference
|
|
|
|
attrs.arg(1, llvm::StructRetAttribute)
|
|
|
|
.arg(1, llvm::NoAliasAttribute)
|
|
|
|
.arg(1, llvm::NoCaptureAttribute)
|
|
|
|
.arg(1, llvm::DereferenceableAttribute(llret_sz));
|
|
|
|
|
|
|
|
// Add one more since there's an outptr
|
|
|
|
first_arg_offset += 1;
|
|
|
|
} else {
|
|
|
|
// The `noalias` attribute on the return value is useful to a
|
|
|
|
// function ptr caller.
|
2014-10-31 10:51:16 +02:00
|
|
|
match ret_ty.sty {
|
2014-10-24 21:14:37 +02:00
|
|
|
// `~` pointer return values never alias because ownership
|
|
|
|
// is transferred
|
|
|
|
ty::ty_uniq(it) if !ty::type_is_sized(ccx.tcx(), it) => {}
|
|
|
|
ty::ty_uniq(_) => {
|
|
|
|
attrs.ret(llvm::NoAliasAttribute);
|
|
|
|
}
|
|
|
|
_ => {}
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
|
|
|
|
2014-10-24 21:14:37 +02:00
|
|
|
// We can also mark the return value as `dereferenceable` in certain cases
|
2014-10-31 10:51:16 +02:00
|
|
|
match ret_ty.sty {
|
2014-10-24 21:14:37 +02:00
|
|
|
// These are not really pointers but pairs, (pointer, len)
|
|
|
|
ty::ty_uniq(it) |
|
|
|
|
ty::ty_rptr(_, ty::mt { ty: it, .. }) if !ty::type_is_sized(ccx.tcx(), it) => {}
|
|
|
|
ty::ty_uniq(inner) | ty::ty_rptr(_, ty::mt { ty: inner, .. }) => {
|
|
|
|
let llret_sz = llsize_of_real(ccx, type_of::type_of(ccx, inner));
|
|
|
|
attrs.ret(llvm::DereferenceableAttribute(llret_sz));
|
|
|
|
}
|
|
|
|
_ => {}
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
2014-03-16 09:29:05 +01:00
|
|
|
|
2014-11-29 16:41:21 -05:00
|
|
|
if let ty::ty_bool = ret_ty.sty {
|
|
|
|
attrs.ret(llvm::ZExtAttribute);
|
2014-03-16 09:29:05 +01:00
|
|
|
}
|
|
|
|
}
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
|
|
|
|
2014-08-21 11:25:47 -07:00
|
|
|
for (idx, &t) in input_tys.iter().enumerate().map(|(i, v)| (i + first_arg_offset, v)) {
|
2014-10-31 10:51:16 +02:00
|
|
|
match t.sty {
|
2014-05-26 03:20:31 -04:00
|
|
|
// this needs to be first to prevent fat pointers from falling through
|
|
|
|
_ if !type_is_immediate(ccx, t) => {
|
2014-07-25 18:33:10 -07:00
|
|
|
let llarg_sz = llsize_of_real(ccx, type_of::type_of(ccx, t));
|
|
|
|
|
2014-05-26 03:20:31 -04:00
|
|
|
// For non-immediate arguments the callee gets its own copy of
|
|
|
|
// the value on the stack, so there are no aliases. It's also
|
|
|
|
// program-invisible so can't possibly capture
|
2014-07-25 16:06:44 -07:00
|
|
|
attrs.arg(idx, llvm::NoAliasAttribute)
|
|
|
|
.arg(idx, llvm::NoCaptureAttribute)
|
2014-07-25 18:33:10 -07:00
|
|
|
.arg(idx, llvm::DereferenceableAttribute(llarg_sz));
|
2014-05-26 03:20:31 -04:00
|
|
|
}
|
2014-07-25 18:33:10 -07:00
|
|
|
|
2014-03-16 09:29:05 +01:00
|
|
|
ty::ty_bool => {
|
2014-07-25 16:06:44 -07:00
|
|
|
attrs.arg(idx, llvm::ZExtAttribute);
|
2014-03-16 09:29:05 +01:00
|
|
|
}
|
2014-07-25 18:33:10 -07:00
|
|
|
|
2014-05-21 15:07:48 -04:00
|
|
|
// `~` pointer parameters never alias because ownership is transferred
|
2014-07-25 18:33:10 -07:00
|
|
|
ty::ty_uniq(inner) => {
|
|
|
|
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, inner));
|
|
|
|
|
|
|
|
attrs.arg(idx, llvm::NoAliasAttribute)
|
|
|
|
.arg(idx, llvm::DereferenceableAttribute(llsz));
|
|
|
|
}
|
|
|
|
|
2014-05-21 15:07:48 -04:00
|
|
|
// `&mut` pointer parameters never alias other parameters, or mutable global data
|
2014-08-15 12:34:28 -04:00
|
|
|
//
|
|
|
|
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as both
|
|
|
|
// `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely on
|
|
|
|
// memory dependencies rather than pointer equality
|
2014-07-25 07:29:12 +02:00
|
|
|
ty::ty_rptr(b, mt) if mt.mutbl == ast::MutMutable ||
|
|
|
|
!ty::type_contents(ccx.tcx(), mt.ty).interior_unsafe() => {
|
2014-07-25 18:33:10 -07:00
|
|
|
|
|
|
|
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
|
2014-07-25 16:06:44 -07:00
|
|
|
attrs.arg(idx, llvm::NoAliasAttribute)
|
2014-07-25 18:33:10 -07:00
|
|
|
.arg(idx, llvm::DereferenceableAttribute(llsz));
|
|
|
|
|
2014-08-15 12:34:28 -04:00
|
|
|
if mt.mutbl == ast::MutImmutable {
|
|
|
|
attrs.arg(idx, llvm::ReadOnlyAttribute);
|
|
|
|
}
|
|
|
|
|
2014-11-29 16:41:21 -05:00
|
|
|
if let ReLateBound(_, BrAnon(_)) = b {
|
|
|
|
attrs.arg(idx, llvm::NoCaptureAttribute);
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
|
|
|
}
|
2014-07-25 18:33:10 -07:00
|
|
|
|
2014-05-21 15:07:48 -04:00
|
|
|
// When a reference in an argument has no named lifetime, it's impossible for that
|
|
|
|
// reference to escape this function (returned or stored beyond the call by a closure).
|
2014-07-25 18:33:10 -07:00
|
|
|
ty::ty_rptr(ReLateBound(_, BrAnon(_)), mt) => {
|
|
|
|
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
|
2014-07-25 16:06:44 -07:00
|
|
|
attrs.arg(idx, llvm::NoCaptureAttribute)
|
2014-07-25 18:33:10 -07:00
|
|
|
.arg(idx, llvm::DereferenceableAttribute(llsz));
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
2014-07-25 18:33:10 -07:00
|
|
|
|
|
|
|
// & pointer parameters are also never null and we know exactly how
|
|
|
|
// many bytes we can dereference
|
|
|
|
ty::ty_rptr(_, mt) => {
|
|
|
|
let llsz = llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
|
|
|
|
attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
2014-05-26 03:20:31 -04:00
|
|
|
_ => ()
|
2014-05-21 15:07:48 -04:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
attrs
|
|
|
|
}
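// Editorial summary of the attribute assignment above (a rough sketch for
// readers; the match arms in `get_fn_llvm_attributes` are authoritative):
//
//   * indirect (non-immediate) arguments get `noalias`, `nocapture` and
//     `dereferenceable(size)`, since the callee works on a program-invisible copy
//   * `bool` arguments and `bool` return values get `zext`
//   * `~T` (owned) arguments and thin `~T` return values get `noalias`, plus
//     `dereferenceable(size_of::<T>())` for thin pointers
//   * `&mut T`, and `&T` whose pointee contains no `UnsafeCell`, get `noalias`
//     and `dereferenceable`; the immutable case additionally gets `readonly`
//   * references with an anonymous (late-bound) lifetime get `nocapture`,
//     since they cannot escape the call
//   * when the return value goes through an out-pointer, argument 1 gets
//     `sret`, `noalias`, `nocapture` and `dereferenceable(return size)`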
|
|
|
|
|
2013-09-10 18:42:01 -04:00
|
|
|
// only use this for foreign function ABIs and glue, use `register_fn` for Rust functions
|
2014-03-06 18:47:24 +02:00
|
|
|
pub fn register_fn_llvmty(ccx: &CrateContext,
|
2013-08-31 18:13:04 +02:00
|
|
|
sp: Span,
|
2014-05-22 16:57:53 -07:00
|
|
|
sym: String,
|
2013-07-27 10:25:59 +02:00
|
|
|
node_id: ast::NodeId,
|
2014-07-07 17:58:01 -07:00
|
|
|
cc: llvm::CallConv,
|
2014-05-21 15:07:48 -04:00
|
|
|
llfty: Type) -> ValueRef {
|
2014-02-14 07:07:09 +02:00
|
|
|
debug!("register_fn_llvmty id={} sym={}", node_id, sym);
|
2013-01-09 23:17:57 -08:00
|
|
|
|
2014-11-04 07:57:21 -05:00
|
|
|
let llfn = decl_fn(ccx, sym.as_slice(), cc, llfty, ty::FnConverging(ty::mk_nil(ccx.tcx())));
|
2013-09-24 18:12:06 -04:00
|
|
|
finish_register_fn(ccx, sp, sym, node_id, llfn);
|
2012-03-06 11:33:25 +01:00
|
|
|
llfn
|
2011-08-12 18:43:44 -07:00
|
|
|
}
|
|
|
|
|
2013-07-27 10:25:59 +02:00
|
|
|
pub fn is_entry_fn(sess: &Session, node_id: ast::NodeId) -> bool {
|
2014-03-28 10:29:55 -07:00
|
|
|
match *sess.entry_fn.borrow() {
|
2013-04-09 20:16:06 +12:00
|
|
|
Some((entry_id, _)) => node_id == entry_id,
|
2013-01-11 18:08:01 +09:00
|
|
|
None => false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-06-29 16:26:56 -07:00
|
|
|
// Create a _rust_main(args: ~[str]) function which will be called from the
|
2011-08-17 20:31:55 -07:00
|
|
|
// runtime rust_start function
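// In rough pseudocode, the wrapper emitted below behaves like (a sketch only;
// the exact types come from the LLVM declarations built in create_entry_fn):
//
//     fn main(argc: int, argv: **u8) -> int {
//         // config::EntryMain: route through the `start` lang item
//         start(rust_main as *const u8, argc, argv)
//         // config::EntryStart: call rust_main(argc, argv) directly instead
//     }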
|
2014-03-06 18:47:24 +02:00
|
|
|
pub fn create_entry_wrapper(ccx: &CrateContext,
|
2013-08-31 18:13:04 +02:00
|
|
|
_sp: Span,
|
2013-08-03 19:59:46 -07:00
|
|
|
main_llfn: ValueRef) {
|
2014-03-05 16:36:01 +02:00
|
|
|
let et = ccx.sess().entry_type.get().unwrap();
|
2013-08-03 19:59:46 -07:00
|
|
|
match et {
|
2014-05-06 23:38:01 +12:00
|
|
|
config::EntryMain => {
|
2013-09-18 00:54:08 -04:00
|
|
|
create_entry_fn(ccx, main_llfn, true);
|
2013-08-03 19:59:46 -07:00
|
|
|
}
|
2014-05-06 23:38:01 +12:00
|
|
|
config::EntryStart => create_entry_fn(ccx, main_llfn, false),
|
|
|
|
config::EntryNone => {} // Do nothing.
|
2013-04-09 20:16:06 +12:00
|
|
|
}
|
2011-08-12 18:43:44 -07:00
|
|
|
|
2014-03-06 18:47:24 +02:00
|
|
|
fn create_entry_fn(ccx: &CrateContext,
|
2013-04-18 15:53:29 -07:00
|
|
|
rust_main: ValueRef,
|
|
|
|
use_start_lang_item: bool) {
|
2014-11-17 21:39:01 +13:00
|
|
|
let llfty = Type::func(&[ccx.int_type(), Type::i8p(ccx).ptr_to()],
|
2014-09-05 09:18:53 -07:00
|
|
|
&ccx.int_type());
|
2012-11-30 09:21:49 +09:00
|
|
|
|
2014-11-04 07:57:21 -05:00
|
|
|
let llfn = decl_cdecl_fn(ccx, "main", llfty, ty::mk_nil(ccx.tcx()));
|
2014-08-18 14:15:05 -04:00
|
|
|
|
|
|
|
// FIXME: #16581: Marking a symbol in the executable with `dllexport`
|
|
|
|
// linkage forces MinGW's linker to output a `.reloc` section for ASLR
|
2014-07-23 11:56:36 -07:00
|
|
|
if ccx.sess().target.target.options.is_like_windows {
|
2014-08-18 14:15:05 -04:00
|
|
|
unsafe { llvm::LLVMRustSetDLLExportStorageClass(llfn) }
|
|
|
|
}
|
|
|
|
|
2013-11-21 15:42:55 -08:00
|
|
|
let llbb = "top".with_c_str(|buf| {
|
2013-01-10 21:23:07 -08:00
|
|
|
unsafe {
|
2014-09-05 09:18:53 -07:00
|
|
|
llvm::LLVMAppendBasicBlockInContext(ccx.llcx(), llfn, buf)
|
2013-01-10 21:23:07 -08:00
|
|
|
}
|
2013-11-21 15:42:55 -08:00
|
|
|
});
|
2014-09-05 09:18:53 -07:00
|
|
|
let bld = ccx.raw_builder();
|
2013-01-10 21:23:07 -08:00
|
|
|
unsafe {
|
|
|
|
llvm::LLVMPositionBuilderAtEnd(bld, llbb);
|
2013-04-09 20:16:06 +12:00
|
|
|
|
2013-04-18 15:53:29 -07:00
|
|
|
let (start_fn, args) = if use_start_lang_item {
|
2014-09-05 09:18:53 -07:00
|
|
|
let start_def_id = match ccx.tcx().lang_items.require(StartFnLangItem) {
|
2013-07-15 20:42:13 -07:00
|
|
|
Ok(id) => id,
|
2014-05-09 18:45:36 -07:00
|
|
|
Err(s) => { ccx.sess().fatal(s.as_slice()); }
|
2013-07-15 20:42:13 -07:00
|
|
|
};
|
2014-02-05 22:15:24 +01:00
|
|
|
let start_fn = if start_def_id.krate == ast::LOCAL_CRATE {
|
2013-05-25 18:18:52 -07:00
|
|
|
get_item_val(ccx, start_def_id.node)
|
2013-04-18 15:53:29 -07:00
|
|
|
} else {
|
2014-03-15 22:29:34 +02:00
|
|
|
let start_fn_type = csearch::get_type(ccx.tcx(),
|
2013-07-15 20:42:13 -07:00
|
|
|
start_def_id).ty;
|
2013-04-18 15:53:29 -07:00
|
|
|
trans_external_path(ccx, start_def_id, start_fn_type)
|
|
|
|
};
|
|
|
|
|
|
|
|
let args = {
|
2013-11-21 15:42:55 -08:00
|
|
|
let opaque_rust_main = "rust_main".with_c_str(|buf| {
|
2014-03-15 22:29:34 +02:00
|
|
|
llvm::LLVMBuildPointerCast(bld, rust_main, Type::i8p(ccx).to_ref(), buf)
|
2013-11-21 15:42:55 -08:00
|
|
|
});
|
2013-04-18 15:53:29 -07:00
|
|
|
|
2014-03-04 10:02:49 -08:00
|
|
|
vec!(
|
2013-04-18 15:53:29 -07:00
|
|
|
opaque_rust_main,
|
2014-05-28 22:26:56 -07:00
|
|
|
get_param(llfn, 0),
|
|
|
|
get_param(llfn, 1)
|
2014-03-04 10:02:49 -08:00
|
|
|
)
|
2013-04-18 15:53:29 -07:00
|
|
|
};
|
|
|
|
(start_fn, args)
|
|
|
|
} else {
|
2013-10-21 13:08:31 -07:00
|
|
|
debug!("using user-defined start fn");
|
2014-03-04 10:02:49 -08:00
|
|
|
let args = vec!(
|
2014-05-28 22:26:56 -07:00
|
|
|
get_param(llfn, 0 as c_uint),
|
|
|
|
get_param(llfn, 1 as c_uint)
|
2014-03-04 10:02:49 -08:00
|
|
|
);
|
2013-04-18 15:53:29 -07:00
|
|
|
|
|
|
|
(rust_main, args)
|
|
|
|
};
|
|
|
|
|
2014-03-08 21:36:22 +01:00
|
|
|
let result = llvm::LLVMBuildCall(bld,
|
|
|
|
start_fn,
|
|
|
|
args.as_ptr(),
|
|
|
|
args.len() as c_uint,
|
2013-12-18 01:49:31 +11:00
|
|
|
noname());
|
2013-07-07 13:30:48 -07:00
|
|
|
|
2013-01-10 21:23:07 -08:00
|
|
|
llvm::LLVMBuildRet(bld, result);
|
|
|
|
}
|
2011-10-20 13:48:10 +02:00
|
|
|
}
|
2011-02-28 17:33:46 -05:00
|
|
|
}
|
|
|
|
|
2014-09-29 22:11:30 +03:00
|
|
|
fn exported_name<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, id: ast::NodeId,
|
|
|
|
ty: Ty<'tcx>, attrs: &[ast::Attribute]) -> String {
|
2014-11-06 12:25:16 -05:00
|
|
|
match ccx.external_srcs().borrow().get(&id) {
|
2014-07-28 14:45:27 -07:00
|
|
|
Some(&did) => {
|
|
|
|
let sym = csearch::get_symbol(&ccx.sess().cstore, did);
|
|
|
|
debug!("found item {} in other crate...", sym);
|
|
|
|
return sym;
|
|
|
|
}
|
|
|
|
None => {}
|
|
|
|
}
|
|
|
|
|
2013-08-31 13:23:31 -04:00
|
|
|
match attr::first_attr_value_str_by_name(attrs, "export_name") {
|
|
|
|
// Use provided name
|
2014-05-25 03:17:19 -07:00
|
|
|
Some(name) => name.get().to_string(),
|
2013-08-31 13:23:31 -04:00
|
|
|
|
2014-11-06 09:32:37 -08:00
|
|
|
_ => ccx.tcx().map.with_path(id, |path| {
|
2014-02-14 07:07:09 +02:00
|
|
|
if attr::contains_name(attrs, "no_mangle") {
|
|
|
|
// Don't mangle
|
2014-06-21 03:39:03 -07:00
|
|
|
path.last().unwrap().to_string()
|
2014-02-14 07:07:09 +02:00
|
|
|
} else {
|
2014-05-19 09:30:09 -07:00
|
|
|
match weak_lang_items::link_name(attrs) {
|
2014-05-25 03:17:19 -07:00
|
|
|
Some(name) => name.get().to_string(),
|
2014-05-19 09:30:09 -07:00
|
|
|
None => {
|
|
|
|
// Usual name mangling
|
|
|
|
mangle_exported_name(ccx, path, ty, id)
|
|
|
|
}
|
|
|
|
}
|
2014-02-14 07:07:09 +02:00
|
|
|
}
|
|
|
|
})
|
2013-07-27 01:50:20 -04:00
|
|
|
}
|
2012-02-03 09:53:37 +01:00
|
|
|
}
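// Editorial note: the symbol chosen above follows this precedence (a summary
// of `exported_name` itself, not a separate mechanism):
//
//   1. an item inlined from another crate reuses that crate's symbol (csearch)
//   2. an explicit `#[export_name = "..."]` attribute wins next
//   3. `#[no_mangle]` uses the item's last path segment verbatim
//   4. weak lang items use their fixed link name
//   5. everything else goes through `mangle_exported_name`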
|
|
|
|
|
2014-09-23 00:14:46 -07:00
|
|
|
fn contains_null(s: &str) -> bool {
|
2014-09-23 12:54:16 -07:00
|
|
|
s.bytes().any(|b| b == 0)
|
2014-09-23 00:14:46 -07:00
|
|
|
}
|
|
|
|
|
2014-03-06 18:47:24 +02:00
|
|
|
pub fn get_item_val(ccx: &CrateContext, id: ast::NodeId) -> ValueRef {
|
2014-10-15 02:25:34 -04:00
|
|
|
debug!("get_item_val(id=`{}`)", id);
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-11-07 14:35:18 -05:00
|
|
|
match ccx.item_vals().borrow().get(&id).cloned() {
|
2014-03-20 19:49:20 -07:00
|
|
|
Some(v) => return v,
|
|
|
|
None => {}
|
|
|
|
}
|
2013-12-18 16:41:15 -08:00
|
|
|
|
2014-09-05 09:18:53 -07:00
|
|
|
let item = ccx.tcx().map.get(id);
|
2014-03-20 19:49:20 -07:00
|
|
|
let val = match item {
|
|
|
|
ast_map::NodeItem(i) => {
|
|
|
|
let ty = ty::node_id_to_type(ccx.tcx(), i.id);
|
2014-10-06 08:17:01 -07:00
|
|
|
let sym = || exported_name(ccx, id, ty, i.attrs.as_slice());
|
2014-03-20 19:49:20 -07:00
|
|
|
|
|
|
|
let v = match i.node {
|
2014-10-06 08:17:01 -07:00
|
|
|
ast::ItemStatic(_, _, ref expr) => {
|
2014-03-20 19:49:20 -07:00
|
|
|
// If this static came from an external crate, then
|
|
|
|
// we need to get the symbol from csearch instead of
|
|
|
|
// using the current crate's name/version
|
|
|
|
// information in the hash of the symbol
|
2014-10-06 08:17:01 -07:00
|
|
|
let sym = sym();
|
2014-03-20 19:49:20 -07:00
|
|
|
debug!("making {}", sym);
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
// We need the translated value here, because for enums the
|
|
|
|
// LLVM type is not fully determined by the Rust type.
|
2014-10-06 08:17:01 -07:00
|
|
|
let (v, ty) = consts::const_expr(ccx, &**expr);
|
|
|
|
ccx.static_values().borrow_mut().insert(id, v);
|
2014-03-20 19:49:20 -07:00
|
|
|
unsafe {
|
2014-10-08 23:20:18 +02:00
|
|
|
// boolean SSA values are i1, but they have to be stored in i8 slots,
|
|
|
|
// otherwise some LLVM optimization passes don't work as expected
|
|
|
|
let llty = if ty::type_is_bool(ty) {
|
|
|
|
llvm::LLVMInt8TypeInContext(ccx.llcx())
|
|
|
|
} else {
|
|
|
|
llvm::LLVMTypeOf(v)
|
|
|
|
};
|
2014-09-23 00:14:46 -07:00
|
|
|
if contains_null(sym.as_slice()) {
|
|
|
|
ccx.sess().fatal(
|
2014-10-06 08:17:01 -07:00
|
|
|
format!("Illegal null byte in export_name \
|
|
|
|
value: `{}`", sym).as_slice());
|
2014-09-23 00:14:46 -07:00
|
|
|
}
|
2014-11-27 14:10:25 -05:00
|
|
|
let g = sym.with_c_str(|buf| {
|
2014-09-05 09:18:53 -07:00
|
|
|
llvm::LLVMAddGlobal(ccx.llmod(), llty, buf)
|
2014-03-20 19:49:20 -07:00
|
|
|
});
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
if attr::contains_name(i.attrs.as_slice(),
|
|
|
|
"thread_local") {
|
2014-07-07 17:58:01 -07:00
|
|
|
llvm::set_thread_local(g, true);
|
2013-01-10 21:23:07 -08:00
|
|
|
}
|
2014-09-05 09:18:53 -07:00
|
|
|
ccx.item_symbols().borrow_mut().insert(i.id, sym);
|
2014-03-20 19:49:20 -07:00
|
|
|
g
|
2013-07-27 01:50:20 -04:00
|
|
|
}
|
2012-08-25 15:09:33 -07:00
|
|
|
}
|
2012-05-14 14:13:32 -07:00
|
|
|
|
2014-10-06 08:17:01 -07:00
|
|
|
ast::ItemConst(_, ref expr) => {
|
|
|
|
let (v, _) = consts::const_expr(ccx, &**expr);
|
|
|
|
ccx.const_values().borrow_mut().insert(id, v);
|
|
|
|
v
|
|
|
|
}
|
|
|
|
|
2014-05-06 18:43:56 -07:00
|
|
|
ast::ItemFn(_, _, abi, _, _) => {
|
2014-10-06 08:17:01 -07:00
|
|
|
let sym = sym();
|
2014-05-06 18:43:56 -07:00
|
|
|
let llfn = if abi == Rust {
|
2014-03-20 19:49:20 -07:00
|
|
|
register_fn(ccx, i.span, sym, i.id, ty)
|
|
|
|
} else {
|
|
|
|
foreign::register_rust_fn_with_foreign_abi(ccx,
|
|
|
|
i.span,
|
|
|
|
sym,
|
|
|
|
i.id)
|
|
|
|
};
|
2014-09-05 17:28:24 -07:00
|
|
|
set_llvm_fn_attrs(ccx, i.attrs.as_slice(), llfn);
|
2014-03-20 19:49:20 -07:00
|
|
|
llfn
|
2012-08-08 14:17:52 -07:00
|
|
|
}
|
2012-10-24 14:36:00 -07:00
|
|
|
|
2014-10-09 15:17:22 -04:00
|
|
|
_ => panic!("get_item_val: weird result in table")
|
2014-03-20 19:49:20 -07:00
|
|
|
};
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
match attr::first_attr_value_str_by_name(i.attrs.as_slice(),
|
|
|
|
"link_section") {
|
2014-09-23 00:14:46 -07:00
|
|
|
Some(sect) => {
|
|
|
|
if contains_null(sect.get()) {
|
|
|
|
ccx.sess().fatal(format!("Illegal null byte in link_section value: `{}`",
|
|
|
|
sect.get()).as_slice());
|
|
|
|
}
|
|
|
|
unsafe {
|
|
|
|
sect.get().with_c_str(|buf| {
|
|
|
|
llvm::LLVMSetSection(v, buf);
|
|
|
|
})
|
|
|
|
}
|
2014-03-20 19:49:20 -07:00
|
|
|
},
|
|
|
|
None => ()
|
|
|
|
}
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
v
|
|
|
|
}
|
|
|
|
|
2014-08-04 13:56:56 -07:00
|
|
|
ast_map::NodeTraitItem(trait_method) => {
|
|
|
|
debug!("get_item_val(): processing a NodeTraitItem");
|
2014-03-20 19:49:20 -07:00
|
|
|
match *trait_method {
|
2014-08-05 19:44:21 -07:00
|
|
|
ast::RequiredMethod(_) | ast::TypeTraitItem(_) => {
|
|
|
|
ccx.sess().bug("unexpected variant: required trait \
|
|
|
|
method in get_item_val()");
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
2014-09-07 20:09:06 +03:00
|
|
|
ast::ProvidedMethod(ref m) => {
|
|
|
|
register_method(ccx, id, &**m)
|
2012-10-24 14:36:00 -07:00
|
|
|
}
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
|
|
|
}
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-08-04 13:56:56 -07:00
|
|
|
ast_map::NodeImplItem(ii) => {
|
|
|
|
match *ii {
|
2014-09-07 20:09:06 +03:00
|
|
|
ast::MethodImplItem(ref m) => register_method(ccx, id, &**m),
|
2014-08-05 19:44:21 -07:00
|
|
|
ast::TypeImplItem(ref typedef) => {
|
|
|
|
ccx.sess().span_bug(typedef.span,
|
|
|
|
"unexpected variant: required impl \
|
|
|
|
method in get_item_val()")
|
|
|
|
}
|
2014-08-04 13:56:56 -07:00
|
|
|
}
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
|
|
|
|
|
|
|
ast_map::NodeForeignItem(ni) => {
|
|
|
|
match ni.node {
|
|
|
|
ast::ForeignItemFn(..) => {
|
2014-09-05 09:18:53 -07:00
|
|
|
let abi = ccx.tcx().map.get_foreign_abi(id);
|
2014-05-14 02:07:33 -04:00
|
|
|
let ty = ty::node_id_to_type(ccx.tcx(), ni.id);
|
2014-05-16 10:15:33 -07:00
|
|
|
let name = foreign::link_name(&*ni);
|
2014-07-23 11:56:36 -07:00
|
|
|
foreign::register_foreign_item_fn(ccx, abi, ty, name.get().as_slice())
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
|
|
|
ast::ForeignItemStatic(..) => {
|
2014-05-16 10:15:33 -07:00
|
|
|
foreign::register_static(ccx, &*ni)
|
2013-07-27 01:50:20 -04:00
|
|
|
}
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
|
|
|
}
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
ast_map::NodeVariant(ref v) => {
|
|
|
|
let llfn;
|
|
|
|
let args = match v.node.kind {
|
|
|
|
ast::TupleVariantKind(ref args) => args,
|
|
|
|
ast::StructVariantKind(_) => {
|
2014-10-09 15:17:22 -04:00
|
|
|
panic!("struct variant kind unexpected in get_item_val")
|
2014-03-20 19:49:20 -07:00
|
|
|
}
|
|
|
|
};
|
|
|
|
assert!(args.len() != 0u);
|
|
|
|
let ty = ty::node_id_to_type(ccx.tcx(), id);
|
2014-09-05 09:18:53 -07:00
|
|
|
let parent = ccx.tcx().map.get_parent(id);
|
|
|
|
let enm = ccx.tcx().map.expect_item(parent);
|
2014-03-20 19:49:20 -07:00
|
|
|
let sym = exported_name(ccx,
|
|
|
|
id,
|
|
|
|
ty,
|
|
|
|
enm.attrs.as_slice());
|
|
|
|
|
|
|
|
llfn = match enm.node {
|
|
|
|
ast::ItemEnum(_, _) => {
|
|
|
|
register_fn(ccx, (*v).span, sym, id, ty)
|
2013-07-27 01:50:20 -04:00
|
|
|
}
|
2014-10-09 15:17:22 -04:00
|
|
|
_ => panic!("NodeVariant, shouldn't happen")
|
2013-07-27 01:50:20 -04:00
|
|
|
};
|
2014-03-20 19:49:20 -07:00
|
|
|
set_inline_hint(llfn);
|
|
|
|
llfn
|
|
|
|
}
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
ast_map::NodeStructCtor(struct_def) => {
|
|
|
|
// Only register the constructor if this is a tuple-like struct.
|
|
|
|
let ctor_id = match struct_def.ctor_id {
|
|
|
|
None => {
|
|
|
|
ccx.sess().bug("attempt to register a constructor of \
|
|
|
|
a non-tuple-like struct")
|
|
|
|
}
|
|
|
|
Some(ctor_id) => ctor_id,
|
|
|
|
};
|
2014-09-05 09:18:53 -07:00
|
|
|
let parent = ccx.tcx().map.get_parent(id);
|
|
|
|
let struct_item = ccx.tcx().map.expect_item(parent);
|
2014-03-20 19:49:20 -07:00
|
|
|
let ty = ty::node_id_to_type(ccx.tcx(), ctor_id);
|
|
|
|
let sym = exported_name(ccx,
|
|
|
|
id,
|
|
|
|
ty,
|
|
|
|
struct_item.attrs
|
|
|
|
.as_slice());
|
|
|
|
let llfn = register_fn(ccx, struct_item.span,
|
|
|
|
sym, ctor_id, ty);
|
|
|
|
set_inline_hint(llfn);
|
|
|
|
llfn
|
|
|
|
}
|
2012-10-24 14:36:00 -07:00
|
|
|
|
2014-03-20 19:49:20 -07:00
|
|
|
ref variant => {
|
2014-10-15 02:25:34 -04:00
|
|
|
ccx.sess().bug(format!("get_item_val(): unexpected variant: {}",
|
2014-05-16 10:45:16 -07:00
|
|
|
variant).as_slice())
|
2012-03-20 13:19:33 +01:00
|
|
|
}
|
2014-03-20 19:49:20 -07:00
|
|
|
};
|
|
|
|
|
2014-07-21 16:42:34 -07:00
|
|
|
// All LLVM globals and functions are initially created as external-linkage
|
|
|
|
// declarations. If `trans_item`/`trans_fn` later turns the declaration
|
|
|
|
// into a definition, it adjusts the linkage then (using `update_linkage`).
|
|
|
|
//
|
|
|
|
// The exception is foreign items, which have their linkage set inside the
|
|
|
|
// call to `foreign::register_*` above. We don't touch the linkage after
|
|
|
|
// that (`foreign::trans_foreign_mod` doesn't adjust the linkage like the
|
|
|
|
// other item translation functions do).
|
2014-03-20 19:49:20 -07:00
|
|
|
|
2014-09-05 09:18:53 -07:00
|
|
|
ccx.item_vals().borrow_mut().insert(id, val);
|
2014-03-20 19:49:20 -07:00
|
|
|
val
|
2011-01-05 15:31:35 -08:00
|
|
|
}
|
|
|
|
|
2014-03-06 18:47:24 +02:00
|
|
|
fn register_method(ccx: &CrateContext, id: ast::NodeId,
|
2014-01-11 16:39:32 +02:00
|
|
|
m: &ast::Method) -> ValueRef {
|
2014-03-15 22:29:34 +02:00
|
|
|
let mty = ty::node_id_to_type(ccx.tcx(), id);
|
2013-06-21 20:28:33 +12:00
|
|
|
|
2014-02-28 15:25:15 -08:00
|
|
|
let sym = exported_name(ccx, id, mty, m.attrs.as_slice());
|
2013-07-27 01:50:20 -04:00
|
|
|
|
2014-01-27 14:18:36 +02:00
|
|
|
let llfn = register_fn(ccx, m.span, sym, id, mty);
|
2014-09-05 17:28:24 -07:00
|
|
|
set_llvm_fn_attrs(ccx, m.attrs.as_slice(), llfn);
|
2012-08-13 16:29:40 -07:00
|
|
|
llfn
|
|
|
|
}
|
|
|
|
|
2014-04-22 15:56:37 +03:00
|
|
|
pub fn crate_ctxt_to_encode_parms<'a, 'tcx>(cx: &'a SharedCrateContext<'tcx>,
|
|
|
|
ie: encoder::EncodeInlinedItem<'a>)
|
|
|
|
-> encoder::EncodeParams<'a, 'tcx> {
|
|
|
|
encoder::EncodeParams {
|
|
|
|
diag: cx.sess().diagnostic(),
|
|
|
|
tcx: cx.tcx(),
|
|
|
|
reexports2: cx.exp_map2(),
|
|
|
|
item_symbols: cx.item_symbols(),
|
|
|
|
link_meta: cx.link_meta(),
|
|
|
|
cstore: &cx.sess().cstore,
|
|
|
|
encode_inlined_item: ie,
|
|
|
|
reachable: cx.reachable(),
|
|
|
|
}
|
2013-06-13 19:19:50 +12:00
|
|
|
}
|
|
|
|
|
2014-07-21 16:42:34 -07:00
|
|
|
pub fn write_metadata(cx: &SharedCrateContext, krate: &ast::Crate) -> Vec<u8> {
|
2014-01-24 21:00:31 -08:00
|
|
|
use flate;
|
Store metadata separately in rlib files
Right now, whenever an rlib file is linked against, all of the metadata from
the rlib is pulled into the final staticlib or binary. The reason for this is
that the metadata is currently stored in a section of the object file. Note
that this is intentional for dynamic libraries, so that the metadata is
distributed bundled with the dynamic library itself.

This commit alters the situation for rlib libraries to instead store the
metadata in a separate file in the archive. In doing so, when the archive is
passed to the linker, none of the metadata will get pulled into the resulting
executable. Furthermore, the metadata file is skipped when assembling rlibs
into an archive.

The snag in this implementation comes with multiple output formats. When
generating a dylib, the metadata needs to be in the object file, but when
generating an rlib it needs to be separate. In order to accomplish this, the
metadata variable is inserted into an entirely separate LLVM Module which is
then codegen'd into a different location (foo.metadata.o). This is then linked
into dynamic libraries and silently ignored for rlib files.

While changing how metadata is inserted into archives, I have also stopped
compressing metadata when it is inserted into rlib files. We have wanted to
stop compressing metadata, but the sections it creates in object files are
apparently too large. Thankfully, if it's just an arbitrary file it doesn't
matter how large it is.

I have seen massive reductions in executable sizes, as well as in staticlib
output sizes (to confirm that this is all working).
2013-12-03 17:41:01 -08:00
|
|
|
|
2014-05-02 15:26:45 -07:00
|
|
|
let any_library = cx.sess().crate_types.borrow().iter().any(|ty| {
|
2014-05-06 23:38:01 +12:00
|
|
|
*ty != config::CrateTypeExecutable
|
2014-05-02 15:26:45 -07:00
|
|
|
});
|
|
|
|
if !any_library {
|
2014-03-04 10:02:49 -08:00
|
|
|
return Vec::new()
|
2013-12-22 14:40:03 -08:00
|
|
|
}
|
2013-06-13 19:19:50 +12:00
|
|
|
|
2014-02-14 07:07:09 +02:00
|
|
|
let encode_inlined_item: encoder::EncodeInlinedItem =
|
2014-07-29 17:06:37 -07:00
|
|
|
|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii);
|
2012-05-14 17:46:45 -07:00
|
|
|
|
2013-06-13 19:19:50 +12:00
|
|
|
let encode_parms = crate_ctxt_to_encode_parms(cx, encode_inlined_item);
|
2014-02-05 22:15:24 +01:00
|
|
|
let metadata = encoder::encode_metadata(encode_parms, krate);
|
2014-10-14 23:05:01 -07:00
|
|
|
let mut compressed = encoder::metadata_encoding_version.to_vec();
|
|
|
|
compressed.push_all(match flate::deflate_bytes(metadata.as_slice()) {
|
|
|
|
Some(compressed) => compressed,
|
|
|
|
None => cx.sess().fatal("failed to compress metadata"),
|
|
|
|
}.as_slice());
|
run optimization and codegen on worker threads
Refactor the code in `llvm::back` that invokes LLVM optimization and codegen
passes so that it can be called from worker threads. (Previously, it used
`&Session` extensively, and `Session` is not `Share`.) The new code can handle
multiple compilation units, by compiling each unit to `crate.0.o`, `crate.1.o`,
etc., and linking together all the `crate.N.o` files into a single `crate.o`
using `ld -r`. The later linking steps can then be run unchanged.

The new code preserves the behavior of `--emit`/`-o` when building a single
compilation unit. With multiple compilation units, the `--emit=asm/ir/bc`
options produce multiple files, so combinations like `--emit=ir -o foo.ll` will
not actually produce `foo.ll` (they instead produce several `foo.N.ll` files).

The new code supports `-Z lto` only when using a single compilation unit.
Compiling with multiple compilation units and `-Z lto` will produce an error.
(I can't think of any good reason to do such a thing.) Linking with `-Z lto`
against a library that was built as multiple compilation units will also fail,
because the rlib does not contain a `crate.bytecode.deflate` file. This could
be supported in the future by linking together the `crate.N.bc` files produced
when compiling the library into a single `crate.bc`, or by making the LTO code
support multiple `crate.N.bytecode.deflate` files.
2014-07-17 10:52:52 -07:00
|
|
|
let llmeta = C_bytes_in_context(cx.metadata_llcx(), compressed.as_slice());
|
2014-11-17 21:39:01 +13:00
|
|
|
let llconst = C_struct_in_context(cx.metadata_llcx(), &[llmeta], false);
|
2014-06-06 13:21:18 -07:00
|
|
|
let name = format!("rust_metadata_{}_{}",
|
2014-09-05 09:18:53 -07:00
|
|
|
cx.link_meta().crate_name,
|
|
|
|
cx.link_meta().crate_hash);
|
Implement LTO
This commit implements LTO for Rust, leveraging LLVM's passes. What this means
is:

* When compiling an rlib, in addition to inserting foo.o into the archive, also
  insert foo.bc (the LLVM bytecode) of the optimized module.

* When the compiler detects the -Z lto option, it will attempt to perform LTO
  on a staticlib or binary output. The compiler will emit an error if a dylib
  or rlib output is being generated.

* The actual act of performing LTO is as follows:

  1. Force all upstream libraries to have an rlib version available.
  2. Load the bytecode of each upstream library from the rlib.
  3. Link all this bytecode into the current LLVM module (just using LLVM
     APIs).
  4. Run an internalization pass which internalizes all symbols except those
     found reachable for the local crate of compilation.
  5. Run the LLVM LTO pass manager over this entire module.
  6a. If assembling an archive, then add all upstream rlibs into the output
      archive. This ignores all of the object/bitcode/metadata files rust
      generated and placed inside the rlibs.
  6b. If linking a binary, create copies of all upstream rlibs, remove the
      rust-generated object file, and then link everything as usual.

As I have explained in #10741, this process is excruciatingly slow, so this is
*not* turned on by default, and it is also why I have decided to hide it behind
a -Z flag for now. The good news is that the binary sizes are about as small as
they can be as a result of LTO, so it's definitely working.

Closes #10741
Closes #10740
2013-12-02 23:19:29 -08:00
|
|
|
let llglobal = name.with_c_str(|buf| {
|
2013-01-10 21:23:07 -08:00
|
|
|
unsafe {
|
2014-09-05 09:18:53 -07:00
|
|
|
llvm::LLVMAddGlobal(cx.metadata_llmod(), val_ty(llconst).to_ref(), buf)
|
2013-01-10 21:23:07 -08:00
|
|
|
}
|
2013-11-21 15:42:55 -08:00
|
|
|
});
|
2013-01-10 21:23:07 -08:00
|
|
|
unsafe {
|
|
|
|
llvm::LLVMSetInitializer(llglobal, llconst);
|
2014-07-23 11:56:36 -07:00
|
|
|
let name = loader::meta_section_name(cx.sess().target.target.options.is_like_osx);
|
|
|
|
name.with_c_str(|buf| {
|
2013-01-10 21:23:07 -08:00
|
|
|
llvm::LLVMSetSection(llglobal, buf)
|
2013-11-21 15:42:55 -08:00
|
|
|
});
|
2013-01-10 21:23:07 -08:00
|
|
|
}
|
2013-12-03 17:41:01 -08:00
|
|
|
return metadata;
|
2011-06-27 16:09:28 -07:00
|
|
|
}
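// Editorial note, summarizing the flow above: the crate metadata is encoded,
// prefixed with `metadata_encoding_version`, deflate-compressed, and emitted
// as a byte-array global named `rust_metadata_<crate>_<hash>` inside the
// dedicated metadata module, in the platform-specific metadata section. The
// uncompressed bytes are returned so they can be written out separately (see
// the "Store metadata separately in rlib files" note above).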
|
|
|
|
|
2014-08-01 10:29:44 -07:00
|
|
|
/// Find any symbols that are defined in one compilation unit, but not declared
|
|
|
|
/// in any other compilation unit. Give these symbols internal linkage.
|
|
|
|
fn internalize_symbols(cx: &SharedCrateContext, reachable: &HashSet<String>) {
|
|
|
|
use std::c_str::CString;
|
|
|
|
|
|
|
|
unsafe {
|
|
|
|
let mut declared = HashSet::new();
|
|
|
|
|
|
|
|
        let iter_globals = |llmod| {
            ValueIter {
                cur: llvm::LLVMGetFirstGlobal(llmod),
                step: llvm::LLVMGetNextGlobal,
            }
        };

        let iter_functions = |llmod| {
            ValueIter {
                cur: llvm::LLVMGetFirstFunction(llmod),
                step: llvm::LLVMGetNextFunction,
            }
        };

        // Collect all external declarations in all compilation units.
        for ccx in cx.iter() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                let linkage = llvm::LLVMGetLinkage(val);
                // We only care about external declarations (not definitions)
                // and available_externally definitions.
                if !(linkage == llvm::ExternalLinkage as c_uint &&
                     llvm::LLVMIsDeclaration(val) != 0) &&
                   !(linkage == llvm::AvailableExternallyLinkage as c_uint) {
                    continue
                }

                let name = CString::new(llvm::LLVMGetValueName(val), false);
                declared.insert(name);
            }
        }

        // Examine each external definition. If the definition is not used in
        // any other compilation unit, and is not reachable from other crates,
        // then give it internal linkage.
        for ccx in cx.iter() {
            for val in iter_globals(ccx.llmod()).chain(iter_functions(ccx.llmod())) {
                // We only care about external definitions.
                if !(llvm::LLVMGetLinkage(val) == llvm::ExternalLinkage as c_uint &&
                     llvm::LLVMIsDeclaration(val) == 0) {
                    continue
                }

                let name = CString::new(llvm::LLVMGetValueName(val), false);
                if !declared.contains(&name) &&
                   !reachable.contains(name.as_str().unwrap()) {
                    llvm::SetLinkage(val, llvm::InternalLinkage);
                }
            }
        }
    }

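    /// An iterator over the values of an LLVM module (its globals or its
    /// functions), driven by the raw `LLVMGetNext*` function supplied in
    /// `step`; see `iter_globals` and `iter_functions` above.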
    struct ValueIter {
        cur: ValueRef,
        step: unsafe extern "C" fn(ValueRef) -> ValueRef,
    }

    impl Iterator<ValueRef> for ValueIter {
        fn next(&mut self) -> Option<ValueRef> {
            let old = self.cur;
            if !old.is_null() {
                self.cur = unsafe {
                    let step: unsafe extern "C" fn(ValueRef) -> ValueRef =
                        mem::transmute_copy(&self.step);
                    step(old)
                };
                Some(old)
            } else {
                None
            }
        }
    }
}

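/// Translate the whole crate to LLVM IR: set up one `CrateContext` per
/// codegen unit, translate every item, emit the crate metadata, and return
/// the type context together with the translated modules.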
pub fn trans_crate<'tcx>(analysis: ty::CrateAnalysis<'tcx>)
                         -> (ty::ctxt<'tcx>, CrateTranslation) {
    let ty::CrateAnalysis { ty_cx: tcx, exp_map2, reachable, name, .. } = analysis;
    let krate = tcx.map.krate();

    // Before we touch LLVM, make sure that multithreading is enabled.
    unsafe {
        use std::sync::{Once, ONCE_INIT};
        static INIT: Once = ONCE_INIT;
        static mut POISONED: bool = false;
        INIT.doit(|| {
            if llvm::LLVMStartMultithreaded() != 1 {
                // use an extra bool so that every future attempt to use LLVM
                // can see that initialization failed, even though the Once
                // will not run again.
                POISONED = true;
            }
        });

        if POISONED {
            tcx.sess.bug("couldn't enable multi-threaded LLVM");
        }
    }

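    // Build the link metadata (crate name and hash) that is used when
    // mangling exported symbols.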
    let link_meta = link::build_link_meta(&tcx.sess, krate, name);

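    // One LLVM module is produced per codegen unit; symbols that turn out not
    // to be needed outside their own unit are given internal linkage again by
    // internalize_symbols() once translation is finished.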
    let codegen_units = tcx.sess.opts.cg.codegen_units;
    let shared_ccx = SharedCrateContext::new(link_meta.crate_name.as_slice(),
                                             codegen_units,
                                             tcx,
                                             exp_map2,
                                             Sha256::new(),
                                             link_meta.clone(),
                                             reachable);

    {
        let ccx = shared_ccx.get_ccx(0);

        // First, verify intrinsics.
        intrinsic::check_intrinsics(&ccx);

        // Next, translate the module.
        {
            let _icx = push_ctxt("text");
            trans_mod(&ccx, &krate.module);
        }
    }

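    // Emit any remaining type descriptors and finalize debuginfo for each
    // codegen unit.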
    for ccx in shared_ccx.iter() {
        glue::emit_tydescs(&ccx);
        if ccx.sess().opts.debuginfo != NoDebugInfo {
            debuginfo::finalize(&ccx);
        }
    }

    // Translate the metadata.
    let metadata = write_metadata(&shared_ccx, krate);

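    // Optionally print the statistics collected during translation.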
    if shared_ccx.sess().trans_stats() {
        let stats = shared_ccx.stats();
        println!("--- trans stats ---");
        println!("n_static_tydescs: {}", stats.n_static_tydescs.get());
        println!("n_glues_created: {}", stats.n_glues_created.get());
        println!("n_null_glues: {}", stats.n_null_glues.get());
        println!("n_real_glues: {}", stats.n_real_glues.get());

        println!("n_fns: {}", stats.n_fns.get());
        println!("n_monos: {}", stats.n_monos.get());
        println!("n_inlines: {}", stats.n_inlines.get());
        println!("n_closures: {}", stats.n_closures.get());
        println!("fn stats:");
        stats.fn_stats.borrow_mut().sort_by(|&(_, insns_a), &(_, insns_b)| {
            insns_b.cmp(&insns_a)
        });
        for tuple in stats.fn_stats.borrow().iter() {
            match *tuple {
                (ref name, insns) => {
                    println!("{} insns, {}", insns, *name);
                }
            }
        }
    }
    if shared_ccx.sess().count_llvm_insns() {
        for (k, v) in shared_ccx.stats().llvm_insns.borrow().iter() {
            println!("{:7} {}", *v, *k);
        }
    }

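    // Collect the translated modules; with multiple codegen units each one is
    // later codegen'd into its own object file.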
    let modules = shared_ccx.iter()
        .map(|ccx| ModuleTranslation { llcx: ccx.llcx(), llmod: ccx.llmod() })
        .collect();

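    // Gather the mangled symbols of every reachable local item; these must
    // survive both internalization and LTO.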
    let mut reachable: Vec<String> = shared_ccx.reachable().iter().filter_map(|id| {
        shared_ccx.item_symbols().borrow().get(id).map(|s| s.to_string())
    }).collect();

    // For the purposes of LTO, we add to the reachable set all of the upstream
    // reachable extern fns. These functions are all part of the public ABI of
    // the final product, so LTO needs to preserve them.
    shared_ccx.sess().cstore.iter_crate_data(|cnum, _| {
        let syms = csearch::get_reachable_extern_fns(&shared_ccx.sess().cstore, cnum);
        reachable.extend(syms.into_iter().map(|did| {
            csearch::get_symbol(&shared_ccx.sess().cstore, did)
        }));
    });

    // Make sure that some other crucial symbols are not eliminated from the
    // module, even when they are not otherwise reachable: the main function
    // and the curious rust_stack_exhausted symbol. The latter is required by
    // the libmorestack library that we link in, so we must ensure that it is
    // not internalized (if it is defined in the crate).
    reachable.push("main".to_string());
    reachable.push("rust_stack_exhausted".to_string());

    // referenced from .eh_frame section on some platforms
    reachable.push("rust_eh_personality".to_string());
    // referenced from rt/rust_try.ll
    reachable.push("rust_eh_personality_catch".to_string());

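    // When the crate was split across multiple codegen units, demote symbols
    // that turned out to be unit-local (and are not reachable from other
    // crates) back to internal linkage.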
    if codegen_units > 1 {
        internalize_symbols(&shared_ccx, &reachable.iter().map(|x| x.clone()).collect());
    }

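    // The crate metadata was emitted into its own LLVM module so that it can
    // be codegen'd to a separate object file: that object is linked into
    // dylibs but kept out of what rlib consumers pull in.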
    let metadata_module = ModuleTranslation {
        llcx: shared_ccx.metadata_llcx(),
        llmod: shared_ccx.metadata_llmod(),
    };
    let formats = shared_ccx.tcx().dependency_formats.borrow().clone();
    let no_builtins = attr::contains_name(krate.attrs.as_slice(), "no_builtins");

    let translation = CrateTranslation {
        modules: modules,
        metadata_module: metadata_module,
        link: link_meta,
        metadata: metadata,
        reachable: reachable,
        crate_formats: formats,
        no_builtins: no_builtins,
    };

    (shared_ccx.take_tcx(), translation)
}