
Auto merge of #32080 - eddyb:transcendent, r=nikomatsakis

Refactor call & function handling in trans, enable MIR bootstrap.

Non-Rust and Rust ABIs were combined into a common codepath, which means:
* The ugly `__rust_abi` "clown shoes" shim for C->Rust FFI is gone, fixes #10116.
* Methods, *including virtual ones*, support non-Rust ABIs, closes #30235.
* Non-Rust ABIs also pass fat pointers in two arguments; the result should be identical.
* Zero-sized types are never passed as arguments; again, behavior shouldn't change.

Additionally, MIR support for calling intrinsics (through old trans) was implemented.
Alongside assorted fixes, it enabled MIR to launch 🚀 and do a *complete* bootstrap.
To try it yourself, `./configure --enable-orbit` *or* `make RUSTFLAGS="-Z orbit"`.
Committed by bors on 2016-03-18 06:54:58 -07:00 as commit 235d77457d.
155 changed files with 4621 additions and 6389 deletions.

configure vendored
View file

@@ -607,6 +607,7 @@ opt dist-host-only 0 "only install bins for the host architecture"
 opt inject-std-version 1 "inject the current compiler version of libstd into programs"
 opt llvm-version-check 1 "check if the LLVM version is supported, build anyway"
 opt rustbuild 0 "use the rust and cargo based build system"
+opt orbit 0 "get MIR where it belongs - everywhere; most importantly, in orbit"

 # Optimization and debugging options. These may be overridden by the release channel, etc.
 opt_nosave optimize 1 "build optimized rust code"
@@ -713,6 +714,8 @@ if [ -n "$CFG_ENABLE_DEBUG_ASSERTIONS" ]; then putvar CFG_ENABLE_DEBUG_ASSERTION
 if [ -n "$CFG_ENABLE_DEBUGINFO" ]; then putvar CFG_ENABLE_DEBUGINFO; fi
 if [ -n "$CFG_ENABLE_DEBUG_JEMALLOC" ]; then putvar CFG_ENABLE_DEBUG_JEMALLOC; fi
+if [ -n "$CFG_ENABLE_ORBIT" ]; then putvar CFG_ENABLE_ORBIT; fi
+
 # A magic value that allows the compiler to use unstable features
 # during the bootstrap even when doing so would normally be an error
 # because of feature staging or because the build turns on

View file

@@ -134,6 +134,11 @@ ifdef CFG_ENABLE_DEBUGINFO
 CFG_RUSTC_FLAGS += -g
 endif

+ifdef CFG_ENABLE_ORBIT
+$(info cfg: launching MIR (CFG_ENABLE_ORBIT))
+CFG_RUSTC_FLAGS += -Z orbit
+endif
+
 ifdef SAVE_TEMPS
 CFG_RUSTC_FLAGS += --save-temps
 endif

View file

@@ -31,6 +31,8 @@ pub struct TestProps {
 pub pp_exact: Option<PathBuf>,
 // Modules from aux directory that should be compiled
 pub aux_builds: Vec<String> ,
+// Environment settings to use for compiling
+pub rustc_env: Vec<(String,String)> ,
 // Environment settings to use during execution
 pub exec_env: Vec<(String,String)> ,
 // Lines to check if they appear in the expected debugger output
@@ -77,6 +79,7 @@ pub fn load_props(testfile: &Path) -> TestProps {
 pp_exact: pp_exact,
 aux_builds: aux_builds,
 revisions: vec![],
+rustc_env: vec![],
 exec_env: exec_env,
 check_lines: check_lines,
 build_aux_docs: build_aux_docs,
@@ -153,10 +156,14 @@ pub fn load_props_into(props: &mut TestProps, testfile: &Path, cfg: Option<&str>
 props.aux_builds.push(ab);
 }

-if let Some(ee) = parse_exec_env(ln) {
+if let Some(ee) = parse_env(ln, "exec-env") {
 props.exec_env.push(ee);
 }
+
+if let Some(ee) = parse_env(ln, "rustc-env") {
+props.rustc_env.push(ee);
+}

 if let Some(cl) = parse_check_line(ln) {
 props.check_lines.push(cl);
 }
@@ -372,8 +379,8 @@ fn parse_pretty_compare_only(line: &str) -> bool {
 parse_name_directive(line, "pretty-compare-only")
 }

-fn parse_exec_env(line: &str) -> Option<(String, String)> {
-parse_name_value_directive(line, "exec-env").map(|nv| {
+fn parse_env(line: &str, name: &str) -> Option<(String, String)> {
+parse_name_value_directive(line, name).map(|nv| {
 // nv is either FOO or FOO=BAR
 let mut strs: Vec<String> = nv
 .splitn(2, '=')

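The hunk above generalizes `parse_exec_env` into `parse_env` and adds a `rustc-env` directive, so a test can set environment variables for the compiler invocation rather than only for the compiled binary. A minimal sketch of a test using both, assuming `rustc-env` keeps the same `// name:KEY=VALUE` header syntax as the existing `exec-env` (the variable names here are illustrative, not from this commit):

```rust
// rustc-env:TEST_COMPILE_VALUE=42
// exec-env:TEST_RUN_VALUE=42

fn main() {
    // rustc-env is visible while compiling, so env!() can read it.
    assert_eq!(env!("TEST_COMPILE_VALUE"), "42");
    // exec-env is only injected when the compiled test is executed.
    assert_eq!(std::env::var("TEST_RUN_VALUE").unwrap(), "42");
}
```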
View file

@@ -863,12 +863,28 @@ fn cleanup_debug_info_options(options: &Option<String>) -> Option<String> {
 "-g".to_owned(),
 "--debuginfo".to_owned()
 ];
-let new_options =
+let mut new_options =
 split_maybe_args(options).into_iter()
 .filter(|x| !options_to_remove.contains(x))
-.collect::<Vec<String>>()
-.join(" ");
-Some(new_options)
+.collect::<Vec<String>>();
+
+let mut i = 0;
+while i + 1 < new_options.len() {
+if new_options[i] == "-Z" {
+// FIXME #31005 MIR missing debuginfo currently.
+if new_options[i + 1] == "orbit" {
+// Remove "-Z" and "orbit".
+new_options.remove(i);
+new_options.remove(i);
+continue;
+}
+// Always skip over -Z's argument.
+i += 1;
+}
+i += 1;
+}
+Some(new_options.join(" "))
 }

 fn check_debugger_output(debugger_run_result: &ProcRes, check_lines: &[String]) {
@@ -1386,7 +1402,7 @@ fn compose_and_run_compiler(config: &Config, props: &TestProps,
 compose_and_run(config,
 testpaths,
 args,
-Vec::new(),
+props.rustc_env.clone(),
 &config.compile_lib_path,
 Some(aux_dir.to_str().unwrap()),
 input)

View file

@@ -72,6 +72,7 @@
 #![feature(reflect)]
 #![feature(unwind_attributes)]
 #![feature(repr_simd, platform_intrinsics)]
+#![feature(rustc_attrs)]
 #![feature(staged_api)]
 #![feature(unboxed_closures)]

View file

@@ -1008,6 +1008,7 @@ macro_rules! int_impl {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 #[inline]
+#[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD.
 pub fn pow(self, mut exp: u32) -> Self {
 let mut base = self;
 let mut acc = Self::one();
@@ -1049,6 +1050,7 @@ macro_rules! int_impl {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 #[inline]
+#[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD.
 pub fn abs(self) -> Self {
 if self.is_negative() {
 // Note that the #[inline] above means that the overflow
@@ -2013,6 +2015,7 @@ macro_rules! uint_impl {
 /// ```
 #[stable(feature = "rust1", since = "1.0.0")]
 #[inline]
+#[cfg_attr(not(stage0), rustc_no_mir)] // FIXME #29769 MIR overflow checking is TBD.
 pub fn pow(self, mut exp: u32) -> Self {
 let mut base = self;
 let mut acc = Self::one();

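The `#[cfg_attr(not(stage0), rustc_no_mir)]` lines above keep `pow` and `abs` off the MIR path until MIR gets overflow checking (#29769). The `not(stage0)` guard is needed because the stage0 snapshot compiler predates the `rustc_no_mir` attribute; `cfg_attr` only emits its inner attribute when the predicate holds. A small standalone sketch of that mechanism using an ordinary attribute (the function is made up for illustration):

```rust
// cfg_attr(PREDICATE, ATTR) expands to #[ATTR] only when PREDICATE is true,
// so a build run with `--cfg stage0` never sees the inner attribute at all.
#[cfg_attr(not(stage0), inline)]
fn double(x: u32) -> u32 {
    x.wrapping_mul(2)
}

fn main() {
    assert_eq!(double(21), 42);
}
```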
View file

@@ -22,6 +22,7 @@ use middle::def_id::DefId;
 use syntax::abi::Abi;
 use syntax::ast::{self, Name, NodeId, DUMMY_NODE_ID};
+use syntax::attr::ThinAttributesExt;
 use syntax::codemap::{Span, Spanned};
 use syntax::parse::token;
@@ -718,6 +719,8 @@ impl<'ast> Map<'ast> {
 Some(NodeTraitItem(ref ti)) => Some(&ti.attrs[..]),
 Some(NodeImplItem(ref ii)) => Some(&ii.attrs[..]),
 Some(NodeVariant(ref v)) => Some(&v.node.attrs[..]),
+Some(NodeExpr(ref e)) => Some(e.attrs.as_attr_slice()),
+Some(NodeStmt(ref s)) => Some(s.node.attrs()),
 // unit/tuple structs take the attributes straight from
 // the struct definition.
 Some(NodeStructCtor(_)) => {

View file

@@ -73,7 +73,6 @@ mod macros;
 pub mod diagnostics;

 pub mod back {
-pub use rustc_back::abi;
 pub use rustc_back::rpath;
 pub use rustc_back::svh;
 }

View file

@@ -354,19 +354,10 @@ impl<'a, 'tcx> CFGBuilder<'a, 'tcx> {
 self.straightline(expr, pred, Some(&**e).into_iter())
 }
-hir::ExprInlineAsm(ref inline_asm) => {
-let inputs = inline_asm.inputs.iter();
-let outputs = inline_asm.outputs.iter();
-let post_inputs = self.exprs(inputs.map(|a| {
-debug!("cfg::construct InlineAsm id:{} input:{:?}", expr.id, a);
-let &(_, ref expr) = a;
-&**expr
-}), pred);
-let post_outputs = self.exprs(outputs.map(|a| {
-debug!("cfg::construct InlineAsm id:{} output:{:?}", expr.id, a);
-&*a.expr
-}), post_inputs);
-self.add_ast_node(expr.id, &[post_outputs])
+hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
+let post_outputs = self.exprs(outputs.iter().map(|e| &**e), pred);
+let post_inputs = self.exprs(inputs.iter().map(|e| &**e), post_outputs);
+self.add_ast_node(expr.id, &[post_inputs])
 }

 hir::ExprClosure(..) |

View file

@@ -475,9 +475,9 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
 let def = self.tcx.def_map.borrow().get(&pat.id).map(|d| d.full_def());
 match def {
 Some(Def::AssociatedConst(did)) |
-Some(Def::Const(did)) => match lookup_const_by_id(self.tcx, did,
-Some(pat.id), None) {
-Some((const_expr, _const_ty)) => {
+Some(Def::Const(did)) => {
+let substs = Some(self.tcx.node_id_item_substs(pat.id).substs);
+if let Some((const_expr, _)) = lookup_const_by_id(self.tcx, did, substs) {
 const_expr_to_pat(self.tcx, const_expr, pat.span).map(|new_pat| {
 if let Some(ref mut renaming_map) = self.renaming_map {
@@ -487,14 +487,13 @@ impl<'a, 'tcx> Folder for StaticInliner<'a, 'tcx> {
 new_pat
 })
-}
-None => {
+} else {
 self.failed = true;
 span_err!(self.tcx.sess, pat.span, E0158,
 "statics cannot be referenced in patterns");
 pat
 }
-},
+}
 _ => noop_fold_pat(pat, self)
 }
 }

View file

@@ -19,7 +19,6 @@ use front::map::blocks::FnLikeNode;
 use middle::cstore::{self, CrateStore, InlinedItem};
 use middle::{infer, subst, traits};
 use middle::def::Def;
-use middle::subst::Subst;
 use middle::def_id::DefId;
 use middle::pat_util::def_to_path;
 use middle::ty::{self, Ty, TyCtxt};
@ -89,16 +88,13 @@ fn lookup_variant_by_id<'a>(tcx: &'a ty::TyCtxt,
} }
/// * `def_id` is the id of the constant. /// * `def_id` is the id of the constant.
/// * `maybe_ref_id` is the id of the expr referencing the constant. /// * `substs` is the monomorphized substitutions for the expression.
/// * `param_substs` is the monomorphization substitution for the expression.
/// ///
/// `maybe_ref_id` and `param_substs` are optional and are used for /// `substs` is optional and is used for associated constants.
/// finding substitutions in associated constants. This generally /// This generally happens in late/trans const evaluation.
/// happens in late/trans const evaluation.
pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>, pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
def_id: DefId, def_id: DefId,
maybe_ref_id: Option<ast::NodeId>, substs: Option<subst::Substs<'tcx>>)
param_substs: Option<&'tcx subst::Substs<'tcx>>)
-> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)> { -> Option<(&'tcx Expr, Option<ty::Ty<'tcx>>)> {
if let Some(node_id) = tcx.map.as_local_node_id(def_id) { if let Some(node_id) = tcx.map.as_local_node_id(def_id) {
match tcx.map.find(node_id) { match tcx.map.find(node_id) {
@ -111,28 +107,20 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
}, },
Some(ast_map::NodeTraitItem(ti)) => match ti.node { Some(ast_map::NodeTraitItem(ti)) => match ti.node {
hir::ConstTraitItem(_, _) => { hir::ConstTraitItem(_, _) => {
match maybe_ref_id { if let Some(substs) = substs {
// If we have a trait item, and we know the expression // If we have a trait item and the substitutions for it,
// that's the source of the obligation to resolve it,
// `resolve_trait_associated_const` will select an impl // `resolve_trait_associated_const` will select an impl
// or the default. // or the default.
Some(ref_id) => { let trait_id = tcx.trait_of_item(def_id).unwrap();
let trait_id = tcx.trait_of_item(def_id) resolve_trait_associated_const(tcx, ti, trait_id, substs)
.unwrap(); } else {
let mut substs = tcx.node_id_item_substs(ref_id)
.substs;
if let Some(param_substs) = param_substs {
substs = substs.subst(tcx, param_substs);
}
resolve_trait_associated_const(tcx, ti, trait_id, substs)
}
// Technically, without knowing anything about the // Technically, without knowing anything about the
// expression that generates the obligation, we could // expression that generates the obligation, we could
// still return the default if there is one. However, // still return the default if there is one. However,
// it's safer to return `None` than to return some value // it's safer to return `None` than to return some value
// that may differ from what you would get from // that may differ from what you would get from
// correctly selecting an impl. // correctly selecting an impl.
None => None None
} }
} }
_ => None _ => None
@ -153,7 +141,7 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
} }
None => {} None => {}
} }
let mut used_ref_id = false; let mut used_substs = false;
let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) { let expr_ty = match tcx.sess.cstore.maybe_get_item_ast(tcx, def_id) {
cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node { cstore::FoundAst::Found(&InlinedItem::Item(ref item)) => match item.node {
hir::ItemConst(ref ty, ref const_expr) => { hir::ItemConst(ref ty, ref const_expr) => {
@ -163,21 +151,15 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
}, },
cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node { cstore::FoundAst::Found(&InlinedItem::TraitItem(trait_id, ref ti)) => match ti.node {
hir::ConstTraitItem(_, _) => { hir::ConstTraitItem(_, _) => {
used_ref_id = true; used_substs = true;
match maybe_ref_id { if let Some(substs) = substs {
// As mentioned in the comments above for in-crate // As mentioned in the comments above for in-crate
// constants, we only try to find the expression for // constants, we only try to find the expression for
// a trait-associated const if the caller gives us // a trait-associated const if the caller gives us
// the expression that refers to it. // the substitutions for the reference to it.
Some(ref_id) => { resolve_trait_associated_const(tcx, ti, trait_id, substs)
let mut substs = tcx.node_id_item_substs(ref_id) } else {
.substs; None
if let Some(param_substs) = param_substs {
substs = substs.subst(tcx, param_substs);
}
resolve_trait_associated_const(tcx, ti, trait_id, substs)
}
None => None
} }
} }
_ => None _ => None
@ -190,10 +172,10 @@ pub fn lookup_const_by_id<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
}, },
_ => None _ => None
}; };
// If we used the reference expression, particularly to choose an impl // If we used the substitutions, particularly to choose an impl
// of a trait-associated const, don't cache that, because the next // of a trait-associated const, don't cache that, because the next
// lookup with the same def_id may yield a different result. // lookup with the same def_id may yield a different result.
if !used_ref_id { if !used_substs {
tcx.extern_const_statics tcx.extern_const_statics
.borrow_mut() .borrow_mut()
.insert(def_id, expr_ty.map(|(e, t)| (e.id, t))); .insert(def_id, expr_ty.map(|(e, t)| (e.id, t)));
@ -389,7 +371,8 @@ pub fn const_expr_to_pat(tcx: &TyCtxt, expr: &Expr, span: Span) -> P<hir::Pat> {
PatKind::Path(path.clone()), PatKind::Path(path.clone()),
Some(Def::Const(def_id)) | Some(Def::Const(def_id)) |
Some(Def::AssociatedConst(def_id)) => { Some(Def::AssociatedConst(def_id)) => {
let (expr, _ty) = lookup_const_by_id(tcx, def_id, Some(expr.id), None).unwrap(); let substs = Some(tcx.node_id_item_substs(expr.id).substs);
let (expr, _ty) = lookup_const_by_id(tcx, def_id, substs).unwrap();
return const_expr_to_pat(tcx, expr, span); return const_expr_to_pat(tcx, expr, span);
}, },
_ => unreachable!(), _ => unreachable!(),
@ -788,12 +771,12 @@ pub fn eval_const_expr_partial<'tcx>(tcx: &TyCtxt<'tcx>,
match opt_def { match opt_def {
Def::Const(def_id) | Def::Const(def_id) |
Def::AssociatedConst(def_id) => { Def::AssociatedConst(def_id) => {
let maybe_ref_id = if let ExprTypeChecked = ty_hint { let substs = if let ExprTypeChecked = ty_hint {
Some(e.id) Some(tcx.node_id_item_substs(e.id).substs)
} else { } else {
None None
}; };
if let Some((e, ty)) = lookup_const_by_id(tcx, def_id, maybe_ref_id, None) { if let Some((e, ty)) = lookup_const_by_id(tcx, def_id, substs) {
let item_hint = match ty { let item_hint = match ty {
Some(ty) => ty_hint.checked_or(ty), Some(ty) => ty_hint.checked_or(ty),
None => ty_hint, None => ty_hint,
@ -1077,7 +1060,7 @@ fn resolve_trait_associated_const<'a, 'tcx: 'a>(tcx: &'a TyCtxt<'tcx>,
traits::VtableImpl(ref impl_data) => { traits::VtableImpl(ref impl_data) => {
match tcx.associated_consts(impl_data.impl_def_id) match tcx.associated_consts(impl_data.impl_def_id)
.iter().find(|ic| ic.name == ti.name) { .iter().find(|ic| ic.name == ti.name) {
Some(ic) => lookup_const_by_id(tcx, ic.def_id, None, None), Some(ic) => lookup_const_by_id(tcx, ic.def_id, None),
None => match ti.node { None => match ti.node {
hir::ConstTraitItem(ref ty, Some(ref expr)) => { hir::ConstTraitItem(ref ty, Some(ref expr)) => {
Some((&*expr, ast_ty_to_prim_ty(tcx, ty))) Some((&*expr, ast_ty_to_prim_ty(tcx, ty)))

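The `lookup_const_by_id` change above drops the `maybe_ref_id`/`param_substs` pair in favour of a single optional `Substs`, pushing the substitution lookup to the caller. This is a non-compilable fragment that restates the call-site migration visible in this diff (variable names abbreviated):

```rust
// Before this commit: pass the referencing expression's NodeId and let
// lookup_const_by_id resolve (and possibly re-substitute) the substs itself.
//     lookup_const_by_id(tcx, def_id, Some(expr.id), None)

// After: resolve the monomorphized substs up front and hand them over;
// `None` still means "no substitutions known, don't select an impl".
let substs = Some(tcx.node_id_item_substs(expr.id).substs);
let konst = lookup_const_by_id(tcx, def_id, substs);
```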
View file

@@ -122,7 +122,7 @@ pub struct ChildItem {
 pub enum FoundAst<'ast> {
 Found(&'ast InlinedItem),
-FoundParent(DefId, &'ast InlinedItem),
+FoundParent(DefId, &'ast hir::Item),
 NotFound,
 }
@@ -182,7 +182,7 @@ pub trait CrateStore<'tcx> : Any {
 fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
 -> Option<DefId>;
 fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
--> ty::ImplOrTraitItem<'tcx>;
+-> Option<ty::ImplOrTraitItem<'tcx>>;
 // flags
 fn is_const_fn(&self, did: DefId) -> bool;
@@ -353,7 +353,7 @@ impl<'tcx> CrateStore<'tcx> for DummyCrateStore {
 fn trait_of_item(&self, tcx: &TyCtxt<'tcx>, def_id: DefId)
 -> Option<DefId> { unimplemented!() }
 fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
--> ty::ImplOrTraitItem<'tcx> { unimplemented!() }
+-> Option<ty::ImplOrTraitItem<'tcx>> { unimplemented!() }
 // flags
 fn is_const_fn(&self, did: DefId) -> bool { unimplemented!() }

View file

@@ -449,23 +449,20 @@ impl<'d,'t,'a,'tcx> ExprUseVisitor<'d,'t,'a,'tcx> {
 }
 }
-hir::ExprInlineAsm(ref ia) => {
-for &(_, ref input) in &ia.inputs {
-self.consume_expr(&input);
-}
-for output in &ia.outputs {
-if output.is_indirect {
-self.consume_expr(&output.expr);
+hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
+for (o, output) in ia.outputs.iter().zip(outputs) {
+if o.is_indirect {
+self.consume_expr(output);
 } else {
-self.mutate_expr(expr, &output.expr,
-if output.is_rw {
+self.mutate_expr(expr, output,
+if o.is_rw {
 MutateMode::WriteAndRead
 } else {
 MutateMode::JustWrite
 });
 }
 }
+self.consume_exprs(inputs);
 }

 hir::ExprBreak(..) |

View file

@ -1170,25 +1170,21 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> {
self.propagate_through_expr(&e, succ) self.propagate_through_expr(&e, succ)
} }
hir::ExprInlineAsm(ref ia) => { hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
let succ = ia.outputs.iter().zip(outputs).rev().fold(succ, |succ, (o, output)| {
let succ = ia.outputs.iter().rev().fold(succ, // see comment on lvalues
|succ, out| { // in propagate_through_lvalue_components()
// see comment on lvalues if o.is_indirect {
// in propagate_through_lvalue_components() self.propagate_through_expr(output, succ)
if out.is_indirect { } else {
self.propagate_through_expr(&out.expr, succ) let acc = if o.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE };
} else { let succ = self.write_lvalue(output, succ, acc);
let acc = if out.is_rw { ACC_WRITE|ACC_READ } else { ACC_WRITE }; self.propagate_through_lvalue_components(output, succ)
let succ = self.write_lvalue(&out.expr, succ, acc);
self.propagate_through_lvalue_components(&out.expr, succ)
}
} }
); });
// Inputs are executed first. Propagate last because of rev order // Inputs are executed first. Propagate last because of rev order
ia.inputs.iter().rev().fold(succ, |succ, &(_, ref expr)| { self.propagate_through_exprs(inputs, succ)
self.propagate_through_expr(&expr, succ)
})
} }
hir::ExprLit(..) => { hir::ExprLit(..) => {
@ -1425,17 +1421,17 @@ fn check_expr(this: &mut Liveness, expr: &Expr) {
intravisit::walk_expr(this, expr); intravisit::walk_expr(this, expr);
} }
hir::ExprInlineAsm(ref ia) => { hir::ExprInlineAsm(ref ia, ref outputs, ref inputs) => {
for &(_, ref input) in &ia.inputs { for input in inputs {
this.visit_expr(&input); this.visit_expr(input);
} }
// Output operands must be lvalues // Output operands must be lvalues
for out in &ia.outputs { for (o, output) in ia.outputs.iter().zip(outputs) {
if !out.is_indirect { if !o.is_indirect {
this.check_lvalue(&out.expr); this.check_lvalue(output);
} }
this.visit_expr(&out.expr); this.visit_expr(output);
} }
intravisit::walk_expr(this, expr); intravisit::walk_expr(this, expr);

View file

@@ -2182,7 +2182,8 @@ impl<'tcx> TyCtxt<'tcx> {
 pub fn impl_or_trait_item(&self, id: DefId) -> ImplOrTraitItem<'tcx> {
 lookup_locally_or_in_crate_store(
 "impl_or_trait_items", id, &self.impl_or_trait_items,
-|| self.sess.cstore.impl_or_trait_item(self, id))
+|| self.sess.cstore.impl_or_trait_item(self, id)
+.expect("missing ImplOrTraitItem in metadata"))
 }

 pub fn trait_item_def_ids(&self, id: DefId) -> Rc<Vec<ImplOrTraitItemId>> {
@@ -2502,10 +2503,12 @@ impl<'tcx> TyCtxt<'tcx> {
 /// ID of the impl that the method belongs to. Otherwise, return `None`.
 pub fn impl_of_method(&self, def_id: DefId) -> Option<DefId> {
 if def_id.krate != LOCAL_CRATE {
-return match self.sess.cstore.impl_or_trait_item(self, def_id).container() {
-TraitContainer(_) => None,
-ImplContainer(def_id) => Some(def_id),
-};
+return self.sess.cstore.impl_or_trait_item(self, def_id).and_then(|item| {
+match item.container() {
+TraitContainer(_) => None,
+ImplContainer(def_id) => Some(def_id),
+}
+});
 }
 match self.impl_or_trait_items.borrow().get(&def_id).cloned() {
 Some(trait_item) => {

View file

@@ -948,7 +948,7 @@ impl<'tcx> TyS<'tcx> {
 }
 }

-fn is_slice(&self) -> bool {
+pub fn is_slice(&self) -> bool {
 match self.sty {
 TyRawPtr(mt) | TyRef(_, mt) => match mt.ty.sty {
 TySlice(_) | TyStr => true,

View file

@ -14,6 +14,7 @@ use rustc_const_eval::{ConstUsize, ConstInt};
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::subst::Substs; use middle::subst::Substs;
use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty}; use middle::ty::{self, AdtDef, ClosureSubsts, FnOutput, Region, Ty};
use util::ppaux;
use rustc_back::slice; use rustc_back::slice;
use rustc_front::hir::InlineAsm; use rustc_front::hir::InlineAsm;
use std::ascii; use std::ascii;
@ -177,6 +178,10 @@ pub struct TempDecl<'tcx> {
#[derive(Clone, Debug, RustcEncodable, RustcDecodable)] #[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
pub struct ArgDecl<'tcx> { pub struct ArgDecl<'tcx> {
pub ty: Ty<'tcx>, pub ty: Ty<'tcx>,
/// If true, this argument is a tuple after monomorphization,
/// and has to be collected from multiple actual arguments.
pub spread: bool
} }
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
@ -675,7 +680,11 @@ pub enum Rvalue<'tcx> {
from_end: usize, from_end: usize,
}, },
InlineAsm(InlineAsm), InlineAsm {
asm: InlineAsm,
outputs: Vec<Lvalue<'tcx>>,
inputs: Vec<Operand<'tcx>>
}
} }
#[derive(Clone, Copy, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)] #[derive(Clone, Copy, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
@ -760,7 +769,9 @@ impl<'tcx> Debug for Rvalue<'tcx> {
BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b), BinaryOp(ref op, ref a, ref b) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a), UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
Box(ref t) => write!(fmt, "Box({:?})", t), Box(ref t) => write!(fmt, "Box({:?})", t),
InlineAsm(ref asm) => write!(fmt, "InlineAsm({:?})", asm), InlineAsm { ref asm, ref outputs, ref inputs } => {
write!(fmt, "asm!({:?} : {:?} : {:?})", asm, outputs, inputs)
}
Slice { ref input, from_start, from_end } => Slice { ref input, from_start, from_end } =>
write!(fmt, "{:?}[{:?}..-{:?}]", input, from_start, from_end), write!(fmt, "{:?}[{:?}..-{:?}]", input, from_start, from_end),
@ -775,8 +786,8 @@ impl<'tcx> Debug for Rvalue<'tcx> {
Aggregate(ref kind, ref lvs) => { Aggregate(ref kind, ref lvs) => {
use self::AggregateKind::*; use self::AggregateKind::*;
fn fmt_tuple(fmt: &mut Formatter, name: &str, lvs: &[Operand]) -> fmt::Result { fn fmt_tuple(fmt: &mut Formatter, lvs: &[Operand]) -> fmt::Result {
let mut tuple_fmt = fmt.debug_tuple(name); let mut tuple_fmt = fmt.debug_tuple("");
for lv in lvs { for lv in lvs {
tuple_fmt.field(lv); tuple_fmt.field(lv);
} }
@ -790,19 +801,24 @@ impl<'tcx> Debug for Rvalue<'tcx> {
match lvs.len() { match lvs.len() {
0 => write!(fmt, "()"), 0 => write!(fmt, "()"),
1 => write!(fmt, "({:?},)", lvs[0]), 1 => write!(fmt, "({:?},)", lvs[0]),
_ => fmt_tuple(fmt, "", lvs), _ => fmt_tuple(fmt, lvs),
} }
} }
Adt(adt_def, variant, _) => { Adt(adt_def, variant, substs) => {
let variant_def = &adt_def.variants[variant]; let variant_def = &adt_def.variants[variant];
let name = ty::tls::with(|tcx| tcx.item_path_str(variant_def.did));
try!(ppaux::parameterized(fmt, substs, variant_def.did,
ppaux::Ns::Value, &[],
|tcx| {
tcx.lookup_item_type(variant_def.did).generics
}));
match variant_def.kind() { match variant_def.kind() {
ty::VariantKind::Unit => write!(fmt, "{}", name), ty::VariantKind::Unit => Ok(()),
ty::VariantKind::Tuple => fmt_tuple(fmt, &name, lvs), ty::VariantKind::Tuple => fmt_tuple(fmt, lvs),
ty::VariantKind::Struct => { ty::VariantKind::Struct => {
let mut struct_fmt = fmt.debug_struct(&name); let mut struct_fmt = fmt.debug_struct("");
for (field, lv) in variant_def.fields.iter().zip(lvs) { for (field, lv) in variant_def.fields.iter().zip(lvs) {
struct_fmt.field(&field.name.as_str(), lv); struct_fmt.field(&field.name.as_str(), lv);
} }
@ -882,8 +898,10 @@ impl<'tcx> Debug for Literal<'tcx> {
fn fmt(&self, fmt: &mut Formatter) -> fmt::Result { fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
use self::Literal::*; use self::Literal::*;
match *self { match *self {
Item { def_id, .. } => Item { def_id, substs } => {
write!(fmt, "{}", item_path_str(def_id)), ppaux::parameterized(fmt, substs, def_id, ppaux::Ns::Value, &[],
|tcx| tcx.lookup_item_type(def_id).generics)
}
Value { ref value } => { Value { ref value } => {
try!(write!(fmt, "const ")); try!(write!(fmt, "const "));
fmt_const_val(fmt, value) fmt_const_val(fmt, value)

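The new `ArgDecl::spread` flag in the hunk above marks a formal argument that is declared as a single tuple but arrives as several actual arguments, which is what the `"rust-call"` ABI used by the closure traits does. A runnable, nightly-only sketch of the surface feature that makes the flag necessary (the type and values are made up for illustration):

```rust
#![feature(unboxed_closures, fn_traits)]

struct Adder;

impl FnOnce<(i32, i32)> for Adder {
    type Output = i32;
    // One declared argument (a tuple), but callers pass two values;
    // MIR has to "spread" them back into the tuple-typed local.
    extern "rust-call" fn call_once(self, (a, b): (i32, i32)) -> i32 {
        a + b
    }
}

fn main() {
    assert_eq!(Adder(2, 3), 5);
}
```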
View file

@ -16,6 +16,7 @@
use mir::repr::*; use mir::repr::*;
use middle::subst::{Subst, Substs}; use middle::subst::{Subst, Substs};
use middle::ty::{self, AdtDef, Ty, TyCtxt}; use middle::ty::{self, AdtDef, Ty, TyCtxt};
use middle::ty::fold::{TypeFoldable, TypeFolder, TypeVisitor};
use rustc_front::hir; use rustc_front::hir;
#[derive(Copy, Clone, Debug)] #[derive(Copy, Clone, Debug)]
@ -77,6 +78,29 @@ impl<'tcx> LvalueTy<'tcx> {
} }
} }
impl<'tcx> TypeFoldable<'tcx> for LvalueTy<'tcx> {
fn super_fold_with<F: TypeFolder<'tcx>>(&self, folder: &mut F) -> Self {
match *self {
LvalueTy::Ty { ty } => LvalueTy::Ty { ty: ty.fold_with(folder) },
LvalueTy::Downcast { adt_def, substs, variant_index } => {
let substs = substs.fold_with(folder);
LvalueTy::Downcast {
adt_def: adt_def,
substs: folder.tcx().mk_substs(substs),
variant_index: variant_index
}
}
}
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {
match *self {
LvalueTy::Ty { ty } => ty.visit_with(visitor),
LvalueTy::Downcast { substs, .. } => substs.visit_with(visitor)
}
}
}
impl<'tcx> Mir<'tcx> { impl<'tcx> Mir<'tcx> {
pub fn operand_ty(&self, pub fn operand_ty(&self,
tcx: &TyCtxt<'tcx>, tcx: &TyCtxt<'tcx>,
@ -196,7 +220,7 @@ impl<'tcx> Mir<'tcx> {
} }
} }
Rvalue::Slice { .. } => None, Rvalue::Slice { .. } => None,
Rvalue::InlineAsm(..) => None Rvalue::InlineAsm { .. } => None
} }
} }
} }

View file

@@ -261,7 +261,14 @@ macro_rules! make_mir_visitor {
 });
 }
-Rvalue::InlineAsm(_) => {
+Rvalue::InlineAsm { ref $($mutability)* outputs,
+ref $($mutability)* inputs, .. } => {
+for output in & $($mutability)* outputs[..] {
+self.visit_lvalue(output, LvalueContext::Store);
+}
+for input in & $($mutability)* inputs[..] {
+self.visit_operand(input);
+}
 }
 }
 }

View file

@@ -663,6 +663,8 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
 "print the result of the translation item collection pass"),
 mir_opt_level: Option<usize> = (None, parse_opt_uint,
 "set the MIR optimization level (0-3)"),
+orbit: bool = (false, parse_bool,
+"get MIR where it belongs - everywhere; most importantly, in orbit"),
 }

 pub fn default_lib_output() -> CrateType {

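With the `-Z orbit` switch registered above, the rest of the compiler can gate MIR-based translation on an ordinary boolean debugging option. A rough, non-compilable fragment of how such a check would look; only the field name comes from the `options!` invocation above, the surrounding code is assumed:

```rust
// DebuggingOptions is generated by the options! macro, so the new flag
// shows up as a plain bool on the session options:
if sess.opts.debugging_opts.orbit {
    // hand this function to MIR-based translation instead of the old path
}
```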
View file

@ -60,17 +60,37 @@ fn fn_sig(f: &mut fmt::Formatter,
} }
} }
fn parameterized<GG>(f: &mut fmt::Formatter, /// Namespace of the path given to parameterized to print.
substs: &subst::Substs, #[derive(Copy, Clone, PartialEq)]
did: DefId, pub enum Ns {
projections: &[ty::ProjectionPredicate], Type,
get_generics: GG) Value
-> fmt::Result }
pub fn parameterized<GG>(f: &mut fmt::Formatter,
substs: &subst::Substs,
did: DefId,
ns: Ns,
projections: &[ty::ProjectionPredicate],
get_generics: GG)
-> fmt::Result
where GG: for<'tcx> FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx> where GG: for<'tcx> FnOnce(&TyCtxt<'tcx>) -> ty::Generics<'tcx>
{ {
let (fn_trait_kind, verbose) = try!(ty::tls::with(|tcx| { if let (Ns::Value, Some(self_ty)) = (ns, substs.self_ty()) {
try!(write!(f, "<{} as ", self_ty));
}
let (fn_trait_kind, verbose, last_name) = try!(ty::tls::with(|tcx| {
let (did, last_name) = if ns == Ns::Value {
// Try to get the impl/trait parent, if this is an
// associated value item (method or constant).
tcx.trait_of_item(did).or_else(|| tcx.impl_of_method(did))
.map_or((did, None), |parent| (parent, Some(tcx.item_name(did))))
} else {
(did, None)
};
try!(write!(f, "{}", tcx.item_path_str(did))); try!(write!(f, "{}", tcx.item_path_str(did)));
Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose())) Ok((tcx.lang_items.fn_trait_kind(did), tcx.sess.verbose(), last_name))
})); }));
let mut empty = true; let mut empty = true;
@ -185,7 +205,28 @@ fn parameterized<GG>(f: &mut fmt::Formatter,
projection.ty)); projection.ty));
} }
start_or_continue(f, "", ">") try!(start_or_continue(f, "", ">"));
// For values, also print their name and type parameters.
if ns == Ns::Value {
if substs.self_ty().is_some() {
try!(write!(f, ">"));
}
if let Some(name) = last_name {
try!(write!(f, "::{}", name));
}
let tps = substs.types.get_slice(subst::FnSpace);
if !tps.is_empty() {
try!(write!(f, "::<{}", tps[0]));
for ty in &tps[1..] {
try!(write!(f, ", {}", ty));
}
try!(write!(f, ">"));
}
}
Ok(())
} }
fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter, fn in_binder<'tcx, T, U>(f: &mut fmt::Formatter,
@ -265,6 +306,7 @@ impl<'tcx> fmt::Display for TraitAndProjections<'tcx> {
let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self; let TraitAndProjections(ref trait_ref, ref projection_bounds) = *self;
parameterized(f, trait_ref.substs, parameterized(f, trait_ref.substs,
trait_ref.def_id, trait_ref.def_id,
Ns::Type,
projection_bounds, projection_bounds,
|tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone()) |tcx| tcx.lookup_trait_def(trait_ref.def_id).generics.clone())
} }
@ -769,7 +811,7 @@ impl fmt::Display for ty::Binder<ty::OutlivesPredicate<ty::Region, ty::Region>>
impl<'tcx> fmt::Display for ty::TraitRef<'tcx> { impl<'tcx> fmt::Display for ty::TraitRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
parameterized(f, self.substs, self.def_id, &[], parameterized(f, self.substs, self.def_id, Ns::Type, &[],
|tcx| tcx.lookup_trait_def(self.def_id).generics.clone()) |tcx| tcx.lookup_trait_def(self.def_id).generics.clone())
} }
} }
@ -821,19 +863,9 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> {
try!(write!(f, "extern {} ", bare_fn.abi)); try!(write!(f, "extern {} ", bare_fn.abi));
} }
try!(write!(f, "{}", bare_fn.sig.0)); try!(write!(f, "{} {{", bare_fn.sig.0));
try!(ty::tls::with(|tcx| { try!(parameterized(f, substs, def_id, Ns::Value, &[],
write!(f, " {{{}", tcx.item_path_str(def_id)) |tcx| tcx.lookup_item_type(def_id).generics));
}));
let tps = substs.types.get_slice(subst::FnSpace);
if tps.len() >= 1 {
try!(write!(f, "::<{}", tps[0]));
for &ty in &tps[1..] {
try!(write!(f, ", {}", ty));
}
try!(write!(f, ">"));
}
write!(f, "}}") write!(f, "}}")
} }
TyFnPtr(ref bare_fn) => { TyFnPtr(ref bare_fn) => {
@ -856,7 +888,7 @@ impl<'tcx> fmt::Display for ty::TypeVariants<'tcx> {
!tcx.tcache.borrow().contains_key(&def.did) { !tcx.tcache.borrow().contains_key(&def.did) {
write!(f, "{}<..>", tcx.item_path_str(def.did)) write!(f, "{}<..>", tcx.item_path_str(def.did))
} else { } else {
parameterized(f, substs, def.did, &[], parameterized(f, substs, def.did, Ns::Type, &[],
|tcx| tcx.lookup_item_type(def.did).generics) |tcx| tcx.lookup_item_type(def.did).generics)
} }
}) })

View file

@ -1,24 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub const BOX_FIELD_DROP_GLUE: usize = 1;
pub const BOX_FIELD_BODY: usize = 4;
/// The first half of a fat pointer.
/// - For a closure, this is the code address.
/// - For an object or trait instance, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;
/// The second half of a fat pointer.
/// - For a closure, this is the address of the environment.
/// - For an object or trait instance, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;

View file

@@ -48,7 +48,6 @@ extern crate rustc_llvm;
 extern crate rustc_front;
 #[macro_use] extern crate log;

-pub mod abi;
 pub mod tempdir;
 pub mod rpath;
 pub mod sha2;

View file

@@ -279,7 +279,7 @@ mod svh_visitor {
 ExprBreak(id) => SawExprBreak(id.map(|id| id.node.name.as_str())),
 ExprAgain(id) => SawExprAgain(id.map(|id| id.node.name.as_str())),
 ExprRet(..) => SawExprRet,
-ExprInlineAsm(ref asm) => SawExprInlineAsm(asm),
+ExprInlineAsm(ref a,_,_) => SawExprInlineAsm(a),
 ExprStruct(..) => SawExprStruct,
 ExprRepeat(..) => SawExprRepeat,
 }

View file

@ -1107,34 +1107,11 @@ pub fn noop_fold_expr<T: Folder>(Expr { id, node, span, attrs }: Expr, folder: &
respan(folder.new_span(label.span), folder.fold_ident(label.node)) respan(folder.new_span(label.span), folder.fold_ident(label.node))
})), })),
ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))), ExprRet(e) => ExprRet(e.map(|x| folder.fold_expr(x))),
ExprInlineAsm(InlineAsm { ExprInlineAsm(asm, outputs, inputs) => {
inputs, ExprInlineAsm(asm,
outputs, outputs.move_map(|x| folder.fold_expr(x)),
asm, inputs.move_map(|x| folder.fold_expr(x)))
asm_str_style, }
clobbers,
volatile,
alignstack,
dialect,
expn_id,
}) => ExprInlineAsm(InlineAsm {
inputs: inputs.move_map(|(c, input)| (c, folder.fold_expr(input))),
outputs: outputs.move_map(|out| {
InlineAsmOutput {
constraint: out.constraint,
expr: folder.fold_expr(out.expr),
is_rw: out.is_rw,
is_indirect: out.is_indirect,
}
}),
asm: asm,
asm_str_style: asm_str_style,
clobbers: clobbers,
volatile: volatile,
alignstack: alignstack,
dialect: dialect,
expn_id: expn_id,
}),
ExprStruct(path, fields, maybe_expr) => { ExprStruct(path, fields, maybe_expr) => {
ExprStruct(folder.fold_path(path), ExprStruct(folder.fold_path(path),
fields.move_map(|x| folder.fold_field(x)), fields.move_map(|x| folder.fold_field(x)),

View file

@ -39,7 +39,7 @@ use syntax::codemap::{self, Span, Spanned, DUMMY_SP, ExpnId};
use syntax::abi::Abi; use syntax::abi::Abi;
use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect}; use syntax::ast::{Name, NodeId, DUMMY_NODE_ID, TokenTree, AsmDialect};
use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem}; use syntax::ast::{Attribute, Lit, StrStyle, FloatTy, IntTy, UintTy, MetaItem};
use syntax::attr::ThinAttributes; use syntax::attr::{ThinAttributes, ThinAttributesExt};
use syntax::parse::token::InternedString; use syntax::parse::token::InternedString;
use syntax::ptr::P; use syntax::ptr::P;
@ -635,6 +635,16 @@ pub enum Stmt_ {
StmtSemi(P<Expr>, NodeId), StmtSemi(P<Expr>, NodeId),
} }
impl Stmt_ {
pub fn attrs(&self) -> &[Attribute] {
match *self {
StmtDecl(ref d, _) => d.node.attrs(),
StmtExpr(ref e, _) |
StmtSemi(ref e, _) => e.attrs.as_attr_slice(),
}
}
}
// FIXME (pending discussion of #1697, #2178...): local should really be // FIXME (pending discussion of #1697, #2178...): local should really be
// a refinement on pat. // a refinement on pat.
/// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;` /// Local represents a `let` statement, e.g., `let <pat>:<ty> = <expr>;`
@ -659,6 +669,15 @@ pub enum Decl_ {
DeclItem(ItemId), DeclItem(ItemId),
} }
impl Decl_ {
pub fn attrs(&self) -> &[Attribute] {
match *self {
DeclLocal(ref l) => l.attrs.as_attr_slice(),
DeclItem(_) => &[]
}
}
}
/// represents one arm of a 'match' /// represents one arm of a 'match'
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct Arm { pub struct Arm {
@ -793,8 +812,8 @@ pub enum Expr_ {
/// A `return`, with an optional value to be returned /// A `return`, with an optional value to be returned
ExprRet(Option<P<Expr>>), ExprRet(Option<P<Expr>>),
/// Output of the `asm!()` macro /// Inline assembly (from `asm!`), with its outputs and inputs.
ExprInlineAsm(InlineAsm), ExprInlineAsm(InlineAsm, Vec<P<Expr>>, Vec<P<Expr>>),
/// A struct literal expression. /// A struct literal expression.
/// ///
@ -978,7 +997,6 @@ pub enum Ty_ {
#[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)] #[derive(Clone, PartialEq, Eq, RustcEncodable, RustcDecodable, Hash, Debug)]
pub struct InlineAsmOutput { pub struct InlineAsmOutput {
pub constraint: InternedString, pub constraint: InternedString,
pub expr: P<Expr>,
pub is_rw: bool, pub is_rw: bool,
pub is_indirect: bool, pub is_indirect: bool,
} }
@ -988,7 +1006,7 @@ pub struct InlineAsm {
pub asm: InternedString, pub asm: InternedString,
pub asm_str_style: StrStyle, pub asm_str_style: StrStyle,
pub outputs: HirVec<InlineAsmOutput>, pub outputs: HirVec<InlineAsmOutput>,
pub inputs: HirVec<(InternedString, P<Expr>)>, pub inputs: HirVec<InternedString>,
pub clobbers: HirVec<InternedString>, pub clobbers: HirVec<InternedString>,
pub volatile: bool, pub volatile: bool,
pub alignstack: bool, pub alignstack: bool,

View file

@@ -798,12 +798,12 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr) {
 ExprRet(ref optional_expression) => {
 walk_list!(visitor, visit_expr, optional_expression);
 }
-ExprInlineAsm(ref ia) => {
-for &(_, ref input) in &ia.inputs {
-visitor.visit_expr(&input)
+ExprInlineAsm(_, ref outputs, ref inputs) => {
+for output in outputs {
+visitor.visit_expr(output)
 }
-for output in &ia.outputs {
-visitor.visit_expr(&output.expr)
+for input in inputs {
+visitor.visit_expr(input)
 }
 }
 }

View file

@ -1320,14 +1320,11 @@ pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P<hir::Expr> {
dialect, dialect,
expn_id, expn_id,
}) => hir::ExprInlineAsm(hir::InlineAsm { }) => hir::ExprInlineAsm(hir::InlineAsm {
inputs: inputs.iter() inputs: inputs.iter().map(|&(ref c, _)| c.clone()).collect(),
.map(|&(ref c, ref input)| (c.clone(), lower_expr(lctx, input)))
.collect(),
outputs: outputs.iter() outputs: outputs.iter()
.map(|out| { .map(|out| {
hir::InlineAsmOutput { hir::InlineAsmOutput {
constraint: out.constraint.clone(), constraint: out.constraint.clone(),
expr: lower_expr(lctx, &out.expr),
is_rw: out.is_rw, is_rw: out.is_rw,
is_indirect: out.is_indirect, is_indirect: out.is_indirect,
} }
@ -1340,7 +1337,8 @@ pub fn lower_expr(lctx: &LoweringContext, e: &Expr) -> P<hir::Expr> {
alignstack: alignstack, alignstack: alignstack,
dialect: dialect, dialect: dialect,
expn_id: expn_id, expn_id: expn_id,
}), }, outputs.iter().map(|out| lower_expr(lctx, &out.expr)).collect(),
inputs.iter().map(|&(_, ref input)| lower_expr(lctx, input)).collect()),
ExprKind::Struct(ref path, ref fields, ref maybe_expr) => { ExprKind::Struct(ref path, ref fields, ref maybe_expr) => {
hir::ExprStruct(lower_path(lctx, path), hir::ExprStruct(lower_path(lctx, path),
fields.iter().map(|x| lower_field(lctx, x)).collect(), fields.iter().map(|x| lower_field(lctx, x)).collect(),

View file

@ -1486,12 +1486,13 @@ impl<'a> State<'a> {
_ => (), _ => (),
} }
} }
hir::ExprInlineAsm(ref a) => { hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
try!(word(&mut self.s, "asm!")); try!(word(&mut self.s, "asm!"));
try!(self.popen()); try!(self.popen());
try!(self.print_string(&a.asm, a.asm_str_style)); try!(self.print_string(&a.asm, a.asm_str_style));
try!(self.word_space(":")); try!(self.word_space(":"));
let mut out_idx = 0;
try!(self.commasep(Inconsistent, &a.outputs, |s, out| { try!(self.commasep(Inconsistent, &a.outputs, |s, out| {
match out.constraint.slice_shift_char() { match out.constraint.slice_shift_char() {
Some(('=', operand)) if out.is_rw => { Some(('=', operand)) if out.is_rw => {
@ -1500,18 +1501,21 @@ impl<'a> State<'a> {
_ => try!(s.print_string(&out.constraint, ast::StrStyle::Cooked)), _ => try!(s.print_string(&out.constraint, ast::StrStyle::Cooked)),
} }
try!(s.popen()); try!(s.popen());
try!(s.print_expr(&out.expr)); try!(s.print_expr(&outputs[out_idx]));
try!(s.pclose()); try!(s.pclose());
out_idx += 1;
Ok(()) Ok(())
})); }));
try!(space(&mut self.s)); try!(space(&mut self.s));
try!(self.word_space(":")); try!(self.word_space(":"));
try!(self.commasep(Inconsistent, &a.inputs, |s, &(ref co, ref o)| { let mut in_idx = 0;
try!(self.commasep(Inconsistent, &a.inputs, |s, co| {
try!(s.print_string(&co, ast::StrStyle::Cooked)); try!(s.print_string(&co, ast::StrStyle::Cooked));
try!(s.popen()); try!(s.popen());
try!(s.print_expr(&o)); try!(s.print_expr(&inputs[in_idx]));
try!(s.pclose()); try!(s.pclose());
in_idx += 1;
Ok(()) Ok(())
})); }));
try!(space(&mut self.s)); try!(space(&mut self.s));

View file

@ -33,8 +33,6 @@
extern crate libc; extern crate libc;
#[macro_use] #[no_link] extern crate rustc_bitflags; #[macro_use] #[no_link] extern crate rustc_bitflags;
pub use self::OtherAttribute::*;
pub use self::SpecialAttribute::*;
pub use self::AttributeSet::*; pub use self::AttributeSet::*;
pub use self::IntPredicate::*; pub use self::IntPredicate::*;
pub use self::RealPredicate::*; pub use self::RealPredicate::*;
@ -133,6 +131,7 @@ pub enum DLLStorageClassTypes {
} }
bitflags! { bitflags! {
#[derive(Default, Debug)]
flags Attribute : u64 { flags Attribute : u64 {
const ZExt = 1 << 0, const ZExt = 1 << 0,
const SExt = 1 << 1, const SExt = 1 << 1,
@ -150,46 +149,88 @@ bitflags! {
const OptimizeForSize = 1 << 13, const OptimizeForSize = 1 << 13,
const StackProtect = 1 << 14, const StackProtect = 1 << 14,
const StackProtectReq = 1 << 15, const StackProtectReq = 1 << 15,
const Alignment = 1 << 16,
const NoCapture = 1 << 21, const NoCapture = 1 << 21,
const NoRedZone = 1 << 22, const NoRedZone = 1 << 22,
const NoImplicitFloat = 1 << 23, const NoImplicitFloat = 1 << 23,
const Naked = 1 << 24, const Naked = 1 << 24,
const InlineHint = 1 << 25, const InlineHint = 1 << 25,
const Stack = 7 << 26,
const ReturnsTwice = 1 << 29, const ReturnsTwice = 1 << 29,
const UWTable = 1 << 30, const UWTable = 1 << 30,
const NonLazyBind = 1 << 31, const NonLazyBind = 1 << 31,
// Some of these are missing from the LLVM C API, the rest are
// present, but commented out, and preceded by the following warning:
// FIXME: These attributes are currently not included in the C API as
// a temporary measure until the API/ABI impact to the C API is understood
// and the path forward agreed upon.
const SanitizeAddress = 1 << 32,
const MinSize = 1 << 33,
const NoDuplicate = 1 << 34,
const StackProtectStrong = 1 << 35,
const SanitizeThread = 1 << 36,
const SanitizeMemory = 1 << 37,
const NoBuiltin = 1 << 38,
const Returned = 1 << 39,
const Cold = 1 << 40,
const Builtin = 1 << 41,
const OptimizeNone = 1 << 42, const OptimizeNone = 1 << 42,
const InAlloca = 1 << 43,
const NonNull = 1 << 44,
const JumpTable = 1 << 45,
const Convergent = 1 << 46,
const SafeStack = 1 << 47,
const NoRecurse = 1 << 48,
const InaccessibleMemOnly = 1 << 49,
const InaccessibleMemOrArgMemOnly = 1 << 50,
} }
} }
#[derive(Copy, Clone, Default, Debug)]
#[repr(u64)] pub struct Attributes {
#[derive(Copy, Clone)] regular: Attribute,
pub enum OtherAttribute { dereferenceable_bytes: u64
// The following are not really exposed in
// the LLVM C api so instead to add these
// we call a wrapper function in RustWrapper
// that uses the C++ api.
SanitizeAddressAttribute = 1 << 32,
MinSizeAttribute = 1 << 33,
NoDuplicateAttribute = 1 << 34,
StackProtectStrongAttribute = 1 << 35,
SanitizeThreadAttribute = 1 << 36,
SanitizeMemoryAttribute = 1 << 37,
NoBuiltinAttribute = 1 << 38,
ReturnedAttribute = 1 << 39,
ColdAttribute = 1 << 40,
BuiltinAttribute = 1 << 41,
OptimizeNoneAttribute = 1 << 42,
InAllocaAttribute = 1 << 43,
NonNullAttribute = 1 << 44,
} }
#[derive(Copy, Clone)] impl Attributes {
pub enum SpecialAttribute { pub fn set(&mut self, attr: Attribute) -> &mut Self {
DereferenceableAttribute(u64) self.regular = self.regular | attr;
self
}
pub fn unset(&mut self, attr: Attribute) -> &mut Self {
self.regular = self.regular - attr;
self
}
pub fn set_dereferenceable(&mut self, bytes: u64) -> &mut Self {
self.dereferenceable_bytes = bytes;
self
}
pub fn unset_dereferenceable(&mut self) -> &mut Self {
self.dereferenceable_bytes = 0;
self
}
pub fn apply_llfn(&self, idx: usize, llfn: ValueRef) {
unsafe {
LLVMAddFunctionAttribute(llfn, idx as c_uint, self.regular.bits());
if self.dereferenceable_bytes != 0 {
LLVMAddDereferenceableAttr(llfn, idx as c_uint,
self.dereferenceable_bytes);
}
}
}
pub fn apply_callsite(&self, idx: usize, callsite: ValueRef) {
unsafe {
LLVMAddCallSiteAttribute(callsite, idx as c_uint, self.regular.bits());
if self.dereferenceable_bytes != 0 {
LLVMAddDereferenceableCallSiteAttr(callsite, idx as c_uint,
self.dereferenceable_bytes);
}
}
}
} }
#[repr(C)] #[repr(C)]
@ -199,91 +240,6 @@ pub enum AttributeSet {
FunctionIndex = !0 FunctionIndex = !0
} }
pub trait AttrHelper {
fn apply_llfn(&self, idx: c_uint, llfn: ValueRef);
fn apply_callsite(&self, idx: c_uint, callsite: ValueRef);
}
impl AttrHelper for Attribute {
fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
unsafe {
LLVMAddFunctionAttribute(llfn, idx, self.bits() as uint64_t);
}
}
fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
unsafe {
LLVMAddCallSiteAttribute(callsite, idx, self.bits() as uint64_t);
}
}
}
impl AttrHelper for OtherAttribute {
fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
unsafe {
LLVMAddFunctionAttribute(llfn, idx, *self as uint64_t);
}
}
fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
unsafe {
LLVMAddCallSiteAttribute(callsite, idx, *self as uint64_t);
}
}
}
impl AttrHelper for SpecialAttribute {
fn apply_llfn(&self, idx: c_uint, llfn: ValueRef) {
match *self {
DereferenceableAttribute(bytes) => unsafe {
LLVMAddDereferenceableAttr(llfn, idx, bytes as uint64_t);
}
}
}
fn apply_callsite(&self, idx: c_uint, callsite: ValueRef) {
match *self {
DereferenceableAttribute(bytes) => unsafe {
LLVMAddDereferenceableCallSiteAttr(callsite, idx, bytes as uint64_t);
}
}
}
}
pub struct AttrBuilder {
attrs: Vec<(usize, Box<AttrHelper+'static>)>
}
impl AttrBuilder {
pub fn new() -> AttrBuilder {
AttrBuilder {
attrs: Vec::new()
}
}
pub fn arg<T: AttrHelper + 'static>(&mut self, idx: usize, a: T) -> &mut AttrBuilder {
self.attrs.push((idx, box a as Box<AttrHelper+'static>));
self
}
pub fn ret<T: AttrHelper + 'static>(&mut self, a: T) -> &mut AttrBuilder {
self.attrs.push((ReturnIndex as usize, box a as Box<AttrHelper+'static>));
self
}
pub fn apply_llfn(&self, llfn: ValueRef) {
for &(idx, ref attr) in &self.attrs {
attr.apply_llfn(idx as c_uint, llfn);
}
}
pub fn apply_callsite(&self, callsite: ValueRef) {
for &(idx, ref attr) in &self.attrs {
attr.apply_callsite(idx as c_uint, callsite);
}
}
}
// enum for the LLVM IntPredicate type // enum for the LLVM IntPredicate type
#[derive(Copy, Clone)] #[derive(Copy, Clone)]
pub enum IntPredicate { pub enum IntPredicate {

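The `Attributes` struct introduced above folds the old `AttrBuilder`/`AttrHelper` machinery into one value carrying a regular `Attribute` bitmask plus a dereferenceable byte count. A short, non-compilable fragment showing the intended builder-style use against the API defined in this hunk; the index follows LLVM's convention of 0 for the return value and 1.. for parameters, and the concrete flags chosen are just examples:

```rust
let mut attrs = Attributes::default();
attrs.set(Attribute::NoCapture)      // plain bitflag attributes...
     .set(Attribute::NonNull)
     .set_dereferenceable(8);        // ...plus the special dereferenceable(N)

// Apply the same bundle to a function definition or to a call site.
attrs.apply_llfn(1, llfn);
attrs.apply_callsite(1, callsite);
```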
View file

@ -125,61 +125,51 @@ pub fn decode_inlined_item<'tcx>(cdata: &cstore::crate_metadata,
tcx: &TyCtxt<'tcx>, tcx: &TyCtxt<'tcx>,
parent_path: Vec<ast_map::PathElem>, parent_path: Vec<ast_map::PathElem>,
parent_def_path: ast_map::DefPath, parent_def_path: ast_map::DefPath,
par_doc: rbml::Doc, ast_doc: rbml::Doc,
orig_did: DefId) orig_did: DefId)
-> Result<&'tcx InlinedItem, (Vec<ast_map::PathElem>, -> &'tcx InlinedItem {
ast_map::DefPath)> { let mut path_as_str = None;
match par_doc.opt_child(c::tag_ast) { debug!("> Decoding inlined fn: {:?}::?",
None => Err((parent_path, parent_def_path)), {
Some(ast_doc) => { // Do an Option dance to use the path after it is moved below.
let mut path_as_str = None; let s = ast_map::path_to_string(parent_path.iter().cloned());
debug!("> Decoding inlined fn: {:?}::?", path_as_str = Some(s);
{ path_as_str.as_ref().map(|x| &x[..])
// Do an Option dance to use the path after it is moved below. });
let s = ast_map::path_to_string(parent_path.iter().cloned()); let mut ast_dsr = reader::Decoder::new(ast_doc);
path_as_str = Some(s); let from_id_range = Decodable::decode(&mut ast_dsr).unwrap();
path_as_str.as_ref().map(|x| &x[..]) let to_id_range = reserve_id_range(&tcx.sess, from_id_range);
}); let dcx = &DecodeContext {
let mut ast_dsr = reader::Decoder::new(ast_doc); cdata: cdata,
let from_id_range = Decodable::decode(&mut ast_dsr).unwrap(); tcx: tcx,
let to_id_range = reserve_id_range(&tcx.sess, from_id_range); from_id_range: from_id_range,
let dcx = &DecodeContext { to_id_range: to_id_range,
cdata: cdata, last_filemap_index: Cell::new(0)
tcx: tcx, };
from_id_range: from_id_range, let ii = ast_map::map_decoded_item(&dcx.tcx.map,
to_id_range: to_id_range, parent_path,
last_filemap_index: Cell::new(0) parent_def_path,
}; decode_ast(ast_doc),
let raw_ii = decode_ast(ast_doc); dcx);
let ii = ast_map::map_decoded_item(&dcx.tcx.map, let name = match *ii {
parent_path, InlinedItem::Item(ref i) => i.name,
parent_def_path, InlinedItem::Foreign(ref i) => i.name,
raw_ii, InlinedItem::TraitItem(_, ref ti) => ti.name,
dcx); InlinedItem::ImplItem(_, ref ii) => ii.name
let name = match *ii { };
InlinedItem::Item(ref i) => i.name, debug!("Fn named: {}", name);
InlinedItem::Foreign(ref i) => i.name, debug!("< Decoded inlined fn: {}::{}",
InlinedItem::TraitItem(_, ref ti) => ti.name, path_as_str.unwrap(),
InlinedItem::ImplItem(_, ref ii) => ii.name name);
}; region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii);
debug!("Fn named: {}", name); decode_side_tables(dcx, ast_doc);
debug!("< Decoded inlined fn: {}::{}", copy_item_types(dcx, ii, orig_did);
path_as_str.unwrap(), if let InlinedItem::Item(ref i) = *ii {
name); debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
region::resolve_inlined_item(&tcx.sess, &tcx.region_maps, ii); ::rustc_front::print::pprust::item_to_string(&i));
decode_side_tables(dcx, ast_doc);
copy_item_types(dcx, ii, orig_did);
match *ii {
InlinedItem::Item(ref i) => {
debug!(">>> DECODED ITEM >>>\n{}\n<<< DECODED ITEM <<<",
::rustc_front::print::pprust::item_to_string(&i));
}
_ => { }
}
Ok(ii)
}
} }
ii
} }
// ______________________________________________________________________ // ______________________________________________________________________

View file

@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
use astencode;
use cstore; use cstore;
use decoder; use decoder;
use encoder; use encoder;
@ -237,7 +236,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
} }
fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId) fn impl_or_trait_item(&self, tcx: &TyCtxt<'tcx>, def: DefId)
-> ty::ImplOrTraitItem<'tcx> -> Option<ty::ImplOrTraitItem<'tcx>>
{ {
let cdata = self.get_crate_data(def.krate); let cdata = self.get_crate_data(def.krate);
decoder::get_impl_or_trait_item( decoder::get_impl_or_trait_item(
@ -439,8 +438,7 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
-> FoundAst<'tcx> -> FoundAst<'tcx>
{ {
let cdata = self.get_crate_data(def.krate); let cdata = self.get_crate_data(def.krate);
let decode_inlined_item = Box::new(astencode::decode_inlined_item); decoder::maybe_get_item_ast(&cdata, tcx, def.index)
decoder::maybe_get_item_ast(&cdata, tcx, def.index, decode_inlined_item)
} }
fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId) fn maybe_get_item_mir(&self, tcx: &TyCtxt<'tcx>, def: DefId)
@ -509,21 +507,18 @@ impl<'tcx> CrateStore<'tcx> for cstore::CStore {
mir_map: &MirMap<'tcx>, mir_map: &MirMap<'tcx>,
krate: &hir::Crate) -> Vec<u8> krate: &hir::Crate) -> Vec<u8>
{ {
let encode_inlined_item: encoder::EncodeInlinedItem = let ecx = encoder::EncodeContext {
Box::new(|ecx, rbml_w, ii| astencode::encode_inlined_item(ecx, rbml_w, ii));
let encode_params = encoder::EncodeParams {
diag: tcx.sess.diagnostic(), diag: tcx.sess.diagnostic(),
tcx: tcx, tcx: tcx,
reexports: reexports, reexports: reexports,
item_symbols: item_symbols, item_symbols: item_symbols,
link_meta: link_meta, link_meta: link_meta,
cstore: self, cstore: self,
encode_inlined_item: encode_inlined_item,
reachable: reachable, reachable: reachable,
mir_map: mir_map, mir_map: mir_map,
type_abbrevs: RefCell::new(FnvHashMap()),
}; };
encoder::encode_metadata(encode_params, krate) encoder::encode_metadata(ecx, krate)
} }

View file

@ -14,6 +14,7 @@
use self::Family::*; use self::Family::*;
use astencode::decode_inlined_item;
use cstore::{self, crate_metadata}; use cstore::{self, crate_metadata};
use common::*; use common::*;
use encoder::def_to_u64; use encoder::def_to_u64;
@ -797,64 +798,36 @@ pub fn get_item_name(intr: &IdentInterner, cdata: Cmd, id: DefIndex) -> ast::Nam
item_name(intr, cdata.lookup_item(id)) item_name(intr, cdata.lookup_item(id))
} }
pub type DecodeInlinedItem<'a> = pub fn maybe_get_item_ast<'tcx>(cdata: Cmd, tcx: &TyCtxt<'tcx>, id: DefIndex)
Box<for<'tcx> FnMut(Cmd,
&TyCtxt<'tcx>,
Vec<hir_map::PathElem>, // parent_path
hir_map::DefPath, // parent_def_path
rbml::Doc,
DefId)
-> Result<&'tcx InlinedItem, (Vec<hir_map::PathElem>,
hir_map::DefPath)> + 'a>;
pub fn maybe_get_item_ast<'tcx>(cdata: Cmd,
tcx: &TyCtxt<'tcx>,
id: DefIndex,
mut decode_inlined_item: DecodeInlinedItem)
-> FoundAst<'tcx> { -> FoundAst<'tcx> {
debug!("Looking up item: {:?}", id); debug!("Looking up item: {:?}", id);
let item_doc = cdata.lookup_item(id); let item_doc = cdata.lookup_item(id);
let item_did = item_def_id(item_doc, cdata); let item_did = item_def_id(item_doc, cdata);
let parent_path = { let mut parent_path = item_path(item_doc);
let mut path = item_path(item_doc); parent_path.pop();
path.pop(); let mut parent_def_path = def_path(cdata, id);
path parent_def_path.pop();
}; if let Some(ast_doc) = reader::maybe_get_doc(item_doc, tag_ast as usize) {
let parent_def_path = { let ii = decode_inlined_item(cdata, tcx, parent_path,
let mut def_path = def_path(cdata, id); parent_def_path,
def_path.pop(); ast_doc, item_did);
def_path return FoundAst::Found(ii);
}; } else if let Some(parent_did) = item_parent_item(cdata, item_doc) {
match decode_inlined_item(cdata, // Remove the last element from the paths, since we are now
tcx, // trying to inline the parent.
parent_path, parent_path.pop();
parent_def_path, parent_def_path.pop();
item_doc, let parent_doc = cdata.lookup_item(parent_did.index);
item_did) { if let Some(ast_doc) = reader::maybe_get_doc(parent_doc, tag_ast as usize) {
Ok(ii) => FoundAst::Found(ii), let ii = decode_inlined_item(cdata, tcx, parent_path,
Err((mut parent_path, mut parent_def_path)) => { parent_def_path,
match item_parent_item(cdata, item_doc) { ast_doc, parent_did);
Some(parent_did) => { if let &InlinedItem::Item(ref i) = ii {
// Remove the last element from the paths, since we are now return FoundAst::FoundParent(parent_did, i);
// trying to inline the parent.
parent_path.pop();
parent_def_path.pop();
let parent_item = cdata.lookup_item(parent_did.index);
match decode_inlined_item(cdata,
tcx,
parent_path,
parent_def_path,
parent_item,
parent_did) {
Ok(ii) => FoundAst::FoundParent(parent_did, ii),
Err(_) => FoundAst::NotFound
}
}
None => FoundAst::NotFound
} }
} }
} }
FoundAst::NotFound
} }
pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool { pub fn is_item_mir_available<'tcx>(cdata: Cmd, id: DefIndex) -> bool {
@ -982,12 +955,16 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
cdata: Cmd, cdata: Cmd,
id: DefIndex, id: DefIndex,
tcx: &TyCtxt<'tcx>) tcx: &TyCtxt<'tcx>)
-> ty::ImplOrTraitItem<'tcx> { -> Option<ty::ImplOrTraitItem<'tcx>> {
let item_doc = cdata.lookup_item(id); let item_doc = cdata.lookup_item(id);
let def_id = item_def_id(item_doc, cdata); let def_id = item_def_id(item_doc, cdata);
let container_id = item_require_parent_item(cdata, item_doc); let container_id = if let Some(id) = item_parent_item(cdata, item_doc) {
id
} else {
return None;
};
let container_doc = cdata.lookup_item(container_id.index); let container_doc = cdata.lookup_item(container_id.index);
let container = match item_family(container_doc) { let container = match item_family(container_doc) {
Trait => TraitContainer(container_id), Trait => TraitContainer(container_id),
@ -998,7 +975,7 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
let vis = item_visibility(item_doc); let vis = item_visibility(item_doc);
let defaultness = item_defaultness(item_doc); let defaultness = item_defaultness(item_doc);
match item_sort(item_doc) { Some(match item_sort(item_doc) {
sort @ Some('C') | sort @ Some('c') => { sort @ Some('C') | sort @ Some('c') => {
let ty = doc_type(item_doc, tcx, cdata); let ty = doc_type(item_doc, tcx, cdata);
ty::ConstTraitItem(Rc::new(ty::AssociatedConst { ty::ConstTraitItem(Rc::new(ty::AssociatedConst {
@ -1044,8 +1021,8 @@ pub fn get_impl_or_trait_item<'tcx>(intr: Rc<IdentInterner>,
container: container, container: container,
})) }))
} }
_ => panic!("unknown impl/trait item sort"), _ => return None
} })
} }
pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex) pub fn get_trait_item_def_ids(cdata: Cmd, id: DefIndex)
@ -1085,7 +1062,7 @@ pub fn get_provided_trait_methods<'tcx>(intr: Rc<IdentInterner>,
cdata, cdata,
did.index, did.index,
tcx); tcx);
if let ty::MethodTraitItem(ref method) = trait_item { if let Some(ty::MethodTraitItem(ref method)) = trait_item {
Some((*method).clone()) Some((*method).clone())
} else { } else {
None None
@ -1114,7 +1091,7 @@ pub fn get_associated_consts<'tcx>(intr: Rc<IdentInterner>,
cdata, cdata,
did.index, did.index,
tcx); tcx);
if let ty::ConstTraitItem(ref ac) = trait_item { if let Some(ty::ConstTraitItem(ref ac)) = trait_item {
Some((*ac).clone()) Some((*ac).clone())
} else { } else {
None None

View file

@ -13,6 +13,7 @@
#![allow(unused_must_use)] // everything is just a MemWriter, can't fail #![allow(unused_must_use)] // everything is just a MemWriter, can't fail
#![allow(non_camel_case_types)] #![allow(non_camel_case_types)]
use astencode::encode_inlined_item;
use common::*; use common::*;
use cstore; use cstore;
use decoder; use decoder;
@ -55,21 +56,6 @@ use rustc_front::hir::{self, PatKind};
use rustc_front::intravisit::Visitor; use rustc_front::intravisit::Visitor;
use rustc_front::intravisit; use rustc_front::intravisit;
pub type EncodeInlinedItem<'a> =
Box<FnMut(&EncodeContext, &mut Encoder, InlinedItemRef) + 'a>;
pub struct EncodeParams<'a, 'tcx: 'a> {
pub diag: &'a Handler,
pub tcx: &'a TyCtxt<'tcx>,
pub reexports: &'a def::ExportMap,
pub item_symbols: &'a RefCell<NodeMap<String>>,
pub link_meta: &'a LinkMeta,
pub cstore: &'a cstore::CStore,
pub encode_inlined_item: EncodeInlinedItem<'a>,
pub reachable: &'a NodeSet,
pub mir_map: &'a MirMap<'tcx>,
}
pub struct EncodeContext<'a, 'tcx: 'a> { pub struct EncodeContext<'a, 'tcx: 'a> {
pub diag: &'a Handler, pub diag: &'a Handler,
pub tcx: &'a TyCtxt<'tcx>, pub tcx: &'a TyCtxt<'tcx>,
@ -77,7 +63,6 @@ pub struct EncodeContext<'a, 'tcx: 'a> {
pub item_symbols: &'a RefCell<NodeMap<String>>, pub item_symbols: &'a RefCell<NodeMap<String>>,
pub link_meta: &'a LinkMeta, pub link_meta: &'a LinkMeta,
pub cstore: &'a cstore::CStore, pub cstore: &'a cstore::CStore,
pub encode_inlined_item: RefCell<EncodeInlinedItem<'a>>,
pub type_abbrevs: tyencode::abbrev_map<'tcx>, pub type_abbrevs: tyencode::abbrev_map<'tcx>,
pub reachable: &'a NodeSet, pub reachable: &'a NodeSet,
pub mir_map: &'a MirMap<'tcx>, pub mir_map: &'a MirMap<'tcx>,
@ -688,6 +673,7 @@ fn encode_info_for_associated_const<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
rbml_w, rbml_w,
InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id),
ii)); ii));
encode_mir(ecx, rbml_w, ii.id);
} }
rbml_w.end_tag(); rbml_w.end_tag();
@ -733,6 +719,7 @@ fn encode_info_for_method<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
rbml_w, rbml_w,
InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id), InlinedItemRef::ImplItem(ecx.tcx.map.local_def_id(parent_id),
impl_item)); impl_item));
encode_mir(ecx, rbml_w, impl_item.id);
} }
encode_constness(rbml_w, sig.constness); encode_constness(rbml_w, sig.constness);
encode_defaultness(rbml_w, impl_item.defaultness); encode_defaultness(rbml_w, impl_item.defaultness);
@ -820,23 +807,6 @@ fn encode_repr_attrs(rbml_w: &mut Encoder,
rbml_w.end_tag(); rbml_w.end_tag();
} }
fn encode_inlined_item(ecx: &EncodeContext,
rbml_w: &mut Encoder,
ii: InlinedItemRef) {
let mut eii = ecx.encode_inlined_item.borrow_mut();
let eii: &mut EncodeInlinedItem = &mut *eii;
eii(ecx, rbml_w, ii);
let node_id = match ii {
InlinedItemRef::Item(item) => item.id,
InlinedItemRef::TraitItem(_, trait_item) => trait_item.id,
InlinedItemRef::ImplItem(_, impl_item) => impl_item.id,
InlinedItemRef::Foreign(foreign_item) => foreign_item.id
};
encode_mir(ecx, rbml_w, node_id);
}
fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) { fn encode_mir(ecx: &EncodeContext, rbml_w: &mut Encoder, node_id: NodeId) {
if let Some(mir) = ecx.mir_map.map.get(&node_id) { if let Some(mir) = ecx.mir_map.map.get(&node_id) {
rbml_w.start_tag(tag_mir as usize); rbml_w.start_tag(tag_mir as usize);
@ -958,6 +928,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_path(rbml_w, path); encode_path(rbml_w, path);
encode_attributes(rbml_w, &item.attrs); encode_attributes(rbml_w, &item.attrs);
encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
encode_mir(ecx, rbml_w, item.id);
encode_visibility(rbml_w, vis); encode_visibility(rbml_w, vis);
encode_stability(rbml_w, stab); encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr); encode_deprecation(rbml_w, depr);
@ -976,6 +947,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs); let needs_inline = tps_len > 0 || attr::requests_inline(&item.attrs);
if needs_inline || constness == hir::Constness::Const { if needs_inline || constness == hir::Constness::Const {
encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
encode_mir(ecx, rbml_w, item.id);
} }
if tps_len == 0 { if tps_len == 0 {
encode_symbol(ecx, rbml_w, item.id); encode_symbol(ecx, rbml_w, item.id);
@ -1044,6 +1016,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id())); encode_variant_id(rbml_w, ecx.tcx.map.local_def_id(v.node.data.id()));
} }
encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
encode_mir(ecx, rbml_w, item.id);
encode_path(rbml_w, path); encode_path(rbml_w, path);
// Encode inherent implementations for this enumeration. // Encode inherent implementations for this enumeration.
@ -1092,6 +1065,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_struct_fields(rbml_w, variant); encode_struct_fields(rbml_w, variant);
encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item)); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Item(item));
encode_mir(ecx, rbml_w, item.id);
// Encode inherent implementations for this structure. // Encode inherent implementations for this structure.
encode_inherent_implementations(ecx, rbml_w, def_id); encode_inherent_implementations(ecx, rbml_w, def_id);
@ -1374,6 +1348,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_inlined_item(ecx, rbml_w, encode_inlined_item(ecx, rbml_w,
InlinedItemRef::TraitItem(def_id, trait_item)); InlinedItemRef::TraitItem(def_id, trait_item));
encode_mir(ecx, rbml_w, trait_item.id);
} }
hir::MethodTraitItem(ref sig, ref body) => { hir::MethodTraitItem(ref sig, ref body) => {
// If this is a static method, we've already // If this is a static method, we've already
@ -1389,6 +1364,7 @@ fn encode_info_for_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_item_sort(rbml_w, 'p'); encode_item_sort(rbml_w, 'p');
encode_inlined_item(ecx, rbml_w, encode_inlined_item(ecx, rbml_w,
InlinedItemRef::TraitItem(def_id, trait_item)); InlinedItemRef::TraitItem(def_id, trait_item));
encode_mir(ecx, rbml_w, trait_item.id);
} else { } else {
encode_item_sort(rbml_w, 'r'); encode_item_sort(rbml_w, 'r');
} }
@ -1426,13 +1402,15 @@ fn encode_info_for_foreign_item<'a, 'tcx>(ecx: &EncodeContext<'a, 'tcx>,
encode_name(rbml_w, nitem.name); encode_name(rbml_w, nitem.name);
if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic { if abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(nitem)); encode_inlined_item(ecx, rbml_w, InlinedItemRef::Foreign(nitem));
encode_mir(ecx, rbml_w, nitem.id);
} else {
encode_symbol(ecx, rbml_w, nitem.id);
} }
encode_attributes(rbml_w, &nitem.attrs); encode_attributes(rbml_w, &nitem.attrs);
let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); let stab = stability::lookup_stability(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id)); let depr = stability::lookup_deprecation(ecx.tcx, ecx.tcx.map.local_def_id(nitem.id));
encode_stability(rbml_w, stab); encode_stability(rbml_w, stab);
encode_deprecation(rbml_w, depr); encode_deprecation(rbml_w, depr);
encode_symbol(ecx, rbml_w, nitem.id);
encode_method_argument_names(rbml_w, &fndecl); encode_method_argument_names(rbml_w, &fndecl);
} }
hir::ForeignItemStatic(_, mutbl) => { hir::ForeignItemStatic(_, mutbl) => {
@ -1928,32 +1906,7 @@ fn encode_dylib_dependency_formats(rbml_w: &mut Encoder, ecx: &EncodeContext) {
#[allow(non_upper_case_globals)] #[allow(non_upper_case_globals)]
pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ]; pub const metadata_encoding_version : &'static [u8] = &[b'r', b'u', b's', b't', 0, 0, 0, 2 ];
pub fn encode_metadata(parms: EncodeParams, krate: &hir::Crate) -> Vec<u8> { pub fn encode_metadata(ecx: EncodeContext, krate: &hir::Crate) -> Vec<u8> {
let EncodeParams {
item_symbols,
diag,
tcx,
reexports,
cstore,
encode_inlined_item,
link_meta,
reachable,
mir_map,
..
} = parms;
let ecx = EncodeContext {
diag: diag,
tcx: tcx,
reexports: reexports,
item_symbols: item_symbols,
link_meta: link_meta,
cstore: cstore,
encode_inlined_item: RefCell::new(encode_inlined_item),
type_abbrevs: RefCell::new(FnvHashMap()),
reachable: reachable,
mir_map: mir_map,
};
let mut wr = Cursor::new(Vec::new()); let mut wr = Cursor::new(Vec::new());
{ {

View file

@ -39,8 +39,20 @@ impl<'a,'tcx> Builder<'a,'tcx> {
ExprKind::Scope { extent, value } => { ExprKind::Scope { extent, value } => {
this.in_scope(extent, block, |this| this.as_rvalue(block, value)) this.in_scope(extent, block, |this| this.as_rvalue(block, value))
} }
ExprKind::InlineAsm { asm } => { ExprKind::InlineAsm { asm, outputs, inputs } => {
block.and(Rvalue::InlineAsm(asm.clone())) let outputs = outputs.into_iter().map(|output| {
unpack!(block = this.as_lvalue(block, output))
}).collect();
let inputs = inputs.into_iter().map(|input| {
unpack!(block = this.as_operand(block, input))
}).collect();
block.and(Rvalue::InlineAsm {
asm: asm.clone(),
outputs: outputs,
inputs: inputs
})
} }
ExprKind::Repeat { value, count } => { ExprKind::Repeat { value, count } => {
let value_operand = unpack!(block = this.as_operand(block, value)); let value_operand = unpack!(block = this.as_operand(block, value));
@ -73,8 +85,13 @@ impl<'a,'tcx> Builder<'a,'tcx> {
}) })
} }
ExprKind::Cast { source } => { ExprKind::Cast { source } => {
let source = unpack!(block = this.as_operand(block, source)); let source = this.hir.mirror(source);
block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty)) if source.ty == expr.ty {
this.expr_as_rvalue(block, source)
} else {
let source = unpack!(block = this.as_operand(block, source));
block.and(Rvalue::Cast(CastKind::Misc, source, expr.ty))
}
} }
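// Standalone illustration (not part of this patch) of the case the Cast arm
// above now short-circuits: when the source already has the target type, no
// Rvalue::Cast(CastKind::Misc, ..) needs to be emitted at all.
fn main() {
    let x: u32 = 42;
    let y = x as u32; // source and target types are identical: a no-op cast
    assert_eq!(x, y);
}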
ExprKind::ReifyFnPointer { source } => { ExprKind::ReifyFnPointer { source } => {
let source = unpack!(block = this.as_operand(block, source)); let source = unpack!(block = this.as_operand(block, source));

View file

@ -238,6 +238,13 @@ pub struct MatchPair<'pat, 'tcx:'pat> {
// ... must match this pattern. // ... must match this pattern.
pattern: &'pat Pattern<'tcx>, pattern: &'pat Pattern<'tcx>,
// HACK(eddyb) This is used to toggle whether a Slice pattern
// has had its length checked. This is only necessary because
// the "rest" part of the pattern right now has type &[T] and
// as such, it requires an Rvalue::Slice to be generated.
// See RFC 495 / issue #23121 for the eventual (proper) solution.
slice_len_checked: bool
} }
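// Standalone illustration (not part of this patch) of why slice patterns need
// the length test that `slice_len_checked` tracks: each arm below is only
// applicable once the scrutinee's length has been compared against the number
// of non-rest elements (an equality test for fixed-length arms, a >= test when
// a "rest" part is present).
fn classify(v: &[i32]) -> &'static str {
    match v {
        [] => "empty",
        [_one] => "one element",                             // needs len == 1
        [first, .., last] if first == last => "ends match",  // needs len >= 2
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(&[]), "empty");
    assert_eq!(classify(&[7]), "one element");
    assert_eq!(classify(&[3, 1, 3]), "ends match");
    assert_eq!(classify(&[1, 2]), "other");
}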
#[derive(Clone, Debug, PartialEq)] #[derive(Clone, Debug, PartialEq)]

View file

@ -95,7 +95,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
Err(match_pair) Err(match_pair)
} }
PatternKind::Array { ref prefix, ref slice, ref suffix } => { PatternKind::Range { .. } |
PatternKind::Variant { .. } => {
// cannot simplify, test is required
Err(match_pair)
}
PatternKind::Slice { .. } if !match_pair.slice_len_checked => {
Err(match_pair)
}
PatternKind::Array { ref prefix, ref slice, ref suffix } |
PatternKind::Slice { ref prefix, ref slice, ref suffix } => {
unpack!(block = self.prefix_suffix_slice(&mut candidate.match_pairs, unpack!(block = self.prefix_suffix_slice(&mut candidate.match_pairs,
block, block,
match_pair.lvalue.clone(), match_pair.lvalue.clone(),
@ -105,13 +116,6 @@ impl<'a,'tcx> Builder<'a,'tcx> {
Ok(block) Ok(block)
} }
PatternKind::Slice { .. } |
PatternKind::Range { .. } |
PatternKind::Variant { .. } => {
// cannot simplify, test is required
Err(match_pair)
}
PatternKind::Leaf { ref subpatterns } => { PatternKind::Leaf { ref subpatterns } => {
// tuple struct, match subpats (if any) // tuple struct, match subpats (if any)
candidate.match_pairs candidate.match_pairs

View file

@ -75,7 +75,8 @@ impl<'a,'tcx> Builder<'a,'tcx> {
} }
} }
PatternKind::Slice { ref prefix, ref slice, ref suffix } => { PatternKind::Slice { ref prefix, ref slice, ref suffix }
if !match_pair.slice_len_checked => {
let len = prefix.len() + suffix.len(); let len = prefix.len() + suffix.len();
let op = if slice.is_some() { let op = if slice.is_some() {
BinOp::Ge BinOp::Ge
@ -89,6 +90,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
} }
PatternKind::Array { .. } | PatternKind::Array { .. } |
PatternKind::Slice { .. } |
PatternKind::Wild | PatternKind::Wild |
PatternKind::Binding { .. } | PatternKind::Binding { .. } |
PatternKind::Leaf { .. } | PatternKind::Leaf { .. } |
@ -174,14 +176,78 @@ impl<'a,'tcx> Builder<'a,'tcx> {
targets targets
} }
TestKind::Eq { ref value, ty } => { TestKind::Eq { ref value, mut ty } => {
let expect = self.literal_operand(test.span, ty.clone(), Literal::Value { let mut val = Operand::Consume(lvalue.clone());
value: value.clone()
}); // If we're using b"..." as a pattern, we need to insert an
let val = Operand::Consume(lvalue.clone()); // unsizing coercion, as the byte string has the type &[u8; N].
let expect = if let ConstVal::ByteStr(ref bytes) = *value {
let tcx = self.hir.tcx();
// Unsize the lvalue to &[u8], too, if necessary.
if let ty::TyRef(region, mt) = ty.sty {
if let ty::TyArray(_, _) = mt.ty.sty {
ty = tcx.mk_imm_ref(region, tcx.mk_slice(tcx.types.u8));
let val_slice = self.temp(ty);
self.cfg.push_assign(block, test.span, &val_slice,
Rvalue::Cast(CastKind::Unsize, val, ty));
val = Operand::Consume(val_slice);
}
}
assert!(ty.is_slice());
let array_ty = tcx.mk_array(tcx.types.u8, bytes.len());
let array_ref = tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), array_ty);
let array = self.literal_operand(test.span, array_ref, Literal::Value {
value: value.clone()
});
let slice = self.temp(ty);
self.cfg.push_assign(block, test.span, &slice,
Rvalue::Cast(CastKind::Unsize, array, ty));
Operand::Consume(slice)
} else {
self.literal_operand(test.span, ty, Literal::Value {
value: value.clone()
})
};
// Use PartialEq::eq for &str and &[u8] slices, instead of BinOp::Eq.
let fail = self.cfg.start_new_block(); let fail = self.cfg.start_new_block();
let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val.clone()); if let ty::TyRef(_, mt) = ty.sty {
vec![block, fail] assert!(ty.is_slice());
let eq_def_id = self.hir.tcx().lang_items.eq_trait().unwrap();
let ty = mt.ty;
let (mty, method) = self.hir.trait_method(eq_def_id, "eq", ty, vec![ty]);
let bool_ty = self.hir.bool_ty();
let eq_result = self.temp(bool_ty);
let eq_block = self.cfg.start_new_block();
let cleanup = self.diverge_cleanup();
self.cfg.terminate(block, Terminator::Call {
func: Operand::Constant(Constant {
span: test.span,
ty: mty,
literal: method
}),
args: vec![val, expect],
destination: Some((eq_result.clone(), eq_block)),
cleanup: cleanup,
});
// check the result
let block = self.cfg.start_new_block();
self.cfg.terminate(eq_block, Terminator::If {
cond: Operand::Consume(eq_result),
targets: (block, fail),
});
vec![block, fail]
} else {
let block = self.compare(block, fail, test.span, BinOp::Eq, expect, val);
vec![block, fail]
}
} }
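// Standalone illustration (not part of this patch) of the byte-string case
// handled above: the literal b"rust" has type &[u8; 4], so when the scrutinee
// is a &[u8] both sides end up as &[u8] and the comparison goes through
// PartialEq::eq rather than a scalar BinOp::Eq.
fn is_magic(header: &[u8]) -> bool {
    match header {
        b"rust" => true,
        _ => false,
    }
}

fn main() {
    assert!(is_magic(b"rust"));
    assert!(!is_magic(b"ruby"));
}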
TestKind::Range { ref lo, ref hi, ty } => { TestKind::Range { ref lo, ref hi, ty } => {
@ -349,9 +415,26 @@ impl<'a,'tcx> Builder<'a,'tcx> {
} }
} }
TestKind::Eq { .. } | // If we are performing a length check, then this
TestKind::Range { .. } | // informs slice patterns, but nothing else.
TestKind::Len { .. } => { TestKind::Len { .. } => {
let pattern_test = self.test(&match_pair);
match *match_pair.pattern.kind {
PatternKind::Slice { .. } if pattern_test.kind == test.kind => {
let mut new_candidate = candidate.clone();
// Set up the MatchKind to simplify this like an array.
new_candidate.match_pairs[match_pair_index]
.slice_len_checked = true;
resulting_candidates[0].push(new_candidate);
true
}
_ => false
}
}
TestKind::Eq { .. } |
TestKind::Range { .. } => {
// These are all binary tests. // These are all binary tests.
// //
// FIXME(#29623) we can be more clever here // FIXME(#29623) we can be more clever here
@ -405,7 +488,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
.map(|subpattern| { .map(|subpattern| {
// e.g., `(x as Variant).0` // e.g., `(x as Variant).0`
let lvalue = downcast_lvalue.clone().field(subpattern.field, let lvalue = downcast_lvalue.clone().field(subpattern.field,
subpattern.field_ty()); subpattern.pattern.ty);
// e.g., `(x as Variant).0 @ P1` // e.g., `(x as Variant).0 @ P1`
MatchPair::new(lvalue, &subpattern.pattern) MatchPair::new(lvalue, &subpattern.pattern)
}); });

View file

@ -22,7 +22,7 @@ impl<'a,'tcx> Builder<'a,'tcx> {
subpatterns.iter() subpatterns.iter()
.map(|fieldpat| { .map(|fieldpat| {
let lvalue = lvalue.clone().field(fieldpat.field, let lvalue = lvalue.clone().field(fieldpat.field,
fieldpat.field_ty()); fieldpat.pattern.ty);
MatchPair::new(lvalue, &fieldpat.pattern) MatchPair::new(lvalue, &fieldpat.pattern)
}) })
.collect() .collect()
@ -118,6 +118,7 @@ impl<'pat, 'tcx> MatchPair<'pat, 'tcx> {
MatchPair { MatchPair {
lvalue: lvalue, lvalue: lvalue,
pattern: pattern, pattern: pattern,
slice_len_checked: false,
} }
} }
} }

View file

@ -141,15 +141,18 @@ impl<'a,'tcx> Builder<'a,'tcx> {
.chain(explicits) .chain(explicits)
.enumerate() .enumerate()
.map(|(index, (ty, pattern))| { .map(|(index, (ty, pattern))| {
let lvalue = Lvalue::Arg(index as u32);
if let Some(pattern) = pattern { if let Some(pattern) = pattern {
let lvalue = Lvalue::Arg(index as u32);
let pattern = this.hir.irrefutable_pat(pattern); let pattern = this.hir.irrefutable_pat(pattern);
unpack!(block = this.lvalue_into_pattern(block, unpack!(block = this.lvalue_into_pattern(block,
argument_extent, argument_extent,
pattern, pattern,
&lvalue)); &lvalue));
} }
ArgDecl { ty: ty } // Make sure we drop (parts of) the argument even when not matched on.
this.schedule_drop(pattern.as_ref().map_or(ast_block.span, |pat| pat.span),
argument_extent, &lvalue, ty);
ArgDecl { ty: ty, spread: false }
}) })
.collect(); .collect();
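// Standalone illustration (not part of this patch) of why a drop is scheduled
// for every argument lvalue: even an argument bound to `_`, which no pattern
// ever uses, still owns its value and must be dropped during the call.
struct Noisy(&'static str);

impl Drop for Noisy {
    fn drop(&mut self) {
        println!("dropping {}", self.0);
    }
}

fn takes_and_ignores(_: Noisy) {
    // The argument is never bound, yet its destructor still runs.
}

fn main() {
    takes_and_ignores(Noisy("argument"));
    println!("after the call");
}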

View file

@ -337,8 +337,12 @@ impl<'tcx> Mirror<'tcx> for &'tcx hir::Expr {
convert_path_expr(cx, self) convert_path_expr(cx, self)
} }
hir::ExprInlineAsm(ref asm) => { hir::ExprInlineAsm(ref asm, ref outputs, ref inputs) => {
ExprKind::InlineAsm { asm: asm } ExprKind::InlineAsm {
asm: asm,
outputs: outputs.to_ref(),
inputs: inputs.to_ref()
}
} }
// Now comes the rote stuff: // Now comes the rote stuff:
@ -668,11 +672,16 @@ fn convert_path_expr<'a, 'tcx: 'a>(cx: &mut Cx<'a, 'tcx>, expr: &'tcx hir::Expr)
}, },
Def::Const(def_id) | Def::Const(def_id) |
Def::AssociatedConst(def_id) => { Def::AssociatedConst(def_id) => {
if let Some(v) = cx.try_const_eval_literal(expr) { let substs = Some(cx.tcx.node_id_item_substs(expr.id).substs);
return ExprKind::Literal { literal: v }; if let Some((e, _)) = const_eval::lookup_const_by_id(cx.tcx, def_id, substs) {
} else { // FIXME ConstVal can't be yet used with adjustments, as they would be lost.
def_id if !cx.tcx.tables.borrow().adjustments.contains_key(&e.id) {
if let Some(v) = cx.try_const_eval_literal(e) {
return ExprKind::Literal { literal: v };
}
}
} }
def_id
} }
Def::Static(node_id, _) => return ExprKind::StaticRef { Def::Static(node_id, _) => return ExprKind::StaticRef {

View file

@ -19,7 +19,9 @@ use hair::*;
use rustc::mir::repr::*; use rustc::mir::repr::*;
use rustc::middle::const_eval::{self, ConstVal}; use rustc::middle::const_eval::{self, ConstVal};
use rustc::middle::def_id::DefId;
use rustc::middle::infer::InferCtxt; use rustc::middle::infer::InferCtxt;
use rustc::middle::subst::{Subst, Substs};
use rustc::middle::ty::{self, Ty, TyCtxt}; use rustc::middle::ty::{self, Ty, TyCtxt};
use syntax::codemap::Span; use syntax::codemap::Span;
use syntax::parse::token; use syntax::parse::token;
@ -84,9 +86,44 @@ impl<'a,'tcx:'a> Cx<'a, 'tcx> {
pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option<Literal<'tcx>> { pub fn try_const_eval_literal(&mut self, e: &hir::Expr) -> Option<Literal<'tcx>> {
let hint = const_eval::EvalHint::ExprTypeChecked; let hint = const_eval::EvalHint::ExprTypeChecked;
const_eval::eval_const_expr_partial(self.tcx, e, hint, None) const_eval::eval_const_expr_partial(self.tcx, e, hint, None).ok().and_then(|v| {
.ok() match v {
.map(|v| Literal::Value { value: v }) // All of these contain local IDs, unsuitable for storing in MIR.
ConstVal::Struct(_) | ConstVal::Tuple(_) |
ConstVal::Array(..) | ConstVal::Repeat(..) |
ConstVal::Function(_) => None,
_ => Some(Literal::Value { value: v })
}
})
}
pub fn trait_method(&mut self,
trait_def_id: DefId,
method_name: &str,
self_ty: Ty<'tcx>,
params: Vec<Ty<'tcx>>)
-> (Ty<'tcx>, Literal<'tcx>) {
let method_name = token::intern(method_name);
let substs = Substs::new_trait(params, vec![], self_ty);
for trait_item in self.tcx.trait_items(trait_def_id).iter() {
match *trait_item {
ty::ImplOrTraitItem::MethodTraitItem(ref method) => {
if method.name == method_name {
let method_ty = self.tcx.lookup_item_type(method.def_id);
let method_ty = method_ty.ty.subst(self.tcx, &substs);
return (method_ty, Literal::Item {
def_id: method.def_id,
substs: self.tcx.mk_substs(substs),
});
}
}
ty::ImplOrTraitItem::ConstTraitItem(..) |
ty::ImplOrTraitItem::TypeTraitItem(..) => {}
}
}
self.tcx.sess.bug(&format!("found no method `{}` in `{:?}`", method_name, trait_def_id));
} }
pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize { pub fn num_variants(&mut self, adt_def: ty::AdtDef<'tcx>) -> usize {

View file

@ -63,6 +63,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
} }
fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> { fn to_pattern(&mut self, pat: &hir::Pat) -> Pattern<'tcx> {
let mut ty = self.cx.tcx.node_id_to_type(pat.id);
let kind = match pat.node { let kind = match pat.node {
PatKind::Wild => PatternKind::Wild, PatKind::Wild => PatternKind::Wild,
@ -84,9 +86,9 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
{ {
let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def(); let def = self.cx.tcx.def_map.borrow().get(&pat.id).unwrap().full_def();
match def { match def {
Def::Const(def_id) | Def::AssociatedConst(def_id) => Def::Const(def_id) | Def::AssociatedConst(def_id) => {
match const_eval::lookup_const_by_id(self.cx.tcx, def_id, let substs = Some(self.cx.tcx.node_id_item_substs(pat.id).substs);
Some(pat.id), None) { match const_eval::lookup_const_by_id(self.cx.tcx, def_id, substs) {
Some((const_expr, _const_ty)) => { Some((const_expr, _const_ty)) => {
let pat = const_eval::const_expr_to_pat(self.cx.tcx, const_expr, let pat = const_eval::const_expr_to_pat(self.cx.tcx, const_expr,
pat.span); pat.span);
@ -97,7 +99,8 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
pat.span, pat.span,
&format!("cannot eval constant: {:?}", def_id)) &format!("cannot eval constant: {:?}", def_id))
} }
}, }
}
_ => _ =>
self.cx.tcx.sess.span_bug( self.cx.tcx.sess.span_bug(
pat.span, pat.span,
@ -169,6 +172,17 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
hir::BindByRef(hir::MutImmutable) => hir::BindByRef(hir::MutImmutable) =>
(Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)), (Mutability::Not, BindingMode::ByRef(region.unwrap(), BorrowKind::Shared)),
}; };
// A ref x pattern is the same node used for x, and as such it has
// x's type, which is &T, where we want T (the type being matched).
if let hir::BindByRef(_) = bm {
if let ty::TyRef(_, mt) = ty.sty {
ty = mt.ty;
} else {
unreachable!("`ref {}` has wrong type {}", ident.node, ty);
}
}
PatternKind::Binding { PatternKind::Binding {
mutability: mutability, mutability: mutability,
mode: mode, mode: mode,
@ -234,8 +248,6 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
} }
}; };
let ty = self.cx.tcx.node_id_to_type(pat.id);
Pattern { Pattern {
span: pat.span, span: pat.span,
ty: ty, ty: ty,
@ -314,20 +326,3 @@ impl<'patcx, 'cx, 'tcx> PatCx<'patcx, 'cx, 'tcx> {
} }
} }
} }
impl<'tcx> FieldPattern<'tcx> {
pub fn field_ty(&self) -> Ty<'tcx> {
debug!("field_ty({:?},ty={:?})", self, self.pattern.ty);
let r = match *self.pattern.kind {
PatternKind::Binding { mode: BindingMode::ByRef(..), ..} => {
match self.pattern.ty.sty {
ty::TyRef(_, mt) => mt.ty,
_ => unreachable!()
}
}
_ => self.pattern.ty
};
debug!("field_ty -> {:?}", r);
r
}
}
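// Standalone illustration (not part of this patch) of the `ref` peeling above:
// a `ref` binding introduces a reference to the matched place, so the binding
// has type &T while the place being matched has type T.
fn main() {
    let pair = (1u32, String::from("mir"));
    match pair {
        (n, ref s) => {
            let s: &String = s; // `ref s` binds by reference
            assert_eq!(n, 1);
            assert_eq!(s, "mir");
        }
    }
}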

View file

@ -230,6 +230,8 @@ pub enum ExprKind<'tcx> {
}, },
InlineAsm { InlineAsm {
asm: &'tcx hir::InlineAsm, asm: &'tcx hir::InlineAsm,
outputs: Vec<ExprRef<'tcx>>,
inputs: Vec<ExprRef<'tcx>>
}, },
} }

View file

@ -33,6 +33,7 @@ use rustc::util::common::ErrorReported;
use rustc::util::nodemap::NodeMap; use rustc::util::nodemap::NodeMap;
use rustc_front::hir; use rustc_front::hir;
use rustc_front::intravisit::{self, Visitor}; use rustc_front::intravisit::{self, Visitor};
use syntax::abi::Abi;
use syntax::ast; use syntax::ast;
use syntax::attr::AttrMetaMethods; use syntax::attr::AttrMetaMethods;
use syntax::codemap::Span; use syntax::codemap::Span;
@ -181,13 +182,20 @@ fn build_mir<'a,'tcx:'a>(cx: Cx<'a,'tcx>,
let parameter_scope = let parameter_scope =
cx.tcx().region_maps.lookup_code_extent( cx.tcx().region_maps.lookup_code_extent(
CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body.id }); CodeExtentData::ParameterScope { fn_id: fn_id, body_id: body.id });
Ok(build::construct(cx, let mut mir = build::construct(cx, span, implicit_arg_tys, arguments,
span, parameter_scope, fn_sig.output, body);
implicit_arg_tys,
arguments, match cx.tcx().node_id_to_type(fn_id).sty {
parameter_scope, ty::TyFnDef(_, _, f) if f.abi == Abi::RustCall => {
fn_sig.output, // RustCall pseudo-ABI untuples the last argument.
body)) if let Some(arg_decl) = mir.arg_decls.last_mut() {
arg_decl.spread = true;
}
}
_ => {}
}
Ok(mir)
} }
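// Standalone sketch (not part of this patch) of the untupling that the
// `spread` flag models for the "rust-call" ABI: at the trait level a closure
// receives its arguments as one tuple, which the compiler spreads back into
// separate formal arguments for the closure body.
fn call_with_pair<F: Fn(u32, u32) -> u32>(f: F, args: (u32, u32)) -> u32 {
    // Spreading the tuple by hand here; rust-call does this implicitly.
    f(args.0, args.1)
}

fn main() {
    let add = |a, b| a + b;
    assert_eq!(call_with_pair(add, (2, 3)), 5);
}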
fn closure_self_ty<'a, 'tcx>(tcx: &TyCtxt<'tcx>, fn closure_self_ty<'a, 'tcx>(tcx: &TyCtxt<'tcx>,

View file

@ -82,7 +82,7 @@ impl<'a, 'tcx> MutVisitor<'tcx> for EraseRegionsVisitor<'a, 'tcx> {
Rvalue::BinaryOp(_, _, _) | Rvalue::BinaryOp(_, _, _) |
Rvalue::UnaryOp(_, _) | Rvalue::UnaryOp(_, _) |
Rvalue::Slice { input: _, from_start: _, from_end: _ } | Rvalue::Slice { input: _, from_start: _, from_end: _ } |
Rvalue::InlineAsm(_) => {}, Rvalue::InlineAsm {..} => {},
Rvalue::Repeat(_, ref mut value) => value.ty = self.tcx.erase_regions(&value.ty), Rvalue::Repeat(_, ref mut value) => value.ty = self.tcx.erase_regions(&value.ty),
Rvalue::Ref(ref mut region, _, _) => *region = ty::ReStatic, Rvalue::Ref(ref mut region, _, _) => *region = ty::ReStatic,

View file

@ -610,9 +610,8 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
} }
Some(Def::Const(did)) | Some(Def::Const(did)) |
Some(Def::AssociatedConst(did)) => { Some(Def::AssociatedConst(did)) => {
if let Some((expr, _ty)) = const_eval::lookup_const_by_id(v.tcx, did, let substs = Some(v.tcx.node_id_item_substs(e.id).substs);
Some(e.id), if let Some((expr, _)) = const_eval::lookup_const_by_id(v.tcx, did, substs) {
None) {
let inner = v.global_expr(Mode::Const, expr); let inner = v.global_expr(Mode::Const, expr);
v.add_qualif(inner); v.add_qualif(inner);
} }
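// Standalone illustration (not part of this patch): an associated constant
// used in a pattern has to be const-evaluated first, which is what the
// lookup_const_by_id(tcx, def_id, substs) calls above provide.
struct Small;

impl Small {
    const MAX: u32 = 10;
}

fn is_max(x: u32) -> bool {
    match x {
        Small::MAX => true,
        _ => false,
    }
}

fn main() {
    assert!(is_max(10));
    assert!(!is_max(9));
}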
@ -756,7 +755,7 @@ fn check_expr<'a, 'tcx>(v: &mut CheckCrateVisitor<'a, 'tcx>,
// Expressions with side-effects. // Expressions with side-effects.
hir::ExprAssign(..) | hir::ExprAssign(..) |
hir::ExprAssignOp(..) | hir::ExprAssignOp(..) |
hir::ExprInlineAsm(_) => { hir::ExprInlineAsm(..) => {
v.add_qualif(ConstQualif::NOT_CONST); v.add_qualif(ConstQualif::NOT_CONST);
if v.mode != Mode::Var { if v.mode != Mode::Var {
span_err!(v.tcx.sess, e.span, E0019, span_err!(v.tcx.sess, e.span, E0019,

View file

@ -61,7 +61,6 @@ pub use rustc::lint;
pub use rustc::util; pub use rustc::util;
pub mod back { pub mod back {
pub use rustc_back::abi;
pub use rustc_back::rpath; pub use rustc_back::rpath;
pub use rustc_back::svh; pub use rustc_back::svh;

View file

@ -200,12 +200,13 @@ use middle::lang_items::StrEqFnLangItem;
use middle::mem_categorization as mc; use middle::mem_categorization as mc;
use middle::mem_categorization::Categorization; use middle::mem_categorization::Categorization;
use middle::pat_util::*; use middle::pat_util::*;
use middle::subst::Substs;
use trans::adt; use trans::adt;
use trans::base::*; use trans::base::*;
use trans::build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast}; use trans::build::{AddCase, And, Br, CondBr, GEPi, InBoundsGEP, Load, PointerCast};
use trans::build::{Not, Store, Sub, add_comment}; use trans::build::{Not, Store, Sub, add_comment};
use trans::build; use trans::build;
use trans::callee; use trans::callee::{Callee, ArgVals};
use trans::cleanup::{self, CleanupMethods, DropHintMethods}; use trans::cleanup::{self, CleanupMethods, DropHintMethods};
use trans::common::*; use trans::common::*;
use trans::consts; use trans::consts;
@ -216,6 +217,7 @@ use trans::monomorphize;
use trans::tvec; use trans::tvec;
use trans::type_of; use trans::type_of;
use trans::Disr; use trans::Disr;
use trans::value::Value;
use middle::ty::{self, Ty, TyCtxt}; use middle::ty::{self, Ty, TyCtxt};
use middle::traits::ProjectionMode; use middle::traits::ProjectionMode;
use session::config::NoDebugInfo; use session::config::NoDebugInfo;
@ -448,6 +450,12 @@ impl<'tcx> Datum<'tcx, Lvalue> {
} }
} }
impl fmt::Debug for MatchInput {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Debug::fmt(&Value(self.val), f)
}
}
impl MatchInput { impl MatchInput {
fn from_val(val: ValueRef) -> MatchInput { fn from_val(val: ValueRef) -> MatchInput {
MatchInput { MatchInput {
@ -466,11 +474,8 @@ fn expand_nested_bindings<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
col: usize, col: usize,
val: MatchInput) val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> { -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={})", debug!("expand_nested_bindings(bcx={}, m={:?}, col={}, val={:?})",
bcx.to_str(), bcx.to_str(), m, col, val);
m,
col,
bcx.val_to_string(val.val));
let _indenter = indenter(); let _indenter = indenter();
m.iter().map(|br| { m.iter().map(|br| {
@ -506,11 +511,8 @@ fn enter_match<'a, 'b, 'p, 'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
-> Vec<Match<'a, 'p, 'blk, 'tcx>> where -> Vec<Match<'a, 'p, 'blk, 'tcx>> where
F: FnMut(&[&'p hir::Pat]) -> Option<Vec<&'p hir::Pat>>, F: FnMut(&[&'p hir::Pat]) -> Option<Vec<&'p hir::Pat>>,
{ {
debug!("enter_match(bcx={}, m={:?}, col={}, val={})", debug!("enter_match(bcx={}, m={:?}, col={}, val={:?})",
bcx.to_str(), bcx.to_str(), m, col, val);
m,
col,
bcx.val_to_string(val.val));
let _indenter = indenter(); let _indenter = indenter();
m.iter().filter_map(|br| { m.iter().filter_map(|br| {
@ -549,11 +551,8 @@ fn enter_default<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
col: usize, col: usize,
val: MatchInput) val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> { -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("enter_default(bcx={}, m={:?}, col={}, val={})", debug!("enter_default(bcx={}, m={:?}, col={}, val={:?})",
bcx.to_str(), bcx.to_str(), m, col, val);
m,
col,
bcx.val_to_string(val.val));
let _indenter = indenter(); let _indenter = indenter();
// Collect all of the matches that can match against anything. // Collect all of the matches that can match against anything.
@ -606,12 +605,8 @@ fn enter_opt<'a, 'p, 'blk, 'tcx>(
variant_size: usize, variant_size: usize,
val: MatchInput) val: MatchInput)
-> Vec<Match<'a, 'p, 'blk, 'tcx>> { -> Vec<Match<'a, 'p, 'blk, 'tcx>> {
debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={})", debug!("enter_opt(bcx={}, m={:?}, opt={:?}, col={}, val={:?})",
bcx.to_str(), bcx.to_str(), m, *opt, col, val);
m,
*opt,
col,
bcx.val_to_string(val.val));
let _indenter = indenter(); let _indenter = indenter();
let ctor = match opt { let ctor = match opt {
@ -887,7 +882,7 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
rhs_t: Ty<'tcx>, rhs_t: Ty<'tcx>,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Result<'blk, 'tcx> { -> Result<'blk, 'tcx> {
fn compare_str<'blk, 'tcx>(cx: Block<'blk, 'tcx>, fn compare_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
lhs_data: ValueRef, lhs_data: ValueRef,
lhs_len: ValueRef, lhs_len: ValueRef,
rhs_data: ValueRef, rhs_data: ValueRef,
@ -895,11 +890,13 @@ fn compare_values<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
rhs_t: Ty<'tcx>, rhs_t: Ty<'tcx>,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Result<'blk, 'tcx> { -> Result<'blk, 'tcx> {
let did = langcall(cx, let did = langcall(bcx,
None, None,
&format!("comparison of `{}`", rhs_t), &format!("comparison of `{}`", rhs_t),
StrEqFnLangItem); StrEqFnLangItem);
callee::trans_lang_call(cx, did, &[lhs_data, lhs_len, rhs_data, rhs_len], None, debug_loc) let args = [lhs_data, lhs_len, rhs_data, rhs_len];
Callee::def(bcx.ccx(), did, bcx.tcx().mk_substs(Substs::empty()))
.call(bcx, debug_loc, ArgVals(&args), None)
} }
let _icx = push_ctxt("compare_values"); let _icx = push_ctxt("compare_values");
@ -1032,7 +1029,7 @@ fn insert_lllocals<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum); bcx.fcx.schedule_drop_and_fill_mem(cs, llval, binding_info.ty, opt_datum);
} }
debug!("binding {} to {}", binding_info.id, bcx.val_to_string(llval)); debug!("binding {} to {:?}", binding_info.id, Value(llval));
bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum); bcx.fcx.lllocals.borrow_mut().insert(binding_info.id, datum);
debuginfo::create_match_binding_metadata(bcx, name, binding_info); debuginfo::create_match_binding_metadata(bcx, name, binding_info);
} }
@ -1047,11 +1044,8 @@ fn compile_guard<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
chk: &FailureHandler, chk: &FailureHandler,
has_genuine_default: bool) has_genuine_default: bool)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals=[{}])", debug!("compile_guard(bcx={}, guard_expr={:?}, m={:?}, vals={:?})",
bcx.to_str(), bcx.to_str(), guard_expr, m, vals);
guard_expr,
m,
vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
let _indenter = indenter(); let _indenter = indenter();
let mut bcx = insert_lllocals(bcx, &data.bindings_map, None); let mut bcx = insert_lllocals(bcx, &data.bindings_map, None);
@ -1093,10 +1087,8 @@ fn compile_submatch<'a, 'p, 'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
vals: &[MatchInput], vals: &[MatchInput],
chk: &FailureHandler, chk: &FailureHandler,
has_genuine_default: bool) { has_genuine_default: bool) {
debug!("compile_submatch(bcx={}, m={:?}, vals=[{}])", debug!("compile_submatch(bcx={}, m={:?}, vals=[{:?}])",
bcx.to_str(), bcx.to_str(), m, vals);
m,
vals.iter().map(|v| bcx.val_to_string(v.val)).collect::<Vec<_>>().join(", "));
let _indenter = indenter(); let _indenter = indenter();
let _icx = push_ctxt("match::compile_submatch"); let _icx = push_ctxt("match::compile_submatch");
let mut bcx = bcx; let mut bcx = bcx;
@ -1256,7 +1248,7 @@ fn compile_submatch_continue<'a, 'p, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
debug!("options={:?}", opts); debug!("options={:?}", opts);
let mut kind = NoBranch; let mut kind = NoBranch;
let mut test_val = val.val; let mut test_val = val.val;
debug!("test_val={}", bcx.val_to_string(test_val)); debug!("test_val={:?}", Value(test_val));
if !opts.is_empty() { if !opts.is_empty() {
match opts[0] { match opts[0] {
ConstantValue(..) | ConstantRange(..) => { ConstantValue(..) | ConstantRange(..) => {
@ -1761,8 +1753,8 @@ fn mk_binding_alloca<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>,
let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse); let lvalue = Lvalue::new_with_hint(caller_name, bcx, p_id, HintKind::DontZeroJustUse);
let datum = Datum::new(llval, var_ty, lvalue); let datum = Datum::new(llval, var_ty, lvalue);
debug!("mk_binding_alloca cleanup_scope={:?} llval={} var_ty={:?}", debug!("mk_binding_alloca cleanup_scope={:?} llval={:?} var_ty={:?}",
cleanup_scope, bcx.ccx().tn().val_to_string(llval), var_ty); cleanup_scope, Value(llval), var_ty);
// Subtle: be sure that we *populate* the memory *before* // Subtle: be sure that we *populate* the memory *before*
// we schedule the cleanup. // we schedule the cleanup.
@ -1794,10 +1786,8 @@ pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
val: MatchInput, val: MatchInput,
cleanup_scope: cleanup::ScopeId) cleanup_scope: cleanup::ScopeId)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={})", debug!("bind_irrefutable_pat(bcx={}, pat={:?}, val={:?})",
bcx.to_str(), bcx.to_str(), pat, val);
pat,
bcx.val_to_string(val.val));
if bcx.sess().asm_comments() { if bcx.sess().asm_comments() {
add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})", add_comment(bcx, &format!("bind_irrefutable_pat(pat={:?})",
@ -1923,7 +1913,7 @@ pub fn bind_irrefutable_pat<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// a regular one // a regular one
if !type_is_sized(tcx, fty) { if !type_is_sized(tcx, fty) {
let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr"); let scratch = alloc_ty(bcx, fty, "__struct_field_fat_ptr");
debug!("Creating fat pointer {}", bcx.val_to_string(scratch)); debug!("Creating fat pointer {:?}", Value(scratch));
Store(bcx, fldptr, expr::get_dataptr(bcx, scratch)); Store(bcx, fldptr, expr::get_dataptr(bcx, scratch));
Store(bcx, val.meta, expr::get_meta(bcx, scratch)); Store(bcx, val.meta, expr::get_meta(bcx, scratch));
fldptr = scratch; fldptr = scratch;

View file

@ -0,0 +1,538 @@
// Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use llvm::{self, ValueRef};
use trans::base;
use trans::builder::Builder;
use trans::common::{type_is_fat_ptr, BlockAndBuilder};
use trans::context::CrateContext;
use trans::cabi_x86;
use trans::cabi_x86_64;
use trans::cabi_x86_win64;
use trans::cabi_arm;
use trans::cabi_aarch64;
use trans::cabi_powerpc;
use trans::cabi_powerpc64;
use trans::cabi_mips;
use trans::cabi_asmjs;
use trans::machine::{llalign_of_min, llsize_of, llsize_of_real};
use trans::type_::Type;
use trans::type_of;
use rustc_front::hir;
use middle::ty::{self, Ty};
use libc::c_uint;
pub use syntax::abi::Abi;
/// The first half of a fat pointer.
/// - For a closure, this is the code address.
/// - For an object or trait instance, this is the address of the box.
/// - For a slice, this is the base address.
pub const FAT_PTR_ADDR: usize = 0;
/// The second half of a fat pointer.
/// - For a closure, this is the address of the environment.
/// - For an object or trait instance, this is the address of the vtable.
/// - For a slice, this is the length.
pub const FAT_PTR_EXTRA: usize = 1;
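// Standalone illustration (not part of this patch): a slice reference is a fat
// pointer whose first half (FAT_PTR_ADDR) is the data address and whose second
// half (FAT_PTR_EXTRA) is the length; passing it as two arguments splits
// exactly these halves.
fn main() {
    let bytes = [1u8, 2, 3, 4];
    let slice: &[u8] = &bytes;
    let addr = slice.as_ptr() as usize; // the FAT_PTR_ADDR half
    let len = slice.len();              // the FAT_PTR_EXTRA half
    println!("data = {:#x}, extra = {}", addr, len);
    assert_eq!(len, 4);
}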
#[derive(Clone, Copy, PartialEq, Debug)]
enum ArgKind {
/// Pass the argument directly using the normal converted
/// LLVM type or by coercing to another specified type
Direct,
/// Pass the argument indirectly via a hidden pointer
Indirect,
/// Ignore the argument (useful for empty struct)
Ignore,
}
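// Standalone illustration (not part of this patch) of what ArgKind::Ignore is
// for: zero-sized arguments such as unit structs occupy no storage, so they
// can be dropped from the LLVM signature entirely.
use std::mem::size_of;

struct Marker; // a zero-sized type

fn takes_marker(_m: Marker, x: i32) -> i32 {
    x + 1
}

fn main() {
    assert_eq!(size_of::<Marker>(), 0);
    assert_eq!(takes_marker(Marker, 41), 42);
}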
/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[derive(Clone, Copy, Debug)]
pub struct ArgType {
kind: ArgKind,
/// Original LLVM type
pub original_ty: Type,
/// Sizing LLVM type (pointers are opaque).
/// Unlike original_ty, this is guaranteed to be complete.
///
/// For example, while we're computing the function pointer type in
/// `struct Foo(fn(Foo));`, `original_ty` is still LLVM's `%Foo = {}`.
/// The field type will likely end up being `void(%Foo)*`, but we cannot
/// use `%Foo` to compute properties (e.g. size and alignment) of `Foo`,
/// until `%Foo` is completed by having all of its field types inserted,
/// so `ty` holds the "sizing type" of `Foo`, which replaces all pointers
/// with opaque ones, resulting in `{i8*}` for `Foo`.
/// ABI-specific logic can then look at the size, alignment and fields of
/// `{i8*}` in order to determine how the argument will be passed.
/// Only later will `original_ty` aka `%Foo` be used in the LLVM function
/// pointer type, without ever having introspected it.
pub ty: Type,
/// Coerced LLVM Type
pub cast: Option<Type>,
/// Dummy argument, which is emitted before the real argument
pub pad: Option<Type>,
/// LLVM attributes of argument
pub attrs: llvm::Attributes
}
impl ArgType {
fn new(original_ty: Type, ty: Type) -> ArgType {
ArgType {
kind: ArgKind::Direct,
original_ty: original_ty,
ty: ty,
cast: None,
pad: None,
attrs: llvm::Attributes::default()
}
}
pub fn make_indirect(&mut self, ccx: &CrateContext) {
assert_eq!(self.kind, ArgKind::Direct);
// Wipe old attributes, likely not valid through indirection.
self.attrs = llvm::Attributes::default();
let llarg_sz = llsize_of_real(ccx, self.ty);
// For non-immediate arguments the callee gets its own copy of
// the value on the stack, so there are no aliases. It's also
// program-invisible so can't possibly capture
self.attrs.set(llvm::Attribute::NoAlias)
.set(llvm::Attribute::NoCapture)
.set_dereferenceable(llarg_sz);
self.kind = ArgKind::Indirect;
}
pub fn ignore(&mut self) {
assert_eq!(self.kind, ArgKind::Direct);
self.kind = ArgKind::Ignore;
}
pub fn is_indirect(&self) -> bool {
self.kind == ArgKind::Indirect
}
pub fn is_ignore(&self) -> bool {
self.kind == ArgKind::Ignore
}
/// Get the LLVM type for an lvalue of the original Rust type of
/// this argument/return, i.e. the result of `type_of::type_of`.
pub fn memory_ty(&self, ccx: &CrateContext) -> Type {
if self.original_ty == Type::i1(ccx) {
Type::i8(ccx)
} else {
self.original_ty
}
}
/// Store a direct/indirect value described by this ArgType into a
/// lvalue for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
pub fn store(&self, b: &Builder, mut val: ValueRef, dst: ValueRef) {
if self.is_ignore() {
return;
}
if self.is_indirect() {
let llsz = llsize_of(b.ccx, self.ty);
let llalign = llalign_of_min(b.ccx, self.ty);
base::call_memcpy(b, dst, val, llsz, llalign as u32);
} else if let Some(ty) = self.cast {
let cast_dst = b.pointercast(dst, ty.ptr_to());
let store = b.store(val, cast_dst);
let llalign = llalign_of_min(b.ccx, self.ty);
unsafe {
llvm::LLVMSetAlignment(store, llalign);
}
} else {
if self.original_ty == Type::i1(b.ccx) {
val = b.zext(val, Type::i8(b.ccx));
}
b.store(val, dst);
}
}
pub fn store_fn_arg(&self, bcx: &BlockAndBuilder, idx: &mut usize, dst: ValueRef) {
if self.pad.is_some() {
*idx += 1;
}
if self.is_ignore() {
return;
}
let val = llvm::get_param(bcx.fcx().llfn, *idx as c_uint);
*idx += 1;
self.store(bcx, val, dst);
}
}
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument.
pub args: Vec<ArgType>,
/// LLVM return type.
pub ret: ArgType,
pub variadic: bool,
pub cconv: llvm::CallConv
}
impl FnType {
pub fn new<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
abi: Abi,
sig: &ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType {
let mut fn_ty = FnType::unadjusted(ccx, abi, sig, extra_args);
fn_ty.adjust_for_abi(ccx, abi, sig);
fn_ty
}
pub fn unadjusted<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
abi: Abi,
sig: &ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>]) -> FnType {
use self::Abi::*;
let cconv = match ccx.sess().target.target.adjust_abi(abi) {
RustIntrinsic | PlatformIntrinsic |
Rust | RustCall => llvm::CCallConv,
// It's the ABI's job to select this, not us.
System => ccx.sess().bug("system abi should be selected elsewhere"),
Stdcall => llvm::X86StdcallCallConv,
Fastcall => llvm::X86FastcallCallConv,
Vectorcall => llvm::X86_VectorCall,
C => llvm::CCallConv,
Win64 => llvm::X86_64_Win64,
// These API constants ought to be more specific...
Cdecl => llvm::CCallConv,
Aapcs => llvm::CCallConv,
};
let mut inputs = &sig.inputs[..];
let extra_args = if abi == RustCall {
assert!(!sig.variadic && extra_args.is_empty());
match inputs[inputs.len() - 1].sty {
ty::TyTuple(ref tupled_arguments) => {
inputs = &inputs[..inputs.len() - 1];
&tupled_arguments[..]
}
_ => {
unreachable!("argument to function with \"rust-call\" ABI \
is not a tuple");
}
}
} else {
assert!(sig.variadic || extra_args.is_empty());
extra_args
};
let target = &ccx.sess().target.target;
let win_x64_gnu = target.target_os == "windows"
&& target.arch == "x86_64"
&& target.target_env == "gnu";
let rust_abi = match abi {
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => true,
_ => false
};
let arg_of = |ty: Ty<'tcx>, is_return: bool| {
if ty.is_bool() {
let llty = Type::i1(ccx);
let mut arg = ArgType::new(llty, llty);
arg.attrs.set(llvm::Attribute::ZExt);
arg
} else {
let mut arg = ArgType::new(type_of::type_of(ccx, ty),
type_of::sizing_type_of(ccx, ty));
if llsize_of_real(ccx, arg.ty) == 0 {
// For some forsaken reason, x86_64-pc-windows-gnu
// doesn't ignore zero-sized struct arguments.
if is_return || rust_abi || !win_x64_gnu {
arg.ignore();
}
}
arg
}
};
let ret_ty = match sig.output {
ty::FnConverging(ret_ty) => ret_ty,
ty::FnDiverging => ccx.tcx().mk_nil()
};
let mut ret = arg_of(ret_ty, true);
if !type_is_fat_ptr(ccx.tcx(), ret_ty) {
// The `noalias` attribute on the return value is useful to a
// function ptr caller.
if let ty::TyBox(_) = ret_ty.sty {
// `Box` pointer return values never alias because ownership
// is transferred
ret.attrs.set(llvm::Attribute::NoAlias);
}
// We can also mark the return value as `dereferenceable` in certain cases
match ret_ty.sty {
// These are not really pointers but pairs, (pointer, len)
ty::TyRef(_, ty::TypeAndMut { ty, .. }) |
ty::TyBox(ty) => {
let llty = type_of::sizing_type_of(ccx, ty);
let llsz = llsize_of_real(ccx, llty);
ret.attrs.set_dereferenceable(llsz);
}
_ => {}
}
}
let mut args = Vec::with_capacity(inputs.len() + extra_args.len());
// Handle safe Rust thin and fat pointers.
let rust_ptr_attrs = |ty: Ty<'tcx>, arg: &mut ArgType| match ty.sty {
// `Box` pointer parameters never alias because ownership is transferred
ty::TyBox(inner) => {
arg.attrs.set(llvm::Attribute::NoAlias);
Some(inner)
}
ty::TyRef(b, mt) => {
use middle::ty::{BrAnon, ReLateBound};
// `&mut` pointer parameters never alias other parameters, or mutable global data
//
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
// both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
// on memory dependencies rather than pointer equality
let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
if mt.mutbl != hir::MutMutable && !interior_unsafe {
arg.attrs.set(llvm::Attribute::NoAlias);
}
if mt.mutbl == hir::MutImmutable && !interior_unsafe {
arg.attrs.set(llvm::Attribute::ReadOnly);
}
// When a reference in an argument has no named lifetime, it's
// impossible for that reference to escape this function
// (returned or stored beyond the call by a closure).
if let ReLateBound(_, BrAnon(_)) = *b {
arg.attrs.set(llvm::Attribute::NoCapture);
}
Some(mt.ty)
}
_ => None
};
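// Standalone illustration (not part of this patch) of why the readonly/noalias
// attributes above are withheld when UnsafeCell is involved: through a shared
// &Cell<T> the pointee can still change, so &T is only truly read-only when T
// has no interior mutability.
use std::cell::Cell;

fn bump(counter: &Cell<u32>) {
    counter.set(counter.get() + 1); // mutation through a shared reference
}

fn main() {
    let c = Cell::new(0);
    bump(&c);
    bump(&c);
    assert_eq!(c.get(), 2);
}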
for ty in inputs.iter().chain(extra_args.iter()) {
let mut arg = arg_of(ty, false);
if type_is_fat_ptr(ccx.tcx(), ty) {
let original_tys = arg.original_ty.field_types();
let sizing_tys = arg.ty.field_types();
assert_eq!((original_tys.len(), sizing_tys.len()), (2, 2));
let mut data = ArgType::new(original_tys[0], sizing_tys[0]);
let mut info = ArgType::new(original_tys[1], sizing_tys[1]);
if let Some(inner) = rust_ptr_attrs(ty, &mut data) {
data.attrs.set(llvm::Attribute::NonNull);
if ccx.tcx().struct_tail(inner).is_trait() {
info.attrs.set(llvm::Attribute::NonNull);
}
}
args.push(data);
args.push(info);
} else {
if let Some(inner) = rust_ptr_attrs(ty, &mut arg) {
let llty = type_of::sizing_type_of(ccx, inner);
let llsz = llsize_of_real(ccx, llty);
arg.attrs.set_dereferenceable(llsz);
}
args.push(arg);
}
}
FnType {
args: args,
ret: ret,
variadic: sig.variadic,
cconv: cconv
}
}
pub fn adjust_for_abi<'a, 'tcx>(&mut self,
ccx: &CrateContext<'a, 'tcx>,
abi: Abi,
sig: &ty::FnSig<'tcx>) {
if abi == Abi::Rust || abi == Abi::RustCall ||
abi == Abi::RustIntrinsic || abi == Abi::PlatformIntrinsic {
let fixup = |arg: &mut ArgType| {
let mut llty = arg.ty;
// Replace newtypes with their inner-most type.
while llty.kind() == llvm::TypeKind::Struct {
let inner = llty.field_types();
if inner.len() != 1 {
break;
}
llty = inner[0];
}
if !llty.is_aggregate() {
// Scalars and vectors, always immediate.
if llty != arg.ty {
// Needs a cast as we've unpacked a newtype.
arg.cast = Some(llty);
}
return;
}
let size = llsize_of_real(ccx, llty);
if size > llsize_of_real(ccx, ccx.int_type()) {
arg.make_indirect(ccx);
} else if size > 0 {
// We want to pass small aggregates as immediates, but using
// a LLVM aggregate type for this leads to bad optimizations,
// so we pick an appropriately sized integer type instead.
arg.cast = Some(Type::ix(ccx, size * 8));
}
};
// Fat pointers are returned by-value.
if !self.ret.is_ignore() {
if !type_is_fat_ptr(ccx.tcx(), sig.output.unwrap()) {
fixup(&mut self.ret);
}
}
for arg in &mut self.args {
if arg.is_ignore() { continue; }
fixup(arg);
}
if self.ret.is_indirect() {
self.ret.attrs.set(llvm::Attribute::StructRet);
}
return;
}
match &ccx.sess().target.target.arch[..] {
"x86" => cabi_x86::compute_abi_info(ccx, self),
"x86_64" => if ccx.sess().target.target.options.is_like_windows {
cabi_x86_win64::compute_abi_info(ccx, self);
} else {
cabi_x86_64::compute_abi_info(ccx, self);
},
"aarch64" => cabi_aarch64::compute_abi_info(ccx, self),
"arm" => {
let flavor = if ccx.sess().target.target.target_os == "ios" {
cabi_arm::Flavor::Ios
} else {
cabi_arm::Flavor::General
};
cabi_arm::compute_abi_info(ccx, self, flavor);
},
"mips" => cabi_mips::compute_abi_info(ccx, self),
"powerpc" => cabi_powerpc::compute_abi_info(ccx, self),
"powerpc64" => cabi_powerpc64::compute_abi_info(ccx, self),
"asmjs" => cabi_asmjs::compute_abi_info(ccx, self),
a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a))
}
if self.ret.is_indirect() {
self.ret.attrs.set(llvm::Attribute::StructRet);
}
}
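For intuition (an illustrative sketch, not compiler code): with the Rust ABIs the fixup above unwraps newtypes, passes any non-aggregate directly, casts small aggregates to an iN integer of the same size, and sends anything larger than the target's integer width through memory. A tiny stand-alone model of that decision:

    // Hypothetical model of the size-based choice made by `fixup` above.
    // `target_int_size` would be 8 on a 64-bit target; this is not a real compiler API.
    fn pass_style(size_bytes: u64, target_int_size: u64) -> &'static str {
        if size_bytes > target_int_size {
            "indirect (hidden pointer to a stack copy)"
        } else if size_bytes > 0 {
            "direct, cast to an iN integer (N = size_bytes * 8)"
        } else {
            "zero-sized: left as-is (normally already ignored)"
        }
    }

So a 3-byte aggregate would travel as an i24 immediate, while a 24-byte struct would be passed indirectly.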
pub fn llvm_type(&self, ccx: &CrateContext) -> Type {
let mut llargument_tys = Vec::new();
let llreturn_ty = if self.ret.is_ignore() {
Type::void(ccx)
} else if self.ret.is_indirect() {
llargument_tys.push(self.ret.original_ty.ptr_to());
Type::void(ccx)
} else {
self.ret.cast.unwrap_or(self.ret.original_ty)
};
for arg in &self.args {
if arg.is_ignore() {
continue;
}
// add padding
if let Some(ty) = arg.pad {
llargument_tys.push(ty);
}
let llarg_ty = if arg.is_indirect() {
arg.original_ty.ptr_to()
} else {
arg.cast.unwrap_or(arg.original_ty)
};
llargument_tys.push(llarg_ty);
}
if self.variadic {
Type::variadic_func(&llargument_tys, &llreturn_ty)
} else {
Type::func(&llargument_tys, &llreturn_ty)
}
}
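To make the resulting signature concrete (a hypothetical illustration, not output of this code): an indirect return is pushed as a leading pointer parameter and the visible return type collapses to void, while indirect arguments are passed as pointers and any `cast` type replaces the original.

    // Hypothetical example: Big is 32 bytes, so under the rules above both the
    // return value and the argument end up indirect on a 64-bit target.
    struct Big { a: [u64; 4] }

    fn takes_big(x: Big) -> Big { x }

    // Conceptually, llvm_type would then build something shaped like
    //     void @takes_big(%Big* sret, %Big*)
    // rather than a function that returns %Big by value.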
pub fn apply_attrs_llfn(&self, llfn: ValueRef) {
let mut i = if self.ret.is_indirect() { 1 } else { 0 };
if !self.ret.is_ignore() {
self.ret.attrs.apply_llfn(i, llfn);
}
i += 1;
for arg in &self.args {
if !arg.is_ignore() {
if arg.pad.is_some() { i += 1; }
arg.attrs.apply_llfn(i, llfn);
i += 1;
}
}
}
pub fn apply_attrs_callsite(&self, callsite: ValueRef) {
let mut i = if self.ret.is_indirect() { 1 } else { 0 };
if !self.ret.is_ignore() {
self.ret.attrs.apply_callsite(i, callsite);
}
i += 1;
for arg in &self.args {
if !arg.is_ignore() {
if arg.pad.is_some() { i += 1; }
arg.attrs.apply_callsite(i, callsite);
i += 1;
}
}
if self.cconv != llvm::CCallConv {
llvm::SetInstructionCallConv(callsite, self.cconv);
}
}
}
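As I read the two apply_attrs_* methods above, attribute slot 0 is the return value and parameters start at slot 1; when the return is indirect, its attributes land on the hidden out-pointer in slot 1 and every real argument shifts by one, with padding arguments consuming a slot of their own. A toy sketch of that numbering (an assumption-laden model, not compiler code):

    // Hypothetical: compute the attribute slot used for the return and for each argument.
    fn attr_slots(ret_indirect: bool, arg_has_pad: &[bool]) -> (usize, Vec<usize>) {
        let mut i = if ret_indirect { 1 } else { 0 };
        let ret_slot = i;
        i += 1;
        let mut arg_slots = Vec::new();
        for &has_pad in arg_has_pad {
            if has_pad { i += 1; }   // the padding value occupies its own slot
            arg_slots.push(i);
            i += 1;
        }
        (ret_slot, arg_slots)
    }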

View file

@ -48,13 +48,13 @@ use std;
use std::rc::Rc; use std::rc::Rc;
use llvm::{ValueRef, True, IntEQ, IntNE}; use llvm::{ValueRef, True, IntEQ, IntNE};
use back::abi::FAT_PTR_ADDR;
use middle::subst; use middle::subst;
use middle::ty::{self, Ty, TyCtxt}; use middle::ty::{self, Ty, TyCtxt};
use syntax::ast; use syntax::ast;
use syntax::attr; use syntax::attr;
use syntax::attr::IntType; use syntax::attr::IntType;
use trans::_match; use trans::_match;
use trans::abi::FAT_PTR_ADDR;
use trans::base::InitAlloca; use trans::base::InitAlloca;
use trans::build::*; use trans::build::*;
use trans::cleanup; use trans::cleanup;
@ -67,6 +67,7 @@ use trans::machine;
use trans::monomorphize; use trans::monomorphize;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of; use trans::type_of;
use trans::value::Value;
type Hint = attr::ReprAttr; type Hint = attr::ReprAttr;
@ -88,11 +89,6 @@ impl TypeContext {
fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext { fn may_need_drop_flag(t: Type, needs_drop_flag: bool) -> TypeContext {
TypeContext { prefix: t, needs_drop_flag: needs_drop_flag } TypeContext { prefix: t, needs_drop_flag: needs_drop_flag }
} }
pub fn to_string(self) -> String {
let TypeContext { prefix, needs_drop_flag } = self;
format!("TypeContext {{ prefix: {}, needs_drop_flag: {} }}",
prefix.to_string(), needs_drop_flag)
}
} }
/// Representations. /// Representations.
@ -1069,6 +1065,15 @@ pub fn num_args(r: &Repr, discr: Disr) -> usize {
/// Access a field, at a point when the value's case is known. /// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>, pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef { val: MaybeSizedValue, discr: Disr, ix: usize) -> ValueRef {
trans_field_ptr_builder(&bcx.build(), r, val, discr, ix)
}
/// Access a field, at a point when the value's case is known.
pub fn trans_field_ptr_builder<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
r: &Repr<'tcx>,
val: MaybeSizedValue,
discr: Disr, ix: usize)
-> ValueRef {
// Note: if this ever needs to generate conditionals (e.g., if we // Note: if this ever needs to generate conditionals (e.g., if we
// decide to do some kind of cdr-coding-like non-unique repr // decide to do some kind of cdr-coding-like non-unique repr
// someday), it will need to return a possibly-new bcx as well. // someday), it will need to return a possibly-new bcx as well.
@ -1091,13 +1096,15 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0); assert_eq!(machine::llsize_of_alloc(bcx.ccx(), ty), 0);
// The contents of memory at this pointer can't matter, but use // The contents of memory at this pointer can't matter, but use
// the value that's "reasonable" in case of pointer comparison. // the value that's "reasonable" in case of pointer comparison.
            PointerCast(bcx, val.value, ty.ptr_to())
            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
            bcx.pointercast(val.value, ty.ptr_to())
} }
RawNullablePointer { nndiscr, nnty, .. } => { RawNullablePointer { nndiscr, nnty, .. } => {
assert_eq!(ix, 0); assert_eq!(ix, 0);
assert_eq!(discr, nndiscr); assert_eq!(discr, nndiscr);
let ty = type_of::type_of(bcx.ccx(), nnty); let ty = type_of::type_of(bcx.ccx(), nnty);
            PointerCast(bcx, val.value, ty.ptr_to())
            if bcx.is_unreachable() { return C_undef(ty.ptr_to()); }
            bcx.pointercast(val.value, ty.ptr_to())
} }
StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => { StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
assert_eq!(discr, nndiscr); assert_eq!(discr, nndiscr);
@ -1106,43 +1113,48 @@ pub fn trans_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, r: &Repr<'tcx>,
} }
} }
pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, val: MaybeSizedValue,
                                    ix: usize, needs_cast: bool) -> ValueRef {
fn struct_field_ptr<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
                                st: &Struct<'tcx>, val: MaybeSizedValue,
                                ix: usize, needs_cast: bool) -> ValueRef {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let fty = st.fields[ix];
let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
if bcx.is_unreachable() {
return C_undef(ll_fty.ptr_to());
}
let ptr_val = if needs_cast { let ptr_val = if needs_cast {
let fields = st.fields.iter().map(|&ty| { let fields = st.fields.iter().map(|&ty| {
type_of::in_memory_type_of(ccx, ty) type_of::in_memory_type_of(ccx, ty)
}).collect::<Vec<_>>(); }).collect::<Vec<_>>();
let real_ty = Type::struct_(ccx, &fields[..], st.packed); let real_ty = Type::struct_(ccx, &fields[..], st.packed);
        PointerCast(bcx, val.value, real_ty.ptr_to())
        bcx.pointercast(val.value, real_ty.ptr_to())
} else { } else {
val.value val.value
}; };
let fty = st.fields[ix];
// Simple case - we can just GEP the field // Simple case - we can just GEP the field
// * First field - Always aligned properly // * First field - Always aligned properly
// * Packed struct - There is no alignment padding // * Packed struct - There is no alignment padding
// * Field is sized - pointer is properly aligned already // * Field is sized - pointer is properly aligned already
if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) { if ix == 0 || st.packed || type_is_sized(bcx.tcx(), fty) {
        return StructGEP(bcx, ptr_val, ix);
        return bcx.struct_gep(ptr_val, ix);
} }
// If the type of the last field is [T] or str, then we don't need to do // If the type of the last field is [T] or str, then we don't need to do
// any adjusments // any adjusments
match fty.sty { match fty.sty {
ty::TySlice(..) | ty::TyStr => { ty::TySlice(..) | ty::TyStr => {
            return StructGEP(bcx, ptr_val, ix);
            return bcx.struct_gep(ptr_val, ix);
} }
_ => () _ => ()
} }
// There's no metadata available, log the case and just do the GEP. // There's no metadata available, log the case and just do the GEP.
if !val.has_meta() { if !val.has_meta() {
debug!("Unsized field `{}`, of `{}` has no metadata for adjustment", debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
ix, ix, Value(ptr_val));
bcx.val_to_string(ptr_val)); return bcx.struct_gep(ptr_val, ix);
return StructGEP(bcx, ptr_val, ix);
} }
let dbloc = DebugLoc::None; let dbloc = DebugLoc::None;
@ -1183,23 +1195,21 @@ pub fn struct_field_ptr<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, st: &Struct<'tcx>, v
// (unaligned offset + (align - 1)) & -align // (unaligned offset + (align - 1)) & -align
// Calculate offset // Calculate offset
    let align_sub_1 = Sub(bcx, align, C_uint(bcx.ccx(), 1u64), dbloc);
    let offset = And(bcx,
                     Add(bcx, unaligned_offset, align_sub_1, dbloc),
                     Neg(bcx, align, dbloc),
                     dbloc);
    dbloc.apply(bcx.fcx());
    let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx(), 1u64));
    let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
                         bcx.neg(align));
    debug!("struct_field_ptr: DST field offset: {}",
           bcx.val_to_string(offset));
    debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));
    // Cast and adjust pointer
    let byte_ptr = PointerCast(bcx, ptr_val, Type::i8p(bcx.ccx()));
    let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx()));
    let byte_ptr = GEP(bcx, byte_ptr, &[offset]);
    let byte_ptr = bcx.gep(byte_ptr, &[offset]);
    // Finally, cast back to the type expected
    let ll_fty = type_of::in_memory_type_of(bcx.ccx(), fty);
    debug!("struct_field_ptr: Field type is {}", ll_fty.to_string());
    debug!("struct_field_ptr: Field type is {:?}", ll_fty);
    PointerCast(bcx, byte_ptr, ll_fty.ptr_to())
    bcx.pointercast(byte_ptr, ll_fty.ptr_to())
} }
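The offset computation in struct_field_ptr above is the usual round-up-to-alignment identity, (unaligned_offset + (align - 1)) & -align, with bcx.neg(align) supplying the mask. A small self-contained check of the arithmetic (illustrative only; align must be a power of two):

    // Round `offset` up to the next multiple of `align`.
    fn round_up(offset: u64, align: u64) -> u64 {
        (offset + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up(13, 8), 16); // a u64 field after 13 bytes of prefix starts at 16
        assert_eq!(round_up(16, 8), 16); // already-aligned offsets are unchanged
    }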
pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>, pub fn fold_variants<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
@ -1283,14 +1293,15 @@ pub fn trans_drop_flag_ptr<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum( let scratch = unpack_datum!(bcx, datum::lvalue_scratch_datum(
bcx, tcx.dtor_type(), "drop_flag", bcx, tcx.dtor_type(), "drop_flag",
InitAlloca::Uninit("drop flag itself has no dtor"), InitAlloca::Uninit("drop flag itself has no dtor"),
        cleanup::CustomScope(custom_cleanup_scope), (), |_, bcx, _| {
        cleanup::CustomScope(custom_cleanup_scope), |bcx, _| {
debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}", debug!("no-op populate call for trans_drop_flag_ptr on dtor_type={:?}",
tcx.dtor_type()); tcx.dtor_type());
bcx bcx
} }
)); ));
bcx = fold_variants(bcx, r, val, |variant_cx, st, value| { bcx = fold_variants(bcx, r, val, |variant_cx, st, value| {
        let ptr = struct_field_ptr(variant_cx, st, MaybeSizedValue::sized(value),
        let ptr = struct_field_ptr(&variant_cx.build(), st,
MaybeSizedValue::sized(value),
(st.fields.len() - 1), false); (st.fields.len() - 1), false);
datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr")) datum::Datum::new(ptr, ptr_ty, datum::Lvalue::new("adt::trans_drop_flag_ptr"))
.store_to(variant_cx, scratch.val) .store_to(variant_cx, scratch.val)
@ -1442,7 +1453,7 @@ fn padding(ccx: &CrateContext, size: u64) -> ValueRef {
fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a } fn roundup(x: u64, a: u32) -> u64 { let a = a as u64; ((x + (a - 1)) / a) * a }
/// Get the discriminant of a constant value. /// Get the discriminant of a constant value.
pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr { pub fn const_get_discrim(r: &Repr, val: ValueRef) -> Disr {
match *r { match *r {
CEnum(ity, _, _) => { CEnum(ity, _, _) => {
match ity { match ity {
@ -1452,13 +1463,13 @@ pub fn const_get_discrim(ccx: &CrateContext, r: &Repr, val: ValueRef) -> Disr {
} }
General(ity, _, _) => { General(ity, _, _) => {
match ity { match ity {
attr::SignedInt(..) => Disr(const_to_int(const_get_elt(ccx, val, &[0])) as u64), attr::SignedInt(..) => Disr(const_to_int(const_get_elt(val, &[0])) as u64),
attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(ccx, val, &[0]))) attr::UnsignedInt(..) => Disr(const_to_uint(const_get_elt(val, &[0])))
} }
} }
Univariant(..) => Disr(0), Univariant(..) => Disr(0),
RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => { RawNullablePointer { .. } | StructWrappedNullablePointer { .. } => {
ccx.sess().bug("const discrim access of non c-like enum") unreachable!("const discrim access of non c-like enum")
} }
} }
} }
@ -1472,25 +1483,25 @@ pub fn const_get_field(ccx: &CrateContext, r: &Repr, val: ValueRef,
_discr: Disr, ix: usize) -> ValueRef { _discr: Disr, ix: usize) -> ValueRef {
match *r { match *r {
CEnum(..) => ccx.sess().bug("element access in C-like enum const"), CEnum(..) => ccx.sess().bug("element access in C-like enum const"),
Univariant(..) => const_struct_field(ccx, val, ix), Univariant(..) => const_struct_field(val, ix),
General(..) => const_struct_field(ccx, val, ix + 1), General(..) => const_struct_field(val, ix + 1),
RawNullablePointer { .. } => { RawNullablePointer { .. } => {
assert_eq!(ix, 0); assert_eq!(ix, 0);
val val
}, },
StructWrappedNullablePointer{ .. } => const_struct_field(ccx, val, ix) StructWrappedNullablePointer{ .. } => const_struct_field(val, ix)
} }
} }
/// Extract field of struct-like const, skipping our alignment padding. /// Extract field of struct-like const, skipping our alignment padding.
fn const_struct_field(ccx: &CrateContext, val: ValueRef, ix: usize) -> ValueRef { fn const_struct_field(val: ValueRef, ix: usize) -> ValueRef {
// Get the ix-th non-undef element of the struct. // Get the ix-th non-undef element of the struct.
let mut real_ix = 0; // actual position in the struct let mut real_ix = 0; // actual position in the struct
let mut ix = ix; // logical index relative to real_ix let mut ix = ix; // logical index relative to real_ix
let mut field; let mut field;
loop { loop {
loop { loop {
field = const_get_elt(ccx, val, &[real_ix]); field = const_get_elt(val, &[real_ix]);
if !is_undef(field) { if !is_undef(field) {
break; break;
} }

View file

@ -10,13 +10,11 @@
//! # Translation of inline assembly. //! # Translation of inline assembly.
use llvm; use llvm::{self, ValueRef};
use trans::base;
use trans::build::*; use trans::build::*;
use trans::callee;
use trans::common::*; use trans::common::*;
use trans::cleanup; use trans::datum::{Datum, Lvalue};
use trans::cleanup::CleanupMethods;
use trans::expr;
use trans::type_of; use trans::type_of;
use trans::type_::Type; use trans::type_::Type;
@ -26,64 +24,35 @@ use syntax::ast::AsmDialect;
use libc::{c_uint, c_char}; use libc::{c_uint, c_char};
// Take an inline assembly expression and splat it out via LLVM // Take an inline assembly expression and splat it out via LLVM
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
                                    -> Block<'blk, 'tcx> {
    let fcx = bcx.fcx;
    let mut bcx = bcx;
    let mut constraints = Vec::new();
    let mut output_types = Vec::new();
    let temp_scope = fcx.push_custom_cleanup_scope();
    let mut ext_inputs = Vec::new();
    let mut ext_constraints = Vec::new();
pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
                                    ia: &ast::InlineAsm,
                                    outputs: Vec<Datum<'tcx, Lvalue>>,
                                    mut inputs: Vec<ValueRef>) {
    let mut ext_constraints = vec![];
    let mut output_types = vec![];
// Prepare the output operands // Prepare the output operands
let mut outputs = Vec::new(); let mut indirect_outputs = vec![];
let mut inputs = Vec::new(); for (i, (out, out_datum)) in ia.outputs.iter().zip(&outputs).enumerate() {
for (i, out) in ia.outputs.iter().enumerate() { let val = if out.is_rw || out.is_indirect {
constraints.push(out.constraint.clone()); Some(base::load_ty(bcx, out_datum.val, out_datum.ty))
} else {
let out_datum = unpack_datum!(bcx, expr::trans(bcx, &out.expr)); None
};
if out.is_rw {
inputs.push(val.unwrap());
ext_constraints.push(i.to_string());
}
if out.is_indirect { if out.is_indirect {
bcx = callee::trans_arg_datum(bcx, indirect_outputs.push(val.unwrap());
expr_ty(bcx, &out.expr),
out_datum,
cleanup::CustomScope(temp_scope),
&mut inputs);
if out.is_rw {
ext_inputs.push(*inputs.last().unwrap());
ext_constraints.push(i.to_string());
}
} else { } else {
output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty)); output_types.push(type_of::type_of(bcx.ccx(), out_datum.ty));
outputs.push(out_datum.val);
if out.is_rw {
bcx = callee::trans_arg_datum(bcx,
expr_ty(bcx, &out.expr),
out_datum,
cleanup::CustomScope(temp_scope),
&mut ext_inputs);
ext_constraints.push(i.to_string());
}
} }
} }
if !indirect_outputs.is_empty() {
// Now the input operands indirect_outputs.extend_from_slice(&inputs);
for &(ref c, ref input) in &ia.inputs { inputs = indirect_outputs;
constraints.push((*c).clone());
let in_datum = unpack_datum!(bcx, expr::trans(bcx, &input));
bcx = callee::trans_arg_datum(bcx,
expr_ty(bcx, &input),
in_datum,
cleanup::CustomScope(temp_scope),
&mut inputs);
} }
inputs.extend_from_slice(&ext_inputs[..]);
// no failure occurred preparing operands, no need to cleanup
fcx.pop_custom_cleanup_scope(temp_scope);
let clobbers = ia.clobbers.iter() let clobbers = ia.clobbers.iter()
.map(|s| format!("~{{{}}}", &s)); .map(|s| format!("~{{{}}}", &s));
@ -95,19 +64,18 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
_ => Vec::new() _ => Vec::new()
}; };
    let all_constraints= constraints.iter()
                                    .map(|s| s.to_string())
                                    .chain(ext_constraints)
                                    .chain(clobbers)
                                    .chain(arch_clobbers.iter()
                                               .map(|s| s.to_string()))
                                    .collect::<Vec<String>>()
                                    .join(",");
    let all_constraints =
        ia.outputs.iter().map(|out| out.constraint.to_string())
          .chain(ia.inputs.iter().map(|s| s.to_string()))
          .chain(ext_constraints)
          .chain(clobbers)
          .chain(arch_clobbers.iter().map(|s| s.to_string()))
          .collect::<Vec<String>>().join(",");
debug!("Asm Constraints: {}", &all_constraints[..]); debug!("Asm Constraints: {}", &all_constraints[..]);
// Depending on how many outputs we have, the return type is different // Depending on how many outputs we have, the return type is different
    let num_outputs = outputs.len();
    let num_outputs = output_types.len();
let output_type = match num_outputs { let output_type = match num_outputs {
0 => Type::void(bcx.ccx()), 0 => Type::void(bcx.ccx()),
1 => output_types[0], 1 => output_types[0],
@ -131,13 +99,10 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
dialect); dialect);
// Again, based on how many outputs we have // Again, based on how many outputs we have
if num_outputs == 1 { let outputs = ia.outputs.iter().zip(&outputs).filter(|&(ref o, _)| !o.is_indirect);
Store(bcx, r, outputs[0]); for (i, (_, datum)) in outputs.enumerate() {
} else { let v = if num_outputs == 1 { r } else { ExtractValue(bcx, r, i) };
for (i, o) in outputs.iter().enumerate() { Store(bcx, v, datum.val);
let v = ExtractValue(bcx, r, i);
Store(bcx, v, *o);
}
} }
// Store expn_id in a metadata node so we can map LLVM errors // Store expn_id in a metadata node so we can map LLVM errors
@ -152,7 +117,4 @@ pub fn trans_inline_asm<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, ia: &ast::InlineAsm)
llvm::LLVMSetMetadata(r, kind, llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1)); llvm::LLVMMDNodeInContext(bcx.ccx().llcx(), &val, 1));
} }
return bcx;
} }

View file

@ -10,20 +10,11 @@
//! Set and unset common attributes on LLVM values. //! Set and unset common attributes on LLVM values.
use libc::{c_uint, c_ulonglong}; use libc::{c_uint, c_ulonglong};
use llvm::{self, ValueRef, AttrHelper}; use llvm::{self, ValueRef};
use middle::ty;
use middle::infer;
use middle::traits::ProjectionMode;
use session::config::NoDebugInfo; use session::config::NoDebugInfo;
use syntax::abi::Abi;
pub use syntax::attr::InlineAttr; pub use syntax::attr::InlineAttr;
use syntax::ast; use syntax::ast;
use rustc_front::hir;
use trans::base;
use trans::common;
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::machine;
use trans::type_of;
/// Mark LLVM function to use provided inline heuristic. /// Mark LLVM function to use provided inline heuristic.
#[inline] #[inline]
@ -112,199 +103,13 @@ pub fn from_fn_attrs(ccx: &CrateContext, attrs: &[ast::Attribute], llfn: ValueRe
for attr in attrs { for attr in attrs {
if attr.check_name("cold") { if attr.check_name("cold") {
unsafe { llvm::Attributes::default().set(llvm::Attribute::Cold)
llvm::LLVMAddFunctionAttribute(llfn, .apply_llfn(llvm::FunctionIndex as usize, llfn)
llvm::FunctionIndex as c_uint,
llvm::ColdAttribute as u64)
}
} else if attr.check_name("allocator") { } else if attr.check_name("allocator") {
llvm::Attribute::NoAlias.apply_llfn(llvm::ReturnIndex as c_uint, llfn); llvm::Attributes::default().set(llvm::Attribute::NoAlias)
.apply_llfn(llvm::ReturnIndex as usize, llfn)
} else if attr.check_name("unwind") { } else if attr.check_name("unwind") {
unwind(llfn, true); unwind(llfn, true);
} }
} }
} }
/// Composite function which converts function type into LLVM attributes for the function.
pub fn from_fn_type<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn_type: ty::Ty<'tcx>)
-> llvm::AttrBuilder {
use middle::ty::{BrAnon, ReLateBound};
let function_type;
let (fn_sig, abi, env_ty) = match fn_type.sty {
ty::TyFnDef(_, _, ref f) | ty::TyFnPtr(ref f) => (&f.sig, f.abi, None),
ty::TyClosure(closure_did, ref substs) => {
let infcx = infer::normalizing_infer_ctxt(ccx.tcx(),
&ccx.tcx().tables,
ProjectionMode::Any);
function_type = infcx.closure_type(closure_did, substs);
let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
(&function_type.sig, Abi::RustCall, Some(self_type))
}
_ => ccx.sess().bug("expected closure or function.")
};
let fn_sig = ccx.tcx().erase_late_bound_regions(fn_sig);
let fn_sig = infer::normalize_associated_type(ccx.tcx(), &fn_sig);
let mut attrs = llvm::AttrBuilder::new();
let ret_ty = fn_sig.output;
// These have an odd calling convention, so we need to manually
// unpack the input ty's
let input_tys = match fn_type.sty {
ty::TyClosure(..) => {
assert!(abi == Abi::RustCall);
match fn_sig.inputs[0].sty {
ty::TyTuple(ref inputs) => {
let mut full_inputs = vec![env_ty.expect("Missing closure environment")];
full_inputs.extend_from_slice(inputs);
full_inputs
}
_ => ccx.sess().bug("expected tuple'd inputs")
}
},
ty::TyFnDef(..) | ty::TyFnPtr(_) if abi == Abi::RustCall => {
let mut inputs = vec![fn_sig.inputs[0]];
match fn_sig.inputs[1].sty {
ty::TyTuple(ref t_in) => {
inputs.extend_from_slice(&t_in[..]);
inputs
}
_ => ccx.sess().bug("expected tuple'd inputs")
}
}
_ => fn_sig.inputs.clone()
};
// Index 0 is the return value of the llvm func, so we start at 1
let mut idx = 1;
if let ty::FnConverging(ret_ty) = ret_ty {
// A function pointer is called without the declaration
// available, so we have to apply any attributes with ABI
// implications directly to the call instruction. Right now,
// the only attribute we need to worry about is `sret`.
if type_of::return_uses_outptr(ccx, ret_ty) {
let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, ret_ty));
// The outptr can be noalias and nocapture because it's entirely
// invisible to the program. We also know it's nonnull as well
// as how many bytes we can dereference
attrs.arg(1, llvm::Attribute::StructRet)
.arg(1, llvm::Attribute::NoAlias)
.arg(1, llvm::Attribute::NoCapture)
.arg(1, llvm::DereferenceableAttribute(llret_sz));
// Add one more since there's an outptr
idx += 1;
} else {
// The `noalias` attribute on the return value is useful to a
// function ptr caller.
match ret_ty.sty {
// `Box` pointer return values never alias because ownership
// is transferred
ty::TyBox(it) if common::type_is_sized(ccx.tcx(), it) => {
attrs.ret(llvm::Attribute::NoAlias);
}
_ => {}
}
// We can also mark the return value as `dereferenceable` in certain cases
match ret_ty.sty {
// These are not really pointers but pairs, (pointer, len)
ty::TyRef(_, ty::TypeAndMut { ty: inner, .. })
| ty::TyBox(inner) if common::type_is_sized(ccx.tcx(), inner) => {
let llret_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
attrs.ret(llvm::DereferenceableAttribute(llret_sz));
}
_ => {}
}
if let ty::TyBool = ret_ty.sty {
attrs.ret(llvm::Attribute::ZExt);
}
}
}
for &t in input_tys.iter() {
match t.sty {
_ if type_of::arg_is_indirect(ccx, t) => {
let llarg_sz = machine::llsize_of_real(ccx, type_of::type_of(ccx, t));
// For non-immediate arguments the callee gets its own copy of
// the value on the stack, so there are no aliases. It's also
// program-invisible so can't possibly capture
attrs.arg(idx, llvm::Attribute::NoAlias)
.arg(idx, llvm::Attribute::NoCapture)
.arg(idx, llvm::DereferenceableAttribute(llarg_sz));
}
ty::TyBool => {
attrs.arg(idx, llvm::Attribute::ZExt);
}
// `Box` pointer parameters never alias because ownership is transferred
ty::TyBox(inner) => {
attrs.arg(idx, llvm::Attribute::NoAlias);
if common::type_is_sized(ccx.tcx(), inner) {
let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, inner));
attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
} else {
attrs.arg(idx, llvm::NonNullAttribute);
if inner.is_trait() {
attrs.arg(idx + 1, llvm::NonNullAttribute);
}
}
}
ty::TyRef(b, mt) => {
// `&mut` pointer parameters never alias other parameters, or mutable global data
//
// `&T` where `T` contains no `UnsafeCell<U>` is immutable, and can be marked as
// both `readonly` and `noalias`, as LLVM's definition of `noalias` is based solely
// on memory dependencies rather than pointer equality
let interior_unsafe = mt.ty.type_contents(ccx.tcx()).interior_unsafe();
if mt.mutbl != hir::MutMutable && !interior_unsafe {
attrs.arg(idx, llvm::Attribute::NoAlias);
}
if mt.mutbl == hir::MutImmutable && !interior_unsafe {
attrs.arg(idx, llvm::Attribute::ReadOnly);
}
// & pointer parameters are also never null and for sized types we also know
// exactly how many bytes we can dereference
if common::type_is_sized(ccx.tcx(), mt.ty) {
let llsz = machine::llsize_of_real(ccx, type_of::type_of(ccx, mt.ty));
attrs.arg(idx, llvm::DereferenceableAttribute(llsz));
} else {
attrs.arg(idx, llvm::NonNullAttribute);
if mt.ty.is_trait() {
attrs.arg(idx + 1, llvm::NonNullAttribute);
}
}
// When a reference in an argument has no named lifetime, it's
// impossible for that reference to escape this function
// (returned or stored beyond the call by a closure).
if let ReLateBound(_, BrAnon(_)) = *b {
attrs.arg(idx, llvm::Attribute::NoCapture);
}
}
_ => ()
}
if common::type_is_fat_ptr(ccx.tcx(), t) {
idx += 2;
} else {
idx += 1;
}
}
attrs
}

File diff suppressed because it is too large

View file

@ -12,7 +12,7 @@
#![allow(non_snake_case)] #![allow(non_snake_case)]
use llvm; use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate}; use llvm::{Opcode, IntPredicate, RealPredicate};
use llvm::{ValueRef, BasicBlockRef}; use llvm::{ValueRef, BasicBlockRef};
use trans::common::*; use trans::common::*;
@ -20,6 +20,7 @@ use syntax::codemap::Span;
use trans::builder::Builder; use trans::builder::Builder;
use trans::type_::Type; use trans::type_::Type;
use trans::value::Value;
use trans::debuginfo::DebugLoc; use trans::debuginfo::DebugLoc;
use libc::{c_uint, c_char}; use libc::{c_uint, c_char};
@ -138,7 +139,6 @@ pub fn Invoke(cx: Block,
args: &[ValueRef], args: &[ValueRef],
then: BasicBlockRef, then: BasicBlockRef,
catch: BasicBlockRef, catch: BasicBlockRef,
attributes: Option<AttrBuilder>,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> ValueRef { -> ValueRef {
if cx.unreachable.get() { if cx.unreachable.get() {
@ -146,12 +146,14 @@ pub fn Invoke(cx: Block,
} }
check_not_terminated(cx); check_not_terminated(cx);
terminate(cx, "Invoke"); terminate(cx, "Invoke");
debug!("Invoke({} with arguments ({}))", debug!("Invoke({:?} with arguments ({}))",
cx.val_to_string(fn_), Value(fn_),
args.iter().map(|a| cx.val_to_string(*a)).collect::<Vec<String>>().join(", ")); args.iter().map(|a| {
format!("{:?}", Value(*a))
}).collect::<Vec<String>>().join(", "));
debug_loc.apply(cx.fcx); debug_loc.apply(cx.fcx);
let bundle = cx.lpad().and_then(|b| b.bundle()); let bundle = cx.lpad().and_then(|b| b.bundle());
B(cx).invoke(fn_, args, then, catch, bundle, attributes) B(cx).invoke(fn_, args, then, catch, bundle)
} }
pub fn Unreachable(cx: Block) { pub fn Unreachable(cx: Block) {
@ -908,7 +910,6 @@ pub fn InlineAsmCall(cx: Block, asm: *const c_char, cons: *const c_char,
pub fn Call(cx: Block, pub fn Call(cx: Block,
fn_: ValueRef, fn_: ValueRef,
args: &[ValueRef], args: &[ValueRef],
attributes: Option<AttrBuilder>,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> ValueRef { -> ValueRef {
if cx.unreachable.get() { if cx.unreachable.get() {
@ -916,22 +917,7 @@ pub fn Call(cx: Block,
} }
debug_loc.apply(cx.fcx); debug_loc.apply(cx.fcx);
let bundle = cx.lpad.get().and_then(|b| b.bundle()); let bundle = cx.lpad.get().and_then(|b| b.bundle());
B(cx).call(fn_, args, bundle, attributes) B(cx).call(fn_, args, bundle)
}
pub fn CallWithConv(cx: Block,
fn_: ValueRef,
args: &[ValueRef],
conv: CallConv,
attributes: Option<AttrBuilder>,
debug_loc: DebugLoc)
-> ValueRef {
if cx.unreachable.get() {
return _UndefReturn(cx, fn_);
}
debug_loc.apply(cx.fcx);
let bundle = cx.lpad.get().and_then(|b| b.bundle());
B(cx).call_with_conv(fn_, args, conv, bundle, attributes)
} }
pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) { pub fn AtomicFence(cx: Block, order: AtomicOrdering, scope: SynchronizationScope) {

View file

@ -11,13 +11,14 @@
#![allow(dead_code)] // FFI wrappers #![allow(dead_code)] // FFI wrappers
use llvm; use llvm;
use llvm::{CallConv, AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect, AttrBuilder}; use llvm::{AtomicBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef}; use llvm::{Opcode, IntPredicate, RealPredicate, False, OperandBundleDef};
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ModuleRef};
use trans::base; use trans::base;
use trans::common::*; use trans::common::*;
use trans::machine::llalign_of_pref; use trans::machine::llalign_of_pref;
use trans::type_::Type; use trans::type_::Type;
use trans::value::Value;
use util::nodemap::FnvHashMap; use util::nodemap::FnvHashMap;
use libc::{c_uint, c_char}; use libc::{c_uint, c_char};
@ -164,33 +165,28 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
args: &[ValueRef], args: &[ValueRef],
then: BasicBlockRef, then: BasicBlockRef,
catch: BasicBlockRef, catch: BasicBlockRef,
bundle: Option<&OperandBundleDef>, bundle: Option<&OperandBundleDef>)
attributes: Option<AttrBuilder>)
-> ValueRef { -> ValueRef {
self.count_insn("invoke"); self.count_insn("invoke");
debug!("Invoke {} with args ({})", debug!("Invoke {:?} with args ({})",
self.ccx.tn().val_to_string(llfn), Value(llfn),
args.iter() args.iter()
.map(|&v| self.ccx.tn().val_to_string(v)) .map(|&v| format!("{:?}", Value(v)))
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join(", ")); .join(", "));
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _); let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
unsafe { unsafe {
let v = llvm::LLVMRustBuildInvoke(self.llbuilder, llvm::LLVMRustBuildInvoke(self.llbuilder,
llfn, llfn,
args.as_ptr(), args.as_ptr(),
args.len() as c_uint, args.len() as c_uint,
then, then,
catch, catch,
bundle, bundle,
noname()); noname())
if let Some(a) = attributes {
a.apply_callsite(v);
}
v
} }
} }
@ -497,9 +493,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} }
pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { pub fn store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
debug!("Store {} -> {}", debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
assert!(!self.llbuilder.is_null()); assert!(!self.llbuilder.is_null());
self.count_insn("store"); self.count_insn("store");
unsafe { unsafe {
@ -508,9 +502,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} }
pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef { pub fn volatile_store(&self, val: ValueRef, ptr: ValueRef) -> ValueRef {
debug!("Store {} -> {}", debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
assert!(!self.llbuilder.is_null()); assert!(!self.llbuilder.is_null());
self.count_insn("store.volatile"); self.count_insn("store.volatile");
unsafe { unsafe {
@ -521,9 +513,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
} }
pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) { pub fn atomic_store(&self, val: ValueRef, ptr: ValueRef, order: AtomicOrdering) {
debug!("Store {} -> {}", debug!("Store {:?} -> {:?}", Value(val), Value(ptr));
self.ccx.tn().val_to_string(val),
self.ccx.tn().val_to_string(ptr));
self.count_insn("store.atomic"); self.count_insn("store.atomic");
unsafe { unsafe {
let ty = Type::from_ref(llvm::LLVMTypeOf(ptr)); let ty = Type::from_ref(llvm::LLVMTypeOf(ptr));
@ -780,7 +770,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
comment_text.as_ptr(), noname(), False, comment_text.as_ptr(), noname(), False,
False) False)
}; };
self.call(asm, &[], None, None); self.call(asm, &[], None);
} }
} }
@ -796,28 +786,27 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
else { llvm::False }; else { llvm::False };
let argtys = inputs.iter().map(|v| { let argtys = inputs.iter().map(|v| {
debug!("Asm Input Type: {}", self.ccx.tn().val_to_string(*v)); debug!("Asm Input Type: {:?}", Value(*v));
val_ty(*v) val_ty(*v)
}).collect::<Vec<_>>(); }).collect::<Vec<_>>();
debug!("Asm Output Type: {}", self.ccx.tn().type_to_string(output)); debug!("Asm Output Type: {:?}", output);
let fty = Type::func(&argtys[..], &output); let fty = Type::func(&argtys[..], &output);
unsafe { unsafe {
let v = llvm::LLVMInlineAsm( let v = llvm::LLVMInlineAsm(
fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint); fty.to_ref(), asm, cons, volatile, alignstack, dia as c_uint);
self.call(v, inputs, None, None) self.call(v, inputs, None)
} }
} }
pub fn call(&self, llfn: ValueRef, args: &[ValueRef], pub fn call(&self, llfn: ValueRef, args: &[ValueRef],
bundle: Option<&OperandBundleDef>, bundle: Option<&OperandBundleDef>) -> ValueRef {
attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("call"); self.count_insn("call");
debug!("Call {} with args ({})", debug!("Call {:?} with args ({})",
self.ccx.tn().val_to_string(llfn), Value(llfn),
args.iter() args.iter()
.map(|&v| self.ccx.tn().val_to_string(v)) .map(|&v| format!("{:?}", Value(v)))
.collect::<Vec<String>>() .collect::<Vec<String>>()
.join(", ")); .join(", "));
@ -838,11 +827,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
if expected_ty != actual_ty { if expected_ty != actual_ty {
self.ccx.sess().bug( self.ccx.sess().bug(
&format!( &format!(
"Type mismatch in function call of {}. Expected {} for param {}, got {}", "Type mismatch in function call of {:?}. \
self.ccx.tn().val_to_string(llfn), Expected {:?} for param {}, got {:?}",
self.ccx.tn().type_to_string(expected_ty), Value(llfn),
i, expected_ty, i, actual_ty));
self.ccx.tn().type_to_string(actual_ty)));
} }
} }
@ -850,26 +838,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> {
let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _); let bundle = bundle.as_ref().map(|b| b.raw()).unwrap_or(0 as *mut _);
unsafe { unsafe {
let v = llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(), llvm::LLVMRustBuildCall(self.llbuilder, llfn, args.as_ptr(),
args.len() as c_uint, bundle, args.len() as c_uint, bundle, noname())
noname());
if let Some(a) = attributes {
a.apply_callsite(v);
}
v
} }
} }
pub fn call_with_conv(&self, llfn: ValueRef, args: &[ValueRef],
conv: CallConv,
bundle: Option<&OperandBundleDef>,
attributes: Option<AttrBuilder>) -> ValueRef {
self.count_insn("callwithconv");
let v = self.call(llfn, args, bundle, attributes);
llvm::SetInstructionCallConv(v, conv);
v
}
pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef { pub fn select(&self, cond: ValueRef, then_val: ValueRef, else_val: ValueRef) -> ValueRef {
self.count_insn("select"); self.count_insn("select");
unsafe { unsafe {

View file

@ -1,137 +0,0 @@
// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
pub use self::ArgKind::*;
use llvm::Attribute;
use std::option;
use trans::context::CrateContext;
use trans::cabi_x86;
use trans::cabi_x86_64;
use trans::cabi_x86_win64;
use trans::cabi_arm;
use trans::cabi_aarch64;
use trans::cabi_powerpc;
use trans::cabi_powerpc64;
use trans::cabi_mips;
use trans::cabi_asmjs;
use trans::type_::Type;
#[derive(Clone, Copy, PartialEq)]
pub enum ArgKind {
/// Pass the argument directly using the normal converted
/// LLVM type or by coercing to another specified type
Direct,
/// Pass the argument indirectly via a hidden pointer
Indirect,
/// Ignore the argument (useful for empty struct)
Ignore,
}
/// Information about how a specific C type
/// should be passed to or returned from a function
///
/// This is borrowed from clang's ABIInfo.h
#[derive(Clone, Copy)]
pub struct ArgType {
pub kind: ArgKind,
/// Original LLVM type
pub ty: Type,
/// Coerced LLVM Type
pub cast: option::Option<Type>,
/// Dummy argument, which is emitted before the real argument
pub pad: option::Option<Type>,
/// LLVM attribute of argument
pub attr: option::Option<Attribute>
}
impl ArgType {
pub fn direct(ty: Type, cast: option::Option<Type>,
pad: option::Option<Type>,
attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Direct,
ty: ty,
cast: cast,
pad: pad,
attr: attr
}
}
pub fn indirect(ty: Type, attr: option::Option<Attribute>) -> ArgType {
ArgType {
kind: Indirect,
ty: ty,
cast: option::Option::None,
pad: option::Option::None,
attr: attr
}
}
pub fn ignore(ty: Type) -> ArgType {
ArgType {
kind: Ignore,
ty: ty,
cast: None,
pad: None,
attr: None,
}
}
pub fn is_indirect(&self) -> bool {
return self.kind == Indirect;
}
pub fn is_ignore(&self) -> bool {
return self.kind == Ignore;
}
}
/// Metadata describing how the arguments to a native function
/// should be passed in order to respect the native ABI.
///
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
pub struct FnType {
/// The LLVM types of each argument.
pub arg_tys: Vec<ArgType> ,
/// LLVM return type.
pub ret_ty: ArgType,
}
pub fn compute_abi_info(ccx: &CrateContext,
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
match &ccx.sess().target.target.arch[..] {
"x86" => cabi_x86::compute_abi_info(ccx, atys, rty, ret_def),
"x86_64" => if ccx.sess().target.target.options.is_like_windows {
cabi_x86_win64::compute_abi_info(ccx, atys, rty, ret_def)
} else {
cabi_x86_64::compute_abi_info(ccx, atys, rty, ret_def)
},
"aarch64" => cabi_aarch64::compute_abi_info(ccx, atys, rty, ret_def),
"arm" => {
let flavor = if ccx.sess().target.target.target_os == "ios" {
cabi_arm::Flavor::Ios
} else {
cabi_arm::Flavor::General
};
cabi_arm::compute_abi_info(ccx, atys, rty, ret_def, flavor)
},
"mips" => cabi_mips::compute_abi_info(ccx, atys, rty, ret_def),
"powerpc" => cabi_powerpc::compute_abi_info(ccx, atys, rty, ret_def),
"powerpc64" => cabi_powerpc64::compute_abi_info(ccx, atys, rty, ret_def),
"asmjs" => cabi_asmjs::compute_abi_info(ccx, atys, rty, ret_def),
a => ccx.sess().fatal(&format!("unrecognized arch \"{}\" in target specification", a)
),
}
}

View file

@ -10,8 +10,8 @@
#![allow(non_upper_case_globals)] #![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use trans::cabi::{FnType, ArgType}; use trans::abi::{FnType, ArgType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -161,16 +161,15 @@ fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
}) })
} }
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
if is_reg_ty(ty) { if is_reg_ty(ret.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
} }
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
let llty = Type::array(&base_ty, members); ret.cast = Some(Type::array(&base_ty, members));
return ArgType::direct(ty, Some(llty), None, None); return;
} }
let size = ty_size(ty); let size = ty_size(ret.ty);
if size <= 16 { if size <= 16 {
let llty = if size <= 1 { let llty = if size <= 1 {
Type::i8(ccx) Type::i8(ccx)
@ -183,21 +182,21 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
} else { } else {
Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
}; };
return ArgType::direct(ty, Some(llty), None, None); ret.cast = Some(llty);
return;
} }
ArgType::indirect(ty, Some(Attribute::StructRet)) ret.make_indirect(ccx);
} }
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
if is_reg_ty(ty) { if is_reg_ty(arg.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
} }
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
let llty = Type::array(&base_ty, members); arg.cast = Some(Type::array(&base_ty, members));
return ArgType::direct(ty, Some(llty), None, None); return;
} }
let size = ty_size(ty); let size = ty_size(arg.ty);
if size <= 16 { if size <= 16 {
let llty = if size == 0 { let llty = if size == 0 {
Type::array(&Type::i64(ccx), 0) Type::array(&Type::i64(ccx), 0)
@ -212,9 +211,10 @@ fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType {
} else { } else {
Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
}; };
return ArgType::direct(ty, Some(llty), None, None); arg.cast = Some(llty);
return;
} }
ArgType::indirect(ty, None) arg.make_indirect(ccx);
} }
fn is_reg_ty(ty: Type) -> bool { fn is_reg_ty(ty: Type) -> bool {
@ -228,24 +228,13 @@ fn is_reg_ty(ty: Type) -> bool {
} }
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], if !fty.ret.is_ignore() {
rty: Type, classify_ret_ty(ccx, &mut fty.ret);
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
} }
let ret_ty = if ret_def { for arg in &mut fty.args {
classify_ret_ty(ccx, rty) if arg.is_ignore() { continue; }
} else { classify_arg_ty(ccx, arg);
ArgType::direct(Type::void(ccx), None, None, None) }
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }

View file

@ -10,8 +10,8 @@
#![allow(non_upper_case_globals)] #![allow(non_upper_case_globals)]
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use trans::cabi::{FnType, ArgType}; use trans::abi::{FnType, ArgType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -129,12 +129,11 @@ fn ty_size(ty: Type, align_fn: TyAlignFn) -> usize {
} }
} }
fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType, align_fn: TyAlignFn) {
if is_reg_ty(ty) { if is_reg_ty(ret.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
} }
let size = ty_size(ty, align_fn); let size = ty_size(ret.ty, align_fn);
if size <= 4 { if size <= 4 {
let llty = if size <= 1 { let llty = if size <= 1 {
Type::i8(ccx) Type::i8(ccx)
@ -143,24 +142,24 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType
} else { } else {
Type::i32(ccx) Type::i32(ccx)
}; };
return ArgType::direct(ty, Some(llty), None, None); ret.cast = Some(llty);
return;
} }
ArgType::indirect(ty, Some(Attribute::StructRet)) ret.make_indirect(ccx);
} }
fn classify_arg_ty(ccx: &CrateContext, ty: Type, align_fn: TyAlignFn) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, align_fn: TyAlignFn) {
if is_reg_ty(ty) { if is_reg_ty(arg.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
} }
let align = align_fn(ty); let align = align_fn(arg.ty);
let size = ty_size(ty, align_fn); let size = ty_size(arg.ty, align_fn);
let llty = if align <= 4 { let llty = if align <= 4 {
Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64) Type::array(&Type::i32(ccx), ((size + 3) / 4) as u64)
} else { } else {
Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64) Type::array(&Type::i64(ccx), ((size + 7) / 8) as u64)
}; };
ArgType::direct(ty, Some(llty), None, None) arg.cast = Some(llty);
} }
fn is_reg_ty(ty: Type) -> bool { fn is_reg_ty(ty: Type) -> bool {
@ -174,30 +173,18 @@ fn is_reg_ty(ty: Type) -> bool {
} }
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType, flavor: Flavor) {
atys: &[Type],
rty: Type,
ret_def: bool,
flavor: Flavor) -> FnType {
let align_fn = match flavor { let align_fn = match flavor {
Flavor::General => general_ty_align as TyAlignFn, Flavor::General => general_ty_align as TyAlignFn,
Flavor::Ios => ios_ty_align as TyAlignFn, Flavor::Ios => ios_ty_align as TyAlignFn,
}; };
let mut arg_tys = Vec::new(); if !fty.ret.is_ignore() {
for &aty in atys { classify_ret_ty(ccx, &mut fty.ret, align_fn);
let ty = classify_arg_ty(ccx, aty, align_fn);
arg_tys.push(ty);
} }
let ret_ty = if ret_def { for arg in &mut fty.args {
classify_ret_ty(ccx, rty, align_fn) if arg.is_ignore() { continue; }
} else { classify_arg_ty(ccx, arg, align_fn);
ArgType::direct(Type::void(ccx), None, None, None) }
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }

View file

@ -11,62 +11,45 @@
#![allow(non_upper_case_globals)] #![allow(non_upper_case_globals)]
use llvm::{Struct, Array, Attribute}; use llvm::{Struct, Array, Attribute};
use trans::cabi::{FnType, ArgType}; use trans::abi::{FnType, ArgType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type;
// Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128 // Data layout: e-p:32:32-i64:64-v128:32:128-n32-S128
// See the https://github.com/kripken/emscripten-fastcomp-clang repository. // See the https://github.com/kripken/emscripten-fastcomp-clang repository.
// The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions. // The class `EmscriptenABIInfo` in `/lib/CodeGen/TargetInfo.cpp` contains the ABI definitions.
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
match ty.kind() { match ret.ty.kind() {
Struct => { Struct => {
let field_types = ty.field_types(); let field_types = ret.ty.field_types();
if field_types.len() == 1 { if field_types.len() == 1 {
ArgType::direct(ty, Some(field_types[0]), None, None) ret.cast = Some(field_types[0]);
} else { } else {
ArgType::indirect(ty, Some(Attribute::StructRet)) ret.make_indirect(ccx);
} }
},
Array => {
ArgType::indirect(ty, Some(Attribute::StructRet))
},
_ => {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
} }
Array => {
ret.make_indirect(ccx);
}
_ => {}
} }
} }
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
if ty.is_aggregate() { if arg.ty.is_aggregate() {
ArgType::indirect(ty, Some(Attribute::ByVal)) arg.make_indirect(ccx);
} else { arg.attrs.set(Attribute::ByVal);
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
} }
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], if !fty.ret.is_ignore() {
rty: Type, classify_ret_ty(ccx, &mut fty.ret);
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
for &aty in atys {
let ty = classify_arg_ty(ccx, aty);
arg_tys.push(ty);
} }
let ret_ty = if ret_def { for arg in &mut fty.args {
classify_ret_ty(ccx, rty) if arg.is_ignore() { continue; }
} else { classify_arg_ty(ccx, arg);
ArgType::direct(Type::void(ccx), None, None, None) }
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }

View file

@ -13,8 +13,8 @@
use libc::c_uint; use libc::c_uint;
use std::cmp; use std::cmp;
use llvm; use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector, Attribute}; use llvm::{Integer, Pointer, Float, Double, Struct, Array, Vector};
use trans::cabi::{ArgType, FnType}; use trans::abi::{ArgType, FnType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -86,34 +86,18 @@ fn ty_size(ty: Type) -> usize {
} }
} }
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
} else {
ArgType::indirect(ty, Some(Attribute::StructRet))
}
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType {
let orig_offset = *offset; let orig_offset = *offset;
let size = ty_size(ty) * 8; let size = ty_size(arg.ty) * 8;
let mut align = ty_align(ty); let mut align = ty_align(arg.ty);
align = cmp::min(cmp::max(align, 4), 8); align = cmp::min(cmp::max(align, 4), 8);
*offset = align_up_to(*offset, align); *offset = align_up_to(*offset, align);
*offset += align_up_to(size, align * 8) / 8; *offset += align_up_to(size, align * 8) / 8;
if is_reg_ty(ty) { if !is_reg_ty(arg.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; arg.cast = Some(struct_ty(ccx, arg.ty));
ArgType::direct(ty, None, None, attr) arg.pad = padding_ty(ccx, align, orig_offset);
} else {
ArgType::direct(
ty,
Some(struct_ty(ccx, ty)),
padding_ty(ccx, align, orig_offset),
None
)
} }
} }
@ -161,27 +145,14 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, &coerce_to_int(ccx, size), false) Type::struct_(ccx, &coerce_to_int(ccx, size), false)
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) {
rty: Type, fty.ret.make_indirect(ccx);
ret_def: bool) -> FnType { }
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
let sret = ret_ty.is_indirect(); let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
let mut arg_tys = Vec::new(); for arg in &mut fty.args {
let mut offset = if sret { 4 } else { 0 }; if arg.is_ignore() { continue; }
classify_arg_ty(ccx, arg, &mut offset);
for aty in atys { }
let ty = classify_arg_ty(ccx, *aty, &mut offset);
arg_tys.push(ty);
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }

View file

@ -10,8 +10,8 @@
use libc::c_uint; use libc::c_uint;
use llvm; use llvm;
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use trans::cabi::{FnType, ArgType}; use trans::abi::{FnType, ArgType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -82,34 +82,18 @@ fn ty_size(ty: Type) -> usize {
} }
} }
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType, offset: &mut usize) {
if is_reg_ty(ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
} else {
ArgType::indirect(ty, Some(Attribute::StructRet))
}
}
fn classify_arg_ty(ccx: &CrateContext, ty: Type, offset: &mut usize) -> ArgType {
let orig_offset = *offset; let orig_offset = *offset;
let size = ty_size(ty) * 8; let size = ty_size(arg.ty) * 8;
let mut align = ty_align(ty); let mut align = ty_align(arg.ty);
align = cmp::min(cmp::max(align, 4), 8); align = cmp::min(cmp::max(align, 4), 8);
*offset = align_up_to(*offset, align); *offset = align_up_to(*offset, align);
*offset += align_up_to(size, align * 8) / 8; *offset += align_up_to(size, align * 8) / 8;
if is_reg_ty(ty) { if !is_reg_ty(arg.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; arg.cast = Some(struct_ty(ccx, arg.ty));
ArgType::direct(ty, None, None, attr) arg.pad = padding_ty(ccx, align, orig_offset);
} else {
ArgType::direct(
ty,
Some(struct_ty(ccx, ty)),
padding_ty(ccx, align, orig_offset),
None
)
} }
} }
@ -156,27 +140,14 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, &coerce_to_int(ccx, size), false) Type::struct_(ccx, &coerce_to_int(ccx, size), false)
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], if !fty.ret.is_ignore() && !is_reg_ty(fty.ret.ty) {
rty: Type, fty.ret.make_indirect(ccx);
ret_def: bool) -> FnType { }
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
let sret = ret_ty.is_indirect(); let mut offset = if fty.ret.is_indirect() { 4 } else { 0 };
let mut arg_tys = Vec::new(); for arg in &mut fty.args {
let mut offset = if sret { 4 } else { 0 }; if arg.is_ignore() { continue; }
classify_arg_ty(ccx, arg, &mut offset);
for aty in atys { }
let ty = classify_arg_ty(ccx, *aty, &mut offset);
arg_tys.push(ty);
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }

View file

@ -15,8 +15,8 @@
// Alignment of 128 bit types is not currently handled, this will // Alignment of 128 bit types is not currently handled, this will
// need to be fixed when PowerPC vector support is added. // need to be fixed when PowerPC vector support is added.
use llvm::{Integer, Pointer, Float, Double, Struct, Array, Attribute}; use llvm::{Integer, Pointer, Float, Double, Struct, Array};
use trans::cabi::{FnType, ArgType}; use trans::abi::{FnType, ArgType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -151,22 +151,21 @@ fn is_homogenous_aggregate_ty(ty: Type) -> Option<(Type, u64)> {
}) })
} }
fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_ret_ty(ccx: &CrateContext, ret: &mut ArgType) {
if is_reg_ty(ty) { if is_reg_ty(ret.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
} }
// The PowerPC64 big endian ABI doesn't return aggregates in registers // The PowerPC64 big endian ABI doesn't return aggregates in registers
if ccx.sess().target.target.target_endian == "big" { if ccx.sess().target.target.target_endian == "big" {
return ArgType::indirect(ty, Some(Attribute::StructRet)) ret.make_indirect(ccx);
} }
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) { if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ret.ty) {
let llty = Type::array(&base_ty, members); ret.cast = Some(Type::array(&base_ty, members));
return ArgType::direct(ty, Some(llty), None, None); return;
} }
let size = ty_size(ty); let size = ty_size(ret.ty);
if size <= 16 { if size <= 16 {
let llty = if size <= 1 { let llty = if size <= 1 {
Type::i8(ccx) Type::i8(ccx)
@ -179,28 +178,24 @@ fn classify_ret_ty(ccx: &CrateContext, ty: Type) -> ArgType {
} else { } else {
Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64) Type::array(&Type::i64(ccx), ((size + 7 ) / 8 ) as u64)
}; };
return ArgType::direct(ty, Some(llty), None, None); ret.cast = Some(llty);
return;
} }
ArgType::indirect(ty, Some(Attribute::StructRet)) ret.make_indirect(ccx);
} }
fn classify_arg_ty(ccx: &CrateContext, ty: Type) -> ArgType { fn classify_arg_ty(ccx: &CrateContext, arg: &mut ArgType) {
if is_reg_ty(ty) { if is_reg_ty(arg.ty) {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None }; return;
return ArgType::direct(ty, None, None, attr);
}
if let Some((base_ty, members)) = is_homogenous_aggregate_ty(ty) {
let llty = Type::array(&base_ty, members);
return ArgType::direct(ty, Some(llty), None, None);
} }
ArgType::direct( if let Some((base_ty, members)) = is_homogenous_aggregate_ty(arg.ty) {
ty, arg.cast = Some(Type::array(&base_ty, members));
Some(struct_ty(ccx, ty)), return;
None, }
None
) arg.cast = Some(struct_ty(ccx, arg.ty));
} }
fn is_reg_ty(ty: Type) -> bool { fn is_reg_ty(ty: Type) -> bool {
@ -236,24 +231,13 @@ fn struct_ty(ccx: &CrateContext, ty: Type) -> Type {
Type::struct_(ccx, &coerce_to_long(ccx, size), false) Type::struct_(ccx, &coerce_to_long(ccx, size), false)
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], if !fty.ret.is_ignore() {
rty: Type, classify_ret_ty(ccx, &mut fty.ret);
ret_def: bool) -> FnType { }
let ret_ty = if ret_def {
classify_ret_ty(ccx, rty)
} else {
ArgType::direct(Type::void(ccx), None, None, None)
};
let mut arg_tys = Vec::new(); for arg in &mut fty.args {
for &aty in atys { if arg.is_ignore() { continue; }
let ty = classify_arg_ty(ccx, aty); classify_arg_ty(ccx, arg);
arg_tys.push(ty); }
};
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }
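The PowerPC64 classifier above keys on whether a return value or argument is a homogeneous aggregate, i.e. a struct whose flattened fields all share one floating-point base type, and casts it to an array of that type so it can travel in registers. The real `is_homogenous_aggregate_ty` walks nested LLVM struct/array types; the sketch below flattens that to a plain field list, and the 8-member cap is an illustrative assumption, not something taken from this diff.

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Field { F32, F64 }

    // Some((base, count)) when every field has the same floating-point type
    // and the aggregate is small enough to pass in registers.
    fn homogeneous_aggregate(fields: &[Field]) -> Option<(Field, u64)> {
        let first = *fields.first()?;
        if fields.len() <= 8 && fields.iter().all(|&f| f == first) {
            Some((first, fields.len() as u64))
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(homogeneous_aggregate(&[Field::F64; 3]), Some((Field::F64, 3)));
        assert_eq!(homogeneous_aggregate(&[Field::F32, Field::F64]), None);
    }

When the check succeeds, the code above just records `cast = Some(Type::array(&base_ty, members))` and returns; everything else falls through to the size-based integer casts or an indirect return.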
View file
@ -8,24 +8,14 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
use self::Strategy::*;
use llvm::*; use llvm::*;
use trans::cabi::{ArgType, FnType}; use trans::abi::FnType;
use trans::type_::Type; use trans::type_::Type;
use super::common::*; use super::common::*;
use super::machine::*; use super::machine::*;
enum Strategy { RetValue(Type), RetPointer } pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
pub fn compute_abi_info(ccx: &CrateContext, if !fty.ret.is_ignore() && fty.ret.ty.kind() == Struct {
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
let mut arg_tys = Vec::new();
let ret_ty;
if !ret_def {
ret_ty = ArgType::direct(Type::void(ccx), None, None, None);
} else if rty.kind() == Struct {
// Returning a structure. Most often, this will use // Returning a structure. Most often, this will use
// a hidden first argument. On some platforms, though, // a hidden first argument. On some platforms, though,
// small structs are returned as integers. // small structs are returned as integers.
@ -33,53 +23,25 @@ pub fn compute_abi_info(ccx: &CrateContext,
// Some links: // Some links:
// http://www.angelcode.com/dev/callconv/callconv.html // http://www.angelcode.com/dev/callconv/callconv.html
// Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp
let t = &ccx.sess().target.target; let t = &ccx.sess().target.target;
let strategy = if t.options.is_like_osx || t.options.is_like_windows { if t.options.is_like_osx || t.options.is_like_windows {
match llsize_of_alloc(ccx, rty) { match llsize_of_alloc(ccx, fty.ret.ty) {
1 => RetValue(Type::i8(ccx)), 1 => fty.ret.cast = Some(Type::i8(ccx)),
2 => RetValue(Type::i16(ccx)), 2 => fty.ret.cast = Some(Type::i16(ccx)),
4 => RetValue(Type::i32(ccx)), 4 => fty.ret.cast = Some(Type::i32(ccx)),
8 => RetValue(Type::i64(ccx)), 8 => fty.ret.cast = Some(Type::i64(ccx)),
_ => RetPointer _ => fty.ret.make_indirect(ccx)
} }
} else { } else {
RetPointer fty.ret.make_indirect(ccx);
};
match strategy {
RetValue(t) => {
ret_ty = ArgType::direct(rty, Some(t), None, None);
}
RetPointer => {
ret_ty = ArgType::indirect(rty, Some(Attribute::StructRet));
}
} }
} else {
let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ret_ty = ArgType::direct(rty, None, None, attr);
} }
for &t in atys { for arg in &mut fty.args {
let ty = match t.kind() { if arg.is_ignore() { continue; }
Struct => { if arg.ty.kind() == Struct {
let size = llsize_of_alloc(ccx, t); arg.make_indirect(ccx);
if size == 0 { arg.attrs.set(Attribute::ByVal);
ArgType::ignore(t) }
} else {
ArgType::indirect(t, Some(Attribute::ByVal))
}
}
_ => {
let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(t, None, None, attr)
}
};
arg_tys.push(ty);
} }
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }
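The rewritten x86 handler keeps the same policy as before, just expressed as in-place mutation: on targets that return small structs in registers (OS X and Windows in this code), a struct return of 1/2/4/8 bytes is cast to the matching integer, anything else becomes an indirect `sret` return, and struct arguments are passed indirectly with `byval`. The decision table in isolation, with a string standing in for the LLVM cast type:

    #[derive(Debug, PartialEq)]
    enum RetStrategy { Cast(&'static str), Indirect }

    fn small_struct_return(size_in_bytes: u64) -> RetStrategy {
        match size_in_bytes {
            1 => RetStrategy::Cast("i8"),
            2 => RetStrategy::Cast("i16"),
            4 => RetStrategy::Cast("i32"),
            8 => RetStrategy::Cast("i64"),
            _ => RetStrategy::Indirect, // returned through a hidden pointer
        }
    }

    fn main() {
        assert_eq!(small_struct_return(4), RetStrategy::Cast("i32"));
        assert_eq!(small_struct_return(12), RetStrategy::Indirect);
    }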
View file
@ -16,7 +16,7 @@ use self::RegClass::*;
use llvm::{Integer, Pointer, Float, Double}; use llvm::{Integer, Pointer, Float, Double};
use llvm::{Struct, Array, Attribute, Vector}; use llvm::{Struct, Array, Attribute, Vector};
use trans::cabi::{ArgType, FnType}; use trans::abi::{ArgType, FnType};
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
@ -383,38 +383,31 @@ fn llreg_ty(ccx: &CrateContext, cls: &[RegClass]) -> Type {
} }
} }
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type],
rty: Type,
ret_def: bool) -> FnType {
fn x86_64_ty<F>(ccx: &CrateContext, fn x86_64_ty<F>(ccx: &CrateContext,
ty: Type, arg: &mut ArgType,
is_mem_cls: F, is_mem_cls: F,
ind_attr: Attribute) ind_attr: Option<Attribute>)
-> ArgType where where F: FnOnce(&[RegClass]) -> bool
F: FnOnce(&[RegClass]) -> bool,
{ {
if !ty.is_reg_ty() { if !arg.ty.is_reg_ty() {
let cls = classify_ty(ty); let cls = classify_ty(arg.ty);
if is_mem_cls(&cls) { if is_mem_cls(&cls) {
ArgType::indirect(ty, Some(ind_attr)) arg.make_indirect(ccx);
if let Some(attr) = ind_attr {
arg.attrs.set(attr);
}
} else { } else {
ArgType::direct(ty, arg.cast = Some(llreg_ty(ccx, &cls));
Some(llreg_ty(ccx, &cls)),
None,
None)
} }
} else {
let attr = if ty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(ty, None, None, attr)
} }
} }
let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9 let mut int_regs = 6; // RDI, RSI, RDX, RCX, R8, R9
let mut sse_regs = 8; // XMM0-7 let mut sse_regs = 8; // XMM0-7
let ret_ty = if ret_def { if !fty.ret.is_ignore() {
x86_64_ty(ccx, rty, |cls| { x86_64_ty(ccx, &mut fty.ret, |cls| {
if cls.is_ret_bysret() { if cls.is_ret_bysret() {
// `sret` parameter thus one less register available // `sret` parameter thus one less register available
int_regs -= 1; int_regs -= 1;
@ -422,14 +415,12 @@ pub fn compute_abi_info(ccx: &CrateContext,
} else { } else {
false false
} }
}, Attribute::StructRet) }, None);
} else { }
ArgType::direct(Type::void(ccx), None, None, None)
};
let mut arg_tys = Vec::new(); for arg in &mut fty.args {
for t in atys { if arg.is_ignore() { continue; }
let ty = x86_64_ty(ccx, *t, |cls| { x86_64_ty(ccx, arg, |cls| {
let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize; let needed_int = cls.iter().filter(|&&c| c == Int).count() as isize;
let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize; let needed_sse = cls.iter().filter(|c| c.is_sse()).count() as isize;
let in_mem = cls.is_pass_byval() || let in_mem = cls.is_pass_byval() ||
@ -444,21 +435,15 @@ pub fn compute_abi_info(ccx: &CrateContext,
sse_regs -= needed_sse; sse_regs -= needed_sse;
} }
in_mem in_mem
}, Attribute::ByVal); }, Some(Attribute::ByVal));
arg_tys.push(ty);
// An integer, pointer, double or float parameter // An integer, pointer, double or float parameter
// thus the above closure passed to `x86_64_ty` won't // thus the above closure passed to `x86_64_ty` won't
// get called. // get called.
if t.kind() == Integer || t.kind() == Pointer { match arg.ty.kind() {
int_regs -= 1; Integer | Pointer => int_regs -= 1,
} else if t.kind() == Double || t.kind() == Float { Double | Float => sse_regs -= 1,
sse_regs -= 1; _ => {}
} }
} }
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
};
} }
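The x86-64 pass threads two mutable counters, `int_regs` (RDI, RSI, RDX, RCX, R8, R9) and `sse_regs` (XMM0-7), through the classification closures: an argument that would not fit in the remaining registers is passed in memory. The toy version below models only the fits-or-spills decision and deliberately ignores what the real code does to the budgets in the spill case, as well as classification, `byval`, and the sret adjustment.

    // Rough sketch of the register budgeting shown above.
    struct RegBudget { int_regs: isize, sse_regs: isize }

    impl RegBudget {
        fn new() -> Self { RegBudget { int_regs: 6, sse_regs: 8 } } // RDI..R9, XMM0-7

        // needed_int / needed_sse come from classifying one argument.
        fn passes_in_memory(&mut self, needed_int: isize, needed_sse: isize) -> bool {
            if self.int_regs < needed_int || self.sse_regs < needed_sse {
                true // spill to memory; the real pass adjusts the budgets differently here
            } else {
                self.int_regs -= needed_int;
                self.sse_regs -= needed_sse;
                false
            }
        }
    }

    fn main() {
        let mut budget = RegBudget::new();
        // Seven pointer-sized arguments: the first six fit, the seventh spills.
        let spills: Vec<bool> = (0..7).map(|_| budget.passes_in_memory(1, 0)).collect();
        assert_eq!(spills, vec![false, false, false, false, false, false, true]);
    }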
View file
@ -11,54 +11,29 @@
use llvm::*; use llvm::*;
use super::common::*; use super::common::*;
use super::machine::*; use super::machine::*;
use trans::cabi::{ArgType, FnType}; use trans::abi::{ArgType, FnType};
use trans::type_::Type; use trans::type_::Type;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx // Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
pub fn compute_abi_info(ccx: &CrateContext, pub fn compute_abi_info(ccx: &CrateContext, fty: &mut FnType) {
atys: &[Type], let fixup = |a: &mut ArgType| {
rty: Type, if a.ty.kind() == Struct {
ret_def: bool) -> FnType { match llsize_of_alloc(ccx, a.ty) {
let mut arg_tys = Vec::new(); 1 => a.cast = Some(Type::i8(ccx)),
2 => a.cast = Some(Type::i16(ccx)),
let ret_ty; 4 => a.cast = Some(Type::i32(ccx)),
if !ret_def { 8 => a.cast = Some(Type::i64(ccx)),
ret_ty = ArgType::direct(Type::void(ccx), None, None, None); _ => a.make_indirect(ccx)
} else if rty.kind() == Struct {
ret_ty = match llsize_of_alloc(ccx, rty) {
1 => ArgType::direct(rty, Some(Type::i8(ccx)), None, None),
2 => ArgType::direct(rty, Some(Type::i16(ccx)), None, None),
4 => ArgType::direct(rty, Some(Type::i32(ccx)), None, None),
8 => ArgType::direct(rty, Some(Type::i64(ccx)), None, None),
_ => ArgType::indirect(rty, Some(Attribute::StructRet))
};
} else {
let attr = if rty == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ret_ty = ArgType::direct(rty, None, None, attr);
}
for &t in atys {
let ty = match t.kind() {
Struct => {
match llsize_of_alloc(ccx, t) {
1 => ArgType::direct(t, Some(Type::i8(ccx)), None, None),
2 => ArgType::direct(t, Some(Type::i16(ccx)), None, None),
4 => ArgType::direct(t, Some(Type::i32(ccx)), None, None),
8 => ArgType::direct(t, Some(Type::i64(ccx)), None, None),
_ => ArgType::indirect(t, None)
}
} }
_ => { }
let attr = if t == Type::i1(ccx) { Some(Attribute::ZExt) } else { None };
ArgType::direct(t, None, None, attr)
}
};
arg_tys.push(ty);
}
return FnType {
arg_tys: arg_tys,
ret_ty: ret_ty,
}; };
if !fty.ret.is_ignore() {
fixup(&mut fty.ret);
}
for arg in &mut fty.args {
if arg.is_ignore() { continue; }
fixup(arg);
}
} }
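The Win64 handler now expresses the return and argument rules through a single local `fixup` closure applied to `fty.ret` and to every non-ignored argument. A stripped-down version of that pattern follows; it drops the check that the value is actually a struct, and `Arg` is an invented stand-in.

    #[derive(Debug)]
    struct Arg { size: u64, cast: Option<&'static str>, indirect: bool }

    fn compute_abi_info(ret: &mut Arg, args: &mut [Arg]) {
        let fixup = |a: &mut Arg| {
            match a.size {
                1 => a.cast = Some("i8"),
                2 => a.cast = Some("i16"),
                4 => a.cast = Some("i32"),
                8 => a.cast = Some("i64"),
                _ => a.indirect = true, // larger aggregates go behind a pointer
            }
        };
        fixup(ret);
        for arg in args {
            fixup(arg);
        }
    }

    fn main() {
        let mut ret = Arg { size: 16, cast: None, indirect: false };
        let mut args = [Arg { size: 4, cast: None, indirect: false }];
        compute_abi_info(&mut ret, &mut args);
        println!("{:?} {:?}", ret, args);
    }

In the real code the closure also captures `ccx` so it can query `llsize_of_alloc` and call `make_indirect`.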
File diff suppressed because it is too large

View file
@ -129,7 +129,9 @@ use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::glue; use trans::glue;
use middle::region; use middle::region;
use trans::type_::Type; use trans::type_::Type;
use trans::value::Value;
use middle::ty::{Ty, TyCtxt}; use middle::ty::{Ty, TyCtxt};
use std::fmt; use std::fmt;
use syntax::ast; use syntax::ast;
@ -401,9 +403,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
ptr: val, ptr: val,
}; };
debug!("schedule_lifetime_end({:?}, val={})", debug!("schedule_lifetime_end({:?}, val={:?})",
cleanup_scope, cleanup_scope, Value(val));
self.ccx.tn().val_to_string(val));
self.schedule_clean(cleanup_scope, drop as CleanupObj); self.schedule_clean(cleanup_scope, drop as CleanupObj);
} }
@ -426,9 +427,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
drop_hint: drop_hint, drop_hint: drop_hint,
}; };
debug!("schedule_drop_mem({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", debug!("schedule_drop_mem({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope, cleanup_scope,
self.ccx.tn().val_to_string(val), Value(val),
ty, ty,
drop.fill_on_drop, drop.fill_on_drop,
drop.skip_dtor); drop.skip_dtor);
@ -454,10 +455,10 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
drop_hint: drop_hint, drop_hint: drop_hint,
}; };
debug!("schedule_drop_and_fill_mem({:?}, val={}, ty={:?}, debug!("schedule_drop_and_fill_mem({:?}, val={:?}, ty={:?},
fill_on_drop={}, skip_dtor={}, has_drop_hint={})", fill_on_drop={}, skip_dtor={}, has_drop_hint={})",
cleanup_scope, cleanup_scope,
self.ccx.tn().val_to_string(val), Value(val),
ty, ty,
drop.fill_on_drop, drop.fill_on_drop,
drop.skip_dtor, drop.skip_dtor,
@ -488,9 +489,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
drop_hint: None, drop_hint: None,
}; };
debug!("schedule_drop_adt_contents({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", debug!("schedule_drop_adt_contents({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope, cleanup_scope,
self.ccx.tn().val_to_string(val), Value(val),
ty, ty,
drop.fill_on_drop, drop.fill_on_drop,
drop.skip_dtor); drop.skip_dtor);
@ -514,9 +515,9 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
drop_hint: None, drop_hint: None,
}); });
debug!("schedule_drop_immediate({:?}, val={}, ty={:?}) fill_on_drop={} skip_dtor={}", debug!("schedule_drop_immediate({:?}, val={:?}, ty={:?}) fill_on_drop={} skip_dtor={}",
cleanup_scope, cleanup_scope,
self.ccx.tn().val_to_string(val), Value(val),
ty, ty,
drop.fill_on_drop, drop.fill_on_drop,
drop.skip_dtor); drop.skip_dtor);
@ -532,10 +533,8 @@ impl<'blk, 'tcx> CleanupMethods<'blk, 'tcx> for FunctionContext<'blk, 'tcx> {
content_ty: Ty<'tcx>) { content_ty: Ty<'tcx>) {
let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty }; let drop = box FreeValue { ptr: val, heap: heap, content_ty: content_ty };
debug!("schedule_free_value({:?}, val={}, heap={:?})", debug!("schedule_free_value({:?}, val={:?}, heap={:?})",
cleanup_scope, cleanup_scope, Value(val), heap);
self.ccx.tn().val_to_string(val),
heap);
self.schedule_clean(cleanup_scope, drop as CleanupObj); self.schedule_clean(cleanup_scope, drop as CleanupObj);
} }
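The cleanup-scheduling changes above all make the same substitution: instead of formatting values with `self.ccx.tn().val_to_string(val)`, the `debug!` calls wrap the raw handle in the new `trans::value::Value` newtype and print it with `{:?}`. A self-contained analogue of that newtype-Debug pattern is sketched below; the real impl presumably asks LLVM to render the value, whereas this one just prints the pointer.

    use std::fmt;

    // Stand-in for a raw LLVM ValueRef handle.
    type ValueRef = *const u8;

    // Newtype whose Debug impl renders the handle.
    struct Value(ValueRef);

    impl fmt::Debug for Value {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "Value({:p})", self.0)
        }
    }

    fn main() {
        let val: ValueRef = std::ptr::null();
        // Call sites can now write `debug!("... val={:?}", Value(val))`
        // instead of threading a `val_to_string` helper through every message.
        println!("schedule_drop_mem(val={:?})", Value(val));
    }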
View file
@ -10,10 +10,11 @@
use arena::TypedArena; use arena::TypedArena;
use back::link::{self, mangle_internal_name_by_path_and_seq}; use back::link::{self, mangle_internal_name_by_path_and_seq};
use llvm::{ValueRef, get_params}; use llvm::{ValueRef, get_param, get_params};
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::infer; use middle::infer;
use middle::traits::ProjectionMode; use middle::traits::ProjectionMode;
use trans::abi::{Abi, FnType};
use trans::adt; use trans::adt;
use trans::attributes; use trans::attributes;
use trans::base::*; use trans::base::*;
@ -21,44 +22,41 @@ use trans::build::*;
use trans::callee::{self, ArgVals, Callee}; use trans::callee::{self, ArgVals, Callee};
use trans::cleanup::{CleanupMethods, CustomScope, ScopeId}; use trans::cleanup::{CleanupMethods, CustomScope, ScopeId};
use trans::common::*; use trans::common::*;
use trans::datum::{self, Datum, rvalue_scratch_datum, Rvalue}; use trans::datum::{ByRef, Datum, lvalue_scratch_datum};
use trans::datum::{rvalue_scratch_datum, Rvalue};
use trans::debuginfo::{self, DebugLoc}; use trans::debuginfo::{self, DebugLoc};
use trans::declare; use trans::declare;
use trans::expr; use trans::expr;
use trans::monomorphize::{MonoId}; use trans::monomorphize::{Instance};
use trans::type_of::*; use trans::value::Value;
use trans::Disr; use trans::Disr;
use middle::ty; use middle::ty::{self, Ty, TyCtxt};
use session::config::FullDebugInfo; use session::config::FullDebugInfo;
use syntax::abi::Abi::RustCall;
use syntax::ast; use syntax::ast;
use syntax::attr::{ThinAttributes, ThinAttributesExt};
use rustc_front::hir; use rustc_front::hir;
use libc::c_uint;
fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
closure_def_id: DefId, closure_def_id: DefId,
arg_scope_id: ScopeId, arg_scope_id: ScopeId,
freevars: &[ty::Freevar]) id: ast::NodeId) {
-> Block<'blk, 'tcx>
{
let _icx = push_ctxt("closure::load_closure_environment"); let _icx = push_ctxt("closure::load_closure_environment");
let kind = kind_for_closure(bcx.ccx(), closure_def_id);
let env_arg = &bcx.fcx.fn_ty.args[0];
let mut env_idx = bcx.fcx.fn_ty.ret.is_indirect() as usize;
// Special case for small by-value selfs. // Special case for small by-value selfs.
let closure_ty = node_id_type(bcx, bcx.fcx.id); let llenv = if kind == ty::ClosureKind::FnOnce && !env_arg.is_indirect() {
let self_type = self_type_for_closure(bcx.ccx(), closure_def_id, closure_ty); let closure_ty = node_id_type(bcx, id);
let kind = kind_for_closure(bcx.ccx(), closure_def_id); let llenv = rvalue_scratch_datum(bcx, closure_ty, "closure_env").val;
let llenv = if kind == ty::ClosureKind::FnOnce && env_arg.store_fn_arg(&bcx.build(), &mut env_idx, llenv);
!arg_is_indirect(bcx.ccx(), self_type) { llenv
let datum = rvalue_scratch_datum(bcx,
self_type,
"closure_env");
store_ty(bcx, bcx.fcx.llenv.unwrap(), datum.val, self_type);
datum.val
} else { } else {
bcx.fcx.llenv.unwrap() get_param(bcx.fcx.llfn, env_idx as c_uint)
}; };
// Store the pointer to closure data in an alloca for debug info because that's what the // Store the pointer to closure data in an alloca for debug info because that's what the
@ -71,100 +69,120 @@ fn load_closure_environment<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
None None
}; };
for (i, freevar) in freevars.iter().enumerate() { bcx.tcx().with_freevars(id, |fv| {
let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), for (i, freevar) in fv.iter().enumerate() {
closure_expr_id: bcx.fcx.id }; let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap(); closure_expr_id: id };
let mut upvar_ptr = StructGEP(bcx, llenv, i); let upvar_capture = bcx.tcx().upvar_capture(upvar_id).unwrap();
let captured_by_ref = match upvar_capture { let mut upvar_ptr = StructGEP(bcx, llenv, i);
ty::UpvarCapture::ByValue => false, let captured_by_ref = match upvar_capture {
ty::UpvarCapture::ByRef(..) => { ty::UpvarCapture::ByValue => false,
upvar_ptr = Load(bcx, upvar_ptr); ty::UpvarCapture::ByRef(..) => {
true upvar_ptr = Load(bcx, upvar_ptr);
true
}
};
let node_id = freevar.def.var_id();
bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
if kind == ty::ClosureKind::FnOnce && !captured_by_ref {
let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id);
bcx.fcx.schedule_drop_mem(arg_scope_id,
upvar_ptr,
node_id_type(bcx, node_id),
hint)
} }
};
let node_id = freevar.def.var_id();
bcx.fcx.llupvars.borrow_mut().insert(node_id, upvar_ptr);
if kind == ty::ClosureKind::FnOnce && !captured_by_ref { if let Some(env_pointer_alloca) = env_pointer_alloca {
let hint = bcx.fcx.lldropflag_hints.borrow().hint_datum(upvar_id.var_id); debuginfo::create_captured_var_metadata(
bcx.fcx.schedule_drop_mem(arg_scope_id, bcx,
upvar_ptr, node_id,
node_id_type(bcx, node_id), env_pointer_alloca,
hint) i,
captured_by_ref,
freevar.span);
}
} }
})
}
if let Some(env_pointer_alloca) = env_pointer_alloca { pub enum ClosureEnv {
debuginfo::create_captured_var_metadata( NotClosure,
bcx, Closure(DefId, ast::NodeId),
node_id, }
env_pointer_alloca,
i, impl ClosureEnv {
captured_by_ref, pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) {
freevar.span); if let ClosureEnv::Closure(def_id, id) = self {
load_closure_environment(bcx, def_id, arg_scope, id);
} }
} }
bcx
} }
pub enum ClosureEnv<'a> { fn get_self_type<'tcx>(tcx: &TyCtxt<'tcx>,
NotClosure, closure_id: DefId,
Closure(DefId, &'a [ty::Freevar]), fn_ty: Ty<'tcx>)
} -> Ty<'tcx> {
match tcx.closure_kind(closure_id) {
impl<'a> ClosureEnv<'a> { ty::ClosureKind::Fn => {
pub fn load<'blk,'tcx>(self, bcx: Block<'blk, 'tcx>, arg_scope: ScopeId) tcx.mk_imm_ref(tcx.mk_region(ty::ReStatic), fn_ty)
-> Block<'blk, 'tcx>
{
match self {
ClosureEnv::NotClosure => bcx,
ClosureEnv::Closure(def_id, freevars) => {
if freevars.is_empty() {
bcx
} else {
load_closure_environment(bcx, def_id, arg_scope, freevars)
}
}
} }
ty::ClosureKind::FnMut => {
tcx.mk_mut_ref(tcx.mk_region(ty::ReStatic), fn_ty)
}
ty::ClosureKind::FnOnce => fn_ty,
} }
} }
/// Returns the LLVM function declaration for a closure, creating it if /// Returns the LLVM function declaration for a closure, creating it if
/// necessary. If the ID does not correspond to a closure ID, returns None. /// necessary. If the ID does not correspond to a closure ID, returns None.
pub fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn get_or_create_closure_declaration<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
closure_id: DefId, closure_id: DefId,
substs: &ty::ClosureSubsts<'tcx>) substs: &ty::ClosureSubsts<'tcx>)
-> ValueRef { -> ValueRef {
// Normalize type so differences in regions and typedefs don't cause // Normalize type so differences in regions and typedefs don't cause
// duplicate declarations // duplicate declarations
let substs = ccx.tcx().erase_regions(substs); let tcx = ccx.tcx();
let mono_id = MonoId { let substs = tcx.erase_regions(substs);
let instance = Instance {
def: closure_id, def: closure_id,
params: &substs.func_substs.types params: &substs.func_substs.types
}; };
if let Some(&llfn) = ccx.closure_vals().borrow().get(&mono_id) { if let Some(&llfn) = ccx.instances().borrow().get(&instance) {
debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}", debug!("get_or_create_closure_declaration(): found closure {:?}: {:?}",
mono_id, ccx.tn().val_to_string(llfn)); instance, Value(llfn));
return llfn; return llfn;
} }
let path = ccx.tcx().def_path(closure_id); let path = tcx.def_path(closure_id);
let symbol = mangle_internal_name_by_path_and_seq(path, "closure"); let symbol = mangle_internal_name_by_path_and_seq(path, "closure");
let function_type = ccx.tcx().mk_closure_from_closure_substs(closure_id, Box::new(substs)); // Compute the rust-call form of the closure call method.
let llfn = declare::define_internal_rust_fn(ccx, &symbol[..], function_type); let infcx = infer::normalizing_infer_ctxt(tcx, &tcx.tables, ProjectionMode::Any);
let sig = &infcx.closure_type(closure_id, &substs).sig;
let sig = tcx.erase_late_bound_regions(sig);
let sig = infer::normalize_associated_type(tcx, &sig);
let closure_type = tcx.mk_closure_from_closure_substs(closure_id, Box::new(substs));
let function_type = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Normal,
abi: Abi::RustCall,
sig: ty::Binder(ty::FnSig {
inputs: Some(get_self_type(tcx, closure_id, closure_type))
.into_iter().chain(sig.inputs).collect(),
output: sig.output,
variadic: false
})
});
let llfn = declare::define_internal_fn(ccx, &symbol, function_type);
// set an inline hint for all closures // set an inline hint for all closures
attributes::inline(llfn, attributes::InlineAttr::Hint); attributes::inline(llfn, attributes::InlineAttr::Hint);
debug!("get_or_create_declaration_if_closure(): inserting new \ debug!("get_or_create_declaration_if_closure(): inserting new \
closure {:?} (type {}): {:?}", closure {:?}: {:?}",
mono_id, instance, Value(llfn));
ccx.tn().type_to_string(val_ty(llfn)), ccx.instances().borrow_mut().insert(instance, llfn);
ccx.tn().val_to_string(llfn));
ccx.closure_vals().borrow_mut().insert(mono_id, llfn);
llfn llfn
} }
@ -179,8 +197,7 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
body: &hir::Block, body: &hir::Block,
id: ast::NodeId, id: ast::NodeId,
closure_def_id: DefId, // (*) closure_def_id: DefId, // (*)
closure_substs: &'tcx ty::ClosureSubsts<'tcx>, closure_substs: &ty::ClosureSubsts<'tcx>)
closure_expr_attrs: &ThinAttributes)
-> Option<Block<'a, 'tcx>> -> Option<Block<'a, 'tcx>>
{ {
// (*) Note that in the case of inlined functions, the `closure_def_id` will be the // (*) Note that in the case of inlined functions, the `closure_def_id` will be the
@ -210,22 +227,29 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any); let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
let function_type = infcx.closure_type(closure_def_id, closure_substs); let function_type = infcx.closure_type(closure_def_id, closure_substs);
let freevars: Vec<ty::Freevar> =
tcx.with_freevars(id, |fv| fv.iter().cloned().collect());
let sig = tcx.erase_late_bound_regions(&function_type.sig); let sig = tcx.erase_late_bound_regions(&function_type.sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig); let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let closure_type = tcx.mk_closure_from_closure_substs(closure_def_id,
Box::new(closure_substs.clone()));
let sig = ty::FnSig {
inputs: Some(get_self_type(tcx, closure_def_id, closure_type))
.into_iter().chain(sig.inputs).collect(),
output: sig.output,
variadic: false
};
let fn_ty = FnType::new(ccx, Abi::RustCall, &sig, &[]);
trans_closure(ccx, trans_closure(ccx,
decl, decl,
body, body,
llfn, llfn,
param_substs, param_substs,
closure_def_id,
id, id,
closure_expr_attrs.as_attr_slice(), fn_ty,
sig.output, Abi::RustCall,
function_type.abi, ClosureEnv::Closure(closure_def_id, id));
ClosureEnv::Closure(closure_def_id, &freevars));
// Don't hoist this to the top of the function. It's perfectly legitimate // Don't hoist this to the top of the function. It's perfectly legitimate
// to have a zero-size closure (in which case dest will be `Ignore`) and // to have a zero-size closure (in which case dest will be `Ignore`) and
@ -241,21 +265,23 @@ pub fn trans_closure_expr<'a, 'tcx>(dest: Dest<'a, 'tcx>,
let repr = adt::represent_type(ccx, node_id_type(bcx, id)); let repr = adt::represent_type(ccx, node_id_type(bcx, id));
// Create the closure. // Create the closure.
for (i, freevar) in freevars.iter().enumerate() { tcx.with_freevars(id, |fv| {
let datum = expr::trans_local_var(bcx, freevar.def); for (i, freevar) in fv.iter().enumerate() {
let upvar_slot_dest = adt::trans_field_ptr( let datum = expr::trans_var(bcx, freevar.def);
bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i); let upvar_slot_dest = adt::trans_field_ptr(
let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(), bcx, &repr, adt::MaybeSizedValue::sized(dest_addr), Disr(0), i);
closure_expr_id: id }; let upvar_id = ty::UpvarId { var_id: freevar.def.var_id(),
match tcx.upvar_capture(upvar_id).unwrap() { closure_expr_id: id };
ty::UpvarCapture::ByValue => { match tcx.upvar_capture(upvar_id).unwrap() {
bcx = datum.store_to(bcx, upvar_slot_dest); ty::UpvarCapture::ByValue => {
} bcx = datum.store_to(bcx, upvar_slot_dest);
ty::UpvarCapture::ByRef(..) => { }
Store(bcx, datum.to_llref(), upvar_slot_dest); ty::UpvarCapture::ByRef(..) => {
Store(bcx, datum.to_llref(), upvar_slot_dest);
}
} }
} }
} });
adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0)); adt::trans_set_discr(bcx, &repr, dest_addr, Disr(0));
Some(bcx) Some(bcx)
@ -278,11 +304,8 @@ pub fn trans_closure_method<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
let tcx = ccx.tcx(); let tcx = ccx.tcx();
debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \ debug!("trans_closure_adapter_shim(llfn_closure_kind={:?}, \
trait_closure_kind={:?}, \ trait_closure_kind={:?}, llfn={:?})",
llfn={})", llfn_closure_kind, trait_closure_kind, Value(llfn));
llfn_closure_kind,
trait_closure_kind,
ccx.tn().val_to_string(llfn));
match (llfn_closure_kind, trait_closure_kind) { match (llfn_closure_kind, trait_closure_kind) {
(ty::ClosureKind::Fn, ty::ClosureKind::Fn) | (ty::ClosureKind::Fn, ty::ClosureKind::Fn) |
@ -324,10 +347,8 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
llreffn: ValueRef) llreffn: ValueRef)
-> ValueRef -> ValueRef
{ {
debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={})", debug!("trans_fn_once_adapter_shim(closure_def_id={:?}, substs={:?}, llreffn={:?})",
closure_def_id, closure_def_id, substs, Value(llreffn));
substs,
ccx.tn().val_to_string(llreffn));
let tcx = ccx.tcx(); let tcx = ccx.tcx();
let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any); let infcx = infer::normalizing_infer_ctxt(ccx.tcx(), &ccx.tcx().tables, ProjectionMode::Any);
@ -348,56 +369,70 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}", debug!("trans_fn_once_adapter_shim: llref_fn_ty={:?}",
llref_fn_ty); llref_fn_ty);
let ret_ty = tcx.erase_late_bound_regions(&sig.output());
let ret_ty = infer::normalize_associated_type(ccx.tcx(), &ret_ty);
// Make a version of the closure type with the same arguments, but // Make a version of the closure type with the same arguments, but
// with argument #0 being by value. // with argument #0 being by value.
assert_eq!(abi, RustCall); assert_eq!(abi, Abi::RustCall);
sig.0.inputs[0] = closure_ty; sig.0.inputs[0] = closure_ty;
let sig = tcx.erase_late_bound_regions(&sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
let fn_ty = FnType::new(ccx, abi, &sig, &[]);
let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { let llonce_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: unsafety, unsafety: unsafety,
abi: abi, abi: abi,
sig: sig sig: ty::Binder(sig)
}); });
// Create the by-value helper. // Create the by-value helper.
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim"); let function_name = link::mangle_internal_name_by_type_and_seq(ccx, llonce_fn_ty, "once_shim");
let lloncefn = declare::define_internal_rust_fn(ccx, &function_name, let lloncefn = declare::define_internal_fn(ccx, &function_name, llonce_fn_ty);
llonce_fn_ty);
let (block_arena, fcx): (TypedArena<_>, FunctionContext); let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new(); block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, fcx = FunctionContext::new(ccx, lloncefn, fn_ty, None, substs.func_substs, &block_arena);
lloncefn, let mut bcx = fcx.init(false, None);
ast::DUMMY_NODE_ID,
false,
ret_ty,
substs.func_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, ret_ty);
let mut llargs = get_params(fcx.llfn);
// the first argument (`self`) will be the (by value) closure env. // the first argument (`self`) will be the (by value) closure env.
let self_scope = fcx.push_custom_cleanup_scope(); let self_scope = fcx.push_custom_cleanup_scope();
let self_scope_id = CustomScope(self_scope); let self_scope_id = CustomScope(self_scope);
let rvalue_mode = datum::appropriate_rvalue_mode(ccx, closure_ty);
let self_idx = fcx.arg_offset();
let llself = llargs[self_idx];
let env_datum = Datum::new(llself, closure_ty, Rvalue::new(rvalue_mode));
let env_datum = unpack_datum!(bcx,
env_datum.to_lvalue_datum_in_scope(bcx, "self",
self_scope_id));
debug!("trans_fn_once_adapter_shim: env_datum={}", let mut llargs = get_params(fcx.llfn);
bcx.val_to_string(env_datum.val)); let mut self_idx = fcx.fn_ty.ret.is_indirect() as usize;
llargs[self_idx] = env_datum.val; let env_arg = &fcx.fn_ty.args[0];
let llenv = if env_arg.is_indirect() {
Datum::new(llargs[self_idx], closure_ty, Rvalue::new(ByRef))
.add_clean(&fcx, self_scope_id)
} else {
unpack_datum!(bcx, lvalue_scratch_datum(bcx, closure_ty, "self",
InitAlloca::Dropped,
self_scope_id, |bcx, llval| {
let mut llarg_idx = self_idx;
env_arg.store_fn_arg(&bcx.build(), &mut llarg_idx, llval);
bcx.fcx.schedule_lifetime_end(self_scope_id, llval);
bcx
})).val
};
debug!("trans_fn_once_adapter_shim: env={:?}", Value(llenv));
// Adjust llargs such that llargs[self_idx..] has the call arguments.
// For zero-sized closures that means sneaking in a new argument.
if env_arg.is_ignore() {
if self_idx > 0 {
self_idx -= 1;
llargs[self_idx] = llenv;
} else {
llargs.insert(0, llenv);
}
} else {
llargs[self_idx] = llenv;
}
let dest = let dest =
fcx.llretslotptr.get().map( fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot"))); |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
let callee = Callee { let callee = Callee {
data: callee::Fn(llreffn), data: callee::Fn(llreffn),
@ -407,7 +442,7 @@ fn trans_fn_once_adapter_shim<'a, 'tcx>(
fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope); fcx.pop_and_trans_custom_cleanup_scope(bcx, self_scope);
finish_fn(&fcx, bcx, ret_ty, DebugLoc::None); fcx.finish(bcx, DebugLoc::None);
lloncefn lloncefn
} }
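A central piece of the new closure lowering above is `get_self_type`, which picks the `self` type for the generated rust-call method from the closure kind so the full signature can be built before translation. Schematically, with plain strings standing in for `Ty<'tcx>` and the region handling glossed over:

    #[derive(Clone, Copy)]
    enum ClosureKind { Fn, FnMut, FnOnce }

    fn self_type_for(kind: ClosureKind, closure_ty: &str) -> String {
        match kind {
            ClosureKind::Fn => format!("&{}", closure_ty),        // Fn::call(&self, ...)
            ClosureKind::FnMut => format!("&mut {}", closure_ty), // FnMut::call_mut(&mut self, ...)
            ClosureKind::FnOnce => closure_ty.to_string(),        // FnOnce::call_once(self, ...)
        }
    }

    fn main() {
        assert_eq!(self_type_for(ClosureKind::Fn, "[closure@main]"), "&[closure@main]");
        assert_eq!(self_type_for(ClosureKind::FnOnce, "[closure@main]"), "[closure@main]");
    }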
View file
@ -194,10 +194,10 @@ use rustc_front::intravisit as hir_visit;
use rustc::front::map as hir_map; use rustc::front::map as hir_map;
use rustc::middle::def_id::DefId; use rustc::middle::def_id::DefId;
use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem}; use rustc::middle::lang_items::{ExchangeFreeFnLangItem, ExchangeMallocFnLangItem};
use rustc::middle::{ty, traits}; use rustc::middle::traits;
use rustc::middle::subst::{self, Substs, Subst}; use rustc::middle::subst::{self, Substs, Subst};
use rustc::middle::ty::{self, Ty, TypeFoldable};
use rustc::middle::ty::adjustment::CustomCoerceUnsized; use rustc::middle::ty::adjustment::CustomCoerceUnsized;
use rustc::middle::ty::fold::TypeFoldable;
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use rustc::mir::visit as mir_visit; use rustc::mir::visit as mir_visit;
use rustc::mir::visit::Visitor as MirVisitor; use rustc::mir::visit::Visitor as MirVisitor;
@ -213,11 +213,10 @@ use trans::common::{fulfill_obligation, normalize_and_test_predicates,
type_is_sized}; type_is_sized};
use trans::glue; use trans::glue;
use trans::meth; use trans::meth;
use trans::monomorphize; use trans::monomorphize::{self, Instance};
use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap}; use util::nodemap::{FnvHashSet, FnvHashMap, DefIdMap};
use std::hash::{Hash, Hasher}; use std::hash::{Hash, Hasher};
use std::rc::Rc;
#[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)] #[derive(PartialEq, Eq, Hash, Clone, Copy, Debug)]
pub enum TransItemCollectionMode { pub enum TransItemCollectionMode {
@ -225,13 +224,10 @@ pub enum TransItemCollectionMode {
Lazy Lazy
} }
#[derive(Eq, Clone, Copy, Debug)] #[derive(PartialEq, Eq, Clone, Copy, Debug)]
pub enum TransItem<'tcx> { pub enum TransItem<'tcx> {
DropGlue(ty::Ty<'tcx>), DropGlue(Ty<'tcx>),
Fn { Fn(Instance<'tcx>),
def_id: DefId,
substs: &'tcx Substs<'tcx>
},
Static(NodeId) Static(NodeId)
} }
@ -242,35 +238,19 @@ impl<'tcx> Hash for TransItem<'tcx> {
0u8.hash(s); 0u8.hash(s);
t.hash(s); t.hash(s);
}, },
TransItem::Fn { def_id, substs } => { TransItem::Fn(instance) => {
1u8.hash(s); 1u8.hash(s);
def_id.hash(s); instance.def.hash(s);
(substs as *const Substs<'tcx> as usize).hash(s); (instance.params as *const _ as usize).hash(s);
} }
TransItem::Static(node_id) => { TransItem::Static(node_id) => {
3u8.hash(s); 2u8.hash(s);
node_id.hash(s); node_id.hash(s);
} }
}; };
} }
} }
impl<'tcx> PartialEq for TransItem<'tcx> {
fn eq(&self, other: &Self) -> bool {
match (*self, *other) {
(TransItem::DropGlue(t1), TransItem::DropGlue(t2)) => t1 == t2,
(TransItem::Fn { def_id: def_id1, substs: substs1 },
TransItem::Fn { def_id: def_id2, substs: substs2 }) => {
def_id1 == def_id2 && substs1 == substs2
},
(TransItem::Static(node_id1), TransItem::Static(node_id2)) => {
node_id1 == node_id2
},
_ => false
}
}
}
pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
mode: TransItemCollectionMode) mode: TransItemCollectionMode)
-> FnvHashSet<TransItem<'tcx>> { -> FnvHashSet<TransItem<'tcx>> {
@ -282,14 +262,9 @@ pub fn collect_crate_translation_items<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
debug!("Building translation item graph, beginning at roots"); debug!("Building translation item graph, beginning at roots");
let mut visited = FnvHashSet(); let mut visited = FnvHashSet();
let mut recursion_depths = DefIdMap(); let mut recursion_depths = DefIdMap();
let mut mir_cache = DefIdMap();
for root in roots { for root in roots {
collect_items_rec(ccx, collect_items_rec(ccx, root, &mut visited, &mut recursion_depths);
root,
&mut visited,
&mut recursion_depths,
&mut mir_cache);
} }
visited visited
@ -319,27 +294,11 @@ fn collect_roots<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
roots roots
} }
#[derive(Clone)]
enum CachedMir<'mir, 'tcx: 'mir> {
Ref(&'mir mir::Mir<'tcx>),
Owned(Rc<mir::Mir<'tcx>>)
}
impl<'mir, 'tcx: 'mir> CachedMir<'mir, 'tcx> {
fn get_ref<'a>(&'a self) -> &'a mir::Mir<'tcx> {
match *self {
CachedMir::Ref(r) => r,
CachedMir::Owned(ref rc) => &rc,
}
}
}
// Collect all monomorphized translation items reachable from `starting_point` // Collect all monomorphized translation items reachable from `starting_point`
fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
starting_point: TransItem<'tcx>, starting_point: TransItem<'tcx>,
visited: &mut FnvHashSet<TransItem<'tcx>>, visited: &mut FnvHashSet<TransItem<'tcx>>,
recursion_depths: &mut DefIdMap<usize>, recursion_depths: &mut DefIdMap<usize>) {
mir_cache: &mut DefIdMap<CachedMir<'a, 'tcx>>) {
if !visited.insert(starting_point.clone()) { if !visited.insert(starting_point.clone()) {
// We've been here already, no need to search again. // We've been here already, no need to search again.
return; return;
@ -357,29 +316,33 @@ fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
TransItem::Static(_) => { TransItem::Static(_) => {
recursion_depth_reset = None; recursion_depth_reset = None;
} }
TransItem::Fn { def_id, substs: ref param_substs } => { TransItem::Fn(instance) => {
// Keep track of the monomorphization recursion depth // Keep track of the monomorphization recursion depth
recursion_depth_reset = Some(check_recursion_limit(ccx, recursion_depth_reset = Some(check_recursion_limit(ccx,
def_id, instance,
recursion_depths)); recursion_depths));
// Scan the MIR in order to find function calls, closures, and // Scan the MIR in order to find function calls, closures, and
// drop-glue // drop-glue
let mir = load_mir(ccx, def_id, mir_cache); let mir = errors::expect(ccx.sess().diagnostic(), ccx.get_mir(instance.def),
|| format!("Could not find MIR for function: {}", instance));
let mut visitor = MirNeighborCollector { let mut visitor = MirNeighborCollector {
ccx: ccx, ccx: ccx,
mir: mir.get_ref(), mir: &mir,
output: &mut neighbors, output: &mut neighbors,
param_substs: param_substs param_substs: ccx.tcx().mk_substs(Substs {
types: instance.params.clone(),
regions: subst::ErasedRegions
})
}; };
visitor.visit_mir(mir.get_ref()); visitor.visit_mir(&mir);
} }
} }
for neighbour in neighbors { for neighbour in neighbors {
collect_items_rec(ccx, neighbour, visited, recursion_depths, mir_cache); collect_items_rec(ccx, neighbour, visited, recursion_depths);
} }
if let Some((def_id, depth)) = recursion_depth_reset { if let Some((def_id, depth)) = recursion_depth_reset {
@ -389,42 +352,11 @@ fn collect_items_rec<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
debug!("END collect_items_rec({})", starting_point.to_string(ccx)); debug!("END collect_items_rec({})", starting_point.to_string(ccx));
} }
fn load_mir<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId,
mir_cache: &mut DefIdMap<CachedMir<'a, 'tcx>>)
-> CachedMir<'a, 'tcx> {
let mir_not_found_error_message = || {
format!("Could not find MIR for function: {}",
ccx.tcx().item_path_str(def_id))
};
if def_id.is_local() {
let node_id = ccx.tcx().map.as_local_node_id(def_id).unwrap();
let mir_opt = ccx.mir_map().map.get(&node_id);
let mir = errors::expect(ccx.sess().diagnostic(),
mir_opt,
mir_not_found_error_message);
CachedMir::Ref(mir)
} else {
if let Some(mir) = mir_cache.get(&def_id) {
return mir.clone();
}
let mir_opt = ccx.sess().cstore.maybe_get_item_mir(ccx.tcx(), def_id);
let mir = errors::expect(ccx.sess().diagnostic(),
mir_opt,
mir_not_found_error_message);
let cached = CachedMir::Owned(Rc::new(mir));
mir_cache.insert(def_id, cached.clone());
cached
}
}
fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>, fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId, instance: Instance<'tcx>,
recursion_depths: &mut DefIdMap<usize>) recursion_depths: &mut DefIdMap<usize>)
-> (DefId, usize) { -> (DefId, usize) {
let recursion_depth = recursion_depths.get(&def_id) let recursion_depth = recursion_depths.get(&instance.def)
.map(|x| *x) .map(|x| *x)
.unwrap_or(0); .unwrap_or(0);
debug!(" => recursion depth={}", recursion_depth); debug!(" => recursion depth={}", recursion_depth);
@ -433,20 +365,18 @@ fn check_recursion_limit<'a, 'tcx: 'a>(ccx: &CrateContext<'a, 'tcx>,
// more than the recursion limit is assumed to be causing an // more than the recursion limit is assumed to be causing an
// infinite expansion. // infinite expansion.
if recursion_depth > ccx.sess().recursion_limit.get() { if recursion_depth > ccx.sess().recursion_limit.get() {
if let Some(node_id) = ccx.tcx().map.as_local_node_id(def_id) { let error = format!("reached the recursion limit while instantiating `{}`",
ccx.sess().span_fatal(ccx.tcx().map.span(node_id), instance);
"reached the recursion limit during monomorphization"); if let Some(node_id) = ccx.tcx().map.as_local_node_id(instance.def) {
ccx.sess().span_fatal(ccx.tcx().map.span(node_id), &error);
} else { } else {
let error = format!("reached the recursion limit during \ ccx.sess().fatal(&error);
monomorphization of '{}'",
ccx.tcx().item_path_str(def_id));
ccx.sess().fatal(&error[..]);
} }
} }
recursion_depths.insert(def_id, recursion_depth + 1); recursion_depths.insert(instance.def, recursion_depth + 1);
(def_id, recursion_depth) (instance.def, recursion_depth)
} }
struct MirNeighborCollector<'a, 'tcx: 'a> { struct MirNeighborCollector<'a, 'tcx: 'a> {
@ -750,7 +680,7 @@ fn do_static_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
param_substs: &'tcx Substs<'tcx>) param_substs: &'tcx Substs<'tcx>)
-> Option<(DefId, &'tcx Substs<'tcx>)> { -> Option<(DefId, &'tcx Substs<'tcx>)> {
debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})", debug!("do_static_dispatch(fn_def_id={}, fn_substs={:?}, param_substs={:?})",
def_id_to_string(ccx, fn_def_id, None), def_id_to_string(ccx, fn_def_id),
fn_substs, fn_substs,
param_substs); param_substs);
@ -798,8 +728,8 @@ fn do_static_trait_method_dispatch<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
trait_id={}, \ trait_id={}, \
callee_substs={:?}, \ callee_substs={:?}, \
param_substs={:?}", param_substs={:?}",
def_id_to_string(ccx, trait_method.def_id, None), def_id_to_string(ccx, trait_method.def_id),
def_id_to_string(ccx, trait_id, None), def_id_to_string(ccx, trait_id),
callee_substs, callee_substs,
param_substs); param_substs);
@ -933,7 +863,7 @@ fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
-> TransItem<'tcx> -> TransItem<'tcx>
{ {
debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})", debug!("create_fn_trans_item(def_id={}, fn_substs={:?}, param_substs={:?})",
def_id_to_string(ccx, def_id, None), def_id_to_string(ccx, def_id),
fn_substs, fn_substs,
param_substs); param_substs);
@ -945,10 +875,10 @@ fn create_fn_trans_item<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_substs); fn_substs);
let concrete_substs = ccx.tcx().erase_regions(&concrete_substs); let concrete_substs = ccx.tcx().erase_regions(&concrete_substs);
let trans_item = TransItem::Fn { let trans_item = TransItem::Fn(Instance {
def_id: def_id, def: def_id,
substs: ccx.tcx().mk_substs(concrete_substs), params: &ccx.tcx().mk_substs(concrete_substs).types,
}; });
return trans_item; return trans_item;
} }
@ -1048,8 +978,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
if self.mode == TransItemCollectionMode::Eager { if self.mode == TransItemCollectionMode::Eager {
debug!("RootCollector: ADT drop-glue for {}", debug!("RootCollector: ADT drop-glue for {}",
def_id_to_string(self.ccx, def_id_to_string(self.ccx,
self.ccx.tcx().map.local_def_id(item.id), self.ccx.tcx().map.local_def_id(item.id)));
None));
let ty = glue::get_drop_glue_type(self.ccx, ty); let ty = glue::get_drop_glue_type(self.ccx, ty);
self.output.push(TransItem::DropGlue(ty)); self.output.push(TransItem::DropGlue(ty));
@ -1059,8 +988,7 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
hir::ItemStatic(..) => { hir::ItemStatic(..) => {
debug!("RootCollector: ItemStatic({})", debug!("RootCollector: ItemStatic({})",
def_id_to_string(self.ccx, def_id_to_string(self.ccx,
self.ccx.tcx().map.local_def_id(item.id), self.ccx.tcx().map.local_def_id(item.id)));
None));
self.output.push(TransItem::Static(item.id)); self.output.push(TransItem::Static(item.id));
} }
hir::ItemFn(_, _, constness, _, ref generics, _) => { hir::ItemFn(_, _, constness, _, ref generics, _) => {
@ -1069,12 +997,10 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
let def_id = self.ccx.tcx().map.local_def_id(item.id); let def_id = self.ccx.tcx().map.local_def_id(item.id);
debug!("RootCollector: ItemFn({})", debug!("RootCollector: ItemFn({})",
def_id_to_string(self.ccx, def_id, None)); def_id_to_string(self.ccx, def_id));
self.output.push(TransItem::Fn { let instance = Instance::mono(self.ccx.tcx(), def_id);
def_id: def_id, self.output.push(TransItem::Fn(instance));
substs: self.trans_empty_substs
});
} }
} }
} }
@ -1108,12 +1034,10 @@ impl<'b, 'a, 'v> hir_visit::Visitor<'v> for RootCollector<'b, 'a, 'v> {
let def_id = self.ccx.tcx().map.local_def_id(ii.id); let def_id = self.ccx.tcx().map.local_def_id(ii.id);
debug!("RootCollector: MethodImplItem({})", debug!("RootCollector: MethodImplItem({})",
def_id_to_string(self.ccx, def_id, None)); def_id_to_string(self.ccx, def_id));
self.output.push(TransItem::Fn { let instance = Instance::mono(self.ccx.tcx(), def_id);
def_id: def_id, self.output.push(TransItem::Fn(instance));
substs: self.trans_empty_substs
});
} }
} }
_ => { /* Nothing to do here */ } _ => { /* Nothing to do here */ }
@ -1142,7 +1066,7 @@ fn create_trans_items_for_default_impls<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let impl_def_id = tcx.map.local_def_id(item.id); let impl_def_id = tcx.map.local_def_id(item.id);
debug!("create_trans_items_for_default_impls(item={})", debug!("create_trans_items_for_default_impls(item={})",
def_id_to_string(ccx, impl_def_id, None)); def_id_to_string(ccx, impl_def_id));
if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) { if let Some(trait_ref) = tcx.impl_trait_ref(impl_def_id) {
let default_impls = tcx.provided_trait_methods(trait_ref.def_id); let default_impls = tcx.provided_trait_methods(trait_ref.def_id);
@ -1225,7 +1149,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty::TyStruct(adt_def, substs) | ty::TyStruct(adt_def, substs) |
ty::TyEnum(adt_def, substs) => { ty::TyEnum(adt_def, substs) => {
push_item_name(cx, adt_def.did, output); push_item_name(cx, adt_def.did, output);
push_type_params(cx, substs, &[], output); push_type_params(cx, &substs.types, &[], output);
}, },
ty::TyTuple(ref component_types) => { ty::TyTuple(ref component_types) => {
output.push('('); output.push('(');
@ -1275,7 +1199,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty::TyTrait(ref trait_data) => { ty::TyTrait(ref trait_data) => {
push_item_name(cx, trait_data.principal.skip_binder().def_id, output); push_item_name(cx, trait_data.principal.skip_binder().def_id, output);
push_type_params(cx, push_type_params(cx,
&trait_data.principal.skip_binder().substs, &trait_data.principal.skip_binder().substs.types,
&trait_data.bounds.projection_bounds, &trait_data.bounds.projection_bounds,
output); output);
}, },
@ -1285,7 +1209,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
output.push_str("unsafe "); output.push_str("unsafe ");
} }
if abi != ::syntax::abi::Abi::Rust { if abi != ::trans::abi::Abi::Rust {
output.push_str("extern \""); output.push_str("extern \"");
output.push_str(abi.name()); output.push_str(abi.name());
output.push_str("\" "); output.push_str("\" ");
@ -1329,7 +1253,7 @@ pub fn push_unique_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
output.push_str("{"); output.push_str("{");
output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize())); output.push_str(&format!("{}:{}", def_id.krate, def_id.index.as_usize()));
output.push_str("}"); output.push_str("}");
push_type_params(cx, closure_substs.func_substs, &[], output); push_type_params(cx, &closure_substs.func_substs.types, &[], output);
} }
ty::TyError | ty::TyError |
ty::TyInfer(_) | ty::TyInfer(_) |
@ -1371,16 +1295,16 @@ fn push_item_name(ccx: &CrateContext,
} }
fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
substs: &Substs<'tcx>, types: &'tcx subst::VecPerParamSpace<Ty<'tcx>>,
projections: &[ty::PolyProjectionPredicate<'tcx>], projections: &[ty::PolyProjectionPredicate<'tcx>],
output: &mut String) { output: &mut String) {
if substs.types.is_empty() && projections.is_empty() { if types.is_empty() && projections.is_empty() {
return; return;
} }
output.push('<'); output.push('<');
for &type_parameter in &substs.types { for &type_parameter in types {
push_unique_type_name(cx, type_parameter, output); push_unique_type_name(cx, type_parameter, output);
output.push_str(", "); output.push_str(", ");
} }
@ -1400,23 +1324,16 @@ fn push_type_params<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
output.push('>'); output.push('>');
} }
fn push_def_id_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn push_instance_as_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
def_id: DefId, instance: Instance<'tcx>,
substs: Option<&Substs<'tcx>>, output: &mut String) {
output: &mut String) { push_item_name(ccx, instance.def, output);
push_item_name(ccx, def_id, output); push_type_params(ccx, instance.params, &[], output);
if let Some(substs) = substs {
push_type_params(ccx, substs, &[], output);
}
} }
fn def_id_to_string<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn def_id_to_string(ccx: &CrateContext, def_id: DefId) -> String {
def_id: DefId,
substs: Option<&Substs<'tcx>>)
-> String {
let mut output = String::new(); let mut output = String::new();
push_def_id_as_string(ccx, def_id, substs, &mut output); push_item_name(ccx, def_id, &mut output);
output output
} }
@ -1440,23 +1357,23 @@ impl<'tcx> TransItem<'tcx> {
push_unique_type_name(ccx, t, &mut s); push_unique_type_name(ccx, t, &mut s);
s s
} }
TransItem::Fn { def_id, ref substs } => { TransItem::Fn(instance) => {
to_string_internal(ccx, "fn ", def_id, Some(substs)) to_string_internal(ccx, "fn ", instance)
}, },
TransItem::Static(node_id) => { TransItem::Static(node_id) => {
let def_id = hir_map.local_def_id(node_id); let def_id = hir_map.local_def_id(node_id);
to_string_internal(ccx, "static ", def_id, None) let instance = Instance::mono(ccx.tcx(), def_id);
to_string_internal(ccx, "static ", instance)
}, },
}; };
fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn to_string_internal<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
prefix: &str, prefix: &str,
def_id: DefId, instance: Instance<'tcx>)
substs: Option<&Substs<'tcx>>)
-> String { -> String {
let mut result = String::with_capacity(32); let mut result = String::with_capacity(32);
result.push_str(prefix); result.push_str(prefix);
push_def_id_as_string(ccx, def_id, substs, &mut result); push_instance_as_string(ccx, instance, &mut result);
result result
} }
} }
@ -1466,10 +1383,10 @@ impl<'tcx> TransItem<'tcx> {
TransItem::DropGlue(t) => { TransItem::DropGlue(t) => {
format!("DropGlue({})", t as *const _ as usize) format!("DropGlue({})", t as *const _ as usize)
} }
TransItem::Fn { def_id, substs } => { TransItem::Fn(instance) => {
format!("Fn({:?}, {})", format!("Fn({:?}, {})",
def_id, instance.def,
substs as *const _ as usize) instance.params as *const _ as usize)
} }
TransItem::Static(id) => { TransItem::Static(id) => {
format!("Static({:?})", id) format!("Static({:?})", id)
View file
@ -12,8 +12,6 @@
//! Code that is useful in various trans modules. //! Code that is useful in various trans modules.
pub use self::ExprOrMethodCall::*;
use session::Session; use session::Session;
use llvm; use llvm;
use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind}; use llvm::{ValueRef, BasicBlockRef, BuilderRef, ContextRef, TypeKind};
@ -23,33 +21,34 @@ use middle::def::Def;
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::infer; use middle::infer;
use middle::lang_items::LangItem; use middle::lang_items::LangItem;
use middle::subst::{self, Substs}; use middle::subst::Substs;
use trans::abi::{Abi, FnType};
use trans::base; use trans::base;
use trans::build; use trans::build;
use trans::builder::Builder; use trans::builder::Builder;
use trans::callee; use trans::callee::Callee;
use trans::cleanup; use trans::cleanup;
use trans::consts; use trans::consts;
use trans::datum; use trans::datum;
use trans::debuginfo::{self, DebugLoc}; use trans::debuginfo::{self, DebugLoc};
use trans::declare; use trans::declare;
use trans::machine; use trans::machine;
use trans::mir::CachedMir;
use trans::monomorphize; use trans::monomorphize;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of; use trans::value::Value;
use middle::ty::{self, Ty, TyCtxt}; use middle::ty::{self, Ty, TyCtxt};
use middle::traits::{self, SelectionContext, ProjectionMode}; use middle::traits::{self, SelectionContext, ProjectionMode};
use middle::ty::fold::{TypeFolder, TypeFoldable}; use middle::ty::fold::{TypeFolder, TypeFoldable};
use rustc_front::hir; use rustc_front::hir;
use rustc::mir::repr::Mir; use util::nodemap::NodeMap;
use util::nodemap::{FnvHashMap, NodeMap};
use arena::TypedArena; use arena::TypedArena;
use libc::{c_uint, c_char}; use libc::{c_uint, c_char};
use std::ops::Deref; use std::ops::Deref;
use std::ffi::CString; use std::ffi::CString;
use std::cell::{Cell, RefCell}; use std::cell::{Cell, RefCell};
use std::vec::Vec;
use syntax::ast; use syntax::ast;
use syntax::codemap::{DUMMY_SP, Span}; use syntax::codemap::{DUMMY_SP, Span};
use syntax::parse::token::InternedString; use syntax::parse::token::InternedString;
@ -75,18 +74,6 @@ pub fn type_is_fat_ptr<'tcx>(cx: &TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
} }
} }
fn type_is_newtype_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
match ty.sty {
ty::TyStruct(def, substs) => {
let fields = &def.struct_variant().fields;
fields.len() == 1 && {
type_is_immediate(ccx, monomorphize::field_ty(ccx.tcx(), substs, &fields[0]))
}
}
_ => false
}
}
pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool { pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
use trans::machine::llsize_of_alloc; use trans::machine::llsize_of_alloc;
use trans::type_of::sizing_type_of; use trans::type_of::sizing_type_of;
@ -94,7 +81,6 @@ pub fn type_is_immediate<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
let tcx = ccx.tcx(); let tcx = ccx.tcx();
let simple = ty.is_scalar() || let simple = ty.is_scalar() ||
ty.is_unique() || ty.is_region_ptr() || ty.is_unique() || ty.is_region_ptr() ||
type_is_newtype_immediate(ccx, ty) ||
ty.is_simd(); ty.is_simd();
if simple && !type_is_fat_ptr(tcx, ty) { if simple && !type_is_fat_ptr(tcx, ty) {
return true; return true;
@ -120,12 +106,6 @@ pub fn type_is_zero_size<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -
llsize_of_alloc(ccx, llty) == 0 llsize_of_alloc(ccx, llty) == 0
} }
/// Identifies types which we declare to be equivalent to `void` in C for the purpose of function
/// return types. These are `()`, bot, uninhabited enums and all other zero-sized types.
pub fn return_type_is_void<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> bool {
ty.is_nil() || ty.is_empty(ccx.tcx()) || type_is_zero_size(ccx, ty)
}
/// Generates a unique symbol based off the name given. This is used to create /// Generates a unique symbol based off the name given. This is used to create
/// unique symbols for things like closures. /// unique symbols for things like closures.
pub fn gensym_name(name: &str) -> ast::Name { pub fn gensym_name(name: &str) -> ast::Name {
@ -252,8 +232,6 @@ pub fn BuilderRef_res(b: BuilderRef) -> BuilderRef_res {
} }
} }
pub type ExternMap = FnvHashMap<String, ValueRef>;
pub fn validate_substs(substs: &Substs) { pub fn validate_substs(substs: &Substs) {
assert!(!substs.types.needs_infer()); assert!(!substs.types.needs_infer());
} }
@ -295,7 +273,7 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// The MIR for this function. At present, this is optional because // The MIR for this function. At present, this is optional because
// we only have MIR available for things that are local to the // we only have MIR available for things that are local to the
// crate. // crate.
pub mir: Option<&'a Mir<'tcx>>, pub mir: Option<CachedMir<'a, 'tcx>>,
// The ValueRef returned from a call to llvm::LLVMAddFunction; the // The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of // address of the first instruction in the sequence of
@ -306,9 +284,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// always an empty parameter-environment NOTE: @jroesch another use of ParamEnv // always an empty parameter-environment NOTE: @jroesch another use of ParamEnv
pub param_env: ty::ParameterEnvironment<'a, 'tcx>, pub param_env: ty::ParameterEnvironment<'a, 'tcx>,
// The environment argument in a closure.
pub llenv: Option<ValueRef>,
// A pointer to where to store the return value. If the return type is // A pointer to where to store the return value. If the return type is
// immediate, this points to an alloca in the function. Otherwise, it's a // immediate, this points to an alloca in the function. Otherwise, it's a
// pointer to the hidden first parameter of the function. After function // pointer to the hidden first parameter of the function. After function
@ -336,11 +311,6 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// Note that for cleanuppad-based exceptions this is not used. // Note that for cleanuppad-based exceptions this is not used.
pub landingpad_alloca: Cell<Option<ValueRef>>, pub landingpad_alloca: Cell<Option<ValueRef>>,
// True if the caller expects this fn to use the out pointer to
// return. Either way, your code should write into the slot llretslotptr
// points to, but if this value is false, that slot will be a local alloca.
pub caller_expects_out_pointer: bool,
// Maps the DefId's for local variables to the allocas created for // Maps the DefId's for local variables to the allocas created for
// them in llallocas. // them in llallocas.
pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>, pub lllocals: RefCell<NodeMap<LvalueDatum<'tcx>>>,
@ -352,9 +322,8 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
// paths) for the code being compiled. // paths) for the code being compiled.
pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>, pub lldropflag_hints: RefCell<DropFlagHintsMap<'tcx>>,
-    // The NodeId of the function, or -1 if it doesn't correspond to
-    // a user-defined function.
-    pub id: ast::NodeId,
+    // Describes the return/argument LLVM types and their ABI handling.
+    pub fn_ty: FnType,
// If this function is being monomorphized, this contains the type // If this function is being monomorphized, this contains the type
// substitutions used. // substitutions used.
@ -383,20 +352,8 @@ pub struct FunctionContext<'a, 'tcx: 'a> {
} }
impl<'a, 'tcx> FunctionContext<'a, 'tcx> { impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
-    pub fn mir(&self) -> &'a Mir<'tcx> {
-        self.mir.unwrap()
+    pub fn mir(&self) -> CachedMir<'a, 'tcx> {
+        self.mir.clone().expect("fcx.mir was empty")
}
pub fn arg_offset(&self) -> usize {
self.env_arg_pos() + if self.llenv.is_some() { 1 } else { 0 }
}
pub fn env_arg_pos(&self) -> usize {
if self.caller_expects_out_pointer {
1
} else {
0
}
} }
pub fn cleanup(&self) { pub fn cleanup(&self) {
@ -419,14 +376,9 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
self.llreturn.get().unwrap() self.llreturn.get().unwrap()
} }
-    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>,
-                        output: ty::FnOutput<'tcx>,
-                        name: &str) -> ValueRef {
+    pub fn get_ret_slot(&self, bcx: Block<'a, 'tcx>, name: &str) -> ValueRef {
         if self.needs_ret_allocas {
-            base::alloca(bcx, match output {
-                ty::FnConverging(output_type) => type_of::type_of(bcx.ccx(), output_type),
-                ty::FnDiverging => Type::void(bcx.ccx())
-            }, name)
+            base::alloca(bcx, self.fn_ty.ret.memory_ty(self.ccx), name)
         } else {
             self.llretslotptr.get().unwrap()
         }
@ -511,62 +463,60 @@ impl<'a, 'tcx> FunctionContext<'a, 'tcx> {
// `rust_eh_personality` function, but rather we wired it up to the // `rust_eh_personality` function, but rather we wired it up to the
// CRT's custom personality function, which forces LLVM to consider // CRT's custom personality function, which forces LLVM to consider
// landing pads as "landing pads for SEH". // landing pads as "landing pads for SEH".
-        let target = &self.ccx.sess().target.target;
-        match self.ccx.tcx().lang_items.eh_personality() {
-            Some(def_id) if !base::wants_msvc_seh(self.ccx.sess()) => {
-                callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
-                                     self.param_substs).val
+        let ccx = self.ccx;
+        let tcx = ccx.tcx();
+        let target = &ccx.sess().target.target;
+        match tcx.lang_items.eh_personality() {
+            Some(def_id) if !base::wants_msvc_seh(ccx.sess()) => {
+                Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty())).reify(ccx).val
             }
-            _ => {
-                let mut personality = self.ccx.eh_personality().borrow_mut();
-                match *personality {
-                    Some(llpersonality) => llpersonality,
-                    None => {
-                        let name = if !base::wants_msvc_seh(self.ccx.sess()) {
-                            "rust_eh_personality"
-                        } else if target.arch == "x86" {
-                            "_except_handler3"
-                        } else {
-                            "__C_specific_handler"
-                        };
-                        let fty = Type::variadic_func(&[], &Type::i32(self.ccx));
-                        let f = declare::declare_cfn(self.ccx, name, fty,
-                                                     self.ccx.tcx().types.i32);
-                        *personality = Some(f);
-                        f
-                    }
-                }
+            _ => if let Some(llpersonality) = ccx.eh_personality().get() {
+                llpersonality
+            } else {
+                let name = if !base::wants_msvc_seh(ccx.sess()) {
+                    "rust_eh_personality"
+                } else if target.arch == "x86" {
+                    "_except_handler3"
+                } else {
+                    "__C_specific_handler"
+                };
+                let fty = Type::variadic_func(&[], &Type::i32(ccx));
+                let f = declare::declare_cfn(ccx, name, fty);
+                ccx.eh_personality().set(Some(f));
+                f
             }
         }
     }
// Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined, // Returns a ValueRef of the "eh_unwind_resume" lang item if one is defined,
// otherwise declares it as an external function. // otherwise declares it as an external function.
-    pub fn eh_unwind_resume(&self) -> ValueRef {
+    pub fn eh_unwind_resume(&self) -> Callee<'tcx> {
         use trans::attributes;
-        assert!(self.ccx.sess().target.target.options.custom_unwind_resume);
-        match self.ccx.tcx().lang_items.eh_unwind_resume() {
-            Some(def_id) => {
-                callee::trans_fn_ref(self.ccx, def_id, ExprId(0),
-                                     self.param_substs).val
-            }
-            None => {
-                let mut unwresume = self.ccx.eh_unwind_resume().borrow_mut();
-                match *unwresume {
-                    Some(llfn) => llfn,
-                    None => {
-                        let fty = Type::func(&[Type::i8p(self.ccx)], &Type::void(self.ccx));
-                        let llfn = declare::declare_fn(self.ccx,
-                                                       "rust_eh_unwind_resume",
-                                                       llvm::CCallConv,
-                                                       fty, ty::FnDiverging);
-                        attributes::unwind(llfn, true);
-                        *unwresume = Some(llfn);
-                        llfn
-                    }
-                }
-            }
-        }
+        let ccx = self.ccx;
+        let tcx = ccx.tcx();
+        assert!(ccx.sess().target.target.options.custom_unwind_resume);
+        if let Some(def_id) = tcx.lang_items.eh_unwind_resume() {
+            return Callee::def(ccx, def_id, tcx.mk_substs(Substs::empty()));
+        }
let ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::C,
sig: ty::Binder(ty::FnSig {
inputs: vec![tcx.mk_mut_ptr(tcx.types.u8)],
output: ty::FnDiverging,
variadic: false
}),
});
let unwresume = ccx.eh_unwind_resume();
if let Some(llfn) = unwresume.get() {
return Callee::ptr(datum::immediate_rvalue(llfn, ty));
}
let llfn = declare::declare_fn(ccx, "rust_eh_unwind_resume", ty);
attributes::unwind(llfn, true);
unwresume.set(Some(llfn));
Callee::ptr(datum::immediate_rvalue(llfn, ty))
} }
} }
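For reference, the `BareFnTy` built in `eh_unwind_resume` above corresponds to roughly the following Rust-level declaration (a sketch inferred from the signature constructed in the code, not text from the patch):

extern "C" {
    // unsafe to call, takes the exception object, never returns
    fn rust_eh_unwind_resume(exception: *mut u8) -> !;
}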
@ -630,7 +580,7 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
self.lpad.get() self.lpad.get()
} }
pub fn mir(&self) -> &'blk Mir<'tcx> { pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
self.fcx.mir() self.fcx.mir()
} }
@ -652,14 +602,6 @@ impl<'blk, 'tcx> BlockS<'blk, 'tcx> {
} }
} }
pub fn val_to_string(&self, val: ValueRef) -> String {
self.ccx().tn().val_to_string(val)
}
pub fn llty_str(&self, ty: Type) -> String {
self.ccx().tn().type_to_string(ty)
}
pub fn to_str(&self) -> String { pub fn to_str(&self) -> String {
format!("[block {:p}]", self) format!("[block {:p}]", self)
} }
@ -746,6 +688,10 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
// Methods delegated to bcx // Methods delegated to bcx
pub fn is_unreachable(&self) -> bool {
self.bcx.unreachable.get()
}
pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> { pub fn ccx(&self) -> &'blk CrateContext<'blk, 'tcx> {
self.bcx.ccx() self.bcx.ccx()
} }
@ -763,14 +709,10 @@ impl<'blk, 'tcx> BlockAndBuilder<'blk, 'tcx> {
self.bcx.llbb self.bcx.llbb
} }
pub fn mir(&self) -> &'blk Mir<'tcx> { pub fn mir(&self) -> CachedMir<'blk, 'tcx> {
self.bcx.mir() self.bcx.mir()
} }
pub fn val_to_string(&self, val: ValueRef) -> String {
self.bcx.val_to_string(val)
}
pub fn monomorphize<T>(&self, value: &T) -> T pub fn monomorphize<T>(&self, value: &T) -> T
where T: TypeFoldable<'tcx> where T: TypeFoldable<'tcx>
{ {
@ -1028,15 +970,15 @@ pub fn C_bytes_in_context(llcx: ContextRef, bytes: &[u8]) -> ValueRef {
} }
} }
-pub fn const_get_elt(cx: &CrateContext, v: ValueRef, us: &[c_uint])
+pub fn const_get_elt(v: ValueRef, us: &[c_uint])
                      -> ValueRef {
     unsafe {
         let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
-        debug!("const_get_elt(v={}, us={:?}, r={})",
-               cx.tn().val_to_string(v), us, cx.tn().val_to_string(r));
-        return r;
+        debug!("const_get_elt(v={:?}, us={:?}, r={:?})",
+               Value(v), us, Value(r));
+        r
     }
 }
@ -1215,41 +1157,6 @@ pub fn normalize_and_test_predicates<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok() infer::drain_fulfillment_cx(&infcx, &mut fulfill_cx, &()).is_ok()
} }
// Key used to lookup values supplied for type parameters in an expr.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ExprOrMethodCall {
// Type parameters for a path like `None::<int>`
ExprId(ast::NodeId),
// Type parameters for a method call like `a.foo::<int>()`
MethodCallKey(ty::MethodCall)
}
pub fn node_id_substs<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
node: ExprOrMethodCall,
param_substs: &subst::Substs<'tcx>)
-> &'tcx subst::Substs<'tcx> {
let tcx = ccx.tcx();
let substs = match node {
ExprId(id) => {
tcx.node_id_item_substs(id).substs
}
MethodCallKey(method_call) => {
tcx.tables.borrow().method_map[&method_call].substs.clone()
}
};
if substs.types.needs_infer() {
tcx.sess.bug(&format!("type parameters for node {:?} include inference types: {:?}",
node, substs));
}
ccx.tcx().mk_substs(monomorphize::apply_param_substs(tcx,
param_substs,
&substs.erase_regions()))
}
pub fn langcall(bcx: Block, pub fn langcall(bcx: Block,
span: Option<Span>, span: Option<Span>,
msg: &str, msg: &str,
@ -1351,14 +1258,3 @@ pub fn shift_mask_val<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
_ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind), _ => panic!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
} }
} }
pub fn get_static_val<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
did: DefId,
ty: Ty<'tcx>)
-> ValueRef {
if let Some(node_id) = ccx.tcx().map.as_local_node_id(did) {
base::get_item_val(ccx, node_id)
} else {
base::get_extern_const(ccx, did, ty)
}
}

View file

@ -9,27 +9,28 @@
// except according to those terms. // except according to those terms.
use back::abi;
use llvm; use llvm;
use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr}; use llvm::{ConstFCmp, ConstICmp, SetLinkage, SetUnnamedAddr};
use llvm::{InternalLinkage, ValueRef, Bool, True}; use llvm::{InternalLinkage, ValueRef, Bool, True};
use middle::const_qualif::ConstQualif; use middle::const_qualif::ConstQualif;
use middle::cstore::LOCAL_CRATE;
use middle::const_eval::{self, ConstEvalErr}; use middle::const_eval::{self, ConstEvalErr};
use middle::def::Def; use middle::def::Def;
use middle::def_id::DefId; use middle::def_id::DefId;
use trans::{adt, closure, debuginfo, expr, inline, machine}; use rustc::front::map as hir_map;
use trans::base::{self, push_ctxt}; use trans::{abi, adt, closure, debuginfo, expr, machine};
use trans::base::{self, exported_name, imported_name, push_ctxt};
use trans::callee::Callee; use trans::callee::Callee;
use trans::collector::{self, TransItem}; use trans::collector::{self, TransItem};
use trans::common::{self, type_is_sized, ExprOrMethodCall, node_id_substs, C_nil, const_get_elt}; use trans::common::{type_is_sized, C_nil, const_get_elt};
use trans::common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty}; use trans::common::{CrateContext, C_integral, C_floating, C_bool, C_str_slice, C_bytes, val_ty};
use trans::common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint}; use trans::common::{C_struct, C_undef, const_to_opt_int, const_to_opt_uint, VariantInfo, C_uint};
use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null, ExprId, MethodCallKey}; use trans::common::{type_is_fat_ptr, Field, C_vector, C_array, C_null};
use trans::datum::{Datum, Lvalue};
use trans::declare; use trans::declare;
use trans::monomorphize; use trans::monomorphize::{self, Instance};
use trans::type_::Type; use trans::type_::Type;
use trans::type_of; use trans::type_of;
use trans::value::Value;
use trans::Disr; use trans::Disr;
use middle::subst::Substs; use middle::subst::Substs;
use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
@ -45,7 +46,7 @@ use std::ffi::{CStr, CString};
use std::borrow::Cow; use std::borrow::Cow;
use libc::c_uint; use libc::c_uint;
use syntax::ast::{self, LitKind}; use syntax::ast::{self, LitKind};
use syntax::attr; use syntax::attr::{self, AttrMetaMethods};
use syntax::parse::token; use syntax::parse::token;
use syntax::ptr::P; use syntax::ptr::P;
@ -191,27 +192,31 @@ fn const_deref<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
} }
fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, fn const_fn_call<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
node: ExprOrMethodCall,
def_id: DefId, def_id: DefId,
substs: Substs<'tcx>,
arg_vals: &[ValueRef], arg_vals: &[ValueRef],
param_substs: &'tcx Substs<'tcx>, param_substs: &'tcx Substs<'tcx>,
trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> { trueconst: TrueConst) -> Result<ValueRef, ConstEvalFailure> {
let fn_like = const_eval::lookup_const_fn_by_id(ccx.tcx(), def_id); let fn_like = const_eval::lookup_const_fn_by_id(ccx.tcx(), def_id);
let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call"); let fn_like = fn_like.expect("lookup_const_fn_by_id failed in const_fn_call");
let body = match fn_like.body().expr {
Some(ref expr) => expr,
None => return Ok(C_nil(ccx))
};
let args = &fn_like.decl().inputs; let args = &fn_like.decl().inputs;
assert_eq!(args.len(), arg_vals.len()); assert_eq!(args.len(), arg_vals.len());
let arg_ids = args.iter().map(|arg| arg.pat.id); let arg_ids = args.iter().map(|arg| arg.pat.id);
let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect(); let fn_args = arg_ids.zip(arg_vals.iter().cloned()).collect();
let substs = node_id_substs(ccx, node, param_substs); let substs = monomorphize::apply_param_substs(ccx.tcx(),
match fn_like.body().expr { param_substs,
Some(ref expr) => { &substs.erase_regions());
const_expr(ccx, &expr, substs, Some(&fn_args), trueconst).map(|(res, _)| res) let substs = ccx.tcx().mk_substs(substs);
},
None => Ok(C_nil(ccx)), const_expr(ccx, body, substs, Some(&fn_args), trueconst).map(|(res, _)| res)
}
} }
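As a reminder of what `const_fn_call` is evaluating, here is a minimal sketch (in this era `const fn` still required a feature gate; the names are illustrative):

#![feature(const_fn)]

const fn double(x: u32) -> u32 { x * 2 }

// The call on the right-hand side is folded at translation time by
// const_fn_call rather than emitted as a runtime call.
const EIGHT: u32 = double(4);

fn main() {
    assert_eq!(EIGHT, 8);
}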
pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
@ -219,14 +224,11 @@ pub fn get_const_expr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ref_expr: &hir::Expr, ref_expr: &hir::Expr,
param_substs: &'tcx Substs<'tcx>) param_substs: &'tcx Substs<'tcx>)
-> &'tcx hir::Expr { -> &'tcx hir::Expr {
let def_id = inline::maybe_instantiate_inline(ccx, def_id); let substs = ccx.tcx().node_id_item_substs(ref_expr.id).substs;
let substs = monomorphize::apply_param_substs(ccx.tcx(),
if def_id.krate != LOCAL_CRATE { param_substs,
ccx.sess().span_bug(ref_expr.span, &substs.erase_regions());
"cross crate constant could not be inlined"); match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(substs)) {
}
match const_eval::lookup_const_by_id(ccx.tcx(), def_id, Some(ref_expr.id), Some(param_substs)) {
Some((ref expr, _ty)) => expr, Some((ref expr, _ty)) => expr,
None => { None => {
ccx.sess().span_bug(ref_expr.span, "constant item not found") ccx.sess().span_bug(ref_expr.span, "constant item not found")
@ -351,9 +353,7 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
Some(AdjustReifyFnPointer) => { Some(AdjustReifyFnPointer) => {
match ety.sty { match ety.sty {
ty::TyFnDef(def_id, substs, _) => { ty::TyFnDef(def_id, substs, _) => {
let datum = Callee::def(cx, def_id, substs, ety).reify(cx); llconst = Callee::def(cx, def_id, substs).reify(cx).val;
llconst = datum.val;
ety_adjusted = datum.ty;
} }
_ => { _ => {
unreachable!("{} cannot be reified to a fn ptr", ety) unreachable!("{} cannot be reified to a fn ptr", ety)
@ -405,8 +405,8 @@ pub fn const_expr<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
// to use a different vtable. In that case, we want to // to use a different vtable. In that case, we want to
// load out the original data pointer so we can repackage // load out the original data pointer so we can repackage
// it. // it.
(const_get_elt(cx, llconst, &[abi::FAT_PTR_ADDR as u32]), (const_get_elt(llconst, &[abi::FAT_PTR_ADDR as u32]),
Some(const_get_elt(cx, llconst, &[abi::FAT_PTR_EXTRA as u32]))) Some(const_get_elt(llconst, &[abi::FAT_PTR_EXTRA as u32])))
} else { } else {
(llconst, None) (llconst, None)
}; };
@ -595,17 +595,15 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
/* Neither type is bottom, and we expect them to be unified /* Neither type is bottom, and we expect them to be unified
* already, so the following is safe. */ * already, so the following is safe. */
let (te1, ty) = try!(const_expr(cx, &e1, param_substs, fn_args, trueconst)); let (te1, ty) = try!(const_expr(cx, &e1, param_substs, fn_args, trueconst));
debug!("const_expr_unadjusted: te1={}, ty={:?}", debug!("const_expr_unadjusted: te1={:?}, ty={:?}",
cx.tn().val_to_string(te1), Value(te1), ty);
ty);
assert!(!ty.is_simd()); assert!(!ty.is_simd());
let is_float = ty.is_fp(); let is_float = ty.is_fp();
let signed = ty.is_signed(); let signed = ty.is_signed();
let (te2, ty2) = try!(const_expr(cx, &e2, param_substs, fn_args, trueconst)); let (te2, ty2) = try!(const_expr(cx, &e2, param_substs, fn_args, trueconst));
debug!("const_expr_unadjusted: te2={}, ty={:?}", debug!("const_expr_unadjusted: te2={:?}, ty={:?}",
cx.tn().val_to_string(te2), Value(te2), ty2);
ty2);
try!(check_binary_expr_validity(cx, e, ty, te1, te2, trueconst)); try!(check_binary_expr_validity(cx, e, ty, te1, te2, trueconst));
@ -689,8 +687,8 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let (arr, len) = match bt.sty { let (arr, len) = match bt.sty {
ty::TyArray(_, u) => (bv, C_uint(cx, u)), ty::TyArray(_, u) => (bv, C_uint(cx, u)),
ty::TySlice(..) | ty::TyStr => { ty::TySlice(..) | ty::TyStr => {
let e1 = const_get_elt(cx, bv, &[0]); let e1 = const_get_elt(bv, &[0]);
(load_const(cx, e1, bt), const_get_elt(cx, bv, &[1])) (load_const(cx, e1, bt), const_get_elt(bv, &[1]))
}, },
ty::TyRef(_, mt) => match mt.ty.sty { ty::TyRef(_, mt) => match mt.ty.sty {
ty::TyArray(_, u) => { ty::TyArray(_, u) => {
@ -725,7 +723,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
"const index-expr is out of bounds"); "const index-expr is out of bounds");
C_undef(val_ty(arr).element_type()) C_undef(val_ty(arr).element_type())
} else { } else {
const_get_elt(cx, arr, &[iv as c_uint]) const_get_elt(arr, &[iv as c_uint])
} }
}, },
hir::ExprCast(ref base, _) => { hir::ExprCast(ref base, _) => {
@ -741,10 +739,10 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
let t_cast_inner = let t_cast_inner =
t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty; t_cast.builtin_deref(true, ty::NoPreference).expect("cast to non-pointer").ty;
let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to(); let ptr_ty = type_of::in_memory_type_of(cx, t_cast_inner).ptr_to();
let addr = ptrcast(const_get_elt(cx, v, &[abi::FAT_PTR_ADDR as u32]), let addr = ptrcast(const_get_elt(v, &[abi::FAT_PTR_ADDR as u32]),
ptr_ty); ptr_ty);
if type_is_fat_ptr(cx.tcx(), t_cast) { if type_is_fat_ptr(cx.tcx(), t_cast) {
let info = const_get_elt(cx, v, &[abi::FAT_PTR_EXTRA as u32]); let info = const_get_elt(v, &[abi::FAT_PTR_EXTRA as u32]);
return Ok(C_struct(cx, &[addr, info], false)) return Ok(C_struct(cx, &[addr, info], false))
} else { } else {
return Ok(addr); return Ok(addr);
@ -756,7 +754,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
) { ) {
(CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => { (CastTy::Int(IntTy::CEnum), CastTy::Int(_)) => {
let repr = adt::represent_type(cx, t_expr); let repr = adt::represent_type(cx, t_expr);
let discr = adt::const_get_discrim(cx, &repr, v); let discr = adt::const_get_discrim(&repr, v);
let iv = C_integral(cx.int_type(), discr.0, false); let iv = C_integral(cx.int_type(), discr.0, false);
let s = adt::is_discr_signed(&repr) as Bool; let s = adt::is_discr_signed(&repr) as Bool;
llvm::LLVMConstIntCast(iv, llty.to_ref(), s) llvm::LLVMConstIntCast(iv, llty.to_ref(), s)
@ -809,7 +807,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
} }
let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def()); let opt_def = cx.tcx().def_map.borrow().get(&cur.id).map(|d| d.full_def());
if let Some(Def::Static(def_id, _)) = opt_def { if let Some(Def::Static(def_id, _)) = opt_def {
common::get_static_val(cx, def_id, ety) get_static(cx, def_id).val
} else { } else {
// If this isn't the address of a static, then keep going through // If this isn't the address of a static, then keep going through
// normal constant evaluation. // normal constant evaluation.
@ -947,8 +945,8 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
Def::Fn(did) | Def::Method(did) => { Def::Fn(did) | Def::Method(did) => {
try!(const_fn_call( try!(const_fn_call(
cx, cx,
ExprId(callee.id),
did, did,
cx.tcx().node_id_item_substs(callee.id).substs,
&arg_vals, &arg_vals,
param_substs, param_substs,
trueconst, trueconst,
@ -976,9 +974,9 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
hir::ExprMethodCall(_, _, ref args) => { hir::ExprMethodCall(_, _, ref args) => {
let arg_vals = try!(map_list(args)); let arg_vals = try!(map_list(args));
let method_call = ty::MethodCall::expr(e.id); let method_call = ty::MethodCall::expr(e.id);
let method_did = cx.tcx().tables.borrow().method_map[&method_call].def_id; let method = cx.tcx().tables.borrow().method_map[&method_call];
try!(const_fn_call(cx, MethodCallKey(method_call), try!(const_fn_call(cx, method.def_id, method.substs.clone(),
method_did, &arg_vals, param_substs, trueconst)) &arg_vals, param_substs, trueconst))
}, },
hir::ExprType(ref e, _) => try!(const_expr(cx, &e, param_substs, fn_args, trueconst)).0, hir::ExprType(ref e, _) => try!(const_expr(cx, &e, param_substs, fn_args, trueconst)).0,
hir::ExprBlock(ref block) => { hir::ExprBlock(ref block) => {
@ -1001,8 +999,7 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
body, body,
e.id, e.id,
def_id, def_id,
substs, substs);
&e.attrs);
} }
_ => _ =>
cx.sess().span_bug( cx.sess().span_bug(
@ -1016,6 +1013,125 @@ fn const_expr_unadjusted<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
}) })
} }
pub fn get_static<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, def_id: DefId)
-> Datum<'tcx, Lvalue> {
let ty = ccx.tcx().lookup_item_type(def_id).ty;
let instance = Instance::mono(ccx.tcx(), def_id);
if let Some(&g) = ccx.instances().borrow().get(&instance) {
return Datum::new(g, ty, Lvalue::new("static"));
}
let g = if let Some(id) = ccx.tcx().map.as_local_node_id(def_id) {
let llty = type_of::type_of(ccx, ty);
match ccx.tcx().map.get(id) {
hir_map::NodeItem(&hir::Item {
ref attrs, span, node: hir::ItemStatic(..), ..
}) => {
// If this static came from an external crate, then
// we need to get the symbol from metadata instead of
// using the current crate's name/version
// information in the hash of the symbol
let sym = exported_name(ccx, id, ty, attrs);
debug!("making {}", sym);
// Create the global before evaluating the initializer;
// this is necessary to allow recursive statics.
let g = declare::define_global(ccx, &sym, llty).unwrap_or_else(|| {
ccx.sess().span_fatal(span,
&format!("symbol `{}` is already defined", sym))
});
ccx.item_symbols().borrow_mut().insert(id, sym);
g
}
hir_map::NodeForeignItem(&hir::ForeignItem {
ref attrs, name, span, node: hir::ForeignItemStatic(..), ..
}) => {
let ident = imported_name(name, attrs);
let g = if let Some(name) =
attr::first_attr_value_str_by_name(&attrs, "linkage") {
// If this is a static with a linkage specified, then we need to handle
// it a little specially. The typesystem prevents things like &T and
// extern "C" fn() from being non-null, so we can't just declare a
// static and call it a day. Some linkages (like weak) will make it such
// that the static actually has a null value.
let linkage = match base::llvm_linkage_by_name(&name) {
Some(linkage) => linkage,
None => {
ccx.sess().span_fatal(span, "invalid linkage specified");
}
};
let llty2 = match ty.sty {
ty::TyRawPtr(ref mt) => type_of::type_of(ccx, mt.ty),
_ => {
ccx.sess().span_fatal(span, "must have type `*const T` or `*mut T`");
}
};
unsafe {
// Declare a symbol `foo` with the desired linkage.
let g1 = declare::declare_global(ccx, &ident, llty2);
llvm::SetLinkage(g1, linkage);
// Declare an internal global `extern_with_linkage_foo` which
// is initialized with the address of `foo`. If `foo` is
// discarded during linking (for example, if `foo` has weak
// linkage and there are no definitions), then
// `extern_with_linkage_foo` will instead be initialized to
// zero.
let mut real_name = "_rust_extern_with_linkage_".to_string();
real_name.push_str(&ident);
let g2 = declare::define_global(ccx, &real_name, llty).unwrap_or_else(||{
ccx.sess().span_fatal(span,
&format!("symbol `{}` is already defined", ident))
});
llvm::SetLinkage(g2, llvm::InternalLinkage);
llvm::LLVMSetInitializer(g2, g1);
g2
}
} else {
// Generate an external declaration.
declare::declare_global(ccx, &ident, llty)
};
for attr in attrs {
if attr.check_name("thread_local") {
llvm::set_thread_local(g, true);
}
}
g
}
item => unreachable!("get_static: expected static, found {:?}", item)
}
} else {
// FIXME(nagisa): perhaps the map of externs could be offloaded to llvm somehow?
// FIXME(nagisa): investigate whether it can be changed into define_global
let name = ccx.sess().cstore.item_symbol(def_id);
let g = declare::declare_global(ccx, &name, type_of::type_of(ccx, ty));
// Thread-local statics in some other crate need to *always* be linked
// against in a thread-local fashion, so we need to be sure to apply the
// thread-local attribute locally if it was present remotely. If we
// don't do this then linker errors can be generated where the linker
// complains that one object file has a thread-local version of the
// symbol and another one doesn't.
for attr in ccx.tcx().get_attrs(def_id).iter() {
if attr.check_name("thread_local") {
llvm::set_thread_local(g, true);
}
}
if ccx.use_dll_storage_attrs() {
llvm::SetDLLStorageClass(g, llvm::DLLImportStorageClass);
}
g
};
ccx.instances().borrow_mut().insert(instance, g);
Datum::new(g, ty, Lvalue::new("static"))
}
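The `linkage` branch above exists for declarations like the following (a hedged sketch of the user-facing feature, with an illustrative symbol name; not code from this patch). With a linkage such as `extern_weak` the symbol may legitimately be null, which is why `get_static` routes it through an internal `_rust_extern_with_linkage_*` wrapper global:

#![feature(linkage)]

extern "C" {
    // Must be a raw pointer type, as checked above; reads as null if the
    // symbol is missing at link time.
    #[linkage = "extern_weak"]
    static __pthread_key_create: *const u8;
}

fn has_pthreads() -> bool {
    unsafe { !__pthread_key_create.is_null() }
}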
pub fn trans_static(ccx: &CrateContext, pub fn trans_static(ccx: &CrateContext,
m: hir::Mutability, m: hir::Mutability,
expr: &hir::Expr, expr: &hir::Expr,
@ -1029,7 +1145,8 @@ pub fn trans_static(ccx: &CrateContext,
unsafe { unsafe {
let _icx = push_ctxt("trans_static"); let _icx = push_ctxt("trans_static");
let g = base::get_item_val(ccx, id); let def_id = ccx.tcx().map.local_def_id(id);
let datum = get_static(ccx, def_id);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (v, _) = try!(const_expr( let (v, _) = try!(const_expr(
@ -1042,40 +1159,39 @@ pub fn trans_static(ccx: &CrateContext,
// boolean SSA values are i1, but they have to be stored in i8 slots, // boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected // otherwise some LLVM optimization passes don't work as expected
let mut val_llty = llvm::LLVMTypeOf(v); let mut val_llty = val_ty(v);
let v = if val_llty == Type::i1(ccx).to_ref() { let v = if val_llty == Type::i1(ccx) {
val_llty = Type::i8(ccx).to_ref(); val_llty = Type::i8(ccx);
llvm::LLVMConstZExt(v, val_llty) llvm::LLVMConstZExt(v, val_llty.to_ref())
} else { } else {
v v
}; };
let ty = ccx.tcx().node_id_to_type(id); let llty = type_of::type_of(ccx, datum.ty);
let llty = type_of::type_of(ccx, ty); let g = if val_llty == llty {
let g = if val_llty == llty.to_ref() { datum.val
g
} else { } else {
// If we created the global with the wrong type, // If we created the global with the wrong type,
// correct the type. // correct the type.
let empty_string = CString::new("").unwrap(); let empty_string = CString::new("").unwrap();
let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(g)); let name_str_ref = CStr::from_ptr(llvm::LLVMGetValueName(datum.val));
let name_string = CString::new(name_str_ref.to_bytes()).unwrap(); let name_string = CString::new(name_str_ref.to_bytes()).unwrap();
llvm::LLVMSetValueName(g, empty_string.as_ptr()); llvm::LLVMSetValueName(datum.val, empty_string.as_ptr());
let new_g = llvm::LLVMGetOrInsertGlobal( let new_g = llvm::LLVMGetOrInsertGlobal(
ccx.llmod(), name_string.as_ptr(), val_llty); ccx.llmod(), name_string.as_ptr(), val_llty.to_ref());
// To avoid breaking any invariants, we leave around the old // To avoid breaking any invariants, we leave around the old
// global for the moment; we'll replace all references to it // global for the moment; we'll replace all references to it
// with the new global later. (See base::trans_crate.) // with the new global later. (See base::trans_crate.)
ccx.statics_to_rauw().borrow_mut().push((g, new_g)); ccx.statics_to_rauw().borrow_mut().push((datum.val, new_g));
new_g new_g
}; };
llvm::LLVMSetAlignment(g, type_of::align_of(ccx, ty)); llvm::LLVMSetAlignment(g, type_of::align_of(ccx, datum.ty));
llvm::LLVMSetInitializer(g, v); llvm::LLVMSetInitializer(g, v);
// As an optimization, all shared statics which do not have interior // As an optimization, all shared statics which do not have interior
// mutability are placed into read-only memory. // mutability are placed into read-only memory.
if m != hir::MutMutable { if m != hir::MutMutable {
let tcontents = ty.type_contents(ccx.tcx()); let tcontents = datum.ty.type_contents(ccx.tcx());
if !tcontents.interior_unsafe() { if !tcontents.interior_unsafe() {
llvm::LLVMSetGlobalConstant(g, llvm::True); llvm::LLVMSetGlobalConstant(g, llvm::True);
} }
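An example of the distinction the read-only check above draws (a sketch using the era's stable atomics API; not part of the patch):

use std::sync::atomic::{AtomicUsize, Ordering, ATOMIC_USIZE_INIT};

// No interior mutability: eligible for LLVMSetGlobalConstant / read-only memory.
static LIMIT: usize = 100;

// Interior mutability through the atomic: must stay in writable memory
// even though it is not declared `static mut`.
static COUNTER: AtomicUsize = ATOMIC_USIZE_INIT;

fn main() {
    COUNTER.fetch_add(1, Ordering::SeqCst);
    assert!(COUNTER.load(Ordering::SeqCst) <= LIMIT);
}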

View file

@ -16,14 +16,16 @@ use middle::def::ExportMap;
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::traits; use middle::traits;
use rustc::mir::mir_map::MirMap; use rustc::mir::mir_map::MirMap;
use rustc::mir::repr as mir;
use trans::adt; use trans::adt;
use trans::base; use trans::base;
use trans::builder::Builder; use trans::builder::Builder;
use trans::common::{ExternMap,BuilderRef_res}; use trans::common::BuilderRef_res;
use trans::debuginfo; use trans::debuginfo;
use trans::declare; use trans::declare;
use trans::glue::DropGlueKind; use trans::glue::DropGlueKind;
use trans::monomorphize::MonoId; use trans::mir::CachedMir;
use trans::monomorphize::Instance;
use trans::collector::{TransItem, TransItemState}; use trans::collector::{TransItem, TransItemState};
use trans::type_::{Type, TypeNames}; use trans::type_::{Type, TypeNames};
use middle::subst::Substs; use middle::subst::Substs;
@ -75,6 +77,7 @@ pub struct SharedCrateContext<'a, 'tcx: 'a> {
check_overflow: bool, check_overflow: bool,
check_drop_flag_for_sanity: bool, check_drop_flag_for_sanity: bool,
mir_map: &'a MirMap<'tcx>, mir_map: &'a MirMap<'tcx>,
mir_cache: RefCell<DefIdMap<Rc<mir::Mir<'tcx>>>>,
available_drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, String>>, available_drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, String>>,
use_dll_storage_attrs: bool, use_dll_storage_attrs: bool,
@ -90,8 +93,6 @@ pub struct LocalCrateContext<'tcx> {
llmod: ModuleRef, llmod: ModuleRef,
llcx: ContextRef, llcx: ContextRef,
tn: TypeNames, tn: TypeNames,
externs: RefCell<ExternMap>,
item_vals: RefCell<NodeMap<ValueRef>>,
needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>, needs_unwind_cleanup_cache: RefCell<FnvHashMap<Ty<'tcx>, bool>>,
fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>, fn_pointer_shims: RefCell<FnvHashMap<Ty<'tcx>, ValueRef>>,
drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>, drop_glues: RefCell<FnvHashMap<DropGlueKind<'tcx>, ValueRef>>,
@ -100,8 +101,8 @@ pub struct LocalCrateContext<'tcx> {
/// Backwards version of the `external` map (inlined items to where they /// Backwards version of the `external` map (inlined items to where they
/// came from) /// came from)
external_srcs: RefCell<NodeMap<DefId>>, external_srcs: RefCell<NodeMap<DefId>>,
/// Cache instances of monomorphized functions /// Cache instances of monomorphic and polymorphic items
monomorphized: RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>, instances: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
monomorphizing: RefCell<DefIdMap<usize>>, monomorphizing: RefCell<DefIdMap<usize>>,
available_monomorphizations: RefCell<FnvHashSet<String>>, available_monomorphizations: RefCell<FnvHashSet<String>>,
/// Cache generated vtables /// Cache generated vtables
@ -148,13 +149,13 @@ pub struct LocalCrateContext<'tcx> {
builder: BuilderRef_res, builder: BuilderRef_res,
/// Holds the LLVM values for closure IDs. /// Holds the LLVM values for closure IDs.
closure_vals: RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>>, closure_vals: RefCell<FnvHashMap<Instance<'tcx>, ValueRef>>,
dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>, dbg_cx: Option<debuginfo::CrateDebugContext<'tcx>>,
eh_personality: RefCell<Option<ValueRef>>, eh_personality: Cell<Option<ValueRef>>,
eh_unwind_resume: RefCell<Option<ValueRef>>, eh_unwind_resume: Cell<Option<ValueRef>>,
rust_try_fn: RefCell<Option<ValueRef>>, rust_try_fn: Cell<Option<ValueRef>>,
intrinsics: RefCell<FnvHashMap<&'static str, ValueRef>>, intrinsics: RefCell<FnvHashMap<&'static str, ValueRef>>,
@ -340,6 +341,7 @@ impl<'b, 'tcx> SharedCrateContext<'b, 'tcx> {
symbol_hasher: RefCell::new(symbol_hasher), symbol_hasher: RefCell::new(symbol_hasher),
tcx: tcx, tcx: tcx,
mir_map: mir_map, mir_map: mir_map,
mir_cache: RefCell::new(DefIdMap()),
stats: Stats { stats: Stats {
n_glues_created: Cell::new(0), n_glues_created: Cell::new(0),
n_null_glues: Cell::new(0), n_null_glues: Cell::new(0),
@ -464,14 +466,12 @@ impl<'tcx> LocalCrateContext<'tcx> {
llmod: llmod, llmod: llmod,
llcx: llcx, llcx: llcx,
tn: TypeNames::new(), tn: TypeNames::new(),
externs: RefCell::new(FnvHashMap()),
item_vals: RefCell::new(NodeMap()),
needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()), needs_unwind_cleanup_cache: RefCell::new(FnvHashMap()),
fn_pointer_shims: RefCell::new(FnvHashMap()), fn_pointer_shims: RefCell::new(FnvHashMap()),
drop_glues: RefCell::new(FnvHashMap()), drop_glues: RefCell::new(FnvHashMap()),
external: RefCell::new(DefIdMap()), external: RefCell::new(DefIdMap()),
external_srcs: RefCell::new(NodeMap()), external_srcs: RefCell::new(NodeMap()),
monomorphized: RefCell::new(FnvHashMap()), instances: RefCell::new(FnvHashMap()),
monomorphizing: RefCell::new(DefIdMap()), monomorphizing: RefCell::new(DefIdMap()),
available_monomorphizations: RefCell::new(FnvHashSet()), available_monomorphizations: RefCell::new(FnvHashSet()),
vtables: RefCell::new(FnvHashMap()), vtables: RefCell::new(FnvHashMap()),
@ -492,9 +492,9 @@ impl<'tcx> LocalCrateContext<'tcx> {
builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)), builder: BuilderRef_res(llvm::LLVMCreateBuilderInContext(llcx)),
closure_vals: RefCell::new(FnvHashMap()), closure_vals: RefCell::new(FnvHashMap()),
dbg_cx: dbg_cx, dbg_cx: dbg_cx,
eh_personality: RefCell::new(None), eh_personality: Cell::new(None),
eh_unwind_resume: RefCell::new(None), eh_unwind_resume: Cell::new(None),
rust_try_fn: RefCell::new(None), rust_try_fn: Cell::new(None),
intrinsics: RefCell::new(FnvHashMap()), intrinsics: RefCell::new(FnvHashMap()),
n_llvm_insns: Cell::new(0), n_llvm_insns: Cell::new(0),
type_of_depth: Cell::new(0), type_of_depth: Cell::new(0),
@ -616,14 +616,6 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local.tn &self.local.tn
} }
pub fn externs<'a>(&'a self) -> &'a RefCell<ExternMap> {
&self.local.externs
}
pub fn item_vals<'a>(&'a self) -> &'a RefCell<NodeMap<ValueRef>> {
&self.local.item_vals
}
pub fn export_map<'a>(&'a self) -> &'a ExportMap { pub fn export_map<'a>(&'a self) -> &'a ExportMap {
&self.shared.export_map &self.shared.export_map
} }
@ -660,8 +652,8 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local.external_srcs &self.local.external_srcs
} }
pub fn monomorphized<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>> { pub fn instances<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
&self.local.monomorphized &self.local.instances
} }
pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> { pub fn monomorphizing<'a>(&'a self) -> &'a RefCell<DefIdMap<usize>> {
@ -746,7 +738,7 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
self.local.opaque_vec_type self.local.opaque_vec_type
} }
pub fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<MonoId<'tcx>, ValueRef>> { pub fn closure_vals<'a>(&'a self) -> &'a RefCell<FnvHashMap<Instance<'tcx>, ValueRef>> {
&self.local.closure_vals &self.local.closure_vals
} }
@ -754,15 +746,15 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
&self.local.dbg_cx &self.local.dbg_cx
} }
pub fn eh_personality<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> { pub fn eh_personality<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local.eh_personality &self.local.eh_personality
} }
pub fn eh_unwind_resume<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> { pub fn eh_unwind_resume<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local.eh_unwind_resume &self.local.eh_unwind_resume
} }
pub fn rust_try_fn<'a>(&'a self) -> &'a RefCell<Option<ValueRef>> { pub fn rust_try_fn<'a>(&'a self) -> &'a Cell<Option<ValueRef>> {
&self.local.rust_try_fn &self.local.rust_try_fn
} }
@ -829,8 +821,22 @@ impl<'b, 'tcx> CrateContext<'b, 'tcx> {
self.shared.use_dll_storage_attrs() self.shared.use_dll_storage_attrs()
} }
pub fn mir_map(&self) -> &'b MirMap<'tcx> { pub fn get_mir(&self, def_id: DefId) -> Option<CachedMir<'b, 'tcx>> {
self.shared.mir_map if def_id.is_local() {
let node_id = self.tcx().map.as_local_node_id(def_id).unwrap();
self.shared.mir_map.map.get(&node_id).map(CachedMir::Ref)
} else {
if let Some(mir) = self.shared.mir_cache.borrow().get(&def_id).cloned() {
return Some(CachedMir::Owned(mir));
}
let mir = self.sess().cstore.maybe_get_item_mir(self.tcx(), def_id);
let cached = mir.map(Rc::new);
if let Some(ref mir) = cached {
self.shared.mir_cache.borrow_mut().insert(def_id, mir.clone());
}
cached.map(CachedMir::Owned)
}
} }
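For readers following the new plumbing: the way `get_mir` uses `CachedMir::Ref` for MIR borrowed from the local `MirMap` and `CachedMir::Owned` for MIR decoded from another crate's metadata suggests a definition along these lines (an inference from the usage above, not text from the patch):

use std::rc::Rc;

pub enum CachedMir<'mir, 'tcx: 'mir> {
    // Borrowed from the crate-local MirMap.
    Ref(&'mir mir::Mir<'tcx>),
    // Refcounted copy decoded from another crate's metadata.
    Owned(Rc<mir::Mir<'tcx>>),
}
// (plus a Clone impl, since FunctionContext::mir() clones the cached value)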
pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> { pub fn translation_items(&self) -> &RefCell<FnvHashMap<TransItem<'tcx>, TransItemState>> {
@ -865,8 +871,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
macro_rules! ifn { macro_rules! ifn {
($name:expr, fn() -> $ret:expr) => ( ($name:expr, fn() -> $ret:expr) => (
if key == $name { if key == $name {
let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret), let f = declare::declare_cfn(ccx, $name, Type::func(&[], &$ret));
ccx.tcx().mk_nil());
llvm::SetUnnamedAddr(f, false); llvm::SetUnnamedAddr(f, false);
ccx.intrinsics().borrow_mut().insert($name, f.clone()); ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f); return Some(f);
@ -874,9 +879,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
); );
($name:expr, fn(...) -> $ret:expr) => ( ($name:expr, fn(...) -> $ret:expr) => (
if key == $name { if key == $name {
let f = declare::declare_cfn(ccx, $name, let f = declare::declare_cfn(ccx, $name, Type::variadic_func(&[], &$ret));
Type::variadic_func(&[], &$ret),
ccx.tcx().mk_nil());
llvm::SetUnnamedAddr(f, false); llvm::SetUnnamedAddr(f, false);
ccx.intrinsics().borrow_mut().insert($name, f.clone()); ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f); return Some(f);
@ -884,8 +887,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
); );
($name:expr, fn($($arg:expr),*) -> $ret:expr) => ( ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
if key == $name { if key == $name {
let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret), let f = declare::declare_cfn(ccx, $name, Type::func(&[$($arg),*], &$ret));
ccx.tcx().mk_nil());
llvm::SetUnnamedAddr(f, false); llvm::SetUnnamedAddr(f, false);
ccx.intrinsics().borrow_mut().insert($name, f.clone()); ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f); return Some(f);
@ -1032,8 +1034,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
ifn!($name, fn($($arg),*) -> void); ifn!($name, fn($($arg),*) -> void);
} else if key == $name { } else if key == $name {
let f = declare::declare_cfn(ccx, stringify!($cname), let f = declare::declare_cfn(ccx, stringify!($cname),
Type::func(&[$($arg),*], &void), Type::func(&[$($arg),*], &void));
ccx.tcx().mk_nil());
llvm::SetLinkage(f, llvm::InternalLinkage); llvm::SetLinkage(f, llvm::InternalLinkage);
let bld = ccx.builder(); let bld = ccx.builder();
@ -1055,8 +1056,7 @@ fn declare_intrinsic(ccx: &CrateContext, key: &str) -> Option<ValueRef> {
ifn!($name, fn($($arg),*) -> $ret); ifn!($name, fn($($arg),*) -> $ret);
} else if key == $name { } else if key == $name {
let f = declare::declare_cfn(ccx, stringify!($cname), let f = declare::declare_cfn(ccx, stringify!($cname),
Type::func(&[$($arg),*], &$ret), Type::func(&[$($arg),*], &$ret));
ccx.tcx().mk_nil());
ccx.intrinsics().borrow_mut().insert($name, f.clone()); ccx.intrinsics().borrow_mut().insert($name, f.clone());
return Some(f); return Some(f);
} }

View file

@ -11,10 +11,11 @@
use llvm::ValueRef; use llvm::ValueRef;
use middle::def::Def; use middle::def::Def;
use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem}; use middle::lang_items::{PanicFnLangItem, PanicBoundsCheckFnLangItem};
use middle::subst::Substs;
use trans::base::*; use trans::base::*;
use trans::basic_block::BasicBlock; use trans::basic_block::BasicBlock;
use trans::build::*; use trans::build::*;
use trans::callee; use trans::callee::{Callee, ArgVals};
use trans::cleanup::CleanupMethods; use trans::cleanup::CleanupMethods;
use trans::cleanup; use trans::cleanup;
use trans::common::*; use trans::common::*;
@ -24,7 +25,6 @@ use trans::debuginfo::{DebugLoc, ToDebugLoc};
use trans::expr; use trans::expr;
use trans::machine; use trans::machine;
use trans; use trans;
use middle::ty;
use rustc_front::hir; use rustc_front::hir;
use rustc_front::util as ast_util; use rustc_front::util as ast_util;
@ -152,9 +152,8 @@ pub fn trans_if<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
els: Option<&hir::Expr>, els: Option<&hir::Expr>,
dest: expr::Dest) dest: expr::Dest)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={})", debug!("trans_if(bcx={}, if_id={}, cond={:?}, thn={}, dest={:?})",
bcx.to_str(), if_id, cond, thn.id, bcx.to_str(), if_id, cond, thn.id, dest);
dest.to_string(bcx.ccx()));
let _icx = push_ctxt("trans_if"); let _icx = push_ctxt("trans_if");
if bcx.unreachable.get() { if bcx.unreachable.get() {
@ -363,14 +362,12 @@ pub fn trans_ret<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx; let fcx = bcx.fcx;
let mut bcx = bcx; let mut bcx = bcx;
let dest = match (fcx.llretslotptr.get(), retval_expr) {
(Some(_), Some(retval_expr)) => {
let ret_ty = expr_ty_adjusted(bcx, &retval_expr);
expr::SaveIn(fcx.get_ret_slot(bcx, ty::FnConverging(ret_ty), "ret_slot"))
}
_ => expr::Ignore,
};
if let Some(x) = retval_expr { if let Some(x) = retval_expr {
let dest = if fcx.llretslotptr.get().is_some() {
expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot"))
} else {
expr::Ignore
};
bcx = expr::trans_into(bcx, &x, dest); bcx = expr::trans_into(bcx, &x, dest);
match dest { match dest {
expr::SaveIn(slot) if fcx.needs_ret_allocas => { expr::SaveIn(slot) if fcx.needs_ret_allocas => {
@ -406,13 +403,8 @@ pub fn trans_fail<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc"); let expr_file_line = consts::addr_of(ccx, expr_file_line_const, align, "panic_loc");
let args = vec!(expr_file_line); let args = vec!(expr_file_line);
let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem); let did = langcall(bcx, Some(call_info.span), "", PanicFnLangItem);
let bcx = callee::trans_lang_call(bcx, Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty()))
did, .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
&args[..],
Some(expr::Ignore),
call_info.debug_loc()).bcx;
Unreachable(bcx);
return bcx;
} }
pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
@ -439,11 +431,6 @@ pub fn trans_fail_bounds_check<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc"); let file_line = consts::addr_of(ccx, file_line_const, align, "panic_bounds_check_loc");
let args = vec!(file_line, index, len); let args = vec!(file_line, index, len);
let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem); let did = langcall(bcx, Some(call_info.span), "", PanicBoundsCheckFnLangItem);
let bcx = callee::trans_lang_call(bcx, Callee::def(ccx, did, ccx.tcx().mk_substs(Substs::empty()))
did, .call(bcx, call_info.debug_loc(), ArgVals(&args), None).bcx
&args[..],
Some(expr::Ignore),
call_info.debug_loc()).bcx;
Unreachable(bcx);
return bcx;
} }

View file

@ -101,6 +101,7 @@ use trans::cleanup;
use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods}; use trans::cleanup::{CleanupMethods, DropHintDatum, DropHintMethods};
use trans::expr; use trans::expr;
use trans::tvec; use trans::tvec;
use trans::value::Value;
use middle::ty::Ty; use middle::ty::Ty;
use std::fmt; use std::fmt;
@ -111,7 +112,7 @@ use syntax::codemap::DUMMY_SP;
/// describes where the value is stored, what Rust type the value has, /// describes where the value is stored, what Rust type the value has,
/// whether it is addressed by reference, and so forth. Please refer /// whether it is addressed by reference, and so forth. Please refer
/// the section on datums in `README.md` for more details. /// the section on datums in `README.md` for more details.
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy)]
pub struct Datum<'tcx, K> { pub struct Datum<'tcx, K> {
/// The llvm value. This is either a pointer to the Rust value or /// The llvm value. This is either a pointer to the Rust value or
/// the value itself, depending on `kind` below. /// the value itself, depending on `kind` below.
@ -124,6 +125,13 @@ pub struct Datum<'tcx, K> {
pub kind: K, pub kind: K,
} }
impl<'tcx, K: fmt::Debug> fmt::Debug for Datum<'tcx, K> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Datum({:?}, {:?}, {:?})",
Value(self.val), self.ty, self.kind)
}
}
pub struct DatumBlock<'blk, 'tcx: 'blk, K> { pub struct DatumBlock<'blk, 'tcx: 'blk, K> {
pub bcx: Block<'blk, 'tcx>, pub bcx: Block<'blk, 'tcx>,
pub datum: Datum<'tcx, K>, pub datum: Datum<'tcx, K>,
@ -298,24 +306,23 @@ pub fn immediate_rvalue_bcx<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
/// caller can prove that either (1.) the code injected by `populate` /// caller can prove that either (1.) the code injected by `populate`
/// onto `bcx` always dominates the end of `scope`, or (2.) the data /// onto `bcx` always dominates the end of `scope`, or (2.) the data
/// being allocated has no associated destructor. /// being allocated has no associated destructor.
pub fn lvalue_scratch_datum<'blk, 'tcx, A, F>(bcx: Block<'blk, 'tcx>, pub fn lvalue_scratch_datum<'blk, 'tcx, F>(bcx: Block<'blk, 'tcx>,
ty: Ty<'tcx>, ty: Ty<'tcx>,
name: &str, name: &str,
zero: InitAlloca, zero: InitAlloca,
scope: cleanup::ScopeId, scope: cleanup::ScopeId,
arg: A, populate: F)
populate: F) -> DatumBlock<'blk, 'tcx, Lvalue> where
-> DatumBlock<'blk, 'tcx, Lvalue> where F: FnOnce(Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
F: FnOnce(A, Block<'blk, 'tcx>, ValueRef) -> Block<'blk, 'tcx>,
{ {
// Very subtle: potentially initialize the scratch memory at point where it is alloca'ed. // Very subtle: potentially initialize the scratch memory at point where it is alloca'ed.
// (See discussion at Issue 30530.) // (See discussion at Issue 30530.)
let scratch = alloc_ty_init(bcx, ty, zero, name); let scratch = alloc_ty_init(bcx, ty, zero, name);
debug!("lvalue_scratch_datum scope={:?} scratch={} ty={:?}", debug!("lvalue_scratch_datum scope={:?} scratch={:?} ty={:?}",
scope, bcx.ccx().tn().val_to_string(scratch), ty); scope, Value(scratch), ty);
// Subtle. Populate the scratch memory *before* scheduling cleanup. // Subtle. Populate the scratch memory *before* scheduling cleanup.
let bcx = populate(arg, bcx, scratch); let bcx = populate(bcx, scratch);
bcx.fcx.schedule_drop_mem(scope, scratch, ty, None); bcx.fcx.schedule_drop_mem(scope, scratch, ty, None);
DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum"))) DatumBlock::new(bcx, Datum::new(scratch, ty, Lvalue::new("datum::lvalue_scratch_datum")))
@ -351,8 +358,8 @@ fn add_rvalue_clean<'a, 'tcx>(mode: RvalueMode,
scope: cleanup::ScopeId, scope: cleanup::ScopeId,
val: ValueRef, val: ValueRef,
ty: Ty<'tcx>) { ty: Ty<'tcx>) {
debug!("add_rvalue_clean scope={:?} val={} ty={:?}", debug!("add_rvalue_clean scope={:?} val={:?} ty={:?}",
scope, fcx.ccx.tn().val_to_string(val), ty); scope, Value(val), ty);
match mode { match mode {
ByValue => { fcx.schedule_drop_immediate(scope, val, ty); } ByValue => { fcx.schedule_drop_immediate(scope, val, ty); }
ByRef => { ByRef => {
@ -509,14 +516,14 @@ impl<'tcx> Datum<'tcx, Rvalue> {
ByValue => { ByValue => {
lvalue_scratch_datum( lvalue_scratch_datum(
bcx, self.ty, name, InitAlloca::Dropped, scope, self, bcx, self.ty, name, InitAlloca::Dropped, scope,
|this, bcx, llval| { |bcx, llval| {
debug!("populate call for Datum::to_lvalue_datum_in_scope \ debug!("populate call for Datum::to_lvalue_datum_in_scope \
self.ty={:?}", this.ty); self.ty={:?}", self.ty);
// do not call_lifetime_start here; the // do not call_lifetime_start here; the
// `InitAlloc::Dropped` will start scratch // `InitAlloc::Dropped` will start scratch
// value's lifetime at open of function body. // value's lifetime at open of function body.
let bcx = this.store_to(bcx, llval); let bcx = self.store_to(bcx, llval);
bcx.fcx.schedule_lifetime_end(scope, llval); bcx.fcx.schedule_lifetime_end(scope, llval);
bcx bcx
}) })
@ -617,7 +624,7 @@ impl<'tcx> Datum<'tcx, Expr> {
name: &str, name: &str,
expr_id: ast::NodeId) expr_id: ast::NodeId)
-> DatumBlock<'blk, 'tcx, Lvalue> { -> DatumBlock<'blk, 'tcx, Lvalue> {
debug!("to_lvalue_datum self: {}", self.to_string(bcx.ccx())); debug!("to_lvalue_datum self: {:?}", self);
self.match_kind( self.match_kind(
|l| DatumBlock::new(bcx, l), |l| DatumBlock::new(bcx, l),
@ -767,14 +774,6 @@ impl<'tcx, K: KindOps + fmt::Debug> Datum<'tcx, K> {
self.shallow_copy_raw(bcx, dst) self.shallow_copy_raw(bcx, dst)
} }
#[allow(dead_code)] // useful for debugging
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("Datum({}, {:?}, {:?})",
ccx.tn().val_to_string(self.val),
self.ty,
self.kind)
}
/// See the `appropriate_rvalue_mode()` function /// See the `appropriate_rvalue_mode()` function
pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>) pub fn appropriate_rvalue_mode<'a>(&self, ccx: &CrateContext<'a, 'tcx>)
-> RvalueMode { -> RvalueMode {

View file

@ -468,16 +468,13 @@ fn walk_expr(cx: &CrateContext,
} }
} }
hir::ExprInlineAsm(hir::InlineAsm { ref inputs, hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
ref outputs, for output in outputs {
.. }) => { walk_expr(cx, output, scope_stack, scope_map);
// inputs, outputs: Vec<(String, P<Expr>)>
for &(_, ref exp) in inputs {
walk_expr(cx, &exp, scope_stack, scope_map);
} }
for out in outputs { for input in inputs {
walk_expr(cx, &out.expr, scope_stack, scope_map); walk_expr(cx, input, scope_stack, scope_map);
} }
} }
} }

View file

@ -32,9 +32,10 @@ use middle::subst::{self, Substs};
use rustc_front; use rustc_front;
use rustc_front::hir; use rustc_front::hir;
use trans::abi::Abi;
use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block}; use trans::common::{NodeIdAndSpan, CrateContext, FunctionContext, Block};
use trans; use trans;
use trans::{monomorphize, type_of}; use trans::monomorphize;
use middle::infer; use middle::infer;
use middle::ty::{self, Ty}; use middle::ty::{self, Ty};
use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo}; use session::config::{self, FullDebugInfo, LimitedDebugInfo, NoDebugInfo};
@ -49,7 +50,6 @@ use std::rc::Rc;
use syntax::codemap::{Span, Pos}; use syntax::codemap::{Span, Pos};
use syntax::{ast, codemap}; use syntax::{ast, codemap};
use syntax::abi::Abi;
use syntax::attr::IntType; use syntax::attr::IntType;
use syntax::parse::token::{self, special_idents}; use syntax::parse::token::{self, special_idents};
@ -456,10 +456,10 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
ty::FnDiverging => diverging_type_metadata(cx) ty::FnDiverging => diverging_type_metadata(cx)
}); });
let inputs = &if abi == Abi::RustCall { let inputs = if abi == Abi::RustCall {
type_of::untuple_arguments(cx, &sig.inputs) &sig.inputs[..sig.inputs.len()-1]
} else { } else {
sig.inputs &sig.inputs[..]
}; };
// Arguments types // Arguments types
@ -467,6 +467,14 @@ pub fn create_function_debug_context<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP)); signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
} }
if abi == Abi::RustCall && !sig.inputs.is_empty() {
if let ty::TyTuple(ref args) = sig.inputs[sig.inputs.len() - 1].sty {
for &argument_type in args {
signature.push(type_metadata(cx, argument_type, codemap::DUMMY_SP));
}
}
}
return create_DIArray(DIB(cx), &signature[..]); return create_DIArray(DIB(cx), &signature[..]);
} }
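The `Abi::RustCall` handling above flattens the trailing argument tuple into its element types when building the debuginfo signature. A sketch of the kind of item it applies to (requires the unstable `unboxed_closures`/`fn_traits` machinery of this era; the type and values are illustrative only):

#![feature(unboxed_closures, fn_traits)]

struct Adder;

impl FnOnce<(u32, f64)> for Adder {
    type Output = f64;
    // Debuginfo for this fn records the argument types u32 and f64,
    // not the tuple (u32, f64) itself.
    extern "rust-call" fn call_once(self, args: (u32, f64)) -> f64 {
        args.0 as f64 + args.1
    }
}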

View file

@ -107,7 +107,7 @@ pub fn push_debuginfo_type_name<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
output.push_str("unsafe "); output.push_str("unsafe ");
} }
if abi != ::syntax::abi::Abi::Rust { if abi != ::trans::abi::Abi::Rust {
output.push_str("extern \""); output.push_str("extern \"");
output.push_str(abi.name()); output.push_str(abi.name());
output.push_str("\" "); output.push_str("\" ");

View file

@ -22,13 +22,10 @@
use llvm::{self, ValueRef}; use llvm::{self, ValueRef};
use middle::ty; use middle::ty;
use middle::infer; use middle::infer;
use middle::traits::ProjectionMode; use trans::abi::{Abi, FnType};
use syntax::abi::Abi;
use trans::attributes; use trans::attributes;
use trans::base;
use trans::context::CrateContext; use trans::context::CrateContext;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of;
use std::ffi::CString; use std::ffi::CString;
use libc::c_uint; use libc::c_uint;
@ -51,13 +48,10 @@ pub fn declare_global(ccx: &CrateContext, name: &str, ty: Type) -> llvm::ValueRe
/// Declare a function. /// Declare a function.
/// ///
/// For rust functions use `declare_rust_fn` instead.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return the existing ValueRef instead.
pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, fn declare_raw_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv, ty: Type) -> ValueRef {
ty: Type, output: ty::FnOutput) -> ValueRef { debug!("declare_raw_fn(name={:?}, ty={:?})", name, ty);
debug!("declare_fn(name={:?})", name);
let namebuf = CString::new(name).unwrap_or_else(|_|{ let namebuf = CString::new(name).unwrap_or_else(|_|{
ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) ccx.sess().bug(&format!("name {:?} contains an interior null byte", name))
}); });
@ -70,10 +64,6 @@ pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
// be merged. // be merged.
llvm::SetUnnamedAddr(llfn, true); llvm::SetUnnamedAddr(llfn, true);
if output == ty::FnDiverging {
llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn);
}
if ccx.tcx().sess.opts.cg.no_redzone if ccx.tcx().sess.opts.cg.no_redzone
.unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) { .unwrap_or(ccx.tcx().sess.target.target.options.disable_redzone) {
llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone) llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoRedZone)
@ -86,13 +76,12 @@ pub fn declare_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
/// Declare a C ABI function. /// Declare a C ABI function.
/// ///
/// Only use this for foreign function ABIs and glue. For Rust functions use /// Only use this for foreign function ABIs and glue. For Rust functions use
/// `declare_rust_fn` instead. /// `declare_fn` instead.
/// ///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return the existing ValueRef instead.
pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type, pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type) -> ValueRef {
output: ty::Ty) -> ValueRef { declare_raw_fn(ccx, name, llvm::CCallConv, fn_type)
declare_fn(ccx, name, llvm::CCallConv, fn_type, ty::FnConverging(output))
} }
@ -100,53 +89,27 @@ pub fn declare_cfn(ccx: &CrateContext, name: &str, fn_type: Type,
/// ///
/// If there's a value with the same name already declared, the function will /// If there's a value with the same name already declared, the function will
/// update the declaration and return existing ValueRef instead. /// update the declaration and return existing ValueRef instead.
pub fn declare_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str, pub fn declare_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
fn_type: ty::Ty<'tcx>) -> ValueRef { fn_type: ty::Ty<'tcx>) -> ValueRef {
debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, debug!("declare_rust_fn(name={:?}, fn_type={:?})", name, fn_type);
fn_type); let abi = fn_type.fn_abi();
let sig = ccx.tcx().erase_late_bound_regions(fn_type.fn_sig());
let function_type; // placeholder so that the memory ownership works out ok
let (sig, abi, env) = match fn_type.sty {
ty::TyFnDef(_, _, f) |
ty::TyFnPtr(f) => {
(&f.sig, f.abi, None)
}
ty::TyClosure(closure_did, ref substs) => {
let infcx = infer::normalizing_infer_ctxt(ccx.tcx(),
&ccx.tcx().tables,
ProjectionMode::Any);
function_type = infcx.closure_type(closure_did, substs);
let self_type = base::self_type_for_closure(ccx, closure_did, fn_type);
let llenvironment_type = type_of::type_of_explicit_arg(ccx, self_type);
debug!("declare_rust_fn function_type={:?} self_type={:?}",
function_type, self_type);
(&function_type.sig, Abi::RustCall, Some(llenvironment_type))
}
_ => ccx.sess().bug("expected closure or fn")
};
let sig = ccx.tcx().erase_late_bound_regions(sig);
let sig = infer::normalize_associated_type(ccx.tcx(), &sig); let sig = infer::normalize_associated_type(ccx.tcx(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig); debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let llfty = type_of::type_of_rust_fn(ccx, env, &sig, abi);
debug!("declare_rust_fn llfty={}", ccx.tn().type_to_string(llfty));
// it is ok to directly access sig.0.output because we erased all let fty = FnType::new(ccx, abi, &sig, &[]);
// late-bound-regions above let llfn = declare_raw_fn(ccx, name, fty.cconv, fty.llvm_type(ccx));
let llfn = declare_fn(ccx, name, llvm::CCallConv, llfty, sig.output);
attributes::from_fn_type(ccx, fn_type).apply_llfn(llfn);
llfn
}
if sig.output == ty::FnDiverging {
llvm::SetFunctionAttribute(llfn, llvm::Attribute::NoReturn);
}
if abi != Abi::Rust && abi != Abi::RustCall {
attributes::unwind(llfn, false);
}
fty.apply_attrs_llfn(llfn);
/// Declare a Rust function with internal linkage.
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing ValueRef instead.
pub fn declare_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
fn_type: ty::Ty<'tcx>) -> ValueRef {
let llfn = declare_rust_fn(ccx, name, fn_type);
llvm::SetLinkage(llfn, llvm::InternalLinkage);
llfn llfn
} }
@ -166,78 +129,27 @@ pub fn define_global(ccx: &CrateContext, name: &str, ty: Type) -> Option<ValueRe
} }
/// Declare a function with an intention to define it.
///
/// For rust functions use `define_rust_fn` instead.
///
/// Use this function when you intend to define a function. This function will
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to user's fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
pub fn define_fn(ccx: &CrateContext, name: &str, callconv: llvm::CallConv,
fn_type: Type, output: ty::FnOutput) -> Option<ValueRef> {
if get_defined_value(ccx, name).is_some() {
None
} else {
Some(declare_fn(ccx, name, callconv, fn_type, output))
}
}
/// Declare a C ABI function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to user's fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
///
/// Only use this for foreign function ABIs and glue. For Rust functions use
/// `declare_rust_fn` instead.
pub fn define_cfn(ccx: &CrateContext, name: &str, fn_type: Type,
output: ty::Ty) -> Option<ValueRef> {
if get_defined_value(ccx, name).is_some() {
None
} else {
Some(declare_cfn(ccx, name, fn_type, output))
}
}
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// return None if the name already has a definition associated with it. In that
/// case an error should be reported to the user, because it usually happens due
/// to user's fault (e.g. misuse of #[no_mangle] or #[export_name] attributes).
pub fn define_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, name: &str,
fn_type: ty::Ty<'tcx>) -> Option<ValueRef> {
if get_defined_value(ccx, name).is_some() {
None
} else {
Some(declare_rust_fn(ccx, name, fn_type))
}
}
/// Declare a Rust function with an intention to define it. /// Declare a Rust function with an intention to define it.
/// ///
/// Use this function when you intend to define a function. This function will /// Use this function when you intend to define a function. This function will
/// return panic if the name already has a definition associated with it. This /// return panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example. /// can happen with #[no_mangle] or #[export_name], for example.
pub fn define_internal_rust_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, pub fn define_internal_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
name: &str, name: &str,
fn_type: ty::Ty<'tcx>) -> ValueRef { fn_type: ty::Ty<'tcx>) -> ValueRef {
if get_defined_value(ccx, name).is_some() { if get_defined_value(ccx, name).is_some() {
ccx.sess().fatal(&format!("symbol `{}` already defined", name)) ccx.sess().fatal(&format!("symbol `{}` already defined", name))
} else { } else {
declare_internal_rust_fn(ccx, name, fn_type) let llfn = declare_fn(ccx, name, fn_type);
llvm::SetLinkage(llfn, llvm::InternalLinkage);
llfn
} }
} }
/// Get defined or externally defined (AvailableExternally linkage) value by /// Get defined or externally defined (AvailableExternally linkage) value by
/// name. /// name.
fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> { pub fn get_defined_value(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
debug!("get_defined_value(name={:?})", name); debug!("get_defined_value(name={:?})", name);
let namebuf = CString::new(name).unwrap_or_else(|_|{ let namebuf = CString::new(name).unwrap_or_else(|_|{
ccx.sess().bug(&format!("name {:?} contains an interior null byte", name)) ccx.sess().bug(&format!("name {:?} contains an interior null byte", name))

View file

@ -44,19 +44,18 @@
//! expression and ensures that the result has a cleanup associated with it, //! expression and ensures that the result has a cleanup associated with it,
//! creating a temporary stack slot if necessary. //! creating a temporary stack slot if necessary.
//! //!
//! - `trans_local_var -> Datum`: looks up a local variable or upvar. //! - `trans_var -> Datum`: looks up a local variable, upvar or static.
#![allow(non_camel_case_types)] #![allow(non_camel_case_types)]
pub use self::Dest::*; pub use self::Dest::*;
use self::lazy_binop_ty::*; use self::lazy_binop_ty::*;
use back::abi;
use llvm::{self, ValueRef, TypeKind}; use llvm::{self, ValueRef, TypeKind};
use middle::const_qualif::ConstQualif; use middle::const_qualif::ConstQualif;
use middle::def::Def; use middle::def::Def;
use middle::subst::Substs; use middle::subst::Substs;
use trans::{_match, adt, asm, base, closure, consts, controlflow}; use trans::{_match, abi, adt, asm, base, closure, consts, controlflow};
use trans::base::*; use trans::base::*;
use trans::build::*; use trans::build::*;
use trans::callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp}; use trans::callee::{Callee, ArgExprs, ArgOverloadedCall, ArgOverloadedOp};
@ -69,6 +68,7 @@ use trans::glue;
use trans::machine; use trans::machine;
use trans::tvec; use trans::tvec;
use trans::type_of; use trans::type_of;
use trans::value::Value;
use trans::Disr; use trans::Disr;
use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer}; use middle::ty::adjustment::{AdjustDerefRef, AdjustReifyFnPointer};
use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer}; use middle::ty::adjustment::{AdjustUnsafeFnPointer, AdjustMutToConstPointer};
@ -85,6 +85,7 @@ use rustc_front::hir;
use syntax::{ast, codemap}; use syntax::{ast, codemap};
use syntax::parse::token::InternedString; use syntax::parse::token::InternedString;
use std::fmt;
use std::mem; use std::mem;
// Destinations // Destinations
@ -98,11 +99,11 @@ pub enum Dest {
Ignore, Ignore,
} }
impl Dest { impl fmt::Debug for Dest {
pub fn to_string(&self, ccx: &CrateContext) -> String { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self { match *self {
SaveIn(v) => format!("SaveIn({})", ccx.tn().val_to_string(v)), SaveIn(v) => write!(f, "SaveIn({:?})", Value(v)),
Ignore => "Ignore".to_string() Ignore => f.write_str("Ignore")
} }
} }
} }
@ -377,15 +378,13 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
Some(adj) => { adj } Some(adj) => { adj }
}; };
debug!("unadjusted datum for expr {:?}: {} adjustment={:?}", debug!("unadjusted datum for expr {:?}: {:?} adjustment={:?}",
expr, expr, datum, adjustment);
datum.to_string(bcx.ccx()),
adjustment);
match adjustment { match adjustment {
AdjustReifyFnPointer => { AdjustReifyFnPointer => {
match datum.ty.sty { match datum.ty.sty {
ty::TyFnDef(def_id, substs, _) => { ty::TyFnDef(def_id, substs, _) => {
datum = Callee::def(bcx.ccx(), def_id, substs, datum.ty) datum = Callee::def(bcx.ccx(), def_id, substs)
.reify(bcx.ccx()).to_expr_datum(); .reify(bcx.ccx()).to_expr_datum();
} }
_ => { _ => {
@ -452,7 +451,7 @@ fn apply_adjustments<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
} }
} }
debug!("after adjustments, datum={}", datum.to_string(bcx.ccx())); debug!("after adjustments, datum={:?}", datum);
DatumBlock::new(bcx, datum) DatumBlock::new(bcx, datum)
} }
@ -462,9 +461,7 @@ fn coerce_unsized<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
target: Datum<'tcx, Rvalue>) target: Datum<'tcx, Rvalue>)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
let mut bcx = bcx; let mut bcx = bcx;
debug!("coerce_unsized({} -> {})", debug!("coerce_unsized({:?} -> {:?})", source, target);
source.to_string(bcx.ccx()),
target.to_string(bcx.ccx()));
match (&source.ty.sty, &target.ty.sty) { match (&source.ty.sty, &target.ty.sty) {
(&ty::TyBox(a), &ty::TyBox(b)) | (&ty::TyBox(a), &ty::TyBox(b)) |
@ -654,7 +651,8 @@ fn trans_datum_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
trans(bcx, &e) trans(bcx, &e)
} }
hir::ExprPath(..) => { hir::ExprPath(..) => {
trans_def(bcx, expr, bcx.def(expr.id)) let var = trans_var(bcx, bcx.def(expr.id));
DatumBlock::new(bcx, var.to_expr_datum())
} }
hir::ExprField(ref base, name) => { hir::ExprField(ref base, name) => {
trans_rec_field(bcx, &base, name.node) trans_rec_field(bcx, &base, name.node)
@ -854,8 +852,8 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let (base, len) = base_datum.get_vec_base_and_len(bcx); let (base, len) = base_datum.get_vec_base_and_len(bcx);
debug!("trans_index: base {}", bcx.val_to_string(base)); debug!("trans_index: base {:?}", Value(base));
debug!("trans_index: len {}", bcx.val_to_string(len)); debug!("trans_index: len {:?}", Value(len));
let bounds_check = ICmp(bcx, let bounds_check = ICmp(bcx,
llvm::IntUGE, llvm::IntUGE,
@ -866,7 +864,6 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let expected = Call(bcx, let expected = Call(bcx,
expect, expect,
&[bounds_check, C_bool(ccx, false)], &[bounds_check, C_bool(ccx, false)],
None,
index_expr_debug_loc); index_expr_debug_loc);
bcx = with_cond(bcx, expected, |bcx| { bcx = with_cond(bcx, expected, |bcx| {
controlflow::trans_fail_bounds_check(bcx, controlflow::trans_fail_bounds_check(bcx,
@ -884,27 +881,40 @@ fn trans_index<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
DatumBlock::new(bcx, elt_datum) DatumBlock::new(bcx, elt_datum)
} }
fn trans_def<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, /// Translates a reference to a variable.
ref_expr: &hir::Expr, pub fn trans_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, def: Def)
def: Def) -> Datum<'tcx, Lvalue> {
-> DatumBlock<'blk, 'tcx, Expr> {
//! Translates a reference to a path.
let _icx = push_ctxt("trans_def_lvalue");
match def { match def {
Def::Static(did, _) => { Def::Static(did, _) => consts::get_static(bcx.ccx(), did),
let const_ty = expr_ty(bcx, ref_expr); Def::Upvar(_, nid, _, _) => {
let val = get_static_val(bcx.ccx(), did, const_ty); // Can't move upvars, so this is never a ZeroMemLastUse.
let lval = Lvalue::new("expr::trans_def"); let local_ty = node_id_type(bcx, nid);
DatumBlock::new(bcx, Datum::new(val, const_ty, LvalueExpr(lval))) let lval = Lvalue::new_with_hint("expr::trans_var (upvar)",
bcx, nid, HintKind::ZeroAndMaintain);
match bcx.fcx.llupvars.borrow().get(&nid) {
Some(&val) => Datum::new(val, local_ty, lval),
None => {
bcx.sess().bug(&format!(
"trans_var: no llval for upvar {} found",
nid));
}
}
} }
Def::Local(..) | Def::Upvar(..) => { Def::Local(_, nid) => {
DatumBlock::new(bcx, trans_local_var(bcx, def).to_expr_datum()) let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
} Some(&v) => v,
_ => { None => {
bcx.sess().span_bug(ref_expr.span, bcx.sess().bug(&format!(
&format!("{:?} should not reach expr::trans_def", def)) "trans_var: no datum for local/arg {} found",
nid));
}
};
debug!("take_local(nid={}, v={:?}, ty={})",
nid, Value(datum.val), datum.ty);
datum
} }
_ => unreachable!("{:?} should not reach expr::trans_var", def)
} }
} }
@ -1027,8 +1037,18 @@ fn trans_rvalue_stmt_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
trans_assign_op(bcx, expr, op, &dst, &src) trans_assign_op(bcx, expr, op, &dst, &src)
} }
} }
hir::ExprInlineAsm(ref a) => { hir::ExprInlineAsm(ref a, ref outputs, ref inputs) => {
asm::trans_inline_asm(bcx, a) let outputs = outputs.iter().map(|output| {
let out_datum = unpack_datum!(bcx, trans(bcx, output));
unpack_datum!(bcx, out_datum.to_lvalue_datum(bcx, "out", expr.id))
}).collect();
let inputs = inputs.iter().map(|input| {
let input = unpack_datum!(bcx, trans(bcx, input));
let input = unpack_datum!(bcx, input.to_rvalue_datum(bcx, "in"));
input.to_llscalarish(bcx)
}).collect();
asm::trans_inline_asm(bcx, a, outputs, inputs);
bcx
} }
_ => { _ => {
bcx.tcx().sess.span_bug( bcx.tcx().sess.span_bug(
@ -1131,8 +1151,7 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
body, body,
expr.id, expr.id,
def_id, def_id,
substs, substs).unwrap_or(bcx)
&expr.attrs).unwrap_or(bcx)
} }
hir::ExprCall(ref f, ref args) => { hir::ExprCall(ref f, ref args) => {
let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned(); let method = bcx.tcx().tables.borrow().method_map.get(&method_call).cloned();
@ -1145,7 +1164,7 @@ fn trans_rvalue_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let f = unpack_datum!(bcx, trans(bcx, f)); let f = unpack_datum!(bcx, trans(bcx, f));
(match f.ty.sty { (match f.ty.sty {
ty::TyFnDef(def_id, substs, _) => { ty::TyFnDef(def_id, substs, _) => {
Callee::def(bcx.ccx(), def_id, substs, f.ty) Callee::def(bcx.ccx(), def_id, substs)
} }
ty::TyFnPtr(_) => { ty::TyFnPtr(_) => {
let f = unpack_datum!(bcx, let f = unpack_datum!(bcx,
@ -1249,48 +1268,6 @@ fn trans_def_dps_unadjusted<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
} }
/// Translates a reference to a local variable or argument. This always results in an lvalue datum.
pub fn trans_local_var<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
def: Def)
-> Datum<'tcx, Lvalue> {
let _icx = push_ctxt("trans_local_var");
match def {
Def::Upvar(_, nid, _, _) => {
// Can't move upvars, so this is never a ZeroMemLastUse.
let local_ty = node_id_type(bcx, nid);
let lval = Lvalue::new_with_hint("expr::trans_local_var (upvar)",
bcx, nid, HintKind::ZeroAndMaintain);
match bcx.fcx.llupvars.borrow().get(&nid) {
Some(&val) => Datum::new(val, local_ty, lval),
None => {
bcx.sess().bug(&format!(
"trans_local_var: no llval for upvar {} found",
nid));
}
}
}
Def::Local(_, nid) => {
let datum = match bcx.fcx.lllocals.borrow().get(&nid) {
Some(&v) => v,
None => {
bcx.sess().bug(&format!(
"trans_local_var: no datum for local/arg {} found",
nid));
}
};
debug!("take_local(nid={}, v={}, ty={})",
nid, bcx.val_to_string(datum.val), datum.ty);
datum
}
_ => {
bcx.sess().unimpl(&format!(
"unsupported def type in trans_local_var: {:?}",
def));
}
}
}
fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn trans_struct<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
fields: &[hir::Field], fields: &[hir::Field],
base: Option<&hir::Expr>, base: Option<&hir::Expr>,
@ -1708,15 +1685,14 @@ fn trans_scalar_binop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
if use_fmod { if use_fmod {
let f64t = Type::f64(bcx.ccx()); let f64t = Type::f64(bcx.ccx());
let fty = Type::func(&[f64t, f64t], &f64t); let fty = Type::func(&[f64t, f64t], &f64t);
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
tcx.types.f64);
if lhs_t == tcx.types.f32 { if lhs_t == tcx.types.f32 {
let lhs = FPExt(bcx, lhs, f64t); let lhs = FPExt(bcx, lhs, f64t);
let rhs = FPExt(bcx, rhs, f64t); let rhs = FPExt(bcx, rhs, f64t);
let res = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); let res = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
FPTrunc(bcx, res, Type::f32(bcx.ccx())) FPTrunc(bcx, res, Type::f32(bcx.ccx()))
} else { } else {
Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc) Call(bcx, llfn, &[lhs, rhs], binop_debug_loc)
} }
} else { } else {
FRem(bcx, lhs, rhs, binop_debug_loc) FRem(bcx, lhs, rhs, binop_debug_loc)
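
When use_fmod is taken in the hunk above, an f32 remainder is computed by extending both operands to f64, calling the C fmod, and truncating back; otherwise a plain frem is emitted. A standalone check that the widen-and-truncate route agrees with the direct f32 remainder (ordinary Rust, not trans code):

    fn main() {
        let (lhs, rhs) = (7.5f32, 2.0f32);
        // Mirrors the FPExt / Call(fmod) / FPTrunc sequence in trans_scalar_binop.
        let via_f64 = (lhs as f64 % rhs as f64) as f32;
        assert_eq!(via_f64, lhs % rhs);
    }
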
@ -1829,12 +1805,10 @@ fn trans_binary<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let lhs = unpack_datum!(bcx, trans(bcx, lhs)); let lhs = unpack_datum!(bcx, trans(bcx, lhs));
let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs")); let lhs = unpack_datum!(bcx, lhs.to_rvalue_datum(bcx, "binop_lhs"));
debug!("trans_binary (expr {}): lhs={}", debug!("trans_binary (expr {}): lhs={:?}", expr.id, lhs);
expr.id, lhs.to_string(ccx));
let rhs = unpack_datum!(bcx, trans(bcx, rhs)); let rhs = unpack_datum!(bcx, trans(bcx, rhs));
let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs")); let rhs = unpack_datum!(bcx, rhs.to_rvalue_datum(bcx, "binop_rhs"));
debug!("trans_binary (expr {}): rhs={}", debug!("trans_binary (expr {}): rhs={:?}", expr.id, rhs);
expr.id, rhs.to_string(ccx));
if type_is_fat_ptr(ccx.tcx(), lhs.ty) { if type_is_fat_ptr(ccx.tcx(), lhs.ty) {
assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty), assert!(type_is_fat_ptr(ccx.tcx(), rhs.ty),
@ -1933,8 +1907,8 @@ fn trans_imm_cast<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let t_out = node_id_type(bcx, id); let t_out = node_id_type(bcx, id);
debug!("trans_cast({:?} as {:?})", t_in, t_out); debug!("trans_cast({:?} as {:?})", t_in, t_out);
let mut ll_t_in = type_of::arg_type_of(ccx, t_in); let mut ll_t_in = type_of::immediate_type_of(ccx, t_in);
let ll_t_out = type_of::arg_type_of(ccx, t_out); let ll_t_out = type_of::immediate_type_of(ccx, t_out);
// Convert the value to be cast into a ValueRef, either by-ref or // Convert the value to be cast into a ValueRef, either by-ref or
// by-value as appropriate given its type: // by-value as appropriate given its type:
let mut datum = unpack_datum!(bcx, trans(bcx, expr)); let mut datum = unpack_datum!(bcx, trans(bcx, expr));
@ -2085,10 +2059,8 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
-> DatumBlock<'blk, 'tcx, Expr> { -> DatumBlock<'blk, 'tcx, Expr> {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
debug!("deref_once(expr={:?}, datum={}, method_call={:?})", debug!("deref_once(expr={:?}, datum={:?}, method_call={:?})",
expr, expr, datum, method_call);
datum.to_string(ccx),
method_call);
let mut bcx = bcx; let mut bcx = bcx;
@ -2175,8 +2147,8 @@ fn deref_once<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
} }
}; };
debug!("deref_once(expr={}, method_call={:?}, result={})", debug!("deref_once(expr={}, method_call={:?}, result={:?})",
expr.id, method_call, r.datum.to_string(ccx)); expr.id, method_call, r.datum);
return r; return r;
} }
@ -2291,7 +2263,7 @@ impl OverflowOpViaIntrinsic {
-> (Block<'blk, 'tcx>, ValueRef) { -> (Block<'blk, 'tcx>, ValueRef) {
let llfn = self.to_intrinsic(bcx, lhs_t); let llfn = self.to_intrinsic(bcx, lhs_t);
let val = Call(bcx, llfn, &[lhs, rhs], None, binop_debug_loc); let val = Call(bcx, llfn, &[lhs, rhs], binop_debug_loc);
let result = ExtractValue(bcx, val, 0); // iN operation result let result = ExtractValue(bcx, val, 0); // iN operation result
let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?" let overflow = ExtractValue(bcx, val, 1); // i1 "did it overflow?"
@ -2300,7 +2272,7 @@ impl OverflowOpViaIntrinsic {
let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1"); let expect = bcx.ccx().get_intrinsic(&"llvm.expect.i1");
Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)], Call(bcx, expect, &[cond, C_integral(Type::i1(bcx.ccx()), 0, false)],
None, binop_debug_loc); binop_debug_loc);
let bcx = let bcx =
base::with_cond(bcx, cond, |bcx| base::with_cond(bcx, cond, |bcx|
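
Several hunks in this file swap val_to_string/to_string(ccx) debug output for {:?} on a Value(...) newtype wrapper. A rough standalone sketch of that pattern; ValueRef here is a made-up stand-in for the real LLVM handle:

    use std::fmt;

    // Hypothetical stand-in for llvm::ValueRef; the real trans::value::Value
    // wrapper prints the underlying LLVM value rather than a plain integer.
    struct ValueRef(u32);
    struct Value<'a>(&'a ValueRef);

    impl<'a> fmt::Debug for Value<'a> {
        fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            write!(f, "%{}", (self.0).0)
        }
    }

    fn main() {
        let v = ValueRef(42);
        // Mirrors debug!("SaveIn({:?})", Value(v)) from the Dest impl above.
        println!("SaveIn({:?})", Value(&v));
    }
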

File diff suppressed because it is too large

View file

@ -14,18 +14,19 @@
use std; use std;
use back::link::*; use back::link;
use llvm; use llvm;
use llvm::{ValueRef, get_param}; use llvm::{ValueRef, get_param};
use middle::lang_items::ExchangeFreeFnLangItem; use middle::lang_items::ExchangeFreeFnLangItem;
use middle::subst::{Substs}; use middle::subst::{Substs};
use middle::traits; use middle::traits;
use middle::ty::{self, Ty, TyCtxt}; use middle::ty::{self, Ty, TyCtxt};
use trans::abi::{Abi, FnType};
use trans::adt; use trans::adt;
use trans::adt::GetDtorType; // for tcx.dtor_type() use trans::adt::GetDtorType; // for tcx.dtor_type()
use trans::base::*; use trans::base::*;
use trans::build::*; use trans::build::*;
use trans::callee; use trans::callee::{Callee, ArgVals};
use trans::cleanup; use trans::cleanup;
use trans::cleanup::CleanupMethods; use trans::cleanup::CleanupMethods;
use trans::collector::{self, TransItem}; use trans::collector::{self, TransItem};
@ -37,25 +38,23 @@ use trans::machine::*;
use trans::monomorphize; use trans::monomorphize;
use trans::type_of::{type_of, sizing_type_of, align_of}; use trans::type_of::{type_of, sizing_type_of, align_of};
use trans::type_::Type; use trans::type_::Type;
use trans::value::Value;
use arena::TypedArena; use arena::TypedArena;
use libc::c_uint;
use syntax::ast;
use syntax::codemap::DUMMY_SP; use syntax::codemap::DUMMY_SP;
pub fn trans_exchange_free_dyn<'blk, 'tcx>(cx: Block<'blk, 'tcx>, pub fn trans_exchange_free_dyn<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
v: ValueRef, v: ValueRef,
size: ValueRef, size: ValueRef,
align: ValueRef, align: ValueRef,
debug_loc: DebugLoc) debug_loc: DebugLoc)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
let _icx = push_ctxt("trans_exchange_free"); let _icx = push_ctxt("trans_exchange_free");
let ccx = cx.ccx();
callee::trans_lang_call(cx, let def_id = langcall(bcx, None, "", ExchangeFreeFnLangItem);
langcall(cx, None, "", ExchangeFreeFnLangItem), let args = [PointerCast(bcx, v, Type::i8p(bcx.ccx())), size, align];
&[PointerCast(cx, v, Type::i8p(ccx)), size, align], Callee::def(bcx.ccx(), def_id, bcx.tcx().mk_substs(Substs::empty()))
Some(expr::Ignore), .call(bcx, debug_loc, ArgVals(&args), None).bcx
debug_loc).bcx
} }
pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>, pub fn trans_exchange_free<'blk, 'tcx>(cx: Block<'blk, 'tcx>,
@ -170,13 +169,13 @@ pub fn drop_ty_core<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let may_need_drop = let may_need_drop =
ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None); ICmp(bcx, llvm::IntNE, hint_val, moved_val, DebugLoc::None);
bcx = with_cond(bcx, may_need_drop, |cx| { bcx = with_cond(bcx, may_need_drop, |cx| {
Call(cx, glue, &[ptr], None, debug_loc); Call(cx, glue, &[ptr], debug_loc);
cx cx
}) })
} }
None => { None => {
// No drop-hint ==> call standard drop glue // No drop-hint ==> call standard drop glue
Call(bcx, glue, &[ptr], None, debug_loc); Call(bcx, glue, &[ptr], debug_loc);
} }
} }
} }
@ -240,38 +239,40 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
} }
let t = g.ty(); let t = g.ty();
let llty = if type_is_sized(ccx.tcx(), t) { let tcx = ccx.tcx();
type_of(ccx, t).ptr_to() let sig = ty::FnSig {
} else { inputs: vec![tcx.mk_mut_ptr(tcx.types.i8)],
type_of(ccx, ccx.tcx().mk_box(t)).ptr_to() output: ty::FnOutput::FnConverging(tcx.mk_nil()),
variadic: false,
}; };
// Create a FnType for fn(*mut i8) and substitute the real type in
let llfnty = Type::glue_fn(ccx, llty); // later - that prevents FnType from splitting fat pointers up.
let mut fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
fn_ty.args[0].original_ty = type_of(ccx, t).ptr_to();
let llfnty = fn_ty.llvm_type(ccx);
// To avoid infinite recursion, don't `make_drop_glue` until after we've // To avoid infinite recursion, don't `make_drop_glue` until after we've
// added the entry to the `drop_glues` cache. // added the entry to the `drop_glues` cache.
if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) { if let Some(old_sym) = ccx.available_drop_glues().borrow().get(&g) {
let llfn = declare::declare_cfn(ccx, &old_sym, llfnty, ccx.tcx().mk_nil()); let llfn = declare::declare_cfn(ccx, &old_sym, llfnty);
ccx.drop_glues().borrow_mut().insert(g, llfn); ccx.drop_glues().borrow_mut().insert(g, llfn);
return llfn; return llfn;
}; };
let fn_nm = mangle_internal_name_by_type_and_seq(ccx, t, "drop"); let fn_nm = link::mangle_internal_name_by_type_and_seq(ccx, t, "drop");
let llfn = declare::define_cfn(ccx, &fn_nm, llfnty, ccx.tcx().mk_nil()).unwrap_or_else(||{ assert!(declare::get_defined_value(ccx, &fn_nm).is_none());
ccx.sess().bug(&format!("symbol `{}` already defined", fn_nm)); let llfn = declare::declare_cfn(ccx, &fn_nm, llfnty);
});
ccx.available_drop_glues().borrow_mut().insert(g, fn_nm); ccx.available_drop_glues().borrow_mut().insert(g, fn_nm);
ccx.drop_glues().borrow_mut().insert(g, llfn);
let _s = StatRecorder::new(ccx, format!("drop {:?}", t)); let _s = StatRecorder::new(ccx, format!("drop {:?}", t));
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (arena, fcx): (TypedArena<_>, FunctionContext); let (arena, fcx): (TypedArena<_>, FunctionContext);
arena = TypedArena::new(); arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false, fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &arena);
ty::FnConverging(ccx.tcx().mk_nil()),
empty_substs, None, &arena);
let bcx = init_function(&fcx, false, ty::FnConverging(ccx.tcx().mk_nil())); let bcx = fcx.init(false, None);
update_linkage(ccx, llfn, None, OriginalTranslation); update_linkage(ccx, llfn, None, OriginalTranslation);
@ -284,9 +285,8 @@ fn get_drop_glue_core<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// llfn is expected be declared to take a parameter of the appropriate // llfn is expected be declared to take a parameter of the appropriate
// type, so we don't need to explicitly cast the function parameter. // type, so we don't need to explicitly cast the function parameter.
let llrawptr0 = get_param(llfn, fcx.arg_offset() as c_uint); let bcx = make_drop_glue(bcx, get_param(llfn, 0), g);
let bcx = make_drop_glue(bcx, llrawptr0, g); fcx.finish(bcx, DebugLoc::None);
finish_fn(&fcx, bcx, ty::FnConverging(ccx.tcx().mk_nil()), DebugLoc::None);
llfn llfn
} }
@ -314,7 +314,7 @@ fn trans_struct_drop_flag<'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
And(bcx, not_init, not_done, DebugLoc::None); And(bcx, not_init, not_done, DebugLoc::None);
with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| { with_cond(bcx, drop_flag_neither_initialized_nor_cleared, |cx| {
let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap")); let llfn = cx.ccx().get_intrinsic(&("llvm.debugtrap"));
Call(cx, llfn, &[], None, DebugLoc::None); Call(cx, llfn, &[], DebugLoc::None);
cx cx
}) })
}; };
@ -365,27 +365,31 @@ fn trans_struct_drop<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
_ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t)) _ => tcx.sess.bug(&format!("dtor for {:?} is not an impl???", t))
}; };
let dtor_did = def.destructor().unwrap(); let dtor_did = def.destructor().unwrap();
bcx = callee::Callee::ptr(callee::trans_fn_ref_with_substs( bcx = Callee::def(bcx.ccx(), dtor_did, vtbl.substs)
bcx.ccx(), dtor_did, None, vtbl.substs)) .call(bcx, DebugLoc::None, ArgVals(args), None).bcx;
.call(bcx, DebugLoc::None, callee::ArgVals(args), Some(expr::Ignore)).bcx;
bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope) bcx.fcx.pop_and_trans_custom_cleanup_scope(bcx, contents_scope)
} }
pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, info: ValueRef) pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: &BlockAndBuilder<'blk, 'tcx>,
t: Ty<'tcx>, info: ValueRef)
-> (ValueRef, ValueRef) { -> (ValueRef, ValueRef) {
debug!("calculate size of DST: {}; with lost info: {}", debug!("calculate size of DST: {}; with lost info: {:?}",
t, bcx.val_to_string(info)); t, Value(info));
if type_is_sized(bcx.tcx(), t) { if type_is_sized(bcx.tcx(), t) {
let sizing_type = sizing_type_of(bcx.ccx(), t); let sizing_type = sizing_type_of(bcx.ccx(), t);
let size = llsize_of_alloc(bcx.ccx(), sizing_type); let size = llsize_of_alloc(bcx.ccx(), sizing_type);
let align = align_of(bcx.ccx(), t); let align = align_of(bcx.ccx(), t);
debug!("size_and_align_of_dst t={} info={} size: {} align: {}", debug!("size_and_align_of_dst t={} info={:?} size: {} align: {}",
t, bcx.val_to_string(info), size, align); t, Value(info), size, align);
let size = C_uint(bcx.ccx(), size); let size = C_uint(bcx.ccx(), size);
let align = C_uint(bcx.ccx(), align); let align = C_uint(bcx.ccx(), align);
return (size, align); return (size, align);
} }
if bcx.is_unreachable() {
let llty = Type::int(bcx.ccx());
return (C_undef(llty), C_undef(llty));
}
match t.sty { match t.sty {
ty::TyStruct(def, substs) => { ty::TyStruct(def, substs) => {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
@ -394,7 +398,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
assert!(!t.is_simd()); assert!(!t.is_simd());
let repr = adt::represent_type(ccx, t); let repr = adt::represent_type(ccx, t);
let sizing_type = adt::sizing_type_context_of(ccx, &repr, true); let sizing_type = adt::sizing_type_context_of(ccx, &repr, true);
debug!("DST {} sizing_type: {}", t, sizing_type.to_string()); debug!("DST {} sizing_type: {:?}", t, sizing_type);
let sized_size = llsize_of_alloc(ccx, sizing_type.prefix()); let sized_size = llsize_of_alloc(ccx, sizing_type.prefix());
let sized_align = llalign_of_min(ccx, sizing_type.prefix()); let sized_align = llalign_of_min(ccx, sizing_type.prefix());
debug!("DST {} statically sized prefix size: {} align: {}", debug!("DST {} statically sized prefix size: {} align: {}",
@ -408,8 +412,6 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field); let field_ty = monomorphize::field_ty(bcx.tcx(), substs, last_field);
let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info); let (unsized_size, unsized_align) = size_and_align_of_dst(bcx, field_ty, info);
let dbloc = DebugLoc::None;
// FIXME (#26403, #27023): We should be adding padding // FIXME (#26403, #27023): We should be adding padding
// to `sized_size` (to accommodate the `unsized_align` // to `sized_size` (to accommodate the `unsized_align`
// required of the unsized field that follows) before // required of the unsized field that follows) before
@ -418,14 +420,14 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
// here. But this is where the add would go.) // here. But this is where the add would go.)
// Return the sum of sizes and max of aligns. // Return the sum of sizes and max of aligns.
let mut size = Add(bcx, sized_size, unsized_size, dbloc); let mut size = bcx.add(sized_size, unsized_size);
// Issue #27023: If there is a drop flag, *now* we add 1 // Issue #27023: If there is a drop flag, *now* we add 1
// to the size. (We can do this without adding any // to the size. (We can do this without adding any
// padding because drop flags do not have any alignment // padding because drop flags do not have any alignment
// constraints.) // constraints.)
if sizing_type.needs_drop_flag() { if sizing_type.needs_drop_flag() {
size = Add(bcx, size, C_uint(bcx.ccx(), 1_u64), dbloc); size = bcx.add(size, C_uint(bcx.ccx(), 1_u64));
} }
// Choose max of two known alignments (combined value must // Choose max of two known alignments (combined value must
@ -436,14 +438,9 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
// pick the correct alignment statically. // pick the correct alignment statically.
C_uint(ccx, std::cmp::max(sized_align, unsized_align)) C_uint(ccx, std::cmp::max(sized_align, unsized_align))
} }
_ => Select(bcx, _ => bcx.select(bcx.icmp(llvm::IntUGT, sized_align, unsized_align),
ICmp(bcx, sized_align,
llvm::IntUGT, unsized_align)
sized_align,
unsized_align,
dbloc),
sized_align,
unsized_align)
}; };
// Issue #27023: must add any necessary padding to `size` // Issue #27023: must add any necessary padding to `size`
@ -457,19 +454,18 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
// //
// `(size + (align-1)) & -align` // `(size + (align-1)) & -align`
let addend = Sub(bcx, align, C_uint(bcx.ccx(), 1_u64), dbloc); let addend = bcx.sub(align, C_uint(bcx.ccx(), 1_u64));
let size = And( let size = bcx.and(bcx.add(size, addend), bcx.neg(align));
bcx, Add(bcx, size, addend, dbloc), Neg(bcx, align, dbloc), dbloc);
(size, align) (size, align)
} }
ty::TyTrait(..) => { ty::TyTrait(..) => {
// info points to the vtable and the second entry in the vtable is the // info points to the vtable and the second entry in the vtable is the
// dynamic size of the object. // dynamic size of the object.
let info = PointerCast(bcx, info, Type::int(bcx.ccx()).ptr_to()); let info = bcx.pointercast(info, Type::int(bcx.ccx()).ptr_to());
let size_ptr = GEPi(bcx, info, &[1]); let size_ptr = bcx.gepi(info, &[1]);
let align_ptr = GEPi(bcx, info, &[2]); let align_ptr = bcx.gepi(info, &[2]);
(Load(bcx, size_ptr), Load(bcx, align_ptr)) (bcx.load(size_ptr), bcx.load(align_ptr))
} }
ty::TySlice(_) | ty::TyStr => { ty::TySlice(_) | ty::TyStr => {
let unit_ty = t.sequence_element_type(bcx.tcx()); let unit_ty = t.sequence_element_type(bcx.tcx());
@ -478,7 +474,7 @@ pub fn size_and_align_of_dst<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, t: Ty<'tcx>, in
let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty); let llunit_ty = sizing_type_of(bcx.ccx(), unit_ty);
let unit_align = llalign_of_min(bcx.ccx(), llunit_ty); let unit_align = llalign_of_min(bcx.ccx(), llunit_ty);
let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty); let unit_size = llsize_of_alloc(bcx.ccx(), llunit_ty);
(Mul(bcx, info, C_uint(bcx.ccx(), unit_size), DebugLoc::None), (bcx.mul(info, C_uint(bcx.ccx(), unit_size)),
C_uint(bcx.ccx(), unit_align)) C_uint(bcx.ccx(), unit_align))
} }
_ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t)) _ => bcx.sess().bug(&format!("Unexpected unsized type, found {}", t))
@ -523,7 +519,8 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None); let bcx = drop_ty(bcx, v0, content_ty, DebugLoc::None);
let info = expr::get_meta(bcx, v0); let info = expr::get_meta(bcx, v0);
let info = Load(bcx, info); let info = Load(bcx, info);
let (llsize, llalign) = size_and_align_of_dst(bcx, content_ty, info); let (llsize, llalign) =
size_and_align_of_dst(&bcx.build(), content_ty, info);
// `Box<ZeroSizeType>` does not allocate. // `Box<ZeroSizeType>` does not allocate.
let needs_free = ICmp(bcx, let needs_free = ICmp(bcx,
@ -585,7 +582,6 @@ fn make_drop_glue<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, v0: ValueRef, g: DropGlueK
Call(bcx, Call(bcx,
dtor, dtor,
&[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))], &[PointerCast(bcx, Load(bcx, data_ptr), Type::i8p(bcx.ccx()))],
None,
DebugLoc::None); DebugLoc::None);
bcx bcx
} }
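
The DST path above rounds the computed size up to the required alignment with (size + (align - 1)) & -align, where -align in two's complement equals !(align - 1) for a power-of-two alignment. A standalone check of just that formula:

    fn round_up(size: u64, align: u64) -> u64 {
        // align must be a power of two; align.wrapping_neg() == !(align - 1).
        debug_assert!(align.is_power_of_two());
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up(13, 8), 16);
        assert_eq!(round_up(16, 8), 16);
        assert_eq!(round_up(1, 4), 4);
    }
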

View file

@ -12,7 +12,8 @@ use llvm::{AvailableExternallyLinkage, InternalLinkage, SetLinkage};
use middle::cstore::{CrateStore, FoundAst, InlinedItem}; use middle::cstore::{CrateStore, FoundAst, InlinedItem};
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::subst::Substs; use middle::subst::Substs;
use trans::base::{push_ctxt, trans_item, get_item_val, trans_fn}; use trans::base::{push_ctxt, trans_item, trans_fn};
use trans::callee::Callee;
use trans::common::*; use trans::common::*;
use rustc::dep_graph::DepNode; use rustc::dep_graph::DepNode;
@ -21,14 +22,15 @@ use rustc_front::hir;
fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> { fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
debug!("instantiate_inline({:?})", fn_id); debug!("instantiate_inline({:?})", fn_id);
let _icx = push_ctxt("instantiate_inline"); let _icx = push_ctxt("instantiate_inline");
let _task = ccx.tcx().dep_graph.in_task(DepNode::TransInlinedItem(fn_id)); let tcx = ccx.tcx();
let _task = tcx.dep_graph.in_task(DepNode::TransInlinedItem(fn_id));
match ccx.external().borrow().get(&fn_id) { match ccx.external().borrow().get(&fn_id) {
Some(&Some(node_id)) => { Some(&Some(node_id)) => {
// Already inline // Already inline
debug!("instantiate_inline({}): already inline as node id {}", debug!("instantiate_inline({}): already inline as node id {}",
ccx.tcx().item_path_str(fn_id), node_id); tcx.item_path_str(fn_id), node_id);
let node_def_id = ccx.tcx().map.local_def_id(node_id); let node_def_id = tcx.map.local_def_id(node_id);
return Some(node_def_id); return Some(node_def_id);
} }
Some(&None) => { Some(&None) => {
@ -39,7 +41,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
} }
} }
let inlined = ccx.tcx().sess.cstore.maybe_get_item_ast(ccx.tcx(), fn_id); let inlined = tcx.sess.cstore.maybe_get_item_ast(tcx, fn_id);
let inline_id = match inlined { let inline_id = match inlined {
FoundAst::NotFound => { FoundAst::NotFound => {
ccx.external().borrow_mut().insert(fn_id, None); ccx.external().borrow_mut().insert(fn_id, None);
@ -52,38 +54,27 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1); ccx.stats().n_inlines.set(ccx.stats().n_inlines.get() + 1);
trans_item(ccx, item); trans_item(ccx, item);
let linkage = match item.node { if let hir::ItemFn(_, _, _, _, ref generics, _) = item.node {
hir::ItemFn(_, _, _, _, ref generics, _) => { // Generics have no symbol, so they can't be given any linkage.
if generics.is_type_parameterized() { if !generics.is_type_parameterized() {
// Generics have no symbol, so they can't be given any let linkage = if ccx.sess().opts.cg.codegen_units == 1 {
// linkage. // We could use AvailableExternallyLinkage here,
None // but InternalLinkage allows LLVM to optimize more
// aggressively (at the cost of sometimes
// duplicating code).
InternalLinkage
} else { } else {
if ccx.sess().opts.cg.codegen_units == 1 { // With multiple compilation units, duplicated code
// We could use AvailableExternallyLinkage here, // is more of a problem. Also, `codegen_units > 1`
// but InternalLinkage allows LLVM to optimize more // means the user is okay with losing some
// aggressively (at the cost of sometimes // performance.
// duplicating code). AvailableExternallyLinkage
Some(InternalLinkage) };
} else { let empty_substs = tcx.mk_substs(Substs::trans_empty());
// With multiple compilation units, duplicated code let def_id = tcx.map.local_def_id(item.id);
// is more of a problem. Also, `codegen_units > 1` let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
// means the user is okay with losing some SetLinkage(llfn, linkage);
// performance.
Some(AvailableExternallyLinkage)
}
}
} }
hir::ItemConst(..) => None,
_ => unreachable!(),
};
match linkage {
Some(linkage) => {
let g = get_item_val(ccx, item.id);
SetLinkage(g, linkage);
}
None => {}
} }
item.id item.id
@ -93,7 +84,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
ccx.external_srcs().borrow_mut().insert(item.id, fn_id); ccx.external_srcs().borrow_mut().insert(item.id, fn_id);
item.id item.id
} }
FoundAst::FoundParent(parent_id, &InlinedItem::Item(ref item)) => { FoundAst::FoundParent(parent_id, item) => {
ccx.external().borrow_mut().insert(parent_id, Some(item.id)); ccx.external().borrow_mut().insert(parent_id, Some(item.id));
ccx.external_srcs().borrow_mut().insert(item.id, parent_id); ccx.external_srcs().borrow_mut().insert(item.id, parent_id);
@ -101,7 +92,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
match item.node { match item.node {
hir::ItemEnum(ref ast_def, _) => { hir::ItemEnum(ref ast_def, _) => {
let ast_vs = &ast_def.variants; let ast_vs = &ast_def.variants;
let ty_vs = &ccx.tcx().lookup_adt_def(parent_id).variants; let ty_vs = &tcx.lookup_adt_def(parent_id).variants;
assert_eq!(ast_vs.len(), ty_vs.len()); assert_eq!(ast_vs.len(), ty_vs.len());
for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) { for (ast_v, ty_v) in ast_vs.iter().zip(ty_vs.iter()) {
if ty_v.did == fn_id { my_id = ast_v.node.data.id(); } if ty_v.did == fn_id { my_id = ast_v.node.data.id(); }
@ -120,13 +111,8 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
_ => ccx.sess().bug("instantiate_inline: item has a \ _ => ccx.sess().bug("instantiate_inline: item has a \
non-enum, non-struct parent") non-enum, non-struct parent")
} }
trans_item(ccx, &item);
my_id my_id
} }
FoundAst::FoundParent(_, _) => {
ccx.sess().bug("maybe_get_item_ast returned a FoundParent \
with a non-item parent");
}
FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => { FoundAst::Found(&InlinedItem::TraitItem(_, ref trait_item)) => {
ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id)); ccx.external().borrow_mut().insert(fn_id, Some(trait_item.id));
ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id); ccx.external_srcs().borrow_mut().insert(trait_item.id, fn_id);
@ -137,10 +123,10 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
// the logic to do that already exists in `middle`. In order to // the logic to do that already exists in `middle`. In order to
// reuse that code, it needs to be able to look up the traits for // reuse that code, it needs to be able to look up the traits for
// inlined items. // inlined items.
let ty_trait_item = ccx.tcx().impl_or_trait_item(fn_id).clone(); let ty_trait_item = tcx.impl_or_trait_item(fn_id).clone();
let trait_item_def_id = ccx.tcx().map.local_def_id(trait_item.id); let trait_item_def_id = tcx.map.local_def_id(trait_item.id);
ccx.tcx().impl_or_trait_items.borrow_mut() tcx.impl_or_trait_items.borrow_mut()
.insert(trait_item_def_id, ty_trait_item); .insert(trait_item_def_id, ty_trait_item);
// If this is a default method, we can't look up the // If this is a default method, we can't look up the
// impl type. But we aren't going to translate anyways, so // impl type. But we aren't going to translate anyways, so
@ -155,18 +141,18 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
// Translate monomorphic impl methods immediately. // Translate monomorphic impl methods immediately.
if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node { if let hir::ImplItemKind::Method(ref sig, ref body) = impl_item.node {
let impl_tpt = ccx.tcx().lookup_item_type(impl_did); let impl_tpt = tcx.lookup_item_type(impl_did);
if impl_tpt.generics.types.is_empty() && if impl_tpt.generics.types.is_empty() &&
sig.generics.ty_params.is_empty() { sig.generics.ty_params.is_empty() {
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty()); let empty_substs = tcx.mk_substs(Substs::trans_empty());
let llfn = get_item_val(ccx, impl_item.id); let def_id = tcx.map.local_def_id(impl_item.id);
let llfn = Callee::def(ccx, def_id, empty_substs).reify(ccx).val;
trans_fn(ccx, trans_fn(ccx,
&sig.decl, &sig.decl,
body, body,
llfn, llfn,
empty_substs, empty_substs,
impl_item.id, impl_item.id);
&impl_item.attrs);
// See linkage comments on items. // See linkage comments on items.
if ccx.sess().opts.cg.codegen_units == 1 { if ccx.sess().opts.cg.codegen_units == 1 {
SetLinkage(llfn, InternalLinkage); SetLinkage(llfn, InternalLinkage);
@ -180,7 +166,7 @@ fn instantiate_inline(ccx: &CrateContext, fn_id: DefId) -> Option<DefId> {
} }
}; };
let inline_def_id = ccx.tcx().map.local_def_id(inline_id); let inline_def_id = tcx.map.local_def_id(inline_id);
Some(inline_def_id) Some(inline_def_id)
} }

View file

@ -18,6 +18,7 @@ use llvm::{ValueRef, TypeKind};
use middle::infer; use middle::infer;
use middle::subst; use middle::subst;
use middle::subst::FnSpace; use middle::subst::FnSpace;
use trans::abi::{Abi, FnType};
use trans::adt; use trans::adt;
use trans::attributes; use trans::attributes;
use trans::base::*; use trans::base::*;
@ -40,19 +41,18 @@ use trans::Disr;
use middle::subst::Substs; use middle::subst::Substs;
use rustc::dep_graph::DepNode; use rustc::dep_graph::DepNode;
use rustc_front::hir; use rustc_front::hir;
use syntax::abi::Abi;
use syntax::ast; use syntax::ast;
use syntax::ptr::P; use syntax::ptr::P;
use syntax::parse::token; use syntax::parse::token;
use rustc::lint; use rustc::lint;
use rustc::session::Session; use rustc::session::Session;
use syntax::codemap::Span; use syntax::codemap::{Span, DUMMY_SP};
use std::cmp::Ordering; use std::cmp::Ordering;
pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Option<ValueRef> { fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
let name = match &*item.name.as_str() { let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32", "sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64", "sqrtf64" => "llvm.sqrt.f64",
"powif32" => "llvm.powi.f32", "powif32" => "llvm.powi.f32",
@ -94,7 +94,7 @@ pub fn get_simple_intrinsic(ccx: &CrateContext, item: &hir::ForeignItem) -> Opti
"assume" => "llvm.assume", "assume" => "llvm.assume",
_ => return None _ => return None
}; };
Some(ccx.get_intrinsic(&name)) Some(ccx.get_intrinsic(&llvm_name))
} }
pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) { pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
@ -171,13 +171,11 @@ pub fn check_intrinsics(ccx: &CrateContext) {
/// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics, /// and in libcore/intrinsics.rs; if you need access to any llvm intrinsics,
/// add them to librustc_trans/trans/context.rs /// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>, pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
node: ast::NodeId,
callee_ty: Ty<'tcx>, callee_ty: Ty<'tcx>,
cleanup_scope: cleanup::CustomScopeIndex, fn_ty: &FnType,
args: callee::CallArgs<'a, 'tcx>, args: callee::CallArgs<'a, 'tcx>,
dest: expr::Dest, dest: expr::Dest,
substs: &'tcx subst::Substs<'tcx>, call_debug_location: DebugLoc)
call_info: NodeIdAndSpan)
-> Result<'blk, 'tcx> { -> Result<'blk, 'tcx> {
let fcx = bcx.fcx; let fcx = bcx.fcx;
let ccx = fcx.ccx; let ccx = fcx.ccx;
@ -185,14 +183,23 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let _icx = push_ctxt("trans_intrinsic_call"); let _icx = push_ctxt("trans_intrinsic_call");
let sig = ccx.tcx().erase_late_bound_regions(callee_ty.fn_sig()); let (def_id, substs, sig) = match callee_ty.sty {
let sig = infer::normalize_associated_type(ccx.tcx(), &sig); ty::TyFnDef(def_id, substs, fty) => {
let sig = tcx.erase_late_bound_regions(&fty.sig);
(def_id, substs, infer::normalize_associated_type(tcx, &sig))
}
_ => unreachable!("expected fn item type, found {}", callee_ty)
};
let arg_tys = sig.inputs; let arg_tys = sig.inputs;
let ret_ty = sig.output; let ret_ty = sig.output;
let foreign_item = tcx.map.expect_foreign_item(node); let name = tcx.item_name(def_id).as_str();
let name = foreign_item.name.as_str();
let call_debug_location = DebugLoc::At(call_info.id, call_info.span); let span = match call_debug_location {
DebugLoc::At(_, span) => span,
DebugLoc::None => fcx.span.unwrap_or(DUMMY_SP)
};
let cleanup_scope = fcx.push_custom_cleanup_scope();
// For `transmute` we can just trans the input expr directly into dest // For `transmute` we can just trans the input expr directly into dest
if name == "transmute" { if name == "transmute" {
@ -213,7 +220,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
if out_type_size != 0 { if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle. // FIXME #19925 Remove this hack after a release cycle.
let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0])); let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
let llfn = Callee::def(ccx, def_id, substs, in_type).reify(ccx).val; let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
let llfnty = val_ty(llfn); let llfnty = val_ty(llfn);
let llresult = match dest { let llresult = match dest {
expr::SaveIn(d) => d, expr::SaveIn(d) => d,
@ -267,7 +274,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
let val = if datum.kind.is_by_ref() { let val = if datum.kind.is_by_ref() {
load_ty(bcx, datum.val, datum.ty) load_ty(bcx, datum.val, datum.ty)
} else { } else {
from_arg_ty(bcx, datum.val, datum.ty) from_immediate(bcx, datum.val)
}; };
let cast_val = BitCast(bcx, val, llret_ty); let cast_val = BitCast(bcx, val, llret_ty);
@ -347,43 +354,6 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
} }
} }
// For `try` we need some custom control flow
if &name[..] == "try" {
if let callee::ArgExprs(ref exprs) = args {
let (func, data, local_ptr) = if exprs.len() != 3 {
ccx.sess().bug("expected three exprs as arguments for \
`try` intrinsic");
} else {
(&exprs[0], &exprs[1], &exprs[2])
};
// translate arguments
let func = unpack_datum!(bcx, expr::trans(bcx, func));
let func = unpack_datum!(bcx, func.to_rvalue_datum(bcx, "func"));
let data = unpack_datum!(bcx, expr::trans(bcx, data));
let data = unpack_datum!(bcx, data.to_rvalue_datum(bcx, "data"));
let local_ptr = unpack_datum!(bcx, expr::trans(bcx, local_ptr));
let local_ptr = local_ptr.to_rvalue_datum(bcx, "local_ptr");
let local_ptr = unpack_datum!(bcx, local_ptr);
let dest = match dest {
expr::SaveIn(d) => d,
expr::Ignore => alloc_ty(bcx, tcx.mk_mut_ptr(tcx.types.i8),
"try_result"),
};
// do the invoke
bcx = try_intrinsic(bcx, func.val, data.val, local_ptr.val, dest,
call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return Result::new(bcx, dest);
} else {
ccx.sess().bug("expected two exprs as arguments for \
`try` intrinsic");
}
}
// save the actual AST arguments for later (some places need to do // save the actual AST arguments for later (some places need to do
// const-evaluation on them) // const-evaluation on them)
let expr_arguments = match args { let expr_arguments = match args {
@ -394,18 +364,19 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
// Push the arguments. // Push the arguments.
let mut llargs = Vec::new(); let mut llargs = Vec::new();
bcx = callee::trans_args(bcx, bcx = callee::trans_args(bcx,
Abi::RustIntrinsic,
fn_ty,
&mut callee::Intrinsic,
args, args,
callee_ty,
&mut llargs, &mut llargs,
cleanup::CustomScope(cleanup_scope), cleanup::CustomScope(cleanup_scope));
Abi::RustIntrinsic);
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean(); fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
// These are the only intrinsic functions that diverge. // These are the only intrinsic functions that diverge.
if name == "abort" { if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap")); let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], None, call_debug_location); Call(bcx, llfn, &[], call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope); fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx); Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to())); return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
@ -437,14 +408,19 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
} }
}; };
let simple = get_simple_intrinsic(ccx, &foreign_item); let simple = get_simple_intrinsic(ccx, &name);
let llval = match (simple, &*name) { let llval = match (simple, &name[..]) {
(Some(llfn), _) => { (Some(llfn), _) => {
Call(bcx, llfn, &llargs, None, call_debug_location) Call(bcx, llfn, &llargs, call_debug_location)
}
(_, "try") => {
bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
call_debug_location);
C_nil(ccx)
} }
(_, "breakpoint") => { (_, "breakpoint") => {
let llfn = ccx.get_intrinsic(&("llvm.debugtrap")); let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
Call(bcx, llfn, &[], None, call_debug_location) Call(bcx, llfn, &[], call_debug_location)
} }
(_, "size_of") => { (_, "size_of") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
@ -454,7 +430,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "size_of_val") => { (_, "size_of_val") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_sized(tcx, tp_ty) { if !type_is_sized(tcx, tp_ty) {
let (llsize, _) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); let (llsize, _) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
llsize llsize
} else { } else {
let lltp_ty = type_of::type_of(ccx, tp_ty); let lltp_ty = type_of::type_of(ccx, tp_ty);
@ -468,7 +445,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "min_align_of_val") => { (_, "min_align_of_val") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_sized(tcx, tp_ty) { if !type_is_sized(tcx, tp_ty) {
let (_, llalign) = glue::size_and_align_of_dst(bcx, tp_ty, llargs[1]); let (_, llalign) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
llalign llalign
} else { } else {
C_uint(ccx, type_of::align_of(ccx, tp_ty)) C_uint(ccx, type_of::align_of(ccx, tp_ty))
@ -505,14 +483,14 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
} }
(_, "init_dropped") => { (_, "init_dropped") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) { if !type_is_zero_size(ccx, tp_ty) {
drop_done_fill_mem(bcx, llresult, tp_ty); drop_done_fill_mem(bcx, llresult, tp_ty);
} }
C_nil(ccx) C_nil(ccx)
} }
(_, "init") => { (_, "init") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
if !return_type_is_void(ccx, tp_ty) { if !type_is_zero_size(ccx, tp_ty) {
// Just zero out the stack slot. (See comment on base::memzero for explanation) // Just zero out the stack slot. (See comment on base::memzero for explanation)
init_zero_mem(bcx, llresult, tp_ty); init_zero_mem(bcx, llresult, tp_ty);
} }
@ -599,21 +577,24 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
} }
(_, "volatile_load") => { (_, "volatile_load") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
}
let load = VolatileLoad(bcx, ptr); let load = VolatileLoad(bcx, ptr);
unsafe { unsafe {
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty)); llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
} }
to_arg_ty(bcx, load, tp_ty) to_immediate(bcx, load, tp_ty)
}, },
(_, "volatile_store") => { (_, "volatile_store") => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let val = if fn_ty.args[1].is_indirect() {
let val = if type_is_immediate(bcx.ccx(), tp_ty) {
from_arg_ty(bcx, llargs[1], tp_ty)
} else {
Load(bcx, llargs[1]) Load(bcx, llargs[1])
} else {
from_immediate(bcx, llargs[1])
}; };
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let store = VolatileStore(bcx, val, ptr); let store = VolatileStore(bcx, val, ptr);
unsafe { unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty)); llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
@ -634,13 +615,13 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
"cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width), "cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
llargs[0], call_debug_location), llargs[0], call_debug_location),
"ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)), "ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, None, call_debug_location), &llargs, call_debug_location),
"bswap" => { "bswap" => {
if width == 8 { if width == 8 {
llargs[0] // byte swap a u8/i8 is just a no-op llargs[0] // byte swap a u8/i8 is just a no-op
} else { } else {
Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)), Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&llargs, None, call_debug_location) &llargs, call_debug_location)
} }
} }
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => { "add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
@ -669,7 +650,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
}, },
None => { None => {
span_invalid_monomorphization_error( span_invalid_monomorphization_error(
tcx.sess, call_info.span, tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \ &format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty)); expected basic integer type, found `{}`", name, sty));
C_null(llret_ty) C_null(llret_ty)
@ -680,8 +661,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, "return_address") => { (_, "return_address") => {
if !fcx.caller_expects_out_pointer { if !fcx.fn_ty.ret.is_indirect() {
span_err!(tcx.sess, call_info.span, E0510, span_err!(tcx.sess, span, E0510,
"invalid use of `return_address` intrinsic: function \ "invalid use of `return_address` intrinsic: function \
does not use out pointer"); does not use out pointer");
C_null(Type::i8p(ccx)) C_null(Type::i8p(ccx))
@ -709,7 +690,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
&llargs, &llargs,
ret_ty, llret_ty, ret_ty, llret_ty,
call_debug_location, call_debug_location,
call_info) span)
} }
// This requires that atomic intrinsics follow a specific naming pattern: // This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst // "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
@ -742,19 +723,17 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
match split[1] { match split[1] {
"cxchg" => { "cxchg" => {
let tp_ty = *substs.types.get(FnSpace, 0); let cmp = from_immediate(bcx, llargs[1]);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let src = from_immediate(bcx, llargs[2]);
let cmp = from_arg_ty(bcx, llargs[1], tp_ty); let ptr = PointerCast(bcx, llargs[0], val_ty(src).ptr_to());
let src = from_arg_ty(bcx, llargs[2], tp_ty);
let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False); let res = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::False);
ExtractValue(bcx, res, 0) ExtractValue(bcx, res, 0)
} }
"cxchgweak" => { "cxchgweak" => {
let tp_ty = *substs.types.get(FnSpace, 0); let cmp = from_immediate(bcx, llargs[1]);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let src = from_immediate(bcx, llargs[2]);
let cmp = from_arg_ty(bcx, llargs[1], tp_ty); let ptr = PointerCast(bcx, llargs[0], val_ty(src).ptr_to());
let src = from_arg_ty(bcx, llargs[2], tp_ty);
let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True); let val = AtomicCmpXchg(bcx, ptr, cmp, src, order, failorder, llvm::True);
let result = ExtractValue(bcx, val, 0); let result = ExtractValue(bcx, val, 0);
let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
@ -765,13 +744,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
"load" => { "load" => {
let tp_ty = *substs.types.get(FnSpace, 0); let tp_ty = *substs.types.get(FnSpace, 0);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let mut ptr = llargs[0];
to_arg_ty(bcx, AtomicLoad(bcx, ptr, order), tp_ty) if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
}
to_immediate(bcx, AtomicLoad(bcx, ptr, order), tp_ty)
} }
"store" => { "store" => {
let tp_ty = *substs.types.get(FnSpace, 0); let val = from_immediate(bcx, llargs[1]);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let val = from_arg_ty(bcx, llargs[1], tp_ty);
AtomicStore(bcx, val, ptr, order); AtomicStore(bcx, val, ptr, order);
C_nil(ccx) C_nil(ccx)
} }
@ -803,9 +784,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
_ => ccx.sess().fatal("unknown atomic operation") _ => ccx.sess().fatal("unknown atomic operation")
}; };
let tp_ty = *substs.types.get(FnSpace, 0); let val = from_immediate(bcx, llargs[1]);
let ptr = to_arg_ty_ptr(bcx, llargs[0], tp_ty); let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let val = from_arg_ty(bcx, llargs[1], tp_ty);
AtomicRMW(bcx, atom_op, ptr, val, order) AtomicRMW(bcx, atom_op, ptr, val, order)
} }
} }
@ -815,8 +795,7 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
(_, _) => { (_, _) => {
let intr = match Intrinsic::find(tcx, &name) { let intr = match Intrinsic::find(tcx, &name) {
Some(intr) => intr, Some(intr) => intr,
None => ccx.sess().span_bug(foreign_item.span, None => unreachable!("unknown intrinsic '{}'", name),
&format!("unknown intrinsic '{}'", name)),
}; };
fn one<T>(x: Vec<T>) -> T { fn one<T>(x: Vec<T>) -> T {
assert_eq!(x.len(), 1); assert_eq!(x.len(), 1);
@ -949,9 +928,8 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
intrinsics::IntrinsicDef::Named(name) => { intrinsics::IntrinsicDef::Named(name) => {
let f = declare::declare_cfn(ccx, let f = declare::declare_cfn(ccx,
name, name,
Type::func(&inputs, &outputs), Type::func(&inputs, &outputs));
tcx.mk_nil()); Call(bcx, f, &llargs, call_debug_location)
Call(bcx, f, &llargs, None, call_debug_location)
} }
}; };
@ -973,7 +951,15 @@ pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
if val_ty(llval) != Type::void(ccx) && if val_ty(llval) != Type::void(ccx) &&
machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 { machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
store_ty(bcx, llval, llresult, ret_ty); if let Some(ty) = fn_ty.ret.cast {
let ptr = PointerCast(bcx, llresult, ty.ptr_to());
let store = Store(bcx, llval, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
}
} else {
store_ty(bcx, llval, llresult, ret_ty);
}
} }
// If we made a temporary stack slot, let's clean it up // If we made a temporary stack slot, let's clean it up
@ -1024,7 +1010,6 @@ fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
Mul(bcx, size, count, DebugLoc::None), Mul(bcx, size, count, DebugLoc::None),
align, align,
C_bool(ccx, volatile)], C_bool(ccx, volatile)],
None,
call_debug_location) call_debug_location)
} }
@ -1054,7 +1039,6 @@ fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
Mul(bcx, size, count, DebugLoc::None), Mul(bcx, size, count, DebugLoc::None),
align, align,
C_bool(ccx, volatile)], C_bool(ccx, volatile)],
None,
call_debug_location) call_debug_location)
} }
@ -1065,7 +1049,7 @@ fn count_zeros_intrinsic(bcx: Block,
-> ValueRef { -> ValueRef {
let y = C_bool(bcx.ccx(), false); let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name); let llfn = bcx.ccx().get_intrinsic(&name);
Call(bcx, llfn, &[val, y], None, call_debug_location) Call(bcx, llfn, &[val, y], call_debug_location)
} }
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
@ -1078,7 +1062,7 @@ fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let llfn = bcx.ccx().get_intrinsic(&name); let llfn = bcx.ccx().get_intrinsic(&name);
// Convert `i1` to a `bool`, and write it to the out parameter // Convert `i1` to a `bool`, and write it to the out parameter
let val = Call(bcx, llfn, &[a, b], None, call_debug_location); let val = Call(bcx, llfn, &[a, b], call_debug_location);
let result = ExtractValue(bcx, val, 0); let result = ExtractValue(bcx, val, 0);
let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx())); let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
Store(bcx, result, StructGEP(bcx, out, 0)); Store(bcx, result, StructGEP(bcx, out, 0));
@ -1094,7 +1078,7 @@ fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
dest: ValueRef, dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> { dloc: DebugLoc) -> Block<'blk, 'tcx> {
if bcx.sess().no_landing_pads() { if bcx.sess().no_landing_pads() {
Call(bcx, func, &[data], None, dloc); Call(bcx, func, &[data], dloc);
Store(bcx, C_null(Type::i8p(bcx.ccx())), dest); Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
bcx bcx
} else if wants_msvc_seh(bcx.sess()) { } else if wants_msvc_seh(bcx.sess()) {
@ -1165,9 +1149,9 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// More information can be found in libstd's seh.rs implementation. // More information can be found in libstd's seh.rs implementation.
let slot = Alloca(bcx, Type::i8p(ccx), "slot"); let slot = Alloca(bcx, Type::i8p(ccx), "slot");
let localescape = ccx.get_intrinsic(&"llvm.localescape"); let localescape = ccx.get_intrinsic(&"llvm.localescape");
Call(bcx, localescape, &[slot], None, dloc); Call(bcx, localescape, &[slot], dloc);
Store(bcx, local_ptr, slot); Store(bcx, local_ptr, slot);
Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, None, dloc); Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
Ret(normal, C_i32(ccx, 0), dloc); Ret(normal, C_i32(ccx, 0), dloc);
@ -1184,7 +1168,7 @@ fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc); let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest); Store(bcx, ret, dest);
return bcx return bcx
} }
@ -1208,6 +1192,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
dloc: DebugLoc) -> Block<'blk, 'tcx> { dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| { let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let tcx = ccx.tcx();
let dloc = DebugLoc::None; let dloc = DebugLoc::None;
// Translates the shims described above: // Translates the shims described above:
@ -1228,10 +1213,11 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// managed by the standard library. // managed by the standard library.
attributes::emit_uwtable(bcx.fcx.llfn, true); attributes::emit_uwtable(bcx.fcx.llfn, true);
let catch_pers = match bcx.tcx().lang_items.eh_personality_catch() { let catch_pers = match tcx.lang_items.eh_personality_catch() {
Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), Some(did) => {
bcx.fcx.param_substs).val, Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
None => bcx.tcx().sess.bug("eh_personality_catch not defined"), }
None => ccx.sess().bug("eh_personality_catch not defined"),
}; };
let then = bcx.fcx.new_temp_block("then"); let then = bcx.fcx.new_temp_block("then");
@ -1240,7 +1226,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let func = llvm::get_param(bcx.fcx.llfn, 0); let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1); let data = llvm::get_param(bcx.fcx.llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2); let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
Invoke(bcx, func, &[data], then.llbb, catch.llbb, None, dloc); Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
Ret(then, C_i32(ccx, 0), dloc); Ret(then, C_i32(ccx, 0), dloc);
// Type indicator for the exception being thrown. // Type indicator for the exception being thrown.
@ -1260,7 +1246,7 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Note that no invoke is used here because by definition this function // Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching). // can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], None, dloc); let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest); Store(bcx, ret, dest);
return bcx; return bcx;
} }
@ -1269,21 +1255,32 @@ fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// This is currently primarily used for the `try` intrinsic functions above. // This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>, fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str, name: &str,
ty: Ty<'tcx>, inputs: Vec<Ty<'tcx>>,
output: ty::FnOutput<'tcx>, output: ty::FnOutput<'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef { -> ValueRef {
let ccx = fcx.ccx; let ccx = fcx.ccx;
let llfn = declare::define_internal_rust_fn(ccx, name, ty); let sig = ty::FnSig {
inputs: inputs,
output: output,
variadic: false,
};
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(sig)
});
let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
let empty_substs = ccx.tcx().mk_substs(Substs::trans_empty());
let (fcx, block_arena); let (fcx, block_arena);
block_arena = TypedArena::new(); block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, llfn, ast::DUMMY_NODE_ID, false, fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
output, ccx.tcx().mk_substs(Substs::trans_empty()), let bcx = fcx.init(true, None);
None, &block_arena);
let bcx = init_function(&fcx, true, output);
trans(bcx); trans(bcx);
fcx.cleanup(); fcx.cleanup();
return llfn llfn
} }
// Helper function used to get a handle to the `__rust_try` function used to // Helper function used to get a handle to the `__rust_try` function used to
@ -1294,8 +1291,8 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>)) trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef { -> ValueRef {
let ccx = fcx.ccx; let ccx = fcx.ccx;
if let Some(llfn) = *ccx.rust_try_fn().borrow() { if let Some(llfn) = ccx.rust_try_fn().get() {
return llfn return llfn;
} }
// Define the type up front for the signature of the rust_try function. // Define the type up front for the signature of the rust_try function.
@ -1311,18 +1308,8 @@ fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
}), }),
}); });
let output = ty::FnOutput::FnConverging(tcx.types.i32); let output = ty::FnOutput::FnConverging(tcx.types.i32);
let try_fn_ty = ty::BareFnTy { let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
unsafety: hir::Unsafety::Unsafe, ccx.rust_try_fn().set(Some(rust_try));
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![fn_ty, i8p, i8p],
output: output,
variadic: false,
}),
};
let rust_try = gen_fn(fcx, "__rust_try", tcx.mk_fn_ptr(try_fn_ty), output,
trans);
*ccx.rust_try_fn().borrow_mut() = Some(rust_try);
return rust_try return rust_try
} }
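As a rough model of what the `__rust_try` shim produced through `gen_fn` does at runtime: the real thing is emitted directly as LLVM IR with a landing pad or SEH filter, but its control flow is roughly the sketch below (using today's `std::panic::catch_unwind` as a stand-in; the name `rust_try_model` is illustrative):

    // Illustrative only: call `func(data)`, catch any panic, and report it as
    // a non-zero return value, which is what __rust_try's caller expects.
    unsafe fn rust_try_model(func: fn(*mut u8), data: *mut u8) -> i32 {
        match std::panic::catch_unwind(|| func(data)) {
            Ok(()) => 0,
            Err(_) => 1,
        }
    }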
@ -1341,9 +1328,10 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
let tcx = ccx.tcx(); let tcx = ccx.tcx();
let dloc = DebugLoc::None; let dloc = DebugLoc::None;
let rust_try_filter = match ccx.tcx().lang_items.msvc_try_filter() { let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
Some(did) => callee::trans_fn_ref(ccx, did, ExprId(0), Some(did) => {
fcx.param_substs).val, Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
}
None => ccx.sess().bug("msvc_try_filter not defined"), None => ccx.sess().bug("msvc_try_filter not defined"),
}; };
@ -1373,11 +1361,10 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// For more info, see seh.rs in the standard library. // For more info, see seh.rs in the standard library.
let do_trans = |bcx: Block, ehptrs, base_pointer| { let do_trans = |bcx: Block, ehptrs, base_pointer| {
let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx)); let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
None, dloc);
let arg = Call(bcx, localrecover, let arg = Call(bcx, localrecover,
&[rust_try_fn, parentfp, C_i32(ccx, 0)], None, dloc); &[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], None, dloc); let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
Ret(bcx, ret, dloc); Ret(bcx, ret, dloc);
}; };
@ -1389,17 +1376,8 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// going on here, all I can say is that there are a few test cases in // going on here, all I can say is that there are a few test cases in
// LLVM's test suite which follow this pattern of instructions, so we // LLVM's test suite which follow this pattern of instructions, so we
// just do the same. // just do the same.
let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
unsafety: hir::Unsafety::Unsafe, let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![],
output: output,
variadic: false,
}),
});
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], None, dloc);
let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]); let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to())); let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
do_trans(bcx, exn, ebp); do_trans(bcx, exn, ebp);
@ -1408,16 +1386,7 @@ fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
// Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer // Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
// are passed in as arguments to the filter function, so we just pass // are passed in as arguments to the filter function, so we just pass
// those along. // those along.
let filter_fn_ty = tcx.mk_fn_ptr(ty::BareFnTy { gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![i8p, i8p],
output: output,
variadic: false,
}),
});
gen_fn(fcx, "__rustc_try_filter", filter_fn_ty, output, &mut |bcx| {
let exn = llvm::get_param(bcx.fcx.llfn, 0); let exn = llvm::get_param(bcx.fcx.llfn, 0);
let rbp = llvm::get_param(bcx.fcx.llfn, 1); let rbp = llvm::get_param(bcx.fcx.llfn, 1);
do_trans(bcx, exn, rbp); do_trans(bcx, exn, rbp);
@ -1441,7 +1410,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
ret_ty: Ty<'tcx>, ret_ty: Ty<'tcx>,
llret_ty: Type, llret_ty: Type,
call_debug_location: DebugLoc, call_debug_location: DebugLoc,
call_info: NodeIdAndSpan) -> ValueRef span: Span) -> ValueRef
{ {
// macros for error handling: // macros for error handling:
macro_rules! emit_error { macro_rules! emit_error {
@ -1450,7 +1419,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
}; };
($msg: tt, $($fmt: tt)*) => { ($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error( span_invalid_monomorphization_error(
bcx.sess(), call_info.span, bcx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", &format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg), $msg),
name, $($fmt)*)); name, $($fmt)*));
@ -1519,7 +1488,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
if name.starts_with("simd_shuffle") { if name.starts_with("simd_shuffle") {
let n: usize = match name["simd_shuffle".len()..].parse() { let n: usize = match name["simd_shuffle".len()..].parse() {
Ok(n) => n, Ok(n) => n,
Err(_) => tcx.sess.span_bug(call_info.span, Err(_) => tcx.sess.span_bug(span,
"bad `simd_shuffle` instruction only caught in trans?") "bad `simd_shuffle` instruction only caught in trans?")
}; };
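For orientation, the suffix being parsed here is the output lane count of the shuffle; a user-facing declaration of such an intrinsic looks roughly like the sketch below (illustrative, not taken from this diff):

    // The trailing 4 is the `n` extracted above; the shuffle indices arrive
    // as a constant array of lane numbers into the concatenation of x and y.
    extern "platform-intrinsic" {
        fn simd_shuffle4<T, U>(x: T, y: T, idx: [u32; 4]) -> U;
    }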
@ -1537,22 +1506,26 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
let total_len = in_len as u64 * 2; let total_len = in_len as u64 * 2;
let vector = match args { let (vector, indirect) = match args {
Some(args) => &args[2], Some(args) => {
None => bcx.sess().span_bug(call_info.span, match consts::const_expr(bcx.ccx(), &args[2], substs, None,
"intrinsic call with unexpected argument shape"), // this should probably help simd error reporting
}; consts::TrueConst::Yes) {
let vector = match consts::const_expr(bcx.ccx(), vector, substs, None, Ok((vector, _)) => (vector, false),
consts::TrueConst::Yes, // this should probably help simd error reporting Err(err) => bcx.sess().span_fatal(span, &err.description()),
) { }
Ok((vector, _)) => vector, }
Err(err) => bcx.sess().span_fatal(call_info.span, &err.description()), None => (llargs[2], !type_is_immediate(bcx.ccx(), arg_tys[2]))
}; };
let indices: Option<Vec<_>> = (0..n) let indices: Option<Vec<_>> = (0..n)
.map(|i| { .map(|i| {
let arg_idx = i; let arg_idx = i;
let val = const_get_elt(bcx.ccx(), vector, &[i as libc::c_uint]); let val = if indirect {
Load(bcx, StructGEP(bcx, vector, i))
} else {
const_get_elt(vector, &[i as libc::c_uint])
};
let c = const_to_opt_uint(val); let c = const_to_opt_uint(val);
match c { match c {
None => { None => {
@ -1689,7 +1662,7 @@ fn generic_simd_intrinsic<'blk, 'tcx, 'a>
simd_or: TyUint, TyInt => Or; simd_or: TyUint, TyInt => Or;
simd_xor: TyUint, TyInt => Xor; simd_xor: TyUint, TyInt => Xor;
} }
bcx.sess().span_bug(call_info.span, "unknown SIMD intrinsic"); bcx.sess().span_bug(span, "unknown SIMD intrinsic");
} }
// Returns the width of an int TypeVariant, and if it's signed or not // Returns the width of an int TypeVariant, and if it's signed or not


@ -1,36 +0,0 @@
// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use trans::context::CrateContext;
use trans::type_::Type;
use llvm::ValueRef;
pub trait LlvmRepr {
fn llrepr(&self, ccx: &CrateContext) -> String;
}
impl<T:LlvmRepr> LlvmRepr for [T] {
fn llrepr(&self, ccx: &CrateContext) -> String {
let reprs: Vec<String> = self.iter().map(|t| t.llrepr(ccx)).collect();
format!("[{}]", reprs.join(","))
}
}
impl LlvmRepr for Type {
fn llrepr(&self, ccx: &CrateContext) -> String {
ccx.tn().type_to_string(*self)
}
}
impl LlvmRepr for ValueRef {
fn llrepr(&self, ccx: &CrateContext) -> String {
ccx.tn().val_to_string(*self)
}
}


@ -18,14 +18,13 @@ use middle::infer;
use middle::subst::{Subst, Substs}; use middle::subst::{Subst, Substs};
use middle::subst; use middle::subst;
use middle::traits::{self, ProjectionMode}; use middle::traits::{self, ProjectionMode};
use trans::abi::FnType;
use trans::base::*; use trans::base::*;
use trans::build::*; use trans::build::*;
use trans::callee::{Callee, Virtual, ArgVals, use trans::callee::{Callee, Virtual, ArgVals, trans_fn_pointer_shim};
trans_fn_pointer_shim, trans_fn_ref_with_substs};
use trans::closure; use trans::closure;
use trans::common::*; use trans::common::*;
use trans::consts; use trans::consts;
use trans::datum::*;
use trans::debuginfo::DebugLoc; use trans::debuginfo::DebugLoc;
use trans::declare; use trans::declare;
use trans::expr; use trans::expr;
@ -33,158 +32,25 @@ use trans::glue;
use trans::machine; use trans::machine;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of::*; use trans::type_of::*;
use trans::value::Value;
use middle::ty::{self, Ty, TyCtxt, TypeFoldable}; use middle::ty::{self, Ty, TyCtxt, TypeFoldable};
use syntax::ast::{self, Name}; use syntax::ast::Name;
use syntax::attr;
use syntax::codemap::DUMMY_SP; use syntax::codemap::DUMMY_SP;
use rustc_front::hir;
// drop_glue pointer, size, align. // drop_glue pointer, size, align.
const VTABLE_OFFSET: usize = 3; const VTABLE_OFFSET: usize = 3;
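The layout that `VTABLE_OFFSET` encodes can be pictured with the purely illustrative struct below (the real vtable is an LLVM constant, not a Rust type):

    // Slot layout of a trait object's vtable as assumed by this module.
    #[repr(C)]
    struct VtableModel {
        drop_glue: unsafe fn(*mut ()), // slot 0
        size: usize,                   // slot 1
        align: usize,                  // slot 2
        // slots 3, 4, ...: one function pointer per trait method, which is
        // why get_virtual_method below indexes at VTABLE_OFFSET + i.
    }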
/// The main "translation" pass for methods. Generates code /// Extracts a method from a trait object's vtable, at the specified index.
/// for non-monomorphized methods only. Other methods will
/// be generated once they are invoked with specific type parameters,
/// see `trans::base::lval_static_fn()` or `trans::base::monomorphic_fn()`.
pub fn trans_impl(ccx: &CrateContext,
name: ast::Name,
impl_items: &[hir::ImplItem],
generics: &hir::Generics,
id: ast::NodeId) {
let _icx = push_ctxt("meth::trans_impl");
let tcx = ccx.tcx();
debug!("trans_impl(name={}, id={})", name, id);
// Both here and below with generic methods, be sure to recurse and look for
// items that we need to translate.
if !generics.ty_params.is_empty() {
return;
}
for impl_item in impl_items {
match impl_item.node {
hir::ImplItemKind::Method(ref sig, ref body) => {
if sig.generics.ty_params.is_empty() {
let trans_everywhere = attr::requests_inline(&impl_item.attrs);
for (ref ccx, is_origin) in ccx.maybe_iter(trans_everywhere) {
let llfn = get_item_val(ccx, impl_item.id);
let empty_substs = tcx.mk_substs(Substs::trans_empty());
trans_fn(ccx,
&sig.decl,
body,
llfn,
empty_substs,
impl_item.id,
&impl_item.attrs);
update_linkage(ccx,
llfn,
Some(impl_item.id),
if is_origin { OriginalTranslation } else { InlinedCopy });
}
}
}
_ => {}
}
}
}
/// Compute the appropriate callee, given a method's ID, trait ID,
/// substitutions and a Vtable for that trait.
pub fn callee_for_trait_impl<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
method_id: DefId,
substs: &'tcx subst::Substs<'tcx>,
trait_id: DefId,
method_ty: Ty<'tcx>,
vtable: traits::Vtable<'tcx, ()>)
-> Callee<'tcx> {
let _icx = push_ctxt("meth::callee_for_trait_impl");
match vtable {
traits::VtableImpl(vtable_impl) => {
let impl_did = vtable_impl.impl_def_id;
let mname = ccx.tcx().item_name(method_id);
// create a concatenated set of substitutions which includes
// those from the impl and those from the method:
let impl_substs = vtable_impl.substs.with_method_from(&substs);
let substs = ccx.tcx().mk_substs(impl_substs);
let mth = get_impl_method(ccx.tcx(), impl_did, substs, mname);
// Translate the function, bypassing Callee::def.
// That is because default methods have the same ID as the
// trait method used to look up the impl method that ended
// up here, so calling Callee::def would infinitely recurse.
Callee::ptr(trans_fn_ref_with_substs(ccx, mth.method.def_id,
Some(method_ty), mth.substs))
}
traits::VtableClosure(vtable_closure) => {
// The substitutions should have no type parameters remaining
// after passing through fulfill_obligation
let trait_closure_kind = ccx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = closure::trans_closure_method(ccx,
vtable_closure.closure_def_id,
vtable_closure.substs,
trait_closure_kind);
let fn_ptr_ty = match method_ty.sty {
ty::TyFnDef(_, _, fty) => ccx.tcx().mk_ty(ty::TyFnPtr(fty)),
_ => unreachable!("expected fn item type, found {}",
method_ty)
};
Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
}
traits::VtableFnPointer(fn_ty) => {
let trait_closure_kind = ccx.tcx().lang_items.fn_trait_kind(trait_id).unwrap();
let llfn = trans_fn_pointer_shim(ccx, trait_closure_kind, fn_ty);
let fn_ptr_ty = match method_ty.sty {
ty::TyFnDef(_, _, fty) => ccx.tcx().mk_ty(ty::TyFnPtr(fty)),
_ => unreachable!("expected fn item type, found {}",
method_ty)
};
Callee::ptr(immediate_rvalue(llfn, fn_ptr_ty))
}
traits::VtableObject(ref data) => {
Callee {
data: Virtual(traits::get_vtable_index_of_object_method(
ccx.tcx(), data, method_id)),
ty: method_ty
}
}
traits::VtableBuiltin(..) |
traits::VtableDefaultImpl(..) |
traits::VtableParam(..) => {
ccx.sess().bug(
&format!("resolved vtable bad vtable {:?} in trans",
vtable));
}
}
}
/// Extracts a method from a trait object's vtable, at the
/// specified index, and casts it to the given type.
pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
llvtable: ValueRef, llvtable: ValueRef,
vtable_index: usize, vtable_index: usize)
method_ty: Ty<'tcx>) -> ValueRef {
-> Datum<'tcx, Rvalue> {
let _icx = push_ctxt("meth::get_virtual_method");
let ccx = bcx.ccx();
// Load the data pointer from the object. // Load the data pointer from the object.
debug!("get_virtual_method(callee_ty={}, vtable_index={}, llvtable={})", debug!("get_virtual_method(vtable_index={}, llvtable={:?})",
method_ty, vtable_index, Value(llvtable));
vtable_index,
bcx.val_to_string(llvtable));
let mptr = Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET])); Load(bcx, GEPi(bcx, llvtable, &[vtable_index + VTABLE_OFFSET]))
// Replace the self type (&Self or Box<Self>) with an opaque pointer.
if let ty::TyFnDef(_, _, fty) = method_ty.sty {
let opaque_ty = opaque_method_ty(ccx.tcx(), fty);
immediate_rvalue(PointerCast(bcx, mptr, type_of(ccx, opaque_ty)), opaque_ty)
} else {
immediate_rvalue(mptr, method_ty)
}
} }
/// Generate a shim function that allows an object type like `SomeTrait` to /// Generate a shim function that allows an object type like `SomeTrait` to
@ -211,7 +77,7 @@ pub fn get_virtual_method<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>, pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
method_ty: Ty<'tcx>, method_ty: Ty<'tcx>,
vtable_index: usize) vtable_index: usize)
-> Datum<'tcx, Rvalue> { -> ValueRef {
let _icx = push_ctxt("trans_object_shim"); let _icx = push_ctxt("trans_object_shim");
let tcx = ccx.tcx(); let tcx = ccx.tcx();
@ -219,58 +85,40 @@ pub fn trans_object_shim<'a, 'tcx>(ccx: &'a CrateContext<'a, 'tcx>,
vtable_index, vtable_index,
method_ty); method_ty);
let ret_ty = tcx.erase_late_bound_regions(&method_ty.fn_ret()); let sig = tcx.erase_late_bound_regions(&method_ty.fn_sig());
let ret_ty = infer::normalize_associated_type(tcx, &ret_ty); let sig = infer::normalize_associated_type(tcx, &sig);
let fn_ty = FnType::new(ccx, method_ty.fn_abi(), &sig, &[]);
let shim_fn_ty = match method_ty.sty { let function_name = link::mangle_internal_name_by_type_and_seq(ccx, method_ty, "object_shim");
ty::TyFnDef(_, _, fty) => tcx.mk_ty(ty::TyFnPtr(fty)), let llfn = declare::define_internal_fn(ccx, &function_name, method_ty);
_ => unreachable!("expected fn item type, found {}", method_ty)
};
//
let function_name = link::mangle_internal_name_by_type_and_seq(ccx, shim_fn_ty, "object_shim");
let llfn = declare::define_internal_rust_fn(ccx, &function_name, shim_fn_ty);
let empty_substs = tcx.mk_substs(Substs::trans_empty()); let empty_substs = tcx.mk_substs(Substs::trans_empty());
let (block_arena, fcx): (TypedArena<_>, FunctionContext); let (block_arena, fcx): (TypedArena<_>, FunctionContext);
block_arena = TypedArena::new(); block_arena = TypedArena::new();
fcx = new_fn_ctxt(ccx, fcx = FunctionContext::new(ccx, llfn, fn_ty, None, empty_substs, &block_arena);
llfn, let mut bcx = fcx.init(false, None);
ast::DUMMY_NODE_ID,
false,
ret_ty,
empty_substs,
None,
&block_arena);
let mut bcx = init_function(&fcx, false, ret_ty);
let llargs = get_params(fcx.llfn);
let self_idx = fcx.arg_offset();
let llself = llargs[self_idx];
let llvtable = llargs[self_idx + 1];
debug!("trans_object_shim: llself={}, llvtable={}",
bcx.val_to_string(llself), bcx.val_to_string(llvtable));
assert!(!fcx.needs_ret_allocas); assert!(!fcx.needs_ret_allocas);
let dest = let dest =
fcx.llretslotptr.get().map( fcx.llretslotptr.get().map(
|_| expr::SaveIn(fcx.get_ret_slot(bcx, ret_ty, "ret_slot"))); |_| expr::SaveIn(fcx.get_ret_slot(bcx, "ret_slot")));
debug!("trans_object_shim: method_offset_in_vtable={}", debug!("trans_object_shim: method_offset_in_vtable={}",
vtable_index); vtable_index);
let llargs = get_params(fcx.llfn);
let args = ArgVals(&llargs[fcx.fn_ty.ret.is_indirect() as usize..]);
let callee = Callee { let callee = Callee {
data: Virtual(vtable_index), data: Virtual(vtable_index),
ty: method_ty ty: method_ty
}; };
bcx = callee.call(bcx, DebugLoc::None, ArgVals(&llargs[self_idx..]), dest).bcx; bcx = callee.call(bcx, DebugLoc::None, args, dest).bcx;
finish_fn(&fcx, bcx, ret_ty, DebugLoc::None); fcx.finish(bcx, DebugLoc::None);
immediate_rvalue(llfn, shim_fn_ty) llfn
} }
/// Creates and returns a dynamic vtable for the given type and vtable origin. /// Creates and returns a dynamic vtable for the given type and vtable origin.
@ -311,17 +159,9 @@ pub fn get_vtable<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let nullptr = C_null(Type::nil(ccx).ptr_to()); let nullptr = C_null(Type::nil(ccx).ptr_to());
get_vtable_methods(ccx, id, substs) get_vtable_methods(ccx, id, substs)
.into_iter() .into_iter()
.map(|opt_mth| { .map(|opt_mth| opt_mth.map_or(nullptr, |mth| {
match opt_mth { Callee::def(ccx, mth.method.def_id, &mth.substs).reify(ccx).val
Some(mth) => { }))
trans_fn_ref_with_substs(ccx,
mth.method.def_id,
None,
&mth.substs).val
}
None => nullptr
}
})
.collect::<Vec<_>>() .collect::<Vec<_>>()
.into_iter() .into_iter()
} }
@ -452,23 +292,6 @@ pub fn get_vtable_methods<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
.collect() .collect()
} }
/// Replace the self type (&Self or Box<Self>) with an opaque pointer.
fn opaque_method_ty<'tcx>(tcx: &TyCtxt<'tcx>, method_ty: &ty::BareFnTy<'tcx>)
-> Ty<'tcx> {
let mut inputs = method_ty.sig.0.inputs.clone();
inputs[0] = tcx.mk_mut_ptr(tcx.mk_mach_int(ast::IntTy::I8));
tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: method_ty.unsafety,
abi: method_ty.abi,
sig: ty::Binder(ty::FnSig {
inputs: inputs,
output: method_ty.sig.0.output,
variadic: method_ty.sig.0.variadic,
}),
})
}
#[derive(Debug)] #[derive(Debug)]
pub struct ImplMethod<'tcx> { pub struct ImplMethod<'tcx> {
pub method: Rc<ty::Method<'tcx>>, pub method: Rc<ty::Method<'tcx>>,


@ -8,33 +8,35 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
use llvm::{BasicBlockRef, ValueRef, OperandBundleDef}; use llvm::{self, BasicBlockRef, ValueRef, OperandBundleDef};
use rustc::middle::ty; use rustc::middle::ty;
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use syntax::abi::Abi; use trans::abi::{Abi, FnType};
use trans::adt; use trans::adt;
use trans::attributes;
use trans::base; use trans::base;
use trans::build; use trans::build;
use trans::callee::{Callee, Fn, Virtual}; use trans::callee::{Callee, CalleeData, Fn, Intrinsic, NamedTupleConstructor, Virtual};
use trans::common::{self, Block, BlockAndBuilder}; use trans::common::{self, Block, BlockAndBuilder, C_undef};
use trans::debuginfo::DebugLoc; use trans::debuginfo::DebugLoc;
use trans::Disr; use trans::Disr;
use trans::foreign; use trans::machine::{llalign_of_min, llbitsize_of_real};
use trans::meth; use trans::meth;
use trans::type_of; use trans::type_of;
use trans::glue; use trans::glue;
use trans::type_::Type; use trans::type_::Type;
use super::{MirContext, drop}; use super::{MirContext, drop};
use super::operand::OperandValue::{FatPtr, Immediate, Ref}; use super::lvalue::{LvalueRef, load_fat_ptr};
use super::operand::OperandRef;
use super::operand::OperandValue::{self, FatPtr, Immediate, Ref};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_block(&mut self, bb: mir::BasicBlock) { pub fn trans_block(&mut self, bb: mir::BasicBlock) {
debug!("trans_block({:?})", bb); debug!("trans_block({:?})", bb);
let mut bcx = self.bcx(bb); let mut bcx = self.bcx(bb);
let data = self.mir.basic_block_data(bb); let mir = self.mir.clone();
let data = mir.basic_block_data(bb);
// MSVC SEH bits // MSVC SEH bits
let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) { let (cleanup_pad, cleanup_bundle) = if let Some((cp, cb)) = self.make_cleanup_pad(bb) {
@ -104,6 +106,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => { mir::Terminator::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
let (otherwise, targets) = targets.split_last().unwrap(); let (otherwise, targets) = targets.split_last().unwrap();
let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval); let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
let switch = bcx.switch(discr, self.llblock(*otherwise), values.len()); let switch = bcx.switch(discr, self.llblock(*otherwise), values.len());
for (value, target) in values.iter().zip(targets) { for (value, target) in values.iter().zip(targets) {
let llval = self.trans_constval(&bcx, value, switch_ty).immediate(); let llval = self.trans_constval(&bcx, value, switch_ty).immediate();
@ -113,9 +116,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} }
mir::Terminator::Return => { mir::Terminator::Return => {
let return_ty = bcx.monomorphize(&self.mir.return_ty);
bcx.with_block(|bcx| { bcx.with_block(|bcx| {
base::build_return_block(self.fcx, bcx, return_ty, DebugLoc::None); self.fcx.build_return_block(bcx, DebugLoc::None);
}) })
} }
@ -141,11 +143,10 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
&[llvalue], &[llvalue],
self.llblock(target), self.llblock(target),
unwind.llbb(), unwind.llbb(),
cleanup_bundle.as_ref(), cleanup_bundle.as_ref());
None);
self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty)); self.bcx(target).at_start(|bcx| drop::drop_fill(bcx, lvalue.llval, ty));
} else { } else {
bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref(), None); bcx.call(drop_fn, &[llvalue], cleanup_bundle.as_ref());
drop::drop_fill(&bcx, lvalue.llval, ty); drop::drop_fill(&bcx, lvalue.llval, ty);
funclet_br(bcx, self.llblock(target)); funclet_br(bcx, self.llblock(target));
} }
@ -154,190 +155,341 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => { mir::Terminator::Call { ref func, ref args, ref destination, ref cleanup } => {
// Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar. // Create the callee. This is a fn ptr or zero-sized and hence a kind of scalar.
let callee = self.trans_operand(&bcx, func); let callee = self.trans_operand(&bcx, func);
let debugloc = DebugLoc::None;
// The arguments we'll be passing. Plus one to account for outptr, if used.
let mut llargs = Vec::with_capacity(args.len() + 1);
// Types of the arguments. We do not preallocate, because this vector is only
// filled when `is_foreign` is `true` and foreign calls are a minority of cases.
let mut arg_tys = Vec::new();
let (callee, fty) = match callee.ty.sty { let (mut callee, abi, sig) = match callee.ty.sty {
ty::TyFnDef(def_id, substs, f) => { ty::TyFnDef(def_id, substs, f) => {
(Callee::def(bcx.ccx(), def_id, substs, callee.ty), f) (Callee::def(bcx.ccx(), def_id, substs), f.abi, &f.sig)
} }
ty::TyFnPtr(f) => { ty::TyFnPtr(f) => {
(Callee { (Callee {
data: Fn(callee.immediate()), data: Fn(callee.immediate()),
ty: callee.ty ty: callee.ty
}, f) }, f.abi, &f.sig)
} }
_ => unreachable!("{} is not callable", callee.ty) _ => unreachable!("{} is not callable", callee.ty)
}; };
// We do not translate intrinsics here (they shouldn't be functions) // Handle the intrinsics that old trans wants Exprs for ourselves.
assert!(fty.abi != Abi::RustIntrinsic && fty.abi != Abi::PlatformIntrinsic); let intrinsic = match (&callee.ty.sty, &callee.data) {
// Foreign-ABI functions are translated differently (&ty::TyFnDef(def_id, _, _), &Intrinsic) => {
let is_foreign = fty.abi != Abi::Rust && fty.abi != Abi::RustCall; Some(bcx.tcx().item_name(def_id).as_str())
}
_ => None
};
let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
if intrinsic == Some("move_val_init") {
let &(_, target) = destination.as_ref().unwrap();
// The first argument is a thin destination pointer.
let llptr = self.trans_operand(&bcx, &args[0]).immediate();
let val = self.trans_operand(&bcx, &args[1]);
self.store_operand(&bcx, llptr, val);
self.set_operand_dropped(&bcx, &args[1]);
funclet_br(bcx, self.llblock(target));
return;
}
if intrinsic == Some("transmute") {
let &(ref dest, target) = destination.as_ref().unwrap();
let dst = self.trans_lvalue(&bcx, dest);
let mut val = self.trans_operand(&bcx, &args[0]);
if let ty::TyFnDef(def_id, substs, _) = val.ty.sty {
let llouttype = type_of::type_of(bcx.ccx(), dst.ty.to_ty(bcx.tcx()));
let out_type_size = llbitsize_of_real(bcx.ccx(), llouttype);
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let f = Callee::def(bcx.ccx(), def_id, substs);
let datum = f.reify(bcx.ccx());
val = OperandRef {
val: OperandValue::Immediate(datum.val),
ty: datum.ty
};
}
}
let llty = type_of::type_of(bcx.ccx(), val.ty);
let cast_ptr = bcx.pointercast(dst.llval, llty.ptr_to());
self.store_operand(&bcx, cast_ptr, val);
self.set_operand_dropped(&bcx, &args[0]);
funclet_br(bcx, self.llblock(target));
return;
}
let extra_args = &args[sig.0.inputs.len()..];
let extra_args = extra_args.iter().map(|op_arg| {
self.mir.operand_ty(bcx.tcx(), op_arg)
}).collect::<Vec<_>>();
let fn_ty = callee.direct_fn_type(bcx.ccx(), &extra_args);
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination // Prepare the return value destination
let (ret_dest_ty, must_copy_dest) = if let Some((ref d, _)) = *destination { let ret_dest = if let Some((ref d, _)) = *destination {
let dest = self.trans_lvalue(&bcx, d); let dest = self.trans_lvalue(&bcx, d);
let ret_ty = dest.ty.to_ty(bcx.tcx()); if fn_ty.ret.is_indirect() {
if !is_foreign && type_of::return_uses_outptr(bcx.ccx(), ret_ty) {
llargs.push(dest.llval); llargs.push(dest.llval);
(Some((dest, ret_ty)), false) None
} else if fn_ty.ret.is_ignore() {
None
} else { } else {
(Some((dest, ret_ty)), !common::type_is_zero_size(bcx.ccx(), ret_ty)) Some(dest)
} }
} else { } else {
(None, false) None
}; };
// Split the rust-call tupled arguments off. // Split the rust-call tupled arguments off.
let (args, rest) = if fty.abi == Abi::RustCall && !args.is_empty() { let (first_args, untuple) = if abi == Abi::RustCall && !args.is_empty() {
let (tup, args) = args.split_last().unwrap(); let (tup, args) = args.split_last().unwrap();
// we can reorder safely because of MIR (args, Some(tup))
(args, self.trans_operand_untupled(&bcx, tup))
} else { } else {
(&args[..], vec![]) (&args[..], None)
}; };
let datum = { let mut idx = 0;
let mut arg_ops = args.iter().map(|arg| { for arg in first_args {
self.trans_operand(&bcx, arg) let val = self.trans_operand(&bcx, arg).val;
}).chain(rest.into_iter()); self.trans_argument(&bcx, val, &mut llargs, &fn_ty,
&mut idx, &mut callee.data);
}
if let Some(tup) = untuple {
self.trans_arguments_untupled(&bcx, tup, &mut llargs, &fn_ty,
&mut idx, &mut callee.data)
}
// Get the actual pointer we can call. let fn_ptr = match callee.data {
// This can involve vtable accesses or reification. NamedTupleConstructor(_) => {
let datum = if let Virtual(idx) = callee.data { // FIXME translate this like mir::Rvalue::Aggregate.
assert!(!is_foreign); callee.reify(bcx.ccx()).val
}
Intrinsic => {
use trans::callee::ArgVals;
use trans::expr::{Ignore, SaveIn};
use trans::intrinsic::trans_intrinsic_call;
// Grab the first argument which is a trait object. let (dest, llargs) = if fn_ty.ret.is_indirect() {
let vtable = match arg_ops.next().unwrap().val { (SaveIn(llargs[0]), &llargs[1..])
FatPtr(data, vtable) => { } else if let Some(dest) = ret_dest {
llargs.push(data); (SaveIn(dest.llval), &llargs[..])
vtable } else {
} (Ignore, &llargs[..])
_ => unreachable!("expected FatPtr for Virtual call")
}; };
bcx.with_block(|bcx| { bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, vtable, idx, callee.ty) let res = trans_intrinsic_call(bcx, callee.ty, &fn_ty,
}) ArgVals(llargs), dest,
} else { DebugLoc::None);
callee.reify(bcx.ccx()) let bcx = res.bcx.build();
}; if let Some((_, target)) = *destination {
for op in args {
// Process the rest of the args. self.set_operand_dropped(&bcx, op);
for operand in arg_ops { }
match operand.val { funclet_br(bcx, self.llblock(target));
Ref(llval) | Immediate(llval) => llargs.push(llval), } else {
FatPtr(b, e) => { // trans_intrinsic_call already used Unreachable.
llargs.push(b); // bcx.unreachable();
llargs.push(e);
} }
} });
if is_foreign { return;
arg_tys.push(operand.ty);
}
} }
Fn(f) => f,
datum Virtual(_) => unreachable!("Virtual fn ptr not extracted")
}; };
let attrs = attributes::from_fn_type(bcx.ccx(), datum.ty);
// Many different ways to call a function handled here // Many different ways to call a function handled here
match (is_foreign, cleanup, destination) { if let Some(cleanup) = cleanup.map(|bb| self.bcx(bb)) {
// The two cases below are the only ones to use LLVMs `invoke`. // We translate the copy into a temporary block. The temporary block is
(false, &Some(cleanup), &None) => { // necessary because the current block has already been terminated (by
let cleanup = self.bcx(cleanup); // `invoke`) and we cannot really translate into the target block
let landingpad = self.make_landing_pad(cleanup); // because:
let unreachable_blk = self.unreachable_block(); // * The target block may have more than a single precedesor;
bcx.invoke(datum.val, // * Some LLVM insns cannot have a preceeding store insn (phi,
&llargs[..], // cleanuppad), and adding/prepending the store now may render
unreachable_blk.llbb, // those other instructions invalid.
landingpad.llbb(), //
cleanup_bundle.as_ref(), // NB: This approach still may break some LLVM code. For example if the
Some(attrs)); // target block starts with a `phi` (which may only match on immediate
landingpad.at_start(|bcx| for op in args { // precedesors), it cannot know about this temporary block thus
self.set_operand_dropped(bcx, op); // resulting in an invalid code:
}); //
}, // this:
(false, &Some(cleanup), &Some((_, success))) => { // …
let cleanup = self.bcx(cleanup); // %0 = …
let landingpad = self.make_landing_pad(cleanup); // %1 = invoke to label %temp …
let invokeret = bcx.invoke(datum.val, // temp:
&llargs[..], // store ty %1, ty* %dest
self.llblock(success), // br label %actualtargetblock
landingpad.llbb(), // actualtargetblock: ; preds: %temp, …
cleanup_bundle.as_ref(), // phi … [%this, …], [%0, …] ; ERROR: phi requires to match only on
Some(attrs)); // ; immediate precedesors
if must_copy_dest {
let (ret_dest, ret_ty) = ret_dest_ty let ret_bcx = if destination.is_some() {
.expect("return destination and type not set"); self.fcx.new_block("", None)
// We translate the copy straight into the beginning of the target } else {
// block. self.unreachable_block()
self.bcx(success).at_start(|bcx| bcx.with_block( |bcx| { };
base::store_ty(bcx, invokeret, ret_dest.llval, ret_ty); let landingpad = self.make_landing_pad(cleanup);
}));
let invokeret = bcx.invoke(fn_ptr,
&llargs,
ret_bcx.llbb,
landingpad.llbb(),
cleanup_bundle.as_ref());
fn_ty.apply_attrs_callsite(invokeret);
landingpad.at_start(|bcx| for op in args {
self.set_operand_dropped(bcx, op);
});
if let Some((_, target)) = *destination {
let ret_bcx = ret_bcx.build();
if let Some(ret_dest) = ret_dest {
fn_ty.ret.store(&ret_bcx, invokeret, ret_dest.llval);
} }
self.bcx(success).at_start(|bcx| for op in args { for op in args {
self.set_operand_dropped(bcx, op); self.set_operand_dropped(&ret_bcx, op);
}); }
landingpad.at_start(|bcx| for op in args { ret_bcx.br(self.llblock(target));
self.set_operand_dropped(bcx, op);
});
},
(false, _, &None) => {
bcx.call(datum.val,
&llargs[..],
cleanup_bundle.as_ref(),
Some(attrs));
// no need to drop args, because the call never returns
bcx.unreachable();
} }
(false, _, &Some((_, target))) => { } else {
let llret = bcx.call(datum.val, let llret = bcx.call(fn_ptr, &llargs, cleanup_bundle.as_ref());
&llargs[..], fn_ty.apply_attrs_callsite(llret);
cleanup_bundle.as_ref(), if let Some((_, target)) = *destination {
Some(attrs)); if let Some(ret_dest) = ret_dest {
if must_copy_dest { fn_ty.ret.store(&bcx, llret, ret_dest.llval);
let (ret_dest, ret_ty) = ret_dest_ty
.expect("return destination and type not set");
bcx.with_block(|bcx| {
base::store_ty(bcx, llret, ret_dest.llval, ret_ty);
});
} }
for op in args { for op in args {
self.set_operand_dropped(&bcx, op); self.set_operand_dropped(&bcx, op);
} }
funclet_br(bcx, self.llblock(target)); funclet_br(bcx, self.llblock(target));
} else {
// no need to drop args, because the call never returns
bcx.unreachable();
} }
// Foreign functions
(true, _, destination) => {
let (dest, _) = ret_dest_ty
.expect("return destination is not set");
bcx = bcx.map_block(|bcx| {
foreign::trans_native_call(bcx,
datum.ty,
datum.val,
dest.llval,
&llargs[..],
arg_tys,
debugloc)
});
if let Some((_, target)) = *destination {
for op in args {
self.set_operand_dropped(&bcx, op);
}
funclet_br(bcx, self.llblock(target));
}
},
} }
} }
} }
} }
fn trans_argument(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
val: OperandValue,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// Treat the values in a fat pointer separately.
if let FatPtr(ptr, meta) = val {
if *next_idx == 0 {
if let Virtual(idx) = *callee {
let llfn = bcx.with_block(|bcx| {
meth::get_virtual_method(bcx, meta, idx)
});
let llty = fn_ty.llvm_type(bcx.ccx()).ptr_to();
*callee = Fn(bcx.pointercast(llfn, llty));
}
}
self.trans_argument(bcx, Immediate(ptr), llargs, fn_ty, next_idx, callee);
self.trans_argument(bcx, Immediate(meta), llargs, fn_ty, next_idx, callee);
return;
}
let arg = &fn_ty.args[*next_idx];
*next_idx += 1;
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty));
}
if arg.is_ignore() {
return;
}
// Force by-ref if we have to load through a cast pointer.
let (mut llval, by_ref) = match val {
Immediate(llval) if arg.is_indirect() || arg.cast.is_some() => {
let llscratch = build::AllocaFcx(bcx.fcx(), arg.original_ty, "arg");
bcx.store(llval, llscratch);
(llscratch, true)
}
Immediate(llval) => (llval, false),
Ref(llval) => (llval, true),
FatPtr(_, _) => unreachable!("fat pointers handled above")
};
if by_ref && !arg.is_indirect() {
// Have to load the argument, maybe while casting it.
if arg.original_ty == Type::i1(bcx.ccx()) {
// We store bools as i8 so we need to truncate to i1.
llval = bcx.load_range_assert(llval, 0, 2, llvm::False);
llval = bcx.trunc(llval, arg.original_ty);
} else if let Some(ty) = arg.cast {
llval = bcx.load(bcx.pointercast(llval, ty.ptr_to()));
let llalign = llalign_of_min(bcx.ccx(), arg.ty);
unsafe {
llvm::LLVMSetAlignment(llval, llalign);
}
} else {
llval = bcx.load(llval);
}
}
llargs.push(llval);
}
fn trans_arguments_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<ValueRef>,
fn_ty: &FnType,
next_idx: &mut usize,
callee: &mut CalleeData) {
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return;
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => bcx.tcx().sess.span_bug(
self.mir.span,
&format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
for (n, &ty) in result_types.iter().enumerate() {
let ptr = adt::trans_field_ptr_builder(bcx, &base_repr, base, Disr(0), n);
let val = if common::type_is_fat_ptr(bcx.tcx(), ty) {
let (lldata, llextra) = load_fat_ptr(bcx, ptr);
FatPtr(lldata, llextra)
} else {
// Don't bother loading the value, trans_argument will.
Ref(ptr)
};
self.trans_argument(bcx, val, llargs, fn_ty, next_idx, callee);
}
}
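The untupling performed above is the usual "rust-call" ABI dance; at the source level it corresponds to the (purely illustrative) pair of signatures sketched below:

    // Logically, a "rust-call" function receives its arguments as one tuple:
    //     extern "rust-call" fn call(&self, args: (u32, u64)) -> u64
    // but each tuple element is passed as its own LLVM argument, as if the
    // signature had been:
    //     fn call(&self, a: u32, b: u64) -> u64
    fn demo() {
        let add = |a: u32, b: u64| a as u64 + b;
        // Calling through the `Fn` traits goes through exactly this
        // tuple-then-untuple step in trans.
        assert_eq!(add(1, 2), 3);
    }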
fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef { fn get_personality_slot(&mut self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> ValueRef {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
if let Some(slot) = self.llpersonalityslot { if let Some(slot) = self.llpersonalityslot {


@ -8,17 +8,17 @@
// option. This file may not be copied, modified, or distributed // option. This file may not be copied, modified, or distributed
// except according to those terms. // except according to those terms.
use back::abi;
use llvm::ValueRef; use llvm::ValueRef;
use middle::ty::{Ty, TypeFoldable}; use middle::ty::{Ty, TypeFoldable};
use rustc::middle::const_eval::{self, ConstVal}; use rustc::middle::const_eval::{self, ConstVal};
use rustc_const_eval::ConstInt::*; use rustc_const_eval::ConstInt::*;
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use trans::abi;
use trans::common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral, use trans::common::{self, BlockAndBuilder, C_bool, C_bytes, C_floating_f64, C_integral,
C_str_slice, C_nil, C_undef}; C_str_slice, C_undef};
use trans::consts; use trans::consts;
use trans::datum;
use trans::expr; use trans::expr;
use trans::inline;
use trans::type_of; use trans::type_of;
use trans::type_::Type; use trans::type_::Type;
@ -38,8 +38,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let val = if common::type_is_immediate(ccx, ty) { let val = if common::type_is_immediate(ccx, ty) {
OperandValue::Immediate(val) OperandValue::Immediate(val)
} else if common::type_is_fat_ptr(bcx.tcx(), ty) { } else if common::type_is_fat_ptr(bcx.tcx(), ty) {
let data = common::const_get_elt(ccx, val, &[abi::FAT_PTR_ADDR as u32]); let data = common::const_get_elt(val, &[abi::FAT_PTR_ADDR as u32]);
let extra = common::const_get_elt(ccx, val, &[abi::FAT_PTR_EXTRA as u32]); let extra = common::const_get_elt(val, &[abi::FAT_PTR_EXTRA as u32]);
OperandValue::FatPtr(data, extra) OperandValue::FatPtr(data, extra)
} else { } else {
OperandValue::Ref(val) OperandValue::Ref(val)
@ -85,16 +85,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
ConstVal::Integral(InferSigned(v)) => C_integral(llty, v as u64, true), ConstVal::Integral(InferSigned(v)) => C_integral(llty, v as u64, true),
ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()), ConstVal::Str(ref v) => C_str_slice(ccx, v.clone()),
ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"), ConstVal::ByteStr(ref v) => consts::addr_of(ccx, C_bytes(ccx, v), 1, "byte_str"),
ConstVal::Struct(id) | ConstVal::Tuple(id) | ConstVal::Struct(_) | ConstVal::Tuple(_) |
ConstVal::Array(id, _) | ConstVal::Repeat(id, _) => { ConstVal::Array(..) | ConstVal::Repeat(..) |
let expr = bcx.tcx().map.expect_expr(id); ConstVal::Function(_) => {
bcx.with_block(|bcx| { unreachable!("MIR must not use {:?} (which refers to a local ID)", cv)
expr::trans(bcx, expr).datum.val }
})
},
ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false), ConstVal::Char(c) => C_integral(Type::char(ccx), c as u64, false),
ConstVal::Dummy => unreachable!(), ConstVal::Dummy => unreachable!(),
ConstVal::Function(_) => C_nil(ccx)
} }
} }
@ -116,16 +113,26 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
}; };
} }
let substs = bcx.tcx().mk_substs(bcx.monomorphize(&substs)); let substs = Some(bcx.monomorphize(substs));
let def_id = inline::maybe_instantiate_inline(bcx.ccx(), def_id); let expr = const_eval::lookup_const_by_id(bcx.tcx(), def_id, substs)
let expr = const_eval::lookup_const_by_id(bcx.tcx(), def_id, None, Some(substs))
.expect("def was const, but lookup_const_by_id failed").0; .expect("def was const, but lookup_const_by_id failed").0;
// FIXME: this is falling back to translating from HIR. This is not easy to fix, // FIXME: this is falling back to translating from HIR. This is not easy to fix,
// because we would have somehow adapt const_eval to work on MIR rather than HIR. // because we would have somehow adapt const_eval to work on MIR rather than HIR.
let d = bcx.with_block(|bcx| { let d = bcx.with_block(|bcx| {
expr::trans(bcx, expr) expr::trans(bcx, expr)
}); });
OperandRef::from_rvalue_datum(d.datum.to_rvalue_datum(d.bcx, "").datum)
let datum = d.datum.to_rvalue_datum(d.bcx, "").datum;
match datum.kind.mode {
datum::RvalueMode::ByValue => {
OperandRef {
ty: datum.ty,
val: OperandValue::Immediate(datum.val)
}
}
datum::RvalueMode::ByRef => self.trans_load(bcx, datum.val, datum.ty)
}
} }
mir::Literal::Value { ref value } => { mir::Literal::Value { ref value } => {
self.trans_constval(bcx, value, ty) self.trans_constval(bcx, value, ty)
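For orientation, the constant path above now converts every HIR-translated datum into one of three operand shapes before handing it to MIR trans: by-value data becomes an immediate, by-ref data goes through the load path, and fat pointers are split into their two words. The following standalone sketch (the names OperandValue, Mode and from_rvalue are illustrative stand-ins, not the actual trans API) shows the dispatch that the new match on datum.kind.mode performs:

// Illustrative model of the three operand shapes used by MIR trans.
#[derive(Debug)]
enum OperandValue {
    // A single immediate SSA value (integers, thin pointers, ...).
    Immediate(u64),
    // A fat pointer split into its data and extra (length/vtable) words.
    FatPtr(u64, u64),
    // A pointer to a memory location holding the value.
    Ref(u64),
}

// Hypothetical stand-in for datum::RvalueMode.
enum Mode { ByValue, ByRef }

// Mirrors the new match on `datum.kind.mode`: by-value data becomes an
// immediate operand, by-ref data is handled through the ref/load path.
fn from_rvalue(mode: Mode, val: u64) -> OperandValue {
    match mode {
        Mode::ByValue => OperandValue::Immediate(val),
        Mode::ByRef => OperandValue::Ref(val),
    }
}

fn main() {
    println!("{:?}", from_rvalue(Mode::ByValue, 42));
    println!("{:?}", from_rvalue(Mode::ByRef, 0x1000));
    println!("{:?}", OperandValue::FatPtr(0x2000, 8));
}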

@ -12,11 +12,13 @@ use llvm::ValueRef;
use rustc::middle::ty::{self, Ty, TypeFoldable}; use rustc::middle::ty::{self, Ty, TypeFoldable};
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy; use rustc::mir::tcx::LvalueTy;
use trans::abi;
use trans::adt; use trans::adt;
use trans::base; use trans::base;
use trans::common::{self, BlockAndBuilder}; use trans::builder::Builder;
use trans::common::{self, BlockAndBuilder, C_uint};
use trans::consts;
use trans::machine; use trans::machine;
use trans::type_of;
use trans::mir::drop; use trans::mir::drop;
use llvm; use llvm;
use trans::Disr; use trans::Disr;
@ -49,11 +51,25 @@ impl<'tcx> LvalueRef<'tcx> {
{ {
assert!(!ty.has_erasable_regions()); assert!(!ty.has_erasable_regions());
let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name)); let lltemp = bcx.with_block(|bcx| base::alloc_ty(bcx, ty, name));
drop::drop_fill(bcx, lltemp, ty); if bcx.fcx().type_needs_drop(ty) {
drop::drop_fill(bcx, lltemp, ty);
}
LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty)) LvalueRef::new_sized(lltemp, LvalueTy::from_ty(ty))
} }
} }
pub fn get_meta(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
b.struct_gep(fat_ptr, abi::FAT_PTR_EXTRA)
}
pub fn get_dataptr(b: &Builder, fat_ptr: ValueRef) -> ValueRef {
b.struct_gep(fat_ptr, abi::FAT_PTR_ADDR)
}
pub fn load_fat_ptr(b: &Builder, fat_ptr: ValueRef) -> (ValueRef, ValueRef) {
(b.load(get_dataptr(b, fat_ptr)), b.load(get_meta(b, fat_ptr)))
}
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn lvalue_len(&mut self, pub fn lvalue_len(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>, bcx: &BlockAndBuilder<'bcx, 'tcx>,
@ -89,16 +105,12 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Lvalue::Arg(index) => self.args[index as usize], mir::Lvalue::Arg(index) => self.args[index as usize],
mir::Lvalue::Static(def_id) => { mir::Lvalue::Static(def_id) => {
let const_ty = self.mir.lvalue_ty(tcx, lvalue); let const_ty = self.mir.lvalue_ty(tcx, lvalue);
LvalueRef::new_sized( LvalueRef::new_sized(consts::get_static(ccx, def_id).val, const_ty)
common::get_static_val(ccx, def_id, const_ty.to_ty(tcx)),
const_ty)
}, },
mir::Lvalue::ReturnPointer => { mir::Lvalue::ReturnPointer => {
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty); let llval = if !fcx.fn_ty.ret.is_ignore() {
let return_ty = fn_return_ty.unwrap();
let llval = if !common::return_type_is_void(bcx.ccx(), return_ty) {
bcx.with_block(|bcx| { bcx.with_block(|bcx| {
fcx.get_ret_slot(bcx, fn_return_ty, "") fcx.get_ret_slot(bcx, "")
}) })
} else { } else {
// This is a void return; that is, theres no place to store the value and // This is a void return; that is, theres no place to store the value and
@ -106,27 +118,40 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Ergo, we return an undef ValueRef, so we do not have to special-case every // Ergo, we return an undef ValueRef, so we do not have to special-case every
// place using lvalues, and could use it the same way you use a regular // place using lvalues, and could use it the same way you use a regular
// ReturnPointer LValue (i.e. store into it, load from it etc). // ReturnPointer LValue (i.e. store into it, load from it etc).
let llty = type_of::type_of(bcx.ccx(), return_ty).ptr_to(); let llty = fcx.fn_ty.ret.original_ty.ptr_to();
unsafe { unsafe {
llvm::LLVMGetUndef(llty.to_ref()) llvm::LLVMGetUndef(llty.to_ref())
} }
}; };
let fn_return_ty = bcx.monomorphize(&self.mir.return_ty);
let return_ty = fn_return_ty.unwrap();
LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty)) LvalueRef::new_sized(llval, LvalueTy::from_ty(return_ty))
}, },
mir::Lvalue::Projection(ref projection) => { mir::Lvalue::Projection(ref projection) => {
let tr_base = self.trans_lvalue(bcx, &projection.base); let tr_base = self.trans_lvalue(bcx, &projection.base);
let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem); let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
let projected_ty = bcx.monomorphize(&projected_ty);
let project_index = |llindex| {
let element = if let ty::TySlice(_) = tr_base.ty.to_ty(tcx).sty {
// Slices already point to the array element type.
bcx.inbounds_gep(tr_base.llval, &[llindex])
} else {
let zero = common::C_uint(bcx.ccx(), 0u64);
bcx.inbounds_gep(tr_base.llval, &[zero, llindex])
};
(element, ptr::null_mut())
};
let (llprojected, llextra) = match projection.elem { let (llprojected, llextra) = match projection.elem {
mir::ProjectionElem::Deref => { mir::ProjectionElem::Deref => {
let base_ty = tr_base.ty.to_ty(tcx); let base_ty = tr_base.ty.to_ty(tcx);
bcx.with_block(|bcx| { if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) {
if common::type_is_sized(tcx, projected_ty.to_ty(tcx)) { (base::load_ty_builder(bcx, tr_base.llval, base_ty),
(base::load_ty(bcx, tr_base.llval, base_ty), ptr::null_mut())
ptr::null_mut()) } else {
} else { load_fat_ptr(bcx, tr_base.llval)
base::load_fat_ptr(bcx, tr_base.llval, base_ty) }
}
})
} }
mir::ProjectionElem::Field(ref field, _) => { mir::ProjectionElem::Field(ref field, _) => {
let base_ty = tr_base.ty.to_ty(tcx); let base_ty = tr_base.ty.to_ty(tcx);
@ -142,9 +167,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} else { } else {
adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra) adt::MaybeSizedValue::unsized_(tr_base.llval, tr_base.llextra)
}; };
let llprojected = bcx.with_block(|bcx| { let llprojected = adt::trans_field_ptr_builder(bcx, &base_repr, base,
adt::trans_field_ptr(bcx, &base_repr, base, Disr(discr), field.index()) Disr(discr), field.index());
});
let llextra = if is_sized { let llextra = if is_sized {
ptr::null_mut() ptr::null_mut()
} else { } else {
@ -154,30 +178,21 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} }
mir::ProjectionElem::Index(ref index) => { mir::ProjectionElem::Index(ref index) => {
let index = self.trans_operand(bcx, index); let index = self.trans_operand(bcx, index);
let llindex = self.prepare_index(bcx, index.immediate()); project_index(self.prepare_index(bcx, index.immediate()))
let zero = common::C_uint(bcx.ccx(), 0u64);
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
} }
mir::ProjectionElem::ConstantIndex { offset, mir::ProjectionElem::ConstantIndex { offset,
from_end: false, from_end: false,
min_length: _ } => { min_length: _ } => {
let lloffset = common::C_u32(bcx.ccx(), offset); let lloffset = C_uint(bcx.ccx(), offset);
let llindex = self.prepare_index(bcx, lloffset); project_index(self.prepare_index(bcx, lloffset))
let zero = common::C_uint(bcx.ccx(), 0u64);
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
} }
mir::ProjectionElem::ConstantIndex { offset, mir::ProjectionElem::ConstantIndex { offset,
from_end: true, from_end: true,
min_length: _ } => { min_length: _ } => {
let lloffset = common::C_u32(bcx.ccx(), offset); let lloffset = C_uint(bcx.ccx(), offset);
let lllen = self.lvalue_len(bcx, tr_base); let lllen = self.lvalue_len(bcx, tr_base);
let llindex = bcx.sub(lllen, lloffset); let llindex = bcx.sub(lllen, lloffset);
let llindex = self.prepare_index(bcx, llindex); project_index(self.prepare_index(bcx, llindex))
let zero = common::C_uint(bcx.ccx(), 0u64);
(bcx.inbounds_gep(tr_base.llval, &[zero, llindex]),
ptr::null_mut())
} }
mir::ProjectionElem::Downcast(..) => { mir::ProjectionElem::Downcast(..) => {
(tr_base.llval, tr_base.llextra) (tr_base.llval, tr_base.llextra)
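The get_dataptr/get_meta/load_fat_ptr helpers introduced above rely on the usual fat-pointer layout: field abi::FAT_PTR_ADDR (index 0) holds the data pointer and abi::FAT_PTR_EXTRA (index 1) holds the length or vtable word. A minimal sketch of that layout assumption in plain Rust (this models the aggregate directly rather than going through the LLVM builder API):

// Field indices mirroring abi::FAT_PTR_ADDR and abi::FAT_PTR_EXTRA.
const FAT_PTR_ADDR: usize = 0;
const FAT_PTR_EXTRA: usize = 1;

// A fat pointer modelled as a two-word aggregate: (data, extra).
#[derive(Clone, Copy, Debug)]
struct FatPtr {
    words: [usize; 2],
}

// Analogue of get_dataptr: project the data word.
fn data(p: &FatPtr) -> usize { p.words[FAT_PTR_ADDR] }

// Analogue of get_meta: project the extra (length or vtable) word.
fn meta(p: &FatPtr) -> usize { p.words[FAT_PTR_EXTRA] }

// Analogue of load_fat_ptr: read both words at once.
fn load(p: &FatPtr) -> (usize, usize) { (data(p), meta(p)) }

fn main() {
    let slice_ptr = FatPtr { words: [0x1000, 16] };
    assert_eq!(load(&slice_ptr), (0x1000, 16));
    println!("{:?}", load(&slice_ptr));
}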

@ -10,21 +10,39 @@
use libc::c_uint; use libc::c_uint;
use llvm::{self, ValueRef}; use llvm::{self, ValueRef};
use middle::ty;
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use rustc::mir::tcx::LvalueTy; use rustc::mir::tcx::LvalueTy;
use trans::base; use trans::base;
use trans::common::{self, Block, BlockAndBuilder}; use trans::common::{self, Block, BlockAndBuilder, FunctionContext};
use trans::expr;
use trans::type_of;
use self::lvalue::LvalueRef; use std::ops::Deref;
use std::rc::Rc;
use self::lvalue::{LvalueRef, get_dataptr, get_meta};
use self::operand::OperandRef; use self::operand::OperandRef;
#[derive(Clone)]
pub enum CachedMir<'mir, 'tcx: 'mir> {
Ref(&'mir mir::Mir<'tcx>),
Owned(Rc<mir::Mir<'tcx>>)
}
impl<'mir, 'tcx: 'mir> Deref for CachedMir<'mir, 'tcx> {
type Target = mir::Mir<'tcx>;
fn deref(&self) -> &mir::Mir<'tcx> {
match *self {
CachedMir::Ref(r) => r,
CachedMir::Owned(ref rc) => rc
}
}
}
// FIXME DebugLoc is always None right now // FIXME DebugLoc is always None right now
/// Master context for translating MIR. /// Master context for translating MIR.
pub struct MirContext<'bcx, 'tcx:'bcx> { pub struct MirContext<'bcx, 'tcx:'bcx> {
mir: &'bcx mir::Mir<'tcx>, mir: CachedMir<'bcx, 'tcx>,
/// Function context /// Function context
fcx: &'bcx common::FunctionContext<'bcx, 'tcx>, fcx: &'bcx common::FunctionContext<'bcx, 'tcx>,
@ -77,16 +95,16 @@ enum TempRef<'tcx> {
/////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////
pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) { pub fn trans_mir<'blk, 'tcx>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
let fcx = bcx.fcx(); let bcx = fcx.init(false, None).build();
let mir = bcx.mir(); let mir = bcx.mir();
let mir_blocks = bcx.mir().all_basic_blocks(); let mir_blocks = mir.all_basic_blocks();
// Analyze the temps to determine which must be lvalues // Analyze the temps to determine which must be lvalues
// FIXME // FIXME
let lvalue_temps = bcx.with_block(|bcx| { let lvalue_temps = bcx.with_block(|bcx| {
analyze::lvalue_temps(bcx, mir) analyze::lvalue_temps(bcx, &mir)
}); });
// Allocate variable and temp allocas // Allocate variable and temp allocas
@ -108,10 +126,10 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
TempRef::Operand(None) TempRef::Operand(None)
}) })
.collect(); .collect();
let args = arg_value_refs(&bcx, mir); let args = arg_value_refs(&bcx, &mir);
// Allocate a `Block` for every basic block // Allocate a `Block` for every basic block
let block_bcxs: Vec<Block<'bcx,'tcx>> = let block_bcxs: Vec<Block<'blk,'tcx>> =
mir_blocks.iter() mir_blocks.iter()
.map(|&bb|{ .map(|&bb|{
// FIXME(#30941) this doesn't handle msvc-style exceptions // FIXME(#30941) this doesn't handle msvc-style exceptions
@ -138,6 +156,8 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
for &bb in &mir_blocks { for &bb in &mir_blocks {
mircx.trans_block(bb); mircx.trans_block(bb);
} }
fcx.cleanup();
} }
/// Produce, for each argument, a `ValueRef` pointing at the /// Produce, for each argument, a `ValueRef` pointing at the
@ -146,51 +166,75 @@ pub fn trans_mir<'bcx, 'tcx>(bcx: BlockAndBuilder<'bcx, 'tcx>) {
fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>, fn arg_value_refs<'bcx, 'tcx>(bcx: &BlockAndBuilder<'bcx, 'tcx>,
mir: &mir::Mir<'tcx>) mir: &mir::Mir<'tcx>)
-> Vec<LvalueRef<'tcx>> { -> Vec<LvalueRef<'tcx>> {
// FIXME tupled_args? I think I'd rather that mapping is done in MIR land though
let fcx = bcx.fcx(); let fcx = bcx.fcx();
let tcx = bcx.tcx(); let tcx = bcx.tcx();
let mut idx = fcx.arg_offset() as c_uint; let mut idx = 0;
mir.arg_decls let mut llarg_idx = fcx.fn_ty.ret.is_indirect() as usize;
.iter() mir.arg_decls.iter().enumerate().map(|(arg_index, arg_decl)| {
.enumerate() let arg_ty = bcx.monomorphize(&arg_decl.ty);
.map(|(arg_index, arg_decl)| { if arg_decl.spread {
let arg_ty = bcx.monomorphize(&arg_decl.ty); // This argument (e.g. the last argument in the "rust-call" ABI)
let llval = if type_of::arg_is_indirect(bcx.ccx(), arg_ty) { // is a tuple that was spread at the ABI level and now we have
// Don't copy an indirect argument to an alloca, the caller // to reconstruct it into a tuple local variable, from multiple
// already put it in a temporary alloca and gave it up, unless // individual LLVM function arguments.
// we emit extra-debug-info, which requires local allocas :(.
// FIXME: lifetimes, debug info let tupled_arg_tys = match arg_ty.sty {
let llarg = llvm::get_param(fcx.llfn, idx); ty::TyTuple(ref tys) => tys,
idx += 1; _ => unreachable!("spread argument isn't a tuple?!")
llarg };
} else if common::type_is_fat_ptr(tcx, arg_ty) {
// we pass fat pointers as two words, but we want to let lltemp = bcx.with_block(|bcx| {
// represent them internally as a pointer to two words, base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
// so make an alloca to store them in. });
let lldata = llvm::get_param(fcx.llfn, idx); for (i, &tupled_arg_ty) in tupled_arg_tys.iter().enumerate() {
let llextra = llvm::get_param(fcx.llfn, idx + 1); let dst = bcx.struct_gep(lltemp, i);
idx += 2; let arg = &fcx.fn_ty.args[idx];
let (lltemp, dataptr, meta) = bcx.with_block(|bcx| { idx += 1;
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); if common::type_is_fat_ptr(tcx, tupled_arg_ty) {
(lltemp, expr::get_dataptr(bcx, lltemp), expr::get_meta(bcx, lltemp)) // We pass fat pointers as two words, but inside the tuple
}); // they are the two sub-fields of a single aggregate field.
bcx.store(lldata, dataptr); let meta = &fcx.fn_ty.args[idx];
bcx.store(llextra, meta); idx += 1;
lltemp arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, dst));
} else { meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, dst));
// otherwise, arg is passed by value, so make a } else {
// temporary and store it there arg.store_fn_arg(bcx, &mut llarg_idx, dst);
let llarg = llvm::get_param(fcx.llfn, idx); }
idx += 1; }
bcx.with_block(|bcx| { return LvalueRef::new_sized(lltemp, LvalueTy::from_ty(arg_ty));
let lltemp = base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index)); }
base::store_ty(bcx, llarg, lltemp, arg_ty);
lltemp let arg = &fcx.fn_ty.args[idx];
}) idx += 1;
}; let llval = if arg.is_indirect() {
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty)) // Don't copy an indirect argument to an alloca, the caller
}) // already put it in a temporary alloca and gave it up, unless
.collect() // we emit extra-debug-info, which requires local allocas :(.
// FIXME: lifetimes, debug info
let llarg = llvm::get_param(fcx.llfn, llarg_idx as c_uint);
llarg_idx += 1;
llarg
} else {
let lltemp = bcx.with_block(|bcx| {
base::alloc_ty(bcx, arg_ty, &format!("arg{}", arg_index))
});
if common::type_is_fat_ptr(tcx, arg_ty) {
// we pass fat pointers as two words, but we want to
// represent them internally as a pointer to two words,
// so make an alloca to store them in.
let meta = &fcx.fn_ty.args[idx];
idx += 1;
arg.store_fn_arg(bcx, &mut llarg_idx, get_dataptr(bcx, lltemp));
meta.store_fn_arg(bcx, &mut llarg_idx, get_meta(bcx, lltemp));
} else {
// otherwise, arg is passed by value, so make a
// temporary and store it there
arg.store_fn_arg(bcx, &mut llarg_idx, lltemp);
}
lltemp
};
LvalueRef::new_sized(llval, LvalueTy::from_ty(arg_ty))
}).collect()
} }
mod analyze; mod analyze;
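The CachedMir wrapper added above lets the rest of MIR trans stay agnostic about whether the MIR body is borrowed from the map or owned behind an Rc: both cases deref to the same &Mir. A self-contained sketch of the same borrow-or-Rc pattern (Body here is a placeholder, not the real mir::Mir):

use std::ops::Deref;
use std::rc::Rc;

// Placeholder for mir::Mir.
struct Body {
    blocks: usize,
}

// Either a borrowed body or a shared, owned one; both deref to &Body.
enum Cached<'a> {
    Ref(&'a Body),
    Owned(Rc<Body>),
}

impl<'a> Deref for Cached<'a> {
    type Target = Body;
    fn deref(&self) -> &Body {
        match *self {
            Cached::Ref(r) => r,
            Cached::Owned(ref rc) => rc,
        }
    }
}

fn main() {
    let local = Body { blocks: 3 };
    let borrowed = Cached::Ref(&local);
    let owned = Cached::Owned(Rc::new(Body { blocks: 5 }));
    // Callers access fields uniformly through Deref.
    println!("{} {}", borrowed.blocks, owned.blocks);
}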

@ -9,17 +9,18 @@
// except according to those terms. // except according to those terms.
use llvm::ValueRef; use llvm::ValueRef;
use rustc::middle::ty::{self, Ty}; use rustc::middle::ty::Ty;
use rustc::mir::repr as mir; use rustc::mir::repr as mir;
use trans::adt;
use trans::base; use trans::base;
use trans::common::{self, Block, BlockAndBuilder}; use trans::common::{self, Block, BlockAndBuilder};
use trans::datum; use trans::datum;
use trans::Disr; use trans::value::Value;
use trans::glue; use trans::glue;
use std::fmt;
use super::lvalue::load_fat_ptr;
use super::{MirContext, TempRef, drop}; use super::{MirContext, TempRef, drop};
use super::lvalue::LvalueRef;
/// The representation of a Rust value. The enum variant is in fact /// The representation of a Rust value. The enum variant is in fact
/// uniquely determined by the value's type, but is kept as a /// uniquely determined by the value's type, but is kept as a
@ -53,6 +54,25 @@ pub struct OperandRef<'tcx> {
pub ty: Ty<'tcx> pub ty: Ty<'tcx>
} }
impl<'tcx> fmt::Debug for OperandRef<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.val {
OperandValue::Ref(r) => {
write!(f, "OperandRef(Ref({:?}) @ {:?})",
Value(r), self.ty)
}
OperandValue::Immediate(i) => {
write!(f, "OperandRef(Immediate({:?}) @ {:?})",
Value(i), self.ty)
}
OperandValue::FatPtr(a, d) => {
write!(f, "OperandRef(FatPtr({:?}, {:?}) @ {:?})",
Value(a), Value(d), self.ty)
}
}
}
}
impl<'tcx> OperandRef<'tcx> { impl<'tcx> OperandRef<'tcx> {
/// Asserts that this operand refers to a scalar and returns /// Asserts that this operand refers to a scalar and returns
/// a reference to its value. /// a reference to its value.
@ -62,35 +82,6 @@ impl<'tcx> OperandRef<'tcx> {
_ => unreachable!() _ => unreachable!()
} }
} }
pub fn repr<'bcx>(self, bcx: &BlockAndBuilder<'bcx, 'tcx>) -> String {
match self.val {
OperandValue::Ref(r) => {
format!("OperandRef(Ref({}) @ {:?})",
bcx.val_to_string(r), self.ty)
}
OperandValue::Immediate(i) => {
format!("OperandRef(Immediate({}) @ {:?})",
bcx.val_to_string(i), self.ty)
}
OperandValue::FatPtr(a, d) => {
format!("OperandRef(FatPtr({}, {}) @ {:?})",
bcx.val_to_string(a),
bcx.val_to_string(d),
self.ty)
}
}
}
pub fn from_rvalue_datum(datum: datum::Datum<'tcx, datum::Rvalue>) -> OperandRef {
OperandRef {
ty: datum.ty,
val: match datum.kind.mode {
datum::RvalueMode::ByRef => OperandValue::Ref(datum.val),
datum::RvalueMode::ByValue => OperandValue::Immediate(datum.val),
}
}
}
} }
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
@ -100,18 +91,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
ty: Ty<'tcx>) ty: Ty<'tcx>)
-> OperandRef<'tcx> -> OperandRef<'tcx>
{ {
debug!("trans_load: {} @ {:?}", bcx.val_to_string(llval), ty); debug!("trans_load: {:?} @ {:?}", Value(llval), ty);
let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) { let val = match datum::appropriate_rvalue_mode(bcx.ccx(), ty) {
datum::ByValue => { datum::ByValue => {
bcx.with_block(|bcx| { OperandValue::Immediate(base::load_ty_builder(bcx, llval, ty))
OperandValue::Immediate(base::load_ty(bcx, llval, ty))
})
} }
datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => { datum::ByRef if common::type_is_fat_ptr(bcx.tcx(), ty) => {
let (lldata, llextra) = bcx.with_block(|bcx| { let (lldata, llextra) = load_fat_ptr(bcx, llval);
base::load_fat_ptr(bcx, llval, ty)
});
OperandValue::FatPtr(lldata, llextra) OperandValue::FatPtr(lldata, llextra)
} }
datum::ByRef => OperandValue::Ref(llval) datum::ByRef => OperandValue::Ref(llval)
@ -164,7 +151,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
lldest: ValueRef, lldest: ValueRef,
operand: OperandRef<'tcx>) operand: OperandRef<'tcx>)
{ {
debug!("store_operand: operand={}", operand.repr(bcx)); debug!("store_operand: operand={:?}", operand);
bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand)) bcx.with_block(|bcx| self.store_operand_direct(bcx, lldest, operand))
} }
@ -187,48 +174,6 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} }
} }
pub fn trans_operand_untupled(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>)
-> Vec<OperandRef<'tcx>>
{
// FIXME: consider having some optimization to avoid tupling/untupling
// (and storing/loading in the case of immediates)
// avoid trans_operand for pointless copying
let lv = match *operand {
mir::Operand::Consume(ref lvalue) => self.trans_lvalue(bcx, lvalue),
mir::Operand::Constant(ref constant) => {
// FIXME: consider being less pessimized
if constant.ty.is_nil() {
return vec![];
}
let ty = bcx.monomorphize(&constant.ty);
let lv = LvalueRef::alloca(bcx, ty, "__untuple_alloca");
let constant = self.trans_constant(bcx, constant);
self.store_operand(bcx, lv.llval, constant);
lv
}
};
let lv_ty = lv.ty.to_ty(bcx.tcx());
let result_types = match lv_ty.sty {
ty::TyTuple(ref tys) => tys,
_ => bcx.tcx().sess.span_bug(
self.mir.span,
&format!("bad final argument to \"rust-call\" fn {:?}", lv_ty))
};
let base_repr = adt::represent_type(bcx.ccx(), lv_ty);
let base = adt::MaybeSizedValue::sized(lv.llval);
result_types.iter().enumerate().map(|(n, &ty)| {
self.trans_load(bcx, bcx.with_block(|bcx| {
adt::trans_field_ptr(bcx, &base_repr, base, Disr(0), n)
}), ty)
}).collect()
}
pub fn set_operand_dropped(&mut self, pub fn set_operand_dropped(&mut self,
bcx: &BlockAndBuilder<'bcx, 'tcx>, bcx: &BlockAndBuilder<'bcx, 'tcx>,
operand: &mir::Operand<'tcx>) { operand: &mir::Operand<'tcx>) {
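The repr() helper removed above is replaced by a fmt::Debug impl, so operands can be logged directly with {:?} in debug! calls instead of building strings by hand. A small standalone sketch of the same pattern (Val stands in for the Value newtype that pretty-prints a ValueRef):

use std::fmt;

// Stand-in for the Value wrapper that pretty-prints an LLVM ValueRef.
struct Val(u64);

impl fmt::Debug for Val {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        write!(f, "%v{}", self.0)
    }
}

enum Operand {
    Immediate(Val),
    FatPtr(Val, Val),
    Ref(Val),
}

// One Debug impl instead of a bespoke repr()/to_string method.
impl fmt::Debug for Operand {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            Operand::Immediate(ref i) => write!(f, "Immediate({:?})", i),
            Operand::FatPtr(ref a, ref d) => write!(f, "FatPtr({:?}, {:?})", a, d),
            Operand::Ref(ref r) => write!(f, "Ref({:?})", r),
        }
    }
}

fn main() {
    println!("{:?}", Operand::Immediate(Val(7)));
    println!("{:?}", Operand::FatPtr(Val(1), Val(2)));
    println!("{:?}", Operand::Ref(Val(3)));
}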

@ -18,20 +18,21 @@ use rustc::mir::repr as mir;
use trans::asm; use trans::asm;
use trans::base; use trans::base;
use trans::callee::Callee; use trans::callee::Callee;
use trans::common::{self, BlockAndBuilder, Result}; use trans::common::{self, C_uint, BlockAndBuilder, Result};
use trans::datum::{Datum, Lvalue};
use trans::debuginfo::DebugLoc; use trans::debuginfo::DebugLoc;
use trans::declare; use trans::declare;
use trans::expr;
use trans::adt; use trans::adt;
use trans::machine; use trans::machine;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of; use trans::type_of;
use trans::tvec; use trans::tvec;
use trans::value::Value;
use trans::Disr; use trans::Disr;
use super::MirContext; use super::MirContext;
use super::operand::{OperandRef, OperandValue}; use super::operand::{OperandRef, OperandValue};
use super::lvalue::LvalueRef; use super::lvalue::{LvalueRef, get_dataptr, get_meta};
impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> { impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
pub fn trans_rvalue(&mut self, pub fn trans_rvalue(&mut self,
@ -40,9 +41,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
rvalue: &mir::Rvalue<'tcx>) rvalue: &mir::Rvalue<'tcx>)
-> BlockAndBuilder<'bcx, 'tcx> -> BlockAndBuilder<'bcx, 'tcx>
{ {
debug!("trans_rvalue(dest.llval={}, rvalue={:?})", debug!("trans_rvalue(dest.llval={:?}, rvalue={:?})",
bcx.val_to_string(dest.llval), Value(dest.llval), rvalue);
rvalue);
match *rvalue { match *rvalue {
mir::Rvalue::Use(ref operand) => { mir::Rvalue::Use(ref operand) => {
@ -54,7 +54,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
bcx bcx
} }
mir::Rvalue::Cast(mir::CastKind::Unsize, ref operand, cast_ty) => { mir::Rvalue::Cast(mir::CastKind::Unsize, ref source, cast_ty) => {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
// into-coerce of a thin pointer to a fat pointer - just // into-coerce of a thin pointer to a fat pointer - just
// use the operand path. // use the operand path.
@ -67,7 +67,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// this to be eliminated by MIR translation, but // this to be eliminated by MIR translation, but
// `CoerceUnsized` can be passed by a where-clause, // `CoerceUnsized` can be passed by a where-clause,
// so the (generic) MIR may not be able to expand it. // so the (generic) MIR may not be able to expand it.
let operand = self.trans_operand(&bcx, operand); let operand = self.trans_operand(&bcx, source);
bcx.with_block(|bcx| { bcx.with_block(|bcx| {
match operand.val { match operand.val {
OperandValue::FatPtr(..) => unreachable!(), OperandValue::FatPtr(..) => unreachable!(),
@ -92,6 +92,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
} }
} }
}); });
self.set_operand_dropped(&bcx, source);
bcx bcx
} }
@ -99,8 +100,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let tr_elem = self.trans_operand(&bcx, elem); let tr_elem = self.trans_operand(&bcx, elem);
let count = ConstVal::Integral(ConstInt::Usize(count.value)); let count = ConstVal::Integral(ConstInt::Usize(count.value));
let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate(); let size = self.trans_constval(&bcx, &count, bcx.tcx().types.usize).immediate();
let base = get_dataptr(&bcx, dest.llval);
let bcx = bcx.map_block(|block| { let bcx = bcx.map_block(|block| {
let base = expr::get_dataptr(block, dest.llval);
tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| { tvec::iter_vec_raw(block, base, tr_elem.ty, size, |block, llslot, _| {
self.store_operand_direct(block, llslot, tr_elem); self.store_operand_direct(block, llslot, tr_elem);
block block
@ -123,15 +124,39 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// Do not generate stores and GEPis for zero-sized fields. // Do not generate stores and GEPis for zero-sized fields.
if !common::type_is_zero_size(bcx.ccx(), op.ty) { if !common::type_is_zero_size(bcx.ccx(), op.ty) {
let val = adt::MaybeSizedValue::sized(dest.llval); let val = adt::MaybeSizedValue::sized(dest.llval);
let lldest_i = bcx.with_block(|bcx| { let lldest_i = adt::trans_field_ptr_builder(&bcx, &repr,
adt::trans_field_ptr(bcx, &repr, val, disr, i) val, disr, i);
});
self.store_operand(&bcx, lldest_i, op); self.store_operand(&bcx, lldest_i, op);
self.set_operand_dropped(&bcx, operand);
} }
self.set_operand_dropped(&bcx, operand);
} }
}, },
_ => { _ => {
// FIXME Shouldn't need to manually trigger closure instantiations.
if let mir::AggregateKind::Closure(def_id, substs) = *kind {
use rustc_front::hir;
use syntax::ast::DUMMY_NODE_ID;
use syntax::codemap::DUMMY_SP;
use syntax::ptr::P;
use trans::closure;
closure::trans_closure_expr(closure::Dest::Ignore(bcx.ccx()),
&hir::FnDecl {
inputs: P::new(),
output: hir::NoReturn(DUMMY_SP),
variadic: false
},
&hir::Block {
stmts: P::new(),
expr: None,
id: DUMMY_NODE_ID,
rules: hir::DefaultBlock,
span: DUMMY_SP
},
DUMMY_NODE_ID, def_id,
&bcx.monomorphize(substs));
}
for (i, operand) in operands.iter().enumerate() { for (i, operand) in operands.iter().enumerate() {
let op = self.trans_operand(&bcx, operand); let op = self.trans_operand(&bcx, operand);
// Do not generate stores and GEPis for zero-sized fields. // Do not generate stores and GEPis for zero-sized fields.
@ -141,8 +166,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// not be structs but arrays. // not be structs but arrays.
let dest = bcx.gepi(dest.llval, &[0, i]); let dest = bcx.gepi(dest.llval, &[0, i]);
self.store_operand(&bcx, dest, op); self.store_operand(&bcx, dest, op);
self.set_operand_dropped(&bcx, operand);
} }
self.set_operand_dropped(&bcx, operand);
} }
} }
} }
@ -152,26 +177,42 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::Slice { ref input, from_start, from_end } => { mir::Rvalue::Slice { ref input, from_start, from_end } => {
let ccx = bcx.ccx(); let ccx = bcx.ccx();
let input = self.trans_lvalue(&bcx, input); let input = self.trans_lvalue(&bcx, input);
let (llbase, lllen) = bcx.with_block(|bcx| { let ty = input.ty.to_ty(bcx.tcx());
tvec::get_base_and_len(bcx, let (llbase1, lllen) = match ty.sty {
input.llval, ty::TyArray(_, n) => {
input.ty.to_ty(bcx.tcx())) (bcx.gepi(input.llval, &[0, from_start]), C_uint(ccx, n))
}); }
let llbase1 = bcx.gepi(llbase, &[from_start]); ty::TySlice(_) | ty::TyStr => {
let adj = common::C_uint(ccx, from_start + from_end); (bcx.gepi(input.llval, &[from_start]), input.llextra)
}
_ => unreachable!("cannot slice {}", ty)
};
let adj = C_uint(ccx, from_start + from_end);
let lllen1 = bcx.sub(lllen, adj); let lllen1 = bcx.sub(lllen, adj);
let (lladdrdest, llmetadest) = bcx.with_block(|bcx| { bcx.store(llbase1, get_dataptr(&bcx, dest.llval));
(expr::get_dataptr(bcx, dest.llval), expr::get_meta(bcx, dest.llval)) bcx.store(lllen1, get_meta(&bcx, dest.llval));
});
bcx.store(llbase1, lladdrdest);
bcx.store(lllen1, llmetadest);
bcx bcx
} }
mir::Rvalue::InlineAsm(ref inline_asm) => { mir::Rvalue::InlineAsm { ref asm, ref outputs, ref inputs } => {
bcx.map_block(|bcx| { let outputs = outputs.iter().map(|output| {
asm::trans_inline_asm(bcx, inline_asm) let lvalue = self.trans_lvalue(&bcx, output);
}) Datum::new(lvalue.llval, lvalue.ty.to_ty(bcx.tcx()),
Lvalue::new("out"))
}).collect();
let input_vals = inputs.iter().map(|input| {
self.trans_operand(&bcx, input).immediate()
}).collect();
bcx.with_block(|bcx| {
asm::trans_inline_asm(bcx, asm, outputs, input_vals);
});
for input in inputs {
self.set_operand_dropped(&bcx, input);
}
bcx
} }
_ => { _ => {
@ -191,9 +232,9 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue); assert!(rvalue_creates_operand(rvalue), "cannot trans {:?} to operand", rvalue);
match *rvalue { match *rvalue {
mir::Rvalue::Cast(ref kind, ref operand, cast_ty) => { mir::Rvalue::Cast(ref kind, ref source, cast_ty) => {
let operand = self.trans_operand(&bcx, operand); let operand = self.trans_operand(&bcx, source);
debug!("cast operand is {}", operand.repr(&bcx)); debug!("cast operand is {:?}", operand);
let cast_ty = bcx.monomorphize(&cast_ty); let cast_ty = bcx.monomorphize(&cast_ty);
let val = match *kind { let val = match *kind {
@ -201,7 +242,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
match operand.ty.sty { match operand.ty.sty {
ty::TyFnDef(def_id, substs, _) => { ty::TyFnDef(def_id, substs, _) => {
OperandValue::Immediate( OperandValue::Immediate(
Callee::def(bcx.ccx(), def_id, substs, operand.ty) Callee::def(bcx.ccx(), def_id, substs)
.reify(bcx.ccx()).val) .reify(bcx.ccx()).val)
} }
_ => { _ => {
@ -225,6 +266,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
// example, // example,
// &'a fmt::Debug+Send => &'a fmt::Debug, // &'a fmt::Debug+Send => &'a fmt::Debug,
// and is a no-op at the LLVM level // and is a no-op at the LLVM level
self.set_operand_dropped(&bcx, source);
operand.val operand.val
} }
OperandValue::Immediate(lldata) => { OperandValue::Immediate(lldata) => {
@ -233,12 +275,13 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
base::unsize_thin_ptr(bcx, lldata, base::unsize_thin_ptr(bcx, lldata,
operand.ty, cast_ty) operand.ty, cast_ty)
}); });
self.set_operand_dropped(&bcx, source);
OperandValue::FatPtr(lldata, llextra) OperandValue::FatPtr(lldata, llextra)
} }
OperandValue::Ref(_) => { OperandValue::Ref(_) => {
bcx.sess().bug( bcx.sess().bug(
&format!("by-ref operand {} in trans_rvalue_operand", &format!("by-ref operand {:?} in trans_rvalue_operand",
operand.repr(&bcx))); operand));
} }
} }
} }
@ -246,17 +289,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty)); debug_assert!(common::type_is_immediate(bcx.ccx(), cast_ty));
let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast"); let r_t_in = CastTy::from_ty(operand.ty).expect("bad input type for cast");
let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast"); let r_t_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
let ll_t_in = type_of::arg_type_of(bcx.ccx(), operand.ty); let ll_t_in = type_of::immediate_type_of(bcx.ccx(), operand.ty);
let ll_t_out = type_of::arg_type_of(bcx.ccx(), cast_ty); let ll_t_out = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let (llval, ll_t_in, signed) = if let CastTy::Int(IntTy::CEnum) = r_t_in { let llval = operand.immediate();
let signed = if let CastTy::Int(IntTy::CEnum) = r_t_in {
let repr = adt::represent_type(bcx.ccx(), operand.ty); let repr = adt::represent_type(bcx.ccx(), operand.ty);
let llval = operand.immediate(); adt::is_discr_signed(&repr)
let discr = bcx.with_block(|bcx| {
adt::trans_get_discr(bcx, &repr, llval, None, true)
});
(discr, common::val_ty(discr), adt::is_discr_signed(&repr))
} else { } else {
(operand.immediate(), ll_t_in, operand.ty.is_signed()) operand.ty.is_signed()
}; };
let newval = match (r_t_in, r_t_out) { let newval = match (r_t_in, r_t_out) {
@ -308,8 +348,8 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
OperandValue::Immediate(newval) OperandValue::Immediate(newval)
} }
mir::CastKind::Misc => { // Casts from a fat-ptr. mir::CastKind::Misc => { // Casts from a fat-ptr.
let ll_cast_ty = type_of::arg_type_of(bcx.ccx(), cast_ty); let ll_cast_ty = type_of::immediate_type_of(bcx.ccx(), cast_ty);
let ll_from_ty = type_of::arg_type_of(bcx.ccx(), operand.ty); let ll_from_ty = type_of::immediate_type_of(bcx.ccx(), operand.ty);
if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val { if let OperandValue::FatPtr(data_ptr, meta_ptr) = operand.val {
if common::type_is_fat_ptr(bcx.tcx(), cast_ty) { if common::type_is_fat_ptr(bcx.tcx(), cast_ty) {
let ll_cft = ll_cast_ty.field_types(); let ll_cft = ll_cast_ty.field_types();
@ -423,7 +463,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
let llty = type_of::type_of(bcx.ccx(), content_ty); let llty = type_of::type_of(bcx.ccx(), content_ty);
let llsize = machine::llsize_of(bcx.ccx(), llty); let llsize = machine::llsize_of(bcx.ccx(), llty);
let align = type_of::align_of(bcx.ccx(), content_ty); let align = type_of::align_of(bcx.ccx(), content_ty);
let llalign = common::C_uint(bcx.ccx(), align); let llalign = C_uint(bcx.ccx(), align);
let llty_ptr = llty.ptr_to(); let llty_ptr = llty.ptr_to();
let box_ty = bcx.tcx().mk_box(content_ty); let box_ty = bcx.tcx().mk_box(content_ty);
let mut llval = None; let mut llval = None;
@ -448,7 +488,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
mir::Rvalue::Repeat(..) | mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) | mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } | mir::Rvalue::Slice { .. } |
mir::Rvalue::InlineAsm(..) => { mir::Rvalue::InlineAsm { .. } => {
bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue)); bcx.tcx().sess.bug(&format!("cannot generate operand from rvalue {:?}", rvalue));
} }
} }
@ -511,15 +551,14 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
if use_fmod { if use_fmod {
let f64t = Type::f64(bcx.ccx()); let f64t = Type::f64(bcx.ccx());
let fty = Type::func(&[f64t, f64t], &f64t); let fty = Type::func(&[f64t, f64t], &f64t);
let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty, let llfn = declare::declare_cfn(bcx.ccx(), "fmod", fty);
tcx.types.f64);
if input_ty == tcx.types.f32 { if input_ty == tcx.types.f32 {
let lllhs = bcx.fpext(lhs, f64t); let lllhs = bcx.fpext(lhs, f64t);
let llrhs = bcx.fpext(rhs, f64t); let llrhs = bcx.fpext(rhs, f64t);
let llres = bcx.call(llfn, &[lllhs, llrhs], None, None); let llres = bcx.call(llfn, &[lllhs, llrhs], None);
bcx.fptrunc(llres, Type::f32(bcx.ccx())) bcx.fptrunc(llres, Type::f32(bcx.ccx()))
} else { } else {
bcx.call(llfn, &[lhs, rhs], None, None) bcx.call(llfn, &[lhs, rhs], None)
} }
} else { } else {
bcx.frem(lhs, rhs) bcx.frem(lhs, rhs)
@ -573,7 +612,7 @@ pub fn rvalue_creates_operand<'tcx>(rvalue: &mir::Rvalue<'tcx>) -> bool {
mir::Rvalue::Repeat(..) | mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) | mir::Rvalue::Aggregate(..) |
mir::Rvalue::Slice { .. } | mir::Rvalue::Slice { .. } |
mir::Rvalue::InlineAsm(..) => mir::Rvalue::InlineAsm { .. } =>
false, false,
} }

@ -19,6 +19,7 @@ pub use self::disr::Disr;
#[macro_use] #[macro_use]
mod macros; mod macros;
mod abi;
mod adt; mod adt;
mod asm; mod asm;
mod assert_dep_graph; mod assert_dep_graph;
@ -27,7 +28,6 @@ mod base;
mod basic_block; mod basic_block;
mod build; mod build;
mod builder; mod builder;
mod cabi;
mod cabi_aarch64; mod cabi_aarch64;
mod cabi_arm; mod cabi_arm;
mod cabi_asmjs; mod cabi_asmjs;
@ -49,11 +49,9 @@ mod debuginfo;
mod declare; mod declare;
mod disr; mod disr;
mod expr; mod expr;
mod foreign;
mod glue; mod glue;
mod inline; mod inline;
mod intrinsic; mod intrinsic;
mod llrepr;
mod machine; mod machine;
mod _match; mod _match;
mod meth; mod meth;

@ -17,38 +17,35 @@ use middle::subst;
use middle::subst::{Subst, Substs}; use middle::subst::{Subst, Substs};
use middle::ty::fold::{TypeFolder, TypeFoldable}; use middle::ty::fold::{TypeFolder, TypeFoldable};
use trans::attributes; use trans::attributes;
use trans::base::{trans_enum_variant, push_ctxt, get_item_val}; use trans::base::{push_ctxt};
use trans::base::trans_fn; use trans::base::trans_fn;
use trans::base; use trans::base;
use trans::common::*; use trans::common::*;
use trans::declare; use trans::declare;
use trans::foreign;
use middle::ty::{self, Ty, TyCtxt}; use middle::ty::{self, Ty, TyCtxt};
use trans::Disr; use trans::Disr;
use rustc::front::map as hir_map; use rustc::front::map as hir_map;
use rustc::util::ppaux;
use rustc_front::hir; use rustc_front::hir;
use syntax::abi::Abi;
use syntax::ast;
use syntax::attr; use syntax::attr;
use syntax::errors; use syntax::errors;
use std::fmt;
use std::hash::{Hasher, Hash, SipHasher}; use std::hash::{Hasher, Hash, SipHasher};
pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
fn_id: DefId, fn_id: DefId,
psubsts: &'tcx subst::Substs<'tcx>) psubsts: &'tcx subst::Substs<'tcx>)
-> (ValueRef, Ty<'tcx>, bool) { -> (ValueRef, Ty<'tcx>) {
debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts); debug!("monomorphic_fn(fn_id={:?}, real_substs={:?})", fn_id, psubsts);
assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types()); assert!(!psubsts.types.needs_infer() && !psubsts.types.has_param_types());
// we can only monomorphize things in this crate (or inlined into it)
let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap();
let _icx = push_ctxt("monomorphic_fn"); let _icx = push_ctxt("monomorphic_fn");
let hash_id = MonoId { let instance = Instance {
def: fn_id, def: fn_id,
params: &psubsts.types params: &psubsts.types
}; };
@ -59,41 +56,15 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty); let mono_ty = apply_param_substs(ccx.tcx(), psubsts, &item_ty);
debug!("mono_ty = {:?} (post-substitution)", mono_ty); debug!("mono_ty = {:?} (post-substitution)", mono_ty);
match ccx.monomorphized().borrow().get(&hash_id) { match ccx.instances().borrow().get(&instance) {
Some(&val) => { Some(&val) => {
debug!("leaving monomorphic fn {}", debug!("leaving monomorphic fn {:?}", instance);
ccx.tcx().item_path_str(fn_id)); return (val, mono_ty);
return (val, mono_ty, false);
} }
None => () None => ()
} }
debug!("monomorphic_fn(\ debug!("monomorphic_fn({:?})", instance);
fn_id={:?}, \
psubsts={:?}, \
hash_id={:?})",
fn_id,
psubsts,
hash_id);
let map_node = errors::expect(
ccx.sess().diagnostic(),
ccx.tcx().map.find(fn_node_id),
|| {
format!("while monomorphizing {:?}, couldn't find it in \
the item map (may have attempted to monomorphize \
an item defined in a different crate?)",
fn_id)
});
if let hir_map::NodeForeignItem(_) = map_node {
let abi = ccx.tcx().map.get_foreign_abi(fn_node_id);
if abi != Abi::RustIntrinsic && abi != Abi::PlatformIntrinsic {
// Foreign externs don't have to be monomorphized.
return (get_item_val(ccx, fn_node_id), mono_ty, true);
}
}
ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1); ccx.stats().n_monos.set(ccx.stats().n_monos.get() + 1);
@ -110,8 +81,13 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
// recursively more than thirty times can probably safely be assumed // recursively more than thirty times can probably safely be assumed
// to be causing an infinite expansion. // to be causing an infinite expansion.
if depth > ccx.sess().recursion_limit.get() { if depth > ccx.sess().recursion_limit.get() {
ccx.sess().span_fatal(ccx.tcx().map.span(fn_node_id), let error = format!("reached the recursion limit while instantiating `{}`",
"reached the recursion limit during monomorphization"); instance);
if let Some(id) = ccx.tcx().map.as_local_node_id(fn_id) {
ccx.sess().span_fatal(ccx.tcx().map.span(id), &error);
} else {
ccx.sess().fatal(&error);
}
} }
monomorphizing.insert(fn_id, depth + 1); monomorphizing.insert(fn_id, depth + 1);
@ -120,173 +96,112 @@ pub fn monomorphic_fn<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
let hash; let hash;
let s = { let s = {
let mut state = SipHasher::new(); let mut state = SipHasher::new();
hash_id.hash(&mut state); instance.hash(&mut state);
mono_ty.hash(&mut state); mono_ty.hash(&mut state);
hash = format!("h{}", state.finish()); hash = format!("h{}", state.finish());
let path = ccx.tcx().map.def_path_from_id(fn_node_id); let path = ccx.tcx().map.def_path(fn_id);
exported_name(path, &hash[..]) exported_name(path, &hash[..])
}; };
debug!("monomorphize_fn mangled to {}", s); debug!("monomorphize_fn mangled to {}", s);
assert!(declare::get_defined_value(ccx, &s).is_none());
// This shouldn't need to option dance. // FIXME(nagisa): perhaps needs a more fine grained selection?
let mut hash_id = Some(hash_id); let lldecl = declare::define_internal_fn(ccx, &s, mono_ty);
let mut mk_lldecl = |abi: Abi| { // FIXME(eddyb) Doubt all extern fn should allow unwinding.
let lldecl = if abi != Abi::Rust { attributes::unwind(lldecl, true);
foreign::decl_rust_fn_with_foreign_abi(ccx, mono_ty, &s)
} else {
// FIXME(nagisa): perhaps needs a more fine grained selection? See
// setup_lldecl below.
declare::define_internal_rust_fn(ccx, &s, mono_ty)
};
ccx.monomorphized().borrow_mut().insert(hash_id.take().unwrap(), lldecl); ccx.instances().borrow_mut().insert(instance, lldecl);
lldecl
};
let setup_lldecl = |lldecl, attrs: &[ast::Attribute]| {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
let is_first = !ccx.available_monomorphizations().borrow().contains(&s); // we can only monomorphize things in this crate (or inlined into it)
if is_first { let fn_node_id = ccx.tcx().map.as_local_node_id(fn_id).unwrap();
ccx.available_monomorphizations().borrow_mut().insert(s.clone()); let map_node = errors::expect(
} ccx.sess().diagnostic(),
ccx.tcx().map.find(fn_node_id),
|| {
format!("while instantiating `{}`, couldn't find it in \
the item map (may have attempted to monomorphize \
an item defined in a different crate?)",
instance)
});
match map_node {
hir_map::NodeItem(&hir::Item {
ref attrs, node: hir::ItemFn(ref decl, _, _, _, _, ref body), ..
}) |
hir_map::NodeTraitItem(&hir::TraitItem {
ref attrs, node: hir::MethodTraitItem(
hir::MethodSig { ref decl, .. }, Some(ref body)), ..
}) |
hir_map::NodeImplItem(&hir::ImplItem {
ref attrs, node: hir::ImplItemKind::Method(
hir::MethodSig { ref decl, .. }, ref body), ..
}) => {
base::update_linkage(ccx, lldecl, None, base::OriginalTranslation);
attributes::from_fn_attrs(ccx, attrs, lldecl);
let trans_everywhere = attr::requests_inline(attrs); let is_first = !ccx.available_monomorphizations().borrow().contains(&s);
if trans_everywhere && !is_first { if is_first {
llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage); ccx.available_monomorphizations().borrow_mut().insert(s.clone());
} }
// If `true`, then `lldecl` should be given a function body. let trans_everywhere = attr::requests_inline(attrs);
// Otherwise, it should be left as a declaration of an external if trans_everywhere && !is_first {
// function, with no definition in the current compilation unit. llvm::SetLinkage(lldecl, llvm::AvailableExternallyLinkage);
trans_everywhere || is_first }
};
let lldecl = match map_node { if trans_everywhere || is_first {
hir_map::NodeItem(i) => { trans_fn(ccx, decl, body, lldecl, psubsts, fn_node_id);
match *i {
hir::Item {
node: hir::ItemFn(ref decl, _, _, abi, _, ref body),
..
} => {
let d = mk_lldecl(abi);
let needs_body = setup_lldecl(d, &i.attrs);
if needs_body {
if abi != Abi::Rust {
foreign::trans_rust_fn_with_foreign_abi(
ccx, &decl, &body, &[], d, psubsts, fn_node_id,
Some(&hash[..]));
} else {
trans_fn(ccx,
&decl,
&body,
d,
psubsts,
fn_node_id,
&i.attrs);
}
}
d
}
_ => {
ccx.sess().bug("Can't monomorphize this kind of item")
}
} }
} }
hir_map::NodeVariant(v) => {
let variant = inlined_variant_def(ccx, fn_node_id); hir_map::NodeVariant(_) | hir_map::NodeStructCtor(_) => {
assert_eq!(v.node.name, variant.name); let disr = match map_node {
let d = mk_lldecl(Abi::Rust); hir_map::NodeVariant(_) => {
attributes::inline(d, attributes::InlineAttr::Hint); Disr::from(inlined_variant_def(ccx, fn_node_id).disr_val)
trans_enum_variant(ccx, fn_node_id, Disr::from(variant.disr_val), psubsts, d);
d
}
hir_map::NodeImplItem(impl_item) => {
match impl_item.node {
hir::ImplItemKind::Method(ref sig, ref body) => {
let d = mk_lldecl(Abi::Rust);
let needs_body = setup_lldecl(d, &impl_item.attrs);
if needs_body {
trans_fn(ccx,
&sig.decl,
body,
d,
psubsts,
impl_item.id,
&impl_item.attrs);
}
d
} }
_ => { hir_map::NodeStructCtor(_) => Disr(0),
ccx.sess().bug(&format!("can't monomorphize a {:?}", _ => unreachable!()
map_node)) };
} attributes::inline(lldecl, attributes::InlineAttr::Hint);
} base::trans_ctor_shim(ccx, fn_node_id, disr, psubsts, lldecl);
}
hir_map::NodeTraitItem(trait_item) => {
match trait_item.node {
hir::MethodTraitItem(ref sig, Some(ref body)) => {
let d = mk_lldecl(Abi::Rust);
let needs_body = setup_lldecl(d, &trait_item.attrs);
if needs_body {
trans_fn(ccx,
&sig.decl,
body,
d,
psubsts,
trait_item.id,
&trait_item.attrs);
}
d
}
_ => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}
}
hir_map::NodeStructCtor(struct_def) => {
let d = mk_lldecl(Abi::Rust);
attributes::inline(d, attributes::InlineAttr::Hint);
if struct_def.is_struct() {
panic!("ast-mapped struct didn't have a ctor id")
}
base::trans_tuple_struct(ccx,
struct_def.id(),
psubsts,
d);
d
} }
// Ugh -- but this ensures any new variants won't be forgotten _ => unreachable!("can't monomorphize a {:?}", map_node)
hir_map::NodeForeignItem(..) |
hir_map::NodeLifetime(..) |
hir_map::NodeTyParam(..) |
hir_map::NodeExpr(..) |
hir_map::NodeStmt(..) |
hir_map::NodeBlock(..) |
hir_map::NodePat(..) |
hir_map::NodeLocal(..) => {
ccx.sess().bug(&format!("can't monomorphize a {:?}",
map_node))
}
}; };
ccx.monomorphizing().borrow_mut().insert(fn_id, depth); ccx.monomorphizing().borrow_mut().insert(fn_id, depth);
debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id)); debug!("leaving monomorphic fn {}", ccx.tcx().item_path_str(fn_id));
(lldecl, mono_ty, true) (lldecl, mono_ty)
} }
#[derive(PartialEq, Eq, Hash, Debug)] #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
pub struct MonoId<'tcx> { pub struct Instance<'tcx> {
pub def: DefId, pub def: DefId,
pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>> pub params: &'tcx subst::VecPerParamSpace<Ty<'tcx>>
} }
impl<'tcx> fmt::Display for Instance<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let substs = Substs {
types: self.params.clone(),
regions: subst::ErasedRegions
};
ppaux::parameterized(f, &substs, self.def, ppaux::Ns::Value, &[],
|tcx| tcx.lookup_item_type(self.def).generics)
}
}
impl<'tcx> Instance<'tcx> {
pub fn mono(tcx: &TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
Instance {
def: def_id,
params: &tcx.mk_substs(Substs::trans_empty()).types
}
}
}
/// Monomorphizes a type from the AST by first applying the in-scope /// Monomorphizes a type from the AST by first applying the in-scope
/// substitutions and then normalizing any associated types. /// substitutions and then normalizing any associated types.
pub fn apply_param_substs<'tcx,T>(tcx: &TyCtxt<'tcx>, pub fn apply_param_substs<'tcx,T>(tcx: &TyCtxt<'tcx>,
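The monomorphization rewrite above keys the cache on an Instance (a DefId plus its substituted type parameters) and returns any previously translated symbol before declaring a new one. A standalone sketch of that memoisation scheme (DefId, Params and the symbol format are simplified stand-ins, not the real compiler types):

use std::collections::HashMap;

// Simplified stand-ins for DefId and the substituted type parameters.
type DefId = u32;
type Params = Vec<&'static str>;

#[derive(Clone, PartialEq, Eq, Hash, Debug)]
struct Instance {
    def: DefId,
    params: Params,
}

struct Ctxt {
    instances: HashMap<Instance, String>, // Instance -> mangled symbol
    n_monos: usize,
}

impl Ctxt {
    // Return the cached symbol for this instance, translating it only once.
    fn monomorphic_fn(&mut self, def: DefId, params: Params) -> String {
        let instance = Instance { def: def, params: params };
        if let Some(sym) = self.instances.get(&instance) {
            return sym.clone();
        }
        self.n_monos += 1;
        let sym = format!("_ZN4inst{}h{}E", instance.def, self.n_monos);
        self.instances.insert(instance, sym.clone());
        sym
    }
}

fn main() {
    let mut cx = Ctxt { instances: HashMap::new(), n_monos: 0 };
    let a = cx.monomorphic_fn(7, vec!["i32"]);
    let b = cx.monomorphic_fn(7, vec!["i32"]); // cache hit, no new translation
    assert_eq!(a, b);
    assert_eq!(cx.n_monos, 1);
    println!("{}", a);
}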

@ -26,6 +26,7 @@ use trans::expr;
use trans::machine::llsize_of_alloc; use trans::machine::llsize_of_alloc;
use trans::type_::Type; use trans::type_::Type;
use trans::type_of; use trans::type_of;
use trans::value::Value;
use middle::ty::{self, Ty}; use middle::ty::{self, Ty};
use rustc_front::hir; use rustc_front::hir;
@ -33,20 +34,12 @@ use rustc_front::hir;
use syntax::ast; use syntax::ast;
use syntax::parse::token::InternedString; use syntax::parse::token::InternedString;
#[derive(Copy, Clone)] #[derive(Copy, Clone, Debug)]
struct VecTypes<'tcx> { struct VecTypes<'tcx> {
unit_ty: Ty<'tcx>, unit_ty: Ty<'tcx>,
llunit_ty: Type llunit_ty: Type
} }
impl<'tcx> VecTypes<'tcx> {
pub fn to_string<'a>(&self, ccx: &CrateContext<'a, 'tcx>) -> String {
format!("VecTypes {{unit_ty={}, llunit_ty={}}}",
self.unit_ty,
ccx.tn().type_to_string(self.llunit_ty))
}
}
pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>, pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
expr: &hir::Expr, expr: &hir::Expr,
dest: expr::Dest) dest: expr::Dest)
@ -58,8 +51,7 @@ pub fn trans_fixed_vstore<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// to store the array of the suitable size, so all we have to do is // to store the array of the suitable size, so all we have to do is
// generate the content. // generate the content.
debug!("trans_fixed_vstore(expr={:?}, dest={})", debug!("trans_fixed_vstore(expr={:?}, dest={:?})", expr, dest);
expr, dest.to_string(bcx.ccx()));
let vt = vec_types_from_expr(bcx, expr); let vt = vec_types_from_expr(bcx, expr);
@ -82,7 +74,6 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
content_expr: &hir::Expr) content_expr: &hir::Expr)
-> DatumBlock<'blk, 'tcx, Expr> { -> DatumBlock<'blk, 'tcx, Expr> {
let fcx = bcx.fcx; let fcx = bcx.fcx;
let ccx = fcx.ccx;
let mut bcx = bcx; let mut bcx = bcx;
debug!("trans_slice_vec(slice_expr={:?})", debug!("trans_slice_vec(slice_expr={:?})",
@ -105,7 +96,7 @@ pub fn trans_slice_vec<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
// Handle the &[...] case: // Handle the &[...] case:
let vt = vec_types_from_expr(bcx, content_expr); let vt = vec_types_from_expr(bcx, content_expr);
let count = elements_required(bcx, content_expr); let count = elements_required(bcx, content_expr);
debug!(" vt={}, count={}", vt.to_string(ccx), count); debug!(" vt={:?}, count={}", vt, count);
let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count); let fixed_ty = bcx.tcx().mk_array(vt.unit_ty, count);
@ -144,9 +135,7 @@ pub fn trans_lit_str<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
str_lit: InternedString, str_lit: InternedString,
dest: Dest) dest: Dest)
-> Block<'blk, 'tcx> { -> Block<'blk, 'tcx> {
debug!("trans_lit_str(lit_expr={:?}, dest={})", debug!("trans_lit_str(lit_expr={:?}, dest={:?})", lit_expr, dest);
lit_expr,
dest.to_string(bcx.ccx()));
match dest { match dest {
Ignore => bcx, Ignore => bcx,
@ -172,10 +161,8 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let fcx = bcx.fcx; let fcx = bcx.fcx;
let mut bcx = bcx; let mut bcx = bcx;
debug!("write_content(vt={}, dest={}, vstore_expr={:?})", debug!("write_content(vt={:?}, dest={:?}, vstore_expr={:?})",
vt.to_string(bcx.ccx()), vt, dest, vstore_expr);
dest.to_string(bcx.ccx()),
vstore_expr);
match content_expr.node { match content_expr.node {
hir::ExprLit(ref lit) => { hir::ExprLit(ref lit) => {
@ -187,11 +174,9 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let bytes = s.len(); let bytes = s.len();
let llbytes = C_uint(bcx.ccx(), bytes); let llbytes = C_uint(bcx.ccx(), bytes);
let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false); let llcstr = C_cstr(bcx.ccx(), (*s).clone(), false);
base::call_memcpy(bcx, if !bcx.unreachable.get() {
lldest, base::call_memcpy(&B(bcx), lldest, llcstr, llbytes, 1);
llcstr, }
llbytes,
1);
return bcx; return bcx;
} }
} }
@ -214,8 +199,8 @@ fn write_content<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
let temp_scope = fcx.push_custom_cleanup_scope(); let temp_scope = fcx.push_custom_cleanup_scope();
for (i, element) in elements.iter().enumerate() { for (i, element) in elements.iter().enumerate() {
let lleltptr = GEPi(bcx, lldest, &[i]); let lleltptr = GEPi(bcx, lldest, &[i]);
debug!("writing index {} with lleltptr={}", debug!("writing index {} with lleltptr={:?}",
i, bcx.val_to_string(lleltptr)); i, Value(lleltptr));
bcx = expr::trans_into(bcx, &element, bcx = expr::trans_into(bcx, &element,
SaveIn(lleltptr)); SaveIn(lleltptr));
let scope = cleanup::CustomScope(temp_scope); let scope = cleanup::CustomScope(temp_scope);

@ -11,7 +11,7 @@
#![allow(non_upper_case_globals)] #![allow(non_upper_case_globals)]
use llvm; use llvm;
use llvm::{TypeRef, Bool, False, True, TypeKind, ValueRef}; use llvm::{TypeRef, Bool, False, True, TypeKind};
use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128}; use llvm::{Float, Double, X86_FP80, PPC_FP128, FP128};
use trans::context::CrateContext; use trans::context::CrateContext;
@ -20,18 +20,27 @@ use util::nodemap::FnvHashMap;
use syntax::ast; use syntax::ast;
use std::ffi::CString; use std::ffi::CString;
use std::fmt;
use std::mem; use std::mem;
use std::ptr; use std::ptr;
use std::cell::RefCell; use std::cell::RefCell;
use libc::c_uint; use libc::c_uint;
#[derive(Clone, Copy, PartialEq, Debug)] #[derive(Clone, Copy, PartialEq)]
#[repr(C)] #[repr(C)]
pub struct Type { pub struct Type {
rf: TypeRef rf: TypeRef
} }
impl fmt::Debug for Type {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&llvm::build_string(|s| unsafe {
llvm::LLVMWriteTypeToString(self.to_ref(), s);
}).expect("non-UTF8 type description from LLVM"))
}
}
macro_rules! ty { macro_rules! ty {
($e:expr) => ( Type::from_ref(unsafe { $e })) ($e:expr) => ( Type::from_ref(unsafe { $e }))
} }
@ -50,12 +59,6 @@ impl Type {
self.rf self.rf
} }
pub fn to_string(self: Type) -> String {
llvm::build_string(|s| unsafe {
llvm::LLVMWriteTypeToString(self.to_ref(), s);
}).expect("non-UTF8 type description from LLVM")
}
pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] { pub fn to_ref_slice(slice: &[Type]) -> &[TypeRef] {
unsafe { mem::transmute(slice) } unsafe { mem::transmute(slice) }
} }
@ -180,10 +183,6 @@ impl Type {
Type::struct_(ccx, &[], false) Type::struct_(ccx, &[], false)
} }
pub fn glue_fn(ccx: &CrateContext, t: Type) -> Type {
Type::func(&[t], &Type::void(ccx))
}
pub fn array(ty: &Type, len: u64) -> Type { pub fn array(ty: &Type, len: u64) -> Type {
ty!(llvm::LLVMRustArrayType(ty.to_ref(), len)) ty!(llvm::LLVMRustArrayType(ty.to_ref(), len))
} }
@ -203,7 +202,7 @@ impl Type {
} }
pub fn vtable_ptr(ccx: &CrateContext) -> Type { pub fn vtable_ptr(ccx: &CrateContext) -> Type {
Type::glue_fn(ccx, Type::i8p(ccx)).ptr_to().ptr_to() Type::func(&[Type::i8p(ccx)], &Type::void(ccx)).ptr_to().ptr_to()
} }
pub fn kind(&self) -> TypeKind { pub fn kind(&self) -> TypeKind {
@ -301,7 +300,6 @@ impl Type {
} }
} }
/* Memory-managed object interface to type handles. */ /* Memory-managed object interface to type handles. */
pub struct TypeNames { pub struct TypeNames {
@ -323,19 +321,4 @@ impl TypeNames {
pub fn find_type(&self, s: &str) -> Option<Type> { pub fn find_type(&self, s: &str) -> Option<Type> {
self.named_types.borrow().get(s).map(|x| Type::from_ref(*x)) self.named_types.borrow().get(s).map(|x| Type::from_ref(*x))
} }
pub fn type_to_string(&self, ty: Type) -> String {
ty.to_string()
}
pub fn types_to_str(&self, tys: &[Type]) -> String {
let strs: Vec<String> = tys.iter().map(|t| self.type_to_string(*t)).collect();
format!("[{}]", strs.join(","))
}
pub fn val_to_string(&self, val: ValueRef) -> String {
llvm::build_string(|s| unsafe {
llvm::LLVMWriteValueToString(val, s);
}).expect("nun-UTF8 value description from LLVM")
}
} }

@ -13,15 +13,14 @@
use middle::def_id::DefId; use middle::def_id::DefId;
use middle::infer; use middle::infer;
use middle::subst; use middle::subst;
use trans::abi::FnType;
use trans::adt; use trans::adt;
use trans::common::*; use trans::common::*;
use trans::foreign;
use trans::machine; use trans::machine;
use middle::ty::{self, Ty, TypeFoldable}; use middle::ty::{self, Ty, TypeFoldable};
use trans::type_::Type; use trans::type_::Type;
use syntax::abi::Abi;
use syntax::ast; use syntax::ast;
// LLVM doesn't like objects that are too big. Issue #17913 // LLVM doesn't like objects that are too big. Issue #17913
@ -36,120 +35,6 @@ fn ensure_array_fits_in_address_space<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
} }
} }
pub fn arg_is_indirect<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
arg_ty: Ty<'tcx>) -> bool {
!type_is_immediate(ccx, arg_ty) && !type_is_fat_ptr(ccx.tcx(), arg_ty)
}
pub fn return_uses_outptr<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
ty: Ty<'tcx>) -> bool {
arg_is_indirect(ccx, ty)
}
pub fn type_of_explicit_arg<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
arg_ty: Ty<'tcx>) -> Type {
let llty = arg_type_of(ccx, arg_ty);
if arg_is_indirect(ccx, arg_ty) {
llty.ptr_to()
} else {
llty
}
}
/// Yields the types of the "real" arguments for a function using the `RustCall`
/// ABI by untupling the arguments of the function.
pub fn untuple_arguments<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>,
inputs: &[Ty<'tcx>])
-> Vec<Ty<'tcx>> {
if inputs.is_empty() {
return Vec::new()
}
let mut result = Vec::new();
for (i, &arg_prior_to_tuple) in inputs.iter().enumerate() {
if i < inputs.len() - 1 {
result.push(arg_prior_to_tuple);
}
}
match inputs[inputs.len() - 1].sty {
ty::TyTuple(ref tupled_arguments) => {
debug!("untuple_arguments(): untupling arguments");
for &tupled_argument in tupled_arguments {
result.push(tupled_argument);
}
}
_ => {
ccx.tcx().sess.bug("argument to function with \"rust-call\" ABI \
is neither a tuple nor unit")
}
}
result
}
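To make the removed doc comment concrete: a `"rust-call"` function nominally receives its call arguments as one trailing tuple, and the untupling above flattens that tuple into separate parameters. The sketch below is purely illustrative (hypothetical helper name, string stand-ins for types), not rustc code.

```rust
// Illustrative only: mimic the shape of the "rust-call" untupling on a
// made-up string representation of a signature.
fn lowered_signature(prior: &[&str], tupled: &[&str]) -> Vec<String> {
    // Keep every argument before the tuple, then splice in the tuple's fields.
    prior.iter().chain(tupled).map(|s| s.to_string()).collect()
}

fn main() {
    // A "rust-call" fn declared as fn(&self, args: (i32, f64)) is treated
    // as if it took &self, i32 and f64 as three separate parameters.
    let args = lowered_signature(&["&self"], &["i32", "f64"]);
    assert_eq!(args, ["&self", "i32", "f64"]);
}
```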
pub fn type_of_rust_fn<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>,
llenvironment_type: Option<Type>,
sig: &ty::FnSig<'tcx>,
abi: Abi)
-> Type
{
debug!("type_of_rust_fn(sig={:?},abi={:?})",
sig,
abi);
assert!(!sig.variadic); // rust fns are never variadic
let mut atys: Vec<Type> = Vec::new();
// First, munge the inputs, if this has the `rust-call` ABI.
let inputs_temp;
let inputs = if abi == Abi::RustCall {
inputs_temp = untuple_arguments(cx, &sig.inputs);
&inputs_temp
} else {
&sig.inputs
};
// Arg 0: Output pointer.
// (if the output type is non-immediate)
let lloutputtype = match sig.output {
ty::FnConverging(output) => {
let use_out_pointer = return_uses_outptr(cx, output);
let lloutputtype = arg_type_of(cx, output);
// Use the output as the actual return value if it's immediate.
if use_out_pointer {
atys.push(lloutputtype.ptr_to());
Type::void(cx)
} else if return_type_is_void(cx, output) {
Type::void(cx)
} else {
lloutputtype
}
}
ty::FnDiverging => Type::void(cx)
};
// Arg 1: Environment
match llenvironment_type {
None => {}
Some(llenvironment_type) => atys.push(llenvironment_type),
}
// ... then explicit args.
for input in inputs {
let arg_ty = type_of_explicit_arg(cx, input);
if type_is_fat_ptr(cx.tcx(), input) {
atys.extend(arg_ty.field_types());
} else {
atys.push(arg_ty);
}
}
Type::func(&atys[..], &lloutputtype)
}
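The comments in the removed `type_of_rust_fn` describe the out-pointer convention: when the return value is not immediate, the lowered function takes a hidden pointer argument and returns void. A minimal sketch of that shape, assuming made-up names (`Big`, `make_lowered`), follows; it is not the rustc implementation.

```rust
use std::mem::MaybeUninit;

// Hypothetical aggregate large enough that it would not be returned by value.
struct Big([u64; 8]);

// Source-level signature:  fn make() -> Big
// Lowered shape (sketch):  fn make(out: *mut Big)  -- returns void, writes via `out`.
unsafe fn make_lowered(out: *mut Big) {
    *out = Big([0; 8]);
}

fn main() {
    // The caller provides the slot the callee writes into.
    let mut slot = MaybeUninit::<Big>::uninit();
    unsafe { make_lowered(slot.as_mut_ptr()) };
    let big = unsafe { slot.assume_init() };
    assert_eq!(big.0[0], 0);
}
```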
// A "sizing type" is an LLVM type, the size and alignment of which are // A "sizing type" is an LLVM type, the size and alignment of which are
// guaranteed to be equivalent to what you would get out of `type_of()`. It's // guaranteed to be equivalent to what you would get out of `type_of()`. It's
// useful because: // useful because:
@ -171,7 +56,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
let llsizingty = match t.sty { let llsizingty = match t.sty {
_ if !type_is_sized(cx.tcx(), t) => { _ if !type_is_sized(cx.tcx(), t) => {
Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, t)], false)
} }
ty::TyBool => Type::bool(cx), ty::TyBool => Type::bool(cx),
@ -186,7 +71,7 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
if type_is_sized(cx.tcx(), ty) { if type_is_sized(cx.tcx(), ty) {
Type::i8p(cx) Type::i8p(cx)
} else { } else {
Type::struct_(cx, &[Type::i8p(cx), Type::i8p(cx)], false) Type::struct_(cx, &[Type::i8p(cx), unsized_info_ty(cx, ty)], false)
} }
} }
@ -234,32 +119,27 @@ pub fn sizing_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Typ
ty::TySlice(_) | ty::TyTrait(..) | ty::TyStr => unreachable!() ty::TySlice(_) | ty::TyTrait(..) | ty::TyStr => unreachable!()
}; };
debug!("--> mapped t={:?} to llsizingty={}", debug!("--> mapped t={:?} to llsizingty={:?}", t, llsizingty);
t,
cx.tn().type_to_string(llsizingty));
cx.llsizingtypes().borrow_mut().insert(t, llsizingty); cx.llsizingtypes().borrow_mut().insert(t, llsizingty);
llsizingty llsizingty
} }
pub fn foreign_arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { fn unsized_info_ty<'a, 'tcx>(ccx: &CrateContext<'a, 'tcx>, ty: Ty<'tcx>) -> Type {
if t.is_bool() { let unsized_part = ccx.tcx().struct_tail(ty);
Type::i1(cx) match unsized_part.sty {
} else { ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
type_of(cx, t) Type::uint_from_ty(ccx, ast::UintTy::Us)
}
ty::TyTrait(_) => Type::vtable_ptr(ccx),
_ => unreachable!("Unexpected tail in unsized_info_ty: {:?} for ty={:?}",
unsized_part, ty)
} }
} }
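The new `unsized_info_ty` helper computes the second word of a fat pointer: a `usize` length for slices and `str`, and a vtable pointer for trait objects. The layout can be checked in plain Rust, as in the sketch below (which uses modern `dyn` syntax rather than the 2016-era style of this diff).

```rust
use std::fmt::Debug;
use std::mem::size_of;

fn main() {
    // Thin references are one word; references to unsized types carry a
    // second word of metadata, matching the two arms of unsized_info_ty.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());      // length
    assert_eq!(size_of::<&str>(), 2 * size_of::<usize>());       // length
    assert_eq!(size_of::<&dyn Debug>(), 2 * size_of::<usize>()); // vtable ptr
}
```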
pub fn arg_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type { pub fn immediate_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) -> Type {
if t.is_bool() { if t.is_bool() {
Type::i1(cx) Type::i1(cx)
} else if type_is_immediate(cx, t) && type_of(cx, t).is_aggregate() {
// We want to pass small aggregates as immediate values, but using an aggregate LLVM type
// for this leads to bad optimizations, so its arg type is an appropriately sized integer
match machine::llsize_of_alloc(cx, sizing_type_of(cx, t)) {
0 => type_of(cx, t),
n => Type::ix(cx, n * 8),
}
} else { } else {
type_of(cx, t) type_of(cx, t)
} }
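For reference, the comment in the removed `arg_type_of` describes the old trick of passing a small immediate aggregate as an integer of `size * 8` bits. A hypothetical sketch of just that rule (the names are made up; this is not rustc's API):

```rust
// Sketch of the removed rule: an immediate aggregate of `size_bytes` bytes
// was passed as an integer of size_bytes * 8 bits; zero-sized values kept
// their original type.
fn immediate_aggregate_arg(size_bytes: u64) -> String {
    match size_bytes {
        0 => "original type".to_string(),
        n => format!("i{}", n * 8),
    }
}

fn main() {
    assert_eq!(immediate_aggregate_arg(3), "i24"); // e.g. a (u8, u8, u8) tuple
    assert_eq!(immediate_aggregate_arg(8), "i64");
}
```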
@ -314,12 +194,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
if t != t_norm { if t != t_norm {
let llty = in_memory_type_of(cx, t_norm); let llty = in_memory_type_of(cx, t_norm);
debug!("--> normalized {:?} {:?} to {:?} {:?} llty={}", debug!("--> normalized {:?} to {:?} llty={:?}", t, t_norm, llty);
t,
t,
t_norm,
t_norm,
cx.tn().type_to_string(llty));
cx.lltypes().borrow_mut().insert(t, llty); cx.lltypes().borrow_mut().insert(t, llty);
return llty; return llty;
} }
@ -361,16 +236,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
cx.tn().find_type("str_slice").unwrap() cx.tn().find_type("str_slice").unwrap()
} else { } else {
let ptr_ty = in_memory_type_of(cx, ty).ptr_to(); let ptr_ty = in_memory_type_of(cx, ty).ptr_to();
let unsized_part = cx.tcx().struct_tail(ty); let info_ty = unsized_info_ty(cx, ty);
let info_ty = match unsized_part.sty {
ty::TyStr | ty::TyArray(..) | ty::TySlice(_) => {
Type::uint_from_ty(cx, ast::UintTy::Us)
}
ty::TyTrait(_) => Type::vtable_ptr(cx),
_ => panic!("Unexpected type returned from \
struct_tail: {:?} for ty={:?}",
unsized_part, ty)
};
Type::struct_(cx, &[ptr_ty, info_ty], false) Type::struct_(cx, &[ptr_ty, info_ty], false)
} }
} else { } else {
@ -398,13 +264,9 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
ty::TyFnDef(..) => Type::nil(cx), ty::TyFnDef(..) => Type::nil(cx),
ty::TyFnPtr(f) => { ty::TyFnPtr(f) => {
if f.abi == Abi::Rust || f.abi == Abi::RustCall { let sig = cx.tcx().erase_late_bound_regions(&f.sig);
let sig = cx.tcx().erase_late_bound_regions(&f.sig); let sig = infer::normalize_associated_type(cx.tcx(), &sig);
let sig = infer::normalize_associated_type(cx.tcx(), &sig); FnType::new(cx, f.abi, &sig, &[]).llvm_type(cx).ptr_to()
type_of_rust_fn(cx, None, &sig, f.abi).ptr_to()
} else {
foreign::lltype_for_foreign_fn(cx, t).ptr_to()
}
} }
ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx), ty::TyTuple(ref tys) if tys.is_empty() => Type::nil(cx),
ty::TyTuple(..) => { ty::TyTuple(..) => {
@ -440,9 +302,7 @@ pub fn in_memory_type_of<'a, 'tcx>(cx: &CrateContext<'a, 'tcx>, t: Ty<'tcx>) ->
ty::TyError => cx.sess().bug("type_of with TyError"), ty::TyError => cx.sess().bug("type_of with TyError"),
}; };
debug!("--> mapped t={:?} to llty={}", debug!("--> mapped t={:?} to llty={:?}", t, llty);
t,
cx.tn().type_to_string(llty));
cx.lltypes().borrow_mut().insert(t, llty); cx.lltypes().borrow_mut().insert(t, llty);
View file
@ -12,11 +12,22 @@ use llvm;
use llvm::{UseRef, ValueRef}; use llvm::{UseRef, ValueRef};
use trans::basic_block::BasicBlock; use trans::basic_block::BasicBlock;
use trans::common::Block; use trans::common::Block;
use std::fmt;
use libc::c_uint; use libc::c_uint;
#[derive(Copy, Clone)] #[derive(Copy, Clone, PartialEq)]
pub struct Value(pub ValueRef); pub struct Value(pub ValueRef);
impl fmt::Debug for Value {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(&llvm::build_string(|s| unsafe {
llvm::LLVMWriteValueToString(self.0, s);
}).expect("nun-UTF8 value description from LLVM"))
}
}
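The new `Debug` impl asks LLVM to print the value into a string via `build_string`. Below is a minimal self-contained sketch of the same pattern, assuming nothing about the real `llvm` bindings; the buffer-collecting helper and the stand-in `Value` type are hypothetical.

```rust
use std::fmt;

// Minimal stand-in for llvm::build_string: collect whatever a callback
// writes into a buffer and hand it back as a String.
fn build_string<F: FnOnce(&mut String)>(f: F) -> Option<String> {
    let mut buf = String::new();
    f(&mut buf);
    Some(buf)
}

// Hypothetical stand-in for the ValueRef newtype in the diff.
struct Value(&'static str);

impl fmt::Debug for Value {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let desc = self.0;
        // Mirror the diff: format by writing the "printed" form into a buffer.
        f.write_str(&build_string(|s| s.push_str(desc))
            .expect("non-UTF8 value description"))
    }
}

fn main() {
    println!("{:?}", Value("%sum = add i32 %a, %b"));
}
```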
macro_rules! opt_val { ($e:expr) => ( macro_rules! opt_val { ($e:expr) => (
unsafe { unsafe {
match $e { match $e {
View file
@ -3416,12 +3416,12 @@ fn check_expr_with_expectation_and_lvalue_pref<'a, 'tcx>(fcx: &FnCtxt<'a, 'tcx>,
fcx.add_wf_bounds(&item_substs.substs, expr); fcx.add_wf_bounds(&item_substs.substs, expr);
}); });
} }
hir::ExprInlineAsm(ref ia) => { hir::ExprInlineAsm(_, ref outputs, ref inputs) => {
for &(_, ref input) in &ia.inputs { for output in outputs {
check_expr(fcx, &input); check_expr(fcx, output);
} }
for out in &ia.outputs { for input in inputs {
check_expr(fcx, &out.expr); check_expr(fcx, input);
} }
fcx.write_nil(id); fcx.write_nil(id);
} }
View file
@ -2155,7 +2155,7 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>(
let input_tys = decl.inputs let input_tys = decl.inputs
.iter() .iter()
.map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None)) .map(|a| ty_of_arg(&ccx.icx(ast_generics), &rb, a, None))
.collect(); .collect::<Vec<_>>();
let output = match decl.output { let output = match decl.output {
hir::Return(ref ty) => hir::Return(ref ty) =>
@ -2166,6 +2166,29 @@ fn compute_type_scheme_of_foreign_fn_decl<'a, 'tcx>(
ty::FnDiverging ty::FnDiverging
}; };
// feature gate SIMD types in FFI, since I (huonw) am not sure the
// ABIs are handled at all correctly.
if abi != abi::Abi::RustIntrinsic && abi != abi::Abi::PlatformIntrinsic
&& !ccx.tcx.sess.features.borrow().simd_ffi {
let check = |ast_ty: &hir::Ty, ty: ty::Ty| {
if ty.is_simd() {
ccx.tcx.sess.struct_span_err(ast_ty.span,
&format!("use of SIMD type `{}` in FFI is highly experimental and \
may result in invalid code",
pprust::ty_to_string(ast_ty)))
.fileline_help(ast_ty.span,
"add #![feature(simd_ffi)] to the crate attributes to enable")
.emit();
}
};
for (input, ty) in decl.inputs.iter().zip(&input_tys) {
check(&input.ty, ty)
}
if let hir::Return(ref ty) = decl.output {
check(&ty, output.unwrap())
}
}
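To illustrate what this gate rejects, here is a hypothetical, nightly-only sketch (`F32x4` and `process` are made-up names; exact SIMD-type syntax varies between compiler versions). Without `simd_ffi`, the FFI declaration below triggers the error added in this hunk.

```rust
#![feature(repr_simd, simd_ffi)]

// Made-up SIMD type; on a current nightly, repr(simd) types wrap an array.
#[repr(simd)]
#[derive(Copy, Clone)]
struct F32x4([f32; 4]);

extern "C" {
    // Using a SIMD type in an FFI signature is what the feature gate guards.
    fn process(v: F32x4) -> F32x4;
}

fn main() {
    // Declaration only; nothing is called, so no external symbol is needed.
}
```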
let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics)); let substs = ccx.tcx.mk_substs(mk_item_substs(ccx, &ty_generics));
let t_fn = ccx.tcx.mk_fn_def(id, substs, ty::BareFnTy { let t_fn = ccx.tcx.mk_fn_def(id, substs, ty::BareFnTy {
abi: abi, abi: abi,
Some files were not shown because too many files have changed in this diff.