// rust/src/comp/middle/trans.rs
// trans.rs: Translate the completed AST to the LLVM IR.
//
// Some functions here, such as trans_block and trans_expr, return a value --
// the result of the translation to LLVM -- while others, such as trans_fn,
// trans_obj, and trans_item, are called only for the side effect of adding a
// particular definition to the LLVM IR output we're producing.
//
// Hopefully useful general knowledge about trans:
//
// * There's no way to find out the ty::t type of a ValueRef. Doing so
// would be "trying to get the eggs out of an omelette" (credit:
// pcwalton). You can, instead, find out its TypeRef by calling val_ty,
// but many TypeRefs correspond to one ty::t; for instance, tup(int, int,
// int) and rec(x=int, y=int, z=int) will have the same TypeRef.
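//
// For instance (an illustrative sketch, using the T_int() constructor
// defined below, which is currently i32): both
//
//     tup(int, int, int)
//     rec(x=int, y=int, z=int)
//
// lower to the LLVM struct type { i32, i32, i32 }, so the mapping from
// TypeRef back to ty::t is many-to-one.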
import std::int;
import std::str;
import std::uint;
import std::vec;
import std::str::rustrt::sbuf;
import std::vec::rustrt::vbuf;
import std::map;
import std::map::hashmap;
import std::option;
import std::option::some;
import std::option::none;
import std::fs;
import syntax::ast;
import syntax::walk;
import driver::session;
import middle::ty;
import back::link;
import back::x86;
import back::abi;
import back::upcall;
import middle::ty::pat_ty;
import syntax::visit;
import visit::vt;
import util::common;
import util::common::new_def_hash;
import std::map::new_int_hash;
import std::map::new_str_hash;
import util::common::local_rhs_span;
import syntax::codemap::span;
import lib::llvm::llvm;
import lib::llvm::builder;
import lib::llvm::target_data;
import lib::llvm::type_handle;
import lib::llvm::type_names;
import lib::llvm::mk_target_data;
import lib::llvm::mk_type_handle;
import lib::llvm::mk_type_names;
import lib::llvm::llvm::ModuleRef;
import lib::llvm::llvm::ValueRef;
import lib::llvm::llvm::TypeRef;
import lib::llvm::llvm::TypeHandleRef;
import lib::llvm::llvm::BuilderRef;
import lib::llvm::llvm::BasicBlockRef;
import lib::llvm::False;
import lib::llvm::True;
import lib::llvm::Bool;
import link::mangle_internal_name_by_type_only;
import link::mangle_internal_name_by_seq;
import link::mangle_internal_name_by_path;
import link::mangle_internal_name_by_path_and_seq;
import link::mangle_exported_name;
import metadata::tyencode;
import metadata::creader;
import metadata::decoder;
import util::ppaux::ty_to_str;
import util::ppaux::ty_to_short_str;
import syntax::print::pprust::expr_to_str;
import syntax::print::pprust::path_to_str;
obj namegen(mutable int i) {
fn next(str prefix) -> str { i += 1; ret prefix + int::str(i); }
}
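// Usage sketch: assuming a namegen is created with i == 0, successive
// calls like names.next("str") yield "str1", "str2", and so on -- see
// C_cstr below, which uses this to give string globals distinct names.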
type derived_tydesc_info = rec(ValueRef lltydesc, bool escapes);
type glue_fns = rec(ValueRef no_op_type_glue);
type tydesc_info =
rec(ty::t ty,
ValueRef tydesc,
ValueRef size,
ValueRef align,
mutable option::t[ValueRef] copy_glue,
mutable option::t[ValueRef] drop_glue,
mutable option::t[ValueRef] free_glue,
mutable option::t[ValueRef] cmp_glue,
vec[uint] ty_params);
/*
* A note on nomenclature of linking: "upcall", "extern" and "native".
*
* An "extern" is an LLVM symbol we wind up emitting an undefined external
* reference to. This means "we don't have the thing in this compilation unit,
* please make sure you link it in at runtime". This could be a reference to
* C code found in a C library, or rust code found in a rust crate.
*
* A "native" is an extern that references C code. Called with cdecl.
*
* An upcall is a native call generated by the compiler (not corresponding to
* any user-written call in the code) into librustrt, to perform some helper
* task such as bringing a task to life, allocating memory, etc.
*
*/
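// As a concrete instance of an upcall (taken from trans_raw_malloc
// below): allocating a box emits a compiler-generated call through the
// upcall table, roughly
//
//     cx.build.Call(cx.fcx.lcx.ccx.upcalls.malloc,
//                   [cx.fcx.lltaskptr, llsize, tydesc]);
//
// No user-written call in the source corresponds to this instruction.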
type stats =
rec(mutable uint n_static_tydescs,
mutable uint n_derived_tydescs,
mutable uint n_glues_created,
mutable uint n_null_glues,
mutable uint n_real_glues);
// Crate context. Every crate we compile has one of these.
type crate_ctxt =
rec(session::session sess,
ModuleRef llmod,
target_data td,
type_names tn,
hashmap[str, ValueRef] externs,
hashmap[str, ValueRef] intrinsics,
// A mapping from the node_id of each item in this crate to the address
// of the first instruction of the item's definition in the executable
// we're generating.
hashmap[ast::node_id, ValueRef] item_ids,
ast_map::map ast_map,
hashmap[ast::node_id, str] item_symbols,
mutable option::t[ValueRef] main_fn,
link::link_meta link_meta,
// TODO: hashmap[tup(tag_id,subtys), @tag_info]
hashmap[ty::t, uint] tag_sizes,
hashmap[ast::node_id, ValueRef] discrims,
hashmap[ast::node_id, str] discrim_symbols,
hashmap[ast::node_id, ValueRef] fn_pairs,
hashmap[ast::node_id, ValueRef] consts,
hashmap[ast::node_id, ()] obj_methods,
hashmap[ty::t, @tydesc_info] tydescs,
hashmap[str, ValueRef] module_data,
hashmap[ty::t, TypeRef] lltypes,
@glue_fns glues,
namegen names,
std::sha1::sha1 sha,
hashmap[ty::t, str] type_sha1s,
hashmap[ty::t, tyencode::ty_abbrev] type_abbrevs,
hashmap[ty::t, str] type_short_names,
ty::ctxt tcx,
stats stats,
@upcall::upcalls upcalls);
type local_ctxt =
rec(vec[str] path,
vec[str] module_path,
vec[ast::ty_param] obj_typarams,
vec[ast::obj_field] obj_fields,
@crate_ctxt ccx);
// Types used for llself.
type val_self_pair = rec(ValueRef v, ty::t t);
type ty_self_pair = tup(TypeRef, ty::t);
// Function context. Every LLVM function we create will have one of these.
type fn_ctxt =
rec(
// The ValueRef returned from a call to llvm::LLVMAddFunction; the
// address of the first instruction in the sequence of instructions
// for this function that will go in the .text section of the
// executable we're generating.
ValueRef llfn,
// The three implicit arguments that arrive in the function we're
// creating. For instance, foo(int, int) is really foo(ret*, task*,
// env*, int, int). These are also available via
// llvm::LLVMGetParam(llfn, uint) where uint = 1, 2, 0 respectively,
// but we unpack them into these fields for convenience.
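// Sketch of the convention (the LLVM-level names here are
// illustrative, not emitted symbols): a Rust-level
//
//     fn foo(int a, int b) -> int
//
// is declared as roughly
//
//     void @foo(int* ret, task* task, env* env, int a, int b)
//
// and the next three fields cache params 1, 2, and 0 of llfn.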
// Points to the current task.
ValueRef lltaskptr,
// Points to the current environment (bindings of variables to
// values), if this is a regular function; points to the current
// object, if this is a method.
ValueRef llenv,
// Points to where the return value of this function should end up.
ValueRef llretptr,
// The next five elements: "hoisted basic blocks" containing
// administrative activities that have to happen in only one place in
// the function, due to LLVM's quirks.
// A block for all the function's static allocas, so that LLVM will
// coalesce them into a single alloca call.
mutable BasicBlockRef llstaticallocas,
// A block containing code that copies incoming arguments to space
// already allocated by code in one of the llallocas blocks. (LLVM
// requires that arguments be copied to local allocas before allowing
// most any operation to be performed on them.)
mutable BasicBlockRef llcopyargs,
// The first block containing derived tydescs received from the
// runtime. See description of derived_tydescs, below.
mutable BasicBlockRef llderivedtydescs_first,
// The last block of the llderivedtydescs group.
mutable BasicBlockRef llderivedtydescs,
// A block for all of the dynamically sized allocas. This must be
// after llderivedtydescs, because these sometimes depend on
// information computed from derived tydescs.
mutable BasicBlockRef lldynamicallocas,
// FIXME: Is llcopyargs actually the block containing the allocas for
// incoming function arguments? Or is it merely the block containing
// code that copies incoming args to space already alloca'd by code in
// llallocas?
// The 'self' object currently in use in this function, if there is
// one.
mutable option::t[val_self_pair] llself,
// If this function is actually an iter, a block containing the code
// called whenever the iter calls 'put'.
mutable option::t[ValueRef] lliterbody,
// The next four items: hash tables mapping from AST node_ids to
// LLVM-stuff-in-the-frame.
// Maps arguments to allocas created for them in llallocas.
hashmap[ast::node_id, ValueRef] llargs,
// Maps fields in objects to pointers into the interior of llself's
// body.
hashmap[ast::node_id, ValueRef] llobjfields,
// Maps the node_ids of local variables to the allocas created for
// them in llallocas.
hashmap[ast::node_id, ValueRef] lllocals,
// The same as above, but for variables accessed via the frame pointer
// we pass into an iter, for access to the static environment of the
// iter-calling frame.
hashmap[ast::node_id, ValueRef] llupvars,
// For convenience, a vector of the incoming tydescs for each of this
// function's type parameters, fetched via llvm::LLVMGetParam. For
// example, for a function foo[A, B, C](), lltydescs contains the
// ValueRefs for the tydescs for A, B, and C.
mutable vec[ValueRef] lltydescs,
// Derived tydescs are tydescs created at runtime, for types that
// involve type parameters inside type constructors. For example,
// suppose a function parameterized by T creates a vector of type
// vec[T]. The function doesn't know what T is until runtime, and the
// function's caller knows T but doesn't know that a vector is
// involved. So a tydesc for vec[T] can't be created until runtime,
// when information about both "vec" and "T" are available. When such
// a tydesc is created, we cache it in the derived_tydescs table for
// the next time that such a tydesc is needed.
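// Sketch: in a function f[T] that builds a vec[T], f receives T's
// tydesc as an implicit argument (it lands in lltydescs above), and
// the translated body calls into the runtime to derive a tydesc for
// vec[T] from the "vec" shape plus T's tydesc, caching the result
// here for reuse.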
hashmap[ty::t, derived_tydesc_info] derived_tydescs,
// The source span where this function comes from, for error
// reporting.
span sp,
// This function's enclosing local context.
@local_ctxt lcx);
tag cleanup { clean(fn(&@block_ctxt) -> result ); }
tag block_kind {
// A scope block is a basic block created by translating a block { ... }
// in the source language. Since these blocks create variable scope, any
// variables created in them that are still live at the end of the block
// must be dropped and cleaned up when the block ends.
SCOPE_BLOCK;
// A basic block created from the body of a loop. Contains pointers to
// which block to jump to in the case of "continue" or "break", with the
// "continue" block optional, because "while" and "do while" don't support
// "continue" (TODO: is this intentional?)
LOOP_SCOPE_BLOCK(option::t[@block_ctxt], @block_ctxt);
// A non-scope block is a basic block created as a translation artifact
// from translating code that expresses conditional logic rather than by
// explicit { ... } block structure in the source language. It's called a
// non-scope block because it doesn't introduce a new variable scope.
NON_SCOPE_BLOCK;
}
// Basic block context. We create a block context for each basic block
// (single-entry, single-exit sequence of instructions) we generate from Rust
// code. Each basic block we generate is attached to a function, typically
// with many basic blocks per function. All the basic blocks attached to a
// function are organized as a directed graph.
type block_ctxt =
rec(
// The BasicBlockRef returned from a call to
// llvm::LLVMAppendBasicBlock(llfn, name), which adds a basic block to
// the function pointed to by llfn. We insert instructions into that
// block by way of this block context.
BasicBlockRef llbb,
// The llvm::builder object serving as an interface to LLVM's
// LLVMBuild* functions.
builder build,
// The block pointing to this one in the function's digraph.
block_parent parent,
// The 'kind' of basic block this is.
block_kind kind,
// A list of functions that run at the end of translating this block,
// cleaning up any variables that were introduced in the block and
// need to go out of scope at the end of it.
mutable vec[cleanup] cleanups,
// The source span where this block comes from, for error reporting.
span sp,
// The function context for the function to which this block is
// attached.
@fn_ctxt fcx);
// FIXME: we should be able to use option::t[@block_parent] here but
// the infinite-tag check in rustboot gets upset.
tag block_parent { parent_none; parent_some(@block_ctxt); }
type result = rec(@block_ctxt bcx, ValueRef val);
type result_t = rec(@block_ctxt bcx, ValueRef val, ty::t ty);
fn extend_path(@local_ctxt cx, &str name) -> @local_ctxt {
ret @rec(path=cx.path + [name] with *cx);
}
fn rslt(@block_ctxt bcx, ValueRef val) -> result {
ret rec(bcx=bcx, val=val);
}
fn ty_str(type_names tn, TypeRef t) -> str {
ret lib::llvm::type_to_str(tn, t);
}
fn val_ty(ValueRef v) -> TypeRef { ret llvm::LLVMTypeOf(v); }
fn val_str(type_names tn, ValueRef v) -> str { ret ty_str(tn, val_ty(v)); }
// Returns the nth element of the given LLVM structure type.
fn struct_elt(TypeRef llstructty, uint n) -> TypeRef {
auto elt_count = llvm::LLVMCountStructElementTypes(llstructty);
assert (n < elt_count);
auto elt_tys = vec::init_elt(T_nil(), elt_count);
llvm::LLVMGetStructElementTypes(llstructty, vec::buf(elt_tys));
ret llvm::LLVMGetElementType(elt_tys.(n));
}
// LLVM type constructors.
fn T_void() -> TypeRef {
// Note: For the time being llvm is kinda busted here, it has the notion
// of a 'void' type that can only occur as part of the signature of a
// function, but no general unit type of 0-sized value. This is, afaict,
// vestigial from its C heritage, and we'll be attempting to submit a
// patch upstream to fix it. In the meantime we only model function
// outputs (Rust functions and C functions) using T_void, and model the
// Rust general purpose nil type you can construct as 1-bit (always
// zero). This makes the result incorrect for now -- things like a tuple
// of 10 nil values will have 10-bit size -- but it doesn't seem like we
// have any other options until it's fixed upstream.
ret llvm::LLVMVoidType();
}
fn T_nil() -> TypeRef {
// NB: See above in T_void().
ret llvm::LLVMInt1Type();
}
fn T_i1() -> TypeRef { ret llvm::LLVMInt1Type(); }
fn T_i8() -> TypeRef { ret llvm::LLVMInt8Type(); }
fn T_i16() -> TypeRef { ret llvm::LLVMInt16Type(); }
fn T_i32() -> TypeRef { ret llvm::LLVMInt32Type(); }
fn T_i64() -> TypeRef { ret llvm::LLVMInt64Type(); }
fn T_f32() -> TypeRef { ret llvm::LLVMFloatType(); }
fn T_f64() -> TypeRef { ret llvm::LLVMDoubleType(); }
fn T_bool() -> TypeRef { ret T_i1(); }
fn T_int() -> TypeRef {
// FIXME: switch on target type.
ret T_i32();
}
fn T_float() -> TypeRef {
// FIXME: switch on target type.
ret T_f64();
}
fn T_char() -> TypeRef { ret T_i32(); }
fn T_size_t() -> TypeRef {
// FIXME: switch on target type.
ret T_i32();
}
fn T_fn(vec[TypeRef] inputs, TypeRef output) -> TypeRef {
ret llvm::LLVMFunctionType(output, vec::buf[TypeRef](inputs),
vec::len[TypeRef](inputs), False);
}
fn T_fn_pair(&type_names tn, TypeRef tfn) -> TypeRef {
ret T_struct([T_ptr(tfn), T_opaque_closure_ptr(tn)]);
}
fn T_ptr(TypeRef t) -> TypeRef { ret llvm::LLVMPointerType(t, 0u); }
fn T_struct(&vec[TypeRef] elts) -> TypeRef {
ret llvm::LLVMStructType(vec::buf[TypeRef](elts), vec::len[TypeRef](elts),
False);
}
fn T_opaque() -> TypeRef { ret llvm::LLVMOpaqueType(); }
fn T_task(&type_names tn) -> TypeRef {
auto s = "task";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t =
T_struct([T_int(), // Refcount
T_int(), // Delegate pointer
T_int(), // Stack segment pointer
T_int(), // Runtime SP
T_int(), // Rust SP
T_int(), // GC chain
T_int(), // Domain pointer
// Crate cache pointer
T_int()]);
tn.associate(s, t);
ret t;
}
fn T_tydesc_field(&type_names tn, int field) -> TypeRef {
// Bit of a kludge: pick the fn typeref out of the tydesc.
let vec[TypeRef] tydesc_elts =
vec::init_elt[TypeRef](T_nil(), abi::n_tydesc_fields as uint);
llvm::LLVMGetStructElementTypes(T_tydesc(tn),
vec::buf[TypeRef](tydesc_elts));
auto t = llvm::LLVMGetElementType(tydesc_elts.(field));
ret t;
}
fn T_glue_fn(&type_names tn) -> TypeRef {
auto s = "glue_fn";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_tydesc_field(tn, abi::tydesc_field_drop_glue);
tn.associate(s, t);
ret t;
}
fn T_dtor(&@crate_ctxt ccx, &span sp, TypeRef llself_ty) -> TypeRef {
ret type_of_fn_full(ccx, sp, ast::proto_fn, some[TypeRef](llself_ty),
~[], ty::mk_nil(ccx.tcx), 0u);
}
fn T_cmp_glue_fn(&type_names tn) -> TypeRef {
auto s = "cmp_glue_fn";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_tydesc_field(tn, abi::tydesc_field_cmp_glue);
tn.associate(s, t);
ret t;
}
fn T_tydesc(&type_names tn) -> TypeRef {
auto s = "tydesc";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto th = mk_type_handle();
auto abs_tydesc = llvm::LLVMResolveTypeHandle(th.llth);
auto tydescpp = T_ptr(T_ptr(abs_tydesc));
auto pvoid = T_ptr(T_i8());
auto glue_fn_ty =
T_ptr(T_fn([T_ptr(T_nil()), T_taskptr(tn), T_ptr(T_nil()), tydescpp,
pvoid], T_void()));
auto cmp_glue_fn_ty =
T_ptr(T_fn([T_ptr(T_i1()), T_taskptr(tn), T_ptr(T_nil()), tydescpp,
pvoid, pvoid, T_i8()], T_void()));
auto tydesc =
T_struct([tydescpp, // first_param
T_int(), // size
T_int(), // align
glue_fn_ty, // copy_glue
glue_fn_ty, // drop_glue
glue_fn_ty, // free_glue
glue_fn_ty, // sever_glue
glue_fn_ty, // mark_glue
glue_fn_ty, // obj_drop_glue
glue_fn_ty, // is_stateful
cmp_glue_fn_ty]); // cmp_glue
llvm::LLVMRefineType(abs_tydesc, tydesc);
auto t = llvm::LLVMResolveTypeHandle(th.llth);
tn.associate(s, t);
ret t;
}
fn T_array(TypeRef t, uint n) -> TypeRef { ret llvm::LLVMArrayType(t, n); }
fn T_vec(TypeRef t) -> TypeRef {
ret T_struct([T_int(), // Refcount
T_int(), // Alloc
T_int(), // Fill
T_int(), // Pad
// Body elements
T_array(t, 0u)]);
}
fn T_opaque_vec_ptr() -> TypeRef { ret T_ptr(T_vec(T_int())); }
// Interior vector.
//
// TODO: Support user-defined vector sizes.
fn T_ivec(TypeRef t) -> TypeRef {
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(t, abi::ivec_default_length)]); // Body elements
}
// Note that the size of this one is in bytes.
fn T_opaque_ivec() -> TypeRef {
ret T_struct([T_int(), // Length ("fill"; if zero, heapified)
T_int(), // Alloc
T_array(T_i8(), 0u)]); // Body elements
}
fn T_ivec_heap_part(TypeRef t) -> TypeRef {
ret T_struct([T_int(), // Real length
T_array(t, 0u)]); // Body elements
}
// Interior vector on the heap, also known as the "stub". Cast to this when
// the length (first element of T_ivec above) is zero, indicating that the
// vector has been heapified.
fn T_ivec_heap(TypeRef t) -> TypeRef {
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_ivec_heap_part(t))]); // Pointer
}
fn T_opaque_ivec_heap_part() -> TypeRef {
ret T_struct([T_int(), // Real length
T_array(T_i8(), 0u)]); // Body elements
}
fn T_opaque_ivec_heap() -> TypeRef {
ret T_struct([T_int(), // Length (zero)
T_int(), // Alloc
T_ptr(T_opaque_ivec_heap_part())]); // Pointer
}
fn T_str() -> TypeRef { ret T_vec(T_i8()); }
fn T_box(TypeRef t) -> TypeRef { ret T_struct([T_int(), t]); }
fn T_port(TypeRef t) -> TypeRef {
ret T_struct([T_int()]); // Refcount
}
fn T_chan(TypeRef t) -> TypeRef {
ret T_struct([T_int()]); // Refcount
}
fn T_taskptr(&type_names tn) -> TypeRef { ret T_ptr(T_task(tn)); }
// This type must never be used directly; it must always be cast away.
fn T_typaram(&type_names tn) -> TypeRef {
auto s = "typaram";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_i8();
tn.associate(s, t);
ret t;
}
fn T_typaram_ptr(&type_names tn) -> TypeRef { ret T_ptr(T_typaram(tn)); }
fn T_closure_ptr(&type_names tn, TypeRef lltarget_ty, TypeRef llbindings_ty,
uint n_ty_params) -> TypeRef {
// NB: keep this in sync with code in trans_bind; we're making
// an LLVM typeref structure that has the same "shape" as the ty::t
// it constructs.
ret T_ptr(T_box(T_struct([T_ptr(T_tydesc(tn)), lltarget_ty, llbindings_ty,
T_captured_tydescs(tn, n_ty_params)])));
}
fn T_opaque_closure_ptr(&type_names tn) -> TypeRef {
auto s = "*closure";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t =
T_closure_ptr(tn, T_struct([T_ptr(T_nil()), T_ptr(T_nil())]), T_nil(),
0u);
tn.associate(s, t);
ret t;
}
fn T_tag(&type_names tn, uint size) -> TypeRef {
auto s = "tag_" + uint::to_str(size, 10u);
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_struct([T_int(), T_array(T_i8(), size)]);
tn.associate(s, t);
ret t;
}
fn T_opaque_tag(&type_names tn) -> TypeRef {
auto s = "opaque_tag";
if (tn.name_has_type(s)) { ret tn.get_type(s); }
auto t = T_struct([T_int(), T_i8()]);
tn.associate(s, t);
ret t;
}
fn T_opaque_tag_ptr(&type_names tn) -> TypeRef {
ret T_ptr(T_opaque_tag(tn));
}
fn T_captured_tydescs(&type_names tn, uint n) -> TypeRef {
ret T_struct(vec::init_elt[TypeRef](T_ptr(T_tydesc(tn)), n));
}
fn T_obj_ptr(&type_names tn, uint n_captured_tydescs) -> TypeRef {
// This function is not publicly exposed because it returns an incomplete
// type. The dynamically-sized fields follow the captured tydescs.
fn T_obj(type_names tn, uint n_captured_tydescs) -> TypeRef {
ret T_struct([T_ptr(T_tydesc(tn)),
T_captured_tydescs(tn, n_captured_tydescs)]);
}
ret T_ptr(T_box(T_obj(tn, n_captured_tydescs)));
}
fn T_opaque_obj_ptr(&type_names tn) -> TypeRef { ret T_obj_ptr(tn, 0u); }
fn T_opaque_port_ptr() -> TypeRef { ret T_ptr(T_i8()); }
fn T_opaque_chan_ptr() -> TypeRef { ret T_ptr(T_i8()); }
// This function now fails if called on a type with dynamic size (as its
// return value was always meaningless in that case anyhow). Beware!
//
// TODO: Enforce via a predicate.
fn type_of(&@crate_ctxt cx, &span sp, &ty::t t) -> TypeRef {
if (ty::type_has_dynamic_size(cx.tcx, t)) {
cx.sess.span_fatal(sp,
"type_of() called on a type with dynamic size: " +
ty_to_str(cx.tcx, t));
}
ret type_of_inner(cx, sp, t);
}
fn type_of_explicit_args(&@crate_ctxt cx, &span sp, &ty::arg[] inputs) ->
vec[TypeRef] {
let vec[TypeRef] atys = [];
for (ty::arg arg in inputs) {
if (ty::type_has_dynamic_size(cx.tcx, arg.ty)) {
assert (arg.mode != ty::mo_val);
atys += [T_typaram_ptr(cx.tn)];
} else {
let TypeRef t;
alt (arg.mode) {
case (ty::mo_alias(_)) {
t = T_ptr(type_of_inner(cx, sp, arg.ty));
}
case (_) { t = type_of_inner(cx, sp, arg.ty); }
}
atys += [t];
}
}
ret atys;
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
fn type_of_fn_full(&@crate_ctxt cx, &span sp, ast::proto proto,
&option::t[TypeRef] obj_self, &ty::arg[] inputs,
&ty::t output, uint ty_param_count) -> TypeRef {
let vec[TypeRef] atys = [];
// Arg 0: Output pointer.
if (ty::type_has_dynamic_size(cx.tcx, output)) {
atys += [T_typaram_ptr(cx.tn)];
} else { atys += [T_ptr(type_of_inner(cx, sp, output))]; }
// Arg 1: task pointer.
atys += [T_taskptr(cx.tn)];
// Arg 2: Env (closure-bindings / self-obj)
alt (obj_self) {
case (some(?t)) { assert (t as int != 0); atys += [t]; }
case (_) { atys += [T_opaque_closure_ptr(cx.tn)]; }
}
// Args 3 and up: ty params, if not acquired via capture...
if (obj_self == none[TypeRef]) {
auto i = 0u;
while (i < ty_param_count) {
atys += [T_ptr(T_tydesc(cx.tn))];
i += 1u;
}
}
if (proto == ast::proto_iter) {
// If it's an iter, the 'output' type of the iter is actually the
// *input* type of the function we're given as our iter-block
// argument.
atys +=
[T_fn_pair(cx.tn,
type_of_fn_full(cx, sp, ast::proto_fn, none[TypeRef],
~[rec(mode=ty::mo_alias(false),
ty=output)], ty::mk_nil(cx.tcx),
0u))];
}
// ... then explicit args.
atys += type_of_explicit_args(cx, sp, inputs);
ret T_fn(atys, llvm::LLVMVoidType());
}
fn type_of_fn(&@crate_ctxt cx, &span sp, ast::proto proto,
&ty::arg[] inputs, &ty::t output, uint ty_param_count) ->
TypeRef {
ret type_of_fn_full(cx, sp, proto, none[TypeRef], inputs, output,
ty_param_count);
}
fn type_of_native_fn(&@crate_ctxt cx, &span sp, ast::native_abi abi,
&ty::arg[] inputs, &ty::t output, uint ty_param_count)
-> TypeRef {
let vec[TypeRef] atys = [];
if (abi == ast::native_abi_rust) {
atys += [T_taskptr(cx.tn)];
auto i = 0u;
while (i < ty_param_count) {
atys += [T_ptr(T_tydesc(cx.tn))];
i += 1u;
}
}
atys += type_of_explicit_args(cx, sp, inputs);
ret T_fn(atys, type_of_inner(cx, sp, output));
}
fn type_of_inner(&@crate_ctxt cx, &span sp, &ty::t t) -> TypeRef {
// Check the cache.
if (cx.lltypes.contains_key(t)) { ret cx.lltypes.get(t); }
let TypeRef llty = 0 as TypeRef;
alt (ty::struct(cx.tcx, t)) {
case (ty::ty_native(_)) { llty = T_ptr(T_i8()); }
case (ty::ty_nil) { llty = T_nil(); }
case (ty::ty_bot) {
llty = T_nil(); /* ...I guess? */
}
case (ty::ty_bool) { llty = T_bool(); }
case (ty::ty_int) { llty = T_int(); }
case (ty::ty_float) { llty = T_float(); }
case (ty::ty_uint) { llty = T_int(); }
case (ty::ty_machine(?tm)) {
alt (tm) {
case (ast::ty_i8) { llty = T_i8(); }
case (ast::ty_u8) { llty = T_i8(); }
case (ast::ty_i16) { llty = T_i16(); }
case (ast::ty_u16) { llty = T_i16(); }
case (ast::ty_i32) { llty = T_i32(); }
case (ast::ty_u32) { llty = T_i32(); }
case (ast::ty_i64) { llty = T_i64(); }
case (ast::ty_u64) { llty = T_i64(); }
case (ast::ty_f32) { llty = T_f32(); }
case (ast::ty_f64) { llty = T_f64(); }
}
}
case (ty::ty_char) { llty = T_char(); }
case (ty::ty_str) { llty = T_ptr(T_str()); }
case (ty::ty_istr) { llty = T_ivec(T_i8()); }
case (ty::ty_tag(?did, _)) { llty = type_of_tag(cx, sp, did, t); }
case (ty::ty_box(?mt)) {
llty = T_ptr(T_box(type_of_inner(cx, sp, mt.ty)));
}
case (ty::ty_vec(?mt)) {
llty = T_ptr(T_vec(type_of_inner(cx, sp, mt.ty)));
}
case (ty::ty_ivec(?mt)) {
if (ty::type_has_dynamic_size(cx.tcx, mt.ty)) {
llty = T_opaque_ivec();
} else { llty = T_ivec(type_of_inner(cx, sp, mt.ty)); }
}
case (ty::ty_ptr(?mt)) { llty = T_ptr(type_of_inner(cx, sp, mt.ty)); }
case (ty::ty_port(?t)) {
llty = T_ptr(T_port(type_of_inner(cx, sp, t)));
}
case (ty::ty_chan(?t)) {
llty = T_ptr(T_chan(type_of_inner(cx, sp, t)));
}
case (ty::ty_task) { llty = T_taskptr(cx.tn); }
case (ty::ty_tup(?elts)) {
let vec[TypeRef] tys = [];
for (ty::mt elt in elts) {
tys += [type_of_inner(cx, sp, elt.ty)];
}
llty = T_struct(tys);
}
case (ty::ty_rec(?fields)) {
let vec[TypeRef] tys = [];
for (ty::field f in fields) {
tys += [type_of_inner(cx, sp, f.mt.ty)];
}
llty = T_struct(tys);
}
case (ty::ty_fn(?proto, ?args, ?out, _, _)) {
llty = T_fn_pair(cx.tn, type_of_fn(cx, sp, proto, args, out, 0u));
}
case (ty::ty_native_fn(?abi, ?args, ?out)) {
auto nft = native_fn_wrapper_type(cx, sp, 0u, t);
llty = T_fn_pair(cx.tn, nft);
}
case (ty::ty_obj(?meths)) {
auto th = mk_type_handle();
auto self_ty = llvm::LLVMResolveTypeHandle(th.llth);
let vec[TypeRef] mtys = [T_ptr(T_i8())];
for (ty::method m in meths) {
let TypeRef mty =
type_of_fn_full(cx, sp, m.proto, some[TypeRef](self_ty),
m.inputs, m.output, 0u);
mtys += [T_ptr(mty)];
}
let TypeRef vtbl = T_struct(mtys);
let TypeRef pair =
T_struct([T_ptr(vtbl), T_opaque_obj_ptr(cx.tn)]);
auto abs_pair = llvm::LLVMResolveTypeHandle(th.llth);
llvm::LLVMRefineType(abs_pair, pair);
abs_pair = llvm::LLVMResolveTypeHandle(th.llth);
llty = abs_pair;
}
case (ty::ty_res(_, ?sub, ?tps)) {
auto sub1 = ty::substitute_type_params(cx.tcx, tps, sub);
ret T_struct([T_i32(), type_of_inner(cx, sp, sub1)]);
}
case (ty::ty_var(_)) {
cx.tcx.sess.span_fatal(sp, "trans::type_of called on ty_var");
}
case (ty::ty_param(_)) { llty = T_i8(); }
case (ty::ty_type) { llty = T_ptr(T_tydesc(cx.tn)); }
}
assert (llty as int != 0);
if (cx.sess.get_opts().save_temps) {
llvm::LLVMAddTypeName(cx.llmod, str::buf(ty_to_short_str(cx.tcx, t)),
llty);
}
cx.lltypes.insert(t, llty);
ret llty;
}
fn type_of_tag(&@crate_ctxt cx, &span sp, &ast::def_id did, &ty::t t)
-> TypeRef {
auto degen = vec::len(ty::tag_variants(cx.tcx, did)) == 1u;
if (ty::type_has_dynamic_size(cx.tcx, t)) {
if (degen) { ret T_i8(); }
else { ret T_opaque_tag(cx.tn); }
} else {
auto size = static_size_of_tag(cx, sp, t);
if (!degen) { ret T_tag(cx.tn, size); }
// LLVM does not like 0-size arrays, apparently
if (size == 0u) { size = 1u; }
ret T_array(T_i8(), size);
}
}
fn type_of_arg(@local_ctxt cx, &span sp, &ty::arg arg) -> TypeRef {
alt (ty::struct(cx.ccx.tcx, arg.ty)) {
case (ty::ty_param(_)) {
if (arg.mode != ty::mo_val) { ret T_typaram_ptr(cx.ccx.tn); }
}
case (_) {
// fall through
}
}
auto typ;
if (arg.mode != ty::mo_val) {
typ = T_ptr(type_of_inner(cx.ccx, sp, arg.ty));
} else { typ = type_of_inner(cx.ccx, sp, arg.ty); }
ret typ;
}
fn type_of_ty_param_count_and_ty(@local_ctxt lcx, &span sp,
&ty::ty_param_count_and_ty tpt) -> TypeRef {
alt (ty::struct(lcx.ccx.tcx, tpt._1)) {
case (ty::ty_fn(?proto, ?inputs, ?output, _, _)) {
auto llfnty =
type_of_fn(lcx.ccx, sp, proto, inputs, output, tpt._0);
ret T_fn_pair(lcx.ccx.tn, llfnty);
}
case (_) {
// fall through
}
}
ret type_of(lcx.ccx, sp, tpt._1);
}
fn type_of_or_i8(&@block_ctxt bcx, ty::t typ) -> TypeRef {
if (ty::type_has_dynamic_size(bcx.fcx.lcx.ccx.tcx, typ)) { ret T_i8(); }
ret type_of(bcx.fcx.lcx.ccx, bcx.sp, typ);
}
// Name sanitation. LLVM will happily accept identifiers with weird names, but
// gas doesn't!
fn sanitize(&str s) -> str {
auto result = "";
for (u8 c in s) {
if (c == '@' as u8) {
result += "boxed_";
} else {
if (c == ',' as u8) {
result += "_";
} else {
if (c == '{' as u8 || c == '(' as u8) {
result += "_of_";
} else {
if (c != 10u8 && c != '}' as u8 && c != ')' as u8 &&
c != ' ' as u8 && c != '\t' as u8 &&
c != ';' as u8) {
auto v = [c];
result += str::from_bytes(v);
}
}
}
}
}
ret result;
}
// LLVM constant constructors.
fn C_null(TypeRef t) -> ValueRef { ret llvm::LLVMConstNull(t); }
fn C_integral(TypeRef t, uint u, Bool sign_extend) -> ValueRef {
// FIXME: We can't use LLVM::ULongLong with our existing minimal native
// API, which only knows word-sized args.
//
// ret llvm::LLVMConstInt(T_int(), t as LLVM::ULongLong, False);
//
ret llvm::LLVMRustConstSmallInt(t, u, sign_extend);
}
fn C_float(&str s) -> ValueRef {
ret llvm::LLVMConstRealOfString(T_float(), str::buf(s));
}
fn C_floating(&str s, TypeRef t) -> ValueRef {
ret llvm::LLVMConstRealOfString(t, str::buf(s));
}
fn C_nil() -> ValueRef {
// NB: See comment above in T_void().
ret C_integral(T_i1(), 0u, False);
}
fn C_bool(bool b) -> ValueRef {
if (b) {
ret C_integral(T_bool(), 1u, False);
} else { ret C_integral(T_bool(), 0u, False); }
}
fn C_int(int i) -> ValueRef { ret C_integral(T_int(), i as uint, True); }
fn C_uint(uint i) -> ValueRef { ret C_integral(T_int(), i, False); }
fn C_u8(uint i) -> ValueRef { ret C_integral(T_i8(), i, False); }
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
fn C_cstr(&@crate_ctxt cx, &str s) -> ValueRef {
auto sc = llvm::LLVMConstString(str::buf(s), str::byte_len(s), False);
auto g =
llvm::LLVMAddGlobal(cx.llmod, val_ty(sc),
str::buf(cx.names.next("str")));
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMSetLinkage(g, lib::llvm::LLVMInternalLinkage as llvm::Linkage);
ret g;
}
// A rust boxed-and-length-annotated string.
fn C_str(&@crate_ctxt cx, &str s) -> ValueRef {
auto len = str::byte_len(s);
auto box =
C_struct([C_int(abi::const_refcount as int),
C_int(len + 1u as int), // 'alloc'
C_int(len + 1u as int), // 'fill'
C_int(0), // 'pad'
llvm::LLVMConstString(str::buf(s), len, False)]);
auto g =
llvm::LLVMAddGlobal(cx.llmod, val_ty(box),
str::buf(cx.names.next("str")));
llvm::LLVMSetInitializer(g, box);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMSetLinkage(g, lib::llvm::LLVMInternalLinkage as llvm::Linkage);
ret llvm::LLVMConstPointerCast(g, T_ptr(T_str()));
}
// Returns a Plain Old LLVM String:
fn C_postr(&str s) -> ValueRef {
ret llvm::LLVMConstString(str::buf(s), str::byte_len(s), False);
}
fn C_zero_byte_arr(uint size) -> ValueRef {
auto i = 0u;
let vec[ValueRef] elts = [];
while (i < size) { elts += [C_u8(0u)]; i += 1u; }
ret llvm::LLVMConstArray(T_i8(), vec::buf[ValueRef](elts),
vec::len[ValueRef](elts));
}
fn C_struct(&vec[ValueRef] elts) -> ValueRef {
ret llvm::LLVMConstStruct(vec::buf[ValueRef](elts),
vec::len[ValueRef](elts), False);
}
fn C_array(TypeRef ty, &vec[ValueRef] elts) -> ValueRef {
ret llvm::LLVMConstArray(ty, vec::buf[ValueRef](elts),
vec::len[ValueRef](elts));
}
fn decl_fn(ModuleRef llmod, &str name, uint cc, TypeRef llty) -> ValueRef {
let ValueRef llfn = llvm::LLVMAddFunction(llmod, str::buf(name), llty);
llvm::LLVMSetFunctionCallConv(llfn, cc);
ret llfn;
}
fn decl_cdecl_fn(ModuleRef llmod, &str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib::llvm::LLVMCCallConv, llty);
}
fn decl_fastcall_fn(ModuleRef llmod, &str name, TypeRef llty) -> ValueRef {
ret decl_fn(llmod, name, lib::llvm::LLVMFastCallConv, llty);
}
// Only use this if you are going to actually define the function. It's
// not valid to simply declare a function as internal.
fn decl_internal_fastcall_fn(ModuleRef llmod, &str name, TypeRef llty) ->
ValueRef {
auto llfn = decl_fn(llmod, name, lib::llvm::LLVMFastCallConv, llty);
llvm::LLVMSetLinkage(llfn,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
ret llfn;
}
fn decl_glue(ModuleRef llmod, type_names tn, &str s) -> ValueRef {
ret decl_cdecl_fn(llmod, s, T_fn([T_taskptr(tn)], T_void()));
}
fn get_extern_fn(&hashmap[str, ValueRef] externs, ModuleRef llmod, &str name,
uint cc, TypeRef ty) -> ValueRef {
if (externs.contains_key(name)) { ret externs.get(name); }
auto f = decl_fn(llmod, name, cc, ty);
externs.insert(name, f);
ret f;
}
fn get_extern_const(&hashmap[str, ValueRef] externs, ModuleRef llmod,
&str name, TypeRef ty) -> ValueRef {
if (externs.contains_key(name)) { ret externs.get(name); }
auto c = llvm::LLVMAddGlobal(llmod, ty, str::buf(name));
externs.insert(name, c);
ret c;
}
fn get_simple_extern_fn(&hashmap[str, ValueRef] externs, ModuleRef llmod,
&str name, int n_args) -> ValueRef {
auto inputs = vec::init_elt[TypeRef](T_int(), n_args as uint);
auto output = T_int();
auto t = T_fn(inputs, output);
ret get_extern_fn(externs, llmod, name, lib::llvm::LLVMCCallConv, t);
}
fn trans_native_call(&builder b, @glue_fns glues, ValueRef lltaskptr,
&hashmap[str, ValueRef] externs, &type_names tn,
ModuleRef llmod, &str name, bool pass_task,
&vec[ValueRef] args) -> ValueRef {
let int n = vec::len[ValueRef](args) as int;
let ValueRef llnative = get_simple_extern_fn(externs, llmod, name, n);
let vec[ValueRef] call_args = [];
for (ValueRef a in args) { call_args += [b.ZExtOrBitCast(a, T_int())]; }
ret b.Call(llnative, call_args);
}
fn trans_non_gc_free(&@block_ctxt cx, ValueRef v) -> result {
cx.build.Call(cx.fcx.lcx.ccx.upcalls.free,
[cx.fcx.lltaskptr, cx.build.PointerCast(v, T_ptr(T_i8())),
C_int(0)]);
ret rslt(cx, C_int(0));
}
fn find_scope_cx(&@block_ctxt cx) -> @block_ctxt {
if (cx.kind != NON_SCOPE_BLOCK) { ret cx; }
alt (cx.parent) {
case (parent_some(?b)) { ret find_scope_cx(b); }
case (parent_none) {
cx.fcx.lcx.ccx.sess.bug("trans::find_scope_cx() " +
"called on parentless block_ctxt");
}
}
}
fn umax(&@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
auto cond = cx.build.ICmp(lib::llvm::LLVMIntULT, a, b);
ret cx.build.Select(cond, b, a);
}
fn umin(&@block_ctxt cx, ValueRef a, ValueRef b) -> ValueRef {
auto cond = cx.build.ICmp(lib::llvm::LLVMIntULT, a, b);
ret cx.build.Select(cond, a, b);
}
fn align_to(&@block_ctxt cx, ValueRef off, ValueRef align) -> ValueRef {
auto mask = cx.build.Sub(align, C_int(1));
auto bumped = cx.build.Add(off, mask);
ret cx.build.And(bumped, cx.build.Not(mask));
}
// Returns the real size of the given type for the current target.
fn llsize_of_real(&@crate_ctxt cx, TypeRef t) -> uint {
ret llvm::LLVMStoreSizeOfType(cx.td.lltd, t);
}
fn llsize_of(TypeRef t) -> ValueRef {
ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMSizeOf(t), T_int(),
False);
}
fn llalign_of(TypeRef t) -> ValueRef {
ret llvm::LLVMConstIntCast(lib::llvm::llvm::LLVMAlignOf(t), T_int(),
False);
}
fn size_of(&@block_ctxt cx, &ty::t t) -> result {
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
ret rslt(cx, llsize_of(type_of(cx.fcx.lcx.ccx, cx.sp, t)));
}
ret dynamic_size_of(cx, t);
}
fn align_of(&@block_ctxt cx, &ty::t t) -> result {
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
ret rslt(cx, llalign_of(type_of(cx.fcx.lcx.ccx, cx.sp, t)));
}
ret dynamic_align_of(cx, t);
}
fn alloca(&@block_ctxt cx, TypeRef t) -> ValueRef {
ret new_builder(cx.fcx.llstaticallocas).Alloca(t);
}
fn array_alloca(&@block_ctxt cx, TypeRef t, ValueRef n) -> ValueRef {
ret new_builder(cx.fcx.lldynamicallocas).ArrayAlloca(t, n);
}
// Creates a simpler, size-equivalent type. The resulting type is guaranteed
// (a) to have the same size as the type that was passed in and (b) to be
// non-recursive. This is done by replacing all boxes in a type with boxed
// unit types.
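// For instance (a hedged sketch): tup(@int, int) simplifies to
// tup(@nil, int). A box is one pointer regardless of what it points
// to, so the simplified type keeps the original's size and alignment
// while breaking any recursion that goes through a box.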
fn simplify_type(&@crate_ctxt ccx, &ty::t typ) -> ty::t {
fn simplifier(@crate_ctxt ccx, ty::t typ) -> ty::t {
alt (ty::struct(ccx.tcx, typ)) {
case (ty::ty_box(_)) {
ret ty::mk_imm_box(ccx.tcx, ty::mk_nil(ccx.tcx));
}
case (ty::ty_vec(_)) {
ret ty::mk_imm_vec(ccx.tcx, ty::mk_nil(ccx.tcx));
}
case (ty::ty_fn(_, _, _, _, _)) {
ret ty::mk_imm_tup(ccx.tcx,
~[ty::mk_imm_box(ccx.tcx,
ty::mk_nil(ccx.tcx)),
ty::mk_imm_box(ccx.tcx,
ty::mk_nil(ccx.tcx))]);
}
case (ty::ty_obj(_)) {
ret ty::mk_imm_tup(ccx.tcx,
~[ty::mk_imm_box(ccx.tcx,
ty::mk_nil(ccx.tcx)),
ty::mk_imm_box(ccx.tcx,
ty::mk_nil(ccx.tcx))]);
}
case (ty::ty_res(_, ?sub, ?tps)) {
auto sub1 = ty::substitute_type_params(ccx.tcx, tps, sub);
ret ty::mk_imm_tup(ccx.tcx, ~[ty::mk_int(ccx.tcx),
simplify_type(ccx, sub1)]);
}
case (_) { ret typ; }
}
}
ret ty::fold_ty(ccx.tcx, ty::fm_general(bind simplifier(ccx, _)), typ);
}
// Computes the size of the data part of a non-dynamically-sized tag.
fn static_size_of_tag(&@crate_ctxt cx, &span sp, &ty::t t) -> uint {
if (ty::type_has_dynamic_size(cx.tcx, t)) {
cx.tcx.sess.span_fatal(sp,
"dynamically sized type passed to " +
"static_size_of_tag()");
}
if (cx.tag_sizes.contains_key(t)) { ret cx.tag_sizes.get(t); }
auto tid;
let vec[ty::t] subtys;
alt (ty::struct(cx.tcx, t)) {
case (ty::ty_tag(?tid_, ?subtys_)) { tid = tid_; subtys = subtys_; }
case (_) {
cx.tcx.sess.span_fatal(sp,
"non-tag passed to " +
"static_size_of_tag()");
}
}
// Compute max(variant sizes).
auto max_size = 0u;
auto variants = ty::tag_variants(cx.tcx, tid);
for (ty::variant_info variant in variants) {
// TODO: Remove this vec->ivec conversion.
auto args = ~[];
for (ty::t typ in variant.args) { args += ~[typ]; }
auto tup_ty = simplify_type(cx, ty::mk_imm_tup(cx.tcx, args));
// Perform any type parameter substitutions.
tup_ty = ty::substitute_type_params(cx.tcx, subtys, tup_ty);
// Here we possibly do a recursive call.
auto this_size = llsize_of_real(cx, type_of(cx, sp, tup_ty));
if (max_size < this_size) { max_size = this_size; }
}
cx.tag_sizes.insert(t, max_size);
ret max_size;
}
fn dynamic_size_of(&@block_ctxt cx, ty::t t) -> result {
fn align_elements(&@block_ctxt cx, &vec[ty::t] elts) -> result {
//
// C padding rules:
//
// - Pad after each element so that next element is aligned.
// - Pad after final structure member so that whole structure
// is aligned to max alignment of interior.
//
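// Worked example (assuming 32-bit ints): for elts (u8, u32), off
// becomes 1 after the u8, is aligned up to 4 for the u32, and ends
// at 8; max_align is 4, so the final align_to leaves the size at 8.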
auto off = C_int(0);
auto max_align = C_int(1);
auto bcx = cx;
for (ty::t e in elts) {
auto elt_align = align_of(bcx, e);
bcx = elt_align.bcx;
auto elt_size = size_of(bcx, e);
bcx = elt_size.bcx;
auto aligned_off = align_to(bcx, off, elt_align.val);
off = bcx.build.Add(aligned_off, elt_size.val);
max_align = umax(bcx, max_align, elt_align.val);
}
off = align_to(bcx, off, max_align);
ret rslt(bcx, off);
}
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_param(?p)) {
auto szptr =
field_of_tydesc(cx, t, false, abi::tydesc_field_size);
ret rslt(szptr.bcx, szptr.bcx.build.Load(szptr.val));
}
case (ty::ty_tup(?elts)) {
let vec[ty::t] tys = [];
for (ty::mt mt in elts) { tys += [mt.ty]; }
ret align_elements(cx, tys);
}
case (ty::ty_rec(?flds)) {
let vec[ty::t] tys = [];
for (ty::field f in flds) { tys += [f.mt.ty]; }
ret align_elements(cx, tys);
}
case (ty::ty_tag(?tid, ?tps)) {
auto bcx = cx;
// Compute max(variant sizes).
let ValueRef max_size = alloca(bcx, T_int());
bcx.build.Store(C_int(0), max_size);
auto variants = ty::tag_variants(bcx.fcx.lcx.ccx.tcx, tid);
for (ty::variant_info variant in variants) {
// Perform type substitution on the raw argument types.
let vec[ty::t] raw_tys = variant.args;
let vec[ty::t] tys = [];
for (ty::t raw_ty in raw_tys) {
auto t =
ty::substitute_type_params(cx.fcx.lcx.ccx.tcx, tps,
raw_ty);
tys += [t];
}
auto rslt = align_elements(bcx, tys);
bcx = rslt.bcx;
auto this_size = rslt.val;
auto old_max_size = bcx.build.Load(max_size);
bcx.build.Store(umax(bcx, this_size, old_max_size), max_size);
}
auto max_size_val = bcx.build.Load(max_size);
auto total_size = if (vec::len(variants) != 1u) {
bcx.build.Add(max_size_val, llsize_of(T_int()))
} else { max_size_val };
ret rslt(bcx, total_size);
}
case (ty::ty_ivec(?mt)) {
auto rs = field_of_tydesc(cx, mt.ty, false,
abi::tydesc_field_size);
auto bcx = rs.bcx;
auto llunitszptr = rs.val;
auto llunitsz = bcx.build.Load(llunitszptr);
auto llsz = bcx.build.Add(llsize_of(T_opaque_ivec()),
bcx.build.Mul(llunitsz, C_uint(abi::ivec_default_length)));
ret rslt(bcx, llsz);
}
}
}
fn dynamic_align_of(&@block_ctxt cx, &ty::t t) -> result {
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_param(?p)) {
auto aptr =
field_of_tydesc(cx, t, false, abi::tydesc_field_align);
ret rslt(aptr.bcx, aptr.bcx.build.Load(aptr.val));
}
case (ty::ty_tup(?elts)) {
auto a = C_int(1);
auto bcx = cx;
for (ty::mt e in elts) {
auto align = align_of(bcx, e.ty);
bcx = align.bcx;
a = umax(bcx, a, align.val);
}
ret rslt(bcx, a);
}
case (ty::ty_rec(?flds)) {
auto a = C_int(1);
auto bcx = cx;
for (ty::field f in flds) {
auto align = align_of(bcx, f.mt.ty);
bcx = align.bcx;
a = umax(bcx, a, align.val);
}
ret rslt(bcx, a);
}
case (ty::ty_tag(_, _)) {
ret rslt(cx, C_int(1)); // FIXME: stub
}
case (ty::ty_ivec(?tm)) {
auto rs = align_of(cx, tm.ty);
auto bcx = rs.bcx;
auto llunitalign = rs.val;
auto llalign = umax(bcx, llalign_of(T_int()), llunitalign);
ret rslt(bcx, llalign);
}
}
}
// Replacement for the LLVM 'GEP' instruction when field-indexing into a
// tuple-like structure (tup, rec) with a static index. This one is driven off
// ty::struct and knows what to do when it runs into a ty_param stuck in the
// middle of the thing it's GEP'ing into. Much like size_of and align_of,
// above.
fn GEP_tup_like(&@block_ctxt cx, &ty::t t, ValueRef base, &vec[int] ixs) ->
result {
assert (ty::type_is_tup_like(cx.fcx.lcx.ccx.tcx, t));
// It might be a static-known type. Handle this.
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
let vec[ValueRef] v = [];
for (int i in ixs) { v += [C_int(i)]; }
ret rslt(cx, cx.build.GEP(base, v));
}
// It is a dynamic-containing type that, if we convert directly to an LLVM
// TypeRef, will be all wrong; there's no proper LLVM type to represent
// it, and the lowering function will stick in i8* values for each
// ty_param, which is not right; the ty_params are all of some dynamic
// size.
//
// What we must do instead is sadder. We must look through the indices
// manually and split the input type into a prefix and a target. We then
// measure the prefix size, bump the input pointer by that amount, and
// cast to a pointer-to-target type.
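// Worked example: GEP'ing into tup(int, T, int) with ixs = [0, 2]
// splits the type into prefix tup(int, T) and target int; we measure
// the prefix's (dynamic) size, bump the i8*-cast base pointer by that
// many bytes, and cast the result to int's pointer type.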
// Given a type, an index vector and an element number N in that vector,
// calculate index X and the type that results by taking the first X-1
// elements of the type and splitting the Xth off. Return the prefix as
// well as the innermost Xth type.
fn split_type(&@crate_ctxt ccx, &ty::t t, &vec[int] ixs, uint n) ->
rec(vec[ty::t] prefix, ty::t target) {
let uint len = vec::len[int](ixs);
// We don't support 0-index or 1-index GEPs: The former is nonsense
// and the latter would only be meaningful if we supported non-0
// values for the 0th index (we don't).
assert (len > 1u);
if (n == 0u) {
// Since we're starting from a value that's a pointer to a
// *single* structure, the first index (in GEP-ese) should just be
// 0, to yield the pointee.
assert (ixs.(n) == 0);
ret split_type(ccx, t, ixs, n + 1u);
}
assert (n < len);
let int ix = ixs.(n);
let vec[ty::t] prefix = [];
let int i = 0;
while (i < ix) {
vec::push[ty::t](prefix,
ty::get_element_type(ccx.tcx, t, i as uint));
i += 1;
}
auto selected = ty::get_element_type(ccx.tcx, t, i as uint);
if (n == len - 1u) {
// We are at the innermost index.
ret rec(prefix=prefix, target=selected);
} else {
// Not the innermost index; call self recursively to dig deeper.
// Once we get an inner result, append it to the current prefix and
// return to the caller.
auto inner = split_type(ccx, selected, ixs, n + 1u);
prefix += inner.prefix;
ret rec(prefix=prefix with inner);
}
}
// We make a fake prefix tuple-type here; luckily for measuring sizes
// the tuple parens are associative so it doesn't matter that we've
// flattened the incoming structure.
auto s = split_type(cx.fcx.lcx.ccx, t, ixs, 0u);
auto args = ~[];
for (ty::t typ in s.prefix) { args += ~[typ]; }
auto prefix_ty = ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, args);
auto bcx = cx;
auto sz = size_of(bcx, prefix_ty);
bcx = sz.bcx;
auto raw = bcx.build.PointerCast(base, T_ptr(T_i8()));
auto bumped = bcx.build.GEP(raw, [sz.val]);
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, s.target)) {
ret rslt(bcx, bumped);
}
auto typ = T_ptr(type_of(bcx.fcx.lcx.ccx, bcx.sp, s.target));
ret rslt(bcx, bcx.build.PointerCast(bumped, typ));
}
// Replacement for the LLVM 'GEP' instruction when field indexing into a tag.
// This function uses GEP_tup_like() above and automatically performs casts as
// appropriate. @llblobptr is the data part of a tag value; its actual type is
// meaningless, as it will be cast away.
fn GEP_tag(@block_ctxt cx, ValueRef llblobptr, &ast::def_id tag_id,
&ast::def_id variant_id, &vec[ty::t] ty_substs, int ix) -> result {
auto variant =
ty::tag_variant_with_id(cx.fcx.lcx.ccx.tcx, tag_id, variant_id);
// Synthesize a tuple type so that GEP_tup_like() can work its magic.
// Separately, store the type of the element we're interested in.
auto arg_tys = variant.args;
auto elem_ty = ty::mk_nil(cx.fcx.lcx.ccx.tcx); // typestate infelicity
auto i = 0;
let ty::t[] true_arg_tys = ~[];
for (ty::t aty in arg_tys) {
auto arg_ty =
ty::substitute_type_params(cx.fcx.lcx.ccx.tcx, ty_substs, aty);
true_arg_tys += ~[arg_ty];
if (i == ix) { elem_ty = arg_ty; }
i += 1;
}
auto tup_ty = ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, true_arg_tys);
// Cast the blob pointer to the appropriate type, if we need to (i.e. if
// the blob pointer isn't dynamically sized).
let ValueRef llunionptr;
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, tup_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, cx.sp, tup_ty);
llunionptr = cx.build.TruncOrBitCast(llblobptr, T_ptr(llty));
} else { llunionptr = llblobptr; }
// Do the GEP_tup_like().
auto rs = GEP_tup_like(cx, tup_ty, llunionptr, [0, ix]);
// Cast the result to the appropriate type, if necessary.
auto val;
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, elem_ty)) {
auto llelemty = type_of(rs.bcx.fcx.lcx.ccx, cx.sp, elem_ty);
val = rs.bcx.build.PointerCast(rs.val, T_ptr(llelemty));
} else { val = rs.val; }
ret rslt(rs.bcx, val);
}
// trans_raw_malloc: expects a type indicating which pointer type we want and
// a size indicating how much space we want malloc'd.
fn trans_raw_malloc(&@block_ctxt cx, TypeRef llptr_ty, ValueRef llsize) ->
result {
// FIXME: need a table to collect tydesc globals.
auto tydesc = C_null(T_ptr(T_tydesc(cx.fcx.lcx.ccx.tn)));
auto rval =
cx.build.Call(cx.fcx.lcx.ccx.upcalls.malloc,
[cx.fcx.lltaskptr, llsize, tydesc]);
ret rslt(cx, cx.build.PointerCast(rval, llptr_ty));
}
// trans_malloc_boxed: expects an unboxed type and returns a pointer to enough
// space for something of that type, along with space for a reference count;
// in other words, it allocates a box for something of that type.
fn trans_malloc_boxed(&@block_ctxt cx, ty::t t) -> result {
// Synthesize a fake box type structurally so we have something
// to measure the size of.
// We synthesize two types here because we want both the type of the
// pointer and the pointee. boxed_body is the type that we measure the
// size of; box_ptr is the type that's converted to a TypeRef and used as
// the pointer cast target in trans_raw_malloc.
auto boxed_body =
ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx,
// The mk_int here is the space being
// reserved for the refcount.
~[ty::mk_int(cx.fcx.lcx.ccx.tcx), t]);
auto box_ptr = ty::mk_imm_box(cx.fcx.lcx.ccx.tcx, t);
auto sz = size_of(cx, boxed_body);
// Grab the TypeRef type of box_ptr, because that's what trans_raw_malloc
// wants.
auto llty = type_of(cx.fcx.lcx.ccx, cx.sp, box_ptr);
ret trans_raw_malloc(sz.bcx, llty, sz.val);
}
// Type descriptor and type glue stuff
// Given a type and a field index into its corresponding type descriptor,
// returns an LLVM ValueRef of that field from the tydesc, generating the
// tydesc if necessary.
fn field_of_tydesc(&@block_ctxt cx, &ty::t t, bool escapes, int field) ->
result {
auto ti = none[@tydesc_info];
auto tydesc = get_tydesc(cx, t, escapes, ti);
ret rslt(tydesc.bcx,
tydesc.bcx.build.GEP(tydesc.val, [C_int(0), C_int(field)]));
}
// Given a type containing ty params, build a vector containing a ValueRef for
// each of the ty params it uses (from the current frame) and a vector of the
// indices of the ty params present in the type. This is used solely for
// constructing derived tydescs.
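// E.g. if t mentions type params 2, 0, and 2 again, in that order of
// first appearance, the walk yields defs = [2u, 0u] (each param
// recorded once) and vals = the matching entries of cx.fcx.lltydescs.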
fn linearize_ty_params(&@block_ctxt cx, &ty::t t) ->
tup(vec[uint], vec[ValueRef]) {
let vec[ValueRef] param_vals = [];
let vec[uint] param_defs = [];
type rr =
rec(@block_ctxt cx,
mutable vec[ValueRef] vals,
mutable vec[uint] defs);
fn linearizer(@rr r, ty::t t) {
alt (ty::struct(r.cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_param(?pid)) {
let bool seen = false;
for (uint d in r.defs) { if (d == pid) { seen = true; } }
if (!seen) {
r.vals += [r.cx.fcx.lltydescs.(pid)];
r.defs += [pid];
}
}
case (_) { }
}
}
auto x = @rec(cx=cx, mutable vals=param_vals, mutable defs=param_defs);
auto f = bind linearizer(x, _);
ty::walk_ty(cx.fcx.lcx.ccx.tcx, f, t);
ret tup(x.defs, x.vals);
}
2011-05-11 04:58:46 +00:00
fn trans_stack_local_derived_tydesc(&@block_ctxt cx, ValueRef llsz,
ValueRef llalign, ValueRef llroottydesc,
ValueRef llparamtydescs) -> ValueRef {
auto llmyroottydesc = alloca(cx, T_tydesc(cx.fcx.lcx.ccx.tn));
// By convention, desc 0 is the root descriptor.
llroottydesc = cx.build.Load(llroottydesc);
cx.build.Store(llroottydesc, llmyroottydesc);
// Store a pointer to the rest of the descriptors.
auto llfirstparam = cx.build.GEP(llparamtydescs, [C_int(0), C_int(0)]);
cx.build.Store(llfirstparam,
cx.build.GEP(llmyroottydesc, [C_int(0), C_int(0)]));
cx.build.Store(llsz, cx.build.GEP(llmyroottydesc, [C_int(0), C_int(1)]));
cx.build.Store(llalign,
cx.build.GEP(llmyroottydesc, [C_int(0), C_int(2)]));
ret llmyroottydesc;
}
fn get_derived_tydesc(&@block_ctxt cx, &ty::t t, bool escapes,
&mutable option::t[@tydesc_info] static_ti) -> result {
alt (cx.fcx.derived_tydescs.find(t)) {
case (some(?info)) {
// If the tydesc escapes in this context, the cached derived
// tydesc also has to be one that was marked as escaping.
if (!(escapes && !info.escapes)) { ret rslt(cx, info.lltydesc); }
}
case (none) {/* fall through */ }
}
cx.fcx.lcx.ccx.stats.n_derived_tydescs += 1u;
auto bcx = new_raw_block_ctxt(cx.fcx, cx.fcx.llderivedtydescs);
let uint n_params = ty::count_ty_params(bcx.fcx.lcx.ccx.tcx, t);
auto tys = linearize_ty_params(bcx, t);
assert (n_params == vec::len[uint](tys._0));
assert (n_params == vec::len[ValueRef](tys._1));
auto root_ti = get_static_tydesc(bcx, t, tys._0);
static_ti = some[@tydesc_info](root_ti);
lazily_emit_all_tydesc_glue(cx, static_ti);
auto root = root_ti.tydesc;
auto sz = size_of(bcx, t);
bcx = sz.bcx;
auto align = align_of(bcx, t);
bcx = align.bcx;
auto v;
if (escapes) {
auto tydescs =
alloca(bcx, /* for root*/
T_array(T_ptr(T_tydesc(bcx.fcx.lcx.ccx.tn)),
1u + n_params));
auto i = 0;
auto tdp = bcx.build.GEP(tydescs, [C_int(0), C_int(i)]);
bcx.build.Store(root, tdp);
i += 1;
for (ValueRef td in tys._1) {
auto tdp = bcx.build.GEP(tydescs, [C_int(0), C_int(i)]);
bcx.build.Store(td, tdp);
i += 1;
}
auto lltydescsptr =
bcx.build.PointerCast(tydescs,
T_ptr(T_ptr(T_tydesc(bcx.fcx.lcx.ccx.tn))));
auto td_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.get_type_desc,
[bcx.fcx.lltaskptr, C_null(T_ptr(T_nil())), sz.val,
align.val, C_int(1u + n_params as int),
lltydescsptr]);
v = td_val;
} else {
auto llparamtydescs =
alloca(bcx,
T_array(T_ptr(T_tydesc(bcx.fcx.lcx.ccx.tn)), n_params));
auto i = 0;
for (ValueRef td in tys._1) {
auto tdp = bcx.build.GEP(llparamtydescs, [C_int(0), C_int(i)]);
bcx.build.Store(td, tdp);
i += 1;
}
v =
trans_stack_local_derived_tydesc(bcx, sz.val, align.val, root,
llparamtydescs);
}
bcx.fcx.derived_tydescs.insert(t, rec(lltydesc=v, escapes=escapes));
ret rslt(cx, v);
}
2011-05-12 15:42:12 -07:00
fn get_tydesc(&@block_ctxt cx, &ty::t t, bool escapes,
&mutable option::t[@tydesc_info] static_ti) -> result {
// Is the supplied type a type param? If so, return the passed-in tydesc.
alt (ty::type_param(cx.fcx.lcx.ccx.tcx, t)) {
case (some(?id)) { ret rslt(cx, cx.fcx.lltydescs.(id)); }
case (none) {/* fall through */ }
}
// Does it contain a type param? If so, generate a derived tydesc.
if (ty::type_contains_params(cx.fcx.lcx.ccx.tcx, t)) {
ret get_derived_tydesc(cx, t, escapes, static_ti);
}
// Otherwise, generate a tydesc if necessary, and return it.
auto info = get_static_tydesc(cx, t, []);
static_ti = some[@tydesc_info](info);
ret rslt(cx, info.tydesc);
}
fn get_static_tydesc(&@block_ctxt cx, &ty::t t, &vec[uint] ty_params) ->
@tydesc_info {
alt (cx.fcx.lcx.ccx.tydescs.find(t)) {
case (some(?info)) { ret info; }
case (none) {
cx.fcx.lcx.ccx.stats.n_static_tydescs += 1u;
auto info = declare_tydesc(cx.fcx.lcx, cx.sp, t, ty_params);
cx.fcx.lcx.ccx.tydescs.insert(t, info);
ret info;
}
}
}
fn set_no_inline(ValueRef f) {
llvm::LLVMAddFunctionAttr(f,
lib::llvm::LLVMNoInlineAttribute as
lib::llvm::llvm::Attribute);
}
fn set_uwtable(ValueRef f) {
llvm::LLVMAddFunctionAttr(f,
lib::llvm::LLVMUWTableAttribute as
lib::llvm::llvm::Attribute);
}
fn set_always_inline(ValueRef f) {
llvm::LLVMAddFunctionAttr(f,
lib::llvm::LLVMAlwaysInlineAttribute as
lib::llvm::llvm::Attribute);
}
fn set_glue_inlining(&@local_ctxt cx, ValueRef f, &ty::t t) {
if (ty::type_is_structural(cx.ccx.tcx, t)) {
set_no_inline(f);
} else { set_always_inline(f); }
}
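// Inlining heuristic: glue for structural types tends to be large, so we
// keep it out of line; glue for everything else is small enough that
// inlining it should always be a win.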
// Generates the declaration for (but doesn't emit) a type descriptor.
fn declare_tydesc(&@local_ctxt cx, &span sp, &ty::t t, vec[uint] ty_params) ->
@tydesc_info {
log "+++ declare_tydesc " + ty_to_str(cx.ccx.tcx, t);
auto ccx = cx.ccx;
auto llsize;
auto llalign;
if (!ty::type_has_dynamic_size(ccx.tcx, t)) {
auto llty = type_of(ccx, sp, t);
llsize = llsize_of(llty);
llalign = llalign_of(llty);
} else {
// These will be overwritten as the derived tydesc is generated, so
// we create placeholder values.
llsize = C_int(0);
llalign = C_int(0);
}
auto name;
if (cx.ccx.sess.get_opts().debuginfo) {
name = mangle_internal_name_by_type_only(cx.ccx, t, "tydesc");
name = sanitize(name);
} else { name = mangle_internal_name_by_seq(cx.ccx, "tydesc"); }
auto gvar =
llvm::LLVMAddGlobal(ccx.llmod, T_tydesc(ccx.tn), str::buf(name));
auto info =
@rec(ty=t,
tydesc=gvar,
size=llsize,
align=llalign,
mutable copy_glue=none[ValueRef],
mutable drop_glue=none[ValueRef],
mutable free_glue=none[ValueRef],
mutable cmp_glue=none[ValueRef],
ty_params=ty_params);
log "--- declare_tydesc " + ty_to_str(cx.ccx.tcx, t);
ret info;
}
tag make_generic_glue_helper_fn {
mgghf_single(fn(&@block_ctxt, ValueRef, &ty::t) );
mgghf_cmp;
}
fn declare_generic_glue(&@local_ctxt cx, &ty::t t, TypeRef llfnty, &str name)
-> ValueRef {
auto fn_nm;
if (cx.ccx.sess.get_opts().debuginfo) {
fn_nm = mangle_internal_name_by_type_only(cx.ccx, t, "glue_" + name);
fn_nm = sanitize(fn_nm);
} else { fn_nm = mangle_internal_name_by_seq(cx.ccx, "glue_" + name); }
auto llfn = decl_cdecl_fn(cx.ccx.llmod, fn_nm, llfnty);
set_glue_inlining(cx, llfn, t);
ret llfn;
}
fn make_generic_glue(&@local_ctxt cx, &span sp, &ty::t t, ValueRef llfn,
&make_generic_glue_helper_fn helper,
&vec[uint] ty_params) -> ValueRef {
auto fcx = new_fn_ctxt(cx, sp, llfn);
llvm::LLVMSetLinkage(llfn,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
cx.ccx.stats.n_glues_created += 1u;
    // Any nontrivial glue is called with values passed *by alias*; this is
    // a requirement, since in many contexts glue is invoked indirectly and
    // the caller has no idea if it's dealing with something that can be
    // passed by value.
auto llty;
if (ty::type_has_dynamic_size(cx.ccx.tcx, t)) {
llty = T_ptr(T_i8());
} else { llty = T_ptr(type_of(cx.ccx, sp, t)); }
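    // Judging by the glue call sites in this module, glue functions receive
    // (output ptr, task ptr, closure env, tydesc params, value ptr[, second
    // value ptr, cmp op]); that is why the tydesc array is fetched as
    // param 3 and the value as param 4 below.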
auto ty_param_count = vec::len[uint](ty_params);
auto lltyparams = llvm::LLVMGetParam(llfn, 3u);
auto copy_args_bcx = new_raw_block_ctxt(fcx, fcx.llcopyargs);
auto lltydescs = vec::empty_mut[ValueRef]();
auto p = 0u;
while (p < ty_param_count) {
auto llparam = copy_args_bcx.build.GEP(lltyparams, [C_int(p as int)]);
llparam = copy_args_bcx.build.Load(llparam);
vec::grow_set[ValueRef](lltydescs, ty_params.(p), 0 as ValueRef,
llparam);
p += 1u;
}
fcx.lltydescs = vec::freeze[ValueRef](lltydescs);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto llrawptr0 = llvm::LLVMGetParam(llfn, 4u);
auto llval0 = bcx.build.BitCast(llrawptr0, llty);
alt (helper) {
case (mgghf_single(?single_fn)) { single_fn(bcx, llval0, t); }
case (mgghf_cmp) {
auto llrawptr1 = llvm::LLVMGetParam(llfn, 5u);
auto llval1 = bcx.build.BitCast(llrawptr1, llty);
auto llcmpval = llvm::LLVMGetParam(llfn, 6u);
make_cmp_glue(bcx, llval0, llval1, t, llcmpval);
}
}
finish_fn(fcx, lltop);
ret llfn;
}
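// Emits the initializer for every tydesc global declared so far. The field
// order here must line up with the abi::tydesc_field_* offsets: first_param,
// size, align, copy/drop/free glue, the currently-unused sever, mark,
// obj_drop and is_stateful slots, and finally cmp glue.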
fn emit_tydescs(&@crate_ctxt ccx) {
for each (@tup(ty::t, @tydesc_info) pair in ccx.tydescs.items()) {
auto glue_fn_ty = T_ptr(T_glue_fn(ccx.tn));
auto cmp_fn_ty = T_ptr(T_cmp_glue_fn(ccx.tn));
auto ti = pair._1;
auto copy_glue =
alt ({ ti.copy_glue }) {
case (none) {
ccx.stats.n_null_glues += 1u;
C_null(glue_fn_ty)
}
case (some(?v)) { ccx.stats.n_real_glues += 1u; v }
};
auto drop_glue =
alt ({ ti.drop_glue }) {
case (none) {
ccx.stats.n_null_glues += 1u;
C_null(glue_fn_ty)
}
case (some(?v)) { ccx.stats.n_real_glues += 1u; v }
};
auto free_glue =
alt ({ ti.free_glue }) {
case (none) {
ccx.stats.n_null_glues += 1u;
C_null(glue_fn_ty)
}
case (some(?v)) { ccx.stats.n_real_glues += 1u; v }
};
auto cmp_glue =
alt ({ ti.cmp_glue }) {
case (none) {
ccx.stats.n_null_glues += 1u;
C_null(cmp_fn_ty)
}
case (some(?v)) { ccx.stats.n_real_glues += 1u; v }
};
auto tydesc =
C_struct([C_null(T_ptr(T_ptr(T_tydesc(ccx.tn)))), ti.size,
ti.align, copy_glue, // copy_glue
drop_glue, // drop_glue
free_glue, // free_glue
C_null(glue_fn_ty), // sever_glue
C_null(glue_fn_ty), // mark_glue
C_null(glue_fn_ty), // obj_drop_glue
C_null(glue_fn_ty), // is_stateful
cmp_glue]); // cmp_glue
auto gvar = ti.tydesc;
llvm::LLVMSetInitializer(gvar, tydesc);
llvm::LLVMSetGlobalConstant(gvar, True);
llvm::LLVMSetLinkage(gvar,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
}
}
fn make_copy_glue(&@block_ctxt cx, ValueRef v, &ty::t t) {
// NB: v is an *alias* of type t here, not a direct value.
auto bcx;
if (ty::type_is_boxed(cx.fcx.lcx.ccx.tcx, t)) {
bcx = incr_refcnt_of_boxed(cx, cx.build.Load(v)).bcx;
} else if (ty::type_is_structural(cx.fcx.lcx.ccx.tcx, t)) {
bcx = duplicate_heap_parts_if_necessary(cx, v, t).bcx;
bcx = iter_structural_ty(bcx, v, t, bind copy_ty(_, _, _)).bcx;
} else { bcx = cx; }
bcx.build.RetVoid();
}
fn incr_refcnt_of_boxed(&@block_ctxt cx, ValueRef box_ptr) -> result {
auto rc_ptr =
cx.build.GEP(box_ptr, [C_int(0), C_int(abi::box_rc_field_refcnt)]);
auto rc = cx.build.Load(rc_ptr);
auto rc_adj_cx = new_sub_block_ctxt(cx, "rc++");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto const_test =
cx.build.ICmp(lib::llvm::LLVMIntEQ, C_int(abi::const_refcount as int),
rc);
cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
rc = rc_adj_cx.build.Add(rc, C_int(1));
rc_adj_cx.build.Store(rc, rc_ptr);
rc_adj_cx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn make_free_glue(&@block_ctxt cx, ValueRef v0, &ty::t t) {
    // NB: v0 is an *alias* of type t here, not a direct value.
auto rs = alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_str) {
auto v = cx.build.Load(v0);
trans_non_gc_free(cx, v)
}
case (ty::ty_vec(_)) {
auto v = cx.build.Load(v0);
auto rs = iter_sequence(cx, v, t, bind drop_ty(_, _, _));
// FIXME: switch gc/non-gc on layer of the type.
trans_non_gc_free(rs.bcx, v)
}
case (ty::ty_box(?body_mt)) {
auto v = cx.build.Load(v0);
auto body =
cx.build.GEP(v, [C_int(0), C_int(abi::box_rc_field_body)]);
auto body_ty = body_mt.ty;
auto body_val = load_if_immediate(cx, body, body_ty);
auto rs = drop_ty(cx, body_val, body_ty);
// FIXME: switch gc/non-gc on layer of the type.
trans_non_gc_free(rs.bcx, v)
}
case (ty::ty_port(_)) {
auto v = cx.build.Load(v0);
cx.build.Call(cx.fcx.lcx.ccx.upcalls.del_port,
[cx.fcx.lltaskptr,
cx.build.PointerCast(v, T_opaque_port_ptr())]);
rslt(cx, C_int(0))
}
case (ty::ty_chan(_)) {
auto v = cx.build.Load(v0);
cx.build.Call(cx.fcx.lcx.ccx.upcalls.del_chan,
[cx.fcx.lltaskptr,
cx.build.PointerCast(v, T_opaque_chan_ptr())]);
rslt(cx, C_int(0))
}
case (ty::ty_task) {
// TODO: call upcall_kill
rslt(cx, C_nil())
}
case (ty::ty_obj(_)) {
auto box_cell =
cx.build.GEP(v0, [C_int(0), C_int(abi::obj_field_box)]);
auto b = cx.build.Load(box_cell);
auto body =
cx.build.GEP(b, [C_int(0), C_int(abi::box_rc_field_body)]);
auto tydescptr =
cx.build.GEP(body,
[C_int(0), C_int(abi::obj_body_elt_tydesc)]);
auto tydesc = cx.build.Load(tydescptr);
auto cx_ = maybe_call_dtor(cx, v0);
// Call through the obj's own fields-drop glue first.
auto ti = none[@tydesc_info];
call_tydesc_glue_full(cx_, body, tydesc,
abi::tydesc_field_drop_glue, ti);
// Then free the body.
// FIXME: switch gc/non-gc on layer of the type.
trans_non_gc_free(cx_, b)
}
case (ty::ty_fn(_, _, _, _, _)) {
auto box_cell =
cx.build.GEP(v0, [C_int(0), C_int(abi::fn_field_box)]);
auto v = cx.build.Load(box_cell);
// Call through the closure's own fields-drop glue first.
auto body =
cx.build.GEP(v, [C_int(0), C_int(abi::box_rc_field_body)]);
auto bindings =
cx.build.GEP(body,
[C_int(0), C_int(abi::closure_elt_bindings)]);
auto tydescptr =
cx.build.GEP(body,
[C_int(0), C_int(abi::closure_elt_tydesc)]);
auto ti = none[@tydesc_info];
call_tydesc_glue_full(cx, bindings, cx.build.Load(tydescptr),
abi::tydesc_field_drop_glue, ti);
// Then free the body.
// FIXME: switch gc/non-gc on layer of the type.
trans_non_gc_free(cx, v)
}
case (_) { rslt(cx, C_nil()) }
};
rs.bcx.build.RetVoid();
}
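// An interior vector whose stack length is zero may have spilled its
// payload to the heap, in which case the stack region is reinterpreted as
// a stub holding a pointer to the heap copy. Free that heap part if it
// exists; since freeing a null pointer is ok, no extra null check is
// needed first.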
fn maybe_free_ivec_heap_part(&@block_ctxt cx, ValueRef v0, ty::t unit_ty) ->
result {
auto llunitty = type_of_or_i8(cx, unit_ty);
auto stack_len =
cx.build.Load(cx.build.InBoundsGEP(v0,
[C_int(0),
C_uint(abi::ivec_elt_len)]));
auto maybe_on_heap_cx = new_sub_block_ctxt(cx, "maybe_on_heap");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto maybe_on_heap =
cx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
cx.build.CondBr(maybe_on_heap, maybe_on_heap_cx.llbb, next_cx.llbb);
// Might be on the heap. Load the heap pointer and free it. (It's ok to
// free a null pointer.)
auto stub_ptr =
maybe_on_heap_cx.build.PointerCast(v0, T_ptr(T_ivec_heap(llunitty)));
auto heap_ptr =
{
auto v = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
auto m = maybe_on_heap_cx.build.InBoundsGEP(stub_ptr, v);
maybe_on_heap_cx.build.Load(m)
};
auto after_free_cx = trans_non_gc_free(maybe_on_heap_cx, heap_ptr).bcx;
after_free_cx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn make_drop_glue(&@block_ctxt cx, ValueRef v0, &ty::t t) {
// NB: v0 is an *alias* of type t here, not a direct value.
auto ccx = cx.fcx.lcx.ccx;
auto rs = alt (ty::struct(ccx.tcx, t)) {
case (ty::ty_str) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_vec(_)) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_ivec(?tm)) {
auto v1;
if (ty::type_has_dynamic_size(ccx.tcx, tm.ty)) {
v1 = cx.build.PointerCast(v0, T_ptr(T_opaque_ivec()));
} else {
v1 = v0;
}
auto rslt = iter_structural_ty(cx, v1, t, drop_ty);
maybe_free_ivec_heap_part(rslt.bcx, v1, tm.ty)
}
case (ty::ty_box(_)) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_port(_)) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_chan(_)) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_task) { decr_refcnt_maybe_free(cx, v0, v0, t) }
case (ty::ty_obj(_)) {
auto box_cell =
cx.build.GEP(v0, [C_int(0), C_int(abi::obj_field_box)]);
decr_refcnt_maybe_free(cx, box_cell, v0, t)
}
case (ty::ty_res(?did, ?inner, ?tps)) {
trans_res_drop(cx, v0, did, inner, tps)
}
case (ty::ty_fn(_, _, _, _, _)) {
auto box_cell =
cx.build.GEP(v0, [C_int(0), C_int(abi::fn_field_box)]);
decr_refcnt_maybe_free(cx, box_cell, v0, t)
}
case (_) {
if (ty::type_has_pointers(ccx.tcx, t) &&
ty::type_is_structural(ccx.tcx, t)) {
iter_structural_ty(cx, v0, t, bind drop_ty(_, _, _))
} else { rslt(cx, C_nil()) }
}
};
rs.bcx.build.RetVoid();
}
fn trans_res_drop(@block_ctxt cx, ValueRef rs, &ast::def_id did,
ty::t inner_t, &vec[ty::t] tps) -> result {
auto ccx = cx.fcx.lcx.ccx;
auto inner_t_s = ty::substitute_type_params(ccx.tcx, tps, inner_t);
auto tup_ty = ty::mk_imm_tup(ccx.tcx, ~[ty::mk_int(ccx.tcx), inner_t_s]);
auto drop_cx = new_sub_block_ctxt(cx, "drop res");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto drop_flag = GEP_tup_like(cx, tup_ty, rs, [0, 0]);
cx = drop_flag.bcx;
auto null_test = cx.build.IsNull(cx.build.Load(drop_flag.val));
cx.build.CondBr(null_test, next_cx.llbb, drop_cx.llbb);
cx = drop_cx;
auto val = GEP_tup_like(cx, tup_ty, rs, [0, 1]);
cx = val.bcx;
// Find and call the actual destructor.
auto dtor_pair = if (did._0 == ast::local_crate) {
alt (ccx.fn_pairs.find(did._1)) {
case (some(?x)) { x }
case (_) { ccx.tcx.sess.bug("internal error in trans_res_drop") }
}
} else {
auto params = decoder::get_type_param_count(ccx.tcx, did);
auto f_t = type_of_fn(ccx, cx.sp, ast::proto_fn,
~[rec(mode=ty::mo_alias(false), ty=inner_t)],
ty::mk_nil(ccx.tcx), params);
get_extern_const(ccx.externs, ccx.llmod,
decoder::get_symbol(ccx.sess, did),
T_fn_pair(ccx.tn, f_t))
};
auto dtor_addr = cx.build.Load
(cx.build.GEP(dtor_pair, [C_int(0), C_int(abi::fn_field_code)]));
auto dtor_env = cx.build.Load
(cx.build.GEP(dtor_pair, [C_int(0), C_int(abi::fn_field_box)]));
auto args = [cx.fcx.llretptr, cx.fcx.lltaskptr, dtor_env];
for (ty::t tp in tps) {
let option::t[@tydesc_info] ti = none;
auto td = get_tydesc(cx, tp, false, ti);
args += [td.val];
cx = td.bcx;
}
// Kludge to work around the fact that we know the precise type of the
// value here, but the dtor expects a type that still has opaque pointers
// for type variables.
auto val_llty = lib::llvm::fn_ty_param_tys
(llvm::LLVMGetElementType(llvm::LLVMTypeOf(dtor_addr)))
.(vec::len(args));
auto val_cast = cx.build.BitCast(val.val, val_llty);
cx.build.FastCall(dtor_addr, args + [val_cast]);
cx = drop_slot(cx, val.val, inner_t_s).bcx;
cx.build.Store(C_int(0), drop_flag.val);
cx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
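// Emits the usual refcount-decrement sequence. A sketch of the blocks
// created below, using the names they are given:
//
//     (entry):   box_ptr = load box_ptr_alias
//                if box_ptr is null -> "next", else -> "load rc"
//     "load rc": if rc == const_refcount (immortal) -> "next", else "rc--"
//     "rc--":    rc -= 1; store; if rc == 0 -> "free", else -> "next"
//     "free":    call the free glue for t, then -> "next"
//     "next":    phi over the four predecessors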
fn decr_refcnt_maybe_free(&@block_ctxt cx, ValueRef box_ptr_alias,
ValueRef full_alias, &ty::t t) -> result {
auto load_rc_cx = new_sub_block_ctxt(cx, "load rc");
auto rc_adj_cx = new_sub_block_ctxt(cx, "rc--");
auto free_cx = new_sub_block_ctxt(cx, "free");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto box_ptr = cx.build.Load(box_ptr_alias);
auto null_test = cx.build.IsNull(box_ptr);
cx.build.CondBr(null_test, next_cx.llbb, load_rc_cx.llbb);
auto rc_ptr =
load_rc_cx.build.GEP(box_ptr,
[C_int(0), C_int(abi::box_rc_field_refcnt)]);
auto rc = load_rc_cx.build.Load(rc_ptr);
auto const_test =
load_rc_cx.build.ICmp(lib::llvm::LLVMIntEQ,
C_int(abi::const_refcount as int), rc);
load_rc_cx.build.CondBr(const_test, next_cx.llbb, rc_adj_cx.llbb);
rc = rc_adj_cx.build.Sub(rc, C_int(1));
rc_adj_cx.build.Store(rc, rc_ptr);
auto zero_test = rc_adj_cx.build.ICmp(lib::llvm::LLVMIntEQ, C_int(0), rc);
rc_adj_cx.build.CondBr(zero_test, free_cx.llbb, next_cx.llbb);
auto free_res =
free_ty(free_cx, load_if_immediate(free_cx, full_alias, t), t);
free_res.bcx.build.Br(next_cx.llbb);
auto t_else = T_nil();
auto v_else = C_nil();
auto phi =
next_cx.build.Phi(t_else, [v_else, v_else, v_else, free_res.val],
[cx.llbb, load_rc_cx.llbb, rc_adj_cx.llbb,
free_res.bcx.llbb]);
ret rslt(next_cx, phi);
}
// Structural comparison: a rather involved form of glue.
fn maybe_name_value(&@crate_ctxt cx, ValueRef v, &str s) {
if (cx.sess.get_opts().save_temps) {
llvm::LLVMSetValueName(v, str::buf(s));
}
}
fn make_cmp_glue(&@block_ctxt cx, ValueRef lhs0, ValueRef rhs0, &ty::t t,
ValueRef llop) {
auto lhs = load_if_immediate(cx, lhs0, t);
auto rhs = load_if_immediate(cx, rhs0, t);
if (ty::type_is_scalar(cx.fcx.lcx.ccx.tcx, t)) {
make_scalar_cmp_glue(cx, lhs, rhs, t, llop);
} else if (ty::type_is_box(cx.fcx.lcx.ccx.tcx, t)) {
lhs = cx.build.GEP(lhs, [C_int(0), C_int(abi::box_rc_field_body)]);
rhs = cx.build.GEP(rhs, [C_int(0), C_int(abi::box_rc_field_body)]);
auto t_inner =
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_box(?ti)) { ti.ty }
};
auto rslt = compare(cx, lhs, rhs, t_inner, llop);
rslt.bcx.build.Store(rslt.val, cx.fcx.llretptr);
rslt.bcx.build.RetVoid();
} else if (ty::type_is_structural(cx.fcx.lcx.ccx.tcx, t) ||
ty::type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
auto scx = new_sub_block_ctxt(cx, "structural compare start");
auto next = new_sub_block_ctxt(cx, "structural compare end");
cx.build.Br(scx.llbb);
/*
* We're doing lexicographic comparison here. We start with the
* assumption that the two input elements are equal. Depending on
* operator, this means that the result is either true or false;
* equality produces 'true' for ==, <= and >=. It produces 'false' for
* !=, < and >.
*
* We then move one element at a time through the structure checking
* for pairwise element equality: If we have equality, our assumption
* about overall sequence equality is not modified, so we have to move
* to the next element.
*
* If we do not have pairwise element equality, we have reached an
* element that 'decides' the lexicographic comparison. So we exit the
* loop with a flag that indicates the true/false sense of that
* decision, by testing the element again with the operator we're
* interested in.
*
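         * For example (illustrative only): evaluating "ab" <= "ac" finds
         * 'a' == 'a' (continue), then 'b' == 'c' fails (stop); the
         * deciding pair is re-tested as 'b' <= 'c', so the flag ends up
         * true.
         *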
         * When we're lucky, LLVM should be able to fold some of these
         * pairs of tests together (as they're applied to the same operands
         * and are in some cases redundant). But we don't bother trying to
         * optimize combinations like that, at this level.
*/
auto flag = alloca(scx, T_i1());
maybe_name_value(cx.fcx.lcx.ccx, flag, "flag");
auto r;
if (ty::type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
// If we hit == all the way through the minimum-shared-length
// section, default to judging the relative sequence lengths.
auto lhs_fill;
auto rhs_fill;
auto bcx;
if (ty::sequence_is_interior(cx.fcx.lcx.ccx.tcx, t)) {
auto st = ty::sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
auto lad =
ivec::get_len_and_data(scx, lhs, st);
bcx = lad._2;
lhs_fill = lad._0;
lad =
ivec::get_len_and_data(bcx, rhs, st);
bcx = lad._2;
rhs_fill = lad._0;
} else {
lhs_fill = vec_fill(scx, lhs);
rhs_fill = vec_fill(scx, rhs);
bcx = scx;
}
r =
compare_numerical_values(bcx, lhs_fill, rhs_fill,
unsigned_int, llop);
r.bcx.build.Store(r.val, flag);
} else {
// == and <= default to true if they find == all the way. <
// defaults to false if it finds == all the way.
auto result_if_equal =
scx.build.ICmp(lib::llvm::LLVMIntNE, llop,
C_u8(abi::cmp_glue_op_lt));
scx.build.Store(result_if_equal, flag);
r = rslt(scx, C_nil());
}
fn inner(@block_ctxt last_cx, bool load_inner, ValueRef flag,
ValueRef llop, &@block_ctxt cx, ValueRef av0, ValueRef bv0,
ty::t t) -> result {
auto cnt_cx = new_sub_block_ctxt(cx, "continue_comparison");
auto stop_cx = new_sub_block_ctxt(cx, "stop_comparison");
auto av = av0;
auto bv = bv0;
if (load_inner) {
// If `load_inner` is true, then the pointer type will always
// be i8, because the data part of a vector always has type
// i8[]. So we need to cast it to the proper type.
if (!ty::type_has_dynamic_size(last_cx.fcx.lcx.ccx.tcx, t)) {
auto llelemty =
T_ptr(type_of(last_cx.fcx.lcx.ccx, last_cx.sp, t));
av = cx.build.PointerCast(av, llelemty);
bv = cx.build.PointerCast(bv, llelemty);
}
av = load_if_immediate(cx, av, t);
bv = load_if_immediate(cx, bv, t);
}
// First 'eq' comparison: if so, continue to next elts.
auto eq_r = compare(cx, av, bv, t, C_u8(abi::cmp_glue_op_eq));
eq_r.bcx.build.CondBr(eq_r.val, cnt_cx.llbb, stop_cx.llbb);
// Second 'op' comparison: find out how this elt-pair decides.
auto stop_r = compare(stop_cx, av, bv, t, llop);
stop_r.bcx.build.Store(stop_r.val, flag);
stop_r.bcx.build.Br(last_cx.llbb);
ret rslt(cnt_cx, C_nil());
}
if (ty::type_is_structural(cx.fcx.lcx.ccx.tcx, t)) {
r =
iter_structural_ty_full(r.bcx, lhs, rhs, t,
bind inner(next, false, flag, llop, _,
_, _, _));
} else {
auto lhs_p0 = vec_p0(r.bcx, lhs);
auto rhs_p0 = vec_p0(r.bcx, rhs);
auto min_len =
umin(r.bcx, vec_fill(r.bcx, lhs), vec_fill(r.bcx, rhs));
auto rhs_lim = r.bcx.build.GEP(rhs_p0, [min_len]);
auto elt_ty = ty::sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
r = size_of(r.bcx, elt_ty);
r =
iter_sequence_raw(r.bcx, lhs_p0, rhs_p0, rhs_lim, r.val,
bind inner(next, true, flag, llop, _, _, _,
elt_ty));
}
r.bcx.build.Br(next.llbb);
auto v = next.build.Load(flag);
next.build.Store(v, cx.fcx.llretptr);
next.build.RetVoid();
} else {
// FIXME: compare obj, fn by pointer?
trans_fail(cx, none[span],
"attempt to compare values of type " +
ty_to_str(cx.fcx.lcx.ccx.tcx, t));
}
}
// Used only for creating scalar comparison glue.
tag numerical_type { signed_int; unsigned_int; floating_point; }
fn compare_scalar_types(@block_ctxt cx, ValueRef lhs, ValueRef rhs, &ty::t t,
ValueRef llop) -> result {
// FIXME: this could be a lot shorter if we could combine multiple cases
// of alt expressions (issue #449).
auto f = bind compare_numerical_values(cx, lhs, rhs, _, llop);
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_nil) { ret rslt(cx, C_bool(true)); }
case (ty::ty_bool) { ret f(unsigned_int); }
case (ty::ty_int) { ret f(signed_int); }
case (ty::ty_float) { ret f(floating_point); }
case (ty::ty_uint) { ret f(unsigned_int); }
case (ty::ty_machine(_)) {
if (ty::type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
// Floating point machine types
ret f(floating_point);
} else if (ty::type_is_signed(cx.fcx.lcx.ccx.tcx, t)) {
// Signed, integral machine types
ret f(signed_int);
} else {
// Unsigned, integral machine types
ret f(unsigned_int);
}
}
case (ty::ty_char) { ret f(unsigned_int); }
case (ty::ty_type) {
trans_fail(cx, none[span],
"attempt to compare values of type type");
// This is a bit lame, because we return a dummy block to the
// caller that's actually unreachable, but I don't think it
// matters.
ret rslt(new_sub_block_ctxt(cx, "after_fail_dummy"),
C_bool(false));
}
case (ty::ty_native(_)) {
trans_fail(cx, none[span],
"attempt to compare values of type native");
ret rslt(new_sub_block_ctxt(cx, "after_fail_dummy"),
C_bool(false));
}
case (ty::ty_ptr(_)) {
ret f(unsigned_int);
}
case (_) {
// Should never get here, because t is scalar.
cx.fcx.lcx.ccx.sess.bug("non-scalar type passed to " +
"compare_scalar_types");
}
}
}
// A helper function to create scalar comparison glue.
fn make_scalar_cmp_glue(&@block_ctxt cx, ValueRef lhs, ValueRef rhs, &ty::t t,
ValueRef llop) {
assert ty::type_is_scalar(cx.fcx.lcx.ccx.tcx, t);
// In most cases, we need to know whether to do signed, unsigned, or float
// comparison.
auto rslt = compare_scalar_types(cx, lhs, rhs, t, llop);
auto bcx = rslt.bcx;
auto compare_result = rslt.val;
bcx.build.Store(compare_result, cx.fcx.llretptr);
bcx.build.RetVoid();
}
// A helper function to compare numerical values.
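// Since the operator only arrives at runtime as a u8 (llop), the glue
// emits a three-way switch rather than a single compare; schematically:
//
//     switch llop: eq -> cmp EQ | lt -> cmp "less than" | le -> cmp "<="
//     last: result = phi of whichever arm ran
//
// with an unreachable default arm for malformed operator values.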
fn compare_numerical_values(&@block_ctxt cx, ValueRef lhs, ValueRef rhs,
numerical_type nt, ValueRef llop) -> result {
auto eq_cmp;
auto lt_cmp;
auto le_cmp;
alt (nt) {
case (floating_point) {
eq_cmp = lib::llvm::LLVMRealUEQ;
lt_cmp = lib::llvm::LLVMRealULT;
le_cmp = lib::llvm::LLVMRealULE;
}
case (signed_int) {
eq_cmp = lib::llvm::LLVMIntEQ;
lt_cmp = lib::llvm::LLVMIntSLT;
le_cmp = lib::llvm::LLVMIntSLE;
}
case (unsigned_int) {
eq_cmp = lib::llvm::LLVMIntEQ;
lt_cmp = lib::llvm::LLVMIntULT;
le_cmp = lib::llvm::LLVMIntULE;
}
}
// FIXME: This wouldn't be necessary if we could bind methods off of
// objects and therefore abstract over FCmp and ICmp (issue #435). Then
// we could just write, e.g., "cmp_fn = bind cx.build.FCmp(_, _, _);" in
// the above, and "auto eq_result = cmp_fn(eq_cmp, lhs, rhs);" in the
// below.
fn generic_cmp(&@block_ctxt cx, numerical_type nt, uint op, ValueRef lhs,
ValueRef rhs) -> ValueRef {
let ValueRef r;
if (nt == floating_point) {
r = cx.build.FCmp(op, lhs, rhs);
} else { r = cx.build.ICmp(op, lhs, rhs); }
ret r;
}
auto last_cx = new_sub_block_ctxt(cx, "last");
auto eq_cx = new_sub_block_ctxt(cx, "eq");
auto eq_result = generic_cmp(eq_cx, nt, eq_cmp, lhs, rhs);
eq_cx.build.Br(last_cx.llbb);
auto lt_cx = new_sub_block_ctxt(cx, "lt");
auto lt_result = generic_cmp(lt_cx, nt, lt_cmp, lhs, rhs);
lt_cx.build.Br(last_cx.llbb);
auto le_cx = new_sub_block_ctxt(cx, "le");
auto le_result = generic_cmp(le_cx, nt, le_cmp, lhs, rhs);
le_cx.build.Br(last_cx.llbb);
auto unreach_cx = new_sub_block_ctxt(cx, "unreach");
unreach_cx.build.Unreachable();
auto llswitch = cx.build.Switch(llop, unreach_cx.llbb, 3u);
llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_eq), eq_cx.llbb);
llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_lt), lt_cx.llbb);
llvm::LLVMAddCase(llswitch, C_u8(abi::cmp_glue_op_le), le_cx.llbb);
auto last_result =
last_cx.build.Phi(T_i1(), [eq_result, lt_result, le_result],
[eq_cx.llbb, lt_cx.llbb, le_cx.llbb]);
ret rslt(last_cx, last_result);
}
// A helper function to create numerical comparison glue.
fn make_numerical_cmp_glue(&@block_ctxt cx, ValueRef lhs, ValueRef rhs,
numerical_type nt, ValueRef llop) {
auto r = compare_numerical_values(cx, lhs, rhs, nt, llop);
r.bcx.build.Store(r.val, r.bcx.fcx.llretptr);
r.bcx.build.RetVoid();
}
type val_pair_fn = fn(&@block_ctxt, ValueRef, ValueRef) -> result ;
type val_and_ty_fn = fn(&@block_ctxt, ValueRef, ty::t) -> result ;
type val_pair_and_ty_fn =
fn(&@block_ctxt, ValueRef, ValueRef, ty::t) -> result ;
// Iterates through the elements of a structural type.
fn iter_structural_ty(&@block_ctxt cx, ValueRef v, &ty::t t, val_and_ty_fn f)
-> result {
fn adaptor_fn(val_and_ty_fn f, &@block_ctxt cx, ValueRef av, ValueRef bv,
ty::t t) -> result {
ret f(cx, av, t);
}
ret iter_structural_ty_full(cx, v, v, t, bind adaptor_fn(f, _, _, _, _));
}
fn load_inbounds(&@block_ctxt cx, ValueRef p,
vec[ValueRef] idxs) -> ValueRef {
ret cx.build.Load(cx.build.InBoundsGEP(p, idxs));
}
fn store_inbounds(&@block_ctxt cx, ValueRef v,
ValueRef p, vec[ValueRef] idxs) {
cx.build.Store(v, cx.build.InBoundsGEP(p, idxs));
}
// This uses a store and an inbounds GEP, but it's only doing so
// superficially; it's really storing an incremented pointer to another
// pointer.
fn incr_ptr(&@block_ctxt cx, ValueRef p,
ValueRef incr, ValueRef pp) {
cx.build.Store(cx.build.InBoundsGEP(p, [incr]), pp);
}
fn iter_structural_ty_full(&@block_ctxt cx, ValueRef av, ValueRef bv,
&ty::t t, &val_pair_and_ty_fn f) -> result {
fn iter_boxpp(@block_ctxt cx, ValueRef box_a_cell, ValueRef box_b_cell,
&val_pair_and_ty_fn f) -> result {
auto box_a_ptr = cx.build.Load(box_a_cell);
auto box_b_ptr = cx.build.Load(box_b_cell);
auto tnil = ty::mk_nil(cx.fcx.lcx.ccx.tcx);
auto tbox = ty::mk_imm_box(cx.fcx.lcx.ccx.tcx, tnil);
auto inner_cx = new_sub_block_ctxt(cx, "iter box");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto null_test = cx.build.IsNull(box_a_ptr);
cx.build.CondBr(null_test, next_cx.llbb, inner_cx.llbb);
auto r = f(inner_cx, box_a_ptr, box_b_ptr, tbox);
r.bcx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn iter_ivec(@block_ctxt bcx, ValueRef av, ValueRef bv, ty::t unit_ty,
&val_pair_and_ty_fn f) -> result {
// FIXME: "unimplemented rebinding existing function" workaround
fn adapter(&@block_ctxt bcx, ValueRef av, ValueRef bv, ty::t unit_ty,
val_pair_and_ty_fn f) -> result {
ret f(bcx, av, bv, unit_ty);
}
auto llunitty = type_of_or_i8(bcx, unit_ty);
auto rs = size_of(bcx, unit_ty);
auto unit_sz = rs.val;
bcx = rs.bcx;
auto a_len_and_data = ivec::get_len_and_data(bcx, av, unit_ty);
auto a_len = a_len_and_data._0;
auto a_elem = a_len_and_data._1;
bcx = a_len_and_data._2;
auto b_len_and_data = ivec::get_len_and_data(bcx, bv, unit_ty);
auto b_len = b_len_and_data._0;
auto b_elem = b_len_and_data._1;
bcx = b_len_and_data._2;
// Calculate the last pointer address we want to handle.
// TODO: Optimize this when the size of the unit type is statically
// known to not use pointer casts, which tend to confuse LLVM.
auto len = umin(bcx, a_len, b_len);
auto b_elem_i8 = bcx.build.PointerCast(b_elem, T_ptr(T_i8()));
auto b_end_i8 = bcx.build.GEP(b_elem_i8, [len]);
auto b_end = bcx.build.PointerCast(b_end_i8, T_ptr(llunitty));
auto dest_elem_ptr = alloca(bcx, T_ptr(llunitty));
auto src_elem_ptr = alloca(bcx, T_ptr(llunitty));
bcx.build.Store(a_elem, dest_elem_ptr);
bcx.build.Store(b_elem, src_elem_ptr);
// Now perform the iteration.
auto loop_header_cx = new_sub_block_ctxt(bcx,
"iter_ivec_loop_header");
bcx.build.Br(loop_header_cx.llbb);
auto dest_elem = loop_header_cx.build.Load(dest_elem_ptr);
auto src_elem = loop_header_cx.build.Load(src_elem_ptr);
auto not_yet_at_end = loop_header_cx.build.ICmp(lib::llvm::LLVMIntULT,
dest_elem, b_end);
auto loop_body_cx = new_sub_block_ctxt(bcx, "iter_ivec_loop_body");
auto next_cx = new_sub_block_ctxt(bcx, "iter_ivec_next");
loop_header_cx.build.CondBr(not_yet_at_end, loop_body_cx.llbb,
next_cx.llbb);
rs = f(loop_body_cx,
load_if_immediate(loop_body_cx, dest_elem, unit_ty),
load_if_immediate(loop_body_cx, src_elem, unit_ty), unit_ty);
loop_body_cx = rs.bcx;
auto increment;
if (ty::type_has_dynamic_size(bcx.fcx.lcx.ccx.tcx, unit_ty)) {
increment = unit_sz;
} else {
increment = C_int(1);
}
incr_ptr(loop_body_cx, dest_elem, increment, dest_elem_ptr);
incr_ptr(loop_body_cx, src_elem, increment, src_elem_ptr);
loop_body_cx.build.Br(loop_header_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn iter_variant(@block_ctxt cx, ValueRef a_tup, ValueRef b_tup,
&ty::variant_info variant, &vec[ty::t] tps,
&ast::def_id tid, &val_pair_and_ty_fn f) -> result {
if (vec::len[ty::t](variant.args) == 0u) {
ret rslt(cx, C_nil());
}
auto fn_ty = variant.ctor_ty;
auto ccx = cx.fcx.lcx.ccx;
alt (ty::struct(ccx.tcx, fn_ty)) {
case (ty::ty_fn(_, ?args, _, _, _)) {
auto j = 0;
for (ty::arg a in args) {
auto rslt = GEP_tag(cx, a_tup, tid,
variant.id, tps, j);
auto llfldp_a = rslt.val;
cx = rslt.bcx;
rslt = GEP_tag(cx, b_tup, tid,
variant.id, tps, j);
auto llfldp_b = rslt.val;
cx = rslt.bcx;
auto ty_subst =
ty::substitute_type_params(ccx.tcx, tps, a.ty);
auto llfld_a =
load_if_immediate(cx, llfldp_a, ty_subst);
auto llfld_b =
load_if_immediate(cx, llfldp_b, ty_subst);
rslt = f(cx, llfld_a, llfld_b, ty_subst);
cx = rslt.bcx;
j += 1;
}
}
}
ret rslt(cx, C_nil());
}
let result r = rslt(cx, C_nil());
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_tup(?args)) {
let int i = 0;
for (ty::mt arg in args) {
r = GEP_tup_like(r.bcx, t, av, [0, i]);
auto elt_a = r.val;
r = GEP_tup_like(r.bcx, t, bv, [0, i]);
auto elt_b = r.val;
r = f(r.bcx, load_if_immediate(r.bcx, elt_a, arg.ty),
load_if_immediate(r.bcx, elt_b, arg.ty), arg.ty);
i += 1;
}
}
case (ty::ty_rec(?fields)) {
let int i = 0;
for (ty::field fld in fields) {
r = GEP_tup_like(r.bcx, t, av, [0, i]);
auto llfld_a = r.val;
r = GEP_tup_like(r.bcx, t, bv, [0, i]);
auto llfld_b = r.val;
r = f(r.bcx, load_if_immediate(r.bcx, llfld_a, fld.mt.ty),
load_if_immediate(r.bcx, llfld_b, fld.mt.ty),
fld.mt.ty);
i += 1;
}
}
case (ty::ty_res(_, ?inner, ?tps)) {
auto inner1 = ty::substitute_type_params(cx.fcx.lcx.ccx.tcx,
tps, inner);
r = GEP_tup_like(r.bcx, t, av, [0, 1]);
auto llfld_a = r.val;
r = GEP_tup_like(r.bcx, t, bv, [0, 1]);
auto llfld_b = r.val;
f(r.bcx, load_if_immediate(r.bcx, llfld_a, inner1),
load_if_immediate(r.bcx, llfld_b, inner1), inner1);
}
case (ty::ty_tag(?tid, ?tps)) {
auto variants = ty::tag_variants(cx.fcx.lcx.ccx.tcx, tid);
auto n_variants = vec::len(variants);
// Cast the tags to types we can GEP into.
if (n_variants == 1u) {
ret iter_variant(cx, av, bv, variants.(0), tps, tid, f);
}
auto lltagty = T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn);
auto av_tag = cx.build.PointerCast(av, lltagty);
auto bv_tag = cx.build.PointerCast(bv, lltagty);
auto lldiscrim_a_ptr = cx.build.GEP(av_tag, [C_int(0), C_int(0)]);
auto llunion_a_ptr = cx.build.GEP(av_tag, [C_int(0), C_int(1)]);
auto lldiscrim_a = cx.build.Load(lldiscrim_a_ptr);
auto lldiscrim_b_ptr = cx.build.GEP(bv_tag, [C_int(0), C_int(0)]);
auto llunion_b_ptr = cx.build.GEP(bv_tag, [C_int(0), C_int(1)]);
auto lldiscrim_b = cx.build.Load(lldiscrim_b_ptr);
            // NB: we must hit the discriminant first so that structural
            // comparison knows not to proceed when the discriminants differ.
auto bcx = cx;
bcx =
f(bcx, lldiscrim_a, lldiscrim_b,
ty::mk_int(cx.fcx.lcx.ccx.tcx)).bcx;
auto unr_cx = new_sub_block_ctxt(bcx, "tag-iter-unr");
unr_cx.build.Unreachable();
auto llswitch =
bcx.build.Switch(lldiscrim_a, unr_cx.llbb, n_variants);
auto next_cx = new_sub_block_ctxt(bcx, "tag-iter-next");
auto i = 0u;
for (ty::variant_info variant in variants) {
auto variant_cx =
new_sub_block_ctxt(bcx,
"tag-iter-variant-" +
uint::to_str(i, 10u));
llvm::LLVMAddCase(llswitch, C_int(i as int), variant_cx.llbb);
variant_cx = iter_variant
(variant_cx, llunion_a_ptr, llunion_b_ptr, variant,
tps, tid, f).bcx;
variant_cx.build.Br(next_cx.llbb);
i += 1u;
}
ret rslt(next_cx, C_nil());
}
case (ty::ty_fn(_, _, _, _, _)) {
auto box_cell_a =
cx.build.GEP(av, [C_int(0), C_int(abi::fn_field_box)]);
auto box_cell_b =
cx.build.GEP(bv, [C_int(0), C_int(abi::fn_field_box)]);
ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (ty::ty_obj(_)) {
auto box_cell_a =
cx.build.GEP(av, [C_int(0), C_int(abi::obj_field_box)]);
auto box_cell_b =
cx.build.GEP(bv, [C_int(0), C_int(abi::obj_field_box)]);
ret iter_boxpp(cx, box_cell_a, box_cell_b, f);
}
case (ty::ty_ivec(?unit_tm)) {
ret iter_ivec(cx, av, bv, unit_tm.ty, f);
}
case (ty::ty_istr) {
auto unit_ty = ty::mk_mach(cx.fcx.lcx.ccx.tcx, ast::ty_u8);
ret iter_ivec(cx, av, bv, unit_ty, f);
}
case (_) {
cx.fcx.lcx.ccx.sess.unimpl("type in iter_structural_ty_full");
}
}
ret r;
}
// Iterates through a pointer range, until the src* hits the src_lim*.
fn iter_sequence_raw(@block_ctxt cx,
                     ValueRef dst, // elt*
                     ValueRef src, // elt*
                     ValueRef src_lim, // elt*
                     ValueRef elt_sz, &val_pair_fn f) -> result {
auto bcx = cx;
let ValueRef dst_int = vp2i(bcx, dst);
let ValueRef src_int = vp2i(bcx, src);
let ValueRef src_lim_int = vp2i(bcx, src_lim);
auto cond_cx = new_scope_block_ctxt(cx, "sequence-iter cond");
auto body_cx = new_scope_block_ctxt(cx, "sequence-iter body");
auto next_cx = new_sub_block_ctxt(cx, "next");
bcx.build.Br(cond_cx.llbb);
let ValueRef dst_curr = cond_cx.build.Phi(T_int(), [dst_int], [bcx.llbb]);
let ValueRef src_curr = cond_cx.build.Phi(T_int(), [src_int], [bcx.llbb]);
auto end_test =
cond_cx.build.ICmp(lib::llvm::LLVMIntULT, src_curr, src_lim_int);
cond_cx.build.CondBr(end_test, body_cx.llbb, next_cx.llbb);
auto dst_curr_ptr = vi2p(body_cx, dst_curr, T_ptr(T_i8()));
auto src_curr_ptr = vi2p(body_cx, src_curr, T_ptr(T_i8()));
auto body_res = f(body_cx, dst_curr_ptr, src_curr_ptr);
body_cx = body_res.bcx;
auto dst_next = body_cx.build.Add(dst_curr, elt_sz);
auto src_next = body_cx.build.Add(src_curr, elt_sz);
body_cx.build.Br(cond_cx.llbb);
cond_cx.build.AddIncomingToPhi(dst_curr, [dst_next], [body_cx.llbb]);
cond_cx.build.AddIncomingToPhi(src_curr, [src_next], [body_cx.llbb]);
ret rslt(next_cx, C_nil());
}
fn iter_sequence_inner(&@block_ctxt cx,
                       ValueRef src, // elt*
                       ValueRef src_lim, // elt*
                       &ty::t elt_ty, &val_and_ty_fn f) -> result {
fn adaptor_fn(val_and_ty_fn f, ty::t elt_ty, &@block_ctxt cx,
ValueRef dst, ValueRef src) -> result {
auto llptrty;
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, elt_ty)) {
auto llty = type_of(cx.fcx.lcx.ccx, cx.sp, elt_ty);
llptrty = T_ptr(llty);
} else { llptrty = T_ptr(T_ptr(T_i8())); }
auto p = cx.build.PointerCast(src, llptrty);
ret f(cx, load_if_immediate(cx, p, elt_ty), elt_ty);
}
auto elt_sz = size_of(cx, elt_ty);
ret iter_sequence_raw(elt_sz.bcx, src, src, src_lim, elt_sz.val,
bind adaptor_fn(f, elt_ty, _, _, _));
}
// Iterates through the elements of a vec or str.
fn iter_sequence(@block_ctxt cx, ValueRef v, &ty::t t, &val_and_ty_fn f) ->
result {
fn iter_sequence_body(@block_ctxt cx, ValueRef v, &ty::t elt_ty,
&val_and_ty_fn f, bool trailing_null, bool interior)
-> result {
auto p0;
auto len;
auto bcx;
if (!interior) {
p0 = cx.build.GEP(v, [C_int(0), C_int(abi::vec_elt_data)]);
auto lp = cx.build.GEP(v, [C_int(0), C_int(abi::vec_elt_fill)]);
len = cx.build.Load(lp);
bcx = cx;
} else {
auto len_and_data_rslt = ivec::get_len_and_data(cx, v, elt_ty);
len = len_and_data_rslt._0;
p0 = len_and_data_rslt._1;
bcx = len_and_data_rslt._2;
}
auto llunit_ty = type_of_or_i8(cx, elt_ty);
if (trailing_null) {
auto unit_sz = size_of(bcx, elt_ty);
bcx = unit_sz.bcx;
len = bcx.build.Sub(len, unit_sz.val);
}
auto p1 =
vi2p(bcx, bcx.build.Add(vp2i(bcx, p0), len), T_ptr(llunit_ty));
ret iter_sequence_inner(bcx, p0, p1, elt_ty, f);
}
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_vec(?elt)) {
ret iter_sequence_body(cx, v, elt.ty, f, false, false);
}
case (ty::ty_str) {
auto et = ty::mk_mach(cx.fcx.lcx.ccx.tcx, ast::ty_u8);
ret iter_sequence_body(cx, v, et, f, true, false);
}
case (ty::ty_ivec(?elt)) {
ret iter_sequence_body(cx, v, elt.ty, f, false, true);
}
case (ty::ty_istr) {
auto et = ty::mk_mach(cx.fcx.lcx.ccx.tcx, ast::ty_u8);
ret iter_sequence_body(cx, v, et, f, true, true);
}
case (_) {
cx.fcx.lcx.ccx.sess.bug("unexpected type in " +
"trans::iter_sequence: " +
ty_to_str(cx.fcx.lcx.ccx.tcx, t));
}
}
}
fn lazily_emit_all_tydesc_glue(&@block_ctxt cx,
&option::t[@tydesc_info] static_ti) {
lazily_emit_tydesc_glue(cx, abi::tydesc_field_copy_glue, static_ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_drop_glue, static_ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_free_glue, static_ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_cmp_glue, static_ti);
}
fn lazily_emit_all_generic_info_tydesc_glues(&@block_ctxt cx,
&generic_info gi) {
for (option::t[@tydesc_info] ti in gi.static_tis) {
lazily_emit_all_tydesc_glue(cx, ti);
}
}
fn lazily_emit_tydesc_glue(&@block_ctxt cx, int field,
&option::t[@tydesc_info] static_ti) {
alt (static_ti) {
case (none) { }
case (some(?ti)) {
if (field == abi::tydesc_field_copy_glue) {
alt ({ ti.copy_glue }) {
case (some(_)) { }
case (none) {
log #fmt("+++ lazily_emit_tydesc_glue TAKE %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
auto lcx = cx.fcx.lcx;
auto glue_fn =
declare_generic_glue(lcx, ti.ty,
T_glue_fn(lcx.ccx.tn),
"copy");
ti.copy_glue = some[ValueRef](glue_fn);
auto tg = make_copy_glue;
make_generic_glue(lcx, cx.sp, ti.ty, glue_fn,
mgghf_single(tg), ti.ty_params);
log #fmt("--- lazily_emit_tydesc_glue TAKE %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
}
}
} else if (field == abi::tydesc_field_drop_glue) {
alt ({ ti.drop_glue }) {
case (some(_)) { }
case (none) {
log #fmt("+++ lazily_emit_tydesc_glue DROP %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
auto lcx = cx.fcx.lcx;
auto glue_fn =
declare_generic_glue(lcx, ti.ty,
T_glue_fn(lcx.ccx.tn),
"drop");
ti.drop_glue = some[ValueRef](glue_fn);
make_generic_glue(lcx, cx.sp, ti.ty, glue_fn,
mgghf_single(make_drop_glue),
ti.ty_params);
log #fmt("--- lazily_emit_tydesc_glue DROP %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
}
}
} else if (field == abi::tydesc_field_free_glue) {
alt ({ ti.free_glue }) {
case (some(_)) { }
case (none) {
log #fmt("+++ lazily_emit_tydesc_glue FREE %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
auto lcx = cx.fcx.lcx;
auto glue_fn =
declare_generic_glue(lcx, ti.ty,
T_glue_fn(lcx.ccx.tn),
"free");
ti.free_glue = some[ValueRef](glue_fn);
auto dg = make_free_glue;
make_generic_glue(lcx, cx.sp, ti.ty, glue_fn,
mgghf_single(dg), ti.ty_params);
log #fmt("--- lazily_emit_tydesc_glue FREE %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
}
}
} else if (field == abi::tydesc_field_cmp_glue) {
alt ({ ti.cmp_glue }) {
case (some(_)) { }
case (none) {
log #fmt("+++ lazily_emit_tydesc_glue CMP %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
auto lcx = cx.fcx.lcx;
auto glue_fn =
declare_generic_glue(lcx, ti.ty,
T_cmp_glue_fn(lcx.ccx.tn),
"cmp");
ti.cmp_glue = some[ValueRef](glue_fn);
make_generic_glue(lcx, cx.sp, ti.ty, glue_fn,
mgghf_cmp, ti.ty_params);
log #fmt("--- lazily_emit_tydesc_glue CMP %s",
ty_to_str(cx.fcx.lcx.ccx.tcx, ti.ty));
}
}
}
}
}
}
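// Calls a piece of tydesc glue on a value: directly, if the glue function
// is statically known (via static_ti), and otherwise by loading the
// function pointer out of the tydesc at the given field offset.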
fn call_tydesc_glue_full(&@block_ctxt cx, ValueRef v, ValueRef tydesc,
int field, &option::t[@tydesc_info] static_ti) {
lazily_emit_tydesc_glue(cx, field, static_ti);
auto static_glue_fn = none;
alt (static_ti) {
case (none) { /* no-op */ }
case (some(?sti)) {
if (field == abi::tydesc_field_copy_glue) {
static_glue_fn = sti.copy_glue;
} else if (field == abi::tydesc_field_drop_glue) {
static_glue_fn = sti.drop_glue;
} else if (field == abi::tydesc_field_free_glue) {
static_glue_fn = sti.free_glue;
} else if (field == abi::tydesc_field_cmp_glue) {
static_glue_fn = sti.cmp_glue;
}
}
}
auto llrawptr = cx.build.BitCast(v, T_ptr(T_i8()));
auto lltydescs =
cx.build.GEP(tydesc,
[C_int(0), C_int(abi::tydesc_field_first_param)]);
lltydescs = cx.build.Load(lltydescs);
auto llfn;
alt (static_glue_fn) {
case (none) {
auto llfnptr = cx.build.GEP(tydesc, [C_int(0), C_int(field)]);
llfn = cx.build.Load(llfnptr);
}
case (some(?sgf)) { llfn = sgf; }
}
cx.build.Call(llfn,
[C_null(T_ptr(T_nil())), cx.fcx.lltaskptr,
C_null(T_ptr(T_nil())), lltydescs, llrawptr]);
}
fn call_tydesc_glue(&@block_ctxt cx, ValueRef v, &ty::t t, int field) ->
result {
let option::t[@tydesc_info] ti = none[@tydesc_info];
auto td = get_tydesc(cx, t, false, ti);
call_tydesc_glue_full(td.bcx, spill_if_immediate(td.bcx, v, t), td.val,
field, ti);
ret rslt(td.bcx, C_nil());
}
fn maybe_call_dtor(&@block_ctxt cx, ValueRef v) -> @block_ctxt {
auto vtbl = cx.build.GEP(v, [C_int(0), C_int(abi::obj_field_vtbl)]);
vtbl = cx.build.Load(vtbl);
auto dtor_ptr = cx.build.GEP(vtbl, [C_int(0), C_int(0)]);
dtor_ptr = cx.build.Load(dtor_ptr);
auto self_t = llvm::LLVMGetElementType(val_ty(v));
dtor_ptr =
cx.build.BitCast(dtor_ptr,
T_ptr(T_dtor(cx.fcx.lcx.ccx, cx.sp, self_t)));
auto dtor_cx = new_sub_block_ctxt(cx, "dtor");
auto after_cx = new_sub_block_ctxt(cx, "after_dtor");
auto test =
cx.build.ICmp(lib::llvm::LLVMIntNE, dtor_ptr,
C_null(val_ty(dtor_ptr)));
cx.build.CondBr(test, dtor_cx.llbb, after_cx.llbb);
auto me = dtor_cx.build.Load(v);
dtor_cx.build.FastCall(dtor_ptr,
[C_null(T_ptr(T_nil())), cx.fcx.lltaskptr, me]);
dtor_cx.build.Br(after_cx.llbb);
ret after_cx;
}
fn call_cmp_glue(&@block_ctxt cx, ValueRef lhs, ValueRef rhs, &ty::t t,
ValueRef llop) -> result {
// We can't use call_tydesc_glue_full() and friends here because compare
// glue has a special signature.
auto lllhs = spill_if_immediate(cx, lhs, t);
auto llrhs = spill_if_immediate(cx, rhs, t);
auto llrawlhsptr = cx.build.BitCast(lllhs, T_ptr(T_i8()));
auto llrawrhsptr = cx.build.BitCast(llrhs, T_ptr(T_i8()));
auto ti = none[@tydesc_info];
auto r = get_tydesc(cx, t, false, ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_cmp_glue, ti);
auto lltydescs =
r.bcx.build.GEP(r.val,
[C_int(0), C_int(abi::tydesc_field_first_param)]);
lltydescs = r.bcx.build.Load(lltydescs);
auto llfn;
alt (ti) {
case (none) {
auto llfnptr = r.bcx.build.GEP(r.val, [C_int(0),
C_int(abi::tydesc_field_cmp_glue)]);
llfn = r.bcx.build.Load(llfnptr);
}
case (some(?sti)) { llfn = option::get(sti.cmp_glue); }
}
auto llcmpresultptr = alloca(r.bcx, T_i1());
let vec[ValueRef] llargs =
[llcmpresultptr, r.bcx.fcx.lltaskptr, C_null(T_ptr(T_nil())),
lltydescs, llrawlhsptr, llrawrhsptr, llop];
r.bcx.build.Call(llfn, llargs);
ret rslt(r.bcx, r.bcx.build.Load(llcmpresultptr));
}
// Compares two values. Performs the simple scalar comparison if the types
// are scalar, and calls out to comparison glue otherwise.
fn compare(&@block_ctxt cx, ValueRef lhs, ValueRef rhs, &ty::t t,
ValueRef llop) -> result {
if (ty::type_is_scalar(cx.fcx.lcx.ccx.tcx, t)) {
ret compare_scalar_types(cx, lhs, rhs, t, llop);
}
ret call_cmp_glue(cx, lhs, rhs, t, llop);
}
fn copy_ty(&@block_ctxt cx, ValueRef v, ty::t t) -> result {
if (ty::type_has_pointers(cx.fcx.lcx.ccx.tcx, t) ||
ty::type_owns_heap_mem(cx.fcx.lcx.ccx.tcx, t)) {
ret call_tydesc_glue(cx, v, t, abi::tydesc_field_copy_glue);
}
ret rslt(cx, C_nil());
}
fn drop_slot(&@block_ctxt cx, ValueRef slot, &ty::t t) -> result {
auto llptr = load_if_immediate(cx, slot, t);
auto re = drop_ty(cx, llptr, t);
auto llty = val_ty(slot);
auto llelemty = lib::llvm::llvm::LLVMGetElementType(llty);
re.bcx.build.Store(C_null(llelemty), slot);
ret re;
}
fn drop_ty(&@block_ctxt cx, ValueRef v, ty::t t) -> result {
if (ty::type_has_pointers(cx.fcx.lcx.ccx.tcx, t)) {
ret call_tydesc_glue(cx, v, t, abi::tydesc_field_drop_glue);
}
ret rslt(cx, C_nil());
}
fn free_ty(&@block_ctxt cx, ValueRef v, ty::t t) -> result {
if (ty::type_has_pointers(cx.fcx.lcx.ccx.tcx, t)) {
ret call_tydesc_glue(cx, v, t, abi::tydesc_field_free_glue);
}
ret rslt(cx, C_nil());
}
fn call_memmove(&@block_ctxt cx, ValueRef dst, ValueRef src,
ValueRef n_bytes) -> result {
// FIXME: switch to the 64-bit variant when on such a platform.
// TODO: Provide LLVM with better alignment information when the alignment
// is statically known (it must be nothing more than a constant int, or
// LLVM complains -- not even a constant element of a tydesc works).
auto i = cx.fcx.lcx.ccx.intrinsics;
assert (i.contains_key("llvm.memmove.p0i8.p0i8.i32"));
auto memmove = i.get("llvm.memmove.p0i8.p0i8.i32");
auto src_ptr = cx.build.PointerCast(src, T_ptr(T_i8()));
auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
auto size = cx.build.IntCast(n_bytes, T_i32());
auto align = C_int(0);
auto volatile = C_bool(false);
ret rslt(cx,
cx.build.Call(memmove,
[dst_ptr, src_ptr, size, align, volatile]));
}
fn call_bzero(&@block_ctxt cx, ValueRef dst, ValueRef n_bytes,
ValueRef align_bytes) -> result {
// FIXME: switch to the 64-bit variant when on such a platform.
auto i = cx.fcx.lcx.ccx.intrinsics;
assert (i.contains_key("llvm.memset.p0i8.i32"));
auto memset = i.get("llvm.memset.p0i8.i32");
auto dst_ptr = cx.build.PointerCast(dst, T_ptr(T_i8()));
auto size = cx.build.IntCast(n_bytes, T_i32());
auto align =
if (lib::llvm::llvm::LLVMIsConstant(align_bytes) == True) {
cx.build.IntCast(align_bytes, T_i32())
} else { cx.build.IntCast(C_int(0), T_i32()) };
auto volatile = C_bool(false);
ret rslt(cx,
cx.build.Call(memset,
[dst_ptr, C_u8(0u), size, align, volatile]));
}
fn memmove_ty(&@block_ctxt cx, ValueRef dst, ValueRef src, &ty::t t) ->
result {
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
auto llsz = size_of(cx, t);
ret call_memmove(llsz.bcx, dst, src, llsz.val);
} else { ret rslt(cx, cx.build.Store(cx.build.Load(src), dst)); }
}
// Duplicates any heap-owned memory owned by a value of the given type.
fn duplicate_heap_parts_if_necessary(&@block_ctxt cx, ValueRef vptr,
ty::t typ) -> result {
alt (ty::struct(cx.fcx.lcx.ccx.tcx, typ)) {
case (ty::ty_ivec(?tm)) {
ret ivec::duplicate_heap_part(cx, vptr, tm.ty);
}
case (ty::ty_istr) {
ret ivec::duplicate_heap_part(cx, vptr,
ty::mk_mach(cx.fcx.lcx.ccx.tcx, ast::ty_u8));
}
case (_) { ret rslt(cx, C_nil()); }
}
}
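// Says whether the destination of a copy already holds a valid value that
// must be dropped before being overwritten (DROP_EXISTING) or is known to
// be uninitialized (INIT).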
tag copy_action { INIT; DROP_EXISTING; }
fn copy_val(&@block_ctxt cx, copy_action action, ValueRef dst, ValueRef src,
&ty::t t) -> result {
auto ccx = cx.fcx.lcx.ccx;
    // FIXME: this is just a clunky stopgap; we should do proper checking in
    // an earlier pass.
    if (!ty::type_is_copyable(ccx.tcx, t)) {
        ccx.sess.span_fatal(cx.sp, "Copying a non-copyable type.");
}
if (ty::type_is_scalar(ccx.tcx, t) ||
ty::type_is_native(ccx.tcx, t)) {
ret rslt(cx, cx.build.Store(src, dst));
} else if (ty::type_is_nil(ccx.tcx, t) ||
ty::type_is_bot(ccx.tcx, t)) {
ret rslt(cx, C_nil());
} else if (ty::type_is_boxed(ccx.tcx, t)) {
auto bcx;
if (action == DROP_EXISTING) {
bcx = drop_ty(cx, cx.build.Load(dst), t).bcx;
} else {
bcx = cx;
}
bcx = copy_ty(bcx, src, t).bcx;
ret rslt(bcx, bcx.build.Store(src, dst));
} else if (ty::type_is_structural(ccx.tcx, t) ||
ty::type_has_dynamic_size(ccx.tcx, t)) {
// Check for self-assignment.
auto do_copy_cx = new_sub_block_ctxt(cx, "do_copy");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto self_assigning = cx.build.ICmp(lib::llvm::LLVMIntNE,
cx.build.PointerCast(dst, val_ty(src)), src);
cx.build.CondBr(self_assigning, do_copy_cx.llbb, next_cx.llbb);
if (action == DROP_EXISTING) {
do_copy_cx = drop_ty(do_copy_cx, dst, t).bcx;
}
do_copy_cx = memmove_ty(do_copy_cx, dst, src, t).bcx;
do_copy_cx = copy_ty(do_copy_cx, dst, t).bcx;
do_copy_cx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
ccx.sess.bug("unexpected type in trans::copy_val: " +
ty_to_str(ccx.tcx, t));
}
// This works like copy_val, except that it deinitializes the source.
// Since it needs to zero out the source, src also needs to be an lval.
// FIXME: We always zero out the source. Ideally we would detect the
// case where a variable is always deinitialized by block exit and thus
// doesn't need to be dropped.
fn move_val(@block_ctxt cx, copy_action action, ValueRef dst, ValueRef src,
&ty::t t) -> result {
if (ty::type_is_scalar(cx.fcx.lcx.ccx.tcx, t) ||
ty::type_is_native(cx.fcx.lcx.ccx.tcx, t)) {
auto r = rslt(cx, cx.build.Store(cx.build.Load(src), dst));
ret zero_alloca(r.bcx, src, t);
} else if (ty::type_is_nil(cx.fcx.lcx.ccx.tcx, t) ||
ty::type_is_bot(cx.fcx.lcx.ccx.tcx, t)) {
ret rslt(cx, C_nil());
} else if (ty::type_is_boxed(cx.fcx.lcx.ccx.tcx, t)) {
if (action == DROP_EXISTING) {
cx = drop_ty(cx, cx.build.Load(dst), t).bcx;
}
auto r = rslt(cx, cx.build.Store(cx.build.Load(src), dst));
ret zero_alloca(r.bcx, src, t);
} else if (ty::type_is_structural(cx.fcx.lcx.ccx.tcx, t) ||
ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
if (action == DROP_EXISTING) { cx = drop_ty(cx, dst, t).bcx; }
auto r = memmove_ty(cx, dst, src, t);
ret zero_alloca(r.bcx, src, t);
}
cx.fcx.lcx.ccx.sess.bug("unexpected type in trans::move_val: " +
ty_to_str(cx.fcx.lcx.ccx.tcx, t));
}
fn trans_lit(&@crate_ctxt cx, &ast::lit lit, ast::node_id id) -> ValueRef {
alt (lit.node) {
case (ast::lit_int(?i)) { ret C_int(i); }
case (ast::lit_uint(?u)) { ret C_int(u as int); }
case (ast::lit_mach_int(?tm, ?i)) {
// FIXME: the entire handling of mach types falls apart
// if target int width is larger than host, at the moment;
// re-do the mach-int types using 'big' when that works.
auto t = T_int();
auto s = True;
alt (tm) {
case (ast::ty_u8) { t = T_i8(); s = False; }
case (ast::ty_u16) { t = T_i16(); s = False; }
case (ast::ty_u32) { t = T_i32(); s = False; }
case (ast::ty_u64) { t = T_i64(); s = False; }
case (ast::ty_i8) { t = T_i8(); }
case (ast::ty_i16) { t = T_i16(); }
case (ast::ty_i32) { t = T_i32(); }
case (ast::ty_i64) { t = T_i64(); }
}
ret C_integral(t, i as uint, s);
}
case (ast::lit_float(?fs)) { ret C_float(fs); }
case (ast::lit_mach_float(?tm, ?s)) {
auto t = T_float();
alt (tm) {
case (ast::ty_f32) { t = T_f32(); }
case (ast::ty_f64) { t = T_f64(); }
}
ret C_floating(s, t);
}
case (ast::lit_char(?c)) {
ret C_integral(T_char(), c as uint, False);
}
case (ast::lit_bool(?b)) { ret C_bool(b); }
case (ast::lit_nil) { ret C_nil(); }
case (ast::lit_str(?s, _)) { ret C_str(cx, s); }
}
}
// Converts an annotation to a type
fn node_id_type(&@crate_ctxt cx, ast::node_id id) -> ty::t {
ret ty::node_id_to_monotype(cx.tcx, id);
}
fn node_type(&@crate_ctxt cx, &span sp, ast::node_id id) -> TypeRef {
ret type_of(cx, sp, node_id_type(cx, id));
}
fn trans_unary(&@block_ctxt cx, ast::unop op, &@ast::expr e,
ast::node_id id) -> result {
auto sub = trans_expr(cx, e);
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
alt (op) {
case (ast::not) {
auto dr = autoderef(sub.bcx, sub.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, e));
ret rslt(dr.bcx, dr.bcx.build.Not(dr.val));
}
case (ast::neg) {
auto dr = autoderef(sub.bcx, sub.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, e));
if (ty::struct(cx.fcx.lcx.ccx.tcx, e_ty) == ty::ty_float) {
ret rslt(dr.bcx, dr.bcx.build.FNeg(dr.val));
} else { ret rslt(dr.bcx, sub.bcx.build.Neg(dr.val)); }
}
case (ast::box(_)) {
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto e_val = sub.val;
auto box_ty = node_id_type(sub.bcx.fcx.lcx.ccx, id);
sub = trans_malloc_boxed(sub.bcx, e_ty);
find_scope_cx(cx).cleanups +=
[clean(bind drop_ty(_, sub.val, box_ty))];
auto box = sub.val;
auto rc =
sub.bcx.build.GEP(box,
[C_int(0),
C_int(abi::box_rc_field_refcnt)]);
auto body =
sub.bcx.build.GEP(box,
[C_int(0), C_int(abi::box_rc_field_body)]);
sub.bcx.build.Store(C_int(1), rc);
// Cast the body type to the type of the value. This is needed to
// make tags work, since tags have a different LLVM type depending
// on whether they're boxed or not.
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, e_ty)) {
auto llety =
T_ptr(type_of(sub.bcx.fcx.lcx.ccx, e.span, e_ty));
body = sub.bcx.build.PointerCast(body, llety);
}
sub = copy_val(sub.bcx, INIT, body, e_val, e_ty);
ret rslt(sub.bcx, box);
}
case (ast::deref) {
cx.fcx.lcx.ccx.sess.bug("deref expressions should have been " +
"translated using trans_lval(), not "
+ "trans_unary()");
}
}
}
fn trans_compare(&@block_ctxt cx0, ast::binop op, &ty::t t0, ValueRef lhs0,
ValueRef rhs0) -> result {
// Autoderef both sides.
auto cx = cx0;
auto lhs_r = autoderef(cx, lhs0, t0);
auto lhs = lhs_r.val;
cx = lhs_r.bcx;
auto rhs_r = autoderef(cx, rhs0, t0);
auto rhs = rhs_r.val;
cx = rhs_r.bcx;
// Determine the operation we need.
// FIXME: Use or-patterns when we have them.
auto llop;
alt (op) {
case (ast::eq) { llop = C_u8(abi::cmp_glue_op_eq); }
case (ast::lt) { llop = C_u8(abi::cmp_glue_op_lt); }
case (ast::le) { llop = C_u8(abi::cmp_glue_op_le); }
case (ast::ne) { llop = C_u8(abi::cmp_glue_op_eq); }
case (ast::ge) { llop = C_u8(abi::cmp_glue_op_lt); }
case (ast::gt) { llop = C_u8(abi::cmp_glue_op_le); }
}
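    // ne, ge and gt share the eq, lt and le glue operations; their results
    // are negated after the comparison below.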
auto rs = compare(cx, lhs, rhs, rhs_r.ty, llop);
// Invert the result if necessary.
// FIXME: Use or-patterns when we have them.
alt (op) {
case (ast::eq) { ret rslt(rs.bcx, rs.val); }
case (ast::lt) { ret rslt(rs.bcx, rs.val); }
case (ast::le) { ret rslt(rs.bcx, rs.val); }
case (ast::ne) { ret rslt(rs.bcx, rs.bcx.build.Not(rs.val)); }
case (ast::ge) { ret rslt(rs.bcx, rs.bcx.build.Not(rs.val)); }
case (ast::gt) { ret rslt(rs.bcx, rs.bcx.build.Not(rs.val)); }
}
}
fn trans_vec_append(&@block_ctxt cx, &ty::t t, ValueRef lhs, ValueRef rhs) ->
result {
auto elt_ty = ty::sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
auto skip_null = C_bool(false);
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_str) { skip_null = C_bool(true); }
case (_) { }
}
auto bcx = cx;
auto ti = none[@tydesc_info];
auto llvec_tydesc = get_tydesc(bcx, t, false, ti);
bcx = llvec_tydesc.bcx;
ti = none[@tydesc_info];
auto llelt_tydesc = get_tydesc(bcx, elt_ty, false, ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_copy_glue, ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_drop_glue, ti);
lazily_emit_tydesc_glue(cx, abi::tydesc_field_free_glue, ti);
bcx = llelt_tydesc.bcx;
auto dst = bcx.build.PointerCast(lhs, T_ptr(T_opaque_vec_ptr()));
auto src = bcx.build.PointerCast(rhs, T_opaque_vec_ptr());
ret rslt(bcx,
bcx.build.Call(cx.fcx.lcx.ccx.upcalls.vec_append,
[cx.fcx.lltaskptr, llvec_tydesc.val,
llelt_tydesc.val, dst, src, skip_null]));
}
mod ivec {
// Returns the length of an interior vector and a pointer to its first
// element, in that order.
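    // Three cases merge at the final phis: the vector lives on the stack
    // (stack length nonzero), it has spilled to the heap (stack length
    // zero, heap pointer non-null), or it is truly empty (stack length
    // zero, heap pointer null).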
fn get_len_and_data(&@block_ctxt bcx, ValueRef orig_v, ty::t unit_ty)
-> tup(ValueRef, ValueRef, @block_ctxt) {
// If this interior vector has dynamic size, we can't assume anything
// about the LLVM type of the value passed in, so we cast it to an
// opaque vector type.
auto v;
if (ty::type_has_dynamic_size(bcx.fcx.lcx.ccx.tcx, unit_ty)) {
v = bcx.build.PointerCast(orig_v, T_ptr(T_opaque_ivec()));
} else {
v = orig_v;
}
auto llunitty = type_of_or_i8(bcx, unit_ty);
auto stack_len = load_inbounds(bcx, v, [C_int(0),
C_uint(abi::ivec_elt_len)]);
auto stack_elem =
bcx.build.InBoundsGEP(v,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
auto on_heap =
bcx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
auto on_heap_cx = new_sub_block_ctxt(bcx, "on_heap");
auto next_cx = new_sub_block_ctxt(bcx, "next");
bcx.build.CondBr(on_heap, on_heap_cx.llbb, next_cx.llbb);
auto heap_stub =
on_heap_cx.build.PointerCast(v, T_ptr(T_ivec_heap(llunitty)));
auto heap_ptr = load_inbounds(on_heap_cx, heap_stub,
[C_int(0),
C_uint(abi::ivec_heap_stub_elt_ptr)]);
// Check whether the heap pointer is null. If it is, the vector length
// is truly zero.
auto llstubty = T_ivec_heap(llunitty);
auto llheapptrty = struct_elt(llstubty, abi::ivec_heap_stub_elt_ptr);
auto heap_ptr_is_null =
on_heap_cx.build.ICmp(lib::llvm::LLVMIntEQ, heap_ptr,
C_null(T_ptr(llheapptrty)));
auto zero_len_cx = new_sub_block_ctxt(bcx, "zero_len");
auto nonzero_len_cx = new_sub_block_ctxt(bcx, "nonzero_len");
on_heap_cx.build.CondBr(heap_ptr_is_null, zero_len_cx.llbb,
nonzero_len_cx.llbb);
// Technically this context is unnecessary, but it makes this function
// clearer.
auto zero_len = C_int(0);
auto zero_elem = C_null(T_ptr(llunitty));
zero_len_cx.build.Br(next_cx.llbb);
// If we're here, then we actually have a heapified vector.
auto heap_len = load_inbounds(nonzero_len_cx, heap_ptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_len)]);
auto heap_elem =
{
auto v = [C_int(0), C_uint(abi::ivec_heap_elt_elems),
C_int(0)];
nonzero_len_cx.build.InBoundsGEP(heap_ptr,v)
};
nonzero_len_cx.build.Br(next_cx.llbb);
// Now we can figure out the length of `v` and get a pointer to its
// first element.
auto len =
next_cx.build.Phi(T_int(), [stack_len, zero_len, heap_len],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
auto elem =
next_cx.build.Phi(T_ptr(llunitty),
[stack_elem, zero_elem, heap_elem],
[bcx.llbb, zero_len_cx.llbb,
nonzero_len_cx.llbb]);
ret tup(len, elem, next_cx);
}
    // Returns a result consisting of a pointer to the newly-reserved space
    // and the new block context. Updates the length appropriately.
fn reserve_space(&@block_ctxt cx, TypeRef llunitty, ValueRef v,
ValueRef len_needed) -> result {
auto stack_len_ptr =
cx.build.InBoundsGEP(v, [C_int(0), C_uint(abi::ivec_elt_len)]);
auto stack_len = cx.build.Load(stack_len_ptr);
auto alen = load_inbounds(cx, v, [C_int(0),
C_uint(abi::ivec_elt_alen)]);
// There are four cases we have to consider:
// (1) On heap, no resize necessary.
// (2) On heap, need to resize.
// (3) On stack, no resize necessary.
// (4) On stack, need to spill to heap.
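        // Telling (1)/(2) apart from (3)/(4) takes two tests, because a
        // stack length of zero is ambiguous: it may mean the vector has
        // spilled to the heap or simply that it is empty, so we also check
        // whether the heap stub pointer is non-null before taking the heap
        // paths.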
auto maybe_on_heap =
cx.build.ICmp(lib::llvm::LLVMIntEQ, stack_len, C_int(0));
auto maybe_on_heap_cx = new_sub_block_ctxt(cx, "maybe_on_heap");
auto on_stack_cx = new_sub_block_ctxt(cx, "on_stack");
cx.build.CondBr(maybe_on_heap, maybe_on_heap_cx.llbb,
on_stack_cx.llbb);
auto next_cx = new_sub_block_ctxt(cx, "next");
// We're possibly on the heap, unless the vector is zero-length.
auto stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
auto stub_ptr =
maybe_on_heap_cx.build.PointerCast(v,
T_ptr(T_ivec_heap(llunitty)));
auto heap_ptr = load_inbounds(maybe_on_heap_cx, stub_ptr, stub_p);
auto on_heap =
maybe_on_heap_cx.build.ICmp(lib::llvm::LLVMIntNE, heap_ptr,
C_null(val_ty(heap_ptr)));
auto on_heap_cx = new_sub_block_ctxt(cx, "on_heap");
maybe_on_heap_cx.build.CondBr(on_heap, on_heap_cx.llbb,
on_stack_cx.llbb);
// We're definitely on the heap. Check whether we need to resize.
auto heap_len_ptr =
on_heap_cx.build.InBoundsGEP(heap_ptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_len)]);
auto heap_len = on_heap_cx.build.Load(heap_len_ptr);
auto new_heap_len = on_heap_cx.build.Add(heap_len, len_needed);
auto heap_len_unscaled =
on_heap_cx.build.UDiv(heap_len, llsize_of(llunitty));
auto heap_no_resize_needed =
on_heap_cx.build.ICmp(lib::llvm::LLVMIntULE, new_heap_len, alen);
auto heap_no_resize_cx = new_sub_block_ctxt(cx, "heap_no_resize");
auto heap_resize_cx = new_sub_block_ctxt(cx, "heap_resize");
on_heap_cx.build.CondBr(heap_no_resize_needed, heap_no_resize_cx.llbb,
heap_resize_cx.llbb);
// Case (1): We're on the heap and don't need to resize.
auto heap_data_no_resize =
{
auto v = [C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
heap_no_resize_cx.build.InBoundsGEP(heap_ptr,v)
};
heap_no_resize_cx.build.Store(new_heap_len, heap_len_ptr);
heap_no_resize_cx.build.Br(next_cx.llbb);
// Case (2): We're on the heap and need to resize. This path is rare,
// so we delegate to cold glue.
{
auto p =
heap_resize_cx.build.PointerCast(v, T_ptr(T_opaque_ivec()));
heap_resize_cx.build.Call(cx.fcx.lcx.ccx.upcalls.ivec_resize,
[cx.fcx.lltaskptr, p, new_heap_len]);
}
auto heap_ptr_resize =
load_inbounds(heap_resize_cx, stub_ptr, stub_p);
auto heap_data_resize =
{
auto v = [C_int(0), C_uint(abi::ivec_heap_elt_elems),
heap_len_unscaled];
heap_resize_cx.build.InBoundsGEP(heap_ptr_resize, v)
};
heap_resize_cx.build.Br(next_cx.llbb);
// We're on the stack. Check whether we need to spill to the heap.
auto new_stack_len = on_stack_cx.build.Add(stack_len, len_needed);
auto stack_no_spill_needed =
on_stack_cx.build.ICmp(lib::llvm::LLVMIntULE, new_stack_len,
alen);
auto stack_len_unscaled =
on_stack_cx.build.UDiv(stack_len, llsize_of(llunitty));
auto stack_no_spill_cx = new_sub_block_ctxt(cx, "stack_no_spill");
auto stack_spill_cx = new_sub_block_ctxt(cx, "stack_spill");
on_stack_cx.build.CondBr(stack_no_spill_needed,
stack_no_spill_cx.llbb, stack_spill_cx.llbb);
// Case (3): We're on the stack and don't need to spill.
auto stack_data_no_spill =
stack_no_spill_cx.build.InBoundsGEP(v,
[C_int(0),
C_uint(abi::ivec_elt_elems),
stack_len_unscaled]);
stack_no_spill_cx.build.Store(new_stack_len, stack_len_ptr);
stack_no_spill_cx.build.Br(next_cx.llbb);
// Case (4): We're on the stack and need to spill. Like case (2), this
// path is rare, so we delegate to cold glue.
{
auto p =
stack_spill_cx.build.PointerCast(v, T_ptr(T_opaque_ivec()));
stack_spill_cx.build.Call(cx.fcx.lcx.ccx.upcalls.ivec_spill,
[cx.fcx.lltaskptr, p, new_stack_len]);
}
auto spill_stub =
stack_spill_cx.build.PointerCast(v, T_ptr(T_ivec_heap(llunitty)));
auto heap_ptr_spill =
load_inbounds(stack_spill_cx, spill_stub, stub_p);
auto heap_data_spill =
{
auto v = [C_int(0), C_uint(abi::ivec_heap_elt_elems),
stack_len_unscaled];
stack_spill_cx.build.InBoundsGEP(heap_ptr_spill, v)
};
stack_spill_cx.build.Br(next_cx.llbb);
// Phi together the different data pointers to get the result.
auto data_ptr =
next_cx.build.Phi(T_ptr(llunitty),
[heap_data_no_resize, heap_data_resize,
stack_data_no_spill, heap_data_spill],
[heap_no_resize_cx.llbb, heap_resize_cx.llbb,
stack_no_spill_cx.llbb, stack_spill_cx.llbb]);
ret rslt(next_cx, data_ptr);
}
fn trans_append(&@block_ctxt cx, &ty::t t, ValueRef orig_lhs,
ValueRef orig_rhs) -> result {
// Cast to opaque interior vector types if necessary.
auto lhs;
auto rhs;
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
lhs = cx.build.PointerCast(orig_lhs, T_ptr(T_opaque_ivec()));
rhs = cx.build.PointerCast(orig_rhs, T_ptr(T_opaque_ivec()));
} else {
lhs = orig_lhs;
rhs = orig_rhs;
}
auto unit_ty = ty::sequence_element_type(cx.fcx.lcx.ccx.tcx, t);
auto llunitty = type_of_or_i8(cx, unit_ty);
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_istr) { }
case (ty::ty_ivec(_)) { }
case (_) {
cx.fcx.lcx.ccx.tcx.sess.bug("non-istr/ivec in trans_append");
}
}
auto rs = size_of(cx, unit_ty);
auto bcx = rs.bcx;
auto unit_sz = rs.val;
// Gather the various type descriptors we'll need.
// FIXME (issue #511): This is needed to prevent a leak.
auto no_tydesc_info = none;
rs = get_tydesc(bcx, t, false, no_tydesc_info);
bcx = rs.bcx;
rs = get_tydesc(bcx, unit_ty, false, no_tydesc_info);
bcx = rs.bcx;
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_copy_glue, none);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, none);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, none);
auto rhs_len_and_data = get_len_and_data(bcx, rhs, unit_ty);
auto rhs_len = rhs_len_and_data._0;
auto rhs_data = rhs_len_and_data._1;
bcx = rhs_len_and_data._2;
rs = reserve_space(bcx, llunitty, lhs, rhs_len);
auto lhs_data = rs.val;
bcx = rs.bcx;
// Work out the end pointer.
auto lhs_unscaled_idx = bcx.build.UDiv(rhs_len, llsize_of(llunitty));
auto lhs_end = bcx.build.InBoundsGEP(lhs_data, [lhs_unscaled_idx]);
// Now emit the copy loop.
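        // The loop has the usual three-block shape: a header comparing the
        // destination cursor against lhs_end, a body that copy_val's one
        // element and advances both cursors (by unit_sz bytes for
        // dynamically-sized elements, by one element otherwise), and the
        // "next" block reached once the cursors meet.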
auto dest_ptr = alloca(bcx, T_ptr(llunitty));
bcx.build.Store(lhs_data, dest_ptr);
auto src_ptr = alloca(bcx, T_ptr(llunitty));
bcx.build.Store(rhs_data, src_ptr);
auto copy_loop_header_cx =
new_sub_block_ctxt(bcx, "copy_loop_header");
bcx.build.Br(copy_loop_header_cx.llbb);
auto copy_dest_ptr = copy_loop_header_cx.build.Load(dest_ptr);
auto not_yet_at_end =
copy_loop_header_cx.build.ICmp(lib::llvm::LLVMIntNE,
copy_dest_ptr, lhs_end);
auto copy_loop_body_cx = new_sub_block_ctxt(bcx, "copy_loop_body");
auto next_cx = new_sub_block_ctxt(bcx, "next");
copy_loop_header_cx.build.CondBr(not_yet_at_end,
copy_loop_body_cx.llbb,
next_cx.llbb);
auto copy_src_ptr = copy_loop_body_cx.build.Load(src_ptr);
auto copy_src = load_if_immediate(copy_loop_body_cx, copy_src_ptr,
unit_ty);
rs = copy_val(copy_loop_body_cx, INIT, copy_dest_ptr, copy_src,
unit_ty);
auto post_copy_cx = rs.bcx;
// Increment both pointers.
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
// We have to increment by the dynamically-computed size.
incr_ptr(post_copy_cx, copy_dest_ptr, unit_sz, dest_ptr);
incr_ptr(post_copy_cx, copy_src_ptr, unit_sz, src_ptr);
} else {
incr_ptr(post_copy_cx, copy_dest_ptr, C_int(1), dest_ptr);
incr_ptr(post_copy_cx, copy_src_ptr, C_int(1), src_ptr);
}
post_copy_cx.build.Br(copy_loop_header_cx.llbb);
ret rslt(next_cx, C_nil());
}
type alloc_result = rec(@block_ctxt bcx,
ValueRef llptr,
ValueRef llunitsz,
ValueRef llalen);
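    // Allocates stack space for a new interior vector with the default
    // capacity of abi::ivec_default_length units. Statically-sized elements
    // get a properly-typed T_ivec alloca; dynamically-sized elements get a
    // raw byte array (sized in the derived-tydescs block, where the dynamic
    // unit size is computed) that is then cast to an opaque ivec pointer.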
fn alloc(&@block_ctxt cx, ty::t unit_ty) -> alloc_result {
auto dynamic = ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty);
auto bcx;
if (dynamic) {
bcx = llderivedtydescs_block_ctxt(cx.fcx);
} else {
bcx = cx;
}
auto llunitsz;
auto rslt = size_of(bcx, unit_ty);
bcx = rslt.bcx;
llunitsz = rslt.val;
if (dynamic) { cx.fcx.llderivedtydescs = bcx.llbb; }
auto llalen = bcx.build.Mul(llunitsz,
C_uint(abi::ivec_default_length));
auto llptr;
auto llunitty = type_of_or_i8(bcx, unit_ty);
auto bcx_result;
if (dynamic) {
auto llarraysz = bcx.build.Add(llsize_of(T_opaque_ivec()),
llalen);
auto llvecptr = array_alloca(bcx, T_i8(), llarraysz);
bcx_result = cx;
llptr = bcx_result.build.PointerCast(llvecptr,
T_ptr(T_opaque_ivec()));
} else {
llptr = alloca(bcx, T_ivec(llunitty));
bcx_result = bcx;
}
ret rec(bcx=bcx_result,
llptr=llptr,
llunitsz=llunitsz,
llalen=llalen);
}
fn trans_add(&@block_ctxt cx, ty::t vec_ty, ValueRef lhs, ValueRef rhs)
-> result {
auto bcx = cx;
auto unit_ty = ty::sequence_element_type(bcx.fcx.lcx.ccx.tcx, vec_ty);
auto ares = alloc(bcx, unit_ty);
bcx = ares.bcx;
auto llvecptr = ares.llptr;
auto unit_sz = ares.llunitsz;
auto llalen = ares.llalen;
find_scope_cx(bcx).cleanups +=
[clean(bind drop_ty(_, llvecptr, vec_ty))];
auto llunitty = type_of_or_i8(bcx, unit_ty);
auto llheappartty = T_ivec_heap_part(llunitty);
auto lhs_len_and_data = get_len_and_data(bcx, lhs, unit_ty);
auto lhs_len = lhs_len_and_data._0;
auto lhs_data = lhs_len_and_data._1;
bcx = lhs_len_and_data._2;
auto rhs_len_and_data = get_len_and_data(bcx, rhs, unit_ty);
auto rhs_len = rhs_len_and_data._0;
auto rhs_data = rhs_len_and_data._1;
bcx = rhs_len_and_data._2;
auto lllen = bcx.build.Add(lhs_len, rhs_len);
// We have three cases to handle here:
// (1) Length is zero ([] + []).
// (2) Copy onto stack.
// (3) Allocate on heap and copy there.
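        // (Sketch: if the combined byte length fits within llalen, case (2)
        // fills the stack part of the new vector in place; otherwise case
        // (3) writes a heap stub and trans_raw_malloc's a heap part before
        // the copy loops run.)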
auto len_is_zero =
bcx.build.ICmp(lib::llvm::LLVMIntEQ, lllen, C_int(0));
auto zero_len_cx = new_sub_block_ctxt(bcx, "zero_len");
auto nonzero_len_cx = new_sub_block_ctxt(bcx, "nonzero_len");
bcx.build.CondBr(len_is_zero, zero_len_cx.llbb, nonzero_len_cx.llbb);
// Case (1): Length is zero.
auto stub_z = [C_int(0), C_uint(abi::ivec_heap_stub_elt_zero)];
auto stub_a = [C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)];
auto stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
auto vec_l = [C_int(0), C_uint(abi::ivec_elt_len)];
auto vec_a = [C_int(0), C_uint(abi::ivec_elt_alen)];
auto stub_ptr_zero =
zero_len_cx.build.PointerCast(llvecptr,
T_ptr(T_ivec_heap(llunitty)));
zero_len_cx.build.Store(C_int(0),
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
stub_z));
zero_len_cx.build.Store(llalen,
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
stub_a));
zero_len_cx.build.Store(C_null(T_ptr(llheappartty)),
zero_len_cx.build.InBoundsGEP(stub_ptr_zero,
stub_p));
auto next_cx = new_sub_block_ctxt(bcx, "next");
zero_len_cx.build.Br(next_cx.llbb);
// Determine whether we need to spill to the heap.
auto on_stack =
nonzero_len_cx.build.ICmp(lib::llvm::LLVMIntULE, lllen, llalen);
auto stack_cx = new_sub_block_ctxt(bcx, "stack");
auto heap_cx = new_sub_block_ctxt(bcx, "heap");
nonzero_len_cx.build.CondBr(on_stack, stack_cx.llbb, heap_cx.llbb);
// Case (2): Copy onto stack.
stack_cx.build.Store(lllen,
stack_cx.build.InBoundsGEP(llvecptr, vec_l));
stack_cx.build.Store(llalen,
stack_cx.build.InBoundsGEP(llvecptr, vec_a));
auto dest_ptr_stack =
stack_cx.build.InBoundsGEP(llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
auto copy_cx = new_sub_block_ctxt(bcx, "copy");
stack_cx.build.Br(copy_cx.llbb);
// Case (3): Allocate on heap and copy there.
auto stub_ptr_heap =
heap_cx.build.PointerCast(llvecptr, T_ptr(T_ivec_heap(llunitty)));
heap_cx.build.Store(C_int(0),
heap_cx.build.InBoundsGEP(stub_ptr_heap,
stub_z));
heap_cx.build.Store(lllen,
heap_cx.build.InBoundsGEP(stub_ptr_heap,
stub_a));
auto heap_sz = heap_cx.build.Add(llsize_of(llheappartty), lllen);
auto rs = trans_raw_malloc(heap_cx, T_ptr(llheappartty), heap_sz);
auto heap_part = rs.val;
heap_cx = rs.bcx;
heap_cx.build.Store(heap_part,
heap_cx.build.InBoundsGEP(stub_ptr_heap,
stub_p));
{
auto v = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
heap_cx.build.Store(lllen,
heap_cx.build.InBoundsGEP(heap_part,
v));
}
auto dest_ptr_heap =
heap_cx.build.InBoundsGEP(heap_part,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
heap_cx.build.Br(copy_cx.llbb);
// Emit the copy loop.
auto first_dest_ptr =
copy_cx.build.Phi(T_ptr(llunitty),
[dest_ptr_stack, dest_ptr_heap],
[stack_cx.llbb, heap_cx.llbb]);
auto lhs_len_unscaled = copy_cx.build.UDiv(lhs_len, unit_sz);
auto lhs_end_ptr =
copy_cx.build.InBoundsGEP(lhs_data, [lhs_len_unscaled]);
auto rhs_len_unscaled = copy_cx.build.UDiv(rhs_len, unit_sz);
auto rhs_end_ptr =
copy_cx.build.InBoundsGEP(rhs_data, [rhs_len_unscaled]);
auto dest_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(first_dest_ptr, dest_ptr_ptr);
auto lhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(lhs_data, lhs_ptr_ptr);
auto rhs_ptr_ptr = alloca(copy_cx, T_ptr(llunitty));
copy_cx.build.Store(rhs_data, rhs_ptr_ptr);
auto lhs_copy_cx = new_sub_block_ctxt(bcx, "lhs_copy");
copy_cx.build.Br(lhs_copy_cx.llbb);
// Copy in elements from the LHS.
auto lhs_ptr = lhs_copy_cx.build.Load(lhs_ptr_ptr);
auto not_at_end_lhs =
lhs_copy_cx.build.ICmp(lib::llvm::LLVMIntNE, lhs_ptr,
lhs_end_ptr);
auto lhs_do_copy_cx = new_sub_block_ctxt(bcx, "lhs_do_copy");
auto rhs_copy_cx = new_sub_block_ctxt(bcx, "rhs_copy");
lhs_copy_cx.build.CondBr(not_at_end_lhs, lhs_do_copy_cx.llbb,
rhs_copy_cx.llbb);
auto dest_ptr_lhs_copy = lhs_do_copy_cx.build.Load(dest_ptr_ptr);
auto lhs_val = load_if_immediate(lhs_do_copy_cx, lhs_ptr, unit_ty);
rs =
copy_val(lhs_do_copy_cx, INIT, dest_ptr_lhs_copy, lhs_val,
unit_ty);
lhs_do_copy_cx = rs.bcx;
{
auto d = lhs_do_copy_cx.build.InBoundsGEP(dest_ptr_lhs_copy,
[C_int(1)]);
auto lhs = lhs_do_copy_cx.build.InBoundsGEP(lhs_ptr,
[C_int(1)]);
lhs_do_copy_cx.build.Store(d, dest_ptr_ptr);
lhs_do_copy_cx.build.Store(lhs, lhs_ptr_ptr);
}
lhs_do_copy_cx.build.Br(lhs_copy_cx.llbb);
// Copy in elements from the RHS.
auto rhs_ptr = rhs_copy_cx.build.Load(rhs_ptr_ptr);
auto not_at_end_rhs =
rhs_copy_cx.build.ICmp(lib::llvm::LLVMIntNE, rhs_ptr,
rhs_end_ptr);
auto rhs_do_copy_cx = new_sub_block_ctxt(bcx, "rhs_do_copy");
rhs_copy_cx.build.CondBr(not_at_end_rhs, rhs_do_copy_cx.llbb,
next_cx.llbb);
auto dest_ptr_rhs_copy = rhs_do_copy_cx.build.Load(dest_ptr_ptr);
auto rhs_val = load_if_immediate(rhs_do_copy_cx, rhs_ptr, unit_ty);
rs =
copy_val(rhs_do_copy_cx, INIT, dest_ptr_rhs_copy, rhs_val,
unit_ty);
rhs_do_copy_cx = rs.bcx;
{
auto d = rhs_do_copy_cx.build.InBoundsGEP(dest_ptr_rhs_copy,
[C_int(1)]);
auto rhs = rhs_do_copy_cx.build.InBoundsGEP(rhs_ptr,
[C_int(1)]);
rhs_do_copy_cx.build.Store(d, dest_ptr_ptr);
rhs_do_copy_cx.build.Store(rhs, rhs_ptr_ptr);
}
rhs_do_copy_cx.build.Br(rhs_copy_cx.llbb);
// Finally done!
ret rslt(next_cx, llvecptr);
}
// NB: This does *not* adjust reference counts. The caller must have done
// this via copy_ty() beforehand.
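    // If the vector has spilled to the heap, this mallocs a fresh heap
    // part, memmoves the contents across, and repoints the stub at the
    // copy, so that a duplicated vector no longer aliases the original's
    // heap storage.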
fn duplicate_heap_part(&@block_ctxt cx, ValueRef orig_vptr,
ty::t unit_ty) -> result {
// Cast to an opaque interior vector if we can't trust the pointer
// type.
auto vptr;
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty)) {
vptr = cx.build.PointerCast(orig_vptr, T_ptr(T_opaque_ivec()));
} else {
vptr = orig_vptr;
}
auto llunitty = type_of_or_i8(cx, unit_ty);
auto llheappartty = T_ivec_heap_part(llunitty);
// Check to see if the vector is heapified.
auto stack_len_ptr = cx.build.InBoundsGEP(vptr, [C_int(0),
C_uint(abi::ivec_elt_len)]);
auto stack_len = cx.build.Load(stack_len_ptr);
auto stack_len_is_zero = cx.build.ICmp(lib::llvm::LLVMIntEQ,
stack_len, C_int(0));
auto maybe_on_heap_cx = new_sub_block_ctxt(cx, "maybe_on_heap");
auto next_cx = new_sub_block_ctxt(cx, "next");
cx.build.CondBr(stack_len_is_zero, maybe_on_heap_cx.llbb,
next_cx.llbb);
auto stub_ptr = maybe_on_heap_cx.build.PointerCast(vptr,
T_ptr(T_ivec_heap(llunitty)));
auto heap_ptr_ptr = maybe_on_heap_cx.build.InBoundsGEP(stub_ptr,
[C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)]);
auto heap_ptr = maybe_on_heap_cx.build.Load(heap_ptr_ptr);
auto heap_ptr_is_nonnull = maybe_on_heap_cx.build.ICmp(
lib::llvm::LLVMIntNE, heap_ptr, C_null(T_ptr(llheappartty)));
auto on_heap_cx = new_sub_block_ctxt(cx, "on_heap");
maybe_on_heap_cx.build.CondBr(heap_ptr_is_nonnull, on_heap_cx.llbb,
next_cx.llbb);
// Ok, the vector is on the heap. Copy the heap part.
auto alen_ptr = on_heap_cx.build.InBoundsGEP(stub_ptr,
[C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)]);
auto alen = on_heap_cx.build.Load(alen_ptr);
auto heap_part_sz = on_heap_cx.build.Add(alen,
llsize_of(T_opaque_ivec_heap_part()));
auto rs = trans_raw_malloc(on_heap_cx, T_ptr(llheappartty),
heap_part_sz);
on_heap_cx = rs.bcx;
auto new_heap_ptr = rs.val;
rs = call_memmove(on_heap_cx, new_heap_ptr, heap_ptr, heap_part_sz);
on_heap_cx = rs.bcx;
on_heap_cx.build.Store(new_heap_ptr, heap_ptr_ptr);
on_heap_cx.build.Br(next_cx.llbb);
ret rslt(next_cx, C_nil());
}
}
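// Translates `+` on exterior vectors: allocate a temporary, copy the LHS
// into it, append the RHS in place, and register the temporary with the
// enclosing scope's cleanups.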
fn trans_vec_add(&@block_ctxt cx, &ty::t t, ValueRef lhs, ValueRef rhs) ->
result {
auto r = alloc_ty(cx, t);
auto tmp = r.val;
r = copy_val(r.bcx, INIT, tmp, lhs, t);
auto bcx = trans_vec_append(r.bcx, t, tmp, rhs).bcx;
tmp = load_if_immediate(bcx, tmp, t);
find_scope_cx(cx).cleanups += [clean(bind drop_ty(_, tmp, t))];
ret rslt(bcx, tmp);
}
fn trans_eager_binop(&@block_ctxt cx, ast::binop op, &ty::t intype,
ValueRef lhs, ValueRef rhs) -> result {
auto is_float = false;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, intype)) {
case (ty::ty_float) { is_float = true; }
case (_) { is_float = false; }
}
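    // Note that add on sequence types dispatches to the vector-append paths
    // above, div and rem pick the signed or unsigned LLVM instruction from
    // the operand type, and comparison operators fall through to
    // trans_compare.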
alt (op) {
case (ast::add) {
if (ty::type_is_sequence(cx.fcx.lcx.ccx.tcx, intype)) {
if (ty::sequence_is_interior(cx.fcx.lcx.ccx.tcx, intype)) {
ret ivec::trans_add(cx, intype, lhs, rhs);
}
ret trans_vec_add(cx, intype, lhs, rhs);
}
if (is_float) {
ret rslt(cx, cx.build.FAdd(lhs, rhs));
} else { ret rslt(cx, cx.build.Add(lhs, rhs)); }
}
case (ast::sub) {
if (is_float) {
ret rslt(cx, cx.build.FSub(lhs, rhs));
} else { ret rslt(cx, cx.build.Sub(lhs, rhs)); }
}
case (ast::mul) {
if (is_float) {
ret rslt(cx, cx.build.FMul(lhs, rhs));
} else { ret rslt(cx, cx.build.Mul(lhs, rhs)); }
}
case (ast::div) {
if (is_float) { ret rslt(cx, cx.build.FDiv(lhs, rhs)); }
if (ty::type_is_signed(cx.fcx.lcx.ccx.tcx, intype)) {
ret rslt(cx, cx.build.SDiv(lhs, rhs));
} else { ret rslt(cx, cx.build.UDiv(lhs, rhs)); }
}
case (ast::rem) {
if (is_float) { ret rslt(cx, cx.build.FRem(lhs, rhs)); }
if (ty::type_is_signed(cx.fcx.lcx.ccx.tcx, intype)) {
ret rslt(cx, cx.build.SRem(lhs, rhs));
} else { ret rslt(cx, cx.build.URem(lhs, rhs)); }
}
case (ast::bitor) { ret rslt(cx, cx.build.Or(lhs, rhs)); }
case (ast::bitand) { ret rslt(cx, cx.build.And(lhs, rhs)); }
case (ast::bitxor) { ret rslt(cx, cx.build.Xor(lhs, rhs)); }
case (ast::lsl) { ret rslt(cx, cx.build.Shl(lhs, rhs)); }
case (ast::lsr) { ret rslt(cx, cx.build.LShr(lhs, rhs)); }
case (ast::asr) { ret rslt(cx, cx.build.AShr(lhs, rhs)); }
case (_) { ret trans_compare(cx, op, intype, lhs, rhs); }
}
}
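// Repeatedly strips implicit indirection: boxes, resources, and tags that
// have exactly one variant taking exactly one argument. For lvals the load
// happens at the top of each iteration; otherwise load_if_immediate runs at
// the bottom so the caller receives the value itself rather than a pointer
// to it.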
fn autoderef_lval(&@block_ctxt cx, ValueRef v, &ty::t t, bool is_lval)
-> result_t {
let ValueRef v1 = v;
let ty::t t1 = t;
auto ccx = cx.fcx.lcx.ccx;
while (true) {
alt (ty::struct(ccx.tcx, t1)) {
case (ty::ty_box(?mt)) {
// If we are working with an lval, we want to
// unconditionally load at the top of the loop
// to get rid of the extra indirection
if (is_lval) { v1 = cx.build.Load(v1); }
auto body =
cx.build.GEP(v1,
[C_int(0), C_int(abi::box_rc_field_body)]);
t1 = mt.ty;
// Since we're changing levels of box indirection, we may have
// to cast this pointer, since statically-sized tag types have
// different types depending on whether they're behind a box
// or not.
if (!ty::type_has_dynamic_size(ccx.tcx, mt.ty)) {
auto llty = type_of(ccx, cx.sp, mt.ty);
v1 = cx.build.PointerCast(body, T_ptr(llty));
} else { v1 = body; }
}
case (ty::ty_res(?did, ?inner, ?tps)) {
if (is_lval) { v1 = cx.build.Load(v1); }
t1 = ty::substitute_type_params(ccx.tcx, tps, inner);
v1 = cx.build.GEP(v1, [C_int(0), C_int(1)]);
}
case (ty::ty_tag(?did, ?tps)) {
auto variants = ty::tag_variants(ccx.tcx, did);
if (vec::len(variants) != 1u ||
vec::len(variants.(0).args) != 1u) {
break;
}
if (is_lval) { v1 = cx.build.Load(v1); }
t1 = ty::substitute_type_params
(ccx.tcx, tps, variants.(0).args.(0));
if (!ty::type_has_dynamic_size(ccx.tcx, t1)) {
v1 = cx.build.PointerCast
(v1, T_ptr(type_of(ccx, cx.sp, t1)));
}
}
case (_) { break; }
}
// But if we aren't working with an lval, we get rid of
// a layer of indirection at the bottom of the loop so
// that it is gone when we return...
if (!is_lval) { v1 = load_if_immediate(cx, v1, t1); }
}
ret rec(bcx=cx, val=v1, ty=t1);
}
fn autoderef(&@block_ctxt cx, ValueRef v, &ty::t t) -> result_t {
ret autoderef_lval(cx, v, t, false);
}
fn trans_binary(&@block_ctxt cx, ast::binop op, &@ast::expr a, &@ast::expr b)
-> result {
    // The first two cases (and, or) are lazy:
alt (op) {
case (ast::and) {
// Lazy-eval and
auto lhs_expr = trans_expr(cx, a);
auto lhs_res =
autoderef(lhs_expr.bcx, lhs_expr.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, a));
auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
auto rhs_expr = trans_expr(rhs_cx, b);
auto rhs_res =
autoderef(rhs_expr.bcx, rhs_expr.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, b));
auto lhs_false_cx = new_scope_block_ctxt(cx, "lhs false");
auto lhs_false_res = rslt(lhs_false_cx, C_bool(false));
// The following line ensures that any cleanups for rhs
// are done within the block for rhs. This is necessary
// because and/or are lazy. So the rhs may never execute,
// and the cleanups can't be pushed into later code.
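            // (Sketch: in `a() && b()`, if a() is false then b() never
            // runs, so a temporary built while evaluating b() must be
            // dropped inside the rhs block rather than in the enclosing
            // scope.)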
auto rhs_bcx = trans_block_cleanups(rhs_res.bcx, rhs_cx);
lhs_res.bcx.build.CondBr(lhs_res.val, rhs_cx.llbb,
lhs_false_cx.llbb);
ret join_results(cx, T_bool(),
[lhs_false_res, rec(bcx=rhs_bcx,
val=rhs_res.val)]);
}
case (ast::or) {
// Lazy-eval or
auto lhs_expr = trans_expr(cx, a);
auto lhs_res = autoderef(lhs_expr.bcx, lhs_expr.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, a));
auto rhs_cx = new_scope_block_ctxt(cx, "rhs");
auto rhs_expr = trans_expr(rhs_cx, b);
auto rhs_res = autoderef(rhs_expr.bcx, rhs_expr.val,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, b));
auto lhs_true_cx = new_scope_block_ctxt(cx, "lhs true");
auto lhs_true_res = rslt(lhs_true_cx, C_bool(true));
// see the and case for an explanation
auto rhs_bcx = trans_block_cleanups(rhs_res.bcx, rhs_cx);
lhs_res.bcx.build.CondBr(lhs_res.val, lhs_true_cx.llbb,
rhs_cx.llbb);
ret join_results(cx, T_bool(),
[lhs_true_res, rec(bcx=rhs_bcx,
val=rhs_res.val)]);
}
case (_) {
// Remaining cases are eager:
auto lhs_expr = trans_expr(cx, a);
auto lhty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, a);
auto lhs = autoderef(lhs_expr.bcx, lhs_expr.val, lhty);
auto rhs_expr = trans_expr(lhs.bcx, b);
auto rhty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, b);
auto rhs = autoderef(rhs_expr.bcx, rhs_expr.val, rhty);
ret trans_eager_binop(rhs.bcx, op, lhs.ty,
lhs.val, rhs.val);
}
}
}
fn join_results(&@block_ctxt parent_cx, TypeRef t, &vec[result] ins) ->
result {
let vec[result] live = [];
let vec[ValueRef] vals = [];
let vec[BasicBlockRef] bbs = [];
for (result r in ins) {
if (!is_terminated(r.bcx)) {
live += [r];
vals += [r.val];
bbs += [r.bcx.llbb];
}
}
alt (vec::len[result](live)) {
case (0u) {
// No incoming edges are live, so we're in dead-code-land.
// Arbitrarily pick the first dead edge, since the caller
// is just going to propagate it outward.
assert (vec::len[result](ins) >= 1u);
ret ins.(0);
}
case (_) {/* fall through */ }
}
    // At least one incoming edge is live. Make a join block and br+phi the
    // live edges into it.
auto join_cx = new_sub_block_ctxt(parent_cx, "join");
for (result r in live) { r.bcx.build.Br(join_cx.llbb); }
auto phi = join_cx.build.Phi(t, vals, bbs);
ret rslt(join_cx, phi);
}
fn join_branches(&@block_ctxt parent_cx, &vec[result] ins) -> @block_ctxt {
auto out = new_sub_block_ctxt(parent_cx, "join");
for (result r in ins) {
if (!is_terminated(r.bcx)) { r.bcx.build.Br(out.llbb); }
}
ret out;
}
tag out_method { return; save_in(ValueRef); }
fn trans_if(&@block_ctxt cx, &@ast::expr cond, &ast::block thn,
&option::t[@ast::expr] els, ast::node_id id, &out_method output)
-> result {
auto cond_res = trans_expr(cx, cond);
auto then_cx = new_scope_block_ctxt(cx, "then");
auto then_res = trans_block(then_cx, thn, output);
auto else_cx = new_scope_block_ctxt(cx, "else");
auto else_res = alt (els) {
case (some(?elexpr)) {
alt (elexpr.node) {
case (ast::expr_if(_, _, _)) {
// Synthesize a block here to act as the else block
// containing an if expression. Needed in order for the
// else scope to behave like a normal block scope. A tad
// ugly.
auto elseif_blk = ast::block_from_expr(elexpr);
trans_block(else_cx, elseif_blk, output)
}
case (ast::expr_block(?blk)) {
// Calling trans_block directly instead of trans_expr
// because trans_expr will create another scope block
// context for the block, but we've already got the
// 'else' context
trans_block(else_cx, blk, output)
}
}
}
case (_) { rslt(else_cx, C_nil()) }
};
cond_res.bcx.build.CondBr(cond_res.val, then_cx.llbb, else_cx.llbb);
ret rslt(join_branches(cx, [then_res, else_res]), C_nil());
}
fn trans_for(&@block_ctxt cx, &@ast::local local, &@ast::expr seq,
&ast::block body) -> result {
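    // `inner` is the per-element callback handed to iter_sequence below: it
    // opens a loop scope, allocates the loop variable, copies the current
    // element into it, translates the body, and funnels control to its own
    // "next" block.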
fn inner(&@block_ctxt cx, @ast::local local, ValueRef curr, ty::t t,
ast::block body, @block_ctxt outer_next_cx) -> result {
auto next_cx = new_sub_block_ctxt(cx, "next");
auto scope_cx =
new_loop_scope_block_ctxt(cx, option::some[@block_ctxt](next_cx),
outer_next_cx, "for loop scope");
cx.build.Br(scope_cx.llbb);
auto local_res = alloc_local(scope_cx, local);
auto bcx = copy_val(local_res.bcx, INIT, local_res.val, curr, t).bcx;
scope_cx.cleanups += [clean(bind drop_slot(_, local_res.val, t))];
bcx = trans_block(bcx, body, return).bcx;
if (!bcx.build.is_terminated()) {
bcx.build.Br(next_cx.llbb);
// otherwise, this code is unreachable
}
ret rslt(next_cx, C_nil());
}
auto next_cx = new_sub_block_ctxt(cx, "next");
auto seq_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, seq);
auto seq_res = trans_expr(cx, seq);
auto it =
iter_sequence(seq_res.bcx, seq_res.val, seq_ty,
bind inner(_, local, _, _, body, next_cx));
it.bcx.build.Br(next_cx.llbb);
ret rslt(next_cx, it.val);
}
// Iterator translation
// Searches through a block for all references to locals or upvars in this
// frame and returns the list of definition IDs thus found.
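// (Sketch: in `for each (int i in it()) { log x + i; }`, the body refers to
// both x and i; x resolves to a definition in the enclosing frame and is
// collected, while i is declared by the loop itself and is filtered out via
// the decls set.)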
fn collect_upvars(&@block_ctxt cx, &ast::block bloc,
ast::node_id initial_decl) -> vec[ast::node_id] {
type env =
@rec(mutable vec[ast::node_id] refs,
hashmap[ast::node_id, ()] decls,
resolve::def_map def_map,
session::session sess);
fn walk_expr(env e, &@ast::expr expr) {
alt (expr.node) {
case (ast::expr_path(?path)) {
if (! e.def_map.contains_key(expr.id)) {
e.sess.span_fatal(expr.span,
"internal error in collect_upvars");
}
alt (e.def_map.get(expr.id)) {
case (ast::def_arg(?did)) {
vec::push(e.refs, did._1);
}
case (ast::def_local(?did)) {
vec::push(e.refs, did._1);
}
case (ast::def_binding(?did)) {
vec::push(e.refs, did._1);
}
case (_) { }
}
}
case (_) { }
}
}
fn walk_local(env e, &@ast::local local) {
e.decls.insert(local.node.id, ());
}
fn walk_pat(env e, &@ast::pat p) {
alt (p.node) {
case (ast::pat_bind(_)) {
e.decls.insert(p.id, ());
}
case (_) {}
}
}
let hashmap[ast::node_id, ()] decls = new_int_hash[()]();
decls.insert(initial_decl, ());
let env e =
@rec(mutable refs=[],
decls=decls,
def_map=cx.fcx.lcx.ccx.tcx.def_map,
sess=cx.fcx.lcx.ccx.tcx.sess);
auto visitor =
@rec(visit_local_pre=bind walk_local(e, _),
visit_expr_pre=bind walk_expr(e, _),
visit_pat_pre=bind walk_pat(e, _)
with walk::default_visitor());
walk::walk_block(*visitor, bloc);
// Calculate (refs - decls). This is the set of captured upvars.
let vec[ast::node_id] result = [];
for (ast::node_id ref_id_ in e.refs) {
auto ref_id = ref_id_;
if (!decls.contains_key(ref_id)) { result += [ref_id]; }
}
ret result;
}
// Given a block context and a list of upvars, construct a closure that
// contains pointers to all of the upvars and all of the tydescs in
// scope. Return the ValueRef and TypeRef corresponding to the closure.
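// The environment takes the shape of a closure box body: a struct of
// bindings (one alias per upvar) lives at abi::closure_elt_bindings, and
// the type descriptors currently in scope follow at
// abi::closure_elt_ty_params, so the iterator body can reach both through a
// single opaque pointer.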
fn build_environment(&@block_ctxt cx, &vec[ast::node_id] upvars) ->
tup(ValueRef, TypeRef)
{
auto upvar_count = vec::len(upvars);
auto llbindingsptr;
if (upvar_count > 0u) {
// Gather up the upvars.
let vec[ValueRef] llbindings = [];
let vec[TypeRef] llbindingtys = [];
for (ast::node_id nid in upvars) {
auto llbinding;
alt (cx.fcx.lllocals.find(nid)) {
case (none) {
alt (cx.fcx.llupvars.find(nid)) {
case (none) {
alt (cx.fcx.llargs.find(nid)) {
case (some(?x)) { llbinding = x; }
case (_) {
cx.fcx.lcx.ccx.sess.bug("unbound var \
in build_environment " + int::str(nid));
}
}
}
case (some(?llval)) { llbinding = llval; }
}
}
case (some(?llval)) { llbinding = llval; }
}
llbindings += [llbinding];
llbindingtys += [val_ty(llbinding)];
}
// Create an array of bindings and copy in aliases to the upvars.
llbindingsptr = alloca(cx, T_struct(llbindingtys));
auto i = 0u;
while (i < upvar_count) {
auto llbindingptr =
cx.build.GEP(llbindingsptr, [C_int(0), C_int(i as int)]);
cx.build.Store(llbindings.(i), llbindingptr);
i += 1u;
}
} else {
// Null bindings.
llbindingsptr = C_null(T_ptr(T_i8()));
}
// Create an environment and populate it with the bindings.
auto tydesc_count = vec::len[ValueRef](cx.fcx.lltydescs);
auto llenvptrty =
T_closure_ptr(cx.fcx.lcx.ccx.tn, T_ptr(T_nil()),
val_ty(llbindingsptr), tydesc_count);
auto llenvptr = alloca(cx, llvm::LLVMGetElementType(llenvptrty));
auto llbindingsptrptr =
cx.build.GEP(llenvptr,
[C_int(0), C_int(abi::box_rc_field_body), C_int(2)]);
cx.build.Store(llbindingsptr, llbindingsptrptr);
// Copy in our type descriptors, in case the iterator body needs to refer
// to them.
auto lltydescsptr =
cx.build.GEP(llenvptr,
[C_int(0), C_int(abi::box_rc_field_body),
C_int(abi::closure_elt_ty_params)]);
auto i = 0u;
while (i < tydesc_count) {
auto lltydescptr =
cx.build.GEP(lltydescsptr, [C_int(0), C_int(i as int)]);
cx.build.Store(cx.fcx.lltydescs.(i), lltydescptr);
i += 1u;
}
ret tup(llenvptr, llenvptrty);
}
// Given an enclosing block context, a new function context, a closure type,
// and a list of upvars, generate code to load and populate the environment
// with the upvars and type descriptors.
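// This is the read side of build_environment: the GEP paths used here must
// mirror the layout written there, or upvars and tydescs would be loaded
// from the wrong slots.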
fn load_environment(&@block_ctxt cx, &@fn_ctxt fcx,
TypeRef llenvptrty, &vec[ast::node_id] upvars)
{
auto upvar_count = vec::len(upvars);
auto copy_args_bcx = new_raw_block_ctxt(fcx, fcx.llcopyargs);
// Populate the upvars from the environment.
auto llremoteenvptr =
copy_args_bcx.build.PointerCast(fcx.llenv, llenvptrty);
auto llremotebindingsptrptr =
copy_args_bcx.build.GEP(llremoteenvptr,
[C_int(0), C_int(abi::box_rc_field_body),
C_int(abi::closure_elt_bindings)]);
auto llremotebindingsptr =
copy_args_bcx.build.Load(llremotebindingsptrptr);
auto i = 0u;
while (i < upvar_count) {
auto upvar_id = upvars.(i);
auto llupvarptrptr =
copy_args_bcx.build.GEP(llremotebindingsptr,
[C_int(0), C_int(i as int)]);
auto llupvarptr = copy_args_bcx.build.Load(llupvarptrptr);
fcx.llupvars.insert(upvar_id, llupvarptr);
i += 1u;
}
// Populate the type parameters from the environment.
auto llremotetydescsptr =
copy_args_bcx.build.GEP(llremoteenvptr,
[C_int(0), C_int(abi::box_rc_field_body),
C_int(abi::closure_elt_ty_params)]);
auto tydesc_count = vec::len(cx.fcx.lltydescs);
i = 0u;
while (i < tydesc_count) {
auto llremotetydescptr =
copy_args_bcx.build.GEP(llremotetydescsptr,
[C_int(0), C_int(i as int)]);
auto llremotetydesc = copy_args_bcx.build.Load(llremotetydescptr);
fcx.lltydescs += [llremotetydesc];
i += 1u;
}
}
fn trans_for_each(&@block_ctxt cx, &@ast::local local, &@ast::expr seq,
&ast::block body) -> result {
/*
* The translation is a little .. complex here. Code like:
*
* let ty1 p = ...;
*
* let ty1 q = ...;
*
* foreach (ty v in foo(a,b)) { body(p,q,v) }
*
*
     * Turns into something like the following (C/Rust mishmash):
*
* type env = { *ty1 p, *ty2 q, ... };
*
* let env e = { &p, &q, ... };
*
* fn foreach123_body(env* e, ty v) { body(*(e->p),*(e->q),v) }
*
* foo([foreach123_body, env*], a, b);
*
*/
// Step 1: walk body and figure out which references it makes
// escape. This could be determined upstream, and probably ought
    // to be so, eventually.
auto lcx = cx.fcx.lcx;
// FIXME: possibly support alias-mode here?
auto decl_ty = node_id_type(lcx.ccx, local.node.id);
auto decl_id = local.node.id;
auto upvars = collect_upvars(cx, body, decl_id);
auto environment_data = build_environment(cx, upvars);
auto llenvptr = environment_data._0;
auto llenvptrty = environment_data._1;
// Step 2: Declare foreach body function.
let str s =
mangle_internal_name_by_path_and_seq(lcx.ccx, lcx.path, "foreach");
// The 'env' arg entering the body function is a fake env member (as in
// the env-part of the normal rust calling convention) that actually
// points to a stack allocated env in this frame. We bundle that env
// pointer along with the foreach-body-fn pointer into a 'normal' fn pair
// and pass it in as a first class fn-arg to the iterator.
auto iter_body_llty =
type_of_fn_full(lcx.ccx, cx.sp, ast::proto_fn, none[TypeRef],
~[rec(mode=ty::mo_alias(false), ty=decl_ty)],
ty::mk_nil(lcx.ccx.tcx), 0u);
let ValueRef lliterbody =
decl_internal_fastcall_fn(lcx.ccx.llmod, s, iter_body_llty);
auto fcx = new_fn_ctxt(lcx, cx.sp, lliterbody);
// Generate code to load the environment out of the
// environment pointer.
load_environment(cx, fcx, llenvptrty, upvars);
// Add an upvar for the loop variable alias.
fcx.llupvars.insert(decl_id, llvm::LLVMGetParam(fcx.llfn, 3u));
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto r = trans_block(bcx, body, return);
finish_fn(fcx, lltop);
if (!r.bcx.build.is_terminated()) {
// if terminated is true, no need for the ret-fail
r.bcx.build.RetVoid();
}
// Step 3: Call iter passing [lliterbody, llenv], plus other args.
alt (seq.node) {
case (ast::expr_call(?f, ?args)) {
auto pair = alloca(cx, T_fn_pair(lcx.ccx.tn, iter_body_llty));
auto code_cell =
cx.build.GEP(pair, [C_int(0), C_int(abi::fn_field_code)]);
cx.build.Store(lliterbody, code_cell);
auto env_cell =
cx.build.GEP(pair, [C_int(0), C_int(abi::fn_field_box)]);
auto llenvblobptr =
cx.build.PointerCast(llenvptr,
T_opaque_closure_ptr(lcx.ccx.tn));
cx.build.Store(llenvblobptr, env_cell);
// log "lliterbody: " + val_str(lcx.ccx.tn, lliterbody);
r = trans_call(cx, f, some[ValueRef](cx.build.Load(pair)), args,
seq.id);
ret rslt(r.bcx, C_nil());
}
}
}
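// While loops lower to three blocks: a scope block for the condition (so
// that temporaries created while evaluating it are cleaned up on every
// iteration), a loop-scope block for the body that branches back to the
// condition, and a "next" block for whatever follows the loop.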
fn trans_while(&@block_ctxt cx, &@ast::expr cond, &ast::block body) ->
result {
auto cond_cx = new_scope_block_ctxt(cx, "while cond");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto body_cx =
new_loop_scope_block_ctxt(cx, option::none[@block_ctxt], next_cx,
"while loop body");
auto body_res = trans_block(body_cx, body, return);
auto cond_res = trans_expr(cond_cx, cond);
body_res.bcx.build.Br(cond_cx.llbb);
auto cond_bcx = trans_block_cleanups(cond_res.bcx, cond_cx);
cond_bcx.build.CondBr(cond_res.val, body_cx.llbb, next_cx.llbb);
cx.build.Br(cond_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn trans_do_while(&@block_ctxt cx, &ast::block body, &@ast::expr cond) ->
result {
auto next_cx = new_sub_block_ctxt(cx, "next");
auto body_cx =
new_loop_scope_block_ctxt(cx, option::none[@block_ctxt], next_cx,
"do-while loop body");
auto body_res = trans_block(body_cx, body, return);
auto cond_res = trans_expr(body_res.bcx, cond);
cond_res.bcx.build.CondBr(cond_res.val, body_cx.llbb, next_cx.llbb);
cx.build.Br(body_cx.llbb);
ret rslt(next_cx, body_res.val);
}
// Pattern matching translation
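// Each arm is lowered in two passes: trans_pat_match emits the tests,
// branching to the next arm's block on a mismatch, and trans_pat_binding
// then allocates and initializes any variables the pattern binds. (Sketch:
// for a tag pattern like `case (some(?x))`, matching compares the tag
// discriminant and binding copies the payload field into a fresh local for
// x.)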
fn trans_pat_match(&@block_ctxt cx, &@ast::pat pat, ValueRef llval,
&@block_ctxt next_cx) -> result {
alt (pat.node) {
case (ast::pat_wild) { ret rslt(cx, llval); }
case (ast::pat_bind(_)) { ret rslt(cx, llval); }
case (ast::pat_lit(?lt)) {
auto lllit = trans_lit(cx.fcx.lcx.ccx, *lt, pat.id);
auto lltype = ty::node_id_to_type(cx.fcx.lcx.ccx.tcx, pat.id);
auto lleq = trans_compare(cx, ast::eq, lltype, llval, lllit);
auto matched_cx = new_sub_block_ctxt(lleq.bcx, "matched_cx");
lleq.bcx.build.CondBr(lleq.val, matched_cx.llbb, next_cx.llbb);
ret rslt(matched_cx, llval);
}
case (ast::pat_tag(?ident, ?subpats)) {
auto vdef;
alt (cx.fcx.lcx.ccx.tcx.def_map.find(pat.id)) {
case (some(?x)) { vdef = ast::variant_def_ids(x); }
case (_) {
cx.fcx.lcx.ccx.sess.span_fatal(pat.span,
"trans_pat_match: unbound var");
}
}
auto variants = ty::tag_variants(cx.fcx.lcx.ccx.tcx, vdef._0);
auto matched_cx = new_sub_block_ctxt(cx, "matched_cx");
auto llblobptr = llval;
if (vec::len(variants) == 1u) {
cx.build.Br(matched_cx.llbb);
} else {
auto lltagptr =
cx.build.PointerCast(llval,
T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn));
auto lldiscrimptr = cx.build.GEP(lltagptr,
[C_int(0), C_int(0)]);
auto lldiscrim = cx.build.Load(lldiscrimptr);
auto variant_tag = 0;
auto i = 0;
for (ty::variant_info v in variants) {
auto this_variant_id = v.id;
if (vdef._1._0 == this_variant_id._0 &&
vdef._1._1 == this_variant_id._1) {
variant_tag = i;
}
i += 1;
}
auto lleq =
cx.build.ICmp(lib::llvm::LLVMIntEQ, lldiscrim,
C_int(variant_tag));
cx.build.CondBr(lleq, matched_cx.llbb, next_cx.llbb);
if (vec::len(subpats) > 0u) {
llblobptr =
matched_cx.build.GEP(lltagptr, [C_int(0), C_int(1)]);
}
}
auto ty_params = ty::node_id_to_type_params
(cx.fcx.lcx.ccx.tcx, pat.id);
if (vec::len(subpats) > 0u) {
auto i = 0;
for (@ast::pat subpat in subpats) {
auto rslt =
GEP_tag(matched_cx, llblobptr, vdef._0, vdef._1,
ty_params, i);
auto llsubvalptr = rslt.val;
matched_cx = rslt.bcx;
auto llsubval =
load_if_immediate(matched_cx, llsubvalptr,
pat_ty(cx.fcx.lcx.ccx.tcx, subpat));
auto subpat_res =
trans_pat_match(matched_cx, subpat, llsubval,
next_cx);
matched_cx = subpat_res.bcx;
i += 1;
}
}
ret rslt(matched_cx, llval);
}
}
}
fn trans_pat_binding(&@block_ctxt cx, &@ast::pat pat, ValueRef llval,
bool bind_alias) -> result {
alt (pat.node) {
case (ast::pat_wild) { ret rslt(cx, llval); }
case (ast::pat_lit(_)) { ret rslt(cx, llval); }
case (ast::pat_bind(?name)) {
if (bind_alias) {
cx.fcx.lllocals.insert(pat.id, llval);
ret rslt(cx, llval);
} else {
auto t = node_id_type(cx.fcx.lcx.ccx, pat.id);
auto rslt = alloc_ty(cx, t);
auto dst = rslt.val;
auto bcx = rslt.bcx;
maybe_name_value(cx.fcx.lcx.ccx, dst, name);
bcx.fcx.lllocals.insert(pat.id, dst);
bcx.cleanups += [clean(bind drop_slot(_, dst, t))];
ret copy_val(bcx, INIT, dst, llval, t);
}
}
case (ast::pat_tag(_, ?subpats)) {
if (vec::len[@ast::pat](subpats) == 0u) { ret rslt(cx, llval); }
// Get the appropriate variant for this tag.
auto vdef;
alt (cx.fcx.lcx.ccx.tcx.def_map.find(pat.id)) {
case (some(?x)) { vdef = ast::variant_def_ids(x); }
case (_) { cx.fcx.lcx.ccx.sess.span_fatal(pat.span,
"trans_pat_binding: internal error, unbound var"); }
}
auto llblobptr = llval;
if (vec::len(ty::tag_variants(cx.fcx.lcx.ccx.tcx, vdef._0))!=1u) {
auto lltagptr = cx.build.PointerCast
(llval, T_opaque_tag_ptr(cx.fcx.lcx.ccx.tn));
llblobptr = cx.build.GEP(lltagptr, [C_int(0), C_int(1)]);
}
auto ty_param_substs =
ty::node_id_to_type_params(cx.fcx.lcx.ccx.tcx, pat.id);
auto this_cx = cx;
auto i = 0;
for (@ast::pat subpat in subpats) {
auto rslt =
GEP_tag(this_cx, llblobptr, vdef._0, vdef._1,
ty_param_substs, i);
this_cx = rslt.bcx;
auto subpat_res =
trans_pat_binding(this_cx, subpat, rslt.val, true);
this_cx = subpat_res.bcx;
i += 1;
}
ret rslt(this_cx, llval);
}
}
}
fn trans_alt(&@block_ctxt cx, &@ast::expr expr, &vec[ast::arm] arms,
ast::node_id id, &out_method output) -> result {
auto expr_res = trans_expr(cx, expr);
auto this_cx = expr_res.bcx;
let vec[result] arm_results = [];
for (ast::arm arm in arms) {
auto next_cx = new_sub_block_ctxt(expr_res.bcx, "next");
auto match_res =
trans_pat_match(this_cx, arm.pat, expr_res.val, next_cx);
auto binding_cx = new_scope_block_ctxt(match_res.bcx, "binding");
match_res.bcx.build.Br(binding_cx.llbb);
auto binding_res =
trans_pat_binding(binding_cx, arm.pat, expr_res.val, false);
auto block_res = trans_block(binding_res.bcx, arm.block, output);
arm_results += [block_res];
this_cx = next_cx;
}
auto default_cx = this_cx;
trans_fail(default_cx, some[span](expr.span),
"non-exhaustive match failure");
ret rslt(join_branches(cx, arm_results), C_nil());
}
type generic_info =
rec(ty::t item_type,
vec[option::t[@tydesc_info]] static_tis,
vec[ValueRef] tydescs);
type lval_result =
rec(result res,
bool is_mem,
option::t[generic_info] generic,
option::t[ValueRef] llobj,
option::t[ty::t] method_ty);
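// An lval_result records how the translated value may be used: is_mem says
// res.val is a pointer that must be loaded to yield the value, generic
// carries the tydescs for a reference to a generic item, and llobj plus
// method_ty are filled in when a method is selected off an object.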
fn lval_mem(&@block_ctxt cx, ValueRef val) -> lval_result {
ret rec(res=rslt(cx, val),
is_mem=true,
generic=none[generic_info],
llobj=none[ValueRef],
method_ty=none[ty::t]);
}
fn lval_val(&@block_ctxt cx, ValueRef val) -> lval_result {
ret rec(res=rslt(cx, val),
is_mem=false,
generic=none[generic_info],
llobj=none[ValueRef],
method_ty=none[ty::t]);
}
fn trans_external_path(&@block_ctxt cx, &ast::def_id did,
&ty::ty_param_count_and_ty tpt) -> lval_result {
auto lcx = cx.fcx.lcx;
auto name = decoder::get_symbol(lcx.ccx.sess, did);
auto v =
get_extern_const(lcx.ccx.externs, lcx.ccx.llmod, name,
type_of_ty_param_count_and_ty(lcx, cx.sp, tpt));
ret lval_val(cx, v);
}
fn lval_generic_fn(&@block_ctxt cx, &ty::ty_param_count_and_ty tpt,
&ast::def_id fn_id, ast::node_id id) -> lval_result {
auto lv;
if (fn_id._0 == ast::local_crate) {
// Internal reference.
assert (cx.fcx.lcx.ccx.fn_pairs.contains_key(fn_id._1));
lv = lval_val(cx, cx.fcx.lcx.ccx.fn_pairs.get(fn_id._1));
} else {
// External reference.
lv = trans_external_path(cx, fn_id, tpt);
}
auto tys = ty::node_id_to_type_params(cx.fcx.lcx.ccx.tcx, id);
if (vec::len[ty::t](tys) != 0u) {
auto bcx = lv.res.bcx;
let vec[ValueRef] tydescs = [];
let vec[option::t[@tydesc_info]] tis = [];
for (ty::t t in tys) {
// TODO: Doesn't always escape.
auto ti = none[@tydesc_info];
auto td = get_tydesc(bcx, t, true, ti);
tis += [ti];
bcx = td.bcx;
vec::push[ValueRef](tydescs, td.val);
}
auto gen = rec(item_type=tpt._1, static_tis=tis, tydescs=tydescs);
lv = rec(res=rslt(bcx, lv.res.val), generic=some[generic_info](gen)
with lv);
}
ret lv;
}
fn lookup_discriminant(&@local_ctxt lcx, &ast::def_id tid, &ast::def_id vid)
-> ValueRef {
alt (lcx.ccx.discrims.find(vid._1)) {
case (none) {
// It's an external discriminant that we haven't seen yet.
assert (vid._0 != ast::local_crate);
auto sym = decoder::get_symbol(lcx.ccx.sess, vid);
auto gvar =
llvm::LLVMAddGlobal(lcx.ccx.llmod, T_int(), str::buf(sym));
llvm::LLVMSetLinkage(gvar,
lib::llvm::LLVMExternalLinkage as
llvm::Linkage);
llvm::LLVMSetGlobalConstant(gvar, True);
lcx.ccx.discrims.insert(vid._1, gvar);
ret gvar;
}
case (some(?llval)) { ret llval; }
}
}
fn trans_path(&@block_ctxt cx, &ast::path p, ast::node_id id) -> lval_result {
auto ccx = cx.fcx.lcx.ccx;
alt (cx.fcx.lcx.ccx.tcx.def_map.find(id)) {
case (some(ast::def_arg(?did))) {
alt (cx.fcx.llargs.find(did._1)) {
case (none) {
assert (cx.fcx.llupvars.contains_key(did._1));
ret lval_mem(cx, cx.fcx.llupvars.get(did._1));
}
case (some(?llval)) { ret lval_mem(cx, llval); }
}
}
case (some(ast::def_local(?did))) {
alt (cx.fcx.lllocals.find(did._1)) {
case (none) {
assert (cx.fcx.llupvars.contains_key(did._1));
ret lval_mem(cx, cx.fcx.llupvars.get(did._1));
}
case (some(?llval)) { ret lval_mem(cx, llval); }
}
}
case (some(ast::def_binding(?did))) {
alt (cx.fcx.lllocals.find(did._1)) {
case (none) {
assert (cx.fcx.llupvars.contains_key(did._1));
ret lval_mem(cx, cx.fcx.llupvars.get(did._1));
}
case (some(?llval)) { ret lval_mem(cx, llval); }
}
}
case (some(ast::def_obj_field(?did))) {
assert (cx.fcx.llobjfields.contains_key(did._1));
ret lval_mem(cx, cx.fcx.llobjfields.get(did._1));
}
case (some(ast::def_fn(?did, _))) {
auto tyt = ty::lookup_item_type(ccx.tcx, did);
ret lval_generic_fn(cx, tyt, did, id);
}
case (some(ast::def_variant(?tid, ?vid))) {
auto v_tyt = ty::lookup_item_type(ccx.tcx, vid);
alt (ty::struct(ccx.tcx, v_tyt._1)) {
case (ty::ty_fn(_, _, _, _, _)) {
// N-ary variant.
ret lval_generic_fn(cx, v_tyt, vid, id);
}
case (_) {
// Nullary variant.
auto tag_ty = node_id_type(ccx, id);
auto alloc_result = alloc_ty(cx, tag_ty);
auto lltagblob = alloc_result.val;
auto lltagty = type_of_tag(ccx, p.span, tid, tag_ty);
auto bcx = alloc_result.bcx;
auto lltagptr = bcx.build.PointerCast
(lltagblob, T_ptr(lltagty));
if (vec::len(ty::tag_variants(ccx.tcx, tid)) != 1u) {
auto lldiscrim_gv =
lookup_discriminant(bcx.fcx.lcx, tid, vid);
auto lldiscrim = bcx.build.Load(lldiscrim_gv);
auto lldiscrimptr = bcx.build.GEP
(lltagptr, [C_int(0), C_int(0)]);
bcx.build.Store(lldiscrim, lldiscrimptr);
}
ret lval_val(bcx, lltagptr);
}
}
}
case (some(ast::def_const(?did))) {
// TODO: externals
assert (ccx.consts.contains_key(did._1));
ret lval_mem(cx, ccx.consts.get(did._1));
}
case (some(ast::def_native_fn(?did))) {
auto tyt = ty::lookup_item_type(ccx.tcx, did);
ret lval_generic_fn(cx, tyt, did, id);
}
case (_) {
ccx.sess.span_unimpl(cx.sp, "def variant in trans");
}
}
}
fn trans_field(&@block_ctxt cx, &span sp, ValueRef v, &ty::t t0,
&ast::ident field, ast::node_id id) -> lval_result {
auto r = autoderef(cx, v, t0);
auto t = r.ty;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_tup(_)) {
let uint ix = ty::field_num(cx.fcx.lcx.ccx.sess, sp, field);
auto v = GEP_tup_like(r.bcx, t, r.val, [0, ix as int]);
ret lval_mem(v.bcx, v.val);
}
case (ty::ty_rec(?fields)) {
let uint ix =
ty::field_idx(cx.fcx.lcx.ccx.sess, sp, field, fields);
auto v = GEP_tup_like(r.bcx, t, r.val, [0, ix as int]);
ret lval_mem(v.bcx, v.val);
}
case (ty::ty_obj(?methods)) {
let uint ix =
ty::method_idx(cx.fcx.lcx.ccx.sess, sp, field, methods);
auto vtbl =
r.bcx.build.GEP(r.val,
[C_int(0), C_int(abi::obj_field_vtbl)]);
vtbl = r.bcx.build.Load(vtbl);
// +1 because slot #0 contains the destructor
auto v = r.bcx.build.GEP(vtbl, [C_int(0), C_int(ix + 1u as int)]);
auto lvo = lval_mem(r.bcx, v);
let ty::t fn_ty =
ty::method_ty_to_fn_ty(cx.fcx.lcx.ccx.tcx, methods.(ix));
ret rec(llobj=some[ValueRef](r.val), method_ty=some[ty::t](fn_ty)
with lvo);
}
case (_) {
cx.fcx.lcx.ccx.sess.unimpl("field variant in trans_field");
}
}
}
fn trans_index(&@block_ctxt cx, &span sp, &@ast::expr base, &@ast::expr idx,
ast::node_id id) -> lval_result {
// Is this an interior vector?
auto base_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, base);
auto exp = trans_expr(cx, base);
auto lv = autoderef(exp.bcx, exp.val, base_ty);
auto base_ty_no_boxes = lv.ty;
auto is_interior =
ty::sequence_is_interior(cx.fcx.lcx.ccx.tcx, base_ty_no_boxes);
auto ix = trans_expr(lv.bcx, idx);
auto v = lv.val;
auto bcx = ix.bcx;
    // Cast the index to the native machine-word integer type; Rust is less
    // strict than LLVM about the widths of index operands.
auto ix_val;
auto ix_size = llsize_of_real(cx.fcx.lcx.ccx, val_ty(ix.val));
auto int_size = llsize_of_real(cx.fcx.lcx.ccx, T_int());
if (ix_size < int_size) {
ix_val = bcx.build.ZExt(ix.val, T_int());
} else if (ix_size > int_size) {
ix_val = bcx.build.Trunc(ix.val, T_int());
} else { ix_val = ix.val; }
auto unit_ty = node_id_type(cx.fcx.lcx.ccx, id);
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
maybe_name_value(cx.fcx.lcx.ccx, unit_sz.val, "unit_sz");
auto scaled_ix = bcx.build.Mul(ix_val, unit_sz.val);
maybe_name_value(cx.fcx.lcx.ccx, scaled_ix, "scaled_ix");
auto interior_len_and_data;
if (is_interior) {
auto rslt = ivec::get_len_and_data(bcx, v, unit_ty);
interior_len_and_data = some(tup(rslt._0, rslt._1));
bcx = rslt._2;
} else { interior_len_and_data = none; }
auto lim;
alt (interior_len_and_data) {
case (some(?lad)) { lim = lad._0; }
case (none) {
lim = bcx.build.GEP(v, [C_int(0), C_int(abi::vec_elt_fill)]);
lim = bcx.build.Load(lim);
}
}
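    // The bounds check below is in bytes: scaled_ix is the element's byte
    // offset, and lim is the vector's fill (exterior vectors) or byte
    // length (interior ones).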
auto bounds_check = bcx.build.ICmp(lib::llvm::LLVMIntULT, scaled_ix, lim);
auto fail_cx = new_sub_block_ctxt(bcx, "fail");
auto next_cx = new_sub_block_ctxt(bcx, "next");
bcx.build.CondBr(bounds_check, next_cx.llbb, fail_cx.llbb);
// fail: bad bounds check.
trans_fail(fail_cx, some[span](sp), "bounds check");
auto body;
alt (interior_len_and_data) {
case (some(?lad)) { body = lad._1; }
case (none) {
body =
next_cx.build.GEP(v,
[C_int(0), C_int(abi::vec_elt_data),
C_int(0)]);
}
}
auto elt;
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty)) {
body = next_cx.build.PointerCast(body, T_ptr(T_i8()));
elt = next_cx.build.GEP(body, [scaled_ix]);
} else {
elt = next_cx.build.GEP(body, [ix_val]);
// We're crossing a box boundary here, so we may need to pointer cast.
auto llunitty = type_of(next_cx.fcx.lcx.ccx, sp, unit_ty);
elt = next_cx.build.PointerCast(elt, T_ptr(llunitty));
}
ret lval_mem(next_cx, elt);
}
// The is_mem flag in the returned lval_result indicates whether the value is
// in memory (that is, represented as an alloca or on the heap, and hence
// needs a 'load' before it can be used as an immediate).
fn trans_lval(&@block_ctxt cx, &@ast::expr e) -> lval_result {
alt (e.node) {
case (ast::expr_path(?p)) { ret trans_path(cx, p, e.id); }
case (ast::expr_field(?base, ?ident)) {
auto r = trans_expr(cx, base);
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, base);
ret trans_field(r.bcx, e.span, r.val, t, ident, e.id);
}
case (ast::expr_index(?base, ?idx)) {
ret trans_index(cx, e.span, base, idx, e.id);
}
case (ast::expr_unary(?unop, ?base)) {
auto ccx = cx.fcx.lcx.ccx;
assert (unop == ast::deref);
auto sub = trans_expr(cx, base);
auto t = ty::expr_ty(ccx.tcx, base);
auto val = alt (ty::struct(ccx.tcx, t)) {
case (ty::ty_box(_)) {
sub.bcx.build.GEP
(sub.val, [C_int(0), C_int(abi::box_rc_field_body)])
}
case (ty::ty_res(_, _, _)) {
sub.bcx.build.GEP(sub.val, [C_int(0), C_int(1)])
}
case (ty::ty_tag(_, _)) {
auto ety = ty::expr_ty(ccx.tcx, e);
auto ellty;
if (ty::type_has_dynamic_size(ccx.tcx, ety)) {
ellty = T_typaram_ptr(ccx.tn);
} else {
ellty = T_ptr(type_of(ccx, e.span, ety));
};
sub.bcx.build.PointerCast(sub.val, ellty)
}
};
ret lval_mem(sub.bcx, val);
}
case (ast::expr_self_method(?ident)) {
alt ({ cx.fcx.llself }) {
case (some(?pair)) {
auto r = pair.v;
auto t = pair.t;
ret trans_field(cx, e.span, r, t, ident, e.id);
}
case (_) {
// Shouldn't happen.
cx.fcx.lcx.ccx.sess.bug("trans_lval called on " +
"expr_self_method in " +
"a context without llself");
}
}
}
case (_) {
ret rec(res=trans_expr(cx, e),
is_mem=false,
generic=none,
llobj=none,
method_ty=none);
}
}
}
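// Integer-to-integer casts: a widening cast sign-extends when `signed` is
// set and zero-extends otherwise; a narrowing or same-width cast truncates
// or bitcasts.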
fn int_cast(&@block_ctxt bcx, TypeRef lldsttype, TypeRef llsrctype,
ValueRef llsrc, bool signed) -> ValueRef {
if (llvm::LLVMGetIntTypeWidth(lldsttype) >
llvm::LLVMGetIntTypeWidth(llsrctype)) {
if (signed) {
// Widening signed cast.
ret bcx.build.SExtOrBitCast(llsrc, lldsttype);
}
// Widening unsigned cast.
ret bcx.build.ZExtOrBitCast(llsrc, lldsttype);
}
ret bcx.build.TruncOrBitCast(llsrc, lldsttype);
}
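// Translates cast expressions (`e as T`). Native (opaque) types convert via
// PtrToInt/IntToPtr, integer-to-integer casts go through int_cast above,
// and integer-to-float casts pick SIToFP or UIToFP from the signedness of
// the source; other floating-point casts are not yet implemented here.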
fn trans_cast(&@block_ctxt cx, &@ast::expr e, ast::node_id id) -> result {
auto e_res = trans_expr(cx, e);
auto llsrctype = val_ty(e_res.val);
auto t = node_id_type(cx.fcx.lcx.ccx, id);
auto lldsttype = type_of(cx.fcx.lcx.ccx, e.span, t);
if (!ty::type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
// TODO: native-to-native casts
if (ty::type_is_native(cx.fcx.lcx.ccx.tcx,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, e))) {
e_res =
rslt(e_res.bcx,
e_res.bcx.build.PtrToInt(e_res.val, lldsttype));
} else if (ty::type_is_native(cx.fcx.lcx.ccx.tcx, t)) {
e_res =
rslt(e_res.bcx,
e_res.bcx.build.IntToPtr(e_res.val, lldsttype));
} else {
e_res =
rslt(e_res.bcx,
int_cast(e_res.bcx, lldsttype, llsrctype, e_res.val,
ty::type_is_signed(cx.fcx.lcx.ccx.tcx, t)));
}
}
else {
if (ty::type_is_integral(cx.fcx.lcx.ccx.tcx,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, e))) {
if (ty::type_is_signed(cx.fcx.lcx.ccx.tcx,
ty::expr_ty(cx.fcx.lcx.ccx.tcx, e))) {
e_res = rslt(e_res.bcx,
e_res.bcx.build.SIToFP(e_res.val, lldsttype));
}
else {
e_res = rslt(e_res.bcx,
e_res.bcx.build.UIToFP(e_res.val, lldsttype));
}
}
else { cx.fcx.lcx.ccx.sess.unimpl("fp cast"); }
}
ret e_res;
}
fn trans_bind_thunk(&@local_ctxt cx, &span sp, &ty::t incoming_fty,
&ty::t outgoing_fty, &vec[option::t[@ast::expr]] args,
&ty::t closure_ty, &vec[ty::t] bound_tys,
uint ty_param_count) -> ValueRef {
// Here we're not necessarily constructing a thunk in the sense of
// "function with no arguments". The result of compiling 'bind f(foo,
// bar, baz)' would be a thunk that, when called, applies f to those
// arguments and returns the result. But we're stretching the meaning of
// the word "thunk" here to also mean the result of compiling, say, 'bind
// f(foo, _, baz)', or any other bind expression that binds f and leaves
// some (or all) of the arguments unbound.
// Here, 'incoming_fty' is the type of the entire bind expression, while
// 'outgoing_fty' is the type of the function that is having some of its
// arguments bound. If f is a function that takes three arguments of type
// int and returns int, and we're translating, say, 'bind f(3, _, 5)',
// then outgoing_fty is the type of f, which is (int, int, int) -> int,
// and incoming_fty is the type of 'bind f(3, _, 5)', which is int -> int.
// Once translated, the entire bind expression will be the call f(foo,
// bar, baz) wrapped in a (so-called) thunk that takes 'bar' as its
// argument and that has bindings of 'foo' to 3 and 'baz' to 5 and a
// pointer to 'f' all saved in its environment. So, our job is to
// construct and return that thunk.
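    // As a sketch (illustrative only, not code we emit as source): for
    // 'fn f(int a, int b, int c) -> int', translating 'bind f(3, _, 5)'
    // yields roughly
    //
    //     fn thunk(int bar) -> int { ret f(3, bar, 5); }
    //
    // except that 3, 5, and the pointer to f live in the thunk's
    // environment (the closure built by trans_bind) rather than in its
    // body.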
// Give the thunk a name, type, and value.
let str s =
mangle_internal_name_by_path_and_seq(cx.ccx, cx.path, "thunk");
let TypeRef llthunk_ty =
get_pair_fn_ty(type_of(cx.ccx, sp, incoming_fty));
let ValueRef llthunk =
decl_internal_fastcall_fn(cx.ccx.llmod, s, llthunk_ty);
// Create a new function context and block context for the thunk, and hold
// onto a pointer to the first block in the function for later use.
auto fcx = new_fn_ctxt(cx, sp, llthunk);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// The 'llenv' that will arrive in the thunk we're creating is an
// environment that will contain the values of its arguments and a pointer
// to the original function. So, let's create one of those:
    // The llenv pointer needs to point at something of the correct
    // size. That size is given by 'closure_ty', which was synthesized
    // by trans_bind.
auto llclosure_ptr_ty =
type_of(cx.ccx, sp, ty::mk_imm_box(cx.ccx.tcx, closure_ty));
auto llclosure = bcx.build.PointerCast(fcx.llenv, llclosure_ptr_ty);
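    // (Sketch of what llclosure now points at, mirroring the closure_ty
    // synthesized in trans_bind: a box whose body is the tuple
    //
    //     (tydesc, target fn pair, (bound arg values...),
    //      (one tydesc per type parameter...))
    //
    // preceded by the box's refcount.)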
// "target", in this context, means the function that's having some of its
// arguments bound and that will be called inside the thunk we're
// creating. (In our running example, target is the function f.) Pick
// out the pointer to the target function from the environment.
auto lltarget =
GEP_tup_like(bcx, closure_ty, llclosure,
[0, abi::box_rc_field_body, abi::closure_elt_target]);
bcx = lltarget.bcx;
// And then, pick out the target function's own environment. That's what
// we'll use as the environment the thunk gets.
auto lltargetclosure =
bcx.build.GEP(lltarget.val, [C_int(0), C_int(abi::fn_field_box)]);
lltargetclosure = bcx.build.Load(lltargetclosure);
// Get f's return type, which will also be the return type of the entire
// bind expression.
auto outgoing_ret_ty = ty::ty_fn_ret(cx.ccx.tcx, outgoing_fty);
// Get the types of the arguments to f.
auto outgoing_args = ty::ty_fn_args(cx.ccx.tcx, outgoing_fty);
// The 'llretptr' that will arrive in the thunk we're creating also needs
// to be the correct size. Cast it to the size of f's return type, if
// necessary.
auto llretptr = fcx.llretptr;
if (ty::type_has_dynamic_size(cx.ccx.tcx, outgoing_ret_ty)) {
llretptr = bcx.build.PointerCast(llretptr, T_typaram_ptr(cx.ccx.tn));
}
// Set up the three implicit arguments to the thunk.
let vec[ValueRef] llargs = [llretptr, fcx.lltaskptr, lltargetclosure];
// Copy in the type parameters.
let uint i = 0u;
while (i < ty_param_count) {
auto lltyparam_ptr =
GEP_tup_like(bcx, closure_ty, llclosure,
[0, abi::box_rc_field_body,
abi::closure_elt_ty_params, i as int]);
bcx = lltyparam_ptr.bcx;
auto td = bcx.build.Load(lltyparam_ptr.val);
llargs += [td];
fcx.lltydescs += [td];
i += 1u;
}
let uint a = 3u; // retptr, task ptr, env come first
let int b = 0;
let uint outgoing_arg_index = 0u;
let vec[TypeRef] llout_arg_tys =
type_of_explicit_args(cx.ccx, sp, outgoing_args);
for (option::t[@ast::expr] arg in args) {
auto out_arg = outgoing_args.(outgoing_arg_index);
auto llout_arg_ty = llout_arg_tys.(outgoing_arg_index);
alt (arg) {
// Arg provided at binding time; thunk copies it from
// closure.
case (some(?e)) {
auto e_ty = ty::expr_ty(cx.ccx.tcx, e);
auto bound_arg =
GEP_tup_like(bcx, closure_ty, llclosure,
[0, abi::box_rc_field_body,
abi::closure_elt_bindings, b]);
bcx = bound_arg.bcx;
auto val = bound_arg.val;
if (out_arg.mode == ty::mo_val) {
if (type_is_immediate(cx.ccx, e_ty)) {
val = bcx.build.Load(val);
bcx = copy_ty(bcx, val, e_ty).bcx;
} else {
bcx = copy_ty(bcx, val, e_ty).bcx;
val = bcx.build.Load(val);
}
} else if (ty::type_contains_params(cx.ccx.tcx, out_arg.ty)) {
assert (out_arg.mode != ty::mo_val);
val = bcx.build.PointerCast(val, llout_arg_ty);
}
llargs += [val];
b += 1;
}
          // Arg will be provided when the thunk is invoked.
          case (none) {
let ValueRef passed_arg = llvm::LLVMGetParam(llthunk, a);
if (ty::type_contains_params(cx.ccx.tcx, out_arg.ty)) {
assert (out_arg.mode != ty::mo_val);
passed_arg =
bcx.build.PointerCast(passed_arg, llout_arg_ty);
}
llargs += [passed_arg];
a += 1u;
}
}
outgoing_arg_index += 1u;
}
// FIXME: turn this call + ret into a tail call.
auto lltargetfn =
bcx.build.GEP(lltarget.val, [C_int(0), C_int(abi::fn_field_code)]);
// Cast the outgoing function to the appropriate type (see the comments in
// trans_bind below for why this is necessary).
auto lltargetty =
type_of_fn(bcx.fcx.lcx.ccx, sp,
ty::ty_fn_proto(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
outgoing_args, outgoing_ret_ty, ty_param_count);
lltargetfn = bcx.build.PointerCast(lltargetfn, T_ptr(T_ptr(lltargetty)));
lltargetfn = bcx.build.Load(lltargetfn);
bcx.build.FastCall(lltargetfn, llargs);
bcx.build.RetVoid();
finish_fn(fcx, lltop);
ret llthunk;
}
fn trans_bind(&@block_ctxt cx, &@ast::expr f,
&vec[option::t[@ast::expr]] args, ast::node_id id) -> result {
auto f_res = trans_lval(cx, f);
if (f_res.is_mem) {
cx.fcx.lcx.ccx.sess.unimpl("re-binding existing function");
} else {
let vec[@ast::expr] bound = [];
for (option::t[@ast::expr] argopt in args) {
alt (argopt) {
case (none) { }
case (some(?e)) { vec::push[@ast::expr](bound, e); }
}
}
// Figure out which tydescs we need to pass, if any.
let ty::t outgoing_fty;
let vec[ValueRef] lltydescs;
alt (f_res.generic) {
case (none) {
outgoing_fty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, f);
lltydescs = [];
}
case (some(?ginfo)) {
lazily_emit_all_generic_info_tydesc_glues(cx, ginfo);
outgoing_fty = ginfo.item_type;
lltydescs = ginfo.tydescs;
}
}
auto ty_param_count = vec::len[ValueRef](lltydescs);
if (vec::len[@ast::expr](bound) == 0u && ty_param_count == 0u) {
// Trivial 'binding': just return the static pair-ptr.
ret f_res.res;
} else {
auto bcx = f_res.res.bcx;
auto pair_t = node_type(cx.fcx.lcx.ccx, cx.sp, id);
auto pair_v = alloca(bcx, pair_t);
// Translate the bound expressions.
let vec[ty::t] bound_tys = [];
let vec[ValueRef] bound_vals = [];
for (@ast::expr e in bound) {
auto arg = trans_expr(bcx, e);
bcx = arg.bcx;
vec::push[ValueRef](bound_vals, arg.val);
bound_tys += [ty::expr_ty(cx.fcx.lcx.ccx.tcx, e)];
}
// Synthesize a closure type.
// First, synthesize a tuple type containing the types of all the
// bound expressions.
// bindings_ty = [bound_ty1, bound_ty2, ...]
// TODO: Remove this vec->ivec conversion.
auto bound_tys_ivec = ~[];
for (ty::t typ in bound_tys) { bound_tys_ivec += ~[typ]; }
let ty::t bindings_ty =
ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, bound_tys_ivec);
// NB: keep this in sync with T_closure_ptr; we're making
// a ty::t structure that has the same "shape" as the LLVM type
// it constructs.
// Make a vector that contains ty_param_count copies of tydesc_ty.
// (We'll need room for that many tydescs in the closure.)
let ty::t tydesc_ty = ty::mk_type(cx.fcx.lcx.ccx.tcx);
let ty::t[] captured_tys =
std::ivec::init_elt[ty::t](tydesc_ty, ty_param_count);
// Get all the types we've got (some of which we synthesized
            // ourselves) into a vector. The whole thing ends up looking
// like:
// closure_tys = [tydesc_ty, outgoing_fty, [bound_ty1, bound_ty2,
// ...], [tydesc_ty, tydesc_ty, ...]]
let ty::t[] closure_tys =
~[tydesc_ty, outgoing_fty, bindings_ty,
ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, captured_tys)];
// Finally, synthesize a type for that whole vector.
let ty::t closure_ty =
ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, closure_tys);
// Allocate a box that can hold something closure-sized, including
// space for a refcount.
auto r = trans_malloc_boxed(bcx, closure_ty);
auto box = r.val;
bcx = r.bcx;
// Grab onto the refcount and body parts of the box we allocated.
auto rc =
bcx.build.GEP(box,
[C_int(0), C_int(abi::box_rc_field_refcnt)]);
auto closure =
bcx.build.GEP(box, [C_int(0), C_int(abi::box_rc_field_body)]);
bcx.build.Store(C_int(1), rc);
// Store bindings tydesc.
auto bound_tydesc =
bcx.build.GEP(closure,
[C_int(0), C_int(abi::closure_elt_tydesc)]);
auto ti = none[@tydesc_info];
auto bindings_tydesc = get_tydesc(bcx, bindings_ty, true, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
bcx = bindings_tydesc.bcx;
bcx.build.Store(bindings_tydesc.val, bound_tydesc);
// Determine the LLVM type for the outgoing function type. This
// may be different from the type returned by trans_malloc_boxed()
// since we have more information than that function does;
// specifically, we know how many type descriptors the outgoing
// function has, which type_of() doesn't, as only we know which
// item the function refers to.
auto llfnty =
type_of_fn(bcx.fcx.lcx.ccx, cx.sp,
ty::ty_fn_proto(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty::ty_fn_args(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty::ty_fn_ret(bcx.fcx.lcx.ccx.tcx, outgoing_fty),
ty_param_count);
auto llclosurety = T_ptr(T_fn_pair(bcx.fcx.lcx.ccx.tn, llfnty));
// Store thunk-target.
auto bound_target =
bcx.build.GEP(closure,
[C_int(0), C_int(abi::closure_elt_target)]);
auto src = bcx.build.Load(f_res.res.val);
bound_target = bcx.build.PointerCast(bound_target, llclosurety);
bcx.build.Store(src, bound_target);
// Copy expr values into boxed bindings.
auto i = 0u;
auto bindings =
bcx.build.GEP(closure,
[C_int(0), C_int(abi::closure_elt_bindings)]);
for (ValueRef v in bound_vals) {
auto bound =
bcx.build.GEP(bindings, [C_int(0), C_int(i as int)]);
bcx = copy_val(bcx, INIT, bound, v, bound_tys.(i)).bcx;
i += 1u;
}
// If necessary, copy tydescs describing type parameters into the
// appropriate slot in the closure.
alt (f_res.generic) {
case (none) {/* nothing to do */ }
case (some(?ginfo)) {
lazily_emit_all_generic_info_tydesc_glues(cx, ginfo);
auto ty_params_slot =
bcx.build.GEP(closure,
[C_int(0),
C_int(abi::closure_elt_ty_params)]);
auto i = 0;
for (ValueRef td in ginfo.tydescs) {
auto ty_param_slot =
bcx.build.GEP(ty_params_slot,
[C_int(0), C_int(i)]);
bcx.build.Store(td, ty_param_slot);
i += 1;
}
outgoing_fty = ginfo.item_type;
}
}
// Make thunk and store thunk-ptr in outer pair's code slot.
auto pair_code =
bcx.build.GEP(pair_v, [C_int(0), C_int(abi::fn_field_code)]);
// The type of the entire bind expression.
let ty::t pair_ty = node_id_type(cx.fcx.lcx.ccx, id);
let ValueRef llthunk =
trans_bind_thunk(cx.fcx.lcx, cx.sp, pair_ty, outgoing_fty,
args, closure_ty, bound_tys, ty_param_count);
bcx.build.Store(llthunk, pair_code);
// Store box ptr in outer pair's box slot.
auto tn = bcx.fcx.lcx.ccx.tn;
auto pair_box =
bcx.build.GEP(pair_v, [C_int(0), C_int(abi::fn_field_box)]);
bcx.build.Store(bcx.build.PointerCast(box,
T_opaque_closure_ptr(tn)),
pair_box);
find_scope_cx(cx).cleanups +=
[clean(bind drop_slot(_, pair_v, pair_ty))];
ret rslt(bcx, pair_v);
}
}
}
fn trans_arg_expr(&@block_ctxt cx, &ty::arg arg, TypeRef lldestty0,
&@ast::expr e) -> result {
auto val;
auto bcx = cx;
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
if (ty::type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
auto re = trans_expr(bcx, e);
val = re.val;
bcx = re.bcx;
} else if (arg.mode != ty::mo_val) {
let lval_result lv;
if (ty::is_lval(e)) {
lv = trans_lval(bcx, e);
} else {
auto r = trans_expr(bcx, e);
if (type_is_immediate(cx.fcx.lcx.ccx, e_ty)) {
lv = lval_val(r.bcx, r.val);
} else { lv = lval_mem(r.bcx, r.val); }
}
bcx = lv.res.bcx;
if (lv.is_mem) {
val = lv.res.val;
} else {
// Non-mem but we're trying to alias; synthesize an
// alloca, spill to it and pass its address.
val = do_spill(lv.res.bcx, lv.res.val);
}
} else { auto re = trans_expr(bcx, e); val = re.val; bcx = re.bcx; }
auto is_bot = ty::type_is_bot(cx.fcx.lcx.ccx.tcx, e_ty);
// Make a copy here if the type is structural and we're passing by value.
if (arg.mode == ty::mo_val && !is_bot) {
if (ty::type_owns_heap_mem(cx.fcx.lcx.ccx.tcx, e_ty)) {
auto rslt = alloc_ty(bcx, e_ty);
bcx = rslt.bcx;
auto dst = rslt.val;
rslt = copy_val(bcx, INIT, dst, val, e_ty);
bcx = rslt.bcx;
val = dst;
} else {
bcx = copy_ty(bcx, val, e_ty).bcx;
}
}
if (is_bot) {
// For values of type _|_, we generate an
// "undef" value, as such a value should never
// be inspected. It's important for the value
// to have type lldestty0 (the callee's expected type).
val = llvm::LLVMGetUndef(lldestty0);
} else if (ty::type_contains_params(cx.fcx.lcx.ccx.tcx, arg.ty)) {
auto lldestty = lldestty0;
if (arg.mode == ty::mo_val
&& ty::type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
lldestty = T_ptr(lldestty);
}
val = bcx.build.PointerCast(val, lldestty);
}
if (arg.mode == ty::mo_val
&& ty::type_is_structural(cx.fcx.lcx.ccx.tcx, e_ty)) {
        // Until here we've been treating the structure by pointer; we
        // are now passing it by value as an arg, so we need to load it.
val = bcx.build.Load(val);
}
ret rslt(bcx, val);
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
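// The implicit argument layout those four functions agree on (sketch,
// matching the order trans_args emits below):
//
//   arg 0:     output (return-slot) pointer
//   arg 1:     task pointer
//   arg 2:     environment (closure bindings or self-obj)
//   args 3...: one tydesc per type parameter, then the iter-body
//              pointer (if any), then the explicit args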
fn trans_args(&@block_ctxt cx, ValueRef llenv, &option::t[ValueRef] llobj,
&option::t[generic_info] gen, &option::t[ValueRef] lliterbody,
&vec[@ast::expr] es, &ty::t fn_ty) ->
tup(@block_ctxt, vec[ValueRef], ValueRef) {
let ty::arg[] args = ty::ty_fn_args(cx.fcx.lcx.ccx.tcx, fn_ty);
let vec[ValueRef] llargs = [];
let vec[ValueRef] lltydescs = [];
let @block_ctxt bcx = cx;
// Arg 0: Output pointer.
// FIXME: test case looks like
// f(1, fail, @42);
if (bcx.build.is_terminated()) {
// This means an earlier arg was divergent.
// So this arg can't be evaluated.
ret tup(bcx, [], C_nil());
}
auto retty = ty::ty_fn_ret(cx.fcx.lcx.ccx.tcx, fn_ty);
auto llretslot_res = alloc_ty(bcx, retty);
bcx = llretslot_res.bcx;
auto llretslot = llretslot_res.val;
alt (gen) {
case (some(?g)) {
lazily_emit_all_generic_info_tydesc_glues(cx, g);
lltydescs = g.tydescs;
args = ty::ty_fn_args(cx.fcx.lcx.ccx.tcx, g.item_type);
retty = ty::ty_fn_ret(cx.fcx.lcx.ccx.tcx, g.item_type);
}
case (_) { }
}
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, retty)) {
llargs +=
[bcx.build.PointerCast(llretslot,
T_typaram_ptr(cx.fcx.lcx.ccx.tn))];
} else if (ty::type_contains_params(cx.fcx.lcx.ccx.tcx, retty)) {
// It's possible that the callee has some generic-ness somewhere in
// its return value -- say a method signature within an obj or a fn
// type deep in a structure -- which the caller has a concrete view
        // of. If so, cast the caller's view of the retslot to the callee's
// view, for the sake of making a type-compatible call.
llargs +=
[cx.build.PointerCast(llretslot,
T_ptr(type_of(bcx.fcx.lcx.ccx, bcx.sp,
retty)))];
} else { llargs += [llretslot]; }
// Arg 1: task pointer.
llargs += [bcx.fcx.lltaskptr];
// Arg 2: Env (closure-bindings / self-obj)
alt (llobj) {
case (some(?ob)) {
            // An object is always found in memory and has not yet been
            // loaded (it arrives as part of an lval x.y dotted
            // method-call).
llargs += [bcx.build.Load(ob)];
}
case (_) { llargs += [llenv]; }
}
    // Args 3 and up: ty_params ...
llargs += lltydescs;
// ... then possibly an lliterbody argument.
alt (lliterbody) {
case (none) { }
case (some(?lli)) { llargs += [lli]; }
}
// ... then explicit args.
// First we figure out the caller's view of the types of the arguments.
// This will be needed if this is a generic call, because the callee has
// to cast her view of the arguments to the caller's view.
auto arg_tys = type_of_explicit_args(cx.fcx.lcx.ccx, cx.sp, args);
auto i = 0u;
for (@ast::expr e in es) {
if (bcx.build.is_terminated()) {
// This means an earlier arg was divergent.
// So this arg can't be evaluated.
break;
}
auto r = trans_arg_expr(bcx, args.(i), arg_tys.(i), e);
bcx = r.bcx;
llargs += [r.val];
i += 1u;
}
ret tup(bcx, llargs, llretslot);
}
fn trans_call(&@block_ctxt cx, &@ast::expr f, &option::t[ValueRef] lliterbody,
&vec[@ast::expr] args, ast::node_id id) -> result {
// NB: 'f' isn't necessarily a function; it might be an entire self-call
// expression because of the hack that allows us to process self-calls
// with trans_call.
auto f_res = trans_lval(cx, f);
let ty::t fn_ty;
alt (f_res.method_ty) {
case (some(?meth)) {
// self-call
fn_ty = meth;
}
case (_) {
fn_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, f);
}
}
auto bcx = f_res.res.bcx;
auto faddr = f_res.res.val;
auto llenv = C_null(T_opaque_closure_ptr(cx.fcx.lcx.ccx.tn));
alt (f_res.llobj) {
case (some(_)) {
// It's a vtbl entry.
faddr = bcx.build.Load(faddr);
}
case (none) {
// It's a closure. We have to autoderef.
auto res = autoderef_lval(bcx, f_res.res.val, fn_ty, true);
bcx = res.bcx;
fn_ty = res.ty;
auto pair = res.val;
faddr =
bcx.build.GEP(pair, [C_int(0), C_int(abi::fn_field_code)]);
faddr = bcx.build.Load(faddr);
auto llclosure =
bcx.build.GEP(pair, [C_int(0), C_int(abi::fn_field_box)]);
llenv = bcx.build.Load(llclosure);
}
}
auto ret_ty = ty::node_id_to_type(cx.fcx.lcx.ccx.tcx, id);
auto args_res =
trans_args(bcx, llenv, f_res.llobj, f_res.generic,
lliterbody, args, fn_ty);
bcx = args_res._0;
auto llargs = args_res._1;
auto llretslot = args_res._2;
/*
log "calling: " + val_str(cx.fcx.lcx.ccx.tn, faddr);
for (ValueRef arg in llargs) {
log "arg: " + val_str(cx.fcx.lcx.ccx.tn, arg);
}
*/
/* If the block is terminated,
then one or more of the args has
type _|_. Since that means it diverges, the code
for the call itself is unreachable. */
auto retval = C_nil();
if (!bcx.build.is_terminated()) {
bcx.build.FastCall(faddr, llargs);
alt (lliterbody) {
case (none) {
if (!ty::type_is_nil(cx.fcx.lcx.ccx.tcx, ret_ty)) {
retval = load_if_immediate(bcx, llretslot, ret_ty);
// Retval doesn't correspond to anything really tangible
// in the frame, but it's a ref all the same, so we put a
// note here to drop it when we're done in this scope.
find_scope_cx(cx).cleanups +=
[clean(bind drop_ty(_, retval, ret_ty))];
}
}
case (some(_)) {
// If there was an lliterbody, it means we were calling an
// iter, and we are *not* the party using its 'output' value,
// we should ignore llretslot.
}
}
}
ret rslt(bcx, retval);
}
fn trans_tup(&@block_ctxt cx, &vec[ast::elt] elts, ast::node_id id)
-> result {
auto bcx = cx;
auto t = node_id_type(bcx.fcx.lcx.ccx, id);
auto tup_res = alloc_ty(bcx, t);
auto tup_val = tup_res.val;
bcx = tup_res.bcx;
find_scope_cx(cx).cleanups += [clean(bind drop_ty(_, tup_val, t))];
let int i = 0;
for (ast::elt e in elts) {
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e.expr);
auto src_res = trans_expr(bcx, e.expr);
bcx = src_res.bcx;
auto dst_res = GEP_tup_like(bcx, t, tup_val, [0, i]);
bcx = dst_res.bcx;
bcx = copy_val(src_res.bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
ret rslt(bcx, tup_val);
}
fn trans_vec(&@block_ctxt cx, &vec[@ast::expr] args, ast::node_id id) ->
result {
auto t = node_id_type(cx.fcx.lcx.ccx, id);
auto unit_ty = t;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_vec(?mt)) { unit_ty = mt.ty; }
case (_) { cx.fcx.lcx.ccx.sess.bug("non-vec type in trans_vec"); }
}
auto bcx = cx;
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
auto data_sz =
bcx.build.Mul(C_int(vec::len[@ast::expr](args) as int), unit_sz.val);
// FIXME: pass tydesc properly.
auto vec_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.new_vec,
[bcx.fcx.lltaskptr, data_sz,
C_null(T_ptr(T_tydesc(bcx.fcx.lcx.ccx.tn)))]);
auto llty = type_of(bcx.fcx.lcx.ccx, bcx.sp, t);
vec_val = bcx.build.PointerCast(vec_val, llty);
find_scope_cx(bcx).cleanups += [clean(bind drop_ty(_, vec_val, t))];
auto body = bcx.build.GEP(vec_val, [C_int(0), C_int(abi::vec_elt_data)]);
auto pseudo_tup_ty =
ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx,
std::ivec::init_elt[ty::t](unit_ty, vec::len(args)));
let int i = 0;
for (@ast::expr e in args) {
auto src_res = trans_expr(bcx, e);
bcx = src_res.bcx;
auto dst_res = GEP_tup_like(bcx, pseudo_tup_ty, body, [0, i]);
bcx = dst_res.bcx;
// Cast the destination type to the source type. This is needed to
// make tags work, for a subtle combination of reasons:
//
// (1) "dst_res" above is derived from "body", which is in turn
// derived from "vec_val".
// (2) "vec_val" has the LLVM type "llty".
// (3) "llty" is the result of calling type_of() on a vector type.
// (4) For tags, type_of() returns a different type depending on
        //     whether the tag is behind a box or not. Vector types are
// considered boxes.
// (5) "src_res" is derived from "unit_ty", which is not behind a box.
auto dst_val;
if (!ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, unit_ty)) {
auto llunit_ty = type_of(cx.fcx.lcx.ccx, bcx.sp, unit_ty);
dst_val = bcx.build.PointerCast(dst_res.val, T_ptr(llunit_ty));
} else { dst_val = dst_res.val; }
bcx = copy_val(bcx, INIT, dst_val, src_res.val, unit_ty).bcx;
i += 1;
}
auto fill = bcx.build.GEP(vec_val, [C_int(0), C_int(abi::vec_elt_fill)]);
bcx.build.Store(data_sz, fill);
ret rslt(bcx, vec_val);
}
// TODO: Move me to ivec::
fn trans_ivec(@block_ctxt bcx, &vec[@ast::expr] args, ast::node_id id) ->
result {
auto typ = node_id_type(bcx.fcx.lcx.ccx, id);
auto unit_ty;
alt (ty::struct(bcx.fcx.lcx.ccx.tcx, typ)) {
case (ty::ty_ivec(?mt)) { unit_ty = mt.ty; }
case (_) { bcx.fcx.lcx.ccx.sess.bug("non-ivec type in trans_ivec"); }
}
auto llunitty = type_of_or_i8(bcx, unit_ty);
auto ares = ivec::alloc(bcx, unit_ty);
bcx = ares.bcx;
auto llvecptr = ares.llptr;
auto unit_sz = ares.llunitsz;
auto llalen = ares.llalen;
find_scope_cx(bcx).cleanups += [clean(bind drop_ty(_, llvecptr, typ))];
auto lllen = bcx.build.Mul(C_uint(vec::len(args)), unit_sz);
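    // Layout sketch, reconstructed from the abi:: fields used below: an
    // interior ivec is { len, alen, elems... }; once the elements spill
    // to the heap, the stub becomes { 0, alen, ptr }, where ptr points
    // at a heap part { len, elems... }.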
// Allocate the vector pieces and store length and allocated length.
auto llfirsteltptr;
if (vec::len(args) > 0u && vec::len(args) <= abi::ivec_default_length) {
// Interior case.
bcx.build.Store(lllen,
bcx.build.InBoundsGEP(llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_len)]));
bcx.build.Store(llalen,
bcx.build.InBoundsGEP(llvecptr,
[C_int(0),
C_uint(abi::ivec_elt_alen)]));
llfirsteltptr =
bcx.build.InBoundsGEP(llvecptr,
[C_int(0), C_uint(abi::ivec_elt_elems),
C_int(0)]);
} else {
// Heap case.
auto stub_z = [C_int(0), C_uint(abi::ivec_heap_stub_elt_zero)];
auto stub_a = [C_int(0), C_uint(abi::ivec_heap_stub_elt_alen)];
auto stub_p = [C_int(0), C_uint(abi::ivec_heap_stub_elt_ptr)];
auto llstubty = T_ivec_heap(llunitty);
auto llstubptr = bcx.build.PointerCast(llvecptr, T_ptr(llstubty));
bcx.build.Store(C_int(0), bcx.build.InBoundsGEP(llstubptr, stub_z));
auto llheapty = T_ivec_heap_part(llunitty);
if (vec::len(args) == 0u) {
// Null heap pointer indicates a zero-length vector.
bcx.build.Store(llalen, bcx.build.InBoundsGEP(llstubptr, stub_a));
bcx.build.Store(C_null(T_ptr(llheapty)),
bcx.build.InBoundsGEP(llstubptr, stub_p));
llfirsteltptr = C_null(T_ptr(llunitty));
} else {
bcx.build.Store(lllen, bcx.build.InBoundsGEP(llstubptr, stub_a));
auto llheapsz = bcx.build.Add(llsize_of(llheapty), lllen);
auto rslt = trans_raw_malloc(bcx, T_ptr(llheapty), llheapsz);
bcx = rslt.bcx;
auto llheapptr = rslt.val;
bcx.build.Store(llheapptr,
bcx.build.InBoundsGEP(llstubptr, stub_p));
auto heap_l = [C_int(0), C_uint(abi::ivec_heap_elt_len)];
bcx.build.Store(lllen, bcx.build.InBoundsGEP(llheapptr, heap_l));
llfirsteltptr =
bcx.build.InBoundsGEP(llheapptr,
[C_int(0),
C_uint(abi::ivec_heap_elt_elems),
C_int(0)]);
}
}
// Store the individual elements.
auto i = 0u;
for (@ast::expr e in args) {
auto rslt = trans_expr(bcx, e);
bcx = rslt.bcx;
auto llsrc = rslt.val;
auto lleltptr;
if (ty::type_has_dynamic_size(bcx.fcx.lcx.ccx.tcx, unit_ty)) {
lleltptr =
bcx.build.InBoundsGEP(llfirsteltptr,
[bcx.build.Mul(C_uint(i), unit_sz)]);
} else {
lleltptr = bcx.build.InBoundsGEP(llfirsteltptr, [C_uint(i)]);
}
bcx = copy_val(bcx, INIT, lleltptr, llsrc, unit_ty).bcx;
i += 1u;
}
ret rslt(bcx, llvecptr);
}
fn trans_rec(&@block_ctxt cx, &vec[ast::field] fields,
&option::t[@ast::expr] base, ast::node_id id) -> result {
auto bcx = cx;
auto t = node_id_type(bcx.fcx.lcx.ccx, id);
auto rec_res = alloc_ty(bcx, t);
auto rec_val = rec_res.val;
bcx = rec_res.bcx;
find_scope_cx(cx).cleanups += [clean(bind drop_ty(_, rec_val, t))];
let int i = 0;
auto base_val = C_nil();
alt (base) {
case (none) { }
case (some(?bexp)) {
auto base_res = trans_expr(bcx, bexp);
bcx = base_res.bcx;
base_val = base_res.val;
}
}
let ty::field[] ty_fields = ~[];
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_rec(?flds)) { ty_fields = flds; }
}
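    // For each field in the record type: translate the expression the
    // literal provides for it, if any; otherwise copy the field's value
    // out of the base record.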
for (ty::field tf in ty_fields) {
auto e_ty = tf.mt.ty;
auto dst_res = GEP_tup_like(bcx, t, rec_val, [0, i]);
bcx = dst_res.bcx;
auto expr_provided = false;
auto src_res = rslt(bcx, C_nil());
for (ast::field f in fields) {
if (str::eq(f.node.ident, tf.ident)) {
expr_provided = true;
src_res = trans_expr(bcx, f.node.expr);
}
}
if (!expr_provided) {
src_res = GEP_tup_like(bcx, t, base_val, [0, i]);
src_res =
rslt(src_res.bcx, load_if_immediate(bcx, src_res.val, e_ty));
}
bcx = src_res.bcx;
bcx = copy_val(bcx, INIT, dst_res.val, src_res.val, e_ty).bcx;
i += 1;
}
ret rslt(bcx, rec_val);
}
fn trans_expr(&@block_ctxt cx, &@ast::expr e) -> result {
ret trans_expr_out(cx, e, return);
}
fn trans_expr_out(&@block_ctxt cx, &@ast::expr e, out_method output) ->
result {
// FIXME Fill in cx.sp
alt (e.node) {
case (ast::expr_lit(?lit)) {
ret rslt(cx, trans_lit(cx.fcx.lcx.ccx, *lit, e.id));
}
case (ast::expr_unary(?op, ?x)) {
if (op != ast::deref) { ret trans_unary(cx, op, x, e.id); }
}
case (ast::expr_binary(?op, ?x, ?y)) {
ret trans_binary(cx, op, x, y);
}
case (ast::expr_if(?cond, ?thn, ?els)) {
ret with_out_method(bind trans_if(cx, cond, thn, els, e.id, _),
cx, e.id, output);
}
case (ast::expr_if_check(?cond, ?thn, ?els)) {
ret with_out_method(bind trans_if(cx, cond, thn, els, e.id, _),
cx, e.id, output);
}
case (ast::expr_ternary(_, _, _)) {
ret trans_expr_out(cx, ast::ternary_to_if(e), output);
}
case (ast::expr_for(?decl, ?seq, ?body)) {
ret trans_for(cx, decl, seq, body);
}
case (ast::expr_for_each(?decl, ?seq, ?body)) {
ret trans_for_each(cx, decl, seq, body);
}
case (ast::expr_while(?cond, ?body)) {
ret trans_while(cx, cond, body);
}
case (ast::expr_do_while(?body, ?cond)) {
ret trans_do_while(cx, body, cond);
}
case (ast::expr_alt(?expr, ?arms)) {
ret with_out_method(bind trans_alt(cx, expr, arms, e.id, _),
cx, e.id, output);
}
case (ast::expr_fn(?f)) {
auto ccx = cx.fcx.lcx.ccx;
let TypeRef llfnty =
alt (ty::struct(ccx.tcx, node_id_type(ccx, e.id))) {
case (ty::ty_fn(?proto, ?inputs, ?output, _, _)) {
type_of_fn_full(ccx, e.span, proto, none, inputs,
output, 0u)
}
};
auto sub_cx = extend_path(cx.fcx.lcx, ccx.names.next("anon"));
auto s = mangle_internal_name_by_path(ccx, sub_cx.path);
auto llfn = decl_internal_fastcall_fn(ccx.llmod, s, llfnty);
trans_fn(sub_cx, e.span, f, llfn, none, [], e.id);
ret rslt(cx, create_fn_pair(ccx, s, llfnty, llfn, false));
}
case (ast::expr_block(?blk)) {
auto sub_cx = new_scope_block_ctxt(cx, "block-expr body");
auto next_cx = new_sub_block_ctxt(cx, "next");
auto sub =
with_out_method(bind trans_block(sub_cx, blk, _), cx, e.id,
output);
cx.build.Br(sub_cx.llbb);
sub.bcx.build.Br(next_cx.llbb);
ret rslt(next_cx, sub.val);
}
case (ast::expr_move(?dst, ?src)) {
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
// FIXME Fill in lhs_res.res.bcx.sp
auto rhs_res = trans_lval(lhs_res.res.bcx, src);
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, src);
// FIXME: calculate copy init-ness in typestate.
auto move_res =
move_val(rhs_res.res.bcx, DROP_EXISTING, lhs_res.res.val,
rhs_res.res.val, t);
ret rslt(move_res.bcx, C_nil());
}
case (ast::expr_assign(?dst, ?src)) {
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
// FIXME Fill in lhs_res.res.bcx.sp
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, src);
// FIXME: calculate copy init-ness in typestate.
auto copy_res =
copy_val(rhs_res.bcx, DROP_EXISTING, lhs_res.res.val,
rhs_res.val, t);
ret rslt(copy_res.bcx, C_nil());
}
case (ast::expr_swap(?dst, ?src)) {
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
// FIXME Fill in lhs_res.res.bcx.sp
auto rhs_res = trans_lval(lhs_res.res.bcx, src);
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, src);
auto tmp_res = alloc_ty(rhs_res.res.bcx, t);
// Swap through a temporary.
auto move1_res =
memmove_ty(tmp_res.bcx, tmp_res.val, lhs_res.res.val, t);
auto move2_res =
memmove_ty(move1_res.bcx, lhs_res.res.val, rhs_res.res.val,
t);
auto move3_res =
memmove_ty(move2_res.bcx, rhs_res.res.val, tmp_res.val, t);
ret rslt(move3_res.bcx, C_nil());
}
case (ast::expr_assign_op(?op, ?dst, ?src)) {
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, src);
auto lhs_res = trans_lval(cx, dst);
assert (lhs_res.is_mem);
// FIXME Fill in lhs_res.res.bcx.sp
auto rhs_res = trans_expr(lhs_res.res.bcx, src);
if (ty::type_is_sequence(cx.fcx.lcx.ccx.tcx, t)) {
alt (op) {
case (ast::add) {
if (ty::sequence_is_interior(cx.fcx.lcx.ccx.tcx, t)) {
ret ivec::trans_append(rhs_res.bcx, t,
lhs_res.res.val,
rhs_res.val);
}
ret trans_vec_append(rhs_res.bcx, t, lhs_res.res.val,
rhs_res.val);
}
case (_) { }
}
}
auto lhs_val = load_if_immediate(rhs_res.bcx, lhs_res.res.val, t);
auto v =
trans_eager_binop(rhs_res.bcx, op, t, lhs_val, rhs_res.val);
// FIXME: calculate copy init-ness in typestate.
auto copy_res =
copy_val(v.bcx, DROP_EXISTING, lhs_res.res.val, v.val, t);
ret rslt(copy_res.bcx, C_nil());
}
case (ast::expr_bind(?f, ?args)) {
ret trans_bind(cx, f, args, e.id);
}
case (ast::expr_call(?f, ?args)) {
ret trans_call(cx, f, none[ValueRef], args, e.id);
}
case (ast::expr_cast(?val, _)) { ret trans_cast(cx, val, e.id); }
case (ast::expr_vec(?args, _, ast::sk_rc)) {
ret trans_vec(cx, args, e.id);
}
case (ast::expr_vec(?args, _, ast::sk_unique)) {
ret trans_ivec(cx, args, e.id);
}
case (ast::expr_tup(?args)) { ret trans_tup(cx, args, e.id); }
case (ast::expr_rec(?args, ?base)) {
ret trans_rec(cx, args, base, e.id);
}
case (ast::expr_ext(_, _, _, ?expanded)) {
ret trans_expr(cx, expanded);
}
case (ast::expr_fail(?expr)) {
ret trans_fail_expr(cx, some(e.span), expr);
}
case (ast::expr_log(?lvl, ?a)) { ret trans_log(lvl, cx, a); }
case (ast::expr_assert(?a)) {
ret trans_check_expr(cx, a, "Assertion");
}
case (ast::expr_check(ast::checked, ?a)) {
ret trans_check_expr(cx, a, "Predicate");
}
case (ast::expr_check(ast::unchecked, ?a)) {
/* Claims are turned on and off by a global variable
that the RTS sets. This case generates code to
check the value of that variable, doing nothing
if it's set to false and acting like a check
otherwise. */
auto c = get_extern_const(cx.fcx.lcx.ccx.externs,
cx.fcx.lcx.ccx.llmod,
"check_claims", T_bool());
auto cond = cx.build.Load(c);
auto then_cx = new_scope_block_ctxt(cx, "claim_then");
auto check_res = trans_check_expr(then_cx, a, "Claim");
auto else_cx = new_scope_block_ctxt(cx, "else");
auto els = rslt(else_cx, C_nil());
cx.build.CondBr(cond, then_cx.llbb, else_cx.llbb);
ret rslt(join_branches(cx, [check_res, els]), C_nil());
}
case (ast::expr_break) { ret trans_break(e.span, cx); }
case (ast::expr_cont) { ret trans_cont(e.span, cx); }
case (ast::expr_ret(?ex)) { ret trans_ret(cx, ex); }
case (ast::expr_put(?ex)) { ret trans_put(cx, ex); }
case (ast::expr_be(?ex)) { ret trans_be(cx, ex); }
case (ast::expr_port(_)) { ret trans_port(cx, e.id); }
case (ast::expr_chan(?ex)) { ret trans_chan(cx, ex, e.id); }
case (ast::expr_send(?lhs, ?rhs)) {
ret trans_send(cx, lhs, rhs, e.id);
}
case (ast::expr_recv(?lhs, ?rhs)) {
ret trans_recv(cx, lhs, rhs, e.id);
}
case (ast::expr_spawn(?dom, ?name, ?func, ?args)) {
ret trans_spawn(cx, dom, name, func, args, e.id);
}
case (ast::expr_anon_obj(?anon_obj, ?tps, _)) {
ret trans_anon_obj(cx, e.span, anon_obj, tps, e.id);
}
case (_) {
// The expression is an lvalue. Fall through.
assert (ty::is_lval(e)); // make sure it really is and that we
// didn't forget to add a case for a new expr!
}
}
// lval cases fall through to trans_lval and then
// possibly load the result (if it's non-structural).
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto sub = trans_lval(cx, e);
ret rslt(sub.res.bcx, load_if_immediate(sub.res.bcx, sub.res.val, t));
}
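// Runs 'work', hoisting its result into a temporary alloca when the
// caller didn't supply a destination: the slot is zeroed, registered
// for cleanup, filled via save_in, and the value is loaded back out (if
// immediate) as the overall result.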
fn with_out_method(fn(&out_method) -> result work, @block_ctxt cx,
ast::node_id id, &out_method outer_output) -> result {
auto ccx = cx.fcx.lcx.ccx;
if (outer_output != return) {
ret work(outer_output);
} else {
auto tp = node_id_type(ccx, id);
if (ty::type_is_nil(ccx.tcx, tp)) { ret work(return); }
auto res_alloca = alloc_ty(cx, tp);
cx = zero_alloca(res_alloca.bcx, res_alloca.val, tp).bcx;
fn drop_hoisted_ty(&@block_ctxt cx, ValueRef target, ty::t t) ->
result {
auto reg_val = load_if_immediate(cx, target, t);
ret drop_ty(cx, reg_val, t);
}
auto cleanup = bind drop_hoisted_ty(_, res_alloca.val, tp);
find_scope_cx(cx).cleanups += [clean(cleanup)];
auto done = work(save_in(res_alloca.val));
done = rslt(done.bcx,
load_if_immediate(done.bcx, res_alloca.val, tp));
ret done;
}
}
// We pass structural values around the compiler "by pointer" and
// non-structural values (scalars, boxes, pointers) "by value". We call the
// latter group "immediates" and, in some circumstances when we know we have a
// pointer (or need one), perform load/store operations based on the
// immediate-ness of the type.
fn type_is_immediate(&@crate_ctxt ccx, &ty::t t) -> bool {
ret ty::type_is_scalar(ccx.tcx, t) || ty::type_is_boxed(ccx.tcx, t) ||
ty::type_is_native(ccx.tcx, t);
}
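// For instance, an int or a box travels as a bare LLVM value, while a
// rec or tup is always handled through a pointer into memory; do_spill
// and load_if_immediate below convert between the two views when the
// other one is needed.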
fn do_spill(&@block_ctxt cx, ValueRef v) -> ValueRef {
// We have a value but we have to spill it to pass by alias.
auto llptr = alloca(cx, val_ty(v));
cx.build.Store(v, llptr);
ret llptr;
}
fn spill_if_immediate(&@block_ctxt cx, ValueRef v, &ty::t t) -> ValueRef {
if (type_is_immediate(cx.fcx.lcx.ccx, t)) { ret do_spill(cx, v); }
ret v;
}
fn load_if_immediate(&@block_ctxt cx, ValueRef v, &ty::t t) -> ValueRef {
if (type_is_immediate(cx.fcx.lcx.ccx, t)) { ret cx.build.Load(v); }
ret v;
}
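// Each module that logs gets an internal 'loglevel' global (set by the
// runtime from RUST_LOG); a log expression loads that global, compares
// it against the statement's level, and branches around the logging
// code when the level is too low.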
fn trans_log(int lvl, &@block_ctxt cx, &@ast::expr e) -> result {
auto lcx = cx.fcx.lcx;
auto modname = str::connect(lcx.module_path, "::");
auto global;
if (lcx.ccx.module_data.contains_key(modname)) {
global = lcx.ccx.module_data.get(modname);
} else {
auto s =
link::mangle_internal_name_by_path_and_seq(lcx.ccx,
lcx.module_path,
"loglevel");
global = llvm::LLVMAddGlobal(lcx.ccx.llmod, T_int(), str::buf(s));
llvm::LLVMSetGlobalConstant(global, False);
llvm::LLVMSetInitializer(global, C_null(T_int()));
llvm::LLVMSetLinkage(global,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
lcx.ccx.module_data.insert(modname, global);
}
auto log_cx = new_scope_block_ctxt(cx, "log");
auto after_cx = new_sub_block_ctxt(cx, "after");
auto load = cx.build.Load(global);
auto test = cx.build.ICmp(lib::llvm::LLVMIntSGE, load, C_int(lvl));
cx.build.CondBr(test, log_cx.llbb, after_cx.llbb);
auto sub = trans_expr(log_cx, e);
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto log_bcx = sub.bcx;
if (ty::type_is_fp(cx.fcx.lcx.ccx.tcx, e_ty)) {
let TypeRef tr;
let bool is32bit = false;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, e_ty)) {
case (ty::ty_machine(ast::ty_f32)) {
tr = T_f32();
is32bit = true;
}
case (ty::ty_machine(ast::ty_f64)) { tr = T_f64(); }
case (_) { tr = T_float(); }
}
if (is32bit) {
log_bcx.build.Call(log_bcx.fcx.lcx.ccx.upcalls.log_float,
[log_bcx.fcx.lltaskptr, C_int(lvl), sub.val]);
} else {
// FIXME: Eliminate this level of indirection.
auto tmp = alloca(log_bcx, tr);
sub.bcx.build.Store(sub.val, tmp);
log_bcx.build.Call(log_bcx.fcx.lcx.ccx.upcalls.log_double,
[log_bcx.fcx.lltaskptr, C_int(lvl), tmp]);
}
} else if (ty::type_is_integral(cx.fcx.lcx.ccx.tcx, e_ty) ||
ty::type_is_bool(cx.fcx.lcx.ccx.tcx, e_ty)) {
// FIXME: Handle signedness properly.
auto llintval =
int_cast(log_bcx, T_int(), val_ty(sub.val), sub.val, false);
log_bcx.build.Call(log_bcx.fcx.lcx.ccx.upcalls.log_int,
[log_bcx.fcx.lltaskptr, C_int(lvl), llintval]);
} else {
alt (ty::struct(cx.fcx.lcx.ccx.tcx, e_ty)) {
case (ty::ty_str) {
log_bcx.build.Call(log_bcx.fcx.lcx.ccx.upcalls.log_str,
[log_bcx.fcx.lltaskptr, C_int(lvl),
sub.val]);
}
case (_) {
// FIXME: Support these types.
cx.fcx.lcx.ccx.sess.span_fatal(e.span,
"log called on unsupported type "
+
ty_to_str(cx.fcx.lcx.ccx.tcx,
e_ty));
}
}
}
log_bcx = trans_block_cleanups(log_bcx, log_cx);
log_bcx.build.Br(after_cx.llbb);
ret rslt(after_cx, C_nil());
}
fn trans_check_expr(&@block_ctxt cx, &@ast::expr e, &str s) -> result {
auto cond_res = trans_expr(cx, e);
auto expr_str = s + " " + expr_to_str(e) + " failed";
auto fail_cx = new_sub_block_ctxt(cx, "fail");
trans_fail(fail_cx, some[span](e.span), expr_str);
auto next_cx = new_sub_block_ctxt(cx, "next");
cond_res.bcx.build.CondBr(cond_res.val, next_cx.llbb, fail_cx.llbb);
ret rslt(next_cx, C_nil());
}
fn trans_fail_expr(&@block_ctxt cx, &option::t[span] sp_opt,
&option::t[@ast::expr] fail_expr)
-> result {
auto bcx = cx;
alt (fail_expr) {
case (some(?expr)) {
auto tcx = bcx.fcx.lcx.ccx.tcx;
auto expr_res = trans_expr(bcx, expr);
auto e_ty = ty::expr_ty(tcx, expr);
bcx = expr_res.bcx;
if (ty::type_is_str(tcx, e_ty)) {
auto elt = bcx.build.GEP(expr_res.val,
[C_int(0), C_int(abi::vec_elt_data)]);
ret trans_fail_value(bcx, sp_opt, elt);
} else {
cx.fcx.lcx.ccx.sess.span_bug(expr.span,
"fail called with unsupported \
type " + ty_to_str(tcx, e_ty));
}
}
case (_) {
ret trans_fail(bcx, sp_opt, "explicit failure");
}
}
}
fn trans_fail(&@block_ctxt cx, &option::t[span] sp_opt, &str fail_str)
-> result {
auto V_fail_str = C_cstr(cx.fcx.lcx.ccx, fail_str);
ret trans_fail_value(cx, sp_opt, V_fail_str);
}
fn trans_fail_value(&@block_ctxt cx, &option::t[span] sp_opt,
&ValueRef V_fail_str)
-> result {
auto V_filename;
auto V_line;
alt (sp_opt) {
case (some(?sp)) {
auto loc = cx.fcx.lcx.ccx.sess.lookup_pos(sp.lo);
V_filename = C_cstr(cx.fcx.lcx.ccx, loc.filename);
V_line = loc.line as int;
}
case (none) {
V_filename = C_cstr(cx.fcx.lcx.ccx, "<runtime>");
V_line = 0;
}
}
auto V_str = cx.build.PointerCast(V_fail_str, T_ptr(T_i8()));
V_filename = cx.build.PointerCast(V_filename, T_ptr(T_i8()));
auto args = [cx.fcx.lltaskptr, V_str, V_filename, C_int(V_line)];
cx.build.Call(cx.fcx.lcx.ccx.upcalls._fail, args);
cx.build.Unreachable();
ret rslt(cx, C_nil());
}
fn trans_put(&@block_ctxt cx, &option::t[@ast::expr] e) -> result {
auto llcallee = C_nil();
auto llenv = C_nil();
alt ({ cx.fcx.lliterbody }) {
case (some(?lli)) {
auto slot = alloca(cx, val_ty(lli));
cx.build.Store(lli, slot);
llcallee =
cx.build.GEP(slot, [C_int(0), C_int(abi::fn_field_code)]);
llcallee = cx.build.Load(llcallee);
llenv = cx.build.GEP(slot, [C_int(0), C_int(abi::fn_field_box)]);
llenv = cx.build.Load(llenv);
}
}
auto bcx = cx;
auto dummy_retslot = alloca(bcx, T_nil());
let vec[ValueRef] llargs = [dummy_retslot, cx.fcx.lltaskptr, llenv];
alt (e) {
case (none) { }
case (some(?x)) {
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, x);
auto arg = rec(mode=ty::mo_alias(false), ty=e_ty);
auto arg_tys =
type_of_explicit_args(cx.fcx.lcx.ccx, x.span, ~[arg]);
auto r = trans_arg_expr(bcx, arg, arg_tys.(0), x);
bcx = r.bcx;
llargs += [r.val];
}
}
ret rslt(bcx, bcx.build.FastCall(llcallee, llargs));
}
fn trans_break_cont(&span sp, &@block_ctxt cx, bool to_end) -> result {
auto bcx = cx;
// Locate closest loop block, outputting cleanup as we go.
auto cleanup_cx = cx;
while (true) {
bcx = trans_block_cleanups(bcx, cleanup_cx);
alt ({ cleanup_cx.kind }) {
case (LOOP_SCOPE_BLOCK(?_cont, ?_break)) {
if (to_end) {
bcx.build.Br(_break.llbb);
} else {
alt (_cont) {
case (option::some(?_cont)) {
bcx.build.Br(_cont.llbb);
}
case (_) { bcx.build.Br(cleanup_cx.llbb); }
}
}
ret rslt(new_sub_block_ctxt(bcx, "break_cont.unreachable"),
C_nil());
}
case (_) {
alt ({ cleanup_cx.parent }) {
case (parent_some(?cx)) { cleanup_cx = cx; }
case (parent_none) {
cx.fcx.lcx.ccx.sess.span_fatal(sp,
if (to_end) {
"Break"
} else { "Cont" } +
" outside a loop");
}
}
}
}
}
// If we get here without returning, it's a bug
cx.fcx.lcx.ccx.sess.bug("in trans::trans_break_cont()");
}
fn trans_break(&span sp, &@block_ctxt cx) -> result {
ret trans_break_cont(sp, cx, true);
}
fn trans_cont(&span sp, &@block_ctxt cx) -> result {
ret trans_break_cont(sp, cx, false);
}
fn trans_ret(&@block_ctxt cx, &option::t[@ast::expr] e) -> result {
auto bcx = cx;
auto val = C_nil();
alt (e) {
case (some(?x)) {
auto t = ty::expr_ty(cx.fcx.lcx.ccx.tcx, x);
auto r = trans_expr(cx, x);
bcx = r.bcx;
val = r.val;
bcx = copy_val(bcx, INIT, cx.fcx.llretptr, val, t).bcx;
}
case (_) {
auto t = llvm::LLVMGetElementType(val_ty(cx.fcx.llretptr));
auto null = lib::llvm::llvm::LLVMConstNull(t);
bcx.build.Store(null, cx.fcx.llretptr);
}
}
// run all cleanups and back out.
let bool more_cleanups = true;
auto cleanup_cx = cx;
while (more_cleanups) {
bcx = trans_block_cleanups(bcx, cleanup_cx);
alt ({ cleanup_cx.parent }) {
case (parent_some(?b)) { cleanup_cx = b; }
case (parent_none) { more_cleanups = false; }
}
}
bcx.build.RetVoid();
ret rslt(new_sub_block_ctxt(bcx, "ret.unreachable"), C_nil());
}
fn trans_be(&@block_ctxt cx, &@ast::expr e) -> result {
// FIXME: This should be a typestate precondition
assert (ast::is_call_expr(e));
// FIXME: Turn this into a real tail call once
// calling convention issues are settled
ret trans_ret(cx, some(e));
}
fn trans_port(&@block_ctxt cx, ast::node_id id) -> result {
auto t = node_id_type(cx.fcx.lcx.ccx, id);
auto unit_ty;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, t)) {
case (ty::ty_port(?t)) { unit_ty = t; }
case (_) { cx.fcx.lcx.ccx.sess.bug("non-port type in trans_port"); }
}
auto bcx = cx;
auto unit_sz = size_of(bcx, unit_ty);
bcx = unit_sz.bcx;
auto port_raw_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.new_port,
[bcx.fcx.lltaskptr, unit_sz.val]);
auto llty = type_of(cx.fcx.lcx.ccx, cx.sp, t);
auto port_val = bcx.build.PointerCast(port_raw_val, llty);
auto dropref = clean(bind drop_ty(_, port_val, t));
find_scope_cx(bcx).cleanups += [dropref];
ret rslt(bcx, port_val);
}
fn trans_chan(&@block_ctxt cx, &@ast::expr e, ast::node_id id) -> result {
auto bcx = cx;
auto prt = trans_expr(bcx, e);
bcx = prt.bcx;
auto prt_val = bcx.build.PointerCast(prt.val, T_opaque_port_ptr());
auto chan_raw_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.new_chan,
[bcx.fcx.lltaskptr, prt_val]);
auto chan_ty = node_id_type(bcx.fcx.lcx.ccx, id);
auto chan_llty = type_of(bcx.fcx.lcx.ccx, e.span, chan_ty);
auto chan_val = bcx.build.PointerCast(chan_raw_val, chan_llty);
auto dropref = clean(bind drop_ty(_, chan_val, chan_ty));
find_scope_cx(bcx).cleanups += [dropref];
ret rslt(bcx, chan_val);
}
fn trans_spawn(&@block_ctxt cx, &ast::spawn_dom dom, &option::t[str] name,
&@ast::expr func, &vec[@ast::expr] args, ast::node_id id) ->
result {
auto bcx = cx;
// Make the task name
auto tname =
alt (name) {
case (none) {
auto argss = vec::map(expr_to_str, args);
#fmt("%s(%s)", expr_to_str(func), str::connect(argss, ", "))
}
case (some(?n)) { n }
};
// Generate code
//
// This is a several step process. The following things need to happen
// (not necessarily in order):
//
// 1. Evaluate all the arguments to the spawnee.
//
// 2. Alloca a tuple that holds these arguments (they must be in reverse
// order, so that they match the expected stack layout for the spawnee)
//
// 3. Fill the tuple with the arguments we evaluated.
//
// 3.5. Generate a wrapper function that takes the tuple and unpacks it to
// call the real task.
//
// 4. Pass a pointer to the wrapper function and the argument tuple to
// upcall_start_task. In order to do this, we need to allocate another
// tuple that matches the arguments expected by rust_task::start.
//
    // 5. Oh yeah, we have to create the task before we can start it, so
    //    the task creation itself comes first. (A layout sketch follows
    //    this comment.)
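    // For illustration, the pieces assembled below look roughly like
    // this (a sketch; the names are descriptive, not real locals):
    //
    //   args tuple:      [arg0, arg1, ...]   (each deep-copied into the
    //                                          new task's domain)
    //   start_task call: (lltaskptr, new_task, wrapper_fn as int,
    //                     args tuple as int, size of args tuple)
    //
    // where wrapper_fn is built by mk_spawn_wrapper and unpacks the
    // tuple to FastCall the spawnee.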
let ValueRef lltname = C_str(bcx.fcx.lcx.ccx, tname);
auto new_task =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.new_task,
[bcx.fcx.lltaskptr, lltname]);
// Translate the arguments, remembering their types and where the values
// ended up.
let ty::t[] arg_tys = ~[];
let vec[ValueRef] arg_vals = [];
for (@ast::expr e in args) {
auto e_ty = ty::expr_ty(cx.fcx.lcx.ccx.tcx, e);
auto arg = trans_expr(bcx, e);
arg = deep_copy(arg.bcx, arg.val, e_ty, new_task);
bcx = arg.bcx;
vec::push[ValueRef](arg_vals, arg.val);
arg_tys += ~[e_ty];
}
// Make the tuple.
auto args_ty = ty::mk_imm_tup(cx.fcx.lcx.ccx.tcx, arg_tys);
// Allocate and fill the tuple.
auto llargs = alloc_ty(bcx, args_ty);
auto i = 0u;
for (ValueRef v in arg_vals) {
// log_err #fmt("ty(llargs) = %s",
// val_str(bcx.fcx.lcx.ccx.tn, llargs.val));
auto target = bcx.build.GEP(llargs.val, [C_int(0), C_int(i as int)]);
// log_err #fmt("ty(v) = %s", val_str(bcx.fcx.lcx.ccx.tn, v));
// log_err #fmt("ty(target) = %s",
// val_str(bcx.fcx.lcx.ccx.tn, target));
bcx.build.Store(v, target);
i += 1u;
}
// Generate the wrapper function
auto wrapper = mk_spawn_wrapper(bcx, func, args_ty);
bcx = wrapper.bcx;
auto llfnptr_i = bcx.build.PointerCast(wrapper.val, T_int());
// And start the task
auto llargs_i = bcx.build.PointerCast(llargs.val, T_int());
auto args_size = size_of(bcx, args_ty).val;
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.start_task,
[bcx.fcx.lltaskptr, new_task, llfnptr_i, llargs_i,
args_size]);
auto task_ty = node_id_type(bcx.fcx.lcx.ccx, id);
auto dropref = clean(bind drop_ty(_, new_task, task_ty));
find_scope_cx(bcx).cleanups += [dropref];
ret rslt(bcx, new_task);
}
fn mk_spawn_wrapper(&@block_ctxt cx, &@ast::expr func, &ty::t args_ty) ->
result {
auto llmod = cx.fcx.lcx.ccx.llmod;
let TypeRef wrapper_fn_type =
type_of_fn(cx.fcx.lcx.ccx, cx.sp, ast::proto_fn,
~[rec(mode=ty::mo_alias(false), ty=args_ty)], ty::idx_nil,
0u);
// TODO: construct a name based on tname
let str wrap_name =
mangle_internal_name_by_path_and_seq(cx.fcx.lcx.ccx, cx.fcx.lcx.path,
"spawn_wrapper");
auto llfndecl = decl_cdecl_fn(llmod, wrap_name, wrapper_fn_type);
auto fcx = new_fn_ctxt(cx.fcx.lcx, cx.sp, llfndecl);
auto fbcx = new_top_block_ctxt(fcx);
// 3u to skip the three implicit args
let ValueRef arg = llvm::LLVMGetParam(fcx.llfn, 3u);
let vec[ValueRef] child_args =
[llvm::LLVMGetParam(fcx.llfn, 0u), llvm::LLVMGetParam(fcx.llfn, 1u),
llvm::LLVMGetParam(fcx.llfn, 2u)];
// unpack the arguments
alt (ty::struct(fcx.lcx.ccx.tcx, args_ty)) {
case (ty::ty_tup(?elements)) {
auto i = 0;
for (ty::mt m in elements) {
auto src = fbcx.build.GEP(arg, [C_int(0), C_int(i)]);
i += 1;
auto child_arg = fbcx.build.Load(src);
child_args += [child_arg];
}
}
}
// Find the function
auto fnptr = trans_lval(fbcx, func).res;
fbcx = fnptr.bcx;
auto llfnptr = fbcx.build.GEP(fnptr.val, [C_int(0), C_int(0)]);
auto llfn = fbcx.build.Load(llfnptr);
fbcx.build.FastCall(llfn, child_args);
fbcx.build.RetVoid();
finish_fn(fcx, fbcx.llbb);
// TODO: make sure we clean up everything we need to.
ret rslt(cx, llfndecl);
}
// Does a deep copy of a value. This is needed for passing arguments to child
// tasks, and for sending things through channels. There are probably some
// uniqueness optimizations and things we can do here for tasks in the same
// domain.
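// A sketch of the dispatch below: scalars are returned unchanged;
// strings are duplicated into the target task via the dup_str upcall;
// channels are cloned (and retargeted) via the clone_chan upcall;
// structural types are walked member-by-member via iter_structural_ty,
// though the member callback is still unimplemented and fails; anything
// else is a compiler bug.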
fn deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t, ValueRef target_task)
-> result
{
// TODO: make sure all paths add any reference counting that they need to.
// TODO: Teach deep copy to understand everything else it needs to.
auto tcx = bcx.fcx.lcx.ccx.tcx;
if(ty::type_is_scalar(tcx, t)) {
ret rslt(bcx, v);
}
else if(ty::type_is_str(tcx, t)) {
ret rslt(bcx,
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.dup_str,
[bcx.fcx.lltaskptr, target_task, v]));
}
else if(ty::type_is_chan(tcx, t)) {
// If this is a channel, we need to clone it.
auto chan_ptr = bcx.build.PointerCast(v, T_opaque_chan_ptr());
auto chan_raw_val =
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.clone_chan,
[bcx.fcx.lltaskptr, target_task, chan_ptr]);
// Cast back to the type the context was expecting.
auto chan_val = bcx.build.PointerCast(chan_raw_val,
val_ty(v));
ret rslt(bcx, chan_val);
}
else if(ty::type_is_structural(tcx, t)) {
fn inner_deep_copy(&@block_ctxt bcx, ValueRef v, ty::t t) -> result {
log_err "Unimplemented type for deep_copy.";
fail;
}
ret iter_structural_ty(bcx, v, t, inner_deep_copy);
}
else {
bcx.fcx.lcx.ccx.sess.bug("unexpected type in " +
"trans::deep_copy: " +
ty_to_str(tcx, t));
}
}
fn trans_send(&@block_ctxt cx, &@ast::expr lhs, &@ast::expr rhs,
ast::node_id id) -> result {
auto bcx = cx;
auto chn = trans_expr(bcx, lhs);
bcx = chn.bcx;
auto data = trans_expr(bcx, rhs);
bcx = data.bcx;
auto chan_ty = node_id_type(cx.fcx.lcx.ccx, id);
auto unit_ty;
alt (ty::struct(cx.fcx.lcx.ccx.tcx, chan_ty)) {
case (ty::ty_chan(?t)) { unit_ty = t; }
case (_) { bcx.fcx.lcx.ccx.sess.bug("non-chan type in trans_send"); }
}
auto data_alloc = alloc_ty(bcx, unit_ty);
bcx = data_alloc.bcx;
auto data_tmp = copy_val(bcx, INIT, data_alloc.val, data.val, unit_ty);
bcx = data_tmp.bcx;
find_scope_cx(bcx).cleanups +=
[clean(bind drop_ty(_, data_alloc.val, unit_ty))];
auto llchanval = bcx.build.PointerCast(chn.val, T_opaque_chan_ptr());
auto lldataptr = bcx.build.PointerCast(data_alloc.val, T_ptr(T_i8()));
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.send,
[bcx.fcx.lltaskptr, llchanval, lldataptr]);
ret rslt(bcx, chn.val);
}
fn trans_recv(&@block_ctxt cx, &@ast::expr lhs, &@ast::expr rhs,
ast::node_id id) -> result {
auto bcx = cx;
auto data = trans_lval(bcx, rhs);
assert (data.is_mem);
bcx = data.res.bcx;
auto unit_ty = node_id_type(bcx.fcx.lcx.ccx, id);
// FIXME: calculate copy init-ness in typestate.
ret recv_val(bcx, data.res.val, lhs, unit_ty, DROP_EXISTING);
}
fn recv_val(&@block_ctxt cx, ValueRef to, &@ast::expr from, &ty::t unit_ty,
copy_action action) -> result {
auto bcx = cx;
auto prt = trans_expr(bcx, from);
bcx = prt.bcx;
auto lldataptr = bcx.build.PointerCast(to, T_ptr(T_ptr(T_i8())));
auto llportptr = bcx.build.PointerCast(prt.val, T_opaque_port_ptr());
bcx.build.Call(bcx.fcx.lcx.ccx.upcalls.recv,
[bcx.fcx.lltaskptr, lldataptr, llportptr]);
auto data_load = load_if_immediate(bcx, to, unit_ty);
auto cp = copy_val(bcx, action, to, data_load, unit_ty);
bcx = cp.bcx;
// TODO: Any cleanup need to be done here?
ret rslt(bcx, to);
}
/*
Suppose we create an anonymous object my_b from a regular object a:
obj a() {
fn foo() -> int {
ret 2;
}
fn bar() -> int {
ret self.foo();
}
}
auto my_a = a();
auto my_b = obj { fn baz() -> int { ret self.foo() } with my_a };
Here we're extending the my_a object with an additional method baz, creating
an object my_b. Since it's an object, my_b is a pair of a vtable pointer and
a body pointer:
my_b: [vtbl* | body*]
my_b's vtable has entries for foo, bar, and baz, whereas my_a's vtable has
only foo and bar. my_b's 3-entry vtable consists of two forwarding functions
and one real method.
my_b's body just contains the pair a: [ a_vtable | a_body ], wrapped up with
any additional fields that my_b added. None were added, so my_b is just the
wrapped inner object.
*/
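/*
A sketch of my_b's resulting vtable (slot 0 holds the destructor
pointer, null when there is no dtor, and create_vtbl below sorts the
methods by name):
my_b vtbl: [dtor | bar (forwarding) | baz (normal) | foo (forwarding)]
bar and foo forward to my_a's implementations; baz is translated
directly as a normal method.
*/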
// trans_anon_obj: create and return a pointer to an object. This code
// differs from trans_obj in that, rather than creating an object constructor
// function and putting it in the generated code as an object item, we are
// instead "inlining" the construction of the object and returning the object
// itself.
fn trans_anon_obj(@block_ctxt bcx, &span sp, &ast::anon_obj anon_obj,
&vec[ast::ty_param] ty_params, ast::node_id id) -> result {
// Right now, we're assuming that anon objs don't take ty params, even
// though the AST supports it. It's nonsensical to write an expression
// like "obj[T](){ ... with ... }", since T is never instantiated;
// nevertheless, such an expression will parse. Idea for the future:
// support typarams.
assert (vec::len(ty_params) == 0u);
auto ccx = bcx.fcx.lcx.ccx;
// Fields.
// FIXME (part of issue #538): Where do we fill in the field *values* from
// the outer object?
let vec[ast::anon_obj_field] additional_fields = [];
let vec[result] additional_field_vals = [];
let ty::t[] additional_field_tys = ~[];
alt (anon_obj.fields) {
case (none) { }
case (some(?fields)) {
additional_fields = fields;
for (ast::anon_obj_field f in fields) {
additional_field_tys += ~[node_id_type(ccx, f.id)];
additional_field_vals += [trans_expr(bcx, f.expr)];
}
}
}
// Get the type of the eventual entire anonymous object, possibly with
// extensions. NB: This type includes both inner and outer methods.
auto outer_obj_ty = ty::node_id_to_type(ccx.tcx, id);
auto llouter_obj_ty = type_of(ccx, sp, outer_obj_ty);
// Create a vtable for the anonymous object.
// create_vtbl() wants an ast::_obj and all we have is an ast::anon_obj,
// so we need to roll our own.
fn anon_obj_field_to_obj_field(&ast::anon_obj_field f)
-> ast::obj_field {
ret rec(mut=f.mut, ty=f.ty, ident=f.ident, id=f.id);
}
let ast::_obj wrapper_obj = rec(
fields = vec::map(anon_obj_field_to_obj_field,
additional_fields),
methods = anon_obj.methods,
dtor = none[@ast::method]);
let result with_obj_val;
let ty::t with_obj_ty;
auto vtbl;
alt (anon_obj.with_obj) {
case (none) {
// If there's no with_obj -- that is, if we're just adding new
// fields rather than extending an existing object -- then we just
// pass the outer object to create_vtbl(). Our vtable won't need
// to have any forwarding slots.
// We need a dummy with_obj_ty for setting up the object body
// later.
with_obj_ty = ty::mk_type(ccx.tcx);
// This seems a little strange, because it'll come into
// create_vtbl() with no "additional methods". What's happening
// is that, since *all* of the methods are "additional", we can
// get away with acting like none of them are.
vtbl = create_vtbl(bcx.fcx.lcx, llouter_obj_ty, outer_obj_ty,
wrapper_obj, ty_params, none,
additional_field_tys);
}
case (some(?e)) {
// If with_obj (the object being extended) exists, translate it.
// Translating with_obj returns a ValueRef (pointer to a 2-word
// value) wrapped in a result.
with_obj_val = trans_expr(bcx, e);
// TODO: What makes more sense to get the type of an expr --
// calling ty::expr_ty(ccx.tcx, e) on it or calling
// ty::node_id_to_type(ccx.tcx, id) on its id?
with_obj_ty = ty::expr_ty(ccx.tcx, e);
//with_obj_ty = ty::node_id_to_type(ccx.tcx, e.id);
// If there's a with_obj, we pass its type along to create_vtbl().
// Part of what create_vtbl() will do is take the set difference
// of methods defined on the original and methods being added.
// For every method defined on the original that does *not* have
// one with a matching name and type being added, we'll need to
// create a forwarding slot. And, of course, we need to create a
// normal vtable entry for every method being added.
vtbl = create_vtbl(bcx.fcx.lcx, llouter_obj_ty, outer_obj_ty,
wrapper_obj, ty_params,
some(with_obj_ty),
additional_field_tys);
}
}
// Allocate the object that we're going to return. It's a two-word pair
// containing a vtable pointer and a body pointer.
auto pair =
alloca(bcx,
T_struct([val_ty(vtbl),
T_obj_ptr(ccx.tn,
vec::len[ast::ty_param](ty_params))]));
// Take care of cleanups.
auto t = node_id_type(ccx, id);
find_scope_cx(bcx).cleanups += [clean(bind drop_ty(_, pair, t))];
// Grab onto the first and second elements of the pair.
// abi::obj_field_vtbl and abi::obj_field_box simply specify words 0 and 1
// of 'pair'.
auto pair_vtbl =
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
auto pair_box =
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
bcx.build.Store(vtbl, pair_vtbl);
// Next we have to take care of the other half of the pair we're
// returning: a boxed (reference-counted) tuple containing a tydesc,
// typarams, fields, and a pointer to our with_obj.
let TypeRef llbox_ty = T_opaque_obj_ptr(ccx.tn);
if (vec::len[ast::ty_param](ty_params) == 0u &&
vec::len[ast::anon_obj_field](additional_fields) == 0u &&
anon_obj.with_obj == none) {
// If the object we're translating has no fields or type parameters
// and no with_obj, there's not much to do.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
// Synthesize a tuple type for fields: [field, ...]
let ty::t fields_ty = ty::mk_imm_tup(ccx.tcx, additional_field_tys);
// Tydescs are run-time instantiations of typarams. We're not
// actually supporting typarams for anon objs yet, but let's
// create space for them in case we ever want them.
let ty::t tydesc_ty = ty::mk_type(ccx.tcx);
let ty::t[] tps = ~[];
for (ast::ty_param tp in ty_params) {
tps += ~[tydesc_ty];
}
// Synthesize a tuple type for typarams: [typaram, ...]
let ty::t typarams_ty = ty::mk_imm_tup(ccx.tcx, tps);
// Tuple type for body:
// [tydesc_ty, [typaram, ...], [field, ...], with_obj]
let ty::t body_ty =
ty::mk_imm_tup(ccx.tcx, ~[tydesc_ty, typarams_ty,
fields_ty, with_obj_ty]);
// Hand this type we've synthesized off to trans_malloc_boxed, which
// allocates a box, including space for a refcount.
auto box = trans_malloc_boxed(bcx, body_ty);
bcx = box.bcx;
// mk_imm_box throws a refcount into the type we're synthesizing,
// so that it looks like:
// [rc, [tydesc_ty, [typaram, ...], [field, ...], with_obj]]
let ty::t boxed_body_ty = ty::mk_imm_box(ccx.tcx, body_ty);
// Grab onto the refcount and body parts of the box we allocated.
auto rc =
GEP_tup_like(bcx, boxed_body_ty, box.val,
[0, abi::box_rc_field_refcnt]);
bcx = rc.bcx;
auto body =
GEP_tup_like(bcx, boxed_body_ty, box.val,
[0, abi::box_rc_field_body]);
bcx = body.bcx;
bcx.build.Store(C_int(1), rc.val);
// Put together a tydesc for the body, so that the object can later be
// freed by calling through its tydesc.
// Every object (not just those with type parameters) needs to have a
// tydesc to describe its body, since all objects have unknown type to
// the user of the object. So the tydesc is needed to keep track of
// the types of the object's fields, so that the fields can be freed
// later.
auto body_tydesc =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_tydesc]);
bcx = body_tydesc.bcx;
auto ti = none[@tydesc_info];
auto body_td = get_tydesc(bcx, body_ty, true, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
bcx = body_td.bcx;
bcx.build.Store(body_td.val, body_tydesc.val);
// Copy the object's type parameters and fields into the space we
// allocated for the object body. (This is something like saving the
// lexical environment of a function in its closure: the "captured
// typarams" are any type parameters that are passed to the object
// constructor and are then available to the object's methods.
// Likewise for the object's fields.)
// Copy typarams into captured typarams.
auto body_typarams =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_typarams]);
bcx = body_typarams.bcx;
let int i = 0;
for (ast::ty_param tp in ty_params) {
auto typaram = bcx.fcx.lltydescs.(i);
auto capture =
GEP_tup_like(bcx, typarams_ty, body_typarams.val, [0, i]);
bcx = capture.bcx;
bcx = copy_val(bcx, INIT, capture.val, typaram,
tydesc_ty).bcx;
i += 1;
}
// Copy additional fields into the object's body.
auto body_fields =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_fields]);
bcx = body_fields.bcx;
i = 0;
for (ast::anon_obj_field f in additional_fields) {
            // FIXME (part of issue #538): make this work eventually, when we
            // have additional field exprs in the AST. (The result of this
            // load is currently unused; the copy_val below does the real
            // work.)
            load_if_immediate(
                bcx,
                additional_field_vals.(i).val,
                additional_field_tys.(i));
            // NB: additional_field_tys is a vec of ty::t (unlike arg_tys,
            // whose elements are ty::arg records of mode and t), so it can
            // be indexed directly for each field's type.
auto field =
GEP_tup_like(bcx, fields_ty, body_fields.val, [0, i]);
bcx = field.bcx;
bcx = copy_val(bcx, INIT, field.val,
additional_field_vals.(i).val,
additional_field_tys.(i)).bcx;
i += 1;
}
// Copy a pointer to the with_obj into the object's body.
auto body_with_obj =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_with_obj]);
bcx = body_with_obj.bcx;
bcx = copy_val(bcx, INIT, body_with_obj.val,
with_obj_val.val,
with_obj_ty).bcx;
// Store box ptr in outer pair.
auto p = bcx.build.PointerCast(box.val, llbox_ty);
bcx.build.Store(p, pair_box);
}
// Cast the final object to how we want its type to appear.
pair = bcx.build.PointerCast(pair, T_ptr(llouter_obj_ty));
// Return the object we built.
ret rslt(bcx, pair);
}
fn init_local(&@block_ctxt cx, &@ast::local local) -> result {
// Make a note to drop this slot on the way out.
assert (cx.fcx.lllocals.contains_key(local.node.id));
auto llptr = cx.fcx.lllocals.get(local.node.id);
auto ty = node_id_type(cx.fcx.lcx.ccx, local.node.id);
auto bcx = cx;
find_scope_cx(cx).cleanups += [clean(bind drop_slot(_, llptr, ty))];
alt (local.node.init) {
case (some(?init)) {
alt (init.op) {
case (ast::init_assign) {
// Use the type of the RHS because if it's _|_, the LHS
// type might be something else, but we don't want to copy
// the value.
ty =
node_id_type(cx.fcx.lcx.ccx, init.expr.id);
auto sub = trans_expr(bcx, init.expr);
bcx = copy_val(sub.bcx, INIT, llptr, sub.val, ty).bcx;
}
case (ast::init_move) {
auto sub = trans_lval(bcx, init.expr);
bcx =
move_val(sub.res.bcx, INIT, llptr, sub.res.val,
ty).bcx;
}
case (ast::init_recv) {
bcx = recv_val(bcx, llptr, init.expr, ty, INIT).bcx;
}
}
}
case (_) { bcx = zero_alloca(bcx, llptr, ty).bcx; }
}
ret rslt(bcx, llptr);
}
fn zero_alloca(&@block_ctxt cx, ValueRef llptr, ty::t t) -> result {
auto bcx = cx;
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
auto llsz = size_of(bcx, t);
auto llalign = align_of(llsz.bcx, t);
bcx = call_bzero(llalign.bcx, llptr, llsz.val, llalign.val).bcx;
} else {
auto llty = type_of(bcx.fcx.lcx.ccx, cx.sp, t);
auto null = lib::llvm::llvm::LLVMConstNull(llty);
bcx.build.Store(null, llptr);
}
ret rslt(bcx, llptr);
}
fn trans_stmt(&@block_ctxt cx, &ast::stmt s) -> result {
// FIXME Fill in cx.sp
auto bcx = cx;
alt (s.node) {
case (ast::stmt_expr(?e, _)) { bcx = trans_expr(cx, e).bcx; }
case (ast::stmt_decl(?d, _)) {
alt (d.node) {
case (ast::decl_local(?local)) {
bcx = init_local(bcx, local).bcx;
}
case (ast::decl_item(?i)) { trans_item(cx.fcx.lcx, *i); }
}
}
case (_) { cx.fcx.lcx.ccx.sess.unimpl("stmt variant"); }
}
ret rslt(bcx, C_nil());
}
fn new_builder(BasicBlockRef llbb) -> builder {
let BuilderRef llbuild = llvm::LLVMCreateBuilder();
llvm::LLVMPositionBuilderAtEnd(llbuild, llbb);
ret builder(llbuild, @mutable false);
}
// You probably don't want to use this one. See the
// next four functions instead.
fn new_block_ctxt(&@fn_ctxt cx, &block_parent parent, block_kind kind,
&str name) -> @block_ctxt {
let vec[cleanup] cleanups = [];
auto s = str::buf("");
if (cx.lcx.ccx.sess.get_opts().save_temps ||
cx.lcx.ccx.sess.get_opts().debuginfo) {
s = str::buf(cx.lcx.ccx.names.next(name));
}
let BasicBlockRef llbb = llvm::LLVMAppendBasicBlock(cx.llfn, s);
ret @rec(llbb=llbb,
build=new_builder(llbb),
parent=parent,
kind=kind,
mutable cleanups=cleanups,
sp=cx.sp,
fcx=cx);
}
// Use this when you're at the top block of a function or the like.
fn new_top_block_ctxt(&@fn_ctxt fcx) -> @block_ctxt {
ret new_block_ctxt(fcx, parent_none, SCOPE_BLOCK, "function top level");
}
// Use this when you're at a curly-brace or similar lexical scope.
fn new_scope_block_ctxt(&@block_ctxt bcx, &str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx), SCOPE_BLOCK, n);
}
fn new_loop_scope_block_ctxt(&@block_ctxt bcx, &option::t[@block_ctxt] _cont,
&@block_ctxt _break, &str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx),
LOOP_SCOPE_BLOCK(_cont, _break), n);
}
// Use this when you're making a general CFG BB within a scope.
fn new_sub_block_ctxt(&@block_ctxt bcx, &str n) -> @block_ctxt {
ret new_block_ctxt(bcx.fcx, parent_some(bcx), NON_SCOPE_BLOCK, n);
}
fn new_raw_block_ctxt(&@fn_ctxt fcx, BasicBlockRef llbb) -> @block_ctxt {
let vec[cleanup] cleanups = [];
ret @rec(llbb=llbb,
build=new_builder(llbb),
parent=parent_none,
kind=NON_SCOPE_BLOCK,
mutable cleanups=cleanups,
sp=fcx.sp,
fcx=fcx);
}
// trans_block_cleanups: Go through all the cleanups attached to this
// block_ctxt and execute them.
//
// When translating a block that introduces new variables during its scope, we
// need to make sure those variables go out of scope when the block ends. We
// do that by running a 'cleanup' function for each variable.
// trans_block_cleanups runs all the cleanup functions for the block.
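//
// For example (a sketch): in a scope like
//
//     { auto x = f(); auto y = g(); ... }
//
// init_local appends a drop cleanup for x and then one for y; on the
// way out we run them back-to-front, so y is dropped before x.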
fn trans_block_cleanups(&@block_ctxt cx, &@block_ctxt cleanup_cx) ->
@block_ctxt {
auto bcx = cx;
if (cleanup_cx.kind == NON_SCOPE_BLOCK) {
assert (vec::len[cleanup](cleanup_cx.cleanups) == 0u);
}
auto i = vec::len[cleanup](cleanup_cx.cleanups);
while (i > 0u) {
i -= 1u;
auto c = cleanup_cx.cleanups.(i);
alt (c) { case (clean(?cfn)) { bcx = cfn(bcx).bcx; } }
}
ret bcx;
}
iter block_locals(&ast::block b) -> @ast::local {
// FIXME: putting from inside an iter block doesn't work, so we can't
// use the index here.
for (@ast::stmt s in b.node.stmts) {
alt (s.node) {
case (ast::stmt_decl(?d, _)) {
alt (d.node) {
case (ast::decl_local(?local)) { put local; }
case (_) {/* fall through */ }
}
}
case (_) {/* fall through */ }
}
}
}
fn llstaticallocas_block_ctxt(&@fn_ctxt fcx) -> @block_ctxt {
let vec[cleanup] cleanups = [];
ret @rec(llbb=fcx.llstaticallocas,
build=new_builder(fcx.llstaticallocas),
parent=parent_none,
kind=SCOPE_BLOCK,
mutable cleanups=cleanups,
sp=fcx.sp,
fcx=fcx);
}
fn llderivedtydescs_block_ctxt(&@fn_ctxt fcx) -> @block_ctxt {
let vec[cleanup] cleanups = [];
ret @rec(llbb=fcx.llderivedtydescs,
build=new_builder(fcx.llderivedtydescs),
parent=parent_none,
kind=SCOPE_BLOCK,
mutable cleanups=cleanups,
sp=fcx.sp,
fcx=fcx);
}
fn lldynamicallocas_block_ctxt(&@fn_ctxt fcx) -> @block_ctxt {
let vec[cleanup] cleanups = [];
ret @rec(llbb=fcx.lldynamicallocas,
build=new_builder(fcx.lldynamicallocas),
parent=parent_none,
kind=SCOPE_BLOCK,
mutable cleanups=cleanups,
sp=fcx.sp,
fcx=fcx);
}
fn alloc_ty(&@block_ctxt cx, &ty::t t) -> result {
auto val = C_int(0);
if (ty::type_has_dynamic_size(cx.fcx.lcx.ccx.tcx, t)) {
// NB: we have to run this particular 'size_of' in a
// block_ctxt built on the llderivedtydescs block for the fn,
// so that the size dominates the array_alloca that
// comes next.
auto n = size_of(llderivedtydescs_block_ctxt(cx.fcx), t);
cx.fcx.llderivedtydescs = n.bcx.llbb;
val = array_alloca(cx, T_i8(), n.val);
} else { val = alloca(cx, type_of(cx.fcx.lcx.ccx, cx.sp, t)); }
// NB: since we've pushed all size calculations in this
// function up to the alloca block, we actually return the
// block passed into us unmodified; it doesn't really
// have to be passed-and-returned here, but it fits
// past caller conventions and may well make sense again,
// so we leave it as-is.
ret rslt(cx, val);
}
fn alloc_local(&@block_ctxt cx, &@ast::local local) -> result {
auto t = node_id_type(cx.fcx.lcx.ccx, local.node.id);
auto r = alloc_ty(cx, t);
r.bcx.fcx.lllocals.insert(local.node.id, r.val);
ret r;
}
fn trans_block(&@block_ctxt cx, &ast::block b, &out_method output) -> result {
auto bcx = cx;
for each (@ast::local local in block_locals(b)) {
// FIXME Update bcx.sp
bcx = alloc_local(bcx, local).bcx;
}
auto r = rslt(bcx, C_nil());
for (@ast::stmt s in b.node.stmts) {
r = trans_stmt(bcx, *s);
bcx = r.bcx;
// If we hit a terminator, control won't go any further so
// we're in dead-code land. Stop here.
if (is_terminated(bcx)) { ret r; }
}
fn accept_out_method(&@ast::expr expr) -> bool {
ret alt (expr.node) {
case (ast::expr_if(_, _, _)) { true }
case (ast::expr_alt(_, _)) { true }
case (ast::expr_block(_)) { true }
case (_) { false }
};
}
alt (b.node.expr) {
case (some(?e)) {
auto pass = output != return && accept_out_method(e);
if (pass) {
r = trans_expr_out(bcx, e, output);
} else { r = trans_expr(bcx, e); }
bcx = r.bcx;
auto ccx = cx.fcx.lcx.ccx;
auto r_ty = ty::expr_ty(ccx.tcx, e);
if (is_terminated(bcx) || ty::type_is_bot(ccx.tcx, r_ty)) {
ret r;
} else if (!pass) {
alt (output) {
case (save_in(?target)) {
// The output method is to save the value at target,
// and we didn't pass it to the recursive trans_expr
// call.
// FIXME Use move semantics!
auto res_copy =
copy_val(bcx, INIT, target, r.val, r_ty);
bcx = res_copy.bcx;
r = rslt(bcx, C_nil());
}
case (return) { }
}
}
}
case (none) { r = rslt(bcx, C_nil()); }
}
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
ret rslt(bcx, r.val);
}
fn new_local_ctxt(&@crate_ctxt ccx) -> @local_ctxt {
let vec[str] pth = [];
let vec[ast::ty_param] obj_typarams = [];
let vec[ast::obj_field] obj_fields = [];
ret @rec(path=pth,
module_path=[ccx.link_meta.name],
obj_typarams=obj_typarams,
obj_fields=obj_fields,
ccx=ccx);
}
// Creates the standard quartet of basic blocks: static allocas, copy args,
// derived tydescs, and dynamic allocas.
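//
// finish_fn (further below) wires these up in a straight line, so each
// function's prologue looks like (a sketch):
//
//   static_allocas -> copy_args -> derived_tydescs -> dynamic_allocas
//     -> (the function's first real block)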
fn mk_standard_basic_blocks(ValueRef llfn) ->
tup(BasicBlockRef, BasicBlockRef, BasicBlockRef, BasicBlockRef) {
ret tup(llvm::LLVMAppendBasicBlock(llfn, str::buf("static_allocas")),
llvm::LLVMAppendBasicBlock(llfn, str::buf("copy_args")),
llvm::LLVMAppendBasicBlock(llfn, str::buf("derived_tydescs")),
llvm::LLVMAppendBasicBlock(llfn, str::buf("dynamic_allocas")));
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
fn new_fn_ctxt(@local_ctxt cx, &span sp, ValueRef llfndecl) -> @fn_ctxt {
let ValueRef llretptr = llvm::LLVMGetParam(llfndecl, 0u);
let ValueRef lltaskptr = llvm::LLVMGetParam(llfndecl, 1u);
let ValueRef llenv = llvm::LLVMGetParam(llfndecl, 2u);
let hashmap[ast::node_id, ValueRef] llargs = new_int_hash[ValueRef]();
let hashmap[ast::node_id, ValueRef] llobjfields =
new_int_hash[ValueRef]();
let hashmap[ast::node_id, ValueRef] lllocals = new_int_hash[ValueRef]();
let hashmap[ast::node_id, ValueRef] llupvars = new_int_hash[ValueRef]();
auto derived_tydescs =
map::mk_hashmap[ty::t, derived_tydesc_info](ty::hash_ty, ty::eq_ty);
auto llbbs = mk_standard_basic_blocks(llfndecl);
ret @rec(llfn=llfndecl,
lltaskptr=lltaskptr,
llenv=llenv,
llretptr=llretptr,
mutable llstaticallocas=llbbs._0,
mutable llcopyargs=llbbs._1,
mutable llderivedtydescs_first=llbbs._2,
mutable llderivedtydescs=llbbs._2,
mutable lldynamicallocas=llbbs._3,
mutable llself=none[val_self_pair],
mutable lliterbody=none[ValueRef],
llargs=llargs,
llobjfields=llobjfields,
lllocals=lllocals,
llupvars=llupvars,
mutable lltydescs=vec::empty[ValueRef](),
derived_tydescs=derived_tydescs,
sp=sp,
lcx=cx);
}
// NB: must keep 4 fns in sync:
//
// - type_of_fn_full
// - create_llargs_for_fn_args.
// - new_fn_ctxt
// - trans_args
// create_llargs_for_fn_args: Creates a mapping from incoming arguments to
// allocas created for them.
//
// When we translate a function, we need to map its incoming arguments to the
// spaces that have been created for them (by code in the llallocas field of
// the function's fn_ctxt). create_llargs_for_fn_args populates the llargs
// field of the fn_ctxt with that mapping.
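//
// For illustration, the incoming parameter layout walked here is
// (a sketch):
//
//   [0] llretptr   [1] lltaskptr   [2] llenv
//   then one tydesc per type parameter (when there is no self; with
//   self, tydescs come from the object body instead),
//   then the iter body fn (only for proto_iter),
//   then the declared arguments, in declaration order.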
fn create_llargs_for_fn_args(&@fn_ctxt cx, ast::proto proto,
option::t[ty_self_pair] ty_self, ty::t ret_ty,
&vec[ast::arg] args,
&vec[ast::ty_param] ty_params) {
// Skip the implicit arguments 0, 1, and 2. TODO: Pull out 3u and define
// it as a constant, since we're using it in several places in trans this
// way.
auto arg_n = 3u;
alt (ty_self) {
case (some(?tt)) {
cx.llself = some[val_self_pair](rec(v=cx.llenv, t=tt._1));
}
case (none) {
auto i = 0u;
for (ast::ty_param tp in ty_params) {
auto llarg = llvm::LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.lltydescs += [llarg];
arg_n += 1u;
i += 1u;
}
}
}
// If the function is actually an iter, populate the lliterbody field of
// the function context with the ValueRef that we get from
// llvm::LLVMGetParam for the iter's body.
if (proto == ast::proto_iter) {
auto llarg = llvm::LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.lliterbody = some[ValueRef](llarg);
arg_n += 1u;
}
// Populate the llargs field of the function context with the ValueRefs
// that we get from llvm::LLVMGetParam for each argument.
for (ast::arg arg in args) {
auto llarg = llvm::LLVMGetParam(cx.llfn, arg_n);
assert (llarg as int != 0);
cx.llargs.insert(arg.id, llarg);
arg_n += 1u;
}
}
// Recommended LLVM style, strange though this is, is to copy from args to
// allocas immediately upon entry; this permits us to GEP into structures we
// were passed and whatnot. Apparently mem2reg will mop up.
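//
// E.g. (a sketch, in LLVM-ish pseudocode): an argument arriving in
// %arg becomes
//
//   %a = alloca <arg type>
//   store %arg, %a
//
// and the llargs entry is re-pointed at %a, so later code can GEP
// through it.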
fn copy_any_self_to_alloca(@fn_ctxt fcx, option::t[ty_self_pair] ty_self) {
auto bcx = llstaticallocas_block_ctxt(fcx);
alt ({ fcx.llself }) {
case (some(?pair)) {
alt (ty_self) {
case (some[ty_self_pair](?tt)) {
auto a = alloca(bcx, tt._0);
bcx.build.Store(pair.v, a);
fcx.llself = some[val_self_pair](rec(v=a, t=pair.t));
}
}
}
case (_) { }
}
}
fn copy_args_to_allocas(@fn_ctxt fcx, vec[ast::arg] args,
&ty::arg[] arg_tys) {
auto bcx = new_raw_block_ctxt(fcx, fcx.llcopyargs);
let uint arg_n = 0u;
for (ast::arg aarg in args) {
if (aarg.mode == ast::val) {
auto arg_t = type_of_arg(bcx.fcx.lcx, fcx.sp, arg_tys.(arg_n));
auto a = alloca(bcx, arg_t);
auto argval;
alt (bcx.fcx.llargs.find(aarg.id)) {
case (some(?x)) { argval = x; }
case (_) { bcx.fcx.lcx.ccx.sess.span_fatal(aarg.ty.span,
"unbound arg ID in copy_args_to_allocas"); }
}
bcx.build.Store(argval, a);
// Overwrite the llargs entry for this arg with its alloca.
bcx.fcx.llargs.insert(aarg.id, a);
}
arg_n += 1u;
}
}
fn add_cleanups_for_args(&@block_ctxt bcx, vec[ast::arg] args,
&ty::arg[] arg_tys) {
let uint arg_n = 0u;
for (ast::arg aarg in args) {
if (aarg.mode == ast::val) {
auto argval;
alt (bcx.fcx.llargs.find(aarg.id)) {
case (some(?x)) { argval = x; }
            case (_) { bcx.fcx.lcx.ccx.sess.span_fatal(aarg.ty.span,
                "unbound arg ID in add_cleanups_for_args"); }
}
find_scope_cx(bcx).cleanups +=
[clean(bind drop_slot(_, argval, arg_tys.(arg_n).ty))];
}
arg_n += 1u;
}
}
fn is_terminated(&@block_ctxt cx) -> bool {
auto inst = llvm::LLVMGetLastInstruction(cx.llbb);
ret llvm::LLVMIsATerminatorInst(inst) as int != 0;
}
fn arg_tys_of_fn(&@crate_ctxt ccx, ast::node_id id) -> ty::arg[] {
alt (ty::struct(ccx.tcx, ty::node_id_to_type(ccx.tcx, id))) {
case (ty::ty_fn(_, ?arg_tys, _, _, _)) { ret arg_tys; }
}
}
fn populate_fn_ctxt_from_llself(@fn_ctxt fcx, val_self_pair llself) {
auto bcx = llstaticallocas_block_ctxt(fcx);
let ty::t[] field_tys = ~[];
for (ast::obj_field f in bcx.fcx.lcx.obj_fields) {
field_tys += ~[node_id_type(bcx.fcx.lcx.ccx, f.id)];
}
// Synthesize a tuple type for the fields so that GEP_tup_like() can work
// its magic.
auto fields_tup_ty = ty::mk_imm_tup(fcx.lcx.ccx.tcx, field_tys);
auto n_typarams = vec::len[ast::ty_param](bcx.fcx.lcx.obj_typarams);
let TypeRef llobj_box_ty = T_obj_ptr(bcx.fcx.lcx.ccx.tn, n_typarams);
auto box_cell =
bcx.build.GEP(llself.v, [C_int(0), C_int(abi::obj_field_box)]);
auto box_ptr = bcx.build.Load(box_cell);
box_ptr = bcx.build.PointerCast(box_ptr, llobj_box_ty);
auto obj_typarams =
bcx.build.GEP(box_ptr,
[C_int(0), C_int(abi::box_rc_field_body),
C_int(abi::obj_body_elt_typarams)]);
// The object fields immediately follow the type parameters, so we skip
// over them to get the pointer.
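    // The box being walked has this shape (a sketch):
    //
    //   [rc | [tydesc | [typarams...] | [fields...]]]
    //
    // obj_typarams points at the typaram tuple inside the body; adding
    // that tuple's size to its address yields the start of the fields.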
auto et = llvm::LLVMGetElementType(val_ty(obj_typarams));
auto obj_fields = bcx.build.Add(vp2i(bcx, obj_typarams), llsize_of(et));
// If we can (i.e. the type is statically sized), then cast the resulting
// fields pointer to the appropriate LLVM type. If not, just leave it as
// i8 *.
if (!ty::type_has_dynamic_size(fcx.lcx.ccx.tcx, fields_tup_ty)) {
auto llfields_ty = type_of(fcx.lcx.ccx, fcx.sp, fields_tup_ty);
obj_fields = vi2p(bcx, obj_fields, T_ptr(llfields_ty));
} else { obj_fields = vi2p(bcx, obj_fields, T_ptr(T_i8())); }
let int i = 0;
for (ast::ty_param p in fcx.lcx.obj_typarams) {
let ValueRef lltyparam =
bcx.build.GEP(obj_typarams, [C_int(0), C_int(i)]);
lltyparam = bcx.build.Load(lltyparam);
fcx.lltydescs += [lltyparam];
i += 1;
}
i = 0;
for (ast::obj_field f in fcx.lcx.obj_fields) {
auto rslt = GEP_tup_like(bcx, fields_tup_ty, obj_fields, [0, i]);
bcx = llstaticallocas_block_ctxt(fcx);
auto llfield = rslt.val;
fcx.llobjfields.insert(f.id, llfield);
i += 1;
}
fcx.llstaticallocas = bcx.llbb;
}
// Ties up the llstaticallocas -> llcopyargs -> llderivedtydescs ->
// lldynamicallocas -> lltop edges.
fn finish_fn(&@fn_ctxt fcx, BasicBlockRef lltop) {
new_builder(fcx.llstaticallocas).Br(fcx.llcopyargs);
new_builder(fcx.llcopyargs).Br(fcx.llderivedtydescs_first);
new_builder(fcx.llderivedtydescs).Br(fcx.lldynamicallocas);
new_builder(fcx.lldynamicallocas).Br(lltop);
}
// trans_fn: creates an LLVM function corresponding to a source language
// function.
fn trans_fn(@local_ctxt cx, &span sp, &ast::_fn f, ValueRef llfndecl,
option::t[ty_self_pair] ty_self, &vec[ast::ty_param] ty_params,
ast::node_id id) {
set_uwtable(llfndecl);
// Set up arguments to the function.
auto fcx = new_fn_ctxt(cx, sp, llfndecl);
create_llargs_for_fn_args(fcx, f.proto, ty_self,
ty::ret_ty_of_fn(cx.ccx.tcx, id),
f.decl.inputs, ty_params);
copy_any_self_to_alloca(fcx, ty_self);
alt ({ fcx.llself }) {
case (some(?llself)) { populate_fn_ctxt_from_llself(fcx, llself); }
case (_) { }
}
auto arg_tys = arg_tys_of_fn(fcx.lcx.ccx, id);
copy_args_to_allocas(fcx, f.decl.inputs, arg_tys);
// Create the first basic block in the function and keep a handle on it to
// pass to finish_fn later.
auto bcx = new_top_block_ctxt(fcx);
add_cleanups_for_args(bcx, f.decl.inputs, arg_tys);
auto lltop = bcx.llbb;
auto block_ty = node_id_type(cx.ccx, f.body.node.id);
// This call to trans_block is the place where we bridge between
// translation calls that don't have a return value (trans_crate,
// trans_mod, trans_item, trans_obj, et cetera) and those that do
// (trans_block, trans_expr, et cetera).
auto rslt =
if (!ty::type_is_nil(cx.ccx.tcx, block_ty) &&
!ty::type_is_bot(cx.ccx.tcx, block_ty)) {
trans_block(bcx, f.body, save_in(fcx.llretptr))
} else { trans_block(bcx, f.body, return) };
if (!is_terminated(rslt.bcx)) {
// FIXME: until LLVM has a unit type, we are moving around
// C_nil values rather than their void type.
rslt.bcx.build.RetVoid();
}
// Insert the mandatory first few basic blocks before lltop.
finish_fn(fcx, lltop);
}
// process_fwding_mthd: Create the forwarding function that appears in a
// vtable slot for method calls that "fall through" to an inner object. A
// helper function for create_vtbl.
fn process_fwding_mthd(@local_ctxt cx, @ty::method m, TypeRef llself_ty,
ty::t self_ty, &vec[ast::ty_param] ty_params,
ty::t with_obj_ty,
ty::t[] additional_field_tys) -> ValueRef {
// NB: self_ty (and llself_ty) is the type of the outer object;
// with_obj_ty (and llwith_obj_ty) is the type of the inner object.
// The method m is being called on the outer object, but the outer object
// doesn't have that method; only the inner object does. So what we have
// to do is synthesize that method on the outer object. It has to take
// all the same arguments as the method on the inner object does, then
// call m with those arguments on the inner object, and then return the
// value returned from that call. It's like an eta-expansion around m,
// except we also have to pass the inner object that m should be called
// on. That object won't exist until run-time, but we know its type
// statically.
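    // In pseudocode, the forwarding function we emit looks roughly like
    // this (a sketch; these are not real locals):
    //
    //   fn forwarding_fn(retptr, taskptr, env=outer_obj, args...) {
    //       auto inner = outer_obj.box.body.with_obj;
    //       auto mthd = inner.vtbl.(ix + 1u);  // slot 0 is the dtor
    //       mthd(retptr, taskptr, inner, args...);
    //   }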
// Create a fake span for functions that expect it. Shouldn't matter what
// it is, since this isn't user-written code. (Possibly better: have
// create_vtable take a span argument and pass it in here?)
    let span fake_span = rec(lo=0u, hi=0u);
// Create a local context that's aware of the name of the method we're
// creating.
let @local_ctxt mcx =
@rec(path=cx.path + ["method", m.ident] with *cx);
// Make up a name for the forwarding function.
let str s = mangle_internal_name_by_path_and_seq(mcx.ccx, mcx.path,
"forwarding_fn");
// Get the forwarding function's type and declare it.
let TypeRef llforwarding_fn_ty =
type_of_fn_full(
cx.ccx, fake_span, m.proto,
some[TypeRef](llself_ty), m.inputs, m.output,
vec::len[ast::ty_param](ty_params));
let ValueRef llforwarding_fn =
decl_internal_fastcall_fn(cx.ccx.llmod, s, llforwarding_fn_ty);
// Create a new function context and block context for the forwarding
// function, holding onto a pointer to the first block.
auto fcx = new_fn_ctxt(cx, fake_span, llforwarding_fn);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// The outer object will arrive in the forwarding function via the llenv
// argument. Put it in an alloca so that we can GEP into it later.
auto llself_obj_ptr = alloca(bcx, llself_ty);
bcx.build.Store(fcx.llenv, llself_obj_ptr);
// The 'llretptr' that will arrive in the forwarding function we're
// creating also needs to be the correct size. Cast it to the size of the
// method's return type, if necessary.
auto llretptr = fcx.llretptr;
if (ty::type_has_dynamic_size(cx.ccx.tcx, m.output)) {
llretptr = bcx.build.PointerCast(llretptr,
T_typaram_ptr(cx.ccx.tn));
}
    // Now, we have to get the with_obj's vtbl out of the self_obj. This
// is a multi-step process:
// First, grab the box out of the self_obj. It contains a refcount and a
// body.
auto llself_obj_box =
bcx.build.GEP(llself_obj_ptr, [C_int(0),
C_int(abi::obj_field_box)]);
llself_obj_box = bcx.build.Load(llself_obj_box);
// Now, reach into the box and grab the body.
auto llself_obj_body =
bcx.build.GEP(llself_obj_box, [C_int(0),
C_int(abi::box_rc_field_body)]);
// Now, we need to figure out exactly what type the body is supposed to be
// cast to.
// NB: This next part is almost flat-out copypasta from trans_anon_obj.
// It would be great to factor this out.
// Synthesize a tuple type for fields: [field, ...]
let ty::t fields_ty = ty::mk_imm_tup(cx.ccx.tcx, additional_field_tys);
// Tydescs are run-time instantiations of typarams. We're not
// actually supporting typarams for anon objs yet, but let's
// create space for them in case we ever want them.
let ty::t tydesc_ty = ty::mk_type(cx.ccx.tcx);
let ty::t[] tps = ~[];
for (ast::ty_param tp in ty_params) {
tps += ~[tydesc_ty];
}
// Synthesize a tuple type for typarams: [typaram, ...]
let ty::t typarams_ty = ty::mk_imm_tup(cx.ccx.tcx, tps);
// Tuple type for body:
// [tydesc_ty, [typaram, ...], [field, ...], with_obj]
let ty::t body_ty =
ty::mk_imm_tup(cx.ccx.tcx, ~[tydesc_ty, typarams_ty,
fields_ty, with_obj_ty]);
// And cast to that type.
llself_obj_body = bcx.build.PointerCast(llself_obj_body,
T_ptr(type_of(cx.ccx,
fake_span,
body_ty)));
// Now, reach into the body and grab the with_obj.
auto llwith_obj =
GEP_tup_like(bcx,
body_ty,
llself_obj_body,
[0, abi::obj_body_elt_with_obj]);
bcx = llwith_obj.bcx;
// And, now, somewhere in with_obj is a vtable with an entry for the
// method we want. First, pick out the vtable, and then pluck that
// method's entry out of the vtable so that the forwarding function can
// call it.
auto llwith_obj_vtbl =
bcx.build.GEP(llwith_obj.val, [C_int(0), C_int(abi::obj_field_vtbl)]);
llwith_obj_vtbl = bcx.build.Load(llwith_obj_vtbl);
// Get the index of the method we want.
let uint ix = 0u;
alt (ty::struct(bcx.fcx.lcx.ccx.tcx, with_obj_ty)) {
case (ty::ty_obj(?methods)) {
ix = ty::method_idx(cx.ccx.sess, fake_span, m.ident, methods);
}
case (_) {
// Shouldn't happen.
cx.ccx.sess.bug("process_fwding_mthd(): non-object type passed "
+ "as with_obj_ty");
}
}
// Pick out the original method from the vtable. The +1 is because slot
// #0 contains the destructor.
auto llorig_mthd = bcx.build.GEP(llwith_obj_vtbl,
[C_int(0), C_int(ix + 1u as int)]);
// Set up the original method to be called.
auto orig_mthd_ty = ty::method_ty_to_fn_ty(cx.ccx.tcx, *m);
auto llwith_obj_ty_real = val_ty(llwith_obj.val);
auto llorig_mthd_ty =
type_of_fn_full(bcx.fcx.lcx.ccx, fake_span,
ty::ty_fn_proto(bcx.fcx.lcx.ccx.tcx, orig_mthd_ty),
some[TypeRef](llwith_obj_ty_real),
m.inputs,
m.output,
vec::len[ast::ty_param](ty_params));
// TODO: can we leave out one of these T_ptrs and then get rid of the
// Load?
llorig_mthd = bcx.build.PointerCast(llorig_mthd,
T_ptr(llorig_mthd_ty));
// Set up the three implicit arguments to the original method we'll need
// to call.
let vec[ValueRef] llorig_mthd_args = [llretptr, fcx.lltaskptr,
llwith_obj.val];
// Copy the explicit arguments that are being passed into the forwarding
// function (they're in fcx.llargs) to llorig_mthd_args.
    let uint a = 3u; // retptr, task ptr, env come first
    for (ty::arg arg in m.inputs) {
        // Fetch each parameter as we go; fetching only once outside the
        // loop would pass the first argument in every slot.
        let ValueRef passed_arg = llvm::LLVMGetParam(llforwarding_fn, a);
        if (arg.mode == ty::mo_val) {
            passed_arg = load_if_immediate(bcx, passed_arg, arg.ty);
        }
        llorig_mthd_args += [passed_arg];
        a += 1u;
    }
// And, finally, call the original method.
bcx.build.FastCall(llorig_mthd, llorig_mthd_args);
bcx.build.RetVoid();
finish_fn(fcx, lltop);
ret llforwarding_fn;
}
// process_normal_mthd: Create the contents of a normal vtable slot. A helper
// function for create_vtbl.
fn process_normal_mthd(@local_ctxt cx, @ast::method m, TypeRef llself_ty,
ty::t self_ty, &vec[ast::ty_param] ty_params)
-> ValueRef {
auto llfnty = T_nil();
alt (ty::struct(cx.ccx.tcx, node_id_type(cx.ccx, m.node.id))){
case (ty::ty_fn(?proto, ?inputs, ?output, _, _)) {
llfnty =
type_of_fn_full(
cx.ccx, m.span, proto,
some[TypeRef](llself_ty), inputs, output,
vec::len[ast::ty_param](ty_params));
}
}
let @local_ctxt mcx =
@rec(path=cx.path + ["method", m.node.ident] with *cx);
let str s = mangle_internal_name_by_path(mcx.ccx, mcx.path);
let ValueRef llfn =
decl_internal_fastcall_fn(cx.ccx.llmod, s, llfnty);
// Every method on an object gets its node_id inserted into the
// crate-wide item_ids map, together with the ValueRef that points to
// where that method's definition will be in the executable.
cx.ccx.item_ids.insert(m.node.id, llfn);
cx.ccx.item_symbols.insert(m.node.id, s);
trans_fn(mcx, m.span, m.node.meth, llfn,
some[ty_self_pair](tup(llself_ty, self_ty)),
ty_params, m.node.id);
ret llfn;
}
// Create a vtable for an object being translated. Returns a pointer into
// read-only memory.
fn create_vtbl(@local_ctxt cx, TypeRef llself_ty, ty::t self_ty,
&ast::_obj ob, &vec[ast::ty_param] ty_params,
option::t[ty::t] with_obj_ty,
ty::t[] additional_field_tys) -> ValueRef {
// Used only inside create_vtbl to distinguish different kinds of slots
// we'll have to create.
tag vtbl_mthd {
// Normal methods are complete AST nodes, but for forwarding methods,
// the only information we'll have about them is their type.
normal_mthd(@ast::method);
fwding_mthd(@ty::method);
}
auto dtor = C_null(T_ptr(T_i8()));
alt (ob.dtor) {
case (some(?d)) {
auto dtor_1 = trans_dtor(cx, llself_ty, self_ty, ty_params, d);
dtor = llvm::LLVMConstBitCast(dtor_1, val_ty(dtor));
}
case (none) { }
}
let vec[ValueRef] llmethods = [dtor];
let vec[vtbl_mthd] meths = [];
alt (with_obj_ty) {
case (none) {
// If there's no with_obj, then we don't need any forwarding
// slots. Just use the object's regular methods.
for (@ast::method m in ob.methods) {
meths += [normal_mthd(m)];
}
}
case (some(?with_obj_ty)) {
// Handle forwarding slots.
// If this vtable is being created for an extended object, then
// the vtable needs to contain 'forwarding slots' for methods that
// were on the original object and are not being overloaded by the
// extended one. So, to find the set of methods that we need
// forwarding slots for, we need to take the set difference of
// with_obj_methods (methods on the original object) and
// ob.methods (methods on the object being added).
// If we're here, then with_obj_ty and llwith_obj_ty are the type
// of the inner object, and "ob" is the wrapper object. We need
// to take apart with_obj_ty (it had better have an object type
// with methods!) and put those original methods onto the list of
// methods we need forwarding methods for.
// Gather up methods on the original object in 'meths'.
alt (ty::struct(cx.ccx.tcx, with_obj_ty)) {
case (ty::ty_obj(?with_obj_methods)) {
for (ty::method m in with_obj_methods) {
meths += [fwding_mthd(@m)];
}
}
case (_) {
// Shouldn't happen.
cx.ccx.sess.bug("create_vtbl(): trying to extend a "
+ "non-object");
}
}
// Now, filter out any methods that are being replaced.
fn filtering_fn(&vtbl_mthd m, vec[vtbl_mthd] addtl_meths) ->
option::t[vtbl_mthd] {
let option::t[vtbl_mthd] rslt;
if (std::vec::member[vtbl_mthd](m, addtl_meths)) {
rslt = none;
} else {
rslt = some(m);
}
ret rslt;
}
// NB: addtl_meths is just like ob.methods except that it's of
// type vec[vtbl_mthd], not vec[@ast::method].
let vec[vtbl_mthd] addtl_meths = [];
for (@ast::method m in ob.methods) {
addtl_meths += [normal_mthd(m)];
}
auto f = bind filtering_fn(_, addtl_meths);
// Filter out any methods that we don't need forwarding slots for
// (namely, those that are being replaced).
meths = std::vec::filter_map[vtbl_mthd, vtbl_mthd](f, meths);
// And now add the additional ones (both replacements and entirely
// new ones).
meths += addtl_meths;
}
}
// Sort all the methods.
fn vtbl_mthd_lteq(&vtbl_mthd a, &vtbl_mthd b) -> bool {
alt (a) {
case (normal_mthd(?ma)) {
alt (b) {
case (normal_mthd(?mb)) {
ret str::lteq(ma.node.ident, mb.node.ident);
}
case (fwding_mthd(?mb)) {
ret str::lteq(ma.node.ident, mb.ident);
}
}
}
case (fwding_mthd(?ma)) {
alt (b) {
case (normal_mthd(?mb)) {
ret str::lteq(ma.ident, mb.node.ident);
}
case (fwding_mthd(?mb)) {
ret str::lteq(ma.ident, mb.ident);
}
}
}
}
}
meths = std::sort::merge_sort[vtbl_mthd] (bind vtbl_mthd_lteq(_, _),
meths);
// Now that we have our list of methods, we can process them in order.
for (vtbl_mthd m in meths) {
alt (m) {
case (normal_mthd(?nm)) {
llmethods += [process_normal_mthd(cx, nm, llself_ty, self_ty,
ty_params)];
}
// If we have to process a forwarding method, then we need to know
// about the with_obj's type as well as the outer object's type.
case (fwding_mthd(?fm)) {
alt (with_obj_ty) {
case (none) {
// This shouldn't happen; if we're trying to process a
// forwarding method, then we should always have a
// with_obj_ty.
cx.ccx.sess.bug("create_vtbl(): trying to create "
+ "forwarding method without a type "
+ "of object to forward to");
}
case (some(?t)) {
llmethods += [process_fwding_mthd(
cx, fm, llself_ty,
self_ty, ty_params,
t,
additional_field_tys)];
}
}
}
}
}
auto vtbl = C_struct(llmethods);
auto vtbl_name = mangle_internal_name_by_path(cx.ccx, cx.path + ["vtbl"]);
auto gvar =
llvm::LLVMAddGlobal(cx.ccx.llmod, val_ty(vtbl), str::buf(vtbl_name));
llvm::LLVMSetInitializer(gvar, vtbl);
llvm::LLVMSetGlobalConstant(gvar, True);
llvm::LLVMSetLinkage(gvar,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
ret gvar;
}
fn trans_dtor(@local_ctxt cx, TypeRef llself_ty, ty::t self_ty,
&vec[ast::ty_param] ty_params, &@ast::method dtor) -> ValueRef {
auto llfnty = T_dtor(cx.ccx, dtor.span, llself_ty);
let str s = mangle_internal_name_by_path(cx.ccx, cx.path + ["drop"]);
let ValueRef llfn = decl_internal_fastcall_fn(cx.ccx.llmod, s, llfnty);
cx.ccx.item_ids.insert(dtor.node.id, llfn);
cx.ccx.item_symbols.insert(dtor.node.id, s);
trans_fn(cx, dtor.span, dtor.node.meth, llfn,
some[ty_self_pair](tup(llself_ty, self_ty)), ty_params,
dtor.node.id);
ret llfn;
}
// trans_obj: creates an LLVM function that is the object constructor for the
// object being translated.
fn trans_obj(@local_ctxt cx, &span sp, &ast::_obj ob, ast::node_id ctor_id,
&vec[ast::ty_param] ty_params) {
// To make a function, we have to create a function context and, inside
// that, a number of block contexts for which code is generated.
auto ccx = cx.ccx;
auto llctor_decl;
alt (ccx.item_ids.find(ctor_id)) {
case (some(?x)) { llctor_decl = x; }
case (_) { cx.ccx.sess.span_fatal(sp,
"unbound llctor_decl in trans_obj"); }
}
// Much like trans_fn, we must create an LLVM function, but since we're
// starting with an ast::_obj rather than an ast::_fn, we have some setup
// work to do.
// The fields of our object will become the arguments to the function
// we're creating.
let vec[ast::arg] fn_args = [];
for (ast::obj_field f in ob.fields) {
fn_args +=
[rec(mode=ast::alias(false), ty=f.ty, ident=f.ident, id=f.id)];
}
auto fcx = new_fn_ctxt(cx, sp, llctor_decl);
// Both regular arguments and type parameters are handled here.
create_llargs_for_fn_args(fcx, ast::proto_fn, none[ty_self_pair],
ty::ret_ty_of_fn(ccx.tcx, ctor_id),
fn_args, ty_params);
let ty::arg[] arg_tys = arg_tys_of_fn(ccx, ctor_id);
copy_args_to_allocas(fcx, fn_args, arg_tys);
// Create the first block context in the function and keep a handle on it
// to pass to finish_fn later.
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Pick up the type of this object by looking at our own output type, that
// is, the output type of the object constructor we're building.
auto self_ty = ty::ret_ty_of_fn(ccx.tcx, ctor_id);
auto llself_ty = type_of(ccx, sp, self_ty);
// Set up the two-word pair that we're going to return from the object
// constructor we're building. The two elements of this pair will be a
// vtable pointer and a body pointer. (llretptr already points to the
// place where this two-word pair should go; it was pre-allocated by the
// caller of the function.)
auto pair = bcx.fcx.llretptr;
// Grab onto the first and second elements of the pair.
// abi::obj_field_vtbl and abi::obj_field_box simply specify words 0 and 1
// of 'pair'.
auto pair_vtbl =
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_vtbl)]);
auto pair_box =
bcx.build.GEP(pair, [C_int(0), C_int(abi::obj_field_box)]);
// Make a vtable for this object: a static array of pointers to functions.
// It will be located in the read-only memory of the executable we're
// creating and will contain ValueRefs for all of this object's methods.
// create_vtbl returns a pointer to the vtable, which we store.
auto vtbl = create_vtbl(cx, llself_ty, self_ty, ob, ty_params, none, ~[]);
bcx.build.Store(vtbl, pair_vtbl);
// Next we have to take care of the other half of the pair we're
// returning: a boxed (reference-counted) tuple containing a tydesc,
// typarams, and fields.
// FIXME: What about with_obj? Do we have to think about it here?
// (Pertains to issues #538/#539/#540/#543.)
let TypeRef llbox_ty = T_opaque_obj_ptr(ccx.tn);
// FIXME: we should probably also allocate a box for empty objs that have
// a dtor, since otherwise they are never dropped, and the dtor never
// runs.
if (vec::len[ast::ty_param](ty_params) == 0u &&
std::ivec::len[ty::arg](arg_tys) == 0u) {
// If the object we're translating has no fields or type parameters,
// there's not much to do.
// Store null into pair, if no args or typarams.
bcx.build.Store(C_null(llbox_ty), pair_box);
} else {
// Otherwise, we have to synthesize a big structural type for the
// object body.
let ty::t[] obj_fields = ~[];
for (ty::arg a in arg_tys) { obj_fields += ~[a.ty]; }
// Tuple type for fields: [field, ...]
let ty::t fields_ty = ty::mk_imm_tup(ccx.tcx, obj_fields);
auto tydesc_ty = ty::mk_type(ccx.tcx);
let ty::t[] tps = ~[];
for (ast::ty_param tp in ty_params) { tps += ~[tydesc_ty]; }
// Tuple type for typarams: [typaram, ...]
let ty::t typarams_ty = ty::mk_imm_tup(ccx.tcx, tps);
// Tuple type for body: [tydesc_ty, [typaram, ...], [field, ...]]
let ty::t body_ty =
ty::mk_imm_tup(ccx.tcx, ~[tydesc_ty, typarams_ty, fields_ty]);
// Hand this type we've synthesized off to trans_malloc_boxed, which
// allocates a box, including space for a refcount.
auto box = trans_malloc_boxed(bcx, body_ty);
bcx = box.bcx;
// mk_imm_box throws a refcount into the type we're synthesizing, so
// that it looks like: [rc, [tydesc_ty, [typaram, ...], [field, ...]]]
let ty::t boxed_body_ty = ty::mk_imm_box(ccx.tcx, body_ty);
// Grab onto the refcount and body parts of the box we allocated.
auto rc =
GEP_tup_like(bcx, boxed_body_ty, box.val,
[0, abi::box_rc_field_refcnt]);
bcx = rc.bcx;
auto body =
GEP_tup_like(bcx, boxed_body_ty, box.val,
[0, abi::box_rc_field_body]);
bcx = body.bcx;
bcx.build.Store(C_int(1), rc.val);
// Put together a tydesc for the body, so that the object can later be
// freed by calling through its tydesc.
        // Every object (not just those with type parameters) needs a tydesc
        // to describe its body, since an object's type is opaque to its
        // user. The tydesc keeps track of the types of the object's fields
        // so that the fields can be freed later.
auto body_tydesc =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_tydesc]);
bcx = body_tydesc.bcx;
auto ti = none[@tydesc_info];
auto body_td = get_tydesc(bcx, body_ty, true, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_drop_glue, ti);
lazily_emit_tydesc_glue(bcx, abi::tydesc_field_free_glue, ti);
bcx = body_td.bcx;
bcx.build.Store(body_td.val, body_tydesc.val);
// Copy the object's type parameters and fields into the space we
// allocated for the object body. (This is something like saving the
// lexical environment of a function in its closure: the "captured
// typarams" are any type parameters that are passed to the object
// constructor and are then available to the object's methods.
// Likewise for the object's fields.)
// Copy typarams into captured typarams.
auto body_typarams =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_typarams]);
bcx = body_typarams.bcx;
let int i = 0;
for (ast::ty_param tp in ty_params) {
auto typaram = bcx.fcx.lltydescs.(i);
auto capture =
GEP_tup_like(bcx, typarams_ty, body_typarams.val, [0, i]);
bcx = capture.bcx;
bcx = copy_val(bcx, INIT, capture.val, typaram, tydesc_ty).bcx;
i += 1;
}
// Copy args into body fields.
auto body_fields =
GEP_tup_like(bcx, body_ty, body.val,
[0, abi::obj_body_elt_fields]);
bcx = body_fields.bcx;
i = 0;
for (ast::obj_field f in ob.fields) {
alt (bcx.fcx.llargs.find(f.id)) {
case (some(?arg1)) {
auto arg = load_if_immediate(bcx, arg1, arg_tys.(i).ty);
auto field =
GEP_tup_like(bcx, fields_ty, body_fields.val, [0, i]);
bcx = field.bcx;
bcx = copy_val(bcx, INIT, field.val, arg,
arg_tys.(i).ty).bcx;
i += 1;
}
case (none) {
bcx.fcx.lcx.ccx.sess.span_fatal(f.ty.span,
"internal error in trans_obj");
}
}
}
// Store box ptr in outer pair.
auto p = bcx.build.PointerCast(box.val, llbox_ty);
bcx.build.Store(p, pair_box);
}
bcx.build.RetVoid();
// Insert the mandatory first few basic blocks before lltop.
finish_fn(fcx, lltop);
}
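// Translates the compiler-generated constructor of a resource type: it
// copies the ctor's single argument into a (flag, value) tuple written
// through the return pointer, setting the flag to 1 to mark the value as
// live, so that the destructor runs only once.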
fn trans_res_ctor(@local_ctxt cx, &span sp, &ast::_fn dtor,
ast::node_id ctor_id, &vec[ast::ty_param] ty_params) {
// Create a function for the constructor
auto llctor_decl;
alt (cx.ccx.item_ids.find(ctor_id)) {
case (some(?x)) { llctor_decl = x; }
case (_) {
cx.ccx.sess.span_fatal(sp, "unbound ctor_id in trans_res_ctor");
}
}
auto fcx = new_fn_ctxt(cx, sp, llctor_decl);
auto ret_t = ty::ret_ty_of_fn(cx.ccx.tcx, ctor_id);
create_llargs_for_fn_args(fcx, ast::proto_fn, none[ty_self_pair],
ret_t, dtor.decl.inputs, ty_params);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto arg_t = arg_tys_of_fn(cx.ccx, ctor_id).(0).ty;
auto tup_t = ty::mk_imm_tup(cx.ccx.tcx, ~[ty::mk_int(cx.ccx.tcx), arg_t]);
auto arg;
alt (fcx.llargs.find(dtor.decl.inputs.(0).id)) {
case (some(?x)) { arg = load_if_immediate(bcx, x, arg_t); }
case (_) {
cx.ccx.sess.span_fatal(sp, "unbound dtor decl in trans_res_ctor");
}
}
auto llretptr = fcx.llretptr;
if (ty::type_has_dynamic_size(cx.ccx.tcx, ret_t)) {
auto llret_t = T_ptr(T_struct([T_i32(), llvm::LLVMTypeOf(arg)]));
llretptr = bcx.build.BitCast(llretptr, llret_t);
}
auto dst = GEP_tup_like(bcx, tup_t, llretptr, [0, 1]);
bcx = dst.bcx;
bcx = copy_val(bcx, INIT, dst.val, arg, arg_t).bcx;
auto flag = GEP_tup_like(bcx, tup_t, llretptr, [0, 0]);
bcx = flag.bcx;
bcx.build.Store(C_int(1), flag.val);
bcx.build.RetVoid();
finish_fn(fcx, lltop);
}
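// Translates the constructor function for a tag variant that has arguments.
// The constructor stores the variant's discriminant (unless the tag is
// degenerate, i.e. has exactly one variant) and copies its arguments into
// the variant's blob within the tag value.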
fn trans_tag_variant(@local_ctxt cx, ast::node_id tag_id,
&ast::variant variant, int index, bool is_degen,
&vec[ast::ty_param] ty_params) {
if (vec::len[ast::variant_arg](variant.node.args) == 0u) {
ret; // nullary constructors are just constants
}
// Translate variant arguments to function arguments.
let vec[ast::arg] fn_args = [];
auto i = 0u;
    for (ast::variant_arg varg in variant.node.args) {
        fn_args +=
            [rec(mode=ast::alias(false),
                 ty=varg.ty,
                 ident="arg" + uint::to_str(i, 10u),
                 id=varg.id)];
        i += 1u;
    }
assert (cx.ccx.item_ids.contains_key(variant.node.id));
let ValueRef llfndecl;
alt (cx.ccx.item_ids.find(variant.node.id)) {
case (some(?x)) { llfndecl = x; }
case (_) {
cx.ccx.sess.span_fatal(variant.span,
"unbound variant id in trans_tag_variant");
}
}
auto fcx = new_fn_ctxt(cx, variant.span, llfndecl);
create_llargs_for_fn_args(fcx, ast::proto_fn, none[ty_self_pair],
ty::ret_ty_of_fn(cx.ccx.tcx, variant.node.id),
fn_args, ty_params);
let vec[ty::t] ty_param_substs = [];
i = 0u;
for (ast::ty_param tp in ty_params) {
ty_param_substs += [ty::mk_param(cx.ccx.tcx, i)];
i += 1u;
}
auto arg_tys = arg_tys_of_fn(cx.ccx, variant.node.id);
copy_args_to_allocas(fcx, fn_args, arg_tys);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
auto llblobptr = if (is_degen) {
fcx.llretptr
} else {
// Cast the tag to a type we can GEP into.
auto lltagptr = bcx.build.PointerCast
(fcx.llretptr, T_opaque_tag_ptr(fcx.lcx.ccx.tn));
auto lldiscrimptr = bcx.build.GEP(lltagptr, [C_int(0), C_int(0)]);
bcx.build.Store(C_int(index), lldiscrimptr);
bcx.build.GEP(lltagptr, [C_int(0), C_int(1)])
};
i = 0u;
for (ast::variant_arg va in variant.node.args) {
auto rslt =
GEP_tag(bcx, llblobptr, ast::local_def(tag_id),
ast::local_def(variant.node.id), ty_param_substs,
i as int);
bcx = rslt.bcx;
auto lldestptr = rslt.val;
        // If this argument to this function is a tag, it'll have come into
        // this function as an opaque blob due to the way that type_of()
        // works. So we have to cast to the destination's view of the type.
auto llargptr;
alt (fcx.llargs.find(va.id)) {
case (some(?x)) {
llargptr = bcx.build.PointerCast(x, val_ty(lldestptr));
}
case (none) {
bcx.fcx.lcx.ccx.sess.bug("unbound argptr in \
trans_tag_variant");
}
}
auto arg_ty = arg_tys.(i).ty;
auto llargval;
if (ty::type_is_structural(cx.ccx.tcx, arg_ty) ||
ty::type_has_dynamic_size(cx.ccx.tcx, arg_ty)) {
llargval = llargptr;
} else { llargval = bcx.build.Load(llargptr); }
rslt = copy_val(bcx, INIT, lldestptr, llargval, arg_ty);
bcx = rslt.bcx;
i += 1u;
}
bcx = trans_block_cleanups(bcx, find_scope_cx(bcx));
bcx.build.RetVoid();
finish_fn(fcx, lltop);
}
// FIXME: this should do some structural hash-consing to avoid
// duplicate constants. I think. Maybe LLVM has a magical mode
// that does so later on?
fn trans_const_expr(&@crate_ctxt cx, @ast::expr e) -> ValueRef {
alt (e.node) {
case (ast::expr_lit(?lit)) { ret trans_lit(cx, *lit, e.id); }
case (_) {
            cx.sess.span_unimpl(e.span, "consts other than plain literals");
}
}
}
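// Fills in the const's global (declared during item collection) with the
// translated value of its initializer expression.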
fn trans_const(&@crate_ctxt cx, @ast::expr e, ast::node_id id) {
auto v = trans_const_expr(cx, e);
// The scalars come back as 1st class LLVM vals
// which we have to stick into global constants.
alt (cx.consts.find(id)) {
case (some(?g)) {
llvm::LLVMSetInitializer(g, v);
llvm::LLVMSetGlobalConstant(g, True);
}
case (_) {
            cx.sess.span_fatal(e.span, "unbound const in trans_const");
}
}
}
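// Translates one item to LLVM, dispatching on its kind: functions, objects,
// resources, modules, tags, and consts each have their own translation
// path. Items that generate no code fall through.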
fn trans_item(@local_ctxt cx, &ast::item item) {
alt (item.node) {
case (ast::item_fn(?f, ?tps)) {
auto sub_cx = extend_path(cx, item.ident);
alt (cx.ccx.item_ids.find(item.id)) {
case (some(?llfndecl)) {
trans_fn(sub_cx, item.span, f, llfndecl,
none[ty_self_pair], tps, item.id);
}
case (_) {
cx.ccx.sess.span_fatal(item.span,
"unbound function item in trans_item");
}
}
}
case (ast::item_obj(?ob, ?tps, ?ctor_id)) {
auto sub_cx =
@rec(obj_typarams=tps, obj_fields=ob.fields
with *extend_path(cx, item.ident));
trans_obj(sub_cx, item.span, ob, ctor_id, tps);
}
case (ast::item_res(?dtor, ?dtor_id, ?tps, ?ctor_id)) {
trans_res_ctor(cx, item.span, dtor, ctor_id, tps);
// Create a function for the destructor
alt (cx.ccx.item_ids.find(item.id)) {
case (some(?lldtor_decl)) {
trans_fn(cx, item.span, dtor, lldtor_decl, none, tps,
dtor_id);
}
case (_) { cx.ccx.sess.span_fatal(item.span,
"unbound dtor in trans_item"); }
}
}
case (ast::item_mod(?m)) {
auto sub_cx =
@rec(path=cx.path + [item.ident],
module_path=cx.module_path + [item.ident] with *cx);
trans_mod(sub_cx, m);
}
case (ast::item_tag(?variants, ?tps)) {
auto sub_cx = extend_path(cx, item.ident);
auto degen = vec::len(variants) == 1u;
auto i = 0;
for (ast::variant variant in variants) {
trans_tag_variant(sub_cx, item.id, variant, i, degen, tps);
i += 1;
}
}
case (ast::item_const(_, ?expr)) {
trans_const(cx.ccx, expr, item.id);
}
case (_) {/* fall through */ }
}
}
// Translate a module. Doing this amounts to translating the items in the
// module; there ends up being no artifact (aside from linkage names) of
// separate modules in the compiled program. That's because modules exist
// only as a convenience for humans working with the code, to organize names
// and control visibility.
fn trans_mod(@local_ctxt cx, &ast::_mod m) {
for (@ast::item item in m.items) { trans_item(cx, *item); }
}
fn get_pair_fn_ty(TypeRef llpairty) -> TypeRef {
// Bit of a kludge: pick the fn typeref out of the pair.
ret struct_elt(llpairty, 0u);
}
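// Declares a function and its paired fn-pair global from the type recorded
// for the given node id; the _full variant below lets the caller supply the
// node's type explicitly (used for resource destructors).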
fn decl_fn_and_pair(&@crate_ctxt ccx, &span sp, &vec[str] path, str flav,
vec[ast::ty_param] ty_params, ast::node_id node_id) {
decl_fn_and_pair_full(ccx, sp, path, flav, ty_params, node_id,
node_id_type(ccx, node_id));
}
fn decl_fn_and_pair_full(&@crate_ctxt ccx, &span sp, &vec[str] path, str flav,
vec[ast::ty_param] ty_params, ast::node_id node_id,
ty::t node_type) {
auto llfty;
alt (ty::struct(ccx.tcx, node_type)) {
case (ty::ty_fn(?proto, ?inputs, ?output, _, _)) {
llfty =
type_of_fn(ccx, sp, proto, inputs, output,
vec::len[ast::ty_param](ty_params));
}
case (_) {
ccx.sess.bug("decl_fn_and_pair(): fn item doesn't have fn type!");
}
}
let bool is_main =
str::eq(vec::top(path), "main") && !ccx.sess.get_opts().shared;
// Declare the function itself.
let str s =
if (is_main) {
"_rust_main"
} else { mangle_internal_name_by_path(ccx, path) };
let ValueRef llfn = decl_internal_fastcall_fn(ccx.llmod, s, llfty);
// Declare the global constant pair that points to it.
let str ps = mangle_exported_name(ccx, path, node_type);
register_fn_pair(ccx, ps, llfty, llfn, node_id);
if (is_main) {
if (ccx.main_fn != none[ValueRef]) {
ccx.sess.span_fatal(sp, "multiple 'main' functions");
}
llvm::LLVMSetLinkage(llfn,
lib::llvm::LLVMExternalLinkage as llvm::Linkage);
ccx.main_fn = some(llfn);
}
}
// Create a closure: a pair containing (1) a ValueRef, pointing to where the
// fn's definition is in the executable we're creating, and (2) a pointer to
// space for the function's environment.
fn create_fn_pair(&@crate_ctxt cx, str ps, TypeRef llfnty, ValueRef llfn,
bool external) -> ValueRef {
auto gvar =
llvm::LLVMAddGlobal(cx.llmod, T_fn_pair(cx.tn, llfnty), str::buf(ps));
auto pair = C_struct([llfn, C_null(T_opaque_closure_ptr(cx.tn))]);
llvm::LLVMSetInitializer(gvar, pair);
llvm::LLVMSetGlobalConstant(gvar, True);
if (!external) {
llvm::LLVMSetLinkage(gvar,
lib::llvm::LLVMInternalLinkage as llvm::Linkage);
}
ret gvar;
}
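// Creates the fn pair for a declared function and records the function, its
// symbol, and the pair in the crate context's tables, keyed by node id.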
fn register_fn_pair(&@crate_ctxt cx, str ps, TypeRef llfnty, ValueRef llfn,
ast::node_id id) {
// FIXME: We should also hide the unexported pairs in crates.
auto gvar =
create_fn_pair(cx, ps, llfnty, llfn, cx.sess.get_opts().shared);
cx.item_ids.insert(id, llfn);
cx.item_symbols.insert(id, ps);
cx.fn_pairs.insert(id, gvar);
}
// Returns the number of type parameters that the given native function has.
fn native_fn_ty_param_count(&@crate_ctxt cx, ast::node_id id) -> uint {
auto count;
auto native_item = alt (cx.ast_map.find(id)) {
case (some(ast_map::node_native_item(?i))) { i }
};
alt (native_item.node) {
case (ast::native_item_ty) {
cx.sess.bug("decl_native_fn_and_pair(): native fn isn't " +
"actually a fn");
}
case (ast::native_item_fn(_, _, ?tps)) {
count = vec::len[ast::ty_param](tps);
}
}
ret count;
}
fn native_fn_wrapper_type(&@crate_ctxt cx, &span sp, uint ty_param_count,
ty::t x) -> TypeRef {
alt (ty::struct(cx.tcx, x)) {
case (ty::ty_native_fn(?abi, ?args, ?out)) {
ret type_of_fn(cx, sp, ast::proto_fn, args, out, ty_param_count);
}
}
}
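// Declares a native function and a Rust-callable wrapper for it, then
// builds the wrapper's body: depending on the native ABI, it forwards the
// task pointer and type descriptors, converts arguments (to i32-sized
// values where required), calls the native function, and stores the result
// through the return pointer.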
fn decl_native_fn_and_pair(&@crate_ctxt ccx, &span sp, vec[str] path,
str name, ast::node_id id) {
auto num_ty_param = native_fn_ty_param_count(ccx, id);
// Declare the wrapper.
auto t = node_id_type(ccx, id);
auto wrapper_type = native_fn_wrapper_type(ccx, sp, num_ty_param, t);
let str s = mangle_internal_name_by_path(ccx, path);
let ValueRef wrapper_fn =
decl_internal_fastcall_fn(ccx.llmod, s, wrapper_type);
// Declare the global constant pair that points to it.
let str ps = mangle_exported_name(ccx, path, node_id_type(ccx, id));
register_fn_pair(ccx, ps, wrapper_type, wrapper_fn, id);
// Build the wrapper.
auto fcx = new_fn_ctxt(new_local_ctxt(ccx), sp, wrapper_fn);
auto bcx = new_top_block_ctxt(fcx);
auto lltop = bcx.llbb;
// Declare the function itself.
auto fn_type = node_id_type(ccx, id); // NB: has no type params
auto abi = ty::ty_fn_abi(ccx.tcx, fn_type);
// FIXME: If the returned type is not nil, then we assume it's 32 bits
// wide. This is obviously wildly unsafe. We should have a better FFI
// that allows types of different sizes to be returned.
auto rty_is_nil =
ty::type_is_nil(ccx.tcx, ty::ty_fn_ret(ccx.tcx, fn_type));
auto pass_task;
auto cast_to_i32;
alt (abi) {
case (ast::native_abi_rust) { pass_task = true; cast_to_i32 = true; }
case (ast::native_abi_rust_intrinsic) {
pass_task = true;
cast_to_i32 = false;
}
case (ast::native_abi_cdecl) {
pass_task = false;
cast_to_i32 = true;
}
case (ast::native_abi_llvm) {
pass_task = false;
cast_to_i32 = false;
}
}
auto lltaskptr;
if (cast_to_i32) {
lltaskptr = vp2i(bcx, fcx.lltaskptr);
} else { lltaskptr = fcx.lltaskptr; }
let vec[ValueRef] call_args = [];
if (pass_task) { call_args += [lltaskptr]; }
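    // Skip the three implicit leading args of the Rust convention (return
    // pointer, task pointer, environment); the wrapper's real parameters
    // start after them.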
auto arg_n = 3u;
for each (uint i in uint::range(0u, num_ty_param)) {
auto llarg = llvm::LLVMGetParam(fcx.llfn, arg_n);
fcx.lltydescs += [llarg];
assert (llarg as int != 0);
if (cast_to_i32) {
call_args += [vp2i(bcx, llarg)];
} else { call_args += [llarg]; }
arg_n += 1u;
}
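    // Converts a by-value argument to the i32-sized representation expected
    // across the native boundary: integral values are zero-extended or
    // truncated, floats are converted, and anything else is passed as a
    // pointer-sized integer.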
fn convert_arg_to_i32(&@block_ctxt cx, ValueRef v, ty::t t, ty::mode mode)
-> ValueRef {
if (mode == ty::mo_val) {
if (ty::type_is_integral(cx.fcx.lcx.ccx.tcx, t)) {
auto lldsttype = T_int();
auto llsrctype = type_of(cx.fcx.lcx.ccx, cx.sp, t);
if (llvm::LLVMGetIntTypeWidth(lldsttype) >
llvm::LLVMGetIntTypeWidth(llsrctype)) {
ret cx.build.ZExtOrBitCast(v, T_int());
}
ret cx.build.TruncOrBitCast(v, T_int());
}
if (ty::type_is_fp(cx.fcx.lcx.ccx.tcx, t)) {
ret cx.build.FPToSI(v, T_int());
}
}
ret vp2i(cx, v);
}
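    // Handles the ABIs that map directly onto a C call (llvm and
    // rust-intrinsic): declares the native function with the collected
    // argument types and calls it, returning the result along with the
    // return pointer to store it through.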
fn trans_simple_native_abi(&@block_ctxt bcx, str name,
&mutable vec[ValueRef] call_args,
ty::t fn_type, uint first_arg_n) ->
tup(ValueRef, ValueRef) {
let vec[TypeRef] call_arg_tys = [];
for (ValueRef arg in call_args) { call_arg_tys += [val_ty(arg)]; }
auto llnativefnty =
T_fn(call_arg_tys,
type_of(bcx.fcx.lcx.ccx, bcx.sp,
ty::ty_fn_ret(bcx.fcx.lcx.ccx.tcx, fn_type)));
auto llnativefn =
get_extern_fn(bcx.fcx.lcx.ccx.externs, bcx.fcx.lcx.ccx.llmod,
name, lib::llvm::LLVMCCallConv, llnativefnty);
auto r = bcx.build.Call(llnativefn, call_args);
auto rptr = bcx.fcx.llretptr;
ret tup(r, rptr);
}
auto args = ty::ty_fn_args(ccx.tcx, fn_type);
// Build up the list of arguments.
let vec[tup(ValueRef, ty::t)] drop_args = [];
auto i = arg_n;
for (ty::arg arg in args) {
auto llarg = llvm::LLVMGetParam(fcx.llfn, i);
assert (llarg as int != 0);
if (cast_to_i32) {
auto llarg_i32 = convert_arg_to_i32(bcx, llarg, arg.ty, arg.mode);
call_args += [llarg_i32];
} else { call_args += [llarg]; }
if (arg.mode == ty::mo_val) { drop_args += [tup(llarg, arg.ty)]; }
i += 1u;
}
auto r;
auto rptr;
alt (abi) {
case (ast::native_abi_llvm) {
auto result =
trans_simple_native_abi(bcx, name, call_args, fn_type, arg_n);
r = result._0;
rptr = result._1;
}
case (ast::native_abi_rust_intrinsic) {
auto external_name = "rust_intrinsic_" + name;
auto result =
trans_simple_native_abi(bcx, external_name, call_args,
fn_type, arg_n);
r = result._0;
rptr = result._1;
}
case (_) {
r =
trans_native_call(bcx.build, ccx.glues, lltaskptr,
ccx.externs, ccx.tn, ccx.llmod, name,
pass_task, call_args);
rptr = bcx.build.BitCast(fcx.llretptr, T_ptr(T_i32()));
}
}
// We don't store the return value if it's nil, to avoid stomping on a nil
// pointer. This is the only concession made to non-i32 return values. See
// the FIXME above.
if (!rty_is_nil) { bcx.build.Store(r, rptr); }
for (tup(ValueRef, ty::t) d in drop_args) {
bcx = drop_ty(bcx, d._0, d._1).bcx;
}
bcx.build.RetVoid();
finish_fn(fcx, lltop);
}
fn item_path(&@ast::item item) -> vec[str] { ret [item.ident]; }
fn collect_native_item(@crate_ctxt ccx, &@ast::native_item i, &vec[str] pt,
&vt[vec[str]] v) {
alt (i.node) {
case (ast::native_item_fn(_, _, _)) {
if (!ccx.obj_methods.contains_key(i.id)) {
decl_native_fn_and_pair(ccx, i.span, pt, i.ident, i.id);
}
}
case (_) {}
}
}
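// First collection pass over items: declares the globals for constant items
// so later passes can refer to them.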
fn collect_item_1(@crate_ctxt ccx, &@ast::item i, &vec[str] pt,
&vt[vec[str]] v) {
visit::visit_item(i, pt + item_path(i), v);
alt (i.node) {
case (ast::item_const(_, _)) {
auto typ = node_id_type(ccx, i.id);
auto g =
llvm::LLVMAddGlobal(ccx.llmod, type_of(ccx, i.span, typ),
str::buf(ccx.names.next(i.ident)));
llvm::LLVMSetLinkage(g,
lib::llvm::LLVMInternalLinkage as
llvm::Linkage);
ccx.consts.insert(i.id, g);
}
case (_) { }
}
}
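// Second collection pass: declares fn pairs for ordinary functions, object
// constructors, and resource constructors/destructors, and records object
// method ids so the methods aren't redeclared as ordinary functions.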
fn collect_item_2(&@crate_ctxt ccx, &@ast::item i, &vec[str] pt,
&vt[vec[str]] v) {
auto new_pt = pt + item_path(i);
visit::visit_item(i, new_pt, v);
alt (i.node) {
case (ast::item_fn(?f, ?tps)) {
if (!ccx.obj_methods.contains_key(i.id)) {
decl_fn_and_pair(ccx, i.span, new_pt, "fn", tps, i.id);
}
}
case (ast::item_obj(?ob, ?tps, ?ctor_id)) {
decl_fn_and_pair(ccx, i.span, new_pt, "obj_ctor", tps, ctor_id);
for (@ast::method m in ob.methods) {
ccx.obj_methods.insert(m.node.id, ());
}
}
case (ast::item_res(_, ?dtor_id, ?tps, ?ctor_id)) {
decl_fn_and_pair(ccx, i.span, new_pt, "res_ctor", tps, ctor_id);
// Note that the destructor is associated with the item's id, not
// the dtor_id. This is a bit counter-intuitive, but simplifies
// ty_res, which would have to carry around two def_ids otherwise
// -- one to identify the type, and one to find the dtor symbol.
decl_fn_and_pair_full(ccx, i.span, new_pt, "res_dtor", tps, i.id,
node_id_type(ccx, dtor_id));
}
case (_) { }
}
}
fn collect_items(&@crate_ctxt ccx, @ast::crate crate) {
auto visitor0 = visit::default_visitor();
auto visitor1 =
@rec(visit_native_item=bind collect_native_item(ccx, _, _, _),
visit_item=bind collect_item_1(ccx, _, _, _) with *visitor0);
auto visitor2 =
@rec(visit_item=bind collect_item_2(ccx, _, _, _) with *visitor0);
visit::visit_crate(*crate, [], visit::vtor(visitor1));
visit::visit_crate(*crate, [], visit::vtor(visitor2));
}
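// Declares fn pairs for the constructors of tag variants that take
// arguments; nullary variants are translated as constants instead (see
// trans_constant).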
fn collect_tag_ctor(@crate_ctxt ccx, &@ast::item i, &vec[str] pt,
&vt[vec[str]] v) {
auto new_pt = pt + item_path(i);
visit::visit_item(i, new_pt, v);
alt (i.node) {
case (ast::item_tag(?variants, ?tps)) {
for (ast::variant variant in variants) {
if (vec::len[ast::variant_arg](variant.node.args) != 0u) {
decl_fn_and_pair(ccx, i.span,
new_pt + [variant.node.name], "tag", tps,
variant.node.id);
}
}
}
case (_) {/* fall through */ }
}
}
fn collect_tag_ctors(&@crate_ctxt ccx, @ast::crate crate) {
auto visitor =
@rec(visit_item=bind collect_tag_ctor(ccx, _, _, _)
with *visit::default_visitor());
visit::visit_crate(*crate, [], visit::vtor(visitor));
}
// The constant translation pass.
fn trans_constant(@crate_ctxt ccx, &@ast::item it, &vec[str] pt,
&vt[vec[str]] v) {
auto new_pt = pt + item_path(it);
visit::visit_item(it, new_pt, v);
alt (it.node) {
case (ast::item_tag(?variants, _)) {
auto i = 0u;
auto n_variants = vec::len[ast::variant](variants);
while (i < n_variants) {
auto variant = variants.(i);
auto p = new_pt + [it.ident, variant.node.name, "discrim"];
auto s = mangle_exported_name(ccx, p, ty::mk_int(ccx.tcx));
auto discrim_gvar =
llvm::LLVMAddGlobal(ccx.llmod, T_int(), str::buf(s));
if (n_variants != 1u) {
llvm::LLVMSetInitializer(discrim_gvar, C_int(i as int));
llvm::LLVMSetGlobalConstant(discrim_gvar, True);
}
ccx.discrims.insert(variant.node.id, discrim_gvar);
ccx.discrim_symbols.insert(variant.node.id, s);
i += 1u;
}
}
case (ast::item_const(_, ?expr)) {
// FIXME: The whole expr-translation system needs cloning to deal
// with consts.
auto v = C_int(1);
ccx.item_ids.insert(it.id, v);
auto s =
mangle_exported_name(ccx, new_pt + [it.ident],
node_id_type(ccx, it.id));
ccx.item_symbols.insert(it.id, s);
}
case (_) { }
}
}
fn trans_constants(&@crate_ctxt ccx, @ast::crate crate) {
auto visitor =
@rec(visit_item=bind trans_constant(ccx, _, _, _)
with *visit::default_visitor());
visit::visit_crate(*crate, [], visit::vtor(visitor));
}
fn vp2i(&@block_ctxt cx, ValueRef v) -> ValueRef {
ret cx.build.PtrToInt(v, T_int());
}
fn vi2p(&@block_ctxt cx, ValueRef v, TypeRef t) -> ValueRef {
ret cx.build.IntToPtr(v, t);
}
fn p2i(ValueRef v) -> ValueRef { ret llvm::LLVMConstPtrToInt(v, T_int()); }
fn i2p(ValueRef v, TypeRef t) -> ValueRef {
ret llvm::LLVMConstIntToPtr(v, t);
}
fn create_typedefs(&@crate_ctxt cx) {
llvm::LLVMAddTypeName(cx.llmod, str::buf("task"), T_task(cx.tn));
llvm::LLVMAddTypeName(cx.llmod, str::buf("tydesc"), T_tydesc(cx.tn));
}
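// Declares the LLVM intrinsics used by generated code (memmove, memset, and
// trap) and returns them in a map keyed by intrinsic name.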
fn declare_intrinsics(ModuleRef llmod) -> hashmap[str, ValueRef] {
let vec[TypeRef] T_memmove32_args =
[T_ptr(T_i8()), T_ptr(T_i8()), T_i32(), T_i32(), T_i1()];
let vec[TypeRef] T_memmove64_args =
[T_ptr(T_i8()), T_ptr(T_i8()), T_i64(), T_i32(), T_i1()];
let vec[TypeRef] T_memset32_args =
[T_ptr(T_i8()), T_i8(), T_i32(), T_i32(), T_i1()];
let vec[TypeRef] T_memset64_args =
[T_ptr(T_i8()), T_i8(), T_i64(), T_i32(), T_i1()];
let vec[TypeRef] T_trap_args = [];
auto memmove32 =
decl_cdecl_fn(llmod, "llvm.memmove.p0i8.p0i8.i32",
T_fn(T_memmove32_args, T_void()));
auto memmove64 =
decl_cdecl_fn(llmod, "llvm.memmove.p0i8.p0i8.i64",
T_fn(T_memmove64_args, T_void()));
auto memset32 =
decl_cdecl_fn(llmod, "llvm.memset.p0i8.i32",
T_fn(T_memset32_args, T_void()));
auto memset64 =
decl_cdecl_fn(llmod, "llvm.memset.p0i8.i64",
T_fn(T_memset64_args, T_void()));
auto trap =
decl_cdecl_fn(llmod, "llvm.trap", T_fn(T_trap_args, T_void()));
auto intrinsics = new_str_hash[ValueRef]();
intrinsics.insert("llvm.memmove.p0i8.p0i8.i32", memmove32);
intrinsics.insert("llvm.memmove.p0i8.p0i8.i64", memmove64);
intrinsics.insert("llvm.memset.p0i8.i32", memset32);
intrinsics.insert("llvm.memset.p0i8.i64", memset64);
intrinsics.insert("llvm.trap", trap);
ret intrinsics;
}
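// Debugging helpers: emit upcalls that trace a string, a word, or a pointer
// at runtime as the generated code executes.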
fn trace_str(&@block_ctxt cx, str s) {
cx.build.Call(cx.fcx.lcx.ccx.upcalls.trace_str,
[cx.fcx.lltaskptr, C_cstr(cx.fcx.lcx.ccx, s)]);
}
fn trace_word(&@block_ctxt cx, ValueRef v) {
cx.build.Call(cx.fcx.lcx.ccx.upcalls.trace_word, [cx.fcx.lltaskptr, v]);
}
fn trace_ptr(&@block_ctxt cx, ValueRef v) {
trace_word(cx, cx.build.PtrToInt(v, T_int()));
}
fn trap(&@block_ctxt bcx) {
let vec[ValueRef] v = [];
alt (bcx.fcx.lcx.ccx.intrinsics.find("llvm.trap")) {
case (some(?x)) { bcx.build.Call(x, v); }
case (_) { bcx.fcx.lcx.ccx.sess.bug("unbound llvm.trap in trap"); }
}
}
fn decl_no_op_type_glue(ModuleRef llmod, type_names tn) -> ValueRef {
auto ty = T_fn([T_taskptr(tn), T_ptr(T_i8())], T_void());
ret decl_fastcall_fn(llmod, abi::no_op_type_glue_name(), ty);
}
fn make_no_op_type_glue(ValueRef fun) {
auto bb_name = str::buf("_rust_no_op_type_glue_bb");
auto llbb = llvm::LLVMAppendBasicBlock(fun, bb_name);
new_builder(llbb).RetVoid();
}
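// Accessors for the interior of a vec: vec_fill loads the vec's fill field
// and vec_p0 returns an i8 pointer to the start of its data.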
fn vec_fill(&@block_ctxt bcx, ValueRef v) -> ValueRef {
ret bcx.build.Load(bcx.build.GEP(v,
[C_int(0), C_int(abi::vec_elt_fill)]));
}
fn vec_p0(&@block_ctxt bcx, ValueRef v) -> ValueRef {
auto p = bcx.build.GEP(v, [C_int(0), C_int(abi::vec_elt_data)]);
ret bcx.build.PointerCast(p, T_ptr(T_i8()));
}
fn make_glues(ModuleRef llmod, &type_names tn) -> @glue_fns {
ret @rec(no_op_type_glue=decl_no_op_type_glue(llmod, tn));
}
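// Emits the module of glue that is shared between crates rather than
// generated per-crate, and writes it to 'output'.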
fn make_common_glue(&session::session sess, &str output) {
    // FIXME: part of this is repetitive; it would probably be a good idea
    // to autogen it.
auto llmod =
llvm::LLVMModuleCreateWithNameInContext(str::buf("rust_out"),
llvm::LLVMGetGlobalContext());
llvm::LLVMSetDataLayout(llmod, str::buf(x86::get_data_layout()));
llvm::LLVMSetTarget(llmod, str::buf(x86::get_target_triple()));
mk_target_data(x86::get_data_layout());
auto tn = mk_type_names();
declare_intrinsics(llmod);
llvm::LLVMSetModuleInlineAsm(llmod, str::buf(x86::get_module_asm()));
make_glues(llmod, tn);
link::write::run_passes(sess, llmod, output);
}
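// Creates the module map: an array of (module name, log-level global)
// pairs, one per module in the crate, terminated by a pair of nulls. The
// runtime walks this map to set per-module log levels from RUST_LOG.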
fn create_module_map(&@crate_ctxt ccx) -> ValueRef {
auto elttype = T_struct([T_int(), T_int()]);
auto maptype = T_array(elttype, ccx.module_data.size() + 1u);
auto map =
llvm::LLVMAddGlobal(ccx.llmod, maptype, str::buf("_rust_mod_map"));
let vec[ValueRef] elts = [];
for each (@tup(str, ValueRef) item in ccx.module_data.items()) {
auto elt = C_struct([p2i(C_cstr(ccx, item._0)), p2i(item._1)]);
vec::push[ValueRef](elts, elt);
}
auto term = C_struct([C_int(0), C_int(0)]);
vec::push[ValueRef](elts, term);
llvm::LLVMSetInitializer(map, C_array(elttype, elts));
ret map;
}
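// Creates the crate map: a pointer to this crate's module map plus a
// null-terminated array of pointers to the crate maps of the external
// crates it depends on. The runtime (rust_crate.cpp) walks these maps to
// apply RUST_LOG settings.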
// FIXME use hashed metadata instead of crate names once we have that
fn create_crate_map(&@crate_ctxt ccx) -> ValueRef {
let vec[ValueRef] subcrates = [];
auto i = 1;
while (ccx.sess.has_external_crate(i)) {
auto name = ccx.sess.get_external_crate(i).name;
auto cr =
llvm::LLVMAddGlobal(ccx.llmod, T_int(),
str::buf("_rust_crate_map_" + name));
vec::push[ValueRef](subcrates, p2i(cr));
i += 1;
}
vec::push[ValueRef](subcrates, C_int(0));
auto mapname;
if (ccx.sess.get_opts().shared) {
mapname = ccx.link_meta.name;
} else { mapname = "toplevel"; }
auto sym_name = "_rust_crate_map_" + mapname;
auto arrtype = T_array(T_int(), vec::len[ValueRef](subcrates));
auto maptype = T_struct([T_int(), arrtype]);
auto map = llvm::LLVMAddGlobal(ccx.llmod, maptype, str::buf(sym_name));
llvm::LLVMSetLinkage(map,
lib::llvm::LLVMExternalLinkage as llvm::Linkage);
llvm::LLVMSetInitializer(map,
C_struct([p2i(create_module_map(ccx)),
C_array(T_int(), subcrates)]));
ret map;
}
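// For library builds, serializes the crate's metadata and embeds it in a
// dedicated section of the emitted module.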
fn write_metadata(&@trans::crate_ctxt cx, &@ast::crate crate) {
if (!cx.sess.get_opts().shared) { ret; }
auto llmeta = C_postr(metadata::encoder::encode_metadata(cx, crate));
auto llconst = trans::C_struct([llmeta]);
auto llglobal =
llvm::LLVMAddGlobal(cx.llmod, trans::val_ty(llconst),
str::buf("rust_metadata"));
llvm::LLVMSetInitializer(llglobal, llconst);
llvm::LLVMSetSection(llglobal, str::buf(x86::get_meta_sect_name()));
}
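// Translates a whole crate: creates the LLVM module and crate context, runs
// the collection and constant passes, translates every module's items, then
// emits the crate map, tydescs, and (for library builds) metadata.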
fn trans_crate(&session::session sess, &@ast::crate crate, &ty::ctxt tcx,
&str output, &ast_map::map amap) -> ModuleRef {
auto llmod =
llvm::LLVMModuleCreateWithNameInContext(str::buf("rust_out"),
llvm::LLVMGetGlobalContext());
llvm::LLVMSetDataLayout(llmod, str::buf(x86::get_data_layout()));
llvm::LLVMSetTarget(llmod, str::buf(x86::get_target_triple()));
auto td = mk_target_data(x86::get_data_layout());
auto tn = mk_type_names();
auto intrinsics = declare_intrinsics(llmod);
auto glues = make_glues(llmod, tn);
auto hasher = ty::hash_ty;
auto eqer = ty::eq_ty;
auto tag_sizes = map::mk_hashmap[ty::t, uint](hasher, eqer);
auto tydescs = map::mk_hashmap[ty::t, @tydesc_info](hasher, eqer);
auto lltypes = map::mk_hashmap[ty::t, TypeRef](hasher, eqer);
auto sha1s = map::mk_hashmap[ty::t, str](hasher, eqer);
auto abbrevs = map::mk_hashmap[ty::t, tyencode::ty_abbrev](hasher, eqer);
auto short_names = map::mk_hashmap[ty::t, str](hasher, eqer);
auto sha = std::sha1::mk_sha1();
auto ccx =
@rec(sess=sess,
llmod=llmod,
td=td,
tn=tn,
externs=new_str_hash[ValueRef](),
intrinsics=intrinsics,
item_ids=new_int_hash[ValueRef](),
ast_map=amap,
item_symbols=new_int_hash[str](),
mutable main_fn=none[ValueRef],
link_meta=link::build_link_meta(sess, *crate, output, sha),
tag_sizes=tag_sizes,
discrims=new_int_hash[ValueRef](),
discrim_symbols=new_int_hash[str](),
fn_pairs=new_int_hash[ValueRef](),
consts=new_int_hash[ValueRef](),
obj_methods=new_int_hash[()](),
tydescs=tydescs,
module_data=new_str_hash[ValueRef](),
lltypes=lltypes,
glues=glues,
names=namegen(0),
sha=sha,
type_sha1s=sha1s,
type_abbrevs=abbrevs,
type_short_names=short_names,
tcx=tcx,
stats=rec(mutable n_static_tydescs=0u,
mutable n_derived_tydescs=0u,
mutable n_glues_created=0u,
mutable n_null_glues=0u,
mutable n_real_glues=0u),
upcalls=upcall::declare_upcalls(tn, llmod));
auto cx = new_local_ctxt(ccx);
create_typedefs(ccx);
collect_items(ccx, crate);
collect_tag_ctors(ccx, crate);
trans_constants(ccx, crate);
trans_mod(cx, crate.node.module);
create_crate_map(ccx);
emit_tydescs(ccx);
// Translate the metadata:
write_metadata(cx.ccx, crate);
if (ccx.sess.get_opts().stats) {
log_err "--- trans stats ---";
log_err #fmt("n_static_tydescs: %u", ccx.stats.n_static_tydescs);
log_err #fmt("n_derived_tydescs: %u", ccx.stats.n_derived_tydescs);
log_err #fmt("n_glues_created: %u", ccx.stats.n_glues_created);
log_err #fmt("n_null_glues: %u", ccx.stats.n_null_glues);
log_err #fmt("n_real_glues: %u", ccx.stats.n_real_glues);
}
ret llmod;
}
//
// Local Variables:
// mode: rust
// fill-column: 78;
// indent-tabs-mode: nil
// c-basic-offset: 4
// buffer-file-coding-system: utf-8-unix
// compile-command: "make -k -C $RBUILD 2>&1 | sed -e 's/\\/x\\//x:\\//g'";
// End:
//