rust/src/librustc_trans/intrinsic.rs

// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
#![allow(non_upper_case_globals)]
use arena::TypedArena;
use intrinsics::{self, Intrinsic};
use libc;
use llvm;
use llvm::{ValueRef, TypeKind};
use rustc::infer;
use rustc::ty::subst;
use rustc::ty::subst::FnSpace;
use abi::{Abi, FnType};
use adt;
use attributes;
use base::*;
use build::*;
use callee::{self, Callee};
use cleanup;
use cleanup::CleanupMethods;
use common::*;
use consts;
use datum::*;
use debuginfo::DebugLoc;
use declare;
use expr;
use glue;
use type_of;
use machine;
use type_::Type;
use rustc::ty::{self, Ty, TypeFoldable};
use Disr;
use rustc::ty::subst::Substs;
use rustc::dep_graph::DepNode;
use rustc::hir;
use syntax::ast;
use syntax::ptr::P;
use syntax::parse::token;
use rustc::lint;
use rustc::session::Session;
use syntax::codemap::{Span, DUMMY_SP};
use std::cmp::Ordering;
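// Maps a "simple" Rust intrinsic (e.g. `sqrtf32`) to the LLVM intrinsic it
// lowers to directly (`llvm.sqrt.f32`); returns `None` for intrinsics that
// need custom translation in `trans_intrinsic_call` below.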
fn get_simple_intrinsic(ccx: &CrateContext, name: &str) -> Option<ValueRef> {
let llvm_name = match name {
"sqrtf32" => "llvm.sqrt.f32",
"sqrtf64" => "llvm.sqrt.f64",
"powif32" => "llvm.powi.f32",
"powif64" => "llvm.powi.f64",
"sinf32" => "llvm.sin.f32",
"sinf64" => "llvm.sin.f64",
"cosf32" => "llvm.cos.f32",
"cosf64" => "llvm.cos.f64",
"powf32" => "llvm.pow.f32",
"powf64" => "llvm.pow.f64",
"expf32" => "llvm.exp.f32",
"expf64" => "llvm.exp.f64",
"exp2f32" => "llvm.exp2.f32",
"exp2f64" => "llvm.exp2.f64",
"logf32" => "llvm.log.f32",
"logf64" => "llvm.log.f64",
"log10f32" => "llvm.log10.f32",
"log10f64" => "llvm.log10.f64",
"log2f32" => "llvm.log2.f32",
"log2f64" => "llvm.log2.f64",
"fmaf32" => "llvm.fma.f32",
"fmaf64" => "llvm.fma.f64",
"fabsf32" => "llvm.fabs.f32",
"fabsf64" => "llvm.fabs.f64",
"copysignf32" => "llvm.copysign.f32",
"copysignf64" => "llvm.copysign.f64",
"floorf32" => "llvm.floor.f32",
"floorf64" => "llvm.floor.f64",
"ceilf32" => "llvm.ceil.f32",
"ceilf64" => "llvm.ceil.f64",
"truncf32" => "llvm.trunc.f32",
"truncf64" => "llvm.trunc.f64",
"rintf32" => "llvm.rint.f32",
"rintf64" => "llvm.rint.f64",
"nearbyintf32" => "llvm.nearbyint.f32",
"nearbyintf64" => "llvm.nearbyint.f64",
"roundf32" => "llvm.round.f32",
"roundf64" => "llvm.round.f64",
"assume" => "llvm.assume",
_ => return None
};
Some(ccx.get_intrinsic(&llvm_name))
}
pub fn span_transmute_size_error(a: &Session, b: Span, msg: &str) {
span_err!(a, b, E0512, "{}", msg);
}
/// Performs late verification that intrinsics are used correctly. At present,
/// the only intrinsic that needs such verification is `transmute`.
pub fn check_intrinsics(ccx: &CrateContext) {
let _task = ccx.tcx().dep_graph.in_task(DepNode::IntrinsicUseCheck);
let mut last_failing_id = None;
for transmute_restriction in ccx.tcx().transmute_restrictions.borrow().iter() {
// Sometimes, a single call to transmute will push multiple
// type pairs to test in order to exhaustively check the
// possibilities around a type parameter. If one of those fails,
// there is no sense in reporting errors on the others.
if last_failing_id == Some(transmute_restriction.id) {
continue;
}
debug!("transmute_restriction: {:?}", transmute_restriction);
assert!(!transmute_restriction.substituted_from.has_param_types());
assert!(!transmute_restriction.substituted_to.has_param_types());
let llfromtype = type_of::sizing_type_of(ccx,
transmute_restriction.substituted_from);
let lltotype = type_of::sizing_type_of(ccx,
transmute_restriction.substituted_to);
let from_type_size = machine::llbitsize_of_real(ccx, llfromtype);
let to_type_size = machine::llbitsize_of_real(ccx, lltotype);
if let ty::TyFnDef(..) = transmute_restriction.substituted_from.sty {
if to_type_size == machine::llbitsize_of_real(ccx, ccx.int_type()) {
// FIXME #19925 Remove this warning after a release cycle.
lint::raw_emit_lint(&ccx.tcx().sess,
&ccx.tcx().sess.lint_store.borrow(),
lint::builtin::TRANSMUTE_FROM_FN_ITEM_TYPES,
(lint::Warn, lint::LintSource::Default),
Some(transmute_restriction.span),
&format!("`{}` is now zero-sized and has to be cast \
to a pointer before transmuting to `{}`",
transmute_restriction.substituted_from,
transmute_restriction.substituted_to));
continue;
}
}
if from_type_size != to_type_size {
last_failing_id = Some(transmute_restriction.id);
if transmute_restriction.original_from != transmute_restriction.substituted_from {
span_transmute_size_error(ccx.sess(), transmute_restriction.span,
&format!("transmute called with differently sized types: \
{} (could be {} bits) to {} (could be {} bits)",
transmute_restriction.original_from,
from_type_size,
transmute_restriction.original_to,
to_type_size));
} else {
span_transmute_size_error(ccx.sess(), transmute_restriction.span,
&format!("transmute called with differently sized types: \
{} ({} bits) to {} ({} bits)",
transmute_restriction.original_from,
from_type_size,
transmute_restriction.original_to,
to_type_size));
}
}
}
ccx.sess().abort_if_errors();
}
/// Remember to add all intrinsics here, in librustc_typeck/check/mod.rs,
/// and in libcore/intrinsics.rs; if you need access to any LLVM intrinsics,
/// add them to librustc_trans/trans/context.rs
pub fn trans_intrinsic_call<'a, 'blk, 'tcx>(mut bcx: Block<'blk, 'tcx>,
callee_ty: Ty<'tcx>,
fn_ty: &FnType,
args: callee::CallArgs<'a, 'tcx>,
dest: expr::Dest,
call_debug_location: DebugLoc)
-> Result<'blk, 'tcx> {
let fcx = bcx.fcx;
let ccx = fcx.ccx;
let tcx = bcx.tcx();
let _icx = push_ctxt("trans_intrinsic_call");
let (def_id, substs, sig) = match callee_ty.sty {
ty::TyFnDef(def_id, substs, fty) => {
let sig = tcx.erase_late_bound_regions(&fty.sig);
(def_id, substs, infer::normalize_associated_type(tcx, &sig))
}
_ => bug!("expected fn item type, found {}", callee_ty)
};
let arg_tys = sig.inputs;
let ret_ty = sig.output;
let name = tcx.item_name(def_id).as_str();
let span = match call_debug_location {
DebugLoc::At(_, span) | DebugLoc::ScopeAt(_, span) => span,
DebugLoc::None => {
span_bug!(fcx.span.unwrap_or(DUMMY_SP),
"intrinsic `{}` called with missing span", name);
}
};
let cleanup_scope = fcx.push_custom_cleanup_scope();
// For `transmute` we can just trans the input expr directly into dest
if name == "transmute" {
let llret_ty = type_of::type_of(ccx, ret_ty.unwrap());
match args {
callee::ArgExprs(arg_exprs) => {
assert_eq!(arg_exprs.len(), 1);
let (in_type, out_type) = (*substs.types.get(FnSpace, 0),
*substs.types.get(FnSpace, 1));
let llintype = type_of::type_of(ccx, in_type);
let llouttype = type_of::type_of(ccx, out_type);
let in_type_size = machine::llbitsize_of_real(ccx, llintype);
let out_type_size = machine::llbitsize_of_real(ccx, llouttype);
if let ty::TyFnDef(def_id, substs, _) = in_type.sty {
if out_type_size != 0 {
// FIXME #19925 Remove this hack after a release cycle.
let _ = unpack_datum!(bcx, expr::trans(bcx, &arg_exprs[0]));
let llfn = Callee::def(ccx, def_id, substs).reify(ccx).val;
let llfnty = val_ty(llfn);
let llresult = match dest {
expr::SaveIn(d) => d,
expr::Ignore => alloc_ty(bcx, out_type, "ret")
};
Store(bcx, llfn, PointerCast(bcx, llresult, llfnty.ptr_to()));
if dest == expr::Ignore {
bcx = glue::drop_ty(bcx, llresult, out_type,
call_debug_location);
}
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return Result::new(bcx, llresult);
}
}
// This should be caught by the intrinsicck pass
assert_eq!(in_type_size, out_type_size);
let nonpointer_nonaggregate = |llkind: TypeKind| -> bool {
use llvm::TypeKind::*;
match llkind {
Half | Float | Double | X86_FP80 | FP128 |
PPC_FP128 | Integer | Vector | X86_MMX => true,
_ => false
}
};
// An approximation to which types can be directly cast via
// LLVM's bitcast. This doesn't cover pointer -> pointer casts,
// but does, importantly, cover SIMD types.
let in_kind = llintype.kind();
let ret_kind = llret_ty.kind();
let bitcast_compatible =
(nonpointer_nonaggregate(in_kind) && nonpointer_nonaggregate(ret_kind)) || {
in_kind == TypeKind::Pointer && ret_kind == TypeKind::Pointer
};
let dest = if bitcast_compatible {
// if we're here, the type is scalar-like (a primitive, a
// SIMD type or a pointer), and so can be handled as a
// by-value ValueRef and can also be directly bitcast to the
// target type. Doing this special case makes conversions
// like `u32x4` -> `u64x2` much nicer for LLVM and so more
// efficient (these are done efficiently implicitly in C
// with the `__m128i` type and so this means Rust doesn't
// lose out there).
let expr = &arg_exprs[0];
let datum = unpack_datum!(bcx, expr::trans(bcx, expr));
let datum = unpack_datum!(bcx, datum.to_rvalue_datum(bcx, "transmute_temp"));
let val = if datum.kind.is_by_ref() {
load_ty(bcx, datum.val, datum.ty)
} else {
from_immediate(bcx, datum.val)
};
let cast_val = BitCast(bcx, val, llret_ty);
match dest {
expr::SaveIn(d) => {
// this often occurs in a sequence like `Store(val,
// d); val2 = Load(d)`, so it disappears easily.
Store(bcx, cast_val, d);
}
expr::Ignore => {}
}
dest
} else {
// The types are too complicated to do with a by-value
// bitcast, so pointer cast instead. We need to cast the
// dest so the types work out.
let dest = match dest {
expr::SaveIn(d) => expr::SaveIn(PointerCast(bcx, d, llintype.ptr_to())),
expr::Ignore => expr::Ignore
};
bcx = expr::trans_into(bcx, &arg_exprs[0], dest);
dest
};
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return match dest {
expr::SaveIn(d) => Result::new(bcx, d),
expr::Ignore => Result::new(bcx, C_undef(llret_ty.ptr_to()))
};
}
_ => {
bug!("expected expr as argument for transmute");
}
}
}
// For `move_val_init` we can evaluate the destination address
// (the first argument) and then trans the source value (the
// second argument) directly into the resulting destination
// address.
if name == "move_val_init" {
if let callee::ArgExprs(ref exprs) = args {
let (dest_expr, source_expr) = if exprs.len() != 2 {
bug!("expected two exprs as arguments for `move_val_init` intrinsic");
} else {
(&exprs[0], &exprs[1])
};
// evaluate destination address
let dest_datum = unpack_datum!(bcx, expr::trans(bcx, dest_expr));
let dest_datum = unpack_datum!(
bcx, dest_datum.to_rvalue_datum(bcx, "arg"));
let dest_datum = unpack_datum!(
bcx, dest_datum.to_appropriate_datum(bcx));
// `expr::trans_into(bcx, expr, dest)` is equiv to
//
// `trans(bcx, expr).store_to_dest(dest)`,
//
// which for `dest == expr::SaveIn(addr)`, is equivalent to:
//
// `trans(bcx, expr).store_to(bcx, addr)`.
let lldest = expr::Dest::SaveIn(dest_datum.val);
bcx = expr::trans_into(bcx, source_expr, lldest);
let llresult = C_nil(ccx);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
return Result::new(bcx, llresult);
} else {
bug!("expected two exprs as arguments for `move_val_init` intrinsic");
}
}
// save the actual AST arguments for later (some places need to do
// const-evaluation on them)
let expr_arguments = match args {
callee::ArgExprs(args) => Some(args),
_ => None,
};
// Push the arguments.
let mut llargs = Vec::new();
bcx = callee::trans_args(bcx,
Abi::RustIntrinsic,
fn_ty,
&mut callee::Intrinsic,
args,
&mut llargs,
cleanup::CustomScope(cleanup_scope));
fcx.scopes.borrow_mut().last_mut().unwrap().drop_non_lifetime_clean();
// These are the only intrinsic functions that diverge.
if name == "abort" {
let llfn = ccx.get_intrinsic(&("llvm.trap"));
Call(bcx, llfn, &[], call_debug_location);
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_undef(Type::nil(ccx).ptr_to()));
} else if &name[..] == "unreachable" {
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Unreachable(bcx);
return Result::new(bcx, C_nil(ccx));
}
let ret_ty = match ret_ty {
ty::FnConverging(ret_ty) => ret_ty,
ty::FnDiverging => bug!()
};
let llret_ty = type_of::type_of(ccx, ret_ty);
// Get location to store the result. If the user does
// not care about the result, just make a stack slot
let llresult = match dest {
expr::SaveIn(d) => d,
expr::Ignore => {
if !type_is_zero_size(ccx, ret_ty) {
let llresult = alloc_ty(bcx, ret_ty, "intrinsic_result");
call_lifetime_start(bcx, llresult);
llresult
} else {
C_undef(llret_ty.ptr_to())
}
}
};
let simple = get_simple_intrinsic(ccx, &name);
let llval = match (simple, &name[..]) {
(Some(llfn), _) => {
Call(bcx, llfn, &llargs, call_debug_location)
}
(_, "try") => {
bcx = try_intrinsic(bcx, llargs[0], llargs[1], llargs[2], llresult,
call_debug_location);
C_nil(ccx)
}
(_, "breakpoint") => {
let llfn = ccx.get_intrinsic(&("llvm.debugtrap"));
Call(bcx, llfn, &[], call_debug_location)
}
(_, "size_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
}
(_, "size_of_val") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_sized(tcx, tp_ty) {
let (llsize, _) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
llsize
} else {
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llsize_of_alloc(ccx, lltp_ty))
}
}
(_, "min_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
C_uint(ccx, type_of::align_of(ccx, tp_ty))
}
(_, "min_align_of_val") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_sized(tcx, tp_ty) {
let (_, llalign) =
glue::size_and_align_of_dst(&bcx.build(), tp_ty, llargs[1]);
llalign
} else {
C_uint(ccx, type_of::align_of(ccx, tp_ty))
}
}
(_, "pref_align_of") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let lltp_ty = type_of::type_of(ccx, tp_ty);
C_uint(ccx, machine::llalign_of_pref(ccx, lltp_ty))
}
(_, "drop_in_place") => {
let tp_ty = *substs.types.get(FnSpace, 0);
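// For unsized types the fat pointer arrives as two separate values, so it is
// reassembled into a scratch slot before `drop_ty` is called on it.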
let ptr = if type_is_sized(tcx, tp_ty) {
llargs[0]
} else {
let scratch = rvalue_scratch_datum(bcx, tp_ty, "tmp");
Store(bcx, llargs[0], expr::get_dataptr(bcx, scratch.val));
Store(bcx, llargs[1], expr::get_meta(bcx, scratch.val));
fcx.schedule_lifetime_end(cleanup::CustomScope(cleanup_scope), scratch.val);
scratch.val
};
glue::drop_ty(bcx, ptr, tp_ty, call_debug_location);
C_nil(ccx)
}
(_, "type_name") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let ty_name = token::intern_and_get_ident(&tp_ty.to_string());
C_str_slice(ccx, ty_name)
}
(_, "type_id") => {
let hash = ccx.tcx().hash_crate_independent(*substs.types.get(FnSpace, 0),
&ccx.link_meta().crate_hash);
C_u64(ccx, hash)
}
(_, "init_dropped") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_zero_size(ccx, tp_ty) {
drop_done_fill_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
}
(_, "init") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if !type_is_zero_size(ccx, tp_ty) {
// Just zero out the stack slot. (See comment on base::memzero for explanation)
init_zero_mem(bcx, llresult, tp_ty);
}
C_nil(ccx)
}
// Effectively no-ops
(_, "uninit") | (_, "forget") => {
C_nil(ccx)
}
(_, "needs_drop") => {
let tp_ty = *substs.types.get(FnSpace, 0);
C_bool(ccx, bcx.fcx.type_needs_drop(tp_ty))
}
(_, "offset") => {
let ptr = llargs[0];
let offset = llargs[1];
InBoundsGEP(bcx, ptr, &[offset])
}
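// Unlike `offset` above, `arith_offset` uses a plain (non-inbounds) GEP, so
// the result is allowed to wrap and to leave the original allocation.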
(_, "arith_offset") => {
let ptr = llargs[0];
let offset = llargs[1];
GEP(bcx, ptr, &[offset])
}
(_, "copy_nonoverlapping") => {
copy_intrinsic(bcx,
false,
false,
*substs.types.get(FnSpace, 0),
llargs[1],
llargs[0],
llargs[2],
call_debug_location)
}
(_, "copy") => {
copy_intrinsic(bcx,
true,
false,
*substs.types.get(FnSpace, 0),
llargs[1],
llargs[0],
llargs[2],
call_debug_location)
}
(_, "write_bytes") => {
memset_intrinsic(bcx,
false,
*substs.types.get(FnSpace, 0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
}
(_, "volatile_copy_nonoverlapping_memory") => {
copy_intrinsic(bcx,
false,
true,
*substs.types.get(FnSpace, 0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
}
(_, "volatile_copy_memory") => {
copy_intrinsic(bcx,
true,
true,
*substs.types.get(FnSpace, 0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
}
(_, "volatile_set_memory") => {
memset_intrinsic(bcx,
true,
*substs.types.get(FnSpace, 0),
llargs[0],
llargs[1],
llargs[2],
call_debug_location)
}
(_, "volatile_load") => {
let tp_ty = *substs.types.get(FnSpace, 0);
let mut ptr = llargs[0];
if let Some(ty) = fn_ty.ret.cast {
ptr = PointerCast(bcx, ptr, ty.ptr_to());
}
let load = VolatileLoad(bcx, ptr);
unsafe {
llvm::LLVMSetAlignment(load, type_of::align_of(ccx, tp_ty));
}
to_immediate(bcx, load, tp_ty)
},
(_, "volatile_store") => {
let tp_ty = *substs.types.get(FnSpace, 0);
if type_is_fat_ptr(bcx.tcx(), tp_ty) {
VolatileStore(bcx, llargs[1], expr::get_dataptr(bcx, llargs[0]));
VolatileStore(bcx, llargs[2], expr::get_meta(bcx, llargs[0]));
} else {
let val = if fn_ty.args[1].is_indirect() {
Load(bcx, llargs[1])
} else {
from_immediate(bcx, llargs[1])
};
let ptr = PointerCast(bcx, llargs[0], val_ty(val).ptr_to());
let store = VolatileStore(bcx, val, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, tp_ty));
}
}
C_nil(ccx)
},
(_, "ctlz") | (_, "cttz") | (_, "ctpop") | (_, "bswap") |
(_, "add_with_overflow") | (_, "sub_with_overflow") | (_, "mul_with_overflow") |
(_, "overflowing_add") | (_, "overflowing_sub") | (_, "overflowing_mul") |
(_, "unchecked_div") | (_, "unchecked_rem") => {
let sty = &arg_tys[0].sty;
match int_type_width_signed(sty, ccx) {
Some((width, signed)) =>
match &*name {
"ctlz" => count_zeros_intrinsic(bcx, &format!("llvm.ctlz.i{}", width),
llargs[0], call_debug_location),
"cttz" => count_zeros_intrinsic(bcx, &format!("llvm.cttz.i{}", width),
llargs[0], call_debug_location),
"ctpop" => Call(bcx, ccx.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
&llargs, call_debug_location),
"bswap" => {
if width == 8 {
llargs[0] // byte-swapping a u8/i8 is a no-op
} else {
Call(bcx, ccx.get_intrinsic(&format!("llvm.bswap.i{}", width)),
&llargs, call_debug_location)
}
}
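// e.g. `add_with_overflow` on an `i32` maps to `llvm.sadd.with.overflow.i32`,
// whose `{ i32, i1 }` result is unpacked into the out-pointer below.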
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" => {
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
if signed { 's' } else { 'u' },
&name[..3], width);
with_overflow_intrinsic(bcx, &intrinsic, llargs[0], llargs[1], llresult,
call_debug_location)
},
"overflowing_add" => Add(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_sub" => Sub(bcx, llargs[0], llargs[1], call_debug_location),
"overflowing_mul" => Mul(bcx, llargs[0], llargs[1], call_debug_location),
"unchecked_div" =>
if signed {
SDiv(bcx, llargs[0], llargs[1], call_debug_location)
} else {
UDiv(bcx, llargs[0], llargs[1], call_debug_location)
},
"unchecked_rem" =>
if signed {
SRem(bcx, llargs[0], llargs[1], call_debug_location)
} else {
URem(bcx, llargs[0], llargs[1], call_debug_location)
},
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
C_nil(ccx)
}
}
},
(_, "fadd_fast") | (_, "fsub_fast") | (_, "fmul_fast") | (_, "fdiv_fast") |
(_, "frem_fast") => {
let sty = &arg_tys[0].sty;
match float_type_width(sty) {
Some(_width) =>
match &*name {
"fadd_fast" => FAddFast(bcx, llargs[0], llargs[1], call_debug_location),
"fsub_fast" => FSubFast(bcx, llargs[0], llargs[1], call_debug_location),
"fmul_fast" => FMulFast(bcx, llargs[0], llargs[1], call_debug_location),
"fdiv_fast" => FDivFast(bcx, llargs[0], llargs[1], call_debug_location),
"frem_fast" => FRemFast(bcx, llargs[0], llargs[1], call_debug_location),
_ => bug!(),
},
None => {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic float type, found `{}`", name, sty));
C_nil(ccx)
}
}
},
(_, "return_address") => {
if !fcx.fn_ty.ret.is_indirect() {
span_err!(tcx.sess, span, E0510,
"invalid use of `return_address` intrinsic: function \
does not use out pointer");
C_null(Type::i8p(ccx))
} else {
PointerCast(bcx, llvm::get_param(fcx.llfn, 0), Type::i8p(ccx))
}
}
(_, "discriminant_value") => {
let val_ty = substs.types.get(FnSpace, 0);
match val_ty.sty {
ty::TyEnum(..) => {
let repr = adt::represent_type(ccx, *val_ty);
adt::trans_get_discr(bcx, &repr, llargs[0],
Some(llret_ty), true)
}
_ => C_null(llret_ty)
}
}
(_, name) if name.starts_with("simd_") => {
generic_simd_intrinsic(bcx, name,
substs,
callee_ty,
expr_arguments,
&llargs,
ret_ty, llret_ty,
call_debug_location,
span)
}
// This requires that atomic intrinsics follow a specific naming pattern:
// "atomic_<operation>[_<ordering>]", and no ordering means SeqCst
(_, name) if name.starts_with("atomic_") => {
let split: Vec<&str> = name.split('_').collect();
let is_cxchg = split[1] == "cxchg" || split[1] == "cxchgweak";
let (order, failorder) = match split.len() {
2 => (llvm::SequentiallyConsistent, llvm::SequentiallyConsistent),
3 => match split[2] {
"unordered" => (llvm::Unordered, llvm::Unordered),
"relaxed" => (llvm::Monotonic, llvm::Monotonic),
"acq" => (llvm::Acquire, llvm::Acquire),
"rel" => (llvm::Release, llvm::Monotonic),
"acqrel" => (llvm::AcquireRelease, llvm::Acquire),
"failrelaxed" if is_cxchg =>
(llvm::SequentiallyConsistent, llvm::Monotonic),
"failacq" if is_cxchg =>
(llvm::SequentiallyConsistent, llvm::Acquire),
_ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
},
4 => match (split[2], split[3]) {
("acq", "failrelaxed") if is_cxchg =>
(llvm::Acquire, llvm::Monotonic),
("acqrel", "failrelaxed") if is_cxchg =>
(llvm::AcquireRelease, llvm::Monotonic),
_ => ccx.sess().fatal("unknown ordering in atomic intrinsic")
},
_ => ccx.sess().fatal("Atomic intrinsic not in correct format"),
};
match split[1] {
"cxchg" | "cxchgweak" => {
let sty = &substs.types.get(FnSpace, 0).sty;
if int_type_width_signed(sty, ccx).is_some() {
let weak = if split[1] == "cxchgweak" { llvm::True } else { llvm::False };
let val = AtomicCmpXchg(bcx, llargs[0], llargs[1], llargs[2],
order, failorder, weak);
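// The cmpxchg result is a `{ T, i1 }` pair: unpack it and widen the
// success flag to a `bool` before writing both fields to the out-pointer.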
let result = ExtractValue(bcx, val, 0);
let success = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
Store(bcx, result, StructGEP(bcx, llresult, 0));
Store(bcx, success, StructGEP(bcx, llresult, 1));
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
}
C_nil(ccx)
}
"load" => {
let sty = &substs.types.get(FnSpace, 0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicLoad(bcx, llargs[0], order)
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
C_nil(ccx)
}
}
"store" => {
let sty = &substs.types.get(FnSpace, 0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicStore(bcx, llargs[1], llargs[0], order);
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
}
C_nil(ccx)
}
"fence" => {
AtomicFence(bcx, order, llvm::CrossThread);
C_nil(ccx)
}
"singlethreadfence" => {
AtomicFence(bcx, order, llvm::SingleThread);
C_nil(ccx)
}
// These are all AtomicRMW ops
op => {
let atom_op = match op {
"xchg" => llvm::AtomicXchg,
"xadd" => llvm::AtomicAdd,
"xsub" => llvm::AtomicSub,
"and" => llvm::AtomicAnd,
"nand" => llvm::AtomicNand,
"or" => llvm::AtomicOr,
"xor" => llvm::AtomicXor,
"max" => llvm::AtomicMax,
"min" => llvm::AtomicMin,
"umax" => llvm::AtomicUMax,
"umin" => llvm::AtomicUMin,
_ => ccx.sess().fatal("unknown atomic operation")
};
let sty = &substs.types.get(FnSpace, 0).sty;
if int_type_width_signed(sty, ccx).is_some() {
AtomicRMW(bcx, atom_op, llargs[0], llargs[1], order)
} else {
span_invalid_monomorphization_error(
tcx.sess, span,
&format!("invalid monomorphization of `{}` intrinsic: \
expected basic integer type, found `{}`", name, sty));
C_nil(ccx)
}
}
}
}
(_, _) => {
let intr = match Intrinsic::find(&name) {
Some(intr) => intr,
None => bug!("unknown intrinsic '{}'", name),
};
fn one<T>(x: Vec<T>) -> T {
assert_eq!(x.len(), 1);
x.into_iter().next().unwrap()
}
fn ty_to_type(ccx: &CrateContext, t: &intrinsics::Type,
any_changes_needed: &mut bool) -> Vec<Type> {
use intrinsics::Type::*;
match *t {
Void => vec![Type::void(ccx)],
Integer(_signed, width, llvm_width) => {
*any_changes_needed |= width != llvm_width;
vec![Type::ix(ccx, llvm_width as u64)]
}
Float(x) => {
match x {
32 => vec![Type::f32(ccx)],
64 => vec![Type::f64(ccx)],
_ => bug!()
}
}
Pointer(ref t, ref llvm_elem, _const) => {
*any_changes_needed |= llvm_elem.is_some();
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(ccx, t,
any_changes_needed));
vec![elem.ptr_to()]
}
Vector(ref t, ref llvm_elem, length) => {
*any_changes_needed |= llvm_elem.is_some();
let t = llvm_elem.as_ref().unwrap_or(t);
let elem = one(ty_to_type(ccx, t,
any_changes_needed));
vec![Type::vector(&elem,
length as u64)]
}
Aggregate(false, ref contents) => {
let elems = contents.iter()
.map(|t| one(ty_to_type(ccx, t, any_changes_needed)))
.collect::<Vec<_>>();
vec![Type::struct_(ccx, &elems, false)]
}
Aggregate(true, ref contents) => {
*any_changes_needed = true;
contents.iter()
.flat_map(|t| ty_to_type(ccx, t, any_changes_needed))
.collect()
}
}
}
// This allows an argument list like `foo, (bar, baz),
// qux` to be converted into `foo, bar, baz, qux`, integer
// arguments to be truncated as needed and pointers to be
// cast.
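// For example, a platform intrinsic that takes a tuple of two SIMD vectors has
// that tuple loaded field-by-field, so the LLVM declaration sees two separate
// vector arguments.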
fn modify_as_needed<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
t: &intrinsics::Type,
arg_type: Ty<'tcx>,
llarg: ValueRef)
-> Vec<ValueRef>
{
match *t {
intrinsics::Type::Aggregate(true, ref contents) => {
// We found a tuple that needs squishing! So
// run over the tuple and load each field.
//
// This assumes the type is "simple", i.e. it has
// no destructor and its contents are SIMD
// vectors or similar.
assert!(!bcx.fcx.type_needs_drop(arg_type));
let repr = adt::represent_type(bcx.ccx(), arg_type);
let repr_ptr = &repr;
let arg = adt::MaybeSizedValue::sized(llarg);
(0..contents.len())
.map(|i| {
Load(bcx, adt::trans_field_ptr(bcx, repr_ptr, arg, Disr(0), i))
})
.collect()
}
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
vec![PointerCast(bcx, llarg,
llvm_elem.ptr_to())]
}
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
let llvm_elem = one(ty_to_type(bcx.ccx(), llvm_elem, &mut false));
vec![BitCast(bcx, llarg,
Type::vector(&llvm_elem, length as u64))]
}
intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
// the LLVM intrinsic uses a smaller integer
// size than the C intrinsic's signature, so
// we have to trim it down here.
vec![Trunc(bcx, llarg, Type::ix(bcx.ccx(), llvm_width as u64))]
}
_ => vec![llarg],
}
}
let mut any_changes_needed = false;
let inputs = intr.inputs.iter()
.flat_map(|t| ty_to_type(ccx, t, &mut any_changes_needed))
.collect::<Vec<_>>();
let mut out_changes = false;
let outputs = one(ty_to_type(ccx, &intr.output, &mut out_changes));
// outputting a flattened aggregate is nonsense
assert!(!out_changes);
let llargs = if !any_changes_needed {
// no aggregates to flatten, so no change needed
llargs
} else {
// there are some aggregates that need to be flattened
// in the LLVM call, so we need to run over the types
// again to find them and extract the arguments
intr.inputs.iter()
.zip(&llargs)
.zip(&arg_tys)
.flat_map(|((t, llarg), ty)| modify_as_needed(bcx, t, ty, *llarg))
.collect()
};
assert_eq!(inputs.len(), llargs.len());
let val = match intr.definition {
intrinsics::IntrinsicDef::Named(name) => {
let f = declare::declare_cfn(ccx,
name,
Type::func(&inputs, &outputs));
Call(bcx, f, &llargs, call_debug_location)
}
};
match *intr.output {
intrinsics::Type::Aggregate(flatten, ref elems) => {
// the output is a tuple so we need to munge it properly
assert!(!flatten);
for i in 0..elems.len() {
let val = ExtractValue(bcx, val, i);
Store(bcx, val, StructGEP(bcx, llresult, i));
}
C_nil(ccx)
}
_ => val,
}
}
};
if val_ty(llval) != Type::void(ccx) &&
machine::llsize_of_alloc(ccx, val_ty(llval)) != 0 {
if let Some(ty) = fn_ty.ret.cast {
let ptr = PointerCast(bcx, llresult, ty.ptr_to());
let store = Store(bcx, llval, ptr);
unsafe {
llvm::LLVMSetAlignment(store, type_of::align_of(ccx, ret_ty));
}
} else {
store_ty(bcx, llval, llresult, ret_ty);
}
}
// If we made a temporary stack slot, let's clean it up
match dest {
expr::Ignore => {
bcx = glue::drop_ty(bcx, llresult, ret_ty, call_debug_location);
call_lifetime_end(bcx, llresult);
}
expr::SaveIn(_) => {}
}
fcx.pop_and_trans_custom_cleanup_scope(bcx, cleanup_scope);
Result::new(bcx, llresult)
}
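// Lowers `copy`, `copy_nonoverlapping` and the `volatile_copy_*` intrinsics to
// `llvm.memmove`/`llvm.memcpy`, scaling the element count by the size of
// `tp_ty` (e.g. copying 4 `u32`s turns into a 16-byte memcpy).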
fn copy_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
allow_overlap: bool,
volatile: bool,
tp_ty: Ty<'tcx>,
dst: ValueRef,
src: ValueRef,
count: ValueRef,
call_debug_location: DebugLoc)
-> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let operation = if allow_overlap {
"memmove"
} else {
"memcpy"
};
let name = format!("llvm.{}.p0i8.p0i8.i{}", operation, int_size);
let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
let src_ptr = PointerCast(bcx, src, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);
Call(bcx,
llfn,
&[dst_ptr,
src_ptr,
Mul(bcx, size, count, DebugLoc::None),
align,
C_bool(ccx, volatile)],
call_debug_location)
}
fn memset_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
volatile: bool,
tp_ty: Ty<'tcx>,
dst: ValueRef,
val: ValueRef,
count: ValueRef,
call_debug_location: DebugLoc)
-> ValueRef {
let ccx = bcx.ccx();
let lltp_ty = type_of::type_of(ccx, tp_ty);
let align = C_i32(ccx, type_of::align_of(ccx, tp_ty) as i32);
let size = machine::llsize_of(ccx, lltp_ty);
let int_size = machine::llbitsize_of_real(ccx, ccx.int_type());
let name = format!("llvm.memset.p0i8.i{}", int_size);
let dst_ptr = PointerCast(bcx, dst, Type::i8p(ccx));
let llfn = ccx.get_intrinsic(&name);
Call(bcx,
llfn,
&[dst_ptr,
val,
Mul(bcx, size, count, DebugLoc::None),
align,
C_bool(ccx, volatile)],
call_debug_location)
}
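// The second `i1` argument to `llvm.ctlz`/`llvm.cttz` is the `is_zero_undef`
// flag; passing `false` keeps the result defined even for a zero input.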
fn count_zeros_intrinsic(bcx: Block,
name: &str,
val: ValueRef,
call_debug_location: DebugLoc)
-> ValueRef {
let y = C_bool(bcx.ccx(), false);
let llfn = bcx.ccx().get_intrinsic(&name);
Call(bcx, llfn, &[val, y], call_debug_location)
}
fn with_overflow_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
name: &str,
a: ValueRef,
b: ValueRef,
out: ValueRef,
call_debug_location: DebugLoc)
-> ValueRef {
let llfn = bcx.ccx().get_intrinsic(&name);
// Convert `i1` to a `bool`, and write it to the out parameter
let val = Call(bcx, llfn, &[a, b], call_debug_location);
let result = ExtractValue(bcx, val, 0);
let overflow = ZExt(bcx, ExtractValue(bcx, val, 1), Type::bool(bcx.ccx()));
Store(bcx, result, StructGEP(bcx, out, 0));
Store(bcx, overflow, StructGEP(bcx, out, 1));
C_nil(bcx.ccx())
}
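// Dispatches the `try` intrinsic: with landing pads disabled the closure is
// simply called, MSVC targets go through SEH-based unwinding, and all other
// targets use the GNU/DWARF landing-pad path.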
fn try_intrinsic<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
if bcx.sess().no_landing_pads() {
Call(bcx, func, &[data], dloc);
Store(bcx, C_null(Type::i8p(bcx.ccx())), dest);
bcx
} else if wants_msvc_seh(bcx.sess()) {
trans_msvc_try(bcx, func, data, local_ptr, dest, dloc)
} else {
trans_gnu_try(bcx, func, data, local_ptr, dest, dloc)
}
}
// MSVC's definition of the `rust_try` function.
//
// This implementation uses the new exception handling instructions in LLVM
// that provide support for SEH on MSVC targets. Although these instructions
// are meant to work for all targets, as of this writing LLVM does not
// recommend their use, as the older instructions are still better optimized.
fn trans_msvc_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx();
let dloc = DebugLoc::None;
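// The funclet-based EH instructions used below (catchswitch/catchpad)
// require the enclosing function to carry a personality, so install the
// SEH personality function up front.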
SetPersonalityFn(bcx, bcx.fcx.eh_personality());
let normal = bcx.fcx.new_temp_block("normal");
let catchswitch = bcx.fcx.new_temp_block("catchswitch");
let catchpad = bcx.fcx.new_temp_block("catchpad");
let caught = bcx.fcx.new_temp_block("caught");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
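// These are the same three arguments that the outer call to the `try`
// intrinsic passes through to this shim.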
// We're generating an IR snippet that looks like:
//
// define i32 @rust_try(%func, %data, %ptr) {
// %slot = alloca i8*
// call @llvm.localescape(%slot)
// store %ptr, %slot
// invoke %func(%data) to label %normal unwind label %catchswitch
//
// normal:
// ret i32 0
//
// catchswitch:
// %cs = catchswitch within none [%catchpad] unwind to caller
//
// catchpad:
// %tok = catchpad within %cs [%rust_try_filter]
// catchret from %tok to label %caught
//
// caught:
// ret i32 1
// }
//
// This structure follows the basic usage of the instructions in LLVM
// (see their documentation/test cases for examples), but a
// perhaps-surprising part here is the usage of the `localescape`
// intrinsic. This is used to allow the filter function (also generated
// here) to access variables on the stack of this intrinsic. This
// ability enables us to transfer information about the exception being
// thrown to this point, where we're catching the exception.
//
// More information can be found in libstd's seh.rs implementation.
let slot = Alloca(bcx, Type::i8p(ccx), "slot");
let localescape = ccx.get_intrinsic(&"llvm.localescape");
Call(bcx, localescape, &[slot], dloc);
Store(bcx, local_ptr, slot);
Invoke(bcx, func, &[data], normal.llbb, catchswitch.llbb, dloc);
Ret(normal, C_i32(ccx, 0), dloc);
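// Wire up the EH blocks from the sketch above: the catchswitch dispatches
// to a single catchpad whose argument is the generated filter function
// (bitcast to i8*), and catchret then transfers control to `caught`.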
let cs = CatchSwitch(catchswitch, None, None, 1);
AddHandler(catchswitch, cs, catchpad.llbb);
let filter = generate_filter_fn(bcx.fcx, bcx.fcx.llfn);
let filter = BitCast(catchpad, filter, Type::i8p(ccx));
let tok = CatchPad(catchpad, cs, &[filter]);
CatchRet(catchpad, tok, caught.llbb);
Ret(caught, C_i32(ccx, 1), dloc);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest);
return bcx
}
// Definition of the standard "try" function for Rust using the GNU-like model
// of exceptions (e.g. the normal semantics of LLVM's landingpad and invoke
// instructions).
//
// This translation is a little surprising because we always call a shim
// function instead of inlining the call to `invoke` manually here. This is done
// because in LLVM we're only allowed to have one personality per function
// definition. The call to the `try` intrinsic is being inlined into the
// function calling it, and that function may already have other personality
// functions in play. By calling a shim we're guaranteed that our shim will have
// the right personality function.
fn trans_gnu_try<'blk, 'tcx>(bcx: Block<'blk, 'tcx>,
func: ValueRef,
data: ValueRef,
local_ptr: ValueRef,
dest: ValueRef,
dloc: DebugLoc) -> Block<'blk, 'tcx> {
let llfn = get_rust_try_fn(bcx.fcx, &mut |bcx| {
let ccx = bcx.ccx();
let tcx = ccx.tcx();
let dloc = DebugLoc::None;
// Translates the shims described above:
//
// bcx:
// invoke %func(%args...) normal %normal unwind %catch
//
// normal:
// ret 0
//
// catch:
// (ptr, _) = landingpad
// store ptr, %local_ptr
// ret 1
//
// Note that the `local_ptr` data passed into the `try` intrinsic is
// expected to be `*mut *mut u8` for this to actually work, but that's
// managed by the standard library.
attributes::emit_uwtable(bcx.fcx.llfn, true);
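// The personality used for catching panics comes from the
// `eh_personality_catch` lang item; reify it to a function pointer so it
// can be attached to the landingpad below.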
let catch_pers = match tcx.lang_items.eh_personality_catch() {
Some(did) => {
Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
}
None => bug!("eh_personality_catch not defined"),
};
let then = bcx.fcx.new_temp_block("then");
let catch = bcx.fcx.new_temp_block("catch");
let func = llvm::get_param(bcx.fcx.llfn, 0);
let data = llvm::get_param(bcx.fcx.llfn, 1);
let local_ptr = llvm::get_param(bcx.fcx.llfn, 2);
Invoke(bcx, func, &[data], then.llbb, catch.llbb, dloc);
Ret(then, C_i32(ccx, 0), dloc);
// Type indicator for the exception being thrown.
//
// The first value in this tuple is a pointer to the exception object
// being thrown. The second value is a "selector" indicating which of
// the landing pad clauses the exception's type had been matched to.
// rust_try ignores the selector.
let lpad_ty = Type::struct_(ccx, &[Type::i8p(ccx), Type::i32(ccx)],
false);
let vals = LandingPad(catch, lpad_ty, catch_pers, 1);
AddClause(catch, vals, C_null(Type::i8p(ccx)));
let ptr = ExtractValue(catch, vals, 0);
Store(catch, ptr, BitCast(catch, local_ptr, Type::i8p(ccx).ptr_to()));
Ret(catch, C_i32(ccx, 1), dloc);
});
// Note that no invoke is used here because by definition this function
// can't panic (that's what it's catching).
let ret = Call(bcx, llfn, &[func, data, local_ptr], dloc);
Store(bcx, ret, dest);
return bcx;
}
// Helper function to give a Block to a closure to translate a shim function.
// This is currently primarily used for the `try` intrinsic functions above.
fn gen_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
name: &str,
inputs: Vec<Ty<'tcx>>,
output: ty::FnOutput<'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
let sig = ty::FnSig {
inputs: inputs,
output: output,
variadic: false,
};
let fn_ty = FnType::new(ccx, Abi::Rust, &sig, &[]);
let rust_fn_ty = ccx.tcx().mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(sig)
});
let llfn = declare::define_internal_fn(ccx, name, rust_fn_ty);
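// Build the shim's body: a fresh FunctionContext backed by a temporary
// block arena yields the entry block, which is handed to the provided
// closure before the function is finalized.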
let (fcx, block_arena);
block_arena = TypedArena::new();
fcx = FunctionContext::new(ccx, llfn, fn_ty, None, &block_arena);
let bcx = fcx.init(true, None);
trans(bcx);
fcx.cleanup();
llfn
}
// Helper function used to get a handle to the `__rust_try` function used to
// catch exceptions.
//
// This function is only generated once and is then cached.
fn get_rust_try_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
trans: &mut for<'b> FnMut(Block<'b, 'tcx>))
-> ValueRef {
let ccx = fcx.ccx;
if let Some(llfn) = ccx.rust_try_fn().get() {
return llfn;
}
// Define the type up front for the signature of the rust_try function.
let tcx = ccx.tcx();
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
let fn_ty = tcx.mk_fn_ptr(ty::BareFnTy {
unsafety: hir::Unsafety::Unsafe,
abi: Abi::Rust,
sig: ty::Binder(ty::FnSig {
inputs: vec![i8p],
output: ty::FnOutput::FnConverging(tcx.mk_nil()),
variadic: false,
}),
});
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let rust_try = gen_fn(fcx, "__rust_try", vec![fn_ty, i8p, i8p], output, trans);
ccx.rust_try_fn().set(Some(rust_try));
return rust_try
}
// For MSVC-style exceptions (SEH), the compiler generates a filter function
// which is used to determine whether the exception being caught is a Rust
// exception or some other kind.
//
// This function generates said filter function. The shim generated here is
// actually just a thin wrapper that calls the real implementation in the
// standard library itself. For the reasons why, see seh.rs in the standard
// library.
fn generate_filter_fn<'a, 'tcx>(fcx: &FunctionContext<'a, 'tcx>,
rust_try_fn: ValueRef)
-> ValueRef {
let ccx = fcx.ccx;
let tcx = ccx.tcx();
let dloc = DebugLoc::None;
let rust_try_filter = match tcx.lang_items.msvc_try_filter() {
Some(did) => {
Callee::def(ccx, did, tcx.mk_substs(Substs::empty())).reify(ccx).val
}
None => bug!("msvc_try_filter not defined"),
};
let output = ty::FnOutput::FnConverging(tcx.types.i32);
let i8p = tcx.mk_mut_ptr(tcx.types.i8);
let frameaddress = ccx.get_intrinsic(&"llvm.frameaddress");
let recoverfp = ccx.get_intrinsic(&"llvm.x86.seh.recoverfp");
let localrecover = ccx.get_intrinsic(&"llvm.localrecover");
// On all platforms, once we have the EXCEPTION_POINTERS handle as well as
// the base pointer, we follow the standard layout of:
//
// block:
// %parentfp = call i8* llvm.x86.seh.recoverfp(@rust_try_fn, %bp)
// %arg = call i8* llvm.localrecover(@rust_try_fn, %parentfp, 0)
// %ret = call i32 @the_real_filter_function(%ehptrs, %arg)
// ret i32 %ret
//
// The recoverfp intrinsic is used to recover the frame pointer of the
// `rust_try_fn` function, which is in turn passed to the
// `localrecover` intrinsic (pairing with the `localescape` intrinsic
// mentioned above). Putting all this together means that we now have a
// handle to the arguments passed into the `try` function, allowing us to
// write to that function's stack frame.
//
// For more info, see seh.rs in the standard library.
let do_trans = |bcx: Block, ehptrs, base_pointer| {
let rust_try_fn = BitCast(bcx, rust_try_fn, Type::i8p(ccx));
let parentfp = Call(bcx, recoverfp, &[rust_try_fn, base_pointer], dloc);
let arg = Call(bcx, localrecover,
&[rust_try_fn, parentfp, C_i32(ccx, 0)], dloc);
let ret = Call(bcx, rust_try_filter, &[ehptrs, arg], dloc);
Ret(bcx, ret, dloc);
};
if ccx.tcx().sess.target.target.arch == "x86" {
// On x86 the filter function doesn't actually receive any arguments.
// Instead the %ebp register contains some contextual information.
//
// Unfortunately I don't know of any great documentation as to what's
// going on here; all I can say is that there are a few test cases in
// LLVM's test suite which follow this pattern of instructions, so we
// just do the same.
gen_fn(fcx, "__rustc_try_filter", vec![], output, &mut |bcx| {
let ebp = Call(bcx, frameaddress, &[C_i32(ccx, 1)], dloc);
let exn = InBoundsGEP(bcx, ebp, &[C_i32(ccx, -20)]);
let exn = Load(bcx, BitCast(bcx, exn, Type::i8p(ccx).ptr_to()));
do_trans(bcx, exn, ebp);
})
} else if ccx.tcx().sess.target.target.arch == "x86_64" {
// Conveniently on x86_64 the EXCEPTION_POINTERS handle and base pointer
// are passed in as arguments to the filter function, so we just pass
// those along.
gen_fn(fcx, "__rustc_try_filter", vec![i8p, i8p], output, &mut |bcx| {
let exn = llvm::get_param(bcx.fcx.llfn, 0);
let rbp = llvm::get_param(bcx.fcx.llfn, 1);
do_trans(bcx, exn, rbp);
})
} else {
bug!("unknown target to generate a filter function")
}
}
fn span_invalid_monomorphization_error(a: &Session, b: Span, c: &str) {
span_err!(a, b, E0511, "{}", c);
}
fn generic_simd_intrinsic<'blk, 'tcx, 'a>
(bcx: Block<'blk, 'tcx>,
name: &str,
substs: &'tcx subst::Substs<'tcx>,
callee_ty: Ty<'tcx>,
args: Option<&[P<hir::Expr>]>,
llargs: &[ValueRef],
ret_ty: Ty<'tcx>,
llret_ty: Type,
call_debug_location: DebugLoc,
span: Span) -> ValueRef
{
// macros for error handling:
macro_rules! emit_error {
($msg: tt) => {
emit_error!($msg, )
};
($msg: tt, $($fmt: tt)*) => {
span_invalid_monomorphization_error(
bcx.sess(), span,
&format!(concat!("invalid monomorphization of `{}` intrinsic: ",
$msg),
name, $($fmt)*));
}
}
macro_rules! require {
($cond: expr, $($fmt: tt)*) => {
if !$cond {
emit_error!($($fmt)*);
return C_nil(bcx.ccx())
}
}
}
macro_rules! require_simd {
($ty: expr, $position: expr) => {
require!($ty.is_simd(), "expected SIMD {} type, found non-SIMD `{}`", $position, $ty)
}
}
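// As a concrete illustration of the machinery above: invoking a SIMD
// intrinsic with a plain scalar (a hypothetical `simd_add::<i32>`) would trip
// `require_simd!` and report, via E0511, roughly:
//
//     invalid monomorphization of `simd_add` intrinsic: expected SIMD
//     input type, found non-SIMD `i32`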
let tcx = bcx.tcx();
let sig = tcx.erase_late_bound_regions(callee_ty.fn_sig());
let sig = infer::normalize_associated_type(tcx, &sig);
let arg_tys = sig.inputs;
// every intrinsic takes a SIMD vector as its first argument
require_simd!(arg_tys[0], "input");
let in_ty = arg_tys[0];
let in_elem = arg_tys[0].simd_type(tcx);
let in_len = arg_tys[0].simd_size(tcx);
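// For context, these intrinsics are declared on the library side roughly as
// follows (an illustrative sketch, not a definition from this crate):
//
//     extern "platform-intrinsic" {
//         fn simd_add<T>(x: T, y: T) -> T;
//     }
//
// where `T` is expected to be a `#[repr(simd)]` vector type; the checks in
// this function verify that expectation for each monomorphization.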
let comparison = match name {
"simd_eq" => Some(hir::BiEq),
"simd_ne" => Some(hir::BiNe),
"simd_lt" => Some(hir::BiLt),
"simd_le" => Some(hir::BiLe),
"simd_gt" => Some(hir::BiGt),
"simd_ge" => Some(hir::BiGe),
_ => None
};
if let Some(cmp_op) = comparison {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len, in_ty,
ret_ty, out_len);
require!(llret_ty.element_type().kind() == llvm::Integer,
"expected return type with integer elements, found `{}` with non-integer `{}`",
ret_ty,
ret_ty.simd_type(tcx));
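// The returned mask follows the usual vector-compare convention: each lane
// of the integer return type is all-zeros for `false` and all-ones (i.e. -1
// when read as a signed integer) for `true`. For example, with hypothetical
// `f32x4` inputs and an `i32x4` return type, `simd_lt` yields lanes of
// 0 or -1.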
return compare_simd_types(bcx,
llargs[0],
llargs[1],
in_elem,
llret_ty,
cmp_op,
call_debug_location)
}
if name.starts_with("simd_shuffle") {
let n: usize = match name["simd_shuffle".len()..].parse() {
Ok(n) => n,
Err(_) => span_bug!(span,
"bad `simd_shuffle` instruction only caught in trans?")
};
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(out_len == n,
"expected return type of length {}, found `{}` with length {}",
n, ret_ty, out_len);
require!(in_elem == ret_ty.simd_type(tcx),
"expected return element type `{}` (element of input `{}`), \
found `{}` with element type `{}`",
in_elem, in_ty,
ret_ty, ret_ty.simd_type(tcx));
let total_len = in_len as u64 * 2;
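// For illustration (with hypothetical 4-lane vector types): a call like
// `simd_shuffle4(a, b, [0u32, 1, 4, 5])` treats `a` and `b` as one
// concatenated 8-lane vector, so each index must be below `total_len`;
// indices 0..4 pick lanes of `a` and 4..8 pick lanes of `b`.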
let (vector, indirect) = match args {
Some(args) => {
match consts::const_expr(bcx.ccx(), &args[2], substs, None,
// this should probably help simd error reporting
consts::TrueConst::Yes) {
Ok((vector, _)) => (vector, false),
Err(err) => bcx.sess().span_fatal(span, &err.description()),
}
}
None => (llargs[2], !type_is_immediate(bcx.ccx(), arg_tys[2]))
};
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = if indirect {
Load(bcx, StructGEP(bcx, vector, i))
} else {
const_get_elt(vector, &[i as libc::c_uint])
};
let c = const_to_opt_uint(val);
match c {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
}
Some(idx) if idx >= total_len => {
emit_error!("shuffle index #{} is out of bounds (limit {})",
arg_idx, total_len);
None
}
Some(idx) => Some(C_i32(bcx.ccx(), idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return C_null(llret_ty)
};
return ShuffleVector(bcx, llargs[0], llargs[1], C_vector(&indices))
}
if name == "simd_insert" {
require!(in_elem == arg_tys[2],
"expected inserted type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, arg_tys[2]);
return InsertElement(bcx, llargs[0], llargs[2], llargs[1])
}
if name == "simd_extract" {
require!(ret_ty == in_elem,
"expected return type `{}` (element of input `{}`), found `{}`",
in_elem, in_ty, ret_ty);
return ExtractElement(bcx, llargs[0], llargs[1])
}
if name == "simd_cast" {
require_simd!(ret_ty, "return");
let out_len = ret_ty.simd_size(tcx);
require!(in_len == out_len,
"expected return type with length {} (same as input type `{}`), \
found `{}` with length {}",
in_len, in_ty,
ret_ty, out_len);
// casting cares about nominal type, not just structural type
let out_elem = ret_ty.simd_type(tcx);
if in_elem == out_elem { return llargs[0]; }
enum Style { Float, Int(/* is signed? */ bool), Unsupported }
let (in_style, in_width) = match in_elem.sty {
// vectors of pointer-sized integers should've been
// disallowed before here, so this unwrap is safe.
ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::TyFloat(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
let (out_style, out_width) = match out_elem.sty {
ty::TyInt(i) => (Style::Int(true), i.bit_width().unwrap()),
ty::TyUint(u) => (Style::Int(false), u.bit_width().unwrap()),
ty::TyFloat(f) => (Style::Float, f.bit_width()),
_ => (Style::Unsupported, 0)
};
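// The dispatch below selects an LLVM cast based on the style and width of
// the input and output elements; for illustration, with hypothetical element
// types:
//
//     i16 -> i32: SExt        u16 -> i32: ZExt        i64 -> i32: Trunc
//     i32 -> f32: SIToFP      u32 -> f32: UIToFP      f32 -> i32: FPToSI
//     f64 -> f32: FPTrunc     f32 -> f64: FPExt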
match (in_style, out_style) {
(Style::Int(in_is_signed), Style::Int(_)) => {
return match in_width.cmp(&out_width) {
Ordering::Greater => Trunc(bcx, llargs[0], llret_ty),
Ordering::Equal => llargs[0],
Ordering::Less => if in_is_signed {
SExt(bcx, llargs[0], llret_ty)
} else {
ZExt(bcx, llargs[0], llret_ty)
}
}
}
(Style::Int(in_is_signed), Style::Float) => {
return if in_is_signed {
SIToFP(bcx, llargs[0], llret_ty)
} else {
UIToFP(bcx, llargs[0], llret_ty)
}
}
(Style::Float, Style::Int(out_is_signed)) => {
return if out_is_signed {
FPToSI(bcx, llargs[0], llret_ty)
} else {
FPToUI(bcx, llargs[0], llret_ty)
}
}
(Style::Float, Style::Float) => {
return match in_width.cmp(&out_width) {
Ordering::Greater => FPTrunc(bcx, llargs[0], llret_ty),
Ordering::Equal => llargs[0],
Ordering::Less => FPExt(bcx, llargs[0], llret_ty)
}
}
_ => {/* Unsupported. Fallthrough. */}
}
require!(false,
"unsupported cast from `{}` with element `{}` to `{}` with element `{}`",
in_ty, in_elem,
ret_ty, out_elem);
}
macro_rules! arith {
($($name: ident: $($($p: ident),* => $call: expr),*;)*) => {
$(
if name == stringify!($name) {
match in_elem.sty {
$(
$(ty::$p(_))|* => {
return $call(bcx, llargs[0], llargs[1], call_debug_location)
}
)*
_ => {},
}
require!(false,
"unsupported operation on `{}` with element `{}`",
in_ty,
in_elem)
})*
}
}
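// For example, the `simd_add` row in the invocation below expands to
// (roughly):
//
//     if name == "simd_add" {
//         match in_elem.sty {
//             ty::TyUint(_) | ty::TyInt(_) =>
//                 return Add(bcx, llargs[0], llargs[1], call_debug_location),
//             ty::TyFloat(_) =>
//                 return FAdd(bcx, llargs[0], llargs[1], call_debug_location),
//             _ => {},
//         }
//         require!(false, "unsupported operation on `{}` with element `{}`",
//                  in_ty, in_elem)
//     }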
arith! {
simd_add: TyUint, TyInt => Add, TyFloat => FAdd;
simd_sub: TyUint, TyInt => Sub, TyFloat => FSub;
simd_mul: TyUint, TyInt => Mul, TyFloat => FMul;
simd_div: TyFloat => FDiv;
simd_shl: TyUint, TyInt => Shl;
simd_shr: TyUint => LShr, TyInt => AShr;
simd_and: TyUint, TyInt => And;
simd_or: TyUint, TyInt => Or;
simd_xor: TyUint, TyInt => Xor;
}
span_bug!(span, "unknown SIMD intrinsic");
}
// Returns the width of an int TypeVariant and whether it's signed.
// Returns None if the type is not an integer.
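// For example, `TyInt(ast::IntTy::I16)` maps to `Some((16, true))`,
// `TyUint(ast::UintTy::U8)` to `Some((8, false))`, and `TyInt(ast::IntTy::Is)`
// to `Some((32, true))` or `Some((64, true))` depending on the target's
// pointer width.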
fn int_type_width_signed<'tcx>(sty: &ty::TypeVariants<'tcx>, ccx: &CrateContext)
-> Option<(u64, bool)> {
use rustc::ty::{TyInt, TyUint};
match *sty {
TyInt(t) => Some((match t {
ast::IntTy::Is => {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for isize: {}", tws),
}
},
ast::IntTy::I8 => 8,
ast::IntTy::I16 => 16,
ast::IntTy::I32 => 32,
ast::IntTy::I64 => 64,
}, true)),
TyUint(t) => Some((match t {
ast::UintTy::Us => {
match &ccx.tcx().sess.target.target.target_pointer_width[..] {
"32" => 32,
"64" => 64,
tws => bug!("Unsupported target word size for usize: {}", tws),
}
},
ast::UintTy::U8 => 8,
ast::UintTy::U16 => 16,
ast::UintTy::U32 => 32,
ast::UintTy::U64 => 64,
}, false)),
_ => None,
}
}
// Returns the width of a float TypeVariant
// Returns None if the type is not a float
fn float_type_width<'tcx>(sty: &ty::TypeVariants<'tcx>)
-> Option<u64> {
use rustc::ty::TyFloat;
match *sty {
TyFloat(t) => Some(match t {
ast::FloatTy::F32 => 32,
ast::FloatTy::F64 => 64,
}),
_ => None,
}
}