
Auto merge of #65947 - eddyb:fn-abi, r=oli-obk,nagisa

rustc: split FnAbi's into definitions/direct calls ("of_instance") and indirect calls ("of_fn_ptr").

After this PR:
* `InstanceDef::Virtual` is only used for "direct" virtual calls, and shims around those calls use `InstanceDef::ReifyShim` (i.e. for `<dyn Trait as Trait>::f as fn(_)`)
  * this could easily be done for intrinsics as well, to allow their reification, but I didn't do it
* `FnAbi::of_instance` is **always** used for declaring/defining an `fn`, and for direct calls to an `fn`
  * this is great for e.g. https://github.com/rust-lang/rust/pull/65881 (`#[track_caller]`), which can introduce the "caller location" argument into "codegen signatures" by only changing `FnAbi::of_instance`, after this PR
* `FnAbi::of_fn_ptr` is used primarily for indirect calls, i.e. to `fn` pointers
  * *not* virtual calls (which use `FnAbi::of_instance` with `InstanceDef::Virtual`)
  * there are also a couple of uses where `rustc_codegen_llvm` needs to declare (i.e. FFI-import) an LLVM function that has no Rust declaration available at all
    * at least one of them could probably be a "weak lang item" instead

As there are many steps, this PR is best reviewed commit by commit - some of the commits arguably should be in their own PRs; I may have gotten a bit carried away.
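For orientation, a user-level sketch of the call shapes this split distinguishes (illustrative only, not part of the diff; `Greet`/`Loud` are made-up names): direct and virtual calls are declared and codegen'd through `FnAbi::of_instance`, while calls through a reified `fn` pointer go through `FnAbi::of_fn_ptr`, and the reified pointer itself points at a `ReifyShim`.

```rust
trait Greet {
    fn hello(&self);
}

struct Loud;
impl Greet for Loud {
    fn hello(&self) {
        println!("hello");
    }
}

fn main() {
    let loud = Loud;
    let dynamic: &dyn Greet = &loud;

    // Direct call: declared/codegen'd via `FnAbi::of_instance`.
    loud.hello();

    // Virtual call: still `FnAbi::of_instance`, but of an `InstanceDef::Virtual`.
    dynamic.hello();

    // Reifying the trait-object method to a `fn` pointer now goes through an
    // `InstanceDef::ReifyShim`, whose body performs the virtual call.
    let reified: fn(&dyn Greet) = <dyn Greet as Greet>::hello;

    // Indirect call through the pointer: `FnAbi::of_fn_ptr`.
    reified(dynamic);
}
```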

cc @nagisa @rkruppe @oli-obk @anp
bors 2019-12-04 08:22:05 +00:00
commit 5f1d6c4403
34 changed files with 387 additions and 426 deletions


@ -279,18 +279,10 @@ impl<'a, 'b, 'tcx> graph::GraphSuccessors<'b> for ReadOnlyBodyCache<'a, 'tcx> {
impl Deref for ReadOnlyBodyCache<'a, 'tcx> {
type Target = Body<'tcx>;
type Target = &'a Body<'tcx>;
fn deref(&self) -> &Self::Target {
self.body
}
}
impl Index<BasicBlock> for ReadOnlyBodyCache<'a, 'tcx> {
type Output = BasicBlockData<'tcx>;
fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
&self.body[index]
&self.body
}
}


@ -1,8 +1,7 @@
use crate::hir::CodegenFnAttrFlags;
use crate::hir::Unsafety;
use crate::hir::def::Namespace;
use crate::hir::def_id::DefId;
use crate::ty::{self, Ty, PolyFnSig, TypeFoldable, SubstsRef, TyCtxt};
use crate::ty::{self, Ty, TypeFoldable, SubstsRef, TyCtxt};
use crate::ty::print::{FmtPrinter, Printer};
use crate::traits;
use crate::middle::lang_items::DropInPlaceFnLangItem;
@ -10,7 +9,6 @@ use rustc_target::spec::abi::Abi;
use rustc_macros::HashStable;
use std::fmt;
use std::iter;
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, RustcEncodable, RustcDecodable)]
#[derive(HashStable, Lift)]
@ -29,17 +27,26 @@ pub enum InstanceDef<'tcx> {
/// `fn()` pointer where the function itself cannot be turned into a pointer.
///
/// One example in the compiler today is functions annotated with `#[track_caller]`, which
/// must have their implicit caller location argument populated for a call. Because this is a
/// required part of the function's ABI but can't be tracked as a property of the function
/// pointer, we create a single "caller location" at the site where the function is reified.
/// One example is `<dyn Trait as Trait>::fn`, where the shim contains
/// a virtual call, which codegen supports only via a direct call to the
/// `<dyn Trait as Trait>::fn` instance (an `InstanceDef::Virtual`).
///
/// Another example is functions annotated with `#[track_caller]`, which
/// must have their implicit caller location argument populated for a call.
/// Because this is a required part of the function's ABI but can't be tracked
/// as a property of the function pointer, we use a single "caller location"
/// (the definition of the function itself).
ReifyShim(DefId),
/// `<fn() as FnTrait>::call_*`
/// `DefId` is `FnTrait::call_*`.
FnPtrShim(DefId, Ty<'tcx>),
/// `<dyn Trait as Trait>::fn`
/// `<dyn Trait as Trait>::fn`, "direct calls" of which are implicitly
/// codegen'd as virtual calls.
///
/// NB: if this is reified to a `fn` pointer, a `ReifyShim` is used
/// (see `ReifyShim` above for more details on that).
Virtual(DefId, usize),
/// `<[mut closure] as FnOnce>::call_once`
@ -61,70 +68,6 @@ impl<'tcx> Instance<'tcx> {
&ty,
)
}
fn fn_sig_noadjust(&self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
let ty = self.ty(tcx);
match ty.kind {
ty::FnDef(..) |
// Shims currently have type FnPtr. Not sure this should remain.
ty::FnPtr(_) => ty.fn_sig(tcx),
ty::Closure(def_id, substs) => {
let sig = substs.as_closure().sig(def_id, tcx);
let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
sig.map_bound(|sig| tcx.mk_fn_sig(
iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
sig.output(),
sig.c_variadic,
sig.unsafety,
sig.abi
))
}
ty::Generator(def_id, substs, _) => {
let sig = substs.as_generator().poly_sig(def_id, tcx);
let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
let pin_did = tcx.lang_items().pin_type().unwrap();
let pin_adt_ref = tcx.adt_def(pin_did);
let pin_substs = tcx.intern_substs(&[env_ty.into()]);
let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
sig.map_bound(|sig| {
let state_did = tcx.lang_items().gen_state().unwrap();
let state_adt_ref = tcx.adt_def(state_did);
let state_substs = tcx.intern_substs(&[
sig.yield_ty.into(),
sig.return_ty.into(),
]);
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
tcx.mk_fn_sig(iter::once(env_ty),
ret_ty,
false,
Unsafety::Normal,
Abi::Rust
)
})
}
_ => bug!("unexpected type {:?} in Instance::fn_sig_noadjust", ty)
}
}
pub fn fn_sig(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
let mut fn_sig = self.fn_sig_noadjust(tcx);
if let InstanceDef::VtableShim(..) = self.def {
// Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
fn_sig = fn_sig.map_bound(|mut fn_sig| {
let mut inputs_and_output = fn_sig.inputs_and_output.to_vec();
inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
fn_sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
fn_sig
});
}
fn_sig
}
}
impl<'tcx> InstanceDef<'tcx> {
@ -196,7 +139,7 @@ impl<'tcx> fmt::Display for Instance<'tcx> {
write!(f, " - intrinsic")
}
InstanceDef::Virtual(_, num) => {
write!(f, " - shim(#{})", num)
write!(f, " - virtual#{}", num)
}
InstanceDef::FnPtrShim(_, ty) => {
write!(f, " - shim({:?})", ty)
@ -311,20 +254,23 @@ impl<'tcx> Instance<'tcx> {
substs: SubstsRef<'tcx>,
) -> Option<Instance<'tcx>> {
debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
Instance::resolve(tcx, param_env, def_id, substs).map(|resolved| {
Instance::resolve(tcx, param_env, def_id, substs).map(|mut resolved| {
let has_track_caller = |def| tcx.codegen_fn_attrs(def).flags
.contains(CodegenFnAttrFlags::TRACK_CALLER);
match resolved.def {
InstanceDef::Item(def_id) if has_track_caller(def_id) => {
debug!(" => fn pointer created for function with #[track_caller]");
Instance {
def: InstanceDef::ReifyShim(def_id),
substs,
}
},
_ => resolved,
resolved.def = InstanceDef::ReifyShim(def_id);
}
InstanceDef::Virtual(def_id, _) => {
debug!(" => fn pointer created for virtual call");
resolved.def = InstanceDef::ReifyShim(def_id);
}
_ => {}
}
resolved
})
}
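As an aside (not from the diff): a small user-level program that exercises the `#[track_caller]` branch above, assuming a toolchain where `#[track_caller]` is available. Taking the `fn` pointer is what makes `resolve_for_fn_ptr` substitute the `ReifyShim`:

```rust
#[track_caller]
fn whereami() {
    // The implicit caller-location argument is what a bare `fn` pointer
    // cannot carry; the `ReifyShim` supplies it instead.
    println!("called from {}", std::panic::Location::caller());
}

fn main() {
    // Reification: `resolve_for_fn_ptr` returns an `InstanceDef::ReifyShim`
    // here rather than the plain `InstanceDef::Item`.
    let f: fn() = whereami;
    f();
}
```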


@ -2339,6 +2339,76 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
}
}
impl<'tcx> ty::Instance<'tcx> {
// NOTE(eddyb) this is private to avoid using it from outside of
// `FnAbi::of_instance` - any other uses are either too high-level
// for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
// or should go through `FnAbi` instead, to avoid losing any
// adjustments `FnAbi::of_instance` might be performing.
fn fn_sig_for_fn_abi(&self, tcx: TyCtxt<'tcx>) -> ty::PolyFnSig<'tcx> {
let ty = self.ty(tcx);
match ty.kind {
ty::FnDef(..) |
// Shims currently have type FnPtr. Not sure this should remain.
ty::FnPtr(_) => {
let mut sig = ty.fn_sig(tcx);
if let ty::InstanceDef::VtableShim(..) = self.def {
// Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
sig = sig.map_bound(|mut sig| {
let mut inputs_and_output = sig.inputs_and_output.to_vec();
inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
sig
});
}
sig
}
ty::Closure(def_id, substs) => {
let sig = substs.as_closure().sig(def_id, tcx);
let env_ty = tcx.closure_env_ty(def_id, substs).unwrap();
sig.map_bound(|sig| tcx.mk_fn_sig(
iter::once(*env_ty.skip_binder()).chain(sig.inputs().iter().cloned()),
sig.output(),
sig.c_variadic,
sig.unsafety,
sig.abi
))
}
ty::Generator(def_id, substs, _) => {
let sig = substs.as_generator().poly_sig(def_id, tcx);
let env_region = ty::ReLateBound(ty::INNERMOST, ty::BrEnv);
let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
let pin_did = tcx.lang_items().pin_type().unwrap();
let pin_adt_ref = tcx.adt_def(pin_did);
let pin_substs = tcx.intern_substs(&[env_ty.into()]);
let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
sig.map_bound(|sig| {
let state_did = tcx.lang_items().gen_state().unwrap();
let state_adt_ref = tcx.adt_def(state_did);
let state_substs = tcx.intern_substs(&[
sig.yield_ty.into(),
sig.return_ty.into(),
]);
let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
tcx.mk_fn_sig(iter::once(env_ty),
ret_ty,
false,
hir::Unsafety::Normal,
rustc_target::spec::abi::Abi::Rust
)
})
}
_ => bug!("unexpected type {:?} in Instance::fn_sig", ty)
}
}
}
pub trait FnAbiExt<'tcx, C>
where
C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
@ -2347,12 +2417,22 @@ where
+ HasTyCtxt<'tcx>
+ HasParamEnv<'tcx>,
{
fn of_instance(cx: &C, instance: ty::Instance<'tcx>) -> Self;
fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
/// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
///
/// NB: this doesn't handle virtual calls - those should use `FnAbi::of_instance`
/// instead, where the instance is a `InstanceDef::Virtual`.
fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
/// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
/// direct calls to an `fn`.
///
/// NB: that includes virtual calls, which are represented by "direct calls"
/// to a `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self;
fn new_internal(
cx: &C,
sig: ty::FnSig<'tcx>,
sig: ty::PolyFnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
) -> Self;
@ -2367,25 +2447,19 @@ where
+ HasTyCtxt<'tcx>
+ HasParamEnv<'tcx>,
{
fn of_instance(cx: &C, instance: ty::Instance<'tcx>) -> Self {
let sig = instance.fn_sig(cx.tcx());
let sig = cx
.tcx()
.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
call::FnAbi::new(cx, sig, &[])
}
fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
fn of_fn_ptr(cx: &C, sig: ty::PolyFnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
call::FnAbi::new_internal(cx, sig, extra_args, |ty, _| ArgAbi::new(cx.layout_of(ty)))
}
fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
FnAbiExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
fn of_instance(cx: &C, instance: ty::Instance<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
let sig = instance.fn_sig_for_fn_abi(cx.tcx());
call::FnAbi::new_internal(cx, sig, extra_args, |ty, arg_idx| {
let mut layout = cx.layout_of(ty);
// Don't pass the vtable, it's not an argument of the virtual fn.
// Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
// or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
if arg_idx == Some(0) {
if let (ty::InstanceDef::Virtual(..), Some(0)) = (&instance.def, arg_idx) {
let fat_pointer_ty = if layout.is_unsized() {
// unsized `self` is passed as a pointer to `self`
// FIXME (mikeyhew) change this to use &own if it is ever added to the language
@ -2436,15 +2510,19 @@ where
fn new_internal(
cx: &C,
sig: ty::FnSig<'tcx>,
sig: ty::PolyFnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
) -> Self {
debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
let sig = cx
.tcx()
.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
use rustc_target::spec::abi::Abi::*;
let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::C,
RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
// It's the ABI's job to select this, not ours.
System => bug!("system abi should be selected elsewhere"),


@ -372,7 +372,7 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_cconv(&self) -> llvm::CallConv {
match self.conv {
Conv::C => llvm::CCallConv,
Conv::C | Conv::Rust => llvm::CCallConv,
Conv::AmdGpuKernel => llvm::AmdGpuKernel,
Conv::ArmAapcs => llvm::ArmAapcsCallConv,
Conv::Msp430Intr => llvm::Msp430Intr,
@ -388,6 +388,11 @@ impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
}
fn apply_attrs_llfn(&self, cx: &CodegenCx<'ll, 'tcx>, llfn: &'ll Value) {
// FIXME(eddyb) can this also be applied to callsites?
if self.ret.layout.abi.is_uninhabited() {
llvm::Attribute::NoReturn.apply_llfn(llvm::AttributePlace::Function, llfn);
}
let mut i = 0;
let mut apply = |attrs: &ArgAttributes, ty: Option<&Type>| {
attrs.apply_llfn(llvm::AttributePlace::Argument(i), llfn, ty);
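For illustration (not from the diff), two user-level functions whose return layout is uninhabited and would therefore get the `NoReturn` attribute added above:

```rust
enum Never {}

// `!` is uninhabited, so the declaration gets the LLVM `noreturn` attribute.
fn diverges() -> ! {
    loop {}
}

// Any empty enum works the same way: its layout ABI is `Uninhabited`.
fn also_diverges() -> Never {
    loop {}
}
```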


@ -2,19 +2,20 @@
use std::ffi::CString;
use rustc::hir::{CodegenFnAttrFlags, CodegenFnAttrs};
use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::session::Session;
use rustc::session::config::{Sanitizer, OptLevel};
use rustc::ty::{self, TyCtxt, PolyFnSig};
use rustc::ty::{self, TyCtxt, Ty};
use rustc::ty::layout::HasTyCtxt;
use rustc::ty::query::Providers;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::fx::FxHashMap;
use rustc_target::abi::call::Conv;
use rustc_target::spec::PanicStrategy;
use rustc_codegen_ssa::traits::*;
use crate::abi::Abi;
use crate::abi::FnAbi;
use crate::attributes;
use crate::llvm::{self, Attribute};
use crate::llvm::AttributePlace::Function;
@ -26,7 +27,7 @@ use crate::value::Value;
/// Mark LLVM function to use provided inline heuristic.
#[inline]
pub fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
fn inline(cx: &CodegenCx<'ll, '_>, val: &'ll Value, inline: InlineAttr) {
use self::InlineAttr::*;
match inline {
Hint => Attribute::InlineHint.apply_llfn(Function, val),
@ -58,7 +59,7 @@ fn unwind(val: &'ll Value, can_unwind: bool) {
/// Tell LLVM if this function should be 'naked', i.e., skip the epilogue and prologue.
#[inline]
pub fn naked(val: &'ll Value, is_naked: bool) {
fn naked(val: &'ll Value, is_naked: bool) {
Attribute::Naked.toggle_llfn(Function, val, is_naked);
}
@ -72,7 +73,7 @@ pub fn set_frame_pointer_elimination(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value)
/// Tell LLVM what instrument function to insert.
#[inline]
pub fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
if cx.sess().instrument_mcount() {
// Similar to `clang -pg` behavior. Handled by the
// `post-inline-ee-instrument` LLVM pass.
@ -88,7 +89,7 @@ pub fn set_instrument_function(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
}
}
pub fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
fn set_probestack(cx: &CodegenCx<'ll, '_>, llfn: &'ll Value) {
// Only use stack probes if the target specification indicates that we
// should be using stack probes
if !cx.sess().target.target.options.stack_probes {
@ -202,11 +203,10 @@ pub(crate) fn default_optimisation_attrs(sess: &Session, llfn: &'ll Value) {
pub fn from_fn_attrs(
cx: &CodegenCx<'ll, 'tcx>,
llfn: &'ll Value,
id: Option<DefId>,
sig: PolyFnSig<'tcx>,
instance: ty::Instance<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) {
let codegen_fn_attrs = id.map(|id| cx.tcx.codegen_fn_attrs(id))
.unwrap_or_else(|| CodegenFnAttrs::new());
let codegen_fn_attrs = cx.tcx.codegen_fn_attrs(instance.def_id());
match codegen_fn_attrs.optimize {
OptimizeAttr::None => {
@ -224,6 +224,11 @@ pub fn from_fn_attrs(
}
}
// FIXME(eddyb) consolidate these two `inline` calls (and avoid overwrites).
if instance.def.is_inline(cx.tcx) {
inline(cx, llfn, attributes::InlineAttr::Hint);
}
inline(cx, llfn, codegen_fn_attrs.inline);
// The `uwtable` attribute according to LLVM is:
@ -276,8 +281,7 @@ pub fn from_fn_attrs(
// Special attribute for allocator functions, which can't unwind.
false
} else {
let sig = cx.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
if sig.abi == Abi::Rust || sig.abi == Abi::RustCall {
if fn_abi.conv == Conv::Rust {
// Any Rust method (or `extern "Rust" fn` or `extern
// "rust-call" fn`) is explicitly allowed to unwind
// (unless it has no-unwind attribute, handled above).
@ -331,16 +335,14 @@ pub fn from_fn_attrs(
// Note that currently the `wasm-import-module` doesn't do anything, but
// eventually LLVM 7 should read this and ferry the appropriate import
// module to the output file.
if let Some(id) = id {
if cx.tcx.sess.target.target.arch == "wasm32" {
if let Some(module) = wasm_import_module(cx.tcx, id) {
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("wasm-import-module"),
&module,
);
}
if cx.tcx.sess.target.target.arch == "wasm32" {
if let Some(module) = wasm_import_module(cx.tcx, instance.def_id()) {
llvm::AddFunctionAttrStringValue(
llfn,
llvm::AttributePlace::Function,
const_cstr!("wasm-import-module"),
&module,
);
}
}
}
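For context (not from the diff): the import-module string handled above originates from user code like the following sketch, assuming a `wasm32` target; `host` and `host_log` are made-up names.

```rust
// On wasm32 targets, the module name below ends up as the
// "wasm-import-module" LLVM function attribute on `host_log`.
#[link(wasm_import_module = "host")]
extern "C" {
    pub fn host_log(ptr: *const u8, len: usize);
}
```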


@ -4,6 +4,7 @@
//! and methods are represented as just a fn ptr and not a full
//! closure.
use crate::abi::{FnAbi, FnAbiLlvmExt};
use crate::attributes;
use crate::llvm;
use crate::context::CodegenCx;
@ -11,7 +12,7 @@ use crate::value::Value;
use rustc_codegen_ssa::traits::*;
use rustc::ty::{TypeFoldable, Instance};
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc::ty::layout::{FnAbiExt, HasTyCtxt};
/// Codegens a reference to a fn/method item, monomorphizing and
/// inlining as it goes.
@ -32,19 +33,19 @@ pub fn get_fn(
assert!(!instance.substs.has_escaping_bound_vars());
assert!(!instance.substs.has_param_types());
let sig = instance.fn_sig(cx.tcx());
if let Some(&llfn) = cx.instances.borrow().get(&instance) {
return llfn;
}
let sym = tcx.symbol_name(instance).name.as_str();
debug!("get_fn({:?}: {:?}) => {}", instance, sig, sym);
debug!("get_fn({:?}: {:?}) => {}", instance, instance.ty(cx.tcx()), sym);
// Create a fn pointer with the substituted signature.
let fn_ptr_ty = tcx.mk_fn_ptr(sig);
let llptrty = cx.backend_type(cx.layout_of(fn_ptr_ty));
let fn_abi = FnAbi::of_instance(cx, instance, &[]);
let llfn = if let Some(llfn) = cx.get_declared_value(&sym) {
// Create a fn pointer with the new signature.
let llptrty = fn_abi.ptr_to_llvm_type(cx);
// This is subtle and surprising, but sometimes we have to bitcast
// the resulting fn pointer. The reason has to do with external
// functions. If you have two crates that both bind the same C
@ -76,14 +77,10 @@ pub fn get_fn(
llfn
}
} else {
let llfn = cx.declare_fn(&sym, sig);
assert_eq!(cx.val_ty(llfn), llptrty);
let llfn = cx.declare_fn(&sym, &fn_abi);
debug!("get_fn: not casting pointer!");
if instance.def.is_inline(tcx) {
attributes::inline(cx, llfn, attributes::InlineAttr::Hint);
}
attributes::from_fn_attrs(cx, llfn, Some(instance.def.def_id()), sig);
attributes::from_fn_attrs(cx, llfn, instance, &fn_abi);
let instance_def_id = instance.def_id();


@ -1,3 +1,4 @@
use crate::abi::FnAbi;
use crate::attributes;
use crate::llvm;
use crate::llvm_util;
@ -15,7 +16,7 @@ use rustc::mir::mono::CodegenUnit;
use rustc::session::config::{self, DebugInfo};
use rustc::session::Session;
use rustc::ty::layout::{
LayoutError, LayoutOf, PointeeInfo, Size, TyLayout, VariantIdx, HasParamEnv
FnAbiExt, LayoutError, LayoutOf, PointeeInfo, Size, TyLayout, VariantIdx, HasParamEnv
};
use rustc::ty::{self, Ty, TyCtxt, Instance};
use rustc::util::nodemap::FxHashMap;
@ -420,7 +421,8 @@ impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Abi::C
));
let llfn = self.declare_fn("rust_eh_unwind_resume", sig);
let fn_abi = FnAbi::of_fn_ptr(self, sig, &[]);
let llfn = self.declare_fn("rust_eh_unwind_resume", &fn_abi);
attributes::apply_target_cpu_attr(self, llfn);
unwresume.set(Some(llfn));
llfn


@ -16,7 +16,7 @@ use rustc::hir::CodegenFnAttrFlags;
use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
use rustc::ty::subst::{SubstsRef, GenericArgKind};
use crate::abi::Abi;
use crate::abi::FnAbi;
use crate::common::CodegenCx;
use crate::builder::Builder;
use crate::value::Value;
@ -280,7 +280,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: &'ll Value,
mir: &mir::Body<'_>,
) -> Option<FunctionDebugContext<&'ll DIScope>> {
@ -308,7 +308,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let file_metadata = file_metadata(self, &loc.file.name, def_id.krate);
let function_type_metadata = unsafe {
let fn_signature = get_function_signature(self, sig);
let fn_signature = get_function_signature(self, fn_abi);
llvm::LLVMRustDIBuilderCreateSubroutineType(DIB(self), file_metadata, fn_signature)
};
@ -338,7 +338,7 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let mut flags = DIFlags::FlagPrototyped;
if self.layout_of(sig.output()).abi.is_uninhabited() {
if fn_abi.ret.layout.abi.is_uninhabited() {
flags |= DIFlags::FlagNoReturn;
}
@ -392,25 +392,20 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn get_function_signature<'ll, 'tcx>(
cx: &CodegenCx<'ll, 'tcx>,
sig: ty::FnSig<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> &'ll DIArray {
if cx.sess().opts.debuginfo == DebugInfo::Limited {
return create_DIArray(DIB(cx), &[]);
}
let mut signature = Vec::with_capacity(sig.inputs().len() + 1);
let mut signature = Vec::with_capacity(fn_abi.args.len() + 1);
// Return type -- llvm::DIBuilder wants this at index 0
signature.push(match sig.output().kind {
ty::Tuple(ref tys) if tys.is_empty() => None,
_ => Some(type_metadata(cx, sig.output(), syntax_pos::DUMMY_SP))
});
let inputs = if sig.abi == Abi::RustCall {
&sig.inputs()[..sig.inputs().len() - 1]
signature.push(if fn_abi.ret.is_ignore() {
None
} else {
sig.inputs()
};
Some(type_metadata(cx, fn_abi.ret.layout.ty, syntax_pos::DUMMY_SP))
});
// Arguments types
if cx.sess().target.target.options.is_like_msvc {
@ -424,7 +419,8 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
// and a function `fn bar(x: [(); 7])` as `fn bar(x: *const ())`.
// This transformed type is wrong, but these function types are
// already inaccurate due to ABI adjustments (see #42800).
signature.extend(inputs.iter().map(|&t| {
signature.extend(fn_abi.args.iter().map(|arg| {
let t = arg.layout.ty;
let t = match t.kind {
ty::Array(ct, _)
if (ct == cx.tcx.types.u8) || cx.layout_of(ct).is_zst() => {
@ -435,21 +431,11 @@ impl DebugInfoMethods<'tcx> for CodegenCx<'ll, 'tcx> {
Some(type_metadata(cx, t, syntax_pos::DUMMY_SP))
}));
} else {
signature.extend(inputs.iter().map(|t| {
Some(type_metadata(cx, t, syntax_pos::DUMMY_SP))
signature.extend(fn_abi.args.iter().map(|arg| {
Some(type_metadata(cx, arg.layout.ty, syntax_pos::DUMMY_SP))
}));
}
if sig.abi == Abi::RustCall && !sig.inputs().is_empty() {
if let ty::Tuple(args) = sig.inputs()[sig.inputs().len() - 1].kind {
signature.extend(
args.iter().map(|argument_type| {
Some(type_metadata(cx, argument_type.expect_ty(), syntax_pos::DUMMY_SP))
})
);
}
}
create_DIArray(DIB(cx), &signature[..])
}


@ -18,8 +18,7 @@ use crate::attributes;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::value::Value;
use rustc::ty::{self, PolyFnSig};
use rustc::ty::layout::{FnAbiExt, LayoutOf};
use rustc::ty::Ty;
use rustc::session::config::Sanitizer;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
@ -94,21 +93,12 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn declare_fn(
&self,
name: &str,
sig: PolyFnSig<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
) -> &'ll Value {
debug!("declare_rust_fn(name={:?}, sig={:?})", name, sig);
let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
debug!("declare_rust_fn(name={:?}, fn_abi={:?})", name, fn_abi);
let fn_abi = FnAbi::new(self, sig, &[]);
let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
if self.layout_of(sig.output()).abi.is_uninhabited() {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
fn_abi.apply_attrs_llfn(self, llfn);
llfn
}
@ -130,28 +120,6 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}
fn define_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
if self.get_defined_value(name).is_some() {
self.sess().fatal(&format!("symbol `{}` already defined", name))
} else {
self.declare_fn(name, fn_sig)
}
}
fn define_internal_fn(
&self,
name: &str,
fn_sig: PolyFnSig<'tcx>,
) -> &'ll Value {
let llfn = self.define_fn(name, fn_sig);
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
llfn
}
fn get_declared_value(&self, name: &str) -> Option<&'ll Value> {
debug!("get_declared_value(name={:?})", name);
let namebuf = SmallCStr::new(name);


@ -1,4 +1,3 @@
use crate::attributes;
use crate::llvm;
use crate::llvm_util;
use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
@ -14,7 +13,7 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
use rustc_codegen_ssa::glue;
use rustc_codegen_ssa::base::{to_immediate, wants_msvc_seh, compare_simd_types};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, Primitive};
use rustc::ty::layout::{self, FnAbiExt, LayoutOf, HasTyCtxt, Primitive};
use rustc::mir::interpret::GlobalId;
use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
use rustc::hir;
@ -992,8 +991,10 @@ fn gen_fn<'ll, 'tcx>(
hir::Unsafety::Unsafe,
Abi::Rust
));
let llfn = cx.define_internal_fn(name, rust_fn_sig);
attributes::from_fn_attrs(cx, llfn, None, rust_fn_sig);
let fn_abi = FnAbi::of_fn_ptr(cx, rust_fn_sig, &[]);
let llfn = cx.declare_fn(name, &fn_abi);
// FIXME(eddyb) find a nicer way to do this.
unsafe { llvm::LLVMRustSetLinkage(llfn, llvm::Linkage::InternalLinkage) };
let bx = Builder::new_block(cx, llfn, "entry-block");
codegen(bx);
llfn


@ -1,3 +1,4 @@
use crate::abi::FnAbi;
use crate::attributes;
use crate::base;
use crate::context::CodegenCx;
@ -6,7 +7,7 @@ use crate::type_of::LayoutLlvmExt;
use rustc::hir::def_id::{DefId, LOCAL_CRATE};
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::{TypeFoldable, Instance};
use rustc::ty::layout::{LayoutOf, HasTyCtxt};
use rustc::ty::layout::{FnAbiExt, LayoutOf};
use rustc_codegen_ssa::traits::*;
pub use rustc::mir::mono::MonoItem;
@ -42,10 +43,10 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
assert!(!instance.substs.needs_infer() &&
!instance.substs.has_param_types());
let mono_sig = instance.fn_sig(self.tcx());
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
let lldecl = self.declare_fn(symbol_name, mono_sig);
let fn_abi = FnAbi::of_instance(self, instance, &[]);
let lldecl = self.declare_fn(symbol_name, &fn_abi);
unsafe { llvm::LLVMRustSetLinkage(lldecl, base::linkage_to_llvm(linkage)) };
let attrs = self.tcx.codegen_fn_attrs(instance.def_id());
base::set_link_section(lldecl, &attrs);
if linkage == Linkage::LinkOnceODR ||
linkage == Linkage::WeakODR {
@ -67,16 +68,9 @@ impl PreDefineMethods<'tcx> for CodegenCx<'ll, 'tcx> {
}
}
debug!("predefine_fn: mono_sig = {:?} instance = {:?}", mono_sig, instance);
if instance.def.is_inline(self.tcx) {
attributes::inline(self, lldecl, attributes::InlineAttr::Hint);
}
attributes::from_fn_attrs(
self,
lldecl,
Some(instance.def.def_id()),
mono_sig,
);
debug!("predefine_fn: instance = {:?}", instance);
attributes::from_fn_attrs(self, lldecl, instance, &fn_abi);
self.instances.borrow_mut().insert(instance, lldecl);
}


@ -235,11 +235,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
}
ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
);
cx.fn_ptr_backend_type(&FnAbi::new(cx, sig, &[]))
cx.fn_ptr_backend_type(&FnAbi::of_fn_ptr(cx, sig, &[]))
}
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
};


@ -368,13 +368,7 @@ pub fn codegen_instance<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
// release builds.
info!("codegen_instance({})", instance);
let sig = instance.fn_sig(cx.tcx());
let sig = cx.tcx().normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
let lldecl = cx.get_fn(instance);
let mir = cx.tcx().instance_mir(instance.def);
mir::codegen_mir::<Bx>(cx, lldecl, mir, instance, sig);
mir::codegen_mir::<Bx>(cx, instance);
}
/// Creates the `main` function which will initialize the rust runtime and call


@ -131,7 +131,7 @@ impl<Bx: BuilderMethods<'a, 'tcx>> LocalAnalyzer<'mir, 'a, 'tcx, Bx> {
};
if is_consume {
let base_ty =
mir::Place::ty_from(place_ref.base, proj_base, &*self.fx.mir, cx.tcx());
mir::Place::ty_from(place_ref.base, proj_base, *self.fx.mir, cx.tcx());
let base_ty = self.fx.monomorphize(&base_ty);
// ZSTs don't require any actual memory access.


@ -24,28 +24,28 @@ use super::operand::OperandValue::{Pair, Ref, Immediate};
/// Used by `FunctionCx::codegen_terminator` for emitting common patterns
/// e.g., creating a basic block, calling a function, etc.
struct TerminatorCodegenHelper<'a, 'tcx> {
bb: &'a mir::BasicBlock,
terminator: &'a mir::Terminator<'tcx>,
struct TerminatorCodegenHelper<'tcx> {
bb: mir::BasicBlock,
terminator: &'tcx mir::Terminator<'tcx>,
funclet_bb: Option<mir::BasicBlock>,
}
impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
/// Returns the associated funclet from `FunctionCx::funclets` for the
/// `funclet_bb` member if it is not `None`.
fn funclet<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
fn funclet<'b, Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
) -> Option<&'c Bx::Funclet> {
fx: &'b mut FunctionCx<'a, 'tcx, Bx>,
) -> Option<&'b Bx::Funclet> {
match self.funclet_bb {
Some(funcl) => fx.funclets[funcl].as_ref(),
None => None,
}
}
fn lltarget<'b, 'c, Bx: BuilderMethods<'b, 'tcx>>(
fn lltarget<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> (Bx::BasicBlock, bool) {
let span = self.terminator.source_info.span;
@ -63,9 +63,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
}
/// Create a basic block.
fn llblock<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
fn llblock<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
target: mir::BasicBlock,
) -> Bx::BasicBlock {
let (lltarget, is_cleanupret) = self.lltarget(fx, target);
@ -83,9 +83,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
}
}
fn funclet_br<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
fn funclet_br<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
bx: &mut Bx,
target: mir::BasicBlock,
) {
@ -101,9 +101,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
/// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
/// return destination `destination` and the cleanup function `cleanup`.
fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
fn do_call<Bx: BuilderMethods<'a, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
fx: &mut FunctionCx<'a, 'tcx, Bx>,
bx: &mut Bx,
fn_abi: FnAbi<'tcx, Ty<'tcx>>,
fn_ptr: Bx::Value,
@ -132,7 +132,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
} else {
let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[*self.bb].is_cleanup {
if fx.mir[self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
// struct, there are "symmetry" issues that cause
@ -151,15 +151,15 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
// Generate sideeffect intrinsic if jumping to any of the targets can form
// a loop.
fn maybe_sideeffect<'b, 'tcx2: 'b, Bx: BuilderMethods<'b, 'tcx2>>(
fn maybe_sideeffect<Bx: BuilderMethods<'a, 'tcx>>(
&self,
mir: mir::ReadOnlyBodyCache<'b, 'tcx>,
mir: mir::ReadOnlyBodyCache<'tcx, 'tcx>,
bx: &mut Bx,
targets: &[mir::BasicBlock],
) {
if bx.tcx().sess.opts.debugging_opts.insert_sideeffect {
if targets.iter().any(|target| {
*target <= *self.bb
if targets.iter().any(|&target| {
target <= self.bb
&& target
.start_location()
.is_predecessor_of(self.bb.start_location(), mir)
@ -173,9 +173,9 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
/// Codegen implementations for some terminator variants.
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
/// Generates code for a `Resume` terminator.
fn codegen_resume_terminator<'b>(
fn codegen_resume_terminator(
&mut self,
helper: TerminatorCodegenHelper<'b, 'tcx>,
helper: TerminatorCodegenHelper<'tcx>,
mut bx: Bx,
) {
if let Some(funclet) = helper.funclet(self) {
@ -201,9 +201,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
}
fn codegen_switchint_terminator<'b>(
fn codegen_switchint_terminator(
&mut self,
helper: TerminatorCodegenHelper<'b, 'tcx>,
helper: TerminatorCodegenHelper<'tcx>,
mut bx: Bx,
discr: &mir::Operand<'tcx>,
switch_ty: Ty<'tcx>,
@ -316,15 +316,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
fn codegen_drop_terminator<'b>(
fn codegen_drop_terminator(
&mut self,
helper: TerminatorCodegenHelper<'b, 'tcx>,
helper: TerminatorCodegenHelper<'tcx>,
mut bx: Bx,
location: &mir::Place<'tcx>,
target: mir::BasicBlock,
unwind: Option<mir::BasicBlock>,
) {
let ty = location.ty(&*self.mir, bx.tcx()).ty;
let ty = location.ty(*self.mir, bx.tcx()).ty;
let ty = self.monomorphize(&ty);
let drop_fn = Instance::resolve_drop_in_place(bx.tcx(), ty);
@ -345,20 +345,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&args1[..]
};
let (drop_fn, fn_abi) = match ty.kind {
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?
ty::Dynamic(..) => {
let sig = drop_fn.fn_sig(self.cx.tcx());
let sig = self.cx.tcx().normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
);
let fn_abi = FnAbi::new_vtable(&bx, sig, &[]);
let virtual_drop = Instance {
def: ty::InstanceDef::Virtual(drop_fn.def_id(), 0),
substs: drop_fn.substs,
};
let fn_abi = FnAbi::of_instance(&bx, virtual_drop, &[]);
let vtable = args[1];
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_abi), fn_abi)
}
_ => {
(bx.get_fn_addr(drop_fn),
FnAbi::of_instance(&bx, drop_fn))
FnAbi::of_instance(&bx, drop_fn, &[]))
}
};
helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
@ -367,9 +368,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
unwind);
}
fn codegen_assert_terminator<'b>(
fn codegen_assert_terminator(
&mut self,
helper: TerminatorCodegenHelper<'b, 'tcx>,
helper: TerminatorCodegenHelper<'tcx>,
mut bx: Bx,
terminator: &mir::Terminator<'tcx>,
cond: &mir::Operand<'tcx>,
@ -439,16 +440,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Obtain the panic entry point.
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_abi = FnAbi::of_instance(&bx, instance);
let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
let llfn = bx.get_fn_addr(instance);
// Codegen the actual panic invoke/call.
helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
}
fn codegen_call_terminator<'b>(
fn codegen_call_terminator(
&mut self,
helper: TerminatorCodegenHelper<'b, 'tcx>,
helper: TerminatorCodegenHelper<'tcx>,
mut bx: Bx,
terminator: &mir::Terminator<'tcx>,
func: &mir::Operand<'tcx>,
@ -474,12 +475,20 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bug!("{} is not callable", callee.layout.ty),
};
let def = instance.map(|i| i.def);
if let Some(ty::InstanceDef::DropGlue(_, None)) = def {
// Empty drop glue; a no-op.
let &(_, target) = destination.as_ref().unwrap();
helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
return;
}
// FIXME(eddyb) avoid computing this if possible, when `instance` is
// available - right now `sig` is only needed for getting the `abi`
// and figuring out how many extra args were passed to a C-variadic `fn`.
let sig = callee.layout.ty.fn_sig(bx.tcx());
let sig = bx.tcx().normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
);
let abi = sig.abi;
let abi = sig.abi();
// Handle intrinsics old codegen wants Expr's for, ourselves.
let intrinsic = match def {
@ -489,6 +498,17 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
};
let intrinsic = intrinsic.as_ref().map(|s| &s[..]);
let extra_args = &args[sig.inputs().skip_binder().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(*self.mir, bx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_abi = match instance {
Some(instance) => FnAbi::of_instance(&bx, instance, &extra_args),
None => FnAbi::of_fn_ptr(&bx, sig, &extra_args)
};
if intrinsic == Some("transmute") {
if let Some(destination_ref) = destination.as_ref() {
let &(ref dest, target) = destination_ref;
@ -502,32 +522,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// we can do what we like. Here, we declare that transmuting
// into an uninhabited type is impossible, so anything following
// it must be unreachable.
assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
assert_eq!(fn_abi.ret.layout.abi, layout::Abi::Uninhabited);
bx.unreachable();
}
return;
}
let extra_args = &args[sig.inputs().len()..];
let extra_args = extra_args.iter().map(|op_arg| {
let op_ty = op_arg.ty(&*self.mir, bx.tcx());
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_abi = match def {
Some(ty::InstanceDef::Virtual(..)) => {
FnAbi::new_vtable(&bx, sig, &extra_args)
}
Some(ty::InstanceDef::DropGlue(_, None)) => {
// Empty drop glue; a no-op.
let &(_, target) = destination.as_ref().unwrap();
helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.funclet_br(self, &mut bx, target);
return;
}
_ => FnAbi::new(&bx, sig, &extra_args)
};
// For normal codegen, this Miri-specific intrinsic is just a NOP.
if intrinsic == Some("miri_start_panic") {
let target = destination.as_ref().unwrap().1;
@ -549,7 +549,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let def_id =
common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_abi = FnAbi::of_instance(&bx, instance);
let fn_abi = FnAbi::of_instance(&bx, instance, &[]);
let llfn = bx.get_fn_addr(instance);
if let Some((_, target)) = destination.as_ref() {
@ -807,14 +807,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&mut self,
mut bx: Bx,
bb: mir::BasicBlock,
terminator: &mir::Terminator<'tcx>
terminator: &'tcx mir::Terminator<'tcx>
) {
debug!("codegen_terminator: {:?}", terminator);
// Create the cleanup bundle, if needed.
let funclet_bb = self.cleanup_kinds[bb].funclet_bb(bb);
let helper = TerminatorCodegenHelper {
bb: &bb, terminator, funclet_bb
bb, terminator, funclet_bb
};
self.set_debug_loc(&mut bx, terminator.source_info);


@ -1,6 +1,6 @@
use rustc::ty::{self, Ty, TypeFoldable, Instance};
use rustc::ty::layout::{TyLayout, HasTyCtxt, FnAbiExt};
use rustc::mir::{self, Body, ReadOnlyBodyCache};
use rustc::mir;
use rustc_target::abi::call::{FnAbi, PassMode};
use crate::base;
use crate::traits::*;
@ -21,7 +21,7 @@ use self::operand::{OperandRef, OperandValue};
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
instance: Instance<'tcx>,
mir: mir::ReadOnlyBodyCache<'a, 'tcx>,
mir: mir::ReadOnlyBodyCache<'tcx, 'tcx>,
debug_context: Option<FunctionDebugContext<Bx::DIScope>>,
@ -76,7 +76,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
/// All `VarDebuginfo` from the MIR body, partitioned by `Local`.
/// This is `None` if no variable debuginfo/names are needed.
per_local_var_debug_info: Option<IndexVec<mir::Local, Vec<&'a mir::VarDebugInfo<'tcx>>>>,
per_local_var_debug_info: Option<IndexVec<mir::Local, Vec<&'tcx mir::VarDebugInfo<'tcx>>>>,
}
impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
@ -121,18 +121,18 @@ impl<'a, 'tcx, V: CodegenObject> LocalRef<'tcx, V> {
pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
cx: &'a Bx::CodegenCx,
llfn: Bx::Function,
mir: ReadOnlyBodyCache<'a, 'tcx>,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
) {
assert!(!instance.substs.needs_infer());
let fn_abi = FnAbi::new(cx, sig, &[]);
let llfn = cx.get_fn(instance);
let mir = cx.tcx().instance_mir(instance.def);
let fn_abi = FnAbi::of_instance(cx, instance, &[]);
debug!("fn_abi: {:?}", fn_abi);
let debug_context =
cx.create_function_debug_context(instance, sig, llfn, &mir);
let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);
let mut bx = Bx::new_block(cx, llfn, "start");
@ -156,7 +156,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}).collect();
let (landing_pads, funclets) = create_funclets(&mir, &mut bx, &cleanup_kinds, &block_bxs);
let mir_body: &Body<'_> = mir.body();
let mir_body: &mir::Body<'_> = mir.body();
let mut fx = FunctionCx {
instance,
mir,
@ -248,8 +248,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
}
}
fn create_funclets<'a, 'b, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
mir: &'b Body<'tcx>,
fn create_funclets<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
mir: &'tcx mir::Body<'tcx>,
bx: &mut Bx,
cleanup_kinds: &IndexVec<mir::BasicBlock, CleanupKind>,
block_bxs: &IndexVec<mir::BasicBlock, Bx::BasicBlock>,


@ -594,7 +594,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let place_ty = mir::Place::ty_from(
place_ref.base,
place_ref.projection,
&*self.mir,
*self.mir,
tcx,
);
self.monomorphize(&place_ty.ty)


@ -460,7 +460,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
mir::Rvalue::Discriminant(ref place) => {
let discr_ty = rvalue.ty(&*self.mir, bx.tcx());
let discr_ty = rvalue.ty(*self.mir, bx.tcx());
let discr = self.codegen_place(&mut bx, &place.as_ref())
.codegen_get_discr(&mut bx, discr_ty);
(bx, OperandRef {
@ -513,7 +513,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
mir::Rvalue::Aggregate(..) => {
// According to `rvalue_creates_operand`, only ZST
// aggregate rvalues are allowed to be operands.
let ty = rvalue.ty(&*self.mir, self.cx.tcx());
let ty = rvalue.ty(*self.mir, self.cx.tcx());
let operand = OperandRef::new_zst(
&mut bx,
self.cx.layout_of(self.monomorphize(&ty)),
@ -710,7 +710,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
true,
mir::Rvalue::Repeat(..) |
mir::Rvalue::Aggregate(..) => {
let ty = rvalue.ty(&*self.mir, self.cx.tcx());
let ty = rvalue.ty(*self.mir, self.cx.tcx());
let ty = self.monomorphize(&ty);
self.cx.spanned_layout_of(ty, span).is_zst()
}


@ -2,8 +2,9 @@ use super::BackendTypes;
use crate::mir::debuginfo::{FunctionDebugContext, VariableKind};
use rustc::hir::def_id::CrateNum;
use rustc::mir;
use rustc::ty::{self, Ty, Instance};
use rustc::ty::{Ty, Instance};
use rustc::ty::layout::Size;
use rustc_target::abi::call::FnAbi;
use syntax::ast::Name;
use syntax_pos::{SourceFile, Span};
@ -17,7 +18,7 @@ pub trait DebugInfoMethods<'tcx>: BackendTypes {
fn create_function_debug_context(
&self,
instance: Instance<'tcx>,
sig: ty::FnSig<'tcx>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
llfn: Self::Function,
mir: &mir::Body<'_>,
) -> Option<FunctionDebugContext<Self::DIScope>>;


@ -1,7 +1,8 @@
use super::BackendTypes;
use rustc::hir::def_id::DefId;
use rustc::mir::mono::{Linkage, Visibility};
use rustc::ty::{self, Instance};
use rustc::ty::{Instance, Ty};
use rustc_target::abi::call::FnAbi;
pub trait DeclareMethods<'tcx>: BackendTypes {
/// Declare a global value.
@ -23,7 +24,7 @@ pub trait DeclareMethods<'tcx>: BackendTypes {
///
/// If there's a value with the same name already declared, the function will
/// update the declaration and return existing Value instead.
fn declare_fn(&self, name: &str, sig: ty::PolyFnSig<'tcx>) -> Self::Function;
fn declare_fn(&self, name: &str, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Function;
/// Declare a global with an intention to define it.
///
@ -38,20 +39,6 @@ pub trait DeclareMethods<'tcx>: BackendTypes {
/// Use this function when you intend to define a global without a name.
fn define_private_global(&self, ty: Self::Type) -> Self::Value;
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// return panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
fn define_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
/// Declare a Rust function with an intention to define it.
///
/// Use this function when you intend to define a function. This function will
/// return panic if the name already has a definition associated with it. This
/// can happen with #[no_mangle] or #[export_name], for example.
fn define_internal_fn(&self, name: &str, fn_sig: ty::PolyFnSig<'tcx>) -> Self::Value;
/// Gets declared value by name.
fn get_declared_value(&self, name: &str) -> Option<Self::Value>;


@ -208,7 +208,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let ty = Place::ty_from(
used_place.base,
used_place.projection,
&*self.body,
*self.body,
self.infcx.tcx
).ty;
let needs_note = match ty.kind {
@ -225,7 +225,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let mpi = self.move_data.moves[move_out_indices[0]].path;
let place = &self.move_data.move_paths[mpi].place;
let ty = place.ty(&*self.body, self.infcx.tcx).ty;
let ty = place.ty(*self.body, self.infcx.tcx).ty;
let opt_name =
self.describe_place_with_options(place.as_ref(), IncludingDowncast(true));
let note_msg = match opt_name {
@ -625,7 +625,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let ty = Place::ty_from(
place_base,
place_projection,
&*self.body,
*self.body,
self.infcx.tcx
).ty;
ty.ty_adt_def().filter(|adt| adt.is_union()).map(|_| ty)
@ -1635,7 +1635,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
Place::ty_from(
&place.base,
proj_base,
&*self.body,
*self.body,
tcx
).ty.is_box(),
"Drop of value behind a reference or raw pointer"
@ -1648,7 +1648,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let base_ty = Place::ty_from(
&place.base,
proj_base,
&*self.body,
*self.body,
tcx
).ty;
match base_ty.kind {


@ -372,7 +372,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let base_ty = Place::ty_from(
place.base,
place.projection,
&*self.body,
*self.body,
self.infcx.tcx).ty;
self.describe_field_from_ty(&base_ty, field, Some(*variant_index))
}
@ -502,7 +502,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
..
}) = bbd.terminator {
if let Some(source) = BorrowedContentSource::from_call(
func.ty(&*self.body, tcx),
func.ty(*self.body, tcx),
tcx
) {
return source;
@ -519,7 +519,7 @@ impl<'cx, 'tcx> MirBorrowckCtxt<'cx, 'tcx> {
let base_ty = Place::ty_from(
deref_base.base,
deref_base.projection,
&*self.body,
*self.body,
tcx
).ty;
if base_ty.is_unsafe_ptr() {


@ -646,7 +646,7 @@ impl<'cx, 'tcx> DataflowResultsConsumer<'cx, 'tcx> for MirBorrowckCtxt<'cx, 'tcx
let tcx = self.infcx.tcx;
// Compute the type with accurate region information.
let drop_place_ty = drop_place.ty(&*self.body, self.infcx.tcx);
let drop_place_ty = drop_place.ty(*self.body, self.infcx.tcx);
// Erase the regions.
let drop_place_ty = self.infcx.tcx.erase_regions(&drop_place_ty).ty;


@ -300,7 +300,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// Inspect the type of the content behind the
// borrow to provide feedback about why this
// was a move rather than a copy.
let ty = deref_target_place.ty(&*self.body, self.infcx.tcx).ty;
let ty = deref_target_place.ty(*self.body, self.infcx.tcx).ty;
let upvar_field = self.prefixes(move_place.as_ref(), PrefixSet::All)
.find_map(|p| self.is_upvar_field_projection(p));
@ -411,7 +411,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
};
let move_ty = format!(
"{:?}",
move_place.ty(&*self.body, self.infcx.tcx).ty,
move_place.ty(*self.body, self.infcx.tcx).ty,
);
if let Ok(snippet) = self.infcx.tcx.sess.source_map().span_to_snippet(span) {
let is_option = move_ty.starts_with("std::option::Option");
@ -454,7 +454,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
}
if binds_to.is_empty() {
let place_ty = move_from.ty(&*self.body, self.infcx.tcx).ty;
let place_ty = move_from.ty(*self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(move_from.as_ref()) {
Some(desc) => format!("`{}`", desc),
None => format!("value"),
@ -482,7 +482,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
// No binding. Nothing to suggest.
GroupedMoveError::OtherIllegalMove { ref original_path, use_spans, .. } => {
let span = use_spans.var_or_use();
let place_ty = original_path.ty(&*self.body, self.infcx.tcx).ty;
let place_ty = original_path.ty(*self.body, self.infcx.tcx).ty;
let place_desc = match self.describe_place(original_path.as_ref()) {
Some(desc) => format!("`{}`", desc),
None => format!("value"),


@ -64,7 +64,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
Place::ty_from(
&the_place_err.base,
proj_base,
&*self.body,
*self.body,
self.infcx.tcx
).ty));
@ -115,7 +115,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
Place::ty_from(
the_place_err.base,
the_place_err.projection,
&*self.body,
*self.body,
self.infcx.tcx
)
.ty
@ -229,7 +229,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
if let Some((span, message)) = annotate_struct_field(
self.infcx.tcx,
Place::ty_from(base, proj_base, &*self.body, self.infcx.tcx).ty,
Place::ty_from(base, proj_base, *self.body, self.infcx.tcx).ty,
field,
) {
err.span_suggestion(
@ -304,7 +304,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> {
projection: [proj_base @ .., ProjectionElem::Field(upvar_index, _)],
} => {
debug_assert!(is_closure_or_generator(
Place::ty_from(base, proj_base, &*self.body, self.infcx.tcx).ty
Place::ty_from(base, proj_base, *self.body, self.infcx.tcx).ty
));
err.span_label(span, format!("cannot {ACT}", ACT = act));


@ -1413,9 +1413,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
_ => ConstraintCategory::Assignment,
};
let place_ty = place.ty(&*body, tcx).ty;
let place_ty = place.ty(*body, tcx).ty;
let place_ty = self.normalize(place_ty, location);
let rv_ty = rv.ty(&*body, tcx);
let rv_ty = rv.ty(*body, tcx);
let rv_ty = self.normalize(rv_ty, location);
if let Err(terr) =
self.sub_types_or_anon(rv_ty, place_ty, location.to_locations(), category)
@ -1467,7 +1467,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
ref place,
variant_index,
} => {
let place_type = place.ty(&*body, tcx).ty;
let place_type = place.ty(*body, tcx).ty;
let adt = match place_type.kind {
ty::Adt(adt, _) if adt.is_enum() => adt,
_ => {
@ -1489,7 +1489,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
};
}
StatementKind::AscribeUserType(box(ref place, ref projection), variance) => {
let place_ty = place.ty(&*body, tcx).ty;
let place_ty = place.ty(*body, tcx).ty;
if let Err(terr) = self.relate_type_and_user_type(
place_ty,
variance,
@ -2010,7 +2010,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// While this is located in `nll::typeck` this error is not an NLL error, it's
// a required check to make sure that repeated elements implement `Copy`.
let span = body.source_info(location).span;
let ty = operand.ty(&*body, tcx);
let ty = operand.ty(*body, tcx);
if !self.infcx.type_is_copy_modulo_regions(self.param_env, ty, span) {
// To determine if `const_in_array_repeat_expressions` feature gate should
// be mentioned, need to check if the rvalue is promotable.
@ -2064,7 +2064,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
Rvalue::Cast(cast_kind, op, ty) => {
match cast_kind {
CastKind::Pointer(PointerCast::ReifyFnPointer) => {
let fn_sig = op.ty(&*body, tcx).fn_sig(tcx);
let fn_sig = op.ty(*body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
@ -2093,7 +2093,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::Pointer(PointerCast::ClosureFnPointer(unsafety)) => {
let sig = match op.ty(&*body, tcx).kind {
let sig = match op.ty(*body, tcx).kind {
ty::Closure(def_id, substs) => {
substs.as_closure().sig_ty(def_id, tcx).fn_sig(tcx)
}
@ -2119,7 +2119,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::Pointer(PointerCast::UnsafeFnPointer) => {
let fn_sig = op.ty(&*body, tcx).fn_sig(tcx);
let fn_sig = op.ty(*body, tcx).fn_sig(tcx);
// The type that we see in the fcx is like
// `foo::<'a, 'b>`, where `foo` is the path to a
@ -2151,7 +2151,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
let &ty = ty;
let trait_ref = ty::TraitRef {
def_id: tcx.lang_items().coerce_unsized_trait().unwrap(),
substs: tcx.mk_substs_trait(op.ty(&*body, tcx), &[ty.into()]),
substs: tcx.mk_substs_trait(op.ty(*body, tcx), &[ty.into()]),
};
self.prove_trait_ref(
@ -2162,7 +2162,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::Pointer(PointerCast::MutToConstPointer) => {
let ty_from = match op.ty(&*body, tcx).kind {
let ty_from = match op.ty(*body, tcx).kind {
ty::RawPtr(ty::TypeAndMut {
ty: ty_from,
mutbl: hir::Mutability::Mutable,
@ -2210,7 +2210,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::Pointer(PointerCast::ArrayToPointer) => {
let ty_from = op.ty(&*body, tcx);
let ty_from = op.ty(*body, tcx);
let opt_ty_elem = match ty_from.kind {
ty::RawPtr(
@ -2272,7 +2272,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
}
CastKind::Misc => {
let ty_from = op.ty(&*body, tcx);
let ty_from = op.ty(*body, tcx);
let cast_ty_from = CastTy::from_ty(ty_from);
let cast_ty_to = CastTy::from_ty(ty);
match (cast_ty_from, cast_ty_to) {
@ -2339,9 +2339,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
| Rvalue::BinaryOp(BinOp::Le, left, right)
| Rvalue::BinaryOp(BinOp::Gt, left, right)
| Rvalue::BinaryOp(BinOp::Ge, left, right) => {
let ty_left = left.ty(&*body, tcx);
let ty_left = left.ty(*body, tcx);
if let ty::RawPtr(_) | ty::FnPtr(_) = ty_left.kind {
let ty_right = right.ty(&*body, tcx);
let ty_right = right.ty(*body, tcx);
let common_ty = self.infcx.next_ty_var(
TypeVariableOrigin {
kind: TypeVariableOriginKind::MiscVariable,


@ -143,7 +143,7 @@ impl<'cx, 'tcx> Iterator for Prefixes<'cx, 'tcx> {
// derefs, except we stop at the deref of a shared
// reference.
let ty = Place::ty_from(cursor.base, proj_base, &*self.body, self.tcx).ty;
let ty = Place::ty_from(cursor.base, proj_base, *self.body, self.tcx).ty;
match ty.kind {
ty::RawPtr(_) |
ty::Ref(


@ -741,23 +741,21 @@ fn visit_instance_use<'tcx>(
}
match instance.def {
ty::InstanceDef::Intrinsic(def_id) => {
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::Intrinsic(_) => {
if !is_direct_call {
bug!("intrinsic {:?} being reified", def_id);
bug!("{:?} being reified", instance);
}
}
ty::InstanceDef::VtableShim(..) |
ty::InstanceDef::ReifyShim(..) |
ty::InstanceDef::Virtual(..) |
ty::InstanceDef::DropGlue(_, None) => {
// Don't need to emit shim if we are calling directly.
// Don't need to emit noop drop glue if we are calling directly.
if !is_direct_call {
output.push(create_fn_mono_item(instance));
}
}
ty::InstanceDef::DropGlue(_, Some(_)) => {
output.push(create_fn_mono_item(instance));
}
ty::InstanceDef::DropGlue(_, Some(_)) |
ty::InstanceDef::VtableShim(..) |
ty::InstanceDef::ReifyShim(..) |
ty::InstanceDef::ClosureOnceShim { .. } |
ty::InstanceDef::Item(..) |
ty::InstanceDef::FnPtrShim(..) |


@ -35,7 +35,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx
ty::InstanceDef::VtableShim(def_id) => {
build_call_shim(
tcx,
def_id,
instance,
Adjustment::DerefMove,
CallKind::Direct(def_id),
None,
@ -60,27 +60,27 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx
build_call_shim(
tcx,
def_id,
instance,
adjustment,
CallKind::Indirect,
Some(arg_tys)
)
}
// We are generating a call back to our def-id, which the
// codegen backend knows to turn to an actual virtual call.
ty::InstanceDef::Virtual(def_id, _) |
// ...or we are generating a direct call to a function for which indirect calls must be
// codegen'd differently than direct ones (example: #[track_caller])
// codegen backend knows to turn to an actual call, be it
// a virtual call, or a direct call to a function for which
// indirect calls must be codegen'd differently than direct ones
// (such as `#[track_caller]`).
ty::InstanceDef::ReifyShim(def_id) => {
build_call_shim(
tcx,
def_id,
instance,
Adjustment::Identity,
CallKind::Direct(def_id),
None
)
}
ty::InstanceDef::ClosureOnceShim { call_once } => {
ty::InstanceDef::ClosureOnceShim { call_once: _ } => {
let fn_mut = tcx.lang_items().fn_mut_trait().unwrap();
let call_mut = tcx
.associated_items(fn_mut)
@ -89,7 +89,7 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx
build_call_shim(
tcx,
call_once,
instance,
Adjustment::RefMut,
CallKind::Direct(call_mut),
None
@ -109,6 +109,9 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceDef<'tcx>) -> &'tcx
bug!("builtin clone shim {:?} not supported", instance)
}
}
ty::InstanceDef::Virtual(..) => {
bug!("InstanceDef::Virtual ({:?}) is for direct calls only", instance)
}
ty::InstanceDef::Intrinsic(_) => {
bug!("creating shims from intrinsics ({:?}) is unsupported", instance)
}
@ -697,7 +700,7 @@ impl CloneShimBuilder<'tcx> {
}
}
/// Builds a "call" shim for `def_id`. The shim calls the
/// Builds a "call" shim for `instance`. The shim calls the
/// function specified by `call_kind`, first adjusting its first
/// argument according to `rcvr_adjustment`.
///
@ -705,17 +708,30 @@ impl CloneShimBuilder<'tcx> {
/// function will be untupled as these types.
fn build_call_shim<'tcx>(
tcx: TyCtxt<'tcx>,
def_id: DefId,
instance: ty::InstanceDef<'tcx>,
rcvr_adjustment: Adjustment,
call_kind: CallKind,
untuple_args: Option<&[Ty<'tcx>]>,
) -> BodyCache<'tcx> {
debug!("build_call_shim(def_id={:?}, rcvr_adjustment={:?}, \
debug!("build_call_shim(instance={:?}, rcvr_adjustment={:?}, \
call_kind={:?}, untuple_args={:?})",
def_id, rcvr_adjustment, call_kind, untuple_args);
instance, rcvr_adjustment, call_kind, untuple_args);
let def_id = instance.def_id();
let sig = tcx.fn_sig(def_id);
let sig = tcx.erase_late_bound_regions(&sig);
let mut sig = tcx.erase_late_bound_regions(&sig);
// FIXME(eddyb) avoid having this snippet both here and in
// `Instance::fn_sig` (introduce `InstanceDef::fn_sig`?).
if let ty::InstanceDef::VtableShim(..) = instance {
// Modify fn(self, ...) to fn(self: *mut Self, ...)
let mut inputs_and_output = sig.inputs_and_output.to_vec();
let self_arg = &mut inputs_and_output[0];
debug_assert!(tcx.generics_of(def_id).has_self && *self_arg == tcx.types.self_param);
*self_arg = tcx.mk_mut_ptr(*self_arg);
sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
}
let span = tcx.def_span(def_id);
debug!("build_call_shim: sig={:?}", sig);
@ -730,14 +746,7 @@ fn build_call_shim<'tcx>(
let rcvr = match rcvr_adjustment {
Adjustment::Identity => Operand::Move(rcvr_l),
Adjustment::Deref => Operand::Copy(tcx.mk_place_deref(rcvr_l)),
Adjustment::DerefMove => {
// fn(Self, ...) -> fn(*mut Self, ...)
let arg_ty = local_decls[rcvr_arg].ty;
debug_assert!(tcx.generics_of(def_id).has_self && arg_ty == tcx.types.self_param);
local_decls[rcvr_arg].ty = tcx.mk_mut_ptr(arg_ty);
Operand::Move(tcx.mk_place_deref(rcvr_l))
}
Adjustment::DerefMove => Operand::Move(tcx.mk_place_deref(rcvr_l)),
Adjustment::RefMut => {
// let rcvr = &mut rcvr;
let ref_rcvr = local_decls.push(temp_decl(

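For the `VtableShim` arm above, the receiver adjustment now happens on the erased signature itself (`fn(self, ...)` becomes `fn(self: *mut Self, ...)`) instead of being patched into `local_decls`, and `Adjustment::DerefMove` simply moves out of the dereferenced pointer. A rough surface-Rust sketch of what such a shim does (hypothetical `Consume`/`Token` names; the real shim is emitted directly as MIR by `build_call_shim`):

trait Consume {
    fn consume(self); // by-value receiver, callable through a vtable only via a shim
}

// The vtable entry takes `*mut Self`; the shim moves the value out of it
// (`Operand::Move(tcx.mk_place_deref(rcvr_l))`) and calls the method directly.
unsafe fn consume_vtable_shim<T: Consume>(this: *mut T) {
    T::consume(std::ptr::read(this))
}

fn main() {
    struct Token;
    impl Consume for Token {
        fn consume(self) {}
    }
    let mut t = Token;
    unsafe { consume_vtable_shim(&mut t as *mut Token) };
    std::mem::forget(t); // the shim already moved the value out; avoid a double drop
}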

@ -51,7 +51,7 @@ pub trait Qualif {
});
let qualif = base_qualif && Self::in_any_value_of_ty(
cx,
Place::ty_from(place.base, proj_base, &*cx.body, cx.tcx)
Place::ty_from(place.base, proj_base, *cx.body, cx.tcx)
.projection_ty(cx.tcx, elem)
.ty,
);
@ -155,7 +155,7 @@ pub trait Qualif {
// Special-case reborrows to be more like a copy of the reference.
if let &[ref proj_base @ .., elem] = place.projection.as_ref() {
if ProjectionElem::Deref == elem {
let base_ty = Place::ty_from(&place.base, proj_base, &*cx.body, cx.tcx).ty;
let base_ty = Place::ty_from(&place.base, proj_base, *cx.body, cx.tcx).ty;
if let ty::Ref(..) = base_ty.kind {
return Self::in_place(cx, per_local, PlaceRef {
base: &place.base,
@ -221,7 +221,7 @@ impl Qualif for HasMutInterior {
Rvalue::Aggregate(ref kind, _) => {
if let AggregateKind::Adt(def, ..) = **kind {
if Some(def.did) == cx.tcx.lang_items().unsafe_cell_type() {
let ty = rvalue.ty(&*cx.body, cx.tcx);
let ty = rvalue.ty(*cx.body, cx.tcx);
assert_eq!(Self::in_any_value_of_ty(cx, ty), true);
return true;
}


@ -77,7 +77,7 @@ where
args: &[mir::Operand<'tcx>],
return_place: &mir::Place<'tcx>,
) {
let return_ty = return_place.ty(&*self.item.body, self.item.tcx).ty;
let return_ty = return_place.ty(*self.item.body, self.item.tcx).ty;
let qualif = Q::in_call(
self.item,
&|l| self.qualifs_per_local.contains(l),


@ -304,7 +304,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
// Special-case reborrows to be more like a copy of a reference.
if let Rvalue::Ref(_, kind, ref place) = *rvalue {
if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, &*self.body, place) {
if let Some(reborrowed_proj) = place_as_reborrow(self.tcx, *self.body, place) {
let ctx = match kind {
BorrowKind::Shared => PlaceContext::NonMutatingUse(
NonMutatingUseContext::SharedBorrow,
@ -342,7 +342,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
| Rvalue::Ref(_, kind @ BorrowKind::Mut { .. }, ref place)
| Rvalue::Ref(_, kind @ BorrowKind::Unique, ref place)
=> {
let ty = place.ty(&*self.body, self.tcx).ty;
let ty = place.ty(*self.body, self.tcx).ty;
let is_allowed = match ty.kind {
// Inside a `static mut`, `&mut [...]` is allowed.
ty::Array(..) | ty::Slice(_) if self.const_kind() == ConstKind::StaticMut
@ -390,7 +390,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
}
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) => {
let operand_ty = operand.ty(&*self.body, self.tcx);
let operand_ty = operand.ty(*self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
@ -401,7 +401,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
}
Rvalue::BinaryOp(op, ref lhs, _) => {
if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(&*self.body, self.tcx).kind {
if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(*self.body, self.tcx).kind {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
@ -475,7 +475,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
match elem {
ProjectionElem::Deref => {
let base_ty = Place::ty_from(place_base, proj_base, &*self.body, self.tcx).ty;
let base_ty = Place::ty_from(place_base, proj_base, *self.body, self.tcx).ty;
if let ty::RawPtr(_) = base_ty.kind {
if proj_base.is_empty() {
if let (PlaceBase::Local(local), []) = (place_base, proj_base) {
@ -499,7 +499,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
ProjectionElem::Subslice {..} |
ProjectionElem::Field(..) |
ProjectionElem::Index(_) => {
let base_ty = Place::ty_from(place_base, proj_base, &*self.body, self.tcx).ty;
let base_ty = Place::ty_from(place_base, proj_base, *self.body, self.tcx).ty;
match base_ty.ty_adt_def() {
Some(def) if def.is_union() => {
self.check_op(ops::UnionAccess);
@ -548,7 +548,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
match kind {
TerminatorKind::Call { func, .. } => {
let fn_ty = func.ty(&*self.body, self.tcx);
let fn_ty = func.ty(*self.body, self.tcx);
let def_id = match fn_ty.kind {
ty::FnDef(def_id, _) => def_id,
@ -609,7 +609,7 @@ impl Visitor<'tcx> for Validator<'_, 'mir, 'tcx> {
// Check to see if the type of this place can ever have a drop impl. If not, this
// `Drop` terminator is frivolous.
let ty_needs_drop = dropped_place
.ty(&*self.body, self.tcx)
.ty(*self.body, self.tcx)
.ty
.needs_drop(self.tcx, self.param_env);


@ -350,7 +350,7 @@ impl<'tcx> Validator<'_, 'tcx> {
let ty = Place::ty_from(
&place.base,
proj_base,
&*self.body,
*self.body,
self.tcx
)
.projection_ty(self.tcx, elem)
@ -373,7 +373,7 @@ impl<'tcx> Validator<'_, 'tcx> {
}
if let BorrowKind::Mut { .. } = kind {
let ty = place.ty(&*self.body, self.tcx).ty;
let ty = place.ty(*self.body, self.tcx).ty;
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
@ -522,7 +522,7 @@ impl<'tcx> Validator<'_, 'tcx> {
ProjectionElem::Field(..) => {
if self.const_kind.is_none() {
let base_ty =
Place::ty_from(place.base, proj_base, &*self.body, self.tcx).ty;
Place::ty_from(place.base, proj_base, *self.body, self.tcx).ty;
if let Some(def) = base_ty.ty_adt_def() {
// No promotion of union field accesses.
if def.is_union() {
@ -571,7 +571,7 @@ impl<'tcx> Validator<'_, 'tcx> {
fn validate_rvalue(&self, rvalue: &Rvalue<'tcx>) -> Result<(), Unpromotable> {
match *rvalue {
Rvalue::Cast(CastKind::Misc, ref operand, cast_ty) if self.const_kind.is_none() => {
let operand_ty = operand.ty(&*self.body, self.tcx);
let operand_ty = operand.ty(*self.body, self.tcx);
let cast_in = CastTy::from_ty(operand_ty).expect("bad input type for cast");
let cast_out = CastTy::from_ty(cast_ty).expect("bad output type for cast");
match (cast_in, cast_out) {
@ -585,7 +585,7 @@ impl<'tcx> Validator<'_, 'tcx> {
}
Rvalue::BinaryOp(op, ref lhs, _) if self.const_kind.is_none() => {
if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(&*self.body, self.tcx).kind {
if let ty::RawPtr(_) | ty::FnPtr(..) = lhs.ty(*self.body, self.tcx).kind {
assert!(op == BinOp::Eq || op == BinOp::Ne ||
op == BinOp::Le || op == BinOp::Lt ||
op == BinOp::Ge || op == BinOp::Gt ||
@ -620,7 +620,7 @@ impl<'tcx> Validator<'_, 'tcx> {
Rvalue::Ref(_, kind, place) => {
if let BorrowKind::Mut { .. } = kind {
let ty = place.ty(&*self.body, self.tcx).ty;
let ty = place.ty(*self.body, self.tcx).ty;
// In theory, any zero-sized value could be borrowed
// mutably without consequences. However, only &mut []
@ -647,7 +647,7 @@ impl<'tcx> Validator<'_, 'tcx> {
let mut place = place.as_ref();
if let [proj_base @ .., ProjectionElem::Deref] = &place.projection {
let base_ty =
Place::ty_from(&place.base, proj_base, &*self.body, self.tcx).ty;
Place::ty_from(&place.base, proj_base, *self.body, self.tcx).ty;
if let ty::Ref(..) = base_ty.kind {
place = PlaceRef {
base: &place.base,
@ -673,7 +673,7 @@ impl<'tcx> Validator<'_, 'tcx> {
while let [proj_base @ .., elem] = place_projection {
// FIXME(eddyb) this is probably excessive, with
// the exception of `union` member accesses.
let ty = Place::ty_from(place.base, proj_base, &*self.body, self.tcx)
let ty = Place::ty_from(place.base, proj_base, *self.body, self.tcx)
.projection_ty(self.tcx, elem)
.ty;
if ty.is_freeze(self.tcx, self.param_env, DUMMY_SP) {
@ -706,7 +706,7 @@ impl<'tcx> Validator<'_, 'tcx> {
callee: &Operand<'tcx>,
args: &[Operand<'tcx>],
) -> Result<(), Unpromotable> {
let fn_ty = callee.ty(&*self.body, self.tcx);
let fn_ty = callee.ty(*self.body, self.tcx);
if !self.explicit && self.const_kind.is_none() {
if let ty::FnDef(def_id, _) = fn_ty.kind {


@ -492,7 +492,12 @@ impl<'a, Ty> ArgAbi<'a, Ty> {
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum Conv {
// General language calling conventions, for which every target
// should have its own backend (e.g. LLVM) support.
C,
Rust,
// Target-specific calling conventions.
ArmAapcs,