
Auto merge of #65938 - eddyb:fn-abi-rename, r=oli-obk

rustc_target: rename {Fn,Arg}Type to {Fn,Arg}Abi.

I was trying to tweak the API of `FnType` (now `FnAbi`) and the name kept bothering me.

`FnAbi` is to a function signature a bit like a layout is to a type, so the name still isn't perfect, but at least it no longer has the misleading `Type` in it.
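For reference, a minimal, self-contained sketch of the renamed pair. The placeholder types here (`TyLayout`, `PassMode`, `Conv`) are simplified stand-ins for illustration, not the real `rustc_target` definitions, which carry lifetimes and much more detail; the actual struct definitions appear further down in the diff.

```rust
// Simplified stand-ins for illustration only; the real types live in
// rustc_target::abi::call and carry lifetimes and layout details omitted here.
struct TyLayout;                   // layout of one Rust type
enum PassMode { Direct, Indirect } // how a value crosses the call boundary
enum Conv { C, Rust }              // calling convention

/// Formerly `ArgType`: how one argument (or the return value) is passed.
struct ArgAbi {
    layout: TyLayout,
    mode: PassMode,
}

/// Formerly `FnType`: the complete ABI of one function signature.
struct FnAbi {
    args: Vec<ArgAbi>,
    ret: ArgAbi,
    c_variadic: bool,
    conv: Conv,
}

fn main() {
    // Use sites change mechanically, e.g. `FnType::new(cx, sig, &[])`
    // becomes `FnAbi::new(cx, sig, &[])` throughout the diff below.
    let fn_abi = FnAbi {
        args: vec![ArgAbi { layout: TyLayout, mode: PassMode::Direct }],
        ret: ArgAbi { layout: TyLayout, mode: PassMode::Indirect },
        c_variadic: false,
        conv: Conv::Rust,
    };
    println!("{} arg(s), c_variadic: {}", fn_abi.args.len(), fn_abi.c_variadic);
}
```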

If this can't land, I think I can continue my original refactor without it, so I'm not strongly attached to it.

r? @nagisa cc @oli-obk
bors 2019-11-05 05:47:31 +00:00
commit 2e4da3caad
35 changed files with 311 additions and 311 deletions


@ -23,7 +23,7 @@ use rustc_index::vec::{IndexVec, Idx};
pub use rustc_target::abi::*;
use rustc_target::spec::{HasTargetSpec, abi::Abi as SpecAbi};
use rustc_target::abi::call::{
ArgAttribute, ArgAttributes, ArgType, Conv, FnType, PassMode, Reg, RegKind
ArgAttribute, ArgAttributes, ArgAbi, Conv, FnAbi, PassMode, Reg, RegKind
};
pub trait IntegerExt {
@ -2487,7 +2487,7 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for LayoutError<'tcx> {
}
}
pub trait FnTypeExt<'tcx, C>
pub trait FnAbiExt<'tcx, C>
where
C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+ HasDataLayout
@ -2502,12 +2502,12 @@ where
cx: &C,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
) -> Self;
fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi);
}
impl<'tcx, C> FnTypeExt<'tcx, C> for call::FnType<'tcx, Ty<'tcx>>
impl<'tcx, C> FnAbiExt<'tcx, C> for call::FnAbi<'tcx, Ty<'tcx>>
where
C: LayoutOf<Ty = Ty<'tcx>, TyLayout = TyLayout<'tcx>>
+ HasDataLayout
@ -2520,15 +2520,15 @@ where
let sig = cx
.tcx()
.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
call::FnType::new(cx, sig, &[])
call::FnAbi::new(cx, sig, &[])
}
fn new(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
call::FnType::new_internal(cx, sig, extra_args, |ty, _| ArgType::new(cx.layout_of(ty)))
call::FnAbi::new_internal(cx, sig, extra_args, |ty, _| ArgAbi::new(cx.layout_of(ty)))
}
fn new_vtable(cx: &C, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>]) -> Self {
FnTypeExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
FnAbiExt::new_internal(cx, sig, extra_args, |ty, arg_idx| {
let mut layout = cx.layout_of(ty);
// Don't pass the vtable, it's not an argument of the virtual fn.
// Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
@ -2578,7 +2578,7 @@ where
layout = cx.layout_of(unit_pointer_ty);
layout.ty = fat_pointer_ty;
}
ArgType::new(layout)
ArgAbi::new(layout)
})
}
@ -2586,9 +2586,9 @@ where
cx: &C,
sig: ty::FnSig<'tcx>,
extra_args: &[Ty<'tcx>],
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgType<'tcx, Ty<'tcx>>,
mk_arg_type: impl Fn(Ty<'tcx>, Option<usize>) -> ArgAbi<'tcx, Ty<'tcx>>,
) -> Self {
debug!("FnType::new_internal({:?}, {:?})", sig, extra_args);
debug!("FnAbi::new_internal({:?}, {:?})", sig, extra_args);
use rustc_target::spec::abi::Abi::*;
let conv = match cx.tcx().sess.target.target.adjust_abi(sig.abi) {
@ -2741,7 +2741,7 @@ where
arg
};
let mut fn_ty = FnType {
let mut fn_abi = FnAbi {
ret: arg_of(sig.output(), None),
args: inputs
.iter()
@ -2753,8 +2753,8 @@ where
c_variadic: sig.c_variadic,
conv,
};
fn_ty.adjust_for_abi(cx, sig.abi);
fn_ty
fn_abi.adjust_for_abi(cx, sig.abi);
fn_abi
}
fn adjust_for_abi(&mut self, cx: &C, abi: SpecAbi) {
@ -2767,7 +2767,7 @@ where
|| abi == SpecAbi::RustIntrinsic
|| abi == SpecAbi::PlatformIntrinsic
{
let fixup = |arg: &mut ArgType<'tcx, Ty<'tcx>>| {
let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
if arg.is_ignore() {
return;
}


@ -7,7 +7,7 @@ use crate::type_of::{LayoutLlvmExt};
use rustc_codegen_ssa::MemFlags;
use rustc_codegen_ssa::mir::place::PlaceRef;
use rustc_codegen_ssa::mir::operand::OperandValue;
use rustc_target::abi::call::ArgType;
use rustc_target::abi::call::ArgAbi;
use rustc_codegen_ssa::traits::*;
@ -163,7 +163,7 @@ impl LlvmType for CastTarget {
}
}
pub trait ArgTypeExt<'ll, 'tcx> {
pub trait ArgAbiExt<'ll, 'tcx> {
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn store(
&self,
@ -179,14 +179,14 @@ pub trait ArgTypeExt<'ll, 'tcx> {
);
}
impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
impl ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
/// Gets the LLVM type for a place of the original Rust type of
/// this argument/return, i.e., the result of `type_of::type_of`.
fn memory_ty(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
self.layout.llvm_type(cx)
}
/// Stores a direct/indirect value described by this ArgType into a
/// Stores a direct/indirect value described by this ArgAbi into a
/// place for the original Rust type of this argument/return.
/// Can be used for both storing formal arguments into Rust variables
/// or results of call/invoke instructions into their destinations.
@ -202,7 +202,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
if self.is_sized_indirect() {
OperandValue::Ref(val, None, self.layout.align.abi).store(bx, dst)
} else if self.is_unsized_indirect() {
bug!("unsized ArgType must be handled through store_fn_arg");
bug!("unsized ArgAbi must be handled through store_fn_arg");
} else if let PassMode::Cast(cast) = self.mode {
// FIXME(eddyb): Figure out when the simpler Store is safe, clang
// uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
@ -279,28 +279,28 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
}
}
impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
impl ArgAbiMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn store_fn_arg(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
idx: &mut usize, dst: PlaceRef<'tcx, Self::Value>
) {
ty.store_fn_arg(self, idx, dst)
arg_abi.store_fn_arg(self, idx, dst)
}
fn store_arg_ty(
fn store_arg(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
val: &'ll Value,
dst: PlaceRef<'tcx, &'ll Value>
) {
ty.store(self, val, dst)
arg_abi.store(self, val, dst)
}
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.memory_ty(self)
fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
arg_abi.memory_ty(self)
}
}
pub trait FnTypeLlvmExt<'tcx> {
pub trait FnAbiLlvmExt<'tcx> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type;
fn llvm_cconv(&self) -> llvm::CallConv;
@ -308,7 +308,7 @@ pub trait FnTypeLlvmExt<'tcx> {
fn apply_attrs_callsite(&self, bx: &mut Builder<'a, 'll, 'tcx>, callsite: &'ll Value);
}
impl<'tcx> FnTypeLlvmExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
impl<'tcx> FnAbiLlvmExt<'tcx> for FnAbi<'tcx, Ty<'tcx>> {
fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
let args_capacity: usize = self.args.iter().map(|arg|
if arg.pad.is_some() { 1 } else { 0 } +
@ -478,10 +478,10 @@ impl<'tcx> FnTypeLlvmExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
impl AbiBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn apply_attrs_callsite(
&mut self,
ty: &FnType<'tcx, Ty<'tcx>>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
callsite: Self::Value
) {
ty.apply_attrs_callsite(self, callsite)
fn_abi.apply_attrs_callsite(self, callsite)
}
fn get_param(&self, index: usize) -> Self::Value {


@ -13,13 +13,13 @@
use crate::llvm;
use crate::llvm::AttributePlace::Function;
use crate::abi::{FnType, FnTypeLlvmExt};
use crate::abi::{FnAbi, FnAbiLlvmExt};
use crate::attributes;
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::value::Value;
use rustc::ty::{self, PolyFnSig};
use rustc::ty::layout::{FnTypeExt, LayoutOf};
use rustc::ty::layout::{FnAbiExt, LayoutOf};
use rustc::session::config::Sanitizer;
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::traits::*;
@ -100,14 +100,14 @@ impl DeclareMethods<'tcx> for CodegenCx<'ll, 'tcx> {
let sig = self.tcx.normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), &sig);
debug!("declare_rust_fn (after region erasure) sig={:?}", sig);
let fty = FnType::new(self, sig, &[]);
let llfn = declare_raw_fn(self, name, fty.llvm_cconv(), fty.llvm_type(self));
let fn_abi = FnAbi::new(self, sig, &[]);
let llfn = declare_raw_fn(self, name, fn_abi.llvm_cconv(), fn_abi.llvm_type(self));
if self.layout_of(sig.output()).abi.is_uninhabited() {
llvm::Attribute::NoReturn.apply_llfn(Function, llfn);
}
fty.apply_attrs_llfn(self, llfn);
fn_abi.apply_attrs_llfn(self, llfn);
llfn
}


@ -1,7 +1,7 @@
use crate::attributes;
use crate::llvm;
use crate::llvm_util;
use crate::abi::{Abi, FnType, LlvmType, PassMode};
use crate::abi::{Abi, FnAbi, LlvmType, PassMode};
use crate::context::CodegenCx;
use crate::type_::Type;
use crate::type_of::LayoutLlvmExt;
@ -84,7 +84,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
fn codegen_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx, &'ll Value>],
llresult: &'ll Value,
span: Span,
@ -104,7 +104,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
let name = &*tcx.item_name(def_id).as_str();
let llret_ty = self.layout_of(ret_ty).llvm_type(self);
let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout);
let result = PlaceRef::new_sized(llresult, fn_abi.ret.layout);
let simple = get_simple_intrinsic(self, name);
let llval = match name {
@ -147,7 +147,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
self.call(intrinsic, &[args[0].immediate(), args[1].immediate()], None)
}
"va_arg" => {
match fn_ty.ret.layout.abi {
match fn_abi.ret.layout.abi {
layout::Abi::Scalar(ref scalar) => {
match scalar.value {
Primitive::Int(..) => {
@ -276,7 +276,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
"volatile_load" | "unaligned_volatile_load" => {
let tp_ty = substs.type_at(0);
let mut ptr = args[0].immediate();
if let PassMode::Cast(ty) = fn_ty.ret.mode {
if let PassMode::Cast(ty) = fn_abi.ret.mode {
ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
}
let load = self.volatile_load(ptr);
@ -715,8 +715,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
_ => bug!("unknown intrinsic '{}'", name),
};
if !fn_ty.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_ty.ret.mode {
if !fn_abi.ret.is_ignore() {
if let PassMode::Cast(ty) = fn_abi.ret.mode {
let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
let ptr = self.pointercast(result.llval, ptr_llty);
self.store(llval, ptr, result.align);


@ -8,11 +8,11 @@ use rustc_codegen_ssa::traits::*;
use crate::common;
use crate::type_of::LayoutLlvmExt;
use crate::abi::{LlvmType, FnTypeLlvmExt};
use crate::abi::{LlvmType, FnAbiLlvmExt};
use syntax::ast;
use rustc::ty::Ty;
use rustc::ty::layout::{self, Align, Size, TyLayout};
use rustc_target::abi::call::{CastTarget, FnType, Reg};
use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
use rustc_data_structures::small_c_str::SmallCStr;
use rustc_codegen_ssa::common::TypeKind;
@ -243,7 +243,7 @@ impl BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
assert_ne!(self.type_kind(ty), TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
"don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead");
ty.ptr_to()
}
@ -336,8 +336,8 @@ impl LayoutTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
fn cast_backend_type(&self, ty: &CastTarget) -> &'ll Type {
ty.llvm_type(self)
}
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> &'ll Type {
ty.ptr_to_llvm_type(self)
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> &'ll Type {
fn_abi.ptr_to_llvm_type(self)
}
fn reg_backend_type(&self, ty: &Reg) -> &'ll Type {
ty.llvm_type(self)


@ -1,8 +1,8 @@
use crate::abi::{FnType};
use crate::abi::{FnAbi};
use crate::common::*;
use crate::type_::Type;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, Align, LayoutOf, FnTypeExt, PointeeInfo, Size, TyLayout};
use rustc::ty::layout::{self, Align, LayoutOf, FnAbiExt, PointeeInfo, Size, TyLayout};
use rustc_target::abi::{FloatTy, TyLayoutMethods};
use rustc::ty::print::obsolete::DefPathBasedNames;
use rustc_codegen_ssa::traits::*;
@ -239,7 +239,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
ty::ParamEnv::reveal_all(),
&sig,
);
cx.fn_ptr_backend_type(&FnType::new(cx, sig, &[]))
cx.fn_ptr_backend_type(&FnAbi::new(cx, sig, &[]))
}
_ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO)
};


@ -1,4 +1,4 @@
use rustc_target::abi::call::FnType;
use rustc_target::abi::call::FnAbi;
use crate::traits::*;
@ -20,14 +20,14 @@ impl<'a, 'tcx> VirtualIndex {
self,
bx: &mut Bx,
llvtable: Bx::Value,
fn_ty: &FnType<'tcx, Ty<'tcx>>
fn_abi: &FnAbi<'tcx, Ty<'tcx>>
) -> Bx::Value {
// Load the data pointer from the object.
debug!("get_fn({:?}, {:?})", llvtable, self);
let llvtable = bx.pointercast(
llvtable,
bx.type_ptr_to(bx.fn_ptr_backend_type(fn_ty))
bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi))
);
let ptr_align = bx.tcx().data_layout.pointer_align.abi;
let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);


@ -1,10 +1,10 @@
use rustc_index::vec::Idx;
use rustc::middle::lang_items;
use rustc::ty::{self, Ty, TypeFoldable, Instance};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, FnTypeExt};
use rustc::ty::layout::{self, LayoutOf, HasTyCtxt, FnAbiExt};
use rustc::mir::{self, PlaceBase, Static, StaticKind};
use rustc::mir::interpret::PanicInfo;
use rustc_target::abi::call::{ArgType, FnType, PassMode};
use rustc_target::abi::call::{ArgAbi, FnAbi, PassMode};
use rustc_target::spec::abi::Abi;
use crate::base;
use crate::MemFlags;
@ -99,13 +99,13 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
}
}
/// Call `fn_ptr` of `fn_ty` with the arguments `llargs`, the optional
/// Call `fn_ptr` of `fn_abi` with the arguments `llargs`, the optional
/// return destination `destination` and the cleanup function `cleanup`.
fn do_call<'c, 'b, Bx: BuilderMethods<'b, 'tcx>>(
&self,
fx: &'c mut FunctionCx<'b, 'tcx, Bx>,
bx: &mut Bx,
fn_ty: FnType<'tcx, Ty<'tcx>>,
fn_abi: FnAbi<'tcx, Ty<'tcx>>,
fn_ptr: Bx::Value,
llargs: &[Bx::Value],
destination: Option<(ReturnDest<'tcx, Bx::Value>, mir::BasicBlock)>,
@ -122,16 +122,16 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
ret_bx,
self.llblock(fx, cleanup),
self.funclet(fx));
bx.apply_attrs_callsite(&fn_ty, invokeret);
bx.apply_attrs_callsite(&fn_abi, invokeret);
if let Some((ret_dest, target)) = destination {
let mut ret_bx = fx.build_block(target);
fx.set_debug_loc(&mut ret_bx, self.terminator.source_info);
fx.store_return(&mut ret_bx, ret_dest, &fn_ty.ret, invokeret);
fx.store_return(&mut ret_bx, ret_dest, &fn_abi.ret, invokeret);
}
} else {
let llret = bx.call(fn_ptr, &llargs, self.funclet(fx));
bx.apply_attrs_callsite(&fn_ty, llret);
bx.apply_attrs_callsite(&fn_abi, llret);
if fx.mir[*self.bb].is_cleanup {
// Cleanup is always the cold path. Don't inline
// drop glue. Also, when there is a deeply-nested
@ -141,7 +141,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'a, 'tcx> {
}
if let Some((ret_dest, target)) = destination {
fx.store_return(bx, ret_dest, &fn_ty.ret, llret);
fx.store_return(bx, ret_dest, &fn_abi.ret, llret);
self.funclet_br(fx, bx, target);
} else {
bx.unreachable();
@ -248,9 +248,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
fn codegen_return_terminator(&mut self, mut bx: Bx) {
// Call `va_end` if this is the definition of a C-variadic function.
if self.fn_ty.c_variadic {
if self.fn_abi.c_variadic {
// The `VaList` "spoofed" argument is just after all the real arguments.
let va_list_arg_idx = self.fn_ty.args.len();
let va_list_arg_idx = self.fn_abi.args.len();
match self.locals[mir::Local::new(1 + va_list_arg_idx)] {
LocalRef::Place(va_list) => {
bx.va_end(va_list.llval);
@ -258,14 +258,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => bug!("C-variadic function must have a `VaList` place"),
}
}
if self.fn_ty.ret.layout.abi.is_uninhabited() {
if self.fn_abi.ret.layout.abi.is_uninhabited() {
// Functions with uninhabited return values are marked `noreturn`,
// so we should make sure that we never actually do.
bx.abort();
bx.unreachable();
return;
}
let llval = match self.fn_ty.ret.mode {
let llval = match self.fn_abi.ret.mode {
PassMode::Ignore | PassMode::Indirect(..) => {
bx.ret_void();
return;
@ -296,7 +296,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let llslot = match op.val {
Immediate(_) | Pair(..) => {
let scratch =
PlaceRef::alloca(&mut bx, self.fn_ty.ret.layout);
PlaceRef::alloca(&mut bx, self.fn_abi.ret.layout);
op.val.store(&mut bx, scratch);
scratch.llval
}
@ -309,7 +309,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let addr = bx.pointercast(llslot, bx.type_ptr_to(
bx.cast_backend_type(&cast_ty)
));
bx.load(addr, self.fn_ty.ret.layout.align.abi)
bx.load(addr, self.fn_abi.ret.layout.align.abi)
}
};
bx.ret(llval);
@ -344,25 +344,25 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
args1 = [place.llval];
&args1[..]
};
let (drop_fn, fn_ty) = match ty.kind {
let (drop_fn, fn_abi) = match ty.kind {
ty::Dynamic(..) => {
let sig = drop_fn.fn_sig(self.cx.tcx());
let sig = self.cx.tcx().normalize_erasing_late_bound_regions(
ty::ParamEnv::reveal_all(),
&sig,
);
let fn_ty = FnType::new_vtable(&bx, sig, &[]);
let fn_abi = FnAbi::new_vtable(&bx, sig, &[]);
let vtable = args[1];
args = &args[..1];
(meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
(meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_abi), fn_abi)
}
_ => {
(bx.get_fn_addr(drop_fn),
FnType::of_instance(&bx, drop_fn))
FnAbi::of_instance(&bx, drop_fn))
}
};
helper.maybe_sideeffect(self.mir, &mut bx, &[target]);
helper.do_call(self, &mut bx, fn_ty, drop_fn, args,
helper.do_call(self, &mut bx, fn_abi, drop_fn, args,
Some((ReturnDest::Nothing, target)),
unwind);
}
@ -439,11 +439,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Obtain the panic entry point.
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_ty = FnType::of_instance(&bx, instance);
let fn_abi = FnAbi::of_instance(&bx, instance);
let llfn = bx.get_fn_addr(instance);
// Codegen the actual panic invoke/call.
helper.do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);
helper.do_call(self, &mut bx, fn_abi, llfn, &args, None, cleanup);
}
fn codegen_call_terminator<'b>(
@ -514,9 +514,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
self.monomorphize(&op_ty)
}).collect::<Vec<_>>();
let fn_ty = match def {
let fn_abi = match def {
Some(ty::InstanceDef::Virtual(..)) => {
FnType::new_vtable(&bx, sig, &extra_args)
FnAbi::new_vtable(&bx, sig, &extra_args)
}
Some(ty::InstanceDef::DropGlue(_, None)) => {
// Empty drop glue; a no-op.
@ -525,7 +525,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
helper.funclet_br(self, &mut bx, target);
return;
}
_ => FnType::new(&bx, sig, &extra_args)
_ => FnAbi::new(&bx, sig, &extra_args)
};
// Emit a panic or a no-op for `panic_if_uninhabited`.
@ -541,7 +541,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
let def_id =
common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
let instance = ty::Instance::mono(bx.tcx(), def_id);
let fn_ty = FnType::of_instance(&bx, instance);
let fn_abi = FnAbi::of_instance(&bx, instance);
let llfn = bx.get_fn_addr(instance);
if let Some((_, target)) = destination.as_ref() {
@ -551,7 +551,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
helper.do_call(
self,
&mut bx,
fn_ty,
fn_abi,
llfn,
&[msg.0, msg.1, location],
destination.as_ref().map(|(_, bb)| (ReturnDest::Nothing, *bb)),
@ -567,13 +567,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
// The arguments we'll be passing. Plus one to account for outptr, if used.
let arg_count = fn_ty.args.len() + fn_ty.ret.is_indirect() as usize;
let arg_count = fn_abi.args.len() + fn_abi.ret.is_indirect() as usize;
let mut llargs = Vec::with_capacity(arg_count);
// Prepare the return value destination
let ret_dest = if let Some((ref dest, _)) = *destination {
let is_intrinsic = intrinsic.is_some();
self.make_return_dest(&mut bx, dest, &fn_ty.ret, &mut llargs,
self.make_return_dest(&mut bx, dest, &fn_abi.ret, &mut llargs,
is_intrinsic)
} else {
ReturnDest::Nothing
@ -586,7 +586,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let ReturnDest::IndirectOperand(tmp, _) = ret_dest {
location.val.store(&mut bx, tmp);
}
self.store_return(&mut bx, ret_dest, &fn_ty.ret, location.immediate());
self.store_return(&mut bx, ret_dest, &fn_abi.ret, location.immediate());
helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
helper.funclet_br(self, &mut bx, *target);
@ -596,9 +596,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
_ if fn_abi.ret.is_indirect() => llargs[0],
ReturnDest::Nothing =>
bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret))),
bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret))),
ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) =>
dst.llval,
ReturnDest::DirectOperand(_) =>
@ -667,11 +667,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}).collect();
bx.codegen_intrinsic_call(*instance.as_ref().unwrap(), &fn_ty, &args, dest,
bx.codegen_intrinsic_call(*instance.as_ref().unwrap(), &fn_abi, &args, dest,
terminator.source_info.span);
if let ReturnDest::IndirectOperand(dst, _) = ret_dest {
self.store_return(&mut bx, ret_dest, &fn_ty.ret, dst.llval);
self.store_return(&mut bx, ret_dest, &fn_abi.ret, dst.llval);
}
if let Some((_, target)) = *destination {
@ -726,7 +726,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
match op.val {
Pair(data_ptr, meta) => {
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&mut bx, meta, &fn_ty));
.get_fn(&mut bx, meta, &fn_abi));
llargs.push(data_ptr);
continue 'make_args
}
@ -735,7 +735,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
} else if let Ref(data_ptr, Some(meta), _) = op.val {
// by-value dynamic dispatch
llfn = Some(meth::VirtualIndex::from_index(idx)
.get_fn(&mut bx, meta, &fn_ty));
.get_fn(&mut bx, meta, &fn_abi));
llargs.push(data_ptr);
continue;
} else {
@ -755,11 +755,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
_ => {}
}
self.codegen_argument(&mut bx, op, &mut llargs, &fn_ty.args[i]);
self.codegen_argument(&mut bx, op, &mut llargs, &fn_abi.args[i]);
}
if let Some(tup) = untuple {
self.codegen_arguments_untupled(&mut bx, tup, &mut llargs,
&fn_ty.args[first_args.len()..])
&fn_abi.args[first_args.len()..])
}
let fn_ptr = match (llfn, instance) {
@ -771,7 +771,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
if let Some((_, target)) = destination.as_ref() {
helper.maybe_sideeffect(self.mir, &mut bx, &[*target]);
}
helper.do_call(self, &mut bx, fn_ty, fn_ptr, &llargs,
helper.do_call(self, &mut bx, fn_abi, fn_ptr, &llargs,
destination.as_ref().map(|&(_, target)| (ret_dest, target)),
cleanup);
}
@ -874,7 +874,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx: &mut Bx,
op: OperandRef<'tcx, Bx::Value>,
llargs: &mut Vec<Bx::Value>,
arg: &ArgType<'tcx, Ty<'tcx>>
arg: &ArgAbi<'tcx, Ty<'tcx>>
) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
@ -967,7 +967,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
bx: &mut Bx,
operand: &mir::Operand<'tcx>,
llargs: &mut Vec<Bx::Value>,
args: &[ArgType<'tcx, Ty<'tcx>>]
args: &[ArgAbi<'tcx, Ty<'tcx>>]
) {
let tuple = self.codegen_operand(bx, operand);
@ -1095,7 +1095,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&mut self,
bx: &mut Bx,
dest: &mir::Place<'tcx>,
fn_ret: &ArgType<'tcx, Ty<'tcx>>,
fn_ret: &ArgAbi<'tcx, Ty<'tcx>>,
llargs: &mut Vec<Bx::Value>, is_intrinsic: bool
) -> ReturnDest<'tcx, Bx::Value> {
// If the return is ignored, we can just return a do-nothing `ReturnDest`.
@ -1204,14 +1204,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
&mut self,
bx: &mut Bx,
dest: ReturnDest<'tcx, Bx::Value>,
ret_ty: &ArgType<'tcx, Ty<'tcx>>,
ret_abi: &ArgAbi<'tcx, Ty<'tcx>>,
llval: Bx::Value
) {
use self::ReturnDest::*;
match dest {
Nothing => (),
Store(dst) => bx.store_arg_ty(&ret_ty, llval, dst),
Store(dst) => bx.store_arg(&ret_abi, llval, dst),
IndirectOperand(tmp, index) => {
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
@ -1219,15 +1219,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
}
DirectOperand(index) => {
// If there is a cast, we have to store and reload.
let op = if let PassMode::Cast(_) = ret_ty.mode {
let tmp = PlaceRef::alloca(bx, ret_ty.layout);
let op = if let PassMode::Cast(_) = ret_abi.mode {
let tmp = PlaceRef::alloca(bx, ret_abi.layout);
tmp.storage_live(bx);
bx.store_arg_ty(&ret_ty, llval, tmp);
bx.store_arg(&ret_abi, llval, tmp);
let op = bx.load_operand(tmp);
tmp.storage_dead(bx);
op
} else {
OperandRef::from_immediate_or_packed_pair(bx, llval, ret_ty.layout)
OperandRef::from_immediate_or_packed_pair(bx, llval, ret_abi.layout)
};
self.locals[index] = LocalRef::Operand(Some(op));
}


@ -1,7 +1,7 @@
use rustc::ty::{self, Ty, TypeFoldable, Instance};
use rustc::ty::layout::{TyLayout, HasTyCtxt, FnTypeExt};
use rustc::ty::layout::{TyLayout, HasTyCtxt, FnAbiExt};
use rustc::mir::{self, Body};
use rustc_target::abi::call::{FnType, PassMode};
use rustc_target::abi::call::{FnAbi, PassMode};
use crate::base;
use crate::traits::*;
@ -29,7 +29,7 @@ pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
cx: &'a Bx::CodegenCx,
fn_ty: FnType<'tcx, Ty<'tcx>>,
fn_abi: FnAbi<'tcx, Ty<'tcx>>,
/// When unwinding is initiated, we have to store this personality
/// value somewhere so that we can load it and re-use it in the
@ -126,8 +126,8 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
) {
assert!(!instance.substs.needs_infer());
let fn_ty = FnType::new(cx, sig, &[]);
debug!("fn_ty: {:?}", fn_ty);
let fn_abi = FnAbi::new(cx, sig, &[]);
debug!("fn_abi: {:?}", fn_abi);
let debug_context =
cx.create_function_debug_context(instance, sig, llfn, mir);
@ -159,7 +159,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
instance,
mir,
llfn,
fn_ty,
fn_abi,
cx,
personality_slot: None,
blocks: block_bxs,
@ -183,7 +183,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let layout = bx.layout_of(fx.monomorphize(&decl.ty));
assert!(!layout.ty.has_erasable_regions());
if local == mir::RETURN_PLACE && fx.fn_ty.ret.is_indirect() {
if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
debug!("alloc: {:?} (return place) -> place", local);
let llretptr = bx.get_param(0);
return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
@ -323,7 +323,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
) -> Vec<LocalRef<'tcx, Bx::Value>> {
let mir = fx.mir;
let mut idx = 0;
let mut llarg_idx = fx.fn_ty.ret.is_indirect() as usize;
let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;
mir.args_iter().enumerate().map(|(arg_index, local)| {
let arg_decl = &mir.local_decls[local];
@ -342,7 +342,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
for i in 0..tupled_arg_tys.len() {
let arg = &fx.fn_ty.args[idx];
let arg = &fx.fn_abi.args[idx];
idx += 1;
if arg.pad.is_some() {
llarg_idx += 1;
@ -354,7 +354,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
return LocalRef::Place(place);
}
if fx.fn_ty.c_variadic && arg_index == fx.fn_ty.args.len() {
if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
let arg_ty = fx.monomorphize(&arg_decl.ty);
let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
@ -363,7 +363,7 @@ fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
return LocalRef::Place(va_list);
}
let arg = &fx.fn_ty.args[idx];
let arg = &fx.fn_abi.args[idx];
idx += 1;
if arg.pad.is_some() {
llarg_idx += 1;


@ -1,8 +1,8 @@
use super::BackendTypes;
use rustc::ty::{Ty};
use rustc_target::abi::call::FnType;
use rustc_target::abi::call::FnAbi;
pub trait AbiBuilderMethods<'tcx>: BackendTypes {
fn apply_attrs_callsite(&mut self, ty: &FnType<'tcx, Ty<'tcx>>, callsite: Self::Value);
fn apply_attrs_callsite(&mut self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>, callsite: Self::Value);
fn get_param(&self, index: usize) -> Self::Value;
}


@ -2,7 +2,7 @@ use super::abi::AbiBuilderMethods;
use super::asm::AsmBuilderMethods;
use super::debuginfo::DebugInfoBuilderMethods;
use super::intrinsic::IntrinsicCallMethods;
use super::type_::ArgTypeMethods;
use super::type_::ArgAbiMethods;
use super::{HasCodegen, StaticBuilderMethods};
use crate::common::{AtomicOrdering, AtomicRmwBinOp, IntPredicate, RealPredicate,
SynchronizationScope};
@ -25,7 +25,7 @@ pub enum OverflowOp {
pub trait BuilderMethods<'a, 'tcx>:
HasCodegen<'tcx>
+ DebugInfoBuilderMethods<'tcx>
+ ArgTypeMethods<'tcx>
+ ArgAbiMethods<'tcx>
+ AbiBuilderMethods<'tcx>
+ IntrinsicCallMethods<'tcx>
+ AsmBuilderMethods<'tcx>


@ -1,7 +1,7 @@
use super::BackendTypes;
use crate::mir::operand::OperandRef;
use rustc::ty::{self, Ty};
use rustc_target::abi::call::FnType;
use rustc_target::abi::call::FnAbi;
use syntax_pos::Span;
pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
@ -11,7 +11,7 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
fn codegen_intrinsic_call(
&mut self,
instance: ty::Instance<'tcx>,
fn_ty: &FnType<'tcx, Ty<'tcx>>,
fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
args: &[OperandRef<'tcx, Self::Value>],
llresult: Self::Value,
span: Span,


@ -38,7 +38,7 @@ pub use self::intrinsic::IntrinsicCallMethods;
pub use self::misc::MiscMethods;
pub use self::statics::{StaticMethods, StaticBuilderMethods};
pub use self::type_::{
ArgTypeMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
ArgAbiMethods, BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods, TypeMethods,
};
pub use self::write::{ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
use rustc::ty::layout::{HasParamEnv, HasTyCtxt};


@ -5,7 +5,7 @@ use crate::common::TypeKind;
use crate::mir::place::PlaceRef;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, TyLayout};
use rustc_target::abi::call::{ArgType, CastTarget, FnType, Reg};
use rustc_target::abi::call::{ArgAbi, CastTarget, FnAbi, Reg};
use syntax_pos::DUMMY_SP;
// This depends on `Backend` and not `BackendTypes`, because consumers will probably want to use
@ -96,7 +96,7 @@ impl<T> DerivedTypeMethods<'tcx> for T where Self: BaseTypeMethods<'tcx> + MiscM
pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
fn backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn cast_backend_type(&self, ty: &CastTarget) -> Self::Type;
fn fn_ptr_backend_type(&self, ty: &FnType<'tcx, Ty<'tcx>>) -> Self::Type;
fn fn_ptr_backend_type(&self, fn_abi: &FnAbi<'tcx, Ty<'tcx>>) -> Self::Type;
fn reg_backend_type(&self, ty: &Reg) -> Self::Type;
fn immediate_backend_type(&self, layout: TyLayout<'tcx>) -> Self::Type;
fn is_backend_immediate(&self, layout: TyLayout<'tcx>) -> bool;
@ -110,20 +110,20 @@ pub trait LayoutTypeMethods<'tcx>: Backend<'tcx> {
) -> Self::Type;
}
pub trait ArgTypeMethods<'tcx>: HasCodegen<'tcx> {
pub trait ArgAbiMethods<'tcx>: HasCodegen<'tcx> {
fn store_fn_arg(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
idx: &mut usize,
dst: PlaceRef<'tcx, Self::Value>,
);
fn store_arg_ty(
fn store_arg(
&mut self,
ty: &ArgType<'tcx, Ty<'tcx>>,
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
val: Self::Value,
dst: PlaceRef<'tcx, Self::Value>,
);
fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> Self::Type;
fn arg_memory_ty(&self, arg_abi: &ArgAbi<'tcx, Ty<'tcx>>) -> Self::Type;
}
pub trait TypeMethods<'tcx>: DerivedTypeMethods<'tcx> + LayoutTypeMethods<'tcx> {}


@ -1,7 +1,7 @@
use crate::abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use crate::abi::call::{FnAbi, ArgAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
@ -31,7 +31,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -65,7 +65,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -99,16 +99,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
arg.make_indirect();
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -1,32 +1,32 @@
use crate::abi::call::{ArgType, FnType, };
use crate::abi::call::{ArgAbi, FnAbi, };
use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(_cx: &C, ret: &mut ArgType<'a, Ty>)
fn classify_ret<'a, Ty, C>(_cx: &C, ret: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
ret.extend_integer_width_to(32);
}
fn classify_arg_ty<'a, Ty, C>(_cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(_cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
arg.extend_integer_width_to(32);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -1,8 +1,8 @@
use crate::abi::call::{Conv, FnType, ArgType, Reg, RegKind, Uniform};
use crate::abi::call::{Conv, FnAbi, ArgAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use crate::spec::HasTargetSpec;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
@ -32,7 +32,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, vfp: bool)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -67,7 +67,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, vfp: bool)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, vfp: bool)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -91,22 +91,22 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, vfp: bool)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
// If this is a target with a hard-float ABI, and the function is not explicitly
// `extern "aapcs"`, then we must use the VFP registers for homogeneous aggregates.
let vfp = cx.target_spec().llvm_target.ends_with("hf")
&& fty.conv != Conv::ArmAapcs
&& !fty.c_variadic;
&& fn_abi.conv != Conv::ArmAapcs
&& !fn_abi.c_variadic;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, vfp);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, vfp);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, vfp);
classify_arg(cx, arg, vfp);
}
}


@ -1,6 +1,6 @@
use crate::abi::call::{FnType, ArgType};
use crate::abi::call::{FnAbi, ArgAbi};
fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
@ -8,7 +8,7 @@ fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
}
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
@ -16,15 +16,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
}
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_,Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_,Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg);
classify_arg(arg);
}
}


@ -1,7 +1,7 @@
use crate::abi::call::{ArgType, FnType, Reg, Uniform};
use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
@ -12,7 +12,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Si
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
@ -34,16 +34,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Si
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'_, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, &mut offset);
classify_arg(cx, arg, &mut offset);
}
}


@ -1,7 +1,7 @@
use crate::abi::call::{ArgAttribute, ArgType, CastTarget, FnType, PassMode, Reg, RegKind, Uniform};
use crate::abi::call::{ArgAttribute, ArgAbi, CastTarget, FnAbi, PassMode, Reg, RegKind, Uniform};
use crate::abi::{self, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
fn extend_integer_width_mips<Ty>(arg: &mut ArgType<'_, Ty>, bits: u64) {
fn extend_integer_width_mips<Ty>(arg: &mut ArgAbi<'_, Ty>, bits: u64) {
// Always sign extend u32 values on 64-bit mips
if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
if let abi::Int(i, signed) = scalar.value {
@ -17,7 +17,7 @@ fn extend_integer_width_mips<Ty>(arg: &mut ArgType<'_, Ty>, bits: u64) {
arg.extend_integer_width_to(bits);
}
fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgAbi<'a, Ty>, i: usize) -> Option<Reg>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -31,7 +31,7 @@ fn float_reg<'a, Ty, C>(cx: &C, ret: &ArgType<'a, Ty>, i: usize) -> Option<Reg>
}
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -73,7 +73,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -141,16 +141,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -379,7 +379,7 @@ impl<'a, Ty> TyLayout<'a, Ty> {
/// Information about how to pass an argument to,
/// or return a value from, a function, under some ABI.
#[derive(Debug)]
pub struct ArgType<'a, Ty> {
pub struct ArgAbi<'a, Ty> {
pub layout: TyLayout<'a, Ty>,
/// Dummy argument, which is emitted before the real argument.
@ -388,9 +388,9 @@ pub struct ArgType<'a, Ty> {
pub mode: PassMode,
}
impl<'a, Ty> ArgType<'a, Ty> {
impl<'a, Ty> ArgAbi<'a, Ty> {
pub fn new(layout: TyLayout<'a, Ty>) -> Self {
ArgType {
ArgAbi {
layout,
pad: None,
mode: PassMode::Direct(ArgAttributes::new()),
@ -516,19 +516,19 @@ pub enum Conv {
/// I will do my best to describe this structure, but these
/// comments are reverse-engineered and may be inaccurate. -NDM
#[derive(Debug)]
pub struct FnType<'a, Ty> {
pub struct FnAbi<'a, Ty> {
/// The LLVM types of each argument.
pub args: Vec<ArgType<'a, Ty>>,
pub args: Vec<ArgAbi<'a, Ty>>,
/// LLVM return type.
pub ret: ArgType<'a, Ty>,
pub ret: ArgAbi<'a, Ty>,
pub c_variadic: bool,
pub conv: Conv,
}
impl<'a, Ty> FnType<'a, Ty> {
impl<'a, Ty> FnAbi<'a, Ty> {
pub fn adjust_for_cabi<C>(&mut self, cx: &C, abi: spec::abi::Abi) -> Result<(), String>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec


@ -1,7 +1,7 @@
// Reference: MSP430 Embedded Application Binary Interface
// http://www.ti.com/lit/an/slaa534/slaa534.pdf
use crate::abi::call::{ArgType, FnType};
use crate::abi::call::{ArgAbi, FnAbi};
// 3.5 Structures or Unions Passed and Returned by Reference
//
@ -9,7 +9,7 @@ use crate::abi::call::{ArgType, FnType};
// returned by reference. To pass a structure or union by reference, the caller
// places its address in the appropriate location: either in a register or on
// the stack, according to its position in the argument list. (..)"
fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
@ -17,7 +17,7 @@ fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
}
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
@ -25,15 +25,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
}
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg);
classify_arg(arg);
}
}


@ -1,9 +1,9 @@
// Reference: PTX Writer's Guide to Interoperability
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use crate::abi::call::{ArgType, FnType};
use crate::abi::call::{ArgAbi, FnAbi};
fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 32 {
ret.make_indirect();
} else {
@ -11,7 +11,7 @@ fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
}
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 32 {
arg.make_indirect();
} else {
@ -19,15 +19,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
}
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg);
classify_arg(arg);
}
}


@ -1,9 +1,9 @@
// Reference: PTX Writer's Guide to Interoperability
// http://docs.nvidia.com/cuda/ptx-writers-guide-to-interoperability
use crate::abi::call::{ArgType, FnType};
use crate::abi::call::{ArgAbi, FnAbi};
fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
if ret.layout.is_aggregate() && ret.layout.size.bits() > 64 {
ret.make_indirect();
} else {
@ -11,7 +11,7 @@ fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
}
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
if arg.layout.is_aggregate() && arg.layout.size.bits() > 64 {
arg.make_indirect();
} else {
@ -19,15 +19,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
}
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg);
classify_arg(arg);
}
}


@ -1,7 +1,7 @@
use crate::abi::call::{ArgType, FnType, Reg, Uniform};
use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
@ -12,7 +12,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Si
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
@ -34,16 +34,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Si
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'_, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, &mut offset);
classify_arg(cx, arg, &mut offset);
}
}


@ -2,7 +2,7 @@
// Alignment of 128 bit types is not currently handled, this will
// need to be fixed when PowerPC vector support is added.
use crate::abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use crate::abi::call::{FnAbi, ArgAbi, Reg, RegKind, Uniform};
use crate::abi::{Endian, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use crate::spec::HasTargetSpec;
@ -13,7 +13,7 @@ enum ABI {
}
use ABI::*;
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
@ -43,7 +43,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: A
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -88,7 +88,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>, abi: ABI)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>, abi: ABI)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -120,7 +120,7 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>, abi: ABI)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
@ -133,12 +133,12 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
}
};
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, abi);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, abi);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, abi);
classify_arg(cx, arg, abi);
}
}


@ -1,9 +1,9 @@
// Reference: RISC-V ELF psABI specification
// https://github.com/riscv/riscv-elf-psabi-doc
use crate::abi::call::{ArgType, FnType};
use crate::abi::call::{ArgAbi, FnAbi};
fn classify_ret_ty<Ty>(arg: &mut ArgType<'_, Ty>, xlen: u64) {
fn classify_ret<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
// "Scalars wider than 2✕XLEN are passed by reference and are replaced in
// the argument list with the address."
// "Aggregates larger than 2✕XLEN bits are passed by reference and are
@ -19,7 +19,7 @@ fn classify_ret_ty<Ty>(arg: &mut ArgType<'_, Ty>, xlen: u64) {
arg.extend_integer_width_to(xlen); // this method only affects integer scalars
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>, xlen: u64) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
// "Scalars wider than 2✕XLEN are passed by reference and are replaced in
// the argument list with the address."
// "Aggregates larger than 2✕XLEN bits are passed by reference and are
@ -35,15 +35,15 @@ fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>, xlen: u64) {
arg.extend_integer_width_to(xlen); // this method only affects integer scalars
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>, xlen: u64) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret, xlen);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>, xlen: u64) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret, xlen);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() {
continue;
}
classify_arg_ty(arg, xlen);
classify_arg(arg, xlen);
}
}


@ -1,10 +1,10 @@
// FIXME: The assumes we're using the non-vector ABI, i.e., compiling
// for a pre-z13 machine or using -mno-vx.
use crate::abi::call::{FnType, ArgType, Reg};
use crate::abi::call::{FnAbi, ArgAbi, Reg};
use crate::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(ret: &mut ArgType<'_, Ty>)
fn classify_ret<'a, Ty, C>(ret: &mut ArgAbi<'_, Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() && ret.layout.size.bits() <= 64 {
@ -31,7 +31,7 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -57,16 +57,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -1,7 +1,7 @@
use crate::abi::call::{ArgType, FnType, Reg, Uniform};
use crate::abi::call::{ArgAbi, FnAbi, Reg, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, Size, TyLayoutMethods};
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
if !ret.layout.is_aggregate() {
@ -12,7 +12,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'_, Ty>, offset: &mut Si
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Size)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'_, Ty>, offset: &mut Size)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let dl = cx.data_layout();
@ -34,16 +34,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'_, Ty>, offset: &mut Si
*offset = offset.align_to(align) + size.align_to(align);
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'_, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'_, Ty>)
where Ty: TyLayoutMethods<'a, C>, C: LayoutOf<Ty = Ty> + HasDataLayout
{
let mut offset = Size::ZERO;
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret, &mut offset);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret, &mut offset);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg, &mut offset);
classify_arg(cx, arg, &mut offset);
}
}


@ -1,9 +1,9 @@
// FIXME: This needs an audit for correctness and completeness.
use crate::abi::call::{FnType, ArgType, Reg, RegKind, Uniform};
use crate::abi::call::{FnAbi, ArgAbi, Reg, RegKind, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
-> Option<Uniform>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
@ -31,7 +31,7 @@ fn is_homogeneous_aggregate<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
})
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -59,7 +59,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
ret.make_indirect();
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -85,16 +85,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
});
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -1,7 +1,7 @@
use crate::abi::call::{FnType, ArgType, Uniform};
use crate::abi::call::{FnAbi, ArgAbi, Uniform};
use crate::abi::{HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgType<'a, Ty>) -> bool
fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgAbi<'a, Ty>) -> bool
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -21,7 +21,7 @@ fn unwrap_trivial_aggregate<'a, Ty, C>(cx: &C, val: &mut ArgType<'a, Ty>) -> boo
}
fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
fn classify_ret<'a, Ty, C>(cx: &C, ret: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -33,7 +33,7 @@ fn classify_ret_ty<'a, Ty, C>(cx: &C, ret: &mut ArgType<'a, Ty>)
}
}
fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
@ -45,16 +45,16 @@ fn classify_arg_ty<'a, Ty, C>(cx: &C, arg: &mut ArgType<'a, Ty>)
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
if !fty.ret.is_ignore() {
classify_ret_ty(cx, &mut fty.ret);
if !fn_abi.ret.is_ignore() {
classify_ret(cx, &mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(cx, arg);
classify_arg(cx, arg);
}
}


@ -5,23 +5,23 @@
// can be fixed to work with the correct ABI. See #63649 for further
// discussion.
use crate::abi::call::{FnType, ArgType};
use crate::abi::call::{FnAbi, ArgAbi};
fn classify_ret_ty<Ty>(ret: &mut ArgType<'_, Ty>) {
fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) {
ret.extend_integer_width_to(32);
}
fn classify_arg_ty<Ty>(arg: &mut ArgType<'_, Ty>) {
fn classify_arg<Ty>(arg: &mut ArgAbi<'_, Ty>) {
arg.extend_integer_width_to(32);
}
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
if !fty.ret.is_ignore() {
classify_ret_ty(&mut fty.ret);
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
if !fn_abi.ret.is_ignore() {
classify_ret(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
classify_arg_ty(arg);
classify_arg(arg);
}
}


@ -1,4 +1,4 @@
use crate::abi::call::{ArgAttribute, FnType, PassMode, Reg, RegKind};
use crate::abi::call::{ArgAttribute, FnAbi, PassMode, Reg, RegKind};
use crate::abi::{self, HasDataLayout, LayoutOf, TyLayout, TyLayoutMethods};
use crate::spec::HasTargetSpec;
@ -25,12 +25,12 @@ fn is_single_fp_element<'a, Ty, C>(cx: &C, layout: TyLayout<'a, Ty>) -> bool
}
}
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Flavor)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>, flavor: Flavor)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout + HasTargetSpec
{
if !fty.ret.is_ignore() {
if fty.ret.layout.is_aggregate() {
if !fn_abi.ret.is_ignore() {
if fn_abi.ret.layout.is_aggregate() {
// Returning a structure. Most often, this will use
// a hidden first argument. On some platforms, though,
// small structs are returned as integers.
@ -42,30 +42,30 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Fla
if t.options.abi_return_struct_as_int {
// According to Clang, everyone but MSVC returns single-element
// float aggregates directly in a floating-point register.
if !t.options.is_like_msvc && is_single_fp_element(cx, fty.ret.layout) {
match fty.ret.layout.size.bytes() {
4 => fty.ret.cast_to(Reg::f32()),
8 => fty.ret.cast_to(Reg::f64()),
_ => fty.ret.make_indirect()
if !t.options.is_like_msvc && is_single_fp_element(cx, fn_abi.ret.layout) {
match fn_abi.ret.layout.size.bytes() {
4 => fn_abi.ret.cast_to(Reg::f32()),
8 => fn_abi.ret.cast_to(Reg::f64()),
_ => fn_abi.ret.make_indirect()
}
} else {
match fty.ret.layout.size.bytes() {
1 => fty.ret.cast_to(Reg::i8()),
2 => fty.ret.cast_to(Reg::i16()),
4 => fty.ret.cast_to(Reg::i32()),
8 => fty.ret.cast_to(Reg::i64()),
_ => fty.ret.make_indirect()
match fn_abi.ret.layout.size.bytes() {
1 => fn_abi.ret.cast_to(Reg::i8()),
2 => fn_abi.ret.cast_to(Reg::i16()),
4 => fn_abi.ret.cast_to(Reg::i32()),
8 => fn_abi.ret.cast_to(Reg::i64()),
_ => fn_abi.ret.make_indirect()
}
}
} else {
fty.ret.make_indirect();
fn_abi.ret.make_indirect();
}
} else {
fty.ret.extend_integer_width_to(32);
fn_abi.ret.extend_integer_width_to(32);
}
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
if arg.layout.is_aggregate() {
arg.make_indirect_byval();
@ -86,7 +86,7 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>, flavor: Fla
let mut free_regs = 2;
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
let attrs = match arg.mode {
PassMode::Ignore |
PassMode::Indirect(_, None) => continue,
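
The x86 hunk above is the one place where the renamed `FnAbi` value is consulted for target policy rather than just threaded through: when `abi_return_struct_as_int` is set, small aggregate returns are cast to an integer register (or, outside MSVC, a float register for single-float aggregates) chosen purely by byte size, and anything larger is returned indirectly. A hedged standalone sketch of just that size-driven decision; the enum and function names here are illustrative, not the real rustc_target API:

// Illustrative stand-ins; the real code calls cast_to / make_indirect
// on the function's return ArgAbi rather than returning a policy value.
#[derive(Debug, PartialEq)]
enum RetPolicy {
    CastToInt(u64),   // return in an integer register of this many bits
    CastToFloat(u64), // return in a float register of this many bits
    Indirect,         // return through a hidden pointer argument
}

fn small_struct_return(size_bytes: u64, is_single_fp: bool, is_like_msvc: bool) -> RetPolicy {
    if !is_like_msvc && is_single_fp {
        match size_bytes {
            4 => RetPolicy::CastToFloat(32),
            8 => RetPolicy::CastToFloat(64),
            _ => RetPolicy::Indirect,
        }
    } else {
        match size_bytes {
            1 => RetPolicy::CastToInt(8),
            2 => RetPolicy::CastToInt(16),
            4 => RetPolicy::CastToInt(32),
            8 => RetPolicy::CastToInt(64),
            _ => RetPolicy::Indirect,
        }
    }
}

fn main() {
    assert_eq!(small_struct_return(4, true, false), RetPolicy::CastToFloat(32));
    assert_eq!(small_struct_return(2, false, true), RetPolicy::CastToInt(16));
    assert_eq!(small_struct_return(16, false, false), RetPolicy::Indirect);
}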


@ -1,7 +1,7 @@
// The classification code for the x86_64 ABI is taken from the clay language
// https://github.com/jckarter/clay/blob/master/compiler/src/externals.cpp
use crate::abi::call::{ArgType, CastTarget, FnType, Reg, RegKind};
use crate::abi::call::{ArgAbi, CastTarget, FnAbi, Reg, RegKind};
use crate::abi::{self, Abi, HasDataLayout, LayoutOf, Size, TyLayout, TyLayoutMethods};
/// Classification of "eightbyte" components.
@ -21,7 +21,7 @@ struct Memory;
const LARGEST_VECTOR_SIZE: usize = 512;
const MAX_EIGHTBYTES: usize = LARGEST_VECTOR_SIZE / 64;
fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgType<'a, Ty>)
fn classify_arg<'a, Ty, C>(cx: &C, arg: &ArgAbi<'a, Ty>)
-> Result<[Option<Class>; MAX_EIGHTBYTES], Memory>
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
@ -170,14 +170,14 @@ fn cast_target(cls: &[Option<Class>], size: Size) -> CastTarget {
const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
const MAX_SSE_REGS: usize = 8; // XMM0-7
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
pub fn compute_abi_info<'a, Ty, C>(cx: &C, fn_abi: &mut FnAbi<'a, Ty>)
where Ty: TyLayoutMethods<'a, C> + Copy,
C: LayoutOf<Ty = Ty, TyLayout = TyLayout<'a, Ty>> + HasDataLayout
{
let mut int_regs = MAX_INT_REGS;
let mut sse_regs = MAX_SSE_REGS;
let mut x86_64_ty = |arg: &mut ArgType<'a, Ty>, is_arg: bool| {
let mut x86_64_arg_or_ret = |arg: &mut ArgAbi<'a, Ty>, is_arg: bool| {
let mut cls_or_mem = classify_arg(cx, arg);
if is_arg {
@ -234,12 +234,12 @@ pub fn compute_abi_info<'a, Ty, C>(cx: &C, fty: &mut FnType<'a, Ty>)
}
};
if !fty.ret.is_ignore() {
x86_64_ty(&mut fty.ret, false);
if !fn_abi.ret.is_ignore() {
x86_64_arg_or_ret(&mut fn_abi.ret, false);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
x86_64_ty(arg, true);
x86_64_arg_or_ret(arg, true);
}
}
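
Besides the type rename, the x86_64 module also drops the last `_ty` from its local closure: `x86_64_ty` becomes `x86_64_arg_or_ret`, since it classifies both the return slot and each argument while drawing on the shared register budgets (six integer registers RDI, RSI, RDX, RCX, R8, R9 and eight SSE registers XMM0-7). A rough standalone sketch of that budget bookkeeping, with simplified names and without the real eightbyte classification:

// Simplified model: each value needs some number of integer and SSE
// "eightbytes"; if the remaining budget cannot cover it, it is passed
// indirectly instead. Names and fallback logic are illustrative only.
const MAX_INT_REGS: usize = 6; // RDI, RSI, RDX, RCX, R8, R9
const MAX_SSE_REGS: usize = 8; // XMM0-7

struct Need {
    int: usize,
    sse: usize,
}

#[derive(Debug, PartialEq)]
enum Pass {
    InRegisters,
    Indirect,
}

fn assign(needs: &[Need]) -> Vec<Pass> {
    let mut int_regs = MAX_INT_REGS;
    let mut sse_regs = MAX_SSE_REGS;
    needs
        .iter()
        .map(|n| {
            if n.int <= int_regs && n.sse <= sse_regs {
                int_regs -= n.int;
                sse_regs -= n.sse;
                Pass::InRegisters
            } else {
                // Once registers run out, this value goes through memory;
                // the remaining budget is left untouched in this sketch.
                Pass::Indirect
            }
        })
        .collect()
}

fn main() {
    let needs = vec![Need { int: 2, sse: 0 }, Need { int: 5, sse: 0 }];
    assert_eq!(assign(&needs), vec![Pass::InRegisters, Pass::Indirect]);
}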


@ -1,10 +1,10 @@
use crate::abi::call::{ArgType, FnType, Reg};
use crate::abi::call::{ArgAbi, FnAbi, Reg};
use crate::abi::Abi;
// Win64 ABI: http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
let fixup = |a: &mut ArgType<'_, Ty>| {
pub fn compute_abi_info<Ty>(fn_abi: &mut FnAbi<'_, Ty>) {
let fixup = |a: &mut ArgAbi<'_, Ty>| {
match a.layout.abi {
Abi::Uninhabited => {}
Abi::ScalarPair(..) |
@ -31,10 +31,10 @@ pub fn compute_abi_info<Ty>(fty: &mut FnType<'_, Ty>) {
}
};
if !fty.ret.is_ignore() {
fixup(&mut fty.ret);
if !fn_abi.ret.is_ignore() {
fixup(&mut fn_abi.ret);
}
for arg in &mut fty.args {
for arg in &mut fn_abi.args {
if arg.is_ignore() { continue; }
fixup(arg);
}