Prefixed type methods & removed trait impl for write::CodegenContext
commit e224f063e8 (parent 6d42574b7a)
20 changed files with 263 additions and 270 deletions
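The rename is mechanical: every type-constructor method on the `TypeMethods` trait (and on the contexts implementing it) gains a `type_` prefix, so call sites read `cx.type_i8()` instead of `cx.i8()`, `cx.type_ptr_to(..)` instead of `cx.ptr_to(..)`, and `func_params` becomes `func_params_types`. Separately, `write::CodegenContext` stops implementing the `Backend` and `CommonWriteMethods` traits; the few helpers it actually needs (`val_ty`, `const_bytes_in_context`, `type_ptr_to`) become inherent methods. As orientation before the hunks, here is a minimal, self-contained sketch of the naming convention — a trimmed stand-in, not the full rustc trait (the real definition appears in the interfaces hunk below):

    // Sketch only: a reduced stand-in for the renamed trait surface.
    pub trait Backend {
        type Type;
        type TypeKind;
    }

    pub trait TypeMethods: Backend {
        fn type_i8(&self) -> Self::Type;                        // was `i8()`
        fn type_ix(&self, num_bits: u64) -> Self::Type;         // was `ix()`
        fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;    // was `ptr_to()`
        fn type_kind(&self, ty: Self::Type) -> Self::TypeKind;  // was `kind()`
    }

The prefix distinguishes type constructors from value-level helpers such as `const_u8` or `val_ty` at call sites.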
@@ -111,16 +111,16 @@ pub trait LlvmType {
 impl LlvmType for Reg {
     fn llvm_type(&self, cx: &CodegenCx<'ll, '_>) -> &'ll Type {
         match self.kind {
-            RegKind::Integer => cx.ix(self.size.bits()),
+            RegKind::Integer => cx.type_ix(self.size.bits()),
             RegKind::Float => {
                 match self.size.bits() {
-                    32 => cx.f32(),
-                    64 => cx.f64(),
+                    32 => cx.type_f32(),
+                    64 => cx.type_f64(),
                     _ => bug!("unsupported float: {:?}", self)
                 }
             }
             RegKind::Vector => {
-                cx.vector(cx.i8(), self.size.bytes())
+                cx.type_vector(cx.type_i8(), self.size.bytes())
             }
         }
     }
@@ -144,7 +144,7 @@ impl LlvmType for CastTarget {
 
         // Simplify to array when all chunks are the same size and type
         if rem_bytes == 0 {
-            return cx.array(rest_ll_unit, rest_count);
+            return cx.type_array(rest_ll_unit, rest_count);
         }
     }
 
@@ -159,10 +159,10 @@ impl LlvmType for CastTarget {
         if rem_bytes != 0 {
             // Only integers can be really split further.
             assert_eq!(self.rest.unit.kind, RegKind::Integer);
-            args.push(cx.ix(rem_bytes * 8));
+            args.push(cx.type_ix(rem_bytes * 8));
         }
 
-        cx.struct_(&args, false)
+        cx.type_struct(&args, false)
     }
 }
 
@@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
         // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
         let can_store_through_cast_ptr = false;
         if can_store_through_cast_ptr {
-            let cast_dst = bx.pointercast(dst.llval, cx.ptr_to(cast.llvm_type(cx)));
+            let cast_dst = bx.pointercast(dst.llval, cx.type_ptr_to(cast.llvm_type(cx)));
             bx.store(val, cast_dst, self.layout.align);
         } else {
             // The actual return type is a struct, but the ABI
@@ -240,9 +240,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
 
             // ...and then memcpy it to the intended destination.
             base::call_memcpy(bx,
-                              bx.pointercast(dst.llval, cx.i8p()),
+                              bx.pointercast(dst.llval, cx.type_i8p()),
                               self.layout.align,
-                              bx.pointercast(llscratch, cx.i8p()),
+                              bx.pointercast(llscratch, cx.type_i8p()),
                              scratch_align,
                              cx.const_usize(self.layout.size.bytes()),
                              MemFlags::empty());
@@ -635,14 +635,14 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
         );
 
         let llreturn_ty = match self.ret.mode {
-            PassMode::Ignore => cx.void(),
+            PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 self.ret.layout.immediate_llvm_type(cx)
             }
             PassMode::Cast(cast) => cast.llvm_type(cx),
             PassMode::Indirect(..) => {
-                llargument_tys.push(cx.ptr_to(self.ret.memory_ty(cx)));
-                cx.void()
+                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                cx.type_void()
             }
         };
 
@@ -668,15 +668,15 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
                 continue;
             }
             PassMode::Cast(cast) => cast.llvm_type(cx),
-            PassMode::Indirect(_, None) => cx.ptr_to(arg.memory_ty(cx)),
+            PassMode::Indirect(_, None) => cx.type_ptr_to(arg.memory_ty(cx)),
         };
         llargument_tys.push(llarg_ty);
     }
 
     if self.variadic {
-        cx.variadic_func(&llargument_tys, llreturn_ty)
+        cx.type_variadic_func(&llargument_tys, llreturn_ty)
     } else {
-        cx.func(&llargument_tys, llreturn_ty)
+        cx.type_func(&llargument_tys, llreturn_ty)
    }
 }
 
@@ -75,9 +75,9 @@ pub fn codegen_inline_asm(
     // Depending on how many outputs we have, the return type is different
     let num_outputs = output_types.len();
     let output_type = match num_outputs {
-        0 => bx.cx().void(),
+        0 => bx.cx().type_void(),
         1 => output_types[0],
-        _ => bx.cx().struct_(&output_types, false)
+        _ => bx.cx().type_struct(&output_types, false)
     };
 
     let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -24,7 +24,7 @@ use rustc::session::config::{self, OutputFilenames, OutputType, Passes, Sanitize
 use rustc::session::Session;
 use rustc::util::nodemap::FxHashMap;
 use time_graph::{self, TimeGraph, Timeline};
-use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic, BasicBlock};
+use llvm::{self, DiagnosticInfo, PassManager, SMDiagnostic};
 use llvm_util;
 use {CodegenResults, ModuleCodegen, CompiledModule, ModuleKind, // ModuleLlvm,
      CachedModuleCodegen};
@@ -46,7 +46,6 @@ use syntax_pos::MultiSpan;
 use syntax_pos::symbol::Symbol;
 use type_::Type;
 use context::{is_pie_binary, get_reloc_model};
-use interfaces::{Backend, CommonWriteMethods};
 use common;
 use jobserver::{Client, Acquired};
 use rustc_demangle;
@@ -429,15 +428,8 @@ impl CodegenContext<'ll> {
         }
     }
 }
 
-impl<'ll> Backend for CodegenContext<'ll> {
-    type Value = &'ll Value;
-    type BasicBlock = &'ll BasicBlock;
-    type Type = &'ll Type;
-    type Context = &'ll llvm::Context;
-    type TypeKind = llvm::TypeKind;
-}
 
-impl CommonWriteMethods for CodegenContext<'ll> {
+impl CodegenContext<'ll> {
     fn val_ty(&self, v: &'ll Value) -> &'ll Type {
         common::val_ty(v)
     }
@@ -446,18 +438,7 @@ impl CommonWriteMethods for CodegenContext<'ll> {
         common::const_bytes_in_context(llcx, bytes)
     }
 
-    fn const_struct_in_context(
-        &self,
-        llcx: &'a llvm::Context,
-        elts: &[&'a Value],
-        packed: bool,
-    ) -> &'a Value {
-        common::const_struct_in_context(llcx, elts, packed)
-    }
-}
-
-impl CodegenContext<'ll> {
-    pub fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
+    pub fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
         unsafe {
             llvm::LLVMPointerType(ty, 0)
         }
@@ -234,13 +234,13 @@ pub fn unsize_thin_ptr(
         (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
          &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
             assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+            let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
             (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
             let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
             assert!(bx.cx().type_is_sized(a));
-            let ptr_ty = bx.cx().ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
+            let ptr_ty = bx.cx().type_ptr_to(bx.cx().layout_of(b).llvm_type(bx.cx()));
             (bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
@@ -353,10 +353,10 @@ fn cast_shift_rhs<'ll, F, G>(bx: &Builder<'_, 'll, '_>,
     if op.is_shift() {
         let mut rhs_llty = bx.cx().val_ty(rhs);
         let mut lhs_llty = bx.cx().val_ty(lhs);
-        if bx.cx().kind(rhs_llty) == TypeKind::Vector {
+        if bx.cx().type_kind(rhs_llty) == TypeKind::Vector {
             rhs_llty = bx.cx().element_type(rhs_llty)
         }
-        if bx.cx().kind(lhs_llty) == TypeKind::Vector {
+        if bx.cx().type_kind(lhs_llty) == TypeKind::Vector {
             lhs_llty = bx.cx().element_type(lhs_llty)
         }
         let rhs_sz = bx.cx().int_width(rhs_llty);
@@ -393,8 +393,8 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>(
     bx: &Builder<'_ ,'ll, '_, &'ll Value>,
     val: &'ll Value
 ) -> &'ll Value {
-    if bx.cx().val_ty(val) == bx.cx().i1() {
-        bx.zext(val, bx.cx().i8())
+    if bx.cx().val_ty(val) == bx.cx().type_i1() {
+        bx.zext(val, bx.cx().type_i8())
     } else {
         val
     }
@@ -417,7 +417,7 @@ pub fn to_immediate_scalar(
     scalar: &layout::Scalar,
 ) -> &'ll Value {
     if scalar.is_bool() {
-        return bx.trunc(val, bx.cx().i1());
+        return bx.trunc(val, bx.cx().type_i1());
     }
     val
 }
@@ -434,13 +434,13 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>(
     if flags.contains(MemFlags::NONTEMPORAL) {
         // HACK(nox): This is inefficient but there is no nontemporal memcpy.
         let val = bx.load(src, src_align);
-        let ptr = bx.pointercast(dst, bx.cx().ptr_to(bx.cx().val_ty(val)));
+        let ptr = bx.pointercast(dst, bx.cx().type_ptr_to(bx.cx().val_ty(val)));
         bx.store_with_flags(val, ptr, dst_align, flags);
         return;
     }
     let cx = bx.cx();
-    let src_ptr = bx.pointercast(src, cx.i8p());
-    let dst_ptr = bx.pointercast(dst, cx.i8p());
+    let src_ptr = bx.pointercast(src, cx.type_i8p());
+    let dst_ptr = bx.pointercast(dst, cx.type_i8p());
     let size = bx.intcast(n_bytes, cx.isize_ty, false);
     let volatile = flags.contains(MemFlags::VOLATILE);
     bx.memcpy(dst_ptr, dst_align.abi(), src_ptr, src_align.abi(), size, volatile);
@@ -551,7 +551,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
         use_start_lang_item: bool,
     ) {
         let llfty =
-            cx.func(&[cx.t_int(), cx.ptr_to(cx.i8p())], cx.t_int());
+            cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int());
 
         let main_ret_ty = cx.tcx.fn_sig(rust_main_def_id).output();
         // Given that `main()` has no arguments,
@@ -594,7 +594,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
                 start_def_id,
                 cx.tcx.intern_substs(&[main_ret_ty.into()]),
             );
-            (start_fn, vec![bx.pointercast(rust_main, cx.ptr_to(cx.i8p())),
+            (start_fn, vec![bx.pointercast(rust_main, cx.type_ptr_to(cx.type_i8p())),
                             arg_argc, arg_argv])
         } else {
             debug!("using user-defined start fn");
@@ -602,7 +602,7 @@ fn maybe_create_entry_wrapper(cx: &CodegenCx) {
         };
 
         let result = bx.call(start_fn, &args, None);
-        bx.ret(bx.intcast(result, cx.t_int(), true));
+        bx.ret(bx.intcast(result, cx.type_int(), true));
     }
 }
 
@@ -1151,7 +1151,10 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     if !cx.used_statics.borrow().is_empty() {
         let name = const_cstr!("llvm.used");
         let section = const_cstr!("llvm.metadata");
-        let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
+        let array = cx.const_array(
+            &cx.type_ptr_to(cx.type_i8()),
+            &*cx.used_statics.borrow()
+        );
 
         unsafe {
             let g = llvm::LLVMAddGlobal(cx.llmod,
@@ -765,7 +765,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
         }).collect::<Vec<_>>();
 
         debug!("Asm Output Type: {:?}", output);
-        let fty = &self.cx().func(&argtys[..], output);
+        let fty = &self.cx().type_func(&argtys[..], output);
         unsafe {
             // Ask LLVM to verify that the constraints are well-formed.
             let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons);
@@ -861,9 +861,9 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
         unsafe {
             let elt_ty = self.cx.val_ty(elt);
-            let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
+            let undef = llvm::LLVMGetUndef(&self.cx().type_vector(elt_ty, num_elts as u64));
             let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-            let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
+            let vec_i32_ty = &self.cx().type_vector(&self.cx().type_i32(), num_elts as u64);
             self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
         }
     }
@@ -1142,9 +1142,9 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
                  ptr: &'ll Value) -> &'ll Value {
         let dest_ptr_ty = self.cx.val_ty(ptr);
         let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.ptr_to(stored_ty);
+        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
 
-        assert_eq!(self.cx.kind(dest_ptr_ty), llvm::TypeKind::Pointer);
+        assert_eq!(self.cx.type_kind(dest_ptr_ty), llvm::TypeKind::Pointer);
 
         if dest_ptr_ty == stored_ptr_ty {
             ptr
@@ -1163,14 +1163,14 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
                       args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
         let mut fn_ty = self.cx.val_ty(llfn);
         // Strip off pointers
-        while self.cx.kind(fn_ty) == llvm::TypeKind::Pointer {
+        while self.cx.type_kind(fn_ty) == llvm::TypeKind::Pointer {
             fn_ty = self.cx.element_type(fn_ty);
         }
 
-        assert!(self.cx.kind(fn_ty) == llvm::TypeKind::Function,
+        assert!(self.cx.type_kind(fn_ty) == llvm::TypeKind::Function,
                 "builder::{} not passed a function, but {:?}", typ, fn_ty);
 
-        let param_tys = self.cx.func_params(fn_ty);
+        let param_tys = self.cx.func_params_types(fn_ty);
 
         let all_args_match = param_tys.iter()
             .zip(args.iter().map(|&v| self.cx().val_ty(v)))
@@ -1227,7 +1227,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
 
         let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
 
-        let ptr = self.pointercast(ptr, self.cx.i8p());
+        let ptr = self.pointercast(ptr, self.cx.type_i8p());
         self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
     }
 
@@ -236,19 +236,19 @@ impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_bool(&self, val: bool) -> &'ll Value {
-        &self.const_uint(&self.i1(), val as u64)
+        &self.const_uint(&self.type_i1(), val as u64)
     }
 
     fn const_i32(&self, i: i32) -> &'ll Value {
-        &self.const_int(&self.i32(), i as i64)
+        &self.const_int(&self.type_i32(), i as i64)
     }
 
     fn const_u32(&self, i: u32) -> &'ll Value {
-        &self.const_uint(&self.i32(), i as u64)
+        &self.const_uint(&self.type_i32(), i as u64)
     }
 
     fn const_u64(&self, i: u64) -> &'ll Value {
-        &self.const_uint(&self.i64(), i)
+        &self.const_uint(&self.type_i64(), i)
     }
 
     fn const_usize(&self, i: u64) -> &'ll Value {
@@ -262,7 +262,7 @@ impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     }
 
     fn const_u8(&self, i: u8) -> &'ll Value {
-        &self.const_uint(&self.i8(), i as u64)
+        &self.const_uint(&self.type_i8(), i as u64)
     }
 
 
@@ -300,7 +300,7 @@ impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value {
         let len = s.len();
         let cs = consts::ptrcast(&self.const_cstr(s, false),
-            &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
+            &self.type_ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
         &self.const_fat_ptr(cs, &self.const_usize(len as u64))
     }
 
@@ -505,7 +505,7 @@ pub fn shift_mask_val(
     mask_llty: &'ll Type,
     invert: bool
 ) -> &'ll Value {
-    let kind = bx.cx().kind(llty);
+    let kind = bx.cx().type_kind(llty);
     match kind {
         TypeKind::Integer => {
             // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
@@ -313,8 +313,8 @@ pub fn codegen_static<'a, 'tcx>(
         // boolean SSA values are i1, but they have to be stored in i8 slots,
         // otherwise some LLVM optimization passes don't work as expected
         let mut val_llty = cx.val_ty(v);
-        let v = if val_llty == cx.i1() {
-            val_llty = cx.i8();
+        let v = if val_llty == cx.type_i1() {
+            val_llty = cx.type_i8();
             llvm::LLVMConstZExt(v, val_llty)
         } else {
             v
@@ -432,7 +432,7 @@ pub fn codegen_static<'a, 'tcx>(
 
         if attrs.flags.contains(CodegenFnAttrFlags::USED) {
             // This static will be stored in the llvm.used variable which is an array of i8*
-            let cast = llvm::LLVMConstPointerCast(g, cx.i8p());
+            let cast = llvm::LLVMConstPointerCast(g, cx.type_i8p());
             cx.used_statics.borrow_mut().push(cast);
         }
     }
@@ -380,7 +380,7 @@ impl<'b, 'tcx> CodegenCx<'b, 'tcx> {
             } else {
                 "rust_eh_personality"
             };
-            let fty = &self.variadic_func(&[], &self.i32());
+            let fty = &self.type_variadic_func(&[], &self.type_i32());
             declare::declare_cfn(self, name, fty)
         }
     };
@@ -488,7 +488,7 @@ fn declare_intrinsic(
     macro_rules! ifn {
         ($name:expr, fn() -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.func(&[], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_func(&[], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -496,7 +496,7 @@ fn declare_intrinsic(
         );
         ($name:expr, fn(...) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.variadic_func(&[], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_variadic_func(&[], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -504,7 +504,7 @@ fn declare_intrinsic(
         );
         ($name:expr, fn($($arg:expr),*) -> $ret:expr) => (
             if key == $name {
-                let f = declare::declare_cfn(cx, $name, cx.func(&[$($arg),*], $ret));
+                let f = declare::declare_cfn(cx, $name, cx.type_func(&[$($arg),*], $ret));
                 llvm::SetUnnamedAddr(f, false);
                 cx.intrinsics.borrow_mut().insert($name, f.clone());
                 return Some(f);
@@ -512,28 +512,28 @@
         );
     }
     macro_rules! mk_struct {
-        ($($field_ty:expr),*) => (cx.struct_( &[$($field_ty),*], false))
+        ($($field_ty:expr),*) => (cx.type_struct( &[$($field_ty),*], false))
     }
 
-    let i8p = cx.i8p();
-    let void = cx.void();
-    let i1 = cx.i1();
-    let t_i8 = cx.i8();
-    let t_i16 = cx.i16();
-    let t_i32 = cx.i32();
-    let t_i64 = cx.i64();
-    let t_i128 = cx.i128();
-    let t_f32 = cx.f32();
-    let t_f64 = cx.f64();
+    let i8p = cx.type_i8p();
+    let void = cx.type_void();
+    let i1 = cx.type_i1();
+    let t_i8 = cx.type_i8();
+    let t_i16 = cx.type_i16();
+    let t_i32 = cx.type_i32();
+    let t_i64 = cx.type_i64();
+    let t_i128 = cx.type_i128();
+    let t_f32 = cx.type_f32();
+    let t_f64 = cx.type_f64();
 
-    let t_v2f32 = cx.vector(t_f32, 2);
-    let t_v4f32 = cx.vector(t_f32, 4);
-    let t_v8f32 = cx.vector(t_f32, 8);
-    let t_v16f32 = cx.vector(t_f32, 16);
+    let t_v2f32 = cx.type_vector(t_f32, 2);
+    let t_v4f32 = cx.type_vector(t_f32, 4);
+    let t_v8f32 = cx.type_vector(t_f32, 8);
+    let t_v16f32 = cx.type_vector(t_f32, 16);
 
-    let t_v2f64 = cx.vector(t_f64, 2);
-    let t_v4f64 = cx.vector(t_f64, 4);
-    let t_v8f64 = cx.vector(t_f64, 8);
+    let t_v2f64 = cx.type_vector(t_f64, 2);
+    let t_v4f64 = cx.type_vector(t_f64, 4);
+    let t_v8f64 = cx.type_vector(t_f64, 8);
 
     ifn!("llvm.memset.p0i8.i16", fn(i8p, t_i8, t_i16, t_i32, i1) -> void);
     ifn!("llvm.memset.p0i8.i32", fn(i8p, t_i8, t_i32, t_i32, i1) -> void);
@@ -786,8 +786,8 @@ fn declare_intrinsic(
     ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
 
     if cx.sess().opts.debuginfo != DebugInfo::None {
-        ifn!("llvm.dbg.declare", fn(cx.metadata(), cx.metadata()) -> void);
-        ifn!("llvm.dbg.value", fn(cx.metadata(), t_i64, cx.metadata()) -> void);
+        ifn!("llvm.dbg.declare", fn(cx.type_metadata(), cx.type_metadata()) -> void);
+        ifn!("llvm.dbg.value", fn(cx.type_metadata(), t_i64, cx.type_metadata()) -> void);
     }
 
     None
@@ -55,7 +55,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
     let section_contents = b"\x01gdb_load_rust_pretty_printers.py\0";
 
     unsafe {
-        let llvm_type = cx.array(cx.i8(),
+        let llvm_type = cx.type_array(cx.type_i8(),
                                  section_contents.len() as u64);
 
         let section_var = declare::define_global(cx, section_var_name,
@@ -11,31 +11,31 @@
 use super::backend::Backend;
 
 pub trait TypeMethods : Backend {
-    fn void(&self) -> Self::Type;
-    fn metadata(&self) -> Self::Type;
-    fn i1(&self) -> Self::Type;
-    fn i8(&self) -> Self::Type;
-    fn i16(&self) -> Self::Type;
-    fn i32(&self) -> Self::Type;
-    fn i64(&self) -> Self::Type;
-    fn i128(&self) -> Self::Type;
-    fn ix(&self, num_bites: u64) -> Self::Type;
-    fn f32(&self) -> Self::Type;
-    fn f64(&self) -> Self::Type;
-    fn x86_mmx(&self) -> Self::Type;
+    fn type_void(&self) -> Self::Type;
+    fn type_metadata(&self) -> Self::Type;
+    fn type_i1(&self) -> Self::Type;
+    fn type_i8(&self) -> Self::Type;
+    fn type_i16(&self) -> Self::Type;
+    fn type_i32(&self) -> Self::Type;
+    fn type_i64(&self) -> Self::Type;
+    fn type_i128(&self) -> Self::Type;
+    fn type_ix(&self, num_bites: u64) -> Self::Type;
+    fn type_f32(&self) -> Self::Type;
+    fn type_f64(&self) -> Self::Type;
+    fn type_x86_mmx(&self) -> Self::Type;
 
-    fn func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
-    fn struct_(&self, els: &[Self::Type], packed: bool) -> Self::Type;
-    fn named_struct(&self, name: &str) -> Self::Type;
-    fn array(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn vector(&self, ty: Self::Type, len: u64) -> Self::Type;
-    fn kind(&self, ty: Self::Type) -> Self::TypeKind;
+    fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+    fn type_variadic_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
+    fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
+    fn type_named_struct(&self, name: &str) -> Self::Type;
+    fn type_array(&self, ty: Self::Type, len: u64) -> Self::Type;
+    fn type_vector(&self, ty: Self::Type, len: u64) -> Self::Type;
+    fn type_kind(&self, ty: Self::Type) -> Self::TypeKind;
     fn set_struct_body(&self, ty: Self::Type, els: &[Self::Type], packed: bool);
-    fn ptr_to(&self, ty: Self::Type) -> Self::Type;
+    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;
     fn vector_length(&self, ty: Self::Type) -> usize;
-    fn func_params(&self, ty: Self::Type) -> Vec<Self::Type>;
+    fn func_params_types(&self, ty: Self::Type) -> Vec<Self::Type>;
     fn float_width(&self, ty: Self::Type) -> usize;
     fn int_width(&self, ty: Self::Type) -> u64;
 }
@@ -252,7 +252,7 @@ pub fn codegen_intrinsic_call(
             let tp_ty = substs.type_at(0);
             let mut ptr = args[0].immediate();
             if let PassMode::Cast(ty) = fn_ty.ret.mode {
-                ptr = bx.pointercast(ptr, bx.cx().ptr_to(ty.llvm_type(cx)));
+                ptr = bx.pointercast(ptr, bx.cx().type_ptr_to(ty.llvm_type(cx)));
             }
             let load = bx.volatile_load(ptr);
             let align = if name == "unaligned_volatile_load" {
@@ -338,7 +338,7 @@
                     args[1].immediate()
                 ], None);
                 let val = bx.extract_value(pair, 0);
-                let overflow = bx.zext(bx.extract_value(pair, 1), cx.bool());
+                let overflow = bx.zext(bx.extract_value(pair, 1), cx.type_bool());
 
                 let dest = result.project_field(bx, 0);
                 bx.store(val, dest.llval, dest.align);
@@ -388,7 +388,7 @@
             } else {
                 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-                let width = cx.const_uint(cx.ix(width), width);
+                let width = cx.const_uint(cx.type_ix(width), width);
                 let shift = bx.urem(raw_shift, width);
                 let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
                 let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
@@ -495,7 +495,7 @@
                                    failorder,
                                    weak);
                 let val = bx.extract_value(pair, 0);
-                let success = bx.zext(bx.extract_value(pair, 1), bx.cx().bool());
+                let success = bx.zext(bx.extract_value(pair, 1), bx.cx().type_bool());
 
                 let dest = result.project_field(bx, 0);
                 bx.store(val, dest.llval, dest.align);
@@ -582,32 +582,32 @@
    fn ty_to_type(cx: &CodegenCx<'ll, '_>, t: &intrinsics::Type) -> Vec<&'ll Type> {
        use intrinsics::Type::*;
        match *t {
-            Void => vec![cx.void()],
+            Void => vec![cx.type_void()],
            Integer(_signed, _width, llvm_width) => {
-                vec![cx.ix( llvm_width as u64)]
+                vec![cx.type_ix( llvm_width as u64)]
            }
            Float(x) => {
                match x {
-                    32 => vec![cx.f32()],
-                    64 => vec![cx.f64()],
+                    32 => vec![cx.type_f32()],
+                    64 => vec![cx.type_f64()],
                    _ => bug!()
                }
            }
            Pointer(ref t, ref llvm_elem, _const) => {
                let t = llvm_elem.as_ref().unwrap_or(t);
                let elem = one(ty_to_type(cx, t));
-                vec![cx.ptr_to(elem)]
+                vec![cx.type_ptr_to(elem)]
            }
            Vector(ref t, ref llvm_elem, length) => {
                let t = llvm_elem.as_ref().unwrap_or(t);
                let elem = one(ty_to_type(cx, t));
-                vec![cx.vector(elem, length as u64)]
+                vec![cx.type_vector(elem, length as u64)]
            }
            Aggregate(false, ref contents) => {
                let elems = contents.iter()
                                    .map(|t| one(ty_to_type(cx, t)))
                                    .collect::<Vec<_>>();
-                vec![cx.struct_( &elems, false)]
+                vec![cx.type_struct( &elems, false)]
            }
            Aggregate(true, ref contents) => {
                contents.iter()
@@ -646,20 +646,20 @@
            }
            intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
                let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
-                vec![bx.pointercast(arg.immediate(), bx.cx().ptr_to(llvm_elem))]
+                vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
            }
            intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
                let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
                vec![
                    bx.bitcast(arg.immediate(),
-                    bx.cx().vector(llvm_elem, length as u64))
+                    bx.cx().type_vector(llvm_elem, length as u64))
                ]
            }
            intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
                // the LLVM intrinsic uses a smaller integer
                // size than the C intrinsic's signature, so
                // we have to trim it down here.
-                vec![bx.trunc(arg.immediate(), bx.cx().ix(llvm_width as u64))]
+                vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
            }
            _ => vec![arg.immediate()],
        }
@@ -681,7 +681,7 @@
        intrinsics::IntrinsicDef::Named(name) => {
            let f = declare::declare_cfn(cx,
                                         name,
-                                         cx.func(&inputs, outputs));
+                                         cx.type_func(&inputs, outputs));
            bx.call(f, &llargs, None)
        }
    };
@@ -705,7 +705,7 @@
 
    if !fn_ty.ret.is_ignore() {
        if let PassMode::Cast(ty) = fn_ty.ret.mode {
-            let ptr = bx.pointercast(result.llval, cx.ptr_to(ty.llvm_type(cx)));
+            let ptr = bx.pointercast(result.llval, cx.type_ptr_to(ty.llvm_type(cx)));
            bx.store(llval, ptr, result.align);
        } else {
            OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
@@ -727,8 +727,8 @@ fn copy_intrinsic(
    let (size, align) = cx.size_and_align_of(ty);
    let size = cx.const_usize(size.bytes());
    let align = align.abi();
-    let dst_ptr = bx.pointercast(dst, cx.i8p());
-    let src_ptr = bx.pointercast(src, cx.i8p());
+    let dst_ptr = bx.pointercast(dst, cx.type_i8p());
+    let src_ptr = bx.pointercast(src, cx.type_i8p());
    if allow_overlap {
        bx.memmove(dst_ptr, align, src_ptr, align, bx.mul(size, count), volatile)
    } else {
@@ -748,7 +748,7 @@ fn memset_intrinsic(
    let (size, align) = cx.size_and_align_of(ty);
    let size = cx.const_usize(size.bytes());
    let align = cx.const_i32(align.abi() as i32);
-    let dst = bx.pointercast(dst, cx.i8p());
+    let dst = bx.pointercast(dst, cx.type_i8p());
    call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
 }
 
@@ -763,7 +763,7 @@ fn try_intrinsic(
    if bx.sess().no_landing_pads() {
        bx.call(func, &[data], None);
        let ptr_align = bx.tcx().data_layout.pointer_align;
-        bx.store(cx.const_null(cx.i8p()), dest, ptr_align);
+        bx.store(cx.const_null(cx.type_i8p()), dest, ptr_align);
    } else if wants_msvc_seh(bx.sess()) {
        codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
    } else {
@@ -839,7 +839,7 @@ fn codegen_msvc_try(
    //      }
    //
    // More information can be found in libstd's seh.rs implementation.
-    let i64p = cx.ptr_to(cx.i64());
+    let i64p = cx.type_ptr_to(cx.type_i64());
    let ptr_align = bx.tcx().data_layout.pointer_align;
    let slot = bx.alloca(i64p, "slot", ptr_align);
    bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
@@ -930,12 +930,12 @@ fn codegen_gnu_try(
        // being thrown. The second value is a "selector" indicating which of
        // the landing pad clauses the exception's type had been matched to.
        // rust_try ignores the selector.
-        let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false);
+        let lpad_ty = cx.type_struct(&[cx.type_i8p(), cx.type_i32()], false);
        let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
-        catch.add_clause(vals, bx.cx().const_null(cx.i8p()));
+        catch.add_clause(vals, bx.cx().const_null(cx.type_i8p()));
        let ptr = catch.extract_value(vals, 0);
        let ptr_align = bx.tcx().data_layout.pointer_align;
-        catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align);
+        catch.store(ptr, catch.bitcast(local_ptr, cx.type_ptr_to(cx.type_i8p())), ptr_align);
        catch.ret(cx.const_i32(1));
    });
 
@@ -1078,7 +1078,7 @@ fn generic_simd_intrinsic(
                      found `{}` with length {}",
                  in_len, in_ty,
                  ret_ty, out_len);
-        require!(bx.cx().kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
+        require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
                  "expected return type with integer elements, found `{}` with non-integer `{}`",
                  ret_ty,
                  ret_ty.simd_type(tcx));
@@ -1167,8 +1167,8 @@
            _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
        }
        // truncate the mask to a vector of i1s
-        let i1 = bx.cx().i1();
-        let i1xn = bx.cx().vector(i1, m_len as u64);
+        let i1 = bx.cx().type_i1();
+        let i1xn = bx.cx().type_vector(i1, m_len as u64);
        let m_i1s = bx.trunc(args[0].immediate(), i1xn);
        return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
    }
@@ -1300,16 +1300,16 @@
                      mut no_pointers: usize) -> &'ll Type {
        // FIXME: use cx.layout_of(ty).llvm_type() ?
        let mut elem_ty = match elem_ty.sty {
-            ty::Int(v) => cx.int_from_ty( v),
-            ty::Uint(v) => cx.uint_from_ty( v),
-            ty::Float(v) => cx.float_from_ty( v),
+            ty::Int(v) => cx.type_int_from_ty( v),
+            ty::Uint(v) => cx.type_uint_from_ty( v),
+            ty::Float(v) => cx.type_float_from_ty( v),
            _ => unreachable!(),
        };
        while no_pointers > 0 {
-            elem_ty = cx.ptr_to(elem_ty);
+            elem_ty = cx.type_ptr_to(elem_ty);
            no_pointers -= 1;
        }
-        cx.vector(elem_ty, vec_len as u64)
+        cx.type_vector(elem_ty, vec_len as u64)
    }
 
 
@@ -1386,13 +1386,13 @@
        }
 
        // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().i32();
+        let alignment_ty = bx.cx().type_i32();
        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
 
        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
-            let i1 = bx.cx().i1();
-            let i1xn = bx.cx().vector(i1, in_len as u64);
+            let i1 = bx.cx().type_i1();
+            let i1xn = bx.cx().type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };
 
@@ -1407,7 +1407,7 @@
        let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
-                                     bx.cx().func(&[
+                                     bx.cx().type_func(&[
                                         llvm_pointer_vec_ty,
                                         alignment_ty,
                                         mask_ty,
@@ -1486,17 +1486,17 @@
        }
 
        // Alignment of T, must be a constant integer value:
-        let alignment_ty = bx.cx().i32();
+        let alignment_ty = bx.cx().type_i32();
        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
 
        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
-            let i1 = bx.cx().i1();
-            let i1xn = bx.cx().vector(i1, in_len as u64);
+            let i1 = bx.cx().type_i1();
+            let i1xn = bx.cx().type_vector(i1, in_len as u64);
            (bx.trunc(args[2].immediate(), i1xn), i1xn)
        };
 
-        let ret_t = bx.cx().void();
+        let ret_t = bx.cx().type_void();
 
        // Type of the vector of pointers:
        let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
@@ -1509,7 +1509,7 @@
        let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                     llvm_elem_vec_str, llvm_pointer_vec_str);
        let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
-                                     bx.cx().func(&[llvm_elem_vec_ty,
+                                     bx.cx().type_func(&[llvm_elem_vec_ty,
                                                    llvm_pointer_vec_ty,
                                                    alignment_ty,
                                                    mask_ty], ret_t));
@@ -1565,8 +1565,8 @@
        } else {
            // unordered arithmetic reductions do not:
            match f.bit_width() {
-                32 => bx.cx().const_undef(bx.cx().f32()),
-                64 => bx.cx().const_undef(bx.cx().f64()),
+                32 => bx.cx().const_undef(bx.cx().type_f32()),
+                64 => bx.cx().const_undef(bx.cx().type_f64()),
                v => {
                    return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
@@ -1643,8 +1643,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
            }
 
            // boolean reductions operate on vectors of i1s:
-            let i1 = bx.cx().i1();
-            let i1xn = bx.cx().vector(i1, in_len as u64);
+            let i1 = bx.cx().type_i1();
+            let i1xn = bx.cx().type_vector(i1, in_len as u64);
            bx.trunc(args[0].immediate(), i1xn)
        };
        return match in_elem.sty {
@@ -1654,7 +1654,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                    if !$boolean {
                        r
                    } else {
-                        bx.zext(r, bx.cx().bool())
+                        bx.zext(r, bx.cx().type_bool())
                    }
                )
            },
@@ -42,7 +42,7 @@ impl<'a, 'tcx> VirtualIndex {
 
        let llvtable = bx.pointercast(
            llvtable,
-            bx.cx().ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
+            bx.cx().type_ptr_to(fn_ty.ptr_to_llvm_type(bx.cx()))
        );
        let ptr_align = bx.tcx().data_layout.pointer_align;
        let ptr = bx.load(
@@ -63,7 +63,7 @@ impl<'a, 'tcx> VirtualIndex {
        // Load the data pointer from the object.
        debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize()));
+        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
        let usize_align = bx.tcx().data_layout.pointer_align;
        let ptr = bx.load(
            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
@@ -98,7 +98,7 @@ pub fn get_vtable(
    }
 
    // Not in the cache. Build it.
-    let nullptr = cx.const_null(cx.i8p());
+    let nullptr = cx.const_null(cx.type_i8p());
 
    let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
    let methods = methods.iter().cloned().map(|opt_mth| {
@@ -268,7 +268,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                    }
                };
                bx.load(
-                    bx.pointercast(llslot, bx.cx().ptr_to(cast_ty.llvm_type(bx.cx()))),
+                    bx.pointercast(llslot, bx.cx().type_ptr_to(cast_ty.llvm_type(bx.cx()))),
                    self.fn_ty.ret.layout.align)
            }
        };
@@ -560,7 +560,7 @@
        let dest = match ret_dest {
            _ if fn_ty.ret.is_indirect() => llargs[0],
            ReturnDest::Nothing => {
-                bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
+                bx.cx().const_undef(bx.cx().type_ptr_to(fn_ty.ret.memory_ty(bx.cx())))
            }
            ReturnDest::IndirectOperand(dst, _) |
            ReturnDest::Store(dst) => dst.llval,
@@ -801,7 +801,7 @@
        if by_ref && !arg.is_indirect() {
            // Have to load the argument, maybe while casting it.
            if let PassMode::Cast(ty) = arg.mode {
-                llval = bx.load(bx.pointercast(llval, bx.cx().ptr_to(ty.llvm_type(bx.cx()))),
+                llval = bx.load(bx.pointercast(llval, bx.cx().type_ptr_to(ty.llvm_type(bx.cx()))),
                                align.min(arg.layout.align));
            } else {
                // We can't use `PlaceRef::load` here because the argument
@@ -902,7 +902,7 @@
 
    fn landing_pad_type(&self) -> &'ll Type {
        let cx = self.cx;
-        cx.struct_( &[cx.i8p(), cx.i32()], false)
+        cx.type_struct( &[cx.type_i8p(), cx.type_i32()], false)
    }
 
    fn unreachable_block(&mut self) -> &'ll BasicBlock {
@@ -1014,7 +1014,7 @@
                        dst: PlaceRef<'tcx, &'ll Value>) {
        let src = self.codegen_operand(bx, src);
        let llty = src.layout.llvm_type(bx.cx());
-        let cast_ptr = bx.pointercast(dst.llval, bx.cx().ptr_to(llty));
+        let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
        let align = src.layout.align.min(dst.layout.align);
        src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
    }
@@ -40,11 +40,11 @@ pub fn scalar_to_llvm(
    match cv {
        Scalar::Bits { size: 0, .. } => {
            assert_eq!(0, layout.value.size(cx).bytes());
-            cx.const_undef(cx.ix(0))
+            cx.const_undef(cx.type_ix(0))
        },
        Scalar::Bits { bits, size } => {
            assert_eq!(size as u64, layout.value.size(cx).bytes());
-            let llval = cx.const_uint_big(cx.ix(bitsize), bits);
+            let llval = cx.const_uint_big(cx.type_ix(bitsize), bits);
            if layout.value == layout::Pointer {
                unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
            } else {
@@ -72,7 +72,7 @@
                None => bug!("missing allocation {:?}", ptr.alloc_id),
            };
            let llval = unsafe { llvm::LLVMConstInBoundsGEP(
-                consts::bitcast(base_addr, cx.i8p()),
+                consts::bitcast(base_addr, cx.type_i8p()),
                &cx.const_usize(ptr.offset.bytes()),
                1,
            ) };
@@ -109,7 +109,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
                value: layout::Primitive::Pointer,
                valid_range: 0..=!0
            },
-            cx.i8p()
+            cx.type_i8p()
        ));
        next_offset = offset + pointer_size;
    }
|
@@ -419,7 +419,7 @@ fn create_funclets(
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
let null = bx.cx().const_null(bx.cx().i8p());
let null = bx.cx().const_null(bx.cx().type_i8p());
let sixty_four = bx.cx().const_i32(64);
cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
cp_bx.br(llbb);
@@ -348,7 +348,7 @@ impl OperandValue<&'ll Value> {

// Allocate an appropriate region on the stack, and copy the value into it
let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
let lldst = bx.array_alloca(bx.cx().i8(), llsize, "unsized_tmp", max_align);
let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);
base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);

// Store the allocated region and the extra to the indirect place.
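This hunk spills an unsized value whose size is only known at runtime: `size_and_align_of_dst` computes the dynamic size, `array_alloca` reserves that many bytes of stack, and a memcpy moves the payload over. The core of it, with the same names as the diff:

// Stack scratch of llsize bytes, typed as a raw byte array.
let lldst = bx.array_alloca(bx.cx().type_i8(), llsize, "unsized_tmp", max_align);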
@@ -462,7 +462,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// We've errored, so we don't have to produce working code.
let layout = bx.cx().layout_of(ty);
PlaceRef::new_sized(
bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
bx.cx().const_undef(bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))),
layout,
layout.align,
).load(bx)
@@ -67,11 +67,11 @@ impl PlaceRef<'tcx, &'ll Value> {
let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);

let llval = unsafe { LLVMConstInBoundsGEP(
consts::bitcast(base_addr, bx.cx().i8p()),
consts::bitcast(base_addr, bx.cx().type_i8p()),
&bx.cx().const_usize(offset.bytes()),
1,
)};
let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
let llval = consts::bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
PlaceRef::new_sized(llval, layout, alloc.align)
}
@@ -159,7 +159,7 @@ impl PlaceRef<'tcx, &'ll Value> {
let load = bx.load(llptr, self.align);
scalar_load_metadata(load, scalar);
if scalar.is_bool() {
bx.trunc(load, bx.cx().i1())
bx.trunc(load, bx.cx().type_i1())
} else {
load
}
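The truncation is the load half of rustc's `bool` convention: in memory a `bool` is an `i8` (a well-defined byte), while immediates are `i1`, so stores widen and loads truncate. As a sketch, given `bx` and a freshly loaded `i8` value `load`:

// i8 in memory, i1 as an immediate value.
let flag = bx.trunc(load, bx.cx().type_i1());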
@@ -196,7 +196,7 @@ impl PlaceRef<'tcx, &'ll Value> {
};
PlaceRef {
// HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))),
llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))),
llextra: if cx.type_has_metadata(field.ty) {
self.llextra
} else {
@@ -265,7 +265,7 @@ impl PlaceRef<'tcx, &'ll Value> {
debug!("struct_field_ptr: DST field offset: {:?}", offset);

// Cast and adjust pointer
let byte_ptr = bx.pointercast(self.llval, cx.i8p());
let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
let byte_ptr = bx.gep(byte_ptr, &[offset]);

// Finally, cast back to the type expected
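Since the DST field's offset is only computed at runtime here, the code drops to untyped pointer arithmetic: cast the base pointer to `i8*`, advance by the byte offset with a GEP, and (in the next hunk) cast back to the field's own type. Condensed, with names as in the diff:

// Byte-granular GEP sidesteps the static field layout.
let byte_ptr = bx.gep(bx.pointercast(self.llval, cx.type_i8p()), &[offset]);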
@@ -273,7 +273,7 @@ impl PlaceRef<'tcx, &'ll Value> {
debug!("struct_field_ptr: Field type is {:?}", ll_fty);

PlaceRef {
llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)),
llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
llextra: self.llextra,
layout: field,
align: effective_field_align,
@@ -379,7 +379,10 @@ impl PlaceRef<'tcx, &'ll Value> {
bx.sess().target.target.arch == "aarch64" {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
let llptr = bx.pointercast(
self.llval,
bx.cx().type_ptr_to(bx.cx().type_i8())
);
let fill_byte = bx.cx().const_u8(0);
let (size, align) = self.layout.size_and_align();
let size = bx.cx().const_usize(size.bytes());
@@ -422,7 +425,7 @@ impl PlaceRef<'tcx, &'ll Value> {

// Cast to the appropriate variant struct type.
let variant_ty = downcast.layout.llvm_type(bx.cx());
downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty));
downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

downcast
}
@@ -483,7 +486,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// so we generate an abort
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
let llval = bx.cx().const_undef(
bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
);
PlaceRef::new_sized(llval, layout, layout.align)
}
}
@@ -543,7 +548,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Cast the place pointer type to the new
// array or slice type (*[%_; new_len]).
subslice.llval = bx.pointercast(subslice.llval,
bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx())));
bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));

subslice
}
@@ -117,7 +117,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {

// Use llvm.memset.p0i8.* to initialize byte arrays
let v = base::from_immediate(&bx, v);
if bx.cx().val_ty(v) == bx.cx().i8() {
if bx.cx().val_ty(v) == bx.cx().type_i8() {
base::call_memset(&bx, start, v, size, align, false);
return bx;
}
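The `type_i8` comparison is a fast path: when the fill value is a single byte, the whole byte array can be initialized with one `llvm.memset` call instead of a store per element; wider element types fall through to the general path. The guard, restated:

// One memset covers the whole [N x i8] when the element is a byte.
if bx.cx().val_ty(v) == bx.cx().type_i8() {
base::call_memset(&bx, start, v, size, align, false);
return bx;
}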
@@ -43,82 +43,82 @@ impl fmt::Debug for Type {

impl TypeMethods for CodegenCx<'ll, 'tcx> {

fn void(&self) -> &'ll Type {
fn type_void(&self) -> &'ll Type {
unsafe {
llvm::LLVMVoidTypeInContext(self.llcx)
}
}

fn metadata(&self) -> &'ll Type {
fn type_metadata(&self) -> &'ll Type {
unsafe {
llvm::LLVMRustMetadataTypeInContext(self.llcx)
}
}

fn i1(&self) -> &'ll Type {
fn type_i1(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt1TypeInContext(self.llcx)
}
}

fn i8(&self) -> &'ll Type {
fn type_i8(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt8TypeInContext(self.llcx)
}
}

fn i16(&self) -> &'ll Type {
fn type_i16(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt16TypeInContext(self.llcx)
}
}

fn i32(&self) -> &'ll Type {
fn type_i32(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt32TypeInContext(self.llcx)
}
}

fn i64(&self) -> &'ll Type {
fn type_i64(&self) -> &'ll Type {
unsafe {
llvm::LLVMInt64TypeInContext(self.llcx)
}
}

fn i128(&self) -> &'ll Type {
fn type_i128(&self) -> &'ll Type {
unsafe {
llvm::LLVMIntTypeInContext(self.llcx, 128)
}
}

// Creates an integer type with the given number of bits, e.g. i24
fn ix(&self, num_bits: u64) -> &'ll Type {
fn type_ix(&self, num_bits: u64) -> &'ll Type {
unsafe {
llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint)
}
}

fn f32(&self) -> &'ll Type {
fn type_f32(&self) -> &'ll Type {
unsafe {
llvm::LLVMFloatTypeInContext(self.llcx)
}
}

fn f64(&self) -> &'ll Type {
fn type_f64(&self) -> &'ll Type {
unsafe {
llvm::LLVMDoubleTypeInContext(self.llcx)
}
}

fn x86_mmx(&self) -> &'ll Type {
fn type_x86_mmx(&self) -> &'ll Type {
unsafe {
llvm::LLVMX86MMXTypeInContext(self.llcx)
}
}

fn func(
fn type_func(
&self,
args: &[&'ll Type],
ret: &'ll Type
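Each of these `type_*` methods is a thin wrapper over the matching LLVM-C constructor (`LLVMInt8TypeInContext`, `LLVMVectorType`, and so on), and they compose the same way the C API does. A hypothetical composition using the new names, assuming a `cx: &CodegenCx` in scope:

// An illustrative aggregate, not from the diff: { i8*, [4 x i32], <2 x i64> }
let agg = cx.type_struct(&[
cx.type_ptr_to(cx.type_i8()),
cx.type_array(cx.type_i32(), 4),
cx.type_vector(cx.type_i64(), 2),
], false);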
@@ -129,7 +129,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}
}

fn variadic_func(
fn type_variadic_func(
&self,
args: &[&'ll Type],
ret: &'ll Type
@@ -140,7 +140,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}
}

fn struct_(
fn type_struct(
&self,
els: &[&'ll Type],
packed: bool
@@ -152,7 +152,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}
}

fn named_struct(&self, name: &str) -> &'ll Type {
fn type_named_struct(&self, name: &str) -> &'ll Type {
let name = SmallCStr::new(name);
unsafe {
llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr())
@@ -160,19 +160,19 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}

fn array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
fn type_array(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe {
llvm::LLVMRustArrayType(ty, len)
}
}

fn vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type {
unsafe {
llvm::LLVMVectorType(ty, len as c_uint)
}
}

fn kind(&self, ty: &'ll Type) -> TypeKind {
fn type_kind(&self, ty: &'ll Type) -> TypeKind {
unsafe {
llvm::LLVMRustGetTypeKind(ty)
}
@@ -185,8 +185,8 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}
}

fn ptr_to(&self, ty: &'ll Type) -> &'ll Type {
fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
assert_ne!(self.kind(ty), TypeKind::Function,
assert_ne!(self.type_kind(ty), TypeKind::Function,
"don't call ptr_to on function types, use ptr_to_llvm_type on FnType instead");
unsafe {
llvm::LLVMPointerType(ty, 0)
@@ -206,7 +206,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}
}

fn func_params(&self, ty: &'ll Type) -> Vec<&'ll Type> {
fn func_params_types(&self, ty: &'ll Type) -> Vec<&'ll Type> {
unsafe {
let n_args = llvm::LLVMCountParamTypes(ty) as usize;
let mut args = Vec::with_capacity(n_args);
@@ -217,7 +217,7 @@ impl TypeMethods for CodegenCx<'ll, 'tcx> {
}

fn float_width(&self, ty: &'ll Type) -> usize {
match self.kind(ty) {
match self.type_kind(ty) {
TypeKind::Float => 32,
TypeKind::Double => 64,
TypeKind::X86_FP80 => 80,
@@ -252,96 +252,100 @@ impl Type {
}

pub fn i8p_llcx(cx: &write::CodegenContext<'ll>, llcx: &'ll llvm::Context) -> &'ll Type {
cx.ptr_to(Type::i8_llcx(llcx))
cx.type_ptr_to(Type::i8_llcx(llcx))
}
}
impl CodegenCx<'ll, 'tcx> {
pub fn bool(&self) -> &'ll Type {
pub fn type_bool(&self) -> &'ll Type {
self.i8()
self.type_i8()
}

pub fn i8p(&self) -> &'ll Type {
pub fn type_i8p(&self) -> &'ll Type {
self.ptr_to(self.i8())
self.type_ptr_to(self.type_i8())
}

pub fn isize(&self) -> &'ll Type {
pub fn type_isize(&self) -> &'ll Type {
self.isize_ty
}

pub fn t_int(&self) -> &'ll Type {
pub fn type_int(&self) -> &'ll Type {
match &self.sess().target.target.target_c_int_width[..] {
"16" => self.i16(),
"16" => self.type_i16(),
"32" => self.i32(),
"32" => self.type_i32(),
"64" => self.i64(),
"64" => self.type_i64(),
width => bug!("Unsupported target_c_int_width: {}", width),
}
}
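Two details in this hunk: `type_bool` deliberately returns the `i8` type rather than `i1`, because that is the in-memory representation of `bool` (the `i1` form is only used for immediates, per the `trunc` seen earlier); and `type_int` picks whichever integer width matches the target's C `int`. Sketch, assuming a `cx`:

// target_c_int_width is "32" on common targets, so this is i32.
let c_int = cx.type_int();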

pub fn int_from_ty(
pub fn type_int_from_ty(
&self,
t: ast::IntTy
) -> &'ll Type {
match t {
ast::IntTy::Isize => self.isize_ty,
ast::IntTy::I8 => self.i8(),
ast::IntTy::I8 => self.type_i8(),
ast::IntTy::I16 => self.i16(),
ast::IntTy::I16 => self.type_i16(),
ast::IntTy::I32 => self.i32(),
ast::IntTy::I32 => self.type_i32(),
ast::IntTy::I64 => self.i64(),
ast::IntTy::I64 => self.type_i64(),
ast::IntTy::I128 => self.i128(),
ast::IntTy::I128 => self.type_i128(),
}
}

pub fn uint_from_ty(
pub fn type_uint_from_ty(
&self,
t: ast::UintTy
) -> &'ll Type {
match t {
ast::UintTy::Usize => self.isize_ty,
ast::UintTy::U8 => self.i8(),
ast::UintTy::U8 => self.type_i8(),
ast::UintTy::U16 => self.i16(),
ast::UintTy::U16 => self.type_i16(),
ast::UintTy::U32 => self.i32(),
ast::UintTy::U32 => self.type_i32(),
ast::UintTy::U64 => self.i64(),
ast::UintTy::U64 => self.type_i64(),
ast::UintTy::U128 => self.i128(),
ast::UintTy::U128 => self.type_i128(),
}
}

pub fn float_from_ty(
pub fn type_float_from_ty(
&self,
t: ast::FloatTy
) -> &'ll Type {
match t {
ast::FloatTy::F32 => self.f32(),
ast::FloatTy::F32 => self.type_f32(),
ast::FloatTy::F64 => self.f64(),
ast::FloatTy::F64 => self.type_f64(),
}
}

pub fn from_integer(&self, i: layout::Integer) -> &'ll Type {
pub fn type_from_integer(&self, i: layout::Integer) -> &'ll Type {
use rustc::ty::layout::Integer::*;
match i {
I8 => self.i8(),
I8 => self.type_i8(),
I16 => self.i16(),
I16 => self.type_i16(),
I32 => self.i32(),
I32 => self.type_i32(),
I64 => self.i64(),
I64 => self.type_i64(),
I128 => self.i128(),
I128 => self.type_i128(),
}
}

/// Return a LLVM type that has at most the required alignment,
/// as a conservative approximation for unknown pointee types.
pub fn pointee_for_abi_align(&self, align: Align) -> &'ll Type {
pub fn type_pointee_for_abi_align(&self, align: Align) -> &'ll Type {
// FIXME(eddyb) We could find a better approximation if ity.align < align.
let ity = layout::Integer::approximate_abi_align(self, align);
self.from_integer(ity)
self.type_from_integer(ity)
}

/// Return a LLVM type that has at most the required alignment,
/// and exactly the required size, as a best-effort padding array.
pub fn padding_filler(&self, size: Size, align: Align) -> &'ll Type {
pub fn type_padding_filler(
&self,
size: Size,
align: Align
) -> &'ll Type {
let unit = layout::Integer::approximate_abi_align(self, align);
let size = size.bytes();
let unit_size = unit.size().bytes();
assert_eq!(size % unit_size, 0);
self.array(self.from_integer(unit), size / unit_size)
self.type_array(self.type_from_integer(unit), size / unit_size)
}
}
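A worked example of `type_padding_filler`'s arithmetic: asking for 12 bytes of filler at 4-byte alignment approximates the alignment with the 4-byte integer unit (i32), checks that 12 % 4 == 0, and returns `[3 x i32]`. As a sketch, with `align_4` standing in for an assumed 4-byte `Align` value:

// 12 bytes / 4-byte unit = [3 x i32].
let fill = cx.type_padding_filler(Size::from_bytes(12), align_4);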

@@ -38,14 +38,14 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
(cx.sess().target.target.arch == "x86" ||
cx.sess().target.target.arch == "x86_64");
if use_x86_mmx {
return cx.x86_mmx()
return cx.type_x86_mmx()
} else {
let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
return cx.vector(element, count);
return cx.type_vector(element, count);
}
}
layout::Abi::ScalarPair(..) => {
return cx.struct_(&[
return cx.type_struct(&[
layout.scalar_pair_element_llvm_type(cx, 0, false),
layout.scalar_pair_element_llvm_type(cx, 1, false),
], false);
@@ -80,30 +80,30 @@ fn uncached_llvm_type<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,

match layout.fields {
layout::FieldPlacement::Union(_) => {
let fill = cx.padding_filler(layout.size, layout.align);
let fill = cx.type_padding_filler(layout.size, layout.align);
let packed = false;
match name {
None => {
cx.struct_(&[fill], packed)
cx.type_struct(&[fill], packed)
}
Some(ref name) => {
let llty = cx.named_struct(name);
let llty = cx.type_named_struct(name);
cx.set_struct_body(llty, &[fill], packed);
llty
}
}
}
layout::FieldPlacement::Array { count, .. } => {
cx.array(layout.field(cx, 0).llvm_type(cx), count)
cx.type_array(layout.field(cx, 0).llvm_type(cx), count)
}
layout::FieldPlacement::Arbitrary { .. } => {
match name {
None => {
let (llfields, packed) = struct_llfields(cx, layout);
cx.struct_(&llfields, packed)
cx.type_struct(&llfields, packed)
}
Some(ref name) => {
let llty = cx.named_struct(name);
let llty = cx.type_named_struct(name);
*defer = Some((llty, layout));
llty
}
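The `Union` arm above is deliberately opaque: union fields all overlap, so the LLVM type only needs the right size and alignment, which one padding filler inside a (possibly named) struct provides. The anonymous case boils down to:

// A union's LLVM type carries size and align, not field structure.
let fill = cx.type_padding_filler(layout.size, layout.align);
let llty = cx.type_struct(&[fill], false);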
@@ -137,7 +137,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
let padding = target_offset - offset;
let padding_align = prev_effective_align.min(effective_field_align);
assert_eq!(offset.abi_align(padding_align) + padding, target_offset);
result.push(cx.padding_filler(padding, padding_align));
result.push(cx.type_padding_filler(padding, padding_align));
debug!("    padding before: {:?}", padding);

result.push(field.llvm_type(cx));
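To make the padding arithmetic concrete: if the previous field ends at byte offset 5 and the next field must start at byte 8, then `padding` is 3 bytes; with a 1-byte filler alignment the assert checks 5 + 3 == 8, and the `type_padding_filler` call pushes a `[3 x i8]` entry between the two fields (see the filler sketch after the union hunk above).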
@@ -154,7 +154,7 @@ fn struct_llfields<'a, 'tcx>(cx: &CodegenCx<'a, 'tcx>,
assert_eq!(offset.abi_align(padding_align) + padding, layout.size);
debug!("struct_llfields: pad_bytes: {:?} offset: {:?} stride: {:?}",
padding, offset, layout.size);
result.push(cx.padding_filler(padding, padding_align));
result.push(cx.type_padding_filler(padding, padding_align));
assert_eq!(result.len(), 1 + field_count * 2);
} else {
debug!("struct_llfields: offset: {:?} stride: {:?}",
@@ -256,10 +256,10 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
let llty = match self.ty.sty {
ty::Ref(_, ty, _) |
ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
cx.ptr_to(cx.layout_of(ty).llvm_type(cx))
cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
}
ty::Adt(def, _) if def.is_box() => {
cx.ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
}
ty::FnPtr(sig) => {
let sig = cx.tcx.normalize_erasing_late_bound_regions(
@@ -317,7 +317,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type {
if let layout::Abi::Scalar(ref scalar) = self.abi {
if scalar.is_bool() {
return cx.i1();
return cx.type_i1();
}
}
self.llvm_type(cx)
@@ -326,17 +326,17 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>,
scalar: &layout::Scalar, offset: Size) -> &'a Type {
match scalar.value {
layout::Int(i, _) => cx.from_integer(i),
layout::Int(i, _) => cx.type_from_integer(i),
layout::Float(FloatTy::F32) => cx.f32(),
layout::Float(FloatTy::F32) => cx.type_f32(),
layout::Float(FloatTy::F64) => cx.f64(),
layout::Float(FloatTy::F64) => cx.type_f64(),
layout::Pointer => {
// If we know the alignment, pick something better than i8.
let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
cx.pointee_for_abi_align(pointee.align)
cx.type_pointee_for_abi_align(pointee.align)
} else {
cx.i8()
cx.type_i8()
};
cx.ptr_to(pointee)
cx.type_ptr_to(pointee)
}
}
}
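The `layout::Pointer` arm shows the payoff of `type_pointee_for_abi_align`: when the pointee's alignment is known, the pointer type gets a more informative pointee than plain `i8`. With a known 8-byte alignment, for instance, the approximating integer is i64, so the result is roughly (with `align_8` an assumed `Align` value):

// Known align 8: pointee approximated as i64, giving i64* rather than i8*.
let ptr = cx.type_ptr_to(cx.type_pointee_for_abi_align(align_8));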
@@ -370,7 +370,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyLayout<'tcx> {
// when immediate. We need to load/store `bool` as `i8` to avoid
// crippling LLVM optimizations or triggering other LLVM bugs with `i1`.
if immediate && scalar.is_bool() {
return cx.i1();
return cx.type_i1();
}

let offset = if index == 0 {