1
Fork 0

Traitification of common.rs methods

This commit is contained in:
Denis Merigoux 2018-08-28 17:03:46 +02:00 committed by Eduard-Mihai Burtescu
parent 3889c2dcfb
commit 8714e6bce6
25 changed files with 518 additions and 417 deletions

View file

@ -11,7 +11,6 @@
use llvm::{self, AttributePlace};
use base;
use builder::{Builder, MemFlags};
use common::C_usize;
use context::CodegenCx;
use mir::place::PlaceRef;
use mir::operand::OperandValue;
@ -19,7 +18,7 @@ use type_::Type;
use type_of::{LayoutLlvmExt, PointerKind};
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use rustc_target::abi::{HasDataLayout, LayoutOf, Size, TyLayout, Abi as LayoutAbi};
use rustc::ty::{self, Ty};
@ -245,7 +244,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
self.layout.align,
bx.pointercast(llscratch, Type::i8p(cx)),
scratch_align,
C_usize(cx, self.layout.size.bytes()),
CodegenCx::c_usize(cx, self.layout.size.bytes()),
MemFlags::empty());
bx.lifetime_end(llscratch, scratch_size);

View file

@ -9,14 +9,14 @@
// except according to those terms.
use llvm;
use common::*;
use context::CodegenCx;
use type_::Type;
use type_of::LayoutLlvmExt;
use builder::Builder;
use value::Value;
use rustc::hir;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use mir::place::PlaceRef;
use mir::operand::OperandValue;
@ -111,7 +111,7 @@ pub fn codegen_inline_asm(
let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
key.as_ptr() as *const c_char, key.len() as c_uint);
let val: &'ll Value = C_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
let val: &'ll Value = CodegenCx::c_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
llvm::LLVMSetMetadata(r, kind,
llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));

View file

@ -740,7 +740,7 @@ fn link_natively(sess: &Session,
// with some thread pool working in the background. It seems that no one
// currently knows a fix for this so in the meantime we're left with this...
info!("{:?}", &cmd);
let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
let retry_on_segfault = env::var("RUSTC_RETRY_LINKER_ON_SEGFAULT").is_ok();
let mut prog;
let mut i = 0;
loop {

View file

@ -45,8 +45,8 @@ use syntax::ext::hygiene::Mark;
use syntax_pos::MultiSpan;
use syntax_pos::symbol::Symbol;
use type_::Type;
use context::{is_pie_binary, get_reloc_model};
use common::{C_bytes_in_context, val_ty};
use context::{is_pie_binary, get_reloc_model, CodegenCx};
use interfaces::CommonMethods;
use jobserver::{Client, Acquired};
use rustc_demangle;
@ -884,10 +884,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext,
llcx: &llvm::Context,
llmod: &llvm::Module,
bitcode: Option<&[u8]>) {
let llconst = C_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llconst = CodegenCx::c_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
let llglobal = llvm::LLVMAddGlobal(
llmod,
val_ty(llconst),
CodegenCx::val_ty(llconst),
"rustc.embedded.module\0".as_ptr() as *const _,
);
llvm::LLVMSetInitializer(llglobal, llconst);
@ -904,10 +904,10 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext,
llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
let llconst = C_bytes_in_context(llcx, &[]);
let llconst = CodegenCx::c_bytes_in_context(llcx, &[]);
let llglobal = llvm::LLVMAddGlobal(
llmod,
val_ty(llconst),
CodegenCx::val_ty(llconst),
"rustc.embedded.cmdline\0".as_ptr() as *const _,
);
llvm::LLVMSetInitializer(llglobal, llconst);

View file

@ -53,9 +53,8 @@ use mir::place::PlaceRef;
use attributes;
use builder::{Builder, MemFlags};
use callee;
use common::{C_bool, C_bytes_in_context, C_usize};
use rustc_mir::monomorphize::item::DefPathBasedNames;
use common::{C_struct_in_context, C_array, val_ty, IntPredicate, RealPredicate};
use common::{IntPredicate, RealPredicate};
use consts;
use context::CodegenCx;
use debuginfo;
@ -75,7 +74,7 @@ use rustc_data_structures::small_c_str::SmallCStr;
use rustc_data_structures::sync::Lrc;
use rustc_data_structures::indexed_vec::Idx;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use std::any::Any;
use std::cmp;
@ -199,7 +198,7 @@ pub fn unsized_info(
let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
match (&source.sty, &target.sty) {
(&ty::Array(_, len), &ty::Slice(_)) => {
C_usize(cx, len.unwrap_usize(cx.tcx))
CodegenCx::c_usize(cx, len.unwrap_usize(cx.tcx))
}
(&ty::Dynamic(..), &ty::Dynamic(..)) => {
// For now, upcasts are limited to changes in marker
@ -351,8 +350,8 @@ fn cast_shift_rhs<'ll, F, G>(op: hir::BinOpKind,
{
// Shifts may have any size int on the rhs
if op.is_shift() {
let mut rhs_llty = val_ty(rhs);
let mut lhs_llty = val_ty(lhs);
let mut rhs_llty = CodegenCx::val_ty(rhs);
let mut lhs_llty = CodegenCx::val_ty(lhs);
if rhs_llty.kind() == TypeKind::Vector {
rhs_llty = rhs_llty.element_type()
}
@ -393,7 +392,7 @@ pub fn from_immediate<'a, 'll: 'a, 'tcx: 'll>(
bx: &Builder<'_ ,'ll, '_, &'ll Value>,
val: &'ll Value
) -> &'ll Value {
if val_ty(val) == Type::i1(bx.cx()) {
if CodegenCx::val_ty(val) == Type::i1(bx.cx()) {
bx.zext(val, Type::i8(bx.cx()))
} else {
val
@ -434,7 +433,7 @@ pub fn call_memcpy<'a, 'll: 'a, 'tcx: 'll>(
if flags.contains(MemFlags::NONTEMPORAL) {
// HACK(nox): This is inefficient but there is no nontemporal memcpy.
let val = bx.load(src, src_align);
let ptr = bx.pointercast(dst, val_ty(val).ptr_to());
let ptr = bx.pointercast(dst, CodegenCx::val_ty(val).ptr_to());
bx.store_with_flags(val, ptr, dst_align, flags);
return;
}
@ -460,7 +459,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>(
return;
}
call_memcpy(bx, dst, dst_align, src, src_align, C_usize(bx.cx(), size), flags);
call_memcpy(bx, dst, dst_align, src, src_align, CodegenCx::c_usize(bx.cx(), size), flags);
}
pub fn call_memset(
@ -474,7 +473,7 @@ pub fn call_memset(
let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
let volatile = C_bool(bx.cx, volatile);
let volatile = CodegenCx::c_bool(bx.cx, volatile);
bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
}
@ -649,12 +648,12 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
DeflateEncoder::new(&mut compressed, Compression::fast())
.write_all(&metadata.raw_data).unwrap();
let llmeta = C_bytes_in_context(metadata_llcx, &compressed);
let llconst = C_struct_in_context(metadata_llcx, &[llmeta], false);
let llmeta = CodegenCx::c_bytes_in_context(metadata_llcx, &compressed);
let llconst = CodegenCx::c_struct_in_context(metadata_llcx, &[llmeta], false);
let name = exported_symbols::metadata_symbol_name(tcx);
let buf = CString::new(name).unwrap();
let llglobal = unsafe {
llvm::LLVMAddGlobal(metadata_llmod, val_ty(llconst), buf.as_ptr())
llvm::LLVMAddGlobal(metadata_llmod, CodegenCx::val_ty(llconst), buf.as_ptr())
};
unsafe {
llvm::LLVMSetInitializer(llglobal, llconst);
@ -1140,7 +1139,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
// Run replace-all-uses-with for statics that need it
for &(old_g, new_g) in cx.statics_to_rauw.borrow().iter() {
unsafe {
let bitcast = llvm::LLVMConstPointerCast(new_g, val_ty(old_g));
let bitcast = llvm::LLVMConstPointerCast(new_g, CodegenCx::val_ty(old_g));
llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
llvm::LLVMDeleteGlobal(old_g);
}
@ -1151,11 +1150,11 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
if !cx.used_statics.borrow().is_empty() {
let name = const_cstr!("llvm.used");
let section = const_cstr!("llvm.metadata");
let array = C_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow());
let array = CodegenCx::c_array(Type::i8(&cx).ptr_to(), &*cx.used_statics.borrow());
unsafe {
let g = llvm::LLVMAddGlobal(cx.llmod,
val_ty(array),
CodegenCx::val_ty(array),
name.as_ptr());
llvm::LLVMSetInitializer(g, array);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::AppendingLinkage);

View file

@ -11,6 +11,7 @@
use llvm::{AtomicRmwBinOp, AtomicOrdering, SynchronizationScope, AsmDialect};
use llvm::{self, False, OperandBundleDef, BasicBlock};
use common::{self, *};
use context::CodegenCx;
use type_;
use value::Value;
use libc::{c_uint, c_char};
@ -18,7 +19,7 @@ use rustc::ty::TyCtxt;
use rustc::ty::layout::{Align, Size};
use rustc::session::{config, Session};
use rustc_data_structures::small_c_str::SmallCStr;
use interfaces::{BuilderMethods, Backend};
use interfaces::{BuilderMethods, Backend, CommonMethods};
use syntax;
use std::borrow::Cow;
@ -59,6 +60,7 @@ impl Backend for Builder<'a, 'll, 'tcx> {
type Value = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll type_::Type;
type Context = &'ll llvm::Context;
}
impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
@ -525,10 +527,10 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
}
unsafe {
let llty = val_ty(load);
let llty = CodegenCx::val_ty(load);
let v = [
C_uint_big(llty, range.start),
C_uint_big(llty, range.end)
CodegenCx::c_uint_big(llty, range.start),
CodegenCx::c_uint_big(llty, range.end)
];
llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
@ -575,7 +577,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
// *always* point to a metadata value of the integer 1.
//
// [1]: http://llvm.org/docs/LangRef.html#store-instruction
let one = C_i32(self.cx, 1);
let one = CodegenCx::c_i32(self.cx, 1);
let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
}
@ -758,7 +760,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
let argtys = inputs.iter().map(|v| {
debug!("Asm Input Type: {:?}", *v);
val_ty(*v)
CodegenCx::val_ty(*v)
}).collect::<Vec<_>>();
debug!("Asm Output Type: {:?}", output);
@ -857,11 +859,11 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
fn vector_splat(&self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
unsafe {
let elt_ty = val_ty(elt);
let elt_ty = CodegenCx::val_ty(elt);
let undef = llvm::LLVMGetUndef(type_::Type::vector(elt_ty, num_elts as u64));
let vec = self.insert_element(undef, elt, C_i32(self.cx, 0));
let vec = self.insert_element(undef, elt, CodegenCx::c_i32(self.cx, 0));
let vec_i32_ty = type_::Type::vector(type_::Type::i32(self.cx), num_elts as u64);
self.shuffle_vector(vec, undef, C_null(vec_i32_ty))
self.shuffle_vector(vec, undef, CodegenCx::c_null(vec_i32_ty))
}
}
@ -1137,8 +1139,8 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
fn check_store<'b>(&self,
val: &'ll Value,
ptr: &'ll Value) -> &'ll Value {
let dest_ptr_ty = val_ty(ptr);
let stored_ty = val_ty(val);
let dest_ptr_ty = CodegenCx::val_ty(ptr);
let stored_ty = CodegenCx::val_ty(val);
let stored_ptr_ty = stored_ty.ptr_to();
assert_eq!(dest_ptr_ty.kind(), llvm::TypeKind::Pointer);
@ -1158,7 +1160,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
typ: &str,
llfn: &'ll Value,
args: &'b [&'ll Value]) -> Cow<'b, [&'ll Value]> {
let mut fn_ty = val_ty(llfn);
let mut fn_ty = CodegenCx::val_ty(llfn);
// Strip off pointers
while fn_ty.kind() == llvm::TypeKind::Pointer {
fn_ty = fn_ty.element_type();
@ -1170,7 +1172,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
let param_tys = fn_ty.func_params();
let all_args_match = param_tys.iter()
.zip(args.iter().map(|&v| val_ty(v)))
.zip(args.iter().map(|&v| CodegenCx::val_ty(v)))
.all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);
if all_args_match {
@ -1181,7 +1183,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
.zip(args.iter())
.enumerate()
.map(|(i, (expected_ty, &actual_val))| {
let actual_ty = val_ty(actual_val);
let actual_ty = CodegenCx::val_ty(actual_val);
if expected_ty != actual_ty {
debug!("Type mismatch in function call of {:?}. \
Expected {:?} for param {}, got {:?}; injecting bitcast",
@ -1225,7 +1227,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
let ptr = self.pointercast(ptr, type_::Type::i8p(self.cx));
self.call(lifetime_intrinsic, &[C_u64(self.cx, size), ptr], None);
self.call(lifetime_intrinsic, &[CodegenCx::c_u64(self.cx, size), ptr], None);
}
fn call(&self, llfn: &'ll Value, args: &[&'ll Value],

View file

@ -15,13 +15,14 @@
//! closure.
use attributes;
use common::{self, CodegenCx};
use common::{CodegenCx};
use consts;
use declare;
use llvm;
use monomorphize::Instance;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::CommonMethods;
use rustc::hir::def_id::DefId;
use rustc::ty::{self, TypeFoldable};
@ -83,7 +84,7 @@ pub fn get_fn(
// This can occur on either a crate-local or crate-external
// reference. It also occurs when testing libcore and in some
// other weird situations. Annoying.
if common::val_ty(llfn) != llptrty {
if CodegenCx::val_ty(llfn) != llptrty {
debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
consts::ptrcast(llfn, llptrty)
} else {
@ -92,7 +93,7 @@ pub fn get_fn(
}
} else {
let llfn = declare::declare_fn(cx, &sym, sig);
assert_eq!(common::val_ty(llfn), llptrty);
assert_eq!(CodegenCx::val_ty(llfn), llptrty);
debug!("get_fn: not casting pointer!");
if instance.def.is_inline(tcx) {

View file

@ -13,7 +13,7 @@
//! Code that is useful in various codegen modules.
use llvm::{self, TypeKind};
use llvm::{True, False, Bool};
use llvm::{True, False, Bool, BasicBlock};
use rustc::hir::def_id::DefId;
use rustc::middle::lang_items::LangItem;
use abi;
@ -24,6 +24,7 @@ use declare;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::{Backend, CommonMethods};
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::layout::{HasDataLayout, LayoutOf};
@ -192,225 +193,241 @@ impl Funclet<'ll> {
}
}
pub fn val_ty(v: &'ll Value) -> &'ll Type {
unsafe {
llvm::LLVMTypeOf(v)
}
impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> {
type Value = &'ll Value;
type BasicBlock = &'ll BasicBlock;
type Type = &'ll Type;
type Context = &'ll llvm::Context;
}
// LLVM constant constructors.
pub fn C_null(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstNull(t)
}
}
pub fn C_undef(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMGetUndef(t)
}
}
pub fn C_int(t: &'ll Type, i: i64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i as u64, True)
}
}
pub fn C_uint(t: &'ll Type, i: u64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i, False)
}
}
pub fn C_uint_big(t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
pub fn C_bool(cx: &CodegenCx<'ll, '_>, val: bool) -> &'ll Value {
C_uint(Type::i1(cx), val as u64)
}
pub fn C_i32(cx: &CodegenCx<'ll, '_>, i: i32) -> &'ll Value {
C_int(Type::i32(cx), i as i64)
}
pub fn C_u32(cx: &CodegenCx<'ll, '_>, i: u32) -> &'ll Value {
C_uint(Type::i32(cx), i as u64)
}
pub fn C_u64(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
C_uint(Type::i64(cx), i)
}
pub fn C_usize(cx: &CodegenCx<'ll, '_>, i: u64) -> &'ll Value {
let bit_size = cx.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
assert!(i < (1<<bit_size));
impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> {
fn val_ty(v: &'ll Value) -> &'ll Type {
unsafe {
llvm::LLVMTypeOf(v)
}
}
C_uint(cx.isize_ty, i)
}
// LLVM constant constructors.
fn c_null(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMConstNull(t)
}
}
pub fn C_u8(cx: &CodegenCx<'ll, '_>, i: u8) -> &'ll Value {
C_uint(Type::i8(cx), i as u64)
}
fn c_undef(t: &'ll Type) -> &'ll Value {
unsafe {
llvm::LLVMGetUndef(t)
}
}
fn c_int(t: &'ll Type, i: i64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i as u64, True)
}
}
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
pub fn C_cstr(
cx: &CodegenCx<'ll, '_>,
s: LocalInternedString,
null_terminated: bool,
) -> &'ll Value {
unsafe {
if let Some(&llval) = cx.const_cstr_cache.borrow().get(&s) {
return llval;
fn c_uint(t: &'ll Type, i: u64) -> &'ll Value {
unsafe {
llvm::LLVMConstInt(t, i, False)
}
}
fn c_uint_big(t: &'ll Type, u: u128) -> &'ll Value {
unsafe {
let words = [u as u64, (u >> 64) as u64];
llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
}
}
fn c_bool(&self, val: bool) -> &'ll Value {
Self::c_uint(Type::i1(&self), val as u64)
}
fn c_i32(&self, i: i32) -> &'ll Value {
Self::c_int(Type::i32(&self), i as i64)
}
fn c_u32(&self, i: u32) -> &'ll Value {
Self::c_uint(Type::i32(&self), i as u64)
}
fn c_u64(&self, i: u64) -> &'ll Value {
Self::c_uint(Type::i64(&self), i)
}
fn c_usize(&self, i: u64) -> &'ll Value {
let bit_size = self.data_layout().pointer_size.bits();
if bit_size < 64 {
// make sure it doesn't overflow
assert!(i < (1<<bit_size));
}
let sc = llvm::LLVMConstStringInContext(cx.llcx,
s.as_ptr() as *const c_char,
s.len() as c_uint,
!null_terminated as Bool);
let sym = cx.generate_local_symbol_name("str");
let g = declare::define_global(cx, &sym[..], val_ty(sc)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
cx.const_cstr_cache.borrow_mut().insert(s, g);
g
Self::c_uint(&self.isize_ty, i)
}
}
// NB: Do not use `do_spill_noroot` to make this into a constant string, or
// you will be kicked off fast isel. See issue #4352 for an example of this.
pub fn C_str_slice(cx: &CodegenCx<'ll, '_>, s: LocalInternedString) -> &'ll Value {
let len = s.len();
let cs = consts::ptrcast(C_cstr(cx, s, false),
cx.layout_of(cx.tcx.mk_str()).llvm_type(cx).ptr_to());
C_fat_ptr(cx, cs, C_usize(cx, len as u64))
}
pub fn C_fat_ptr(cx: &CodegenCx<'ll, '_>, ptr: &'ll Value, meta: &'ll Value) -> &'ll Value {
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
C_struct(cx, &[ptr, meta], false)
}
pub fn C_struct(cx: &CodegenCx<'ll, '_>, elts: &[&'ll Value], packed: bool) -> &'ll Value {
C_struct_in_context(cx.llcx, elts, packed)
}
pub fn C_struct_in_context(
llcx: &'ll llvm::Context,
elts: &[&'ll Value],
packed: bool,
) -> &'ll Value {
unsafe {
llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
fn c_u8(&self, i: u8) -> &'ll Value {
Self::c_uint(Type::i8(&self), i as u64)
}
}
pub fn C_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint);
// This is a 'c-like' raw string, which differs from
// our boxed-and-length-annotated strings.
fn c_cstr(
&self,
s: LocalInternedString,
null_terminated: bool,
) -> &'ll Value {
unsafe {
if let Some(&llval) = &self.const_cstr_cache.borrow().get(&s) {
return llval;
}
let sc = llvm::LLVMConstStringInContext(&self.llcx,
s.as_ptr() as *const c_char,
s.len() as c_uint,
!null_terminated as Bool);
let sym = &self.generate_local_symbol_name("str");
let g = declare::define_global(&self, &sym[..], Self::val_ty(sc)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", sym);
});
llvm::LLVMSetInitializer(g, sc);
llvm::LLVMSetGlobalConstant(g, True);
llvm::LLVMRustSetLinkage(g, llvm::Linkage::InternalLinkage);
&self.const_cstr_cache.borrow_mut().insert(s, g);
g
}
}
}
pub fn C_vector(elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
// NB: Do not use `do_spill_noroot` to make this into a constant string, or
// you will be kicked off fast isel. See issue #4352 for an example of this.
fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value {
let len = s.len();
let cs = consts::ptrcast(&self.c_cstr(s, false),
&self.layout_of(&self.tcx.mk_str()).llvm_type(&self).ptr_to());
&self.c_fat_ptr(cs, &self.c_usize(len as u64))
}
}
pub fn C_bytes(cx: &CodegenCx<'ll, '_>, bytes: &[u8]) -> &'ll Value {
C_bytes_in_context(cx.llcx, bytes)
}
pub fn C_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
fn c_fat_ptr(
&self,
ptr: &'ll Value,
meta: &'ll Value
) -> &'ll Value {
assert_eq!(abi::FAT_PTR_ADDR, 0);
assert_eq!(abi::FAT_PTR_EXTRA, 1);
&self.c_struct(&[ptr, meta], false)
}
}
pub fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value {
unsafe {
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, idx={}, r={:?})",
v, idx, r);
r
fn c_struct(
&self,
elts: &[&'ll Value],
packed: bool
) -> &'ll Value {
Self::c_struct_in_context(&self.llcx, elts, packed)
}
}
pub fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> {
unsafe {
if is_const_real(v) {
let mut loses_info: llvm::Bool = ::std::mem::uninitialized();
let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info);
let loses_info = if loses_info == 1 { true } else { false };
Some((r, loses_info))
} else {
None
fn c_struct_in_context(
llcx: &'a llvm::Context,
elts: &[&'a Value],
packed: bool,
) -> &'a Value {
unsafe {
llvm::LLVMConstStructInContext(llcx,
elts.as_ptr(), elts.len() as c_uint,
packed as Bool)
}
}
fn c_array(ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint);
}
}
fn c_vector(elts: &[&'ll Value]) -> &'ll Value {
unsafe {
return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
}
}
fn c_bytes(&self, bytes: &[u8]) -> &'ll Value {
Self::c_bytes_in_context(&self.llcx, bytes)
}
fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
unsafe {
let ptr = bytes.as_ptr() as *const c_char;
return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
}
}
fn const_get_elt(v: &'ll Value, idx: u64) -> &'ll Value {
unsafe {
assert_eq!(idx as c_uint as u64, idx);
let us = &[idx as c_uint];
let r = llvm::LLVMConstExtractValue(v, us.as_ptr(), us.len() as c_uint);
debug!("const_get_elt(v={:?}, idx={}, r={:?})",
v, idx, r);
r
}
}
fn const_get_real(v: &'ll Value) -> Option<(f64, bool)> {
unsafe {
if Self::is_const_real(v) {
let mut loses_info: llvm::Bool = ::std::mem::uninitialized();
let r = llvm::LLVMConstRealGetDouble(v, &mut loses_info);
let loses_info = if loses_info == 1 { true } else { false };
Some((r, loses_info))
} else {
None
}
}
}
fn const_to_uint(v: &'ll Value) -> u64 {
unsafe {
llvm::LLVMConstIntGetZExtValue(v)
}
}
fn is_const_integral(v: &'ll Value) -> bool {
unsafe {
llvm::LLVMIsAConstantInt(v).is_some()
}
}
fn is_const_real(v: &'ll Value) -> bool {
unsafe {
llvm::LLVMIsAConstantFP(v).is_some()
}
}
fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option<u128> {
unsafe {
if Self::is_const_integral(v) {
let (mut lo, mut hi) = (0u64, 0u64);
let success = llvm::LLVMRustConstInt128Get(v, sign_ext,
&mut hi, &mut lo);
if success {
Some(hi_lo_to_u128(lo, hi))
} else {
None
}
} else {
None
}
}
}
}
pub fn const_to_uint(v: &'ll Value) -> u64 {
unsafe {
llvm::LLVMConstIntGetZExtValue(v)
}
}
pub fn is_const_integral(v: &'ll Value) -> bool {
unsafe {
llvm::LLVMIsAConstantInt(v).is_some()
}
}
pub fn is_const_real(v: &'ll Value) -> bool {
unsafe {
llvm::LLVMIsAConstantFP(v).is_some()
}
}
#[inline]
fn hi_lo_to_u128(lo: u64, hi: u64) -> u128 {
((hi as u128) << 64) | (lo as u128)
}
pub fn const_to_opt_u128(v: &'ll Value, sign_ext: bool) -> Option<u128> {
unsafe {
if is_const_integral(v) {
let (mut lo, mut hi) = (0u64, 0u64);
let success = llvm::LLVMRustConstInt128Get(v, sign_ext,
&mut hi, &mut lo);
if success {
Some(hi_lo_to_u128(lo, hi))
} else {
None
}
} else {
None
}
}
}
pub fn langcall(tcx: TyCtxt,
span: Option<Span>,
msg: &str,
@ -456,7 +473,7 @@ pub fn build_unchecked_rshift(
}
fn shift_mask_rhs(bx: &Builder<'a, 'll, 'tcx>, rhs: &'ll Value) -> &'ll Value {
let rhs_llty = val_ty(rhs);
let rhs_llty = CodegenCx::val_ty(rhs);
bx.and(rhs, shift_mask_val(bx, rhs_llty, rhs_llty, false))
}
@ -472,9 +489,9 @@ pub fn shift_mask_val(
// i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
let val = llty.int_width() - 1;
if invert {
C_int(mask_llty, !val as i64)
CodegenCx::c_int(mask_llty, !val as i64)
} else {
C_uint(mask_llty, val)
CodegenCx::c_uint(mask_llty, val)
}
},
TypeKind::Vector => {

View file

@ -15,7 +15,7 @@ use rustc::hir::Node;
use debuginfo;
use base;
use monomorphize::MonoItem;
use common::{CodegenCx, val_ty};
use common::CodegenCx;
use declare;
use monomorphize::Instance;
use syntax_pos::Span;
@ -24,6 +24,7 @@ use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use rustc::ty::{self, Ty};
use interfaces::CommonMethods;
use rustc::ty::layout::{Align, LayoutOf};
@ -72,13 +73,14 @@ pub fn addr_of_mut(
let gv = match kind {
Some(kind) if !cx.tcx.sess.fewer_names() => {
let name = cx.generate_local_symbol_name(kind);
let gv = declare::define_global(cx, &name[..], val_ty(cv)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
let gv = declare::define_global(cx, &name[..],
CodegenCx::val_ty(cv)).unwrap_or_else(||{
bug!("symbol `{}` is already defined", name);
});
llvm::LLVMRustSetLinkage(gv, llvm::Linkage::PrivateLinkage);
gv
},
_ => declare::define_private_global(cx, val_ty(cv)),
_ => declare::define_private_global(cx, CodegenCx::val_ty(cv)),
};
llvm::LLVMSetInitializer(gv, cv);
set_global_alignment(cx, gv, align);
@ -310,7 +312,7 @@ pub fn codegen_static<'a, 'tcx>(
// boolean SSA values are i1, but they have to be stored in i8 slots,
// otherwise some LLVM optimization passes don't work as expected
let mut val_llty = val_ty(v);
let mut val_llty = CodegenCx::val_ty(v);
let v = if val_llty == Type::i1(cx) {
val_llty = Type::i8(cx);
llvm::LLVMConstZExt(v, val_llty)

View file

@ -12,13 +12,13 @@
use llvm;
use common::{C_bytes, CodegenCx, C_i32};
use common::CodegenCx;
use builder::Builder;
use declare;
use rustc::session::config::DebugInfo;
use type_::Type;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use syntax::attr;
@ -30,7 +30,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx);
// Load just the first byte as that's all that's necessary to force
// LLVM to keep around the reference to the global.
let indices = [C_i32(bx.cx, 0), C_i32(bx.cx, 0)];
let indices = [CodegenCx::c_i32(bx.cx, 0), CodegenCx::c_i32(bx.cx, 0)];
let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
let volative_load_instruction = bx.volatile_load(element);
unsafe {
@ -64,7 +64,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
bug!("symbol `{}` is already defined", section_var_name)
});
llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
llvm::LLVMSetInitializer(section_var, C_bytes(cx, section_contents));
llvm::LLVMSetInitializer(section_var, CodegenCx::c_bytes(cx, section_contents));
llvm::LLVMSetGlobalConstant(section_var, llvm::True);
llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);

View file

@ -18,6 +18,7 @@ use super::namespace::mangled_name_of_instance;
use super::type_names::compute_debuginfo_type_name;
use super::{CrateDebugContext};
use abi;
use interfaces::CommonMethods;
use value::Value;
use llvm;
@ -32,7 +33,7 @@ use rustc::hir::def_id::{DefId, CrateNum, LOCAL_CRATE};
use rustc::ich::NodeIdHashingMode;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc::ty::Instance;
use common::{CodegenCx, C_u64};
use common::CodegenCx;
use rustc::ty::{self, AdtKind, ParamEnv, Ty, TyCtxt};
use rustc::ty::layout::{self, Align, HasDataLayout, Integer, IntegerExt, LayoutOf,
PrimitiveExt, Size, TyLayout};
@ -1810,7 +1811,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
member_description.offset.bits(),
match member_description.discriminant {
None => None,
Some(value) => Some(C_u64(cx, value)),
Some(value) => Some(cx.c_u64(value)),
},
member_description.flags,
member_description.type_metadata))

View file

@ -16,11 +16,12 @@ use std;
use builder::Builder;
use common::*;
use context::CodegenCx;
use meth;
use rustc::ty::layout::LayoutOf;
use rustc::ty::{self, Ty};
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
pub fn size_and_align_of_dst(
bx: &Builder<'_, 'll, 'tcx>,
@ -33,8 +34,8 @@ pub fn size_and_align_of_dst(
let (size, align) = bx.cx.size_and_align_of(t);
debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
t, info, size, align);
let size = C_usize(bx.cx, size.bytes());
let align = C_usize(bx.cx, align.abi());
let size = CodegenCx::c_usize(bx.cx, size.bytes());
let align = CodegenCx::c_usize(bx.cx, align.abi());
return (size, align);
}
match t.sty {
@ -48,8 +49,8 @@ pub fn size_and_align_of_dst(
// The info in this case is the length of the str, so the size is that
// times the unit size.
let (size, align) = bx.cx.size_and_align_of(unit);
(bx.mul(info.unwrap(), C_usize(bx.cx, size.bytes())),
C_usize(bx.cx, align.abi()))
(bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx, size.bytes())),
CodegenCx::c_usize(bx.cx, align.abi()))
}
_ => {
let cx = bx.cx;
@ -65,8 +66,8 @@ pub fn size_and_align_of_dst(
let sized_align = layout.align.abi();
debug!("DST {} statically sized prefix size: {} align: {}",
t, sized_size, sized_align);
let sized_size = C_usize(cx, sized_size);
let sized_align = C_usize(cx, sized_align);
let sized_size = CodegenCx::c_usize(cx, sized_size);
let sized_align = CodegenCx::c_usize(cx, sized_align);
// Recurse to get the size of the dynamically sized field (must be
// the last field).
@ -92,12 +93,12 @@ pub fn size_and_align_of_dst(
// Choose max of two known alignments (combined value must
// be aligned according to more restrictive of the two).
let align = match (const_to_opt_u128(sized_align, false),
const_to_opt_u128(unsized_align, false)) {
let align = match (CodegenCx::const_to_opt_u128(sized_align, false),
CodegenCx::const_to_opt_u128(unsized_align, false)) {
(Some(sized_align), Some(unsized_align)) => {
// If both alignments are constant, (the sized_align should always be), then
// pick the correct alignment statically.
C_usize(cx, std::cmp::max(sized_align, unsized_align) as u64)
CodegenCx::c_usize(cx, std::cmp::max(sized_align, unsized_align) as u64)
}
_ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
sized_align,
@ -115,7 +116,7 @@ pub fn size_and_align_of_dst(
//
// `(size + (align-1)) & -align`
let addend = bx.sub(align, C_usize(bx.cx, 1));
let addend = bx.sub(align, CodegenCx::c_usize(bx.cx, 1));
let size = bx.and(bx.add(size, addend), bx.neg(align));
(size, align)

View file

@ -12,4 +12,5 @@ pub trait Backend {
type Value;
type BasicBlock;
type Type;
type Context;
}

View file

@ -0,0 +1,61 @@
// Copyright 2018 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use super::Backend;
use syntax::symbol::LocalInternedString;
/// Backend-agnostic methods for building and inspecting constant values.
///
/// These abstract the free functions formerly in `common.rs` (`C_null`,
/// `C_usize`, `val_ty`, …) behind a trait, using the associated
/// `Value`/`Type`/`Context` types from [`Backend`]. See the
/// `CodegenCx<'ll, 'tcx, &'ll Value>` impl for the LLVM implementation
/// that each of these comments is grounded in.
pub trait CommonMethods : Backend {
    /// Returns the type of a value (LLVM impl: `LLVMTypeOf`).
    fn val_ty(v: Self::Value) -> Self::Type;

    // Constant constructors

    /// All-zeros/null constant of type `t` (LLVM impl: `LLVMConstNull`).
    fn c_null(t: Self::Type) -> Self::Value;
    /// Undefined value of type `t` (LLVM impl: `LLVMGetUndef`).
    fn c_undef(t: Self::Type) -> Self::Value;
    /// Signed integer constant of type `t`.
    fn c_int(t: Self::Type, i: i64) -> Self::Value;
    /// Unsigned integer constant of type `t`.
    fn c_uint(t: Self::Type, i: u64) -> Self::Value;
    /// Unsigned 128-bit integer constant of type `t` (built from two
    /// 64-bit words in the LLVM impl).
    fn c_uint_big(t: Self::Type, u: u128) -> Self::Value;
    /// Boolean constant (`i1` in the LLVM impl).
    fn c_bool(&self, val: bool) -> Self::Value;
    /// 32-bit signed integer constant.
    fn c_i32(&self, i: i32) -> Self::Value;
    /// 32-bit unsigned integer constant.
    fn c_u32(&self, i: u32) -> Self::Value;
    /// 64-bit unsigned integer constant.
    fn c_u64(&self, i: u64) -> Self::Value;
    /// Pointer-sized unsigned integer constant; the LLVM impl asserts
    /// `i` fits in the target's pointer width before building it.
    fn c_usize(&self, i: u64) -> Self::Value;
    /// 8-bit unsigned integer constant.
    fn c_u8(&self, i: u8) -> Self::Value;
    /// C-like raw string constant (as opposed to a boxed,
    /// length-annotated Rust string). The LLVM impl interns the result
    /// in a per-context cache (`const_cstr_cache`).
    fn c_cstr(
        &self,
        s: LocalInternedString,
        null_terminated: bool,
    ) -> Self::Value;
    /// `&str`-slice constant: a fat pointer pairing the string data
    /// with its length.
    fn c_str_slice(&self, s: LocalInternedString) -> Self::Value;
    /// Fat pointer built as a two-field struct of `(ptr, meta)`; the
    /// LLVM impl asserts the `FAT_PTR_ADDR`/`FAT_PTR_EXTRA` field
    /// ordering.
    fn c_fat_ptr(
        &self,
        ptr: Self::Value,
        meta: Self::Value
    ) -> Self::Value;
    /// Struct constant in this codegen context's LLVM context.
    fn c_struct(
        &self,
        elts: &[Self::Value],
        packed: bool
    ) -> Self::Value;
    /// Struct constant built in an explicitly supplied context — used
    /// where no `CodegenCx` exists yet (e.g. metadata/bitcode embedding).
    fn c_struct_in_context(
        llcx: Self::Context,
        elts: &[Self::Value],
        packed: bool,
    ) -> Self::Value;
    /// Array constant of element type `ty`.
    fn c_array(ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
    /// Vector constant from the given elements.
    fn c_vector(elts: &[Self::Value]) -> Self::Value;
    /// Byte-string constant in this codegen context's LLVM context.
    fn c_bytes(&self, bytes: &[u8]) -> Self::Value;
    /// Byte-string constant built in an explicitly supplied context.
    fn c_bytes_in_context(llcx: Self::Context, bytes: &[u8]) -> Self::Value;

    // Constant inspection

    /// Extracts the constant element of `v` at index `idx`
    /// (LLVM impl: `LLVMConstExtractValue`).
    fn const_get_elt(v: Self::Value, idx: u64) -> Self::Value;
    /// If `v` is a constant float, returns `Some((value, loses_info))`
    /// where `loses_info` reports precision loss in the `f64`
    /// conversion; `None` otherwise.
    fn const_get_real(v: Self::Value) -> Option<(f64, bool)>;
    /// Zero-extended value of a constant integer.
    fn const_to_uint(v: Self::Value) -> u64;
    /// Whether `v` is a constant integer.
    fn is_const_integral(v: Self::Value) -> bool;
    /// Whether `v` is a constant floating-point value.
    fn is_const_real(v: Self::Value) -> bool;
    /// Value of a constant integer as a `u128`, sign-extending when
    /// `sign_ext` is set; `None` if `v` is not a constant integer or
    /// the extraction fails.
    fn const_to_opt_u128(v: Self::Value, sign_ext: bool) -> Option<u128>;
}

View file

@ -10,6 +10,8 @@
mod builder;
mod backend;
mod common;
pub use self::builder::BuilderMethods;
pub use self::backend::Backend;
pub use self::common::CommonMethods;

View file

@ -19,6 +19,7 @@ use mir::place::PlaceRef;
use mir::operand::{OperandRef, OperandValue};
use base::*;
use common::*;
use context::CodegenCx;
use declare;
use glue;
use type_::Type;
@ -31,7 +32,7 @@ use syntax::symbol::Symbol;
use builder::Builder;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use rustc::session::Session;
use syntax_pos::Span;
@ -126,11 +127,11 @@ pub fn codegen_intrinsic_call(
},
"likely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
bx.call(expect, &[args[0].immediate(), C_bool(cx, true)], None)
bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, true)], None)
}
"unlikely" => {
let expect = cx.get_intrinsic(&("llvm.expect.i1"));
bx.call(expect, &[args[0].immediate(), C_bool(cx, false)], None)
bx.call(expect, &[args[0].immediate(), CodegenCx::c_bool(cx, false)], None)
}
"try" => {
try_intrinsic(bx, cx,
@ -146,7 +147,7 @@ pub fn codegen_intrinsic_call(
}
"size_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.size_of(tp_ty).bytes())
CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes())
}
"size_of_val" => {
let tp_ty = substs.type_at(0);
@ -155,12 +156,12 @@ pub fn codegen_intrinsic_call(
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llsize
} else {
C_usize(cx, cx.size_of(tp_ty).bytes())
CodegenCx::c_usize(cx, cx.size_of(tp_ty).bytes())
}
}
"min_align_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.align_of(tp_ty).abi())
CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi())
}
"min_align_of_val" => {
let tp_ty = substs.type_at(0);
@ -169,20 +170,20 @@ pub fn codegen_intrinsic_call(
glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
llalign
} else {
C_usize(cx, cx.align_of(tp_ty).abi())
CodegenCx::c_usize(cx, cx.align_of(tp_ty).abi())
}
}
"pref_align_of" => {
let tp_ty = substs.type_at(0);
C_usize(cx, cx.align_of(tp_ty).pref())
CodegenCx::c_usize(cx, cx.align_of(tp_ty).pref())
}
"type_name" => {
let tp_ty = substs.type_at(0);
let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
C_str_slice(cx, ty_name)
CodegenCx::c_str_slice(cx, ty_name)
}
"type_id" => {
C_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
CodegenCx::c_u64(cx, cx.tcx.type_id_hash(substs.type_at(0)))
}
"init" => {
let ty = substs.type_at(0);
@ -191,7 +192,14 @@ pub fn codegen_intrinsic_call(
// If we store a zero constant, LLVM will drown in vreg allocation for large data
// structures, and the generated code will be awful. (A telltale sign of this is
// large quantities of `mov [byte ptr foo],0` in the generated code.)
memset_intrinsic(bx, false, ty, llresult, C_u8(cx, 0), C_usize(cx, 1));
memset_intrinsic(
bx,
false,
ty,
llresult,
CodegenCx::c_u8(cx, 0),
CodegenCx::c_usize(cx, 1)
);
}
return;
}
@ -202,7 +210,7 @@ pub fn codegen_intrinsic_call(
"needs_drop" => {
let tp_ty = substs.type_at(0);
C_bool(cx, bx.cx.type_needs_drop(tp_ty))
CodegenCx::c_bool(cx, bx.cx.type_needs_drop(tp_ty))
}
"offset" => {
let ptr = args[0].immediate();
@ -279,9 +287,9 @@ pub fn codegen_intrinsic_call(
};
bx.call(expect, &[
args[0].immediate(),
C_i32(cx, rw),
CodegenCx::c_i32(cx, rw),
args[1].immediate(),
C_i32(cx, cache_type)
CodegenCx::c_i32(cx, cache_type)
], None)
},
"ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
@ -294,12 +302,12 @@ pub fn codegen_intrinsic_call(
Some((width, signed)) =>
match name {
"ctlz" | "cttz" => {
let y = C_bool(bx.cx, false);
let y = CodegenCx::c_bool(bx.cx, false);
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
bx.call(llfn, &[args[0].immediate(), y], None)
}
"ctlz_nonzero" | "cttz_nonzero" => {
let y = C_bool(bx.cx, true);
let y = CodegenCx::c_bool(bx.cx, true);
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
let llfn = cx.get_intrinsic(llvm_name);
bx.call(llfn, &[args[0].immediate(), y], None)
@ -380,7 +388,7 @@ pub fn codegen_intrinsic_call(
} else {
// rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
// rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
let width = C_uint(Type::ix(cx, width), width);
let width = CodegenCx::c_uint(Type::ix(cx, width), width);
let shift = bx.urem(raw_shift, width);
let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
@ -717,7 +725,7 @@ fn copy_intrinsic(
) -> &'ll Value {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
let size = CodegenCx::c_usize(cx, size.bytes());
let align = align.abi();
let dst_ptr = bx.pointercast(dst, Type::i8p(cx));
let src_ptr = bx.pointercast(src, Type::i8p(cx));
@ -738,8 +746,8 @@ fn memset_intrinsic(
) -> &'ll Value {
let cx = bx.cx;
let (size, align) = cx.size_and_align_of(ty);
let size = C_usize(cx, size.bytes());
let align = C_i32(cx, align.abi() as i32);
let size = CodegenCx::c_usize(cx, size.bytes());
let align = CodegenCx::c_i32(cx, align.abi() as i32);
let dst = bx.pointercast(dst, Type::i8p(cx));
call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
}
@ -755,7 +763,7 @@ fn try_intrinsic(
if bx.sess().no_landing_pads() {
bx.call(func, &[data], None);
let ptr_align = bx.tcx().data_layout.pointer_align;
bx.store(C_null(Type::i8p(&bx.cx)), dest, ptr_align);
bx.store(CodegenCx::c_null(Type::i8p(&bx.cx)), dest, ptr_align);
} else if wants_msvc_seh(bx.sess()) {
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
} else {
@ -836,7 +844,7 @@ fn codegen_msvc_try(
let slot = bx.alloca(i64p, "slot", ptr_align);
bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
normal.ret(C_i32(cx, 0));
normal.ret(CodegenCx::c_i32(cx, 0));
let cs = catchswitch.catch_switch(None, None, 1);
catchswitch.add_handler(cs, catchpad.llbb());
@ -846,19 +854,19 @@ fn codegen_msvc_try(
Some(did) => ::consts::get_static(cx, did),
None => bug!("msvc_try_filter not defined"),
};
let tok = catchpad.catch_pad(cs, &[tydesc, C_i32(cx, 0), slot]);
let tok = catchpad.catch_pad(cs, &[tydesc, CodegenCx::c_i32(cx, 0), slot]);
let addr = catchpad.load(slot, ptr_align);
let i64_align = bx.tcx().data_layout.i64_align;
let arg1 = catchpad.load(addr, i64_align);
let val1 = C_i32(cx, 1);
let val1 = CodegenCx::c_i32(cx, 1);
let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
let local_ptr = catchpad.bitcast(local_ptr, i64p);
catchpad.store(arg1, local_ptr, i64_align);
catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
catchpad.catch_ret(tok, caught.llbb());
caught.ret(C_i32(cx, 1));
caught.ret(CodegenCx::c_i32(cx, 1));
});
// Note that no invoke is used here because by definition this function
@ -914,7 +922,7 @@ fn codegen_gnu_try(
let data = llvm::get_param(bx.llfn(), 1);
let local_ptr = llvm::get_param(bx.llfn(), 2);
bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
then.ret(C_i32(cx, 0));
then.ret(CodegenCx::c_i32(cx, 0));
// Type indicator for the exception being thrown.
//
@ -924,11 +932,11 @@ fn codegen_gnu_try(
// rust_try ignores the selector.
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
catch.add_clause(vals, C_null(Type::i8p(cx)));
catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx)));
let ptr = catch.extract_value(vals, 0);
let ptr_align = bx.tcx().data_layout.pointer_align;
catch.store(ptr, catch.bitcast(local_ptr, Type::i8p(cx).ptr_to()), ptr_align);
catch.ret(C_i32(cx, 1));
catch.ret(CodegenCx::c_i32(cx, 1));
});
// Note that no invoke is used here because by definition this function
@ -1106,8 +1114,8 @@ fn generic_simd_intrinsic(
let indices: Option<Vec<_>> = (0..n)
.map(|i| {
let arg_idx = i;
let val = const_get_elt(vector, i as u64);
match const_to_opt_u128(val, true) {
let val = CodegenCx::const_get_elt(vector, i as u64);
match CodegenCx::const_to_opt_u128(val, true) {
None => {
emit_error!("shuffle index #{} is not a constant", arg_idx);
None
@ -1117,18 +1125,18 @@ fn generic_simd_intrinsic(
arg_idx, total_len);
None
}
Some(idx) => Some(C_i32(bx.cx, idx as i32)),
Some(idx) => Some(CodegenCx::c_i32(bx.cx, idx as i32)),
}
})
.collect();
let indices = match indices {
Some(i) => i,
None => return Ok(C_null(llret_ty))
None => return Ok(CodegenCx::c_null(llret_ty))
};
return Ok(bx.shuffle_vector(args[0].immediate(),
args[1].immediate(),
C_vector(&indices)))
CodegenCx::c_vector(&indices)))
}
if name == "simd_insert" {
@ -1379,7 +1387,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = Type::i32(bx.cx);
let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
@ -1479,7 +1487,7 @@ fn generic_simd_intrinsic(
// Alignment of T, must be a constant integer value:
let alignment_ty = Type::i32(bx.cx);
let alignment = C_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
// Truncate the mask vector to a vector of i1s:
let (mask, mask_ty) = {
@ -1541,7 +1549,7 @@ fn generic_simd_intrinsic(
// code is generated
// * if the accumulator of the fmul isn't 1, incorrect
// code is generated
match const_get_real(acc) {
match CodegenCx::const_get_real(acc) {
None => return_error!("accumulator of {} is not a constant", $name),
Some((v, loses_info)) => {
if $name.contains("mul") && v != 1.0_f64 {
@ -1557,8 +1565,8 @@ fn generic_simd_intrinsic(
} else {
// unordered arithmetic reductions do not:
match f.bit_width() {
32 => C_undef(Type::f32(bx.cx)),
64 => C_undef(Type::f64(bx.cx)),
32 => CodegenCx::c_undef(Type::f32(bx.cx)),
64 => CodegenCx::c_undef(Type::f64(bx.cx)),
v => {
return_error!(r#"
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,

View file

@ -215,7 +215,7 @@ pub enum TypeKind {
Double = 3,
X86_FP80 = 4,
FP128 = 5,
PPC_FP128 = 6,
PPC_FP128 = 6,
Label = 7,
Integer = 8,
Function = 9,

View file

@ -10,14 +10,14 @@
use abi::{FnType, FnTypeExt};
use callee;
use common::*;
use context::CodegenCx;
use builder::Builder;
use consts;
use monomorphize;
use type_::Type;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use rustc::ty::{self, Ty};
use rustc::ty::layout::HasDataLayout;
@ -43,7 +43,10 @@ impl<'a, 'tcx> VirtualIndex {
let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx).ptr_to());
let ptr_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), ptr_align);
let ptr = bx.load(
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
ptr_align
);
bx.nonnull_metadata(ptr);
// Vtable loads are invariant
bx.set_invariant_load(ptr);
@ -60,7 +63,10 @@ impl<'a, 'tcx> VirtualIndex {
let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
let usize_align = bx.tcx().data_layout.pointer_align;
let ptr = bx.load(bx.inbounds_gep(llvtable, &[C_usize(bx.cx, self.0)]), usize_align);
let ptr = bx.load(
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
usize_align
);
// Vtable loads are invariant
bx.set_invariant_load(ptr);
ptr
@ -90,7 +96,7 @@ pub fn get_vtable(
}
// Not in the cache. Build it.
let nullptr = C_null(Type::i8p(cx));
let nullptr = CodegenCx::c_null(Type::i8p(cx));
let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
let methods = methods.iter().cloned().map(|opt_mth| {
@ -106,11 +112,11 @@ pub fn get_vtable(
// /////////////////////////////////////////////////////////////////////////////////////////////
let components: Vec<_> = [
callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)),
C_usize(cx, size.bytes()),
C_usize(cx, align.abi())
CodegenCx::c_usize(cx, size.bytes()),
CodegenCx::c_usize(cx, align.abi())
].iter().cloned().chain(methods).collect();
let vtable_const = C_struct(cx, &components, false);
let vtable_const = CodegenCx::c_struct(cx, &components, false);
let align = cx.data_layout().pointer_align;
let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable"));

View file

@ -18,7 +18,8 @@ use abi::{Abi, ArgType, ArgTypeExt, FnType, FnTypeExt, LlvmType, PassMode};
use base;
use callee;
use builder::{Builder, MemFlags};
use common::{self, C_bool, C_str_slice, C_struct, C_u32, C_uint_big, C_undef, IntPredicate};
use common::{self, IntPredicate};
use context::CodegenCx;
use consts;
use meth;
use monomorphize;
@ -26,7 +27,7 @@ use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use syntax::symbol::Symbol;
use syntax_pos::Pos;
@ -171,7 +172,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
slot.storage_dead(&bx);
if !bx.sess().target.target.options.custom_unwind_resume {
let mut lp = C_undef(self.landing_pad_type());
let mut lp = CodegenCx::c_undef(self.landing_pad_type());
lp = bx.insert_value(lp, lp0, 0);
lp = bx.insert_value(lp, lp1, 1);
bx.resume(lp);
@ -209,7 +210,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
}
} else {
let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
let llval = C_uint_big(switch_llty, values[0]);
let llval = CodegenCx::c_uint_big(switch_llty, values[0]);
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
bx.cond_br(cmp, lltrue, llfalse);
}
@ -220,7 +221,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
values.len());
let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
for (&value, target) in values.iter().zip(targets) {
let llval = C_uint_big(switch_llty, value);
let llval = CodegenCx::c_uint_big(switch_llty, value);
let llbb = llblock(self, *target);
bx.add_case(switch, llval, llbb)
}
@ -323,7 +324,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
let cond = self.codegen_operand(&bx, cond).immediate();
let mut const_cond = common::const_to_opt_u128(cond, false).map(|c| c == 1);
let mut const_cond = CodegenCx::const_to_opt_u128(cond, false).map(|c| c == 1);
// This case can currently arise only from functions marked
// with #[rustc_inherit_overflow_checks] and inlined from
@ -346,7 +347,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Pass the condition through llvm.expect for branch hinting.
let expect = bx.cx.get_intrinsic(&"llvm.expect.i1");
let cond = bx.call(expect, &[cond, C_bool(bx.cx, expected)], None);
let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx, expected)], None);
// Create the failure block and the conditional branch to it.
let lltarget = llblock(self, target);
@ -364,9 +365,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// Get the location information.
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
let filename = C_str_slice(bx.cx, filename);
let line = C_u32(bx.cx, loc.line as u32);
let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1);
let filename = CodegenCx::c_str_slice(bx.cx, filename);
let line = CodegenCx::c_u32(bx.cx, loc.line as u32);
let col = CodegenCx::c_u32(bx.cx, loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
@ -377,7 +378,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let len = self.codegen_operand(&mut bx, len).immediate();
let index = self.codegen_operand(&mut bx, index).immediate();
let file_line_col = C_struct(bx.cx, &[filename, line, col], false);
let file_line_col = CodegenCx::c_struct(bx.cx,
&[filename, line, col], false);
let file_line_col = consts::addr_of(bx.cx,
file_line_col,
align,
@ -388,10 +390,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
_ => {
let str = msg.description();
let msg_str = Symbol::intern(str).as_str();
let msg_str = C_str_slice(bx.cx, msg_str);
let msg_file_line_col = C_struct(bx.cx,
&[msg_str, filename, line, col],
false);
let msg_str = CodegenCx::c_str_slice(bx.cx, msg_str);
let msg_file_line_col = CodegenCx::c_struct(
bx.cx,
&[msg_str, filename, line, col],
false
);
let msg_file_line_col = consts::addr_of(bx.cx,
msg_file_line_col,
align,
@ -497,9 +501,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
{
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
let filename = C_str_slice(bx.cx, filename);
let line = C_u32(bx.cx, loc.line as u32);
let col = C_u32(bx.cx, loc.col.to_usize() as u32 + 1);
let filename = bx.cx.c_str_slice(filename);
let line = bx.cx.c_u32(loc.line as u32);
let col = bx.cx.c_u32(loc.col.to_usize() as u32 + 1);
let align = tcx.data_layout.aggregate_align
.max(tcx.data_layout.i32_align)
.max(tcx.data_layout.pointer_align);
@ -510,10 +514,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
);
let msg_str = Symbol::intern(&str).as_str();
let msg_str = C_str_slice(bx.cx, msg_str);
let msg_file_line_col = C_struct(bx.cx,
&[msg_str, filename, line, col],
false);
let msg_str = bx.cx.c_str_slice(msg_str);
let msg_file_line_col = bx.cx.c_struct(
&[msg_str, filename, line, col],
false,
);
let msg_file_line_col = consts::addr_of(bx.cx,
msg_file_line_col,
align,
@ -558,7 +563,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let dest = match ret_dest {
_ if fn_ty.ret.is_indirect() => llargs[0],
ReturnDest::Nothing => {
C_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
}
ReturnDest::IndirectOperand(dst, _) |
ReturnDest::Store(dst) => dst.llval,
@ -739,7 +744,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
arg: &ArgType<'tcx, Ty<'tcx>>) {
// Fill padding with undef value, where applicable.
if let Some(ty) = arg.pad {
llargs.push(C_undef(ty.llvm_type(bx.cx)));
llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx)));
}
if arg.is_ignore() {

View file

@ -19,14 +19,13 @@ use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, HasDataLayout, LayoutOf, Size};
use builder::Builder;
use common::{CodegenCx};
use common::{C_bytes, C_struct, C_uint_big, C_undef, C_usize};
use consts;
use type_of::LayoutLlvmExt;
use type_::Type;
use syntax::ast::Mutability;
use syntax::source_map::Span;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use super::super::callee;
use super::FunctionCx;
@ -41,11 +40,11 @@ pub fn scalar_to_llvm(
match cv {
Scalar::Bits { size: 0, .. } => {
assert_eq!(0, layout.value.size(cx).bytes());
C_undef(Type::ix(cx, 0))
CodegenCx::c_undef(Type::ix(cx, 0))
},
Scalar::Bits { bits, size } => {
assert_eq!(size as u64, layout.value.size(cx).bytes());
let llval = C_uint_big(Type::ix(cx, bitsize), bits);
let llval = CodegenCx::c_uint_big(Type::ix(cx, bitsize), bits);
if layout.value == layout::Pointer {
unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
} else {
@ -74,7 +73,7 @@ pub fn scalar_to_llvm(
};
let llval = unsafe { llvm::LLVMConstInBoundsGEP(
consts::bitcast(base_addr, Type::i8p(cx)),
&C_usize(cx, ptr.offset.bytes()),
&CodegenCx::c_usize(cx, ptr.offset.bytes()),
1,
) };
if layout.value != layout::Pointer {
@ -97,7 +96,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
assert_eq!(offset as usize as u64, offset);
let offset = offset as usize;
if offset > next_offset {
llvals.push(C_bytes(cx, &alloc.bytes[next_offset..offset]));
llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset..offset]));
}
let ptr_offset = read_target_uint(
dl.endian,
@ -115,10 +114,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
next_offset = offset + pointer_size;
}
if alloc.bytes.len() >= next_offset {
llvals.push(C_bytes(cx, &alloc.bytes[next_offset ..]));
llvals.push(CodegenCx::c_bytes(cx, &alloc.bytes[next_offset ..]));
}
C_struct(cx, &llvals, true)
CodegenCx::c_struct(cx, &llvals, true)
}
pub fn codegen_static_initializer(
@ -208,7 +207,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
bug!("simd shuffle field {:?}", field)
}
}).collect();
let llval = C_struct(bx.cx, &values?, false);
let llval = CodegenCx::c_struct(bx.cx, &values?, false);
Ok((llval, c.ty))
})
.unwrap_or_else(|_| {
@ -219,7 +218,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// We've errored, so we don't have to produce working code.
let ty = self.monomorphize(&ty);
let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
(C_undef(llty), ty)
(CodegenCx::c_undef(llty), ty)
})
}
}

View file

@ -8,7 +8,6 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use common::{C_i32, C_null};
use libc::c_uint;
use llvm::{self, BasicBlock};
use llvm::debuginfo::DIScope;
@ -26,7 +25,7 @@ use monomorphize::Instance;
use abi::{ArgTypeExt, FnType, FnTypeExt, PassMode};
use type_::Type;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use syntax_pos::{DUMMY_SP, NO_EXPANSION, BytePos, Span};
use syntax::symbol::keywords;
@ -421,8 +420,8 @@ fn create_funclets(
// C++ personality function, but `catch (...)` has no type so
// it's null. The 64 here is actually a bitfield which
// represents that this is a catch-all block.
let null = C_null(Type::i8p(bx.cx));
let sixty_four = C_i32(bx.cx, 64);
let null = CodegenCx::c_null(Type::i8p(bx.cx));
let sixty_four = CodegenCx::c_i32(bx.cx, 64);
cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
cp_bx.br(llbb);
}

View file

@ -14,14 +14,14 @@ use rustc::ty;
use rustc::ty::layout::{self, Align, LayoutOf, TyLayout};
use base;
use common::{CodegenCx, C_undef, C_usize};
use common::CodegenCx;
use builder::{Builder, MemFlags};
use value::Value;
use type_of::LayoutLlvmExt;
use type_::Type;
use glue;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use std::fmt;
@ -73,7 +73,7 @@ impl OperandRef<'tcx, &'ll Value> {
layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> {
assert!(layout.is_zst());
OperandRef {
val: OperandValue::Immediate(C_undef(layout.immediate_llvm_type(cx))),
val: OperandValue::Immediate(CodegenCx::c_undef(layout.immediate_llvm_type(cx))),
layout
}
}
@ -167,7 +167,7 @@ impl OperandRef<'tcx, &'ll Value> {
debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
self, llty);
// Reconstruct the immediate aggregate.
let mut llpair = C_undef(llty);
let mut llpair = CodegenCx::c_undef(llty);
llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
llpair
@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> {
// `#[repr(simd)]` types are also immediate.
(OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
OperandValue::Immediate(
bx.extract_element(llval, C_usize(bx.cx, i as u64)))
bx.extract_element(llval, CodegenCx::c_usize(bx.cx, i as u64)))
}
_ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@ -463,7 +463,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// We've errored, so we don't have to produce working code.
let layout = bx.cx.layout_of(ty);
PlaceRef::new_sized(
C_undef(layout.llvm_type(bx.cx).ptr_to()),
CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()),
layout,
layout.align,
).load(bx)

View file

@ -15,7 +15,7 @@ use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big, IntPredicate};
use common::{CodegenCx, IntPredicate};
use consts;
use type_of::LayoutLlvmExt;
use type_::Type;
@ -23,7 +23,7 @@ use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
@ -69,7 +69,7 @@ impl PlaceRef<'tcx, &'ll Value> {
let llval = unsafe { LLVMConstInBoundsGEP(
consts::bitcast(base_addr, Type::i8p(bx.cx)),
&C_usize(bx.cx, offset.bytes()),
&CodegenCx::c_usize(bx.cx, offset.bytes()),
1,
)};
let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
@ -103,7 +103,7 @@ impl PlaceRef<'tcx, &'ll Value> {
assert_eq!(count, 0);
self.llextra.unwrap()
} else {
C_usize(cx, count)
CodegenCx::c_usize(cx, count)
}
} else {
bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
@ -248,7 +248,7 @@ impl PlaceRef<'tcx, &'ll Value> {
let meta = self.llextra;
let unaligned_offset = C_usize(cx, offset.bytes());
let unaligned_offset = CodegenCx::c_usize(cx, offset.bytes());
// Get the alignment of the field
let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
@ -259,7 +259,7 @@ impl PlaceRef<'tcx, &'ll Value> {
// (unaligned offset + (align - 1)) & -align
// Calculate offset
let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64));
let align_sub_1 = bx.sub(unsized_align, CodegenCx::c_usize(cx, 1u64));
let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
bx.neg(unsized_align));
@ -289,14 +289,14 @@ impl PlaceRef<'tcx, &'ll Value> {
) -> &'ll Value {
let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
if self.layout.abi.is_uninhabited() {
return C_undef(cast_to);
return CodegenCx::c_undef(cast_to);
}
match self.layout.variants {
layout::Variants::Single { index } => {
let discr_val = self.layout.ty.ty_adt_def().map_or(
index.as_u32() as u128,
|def| def.discriminant_for_variant(bx.cx.tcx, index).val);
return C_uint_big(cast_to, discr_val);
return CodegenCx::c_uint_big(cast_to, discr_val);
}
layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {},
@ -327,22 +327,23 @@ impl PlaceRef<'tcx, &'ll Value> {
if niche_variants.start() == niche_variants.end() {
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_start == 0 {
// HACK(eddyb) Using `C_null` as it works on all types.
C_null(niche_llty)
// HACK(eddyb) Using `c_null` as it works on all types.
CodegenCx::c_null(niche_llty)
} else {
C_uint_big(niche_llty, niche_start)
CodegenCx::c_uint_big(niche_llty, niche_start)
};
bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
C_uint(cast_to, niche_variants.start().as_u32() as u64),
C_uint(cast_to, dataful_variant.as_u32() as u64))
CodegenCx::c_uint(cast_to, niche_variants.start().as_u32() as u64),
CodegenCx::c_uint(cast_to, dataful_variant.as_u32() as u64))
} else {
// Rebase from niche values to discriminant values.
let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
let lldiscr_max = C_uint(niche_llty, niche_variants.end().as_u32() as u64);
let lldiscr = bx.sub(lldiscr, CodegenCx::c_uint_big(niche_llty, delta));
let lldiscr_max =
CodegenCx::c_uint(niche_llty, niche_variants.end().as_u32() as u64);
bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
bx.intcast(lldiscr, cast_to, false),
C_uint(cast_to, dataful_variant.as_u32() as u64))
CodegenCx::c_uint(cast_to, dataful_variant.as_u32() as u64))
}
}
}
@ -364,7 +365,7 @@ impl PlaceRef<'tcx, &'ll Value> {
.discriminant_for_variant(bx.tcx(), variant_index)
.val;
bx.store(
C_uint_big(ptr.layout.llvm_type(bx.cx), to),
CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx), to),
ptr.llval,
ptr.align);
}
@ -380,10 +381,10 @@ impl PlaceRef<'tcx, &'ll Value> {
// Issue #34427: As workaround for LLVM bug on ARM,
// use memset of 0 before assigning niche value.
let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
let fill_byte = C_u8(bx.cx, 0);
let fill_byte = CodegenCx::c_u8(bx.cx, 0);
let (size, align) = self.layout.size_and_align();
let size = C_usize(bx.cx, size.bytes());
let align = C_u32(bx.cx, align.abi() as u32);
let size = CodegenCx::c_usize(bx.cx, size.bytes());
let align = CodegenCx::c_u32(bx.cx, align.abi() as u32);
base::call_memset(bx, llptr, fill_byte, size, align, false);
}
@ -394,10 +395,10 @@ impl PlaceRef<'tcx, &'ll Value> {
.wrapping_add(niche_start);
// FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_value == 0 {
// HACK(eddyb) Using `C_null` as it works on all types.
C_null(niche_llty)
// HACK(eddyb) Using `c_null` as it works on all types.
CodegenCx::c_null(niche_llty)
} else {
C_uint_big(niche_llty, niche_value)
CodegenCx::c_uint_big(niche_llty, niche_value)
};
OperandValue::Immediate(niche_llval).store(bx, niche);
}
@ -408,7 +409,7 @@ impl PlaceRef<'tcx, &'ll Value> {
pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
-> PlaceRef<'tcx, &'ll Value> {
PlaceRef {
llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx, 0), llindex]),
llextra: None,
layout: self.layout.field(bx.cx, 0),
align: self.align
@ -483,7 +484,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// so we generate an abort
let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
bx.call(fnname, &[], None);
let llval = C_undef(layout.llvm_type(bx.cx).ptr_to());
let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to());
PlaceRef::new_sized(llval, layout, layout.align)
}
}
@ -516,20 +517,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::ProjectionElem::ConstantIndex { offset,
from_end: false,
min_length: _ } => {
let lloffset = C_usize(bx.cx, offset as u64);
let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
cg_base.project_index(bx, lloffset)
}
mir::ProjectionElem::ConstantIndex { offset,
from_end: true,
min_length: _ } => {
let lloffset = C_usize(bx.cx, offset as u64);
let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
let lllen = cg_base.len(bx.cx);
let llindex = bx.sub(lllen, lloffset);
cg_base.project_index(bx, llindex)
}
mir::ProjectionElem::Subslice { from, to } => {
let mut subslice = cg_base.project_index(bx,
C_usize(bx.cx, from as u64));
CodegenCx::c_usize(bx.cx, from as u64));
let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
.projection_ty(tcx, &projection.elem)
.to_ty(bx.tcx());
@ -537,7 +538,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if subslice.layout.is_unsized() {
subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
C_usize(bx.cx, (from as u64) + (to as u64))));
CodegenCx::c_usize(bx.cx, (from as u64) + (to as u64))));
}
// Cast the place pointer type to the new

View file

@ -19,18 +19,15 @@ use std::{u128, i128};
use base;
use builder::Builder;
use callee;
use common::{self, val_ty};
use common::{
C_bool, C_u8, C_i32, C_u32, C_u64, C_undef, C_null, C_usize,
C_uint, C_uint_big, IntPredicate, RealPredicate
};
use common::{self, IntPredicate, RealPredicate};
use context::CodegenCx;
use consts;
use monomorphize;
use type_::Type;
use type_of::LayoutLlvmExt;
use value::Value;
use interfaces::BuilderMethods;
use interfaces::{BuilderMethods, CommonMethods};
use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};
@ -106,28 +103,28 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
return bx;
}
let start = dest.project_index(&bx, C_usize(bx.cx, 0)).llval;
let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx, 0)).llval;
if let OperandValue::Immediate(v) = cg_elem.val {
let align = C_i32(bx.cx, dest.align.abi() as i32);
let size = C_usize(bx.cx, dest.layout.size.bytes());
let align = CodegenCx::c_i32(bx.cx, dest.align.abi() as i32);
let size = CodegenCx::c_usize(bx.cx, dest.layout.size.bytes());
// Use llvm.memset.p0i8.* to initialize all zero arrays
if common::is_const_integral(v) && common::const_to_uint(v) == 0 {
let fill = C_u8(bx.cx, 0);
if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 {
let fill = CodegenCx::c_u8(bx.cx, 0);
base::call_memset(&bx, start, fill, size, align, false);
return bx;
}
// Use llvm.memset.p0i8.* to initialize byte arrays
let v = base::from_immediate(&bx, v);
if common::val_ty(v) == Type::i8(bx.cx) {
if CodegenCx::val_ty(v) == Type::i8(bx.cx) {
base::call_memset(&bx, start, v, size, align, false);
return bx;
}
}
let count = C_usize(bx.cx, count);
let count = CodegenCx::c_usize(bx.cx, count);
let end = dest.project_index(&bx, count).llval;
let header_bx = bx.build_sibling_block("repeat_loop_header");
@ -135,7 +132,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let next_bx = bx.build_sibling_block("repeat_loop_next");
bx.br(header_bx.llbb());
let current = header_bx.phi(common::val_ty(start), &[start], &[bx.llbb()]);
let current = header_bx.phi(CodegenCx::val_ty(start), &[start], &[bx.llbb()]);
let keep_going = header_bx.icmp(IntPredicate::IntNE, current, end);
header_bx.cond_br(keep_going, body_bx.llbb(), next_bx.llbb());
@ -143,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
cg_elem.val.store(&body_bx,
PlaceRef::new_sized(current, cg_elem.layout, dest.align));
let next = body_bx.inbounds_gep(current, &[C_usize(bx.cx, 1)]);
let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx, 1)]);
body_bx.br(header_bx.llbb());
header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
@ -296,7 +293,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let ll_t_out = cast.immediate_llvm_type(bx.cx);
if operand.layout.abi.is_uninhabited() {
return (bx, OperandRef {
val: OperandValue::Immediate(C_undef(ll_t_out)),
val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)),
layout: cast,
});
}
@ -310,7 +307,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
let discr_val = def
.discriminant_for_variant(bx.cx.tcx, index)
.val;
let discr = C_uint_big(ll_t_out, discr_val);
let discr = CodegenCx::c_uint_big(ll_t_out, discr_val);
return (bx, OperandRef {
val: OperandValue::Immediate(discr),
layout: cast,
@ -341,7 +338,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
base::call_assume(&bx, bx.icmp(
IntPredicate::IntULE,
llval,
C_uint_big(ll_t_in, *scalar.valid_range.end())
CodegenCx::c_uint_big(ll_t_in, *scalar.valid_range.end())
));
}
}
@ -492,7 +489,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
assert!(bx.cx.type_is_sized(ty));
let val = C_usize(bx.cx, bx.cx.size_of(ty).bytes());
let val = CodegenCx::c_usize(bx.cx, bx.cx.size_of(ty).bytes());
let tcx = bx.tcx();
(bx, OperandRef {
val: OperandValue::Immediate(val),
@ -503,8 +500,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
let (size, align) = bx.cx.size_and_align_of(content_ty);
let llsize = C_usize(bx.cx, size.bytes());
let llalign = C_usize(bx.cx, align.abi());
let llsize = CodegenCx::c_usize(bx.cx, size.bytes());
let llalign = CodegenCx::c_usize(bx.cx, align.abi());
let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
let llty_ptr = box_layout.llvm_type(bx.cx);
@ -551,7 +548,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
if let LocalRef::Operand(Some(op)) = self.locals[index] {
if let ty::Array(_, n) = op.layout.ty.sty {
let n = n.unwrap_usize(bx.cx.tcx);
return common::C_usize(bx.cx, n);
return CodegenCx::c_usize(bx.cx, n);
}
}
}
@ -609,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
C_bool(bx.cx, match op {
CodegenCx::c_bool(bx.cx, match op {
mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
_ => unreachable!()
@ -688,7 +685,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
// while the current crate doesn't use overflow checks.
if !bx.cx.check_overflow {
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
return OperandValue::Pair(val, C_bool(bx.cx, false));
return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx, false));
}
let (val, of) = match op {
@ -707,12 +704,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
bx.extract_value(res, 1))
}
mir::BinOp::Shl | mir::BinOp::Shr => {
let lhs_llty = val_ty(lhs);
let rhs_llty = val_ty(rhs);
let lhs_llty = CodegenCx::val_ty(lhs);
let rhs_llty = CodegenCx::val_ty(rhs);
let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
let outer_bits = bx.and(rhs, invert_mask);
let of = bx.icmp(IntPredicate::IntNE, outer_bits, C_null(rhs_llty));
let of = bx.icmp(IntPredicate::IntNE, outer_bits, CodegenCx::c_null(rhs_llty));
let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
(val, of)
@ -839,9 +836,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
use rustc_apfloat::Float;
const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
<< (Single::MAX_EXP - Single::PRECISION as i16);
let max = C_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
let infinity_bits = C_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
let infinity_bits = CodegenCx::c_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
let infinity = consts::bitcast(infinity_bits, float_ty);
bx.select(overflow, infinity, bx.uitofp(x, float_ty))
} else {
@ -910,8 +907,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
}
let float_bits_to_llval = |bits| {
let bits_llval = match float_ty.float_width() {
32 => C_u32(bx.cx, bits as u32),
64 => C_u64(bx.cx, bits as u64),
32 => CodegenCx::c_u32(bx.cx, bits as u32),
64 => CodegenCx::c_u64(bx.cx, bits as u64),
n => bug!("unsupported float width {}", n),
};
consts::bitcast(bits_llval, float_ty)
@ -966,8 +963,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
// performed is ultimately up to the backend, but at least x86 does perform them.
let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
let int_max = C_uint_big(int_ty, int_max(signed, int_ty));
let int_min = C_uint_big(int_ty, int_min(signed, int_ty) as u128);
let int_max = CodegenCx::c_uint_big(int_ty, int_max(signed, int_ty));
let int_min = CodegenCx::c_uint_big(int_ty, int_min(signed, int_ty) as u128);
let s0 = bx.select(less_or_nan, int_min, fptosui_result);
let s1 = bx.select(greater, int_max, s0);
@ -976,7 +973,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
// Therefore we only need to execute this step for signed integer types.
if signed {
// LLVM has no isNaN predicate, so we use (x == x) instead
bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, C_uint(int_ty, 0))
bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, CodegenCx::c_uint(int_ty, 0))
} else {
s1
}

View file

@ -324,7 +324,7 @@ impl Type {
TypeKind::Float => 32,
TypeKind::Double => 64,
TypeKind::X86_FP80 => 80,
TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
TypeKind::FP128 | TypeKind::PPC_FP128 => 128,
_ => bug!("llvm_float_width called on a non-float type")
}
}