
Prefixed const methods with "const" instead of "c"

Denis Merigoux 2018-09-06 11:57:42 -07:00 committed by Eduard-Mihai Burtescu
parent 730b13ab51
commit 6d42574b7a
19 changed files with 212 additions and 212 deletions
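The rename is mechanical: every `c_*` constant constructor on the CommonMethods/CommonWriteMethods traits and their LLVM implementations becomes `const_*`, with no change to signatures or behavior. At a call site the change reads like this (a minimal sketch, not a line from the diff; `bx` and `layout` stand in for the builder and layout values the surrounding code has in scope):

    // Before this commit: terse "c_" prefix.
    let size = bx.cx().c_usize(layout.size.bytes());
    let flag = bx.cx().c_bool(true);

    // After this commit: explicit "const_" prefix, same arguments and results.
    let size = bx.cx().const_usize(layout.size.bytes());
    let flag = bx.cx().const_bool(true);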

View file

@@ -244,7 +244,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
         self.layout.align,
         bx.pointercast(llscratch, cx.i8p()),
         scratch_align,
-        cx.c_usize(self.layout.size.bytes()),
+        cx.const_usize(self.layout.size.bytes()),
         MemFlags::empty());
     bx.lifetime_end(llscratch, scratch_size);

View file

@@ -110,7 +110,7 @@ pub fn codegen_inline_asm(
     let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
         key.as_ptr() as *const c_char, key.len() as c_uint);
-    let val: &'ll Value = bx.cx().c_i32(ia.ctxt.outer().as_u32() as i32);
+    let val: &'ll Value = bx.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
     llvm::LLVMSetMetadata(r, kind,
         llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));

View file

@@ -442,17 +442,17 @@ impl CommonWriteMethods for CodegenContext<'ll> {
         common::val_ty(v)
     }
-    fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
-        common::c_bytes_in_context(llcx, bytes)
+    fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+        common::const_bytes_in_context(llcx, bytes)
     }
-    fn c_struct_in_context(
+    fn const_struct_in_context(
         &self,
         llcx: &'a llvm::Context,
         elts: &[&'a Value],
         packed: bool,
     ) -> &'a Value {
-        common::c_struct_in_context(llcx, elts, packed)
+        common::const_struct_in_context(llcx, elts, packed)
     }
 }
@@ -926,7 +926,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext,
                         llcx: &llvm::Context,
                         llmod: &llvm::Module,
                         bitcode: Option<&[u8]>) {
-    let llconst = cgcx.c_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
+    let llconst = cgcx.const_bytes_in_context(llcx, bitcode.unwrap_or(&[]));
     let llglobal = llvm::LLVMAddGlobal(
         llmod,
         cgcx.val_ty(llconst),
@@ -946,7 +946,7 @@ unsafe fn embed_bitcode(cgcx: &CodegenContext,
     llvm::LLVMRustSetLinkage(llglobal, llvm::Linkage::PrivateLinkage);
     llvm::LLVMSetGlobalConstant(llglobal, llvm::True);
-    let llconst = cgcx.c_bytes_in_context(llcx, &[]);
+    let llconst = cgcx.const_bytes_in_context(llcx, &[]);
     let llglobal = llvm::LLVMAddGlobal(
         llmod,
         cgcx.val_ty(llconst),

View file

@@ -198,7 +198,7 @@ pub fn unsized_info(
     let (source, target) = cx.tcx.struct_lockstep_tails(source, target);
     match (&source.sty, &target.sty) {
         (&ty::Array(_, len), &ty::Slice(_)) => {
-            cx.c_usize(len.unwrap_usize(cx.tcx))
+            cx.const_usize(len.unwrap_usize(cx.tcx))
         }
         (&ty::Dynamic(..), &ty::Dynamic(..)) => {
             // For now, upcasts are limited to changes in marker
@@ -460,7 +460,7 @@ pub fn memcpy_ty<'a, 'll: 'a, 'tcx: 'll>(
         return;
     }
-    call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().c_usize(size), flags);
+    call_memcpy(bx, dst, dst_align, src, src_align, bx.cx().const_usize(size), flags);
 }
 pub fn call_memset(
@@ -474,7 +474,7 @@ pub fn call_memset(
     let ptr_width = &bx.cx().sess().target.target.target_pointer_width;
     let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
     let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
-    let volatile = bx.cx().c_bool(volatile);
+    let volatile = bx.cx().const_bool(volatile);
     bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }
@@ -649,8 +649,8 @@ fn write_metadata<'a, 'gcx>(tcx: TyCtxt<'a, 'gcx, 'gcx>,
     DeflateEncoder::new(&mut compressed, Compression::fast())
         .write_all(&metadata.raw_data).unwrap();
-    let llmeta = llvm_module.c_bytes_in_context(metadata_llcx, &compressed);
-    let llconst = llvm_module.c_struct_in_context(metadata_llcx, &[llmeta], false);
+    let llmeta = llvm_module.const_bytes_in_context(metadata_llcx, &compressed);
+    let llconst = llvm_module.const_struct_in_context(metadata_llcx, &[llmeta], false);
     let name = exported_symbols::metadata_symbol_name(tcx);
     let buf = CString::new(name).unwrap();
     let llglobal = unsafe {
@@ -1151,7 +1151,7 @@ fn compile_codegen_unit<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     if !cx.used_statics.borrow().is_empty() {
         let name = const_cstr!("llvm.used");
         let section = const_cstr!("llvm.metadata");
-        let array = cx.c_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
+        let array = cx.const_array(&cx.ptr_to(cx.i8()), &*cx.used_statics.borrow());
         unsafe {
             let g = llvm::LLVMAddGlobal(cx.llmod,

View file

@@ -530,8 +530,8 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let llty = self.cx.val_ty(load);
             let v = [
-                self.cx.c_uint_big(llty, range.start),
-                self.cx.c_uint_big(llty, range.end)
+                self.cx.const_uint_big(llty, range.start),
+                self.cx.const_uint_big(llty, range.end)
             ];
             llvm::LLVMSetMetadata(load, llvm::MD_range as c_uint,
@@ -578,7 +578,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
             // *always* point to a metadata value of the integer 1.
             //
             // [1]: http://llvm.org/docs/LangRef.html#store-instruction
-            let one = self.cx.c_i32(1);
+            let one = self.cx.const_i32(1);
             let node = llvm::LLVMMDNodeInContext(self.cx.llcx, &one, 1);
             llvm::LLVMSetMetadata(store, llvm::MD_nontemporal as c_uint, node);
         }
@@ -862,9 +862,9 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let elt_ty = self.cx.val_ty(elt);
             let undef = llvm::LLVMGetUndef(&self.cx().vector(elt_ty, num_elts as u64));
-            let vec = self.insert_element(undef, elt, self.cx.c_i32(0));
+            let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
             let vec_i32_ty = &self.cx().vector(&self.cx().i32(), num_elts as u64);
-            self.shuffle_vector(vec, undef, self.cx().c_null(vec_i32_ty))
+            self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
         }
     }
@@ -1228,7 +1228,7 @@ impl BuilderMethods<'a, 'll, 'tcx> for Builder<'a, 'll, 'tcx> {
         let lifetime_intrinsic = self.cx.get_intrinsic(intrinsic);
         let ptr = self.pointercast(ptr, self.cx.i8p());
-        self.call(lifetime_intrinsic, &[self.cx.c_u64(size), ptr], None);
+        self.call(lifetime_intrinsic, &[self.cx.const_u64(size), ptr], None);
     }
     fn call(&self, llfn: &'ll Value, args: &[&'ll Value],

View file

@@ -204,71 +204,71 @@ impl Backend for CodegenCx<'ll, 'tcx> {
 impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     // LLVM constant constructors.
-    fn c_null(&self, t: &'ll Type) -> &'ll Value {
+    fn const_null(&self, t: &'ll Type) -> &'ll Value {
         unsafe {
             llvm::LLVMConstNull(t)
         }
     }
-    fn c_undef(&self, t: &'ll Type) -> &'ll Value {
+    fn const_undef(&self, t: &'ll Type) -> &'ll Value {
         unsafe {
             llvm::LLVMGetUndef(t)
         }
     }
-    fn c_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
+    fn const_int(&self, t: &'ll Type, i: i64) -> &'ll Value {
         unsafe {
             llvm::LLVMConstInt(t, i as u64, True)
         }
     }
-    fn c_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
+    fn const_uint(&self, t: &'ll Type, i: u64) -> &'ll Value {
         unsafe {
             llvm::LLVMConstInt(t, i, False)
         }
     }
-    fn c_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
+    fn const_uint_big(&self, t: &'ll Type, u: u128) -> &'ll Value {
         unsafe {
             let words = [u as u64, (u >> 64) as u64];
             llvm::LLVMConstIntOfArbitraryPrecision(t, 2, words.as_ptr())
         }
     }
-    fn c_bool(&self, val: bool) -> &'ll Value {
-        &self.c_uint(&self.i1(), val as u64)
+    fn const_bool(&self, val: bool) -> &'ll Value {
+        &self.const_uint(&self.i1(), val as u64)
     }
-    fn c_i32(&self, i: i32) -> &'ll Value {
-        &self.c_int(&self.i32(), i as i64)
+    fn const_i32(&self, i: i32) -> &'ll Value {
+        &self.const_int(&self.i32(), i as i64)
     }
-    fn c_u32(&self, i: u32) -> &'ll Value {
-        &self.c_uint(&self.i32(), i as u64)
+    fn const_u32(&self, i: u32) -> &'ll Value {
+        &self.const_uint(&self.i32(), i as u64)
     }
-    fn c_u64(&self, i: u64) -> &'ll Value {
-        &self.c_uint(&self.i64(), i)
+    fn const_u64(&self, i: u64) -> &'ll Value {
+        &self.const_uint(&self.i64(), i)
     }
-    fn c_usize(&self, i: u64) -> &'ll Value {
+    fn const_usize(&self, i: u64) -> &'ll Value {
         let bit_size = self.data_layout().pointer_size.bits();
         if bit_size < 64 {
             // make sure it doesn't overflow
             assert!(i < (1<<bit_size));
         }
-        &self.c_uint(&self.isize_ty, i)
+        &self.const_uint(&self.isize_ty, i)
     }
-    fn c_u8(&self, i: u8) -> &'ll Value {
-        &self.c_uint(&self.i8(), i as u64)
+    fn const_u8(&self, i: u8) -> &'ll Value {
+        &self.const_uint(&self.i8(), i as u64)
     }
     // This is a 'c-like' raw string, which differs from
     // our boxed-and-length-annotated strings.
-    fn c_cstr(
+    fn const_cstr(
         &self,
         s: LocalInternedString,
         null_terminated: bool,
@@ -297,45 +297,45 @@ impl<'ll, 'tcx: 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
     // NB: Do not use `do_spill_noroot` to make this into a constant string, or
     // you will be kicked off fast isel. See issue #4352 for an example of this.
-    fn c_str_slice(&self, s: LocalInternedString) -> &'ll Value {
+    fn const_str_slice(&self, s: LocalInternedString) -> &'ll Value {
         let len = s.len();
-        let cs = consts::ptrcast(&self.c_cstr(s, false),
+        let cs = consts::ptrcast(&self.const_cstr(s, false),
             &self.ptr_to(&self.layout_of(&self.tcx.mk_str()).llvm_type(&self)));
-        &self.c_fat_ptr(cs, &self.c_usize(len as u64))
+        &self.const_fat_ptr(cs, &self.const_usize(len as u64))
     }
-    fn c_fat_ptr(
+    fn const_fat_ptr(
         &self,
         ptr: &'ll Value,
         meta: &'ll Value
     ) -> &'ll Value {
         assert_eq!(abi::FAT_PTR_ADDR, 0);
         assert_eq!(abi::FAT_PTR_EXTRA, 1);
-        &self.c_struct(&[ptr, meta], false)
+        &self.const_struct(&[ptr, meta], false)
     }
-    fn c_struct(
+    fn const_struct(
         &self,
         elts: &[&'ll Value],
         packed: bool
     ) -> &'ll Value {
-        &self.c_struct_in_context(&self.llcx, elts, packed)
+        &self.const_struct_in_context(&self.llcx, elts, packed)
     }
-    fn c_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
+    fn const_array(&self, ty: &'ll Type, elts: &[&'ll Value]) -> &'ll Value {
         unsafe {
             return llvm::LLVMConstArray(ty, elts.as_ptr(), elts.len() as c_uint);
         }
     }
-    fn c_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
+    fn const_vector(&self, elts: &[&'ll Value]) -> &'ll Value {
         unsafe {
             return llvm::LLVMConstVector(elts.as_ptr(), elts.len() as c_uint);
         }
     }
-    fn c_bytes(&self, bytes: &[u8]) -> &'ll Value {
-        &self.c_bytes_in_context(&self.llcx, bytes)
+    fn const_bytes(&self, bytes: &[u8]) -> &'ll Value {
+        &self.const_bytes_in_context(&self.llcx, bytes)
     }
     fn const_get_elt(&self, v: &'ll Value, idx: u64) -> &'ll Value {
@@ -406,14 +406,14 @@ pub fn val_ty(v: &'ll Value) -> &'ll Type {
     }
 }
-pub fn c_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+pub fn const_bytes_in_context(llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
     unsafe {
         let ptr = bytes.as_ptr() as *const c_char;
         return llvm::LLVMConstStringInContext(llcx, ptr, bytes.len() as c_uint, True);
     }
 }
-pub fn c_struct_in_context(
+pub fn const_struct_in_context(
     llcx: &'a llvm::Context,
     elts: &[&'a Value],
     packed: bool,
@@ -430,17 +430,17 @@ impl<'ll, 'tcx: 'll> CommonWriteMethods for CodegenCx<'ll, 'tcx> {
         val_ty(v)
     }
-    fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
-        c_bytes_in_context(llcx, bytes)
+    fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+        const_bytes_in_context(llcx, bytes)
     }
-    fn c_struct_in_context(
+    fn const_struct_in_context(
         &self,
         llcx: &'a llvm::Context,
         elts: &[&'a Value],
         packed: bool,
     ) -> &'a Value {
-        c_struct_in_context(llcx, elts, packed)
+        const_struct_in_context(llcx, elts, packed)
     }
 }
@@ -511,9 +511,9 @@ pub fn shift_mask_val(
             // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
             let val = bx.cx().int_width(llty) - 1;
             if invert {
-                bx.cx.c_int(mask_llty, !val as i64)
+                bx.cx.const_int(mask_llty, !val as i64)
             } else {
-                bx.cx.c_uint(mask_llty, val)
+                bx.cx.const_uint(mask_llty, val)
             }
         },
         TypeKind::Vector => {
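Two details in this file are easy to miss amid the rename: `const_usize` asserts that the value fits in the target's pointer width before emitting the constant, and `const_uint_big` hands a `u128` to LLVM as two little-endian `u64` words. A worked sketch of the word split (illustrative values, not from the diff):

    // u: u128 = (1 << 80) + 5
    // words = [u as u64, (u >> 64) as u64] = [5, 1 << 16]
    // LLVMConstIntOfArbitraryPrecision treats words[0] as the least
    // significant word, so the 128-bit constant round-trips exactly.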

View file

@@ -29,7 +29,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
         let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
-        let indices = [bx.cx().c_i32(0), bx.cx().c_i32(0)];
+        let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)];
         let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
         let volative_load_instruction = bx.volatile_load(element);
         unsafe {
@@ -63,7 +63,7 @@ pub fn get_or_insert_gdb_debug_scripts_section_global(cx: &CodegenCx<'ll, '_>)
                 bug!("symbol `{}` is already defined", section_var_name)
             });
             llvm::LLVMSetSection(section_var, section_name.as_ptr() as *const _);
-            llvm::LLVMSetInitializer(section_var, cx.c_bytes(section_contents));
+            llvm::LLVMSetInitializer(section_var, cx.const_bytes(section_contents));
             llvm::LLVMSetGlobalConstant(section_var, llvm::True);
             llvm::LLVMSetUnnamedAddr(section_var, llvm::True);
             llvm::LLVMRustSetLinkage(section_var, llvm::Linkage::LinkOnceODRLinkage);

View file

@@ -1811,7 +1811,7 @@ fn set_members_of_composite_type(cx: &CodegenCx<'ll, '_>,
                     member_description.offset.bits(),
                     match member_description.discriminant {
                         None => None,
-                        Some(value) => Some(cx.c_u64(value)),
+                        Some(value) => Some(cx.const_u64(value)),
                     },
                     member_description.flags,
                     member_description.type_metadata))

View file

@@ -33,8 +33,8 @@ pub fn size_and_align_of_dst(
         let (size, align) = bx.cx().size_and_align_of(t);
         debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
                t, info, size, align);
-        let size = bx.cx().c_usize(size.bytes());
-        let align = bx.cx().c_usize(align.abi());
+        let size = bx.cx().const_usize(size.bytes());
+        let align = bx.cx().const_usize(align.abi());
         return (size, align);
     }
     match t.sty {
@@ -48,8 +48,8 @@ pub fn size_and_align_of_dst(
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
             let (size, align) = bx.cx().size_and_align_of(unit);
-            (bx.mul(info.unwrap(), bx.cx().c_usize(size.bytes())),
-             bx.cx().c_usize(align.abi()))
+            (bx.mul(info.unwrap(), bx.cx().const_usize(size.bytes())),
+             bx.cx().const_usize(align.abi()))
         }
         _ => {
             let cx = bx.cx();
@@ -65,8 +65,8 @@ pub fn size_and_align_of_dst(
             let sized_align = layout.align.abi();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
-            let sized_size = cx.c_usize(sized_size);
-            let sized_align = cx.c_usize(sized_align);
+            let sized_size = cx.const_usize(sized_size);
+            let sized_align = cx.const_usize(sized_align);
             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
@@ -97,7 +97,7 @@ pub fn size_and_align_of_dst(
                 (Some(sized_align), Some(unsized_align)) => {
                     // If both alignments are constant, (the sized_align should always be), then
                     // pick the correct alignment statically.
-                    cx.c_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                    cx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
                 }
                 _ => bx.select(bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align),
                                sized_align,
@@ -115,7 +115,7 @@ pub fn size_and_align_of_dst(
             //
             //   `(size + (align-1)) & -align`
-            let addend = bx.sub(align, bx.cx().c_usize(1));
+            let addend = bx.sub(align, bx.cx().const_usize(1));
             let size = bx.and(bx.add(size, addend), bx.neg(align));
             (size, align)
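The `(size + (align-1)) & -align` step above rounds the dynamically computed size up to the next multiple of the alignment without branching. A quick arithmetic check (illustrative numbers, not from the diff): for size = 5 and align = 4, the addend is 3 and (5 + 3) & -4 = 8; an already-aligned size = 8 gives (8 + 3) & -4 = 8, since & -4 merely clears the low two bits.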

View file

@@ -13,36 +13,36 @@ use syntax::symbol::LocalInternedString;
 pub trait CommonMethods: Backend + CommonWriteMethods {
     // Constant constructors
-    fn c_null(&self, t: Self::Type) -> Self::Value;
-    fn c_undef(&self, t: Self::Type) -> Self::Value;
-    fn c_int(&self, t: Self::Type, i: i64) -> Self::Value;
-    fn c_uint(&self, t: Self::Type, i: u64) -> Self::Value;
-    fn c_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
-    fn c_bool(&self, val: bool) -> Self::Value;
-    fn c_i32(&self, i: i32) -> Self::Value;
-    fn c_u32(&self, i: u32) -> Self::Value;
-    fn c_u64(&self, i: u64) -> Self::Value;
-    fn c_usize(&self, i: u64) -> Self::Value;
-    fn c_u8(&self, i: u8) -> Self::Value;
-    fn c_cstr(
+    fn const_null(&self, t: Self::Type) -> Self::Value;
+    fn const_undef(&self, t: Self::Type) -> Self::Value;
+    fn const_int(&self, t: Self::Type, i: i64) -> Self::Value;
+    fn const_uint(&self, t: Self::Type, i: u64) -> Self::Value;
+    fn const_uint_big(&self, t: Self::Type, u: u128) -> Self::Value;
+    fn const_bool(&self, val: bool) -> Self::Value;
+    fn const_i32(&self, i: i32) -> Self::Value;
+    fn const_u32(&self, i: u32) -> Self::Value;
+    fn const_u64(&self, i: u64) -> Self::Value;
+    fn const_usize(&self, i: u64) -> Self::Value;
+    fn const_u8(&self, i: u8) -> Self::Value;
+    fn const_cstr(
         &self,
         s: LocalInternedString,
         null_terminated: bool,
     ) -> Self::Value;
-    fn c_str_slice(&self, s: LocalInternedString) -> Self::Value;
-    fn c_fat_ptr(
+    fn const_str_slice(&self, s: LocalInternedString) -> Self::Value;
+    fn const_fat_ptr(
         &self,
         ptr: Self::Value,
         meta: Self::Value
     ) -> Self::Value;
-    fn c_struct(
+    fn const_struct(
         &self,
         elts: &[Self::Value],
         packed: bool
     ) -> Self::Value;
-    fn c_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
-    fn c_vector(&self, elts: &[Self::Value]) -> Self::Value;
-    fn c_bytes(&self, bytes: &[u8]) -> Self::Value;
+    fn const_array(&self, ty: Self::Type, elts: &[Self::Value]) -> Self::Value;
+    fn const_vector(&self, elts: &[Self::Value]) -> Self::Value;
+    fn const_bytes(&self, bytes: &[u8]) -> Self::Value;
     fn const_get_elt(&self, v: Self::Value, idx: u64) -> Self::Value;
     fn const_get_real(&self, v: Self::Value) -> Option<(f64, bool)>;
@@ -55,8 +55,8 @@ pub trait CommonMethods: Backend + CommonWriteMethods {
 pub trait CommonWriteMethods: Backend {
     fn val_ty(&self, v: Self::Value) -> Self::Type;
-    fn c_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value;
-    fn c_struct_in_context(
+    fn const_bytes_in_context(&self, llcx: Self::Context, bytes: &[u8]) -> Self::Value;
+    fn const_struct_in_context(
         &self,
         llcx: Self::Context,
         elts: &[Self::Value],

View file

@@ -127,11 +127,11 @@ pub fn codegen_intrinsic_call(
         },
         "likely" => {
             let expect = cx.get_intrinsic(&("llvm.expect.i1"));
-            bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(true)], None)
+            bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(true)], None)
         }
         "unlikely" => {
             let expect = cx.get_intrinsic(&("llvm.expect.i1"));
-            bx.call(expect, &[args[0].immediate(), bx.cx().c_bool(false)], None)
+            bx.call(expect, &[args[0].immediate(), bx.cx().const_bool(false)], None)
         }
         "try" => {
             try_intrinsic(bx, cx,
@@ -147,7 +147,7 @@ pub fn codegen_intrinsic_call(
         }
         "size_of" => {
             let tp_ty = substs.type_at(0);
-            cx.c_usize(cx.size_of(tp_ty).bytes())
+            cx.const_usize(cx.size_of(tp_ty).bytes())
         }
         "size_of_val" => {
             let tp_ty = substs.type_at(0);
@@ -156,12 +156,12 @@ pub fn codegen_intrinsic_call(
                     glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                 llsize
             } else {
-                cx.c_usize(cx.size_of(tp_ty).bytes())
+                cx.const_usize(cx.size_of(tp_ty).bytes())
             }
         }
         "min_align_of" => {
             let tp_ty = substs.type_at(0);
-            cx.c_usize(cx.align_of(tp_ty).abi())
+            cx.const_usize(cx.align_of(tp_ty).abi())
         }
         "min_align_of_val" => {
             let tp_ty = substs.type_at(0);
@@ -170,20 +170,20 @@ pub fn codegen_intrinsic_call(
                     glue::size_and_align_of_dst(bx, tp_ty, Some(meta));
                 llalign
             } else {
-                cx.c_usize(cx.align_of(tp_ty).abi())
+                cx.const_usize(cx.align_of(tp_ty).abi())
             }
         }
         "pref_align_of" => {
             let tp_ty = substs.type_at(0);
-            cx.c_usize(cx.align_of(tp_ty).pref())
+            cx.const_usize(cx.align_of(tp_ty).pref())
         }
         "type_name" => {
             let tp_ty = substs.type_at(0);
             let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
-            cx.c_str_slice(ty_name)
+            cx.const_str_slice(ty_name)
         }
         "type_id" => {
-            cx.c_u64(cx.tcx.type_id_hash(substs.type_at(0)))
+            cx.const_u64(cx.tcx.type_id_hash(substs.type_at(0)))
         }
         "init" => {
             let ty = substs.type_at(0);
@@ -197,8 +197,8 @@ pub fn codegen_intrinsic_call(
                     false,
                     ty,
                     llresult,
-                    cx.c_u8(0),
-                    cx.c_usize(1)
+                    cx.const_u8(0),
+                    cx.const_usize(1)
                 );
             }
             return;
@@ -210,7 +210,7 @@ pub fn codegen_intrinsic_call(
         "needs_drop" => {
             let tp_ty = substs.type_at(0);
-            cx.c_bool(bx.cx().type_needs_drop(tp_ty))
+            cx.const_bool(bx.cx().type_needs_drop(tp_ty))
         }
         "offset" => {
             let ptr = args[0].immediate();
@@ -287,9 +287,9 @@ pub fn codegen_intrinsic_call(
             };
             bx.call(expect, &[
                 args[0].immediate(),
-                cx.c_i32(rw),
+                cx.const_i32(rw),
                 args[1].immediate(),
-                cx.c_i32(cache_type)
+                cx.const_i32(cache_type)
             ], None)
         },
         "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
@@ -302,12 +302,12 @@ pub fn codegen_intrinsic_call(
                 Some((width, signed)) =>
                     match name {
                         "ctlz" | "cttz" => {
-                            let y = cx.c_bool(false);
+                            let y = cx.const_bool(false);
                             let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
                             bx.call(llfn, &[args[0].immediate(), y], None)
                         }
                         "ctlz_nonzero" | "cttz_nonzero" => {
-                            let y = cx.c_bool(true);
+                            let y = cx.const_bool(true);
                             let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
                             let llfn = cx.get_intrinsic(llvm_name);
                             bx.call(llfn, &[args[0].immediate(), y], None)
@@ -388,7 +388,7 @@ pub fn codegen_intrinsic_call(
                         } else {
                             // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                             // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-                            let width = cx.c_uint(cx.ix(width), width);
+                            let width = cx.const_uint(cx.ix(width), width);
                             let shift = bx.urem(raw_shift, width);
                             let inv_shift = bx.urem(bx.sub(width, raw_shift), width);
                             let shift1 = bx.shl(val, if is_left { shift } else { inv_shift });
@@ -725,7 +725,7 @@ fn copy_intrinsic(
 ) -> &'ll Value {
     let cx = bx.cx();
     let (size, align) = cx.size_and_align_of(ty);
-    let size = cx.c_usize(size.bytes());
+    let size = cx.const_usize(size.bytes());
     let align = align.abi();
     let dst_ptr = bx.pointercast(dst, cx.i8p());
     let src_ptr = bx.pointercast(src, cx.i8p());
@@ -746,8 +746,8 @@ fn memset_intrinsic(
 ) -> &'ll Value {
     let cx = bx.cx();
     let (size, align) = cx.size_and_align_of(ty);
-    let size = cx.c_usize(size.bytes());
-    let align = cx.c_i32(align.abi() as i32);
+    let size = cx.const_usize(size.bytes());
+    let align = cx.const_i32(align.abi() as i32);
     let dst = bx.pointercast(dst, cx.i8p());
     call_memset(bx, dst, val, bx.mul(size, count), align, volatile)
 }
@@ -763,7 +763,7 @@ fn try_intrinsic(
     if bx.sess().no_landing_pads() {
         bx.call(func, &[data], None);
         let ptr_align = bx.tcx().data_layout.pointer_align;
-        bx.store(cx.c_null(cx.i8p()), dest, ptr_align);
+        bx.store(cx.const_null(cx.i8p()), dest, ptr_align);
     } else if wants_msvc_seh(bx.sess()) {
         codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
     } else {
@@ -844,7 +844,7 @@ fn codegen_msvc_try(
         let slot = bx.alloca(i64p, "slot", ptr_align);
         bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);
-        normal.ret(cx.c_i32(0));
+        normal.ret(cx.const_i32(0));
         let cs = catchswitch.catch_switch(None, None, 1);
         catchswitch.add_handler(cs, catchpad.llbb());
@@ -854,19 +854,19 @@ fn codegen_msvc_try(
             Some(did) => ::consts::get_static(cx, did),
             None => bug!("msvc_try_filter not defined"),
         };
-        let tok = catchpad.catch_pad(cs, &[tydesc, cx.c_i32(0), slot]);
+        let tok = catchpad.catch_pad(cs, &[tydesc, cx.const_i32(0), slot]);
         let addr = catchpad.load(slot, ptr_align);
         let i64_align = bx.tcx().data_layout.i64_align;
         let arg1 = catchpad.load(addr, i64_align);
-        let val1 = cx.c_i32(1);
+        let val1 = cx.const_i32(1);
         let arg2 = catchpad.load(catchpad.inbounds_gep(addr, &[val1]), i64_align);
         let local_ptr = catchpad.bitcast(local_ptr, i64p);
         catchpad.store(arg1, local_ptr, i64_align);
         catchpad.store(arg2, catchpad.inbounds_gep(local_ptr, &[val1]), i64_align);
         catchpad.catch_ret(tok, caught.llbb());
-        caught.ret(cx.c_i32(1));
+        caught.ret(cx.const_i32(1));
     });
     // Note that no invoke is used here because by definition this function
@@ -922,7 +922,7 @@ fn codegen_gnu_try(
         let data = llvm::get_param(bx.llfn(), 1);
         let local_ptr = llvm::get_param(bx.llfn(), 2);
         bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
-        then.ret(cx.c_i32(0));
+        then.ret(cx.const_i32(0));
         // Type indicator for the exception being thrown.
         //
@@ -932,11 +932,11 @@ fn codegen_gnu_try(
         // rust_try ignores the selector.
         let lpad_ty = cx.struct_(&[cx.i8p(), cx.i32()], false);
         let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
-        catch.add_clause(vals, bx.cx().c_null(cx.i8p()));
+        catch.add_clause(vals, bx.cx().const_null(cx.i8p()));
         let ptr = catch.extract_value(vals, 0);
         let ptr_align = bx.tcx().data_layout.pointer_align;
         catch.store(ptr, catch.bitcast(local_ptr, cx.ptr_to(cx.i8p())), ptr_align);
-        catch.ret(cx.c_i32(1));
+        catch.ret(cx.const_i32(1));
     });
     // Note that no invoke is used here because by definition this function
@@ -1125,18 +1125,18 @@ fn generic_simd_intrinsic(
                              arg_idx, total_len);
                     None
                 }
-                Some(idx) => Some(bx.cx().c_i32(idx as i32)),
+                Some(idx) => Some(bx.cx().const_i32(idx as i32)),
             }
         })
         .collect();
        let indices = match indices {
            Some(i) => i,
-            None => return Ok(bx.cx().c_null(llret_ty))
+            None => return Ok(bx.cx().const_null(llret_ty))
        };
        return Ok(bx.shuffle_vector(args[0].immediate(),
                                    args[1].immediate(),
-                                   bx.cx().c_vector(&indices)))
+                                   bx.cx().const_vector(&indices)))
    }
    if name == "simd_insert" {
@@ -1387,7 +1387,7 @@ fn generic_simd_intrinsic(
        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().i32();
-        let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
+        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
@@ -1487,7 +1487,7 @@ fn generic_simd_intrinsic(
        // Alignment of T, must be a constant integer value:
        let alignment_ty = bx.cx().i32();
-        let alignment = bx.cx().c_i32(bx.cx().align_of(in_elem).abi() as i32);
+        let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).abi() as i32);
        // Truncate the mask vector to a vector of i1s:
        let (mask, mask_ty) = {
@@ -1565,8 +1565,8 @@ fn generic_simd_intrinsic(
                } else {
                    // unordered arithmetic reductions do not:
                    match f.bit_width() {
-                        32 => bx.cx().c_undef(bx.cx().f32()),
-                        64 => bx.cx().c_undef(bx.cx().f64()),
+                        32 => bx.cx().const_undef(bx.cx().f32()),
+                        64 => bx.cx().const_undef(bx.cx().f64()),
                        v => {
                            return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
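The rotate lowering above composes a rotate from two shifts and an OR; the `% BW` terms (the `bx.urem` calls) keep both shift amounts strictly below the bit width, since an LLVM shift by the width or more is undefined. A quick check of the rotate_left formula (illustrative values, not from the diff): for the 8-bit value X = 0b1001_0110 and S = 3, X << 3 = 0b1011_0000 and X >> 5 = 0b0000_0100, and their OR is 0b1011_0100, which is X rotated left by three.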

View file

@@ -364,17 +364,17 @@ impl CommonWriteMethods for ModuleLlvm<'ll> {
         common::val_ty(v)
     }
-    fn c_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
-        common::c_bytes_in_context(llcx, bytes)
+    fn const_bytes_in_context(&self, llcx: &'ll llvm::Context, bytes: &[u8]) -> &'ll Value {
+        common::const_bytes_in_context(llcx, bytes)
     }
-    fn c_struct_in_context(
+    fn const_struct_in_context(
         &self,
         llcx: &'a llvm::Context,
         elts: &[&'a Value],
         packed: bool,
     ) -> &'a Value {
-        common::c_struct_in_context(llcx, elts, packed)
+        common::const_struct_in_context(llcx, elts, packed)
     }
 }

View file

@@ -46,7 +46,7 @@ impl<'a, 'tcx> VirtualIndex {
         );
         let ptr_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
+            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
             ptr_align
         );
         bx.nonnull_metadata(ptr);
@@ -66,7 +66,7 @@ impl<'a, 'tcx> VirtualIndex {
         let llvtable = bx.pointercast(llvtable, bx.cx().ptr_to(bx.cx().isize()));
         let usize_align = bx.tcx().data_layout.pointer_align;
         let ptr = bx.load(
-            bx.inbounds_gep(llvtable, &[bx.cx().c_usize(self.0)]),
+            bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]),
             usize_align
         );
         // Vtable loads are invariant
@@ -98,7 +98,7 @@ pub fn get_vtable(
     }
     // Not in the cache. Build it.
-    let nullptr = cx.c_null(cx.i8p());
+    let nullptr = cx.const_null(cx.i8p());
     let methods = tcx.vtable_methods(trait_ref.with_self_ty(tcx, ty));
     let methods = methods.iter().cloned().map(|opt_mth| {
@@ -114,11 +114,11 @@ pub fn get_vtable(
     // /////////////////////////////////////////////////////////////////////////////////////////////
     let components: Vec<_> = [
         callee::get_fn(cx, monomorphize::resolve_drop_in_place(cx.tcx, ty)),
-        cx.c_usize(size.bytes()),
-        cx.c_usize(align.abi())
+        cx.const_usize(size.bytes()),
+        cx.const_usize(align.abi())
     ].iter().cloned().chain(methods).collect();
-    let vtable_const = cx.c_struct(&components, false);
+    let vtable_const = cx.const_struct(&components, false);
     let align = cx.data_layout().pointer_align;
     let vtable = consts::addr_of(cx, vtable_const, align, Some("vtable"));

View file

@@ -171,7 +171,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             slot.storage_dead(&bx);
             if !bx.sess().target.target.options.custom_unwind_resume {
-                let mut lp = bx.cx().c_undef(self.landing_pad_type());
+                let mut lp = bx.cx().const_undef(self.landing_pad_type());
                 lp = bx.insert_value(lp, lp0, 0);
                 lp = bx.insert_value(lp, lp1, 1);
                 bx.resume(lp);
@@ -209,7 +209,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 }
             } else {
                 let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
-                let llval = bx.cx().c_uint_big(switch_llty, values[0]);
+                let llval = bx.cx().const_uint_big(switch_llty, values[0]);
                 let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                 bx.cond_br(cmp, lltrue, llfalse);
             }
@@ -220,7 +220,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                                       values.len());
                 let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
                 for (&value, target) in values.iter().zip(targets) {
-                    let llval =bx.cx().c_uint_big(switch_llty, value);
+                    let llval =bx.cx().const_uint_big(switch_llty, value);
                     let llbb = llblock(self, *target);
                     bx.add_case(switch, llval, llbb)
                 }
@@ -346,7 +346,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         // Pass the condition through llvm.expect for branch hinting.
         let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
-        let cond = bx.call(expect, &[cond, bx.cx().c_bool(expected)], None);
+        let cond = bx.call(expect, &[cond, bx.cx().const_bool(expected)], None);
         // Create the failure block and the conditional branch to it.
         let lltarget = llblock(self, target);
@@ -364,9 +364,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         // Get the location information.
         let loc = bx.sess().source_map().lookup_char_pos(span.lo());
         let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-        let filename = bx.cx().c_str_slice(filename);
-        let line = bx.cx().c_u32(loc.line as u32);
-        let col = bx.cx().c_u32(loc.col.to_usize() as u32 + 1);
+        let filename = bx.cx().const_str_slice(filename);
+        let line = bx.cx().const_u32(loc.line as u32);
+        let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
         let align = tcx.data_layout.aggregate_align
             .max(tcx.data_layout.i32_align)
             .max(tcx.data_layout.pointer_align);
@@ -377,7 +377,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 let len = self.codegen_operand(&mut bx, len).immediate();
                 let index = self.codegen_operand(&mut bx, index).immediate();
-                let file_line_col = bx.cx().c_struct(&[filename, line, col], false);
+                let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
                 let file_line_col = consts::addr_of(bx.cx(),
                                                     file_line_col,
                                                     align,
@@ -388,8 +388,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             _ => {
                 let str = msg.description();
                 let msg_str = Symbol::intern(str).as_str();
-                let msg_str = bx.cx().c_str_slice(msg_str);
-                let msg_file_line_col = bx.cx().c_struct(
+                let msg_str = bx.cx().const_str_slice(msg_str);
+                let msg_file_line_col = bx.cx().const_struct(
                     &[msg_str, filename, line, col],
                     false
                 );
@@ -498,9 +498,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         {
             let loc = bx.sess().source_map().lookup_char_pos(span.lo());
             let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-            let filename = bx.cx.c_str_slice(filename);
-            let line = bx.cx.c_u32(loc.line as u32);
-            let col = bx.cx.c_u32(loc.col.to_usize() as u32 + 1);
+            let filename = bx.cx.const_str_slice(filename);
+            let line = bx.cx.const_u32(loc.line as u32);
+            let col = bx.cx.const_u32(loc.col.to_usize() as u32 + 1);
             let align = tcx.data_layout.aggregate_align
                 .max(tcx.data_layout.i32_align)
                 .max(tcx.data_layout.pointer_align);
@@ -511,8 +511,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
             );
             let msg_str = Symbol::intern(&str).as_str();
-            let msg_str = bx.cx.c_str_slice(msg_str);
-            let msg_file_line_col = bx.cx.c_struct(
+            let msg_str = bx.cx.const_str_slice(msg_str);
+            let msg_file_line_col = bx.cx.const_struct(
                 &[msg_str, filename, line, col],
                 false,
             );
@@ -560,7 +560,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         let dest = match ret_dest {
             _ if fn_ty.ret.is_indirect() => llargs[0],
             ReturnDest::Nothing => {
-                bx.cx().c_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
+                bx.cx().const_undef(bx.cx().ptr_to(fn_ty.ret.memory_ty(bx.cx())))
             }
             ReturnDest::IndirectOperand(dst, _) |
             ReturnDest::Store(dst) => dst.llval,
@@ -741,7 +741,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                            arg: &ArgType<'tcx, Ty<'tcx>>) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(bx.cx().c_undef(ty.llvm_type(bx.cx())));
+            llargs.push(bx.cx().const_undef(ty.llvm_type(bx.cx())));
         }
         if arg.is_ignore() {

View file

@@ -40,11 +40,11 @@ pub fn scalar_to_llvm(
     match cv {
         Scalar::Bits { size: 0, .. } => {
             assert_eq!(0, layout.value.size(cx).bytes());
-            cx.c_undef(cx.ix(0))
+            cx.const_undef(cx.ix(0))
         },
         Scalar::Bits { bits, size } => {
             assert_eq!(size as u64, layout.value.size(cx).bytes());
-            let llval = cx.c_uint_big(cx.ix(bitsize), bits);
+            let llval = cx.const_uint_big(cx.ix(bitsize), bits);
             if layout.value == layout::Pointer {
                 unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
             } else {
@@ -73,7 +73,7 @@ pub fn scalar_to_llvm(
             };
             let llval = unsafe { llvm::LLVMConstInBoundsGEP(
                 consts::bitcast(base_addr, cx.i8p()),
-                &cx.c_usize(ptr.offset.bytes()),
+                &cx.const_usize(ptr.offset.bytes()),
                 1,
             ) };
             if layout.value != layout::Pointer {
@@ -96,7 +96,7 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
         assert_eq!(offset as usize as u64, offset);
         let offset = offset as usize;
         if offset > next_offset {
-            llvals.push(cx.c_bytes(&alloc.bytes[next_offset..offset]));
+            llvals.push(cx.const_bytes(&alloc.bytes[next_offset..offset]));
         }
         let ptr_offset = read_target_uint(
             dl.endian,
@@ -114,10 +114,10 @@ pub fn const_alloc_to_llvm(cx: &CodegenCx<'ll, '_>, alloc: &Allocation) -> &'ll
         next_offset = offset + pointer_size;
     }
     if alloc.bytes.len() >= next_offset {
-        llvals.push(cx.c_bytes(&alloc.bytes[next_offset ..]));
+        llvals.push(cx.const_bytes(&alloc.bytes[next_offset ..]));
     }
-    cx.c_struct(&llvals, true)
+    cx.const_struct(&llvals, true)
 }
 pub fn codegen_static_initializer(
@@ -207,7 +207,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         bug!("simd shuffle field {:?}", field)
                     }
                 }).collect();
-                let llval = bx.cx().c_struct(&values?, false);
+                let llval = bx.cx().const_struct(&values?, false);
                 Ok((llval, c.ty))
             })
             .unwrap_or_else(|_| {
@@ -218,7 +218,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // We've errored, so we don't have to produce working code.
                 let ty = self.monomorphize(&ty);
                 let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
-                (bx.cx().c_undef(llty), ty)
+                (bx.cx().const_undef(llty), ty)
             })
     }
 }

View file

@@ -419,8 +419,8 @@ fn create_funclets(
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let null = bx.cx().c_null(bx.cx().i8p());
-                let sixty_four = bx.cx().c_i32(64);
+                let null = bx.cx().const_null(bx.cx().i8p());
+                let sixty_four = bx.cx().const_i32(64);
                 cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                 cp_bx.br(llbb);
             }

View file

@@ -72,7 +72,7 @@ impl OperandRef<'tcx, &'ll Value> {
                       layout: TyLayout<'tcx>) -> OperandRef<'tcx, &'ll Value> {
         assert!(layout.is_zst());
         OperandRef {
-            val: OperandValue::Immediate(cx.c_undef(layout.immediate_llvm_type(cx))),
+            val: OperandValue::Immediate(cx.const_undef(layout.immediate_llvm_type(cx))),
             layout
         }
     }
@@ -166,7 +166,7 @@ impl OperandRef<'tcx, &'ll Value> {
         debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
                self, llty);
         // Reconstruct the immediate aggregate.
-        let mut llpair = bx.cx().c_undef(llty);
+        let mut llpair = bx.cx().const_undef(llty);
         llpair = bx.insert_value(llpair, base::from_immediate(bx, a), 0);
         llpair = bx.insert_value(llpair, base::from_immediate(bx, b), 1);
         llpair
@@ -231,7 +231,7 @@ impl OperandRef<'tcx, &'ll Value> {
             // `#[repr(simd)]` types are also immediate.
             (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
                 OperandValue::Immediate(
-                    bx.extract_element(llval, bx.cx().c_usize(i as u64)))
+                    bx.extract_element(llval, bx.cx().const_usize(i as u64)))
             }
             _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@@ -462,7 +462,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // We've errored, so we don't have to produce working code.
                 let layout = bx.cx().layout_of(ty);
                 PlaceRef::new_sized(
-                    bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
+                    bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx()))),
                     layout,
                     layout.align,
                 ).load(bx)

View file

@ -68,7 +68,7 @@ impl PlaceRef<'tcx, &'ll Value> {
let llval = unsafe { LLVMConstInBoundsGEP( let llval = unsafe { LLVMConstInBoundsGEP(
consts::bitcast(base_addr, bx.cx().i8p()), consts::bitcast(base_addr, bx.cx().i8p()),
&bx.cx().c_usize(offset.bytes()), &bx.cx().const_usize(offset.bytes()),
1, 1,
)}; )};
let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx()))); let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
@ -102,7 +102,7 @@ impl PlaceRef<'tcx, &'ll Value> {
assert_eq!(count, 0); assert_eq!(count, 0);
self.llextra.unwrap() self.llextra.unwrap()
} else { } else {
cx.c_usize(count) cx.const_usize(count)
} }
} else { } else {
bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout) bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
@ -247,7 +247,7 @@ impl PlaceRef<'tcx, &'ll Value> {
let meta = self.llextra; let meta = self.llextra;
let unaligned_offset = cx.c_usize(offset.bytes()); let unaligned_offset = cx.const_usize(offset.bytes());
// Get the alignment of the field // Get the alignment of the field
let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta); let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
@ -258,7 +258,7 @@ impl PlaceRef<'tcx, &'ll Value> {
// (unaligned offset + (align - 1)) & -align // (unaligned offset + (align - 1)) & -align
// Calculate offset // Calculate offset
let align_sub_1 = bx.sub(unsized_align, cx.c_usize(1u64)); let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
let offset = bx.and(bx.add(unaligned_offset, align_sub_1), let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
bx.neg(unsized_align)); bx.neg(unsized_align));
@ -288,14 +288,14 @@ impl PlaceRef<'tcx, &'ll Value> {
) -> &'ll Value { ) -> &'ll Value {
let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx()); let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
if self.layout.abi.is_uninhabited() { if self.layout.abi.is_uninhabited() {
return bx.cx().c_undef(cast_to); return bx.cx().const_undef(cast_to);
} }
match self.layout.variants { match self.layout.variants {
layout::Variants::Single { index } => { layout::Variants::Single { index } => {
let discr_val = self.layout.ty.ty_adt_def().map_or( let discr_val = self.layout.ty.ty_adt_def().map_or(
index.as_u32() as u128, index.as_u32() as u128,
|def| def.discriminant_for_variant(bx.cx().tcx, index).val); |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
return bx.cx().c_uint_big(cast_to, discr_val); return bx.cx().const_uint_big(cast_to, discr_val);
} }
layout::Variants::Tagged { .. } | layout::Variants::Tagged { .. } |
layout::Variants::NicheFilling { .. } => {}, layout::Variants::NicheFilling { .. } => {},
@ -327,22 +327,22 @@ impl PlaceRef<'tcx, &'ll Value> {
// FIXME(eddyb) Check the actual primitive type here. // FIXME(eddyb) Check the actual primitive type here.
let niche_llval = if niche_start == 0 { let niche_llval = if niche_start == 0 {
// HACK(eddyb) Using `c_null` as it works on all types. // HACK(eddyb) Using `c_null` as it works on all types.
bx.cx().c_null(niche_llty) bx.cx().const_null(niche_llty)
} else { } else {
bx.cx().c_uint_big(niche_llty, niche_start) bx.cx().const_uint_big(niche_llty, niche_start)
}; };
bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval), bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
bx.cx().c_uint(cast_to, niche_variants.start().as_u32() as u64), bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
bx.cx().c_uint(cast_to, dataful_variant.as_u32() as u64)) bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
} else { } else {
// Rebase from niche values to discriminant values. // Rebase from niche values to discriminant values.
let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128); let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
let lldiscr = bx.sub(lldiscr, bx.cx().c_uint_big(niche_llty, delta)); let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
let lldiscr_max = let lldiscr_max =
bx.cx().c_uint(niche_llty, niche_variants.end().as_u32() as u64); bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max), bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
bx.intcast(lldiscr, cast_to, false), bx.intcast(lldiscr, cast_to, false),
bx.cx().c_uint(cast_to, dataful_variant.as_u32() as u64)) bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
} }
} }
} }
@ -364,7 +364,7 @@ impl PlaceRef<'tcx, &'ll Value> {
.discriminant_for_variant(bx.tcx(), variant_index) .discriminant_for_variant(bx.tcx(), variant_index)
.val; .val;
bx.store( bx.store(
bx.cx().c_uint_big(ptr.layout.llvm_type(bx.cx()), to), bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
ptr.llval, ptr.llval,
ptr.align); ptr.align);
} }
@@ -380,10 +380,10 @@ impl PlaceRef<'tcx, &'ll Value> {
                     // Issue #34427: As workaround for LLVM bug on ARM,
                     // use memset of 0 before assigning niche value.
                     let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
-                    let fill_byte = bx.cx().c_u8(0);
+                    let fill_byte = bx.cx().const_u8(0);
                     let (size, align) = self.layout.size_and_align();
-                    let size = bx.cx().c_usize(size.bytes());
-                    let align = bx.cx().c_u32(align.abi() as u32);
+                    let size = bx.cx().const_usize(size.bytes());
+                    let align = bx.cx().const_u32(align.abi() as u32);
                     base::call_memset(bx, llptr, fill_byte, size, align, false);
                 }
@@ -395,9 +395,9 @@ impl PlaceRef<'tcx, &'ll Value> {
                     // FIXME(eddyb) Check the actual primitive type here.
                     let niche_llval = if niche_value == 0 {
                         // HACK(eddyb) Using `c_null` as it works on all types.
-                        bx.cx().c_null(niche_llty)
+                        bx.cx().const_null(niche_llty)
                     } else {
-                        bx.cx().c_uint_big(niche_llty, niche_value)
+                        bx.cx().const_uint_big(niche_llty, niche_value)
                     };
                     OperandValue::Immediate(niche_llval).store(bx, niche);
                 }
@@ -408,7 +408,7 @@ impl PlaceRef<'tcx, &'ll Value> {
     pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                          -> PlaceRef<'tcx, &'ll Value> {
         PlaceRef {
-            llval: bx.inbounds_gep(self.llval, &[bx.cx().c_usize(0), llindex]),
+            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
             llextra: None,
             layout: self.layout.field(bx.cx(), 0),
             align: self.align
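`inbounds_gep` with a leading zero index is the usual LLVM pattern for indexing into an aggregate behind a pointer: the `0` stays on the same `[T; N]` allocation and `llindex` selects the element. A rough Rust-level equivalent of the pointer arithmetic (illustrative only, not the codegen itself):

unsafe fn project_index_equiv<T, const N: usize>(
    place: *const [T; N],
    index: usize,
) -> *const T {
    // gep [0, index]: stay on the same array, step to element `index`.
    (place as *const T).add(index)
}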
@@ -483,7 +483,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 // so we generate an abort
                 let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                 bx.call(fnname, &[], None);
-                let llval = bx.cx().c_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
+                let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
                 PlaceRef::new_sized(llval, layout, layout.align)
             }
         }
@@ -516,20 +516,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::ProjectionElem::ConstantIndex { offset,
                                                  from_end: false,
                                                  min_length: _ } => {
-                let lloffset = bx.cx().c_usize(offset as u64);
+                let lloffset = bx.cx().const_usize(offset as u64);
                 cg_base.project_index(bx, lloffset)
             }
             mir::ProjectionElem::ConstantIndex { offset,
                                                  from_end: true,
                                                  min_length: _ } => {
-                let lloffset = bx.cx().c_usize(offset as u64);
+                let lloffset = bx.cx().const_usize(offset as u64);
                 let lllen = cg_base.len(bx.cx());
                 let llindex = bx.sub(lllen, lloffset);
                 cg_base.project_index(bx, llindex)
             }
             mir::ProjectionElem::Subslice { from, to } => {
                 let mut subslice = cg_base.project_index(bx,
-                    bx.cx().c_usize(from as u64));
+                    bx.cx().const_usize(from as u64));
                 let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                     .projection_ty(tcx, &projection.elem)
                     .to_ty(bx.tcx());
@@ -537,7 +537,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 if subslice.layout.is_unsized() {
                     subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
-                        bx.cx().c_usize((from as u64) + (to as u64))));
+                        bx.cx().const_usize((from as u64) + (to as u64))));
                 }
                 // Cast the place pointer type to the new

View file

@@ -102,15 +102,15 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 return bx;
             }
-            let start = dest.project_index(&bx, bx.cx().c_usize(0)).llval;
+            let start = dest.project_index(&bx, bx.cx().const_usize(0)).llval;
             if let OperandValue::Immediate(v) = cg_elem.val {
-                let align = bx.cx().c_i32(dest.align.abi() as i32);
-                let size = bx.cx().c_usize(dest.layout.size.bytes());
+                let align = bx.cx().const_i32(dest.align.abi() as i32);
+                let size = bx.cx().const_usize(dest.layout.size.bytes());
                 // Use llvm.memset.p0i8.* to initialize all zero arrays
                 if bx.cx().is_const_integral(v) && bx.cx().const_to_uint(v) == 0 {
-                    let fill = bx.cx().c_u8(0);
+                    let fill = bx.cx().const_u8(0);
                     base::call_memset(&bx, start, fill, size, align, false);
                     return bx;
                 }
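The fast path in this hunk replaces the element-by-element repeat loop with a single memset whenever the element is a known all-zero integral immediate. The effect is observable from plain Rust (illustrative, not the codegen itself):

fn main() {
    let a = [0u32; 16];                // may lower to llvm.memset via this fast path
    let mut b = [0u32; 16];
    unsafe {
        // One memset of 16 * 4 bytes produces the same contents.
        std::ptr::write_bytes(b.as_mut_ptr(), 0u8, 16);
    }
    assert_eq!(a, b);
}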
@@ -123,7 +123,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 }
             }
-            let count = bx.cx().c_usize(count);
+            let count = bx.cx().const_usize(count);
             let end = dest.project_index(&bx, count).llval;
             let header_bx = bx.build_sibling_block("repeat_loop_header");
@@ -139,7 +139,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             cg_elem.val.store(&body_bx,
                 PlaceRef::new_sized(current, cg_elem.layout, dest.align));
-            let next = body_bx.inbounds_gep(current, &[bx.cx().c_usize(1)]);
+            let next = body_bx.inbounds_gep(current, &[bx.cx().const_usize(1)]);
             body_bx.br(header_bx.llbb());
             header_bx.add_incoming_to_phi(current, next, body_bx.llbb());
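The header and body blocks plus the phi updated here implement a simple pointer-bumping fill loop. In plain Rust, the generated control flow corresponds roughly to (sketch, helper name assumed):

unsafe fn repeat_fill<T: Copy>(start: *mut T, count: usize, elem: T) {
    let end = start.add(count);
    let mut current = start;      // the phi: start on entry, next on the back-edge
    while current != end {        // repeat_loop_header
        current.write(elem);      // repeat_loop_body: store the element
        current = current.add(1); // inbounds gep by 1
    }
}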
@@ -291,7 +291,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 assert!(cast.is_llvm_immediate());
                 let ll_t_out = cast.immediate_llvm_type(bx.cx());
                 if operand.layout.abi.is_uninhabited() {
-                    let val = OperandValue::Immediate(bx.cx().c_undef(ll_t_out));
+                    let val = OperandValue::Immediate(bx.cx().const_undef(ll_t_out));
                     return (bx, OperandRef {
                         val,
                         layout: cast,
@@ -307,7 +307,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                         let discr_val = def
                             .discriminant_for_variant(bx.cx().tcx, index)
                             .val;
-                        let discr = bx.cx().c_uint_big(ll_t_out, discr_val);
+                        let discr = bx.cx().const_uint_big(ll_t_out, discr_val);
                         return (bx, OperandRef {
                             val: OperandValue::Immediate(discr),
                             layout: cast,
@@ -338,7 +338,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                             base::call_assume(&bx, bx.icmp(
                                 IntPredicate::IntULE,
                                 llval,
-                                bx.cx().c_uint_big(ll_t_in, *scalar.valid_range.end())
+                                bx.cx().const_uint_big(ll_t_in, *scalar.valid_range.end())
                             ));
                         }
                     }
@@ -489,7 +489,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
                 assert!(bx.cx().type_is_sized(ty));
-                let val = bx.cx().c_usize(bx.cx().size_of(ty).bytes());
+                let val = bx.cx().const_usize(bx.cx().size_of(ty).bytes());
                 let tcx = bx.tcx();
                 (bx, OperandRef {
                     val: OperandValue::Immediate(val),
@@ -500,8 +500,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
                 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
                 let (size, align) = bx.cx().size_and_align_of(content_ty);
-                let llsize = bx.cx().c_usize(size.bytes());
-                let llalign = bx.cx().c_usize(align.abi());
+                let llsize = bx.cx().const_usize(size.bytes());
+                let llalign = bx.cx().const_usize(align.abi());
                 let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
                 let llty_ptr = box_layout.llvm_type(bx.cx());
@@ -548,7 +548,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 if let LocalRef::Operand(Some(op)) = self.locals[index] {
                     if let ty::Array(_, n) = op.layout.ty.sty {
                         let n = n.unwrap_usize(bx.cx().tcx);
-                        return bx.cx().c_usize(n);
+                        return bx.cx().const_usize(n);
                     }
                 }
             }
@@ -606,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
             mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
             mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
             mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
-                bx.cx().c_bool(match op {
+                bx.cx().const_bool(match op {
                     mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
                     mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
                     _ => unreachable!()
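For zero-sized operands (`is_unit`), comparisons need no code at all: `Ne`/`Lt`/`Gt` fold to a constant `false` and `Eq`/`Le`/`Ge` to a constant `true`, which is exactly what safe Rust observes:

fn main() {
    // All comparisons on a zero-sized value are compile-time constants.
    assert!(() == ());
    assert!(() <= ());
    assert!(!(() < ()));
}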
@@ -685,7 +685,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
         // while the current crate doesn't use overflow checks.
         if !bx.cx().check_overflow {
             let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-            return OperandValue::Pair(val, bx.cx().c_bool(false));
+            return OperandValue::Pair(val, bx.cx().const_bool(false));
         }
         let (val, of) = match op {
@@ -709,7 +709,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
                 let invert_mask = common::shift_mask_val(&bx, lhs_llty, rhs_llty, true);
                 let outer_bits = bx.and(rhs, invert_mask);
-                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().c_null(rhs_llty));
+                let of = bx.icmp(IntPredicate::IntNE, outer_bits, bx.cx().const_null(rhs_llty));
                 let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
                 (val, of)
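The overflow check for shifts masks off the in-range bits of the shift amount: for a power-of-two bit width, any leftover bit means `rhs >= bit_width`. A standalone sketch (assuming the inverted mask is `!(bit_width - 1)`, which is what a shift mask with `invert = true` produces):

fn shift_overflows(rhs: u32, bit_width: u32) -> bool {
    debug_assert!(bit_width.is_power_of_two());
    let invert_mask = !(bit_width - 1); // shift_mask_val(.., invert = true)
    rhs & invert_mask != 0              // IntNE against zero
}

`shift_overflows(31, 32)` is false, while `shift_overflows(32, 32)` is true.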
@@ -838,9 +838,9 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
         use rustc_apfloat::Float;
         const MAX_F32_PLUS_HALF_ULP: u128 = ((1 << (Single::PRECISION + 1)) - 1)
                                             << (Single::MAX_EXP - Single::PRECISION as i16);
-        let max = bx.cx().c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
+        let max = bx.cx().const_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
         let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
-        let infinity_bits = bx.cx().c_u32(ieee::Single::INFINITY.to_bits() as u32);
+        let infinity_bits = bx.cx().const_u32(ieee::Single::INFINITY.to_bits() as u32);
         let infinity = consts::bitcast(infinity_bits, float_ty);
         bx.select(overflow, infinity, bx.uitofp(x, float_ty))
     } else {
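Working out the constant: with `PRECISION = 24` and `MAX_EXP = 127` for IEEE single, `f32::MAX` is `(2^24 - 1) * 2^104` and its ULP there is `2^104`, so `MAX_F32_PLUS_HALF_ULP` is `(2^25 - 1) * 2^103`. Under round-to-nearest-even, every integer at or above that tie rounds to +infinity, which the select hard-codes for the `u128 -> f32` case. A plain-Rust rendering (illustrative; constant expanded under those assumed parameters):

fn u128_to_f32_sketch(x: u128) -> f32 {
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY // the bitcast-infinity branch above
    } else {
        x as f32      // in-range values: plain uitofp rounding
    }
}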
@@ -918,8 +918,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
     let float_bits_to_llval = |bits| {
         let bits_llval = match bx.cx().float_width(float_ty) {
-            32 => bx.cx().c_u32(bits as u32),
-            64 => bx.cx().c_u64(bits as u64),
+            32 => bx.cx().const_u32(bits as u32),
+            64 => bx.cx().const_u64(bits as u64),
             n => bug!("unsupported float width {}", n),
         };
         consts::bitcast(bits_llval, float_ty)
@@ -974,8 +974,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
     // performed is ultimately up to the backend, but at least x86 does perform them.
     let less_or_nan = bx.fcmp(RealPredicate::RealULT, x, f_min);
     let greater = bx.fcmp(RealPredicate::RealOGT, x, f_max);
-    let int_max = bx.cx().c_uint_big(int_ty, int_max(signed, int_ty));
-    let int_min = bx.cx().c_uint_big(int_ty, int_min(signed, int_ty) as u128);
+    let int_max = bx.cx().const_uint_big(int_ty, int_max(signed, int_ty));
+    let int_min = bx.cx().const_uint_big(int_ty, int_min(signed, int_ty) as u128);
     let s0 = bx.select(less_or_nan, int_min, fptosui_result);
     let s1 = bx.select(greater, int_max, s0);
@@ -984,7 +984,7 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
     // Therefore we only need to execute this step for signed integer types.
     if signed {
         // LLVM has no isNaN predicate, so we use (x == x) instead
-        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().c_uint(int_ty, 0))
+        bx.select(bx.fcmp(RealPredicate::RealOEQ, x, x), s1, bx.cx().const_uint(int_ty, 0))
     } else {
         s1
     }
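Taken together, the last two hunks implement a saturating float-to-int cast with NaN mapped to zero; `x == x` (RealOEQ) is false only for NaN. A plain-Rust sketch with the bounds passed in, since `f_min`/`f_max` are computed elsewhere in this function (illustrative only):

fn saturating_cast(x: f32, f_min: f32, f_max: f32,
                   int_min: i32, int_max: i32) -> i32 {
    let less_or_nan = !(x >= f_min); // RealULT: unordered-or-less, true for NaN
    let greater = x > f_max;         // RealOGT: ordered greater, false for NaN
    let fptosi = x as i32;           // stand-in for the raw fptosi result
    let s0 = if less_or_nan { int_min } else { fptosi };
    let s1 = if greater { int_max } else { s0 };
    // Signed only: LLVM has no isNaN predicate, so (x == x) is used.
    if x == x { s1 } else { 0 }
}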