Use implicit deref instead of BuilderMethods::cx()
parent e45733048e
commit ceb29e2ac4
13 changed files with 270 additions and 271 deletions
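The mechanical change throughout is `bx.cx().foo(...)` to `bx.foo(...)`, which only works if the builder dereferences to the codegen context, so every `&self` method of the context is callable directly on the builder. A minimal, self-contained sketch of that shape (assumed for illustration; the stand-in types and method bodies below are not rustc's actual definitions):

    use std::ops::Deref;

    struct CodegenCx;

    impl CodegenCx {
        fn const_i32(&self, v: i32) -> i32 {
            // Stand-in for emitting an LLVM `i32` constant.
            v
        }
    }

    struct Builder<'a> {
        cx: &'a CodegenCx,
    }

    impl<'a> Deref for Builder<'a> {
        type Target = CodegenCx;
        fn deref(&self) -> &CodegenCx {
            self.cx
        }
    }

    fn main() {
        let cx = CodegenCx;
        let bx = Builder { cx: &cx };
        // Before this commit: explicit accessor. After: implicit deref.
        assert_eq!(bx.cx.const_i32(7), bx.const_i32(7));
    }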
|
@@ -212,7 +212,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
 // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
 let can_store_through_cast_ptr = false;
 if can_store_through_cast_ptr {
-let cast_ptr_llty = bx.cx().type_ptr_to(cast.llvm_type(bx.cx()));
+let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
 let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
 bx.store(val, cast_dst, self.layout.align.abi);
 } else {
@@ -231,9 +231,9 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
 // bitcasting to the struct type yields invalid cast errors.

 // We instead thus allocate some scratch space...
-let scratch_size = cast.size(bx.cx());
-let scratch_align = cast.align(bx.cx());
-let llscratch = bx.alloca(cast.llvm_type(bx.cx()), "abi_cast", scratch_align);
+let scratch_size = cast.size(bx);
+let scratch_align = cast.align(bx);
+let llscratch = bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align);
 bx.lifetime_start(llscratch, scratch_size);

 // ...where we first store the value...
@@ -245,7 +245,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
 self.layout.align.abi,
 llscratch,
 scratch_align,
-bx.cx().const_usize(self.layout.size.bytes()),
+bx.const_usize(self.layout.size.bytes()),
 MemFlags::empty()
 );

@@ -299,7 +299,7 @@ impl ArgTypeMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 ty.store(self, val, dst)
 }
 fn memory_ty(&self, ty: &ArgType<'tcx, Ty<'tcx>>) -> &'ll Type {
-ty.memory_ty(self.cx())
+ty.memory_ty(self)
 }
 }

@@ -780,7 +780,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
 // by the LLVM verifier.
 if let layout::Int(..) = scalar.value {
 if !scalar.is_bool() {
-let range = scalar.valid_range_exclusive(bx.cx());
+let range = scalar.valid_range_exclusive(bx);
 if range.start != range.end {
 bx.range_metadata(callsite, range);
 }
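For orientation, the scratch-space path in the hunks above stores a value whose cast type may be wider than the destination's real layout into a temporary, then copies only `layout.size` bytes out, so the wider store never runs past the destination. A plain-Rust model of that byte-level behavior (hypothetical helper for illustration, not rustc code):

    // val: bytes of the value in its (possibly wider) cast representation.
    fn store_through_scratch(val: &[u8], layout_size: usize, dst: &mut [u8]) {
        // bx.alloca(cast.llvm_type(bx), "abi_cast", scratch_align)
        let mut scratch = vec![0u8; val.len()];
        // ...where we first store the value...
        scratch.copy_from_slice(val);
        // ...and then memcpy only `layout.size` bytes to the destination.
        dst[..layout_size].copy_from_slice(&scratch[..layout_size]);
    }

    fn main() {
        let val = [1u8, 2, 3, 4]; // e.g. an i32-shaped cast of a 3-byte value
        let mut dst = [0u8; 3];
        store_through_scratch(&val, 3, &mut dst);
        assert_eq!(dst, [1, 2, 3]);
    }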
@@ -57,7 +57,7 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {

 // Default per-arch clobbers
 // Basically what clang does
-let arch_clobbers = match &self.cx().sess().target.target.arch[..] {
+let arch_clobbers = match &self.sess().target.target.arch[..] {
 "x86" | "x86_64" => vec!["~{dirflag}", "~{fpsr}", "~{flags}"],
 "mips" | "mips64" => vec!["~{$1}"],
 _ => Vec::new()
@@ -76,9 +76,9 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 // Depending on how many outputs we have, the return type is different
 let num_outputs = output_types.len();
 let output_type = match num_outputs {
-0 => self.cx().type_void(),
+0 => self.type_void(),
 1 => output_types[0],
-_ => self.cx().type_struct(&output_types, false)
+_ => self.type_struct(&output_types, false)
 };

 let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -108,13 +108,13 @@ impl AsmBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 // back to source locations. See #17552.
 unsafe {
 let key = "srcloc";
-let kind = llvm::LLVMGetMDKindIDInContext(self.cx().llcx,
+let kind = llvm::LLVMGetMDKindIDInContext(self.llcx,
 key.as_ptr() as *const c_char, key.len() as c_uint);

-let val: &'ll Value = self.cx().const_i32(ia.ctxt.outer().as_u32() as i32);
+let val: &'ll Value = self.const_i32(ia.ctxt.outer().as_u32() as i32);

 llvm::LLVMSetMetadata(r, kind,
-llvm::LLVMMDNodeInContext(self.cx().llcx, &val, 1));
+llvm::LLVMMDNodeInContext(self.llcx, &val, 1));
 }

 true
@@ -143,11 +143,11 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 }

 fn count_insn(&self, category: &str) {
-if self.cx().sess().codegen_stats() {
-self.cx().stats.borrow_mut().n_llvm_insns += 1;
+if self.sess().codegen_stats() {
+self.stats.borrow_mut().n_llvm_insns += 1;
 }
-if self.cx().sess().count_llvm_insns() {
-*self.cx().stats
+if self.sess().count_llvm_insns() {
+*self.stats
 .borrow_mut()
 .llvm_insns
 .entry(category.to_string())
@@ -475,8 +475,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 use rustc::ty::{Int, Uint};

 let new_sty = match ty.sty {
-Int(Isize) => Int(self.cx().tcx.sess.target.isize_ty),
-Uint(Usize) => Uint(self.cx().tcx.sess.target.usize_ty),
+Int(Isize) => Int(self.tcx.sess.target.isize_ty),
+Uint(Usize) => Uint(self.tcx.sess.target.usize_ty),
 ref t @ Uint(_) | ref t @ Int(_) => t.clone(),
 _ => panic!("tried to get overflow intrinsic for op applied to non-int type")
 };
@@ -529,7 +529,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 },
 };

-let intrinsic = self.cx().get_intrinsic(&name);
+let intrinsic = self.get_intrinsic(&name);
 let res = self.call(intrinsic, &[lhs, rhs], None);
 (
 self.extract_value(res, 0),
@@ -637,7 +637,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 let vr = scalar.valid_range.clone();
 match scalar.value {
 layout::Int(..) => {
-let range = scalar.valid_range_exclusive(bx.cx());
+let range = scalar.valid_range_exclusive(bx);
 if range.start != range.end {
 bx.range_metadata(load, range);
 }
@@ -676,7 +676,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 let load = self.load(llptr, align);
 scalar_load_metadata(self, load, scalar);
 if scalar.is_bool() {
-self.trunc(load, self.cx().type_i1())
+self.trunc(load, self.type_i1())
 } else {
 load
 }
@@ -696,7 +696,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {


 fn range_metadata(&mut self, load: &'ll Value, range: Range<u128>) {
-if self.cx().sess().target.target.arch == "amdgpu" {
+if self.sess().target.target.arch == "amdgpu" {
 // amdgpu/LLVM does something weird and thinks a i64 value is
 // split into a v2i32, halving the bitwidth LLVM expects,
 // tripping an assertion. So, for now, just disable this
@@ -942,7 +942,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 }).collect::<Vec<_>>();

 debug!("Asm Output Type: {:?}", output);
-let fty = self.cx().type_func(&argtys[..], output);
+let fty = self.type_func(&argtys[..], output);
 unsafe {
 // Ask LLVM to verify that the constraints are well-formed.
 let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_ptr());
@@ -970,14 +970,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 if flags.contains(MemFlags::NONTEMPORAL) {
 // HACK(nox): This is inefficient but there is no nontemporal memcpy.
 let val = self.load(src, src_align);
-let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
 self.store_with_flags(val, ptr, dst_align, flags);
 return;
 }
-let size = self.intcast(size, self.cx().type_isize(), false);
+let size = self.intcast(size, self.type_isize(), false);
 let is_volatile = flags.contains(MemFlags::VOLATILE);
-let dst = self.pointercast(dst, self.cx().type_i8p());
-let src = self.pointercast(src, self.cx().type_i8p());
+let dst = self.pointercast(dst, self.type_i8p());
+let src = self.pointercast(src, self.type_i8p());
 unsafe {
 llvm::LLVMRustBuildMemCpy(self.llbuilder, dst, dst_align.bytes() as c_uint,
 src, src_align.bytes() as c_uint, size, is_volatile);
@@ -990,14 +990,14 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 if flags.contains(MemFlags::NONTEMPORAL) {
 // HACK(nox): This is inefficient but there is no nontemporal memmove.
 let val = self.load(src, src_align);
-let ptr = self.pointercast(dst, self.cx().type_ptr_to(self.cx().val_ty(val)));
+let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
 self.store_with_flags(val, ptr, dst_align, flags);
 return;
 }
-let size = self.intcast(size, self.cx().type_isize(), false);
+let size = self.intcast(size, self.type_isize(), false);
 let is_volatile = flags.contains(MemFlags::VOLATILE);
-let dst = self.pointercast(dst, self.cx().type_i8p());
-let src = self.pointercast(src, self.cx().type_i8p());
+let dst = self.pointercast(dst, self.type_i8p());
+let src = self.pointercast(src, self.type_i8p());
 unsafe {
 llvm::LLVMRustBuildMemMove(self.llbuilder, dst, dst_align.bytes() as c_uint,
 src, src_align.bytes() as c_uint, size, is_volatile);
@@ -1012,12 +1012,12 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 align: Align,
 flags: MemFlags,
 ) {
-let ptr_width = &self.cx().sess().target.target.target_pointer_width;
+let ptr_width = &self.sess().target.target.target_pointer_width;
 let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-let llintrinsicfn = self.cx().get_intrinsic(&intrinsic_key);
-let ptr = self.pointercast(ptr, self.cx().type_i8p());
-let align = self.cx().const_u32(align.bytes() as u32);
-let volatile = self.cx().const_bool(flags.contains(MemFlags::VOLATILE));
+let llintrinsicfn = self.get_intrinsic(&intrinsic_key);
+let ptr = self.pointercast(ptr, self.type_i8p());
+let align = self.const_u32(align.bytes() as u32);
+let volatile = self.const_bool(flags.contains(MemFlags::VOLATILE));
 self.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None);
 }

@@ -1083,10 +1083,10 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 fn vector_splat(&mut self, num_elts: usize, elt: &'ll Value) -> &'ll Value {
 unsafe {
 let elt_ty = self.cx.val_ty(elt);
-let undef = llvm::LLVMGetUndef(self.cx().type_vector(elt_ty, num_elts as u64));
+let undef = llvm::LLVMGetUndef(self.type_vector(elt_ty, num_elts as u64));
 let vec = self.insert_element(undef, elt, self.cx.const_i32(0));
-let vec_i32_ty = self.cx().type_vector(self.cx().type_i32(), num_elts as u64);
-self.shuffle_vector(vec, undef, self.cx().const_null(vec_i32_ty))
+let vec_i32_ty = self.type_vector(self.type_i32(), num_elts as u64);
+self.shuffle_vector(vec, undef, self.const_null(vec_i32_ty))
 }
 }

@@ -1397,7 +1397,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 let param_tys = self.cx.func_params_types(fn_ty);

 let all_args_match = param_tys.iter()
-.zip(args.iter().map(|&v| self.cx().val_ty(v)))
+.zip(args.iter().map(|&v| self.val_ty(v)))
 .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty);

 if all_args_match {
@@ -1408,7 +1408,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
 .zip(args.iter())
 .enumerate()
 .map(|(i, (expected_ty, &actual_val))| {
-let actual_ty = self.cx().val_ty(actual_val);
+let actual_ty = self.val_ty(actual_val);
 if expected_ty != actual_ty {
 debug!("Type mismatch in function call of {:?}. \
 Expected {:?} for param {}, got {:?}; injecting bitcast",
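The `vector_splat` hunk above uses the classic LLVM broadcast idiom: insert the element into lane 0 of an undef vector, then `shufflevector` with an all-zero mask so every output lane reads lane 0. A plain-Rust model of those semantics (illustrative only, not the builder API):

    fn splat<const N: usize>(elt: i32) -> [i32; N] {
        // LLVMGetUndef(type_vector(elt_ty, N)), modeled as a zeroed array.
        let mut vec = [0i32; N];
        // insert_element(undef, elt, const_i32(0))
        vec[0] = elt;
        // shuffle_vector(vec, undef, const_null(vec_i32_ty)): an all-zero
        // mask makes every output lane read lane 0 of the first operand.
        let mut out = [0i32; N];
        for lane in out.iter_mut() {
            *lane = vec[0];
        }
        out
    }

    fn main() {
        assert_eq!(splat::<4>(7), [7, 7, 7, 7]);
    }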
@@ -24,11 +24,11 @@ use syntax::attr;
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder) {
-if needs_gdb_debug_scripts_section(bx.cx()) {
-let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
+if needs_gdb_debug_scripts_section(bx) {
+let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
 // Load just the first byte as that's all that's necessary to force
 // LLVM to keep around the reference to the global.
-let indices = [bx.cx().const_i32(0), bx.cx().const_i32(0)];
+let indices = [bx.const_i32(0), bx.const_i32(0)];
 let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
 let volative_load_instruction = bx.volatile_load(element);
 unsafe {
@@ -41,7 +41,7 @@ pub fn set_source_location<D>(
 };

 let dbg_loc = if function_debug_context.source_locations_enabled.get() {
-debug!("set_source_location: {}", bx.cx().sess().source_map().span_to_string(span));
+debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
 let loc = span_start(bx.cx(), span);
 InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
 } else {
@@ -76,7 +76,7 @@ pub fn set_debug_location(
 // For MSVC, set the column number to zero.
 // Otherwise, emit it. This mimics clang behaviour.
 // See discussion in https://github.com/rust-lang/rust/issues/42921
-let col_used = if bx.cx().sess().target.target.options.is_like_msvc {
+let col_used = if bx.sess().target.target.options.is_like_msvc {
 UNKNOWN_COLUMN_NUMBER
 } else {
 col as c_uint
@@ -96,7 +96,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 llresult: &'ll Value,
 span: Span,
 ) {
-let tcx = self.cx().tcx;
+let tcx = self.tcx;

 let (def_id, substs) = match callee_ty.sty {
 ty::FnDef(def_id, substs) => (def_id, substs),
@@ -109,10 +109,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 let ret_ty = sig.output();
 let name = &*tcx.item_name(def_id).as_str();

-let llret_ty = self.cx().layout_of(ret_ty).llvm_type(self.cx());
+let llret_ty = self.layout_of(ret_ty).llvm_type(self);
 let result = PlaceRef::new_sized(llresult, fn_ty.ret.layout, fn_ty.ret.layout.align.abi);

-let simple = get_simple_intrinsic(self.cx(), name);
+let simple = get_simple_intrinsic(self, name);
 let llval = match name {
 _ if simple.is_some() => {
 self.call(simple.unwrap(),
@@ -123,12 +123,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 return;
 },
 "likely" => {
-let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
-self.call(expect, &[args[0].immediate(), self.cx().const_bool(true)], None)
+let expect = self.get_intrinsic(&("llvm.expect.i1"));
+self.call(expect, &[args[0].immediate(), self.const_bool(true)], None)
 }
 "unlikely" => {
-let expect = self.cx().get_intrinsic(&("llvm.expect.i1"));
-self.call(expect, &[args[0].immediate(), self.cx().const_bool(false)], None)
+let expect = self.get_intrinsic(&("llvm.expect.i1"));
+self.call(expect, &[args[0].immediate(), self.const_bool(false)], None)
 }
 "try" => {
 try_intrinsic(self,
@@ -139,12 +139,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 return;
 }
 "breakpoint" => {
-let llfn = self.cx().get_intrinsic(&("llvm.debugtrap"));
+let llfn = self.get_intrinsic(&("llvm.debugtrap"));
 self.call(llfn, &[], None)
 }
 "size_of" => {
 let tp_ty = substs.type_at(0);
-self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
+self.const_usize(self.size_of(tp_ty).bytes())
 }
 "size_of_val" => {
 let tp_ty = substs.type_at(0);
@@ -153,12 +153,12 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 glue::size_and_align_of_dst(self, tp_ty, Some(meta));
 llsize
 } else {
-self.cx().const_usize(self.cx().size_of(tp_ty).bytes())
+self.const_usize(self.size_of(tp_ty).bytes())
 }
 }
 "min_align_of" => {
 let tp_ty = substs.type_at(0);
-self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
+self.const_usize(self.align_of(tp_ty).bytes())
 }
 "min_align_of_val" => {
 let tp_ty = substs.type_at(0);
@@ -167,24 +167,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 glue::size_and_align_of_dst(self, tp_ty, Some(meta));
 llalign
 } else {
-self.cx().const_usize(self.cx().align_of(tp_ty).bytes())
+self.const_usize(self.align_of(tp_ty).bytes())
 }
 }
 "pref_align_of" => {
 let tp_ty = substs.type_at(0);
-self.cx().const_usize(self.cx().layout_of(tp_ty).align.pref.bytes())
+self.const_usize(self.layout_of(tp_ty).align.pref.bytes())
 }
 "type_name" => {
 let tp_ty = substs.type_at(0);
 let ty_name = Symbol::intern(&tp_ty.to_string()).as_str();
-self.cx().const_str_slice(ty_name)
+self.const_str_slice(ty_name)
 }
 "type_id" => {
-self.cx().const_u64(self.cx().tcx.type_id_hash(substs.type_at(0)))
+self.const_u64(self.tcx.type_id_hash(substs.type_at(0)))
 }
 "init" => {
 let ty = substs.type_at(0);
-if !self.cx().layout_of(ty).is_zst() {
+if !self.layout_of(ty).is_zst() {
 // Just zero out the stack slot.
 // If we store a zero constant, LLVM will drown in vreg allocation for large
 // data structures, and the generated code will be awful. (A telltale sign of
@@ -194,8 +194,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 false,
 ty,
 llresult,
-self.cx().const_u8(0),
-self.cx().const_usize(1)
+self.const_u8(0),
+self.const_usize(1)
 );
 }
 return;
@@ -207,7 +207,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 "needs_drop" => {
 let tp_ty = substs.type_at(0);

-self.cx().const_bool(self.cx().type_needs_drop(tp_ty))
+self.const_bool(self.type_needs_drop(tp_ty))
 }
 "offset" => {
 let ptr = args[0].immediate();
@@ -255,18 +255,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 let tp_ty = substs.type_at(0);
 let mut ptr = args[0].immediate();
 if let PassMode::Cast(ty) = fn_ty.ret.mode {
-ptr = self.pointercast(ptr, self.cx().type_ptr_to(ty.llvm_type(self.cx())));
+ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
 }
 let load = self.volatile_load(ptr);
 let align = if name == "unaligned_volatile_load" {
 1
 } else {
-self.cx().align_of(tp_ty).bytes() as u32
+self.align_of(tp_ty).bytes() as u32
 };
 unsafe {
 llvm::LLVMSetAlignment(load, align);
 }
-to_immediate(self, load, self.cx().layout_of(tp_ty))
+to_immediate(self, load, self.layout_of(tp_ty))
 },
 "volatile_store" => {
 let dst = args[0].deref(self.cx());
@@ -280,7 +280,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 },
 "prefetch_read_data" | "prefetch_write_data" |
 "prefetch_read_instruction" | "prefetch_write_instruction" => {
-let expect = self.cx().get_intrinsic(&("llvm.prefetch"));
+let expect = self.get_intrinsic(&("llvm.prefetch"));
 let (rw, cache_type) = match name {
 "prefetch_read_data" => (0, 1),
 "prefetch_write_data" => (1, 1),
@@ -290,9 +290,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 };
 self.call(expect, &[
 args[0].immediate(),
-self.cx().const_i32(rw),
+self.const_i32(rw),
 args[1].immediate(),
-self.cx().const_i32(cache_type)
+self.const_i32(cache_type)
 ], None)
 },
 "ctlz" | "ctlz_nonzero" | "cttz" | "cttz_nonzero" | "ctpop" | "bswap" |
@@ -301,24 +301,24 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 "unchecked_div" | "unchecked_rem" | "unchecked_shl" | "unchecked_shr" | "exact_div" |
 "rotate_left" | "rotate_right" => {
 let ty = arg_tys[0];
-match int_type_width_signed(ty, self.cx()) {
+match int_type_width_signed(ty, self) {
 Some((width, signed)) =>
 match name {
 "ctlz" | "cttz" => {
-let y = self.cx().const_bool(false);
-let llfn = self.cx().get_intrinsic(
+let y = self.const_bool(false);
+let llfn = self.get_intrinsic(
 &format!("llvm.{}.i{}", name, width),
 );
 self.call(llfn, &[args[0].immediate(), y], None)
 }
 "ctlz_nonzero" | "cttz_nonzero" => {
-let y = self.cx().const_bool(true);
+let y = self.const_bool(true);
 let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
-let llfn = self.cx().get_intrinsic(llvm_name);
+let llfn = self.get_intrinsic(llvm_name);
 self.call(llfn, &[args[0].immediate(), y], None)
 }
 "ctpop" => self.call(
-self.cx().get_intrinsic(&format!("llvm.ctpop.i{}", width)),
+self.get_intrinsic(&format!("llvm.ctpop.i{}", width)),
 &[args[0].immediate()],
 None
 ),
@@ -327,7 +327,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 args[0].immediate() // byte swap a u8/i8 is just a no-op
 } else {
 self.call(
-self.cx().get_intrinsic(
+self.get_intrinsic(
 &format!("llvm.bswap.i{}", width),
 ),
 &[args[0].immediate()],
@@ -337,7 +337,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 }
 "bitreverse" => {
 self.call(
-self.cx().get_intrinsic(
+self.get_intrinsic(
 &format!("llvm.bitreverse.i{}", width),
 ),
 &[args[0].immediate()],
@@ -348,7 +348,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
 if signed { 's' } else { 'u' },
 &name[..3], width);
-let llfn = self.cx().get_intrinsic(&intrinsic);
+let llfn = self.get_intrinsic(&intrinsic);

 // Convert `i1` to a `bool`, and write it to the out parameter
 let pair = self.call(llfn, &[
@@ -357,7 +357,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 ], None);
 let val = self.extract_value(pair, 0);
 let overflow = self.extract_value(pair, 1);
-let overflow = self.zext(overflow, self.cx().type_bool());
+let overflow = self.zext(overflow, self.type_bool());

 let dest = result.project_field(self, 0);
 self.store(val, dest.llval, dest.align);
@@ -402,13 +402,13 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 // rotate = funnel shift with first two args the same
 let llvm_name = &format!("llvm.fsh{}.i{}",
 if is_left { 'l' } else { 'r' }, width);
-let llfn = self.cx().get_intrinsic(llvm_name);
+let llfn = self.get_intrinsic(llvm_name);
 self.call(llfn, &[val, val, raw_shift], None)
 } else {
 // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
 // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
-let width = self.cx().const_uint(
-self.cx().type_ix(width),
+let width = self.const_uint(
+self.type_ix(width),
 width,
 );
 let shift = self.urem(raw_shift, width);
@@ -496,16 +496,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 (SequentiallyConsistent, Monotonic),
 "failacq" if is_cxchg =>
 (SequentiallyConsistent, Acquire),
-_ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
+_ => self.sess().fatal("unknown ordering in atomic intrinsic")
 },
 4 => match (split[2], split[3]) {
 ("acq", "failrelaxed") if is_cxchg =>
 (Acquire, Monotonic),
 ("acqrel", "failrelaxed") if is_cxchg =>
 (AcquireRelease, Monotonic),
-_ => self.cx().sess().fatal("unknown ordering in atomic intrinsic")
+_ => self.sess().fatal("unknown ordering in atomic intrinsic")
 },
-_ => self.cx().sess().fatal("Atomic intrinsic not in correct format"),
+_ => self.sess().fatal("Atomic intrinsic not in correct format"),
 };

 let invalid_monomorphization = |ty| {
@@ -517,7 +517,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 match split[1] {
 "cxchg" | "cxchgweak" => {
 let ty = substs.type_at(0);
-if int_type_width_signed(ty, self.cx()).is_some() {
+if int_type_width_signed(ty, self).is_some() {
 let weak = split[1] == "cxchgweak";
 let pair = self.atomic_cmpxchg(
 args[0].immediate(),
@@ -528,7 +528,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 weak);
 let val = self.extract_value(pair, 0);
 let success = self.extract_value(pair, 1);
-let success = self.zext(success, self.cx().type_bool());
+let success = self.zext(success, self.type_bool());

 let dest = result.project_field(self, 0);
 self.store(val, dest.llval, dest.align);
@@ -542,8 +542,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

 "load" => {
 let ty = substs.type_at(0);
-if int_type_width_signed(ty, self.cx()).is_some() {
-let size = self.cx().size_of(ty);
+if int_type_width_signed(ty, self).is_some() {
+let size = self.size_of(ty);
 self.atomic_load(args[0].immediate(), order, size)
 } else {
 return invalid_monomorphization(ty);
@@ -552,8 +552,8 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

 "store" => {
 let ty = substs.type_at(0);
-if int_type_width_signed(ty, self.cx()).is_some() {
-let size = self.cx().size_of(ty);
+if int_type_width_signed(ty, self).is_some() {
+let size = self.size_of(ty);
 self.atomic_store(
 args[1].immediate(),
 args[0].immediate(),
@@ -590,11 +590,11 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 "min" => AtomicRmwBinOp::AtomicMin,
 "umax" => AtomicRmwBinOp::AtomicUMax,
 "umin" => AtomicRmwBinOp::AtomicUMin,
-_ => self.cx().sess().fatal("unknown atomic operation")
+_ => self.sess().fatal("unknown atomic operation")
 };

 let ty = substs.type_at(0);
-if int_type_width_signed(ty, self.cx()).is_some() {
+if int_type_width_signed(ty, self).is_some() {
 self.atomic_rmw(
 atom_op,
 args[0].immediate(),
@@ -681,7 +681,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 // This assumes the type is "simple", i.e. no
 // destructors, and the contents are SIMD
 // etc.
-assert!(!bx.cx().type_needs_drop(arg.layout.ty));
+assert!(!bx.type_needs_drop(arg.layout.ty));
 let (ptr, align) = match arg.val {
 OperandValue::Ref(ptr, None, align) => (ptr, align),
 _ => bug!()
@@ -693,21 +693,21 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 }).collect()
 }
 intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
-let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
-vec![bx.pointercast(arg.immediate(), bx.cx().type_ptr_to(llvm_elem))]
+let llvm_elem = one(ty_to_type(bx, llvm_elem));
+vec![bx.pointercast(arg.immediate(), bx.type_ptr_to(llvm_elem))]
 }
 intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
-let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
+let llvm_elem = one(ty_to_type(bx, llvm_elem));
 vec![
 bx.bitcast(arg.immediate(),
-bx.cx().type_vector(llvm_elem, length as u64))
+bx.type_vector(llvm_elem, length as u64))
 ]
 }
 intrinsics::Type::Integer(_, width, llvm_width) if width != llvm_width => {
 // the LLVM intrinsic uses a smaller integer
 // size than the C intrinsic's signature, so
 // we have to trim it down here.
-vec![bx.trunc(arg.immediate(), bx.cx().type_ix(llvm_width as u64))]
+vec![bx.trunc(arg.immediate(), bx.type_ix(llvm_width as u64))]
 }
 _ => vec![arg.immediate()],
 }
@@ -715,10 +715,10 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {


 let inputs = intr.inputs.iter()
-.flat_map(|t| ty_to_type(self.cx(), t))
+.flat_map(|t| ty_to_type(self, t))
 .collect::<Vec<_>>();

-let outputs = one(ty_to_type(self.cx(), &intr.output));
+let outputs = one(ty_to_type(self, &intr.output));

 let llargs: Vec<_> = intr.inputs.iter().zip(args).flat_map(|(t, arg)| {
 modify_as_needed(self, t, arg)
@@ -727,9 +727,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

 let val = match intr.definition {
 intrinsics::IntrinsicDef::Named(name) => {
-let f = self.cx().declare_cfn(
+let f = self.declare_cfn(
 name,
-self.cx().type_func(&inputs, outputs),
+self.type_func(&inputs, outputs),
 );
 self.call(f, &llargs, None)
 }
@@ -754,7 +754,7 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {

 if !fn_ty.ret.is_ignore() {
 if let PassMode::Cast(ty) = fn_ty.ret.mode {
-let ptr_llty = self.cx().type_ptr_to(ty.llvm_type(self.cx()));
+let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
 let ptr = self.pointercast(result.llval, ptr_llty);
 self.store(llval, ptr, result.align);
 } else {
@@ -765,18 +765,18 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
 }

 fn abort(&mut self) {
-let fnname = self.cx().get_intrinsic(&("llvm.trap"));
+let fnname = self.get_intrinsic(&("llvm.trap"));
 self.call(fnname, &[], None);
 }

 fn assume(&mut self, val: Self::Value) {
-let assume_intrinsic = self.cx().get_intrinsic("llvm.assume");
+let assume_intrinsic = self.get_intrinsic("llvm.assume");
 self.call(assume_intrinsic, &[val], None);
 }

 fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value {
-let expect = self.cx().get_intrinsic(&"llvm.expect.i1");
-self.call(expect, &[cond, self.cx().const_bool(expected)], None)
+let expect = self.get_intrinsic(&"llvm.expect.i1");
+self.call(expect, &[cond, self.const_bool(expected)], None)
 }
 }

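The rotate fallback in the `@@ -402` hunk above encodes `rotate_left` as `(X << (S % BW)) | (X >> ((BW - S) % BW))`, with both shift amounts reduced modulo the bit width so a shift count of zero stays well-defined. The same arithmetic in plain Rust, checked against the standard library (u32 chosen arbitrarily for the sketch):

    fn rotate_left_fallback(x: u32, s: u32) -> u32 {
        const BW: u32 = 32;
        // (X << (S % BW)) | (X >> ((BW - S) % BW)), as in the hunk's comment.
        let shl = s % BW;
        let shr = BW.wrapping_sub(s) % BW;
        // Both shift amounts end up < BW; when shl == 0 the other side also
        // shifts by 0, so the expression degenerates to X rather than
        // shifting by the full bit width (which would be undefined).
        x.wrapping_shl(shl) | x.wrapping_shr(shr)
    }

    fn main() {
        for s in [0, 1, 13, 31, 32, 45] {
            assert_eq!(rotate_left_fallback(0xDEAD_BEEF, s),
                       0xDEAD_BEEFu32.rotate_left(s));
        }
    }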
@@ -789,8 +789,8 @@ fn copy_intrinsic(
 src: &'ll Value,
 count: &'ll Value,
 ) {
-let (size, align) = bx.cx().size_and_align_of(ty);
-let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
+let (size, align) = bx.size_and_align_of(ty);
+let size = bx.mul(bx.const_usize(size.bytes()), count);
 let flags = if volatile {
 MemFlags::VOLATILE
 } else {
@@ -811,8 +811,8 @@ fn memset_intrinsic(
 val: &'ll Value,
 count: &'ll Value
 ) {
-let (size, align) = bx.cx().size_and_align_of(ty);
-let size = bx.mul(bx.cx().const_usize(size.bytes()), count);
+let (size, align) = bx.size_and_align_of(ty);
+let size = bx.mul(bx.const_usize(size.bytes()), count);
 let flags = if volatile {
 MemFlags::VOLATILE
 } else {
@@ -828,11 +828,11 @@ fn try_intrinsic(
 local_ptr: &'ll Value,
 dest: &'ll Value,
 ) {
-if bx.cx().sess().no_landing_pads() {
+if bx.sess().no_landing_pads() {
 bx.call(func, &[data], None);
 let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-bx.store(bx.cx().const_null(bx.cx().type_i8p()), dest, ptr_align);
-} else if wants_msvc_seh(bx.cx().sess()) {
+bx.store(bx.const_null(bx.type_i8p()), dest, ptr_align);
+} else if wants_msvc_seh(bx.sess()) {
 codegen_msvc_try(bx, func, data, local_ptr, dest);
 } else {
 codegen_gnu_try(bx, func, data, local_ptr, dest);
@@ -853,8 +853,8 @@ fn codegen_msvc_try(
 local_ptr: &'ll Value,
 dest: &'ll Value,
 ) {
-let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
-bx.set_personality_fn(bx.cx().eh_personality());
+let llfn = get_rust_try_fn(bx, &mut |mut bx| {
+bx.set_personality_fn(bx.eh_personality());

 let mut normal = bx.build_sibling_block("normal");
 let mut catchswitch = bx.build_sibling_block("catchswitch");
@@ -904,12 +904,12 @@ fn codegen_msvc_try(
 // }
 //
 // More information can be found in libstd's seh.rs implementation.
-let i64p = bx.cx().type_ptr_to(bx.cx().type_i64());
+let i64p = bx.type_ptr_to(bx.type_i64());
 let ptr_align = bx.tcx().data_layout.pointer_align.abi;
 let slot = bx.alloca(i64p, "slot", ptr_align);
 bx.invoke(func, &[data], normal.llbb(), catchswitch.llbb(), None);

-normal.ret(bx.cx().const_i32(0));
+normal.ret(bx.const_i32(0));

 let cs = catchswitch.catch_switch(None, None, 1);
 catchswitch.add_handler(cs, catchpad.llbb());
@@ -918,12 +918,12 @@ fn codegen_msvc_try(
 Some(did) => bx.cx().get_static(did),
 None => bug!("msvc_try_filter not defined"),
 };
-let funclet = catchpad.catch_pad(cs, &[tydesc, bx.cx().const_i32(0), slot]);
+let funclet = catchpad.catch_pad(cs, &[tydesc, bx.const_i32(0), slot]);
 let addr = catchpad.load(slot, ptr_align);

 let i64_align = bx.tcx().data_layout.i64_align.abi;
 let arg1 = catchpad.load(addr, i64_align);
-let val1 = bx.cx().const_i32(1);
+let val1 = bx.const_i32(1);
 let gep1 = catchpad.inbounds_gep(addr, &[val1]);
 let arg2 = catchpad.load(gep1, i64_align);
 let local_ptr = catchpad.bitcast(local_ptr, i64p);
@@ -932,7 +932,7 @@ fn codegen_msvc_try(
 catchpad.store(arg2, gep2, i64_align);
 catchpad.catch_ret(&funclet, caught.llbb());

-caught.ret(bx.cx().const_i32(1));
+caught.ret(bx.const_i32(1));
 });

 // Note that no invoke is used here because by definition this function
@@ -960,7 +960,7 @@ fn codegen_gnu_try(
 local_ptr: &'ll Value,
 dest: &'ll Value,
 ) {
-let llfn = get_rust_try_fn(bx.cx(), &mut |mut bx| {
+let llfn = get_rust_try_fn(bx, &mut |mut bx| {
 // Codegens the shims described above:
 //
 // bx:
@@ -985,7 +985,7 @@ fn codegen_gnu_try(
 let data = llvm::get_param(bx.llfn(), 1);
 let local_ptr = llvm::get_param(bx.llfn(), 2);
 bx.invoke(func, &[data], then.llbb(), catch.llbb(), None);
-then.ret(bx.cx().const_i32(0));
+then.ret(bx.const_i32(0));

 // Type indicator for the exception being thrown.
 //
@@ -993,14 +993,14 @@ fn codegen_gnu_try(
 // being thrown. The second value is a "selector" indicating which of
 // the landing pad clauses the exception's type had been matched to.
 // rust_try ignores the selector.
-let lpad_ty = bx.cx().type_struct(&[bx.cx().type_i8p(), bx.cx().type_i32()], false);
-let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
-catch.add_clause(vals, bx.cx().const_null(bx.cx().type_i8p()));
+let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+let vals = catch.landing_pad(lpad_ty, bx.eh_personality(), 1);
+catch.add_clause(vals, bx.const_null(bx.type_i8p()));
 let ptr = catch.extract_value(vals, 0);
 let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-let bitcast = catch.bitcast(local_ptr, bx.cx().type_ptr_to(bx.cx().type_i8p()));
+let bitcast = catch.bitcast(local_ptr, bx.type_ptr_to(bx.type_i8p()));
 catch.store(ptr, bitcast, ptr_align);
-catch.ret(bx.cx().const_i32(1));
+catch.ret(bx.const_i32(1));
 });

 // Note that no invoke is used here because by definition this function
@ -1081,7 +1081,7 @@ fn generic_simd_intrinsic(
|
||||||
};
|
};
|
||||||
($msg: tt, $($fmt: tt)*) => {
|
($msg: tt, $($fmt: tt)*) => {
|
||||||
span_invalid_monomorphization_error(
|
span_invalid_monomorphization_error(
|
||||||
bx.cx().sess(), span,
|
bx.sess(), span,
|
||||||
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
|
&format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
|
||||||
name, $($fmt)*));
|
name, $($fmt)*));
|
||||||
}
|
}
|
||||||
|
@ -1142,7 +1142,7 @@ fn generic_simd_intrinsic(
|
||||||
found `{}` with length {}",
|
found `{}` with length {}",
|
||||||
in_len, in_ty,
|
in_len, in_ty,
|
||||||
ret_ty, out_len);
|
ret_ty, out_len);
|
||||||
require!(bx.cx().type_kind(bx.cx().element_type(llret_ty)) == TypeKind::Integer,
|
require!(bx.type_kind(bx.element_type(llret_ty)) == TypeKind::Integer,
|
||||||
"expected return type with integer elements, found `{}` with non-integer `{}`",
|
"expected return type with integer elements, found `{}` with non-integer `{}`",
|
||||||
ret_ty,
|
ret_ty,
|
||||||
ret_ty.simd_type(tcx));
|
ret_ty.simd_type(tcx));
|
||||||
|
@ -1178,8 +1178,8 @@ fn generic_simd_intrinsic(
|
||||||
let indices: Option<Vec<_>> = (0..n)
|
let indices: Option<Vec<_>> = (0..n)
|
||||||
.map(|i| {
|
.map(|i| {
|
||||||
let arg_idx = i;
|
let arg_idx = i;
|
||||||
let val = bx.cx().const_get_elt(vector, i as u64);
|
let val = bx.const_get_elt(vector, i as u64);
|
||||||
match bx.cx().const_to_opt_u128(val, true) {
|
match bx.const_to_opt_u128(val, true) {
|
||||||
None => {
|
None => {
|
||||||
emit_error!("shuffle index #{} is not a constant", arg_idx);
|
emit_error!("shuffle index #{} is not a constant", arg_idx);
|
||||||
None
|
None
|
||||||
|
@ -1189,18 +1189,18 @@ fn generic_simd_intrinsic(
|
||||||
arg_idx, total_len);
|
arg_idx, total_len);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
Some(idx) => Some(bx.cx().const_i32(idx as i32)),
|
Some(idx) => Some(bx.const_i32(idx as i32)),
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
let indices = match indices {
|
let indices = match indices {
|
||||||
Some(i) => i,
|
Some(i) => i,
|
||||||
None => return Ok(bx.cx().const_null(llret_ty))
|
None => return Ok(bx.const_null(llret_ty))
|
||||||
};
|
};
|
||||||
|
|
||||||
return Ok(bx.shuffle_vector(args[0].immediate(),
|
return Ok(bx.shuffle_vector(args[0].immediate(),
|
||||||
args[1].immediate(),
|
args[1].immediate(),
|
||||||
bx.cx().const_vector(&indices)))
|
bx.const_vector(&indices)))
|
||||||
}
|
}
|
||||||
|
|
||||||
if name == "simd_insert" {
|
if name == "simd_insert" {
|
||||||
|
@@ -1231,8 +1231,8 @@ fn generic_simd_intrinsic(
         _ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
     }
     // truncate the mask to a vector of i1s
-    let i1 = bx.cx().type_i1();
-    let i1xn = bx.cx().type_vector(i1, m_len as u64);
+    let i1 = bx.type_i1();
+    let i1xn = bx.type_vector(i1, m_len as u64);
     let m_i1s = bx.trunc(args[0].immediate(), i1xn);
     return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
 }

@@ -1252,7 +1252,7 @@ fn generic_simd_intrinsic(
         };
         ($msg: tt, $($fmt: tt)*) => {
             span_invalid_monomorphization_error(
-                bx.cx().sess(), span,
+                bx.sess(), span,
                 &format!(concat!("invalid monomorphization of `{}` intrinsic: ", $msg),
                          name, $($fmt)*));
         }

@@ -1293,7 +1293,7 @@ fn generic_simd_intrinsic(
         };

         let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
-        let intrinsic = bx.cx().get_intrinsic(&llvm_name);
+        let intrinsic = bx.get_intrinsic(&llvm_name);
         let c = bx.call(intrinsic,
                         &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
                         None);

@@ -1450,28 +1450,28 @@ fn generic_simd_intrinsic(
     }

     // Alignment of T, must be a constant integer value:
-    let alignment_ty = bx.cx().type_i32();
-    let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
+    let alignment_ty = bx.type_i32();
+    let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

     // Truncate the mask vector to a vector of i1s:
     let (mask, mask_ty) = {
-        let i1 = bx.cx().type_i1();
-        let i1xn = bx.cx().type_vector(i1, in_len as u64);
+        let i1 = bx.type_i1();
+        let i1xn = bx.type_vector(i1, in_len as u64);
         (bx.trunc(args[2].immediate(), i1xn), i1xn)
     };

     // Type of the vector of pointers:
-    let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
+    let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
     let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

     // Type of the vector of elements:
-    let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
+    let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
     let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

     let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
                                  llvm_elem_vec_str, llvm_pointer_vec_str);
-    let f = bx.cx().declare_cfn(&llvm_intrinsic,
-                                bx.cx().type_func(&[
+    let f = bx.declare_cfn(&llvm_intrinsic,
+                           bx.type_func(&[
                                llvm_pointer_vec_ty,
                                alignment_ty,
                                mask_ty,
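Both masked-memory hunks (the gather above, the scatter below) assemble the intrinsic name from `llvm_vector_str`. As a rough illustration of LLVM's mangling convention, `v<len><elem>` for vectors with a `p0` prefix per pointer level in address space 0; the helper here is hypothetical, not the one in this file:

    fn vector_str(elem: &str, len: usize, pointer_count: usize) -> String {
        // Each pointer level wraps the element in "p0" (address space 0).
        format!("v{}{}{}", len, "p0".repeat(pointer_count), elem)
    }

    fn main() {
        // Gathering 4 f32 values through a vector of *const f32:
        let elem = vector_str("f32", 4, 0); // "v4f32"
        let ptrs = vector_str("f32", 4, 1); // "v4p0f32"
        assert_eq!(format!("llvm.masked.gather.{}.{}", elem, ptrs),
                   "llvm.masked.gather.v4f32.v4p0f32");
    }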
|
@@ -1550,30 +1550,30 @@ fn generic_simd_intrinsic(
     }

     // Alignment of T, must be a constant integer value:
-    let alignment_ty = bx.cx().type_i32();
-    let alignment = bx.cx().const_i32(bx.cx().align_of(in_elem).bytes() as i32);
+    let alignment_ty = bx.type_i32();
+    let alignment = bx.const_i32(bx.align_of(in_elem).bytes() as i32);

     // Truncate the mask vector to a vector of i1s:
     let (mask, mask_ty) = {
-        let i1 = bx.cx().type_i1();
-        let i1xn = bx.cx().type_vector(i1, in_len as u64);
+        let i1 = bx.type_i1();
+        let i1xn = bx.type_vector(i1, in_len as u64);
         (bx.trunc(args[2].immediate(), i1xn), i1xn)
     };

-    let ret_t = bx.cx().type_void();
+    let ret_t = bx.type_void();

     // Type of the vector of pointers:
-    let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
+    let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
     let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);

     // Type of the vector of elements:
-    let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
+    let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
     let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);

     let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
                                  llvm_elem_vec_str, llvm_pointer_vec_str);
-    let f = bx.cx().declare_cfn(&llvm_intrinsic,
-                                bx.cx().type_func(&[llvm_elem_vec_ty,
+    let f = bx.declare_cfn(&llvm_intrinsic,
+                           bx.type_func(&[llvm_elem_vec_ty,
                                    llvm_pointer_vec_ty,
                                    alignment_ty,
                                    mask_ty], ret_t));

@@ -1613,7 +1613,7 @@ fn generic_simd_intrinsic(
                         // code is generated
                         // * if the accumulator of the fmul isn't 1, incorrect
                         // code is generated
-                        match bx.cx().const_get_real(acc) {
+                        match bx.const_get_real(acc) {
                             None => return_error!("accumulator of {} is not a constant", $name),
                             Some((v, loses_info)) => {
                                 if $name.contains("mul") && v != 1.0_f64 {

@@ -1629,8 +1629,8 @@ fn generic_simd_intrinsic(
                     } else {
                         // unordered arithmetic reductions do not:
                         match f.bit_width() {
-                            32 => bx.cx().const_undef(bx.cx().type_f32()),
-                            64 => bx.cx().const_undef(bx.cx().type_f64()),
+                            32 => bx.const_undef(bx.type_f32()),
+                            64 => bx.const_undef(bx.type_f64()),
                             v => {
                                 return_error!(r#"
 unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
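The comments in the reduction hunk require the accumulator to be the identity element (0.0 for `fadd`, 1.0 for `fmul`), which is why `const_get_real` is consulted at all. A plain-Rust fold makes the failure mode concrete; this stands in for the LLVM reduction intrinsic and is not the codegen itself:

    fn main() {
        let v = [2.0_f64, 3.0, 4.0];

        // Seeding a multiply-reduction with the identity 1.0 yields the product:
        assert_eq!(v.iter().fold(1.0, |acc, x| acc * x), 24.0);

        // Any other seed silently scales the result - the "incorrect code is
        // generated" case the comment warns about:
        assert_eq!(v.iter().fold(2.0, |acc, x| acc * x), 48.0);
    }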
|
@@ -1707,8 +1707,8 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
             }

             // boolean reductions operate on vectors of i1s:
-            let i1 = bx.cx().type_i1();
-            let i1xn = bx.cx().type_vector(i1, in_len as u64);
+            let i1 = bx.type_i1();
+            let i1xn = bx.type_vector(i1, in_len as u64);
             bx.trunc(args[0].immediate(), i1xn)
         };
         return match in_elem.sty {

@@ -1718,7 +1718,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
                     if !$boolean {
                         r
                     } else {
-                        bx.zext(r, bx.cx().type_bool())
+                        bx.zext(r, bx.type_bool())
                     }
                 )
             },


@@ -194,7 +194,7 @@ fn shift_mask_rhs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
     rhs: Bx::Value
 ) -> Bx::Value {
-    let rhs_llty = bx.cx().val_ty(rhs);
+    let rhs_llty = bx.val_ty(rhs);
     let shift_val = shift_mask_val(bx, rhs_llty, rhs_llty, false);
     bx.and(rhs, shift_val)
 }

@@ -205,25 +205,25 @@ pub fn shift_mask_val<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     mask_llty: Bx::Type,
     invert: bool
 ) -> Bx::Value {
-    let kind = bx.cx().type_kind(llty);
+    let kind = bx.type_kind(llty);
     match kind {
         TypeKind::Integer => {
             // i8/u8 can shift by at most 7, i16/u16 by at most 15, etc.
-            let val = bx.cx().int_width(llty) - 1;
+            let val = bx.int_width(llty) - 1;
             if invert {
-                bx.cx().const_int(mask_llty, !val as i64)
+                bx.const_int(mask_llty, !val as i64)
             } else {
-                bx.cx().const_uint(mask_llty, val)
+                bx.const_uint(mask_llty, val)
             }
         },
         TypeKind::Vector => {
             let mask = shift_mask_val(
                 bx,
-                bx.cx().element_type(llty),
-                bx.cx().element_type(mask_llty),
+                bx.element_type(llty),
+                bx.element_type(mask_llty),
                 invert
             );
-            bx.vector_splat(bx.cx().vector_length(mask_llty), mask)
+            bx.vector_splat(bx.vector_length(mask_llty), mask)
         },
         _ => bug!("shift_mask_val: expected Integer or Vector, found {:?}", kind),
     }
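`shift_mask_val` exists because an N-bit shift by N or more is undefined in LLVM IR, so the shift amount gets masked down to at most N - 1. The integer-case arithmetic, checked in plain Rust with no builder involved:

    fn main() {
        // For a 32-bit integer the mask is 31 (0b11111), so any shift amount
        // is reduced mod 32 - exactly what bx.and(rhs, shift_val) computes.
        let val: u64 = 32 - 1;
        assert_eq!(37 & val, 5); // an out-of-range shift of 37 becomes 5

        // The `invert` branch builds !val instead: a mask selecting only the
        // bits a valid shift amount can never carry.
        assert_eq!(!(val as i64) & 37, 32);
    }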
|
|
|
@@ -16,7 +16,6 @@ use std;

 use common::IntPredicate;
 use meth;
-use rustc::ty::layout::LayoutOf;
 use rustc::ty::{self, Ty};
 use traits::*;

@@ -25,12 +24,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
     t: Ty<'tcx>,
     info: Option<Bx::Value>
 ) -> (Bx::Value, Bx::Value) {
-    let layout = bx.cx().layout_of(t);
+    let layout = bx.layout_of(t);
     debug!("size_and_align_of_dst(ty={}, info={:?}): layout: {:?}",
            t, info, layout);
     if !layout.is_unsized() {
-        let size = bx.cx().const_usize(layout.size.bytes());
-        let align = bx.cx().const_usize(layout.align.abi.bytes());
+        let size = bx.const_usize(layout.size.bytes());
+        let align = bx.const_usize(layout.align.abi.bytes());
         return (size, align);
     }
     match t.sty {

@@ -40,11 +39,11 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             (meth::SIZE.get_usize(bx, vtable), meth::ALIGN.get_usize(bx, vtable))
         }
         ty::Slice(_) | ty::Str => {
-            let unit = layout.field(bx.cx(), 0);
+            let unit = layout.field(bx, 0);
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
-            (bx.mul(info.unwrap(), bx.cx().const_usize(unit.size.bytes())),
-             bx.cx().const_usize(unit.align.abi.bytes()))
+            (bx.mul(info.unwrap(), bx.const_usize(unit.size.bytes())),
+             bx.const_usize(unit.align.abi.bytes()))
         }
         _ => {
             // First get the size of all statically known fields.

@@ -58,12 +57,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             let sized_align = layout.align.abi.bytes();
             debug!("DST {} statically sized prefix size: {} align: {}",
                    t, sized_size, sized_align);
-            let sized_size = bx.cx().const_usize(sized_size);
-            let sized_align = bx.cx().const_usize(sized_align);
+            let sized_size = bx.const_usize(sized_size);
+            let sized_align = bx.const_usize(sized_align);

             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
-            let field_ty = layout.field(bx.cx(), i).ty;
+            let field_ty = layout.field(bx, i).ty;
             let (unsized_size, mut unsized_align) = size_and_align_of_dst(bx, field_ty, info);

             // FIXME (#26403, #27023): We should be adding padding

@@ -85,12 +84,12 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(

             // Choose max of two known alignments (combined value must
             // be aligned according to more restrictive of the two).
-            let align = match (bx.cx().const_to_opt_u128(sized_align, false),
-                               bx.cx().const_to_opt_u128(unsized_align, false)) {
+            let align = match (bx.const_to_opt_u128(sized_align, false),
+                               bx.const_to_opt_u128(unsized_align, false)) {
                 (Some(sized_align), Some(unsized_align)) => {
                     // If both alignments are constant, (the sized_align should always be), then
                     // pick the correct alignment statically.
-                    bx.cx().const_usize(std::cmp::max(sized_align, unsized_align) as u64)
+                    bx.const_usize(std::cmp::max(sized_align, unsized_align) as u64)
                 }
                 _ => {
                     let cmp = bx.icmp(IntPredicate::IntUGT, sized_align, unsized_align);

@@ -108,7 +107,7 @@ pub fn size_and_align_of_dst<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // emulated via the semi-standard fast bit trick:
             //
             //   `(size + (align-1)) & -align`
-            let one = bx.cx().const_usize(1);
+            let one = bx.const_usize(1);
             let addend = bx.sub(align, one);
             let add = bx.add(size, addend);
             let neg = bx.neg(align);
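The round-up trick quoted in the comment, `(size + (align-1)) & -align`, relies on align being a power of two. With concrete numbers:

    fn align_up(size: u64, align: u64) -> u64 {
        // -align (two's complement) clears the low bits; adding align-1 first
        // pushes any non-multiple up to the next multiple of align.
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_up(13, 8), 16); // 13 + 7 = 20; 20 & !7 = 16
        assert_eq!(align_up(16, 8), 16); // already aligned: unchanged
        assert_eq!(align_up(1, 4), 4);
    }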
|
|
|
@@ -39,10 +39,10 @@ impl<'a, 'tcx: 'a> VirtualIndex {

         let llvtable = bx.pointercast(
             llvtable,
-            bx.cx().type_ptr_to(bx.cx().fn_ptr_backend_type(fn_ty))
+            bx.type_ptr_to(bx.fn_ptr_backend_type(fn_ty))
         );
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant

@@ -58,9 +58,9 @@ impl<'a, 'tcx: 'a> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);

-        let llvtable = bx.pointercast(llvtable, bx.cx().type_ptr_to(bx.cx().type_isize()));
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
-        let gep = bx.inbounds_gep(llvtable, &[bx.cx().const_usize(self.0)]);
+        let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(gep, usize_align);
         // Vtable loads are invariant
         bx.set_invariant_load(ptr);
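Both `VirtualIndex` hunks treat the vtable as a flat array of pointer-sized slots, indexed by `inbounds_gep(llvtable, &[const_usize(self.0)])`. In this era of rustc the leading slots held drop glue, size, and align, which is what lets `meth::SIZE` and `meth::ALIGN` be read with `get_usize` in the DST code earlier. A plain-Rust model, with the slot values invented for illustration:

    // Layout sketch: [drop_glue, size, align, method 0, method 1, ...]
    const DESTRUCTOR: usize = 0;
    const SIZE: usize = 1;
    const ALIGN: usize = 2;

    fn main() {
        // Pretend each slot is one usize, as the GEP above assumes.
        let vtable: [usize; 5] = [0xdead, 24, 8, 0x1000, 0x2000];
        assert_eq!(vtable[DESTRUCTOR], 0xdead);
        assert_eq!(vtable[SIZE], 24);
        assert_eq!(vtable[ALIGN], 8);
    }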
|
|
|
@@ -182,13 +182,13 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let lp1 = bx.load_operand(lp1).immediate();
             slot.storage_dead(&mut bx);

-            if !bx.cx().sess().target.target.options.custom_unwind_resume {
-                let mut lp = bx.cx().const_undef(self.landing_pad_type());
+            if !bx.sess().target.target.options.custom_unwind_resume {
+                let mut lp = bx.const_undef(self.landing_pad_type());
                 lp = bx.insert_value(lp, lp0, 0);
                 lp = bx.insert_value(lp, lp1, 1);
                 bx.resume(lp);
             } else {
-                bx.call(bx.cx().eh_unwind_resume(), &[lp0], funclet(self));
+                bx.call(bx.eh_unwind_resume(), &[lp0], funclet(self));
                 bx.unreachable();
             }
         }

@@ -218,10 +218,10 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bx.cond_br(discr.immediate(), lltrue, llfalse);
                 }
             } else {
-                let switch_llty = bx.cx().immediate_backend_type(
-                    bx.cx().layout_of(switch_ty)
+                let switch_llty = bx.immediate_backend_type(
+                    bx.layout_of(switch_ty)
                 );
-                let llval = bx.cx().const_uint_big(switch_llty, values[0]);
+                let llval = bx.const_uint_big(switch_llty, values[0]);
                 let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
                 bx.cond_br(cmp, lltrue, llfalse);
             }

@@ -230,11 +230,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let switch = bx.switch(discr.immediate(),
                                    llblock(self, *otherwise),
                                    values.len());
-            let switch_llty = bx.cx().immediate_backend_type(
-                bx.cx().layout_of(switch_ty)
+            let switch_llty = bx.immediate_backend_type(
+                bx.layout_of(switch_ty)
             );
             for (&value, target) in values.iter().zip(targets) {
-                let llval = bx.cx().const_uint_big(switch_llty, value);
+                let llval = bx.const_uint_big(switch_llty, value);
                 let llbb = llblock(self, *target);
                 bx.add_case(switch, llval, llbb)
             }
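These two hunks are the two lowerings of MIR's `SwitchInt`: a single `icmp eq` plus `cond_br` when there is only one test value, and a real `switch` with one `add_case` per value otherwise. A toy version of that decision; the `Branch` shape is invented for illustration:

    enum Branch {
        CondBr(u128),      // icmp eq + cond_br against the lone value
        Switch(Vec<u128>), // switch with add_case per value
    }

    fn lower_switch_int(values: &[u128]) -> Branch {
        if values.len() == 1 {
            Branch::CondBr(values[0])
        } else {
            Branch::Switch(values.to_vec())
        }
    }

    fn main() {
        assert!(matches!(lower_switch_int(&[7]), Branch::CondBr(7)));
        assert!(matches!(lower_switch_int(&[1, 2, 3]), Branch::Switch(ref v) if v.len() == 3));
    }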
|
@@ -283,8 +283,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         llval
                     }
                 };
-                let addr = bx.pointercast(llslot, bx.cx().type_ptr_to(
-                    bx.cx().cast_backend_type(&cast_ty)
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(
+                    bx.cast_backend_type(&cast_ty)
                 ));
                 bx.load(addr, self.fn_ty.ret.layout.align.abi)
             }

@@ -299,7 +299,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             mir::TerminatorKind::Drop { ref location, target, unwind } => {
                 let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
                 let ty = self.monomorphize(&ty);
-                let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx(), ty);
+                let drop_fn = monomorphize::resolve_drop_in_place(bx.tcx(), ty);

                 if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
                     // we don't actually need to drop anything.

@@ -323,14 +323,14 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             ty::ParamEnv::reveal_all(),
                             &sig,
                         );
-                        let fn_ty = bx.cx().new_vtable(sig, &[]);
+                        let fn_ty = bx.new_vtable(sig, &[]);
                         let vtable = args[1];
                         args = &args[..1];
                         (meth::DESTRUCTOR.get_fn(&mut bx, vtable, &fn_ty), fn_ty)
                     }
                     _ => {
-                        (bx.cx().get_fn(drop_fn),
-                         bx.cx().fn_type_of_instance(&drop_fn))
+                        (bx.get_fn(drop_fn),
+                         bx.fn_type_of_instance(&drop_fn))
                     }
                 };
                 do_call(self, &mut bx, fn_ty, drop_fn, args,

@@ -340,7 +340,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

             mir::TerminatorKind::Assert { ref cond, expected, ref msg, target, cleanup } => {
                 let cond = self.codegen_operand(&mut bx, cond).immediate();
-                let mut const_cond = bx.cx().const_to_opt_u128(cond, false).map(|c| c == 1);
+                let mut const_cond = bx.const_to_opt_u128(cond, false).map(|c| c == 1);

                 // This case can currently arise only from functions marked
                 // with #[rustc_inherit_overflow_checks] and inlined from

@@ -349,7 +349,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // NOTE: Unlike binops, negation doesn't have its own
                 // checked operation, just a comparison with the minimum
                 // value, so we have to check for the assert message.
-                if !bx.cx().check_overflow() {
+                if !bx.check_overflow() {
                     if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
                         const_cond = Some(expected);
                     }

@@ -378,11 +378,11 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 self.set_debug_loc(&mut bx, terminator.source_info);

                 // Get the location information.
-                let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
+                let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                 let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                let filename = bx.cx().const_str_slice(filename);
-                let line = bx.cx().const_u32(loc.line as u32);
-                let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
+                let filename = bx.const_str_slice(filename);
+                let line = bx.const_u32(loc.line as u32);
+                let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
                 let align = tcx.data_layout.aggregate_align.abi
                     .max(tcx.data_layout.i32_align.abi)
                     .max(tcx.data_layout.pointer_align.abi);
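Every panic path in this file builds the same `(file, line, col)` aggregate, interns it with `static_addr_of`, and hands a pointer to the panic lang item. Roughly, in source-level terms; the struct is hypothetical and the real value is an anonymous LLVM constant:

    struct PanicLoc {
        file: &'static str, // const_str_slice(filename)
        line: u32,          // const_u32(loc.line)
        col: u32,           // const_u32(loc.col + 1): columns are 1-based
    }

    static PANIC_LOC: PanicLoc = PanicLoc { file: "src/main.rs", line: 42, col: 7 };

    fn main() {
        // The lang item receives &PANIC_LOC, i.e. static_addr_of(...).
        println!("{}:{}:{}", PANIC_LOC.file, PANIC_LOC.line, PANIC_LOC.col);
    }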
|
@@ -393,8 +393,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         let len = self.codegen_operand(&mut bx, len).immediate();
                         let index = self.codegen_operand(&mut bx, index).immediate();

-                        let file_line_col = bx.cx().const_struct(&[filename, line, col], false);
-                        let file_line_col = bx.cx().static_addr_of(
+                        let file_line_col = bx.const_struct(&[filename, line, col], false);
+                        let file_line_col = bx.static_addr_of(
                             file_line_col,
                             align,
                             Some("panic_bounds_check_loc")

@@ -405,12 +405,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     _ => {
                         let str = msg.description();
                         let msg_str = Symbol::intern(str).as_str();
-                        let msg_str = bx.cx().const_str_slice(msg_str);
-                        let msg_file_line_col = bx.cx().const_struct(
+                        let msg_str = bx.const_str_slice(msg_str);
+                        let msg_file_line_col = bx.const_struct(
                             &[msg_str, filename, line, col],
                             false
                         );
-                        let msg_file_line_col = bx.cx().static_addr_of(
+                        let msg_file_line_col = bx.static_addr_of(
                             msg_file_line_col,
                             align,
                             Some("panic_loc")

@@ -423,8 +423,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Obtain the panic entry point.
                 let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
                 let instance = ty::Instance::mono(bx.tcx(), def_id);
-                let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                let llfn = bx.cx().get_fn(instance);
+                let fn_ty = bx.fn_type_of_instance(&instance);
+                let llfn = bx.get_fn(instance);

                 // Codegen the actual panic invoke/call.
                 do_call(self, &mut bx, fn_ty, llfn, &args, None, cleanup);

@@ -446,7 +446,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

                 let (instance, mut llfn) = match callee.layout.ty.sty {
                     ty::FnDef(def_id, substs) => {
-                        (Some(ty::Instance::resolve(bx.cx().tcx(),
+                        (Some(ty::Instance::resolve(bx.tcx(),
                                                     ty::ParamEnv::reveal_all(),
                                                     def_id,
                                                     substs).unwrap()),

@@ -485,7 +485,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         // we can do what we like. Here, we declare that transmuting
                         // into an uninhabited type is impossible, so anything following
                         // it must be unreachable.
-                        assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited);
+                        assert_eq!(bx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
                         bx.unreachable();
                     }
                     return;

@@ -499,7 +499,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

                 let fn_ty = match def {
                     Some(ty::InstanceDef::Virtual(..)) => {
-                        bx.cx().new_vtable(sig, &extra_args)
+                        bx.new_vtable(sig, &extra_args)
                     }
                     Some(ty::InstanceDef::DropGlue(_, None)) => {
                         // empty drop glue - a nop.

@@ -507,18 +507,18 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         funclet_br(self, &mut bx, target);
                         return;
                     }
-                    _ => bx.cx().new_fn_type(sig, &extra_args)
+                    _ => bx.new_fn_type(sig, &extra_args)
                 };

                 // emit a panic instead of instantiating an uninhabited type
                 if (intrinsic == Some("init") || intrinsic == Some("uninit")) &&
                     fn_ty.ret.layout.abi.is_uninhabited()
                 {
-                    let loc = bx.cx().sess().source_map().lookup_char_pos(span.lo());
+                    let loc = bx.sess().source_map().lookup_char_pos(span.lo());
                     let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
-                    let filename = bx.cx().const_str_slice(filename);
-                    let line = bx.cx().const_u32(loc.line as u32);
-                    let col = bx.cx().const_u32(loc.col.to_usize() as u32 + 1);
+                    let filename = bx.const_str_slice(filename);
+                    let line = bx.const_u32(loc.line as u32);
+                    let col = bx.const_u32(loc.col.to_usize() as u32 + 1);
                     let align = tcx.data_layout.aggregate_align.abi
                         .max(tcx.data_layout.i32_align.abi)
                         .max(tcx.data_layout.pointer_align.abi);

@@ -529,12 +529,12 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         if intrinsic == Some("init") { "zeroed" } else { "uninitialized" }
                     );
                     let msg_str = Symbol::intern(&str).as_str();
-                    let msg_str = bx.cx().const_str_slice(msg_str);
-                    let msg_file_line_col = bx.cx().const_struct(
+                    let msg_str = bx.const_str_slice(msg_str);
+                    let msg_file_line_col = bx.const_struct(
                         &[msg_str, filename, line, col],
                         false,
                     );
-                    let msg_file_line_col = bx.cx().static_addr_of(
+                    let msg_file_line_col = bx.static_addr_of(
                         msg_file_line_col,
                         align,
                         Some("panic_loc"),

@@ -544,8 +544,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let def_id =
                         common::langcall(bx.tcx(), Some(span), "", lang_items::PanicFnLangItem);
                     let instance = ty::Instance::mono(bx.tcx(), def_id);
-                    let fn_ty = bx.cx().fn_type_of_instance(&instance);
-                    let llfn = bx.cx().get_fn(instance);
+                    let fn_ty = bx.fn_type_of_instance(&instance);
+                    let llfn = bx.get_fn(instance);

                     // Codegen the actual panic invoke/call.
                     do_call(

@@ -577,7 +577,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     let dest = match ret_dest {
                         _ if fn_ty.ret.is_indirect() => llargs[0],
                         ReturnDest::Nothing => {
-                            bx.cx().const_undef(bx.cx().type_ptr_to(bx.memory_ty(&fn_ty.ret)))
+                            bx.const_undef(bx.type_ptr_to(bx.memory_ty(&fn_ty.ret)))
                         }
                         ReturnDest::IndirectOperand(dst, _) |
                         ReturnDest::Store(dst) => dst.llval,

@@ -611,7 +611,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             );
                             return OperandRef {
                                 val: Immediate(llval),
-                                layout: bx.cx().layout_of(ty),
+                                layout: bx.layout_of(ty),
                             };

                         },

@@ -629,7 +629,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                             );
                             return OperandRef {
                                 val: Immediate(llval),
-                                layout: bx.cx().layout_of(ty)
+                                layout: bx.layout_of(ty)
                             };
                         }
                     }

@@ -639,7 +639,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     }).collect();


-                    let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx());
+                    let callee_ty = instance.as_ref().unwrap().ty(bx.tcx());
                     bx.codegen_intrinsic_call(callee_ty, &fn_ty, &args, dest,
                                               terminator.source_info.span);

@@ -736,7 +736,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {

                 let fn_ptr = match (llfn, instance) {
                     (Some(llfn), _) => llfn,
-                    (None, Some(instance)) => bx.cx().get_fn(instance),
+                    (None, Some(instance)) => bx.get_fn(instance),
                     _ => span_bug!(span, "no llfn for call"),
                 };

@@ -760,7 +760,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     ) {
         // Fill padding with undef value, where applicable.
         if let Some(ty) = arg.pad {
-            llargs.push(bx.cx().const_undef(bx.cx().reg_backend_type(&ty)))
+            llargs.push(bx.const_undef(bx.reg_backend_type(&ty)))
         }

         if arg.is_ignore() {

@@ -820,8 +820,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.cx().type_ptr_to(
-                    bx.cx().cast_backend_type(&ty))
+                let addr = bx.pointercast(llval, bx.type_ptr_to(
+                    bx.cast_backend_type(&ty))
                 );
                 llval = bx.load(addr, align.min(arg.layout.align.abi));
             } else {

@@ -1030,7 +1030,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
             LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
             LocalRef::Operand(None) => {
-                let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst));
+                let dst_layout = bx.layout_of(self.monomorphized_place_ty(dst));
                 assert!(!dst_layout.ty.has_erasable_regions());
                 let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
                 place.storage_live(bx);

@@ -1057,8 +1057,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         dst: PlaceRef<'tcx, Bx::Value>
     ) {
         let src = self.codegen_operand(bx, src);
-        let llty = bx.cx().backend_type(src.layout);
-        let cast_ptr = bx.pointercast(dst.llval, bx.cx().type_ptr_to(llty));
+        let llty = bx.backend_type(src.layout);
+        let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
         let align = src.layout.align.abi.min(dst.align);
         src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
     }


@@ -14,7 +14,7 @@ use rustc::mir;
 use rustc_data_structures::indexed_vec::Idx;
 use rustc::mir::interpret::{GlobalId, ConstValue};
 use rustc::ty::{self, Ty};
-use rustc::ty::layout::{self, LayoutOf};
+use rustc::ty::layout;
 use syntax::source_map::Span;
 use traits::*;

@@ -75,20 +75,20 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         c,
                     )?;
                     if let Some(prim) = field.val.try_to_scalar() {
-                        let layout = bx.cx().layout_of(field_ty);
+                        let layout = bx.layout_of(field_ty);
                         let scalar = match layout.abi {
                             layout::Abi::Scalar(ref x) => x,
                             _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
                         };
-                        Ok(bx.cx().scalar_to_backend(
+                        Ok(bx.scalar_to_backend(
                             prim, scalar,
-                            bx.cx().immediate_backend_type(layout),
+                            bx.immediate_backend_type(layout),
                         ))
                     } else {
                         bug!("simd shuffle field {:?}", field)
                     }
                 }).collect();
-                let llval = bx.cx().const_struct(&values?, false);
+                let llval = bx.const_struct(&values?, false);
                 Ok((llval, c.ty))
             })
             .unwrap_or_else(|_| {

@@ -98,8 +98,8 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 );
                 // We've errored, so we don't have to produce working code.
                 let ty = self.monomorphize(&ty);
-                let llty = bx.cx().backend_type(bx.cx().layout_of(ty));
-                (bx.cx().const_undef(llty), ty)
+                let llty = bx.backend_type(bx.layout_of(ty));
+                (bx.const_undef(llty), ty)
             })
     }
 }


@@ -10,7 +10,7 @@

 use libc::c_uint;
 use rustc::ty::{self, Ty, TypeFoldable, UpvarSubsts};
-use rustc::ty::layout::{LayoutOf, TyLayout, HasTyCtxt};
+use rustc::ty::layout::{TyLayout, HasTyCtxt};
 use rustc::mir::{self, Mir};
 use rustc::ty::subst::Substs;
 use rustc::session::config::DebugInfo;

@@ -266,14 +266,14 @@ pub fn codegen_mir<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(

     let mut allocate_local = |local| {
         let decl = &mir.local_decls[local];
-        let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
+        let layout = bx.layout_of(fx.monomorphize(&decl.ty));
         assert!(!layout.ty.has_erasable_regions());

         if let Some(name) = decl.name {
             // User variable
             let debug_scope = fx.scopes[decl.visibility_scope];
             let dbg = debug_scope.is_valid() &&
-                bx.cx().sess().opts.debuginfo == DebugInfo::Full;
+                bx.sess().opts.debuginfo == DebugInfo::Full;

             if !memory_locals.contains(local) && !dbg {
                 debug!("alloc: {:?} ({}) -> operand", local, name);

@@ -376,7 +376,7 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
 {
     block_bxs.iter_enumerated().zip(cleanup_kinds).map(|((bb, &llbb), cleanup_kind)| {
         match *cleanup_kind {
-            CleanupKind::Funclet if base::wants_msvc_seh(bx.cx().sess()) => {}
+            CleanupKind::Funclet if base::wants_msvc_seh(bx.sess()) => {}
             _ => return (None, None)
         }

@@ -415,8 +415,8 @@ fn create_funclets<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 // C++ personality function, but `catch (...)` has no type so
                 // it's null. The 64 here is actually a bitfield which
                 // represents that this is a catch-all block.
-                let null = bx.cx().const_null(bx.cx().type_i8p());
-                let sixty_four = bx.cx().const_i32(64);
+                let null = bx.const_null(bx.type_i8p());
+                let sixty_four = bx.const_i32(64);
                 funclet = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
                 cp_bx.br(llbb);
             }

@@ -451,7 +451,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(

     // Get the argument scope, if it exists and if we need it.
     let arg_scope = scopes[mir::OUTERMOST_SOURCE_SCOPE];
-    let arg_scope = if bx.cx().sess().opts.debuginfo == DebugInfo::Full {
+    let arg_scope = if bx.sess().opts.debuginfo == DebugInfo::Full {
         arg_scope.scope_metadata
     } else {
         None

@@ -478,7 +478,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 _ => bug!("spread argument isn't a tuple?!")
             };

-            let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
+            let place = PlaceRef::alloca(bx, bx.layout_of(arg_ty), &name);
             for i in 0..tupled_arg_tys.len() {
                 let arg = &fx.fn_ty.args[idx];
                 idx += 1;

@@ -524,18 +524,18 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
                 return local(OperandRef::new_zst(bx.cx(), arg.layout));
             }
             PassMode::Direct(_) => {
-                let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                 bx.set_value_name(llarg, &name);
                 llarg_idx += 1;
                 return local(
                     OperandRef::from_immediate_or_packed_pair(bx, llarg, arg.layout));
             }
             PassMode::Pair(..) => {
-                let a = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                let a = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                 bx.set_value_name(a, &(name.clone() + ".0"));
                 llarg_idx += 1;

-                let b = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+                let b = bx.get_param(bx.llfn(), llarg_idx as c_uint);
                 bx.set_value_name(b, &(name + ".1"));
                 llarg_idx += 1;

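This hunk and the next walk the ABI `PassMode`s: `Direct` consumes one LLVM parameter, `Pair` consumes two (named `arg.0`/`arg.1` above), and indirect arguments arrive as a pointer, plus a second "extra" parameter when the argument is unsized (a fat pointer). A toy accounting of the `llarg_idx` bumps; the enum is simplified from the real `PassMode`:

    enum PassMode {
        Ignore,
        Direct,
        Pair,
        Indirect { unsized_tail: bool },
    }

    fn llvm_params(mode: &PassMode) -> usize {
        match mode {
            PassMode::Ignore => 0,
            PassMode::Direct => 1,                           // one llarg_idx bump
            PassMode::Pair => 2,                             // ".0" and ".1"
            PassMode::Indirect { unsized_tail: false } => 1, // pointer only
            PassMode::Indirect { unsized_tail: true } => 2,  // pointer + extra
        }
    }

    fn main() {
        let args = [PassMode::Direct, PassMode::Pair,
                    PassMode::Indirect { unsized_tail: true }];
        assert_eq!(args.iter().map(llvm_params).sum::<usize>(), 5);
    }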
|
@@ -552,16 +552,16 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
             // Don't copy an indirect argument to an alloca, the caller
             // already put it in a temporary alloca and gave it up.
             // FIXME: lifetimes
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             bx.set_value_name(llarg, &name);
             llarg_idx += 1;
             PlaceRef::new_sized(llarg, arg.layout, arg.layout.align.abi)
         } else if arg.is_unsized_indirect() {
             // As the storage for the indirect argument lives during
             // the whole function call, we just copy the fat pointer.
-            let llarg = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llarg = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             llarg_idx += 1;
-            let llextra = bx.cx().get_param(bx.llfn(), llarg_idx as c_uint);
+            let llextra = bx.get_param(bx.llfn(), llarg_idx as c_uint);
             llarg_idx += 1;
             let indirect_operand = OperandValue::Pair(llarg, llextra);

@@ -599,7 +599,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
         // Or is it the closure environment?
         let (closure_layout, env_ref) = match arg.layout.ty.sty {
             ty::RawPtr(ty::TypeAndMut { ty, .. }) |
-            ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true),
+            ty::Ref(_, ty, _) => (bx.layout_of(ty), true),
             _ => (arg.layout, false)
         };

@@ -618,10 +618,10 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
         // doesn't actually strip the offset when splitting the closure
        // environment into its components so it ends up out of bounds.
         // (cuviper) It seems to be fine without the alloca on LLVM 6 and later.
-        let env_alloca = !env_ref && bx.cx().closure_env_needs_indirect_debuginfo();
+        let env_alloca = !env_ref && bx.closure_env_needs_indirect_debuginfo();
         let env_ptr = if env_alloca {
             let scratch = PlaceRef::alloca(bx,
-                bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+                bx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
                 "__debuginfo_env_ptr");
             bx.store(place.llval, scratch.llval, scratch.align);
             scratch.llval

@@ -632,7 +632,7 @@ fn arg_local_refs<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>>(
         for (i, (decl, ty)) in mir.upvar_decls.iter().zip(upvar_tys).enumerate() {
             let byte_offset_of_var_in_env = closure_layout.fields.offset(i).bytes();

-            let ops = bx.cx().debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);
+            let ops = bx.debuginfo_upvar_decls_ops_sequence(byte_offset_of_var_in_env);

             // The environment and the capture can each be indirect.


@@ -89,7 +89,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if let OperandValue::Immediate(_) = op.val {
                     acc.push(op.immediate());
                 } else {
                     span_err!(bx.sess(), span.to_owned(), E0669,
                               "invalid value for constraint in inline assembly");
                 }
                 acc

@@ -98,7 +98,7 @@ impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             if input_vals.len() == inputs.len() {
                 let res = bx.codegen_inline_asm(asm, outputs, input_vals);
                 if !res {
-                    span_err!(bx.cx().sess(), statement.source_info.span, E0668,
+                    span_err!(bx.sess(), statement.source_info.span, E0668,
                               "malformed inline assembly");
                 }
             }
|
|