Replaced Codegen field access by trait method

parent 8714e6bce6
commit d325844804

16 changed files with 247 additions and 247 deletions
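The change is mechanical: every direct read of the builder's cx field (bx.cx) becomes a call to an accessor method (bx.cx()), so the accessor can later move behind a backend-agnostic trait. A minimal, self-contained sketch of that pattern, using hypothetical Context/Builder types in place of rustc's CodegenCx and Builder (not the actual rustc definitions):

    struct Context {
        check_overflow: bool,
    }

    struct Builder<'a> {
        cx: &'a Context, // direct access to this field is being phased out
    }

    impl<'a> Builder<'a> {
        // Accessor that call sites use as bx.cx() instead of bx.cx.
        fn cx(&self) -> &'a Context {
            self.cx
        }
    }

    fn uses_accessor(bx: &Builder<'_>) -> bool {
        bx.cx().check_overflow // before this commit: bx.cx.check_overflow
    }

    fn main() {
        let cx = Context { check_overflow: true };
        let bx = Builder { cx: &cx };
        assert!(uses_accessor(&bx));
    }

The diff below applies that substitution across the codegen crate, plus a matching cleanup of the trait impls for CodegenCx.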
@@ -202,7 +202,7 @@ impl ArgTypeExt<'ll, 'tcx> for ArgType<'tcx, Ty<'tcx>> {
 if self.is_ignore() {
 return;
 }
-let cx = bx.cx;
+let cx = bx.cx();
 if self.is_sized_indirect() {
 OperandValue::Ref(val, None, self.layout.align).store(bx, dst)
 } else if self.is_unsized_indirect() {
@@ -757,7 +757,7 @@ impl<'tcx> FnTypeExt<'tcx> for FnType<'tcx, Ty<'tcx>> {
 // by the LLVM verifier.
 if let layout::Int(..) = scalar.value {
 if !scalar.is_bool() {
-let range = scalar.valid_range_exclusive(bx.cx);
+let range = scalar.valid_range_exclusive(bx.cx());
 if range.start != range.end {
 bx.range_metadata(callsite, range);
 }
@@ -44,7 +44,7 @@ pub fn codegen_inline_asm(
 if out.is_indirect {
 indirect_outputs.push(place.load(bx).immediate());
 } else {
-output_types.push(place.layout.llvm_type(bx.cx));
+output_types.push(place.layout.llvm_type(bx.cx()));
 }
 }
 if !indirect_outputs.is_empty() {
@@ -76,9 +76,9 @@ pub fn codegen_inline_asm(
 // Depending on how many outputs we have, the return type is different
 let num_outputs = output_types.len();
 let output_type = match num_outputs {
-0 => Type::void(bx.cx),
+0 => Type::void(bx.cx()),
 1 => output_types[0],
-_ => Type::struct_(bx.cx, &output_types, false)
+_ => Type::struct_(bx.cx(), &output_types, false)
 };

 let asm = CString::new(ia.asm.as_str().as_bytes()).unwrap();
@@ -108,13 +108,13 @@ pub fn codegen_inline_asm(
 // back to source locations. See #17552.
 unsafe {
 let key = "srcloc";
-let kind = llvm::LLVMGetMDKindIDInContext(bx.cx.llcx,
+let kind = llvm::LLVMGetMDKindIDInContext(bx.cx().llcx,
 key.as_ptr() as *const c_char, key.len() as c_uint);

-let val: &'ll Value = CodegenCx::c_i32(bx.cx, ia.ctxt.outer().as_u32() as i32);
+let val: &'ll Value = CodegenCx::c_i32(bx.cx(), ia.ctxt.outer().as_u32() as i32);

 llvm::LLVMSetMetadata(r, kind,
-llvm::LLVMMDNodeInContext(bx.cx.llcx, &val, 1));
+llvm::LLVMMDNodeInContext(bx.cx().llcx, &val, 1));
 }

 return true;
@@ -233,24 +233,24 @@ pub fn unsize_thin_ptr(
 &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) |
 (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }),
 &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
-assert!(bx.cx.type_is_sized(a));
-let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
-(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
+assert!(bx.cx().type_is_sized(a));
+let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to();
+(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
 }
 (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) if def_a.is_box() && def_b.is_box() => {
 let (a, b) = (src_ty.boxed_ty(), dst_ty.boxed_ty());
-assert!(bx.cx.type_is_sized(a));
-let ptr_ty = bx.cx.layout_of(b).llvm_type(bx.cx).ptr_to();
-(bx.pointercast(src, ptr_ty), unsized_info(bx.cx, a, b, None))
+assert!(bx.cx().type_is_sized(a));
+let ptr_ty = bx.cx().layout_of(b).llvm_type(bx.cx()).ptr_to();
+(bx.pointercast(src, ptr_ty), unsized_info(bx.cx(), a, b, None))
 }
 (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
 assert_eq!(def_a, def_b);

-let src_layout = bx.cx.layout_of(src_ty);
-let dst_layout = bx.cx.layout_of(dst_ty);
+let src_layout = bx.cx().layout_of(src_ty);
+let dst_layout = bx.cx().layout_of(dst_ty);
 let mut result = None;
 for i in 0..src_layout.fields.count() {
-let src_f = src_layout.field(bx.cx, i);
+let src_f = src_layout.field(bx.cx(), i);
 assert_eq!(src_layout.fields.offset(i).bytes(), 0);
 assert_eq!(dst_layout.fields.offset(i).bytes(), 0);
 if src_f.is_zst() {
@@ -258,15 +258,15 @@ pub fn unsize_thin_ptr(
 }
 assert_eq!(src_layout.size, src_f.size);

-let dst_f = dst_layout.field(bx.cx, i);
+let dst_f = dst_layout.field(bx.cx(), i);
 assert_ne!(src_f.ty, dst_f.ty);
 assert_eq!(result, None);
 result = Some(unsize_thin_ptr(bx, src, src_f.ty, dst_f.ty));
 }
 let (lldata, llextra) = result.unwrap();
 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-(bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx, 0, true)),
-bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx, 1, true)))
+(bx.bitcast(lldata, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 0, true)),
+bx.bitcast(llextra, dst_layout.scalar_pair_element_llvm_type(bx.cx(), 1, true)))
 }
 _ => bug!("unsize_thin_ptr: called on bad types"),
 }
@@ -288,8 +288,8 @@ pub fn coerce_unsized_into(
 // i.e. &'a fmt::Debug+Send => &'a fmt::Debug
 // So we need to pointercast the base to ensure
 // the types match up.
-let thin_ptr = dst.layout.field(bx.cx, abi::FAT_PTR_ADDR);
-(bx.pointercast(base, thin_ptr.llvm_type(bx.cx)), info)
+let thin_ptr = dst.layout.field(bx.cx(), abi::FAT_PTR_ADDR);
+(bx.pointercast(base, thin_ptr.llvm_type(bx.cx())), info)
 }
 OperandValue::Immediate(base) => {
 unsize_thin_ptr(bx, base, src_ty, dst_ty)
@@ -384,7 +384,7 @@ pub fn wants_msvc_seh(sess: &Session) -> bool {
 }

 pub fn call_assume(bx: &Builder<'_, 'll, '_>, val: &'ll Value) {
-let assume_intrinsic = bx.cx.get_intrinsic("llvm.assume");
+let assume_intrinsic = bx.cx().get_intrinsic("llvm.assume");
 bx.call(assume_intrinsic, &[val], None);
 }

@@ -416,7 +416,7 @@ pub fn to_immediate_scalar(
 scalar: &layout::Scalar,
 ) -> &'ll Value {
 if scalar.is_bool() {
-return bx.trunc(val, Type::i1(bx.cx));
+return bx.trunc(val, Type::i1(bx.cx()));
 }
 val
 }
@@ -470,10 +470,10 @@ pub fn call_memset(
 align: &'ll Value,
 volatile: bool,
 ) -> &'ll Value {
-let ptr_width = &bx.cx.sess().target.target.target_pointer_width;
+let ptr_width = &bx.cx().sess().target.target.target_pointer_width;
 let intrinsic_key = format!("llvm.memset.p0i8.i{}", ptr_width);
-let llintrinsicfn = bx.cx.get_intrinsic(&intrinsic_key);
-let volatile = CodegenCx::c_bool(bx.cx, volatile);
+let llintrinsicfn = bx.cx().get_intrinsic(&intrinsic_key);
+let volatile = CodegenCx::c_bool(bx.cx(), volatile);
 bx.call(llintrinsicfn, &[ptr, fill_byte, size, align, volatile], None)
 }

@@ -193,14 +193,14 @@ impl Funclet<'ll> {
 }
 }

-impl Backend for CodegenCx<'ll, 'tcx, &'ll Value> {
+impl Backend for CodegenCx<'ll, 'tcx> {
 type Value = &'ll Value;
 type BasicBlock = &'ll BasicBlock;
 type Type = &'ll Type;
 type Context = &'ll llvm::Context;
 }

-impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx, &'ll Value> {
+impl<'ll, 'tcx : 'll> CommonMethods for CodegenCx<'ll, 'tcx> {
 fn val_ty(v: &'ll Value) -> &'ll Type {
 unsafe {
 llvm::LLVMTypeOf(v)
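The two impl blocks above are the structural core of the commit: CodegenCx drops the extra &'ll Value type parameter, and the backend's concrete IR types are named once through the Backend trait's associated types. A hypothetical reduction of that idea, with stand-in types rather than rustc's actual definitions:

    trait Backend {
        type Value;
        type BasicBlock;
        type Type;
    }

    // Stand-ins for the LLVM wrapper types.
    struct LlvmValue;
    struct LlvmBasicBlock;
    struct LlvmType;

    struct CodegenCx;

    impl Backend for CodegenCx {
        type Value = LlvmValue;
        type BasicBlock = LlvmBasicBlock;
        type Type = LlvmType;
    }

    // Generic code names the IR types through the trait, so they no longer
    // need to appear as type parameters on CodegenCx itself.
    fn no_blocks<B: Backend>(_cx: &B) -> Vec<B::BasicBlock> {
        Vec::new()
    }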
@@ -26,11 +26,11 @@ use syntax::attr;
 /// Inserts a side-effect free instruction sequence that makes sure that the
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &Builder) {
-if needs_gdb_debug_scripts_section(bx.cx) {
-let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx);
+if needs_gdb_debug_scripts_section(bx.cx()) {
+let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx.cx());
 // Load just the first byte as that's all that's necessary to force
 // LLVM to keep around the reference to the global.
-let indices = [CodegenCx::c_i32(bx.cx, 0), CodegenCx::c_i32(bx.cx, 0)];
+let indices = [CodegenCx::c_i32(bx.cx(), 0), CodegenCx::c_i32(bx.cx(), 0)];
 let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
 let volative_load_instruction = bx.volatile_load(element);
 unsafe {
@@ -494,7 +494,7 @@ pub fn declare_local(
 span: Span,
 ) {
 assert!(!dbg_context.get_ref(span).source_locations_enabled.get());
-let cx = bx.cx;
+let cx = bx.cx();

 let file = span_start(cx, span).file;
 let file_metadata = file_metadata(cx,
@@ -42,7 +42,7 @@ pub fn set_source_location(

 let dbg_loc = if function_debug_context.source_locations_enabled.get() {
 debug!("set_source_location: {}", bx.sess().source_map().span_to_string(span));
-let loc = span_start(bx.cx, span);
+let loc = span_start(bx.cx(), span);
 InternalDebugLocation::new(scope.unwrap(), loc.line, loc.col.to_usize())
 } else {
 UnknownLocation
@@ -88,7 +88,7 @@ pub fn set_debug_location(
 // For MSVC, set the column number to zero.
 // Otherwise, emit it. This mimics clang behaviour.
 // See discussion in https://github.com/rust-lang/rust/issues/42921
-let col_used = if bx.cx.sess().target.target.options.is_like_msvc {
+let col_used = if bx.cx().sess().target.target.options.is_like_msvc {
 UNKNOWN_COLUMN_NUMBER
 } else {
 col as c_uint
@@ -97,7 +97,7 @@ pub fn set_debug_location(

 unsafe {
 Some(llvm::LLVMRustDIBuilderCreateDebugLocation(
-debug_context(bx.cx).llcontext,
+debug_context(bx.cx()).llcontext,
 line as c_uint,
 col_used,
 scope,
@@ -30,12 +30,12 @@ pub fn size_and_align_of_dst(
 ) -> (&'ll Value, &'ll Value) {
 debug!("calculate size of DST: {}; with lost info: {:?}",
 t, info);
-if bx.cx.type_is_sized(t) {
-let (size, align) = bx.cx.size_and_align_of(t);
+if bx.cx().type_is_sized(t) {
+let (size, align) = bx.cx().size_and_align_of(t);
 debug!("size_and_align_of_dst t={} info={:?} size: {:?} align: {:?}",
 t, info, size, align);
-let size = CodegenCx::c_usize(bx.cx, size.bytes());
-let align = CodegenCx::c_usize(bx.cx, align.abi());
+let size = CodegenCx::c_usize(bx.cx(), size.bytes());
+let align = CodegenCx::c_usize(bx.cx(), align.abi());
 return (size, align);
 }
 match t.sty {
@@ -48,12 +48,12 @@ pub fn size_and_align_of_dst(
 let unit = t.sequence_element_type(bx.tcx());
 // The info in this case is the length of the str, so the size is that
 // times the unit size.
-let (size, align) = bx.cx.size_and_align_of(unit);
-(bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx, size.bytes())),
-CodegenCx::c_usize(bx.cx, align.abi()))
+let (size, align) = bx.cx().size_and_align_of(unit);
+(bx.mul(info.unwrap(), CodegenCx::c_usize(bx.cx(), size.bytes())),
+CodegenCx::c_usize(bx.cx(), align.abi()))
 }
 _ => {
-let cx = bx.cx;
+let cx = bx.cx();
 // First get the size of all statically known fields.
 // Don't use size_of because it also rounds up to alignment, which we
 // want to avoid, as the unsized field's alignment could be smaller.
@@ -116,7 +116,7 @@ pub fn size_and_align_of_dst(
 //
 // `(size + (align-1)) & -align`

-let addend = bx.sub(align, CodegenCx::c_usize(bx.cx, 1));
+let addend = bx.sub(align, CodegenCx::c_usize(bx.cx(), 1));
 let size = bx.and(bx.add(size, addend), bx.neg(align));

 (size, align)
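The (size + (align-1)) & -align comment in the hunk above is the standard power-of-two round-up trick: in two's complement, -align equals !(align - 1), so the AND clears the bits below the alignment. A standalone illustration in plain Rust, outside the codegen context:

    fn round_up_to_align(size: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        // wrapping_neg computes -align in two's complement, i.e. !(align - 1).
        (size + (align - 1)) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(round_up_to_align(10, 8), 16);
        assert_eq!(round_up_to_align(16, 8), 16);
        assert_eq!(round_up_to_align(0, 8), 0);
    }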
@ -98,7 +98,7 @@ pub fn codegen_intrinsic_call(
|
||||||
llresult: &'ll Value,
|
llresult: &'ll Value,
|
||||||
span: Span,
|
span: Span,
|
||||||
) {
|
) {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
let tcx = cx.tcx;
|
let tcx = cx.tcx;
|
||||||
|
|
||||||
let (def_id, substs) = match callee_ty.sty {
|
let (def_id, substs) = match callee_ty.sty {
|
||||||
|
@ -210,7 +210,7 @@ pub fn codegen_intrinsic_call(
|
||||||
"needs_drop" => {
|
"needs_drop" => {
|
||||||
let tp_ty = substs.type_at(0);
|
let tp_ty = substs.type_at(0);
|
||||||
|
|
||||||
CodegenCx::c_bool(cx, bx.cx.type_needs_drop(tp_ty))
|
CodegenCx::c_bool(cx, bx.cx().type_needs_drop(tp_ty))
|
||||||
}
|
}
|
||||||
"offset" => {
|
"offset" => {
|
||||||
let ptr = args[0].immediate();
|
let ptr = args[0].immediate();
|
||||||
|
@ -266,12 +266,12 @@ pub fn codegen_intrinsic_call(
|
||||||
to_immediate(bx, load, cx.layout_of(tp_ty))
|
to_immediate(bx, load, cx.layout_of(tp_ty))
|
||||||
},
|
},
|
||||||
"volatile_store" => {
|
"volatile_store" => {
|
||||||
let dst = args[0].deref(bx.cx);
|
let dst = args[0].deref(bx.cx());
|
||||||
args[1].val.volatile_store(bx, dst);
|
args[1].val.volatile_store(bx, dst);
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
"unaligned_volatile_store" => {
|
"unaligned_volatile_store" => {
|
||||||
let dst = args[0].deref(bx.cx);
|
let dst = args[0].deref(bx.cx());
|
||||||
args[1].val.unaligned_volatile_store(bx, dst);
|
args[1].val.unaligned_volatile_store(bx, dst);
|
||||||
return;
|
return;
|
||||||
},
|
},
|
||||||
|
@ -302,12 +302,12 @@ pub fn codegen_intrinsic_call(
|
||||||
Some((width, signed)) =>
|
Some((width, signed)) =>
|
||||||
match name {
|
match name {
|
||||||
"ctlz" | "cttz" => {
|
"ctlz" | "cttz" => {
|
||||||
let y = CodegenCx::c_bool(bx.cx, false);
|
let y = CodegenCx::c_bool(bx.cx(), false);
|
||||||
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
|
let llfn = cx.get_intrinsic(&format!("llvm.{}.i{}", name, width));
|
||||||
bx.call(llfn, &[args[0].immediate(), y], None)
|
bx.call(llfn, &[args[0].immediate(), y], None)
|
||||||
}
|
}
|
||||||
"ctlz_nonzero" | "cttz_nonzero" => {
|
"ctlz_nonzero" | "cttz_nonzero" => {
|
||||||
let y = CodegenCx::c_bool(bx.cx, true);
|
let y = CodegenCx::c_bool(bx.cx(), true);
|
||||||
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
|
let llvm_name = &format!("llvm.{}.i{}", &name[..4], width);
|
||||||
let llfn = cx.get_intrinsic(llvm_name);
|
let llfn = cx.get_intrinsic(llvm_name);
|
||||||
bx.call(llfn, &[args[0].immediate(), y], None)
|
bx.call(llfn, &[args[0].immediate(), y], None)
|
||||||
|
@ -330,7 +330,7 @@ pub fn codegen_intrinsic_call(
|
||||||
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
|
let intrinsic = format!("llvm.{}{}.with.overflow.i{}",
|
||||||
if signed { 's' } else { 'u' },
|
if signed { 's' } else { 'u' },
|
||||||
&name[..3], width);
|
&name[..3], width);
|
||||||
let llfn = bx.cx.get_intrinsic(&intrinsic);
|
let llfn = bx.cx().get_intrinsic(&intrinsic);
|
||||||
|
|
||||||
// Convert `i1` to a `bool`, and write it to the out parameter
|
// Convert `i1` to a `bool`, and write it to the out parameter
|
||||||
let pair = bx.call(llfn, &[
|
let pair = bx.call(llfn, &[
|
||||||
|
@ -431,7 +431,7 @@ pub fn codegen_intrinsic_call(
|
||||||
},
|
},
|
||||||
|
|
||||||
"discriminant_value" => {
|
"discriminant_value" => {
|
||||||
args[0].deref(bx.cx).codegen_get_discr(bx, ret_ty)
|
args[0].deref(bx.cx()).codegen_get_discr(bx, ret_ty)
|
||||||
}
|
}
|
||||||
|
|
||||||
name if name.starts_with("simd_") => {
|
name if name.starts_with("simd_") => {
|
||||||
|
@ -495,7 +495,7 @@ pub fn codegen_intrinsic_call(
|
||||||
failorder,
|
failorder,
|
||||||
weak);
|
weak);
|
||||||
let val = bx.extract_value(pair, 0);
|
let val = bx.extract_value(pair, 0);
|
||||||
let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx));
|
let success = bx.zext(bx.extract_value(pair, 1), Type::bool(bx.cx()));
|
||||||
|
|
||||||
let dest = result.project_field(bx, 0);
|
let dest = result.project_field(bx, 0);
|
||||||
bx.store(val, dest.llval, dest.align);
|
bx.store(val, dest.llval, dest.align);
|
||||||
|
@ -566,7 +566,7 @@ pub fn codegen_intrinsic_call(
|
||||||
}
|
}
|
||||||
|
|
||||||
"nontemporal_store" => {
|
"nontemporal_store" => {
|
||||||
let dst = args[0].deref(bx.cx);
|
let dst = args[0].deref(bx.cx());
|
||||||
args[1].val.nontemporal_store(bx, dst);
|
args[1].val.nontemporal_store(bx, dst);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
@ -634,7 +634,7 @@ pub fn codegen_intrinsic_call(
|
||||||
// This assumes the type is "simple", i.e. no
|
// This assumes the type is "simple", i.e. no
|
||||||
// destructors, and the contents are SIMD
|
// destructors, and the contents are SIMD
|
||||||
// etc.
|
// etc.
|
||||||
assert!(!bx.cx.type_needs_drop(arg.layout.ty));
|
assert!(!bx.cx().type_needs_drop(arg.layout.ty));
|
||||||
let (ptr, align) = match arg.val {
|
let (ptr, align) = match arg.val {
|
||||||
OperandValue::Ref(ptr, None, align) => (ptr, align),
|
OperandValue::Ref(ptr, None, align) => (ptr, align),
|
||||||
_ => bug!()
|
_ => bug!()
|
||||||
|
@ -645,11 +645,11 @@ pub fn codegen_intrinsic_call(
|
||||||
}).collect()
|
}).collect()
|
||||||
}
|
}
|
||||||
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
|
intrinsics::Type::Pointer(_, Some(ref llvm_elem), _) => {
|
||||||
let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
|
let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
|
||||||
vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
|
vec![bx.pointercast(arg.immediate(), llvm_elem.ptr_to())]
|
||||||
}
|
}
|
||||||
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
|
intrinsics::Type::Vector(_, Some(ref llvm_elem), length) => {
|
||||||
let llvm_elem = one(ty_to_type(bx.cx, llvm_elem));
|
let llvm_elem = one(ty_to_type(bx.cx(), llvm_elem));
|
||||||
vec![
|
vec![
|
||||||
bx.bitcast(arg.immediate(),
|
bx.bitcast(arg.immediate(),
|
||||||
Type::vector(llvm_elem, length as u64))
|
Type::vector(llvm_elem, length as u64))
|
||||||
|
@ -659,7 +659,7 @@ pub fn codegen_intrinsic_call(
|
||||||
// the LLVM intrinsic uses a smaller integer
|
// the LLVM intrinsic uses a smaller integer
|
||||||
// size than the C intrinsic's signature, so
|
// size than the C intrinsic's signature, so
|
||||||
// we have to trim it down here.
|
// we have to trim it down here.
|
||||||
vec![bx.trunc(arg.immediate(), Type::ix(bx.cx, llvm_width as u64))]
|
vec![bx.trunc(arg.immediate(), Type::ix(bx.cx(), llvm_width as u64))]
|
||||||
}
|
}
|
||||||
_ => vec![arg.immediate()],
|
_ => vec![arg.immediate()],
|
||||||
}
|
}
|
||||||
|
@ -723,7 +723,7 @@ fn copy_intrinsic(
|
||||||
src: &'ll Value,
|
src: &'ll Value,
|
||||||
count: &'ll Value,
|
count: &'ll Value,
|
||||||
) -> &'ll Value {
|
) -> &'ll Value {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
let (size, align) = cx.size_and_align_of(ty);
|
let (size, align) = cx.size_and_align_of(ty);
|
||||||
let size = CodegenCx::c_usize(cx, size.bytes());
|
let size = CodegenCx::c_usize(cx, size.bytes());
|
||||||
let align = align.abi();
|
let align = align.abi();
|
||||||
|
@ -744,7 +744,7 @@ fn memset_intrinsic(
|
||||||
val: &'ll Value,
|
val: &'ll Value,
|
||||||
count: &'ll Value
|
count: &'ll Value
|
||||||
) -> &'ll Value {
|
) -> &'ll Value {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
let (size, align) = cx.size_and_align_of(ty);
|
let (size, align) = cx.size_and_align_of(ty);
|
||||||
let size = CodegenCx::c_usize(cx, size.bytes());
|
let size = CodegenCx::c_usize(cx, size.bytes());
|
||||||
let align = CodegenCx::c_i32(cx, align.abi() as i32);
|
let align = CodegenCx::c_i32(cx, align.abi() as i32);
|
||||||
|
@ -763,7 +763,7 @@ fn try_intrinsic(
|
||||||
if bx.sess().no_landing_pads() {
|
if bx.sess().no_landing_pads() {
|
||||||
bx.call(func, &[data], None);
|
bx.call(func, &[data], None);
|
||||||
let ptr_align = bx.tcx().data_layout.pointer_align;
|
let ptr_align = bx.tcx().data_layout.pointer_align;
|
||||||
bx.store(CodegenCx::c_null(Type::i8p(&bx.cx)), dest, ptr_align);
|
bx.store(CodegenCx::c_null(Type::i8p(&bx.cx())), dest, ptr_align);
|
||||||
} else if wants_msvc_seh(bx.sess()) {
|
} else if wants_msvc_seh(bx.sess()) {
|
||||||
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
|
codegen_msvc_try(bx, cx, func, data, local_ptr, dest);
|
||||||
} else {
|
} else {
|
||||||
|
@ -787,9 +787,9 @@ fn codegen_msvc_try(
|
||||||
dest: &'ll Value,
|
dest: &'ll Value,
|
||||||
) {
|
) {
|
||||||
let llfn = get_rust_try_fn(cx, &mut |bx| {
|
let llfn = get_rust_try_fn(cx, &mut |bx| {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
|
|
||||||
bx.set_personality_fn(bx.cx.eh_personality());
|
bx.set_personality_fn(bx.cx().eh_personality());
|
||||||
|
|
||||||
let normal = bx.build_sibling_block("normal");
|
let normal = bx.build_sibling_block("normal");
|
||||||
let catchswitch = bx.build_sibling_block("catchswitch");
|
let catchswitch = bx.build_sibling_block("catchswitch");
|
||||||
|
@ -896,7 +896,7 @@ fn codegen_gnu_try(
|
||||||
dest: &'ll Value,
|
dest: &'ll Value,
|
||||||
) {
|
) {
|
||||||
let llfn = get_rust_try_fn(cx, &mut |bx| {
|
let llfn = get_rust_try_fn(cx, &mut |bx| {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
|
|
||||||
// Codegens the shims described above:
|
// Codegens the shims described above:
|
||||||
//
|
//
|
||||||
|
@ -931,7 +931,7 @@ fn codegen_gnu_try(
|
||||||
// the landing pad clauses the exception's type had been matched to.
|
// the landing pad clauses the exception's type had been matched to.
|
||||||
// rust_try ignores the selector.
|
// rust_try ignores the selector.
|
||||||
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
|
let lpad_ty = Type::struct_(cx, &[Type::i8p(cx), Type::i32(cx)], false);
|
||||||
let vals = catch.landing_pad(lpad_ty, bx.cx.eh_personality(), 1);
|
let vals = catch.landing_pad(lpad_ty, bx.cx().eh_personality(), 1);
|
||||||
catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx)));
|
catch.add_clause(vals, CodegenCx::c_null(Type::i8p(cx)));
|
||||||
let ptr = catch.extract_value(vals, 0);
|
let ptr = catch.extract_value(vals, 0);
|
||||||
let ptr_align = bx.tcx().data_layout.pointer_align;
|
let ptr_align = bx.tcx().data_layout.pointer_align;
|
||||||
|
@ -1125,7 +1125,7 @@ fn generic_simd_intrinsic(
|
||||||
arg_idx, total_len);
|
arg_idx, total_len);
|
||||||
None
|
None
|
||||||
}
|
}
|
||||||
Some(idx) => Some(CodegenCx::c_i32(bx.cx, idx as i32)),
|
Some(idx) => Some(CodegenCx::c_i32(bx.cx(), idx as i32)),
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
.collect();
|
.collect();
|
||||||
|
@ -1167,7 +1167,7 @@ fn generic_simd_intrinsic(
|
||||||
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
|
_ => return_error!("mask element type is `{}`, expected `i_`", m_elem_ty)
|
||||||
}
|
}
|
||||||
// truncate the mask to a vector of i1s
|
// truncate the mask to a vector of i1s
|
||||||
let i1 = Type::i1(bx.cx);
|
let i1 = Type::i1(bx.cx());
|
||||||
let i1xn = Type::vector(i1, m_len as u64);
|
let i1xn = Type::vector(i1, m_len as u64);
|
||||||
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
|
let m_i1s = bx.trunc(args[0].immediate(), i1xn);
|
||||||
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
|
return Ok(bx.select(m_i1s, args[1].immediate(), args[2].immediate()));
|
||||||
|
@ -1229,7 +1229,7 @@ fn generic_simd_intrinsic(
|
||||||
};
|
};
|
||||||
|
|
||||||
let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
|
let llvm_name = &format!("llvm.{0}.v{1}{2}", name, in_len, ety);
|
||||||
let intrinsic = bx.cx.get_intrinsic(&llvm_name);
|
let intrinsic = bx.cx().get_intrinsic(&llvm_name);
|
||||||
let c = bx.call(intrinsic,
|
let c = bx.call(intrinsic,
|
||||||
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
|
&args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(),
|
||||||
None);
|
None);
|
||||||
|
@ -1386,27 +1386,27 @@ fn generic_simd_intrinsic(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Alignment of T, must be a constant integer value:
|
// Alignment of T, must be a constant integer value:
|
||||||
let alignment_ty = Type::i32(bx.cx);
|
let alignment_ty = Type::i32(bx.cx());
|
||||||
let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
|
let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32);
|
||||||
|
|
||||||
// Truncate the mask vector to a vector of i1s:
|
// Truncate the mask vector to a vector of i1s:
|
||||||
let (mask, mask_ty) = {
|
let (mask, mask_ty) = {
|
||||||
let i1 = Type::i1(bx.cx);
|
let i1 = Type::i1(bx.cx());
|
||||||
let i1xn = Type::vector(i1, in_len as u64);
|
let i1xn = Type::vector(i1, in_len as u64);
|
||||||
(bx.trunc(args[2].immediate(), i1xn), i1xn)
|
(bx.trunc(args[2].immediate(), i1xn), i1xn)
|
||||||
};
|
};
|
||||||
|
|
||||||
// Type of the vector of pointers:
|
// Type of the vector of pointers:
|
||||||
let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
|
let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
|
||||||
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
|
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
|
||||||
|
|
||||||
// Type of the vector of elements:
|
// Type of the vector of elements:
|
||||||
let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
|
let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
|
||||||
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
|
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
|
||||||
|
|
||||||
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
|
let llvm_intrinsic = format!("llvm.masked.gather.{}.{}",
|
||||||
llvm_elem_vec_str, llvm_pointer_vec_str);
|
llvm_elem_vec_str, llvm_pointer_vec_str);
|
||||||
let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
|
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
|
||||||
Type::func(&[
|
Type::func(&[
|
||||||
llvm_pointer_vec_ty,
|
llvm_pointer_vec_ty,
|
||||||
alignment_ty,
|
alignment_ty,
|
||||||
|
@ -1486,29 +1486,29 @@ fn generic_simd_intrinsic(
|
||||||
}
|
}
|
||||||
|
|
||||||
// Alignment of T, must be a constant integer value:
|
// Alignment of T, must be a constant integer value:
|
||||||
let alignment_ty = Type::i32(bx.cx);
|
let alignment_ty = Type::i32(bx.cx());
|
||||||
let alignment = CodegenCx::c_i32(bx.cx, bx.cx.align_of(in_elem).abi() as i32);
|
let alignment = CodegenCx::c_i32(bx.cx(), bx.cx().align_of(in_elem).abi() as i32);
|
||||||
|
|
||||||
// Truncate the mask vector to a vector of i1s:
|
// Truncate the mask vector to a vector of i1s:
|
||||||
let (mask, mask_ty) = {
|
let (mask, mask_ty) = {
|
||||||
let i1 = Type::i1(bx.cx);
|
let i1 = Type::i1(bx.cx());
|
||||||
let i1xn = Type::vector(i1, in_len as u64);
|
let i1xn = Type::vector(i1, in_len as u64);
|
||||||
(bx.trunc(args[2].immediate(), i1xn), i1xn)
|
(bx.trunc(args[2].immediate(), i1xn), i1xn)
|
||||||
};
|
};
|
||||||
|
|
||||||
let ret_t = Type::void(bx.cx);
|
let ret_t = Type::void(bx.cx());
|
||||||
|
|
||||||
// Type of the vector of pointers:
|
// Type of the vector of pointers:
|
||||||
let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count);
|
let llvm_pointer_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count);
|
||||||
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
|
let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count);
|
||||||
|
|
||||||
// Type of the vector of elements:
|
// Type of the vector of elements:
|
||||||
let llvm_elem_vec_ty = llvm_vector_ty(bx.cx, underlying_ty, in_len, pointer_count - 1);
|
let llvm_elem_vec_ty = llvm_vector_ty(bx.cx(), underlying_ty, in_len, pointer_count - 1);
|
||||||
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
|
let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1);
|
||||||
|
|
||||||
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
|
let llvm_intrinsic = format!("llvm.masked.scatter.{}.{}",
|
||||||
llvm_elem_vec_str, llvm_pointer_vec_str);
|
llvm_elem_vec_str, llvm_pointer_vec_str);
|
||||||
let f = declare::declare_cfn(bx.cx, &llvm_intrinsic,
|
let f = declare::declare_cfn(bx.cx(), &llvm_intrinsic,
|
||||||
Type::func(&[llvm_elem_vec_ty,
|
Type::func(&[llvm_elem_vec_ty,
|
||||||
llvm_pointer_vec_ty,
|
llvm_pointer_vec_ty,
|
||||||
alignment_ty,
|
alignment_ty,
|
||||||
|
@ -1565,8 +1565,8 @@ fn generic_simd_intrinsic(
|
||||||
} else {
|
} else {
|
||||||
// unordered arithmetic reductions do not:
|
// unordered arithmetic reductions do not:
|
||||||
match f.bit_width() {
|
match f.bit_width() {
|
||||||
32 => CodegenCx::c_undef(Type::f32(bx.cx)),
|
32 => CodegenCx::c_undef(Type::f32(bx.cx())),
|
||||||
64 => CodegenCx::c_undef(Type::f64(bx.cx)),
|
64 => CodegenCx::c_undef(Type::f64(bx.cx())),
|
||||||
v => {
|
v => {
|
||||||
return_error!(r#"
|
return_error!(r#"
|
||||||
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
|
unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
|
||||||
|
@ -1643,7 +1643,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
|
||||||
}
|
}
|
||||||
|
|
||||||
// boolean reductions operate on vectors of i1s:
|
// boolean reductions operate on vectors of i1s:
|
||||||
let i1 = Type::i1(bx.cx);
|
let i1 = Type::i1(bx.cx());
|
||||||
let i1xn = Type::vector(i1, in_len as u64);
|
let i1xn = Type::vector(i1, in_len as u64);
|
||||||
bx.trunc(args[0].immediate(), i1xn)
|
bx.trunc(args[0].immediate(), i1xn)
|
||||||
};
|
};
|
||||||
|
@ -1654,7 +1654,7 @@ unsupported {} from `{}` with element `{}` of size `{}` to `{}`"#,
|
||||||
if !$boolean {
|
if !$boolean {
|
||||||
r
|
r
|
||||||
} else {
|
} else {
|
||||||
bx.zext(r, Type::bool(bx.cx))
|
bx.zext(r, Type::bool(bx.cx()))
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
},
|
},
|
||||||
|
|
|
@ -41,10 +41,10 @@ impl<'a, 'tcx> VirtualIndex {
|
||||||
// Load the data pointer from the object.
|
// Load the data pointer from the object.
|
||||||
debug!("get_fn({:?}, {:?})", llvtable, self);
|
debug!("get_fn({:?}, {:?})", llvtable, self);
|
||||||
|
|
||||||
let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx).ptr_to());
|
let llvtable = bx.pointercast(llvtable, fn_ty.ptr_to_llvm_type(bx.cx()).ptr_to());
|
||||||
let ptr_align = bx.tcx().data_layout.pointer_align;
|
let ptr_align = bx.tcx().data_layout.pointer_align;
|
||||||
let ptr = bx.load(
|
let ptr = bx.load(
|
||||||
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
|
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
|
||||||
ptr_align
|
ptr_align
|
||||||
);
|
);
|
||||||
bx.nonnull_metadata(ptr);
|
bx.nonnull_metadata(ptr);
|
||||||
|
@ -61,10 +61,10 @@ impl<'a, 'tcx> VirtualIndex {
|
||||||
// Load the data pointer from the object.
|
// Load the data pointer from the object.
|
||||||
debug!("get_int({:?}, {:?})", llvtable, self);
|
debug!("get_int({:?}, {:?})", llvtable, self);
|
||||||
|
|
||||||
let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx).ptr_to());
|
let llvtable = bx.pointercast(llvtable, Type::isize(bx.cx()).ptr_to());
|
||||||
let usize_align = bx.tcx().data_layout.pointer_align;
|
let usize_align = bx.tcx().data_layout.pointer_align;
|
||||||
let ptr = bx.load(
|
let ptr = bx.load(
|
||||||
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx, self.0)]),
|
bx.inbounds_gep(llvtable, &[CodegenCx::c_usize(bx.cx(), self.0)]),
|
||||||
usize_align
|
usize_align
|
||||||
);
|
);
|
||||||
// Vtable loads are invariant
|
// Vtable loads are invariant
|
||||||
|
|
|
@ -177,7 +177,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
lp = bx.insert_value(lp, lp1, 1);
|
lp = bx.insert_value(lp, lp1, 1);
|
||||||
bx.resume(lp);
|
bx.resume(lp);
|
||||||
} else {
|
} else {
|
||||||
bx.call(bx.cx.eh_unwind_resume(), &[lp0], cleanup_bundle);
|
bx.call(bx.cx().eh_unwind_resume(), &[lp0], cleanup_bundle);
|
||||||
bx.unreachable();
|
bx.unreachable();
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -185,7 +185,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
|
|
||||||
mir::TerminatorKind::Abort => {
|
mir::TerminatorKind::Abort => {
|
||||||
// Call core::intrinsics::abort()
|
// Call core::intrinsics::abort()
|
||||||
let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
|
let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
|
||||||
bx.call(fnname, &[], None);
|
bx.call(fnname, &[], None);
|
||||||
bx.unreachable();
|
bx.unreachable();
|
||||||
}
|
}
|
||||||
|
@ -209,7 +209,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
bx.cond_br(discr.immediate(), lltrue, llfalse);
|
bx.cond_br(discr.immediate(), lltrue, llfalse);
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
|
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
|
||||||
let llval = CodegenCx::c_uint_big(switch_llty, values[0]);
|
let llval = CodegenCx::c_uint_big(switch_llty, values[0]);
|
||||||
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
|
let cmp = bx.icmp(IntPredicate::IntEQ, discr.immediate(), llval);
|
||||||
bx.cond_br(cmp, lltrue, llfalse);
|
bx.cond_br(cmp, lltrue, llfalse);
|
||||||
|
@ -219,7 +219,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
let switch = bx.switch(discr.immediate(),
|
let switch = bx.switch(discr.immediate(),
|
||||||
llblock(self, *otherwise),
|
llblock(self, *otherwise),
|
||||||
values.len());
|
values.len());
|
||||||
let switch_llty = bx.cx.layout_of(switch_ty).immediate_llvm_type(bx.cx);
|
let switch_llty = bx.cx().layout_of(switch_ty).immediate_llvm_type(bx.cx());
|
||||||
for (&value, target) in values.iter().zip(targets) {
|
for (&value, target) in values.iter().zip(targets) {
|
||||||
let llval = CodegenCx::c_uint_big(switch_llty, value);
|
let llval = CodegenCx::c_uint_big(switch_llty, value);
|
||||||
let llbb = llblock(self, *target);
|
let llbb = llblock(self, *target);
|
||||||
|
@ -269,7 +269,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
bx.load(
|
bx.load(
|
||||||
bx.pointercast(llslot, cast_ty.llvm_type(bx.cx).ptr_to()),
|
bx.pointercast(llslot, cast_ty.llvm_type(bx.cx()).ptr_to()),
|
||||||
self.fn_ty.ret.layout.align)
|
self.fn_ty.ret.layout.align)
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
|
@ -283,7 +283,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
mir::TerminatorKind::Drop { ref location, target, unwind } => {
|
mir::TerminatorKind::Drop { ref location, target, unwind } => {
|
||||||
let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
|
let ty = location.ty(self.mir, bx.tcx()).to_ty(bx.tcx());
|
||||||
let ty = self.monomorphize(&ty);
|
let ty = self.monomorphize(&ty);
|
||||||
let drop_fn = monomorphize::resolve_drop_in_place(bx.cx.tcx, ty);
|
let drop_fn = monomorphize::resolve_drop_in_place(bx.cx().tcx, ty);
|
||||||
|
|
||||||
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
|
if let ty::InstanceDef::DropGlue(_, None) = drop_fn.def {
|
||||||
// we don't actually need to drop anything.
|
// we don't actually need to drop anything.
|
||||||
|
@ -302,19 +302,19 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
};
|
};
|
||||||
let (drop_fn, fn_ty) = match ty.sty {
|
let (drop_fn, fn_ty) = match ty.sty {
|
||||||
ty::Dynamic(..) => {
|
ty::Dynamic(..) => {
|
||||||
let sig = drop_fn.fn_sig(bx.cx.tcx);
|
let sig = drop_fn.fn_sig(bx.tcx());
|
||||||
let sig = bx.tcx().normalize_erasing_late_bound_regions(
|
let sig = bx.tcx().normalize_erasing_late_bound_regions(
|
||||||
ty::ParamEnv::reveal_all(),
|
ty::ParamEnv::reveal_all(),
|
||||||
&sig,
|
&sig,
|
||||||
);
|
);
|
||||||
let fn_ty = FnType::new_vtable(bx.cx, sig, &[]);
|
let fn_ty = FnType::new_vtable(bx.cx(), sig, &[]);
|
||||||
let vtable = args[1];
|
let vtable = args[1];
|
||||||
args = &args[..1];
|
args = &args[..1];
|
||||||
(meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
|
(meth::DESTRUCTOR.get_fn(&bx, vtable, &fn_ty), fn_ty)
|
||||||
}
|
}
|
||||||
_ => {
|
_ => {
|
||||||
(callee::get_fn(bx.cx, drop_fn),
|
(callee::get_fn(bx.cx(), drop_fn),
|
||||||
FnType::of_instance(bx.cx, &drop_fn))
|
FnType::of_instance(bx.cx(), &drop_fn))
|
||||||
}
|
}
|
||||||
};
|
};
|
||||||
do_call(self, bx, fn_ty, drop_fn, args,
|
do_call(self, bx, fn_ty, drop_fn, args,
|
||||||
|
@ -333,7 +333,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
// NOTE: Unlike binops, negation doesn't have its own
|
// NOTE: Unlike binops, negation doesn't have its own
|
||||||
// checked operation, just a comparison with the minimum
|
// checked operation, just a comparison with the minimum
|
||||||
// value, so we have to check for the assert message.
|
// value, so we have to check for the assert message.
|
||||||
if !bx.cx.check_overflow {
|
if !bx.cx().check_overflow {
|
||||||
if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
|
if let mir::interpret::EvalErrorKind::OverflowNeg = *msg {
|
||||||
const_cond = Some(expected);
|
const_cond = Some(expected);
|
||||||
}
|
}
|
||||||
|
@ -346,8 +346,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Pass the condition through llvm.expect for branch hinting.
|
// Pass the condition through llvm.expect for branch hinting.
|
||||||
let expect = bx.cx.get_intrinsic(&"llvm.expect.i1");
|
let expect = bx.cx().get_intrinsic(&"llvm.expect.i1");
|
||||||
let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx, expected)], None);
|
let cond = bx.call(expect, &[cond, CodegenCx::c_bool(bx.cx(), expected)], None);
|
||||||
|
|
||||||
// Create the failure block and the conditional branch to it.
|
// Create the failure block and the conditional branch to it.
|
||||||
let lltarget = llblock(self, target);
|
let lltarget = llblock(self, target);
|
||||||
|
@ -365,9 +365,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
// Get the location information.
|
// Get the location information.
|
||||||
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
|
let loc = bx.sess().source_map().lookup_char_pos(span.lo());
|
||||||
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
|
let filename = Symbol::intern(&loc.file.name.to_string()).as_str();
|
||||||
let filename = CodegenCx::c_str_slice(bx.cx, filename);
|
let filename = CodegenCx::c_str_slice(bx.cx(), filename);
|
||||||
let line = CodegenCx::c_u32(bx.cx, loc.line as u32);
|
let line = CodegenCx::c_u32(bx.cx(), loc.line as u32);
|
||||||
let col = CodegenCx::c_u32(bx.cx, loc.col.to_usize() as u32 + 1);
|
let col = CodegenCx::c_u32(bx.cx(), loc.col.to_usize() as u32 + 1);
|
||||||
let align = tcx.data_layout.aggregate_align
|
let align = tcx.data_layout.aggregate_align
|
||||||
.max(tcx.data_layout.i32_align)
|
.max(tcx.data_layout.i32_align)
|
||||||
.max(tcx.data_layout.pointer_align);
|
.max(tcx.data_layout.pointer_align);
|
||||||
|
@ -378,9 +378,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
let len = self.codegen_operand(&mut bx, len).immediate();
|
let len = self.codegen_operand(&mut bx, len).immediate();
|
||||||
let index = self.codegen_operand(&mut bx, index).immediate();
|
let index = self.codegen_operand(&mut bx, index).immediate();
|
||||||
|
|
||||||
let file_line_col = CodegenCx::c_struct(bx.cx,
|
let file_line_col = CodegenCx::c_struct(bx.cx(),
|
||||||
&[filename, line, col], false);
|
&[filename, line, col], false);
|
||||||
let file_line_col = consts::addr_of(bx.cx,
|
let file_line_col = consts::addr_of(bx.cx(),
|
||||||
file_line_col,
|
file_line_col,
|
||||||
align,
|
align,
|
||||||
Some("panic_bounds_check_loc"));
|
Some("panic_bounds_check_loc"));
|
||||||
|
@ -390,13 +390,13 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
_ => {
|
_ => {
|
||||||
let str = msg.description();
|
let str = msg.description();
|
||||||
let msg_str = Symbol::intern(str).as_str();
|
let msg_str = Symbol::intern(str).as_str();
|
||||||
let msg_str = CodegenCx::c_str_slice(bx.cx, msg_str);
|
let msg_str = CodegenCx::c_str_slice(bx.cx(), msg_str);
|
||||||
let msg_file_line_col = CodegenCx::c_struct(
|
let msg_file_line_col = CodegenCx::c_struct(
|
||||||
bx.cx,
|
bx.cx(),
|
||||||
&[msg_str, filename, line, col],
|
&[msg_str, filename, line, col],
|
||||||
false
|
false
|
||||||
);
|
);
|
||||||
let msg_file_line_col = consts::addr_of(bx.cx,
|
let msg_file_line_col = consts::addr_of(bx.cx(),
|
||||||
msg_file_line_col,
|
msg_file_line_col,
|
||||||
align,
|
align,
|
||||||
Some("panic_loc"));
|
Some("panic_loc"));
|
||||||
|
@ -408,8 +408,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
// Obtain the panic entry point.
|
// Obtain the panic entry point.
|
||||||
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
|
let def_id = common::langcall(bx.tcx(), Some(span), "", lang_item);
|
||||||
let instance = ty::Instance::mono(bx.tcx(), def_id);
|
let instance = ty::Instance::mono(bx.tcx(), def_id);
|
||||||
let fn_ty = FnType::of_instance(bx.cx, &instance);
|
let fn_ty = FnType::of_instance(bx.cx(), &instance);
|
||||||
let llfn = callee::get_fn(bx.cx, instance);
|
let llfn = callee::get_fn(bx.cx(), instance);
|
||||||
|
|
||||||
// Codegen the actual panic invoke/call.
|
// Codegen the actual panic invoke/call.
|
||||||
do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
|
do_call(self, bx, fn_ty, llfn, &args, None, cleanup);
|
||||||
|
@ -431,7 +431,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
|
|
||||||
let (instance, mut llfn) = match callee.layout.ty.sty {
|
let (instance, mut llfn) = match callee.layout.ty.sty {
|
||||||
ty::FnDef(def_id, substs) => {
|
ty::FnDef(def_id, substs) => {
|
||||||
(Some(ty::Instance::resolve(bx.cx.tcx,
|
(Some(ty::Instance::resolve(bx.cx().tcx,
|
||||||
ty::ParamEnv::reveal_all(),
|
ty::ParamEnv::reveal_all(),
|
||||||
def_id,
|
def_id,
|
||||||
substs).unwrap()),
|
substs).unwrap()),
|
||||||
|
@ -470,7 +470,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
// we can do what we like. Here, we declare that transmuting
|
// we can do what we like. Here, we declare that transmuting
|
||||||
// into an uninhabited type is impossible, so anything following
|
// into an uninhabited type is impossible, so anything following
|
||||||
// it must be unreachable.
|
// it must be unreachable.
|
||||||
assert_eq!(bx.cx.layout_of(sig.output()).abi, layout::Abi::Uninhabited);
|
assert_eq!(bx.cx().layout_of(sig.output()).abi, layout::Abi::Uninhabited);
|
||||||
bx.unreachable();
|
bx.unreachable();
|
||||||
}
|
}
|
||||||
return;
|
return;
|
||||||
|
@ -484,7 +484,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
|
|
||||||
let fn_ty = match def {
|
let fn_ty = match def {
|
||||||
Some(ty::InstanceDef::Virtual(..)) => {
|
Some(ty::InstanceDef::Virtual(..)) => {
|
||||||
FnType::new_vtable(bx.cx, sig, &extra_args)
|
FnType::new_vtable(bx.cx(), sig, &extra_args)
|
||||||
}
|
}
|
||||||
Some(ty::InstanceDef::DropGlue(_, None)) => {
|
Some(ty::InstanceDef::DropGlue(_, None)) => {
|
||||||
// empty drop glue - a nop.
|
// empty drop glue - a nop.
|
||||||
|
@ -492,7 +492,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
funclet_br(self, bx, target);
|
funclet_br(self, bx, target);
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
_ => FnType::new(bx.cx, sig, &extra_args)
|
_ => FnType::new(bx.cx(), sig, &extra_args)
|
||||||
};
|
};
|
||||||
|
|
||||||
// emit a panic instead of instantiating an uninhabited type
|
// emit a panic instead of instantiating an uninhabited type
|
||||||
|
@ -563,7 +563,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
let dest = match ret_dest {
|
let dest = match ret_dest {
|
||||||
_ if fn_ty.ret.is_indirect() => llargs[0],
|
_ if fn_ty.ret.is_indirect() => llargs[0],
|
||||||
ReturnDest::Nothing => {
|
ReturnDest::Nothing => {
|
||||||
CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx).ptr_to())
|
CodegenCx::c_undef(fn_ty.ret.memory_ty(bx.cx()).ptr_to())
|
||||||
}
|
}
|
||||||
ReturnDest::IndirectOperand(dst, _) |
|
ReturnDest::IndirectOperand(dst, _) |
|
||||||
ReturnDest::Store(dst) => dst.llval,
|
ReturnDest::Store(dst) => dst.llval,
|
||||||
|
@ -597,7 +597,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
);
|
);
|
||||||
return OperandRef {
|
return OperandRef {
|
||||||
val: Immediate(llval),
|
val: Immediate(llval),
|
||||||
layout: bx.cx.layout_of(ty),
|
layout: bx.cx().layout_of(ty),
|
||||||
};
|
};
|
||||||
|
|
||||||
},
|
},
|
||||||
|
@ -615,7 +615,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
);
|
);
|
||||||
return OperandRef {
|
return OperandRef {
|
||||||
val: Immediate(llval),
|
val: Immediate(llval),
|
||||||
layout: bx.cx.layout_of(ty)
|
layout: bx.cx().layout_of(ty)
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -625,7 +625,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
}).collect();
|
}).collect();
|
||||||
|
|
||||||
|
|
||||||
let callee_ty = instance.as_ref().unwrap().ty(bx.cx.tcx);
|
let callee_ty = instance.as_ref().unwrap().ty(bx.cx().tcx);
|
||||||
codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
|
codegen_intrinsic_call(&bx, callee_ty, &fn_ty, &args, dest,
|
||||||
terminator.source_info.span);
|
terminator.source_info.span);
|
||||||
|
|
||||||
|
@ -722,7 +722,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
|
|
||||||
let fn_ptr = match (llfn, instance) {
|
let fn_ptr = match (llfn, instance) {
|
||||||
(Some(llfn), _) => llfn,
|
(Some(llfn), _) => llfn,
|
||||||
(None, Some(instance)) => callee::get_fn(bx.cx, instance),
|
(None, Some(instance)) => callee::get_fn(bx.cx(), instance),
|
||||||
_ => span_bug!(span, "no llfn for call"),
|
_ => span_bug!(span, "no llfn for call"),
|
||||||
};
|
};
|
||||||
|
|
||||||
|
@ -744,7 +744,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
arg: &ArgType<'tcx, Ty<'tcx>>) {
|
arg: &ArgType<'tcx, Ty<'tcx>>) {
|
||||||
// Fill padding with undef value, where applicable.
|
// Fill padding with undef value, where applicable.
|
||||||
if let Some(ty) = arg.pad {
|
if let Some(ty) = arg.pad {
|
||||||
llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx)));
|
llargs.push(CodegenCx::c_undef(ty.llvm_type(bx.cx())));
|
||||||
}
|
}
|
||||||
|
|
||||||
if arg.is_ignore() {
|
if arg.is_ignore() {
|
||||||
|
@ -804,7 +804,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
if by_ref && !arg.is_indirect() {
|
if by_ref && !arg.is_indirect() {
|
||||||
// Have to load the argument, maybe while casting it.
|
// Have to load the argument, maybe while casting it.
|
||||||
if let PassMode::Cast(ty) = arg.mode {
|
if let PassMode::Cast(ty) = arg.mode {
|
||||||
llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx).ptr_to()),
|
llval = bx.load(bx.pointercast(llval, ty.llvm_type(bx.cx()).ptr_to()),
|
||||||
align.min(arg.layout.align));
|
align.min(arg.layout.align));
|
||||||
} else {
|
} else {
|
||||||
// We can't use `PlaceRef::load` here because the argument
|
// We can't use `PlaceRef::load` here because the argument
|
||||||
|
@ -855,7 +855,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
&mut self,
|
&mut self,
|
||||||
bx: &Builder<'a, 'll, 'tcx>
|
bx: &Builder<'a, 'll, 'tcx>
|
||||||
) -> PlaceRef<'tcx, &'ll Value> {
|
) -> PlaceRef<'tcx, &'ll Value> {
|
||||||
let cx = bx.cx;
|
let cx = bx.cx();
|
||||||
if let Some(slot) = self.personality_slot {
|
if let Some(slot) = self.personality_slot {
|
||||||
slot
|
slot
|
||||||
} else {
|
} else {
|
||||||
|
@ -992,7 +992,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
|
LocalRef::Place(place) => self.codegen_transmute_into(bx, src, place),
|
||||||
LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
|
LocalRef::UnsizedPlace(_) => bug!("transmute must not involve unsized locals"),
|
||||||
LocalRef::Operand(None) => {
|
LocalRef::Operand(None) => {
|
||||||
let dst_layout = bx.cx.layout_of(self.monomorphized_place_ty(dst));
|
let dst_layout = bx.cx().layout_of(self.monomorphized_place_ty(dst));
|
||||||
assert!(!dst_layout.ty.has_erasable_regions());
|
assert!(!dst_layout.ty.has_erasable_regions());
|
||||||
let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
|
let place = PlaceRef::alloca(bx, dst_layout, "transmute_temp");
|
||||||
place.storage_live(bx);
|
place.storage_live(bx);
|
||||||
|
@ -1016,7 +1016,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
src: &mir::Operand<'tcx>,
|
src: &mir::Operand<'tcx>,
|
||||||
dst: PlaceRef<'tcx, &'ll Value>) {
|
dst: PlaceRef<'tcx, &'ll Value>) {
|
||||||
let src = self.codegen_operand(bx, src);
|
let src = self.codegen_operand(bx, src);
|
||||||
let llty = src.layout.llvm_type(bx.cx);
|
let llty = src.layout.llvm_type(bx.cx());
|
||||||
let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
|
let cast_ptr = bx.pointercast(dst.llval, llty.ptr_to());
|
||||||
let align = src.layout.align.min(dst.layout.align);
|
let align = src.layout.align.min(dst.layout.align);
|
||||||
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
|
src.val.store(bx, PlaceRef::new_sized(cast_ptr, src.layout, align));
|
||||||
|
|
|
@ -194,20 +194,20 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
c,
|
c,
|
||||||
)?;
|
)?;
|
||||||
if let Some(prim) = field.val.try_to_scalar() {
|
if let Some(prim) = field.val.try_to_scalar() {
|
||||||
let layout = bx.cx.layout_of(field_ty);
|
let layout = bx.cx().layout_of(field_ty);
|
||||||
let scalar = match layout.abi {
|
let scalar = match layout.abi {
|
||||||
layout::Abi::Scalar(ref x) => x,
|
layout::Abi::Scalar(ref x) => x,
|
||||||
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
|
_ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
|
||||||
};
|
};
|
||||||
Ok(scalar_to_llvm(
|
Ok(scalar_to_llvm(
|
||||||
bx.cx, prim, scalar,
|
bx.cx(), prim, scalar,
|
||||||
layout.immediate_llvm_type(bx.cx),
|
layout.immediate_llvm_type(bx.cx()),
|
||||||
))
|
))
|
||||||
} else {
|
} else {
|
||||||
bug!("simd shuffle field {:?}", field)
|
bug!("simd shuffle field {:?}", field)
|
||||||
}
|
}
|
||||||
}).collect();
|
}).collect();
|
||||||
let llval = CodegenCx::c_struct(bx.cx, &values?, false);
|
let llval = CodegenCx::c_struct(bx.cx(), &values?, false);
|
||||||
Ok((llval, c.ty))
|
Ok((llval, c.ty))
|
||||||
})
|
})
|
||||||
.unwrap_or_else(|_| {
|
.unwrap_or_else(|_| {
|
||||||
|
@ -217,7 +217,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
|
||||||
);
|
);
|
||||||
// We've errored, so we don't have to produce working code.
|
// We've errored, so we don't have to produce working code.
|
||||||
let ty = self.monomorphize(&ty);
|
let ty = self.monomorphize(&ty);
|
||||||
let llty = bx.cx.layout_of(ty).llvm_type(bx.cx);
|
let llty = bx.cx().layout_of(ty).llvm_type(bx.cx());
|
||||||
(CodegenCx::c_undef(llty), ty)
|
(CodegenCx::c_undef(llty), ty)
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
|
@@ -275,7 +275,7 @@ pub fn codegen_mir(

 let mut allocate_local = |local| {
 let decl = &mir.local_decls[local];
-let layout = bx.cx.layout_of(fx.monomorphize(&decl.ty));
+let layout = bx.cx().layout_of(fx.monomorphize(&decl.ty));
 assert!(!layout.ty.has_erasable_regions());

 if let Some(name) = decl.name {
@@ -285,7 +285,7 @@ pub fn codegen_mir(

 if !memory_locals.contains(local) && !dbg {
 debug!("alloc: {:?} ({}) -> operand", local, name);
-return LocalRef::new_operand(bx.cx, layout);
+return LocalRef::new_operand(bx.cx(), layout);
 }

 debug!("alloc: {:?} ({}) -> place", local, name);
@@ -327,7 +327,7 @@ pub fn codegen_mir(
 // alloca in advance. Instead we wait until we see the
 // definition and update the operand there.
 debug!("alloc: {:?} -> operand", local);
-LocalRef::new_operand(bx.cx, layout)
+LocalRef::new_operand(bx.cx(), layout)
 }
 }
 };
@@ -420,8 +420,8 @@ fn create_funclets(
 // C++ personality function, but `catch (...)` has no type so
 // it's null. The 64 here is actually a bitfield which
 // represents that this is a catch-all block.
-let null = CodegenCx::c_null(Type::i8p(bx.cx));
+let null = CodegenCx::c_null(Type::i8p(bx.cx()));
-let sixty_four = CodegenCx::c_i32(bx.cx, 64);
+let sixty_four = CodegenCx::c_i32(bx.cx(), 64);
 cleanup = cp_bx.catch_pad(cs, &[null, sixty_four, null]);
 cp_bx.br(llbb);
 }
@@ -480,7 +480,7 @@ fn arg_local_refs(
 _ => bug!("spread argument isn't a tuple?!")
 };

-let place = PlaceRef::alloca(bx, bx.cx.layout_of(arg_ty), &name);
+let place = PlaceRef::alloca(bx, bx.cx().layout_of(arg_ty), &name);
 for i in 0..tupled_arg_tys.len() {
 let arg = &fx.fn_ty.args[idx];
 idx += 1;
@@ -523,7 +523,7 @@ fn arg_local_refs(
 let local = |op| LocalRef::Operand(Some(op));
 match arg.mode {
 PassMode::Ignore => {
-return local(OperandRef::new_zst(bx.cx, arg.layout));
+return local(OperandRef::new_zst(bx.cx(), arg.layout));
 }
 PassMode::Direct(_) => {
 let llarg = llvm::get_param(bx.llfn(), llarg_idx as c_uint);
@@ -602,7 +602,7 @@ fn arg_local_refs(
 // Or is it the closure environment?
 let (closure_layout, env_ref) = match arg.layout.ty.sty {
 ty::RawPtr(ty::TypeAndMut { ty, .. }) |
-ty::Ref(_, ty, _) => (bx.cx.layout_of(ty), true),
+ty::Ref(_, ty, _) => (bx.cx().layout_of(ty), true),
 _ => (arg.layout, false)
 };

@@ -624,7 +624,7 @@ fn arg_local_refs(
 let env_alloca = !env_ref && llvm_util::get_major_version() < 6;
 let env_ptr = if env_alloca {
 let scratch = PlaceRef::alloca(bx,
-bx.cx.layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
+bx.cx().layout_of(tcx.mk_mut_ptr(arg.layout.ty)),
 "__debuginfo_env_ptr");
 bx.store(place.llval, scratch.llval, scratch.align);
 scratch.llval
@@ -81,10 +81,10 @@ impl OperandRef<'tcx, &'ll Value> {
 pub fn from_const(bx: &Builder<'a, 'll, 'tcx>,
 val: &'tcx ty::Const<'tcx>)
 -> Result<OperandRef<'tcx, &'ll Value>, ErrorHandled> {
-let layout = bx.cx.layout_of(val.ty);
+let layout = bx.cx().layout_of(val.ty);

 if layout.is_zst() {
-return Ok(OperandRef::new_zst(bx.cx, layout));
+return Ok(OperandRef::new_zst(bx.cx(), layout));
 }

 let val = match val.val {
@@ -95,10 +95,10 @@ impl OperandRef<'tcx, &'ll Value> {
 _ => bug!("from_const: invalid ByVal layout: {:#?}", layout)
 };
 let llval = scalar_to_llvm(
-bx.cx,
+bx.cx(),
 x,
 scalar,
-layout.immediate_llvm_type(bx.cx),
+layout.immediate_llvm_type(bx.cx()),
 );
 OperandValue::Immediate(llval)
 },
@@ -108,14 +108,14 @@ impl OperandRef<'tcx, &'ll Value> {
 _ => bug!("from_const: invalid ScalarPair layout: {:#?}", layout)
 };
 let a_llval = scalar_to_llvm(
-bx.cx,
+bx.cx(),
 a,
 a_scalar,
-layout.scalar_pair_element_llvm_type(bx.cx, 0, true),
+layout.scalar_pair_element_llvm_type(bx.cx(), 0, true),
 );
-let b_layout = layout.scalar_pair_element_llvm_type(bx.cx, 1, true);
+let b_layout = layout.scalar_pair_element_llvm_type(bx.cx(), 1, true);
 let b_llval = scalar_to_llvm(
-bx.cx,
+bx.cx(),
 b,
 b_scalar,
 b_layout,
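As background for the from_const hunks above: a constant is lowered to one immediate when its layout ABI is Scalar and to two when it is ScalarPair, which is why the code branches on layout.abi before calling scalar_to_llvm once or twice. A rough, runnable illustration of that classification (not compiler code; the type names are examples):

fn immediate_count(ty: &str) -> usize {
    match ty {
        "u32" | "bool" | "*const u8" => 1, // Abi::Scalar: one immediate
        "&str" | "&[u8]" => 2,             // Abi::ScalarPair: data + length
        _ => 0,                            // other layouts go through memory
    }
}

fn main() {
    assert_eq!(immediate_count("u32"), 1);
    assert_eq!(immediate_count("&str"), 2);
}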
@@ -163,7 +163,7 @@ impl OperandRef<'tcx, &'ll Value> {
 /// For other cases, see `immediate`.
 pub fn immediate_or_packed_pair(self, bx: &Builder<'a, 'll, 'tcx>) -> &'ll Value {
 if let OperandValue::Pair(a, b) = self.val {
-let llty = self.layout.llvm_type(bx.cx);
+let llty = self.layout.llvm_type(bx.cx());
 debug!("Operand::immediate_or_packed_pair: packing {:?} into {:?}",
 self, llty);
 // Reconstruct the immediate aggregate.
@@ -200,13 +200,13 @@ impl OperandRef<'tcx, &'ll Value> {
 bx: &Builder<'a, 'll, 'tcx>,
 i: usize,
 ) -> OperandRef<'tcx, &'ll Value> {
-let field = self.layout.field(bx.cx, i);
+let field = self.layout.field(bx.cx(), i);
 let offset = self.layout.fields.offset(i);

 let mut val = match (self.val, &self.layout.abi) {
 // If the field is ZST, it has no data.
 _ if field.is_zst() => {
-return OperandRef::new_zst(bx.cx, field);
+return OperandRef::new_zst(bx.cx(), field);
 }

 // Newtype of a scalar, scalar pair or vector.
@@ -219,12 +219,12 @@ impl OperandRef<'tcx, &'ll Value> {
 // Extract a scalar component from a pair.
 (OperandValue::Pair(a_llval, b_llval), &layout::Abi::ScalarPair(ref a, ref b)) => {
 if offset.bytes() == 0 {
-assert_eq!(field.size, a.value.size(bx.cx));
+assert_eq!(field.size, a.value.size(bx.cx()));
 OperandValue::Immediate(a_llval)
 } else {
-assert_eq!(offset, a.value.size(bx.cx)
+assert_eq!(offset, a.value.size(bx.cx())
-.abi_align(b.value.align(bx.cx)));
+.abi_align(b.value.align(bx.cx())));
-assert_eq!(field.size, b.value.size(bx.cx));
+assert_eq!(field.size, b.value.size(bx.cx()));
 OperandValue::Immediate(b_llval)
 }
 }
@@ -232,7 +232,7 @@ impl OperandRef<'tcx, &'ll Value> {
 // `#[repr(simd)]` types are also immediate.
 (OperandValue::Immediate(llval), &layout::Abi::Vector { .. }) => {
 OperandValue::Immediate(
-bx.extract_element(llval, CodegenCx::c_usize(bx.cx, i as u64)))
+bx.extract_element(llval, CodegenCx::c_usize(bx.cx(), i as u64)))
 }

 _ => bug!("OperandRef::extract_field({:?}): not applicable", self)
@@ -241,11 +241,11 @@ impl OperandRef<'tcx, &'ll Value> {
 // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
 match val {
 OperandValue::Immediate(ref mut llval) => {
-*llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx));
+*llval = bx.bitcast(*llval, field.immediate_llvm_type(bx.cx()));
 }
 OperandValue::Pair(ref mut a, ref mut b) => {
-*a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx, 0, true));
+*a = bx.bitcast(*a, field.scalar_pair_element_llvm_type(bx.cx(), 0, true));
-*b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx, 1, true));
+*b = bx.bitcast(*b, field.scalar_pair_element_llvm_type(bx.cx(), 1, true));
 }
 OperandValue::Ref(..) => bug!()
 }
@@ -349,7 +349,7 @@ impl OperandValue<&'ll Value> {

 // Allocate an appropriate region on the stack, and copy the value into it
 let (llsize, _) = glue::size_and_align_of_dst(bx, unsized_ty, Some(llextra));
-let lldst = bx.array_alloca(Type::i8(bx.cx), llsize, "unsized_tmp", max_align);
+let lldst = bx.array_alloca(Type::i8(bx.cx()), llsize, "unsized_tmp", max_align);
 base::call_memcpy(bx, lldst, max_align, llptr, min_align, llsize, flags);

 // Store the allocated region and the extra to the indirect place.
@@ -394,9 +394,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 // ZSTs don't require any actual memory access.
 // FIXME(eddyb) deduplicate this with the identical
 // checks in `codegen_consume` and `extract_field`.
-let elem = o.layout.field(bx.cx, 0);
+let elem = o.layout.field(bx.cx(), 0);
 if elem.is_zst() {
-return Some(OperandRef::new_zst(bx.cx, elem));
+return Some(OperandRef::new_zst(bx.cx(), elem));
 }
 }
 _ => {}
@@ -415,11 +415,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 debug!("codegen_consume(place={:?})", place);

 let ty = self.monomorphized_place_ty(place);
-let layout = bx.cx.layout_of(ty);
+let layout = bx.cx().layout_of(ty);

 // ZSTs don't require any actual memory access.
 if layout.is_zst() {
-return OperandRef::new_zst(bx.cx, layout);
+return OperandRef::new_zst(bx.cx(), layout);
 }

 if let Some(o) = self.maybe_codegen_consume_direct(bx, place) {
@@ -458,12 +458,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 }
 // Allow RalfJ to sleep soundly knowing that even refactorings that remove
 // the above error (or silence it under some conditions) will not cause UB
-let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
 bx.call(fnname, &[], None);
 // We've errored, so we don't have to produce working code.
-let layout = bx.cx.layout_of(ty);
+let layout = bx.cx().layout_of(ty);
 PlaceRef::new_sized(
-CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to()),
+CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to()),
 layout,
 layout.align,
 ).load(bx)
@@ -64,15 +64,15 @@ impl PlaceRef<'tcx, &'ll Value> {
 alloc: &mir::interpret::Allocation,
 offset: Size,
 ) -> PlaceRef<'tcx, &'ll Value> {
-let init = const_alloc_to_llvm(bx.cx, alloc);
+let init = const_alloc_to_llvm(bx.cx(), alloc);
-let base_addr = consts::addr_of(bx.cx, init, layout.align, None);
+let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);

 let llval = unsafe { LLVMConstInBoundsGEP(
-consts::bitcast(base_addr, Type::i8p(bx.cx)),
+consts::bitcast(base_addr, Type::i8p(bx.cx())),
-&CodegenCx::c_usize(bx.cx, offset.bytes()),
+&CodegenCx::c_usize(bx.cx(), offset.bytes()),
 1,
 )};
-let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
+let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to());
 PlaceRef::new_sized(llval, layout, alloc.align)
 }

@@ -80,7 +80,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 -> PlaceRef<'tcx, &'ll Value> {
 debug!("alloca({:?}: {:?})", name, layout);
 assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
-let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
+let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
 Self::new_sized(tmp, layout, layout.align)
 }

@@ -92,8 +92,8 @@ impl PlaceRef<'tcx, &'ll Value> {
 ) -> PlaceRef<'tcx, &'ll Value> {
 debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
 assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
-let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty);
+let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
-let ptr_layout = bx.cx.layout_of(ptr_ty);
+let ptr_layout = bx.cx().layout_of(ptr_ty);
 Self::alloca(bx, ptr_layout, name)
 }

@@ -116,14 +116,14 @@ impl PlaceRef<'tcx, &'ll Value> {
 assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

 if self.layout.is_zst() {
-return OperandRef::new_zst(bx.cx, self.layout);
+return OperandRef::new_zst(bx.cx(), self.layout);
 }

 let scalar_load_metadata = |load, scalar: &layout::Scalar| {
 let vr = scalar.valid_range.clone();
 match scalar.value {
 layout::Int(..) => {
-let range = scalar.valid_range_exclusive(bx.cx);
+let range = scalar.valid_range_exclusive(bx.cx());
 if range.start != range.end {
 bx.range_metadata(load, range);
 }
@@ -160,7 +160,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 let load = bx.load(llptr, self.align);
 scalar_load_metadata(load, scalar);
 if scalar.is_bool() {
-bx.trunc(load, Type::i1(bx.cx))
+bx.trunc(load, Type::i1(bx.cx()))
 } else {
 load
 }
@@ -179,7 +179,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 bx: &Builder<'a, 'll, 'tcx>,
 ix: usize,
 ) -> PlaceRef<'tcx, &'ll Value> {
-let cx = bx.cx;
+let cx = bx.cx();
 let field = self.layout.field(cx, ix);
 let offset = self.layout.fields.offset(ix);
 let effective_field_align = self.align.restrict_for_offset(offset);
@@ -287,7 +287,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 bx: &Builder<'a, 'll, 'tcx>,
 cast_to: Ty<'tcx>
 ) -> &'ll Value {
-let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
+let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
 if self.layout.abi.is_uninhabited() {
 return CodegenCx::c_undef(cast_to);
 }
@@ -295,7 +295,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 layout::Variants::Single { index } => {
 let discr_val = self.layout.ty.ty_adt_def().map_or(
 index.as_u32() as u128,
-|def| def.discriminant_for_variant(bx.cx.tcx, index).val);
+|def| def.discriminant_for_variant(bx.cx().tcx, index).val);
 return CodegenCx::c_uint_big(cast_to, discr_val);
 }
 layout::Variants::Tagged { .. } |
@@ -323,7 +323,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 niche_start,
 ..
 } => {
-let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
+let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
 if niche_variants.start() == niche_variants.end() {
 // FIXME(eddyb) Check the actual primitive type here.
 let niche_llval = if niche_start == 0 {
@@ -352,7 +352,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 /// Set the discriminant for a new value of the given case of the given
 /// representation.
 pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
-if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() {
+if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
 return;
 }
 match self.layout.variants {
@@ -365,7 +365,7 @@ impl PlaceRef<'tcx, &'ll Value> {
 .discriminant_for_variant(bx.tcx(), variant_index)
 .val;
 bx.store(
-CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx), to),
+CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx()), to),
 ptr.llval,
 ptr.align);
 }
@@ -380,16 +380,16 @@ impl PlaceRef<'tcx, &'ll Value> {
 bx.sess().target.target.arch == "aarch64" {
 // Issue #34427: As workaround for LLVM bug on ARM,
 // use memset of 0 before assigning niche value.
-let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
+let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to());
-let fill_byte = CodegenCx::c_u8(bx.cx, 0);
+let fill_byte = CodegenCx::c_u8(bx.cx(), 0);
 let (size, align) = self.layout.size_and_align();
-let size = CodegenCx::c_usize(bx.cx, size.bytes());
+let size = CodegenCx::c_usize(bx.cx(), size.bytes());
-let align = CodegenCx::c_u32(bx.cx, align.abi() as u32);
+let align = CodegenCx::c_u32(bx.cx(), align.abi() as u32);
 base::call_memset(bx, llptr, fill_byte, size, align, false);
 }

 let niche = self.project_field(bx, 0);
-let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
+let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
 let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
 let niche_value = (niche_value as u128)
 .wrapping_add(niche_start);
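The niche tag arithmetic in codegen_set_discr above can be checked by hand: the stored value is the variant's index relative to the first niche variant, wrapped into niche_start. A standalone restatement of those two lines (the example numbers are invented):

fn niche_encode(variant_index: u32, niche_variants_start: u32, niche_start: u128) -> u128 {
    let niche_value = variant_index - niche_variants_start;
    (niche_value as u128).wrapping_add(niche_start)
}

fn main() {
    // Variant 4, niche range starting at variant 2, tags starting at 2:
    assert_eq!(niche_encode(4, 2, 2), 4);
}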
@@ -409,9 +409,9 @@ impl PlaceRef<'tcx, &'ll Value> {
 pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
 -> PlaceRef<'tcx, &'ll Value> {
 PlaceRef {
-llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx, 0), llindex]),
+llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx(), 0), llindex]),
 llextra: None,
-layout: self.layout.field(bx.cx, 0),
+layout: self.layout.field(bx.cx(), 0),
 align: self.align
 }
 }
@@ -419,10 +419,10 @@ impl PlaceRef<'tcx, &'ll Value> {
 pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
 -> PlaceRef<'tcx, &'ll Value> {
 let mut downcast = *self;
-downcast.layout = self.layout.for_variant(bx.cx, variant_index);
+downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

 // Cast to the appropriate variant struct type.
-let variant_ty = downcast.layout.llvm_type(bx.cx);
+let variant_ty = downcast.layout.llvm_type(bx.cx());
 downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());

 downcast
@@ -444,7 +444,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 -> PlaceRef<'tcx, &'ll Value> {
 debug!("codegen_place(place={:?})", place);

-let cx = bx.cx;
+let cx = bx.cx();
 let tcx = cx.tcx;

 if let mir::Place::Local(index) = *place {
@@ -482,9 +482,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 // and compile-time agree on values
 // With floats that won't always be true
 // so we generate an abort
-let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
+let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
 bx.call(fnname, &[], None);
-let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx).ptr_to());
+let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to());
 PlaceRef::new_sized(llval, layout, layout.align)
 }
 }
@@ -498,7 +498,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 elem: mir::ProjectionElem::Deref
 }) => {
 // Load the pointer from its location.
-self.codegen_consume(bx, base).deref(bx.cx)
+self.codegen_consume(bx, base).deref(bx.cx())
 }
 mir::Place::Projection(ref projection) => {
 let cg_base = self.codegen_place(bx, &projection.base);
@@ -517,34 +517,34 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 mir::ProjectionElem::ConstantIndex { offset,
 from_end: false,
 min_length: _ } => {
-let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
+let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
 cg_base.project_index(bx, lloffset)
 }
 mir::ProjectionElem::ConstantIndex { offset,
 from_end: true,
 min_length: _ } => {
-let lloffset = CodegenCx::c_usize(bx.cx, offset as u64);
+let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
-let lllen = cg_base.len(bx.cx);
+let lllen = cg_base.len(bx.cx());
 let llindex = bx.sub(lllen, lloffset);
 cg_base.project_index(bx, llindex)
 }
 mir::ProjectionElem::Subslice { from, to } => {
 let mut subslice = cg_base.project_index(bx,
-CodegenCx::c_usize(bx.cx, from as u64));
+CodegenCx::c_usize(bx.cx(), from as u64));
 let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
 .projection_ty(tcx, &projection.elem)
 .to_ty(bx.tcx());
-subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));
+subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

 if subslice.layout.is_unsized() {
 subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
-CodegenCx::c_usize(bx.cx, (from as u64) + (to as u64))));
+CodegenCx::c_usize(bx.cx(), (from as u64) + (to as u64))));
 }

 // Cast the place pointer type to the new
 // array or slice type (*[%_; new_len]).
 subslice.llval = bx.pointercast(subslice.llval,
-subslice.layout.llvm_type(bx.cx).ptr_to());
+subslice.layout.llvm_type(bx.cx()).ptr_to());

 subslice
 }
@@ -103,28 +103,28 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 return bx;
 }

-let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx, 0)).llval;
+let start = dest.project_index(&bx, CodegenCx::c_usize(bx.cx(), 0)).llval;

 if let OperandValue::Immediate(v) = cg_elem.val {
-let align = CodegenCx::c_i32(bx.cx, dest.align.abi() as i32);
+let align = CodegenCx::c_i32(bx.cx(), dest.align.abi() as i32);
-let size = CodegenCx::c_usize(bx.cx, dest.layout.size.bytes());
+let size = CodegenCx::c_usize(bx.cx(), dest.layout.size.bytes());

 // Use llvm.memset.p0i8.* to initialize all zero arrays
 if CodegenCx::is_const_integral(v) && CodegenCx::const_to_uint(v) == 0 {
-let fill = CodegenCx::c_u8(bx.cx, 0);
+let fill = CodegenCx::c_u8(bx.cx(), 0);
 base::call_memset(&bx, start, fill, size, align, false);
 return bx;
 }

 // Use llvm.memset.p0i8.* to initialize byte arrays
 let v = base::from_immediate(&bx, v);
-if CodegenCx::val_ty(v) == Type::i8(bx.cx) {
+if CodegenCx::val_ty(v) == Type::i8(bx.cx()) {
 base::call_memset(&bx, start, v, size, align, false);
 return bx;
 }
 }

-let count = CodegenCx::c_usize(bx.cx, count);
+let count = CodegenCx::c_usize(bx.cx(), count);
 let end = dest.project_index(&bx, count).llval;

 let header_bx = bx.build_sibling_block("repeat_loop_header");
@@ -140,7 +140,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 cg_elem.val.store(&body_bx,
 PlaceRef::new_sized(current, cg_elem.layout, dest.align));

-let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx, 1)]);
+let next = body_bx.inbounds_gep(current, &[CodegenCx::c_usize(bx.cx(), 1)]);
 body_bx.br(header_bx.llbb());
 header_bx.add_incoming_to_phi(current, next, body_bx.llbb());

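The repeat-expression lowering above chooses between three strategies, as its comments say: one memset for all-zero arrays, one memset for single-byte elements, and a header/body loop with a phi otherwise. The decision restated as plain Rust (a sketch only; the real code emits LLVM IR rather than returning a label):

fn fill_strategy(elem_bytes: &[u8]) -> &'static str {
    if elem_bytes.iter().all(|&b| b == 0) {
        "memset(0)"    // llvm.memset with fill byte 0
    } else if elem_bytes.len() == 1 {
        "memset(byte)" // llvm.memset with the element byte
    } else {
        "repeat loop"  // element-by-element store loop
    }
}

fn main() {
    assert_eq!(fill_strategy(&[0, 0, 0, 0]), "memset(0)");
    assert_eq!(fill_strategy(&[7]), "memset(byte)");
    assert_eq!(fill_strategy(&[1, 2]), "repeat loop");
}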
@@ -210,18 +210,18 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 mir::Rvalue::Cast(ref kind, ref source, mir_cast_ty) => {
 let operand = self.codegen_operand(&bx, source);
 debug!("cast operand is {:?}", operand);
-let cast = bx.cx.layout_of(self.monomorphize(&mir_cast_ty));
+let cast = bx.cx().layout_of(self.monomorphize(&mir_cast_ty));

 let val = match *kind {
 mir::CastKind::ReifyFnPointer => {
 match operand.layout.ty.sty {
 ty::FnDef(def_id, substs) => {
-if bx.cx.tcx.has_attr(def_id, "rustc_args_required_const") {
+if bx.cx().tcx.has_attr(def_id, "rustc_args_required_const") {
 bug!("reifying a fn ptr that requires \
 const arguments");
 }
 OperandValue::Immediate(
-callee::resolve_and_get_fn(bx.cx, def_id, substs))
+callee::resolve_and_get_fn(bx.cx(), def_id, substs))
 }
 _ => {
 bug!("{} cannot be reified to a fn ptr", operand.layout.ty)
@@ -232,8 +232,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 match operand.layout.ty.sty {
 ty::Closure(def_id, substs) => {
 let instance = monomorphize::resolve_closure(
-bx.cx.tcx, def_id, substs, ty::ClosureKind::FnOnce);
+bx.cx().tcx, def_id, substs, ty::ClosureKind::FnOnce);
-OperandValue::Immediate(callee::get_fn(bx.cx, instance))
+OperandValue::Immediate(callee::get_fn(bx.cx(), instance))
 }
 _ => {
 bug!("{} cannot be cast to a fn ptr", operand.layout.ty)
@@ -256,7 +256,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 // HACK(eddyb) have to bitcast pointers
 // until LLVM removes pointee types.
 let lldata = bx.pointercast(lldata,
-cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
 OperandValue::Pair(lldata, llextra)
 }
 OperandValue::Immediate(lldata) => {
@@ -275,12 +275,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 if let OperandValue::Pair(data_ptr, meta) = operand.val {
 if cast.is_llvm_scalar_pair() {
 let data_cast = bx.pointercast(data_ptr,
-cast.scalar_pair_element_llvm_type(bx.cx, 0, true));
+cast.scalar_pair_element_llvm_type(bx.cx(), 0, true));
 OperandValue::Pair(data_cast, meta)
 } else { // cast to thin-ptr
 // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
 // pointer-cast of that pointer to desired pointer type.
-let llcast_ty = cast.immediate_llvm_type(bx.cx);
+let llcast_ty = cast.immediate_llvm_type(bx.cx());
 let llval = bx.pointercast(data_ptr, llcast_ty);
 OperandValue::Immediate(llval)
 }
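A minimal model of the fat-to-thin cast handled above (the struct here is invented for illustration): a fat pointer is a (data, extra) pair, and the thin result keeps only the data half.

struct FatPtr {
    data: *const u8,
    extra: usize, // slice length or vtable pointer in the real layout
}

fn fat_to_thin(p: FatPtr) -> *const u8 {
    p.data // the extra word is simply dropped, as in the hunk above
}

fn main() {
    let s: &str = "hi";
    let fat = FatPtr { data: s.as_ptr(), extra: s.len() };
    assert_eq!(unsafe { *fat_to_thin(fat) }, b'h');
}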
@@ -290,7 +290,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 }
 mir::CastKind::Misc => {
 assert!(cast.is_llvm_immediate());
-let ll_t_out = cast.immediate_llvm_type(bx.cx);
+let ll_t_out = cast.immediate_llvm_type(bx.cx());
 if operand.layout.abi.is_uninhabited() {
 return (bx, OperandRef {
 val: OperandValue::Immediate(CodegenCx::c_undef(ll_t_out)),
@@ -300,12 +300,12 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 let r_t_in = CastTy::from_ty(operand.layout.ty)
 .expect("bad input type for cast");
 let r_t_out = CastTy::from_ty(cast.ty).expect("bad output type for cast");
-let ll_t_in = operand.layout.immediate_llvm_type(bx.cx);
+let ll_t_in = operand.layout.immediate_llvm_type(bx.cx());
 match operand.layout.variants {
 layout::Variants::Single { index } => {
 if let Some(def) = operand.layout.ty.ty_adt_def() {
 let discr_val = def
-.discriminant_for_variant(bx.cx.tcx, index)
+.discriminant_for_variant(bx.cx().tcx, index)
 .val;
 let discr = CodegenCx::c_uint_big(ll_t_out, discr_val);
 return (bx, OperandRef {
@@ -328,7 +328,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
 signed = !scalar.is_bool() && s;

-let er = scalar.valid_range_exclusive(bx.cx);
+let er = scalar.valid_range_exclusive(bx.cx());
 if er.end != er.start &&
 scalar.valid_range.end() > scalar.valid_range.start() {
 // We want `table[e as usize]` to not
@@ -367,7 +367,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 (CastTy::FnPtr, CastTy::Int(_)) =>
 bx.ptrtoint(llval, ll_t_out),
 (CastTy::Int(_), CastTy::Ptr(_)) => {
-let usize_llval = bx.intcast(llval, bx.cx.isize_ty, signed);
+let usize_llval = bx.intcast(llval, bx.cx().isize_ty, signed);
 bx.inttoptr(usize_llval, ll_t_out)
 }
 (CastTy::Int(_), CastTy::Float) =>
@@ -394,7 +394,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {

 // Note: places are indirect, so storing the `llval` into the
 // destination effectively creates a reference.
-let val = if !bx.cx.type_has_metadata(ty) {
+let val = if !bx.cx().type_has_metadata(ty) {
 OperandValue::Immediate(cg_place.llval)
 } else {
 OperandValue::Pair(cg_place.llval, cg_place.llextra.unwrap())
@@ -412,7 +412,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 let size = self.evaluate_array_len(&bx, place);
 let operand = OperandRef {
 val: OperandValue::Immediate(size),
-layout: bx.cx.layout_of(bx.tcx().types.usize),
+layout: bx.cx().layout_of(bx.tcx().types.usize),
 };
 (bx, operand)
 }
@@ -438,7 +438,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 };
 let operand = OperandRef {
 val: OperandValue::Immediate(llresult),
-layout: bx.cx.layout_of(
+layout: bx.cx().layout_of(
 op.ty(bx.tcx(), lhs.layout.ty, rhs.layout.ty)),
 };
 (bx, operand)
@@ -453,7 +453,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 let operand_ty = bx.tcx().intern_tup(&[val_ty, bx.tcx().types.bool]);
 let operand = OperandRef {
 val: result,
-layout: bx.cx.layout_of(operand_ty)
+layout: bx.cx().layout_of(operand_ty)
 };

 (bx, operand)
@@ -488,8 +488,8 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 }

 mir::Rvalue::NullaryOp(mir::NullOp::SizeOf, ty) => {
-assert!(bx.cx.type_is_sized(ty));
+assert!(bx.cx().type_is_sized(ty));
-let val = CodegenCx::c_usize(bx.cx, bx.cx.size_of(ty).bytes());
+let val = CodegenCx::c_usize(bx.cx(), bx.cx().size_of(ty).bytes());
 let tcx = bx.tcx();
 (bx, OperandRef {
 val: OperandValue::Immediate(val),
@@ -499,11 +499,11 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {

 mir::Rvalue::NullaryOp(mir::NullOp::Box, content_ty) => {
 let content_ty: Ty<'tcx> = self.monomorphize(&content_ty);
-let (size, align) = bx.cx.size_and_align_of(content_ty);
+let (size, align) = bx.cx().size_and_align_of(content_ty);
-let llsize = CodegenCx::c_usize(bx.cx, size.bytes());
+let llsize = CodegenCx::c_usize(bx.cx(), size.bytes());
-let llalign = CodegenCx::c_usize(bx.cx, align.abi());
+let llalign = CodegenCx::c_usize(bx.cx(), align.abi());
-let box_layout = bx.cx.layout_of(bx.tcx().mk_box(content_ty));
+let box_layout = bx.cx().layout_of(bx.tcx().mk_box(content_ty));
-let llty_ptr = box_layout.llvm_type(bx.cx);
+let llty_ptr = box_layout.llvm_type(bx.cx());

 // Allocate space:
 let def_id = match bx.tcx().lang_items().require(ExchangeMallocFnLangItem) {
@@ -513,7 +513,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 }
 };
 let instance = ty::Instance::mono(bx.tcx(), def_id);
-let r = callee::get_fn(bx.cx, instance);
+let r = callee::get_fn(bx.cx(), instance);
 let val = bx.pointercast(bx.call(r, &[llsize, llalign], None), llty_ptr);

 let operand = OperandRef {
@@ -547,14 +547,14 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 if let mir::Place::Local(index) = *place {
 if let LocalRef::Operand(Some(op)) = self.locals[index] {
 if let ty::Array(_, n) = op.layout.ty.sty {
-let n = n.unwrap_usize(bx.cx.tcx);
+let n = n.unwrap_usize(bx.cx().tcx);
-return CodegenCx::c_usize(bx.cx, n);
+return CodegenCx::c_usize(bx.cx(), n);
 }
 }
 }
 // use common size calculation for non zero-sized types
 let cg_value = self.codegen_place(&bx, place);
-return cg_value.len(bx.cx);
+return cg_value.len(bx.cx());
 }

 pub fn codegen_scalar_binop(
@@ -606,7 +606,7 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 mir::BinOp::Shr => common::build_unchecked_rshift(bx, input_ty, lhs, rhs),
 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt |
 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => if is_unit {
-CodegenCx::c_bool(bx.cx, match op {
+CodegenCx::c_bool(bx.cx(), match op {
 mir::BinOp::Ne | mir::BinOp::Lt | mir::BinOp::Gt => false,
 mir::BinOp::Eq | mir::BinOp::Le | mir::BinOp::Ge => true,
 _ => unreachable!()
@@ -683,9 +683,9 @@ impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
 // with #[rustc_inherit_overflow_checks] and inlined from
 // another crate (mostly core::num generic/#[inline] fns),
 // while the current crate doesn't use overflow checks.
-if !bx.cx.check_overflow {
+if !bx.cx().check_overflow {
 let val = self.codegen_scalar_binop(bx, op, lhs, rhs, input_ty);
-return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx, false));
+return OperandValue::Pair(val, CodegenCx::c_bool(bx.cx(), false));
 }

 let (val, of) = match op {
@@ -817,7 +817,7 @@ fn get_overflow_intrinsic(
 },
 };

-bx.cx.get_intrinsic(&name)
+bx.cx().get_intrinsic(&name)
 }

 fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
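get_overflow_intrinsic builds a name and resolves it through the context, now via bx.cx(). The names follow LLVM's fixed scheme for checked arithmetic; a sketch of that naming scheme, not the function's actual body:

fn overflow_intrinsic_name(signed: bool, op: &str, bits: u32) -> String {
    // Keyed by signedness, operation, and bit width.
    let sign = if signed { 's' } else { 'u' };
    format!("llvm.{}{}.with.overflow.i{}", sign, op, bits)
}

fn main() {
    assert_eq!(overflow_intrinsic_name(true, "add", 32), "llvm.sadd.with.overflow.i32");
    assert_eq!(overflow_intrinsic_name(false, "mul", 64), "llvm.umul.with.overflow.i64");
}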
@@ -838,7 +838,7 @@ fn cast_int_to_float(bx: &Builder<'_, 'll, '_>,
 << (Single::MAX_EXP - Single::PRECISION as i16);
 let max = CodegenCx::c_uint_big(int_ty, MAX_F32_PLUS_HALF_ULP);
 let overflow = bx.icmp(IntPredicate::IntUGE, x, max);
-let infinity_bits = CodegenCx::c_u32(bx.cx, ieee::Single::INFINITY.to_bits() as u32);
+let infinity_bits = CodegenCx::c_u32(bx.cx(), ieee::Single::INFINITY.to_bits() as u32);
 let infinity = consts::bitcast(infinity_bits, float_ty);
 bx.select(overflow, infinity, bx.uitofp(x, float_ty))
 } else {
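MAX_F32_PLUS_HALF_ULP above is the smallest integer that must round to +inf: f32::MAX plus half an ulp, i.e. (2^25 - 1) * 2^103 for u128 to f32. A standalone check of the saturating behavior the select implements (a sketch; the compiler emits an icmp/select over LLVM values instead):

fn u128_to_f32_saturating(x: u128) -> f32 {
    const MAX_F32_PLUS_HALF_ULP: u128 = ((1u128 << 25) - 1) << 103;
    if x >= MAX_F32_PLUS_HALF_ULP {
        f32::INFINITY // the manual check exists because plain uitofp didn't reliably yield +inf here
    } else {
        x as f32 // in-range values round normally
    }
}

fn main() {
    assert_eq!(u128_to_f32_saturating(u128::MAX), f32::INFINITY);
    assert_eq!(u128_to_f32_saturating(1 << 20), (1u32 << 20) as f32);
}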
@@ -907,8 +907,8 @@ fn cast_float_to_int(bx: &Builder<'_, 'll, '_>,
 }
 let float_bits_to_llval = |bits| {
 let bits_llval = match float_ty.float_width() {
-32 => CodegenCx::c_u32(bx.cx, bits as u32),
+32 => CodegenCx::c_u32(bx.cx(), bits as u32),
-64 => CodegenCx::c_u64(bx.cx, bits as u64),
+64 => CodegenCx::c_u64(bx.cx(), bits as u64),
 n => bug!("unsupported float width {}", n),
 };
 consts::bitcast(bits_llval, float_ty)