
Auto merge of #86873 - nikic:opaque-ptrs, r=nagisa

Improve opaque pointers support

Opaque pointers are coming, and rustc is not ready.

This adds partial support by passing an explicit load type to LLVM. Two issues I've encountered:
 * The necessary type was not available at the point where non-temporal copies were generated. I've pushed the code for that upwards out of the memcpy implementation and moved the position of a cast to make do with the types we have available. (I'm not sure that cast is needed at all, but have retained it in the interest of conservativeness.)
 * The `PlaceRef::project_deref()` function used during debuginfo generation seems to be buggy in some way -- though I haven't figured out specifically what it does wrong. Replacing it with `load_operand().deref()` did the trick, but I don't really know what I'm doing here.
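For readers skimming the diff, here is a minimal, self-contained sketch (not rustc code; `Type`, `Value`, and `Builder` below are toy stand-ins for the real backend types) of why the builder's load methods now take an explicit type: with opaque pointers, an LLVM pointer value no longer carries its pointee type, so the caller has to say what it is loading.

```rust
// Minimal sketch, assuming toy stand-in types; not part of this patch.
#[derive(Clone, Copy, Debug)]
struct Type(&'static str);
#[derive(Clone, Copy, Debug)]
struct Value(&'static str);

struct Builder;

impl Builder {
    // Old shape: the loaded type was recovered from the pointer's pointee
    // type (getPointerElementType), which opaque pointers remove:
    //     fn load(&mut self, ptr: Value, align: u64) -> Value
    //
    // New shape: the caller passes the type explicitly, mirroring the
    // LLVMBuildLoad2 / LLVMRustBuildAtomicLoad changes in this commit.
    fn load(&mut self, ty: Type, ptr: Value, align: u64) -> Value {
        println!("load {} from {} (align {})", ty.0, ptr.0, align);
        Value("%loaded")
    }
}

fn main() {
    let mut bx = Builder;
    // Callers now supply e.g. the backend type of the place being loaded.
    let _ = bx.load(Type("i64"), Value("%ptr"), 8);
}
```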
Committed by bors on 2021-07-10 19:01:41 +00:00 as 432e145bd5.
13 changed files with 81 additions and 74 deletions

View file

@@ -410,17 +410,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }
 
-    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
             load
         }
     }
 
-    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
             load
         }
@@ -428,6 +428,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     fn atomic_load(
         &mut self,
+        ty: &'ll Type,
         ptr: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
         size: Size,
@@ -435,6 +436,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let load = llvm::LLVMRustBuildAtomicLoad(
                 self.llbuilder,
+                ty,
                 ptr,
                 UNNAMED,
                 AtomicOrdering::from_generic(order),
@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(place.llval, place.align);
+                let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
                 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = self.struct_gep(place.llval, i as u64);
-                let load = self.load(llptr, align);
+                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+                let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 self.to_immediate_scalar(load, scalar)
             };
@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memmove.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());

View file

@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
     // LLVM to keep around the reference to the global.
     let indices = [bx.const_i32(0), bx.const_i32(0)];
     let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-    let volative_load_instruction = bx.volatile_load(element);
+    let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
     unsafe {
         llvm::LLVMSetAlignment(volative_load_instruction, 1);
     }

View file

@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = substs.type_at(0);
-                let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
-                }
-                let load = self.volatile_load(ptr);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                    let llty = ty.llvm_type(self);
+                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+                    self.volatile_load(llty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+                };
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
@@ -319,9 +322,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 let integer_ty = self.type_ix(layout.size.bits());
                 let ptr_ty = self.type_ptr_to(integer_ty);
                 let a_ptr = self.bitcast(a, ptr_ty);
-                let a_val = self.load(a_ptr, layout.align.abi);
+                let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                 let b_ptr = self.bitcast(b, ptr_ty);
-                let b_val = self.load(b_ptr, layout.align.abi);
+                let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                 self.icmp(IntPredicate::IntEQ, a_val, b_val)
             } else {
                 let i8p_ty = self.type_i8p();
@@ -540,7 +543,7 @@ fn codegen_msvc_try(
         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
         let flags = bx.const_i32(8);
         let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = catchpad_rust.load(slot, ptr_align);
+        let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
         catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
         catchpad_rust.catch_ret(&funclet, caught.llbb());

View file

@@ -1385,7 +1385,12 @@ extern "C" {
         Val: &'a Value,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildLoad2(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;
     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
@@ -1631,6 +1636,7 @@ extern "C" {
     // Atomic Operations
     pub fn LLVMRustBuildAtomicLoad(
         B: &Builder<'a>,
+        ElementType: &'a Type,
         PointerVal: &'a Value,
         Name: *const c_char,
         Order: AtomicOrdering,

View file

@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_ty = bx.type_i8p();
+    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
     let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
         bx.bitcast(list.immediate(), va_list_ptr_ty)
     } else {
         list.immediate()
     };
 
-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);
 
     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
     let (addr, addr_align) =
         emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
     if indirect {
-        let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
     } else {
-        bx.load(addr, addr_align)
+        bx.load(llty, addr, addr_align)
     }
 }
@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
     };
 
     // if the offset >= 0 then the value will be on the stack
-    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
     let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
     bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
@@ -139,8 +140,9 @@ fn emit_aapcs_va_arg(
     let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());
 
+    let top_type = bx.type_i8p();
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
 
     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
@@ -149,8 +151,9 @@ fn emit_aapcs_va_arg(
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
-    let reg_value = in_reg.load(reg_addr, layout.align.abi);
+    let reg_type = layout.llvm_type(bx);
+    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());
 
     // On Stack block

View file

@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let llty = bx.fn_ptr_backend_type(fn_abi);
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, ptr_align);
+        let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);
 
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let llty = bx.type_isize();
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, usize_align);
+        let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
         ptr

View file

@@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
-                    bx.load(llval, align)
+                    bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
                     op.immediate_or_packed_pair(&mut bx)
                 }
@@ -287,8 +287,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         llval
                     }
                 };
-                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
-                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+                let ty = bx.cast_backend_type(&cast_ty);
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
             }
         };
         bx.ret(llval);
@@ -1086,15 +1087,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
-                llval = bx.load(addr, align.min(arg.layout.align.abi));
+                let llty = bx.cast_backend_type(&ty);
+                let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+                llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
             } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bx.load(llval, align);
+                llval = bx.load(bx.backend_type(arg.layout), llval, align);
                 if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, 0..2);

View file

@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 match *elem {
                     mir::ProjectionElem::Deref => {
                         indirect_offsets.push(Size::ZERO);
-                        place = place.project_deref(bx);
+                        place = bx.load_operand(place).deref(bx.cx());
                     }
                     mir::ProjectionElem::Field(field, _) => {
                         let i = field.index();

View file

@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if ty.is_unsafe_ptr() {
                     // Some platforms do not support atomic operations on pointers,
                     // so we cast to integer first...
-                    let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                    let llty = bx.type_isize();
+                    let ptr_llty = bx.type_ptr_to(llty);
                     source = bx.pointercast(source, ptr_llty);
-                }
-                let result = bx.atomic_load(source, order, size);
-                if ty.is_unsafe_ptr() {
+                    let result = bx.atomic_load(llty, source, order, size);
                     // ... and then cast the result back to a pointer
                     bx.inttoptr(result, bx.backend_type(layout))
                 } else {
-                    result
+                    bx.atomic_load(bx.backend_type(layout), source, order, size)
                 }
             } else {
                 return invalid_monomorphization(ty);

View file

@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         }
         match self {
             OperandValue::Ref(r, None, source_align) => {
+                if flags.contains(MemFlags::NONTEMPORAL) {
+                    // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+                    let ty = bx.backend_type(dest.layout);
+                    let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+                    let val = bx.load(ty, ptr, source_align);
+                    bx.store_with_flags(val, dest.llval, dest.align, flags);
+                    return;
+                }
                 base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
             }
             OperandValue::Ref(_, Some(_), _) => {

View file

@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }
 
-    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
-        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
-        let layout = bx.layout_of(target_ty.ty);
-
-        PlaceRef {
-            llval: bx.load(self.llval, self.align),
-            llextra: None,
-            layout,
-            align: layout.align.abi,
-        }
-    }
-
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }

View file

@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;
 
-    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> Self::Value;
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
     -> OperandRef<'tcx, Self::Value>;

View file

@@ -349,11 +349,10 @@ extern "C" void LLVMRustSetFastMath(LLVMValueRef V) {
 }
 
 extern "C" LLVMValueRef
-LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMValueRef Source, const char *Name,
-                        LLVMAtomicOrdering Order) {
+LLVMRustBuildAtomicLoad(LLVMBuilderRef B, LLVMTypeRef Ty, LLVMValueRef Source,
+                        const char *Name, LLVMAtomicOrdering Order) {
   Value *Ptr = unwrap(Source);
-  Type *Ty = Ptr->getType()->getPointerElementType();
-  LoadInst *LI = unwrap(B)->CreateLoad(Ty, Ptr, Name);
+  LoadInst *LI = unwrap(B)->CreateLoad(unwrap(Ty), Ptr, Name);
   LI->setAtomic(fromRust(Order));
   return wrap(LI);
 }