Auto merge of #86873 - nikic:opaque-ptrs, r=nagisa

Improve opaque pointers support

Opaque pointers are coming, and rustc is not ready.

This adds partial support by passing an explicit load type to LLVM. Two issues I've encountered:
 * The necessary type was not available at the point where non-temporal copies were generated. I've pushed that code upwards out of the memcpy implementation and moved the position of a cast to make do with the types we have available; a sketch of the resulting caller-side fallback follows the changed-files summary below. (I'm not sure that cast is needed at all, but I've retained it out of caution.)
 * The `PlaceRef::project_deref()` function used during debuginfo generation seems to be buggy in some way -- though I haven't figured out specifically what it does wrong. Replacing it with `load_operand().deref()` did the trick, but I don't really know what I'm doing here.
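
For context, here is a minimal sketch of the typed-load API this change moves to. It is written against the `llvm-sys` crate purely so the example is self-contained; rustc uses its own bindings, shown in the diff below. `LLVMBuildLoad` derives its result type from the pointer's pointee type, which opaque pointers abolish, so `LLVMBuildLoad2` takes the loaded type as an explicit argument.

// Illustration only: uses the `llvm-sys` crate, not rustc's internal bindings.
use std::ffi::CStr;

use llvm_sys::core::{LLVMBuildLoad2, LLVMInt32TypeInContext};
use llvm_sys::prelude::{LLVMBuilderRef, LLVMContextRef, LLVMValueRef};

/// Load an `i32` through `ptr`. Under opaque pointers the element type can
/// no longer be recovered from `ptr` itself, so it is passed explicitly.
unsafe fn load_i32(
    builder: LLVMBuilderRef,
    ctx: LLVMContextRef,
    ptr: LLVMValueRef,
) -> LLVMValueRef {
    let name = CStr::from_bytes_with_nul(b"loaded\0").unwrap();
    // Old API: LLVMBuildLoad(builder, ptr, name) inferred the type from the pointer.
    // New API: the loaded type is an explicit argument.
    LLVMBuildLoad2(builder, LLVMInt32TypeInContext(ctx), ptr, name.as_ptr())
}

With opaque pointers there is no pointee type to query on a pointer value, which is why every load site in the diff below now threads a type through.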
Committed by bors on 2021-07-10 19:01:41 +00:00 (commit 432e145bd5)
13 changed files with 81 additions and 74 deletions
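
As a reading aid for the memcpy/memmove hunks below: the raw builder no longer sees a value type, so the non-temporal fallback has to live in a type-aware caller. The following self-contained sketch shows the shape of that caller-side fallback; `BuilderSketch`, `MemFlags`, `Align`, and `memcpy_ty` are hypothetical stand-ins for rustc_codegen_ssa's real items, which are not part of this diff.

// Self-contained sketch; all names are stand-ins for rustc_codegen_ssa's types.
#[derive(Clone, Copy)]
pub struct Align(pub u64);

#[derive(Clone, Copy)]
pub struct MemFlags(u8);

impl MemFlags {
    pub const NONTEMPORAL: MemFlags = MemFlags(1);
    pub fn contains(self, other: MemFlags) -> bool {
        self.0 & other.0 == other.0
    }
}

pub trait BuilderSketch {
    type Value: Copy;
    type Type: Copy;
    fn type_ptr_to(&mut self, ty: Self::Type) -> Self::Type;
    fn pointercast(&mut self, val: Self::Value, ty: Self::Type) -> Self::Value;
    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
    fn store_with_flags(&mut self, val: Self::Value, ptr: Self::Value, align: Align, flags: MemFlags);
    fn memcpy(&mut self, dst: Self::Value, dst_align: Align, src: Self::Value, src_align: Align, size: Self::Value, flags: MemFlags);
}

/// Type-aware copy: `ty` is the backend type of the copied value, which the
/// raw `memcpy` below no longer has. The non-temporal fallback therefore moves
/// up to this level, and the pointer cast happens on the *source*, using `ty`.
pub fn memcpy_ty<B: BuilderSketch>(
    bx: &mut B,
    dst: B::Value,
    dst_align: Align,
    src: B::Value,
    src_align: Align,
    ty: B::Type,
    size: B::Value,
    flags: MemFlags,
) {
    if flags.contains(MemFlags::NONTEMPORAL) {
        // Inefficient, but there is no non-temporal memcpy: do a typed load
        // followed by a non-temporal store instead.
        let src_ptr_ty = bx.type_ptr_to(ty);
        let src = bx.pointercast(src, src_ptr_ty);
        let val = bx.load(ty, src, src_align);
        bx.store_with_flags(val, dst, dst_align, flags);
        return;
    }
    bx.memcpy(dst, dst_align, src, src_align, size, flags);
}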

View file

@@ -410,17 +410,17 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         }
     }

-    fn load(&mut self, ptr: &'ll Value, align: Align) -> &'ll Value {
+    fn load(&mut self, ty: &'ll Type, ptr: &'ll Value, align: Align) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetAlignment(load, align.bytes() as c_uint);
             load
         }
     }

-    fn volatile_load(&mut self, ptr: &'ll Value) -> &'ll Value {
+    fn volatile_load(&mut self, ty: &'ll Type, ptr: &'ll Value) -> &'ll Value {
         unsafe {
-            let load = llvm::LLVMBuildLoad(self.llbuilder, ptr, UNNAMED);
+            let load = llvm::LLVMBuildLoad2(self.llbuilder, ty, ptr, UNNAMED);
             llvm::LLVMSetVolatile(load, llvm::True);
             load
         }
@@ -428,6 +428,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {

     fn atomic_load(
         &mut self,
+        ty: &'ll Type,
         ptr: &'ll Value,
         order: rustc_codegen_ssa::common::AtomicOrdering,
         size: Size,
@@ -435,6 +436,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         unsafe {
             let load = llvm::LLVMRustBuildAtomicLoad(
                 self.llbuilder,
+                ty,
                 ptr,
                 UNNAMED,
                 AtomicOrdering::from_generic(order),
@@ -486,7 +488,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                 }
             }
             let llval = const_llval.unwrap_or_else(|| {
-                let load = self.load(place.llval, place.align);
+                let load = self.load(place.layout.llvm_type(self), place.llval, place.align);
                 if let abi::Abi::Scalar(ref scalar) = place.layout.abi {
                     scalar_load_metadata(self, load, scalar);
                 }
@@ -498,7 +500,8 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
             let mut load = |i, scalar: &abi::Scalar, align| {
                 let llptr = self.struct_gep(place.llval, i as u64);
-                let load = self.load(llptr, align);
+                let llty = place.layout.scalar_pair_element_llvm_type(self, i, false);
+                let load = self.load(llty, llptr, align);
                 scalar_load_metadata(self, load, scalar);
                 self.to_immediate_scalar(load, scalar)
             };
@@ -815,13 +818,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memcpy.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());
@@ -848,13 +845,7 @@ impl BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: &'ll Value,
         flags: MemFlags,
     ) {
-        if flags.contains(MemFlags::NONTEMPORAL) {
-            // HACK(nox): This is inefficient but there is no nontemporal memmove.
-            let val = self.load(src, src_align);
-            let ptr = self.pointercast(dst, self.type_ptr_to(self.val_ty(val)));
-            self.store_with_flags(val, ptr, dst_align, flags);
-            return;
-        }
+        assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
         let dst = self.pointercast(dst, self.type_i8p());

View file

@@ -20,7 +20,7 @@ pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_,
     // LLVM to keep around the reference to the global.
     let indices = [bx.const_i32(0), bx.const_i32(0)];
     let element = bx.inbounds_gep(gdb_debug_scripts_section, &indices);
-    let volative_load_instruction = bx.volatile_load(element);
+    let volative_load_instruction = bx.volatile_load(bx.type_i8(), element);
     unsafe {
         llvm::LLVMSetAlignment(volative_load_instruction, 1);
     }

View file

@@ -162,11 +162,14 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = substs.type_at(0);
-                let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty) = fn_abi.ret.mode {
-                    ptr = self.pointercast(ptr, self.type_ptr_to(ty.llvm_type(self)));
-                }
-                let load = self.volatile_load(ptr);
+                let ptr = args[0].immediate();
+                let load = if let PassMode::Cast(ty) = fn_abi.ret.mode {
+                    let llty = ty.llvm_type(self);
+                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
+                    self.volatile_load(llty, ptr)
+                } else {
+                    self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
+                };
                 let align = if name == sym::unaligned_volatile_load {
                     1
                 } else {
@@ -319,9 +322,9 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 let integer_ty = self.type_ix(layout.size.bits());
                 let ptr_ty = self.type_ptr_to(integer_ty);
                 let a_ptr = self.bitcast(a, ptr_ty);
-                let a_val = self.load(a_ptr, layout.align.abi);
+                let a_val = self.load(integer_ty, a_ptr, layout.align.abi);
                 let b_ptr = self.bitcast(b, ptr_ty);
-                let b_val = self.load(b_ptr, layout.align.abi);
+                let b_val = self.load(integer_ty, b_ptr, layout.align.abi);
                 self.icmp(IntPredicate::IntEQ, a_val, b_val)
             } else {
@@ -540,7 +543,7 @@ fn codegen_msvc_try(
         // Source: MicrosoftCXXABI::getAddrOfCXXCatchHandlerType in clang
         let flags = bx.const_i32(8);
         let funclet = catchpad_rust.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = catchpad_rust.load(slot, ptr_align);
+        let ptr = catchpad_rust.load(bx.type_i8p(), slot, ptr_align);
         catchpad_rust.call(catch_func, &[data, ptr], Some(&funclet));
         catchpad_rust.catch_ret(&funclet, caught.llbb());

View file

@@ -1385,7 +1385,12 @@ extern "C" {
         Val: &'a Value,
         Name: *const c_char,
     ) -> &'a Value;
-    pub fn LLVMBuildLoad(B: &Builder<'a>, PointerVal: &'a Value, Name: *const c_char) -> &'a Value;
+    pub fn LLVMBuildLoad2(
+        B: &Builder<'a>,
+        Ty: &'a Type,
+        PointerVal: &'a Value,
+        Name: *const c_char,
+    ) -> &'a Value;

     pub fn LLVMBuildStore(B: &Builder<'a>, Val: &'a Value, Ptr: &'a Value) -> &'a Value;
@@ -1631,6 +1636,7 @@ extern "C" {
     // Atomic Operations
     pub fn LLVMRustBuildAtomicLoad(
         B: &Builder<'a>,
+        ElementType: &'a Type,
         PointerVal: &'a Value,
         Name: *const c_char,
         Order: AtomicOrdering,

View file

@@ -32,14 +32,15 @@ fn emit_direct_ptr_va_arg(
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ptr_ty = bx.cx().type_ptr_to(bx.cx.type_i8p());
+    let va_list_ty = bx.type_i8p();
+    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
     let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
         bx.bitcast(list.immediate(), va_list_ptr_ty)
     } else {
         list.immediate()
     };
-    let ptr = bx.load(va_list_addr, bx.tcx().data_layout.pointer_align.abi);
+    let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

     let (addr, addr_align) = if allow_higher_align && align > slot_size {
         (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
@@ -82,10 +83,10 @@ fn emit_ptr_va_arg(
     let (addr, addr_align) =
         emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
     if indirect {
-        let tmp_ret = bx.load(addr, addr_align);
-        bx.load(tmp_ret, align.abi)
+        let tmp_ret = bx.load(llty, addr, addr_align);
+        bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
     } else {
-        bx.load(addr, addr_align)
+        bx.load(llty, addr, addr_align)
     }
 }
@@ -118,7 +119,7 @@ fn emit_aapcs_va_arg(
     };

     // if the offset >= 0 then the value will be on the stack
-    let mut reg_off_v = bx.load(reg_off, offset_align);
+    let mut reg_off_v = bx.load(bx.type_i32(), reg_off, offset_align);
     let use_stack = bx.icmp(IntPredicate::IntSGE, reg_off_v, zero);
     bx.cond_br(use_stack, &on_stack.llbb(), &maybe_reg.llbb());
@@ -139,8 +140,9 @@ fn emit_aapcs_va_arg(
     let use_stack = maybe_reg.icmp(IntPredicate::IntSGT, new_reg_off_v, zero);
     maybe_reg.cond_br(use_stack, &on_stack.llbb(), &in_reg.llbb());

+    let top_type = bx.type_i8p();
     let top = in_reg.struct_gep(va_list_addr, reg_top_index);
-    let top = in_reg.load(top, bx.tcx().data_layout.pointer_align.abi);
+    let top = in_reg.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);

     // reg_value = *(@top + reg_off_v);
     let mut reg_addr = in_reg.gep(top, &[reg_off_v]);
@@ -149,8 +151,9 @@ fn emit_aapcs_va_arg(
         let offset = bx.const_i32((slot_size - layout.size.bytes()) as i32);
         reg_addr = in_reg.gep(reg_addr, &[offset]);
     }
-    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(layout.llvm_type(bx)));
-    let reg_value = in_reg.load(reg_addr, layout.align.abi);
+    let reg_type = layout.llvm_type(bx);
+    let reg_addr = in_reg.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
+    let reg_value = in_reg.load(reg_type, reg_addr, layout.align.abi);
     in_reg.br(&end.llbb());

     // On Stack block