Auto merge of #86873 - nikic:opaque-ptrs, r=nagisa
Improve opaque pointers support

Opaque pointers are coming, and rustc is not ready. This adds partial support by passing an explicit load type to LLVM. Two issues I've encountered:

* The necessary type was not available at the point where non-temporal copies were generated. I've pushed the code for that upwards out of the memcpy implementation and moved the position of a cast to make do with the types we have available. (I'm not sure that cast is needed at all, but have retained it in the interest of conservativeness.)
* The `PlaceRef::project_deref()` function used during debuginfo generation seems to be buggy in some way, though I haven't figured out specifically what it does wrong. Replacing it with `load_operand().deref()` did the trick, but I don't really know what I'm doing here.
commit 432e145bd5

13 changed files with 81 additions and 74 deletions
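To make the central change concrete: with typed pointers LLVM could derive the loaded type from the pointer operand's pointee type, but an opaque `ptr` carries no pointee type, so the builder has to be told what it is loading. A minimal, self-contained sketch of that shape (toy `Type`/`Value` stand-ins, not rustc's actual handles):

    // Toy model of the BuilderMethods change. With opaque pointers, a
    // pointer value no longer reveals its pointee type, so `load` must
    // take the loaded type as an explicit argument.
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Type {
        I64,
        Ptr, // opaque: says nothing about what it points to
    }

    #[derive(Clone, Copy, Debug)]
    struct Value {
        ty: Type,
    }

    struct Builder;

    impl Builder {
        // Mirrors the new shape: fn load(&mut self, ty, ptr, align) -> value
        fn load(&mut self, ty: Type, ptr: Value) -> Value {
            // Only `ty` determines the result type; `ptr.ty` is just `Ptr`.
            assert_eq!(ptr.ty, Type::Ptr);
            Value { ty }
        }
    }

    fn main() {
        let mut bx = Builder;
        let p = Value { ty: Type::Ptr };
        let v = bx.load(Type::I64, p); // caller supplies the loaded type
        assert_eq!(v.ty, Type::I64);
    }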
@@ -20,10 +20,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({:?}, {:?})", llvtable, self);

-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.fn_ptr_backend_type(fn_abi)));
+        let llty = bx.fn_ptr_backend_type(fn_abi);
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, ptr_align);
+        let ptr = bx.load(llty, gep, ptr_align);
         bx.nonnull_metadata(ptr);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
@@ -38,10 +39,11 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_int({:?}, {:?})", llvtable, self);

-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(bx.type_isize()));
+        let llty = bx.type_isize();
+        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llvtable, &[bx.const_usize(self.0)]);
-        let ptr = bx.load(gep, usize_align);
+        let ptr = bx.load(llty, gep, usize_align);
         // Vtable loads are invariant.
         bx.set_invariant_load(ptr);
         ptr
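Both hunks above follow the same pattern: compute the slot type once, then reuse it for the pointer cast and the load. As a rough source-level illustration (a hypothetical helper in plain Rust, not the generated IR), the emitted sequence amounts to indexing an array of pointer-sized slots:

    // Rough analogue of get_fn/get_int: inbounds_gep plus a typed load is
    // an offset into an array of usize-sized slots. `vtable_slot` is an
    // invented name for illustration only.
    unsafe fn vtable_slot(llvtable: *const usize, index: usize) -> usize {
        // gep = bx.inbounds_gep(llvtable, &[bx.const_usize(index)]);
        // ptr = bx.load(llty, gep, ptr_align);
        *llvtable.add(index)
    }

    fn main() {
        let table = [0usize, 8, 16]; // stand-in vtable contents
        unsafe {
            assert_eq!(vtable_slot(table.as_ptr(), 2), 16);
        }
    }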
@@ -260,7 +260,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             PassMode::Direct(_) | PassMode::Pair(..) => {
                 let op = self.codegen_consume(&mut bx, mir::Place::return_place().as_ref());
                 if let Ref(llval, _, align) = op.val {
-                    bx.load(llval, align)
+                    bx.load(bx.backend_type(op.layout), llval, align)
                 } else {
                     op.immediate_or_packed_pair(&mut bx)
                 }
@@ -287,8 +287,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         llval
                     }
                 };
-                let addr = bx.pointercast(llslot, bx.type_ptr_to(bx.cast_backend_type(&cast_ty)));
-                bx.load(addr, self.fn_abi.ret.layout.align.abi)
+                let ty = bx.cast_backend_type(&cast_ty);
+                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
+                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
             }
         };
         bx.ret(llval);
@@ -1086,15 +1087,16 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if by_ref && !arg.is_indirect() {
             // Have to load the argument, maybe while casting it.
             if let PassMode::Cast(ty) = arg.mode {
-                let addr = bx.pointercast(llval, bx.type_ptr_to(bx.cast_backend_type(&ty)));
-                llval = bx.load(addr, align.min(arg.layout.align.abi));
+                let llty = bx.cast_backend_type(&ty);
+                let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
+                llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
            } else {
                 // We can't use `PlaceRef::load` here because the argument
                 // may have a type we don't treat as immediate, but the ABI
                 // used for this call is passing it by-value. In that case,
                 // the load would just produce `OperandValue::Ref` instead
                 // of the `OperandValue::Immediate` we need for the call.
-                llval = bx.load(llval, align);
+                llval = bx.load(bx.backend_type(arg.layout), llval, align);
                 if let abi::Abi::Scalar(ref scalar) = arg.layout.abi {
                     if scalar.is_bool() {
                         bx.range_metadata(llval, 0..2);
@@ -274,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 match *elem {
                     mir::ProjectionElem::Deref => {
                         indirect_offsets.push(Size::ZERO);
-                        place = place.project_deref(bx);
+                        place = bx.load_operand(place).deref(bx.cx());
                     }
                     mir::ProjectionElem::Field(field, _) => {
                         let i = field.index();
@@ -448,15 +448,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                         if ty.is_unsafe_ptr() {
                             // Some platforms do not support atomic operations on pointers,
                             // so we cast to integer first...
-                            let ptr_llty = bx.type_ptr_to(bx.type_isize());
+                            let llty = bx.type_isize();
+                            let ptr_llty = bx.type_ptr_to(llty);
                             source = bx.pointercast(source, ptr_llty);
-                        }
-                        let result = bx.atomic_load(source, order, size);
-                        if ty.is_unsafe_ptr() {
+                            let result = bx.atomic_load(llty, source, order, size);
                             // ... and then cast the result back to a pointer
                             bx.inttoptr(result, bx.backend_type(layout))
                         } else {
-                            result
+                            bx.atomic_load(bx.backend_type(layout), source, order, size)
                         }
                     } else {
                         return invalid_monomorphization(ty);
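The same cast-to-integer trick is available at the library level; a hedged sketch (standard library only, not rustc internals) of atomically loading a pointer via its integer bits:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // Sketch of the workaround the hunk implements in IR: on targets
    // without atomic pointer operations, the pointer's bits are moved
    // atomically as an integer and cast back to a pointer afterwards.
    static SLOT: AtomicUsize = AtomicUsize::new(0);

    fn store_ptr(p: *const u8) {
        SLOT.store(p as usize, Ordering::SeqCst); // ptrtoint, atomic store
    }

    fn load_ptr() -> *const u8 {
        SLOT.load(Ordering::SeqCst) as *const u8 // atomic load, inttoptr
    }

    fn main() {
        let x = 42u8;
        store_ptr(&x as *const u8);
        assert_eq!(unsafe { *load_ptr() }, 42);
    }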
@@ -289,6 +289,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         }
         match self {
             OperandValue::Ref(r, None, source_align) => {
+                if flags.contains(MemFlags::NONTEMPORAL) {
+                    // HACK(nox): This is inefficient but there is no nontemporal memcpy.
+                    let ty = bx.backend_type(dest.layout);
+                    let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
+                    let val = bx.load(ty, ptr, source_align);
+                    bx.store_with_flags(val, dest.llval, dest.align, flags);
+                    return;
+                }
                 base::memcpy_ty(bx, dest.llval, dest.align, r, source_align, dest.layout, flags)
             }
             OperandValue::Ref(_, Some(_), _) => {
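This is the non-temporal rework from the commit message: the load now happens inside the special case, so the fallback is literally "load the whole value, then store it with the nontemporal flag" rather than a memcpy. A minimal stand-in for that shape (plain Rust; `NONTEMPORAL` and `store_with_flags` are invented here and carry no real nontemporal semantics):

    // Stand-in for the nontemporal path: no nontemporal memcpy exists, so
    // the copy is expressed as one typed load plus one flagged store.
    const NONTEMPORAL: u8 = 1;

    fn store_with_flags(dest: &mut u64, val: u64, _flags: u8) {
        // A real backend would emit a store with nontemporal metadata here.
        *dest = val;
    }

    fn copy_u64(src: &u64, dest: &mut u64, flags: u8) {
        if flags & NONTEMPORAL != 0 {
            let val = *src; // bx.load(ty, ptr, source_align)
            store_with_flags(dest, val, flags); // bx.store_with_flags(...)
            return;
        }
        *dest = *src; // otherwise: the memcpy_ty path
    }

    fn main() {
        let src = 7u64;
        let mut dst = 0u64;
        copy_u64(&src, &mut dst, NONTEMPORAL);
        assert_eq!(dst, 7);
    }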
@@ -402,18 +402,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         downcast
     }

-    pub fn project_deref<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) -> Self {
-        let target_ty = self.layout.ty.builtin_deref(true).expect("failed to deref");
-        let layout = bx.layout_of(target_ty.ty);
-
-        PlaceRef {
-            llval: bx.load(self.llval, self.align),
-            llextra: None,
-            layout,
-            align: layout.align.abi,
-        }
-    }
-
     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
         bx.lifetime_start(self.llval, self.layout.size);
     }
@@ -137,9 +137,15 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn dynamic_alloca(&mut self, ty: Self::Type, align: Align) -> Self::Value;
     fn array_alloca(&mut self, ty: Self::Type, len: Self::Value, align: Align) -> Self::Value;

-    fn load(&mut self, ptr: Self::Value, align: Align) -> Self::Value;
-    fn volatile_load(&mut self, ptr: Self::Value) -> Self::Value;
-    fn atomic_load(&mut self, ptr: Self::Value, order: AtomicOrdering, size: Size) -> Self::Value;
+    fn load(&mut self, ty: Self::Type, ptr: Self::Value, align: Align) -> Self::Value;
+    fn volatile_load(&mut self, ty: Self::Type, ptr: Self::Value) -> Self::Value;
+    fn atomic_load(
+        &mut self,
+        ty: Self::Type,
+        ptr: Self::Value,
+        order: AtomicOrdering,
+        size: Size,
+    ) -> Self::Value;
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
         -> OperandRef<'tcx, Self::Value>;