
Auto merge of #105545 - erikdesjardins:ptrclean, r=bjorn3

cleanup: remove pointee types

This can't be merged until the oldest LLVM version we support uses opaque pointers, which will be the case after #114148. (Also note `-Cllvm-args="-opaque-pointers=0"` can technically be used in LLVM 15, though I don't think we should support that configuration.)

I initially hoped this would provide some minor perf win, but in https://github.com/rust-lang/rust/pull/105412#issuecomment-1341224450 it had very little impact, so this is only valuable as a cleanup.

As a followup, this will enable #96242 to be resolved.
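
For readers outside the codegen backends, here is a toy model of what "remove pointee types" means (my own illustration, not rustc code — only the `type_ptr_to`/`type_ptr` shapes mirror the `BaseTypeMethods` change in the diff below):

// Toy model: with typed pointers the pointee is part of the pointer type;
// with opaque pointers only the address space is.
#[derive(Debug, Clone, PartialEq)]
enum Ty {
    I8,
    I64,
    TypedPtr(Box<Ty>),             // pre-opaque world: i8*, i64*, ...
    OpaquePtr { addr_space: u32 }, // opaque world: ptr, ptr addrspace(1), ...
}

// Old constructor shape: the caller must supply a pointee.
fn type_ptr_to(pointee: Ty) -> Ty {
    Ty::TypedPtr(Box::new(pointee))
}

// New constructor shape: no pointee needed at all.
fn type_ptr() -> Ty {
    Ty::OpaquePtr { addr_space: 0 }
}

fn main() {
    // Typed pointers: i8* != i64*, so the backend had to insert
    // bitcast/pointercast whenever the nominal pointee disagreed.
    assert_ne!(type_ptr_to(Ty::I8), type_ptr_to(Ty::I64));
    // Opaque pointers: one `ptr` per address space, so every cast removed
    // in this diff was already a no-op.
    assert_eq!(type_ptr(), type_ptr());
}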

r? `@ghost`

`@rustbot` label S-blocked
Committed by bors on 2023-08-01 19:44:17 +00:00
commit abd3637e42
32 changed files with 227 additions and 584 deletions

View file

@@ -27,7 +27,6 @@ use rustc_codegen_ssa::traits::{
     BaseTypeMethods,
     BuilderMethods,
     ConstMethods,
-    DerivedTypeMethods,
     LayoutTypeMethods,
     HasCodegen,
     OverflowOp,

View file

@@ -16,6 +16,10 @@ use crate::context::CodegenCx;
 use crate::type_of::LayoutGccExt;

 impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
+    pub fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
+        self.context.new_cast(None, val, ty)
+    }
+
     pub fn const_bytes(&self, bytes: &[u8]) -> RValue<'gcc> {
         bytes_in_context(self, bytes)
     }
@@ -242,10 +246,6 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         const_alloc_to_gcc(self, alloc)
     }

-    fn const_ptrcast(&self, val: RValue<'gcc>, ty: Type<'gcc>) -> RValue<'gcc> {
-        self.context.new_cast(None, val, ty)
-    }
-
     fn const_bitcast(&self, value: RValue<'gcc>, typ: Type<'gcc>) -> RValue<'gcc> {
         if value.get_type() == self.bool_type.make_pointer() {
             if let Some(pointee) = typ.get_pointee() {

View file

@@ -12,7 +12,7 @@ use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::{ArgAbiMethods, BaseTypeMethods, BuilderMethods, ConstMethods, IntrinsicCallMethods};
 #[cfg(feature="master")]
-use rustc_codegen_ssa::traits::{DerivedTypeMethods, MiscMethods};
+use rustc_codegen_ssa::traits::MiscMethods;
 use rustc_codegen_ssa::errors::InvalidMonomorphization;
 use rustc_middle::bug;
 use rustc_middle::ty::{self, Instance, Ty};

View file

@@ -54,6 +54,23 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> {
         self.u128_type
     }

+    pub fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
+        ty.make_pointer()
+    }
+
+    pub fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
+        // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
+        ty.make_pointer()
+    }
+
+    pub fn type_i8p(&self) -> Type<'gcc> {
+        self.type_ptr_to(self.type_i8())
+    }
+
+    pub fn type_i8p_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+        self.type_ptr_to_ext(self.type_i8(), address_space)
+    }
+
     pub fn type_pointee_for_align(&self, align: Align) -> Type<'gcc> {
         // FIXME(eddyb) We could find a better approximation if ity.align < align.
         let ity = Integer::approximate_align(self, align);
@@ -149,13 +166,12 @@ impl<'gcc, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         }
     }

-    fn type_ptr_to(&self, ty: Type<'gcc>) -> Type<'gcc> {
-        ty.make_pointer()
+    fn type_ptr(&self) -> Type<'gcc> {
+        self.type_ptr_to(self.type_void())
     }

-    fn type_ptr_to_ext(&self, ty: Type<'gcc>, _address_space: AddressSpace) -> Type<'gcc> {
-        // TODO(antoyo): use address_space, perhaps with TYPE_ADDR_SPACE?
-        ty.make_pointer()
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> Type<'gcc> {
+        self.type_ptr_to_ext(self.type_void(), address_space)
     }

     fn element_type(&self, ty: Type<'gcc>) -> Type<'gcc> {

View file

@@ -216,9 +216,7 @@ impl<'ll, 'tcx> ArgAbiExt<'ll, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
             if can_store_through_cast_ptr {
-                let cast_ptr_llty = bx.type_ptr_to(cast.llvm_type(bx));
-                let cast_dst = bx.pointercast(dst.llval, cast_ptr_llty);
-                bx.store(val, cast_dst, self.layout.align.abi);
+                bx.store(val, dst.llval, self.layout.align.abi);
             } else {
                 // The actual return type is a struct, but the ABI
                 // adaptation code has cast it into some scalar type. The
@@ -336,7 +334,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_llvm_type(cx),
             PassMode::Cast(cast, _) => cast.llvm_type(cx),
             PassMode::Indirect { .. } => {
-                llargument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
+                llargument_tys.push(cx.type_ptr());
                 cx.type_void()
             }
         };
@@ -364,9 +362,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                     }
                     cast.llvm_type(cx)
                 }
-                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
-                    cx.type_ptr_to(arg.memory_ty(cx))
-                }
+                PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => cx.type_ptr(),
             };
             llargument_tys.push(llarg_ty);
         }
@@ -379,12 +375,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
     }

     fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type {
-        unsafe {
-            llvm::LLVMPointerType(
-                self.llvm_type(cx),
-                cx.data_layout().instruction_address_space.0 as c_uint,
-            )
-        }
+        cx.type_ptr_ext(cx.data_layout().instruction_address_space)
     }

     fn llvm_cconv(&self) -> llvm::CallConv {

View file

@@ -28,7 +28,7 @@ pub(crate) unsafe fn codegen(
         tws => bug!("Unsupported target word size for int: {}", tws),
     };
     let i8 = llvm::LLVMInt8TypeInContext(llcx);
-    let i8p = llvm::LLVMPointerType(i8, 0);
+    let i8p = llvm::LLVMPointerTypeInContext(llcx, 0);
     let void = llvm::LLVMVoidTypeInContext(llcx);

     if kind == AllocatorKind::Default {

View file

@@ -4,7 +4,6 @@ use crate::back::profiling::{
 };
 use crate::base;
 use crate::common;
-use crate::consts;
 use crate::errors::{
     CopyBitcode, FromLlvmDiag, FromLlvmOptimizationDiag, LlvmError, WithLlvmError, WriteBytecode,
 };
@@ -992,7 +991,7 @@ fn create_msvc_imps(
     let prefix = if cgcx.target_arch == "x86" { "\x01__imp__" } else { "\x01__imp_" };

     unsafe {
-        let i8p_ty = Type::i8p_llcx(llcx);
+        let ptr_ty = Type::ptr_llcx(llcx);
         let globals = base::iter_globals(llmod)
             .filter(|&val| {
                 llvm::LLVMRustGetLinkage(val) == llvm::Linkage::ExternalLinkage
@@ -1012,8 +1011,8 @@ fn create_msvc_imps(
             .collect::<Vec<_>>();

         for (imp_name, val) in globals {
-            let imp = llvm::LLVMAddGlobal(llmod, i8p_ty, imp_name.as_ptr().cast());
-            llvm::LLVMSetInitializer(imp, consts::ptrcast(val, i8p_ty));
+            let imp = llvm::LLVMAddGlobal(llmod, ptr_ty, imp_name.as_ptr().cast());
+            llvm::LLVMSetInitializer(imp, val);
             llvm::LLVMRustSetLinkage(imp, llvm::Linkage::ExternalLinkage);
         }
     }

View file

@@ -123,8 +123,7 @@ pub fn compile_codegen_unit(tcx: TyCtxt<'_>, cgu_name: Symbol) -> (ModuleCodegen
         // happen after the llvm.used variables are created.
         for &(old_g, new_g) in cx.statics_to_rauw().borrow().iter() {
             unsafe {
-                let bitcast = llvm::LLVMConstPointerCast(new_g, cx.val_ty(old_g));
-                llvm::LLVMReplaceAllUsesWith(old_g, bitcast);
+                llvm::LLVMReplaceAllUsesWith(old_g, new_g);
                 llvm::LLVMDeleteGlobal(old_g);
             }
         }

View file

@@ -652,7 +652,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) -> &'ll Value {
         debug!("Store {:?} -> {:?} ({:?})", val, ptr, flags);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMBuildStore(self.llbuilder, val, ptr);
             let align =
@@ -682,7 +682,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         size: Size,
     ) {
         debug!("Store {:?} -> {:?}", val, ptr);
-        let ptr = self.check_store(val, ptr);
+        assert_eq!(self.cx.type_kind(self.cx.val_ty(ptr)), TypeKind::Pointer);
         unsafe {
             let store = llvm::LLVMRustBuildAtomicStore(
                 self.llbuilder,
@@ -873,8 +873,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memcpy not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemCpy(
                 self.llbuilder,
@@ -900,8 +898,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         assert!(!flags.contains(MemFlags::NONTEMPORAL), "non-temporal memmove not supported");
         let size = self.intcast(size, self.type_isize(), false);
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let dst = self.pointercast(dst, self.type_i8p());
-        let src = self.pointercast(src, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemMove(
                 self.llbuilder,
@@ -924,7 +920,6 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         flags: MemFlags,
     ) {
         let is_volatile = flags.contains(MemFlags::VOLATILE);
-        let ptr = self.pointercast(ptr, self.type_i8p());
         unsafe {
             llvm::LLVMRustBuildMemSet(
                 self.llbuilder,
@@ -981,7 +976,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn cleanup_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let landing_pad = self.landing_pad(ty, pers_fn, 0);
         unsafe {
             llvm::LLVMSetCleanup(landing_pad, llvm::True);
@@ -990,14 +985,14 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
     }

     fn filter_landing_pad(&mut self, pers_fn: &'ll Value) -> (&'ll Value, &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let landing_pad = self.landing_pad(ty, pers_fn, 1);
-        self.add_clause(landing_pad, self.const_array(self.type_i8p(), &[]));
+        self.add_clause(landing_pad, self.const_array(self.type_ptr(), &[]));
         (self.extract_value(landing_pad, 0), self.extract_value(landing_pad, 1))
     }

     fn resume(&mut self, exn0: &'ll Value, exn1: &'ll Value) {
-        let ty = self.type_struct(&[self.type_i8p(), self.type_i32()], false);
+        let ty = self.type_struct(&[self.type_ptr(), self.type_i32()], false);
         let mut exn = self.const_poison(ty);
         exn = self.insert_value(exn, exn0, 0);
         exn = self.insert_value(exn, exn1, 1);
@@ -1161,7 +1156,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
         let llfn = unsafe { llvm::LLVMRustGetInstrProfIncrementIntrinsic(self.cx().llmod) };
         let llty = self.cx.type_func(
-            &[self.cx.type_i8p(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
+            &[self.cx.type_ptr(), self.cx.type_i64(), self.cx.type_i32(), self.cx.type_i32()],
             self.cx.type_void(),
         );
         let args = &[fn_name, hash, num_counters, index];
@@ -1387,25 +1382,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
         ret.expect("LLVM does not have support for catchret")
     }

-    fn check_store(&mut self, val: &'ll Value, ptr: &'ll Value) -> &'ll Value {
-        let dest_ptr_ty = self.cx.val_ty(ptr);
-        let stored_ty = self.cx.val_ty(val);
-        let stored_ptr_ty = self.cx.type_ptr_to(stored_ty);
-
-        assert_eq!(self.cx.type_kind(dest_ptr_ty), TypeKind::Pointer);
-
-        if dest_ptr_ty == stored_ptr_ty {
-            ptr
-        } else {
-            debug!(
-                "type mismatch in store. \
-                    Expected {:?}, got {:?}; inserting bitcast",
-                dest_ptr_ty, stored_ptr_ty
-            );
-            self.bitcast(ptr, stored_ptr_ty)
-        }
-    }
-
     fn check_call<'b>(
         &mut self,
         typ: &str,
@@ -1466,7 +1442,6 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> {
             return;
         }

-        let ptr = self.pointercast(ptr, self.cx.type_i8p());
         self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]);
     }

View file

@@ -4,13 +4,11 @@
 //! and methods are represented as just a fn ptr and not a full
 //! closure.

-use crate::abi::FnAbiLlvmExt;
 use crate::attributes;
 use crate::common;
 use crate::context::CodegenCx;
 use crate::llvm;
 use crate::value::Value;
-use rustc_codegen_ssa::traits::*;
 use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt};
 use rustc_middle::ty::{self, Instance, TypeVisitableExt};
@@ -45,39 +43,7 @@ pub fn get_fn<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>) ->
     let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());

     let llfn = if let Some(llfn) = cx.get_declared_value(sym) {
-        // Create a fn pointer with the new signature.
-        let llptrty = fn_abi.ptr_to_llvm_type(cx);
-
-        // This is subtle and surprising, but sometimes we have to bitcast
-        // the resulting fn pointer. The reason has to do with external
-        // functions. If you have two crates that both bind the same C
-        // library, they may not use precisely the same types: for
-        // example, they will probably each declare their own structs,
-        // which are distinct types from LLVM's point of view (nominal
-        // types).
-        //
-        // Now, if those two crates are linked into an application, and
-        // they contain inlined code, you can wind up with a situation
-        // where both of those functions wind up being loaded into this
-        // application simultaneously. In that case, the same function
-        // (from LLVM's point of view) requires two types. But of course
-        // LLVM won't allow one function to have two types.
-        //
-        // What we currently do, therefore, is declare the function with
-        // one of the two types (whichever happens to come first) and then
-        // bitcast as needed when the function is referenced to make sure
-        // it has the type we expect.
-        //
-        // This can occur on either a crate-local or crate-external
-        // reference. It also occurs when testing libcore and in some
-        // other weird situations. Annoying.
-        if cx.val_ty(llfn) != llptrty {
-            debug!("get_fn: casting {:?} to {:?}", llfn, llptrty);
-            cx.const_ptrcast(llfn, llptrty)
-        } else {
-            debug!("get_fn: not casting pointer!");
-            llfn
-        }
+        llfn
     } else {
         let instance_def_id = instance.def_id();
         let llfn = if tcx.sess.target.arch == "x86" &&
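
The comment deleted above was the historical justification for the cast: two crates binding the same C function could declare it at different nominal LLVM types, so every reference to the cached declaration might need a pointer cast. A minimal sketch (mine, not rustc code) of why that branch can no longer fire:

use std::collections::HashMap;

// Toy model: under opaque pointers, the type of any function *value* is the
// single `ptr` type, regardless of the signature it was declared with.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum LlvmTy {
    OpaquePtr, // with typed pointers this was a distinct fn-pointer type per signature
}

fn main() {
    let mut declared: HashMap<&str, LlvmTy> = HashMap::new();
    // Crate A declares `log` using its own nominal struct types.
    declared.insert("log", LlvmTy::OpaquePtr);
    // Crate B references the same symbol with a different nominal signature,
    // but the value type it expects is still `ptr`, so the removed
    // `if cx.val_ty(llfn) != llptrty { const_ptrcast(..) }` path is dead.
    assert_eq!(declared["log"], LlvmTy::OpaquePtr);
}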

View file

@@ -1,10 +1,9 @@
 //! Code that is useful in various codegen modules.

-use crate::consts::{self, const_alloc_to_llvm};
+use crate::consts::const_alloc_to_llvm;
 pub use crate::context::CodegenCx;
 use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, OperandBundleDef, True};
 use crate::type_::Type;
-use crate::type_of::LayoutLlvmExt;
 use crate::value::Value;

 use rustc_ast::Mutability;
@@ -13,7 +12,6 @@ use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
 use rustc_hir::def_id::DefId;
 use rustc_middle::bug;
 use rustc_middle::mir::interpret::{ConstAllocation, GlobalAlloc, Scalar};
-use rustc_middle::ty::layout::LayoutOf;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::cstore::{DllCallingConvention, DllImport, PeImportNameType};
 use rustc_target::abi::{self, AddressSpace, HasDataLayout, Pointer};
@@ -211,11 +209,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             })
             .1;
         let len = s.len();
-        let cs = consts::ptrcast(
-            str_global,
-            self.type_ptr_to(self.layout_of(self.tcx.types.str_).llvm_type(self)),
-        );
-        (cs, self.const_usize(len as u64))
+        (str_global, self.const_usize(len as u64))
     }

     fn const_struct(&self, elts: &[&'ll Value], packed: bool) -> &'ll Value {
@@ -292,7 +286,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                 let llval = unsafe {
                     llvm::LLVMConstInBoundsGEP2(
                         self.type_i8(),
-                        self.const_bitcast(base_addr, self.type_i8p_ext(base_addr_space)),
+                        self.const_bitcast(base_addr, self.type_ptr_ext(base_addr_space)),
                         &self.const_usize(offset.bytes()),
                         1,
                     )
@@ -310,10 +304,6 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         const_alloc_to_llvm(self, alloc)
     }

-    fn const_ptrcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-        consts::ptrcast(val, ty)
-    }
-
     fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         self.const_bitcast(val, ty)
     }
@@ -322,7 +312,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe {
             llvm::LLVMConstInBoundsGEP2(
                 self.type_i8(),
-                self.const_bitcast(base_addr, self.type_i8p()),
+                base_addr,
                 &self.const_usize(offset.bytes()),
                 1,
             )

View file

@@ -103,7 +103,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                     value: Primitive::Pointer(address_space),
                     valid_range: WrappingRange::full(dl.pointer_size),
                 },
-                cx.type_i8p_ext(address_space),
+                cx.type_ptr_ext(address_space),
             ));
             next_offset = offset + pointer_size;
         }
@@ -179,7 +179,7 @@ fn check_and_apply_linkage<'ll, 'tcx>(
                 })
             });
             llvm::LLVMRustSetLinkage(g2, llvm::Linkage::InternalLinkage);
-            llvm::LLVMSetInitializer(g2, cx.const_ptrcast(g1, llty));
+            llvm::LLVMSetInitializer(g2, g1);
             g2
         }
     } else if cx.tcx.sess.target.arch == "x86" &&
@@ -193,10 +193,6 @@ fn check_and_apply_linkage<'ll, 'tcx>(
     }
 }

-pub fn ptrcast<'ll>(val: &'ll Value, ty: &'ll Type) -> &'ll Value {
-    unsafe { llvm::LLVMConstPointerCast(val, ty) }
-}
-
 impl<'ll> CodegenCx<'ll, '_> {
     pub(crate) fn const_bitcast(&self, val: &'ll Value, ty: &'ll Type) -> &'ll Value {
         unsafe { llvm::LLVMConstBitCast(val, ty) }
@@ -250,7 +246,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let g = if def_id.is_local() && !self.tcx.is_foreign_item(def_id) {
             let llty = self.layout_of(ty).llvm_type(self);
             if let Some(g) = self.get_declared_value(sym) {
-                if self.val_ty(g) != self.type_ptr_to(llty) {
+                if self.val_ty(g) != self.type_ptr() {
                     span_bug!(self.tcx.def_span(def_id), "Conflicting types for static");
                 }
             }
@@ -551,16 +547,14 @@ impl<'ll> StaticMethods for CodegenCx<'ll, '_> {
         }
     }

-    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of i8*.
+    /// Add a global value to a list to be stored in the `llvm.used` variable, an array of ptr.
     fn add_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.used_statics.borrow_mut().push(cast);
+        self.used_statics.borrow_mut().push(global);
     }

     /// Add a global value to a list to be stored in the `llvm.compiler.used` variable,
-    /// an array of i8*.
+    /// an array of ptr.
     fn add_compiler_used_global(&self, global: &'ll Value) {
-        let cast = unsafe { llvm::LLVMConstPointerCast(global, self.type_i8p()) };
-        self.compiler_used_statics.borrow_mut().push(cast);
+        self.compiler_used_statics.borrow_mut().push(global);
     }
 }

View file

@@ -59,17 +59,6 @@ pub struct CodegenCx<'ll, 'tcx> {
     /// Cache of constant strings,
     pub const_str_cache: RefCell<FxHashMap<String, &'ll Value>>,

-    /// Reverse-direction for const ptrs cast from globals.
-    ///
-    /// Key is a Value holding a `*T`,
-    /// Val is a Value holding a `*[T]`.
-    ///
-    /// Needed because LLVM loses pointer->pointee association
-    /// when we ptrcast, and we have to ptrcast during codegen
-    /// of a `[T]` const because we form a slice, a `(*T,usize)` pair, not
-    /// a pointer to an LLVM array type. Similar for trait objects.
-    pub const_unsized: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
-
     /// Cache of emitted const globals (value -> global)
     pub const_globals: RefCell<FxHashMap<&'ll Value, &'ll Value>>,
@@ -464,7 +453,6 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             instances: Default::default(),
             vtables: Default::default(),
             const_str_cache: Default::default(),
-            const_unsized: Default::default(),
             const_globals: Default::default(),
             statics_to_rauw: RefCell::new(Vec::new()),
             used_statics: RefCell::new(Vec::new()),
@@ -495,7 +483,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     pub(crate) fn create_used_variable_impl(&self, name: &'static CStr, values: &[&'ll Value]) {
         let section = cstr!("llvm.metadata");
-        let array = self.const_array(self.type_ptr_to(self.type_i8()), values);
+        let array = self.const_array(self.type_ptr(), values);

         unsafe {
             let g = llvm::LLVMAddGlobal(self.llmod, self.val_ty(array), name.as_ptr());
@@ -673,7 +661,7 @@ impl<'ll> CodegenCx<'ll, '_> {
            ($($field_ty:expr),*) => (self.type_struct( &[$($field_ty),*], false))
         }

-        let i8p = self.type_i8p();
+        let ptr = self.type_ptr();
         let void = self.type_void();
         let i1 = self.type_i1();
         let t_i8 = self.type_i8();
@@ -687,7 +675,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         let t_metadata = self.type_metadata();
         let t_token = self.type_token();

-        ifn!("llvm.wasm.get.exception", fn(t_token) -> i8p);
+        ifn!("llvm.wasm.get.exception", fn(t_token) -> ptr);
         ifn!("llvm.wasm.get.ehselector", fn(t_token) -> t_i32);

         ifn!("llvm.wasm.trunc.unsigned.i32.f32", fn(t_f32) -> t_i32);
@@ -723,7 +711,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.trap", fn() -> void);
         ifn!("llvm.debugtrap", fn() -> void);
-        ifn!("llvm.frameaddress", fn(t_i32) -> i8p);
+        ifn!("llvm.frameaddress", fn(t_i32) -> ptr);

         ifn!("llvm.powi.f32", fn(t_f32, t_i32) -> t_f32);
         ifn!("llvm.powi.f64", fn(t_f64, t_i32) -> t_f64);
@@ -890,43 +878,43 @@ impl<'ll> CodegenCx<'ll, '_> {
         ifn!("llvm.usub.sat.i64", fn(t_i64, t_i64) -> t_i64);
         ifn!("llvm.usub.sat.i128", fn(t_i128, t_i128) -> t_i128);

-        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
-        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
+        ifn!("llvm.lifetime.start.p0i8", fn(t_i64, ptr) -> void);
+        ifn!("llvm.lifetime.end.p0i8", fn(t_i64, ptr) -> void);

         ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
-        ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
+        ifn!("llvm.eh.typeid.for", fn(ptr) -> t_i32);
         ifn!("llvm.localescape", fn(...) -> void);
-        ifn!("llvm.localrecover", fn(i8p, i8p, t_i32) -> i8p);
-        ifn!("llvm.x86.seh.recoverfp", fn(i8p, i8p) -> i8p);
+        ifn!("llvm.localrecover", fn(ptr, ptr, t_i32) -> ptr);
+        ifn!("llvm.x86.seh.recoverfp", fn(ptr, ptr) -> ptr);

         ifn!("llvm.assume", fn(i1) -> void);
-        ifn!("llvm.prefetch", fn(i8p, t_i32, t_i32, t_i32) -> void);
+        ifn!("llvm.prefetch", fn(ptr, t_i32, t_i32, t_i32) -> void);

         // This isn't an "LLVM intrinsic", but LLVM's optimization passes
         // recognize it like one and we assume it exists in `core::slice::cmp`
         match self.sess().target.arch.as_ref() {
-            "avr" | "msp430" => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i16),
-            _ => ifn!("memcmp", fn(i8p, i8p, t_isize) -> t_i32),
+            "avr" | "msp430" => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i16),
+            _ => ifn!("memcmp", fn(ptr, ptr, t_isize) -> t_i32),
         }

         // variadic intrinsics
-        ifn!("llvm.va_start", fn(i8p) -> void);
-        ifn!("llvm.va_end", fn(i8p) -> void);
-        ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
+        ifn!("llvm.va_start", fn(ptr) -> void);
+        ifn!("llvm.va_end", fn(ptr) -> void);
+        ifn!("llvm.va_copy", fn(ptr, ptr) -> void);

         if self.sess().instrument_coverage() {
-            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+            ifn!("llvm.instrprof.increment", fn(ptr, t_i64, t_i32, t_i32) -> void);
         }

-        ifn!("llvm.type.test", fn(i8p, t_metadata) -> i1);
-        ifn!("llvm.type.checked.load", fn(i8p, t_i32, t_metadata) -> mk_struct! {i8p, i1});
+        ifn!("llvm.type.test", fn(ptr, t_metadata) -> i1);
+        ifn!("llvm.type.checked.load", fn(ptr, t_i32, t_metadata) -> mk_struct! {ptr, i1});

         if self.sess().opts.debuginfo != DebugInfo::None {
             ifn!("llvm.dbg.declare", fn(t_metadata, t_metadata) -> void);
             ifn!("llvm.dbg.value", fn(t_metadata, t_i64, t_metadata) -> void);
         }

-        ifn!("llvm.ptrmask", fn(i8p, t_isize) -> i8p);
+        ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr);

         None
     }
@@ -940,12 +928,10 @@ impl<'ll> CodegenCx<'ll, '_> {
         let eh_catch_typeinfo = match tcx.lang_items().eh_catch_typeinfo() {
             Some(def_id) => self.get_static(def_id),
             _ => {
-                let ty = self
-                    .type_struct(&[self.type_ptr_to(self.type_isize()), self.type_i8p()], false);
+                let ty = self.type_struct(&[self.type_ptr(), self.type_ptr()], false);
                 self.declare_global("rust_eh_catch_typeinfo", ty)
             }
         };
-        let eh_catch_typeinfo = self.const_bitcast(eh_catch_typeinfo, self.type_i8p());
         self.eh_catch_typeinfo.set(Some(eh_catch_typeinfo));
         eh_catch_typeinfo
     }

View file

@@ -17,8 +17,7 @@ use rustc_span::symbol::sym;
 /// .debug_gdb_scripts global is referenced, so it isn't removed by the linker.
 pub fn insert_reference_to_gdb_debug_scripts_section_global(bx: &mut Builder<'_, '_, '_>) {
     if needs_gdb_debug_scripts_section(bx) {
-        let gdb_debug_scripts_section =
-            bx.const_bitcast(get_or_insert_gdb_debug_scripts_section_global(bx), bx.type_i8p());
+        let gdb_debug_scripts_section = get_or_insert_gdb_debug_scripts_section_global(bx);
         // Load just the first byte as that's all that's necessary to force
         // LLVM to keep around the reference to the global.
         let volatile_load_instruction = bx.volatile_load(bx.type_i8(), gdb_debug_scripts_section);

View file

@@ -167,7 +167,6 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                 let ptr = args[0].immediate();
                 let load = if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
                     let llty = ty.llvm_type(self);
-                    let ptr = self.pointercast(ptr, self.type_ptr_to(llty));
                     self.volatile_load(llty, ptr)
                 } else {
                     self.volatile_load(self.layout_of(tp_ty).llvm_type(self), ptr)
@@ -317,18 +316,12 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     self.const_bool(true)
                 } else if use_integer_compare {
                     let integer_ty = self.type_ix(layout.size().bits());
-                    let ptr_ty = self.type_ptr_to(integer_ty);
-                    let a_ptr = self.bitcast(a, ptr_ty);
-                    let a_val = self.load(integer_ty, a_ptr, layout.align().abi);
-                    let b_ptr = self.bitcast(b, ptr_ty);
-                    let b_val = self.load(integer_ty, b_ptr, layout.align().abi);
+                    let a_val = self.load(integer_ty, a, layout.align().abi);
+                    let b_val = self.load(integer_ty, b, layout.align().abi);
                     self.icmp(IntPredicate::IntEQ, a_val, b_val)
                 } else {
-                    let i8p_ty = self.type_i8p();
-                    let a_ptr = self.bitcast(a, i8p_ty);
-                    let b_ptr = self.bitcast(b, i8p_ty);
                     let n = self.const_usize(layout.size().bytes());
-                    let cmp = self.call_intrinsic("memcmp", &[a_ptr, b_ptr, n]);
+                    let cmp = self.call_intrinsic("memcmp", &[a, b, n]);
                     match self.cx.sess().target.arch.as_ref() {
                         "avr" | "msp430" => self.icmp(IntPredicate::IntEQ, cmp, self.const_i16(0)),
                         _ => self.icmp(IntPredicate::IntEQ, cmp, self.const_i32(0)),
@@ -383,10 +376,8 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
         };

         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-                let ptr_llty = self.type_ptr_to(ty.llvm_type(self));
-                let ptr = self.pointercast(result.llval, ptr_llty);
-                self.store(llval, ptr, result.align);
+            if let PassMode::Cast(_, _) = &fn_abi.ret.mode {
+                self.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(self, llval, result.layout)
                     .val
@@ -410,9 +401,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
     fn type_test(&mut self, pointer: Self::Value, typeid: Self::Value) -> Self::Value {
         // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time
         // optimization pass replaces calls to this intrinsic with code to test type membership.
-        let i8p_ty = self.type_i8p();
-        let bitcast = self.bitcast(pointer, i8p_ty);
-        self.call_intrinsic("llvm.type.test", &[bitcast, typeid])
+        self.call_intrinsic("llvm.type.test", &[pointer, typeid])
     }

     fn type_checked_load(
@@ -444,7 +433,7 @@ fn try_intrinsic<'ll>(
     dest: &'ll Value,
 ) {
     if bx.sess().panic_strategy() == PanicStrategy::Abort {
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.call(try_func_ty, None, None, try_func, &[data], None);
         // Return 0 unconditionally from the intrinsic call;
         // we can never unwind.
@@ -544,8 +533,8 @@ fn codegen_msvc_try<'ll>(
         //
         // More information can be found in libstd's seh.rs implementation.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
-        let slot = bx.alloca(bx.type_i8p(), ptr_align);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let slot = bx.alloca(bx.type_ptr(), ptr_align);
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);

         bx.switch_to_block(normal);
@@ -568,10 +557,10 @@ fn codegen_msvc_try<'ll>(
         //
         // When modifying, make sure that the type_name string exactly matches
         // the one used in library/panic_unwind/src/seh.rs.
-        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_i8p());
+        let type_info_vtable = bx.declare_global("??_7type_info@@6B@", bx.type_ptr());
         let type_name = bx.const_bytes(b"rust_panic\0");
         let type_info =
-            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_i8p()), type_name], false);
+            bx.const_struct(&[type_info_vtable, bx.const_null(bx.type_ptr()), type_name], false);
         let tydesc = bx.declare_global("__rust_panic_type_info", bx.val_ty(type_info));
         unsafe {
             llvm::LLVMRustSetLinkage(tydesc, llvm::Linkage::LinkOnceODRLinkage);
@@ -588,15 +577,15 @@ fn codegen_msvc_try<'ll>(
         bx.switch_to_block(catchpad_rust);
         let flags = bx.const_i32(8);
         let funclet = bx.catch_pad(cs, &[tydesc, flags, slot]);
-        let ptr = bx.load(bx.type_i8p(), slot, ptr_align);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let ptr = bx.load(bx.type_ptr(), slot, ptr_align);
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);

         // The flag value of 64 indicates a "catch-all".
         bx.switch_to_block(catchpad_foreign);
         let flags = bx.const_i32(64);
-        let null = bx.const_null(bx.type_i8p());
+        let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null, flags, null]);
         bx.call(catch_ty, None, None, catch_func, &[data, null], Some(&funclet));
         bx.catch_ret(&funclet, caught);
@@ -655,7 +644,7 @@ fn codegen_wasm_try<'ll>(
         //      ret i32 1
         //   }
         //
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], normal, catchswitch, None);

         bx.switch_to_block(normal);
@@ -665,13 +654,13 @@ fn codegen_wasm_try<'ll>(
         let cs = bx.catch_switch(None, None, &[catchpad]);

         bx.switch_to_block(catchpad);
-        let null = bx.const_null(bx.type_i8p());
+        let null = bx.const_null(bx.type_ptr());
         let funclet = bx.catch_pad(cs, &[null]);

         let ptr = bx.call_intrinsic("llvm.wasm.get.exception", &[funclet.cleanuppad()]);
         let _sel = bx.call_intrinsic("llvm.wasm.get.ehselector", &[funclet.cleanuppad()]);

-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], Some(&funclet));
         bx.catch_ret(&funclet, caught);
@@ -723,7 +712,7 @@ fn codegen_gnu_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
        bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);

         bx.switch_to_block(then);
@@ -736,12 +725,12 @@ fn codegen_gnu_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         // rust_try ignores the selector.
         bx.switch_to_block(catch);
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 1);
-        let tydesc = bx.const_null(bx.type_i8p());
+        let tydesc = bx.const_null(bx.type_ptr());
         bx.add_clause(vals, tydesc);
         let ptr = bx.extract_value(vals, 0);
-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, ptr], None);
         bx.ret(bx.const_i32(1));
     });
@@ -787,7 +776,7 @@ fn codegen_emcc_try<'ll>(
         let try_func = llvm::get_param(bx.llfn(), 0);
         let data = llvm::get_param(bx.llfn(), 1);
         let catch_func = llvm::get_param(bx.llfn(), 2);
-        let try_func_ty = bx.type_func(&[bx.type_i8p()], bx.type_void());
+        let try_func_ty = bx.type_func(&[bx.type_ptr()], bx.type_void());
         bx.invoke(try_func_ty, None, None, try_func, &[data], then, catch, None);

         bx.switch_to_block(then);
@@ -800,10 +789,10 @@ fn codegen_emcc_try<'ll>(
         // the landing pad clauses the exception's type had been matched to.
         bx.switch_to_block(catch);
         let tydesc = bx.eh_catch_typeinfo();
-        let lpad_ty = bx.type_struct(&[bx.type_i8p(), bx.type_i32()], false);
+        let lpad_ty = bx.type_struct(&[bx.type_ptr(), bx.type_i32()], false);
         let vals = bx.landing_pad(lpad_ty, bx.eh_personality(), 2);
         bx.add_clause(vals, tydesc);
-        bx.add_clause(vals, bx.const_null(bx.type_i8p()));
+        bx.add_clause(vals, bx.const_null(bx.type_ptr()));
         let ptr = bx.extract_value(vals, 0);
         let selector = bx.extract_value(vals, 1);
@@ -816,7 +805,7 @@ fn codegen_emcc_try<'ll>(
         // create an alloca and pass a pointer to that.
         let ptr_align = bx.tcx().data_layout.pointer_align.abi;
         let i8_align = bx.tcx().data_layout.i8_align.abi;
-        let catch_data_type = bx.type_struct(&[bx.type_i8p(), bx.type_bool()], false);
+        let catch_data_type = bx.type_struct(&[bx.type_ptr(), bx.type_bool()], false);
         let catch_data = bx.alloca(catch_data_type, ptr_align);
         let catch_data_0 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(0)]);
@@ -824,9 +813,8 @@ fn codegen_emcc_try<'ll>(
         let catch_data_1 =
             bx.inbounds_gep(catch_data_type, catch_data, &[bx.const_usize(0), bx.const_usize(1)]);
         bx.store(is_rust_panic, catch_data_1, i8_align);
-        let catch_data = bx.bitcast(catch_data, bx.type_i8p());

-        let catch_ty = bx.type_func(&[bx.type_i8p(), bx.type_i8p()], bx.type_void());
+        let catch_ty = bx.type_func(&[bx.type_ptr(), bx.type_ptr()], bx.type_void());
         bx.call(catch_ty, None, None, catch_func, &[data, catch_data], None);
         bx.ret(bx.const_i32(1));
     });
@@ -967,8 +955,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                     let place = PlaceRef::alloca(bx, args[0].layout);
                     args[0].val.store(bx, place);
                     let int_ty = bx.type_ix(expected_bytes * 8);
-                    let ptr = bx.pointercast(place.llval, bx.cx.type_ptr_to(int_ty));
-                    bx.load(int_ty, ptr, Align::ONE)
+                    bx.load(int_ty, place.llval, Align::ONE)
                 }
                 _ => return_error!(InvalidMonomorphization::InvalidBitmask {
                     span,
@@ -1217,7 +1204,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
                 let ptr = bx.alloca(bx.type_ix(expected_bytes * 8), Align::ONE);
                 bx.store(ze, ptr, Align::ONE);
                 let array_ty = bx.type_array(bx.type_i8(), expected_bytes);
-                let ptr = bx.pointercast(ptr, bx.cx.type_ptr_to(array_ty));
                 return Ok(bx.load(array_ty, ptr, Align::ONE));
             }
             _ => return_error!(InvalidMonomorphization::CannotReturn {
@@ -1321,50 +1307,34 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
     // FIXME: use:
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Function.h#L182
     //  https://github.com/llvm-mirror/llvm/blob/master/include/llvm/IR/Intrinsics.h#L81
-    fn llvm_vector_str(
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        no_pointers: usize,
-        bx: &Builder<'_, '_, '_>,
-    ) -> String {
-        let p0s: String = "p0".repeat(no_pointers);
+    fn llvm_vector_str(bx: &Builder<'_, '_, '_>, elem_ty: Ty<'_>, vec_len: u64) -> String {
         match *elem_ty.kind() {
             ty::Int(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: IntTy::Isize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
             ty::Uint(v) => format!(
-                "v{}{}i{}",
+                "v{}i{}",
                 vec_len,
-                p0s,
                 // Normalize to prevent crash if v: UIntTy::Usize
                 v.normalize(bx.target_spec().pointer_width).bit_width().unwrap()
             ),
-            ty::Float(v) => format!("v{}{}f{}", vec_len, p0s, v.bit_width()),
+            ty::Float(v) => format!("v{}f{}", vec_len, v.bit_width()),
+            ty::RawPtr(_) => format!("v{}p0", vec_len),
             _ => unreachable!(),
         }
     }

-    fn llvm_vector_ty<'ll>(
-        cx: &CodegenCx<'ll, '_>,
-        elem_ty: Ty<'_>,
-        vec_len: u64,
-        mut no_pointers: usize,
-    ) -> &'ll Type {
-        // FIXME: use cx.layout_of(ty).llvm_type() ?
-        let mut elem_ty = match *elem_ty.kind() {
+    fn llvm_vector_ty<'ll>(cx: &CodegenCx<'ll, '_>, elem_ty: Ty<'_>, vec_len: u64) -> &'ll Type {
+        let elem_ty = match *elem_ty.kind() {
             ty::Int(v) => cx.type_int_from_ty(v),
             ty::Uint(v) => cx.type_uint_from_ty(v),
             ty::Float(v) => cx.type_float_from_ty(v),
+            ty::RawPtr(_) => cx.type_ptr(),
             _ => unreachable!(),
         };
-        while no_pointers > 0 {
-            elem_ty = cx.type_ptr_to(elem_ty);
-            no_pointers -= 1;
-        }
         cx.type_vector(elem_ty, vec_len)
     }
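
Concretely: for a gather of four f32 values through four *const f32 pointers, the helpers above now produce a shorter pointer mangling (example values are mine; the format strings are the ones visible in this hunk):

fn main() {
    // Old scheme: a pointer was mangled as "p0" glued onto its pointee,
    // giving "v4p0f32" for the pointer-vector operand.
    let old = format!("llvm.masked.gather.{}.{}", "v4f32", "v4p0f32");
    // New scheme: a raw-pointer element is just "p0", giving "v4p0".
    let new = format!("llvm.masked.gather.{}.{}", "v4f32", "v4p0");
    assert_eq!(old, "llvm.masked.gather.v4f32.v4p0f32");
    assert_eq!(new, "llvm.masked.gather.v4f32.v4p0");
}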
@@ -1419,47 +1389,26 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             InvalidMonomorphization::ExpectedReturnType { span, name, in_ty, ret_ty }
         );

-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem => (ptr_count(element_ty1), non_ptr(element_ty1)),
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Not,
-                    }
-                );
-                unreachable!();
-            }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p) if p.ty == in_elem && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Not,
+            }
+        );

         // The element type of the third argument must be a signed integer type of any width:
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
@@ -1490,12 +1439,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         };

         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);

         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);

         let llvm_intrinsic =
             format!("llvm.masked.gather.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
@@ -1559,50 +1508,28 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         );

-        // This counts how many pointers
-        fn ptr_count(t: Ty<'_>) -> usize {
-            match t.kind() {
-                ty::RawPtr(p) => 1 + ptr_count(p.ty),
-                _ => 0,
-            }
-        }
-
-        // Non-ptr type
-        fn non_ptr(t: Ty<'_>) -> Ty<'_> {
-            match t.kind() {
-                ty::RawPtr(p) => non_ptr(p.ty),
-                _ => t,
-            }
-        }
-
         // The second argument must be a simd vector with an element type that's a pointer
         // to the element type of the first argument
         let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx());
         let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx());
         let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx());
-        let (pointer_count, underlying_ty) = match element_ty1.kind() {
-            ty::RawPtr(p) if p.ty == in_elem && p.mutbl.is_mut() => {
-                (ptr_count(element_ty1), non_ptr(element_ty1))
-            }
-            _ => {
-                require!(
-                    false,
-                    InvalidMonomorphization::ExpectedElementType {
-                        span,
-                        name,
-                        expected_element: element_ty1,
-                        second_arg: arg_tys[1],
-                        in_elem,
-                        in_ty,
-                        mutability: ExpectedPointerMutability::Mut,
-                    }
-                );
-                unreachable!();
-            }
-        };
-        assert!(pointer_count > 0);
-        assert_eq!(pointer_count - 1, ptr_count(element_ty0));
-        assert_eq!(underlying_ty, non_ptr(element_ty0));
+        require!(
+            matches!(
+                element_ty1.kind(),
+                ty::RawPtr(p)
+                    if p.ty == in_elem && p.mutbl.is_mut() && p.ty.kind() == element_ty0.kind()
+            ),
+            InvalidMonomorphization::ExpectedElementType {
+                span,
+                name,
+                expected_element: element_ty1,
+                second_arg: arg_tys[1],
+                in_elem,
+                in_ty,
+                mutability: ExpectedPointerMutability::Mut,
+            }
+        );

         // The element type of the third argument must be a signed integer type of any width:
         match element_ty2.kind() {
@@ -1634,12 +1561,12 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
         let ret_t = bx.type_void();

         // Type of the vector of pointers:
-        let llvm_pointer_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count);
-        let llvm_pointer_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count, bx);
+        let llvm_pointer_vec_ty = llvm_vector_ty(bx, element_ty1, in_len);
+        let llvm_pointer_vec_str = llvm_vector_str(bx, element_ty1, in_len);

         // Type of the vector of elements:
-        let llvm_elem_vec_ty = llvm_vector_ty(bx, underlying_ty, in_len, pointer_count - 1);
-        let llvm_elem_vec_str = llvm_vector_str(underlying_ty, in_len, pointer_count - 1, bx);
+        let llvm_elem_vec_ty = llvm_vector_ty(bx, element_ty0, in_len);
+        let llvm_elem_vec_str = llvm_vector_str(bx, element_ty0, in_len);

         let llvm_intrinsic =
             format!("llvm.masked.scatter.{llvm_elem_vec_str}.{llvm_pointer_vec_str}");
@@ -1857,11 +1784,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>(
             }
         }

-        if in_elem == out_elem {
-            return Ok(args[0].immediate());
-        } else {
-            return Ok(bx.pointercast(args[0].immediate(), llret_ty));
-        }
+        return Ok(args[0].immediate());
     }

     if name == sym::simd_expose_addr {

View file

@@ -1073,7 +1073,7 @@ extern "C" {
     // Operations on array, pointer, and vector types (sequence types)
     pub fn LLVMRustArrayType(ElementType: &Type, ElementCount: u64) -> &Type;
-    pub fn LLVMPointerType(ElementType: &Type, AddressSpace: c_uint) -> &Type;
+    pub fn LLVMPointerTypeInContext(C: &Context, AddressSpace: c_uint) -> &Type;
     pub fn LLVMVectorType(ElementType: &Type, ElementCount: c_uint) -> &Type;

     pub fn LLVMGetElementType(Ty: &Type) -> &Type;


@@ -112,12 +112,6 @@ impl<'ll> CodegenCx<'ll, '_> {
         }
     }

-    pub(crate) fn type_pointee_for_align(&self, align: Align) -> &'ll Type {
-        // FIXME(eddyb) We could find a better approximation if ity.align < align.
-        let ity = Integer::approximate_align(self, align);
-        self.type_from_integer(ity)
-    }
-
     /// Return a LLVM type that has at most the required alignment,
     /// and exactly the required size, as a best-effort padding array.
     pub(crate) fn type_padding_filler(&self, size: Size, align: Align) -> &'ll Type {
@@ -189,17 +183,12 @@ impl<'ll, 'tcx> BaseTypeMethods<'tcx> for CodegenCx<'ll, 'tcx> {
         unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() }
     }

-    fn type_ptr_to(&self, ty: &'ll Type) -> &'ll Type {
-        assert_ne!(
-            self.type_kind(ty),
-            TypeKind::Function,
-            "don't call ptr_to on function types, use ptr_to_llvm_type on FnAbi instead or explicitly specify an address space if it makes sense"
-        );
-        ty.ptr_to(AddressSpace::DATA)
+    fn type_ptr(&self) -> &'ll Type {
+        self.type_ptr_ext(AddressSpace::DATA)
     }

-    fn type_ptr_to_ext(&self, ty: &'ll Type, address_space: AddressSpace) -> &'ll Type {
-        ty.ptr_to(address_space)
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type {
+        unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) }
     }

     fn element_type(&self, ty: &'ll Type) -> &'ll Type {
@@ -247,12 +236,8 @@ impl Type {
         unsafe { llvm::LLVMIntTypeInContext(llcx, num_bits as c_uint) }
     }

-    pub fn i8p_llcx(llcx: &llvm::Context) -> &Type {
-        Type::i8_llcx(llcx).ptr_to(AddressSpace::DATA)
-    }
-
-    fn ptr_to(&self, address_space: AddressSpace) -> &Type {
-        unsafe { llvm::LLVMPointerType(self, address_space.0) }
+    pub fn ptr_llcx(llcx: &llvm::Context) -> &Type {
+        unsafe { llvm::LLVMPointerTypeInContext(llcx, AddressSpace::DATA.0) }
     }
 }

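For illustration only, not part of this commit: a minimal mock of the reshaped type interface. `BaseTypes`, `MockCx`, and the string-valued `Type` are invented for this sketch; the point it demonstrates is that with opaque pointers there is exactly one pointer type per address space, so `type_ptr` no longer needs a pointee argument.

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
struct AddressSpace(u32);

const DATA: AddressSpace = AddressSpace(0);

trait BaseTypes {
    type Type;
    fn type_ptr(&self) -> Self::Type;
    fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
}

// Invented stand-in for a codegen context; renders types as LLVM IR text.
struct MockCx;

impl BaseTypes for MockCx {
    type Type = String;

    fn type_ptr(&self) -> String {
        self.type_ptr_ext(DATA)
    }

    fn type_ptr_ext(&self, address_space: AddressSpace) -> String {
        // One opaque pointer type per address space; no pointee threaded through.
        match address_space {
            DATA => "ptr".to_string(),
            AddressSpace(n) => format!("ptr addrspace({n})"),
        }
    }
}

fn main() {
    let cx = MockCx;
    assert_eq!(cx.type_ptr(), "ptr");
    assert_eq!(cx.type_ptr_ext(AddressSpace(1)), "ptr addrspace(1)");
}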

@@ -23,7 +23,7 @@ fn uncached_llvm_type<'a, 'tcx>(
     match layout.abi {
         Abi::Scalar(_) => bug!("handled elsewhere"),
         Abi::Vector { element, count } => {
-            let element = layout.scalar_llvm_type_at(cx, element, Size::ZERO);
+            let element = layout.scalar_llvm_type_at(cx, element);
             return cx.type_vector(element, count);
         }
         Abi::ScalarPair(..) => {
@@ -179,12 +179,7 @@ pub trait LayoutLlvmExt<'tcx> {
     fn is_llvm_scalar_pair(&self) -> bool;
     fn llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
     fn immediate_llvm_type<'a>(&self, cx: &CodegenCx<'a, 'tcx>) -> &'a Type;
-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type;
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type;
     fn scalar_pair_element_llvm_type<'a>(
         &self,
         cx: &CodegenCx<'a, 'tcx>,
@@ -230,16 +225,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return llty;
         }
         let llty = match *self.ty.kind() {
-            ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                cx.type_ptr_to(cx.layout_of(ty).llvm_type(cx))
-            }
-            ty::Adt(def, _) if def.is_box() => {
-                cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).llvm_type(cx))
-            }
+            ty::Ref(..) | ty::RawPtr(_) => cx.type_ptr(),
+            ty::Adt(def, _) if def.is_box() => cx.type_ptr(),
             ty::FnPtr(sig) => {
                 cx.fn_ptr_backend_type(cx.fn_abi_of_fn_ptr(sig, ty::List::empty()))
             }
-            _ => self.scalar_llvm_type_at(cx, scalar, Size::ZERO),
+            _ => self.scalar_llvm_type_at(cx, scalar),
         };
         cx.scalar_lltypes.borrow_mut().insert(self.ty, llty);
         return llty;
@@ -300,25 +291,12 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
         self.llvm_type(cx)
     }

-    fn scalar_llvm_type_at<'a>(
-        &self,
-        cx: &CodegenCx<'a, 'tcx>,
-        scalar: Scalar,
-        offset: Size,
-    ) -> &'a Type {
+    fn scalar_llvm_type_at<'a>(&self, cx: &CodegenCx<'a, 'tcx>, scalar: Scalar) -> &'a Type {
         match scalar.primitive() {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer(address_space) => {
-                // If we know the alignment, pick something better than i8.
-                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                    cx.type_pointee_for_align(pointee.align)
-                } else {
-                    cx.type_i8()
-                };
-                cx.type_ptr_to_ext(pointee, address_space)
-            }
+            Pointer(address_space) => cx.type_ptr_ext(address_space),
         }
     }
@@ -364,8 +342,7 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             return cx.type_i1();
         }

-        let offset = if index == 0 { Size::ZERO } else { a.size(cx).align_to(b.align(cx).abi) };
-        self.scalar_llvm_type_at(cx, scalar, offset)
+        self.scalar_llvm_type_at(cx, scalar)
     }

     fn llvm_field_index<'a>(&self, cx: &CodegenCx<'a, 'tcx>, index: usize) -> u64 {

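Context for the hunks above, as a stand-alone check rather than anything from this commit: every thin pointer (`&T`, `*const T`, `Box<T>`) now lowers to the same opaque `ptr`, which is consistent with all of them sharing one machine representation.

use std::mem::size_of;

fn main() {
    // All thin pointers share one layout, matching the single `ptr` the
    // backend now emits for ty::Ref, ty::RawPtr, and Box.
    assert_eq!(size_of::<&u8>(), size_of::<*const u64>());
    assert_eq!(size_of::<Box<i32>>(), size_of::<&i32>());
    // Niche optimization: Option<Box<T>> is still a single pointer.
    assert_eq!(size_of::<Option<Box<i32>>>(), size_of::<&i32>());
}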

@@ -5,7 +5,7 @@ use crate::value::Value;
 use rustc_codegen_ssa::mir::operand::OperandRef;
 use rustc_codegen_ssa::{
     common::IntPredicate,
-    traits::{BaseTypeMethods, BuilderMethods, ConstMethods, DerivedTypeMethods},
+    traits::{BaseTypeMethods, BuilderMethods, ConstMethods},
 };
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf};
 use rustc_middle::ty::Ty;
@@ -26,24 +26,18 @@ fn round_pointer_up_to_alignment<'ll>(
 fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     bx: &mut Builder<'_, 'll, 'tcx>,
     list: OperandRef<'tcx, &'ll Value>,
-    llty: &'ll Type,
     size: Size,
     align: Align,
     slot_size: Align,
     allow_higher_align: bool,
 ) -> (&'ll Value, Align) {
-    let va_list_ty = bx.type_i8p();
-    let va_list_ptr_ty = bx.type_ptr_to(va_list_ty);
-    let va_list_addr = if list.layout.llvm_type(bx.cx) != va_list_ptr_ty {
-        bx.bitcast(list.immediate(), va_list_ptr_ty)
-    } else {
-        list.immediate()
-    };
+    let va_list_ty = bx.type_ptr();
+    let va_list_addr = list.immediate();

     let ptr = bx.load(va_list_ty, va_list_addr, bx.tcx().data_layout.pointer_align.abi);

     let (addr, addr_align) = if allow_higher_align && align > slot_size {
-        (round_pointer_up_to_alignment(bx, ptr, align, bx.cx().type_i8p()), align)
+        (round_pointer_up_to_alignment(bx, ptr, align, bx.type_ptr()), align)
     } else {
         (ptr, slot_size)
     };
@@ -56,9 +50,9 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>(
     if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big {
         let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32);
         let adjusted = bx.inbounds_gep(bx.type_i8(), addr, &[adjusted_size]);
-        (bx.bitcast(adjusted, bx.cx().type_ptr_to(llty)), addr_align)
+        (adjusted, addr_align)
     } else {
-        (bx.bitcast(addr, bx.cx().type_ptr_to(llty)), addr_align)
+        (addr, addr_align)
     }
 }
@@ -81,7 +75,7 @@ fn emit_ptr_va_arg<'ll, 'tcx>(
         (layout.llvm_type(bx.cx), layout.size, layout.align)
     };
     let (addr, addr_align) =
-        emit_direct_ptr_va_arg(bx, list, llty, size, align.abi, slot_size, allow_higher_align);
+        emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align);
     if indirect {
         let tmp_ret = bx.load(llty, addr, addr_align);
         bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi)
@@ -146,7 +140,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
     bx.cond_br(use_stack, on_stack, in_reg);

     bx.switch_to_block(in_reg);
-    let top_type = bx.type_i8p();
+    let top_type = bx.type_ptr();
     let top = bx.struct_gep(va_list_ty, va_list_addr, reg_top_index);
     let top = bx.load(top_type, top, bx.tcx().data_layout.pointer_align.abi);
@@ -158,7 +152,6 @@ fn emit_aapcs_va_arg<'ll, 'tcx>(
         reg_addr = bx.gep(bx.type_i8(), reg_addr, &[offset]);
     }
     let reg_type = layout.llvm_type(bx);
-    let reg_addr = bx.bitcast(reg_addr, bx.cx.type_ptr_to(reg_type));
     let reg_value = bx.load(reg_type, reg_addr, layout.align.abi);
     bx.br(end);
@@ -218,7 +211,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the register save area.
     let reg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 3));
-    let reg_ptr_v = bx.load(bx.type_i8p(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let reg_ptr_v = bx.load(bx.type_ptr(), reg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let scaled_reg_count = bx.mul(reg_count_v, bx.const_u64(8));
     let reg_off = bx.add(scaled_reg_count, bx.const_u64(reg_save_index * 8 + reg_padding));
     let reg_addr = bx.gep(bx.type_i8(), reg_ptr_v, &[reg_off]);
@@ -234,7 +227,7 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Work out the address of the value in the argument overflow area.
     let arg_ptr =
         bx.struct_gep(va_list_ty, va_list_addr, va_list_layout.llvm_field_index(bx.cx, 2));
-    let arg_ptr_v = bx.load(bx.type_i8p(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
+    let arg_ptr_v = bx.load(bx.type_ptr(), arg_ptr, bx.tcx().data_layout.pointer_align.abi);
     let arg_off = bx.const_u64(padding);
     let mem_addr = bx.gep(bx.type_i8(), arg_ptr_v, &[arg_off]);
@@ -246,14 +239,12 @@ fn emit_s390x_va_arg<'ll, 'tcx>(
     // Return the appropriate result.
     bx.switch_to_block(end);
-    let val_addr = bx.phi(bx.type_i8p(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
+    let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]);
     let val_type = layout.llvm_type(bx);
     let val_addr = if indirect {
-        let ptr_type = bx.cx.type_ptr_to(val_type);
-        let ptr_addr = bx.bitcast(val_addr, bx.cx.type_ptr_to(ptr_type));
-        bx.load(ptr_type, ptr_addr, bx.tcx().data_layout.pointer_align.abi)
+        bx.load(bx.cx.type_ptr(), val_addr, bx.tcx().data_layout.pointer_align.abi)
     } else {
-        bx.bitcast(val_addr, bx.cx.type_ptr_to(val_type))
+        val_addr
     };
     bx.load(val_type, val_addr, layout.align.abi)
 }

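For context: the `va_arg` lowering touched above backs Rust's unstable `c_variadic` feature. A nightly-only sketch, assuming the current `mut args: ...` syntax and `arg::<T>()` accessor (not part of this commit):

#![feature(c_variadic)]

// Each `args.arg::<usize>()` call below compiles through emit_ptr_va_arg,
// which now passes plain opaque pointers instead of bitcasting va_list slots.
pub unsafe extern "C" fn sum(n: usize, mut args: ...) -> usize {
    let mut total = 0;
    for _ in 0..n {
        total += args.arg::<usize>();
    }
    total
}

fn main() {
    unsafe {
        assert_eq!(sum(3, 1usize, 2usize, 3usize), 6);
    }
}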

@@ -165,50 +165,27 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 cx.tcx().vtable_trait_upcasting_coercion_new_vptr_slot((source, target));
             if let Some(entry_idx) = vptr_entry_idx {
-                let ptr_ty = cx.type_i8p();
+                let ptr_ty = cx.type_ptr();
                 let ptr_align = cx.tcx().data_layout.pointer_align.abi;
-                let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
-                let llvtable = bx.pointercast(old_info, bx.type_ptr_to(ptr_ty));
                 let gep = bx.inbounds_gep(
                     ptr_ty,
-                    llvtable,
+                    old_info,
                     &[bx.const_usize(u64::try_from(entry_idx).unwrap())],
                 );
                 let new_vptr = bx.load(ptr_ty, gep, ptr_align);
                 bx.nonnull_metadata(new_vptr);
                 // VTable loads are invariant.
                 bx.set_invariant_load(new_vptr);
-                bx.pointercast(new_vptr, vtable_ptr_ty)
+                new_vptr
             } else {
                 old_info
             }
         }
-        (_, &ty::Dynamic(ref data, _, target_dyn_kind)) => {
-            let vtable_ptr_ty = vtable_ptr_ty(cx, target, target_dyn_kind);
-            cx.const_ptrcast(meth::get_vtable(cx, source, data.principal()), vtable_ptr_ty)
-        }
+        (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()),
         _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target),
     }
 }

-// Returns the vtable pointer type of a `dyn` or `dyn*` type
-fn vtable_ptr_ty<'tcx, Cx: CodegenMethods<'tcx>>(
-    cx: &Cx,
-    target: Ty<'tcx>,
-    kind: ty::DynKind,
-) -> <Cx as BackendTypes>::Type {
-    cx.scalar_pair_element_backend_type(
-        cx.layout_of(match kind {
-            // vtable is the second field of `*mut dyn Trait`
-            ty::Dyn => Ty::new_mut_ptr(cx.tcx(), target),
-            // vtable is the second field of `dyn* Trait`
-            ty::DynStar => target,
-        }),
-        1,
-        true,
-    )
-}
-
 /// Coerces `src` to `dst_ty`. `src_ty` must be a pointer.
 pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx: &mut Bx,
@@ -222,8 +199,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         (&ty::Ref(_, a, _), &ty::Ref(_, b, _) | &ty::RawPtr(ty::TypeAndMut { ty: b, .. }))
         | (&ty::RawPtr(ty::TypeAndMut { ty: a, .. }), &ty::RawPtr(ty::TypeAndMut { ty: b, .. })) => {
             assert_eq!(bx.cx().type_is_sized(a), old_info.is_none());
-            let ptr_ty = bx.cx().type_ptr_to(bx.cx().backend_type(bx.cx().layout_of(b)));
-            (bx.pointercast(src, ptr_ty), unsized_info(bx, a, b, old_info))
+            (src, unsized_info(bx, a, b, old_info))
         }
         (&ty::Adt(def_a, _), &ty::Adt(def_b, _)) => {
             assert_eq!(def_a, def_b);
@@ -248,11 +224,7 @@ pub fn unsize_ptr<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
                 assert_eq!(result, None);
                 result = Some(unsize_ptr(bx, src, src_f.ty, dst_f.ty, old_info));
             }
-            let (lldata, llextra) = result.unwrap();
-            let lldata_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 0, true);
-            let llextra_ty = bx.cx().scalar_pair_element_backend_type(dst_layout, 1, true);
-            // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-            (bx.bitcast(lldata, lldata_ty), bx.bitcast(llextra, llextra_ty))
+            result.unwrap()
         }
         _ => bug!("unsize_ptr: called on bad types"),
     }
@@ -271,11 +243,9 @@ pub fn cast_to_dyn_star<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
         matches!(dst_ty.kind(), ty::Dynamic(_, _, ty::DynStar)),
         "destination type must be a dyn*"
     );
-    // FIXME(dyn-star): We can remove this when all supported LLVMs use opaque ptrs only.
-    let unit_ptr = bx.cx().type_ptr_to(bx.cx().type_struct(&[], false));
     let src = match bx.cx().type_kind(bx.cx().backend_type(src_ty_and_layout)) {
-        TypeKind::Pointer => bx.pointercast(src, unit_ptr),
-        TypeKind::Integer => bx.inttoptr(src, unit_ptr),
+        TypeKind::Pointer => src,
+        TypeKind::Integer => bx.inttoptr(src, bx.type_ptr()),
         // FIXME(dyn-star): We probably have to do a bitcast first, then inttoptr.
         kind => bug!("unexpected TypeKind for left-hand side of `dyn*` cast: {kind:?}"),
     };
@@ -398,11 +368,6 @@ pub fn memcpy_ty<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     if flags == MemFlags::empty()
         && let Some(bty) = bx.cx().scalar_copy_backend_type(layout)
     {
-        // I look forward to only supporting opaque pointers
-        let pty = bx.type_ptr_to(bty);
-        let src = bx.pointercast(src, pty);
-        let dst = bx.pointercast(dst, pty);
         let temp = bx.load(bty, src, src_align);
         bx.store(temp, dst, dst_align);
     } else {
@@ -456,7 +421,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     // The entry function is either `int main(void)` or `int main(int argc, char **argv)`,
     // depending on whether the target needs `argc` and `argv` to be passed in.
     let llfty = if cx.sess().target.main_needs_argc_argv {
-        cx.type_func(&[cx.type_int(), cx.type_ptr_to(cx.type_i8p())], cx.type_int())
+        cx.type_func(&[cx.type_int(), cx.type_ptr()], cx.type_int())
     } else {
         cx.type_func(&[], cx.type_int())
     };
@@ -490,7 +455,7 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     bx.insert_reference_to_gdb_debug_scripts_section_global();

     let isize_ty = cx.type_isize();
-    let i8pp_ty = cx.type_ptr_to(cx.type_i8p());
+    let ptr_ty = cx.type_ptr();
     let (arg_argc, arg_argv) = get_argc_argv(cx, &mut bx);

     let (start_fn, start_ty, args) = if let EntryFnType::Main { sigpipe } = entry_type {
@@ -509,12 +474,11 @@ pub fn maybe_create_entry_wrapper<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
             let i8_ty = cx.type_i8();
             let arg_sigpipe = bx.const_u8(sigpipe);

-            let start_ty =
-                cx.type_func(&[cx.val_ty(rust_main), isize_ty, i8pp_ty, i8_ty], isize_ty);
+            let start_ty = cx.type_func(&[cx.val_ty(rust_main), isize_ty, ptr_ty, i8_ty], isize_ty);
             (start_fn, start_ty, vec![rust_main, arg_argc, arg_argv, arg_sigpipe])
         } else {
             debug!("using user-defined start fn");
-            let start_ty = cx.type_func(&[isize_ty, i8pp_ty], isize_ty);
+            let start_ty = cx.type_func(&[isize_ty, ptr_ty], isize_ty);
             (rust_main, start_ty, vec![arg_argc, arg_argv])
         };
@@ -541,7 +505,7 @@ fn get_argc_argv<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
     } else {
         // The Rust start function doesn't need `argc` and `argv`, so just pass zeros.
         let arg_argc = bx.const_int(cx.type_int(), 0);
-        let arg_argv = bx.const_null(cx.type_ptr_to(cx.type_i8p()));
+        let arg_argv = bx.const_null(cx.type_ptr());
         (arg_argc, arg_argv)
     }
 }

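Not part of the commit: a stand-alone view of what `unsize_ptr` now produces. The data pointer is reused untouched and only the metadata (here, a length) is attached, which is why the `pointercast`/`bitcast` calls above could be dropped.

fn main() {
    let array: [i32; 4] = [1, 2, 3, 4];
    let thin: &[i32; 4] = &array;
    // Unsizing coercion: same data pointer, plus a length as metadata.
    let fat: &[i32] = thin;
    assert_eq!(fat.len(), 4);
    assert_eq!(thin.as_ptr(), fat.as_ptr());
}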

@@ -23,7 +23,6 @@ impl<'a, 'tcx> VirtualIndex {
         // Load the data pointer from the object.
         debug!("get_fn({llvtable:?}, {ty:?}, {self:?})");
         let llty = bx.fn_ptr_backend_type(fn_abi);
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));

         if bx.cx().sess().opts.unstable_opts.virtual_function_elimination
             && bx.cx().sess().lto() == Lto::Fat
@@ -33,7 +32,7 @@ impl<'a, 'tcx> VirtualIndex {
                 .unwrap();
             let vtable_byte_offset = self.0 * bx.data_layout().pointer_size.bytes();
             let func = bx.type_checked_load(llvtable, vtable_byte_offset, typeid);
-            bx.pointercast(func, llty)
+            func
         } else {
             let ptr_align = bx.tcx().data_layout.pointer_align.abi;
             let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
@@ -54,7 +53,6 @@ impl<'a, 'tcx> VirtualIndex {
         debug!("get_int({:?}, {:?})", llvtable, self);

         let llty = bx.type_isize();
-        let llvtable = bx.pointercast(llvtable, bx.type_ptr_to(llty));
         let usize_align = bx.tcx().data_layout.pointer_align.abi;
         let gep = bx.inbounds_gep(llty, llvtable, &[bx.const_usize(self.0)]);
         let ptr = bx.load(llty, gep, usize_align);

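The `VirtualIndex` accessors above now index straight into the vtable with an opaque-pointer GEP, no casts. For orientation only, the vtables being indexed come from ordinary trait-object coercions and calls:

use std::fmt::Debug;

fn main() {
    let x: i32 = 5;
    // Coercion builds the (data ptr, vtable ptr) pair; the Debug::fmt call
    // below goes through a vtable slot load like VirtualIndex::get_fn emits.
    let obj: &dyn Debug = &x;
    println!("{obj:?}");
}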

@@ -439,8 +439,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     ZeroSized => bug!("ZST return value shouldn't be in PassMode::Cast"),
                 };
                 let ty = bx.cast_backend_type(cast_ty);
-                let addr = bx.pointercast(llslot, bx.type_ptr_to(ty));
-                bx.load(ty, addr, self.fn_abi.ret.layout.align.abi)
+                bx.load(ty, llslot, self.fn_abi.ret.layout.align.abi)
             }
         };
         bx.ret(llval);
@@ -852,9 +851,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             Some(intrinsic) => {
                 let dest = match ret_dest {
                     _ if fn_abi.ret.is_indirect() => llargs[0],
-                    ReturnDest::Nothing => {
-                        bx.const_undef(bx.type_ptr_to(bx.arg_memory_ty(&fn_abi.ret)))
-                    }
+                    ReturnDest::Nothing => bx.const_undef(bx.type_ptr()),
                     ReturnDest::IndirectOperand(dst, _) | ReturnDest::Store(dst) => dst.llval,
                     ReturnDest::DirectOperand(_) => {
                         bug!("Cannot use direct operand with an intrinsic call")
@@ -1424,8 +1421,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 // Have to load the argument, maybe while casting it.
                 if let PassMode::Cast(ty, _) = &arg.mode {
                     let llty = bx.cast_backend_type(ty);
-                    let addr = bx.pointercast(llval, bx.type_ptr_to(llty));
-                    llval = bx.load(llty, addr, align.min(arg.layout.align.abi));
+                    llval = bx.load(llty, llval, align.min(arg.layout.align.abi));
                 } else {
                     // We can't use `PlaceRef::load` here because the argument
                     // may have a type we don't treat as immediate, but the ABI
@@ -1630,7 +1626,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // represents that this is a catch-all block.
             bx = Bx::build(self.cx, cp_llbb);
             let null =
-                bx.const_null(bx.type_i8p_ext(bx.cx().data_layout().instruction_address_space));
+                bx.const_null(bx.type_ptr_ext(bx.cx().data_layout().instruction_address_space));
             let sixty_four = bx.const_i32(64);
             funclet = Some(bx.catch_pad(cs, &[null, sixty_four, null]));
         } else {


@@ -270,7 +270,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             sym::const_allocate => {
                 // returns a null pointer at runtime.
-                bx.const_null(bx.type_i8p())
+                bx.const_null(bx.type_ptr())
             }

             sym::const_deallocate => {
@@ -310,14 +310,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let weak = instruction == "cxchgweak";
-                    let mut dst = args[0].immediate();
+                    let dst = args[0].immediate();
                     let mut cmp = args[1].immediate();
                     let mut src = args[2].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        dst = bx.pointercast(dst, ptr_llty);
                         cmp = bx.ptrtoint(cmp, bx.type_isize());
                         src = bx.ptrtoint(src, bx.type_isize());
                     }
@@ -342,13 +340,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let layout = bx.layout_of(ty);
                     let size = layout.size;
-                    let mut source = args[0].immediate();
+                    let source = args[0].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first...
                         let llty = bx.type_isize();
-                        let ptr_llty = bx.type_ptr_to(llty);
-                        source = bx.pointercast(source, ptr_llty);
                         let result = bx.atomic_load(llty, source, parse_ordering(bx, ordering), size);
                         // ... and then cast the result back to a pointer
                         bx.inttoptr(result, bx.backend_type(layout))
@@ -365,12 +361,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
                     let size = bx.layout_of(ty).size;
                     let mut val = args[1].immediate();
-                    let mut ptr = args[0].immediate();
+                    let ptr = args[0].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        ptr = bx.pointercast(ptr, ptr_llty);
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_store(val, ptr, parse_ordering(bx, ordering), size);
@@ -409,13 +403,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let ty = fn_args.type_at(0);
                 if int_type_width_signed(ty, bx.tcx()).is_some() || ty.is_unsafe_ptr() {
-                    let mut ptr = args[0].immediate();
+                    let ptr = args[0].immediate();
                     let mut val = args[1].immediate();
                     if ty.is_unsafe_ptr() {
                         // Some platforms do not support atomic operations on pointers,
                         // so we cast to integer first.
-                        let ptr_llty = bx.type_ptr_to(bx.type_isize());
-                        ptr = bx.pointercast(ptr, ptr_llty);
                         val = bx.ptrtoint(val, bx.type_isize());
                     }
                     bx.atomic_rmw(atom_op, ptr, val, parse_ordering(bx, ordering))
@@ -470,10 +462,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         };

         if !fn_abi.ret.is_ignore() {
-            if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
-                let ptr_llty = bx.type_ptr_to(bx.cast_backend_type(ty));
-                let ptr = bx.pointercast(result.llval, ptr_llty);
-                bx.store(llval, ptr, result.align);
+            if let PassMode::Cast(..) = &fn_abi.ret.mode {
+                bx.store(llval, result.llval, result.align);
             } else {
                 OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout)
                     .val

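For context: the atomic hunks above keep the `ptrtoint`/`inttoptr` casts on the values (for targets without pointer atomics) but no longer cast the destination operand, since it is already an opaque `ptr`. The user-facing surface being lowered here is `AtomicPtr`; a stand-alone sketch, not from this commit:

use std::sync::atomic::{AtomicPtr, Ordering};

fn main() {
    let mut a = 1i32;
    let mut b = 2i32;
    let slot = AtomicPtr::new(&mut a as *mut i32);
    // Lowers through the cxchg path patched above; on some targets the
    // pointer values round-trip through isize, the address operand does not.
    let prev = slot.compare_exchange(
        &mut a as *mut i32,
        &mut b as *mut i32,
        Ordering::SeqCst,
        Ordering::SeqCst,
    );
    assert!(prev.is_ok());
    assert_eq!(unsafe { *slot.load(Ordering::SeqCst) }, 2);
}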

@@ -2,7 +2,6 @@ use super::place::PlaceRef;
 use super::{FunctionCx, LocalRef};

 use crate::base;
-use crate::common::TypeKind;
 use crate::glue;
 use crate::traits::*;
 use crate::MemFlags;
@@ -132,7 +131,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
     ) -> Self {
         let alloc_align = alloc.inner().align;
         assert_eq!(alloc_align, layout.align.abi);
-        let ty = bx.type_ptr_to(bx.cx().backend_type(layout));

         let read_scalar = |start, size, s: abi::Scalar, ty| {
             let val = alloc
@@ -156,7 +154,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             Abi::Scalar(s @ abi::Scalar::Initialized { .. }) => {
                 let size = s.size(bx);
                 assert_eq!(size, layout.size, "abi::Scalar size does not match layout size");
-                let val = read_scalar(Size::ZERO, size, s, ty);
+                let val = read_scalar(Size::ZERO, size, s, bx.type_ptr());
                 OperandRef { val: OperandValue::Immediate(val), layout }
             }
             Abi::ScalarPair(
@@ -187,7 +185,6 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
         let base_addr = bx.static_addr_of(init, alloc_align, None);

         let llval = bx.const_ptr_byte_offset(base_addr, offset);
-        let llval = bx.const_bitcast(llval, ty);
         bx.load_operand(PlaceRef::new_sized(llval, layout))
     }
 }
@@ -314,38 +311,22 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
             ) => {
                 // Bools in union fields needs to be truncated.
                 *llval = bx.to_immediate(*llval, field);
-                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                let ty = bx.cx().immediate_backend_type(field);
-                if bx.type_kind(ty) == TypeKind::Pointer {
-                    *llval = bx.pointercast(*llval, ty);
-                }
             }
             (OperandValue::Pair(a, b), Abi::ScalarPair(a_abi, b_abi)) => {
                 // Bools in union fields needs to be truncated.
                 *a = bx.to_immediate_scalar(*a, a_abi);
                 *b = bx.to_immediate_scalar(*b, b_abi);
-                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
-                let a_ty = bx.cx().scalar_pair_element_backend_type(field, 0, true);
-                let b_ty = bx.cx().scalar_pair_element_backend_type(field, 1, true);
-                if bx.type_kind(a_ty) == TypeKind::Pointer {
-                    *a = bx.pointercast(*a, a_ty);
-                }
-                if bx.type_kind(b_ty) == TypeKind::Pointer {
-                    *b = bx.pointercast(*b, b_ty);
-                }
             }
             // Newtype vector of array, e.g. #[repr(simd)] struct S([i32; 4]);
             (OperandValue::Immediate(llval), Abi::Aggregate { sized: true }) => {
                 assert!(matches!(self.layout.abi, Abi::Vector { .. }));
-                let llty = bx.cx().backend_type(self.layout);
                 let llfield_ty = bx.cx().backend_type(field);
                 // Can't bitcast an aggregate, so round trip through memory.
-                let lltemp = bx.alloca(llfield_ty, field.align.abi);
-                let llptr = bx.pointercast(lltemp, bx.cx().type_ptr_to(llty));
+                let llptr = bx.alloca(llfield_ty, field.align.abi);
                 bx.store(*llval, llptr, field.align.abi);
-                *llval = bx.load(llfield_ty, lltemp, field.align.abi);
+                *llval = bx.load(llfield_ty, llptr, field.align.abi);
             }
             (OperandValue::Immediate(_), Abi::Uninhabited | Abi::Aggregate { sized: false }) => {
                 bug!()
@@ -380,9 +361,8 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
             let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
             OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
         } else {
-            let bty = bx.cx().backend_type(layout);
-            let ptr_bty = bx.cx().type_ptr_to(bty);
-            OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+            let ptr = bx.cx().type_ptr();
+            OperandValue::Ref(bx.const_poison(ptr), None, layout.align.abi)
         }
     }
@@ -434,8 +414,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let ty = bx.backend_type(dest.layout);
-            let ptr = bx.pointercast(r, bx.type_ptr_to(ty));
-            let val = bx.load(ty, ptr, source_align);
+            let val = bx.load(ty, r, source_align);
             bx.store_with_flags(val, dest.llval, dest.align, flags);
             return;
         }


@@ -115,8 +115,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                 // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
-                let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-                bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
+                bx.gep(bx.cx().type_i8(), self.llval, &[bx.const_usize(offset.bytes())])
             }
             Abi::Scalar(_) | Abi::ScalarPair(..) => {
                 // All fields of Scalar and ScalarPair layouts must have been handled by this point.
@@ -133,8 +132,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             }
         };
         PlaceRef {
-            // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
-            llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
+            llval,
             llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
             layout: field,
             align: effective_field_align,
@@ -194,20 +192,10 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         debug!("struct_field_ptr: DST field offset: {:?}", offset);

-        // Cast and adjust pointer.
-        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
-        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);
+        // Adjust pointer.
+        let ptr = bx.gep(bx.cx().type_i8(), self.llval, &[offset]);

-        // Finally, cast back to the type expected.
-        let ll_fty = bx.cx().backend_type(field);
-        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
-
-        PlaceRef {
-            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
-            llextra: self.llextra,
-            layout: field,
-            align: effective_field_align,
-        }
+        PlaceRef { llval: ptr, llextra: self.llextra, layout: field, align: effective_field_align }
     }

     /// Obtain the actual discriminant of a value.
@@ -416,11 +404,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let mut downcast = *self;
         downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
-
-        // Cast to the appropriate variant struct type.
-        let variant_ty = bx.cx().backend_type(downcast.layout);
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
         downcast
     }
@@ -431,11 +414,6 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
     ) -> Self {
         let mut downcast = *self;
         downcast.layout = bx.cx().layout_of(ty);
-
-        // Cast to the appropriate type.
-        let variant_ty = bx.cx().backend_type(downcast.layout);
-        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
-
         downcast
     }
@@ -515,13 +493,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     ));
                 }

-                // Cast the place pointer type to the new
-                // array or slice type (`*[%_; new_len]`).
-                subslice.llval = bx.pointercast(
-                    subslice.llval,
-                    bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
-                );
-
                 subslice
             }
             mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),

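A stand-alone analogue (not from this commit) of the byte-wise field projection the hunks above switch to: compute the field address as a raw byte offset, then read at the field type. `offset_of!` is stable std, and `repr(C)` makes the offset well-defined.

use std::mem::offset_of;

#[repr(C)]
struct Pair {
    a: u32,
    b: u32,
}

fn main() {
    let p = Pair { a: 1, b: 2 };
    let base = &p as *const Pair;
    // Byte GEP on i8, like struct_field_ptr now emits...
    let b_ptr = unsafe { base.cast::<u8>().add(offset_of!(Pair, b)) };
    // ...followed by a typed load; no pointer casts back and forth.
    let b = unsafe { b_ptr.cast::<u32>().read() };
    assert_eq!(b, 2);
}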

@@ -182,9 +182,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Immediate(..) | OperandValue::Pair(..) => {
                 // When we have immediate(s), the alignment of the source is irrelevant,
                 // so we can store them using the destination's alignment.
-                let llty = bx.backend_type(src.layout);
-                let cast_ptr = bx.pointercast(dst.llval, bx.type_ptr_to(llty));
-                src.val.store(bx, PlaceRef::new_sized_aligned(cast_ptr, src.layout, dst.align));
+                src.val.store(bx, PlaceRef::new_sized_aligned(dst.llval, src.layout, dst.align));
             }
         }
     }
@@ -222,9 +220,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             OperandValue::Ref(ptr, meta, align) => {
                 debug_assert_eq!(meta, None);
                 debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
-                let cast_bty = bx.backend_type(cast);
-                let cast_ptr = bx.pointercast(ptr, bx.type_ptr_to(cast_bty));
-                let fake_place = PlaceRef::new_sized_aligned(cast_ptr, cast, align);
+                let fake_place = PlaceRef::new_sized_aligned(ptr, cast, align);
                 Some(bx.load_operand(fake_place).val)
             }
             OperandValue::ZeroSized => {
@@ -479,18 +475,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 {
                     if let OperandValue::Pair(data_ptr, meta) = operand.val {
                         if bx.cx().is_backend_scalar_pair(cast) {
-                            let data_cast = bx.pointercast(
-                                data_ptr,
-                                bx.cx().scalar_pair_element_backend_type(cast, 0, true),
-                            );
-                            OperandValue::Pair(data_cast, meta)
+                            OperandValue::Pair(data_ptr, meta)
                         } else {
-                            // cast to thin-ptr
-                            // Cast of fat-ptr to thin-ptr is an extraction of data-ptr and
-                            // pointer-cast of that pointer to desired pointer type.
-                            let llcast_ty = bx.cx().immediate_backend_type(cast);
-                            let llval = bx.pointercast(data_ptr, llcast_ty);
-                            OperandValue::Immediate(llval)
+                            // Cast of fat-ptr to thin-ptr is an extraction of data-ptr.
+                            OperandValue::Immediate(data_ptr)
                         }
                     } else {
                         bug!("unexpected non-pair operand");
@@ -735,13 +723,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::Rvalue::ShallowInitBox(ref operand, content_ty) => {
                 let operand = self.codegen_operand(bx, operand);
-                let lloperand = operand.immediate();
+                let val = operand.immediate();

                 let content_ty = self.monomorphize(content_ty);
                 let box_layout = bx.cx().layout_of(Ty::new_box(bx.tcx(), content_ty));
-                let llty_ptr = bx.cx().backend_type(box_layout);
-                let val = bx.pointercast(lloperand, llty_ptr);

                 OperandRef { val: OperandValue::Immediate(val), layout: box_layout }
             }

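The rewritten comment above says it all: a fat-to-thin pointer cast is now purely an extraction of the data pointer, with no trailing pointercast. A stand-alone check of the same fact at the source level:

fn main() {
    let s: &str = "hello";
    let fat: *const str = s;
    // Fat -> thin cast keeps only the data pointer.
    let thin = fat as *const u8;
    assert_eq!(thin, s.as_ptr());
}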

@@ -36,7 +36,6 @@ pub trait ConstMethods<'tcx>: BackendTypes {
     fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value;

-    fn const_ptrcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
     fn const_bitcast(&self, val: Self::Value, ty: Self::Type) -> Self::Value;
     fn const_ptr_byte_offset(&self, val: Self::Value, offset: abi::Size) -> Self::Value;
 }


@@ -26,8 +26,8 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
     fn type_func(&self, args: &[Self::Type], ret: Self::Type) -> Self::Type;
     fn type_struct(&self, els: &[Self::Type], packed: bool) -> Self::Type;
     fn type_kind(&self, ty: Self::Type) -> TypeKind;
-    fn type_ptr_to(&self, ty: Self::Type) -> Self::Type;
-    fn type_ptr_to_ext(&self, ty: Self::Type, address_space: AddressSpace) -> Self::Type;
+    fn type_ptr(&self) -> Self::Type;
+    fn type_ptr_ext(&self, address_space: AddressSpace) -> Self::Type;
     fn element_type(&self, ty: Self::Type) -> Self::Type;

     /// Returns the number of elements in `self` if it is a LLVM vector type.
@@ -42,14 +42,6 @@ pub trait BaseTypeMethods<'tcx>: Backend<'tcx> {
 }

 pub trait DerivedTypeMethods<'tcx>: BaseTypeMethods<'tcx> + MiscMethods<'tcx> {
-    fn type_i8p(&self) -> Self::Type {
-        self.type_i8p_ext(AddressSpace::DATA)
-    }
-
-    fn type_i8p_ext(&self, address_space: AddressSpace) -> Self::Type {
-        self.type_ptr_to_ext(self.type_i8(), address_space)
-    }
-
     fn type_int(&self) -> Self::Type {
         match &self.sess().target.c_int_width[..] {
             "16" => self.type_i16(),


@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
                            values: Vec2<f32>) -> Vec2<f32> {
-    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.{{.+}}(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
+    // CHECK: call <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x float> {{.*}})
     simd_gather(values, pointers, mask)
 }
@@ -31,6 +31,6 @@ pub unsafe fn gather_f32x2(pointers: Vec2<*const f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn gather_pf32x2(pointers: Vec2<*const *const f32>, mask: Vec2<i32>,
                             values: Vec2<*const f32>) -> Vec2<*const f32> {
-    // CHECK: call <2 x ptr> @llvm.masked.gather.{{.+}}(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
+    // CHECK: call <2 x ptr> @llvm.masked.gather.v2p0.v2p0(<2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}}, <2 x ptr> {{.*}})
     simd_gather(values, pointers, mask)
 }

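The tightened CHECK lines pin the second mangling component to `v2p0`: a two-element vector of opaque pointers in address space 0, with no pointee type in the name. A nightly-only sketch that exercises the same gather lowering through portable SIMD (the `portable_simd` API is assumed as of current nightlies and may shift):

#![feature(portable_simd)]
use std::simd::prelude::*;

fn main() {
    let data = [10i32, 20, 30, 40, 50];
    let idxs = Simd::from_array([4usize, 0, 2, 1]);
    // Lowers to an llvm.masked.gather.v4i32.v4p0 call under opaque pointers.
    let got = Simd::gather_or_default(&data, idxs);
    assert_eq!(got.to_array(), [50, 10, 30, 20]);
}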

@@ -23,7 +23,7 @@ extern "platform-intrinsic" {
 #[no_mangle]
 pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
                             values: Vec2<f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0{{.*}}(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2f32.v2p0(<2 x float> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }
@@ -32,6 +32,6 @@ pub unsafe fn scatter_f32x2(pointers: Vec2<*mut f32>, mask: Vec2<i32>,
 #[no_mangle]
 pub unsafe fn scatter_pf32x2(pointers: Vec2<*mut *const f32>, mask: Vec2<i32>,
                              values: Vec2<*const f32>) {
-    // CHECK: call void @llvm.masked.scatter.v2p0{{.*}}.v2p0{{.*}}(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
+    // CHECK: call void @llvm.masked.scatter.v2p0.v2p0(<2 x ptr> {{.*}}, <2 x ptr> {{.*}}, i32 {{.*}}, <2 x i1> {{.*}})
     simd_scatter(values, pointers, mask)
 }


@@ -1,23 +0,0 @@
-// run-pass
-// compile-flags: -Copt-level=0 -Cllvm-args=-opaque-pointers=0
-// (the ability to disable opaque pointers has been removed in LLVM 17)
-// ignore-llvm-version: 17 - 99
-// This test can be removed once non-opaque pointers are gone from LLVM, maybe.
-
-#![feature(dyn_star, pointer_like_trait)]
-#![allow(incomplete_features)]
-
-use std::fmt::Debug;
-use std::marker::PointerLike;
-
-fn make_dyn_star<'a>(t: impl PointerLike + Debug + 'a) -> dyn* Debug + 'a {
-    t as _
-}
-
-fn main() {
-    println!("{:?}", make_dyn_star(Box::new(1i32)));
-    println!("{:?}", make_dyn_star(2usize));
-    println!("{:?}", make_dyn_star((3usize,)));
-}


@@ -1,4 +1,5 @@
 error: values of the type `[&usize; usize::MAX]` are too big for the current architecture
+  --> $SRC_DIR/alloc/src/boxed.rs:LL:COL

 error: aborting due to previous error