abi: add AddressSpace field to Primitive::Pointer

...and remove it from `PointeeInfo`, which isn't meant for this.

There are still various places (marked with FIXMEs) that assume all pointers
have the same size and alignment. Fixing this requires parsing non-default
address spaces in the data layout string, which will be done in a followup.
Author: Erik Desjardins
Date:   2023-01-22 23:03:58 -05:00
parent 96f8f99589
commit 009192b01b
26 changed files with 224 additions and 179 deletions
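For context on the FIXMEs below: the planned followup would parse per-address-space pointer specs (`p<n>:<size>:<align>`) out of the LLVM-style data layout string and replace `TargetDataLayout::{pointer_size,pointer_align}` with per-address-space accessors. The following is only a minimal, self-contained sketch of that idea; `DataLayout`, `PointerSpec`, and `pointer_size_in` are hypothetical stand-ins, not rustc's actual API:

    use std::collections::HashMap;

    /// Hypothetical stand-in for rustc_abi's `AddressSpace`.
    #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
    pub struct AddressSpace(pub u32);

    /// Pointer layout for one address space, in bits.
    #[derive(Copy, Clone, Debug)]
    pub struct PointerSpec {
        pub size_bits: u64,
        pub abi_align_bits: u64,
    }

    #[derive(Default)]
    pub struct DataLayout {
        /// Per-address-space pointer layouts, keyed by address space number.
        pointers: HashMap<u32, PointerSpec>,
    }

    impl DataLayout {
        /// Parse `p:<size>:<align>` (addrspace 0) and `p<n>:<size>:<align>`
        /// specs out of a data layout string like "e-p:64:64-p1:32:32-i64:64".
        pub fn parse(dl: &str) -> Self {
            let mut out = DataLayout::default();
            for spec in dl.split('-') {
                let parts: Vec<&str> = spec.split(':').collect();
                if let [p, size, align, ..] = parts.as_slice() {
                    if let Some(n) = p.strip_prefix('p') {
                        // A bare "p" means address space 0.
                        let addrspace = if n.is_empty() { Ok(0) } else { n.parse() };
                        if let (Ok(addrspace), Ok(size_bits), Ok(abi_align_bits)) =
                            (addrspace, size.parse(), align.parse())
                        {
                            out.pointers
                                .insert(addrspace, PointerSpec { size_bits, abi_align_bits });
                        }
                    }
                }
            }
            out
        }

        /// The accessor shape the FIXME suggests: look up the pointer size for
        /// a given address space, falling back to the default address space 0.
        pub fn pointer_size_in(&self, addr_space: AddressSpace) -> u64 {
            self.pointers
                .get(&addr_space.0)
                .or_else(|| self.pointers.get(&0))
                .expect("data layout must describe addrspace 0 pointers")
                .size_bits
        }
    }

    fn main() {
        let dl = DataLayout::parse("e-p:64:64-p1:32:32-i64:64");
        assert_eq!(dl.pointer_size_in(AddressSpace(0)), 64);
        // A non-default address space with a different pointer size:
        assert_eq!(dl.pointer_size_in(AddressSpace(1)), 32);
    }

With a data layout declaring `p1:32:32`, `pointer_size_in(AddressSpace(1))` returns 32 bits while the default address space stays 64-bit, which is exactly the distinction the current single `pointer_size` field cannot express.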


@@ -267,6 +267,9 @@ impl TargetDataLayout {
                 ["a", ref a @ ..] => dl.aggregate_align = align(a, "a")?,
                 ["f32", ref a @ ..] => dl.f32_align = align(a, "f32")?,
                 ["f64", ref a @ ..] => dl.f64_align = align(a, "f64")?,
+                // FIXME(erikdesjardins): we should be parsing nonzero address spaces
+                // this will require replacing TargetDataLayout::{pointer_size,pointer_align}
+                // with e.g. `fn pointer_size_in(AddressSpace)`
                 [p @ "p", s, ref a @ ..] | [p @ "p0", s, ref a @ ..] => {
                     dl.pointer_size = size(s, p)?;
                     dl.pointer_align = align(a, p)?;
@@ -861,7 +864,7 @@ pub enum Primitive {
     Int(Integer, bool),
     F32,
     F64,
-    Pointer,
+    Pointer(AddressSpace),
 }

 impl Primitive {
@@ -872,7 +875,10 @@ impl Primitive {
             Int(i, _) => i.size(),
             F32 => Size::from_bits(32),
             F64 => Size::from_bits(64),
-            Pointer => dl.pointer_size,
+            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
+            // different address spaces can have different sizes
+            // (but TargetDataLayout doesn't currently parse that part of the DL string)
+            Pointer(_) => dl.pointer_size,
         }
     }
@@ -883,14 +889,12 @@ impl Primitive {
             Int(i, _) => i.align(dl),
             F32 => dl.f32_align,
             F64 => dl.f64_align,
-            Pointer => dl.pointer_align,
+            // FIXME(erikdesjardins): ignoring address space is technically wrong, pointers in
+            // different address spaces can have different alignments
+            // (but TargetDataLayout doesn't currently parse that part of the DL string)
+            Pointer(_) => dl.pointer_align,
         }
     }
-
-    #[inline]
-    pub fn is_ptr(self) -> bool {
-        matches!(self, Pointer)
-    }
 }

 /// Inclusive wrap-around range of valid values, that is, if
@@ -1176,7 +1180,8 @@ impl FieldsShape {
 /// An identifier that specifies the address space that some operation
 /// should operate on. Special address spaces have an effect on code generation,
 /// depending on the target and the address spaces it implements.
-#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[cfg_attr(feature = "nightly", derive(HashStable_Generic))]
 pub struct AddressSpace(pub u32);

 impl AddressSpace {
@@ -1456,7 +1461,6 @@ pub struct PointeeInfo {
     pub size: Size,
     pub align: Align,
     pub safe: Option<PointerKind>,
-    pub address_space: AddressSpace,
 }

 /// Used in `might_permit_raw_init` to indicate the kind of initialisation


@@ -35,7 +35,8 @@ pub(crate) fn scalar_to_clif_type(tcx: TyCtxt<'_>, scalar: Scalar) -> Type {
         },
         Primitive::F32 => types::F32,
         Primitive::F64 => types::F64,
-        Primitive::Pointer => pointer_ty(tcx),
+        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+        Primitive::Pointer(_) => pointer_ty(tcx),
     }
 }


@@ -709,7 +709,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
                         bx.range_metadata(load, vr);
                     }
                 }
-                abi::Pointer if vr.start < vr.end && !vr.contains(0) => {
+                abi::Pointer(_) if vr.start < vr.end && !vr.contains(0) => {
                     bx.nonnull_metadata(load);
                 }
                 _ => {}


@@ -211,7 +211,7 @@ impl<'gcc, 'tcx> ConstMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         let base_addr = self.const_bitcast(base_addr, self.usize_type);
         let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64);
         let ptr = self.const_bitcast(base_addr + offset, ptr_type);
-        if layout.primitive() != Pointer {
+        if !matches!(layout.primitive(), Pointer(_)) {
             self.const_bitcast(ptr.dereference(None).to_rvalue(), ty)
         }
         else {


@@ -7,9 +7,9 @@ use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}
 use rustc_middle::mir::mono::MonoItem;
 use rustc_middle::ty::{self, Instance, Ty};
 use rustc_middle::ty::layout::LayoutOf;
-use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, Scalar as InterpScalar, read_target_uint};
+use rustc_middle::mir::interpret::{self, ConstAllocation, ErrorHandled, GlobalAlloc, Scalar as InterpScalar, read_target_uint};
 use rustc_span::def_id::DefId;
-use rustc_target::abi::{self, Align, HasDataLayout, Primitive, Size, WrappingRange};
+use rustc_target::abi::{self, AddressSpace, Align, HasDataLayout, Primitive, Size, WrappingRange};

 use crate::base;
 use crate::context::CodegenCx;
@@ -322,13 +322,21 @@ pub fn const_alloc_to_gcc<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, alloc: ConstAl
                 )
                 .expect("const_alloc_to_llvm: could not read relocation pointer")
                 as u64;
+            let address_space = match cx.tcx.global_alloc(alloc_id) {
+                GlobalAlloc::Function(..) => cx.data_layout().instruction_address_space,
+                GlobalAlloc::Static(..) | GlobalAlloc::Memory(..) | GlobalAlloc::VTable(..) => {
+                    AddressSpace::DATA
+                }
+            };
             llvals.push(cx.scalar_to_backend(
                 InterpScalar::from_pointer(
                     interpret::Pointer::new(alloc_id, Size::from_bytes(ptr_offset)),
                     &cx.tcx,
                 ),
-                abi::Scalar::Initialized { value: Primitive::Pointer, valid_range: WrappingRange::full(dl.pointer_size) },
-                cx.type_i8p(),
+                abi::Scalar::Initialized { value: Primitive::Pointer(address_space), valid_range: WrappingRange::full(dl.pointer_size) },
+                cx.type_i8p_ext(address_space),
             ));
             next_offset = offset + pointer_size;
         }


@@ -253,7 +253,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
             Int(i, false) => cx.type_from_unsigned_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer => {
+            Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
                 let pointee =
                     if let Some(pointee) = self.pointee_info_at(cx, offset) {
@@ -262,7 +262,7 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
                     else {
                         cx.type_i8()
                     };
-                cx.type_ptr_to(pointee)
+                cx.type_ptr_to_ext(pointee, address_space)
             }
         }
     }


@@ -849,6 +849,7 @@ fn dummy_output_type<'ll>(cx: &CodegenCx<'ll, '_>, reg: InlineAsmRegClass) -> &'
 /// Helper function to get the LLVM type for a Scalar. Pointers are returned as
 /// the equivalent integer type.
 fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Type {
+    let dl = &cx.tcx.data_layout;
     match scalar.primitive() {
         Primitive::Int(Integer::I8, _) => cx.type_i8(),
         Primitive::Int(Integer::I16, _) => cx.type_i16(),
@@ -856,7 +857,8 @@ fn llvm_asm_scalar_type<'ll>(cx: &CodegenCx<'ll, '_>, scalar: Scalar) -> &'ll Ty
         Primitive::Int(Integer::I64, _) => cx.type_i64(),
         Primitive::F32 => cx.type_f32(),
         Primitive::F64 => cx.type_f64(),
-        Primitive::Pointer => cx.type_isize(),
+        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+        Primitive::Pointer(_) => cx.type_from_integer(dl.ptr_sized_integer()),
         _ => unreachable!(),
     }
 }
@@ -868,6 +870,7 @@ fn llvm_fixup_input<'ll, 'tcx>(
     reg: InlineAsmRegClass,
     layout: &TyAndLayout<'tcx>,
 ) -> &'ll Value {
+    let dl = &bx.tcx.data_layout;
     match (reg, layout.abi) {
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg), Abi::Scalar(s)) => {
             if let Primitive::Int(Integer::I8, _) = s.primitive() {
@@ -881,8 +884,10 @@ fn llvm_fixup_input<'ll, 'tcx>(
             let elem_ty = llvm_asm_scalar_type(bx.cx, s);
             let count = 16 / layout.size.bytes();
             let vec_ty = bx.cx.type_vector(elem_ty, count);
-            if let Primitive::Pointer = s.primitive() {
-                value = bx.ptrtoint(value, bx.cx.type_isize());
+            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+            if let Primitive::Pointer(_) = s.primitive() {
+                let t = bx.type_from_integer(dl.ptr_sized_integer());
+                value = bx.ptrtoint(value, t);
             }
             bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0))
         }
@@ -958,7 +963,7 @@ fn llvm_fixup_output<'ll, 'tcx>(
         }
         (InlineAsmRegClass::AArch64(AArch64InlineAsmRegClass::vreg_low16), Abi::Scalar(s)) => {
             value = bx.extract_element(value, bx.const_i32(0));
-            if let Primitive::Pointer = s.primitive() {
+            if let Primitive::Pointer(_) = s.primitive() {
                 value = bx.inttoptr(value, layout.llvm_type(bx.cx));
             }
             value


@@ -511,7 +511,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> {
                     bx.range_metadata(load, scalar.valid_range(bx));
                 }
             }
-            abi::Pointer => {
+            abi::Pointer(_) => {
                 if !scalar.valid_range(bx).contains(0) {
                     bx.nonnull_metadata(load);
                 }


@@ -236,7 +236,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
             Scalar::Int(int) => {
                 let data = int.assert_bits(layout.size(self));
                 let llval = self.const_uint_big(self.type_ix(bitsize), data);
-                if layout.primitive() == Pointer {
+                if matches!(layout.primitive(), Pointer(_)) {
                     unsafe { llvm::LLVMConstIntToPtr(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)
@@ -284,7 +284,7 @@ impl<'ll, 'tcx> ConstMethods<'tcx> for CodegenCx<'ll, 'tcx> {
                         1,
                     )
                 };
-                if layout.primitive() != Pointer {
+                if !matches!(layout.primitive(), Pointer(_)) {
                     unsafe { llvm::LLVMConstPtrToInt(llval, llty) }
                 } else {
                     self.const_bitcast(llval, llty)


@@ -111,7 +111,7 @@ pub fn const_alloc_to_llvm<'ll>(cx: &CodegenCx<'ll, '_>, alloc: ConstAllocation<
                     &cx.tcx,
                 ),
                 Scalar::Initialized {
-                    value: Primitive::Pointer,
+                    value: Primitive::Pointer(address_space),
                     valid_range: WrappingRange::full(dl.pointer_size),
                 },
                 cx.type_i8p_ext(address_space),


@@ -122,7 +122,8 @@ fn tag_base_type<'ll, 'tcx>(
         Primitive::Int(t, _) => t,
         Primitive::F32 => Integer::I32,
         Primitive::F64 => Integer::I64,
-        Primitive::Pointer => {
+        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+        Primitive::Pointer(_) => {
             // If the niche is the NULL value of a reference, then `discr_enum_ty` will be
             // a RawPtr. CodeView doesn't know what to do with enums whose base type is a
             // pointer so we fix this up to just be `usize`.


@@ -149,7 +149,7 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> {
                     emit_va_arg(self, args[0], ret_ty)
                 }
             }
-            Primitive::F64 | Primitive::Pointer => {
+            Primitive::F64 | Primitive::Pointer(_) => {
                 emit_va_arg(self, args[0], ret_ty)
             }
             // `va_arg` should never be used with the return type f32.


@@ -7,7 +7,7 @@ use rustc_middle::bug;
 use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
 use rustc_middle::ty::{self, Ty, TypeVisitable};
-use rustc_target::abi::{Abi, AddressSpace, Align, FieldsShape};
+use rustc_target::abi::{Abi, Align, FieldsShape};
 use rustc_target::abi::{Int, Pointer, F32, F64};
 use rustc_target::abi::{PointeeInfo, Scalar, Size, TyAbiInterface, Variants};
 use smallvec::{smallvec, SmallVec};
@@ -312,14 +312,13 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> {
             Int(i, _) => cx.type_from_integer(i),
             F32 => cx.type_f32(),
             F64 => cx.type_f64(),
-            Pointer => {
+            Pointer(address_space) => {
                 // If we know the alignment, pick something better than i8.
-                let (pointee, address_space) =
-                    if let Some(pointee) = self.pointee_info_at(cx, offset) {
-                        (cx.type_pointee_for_align(pointee.align), pointee.address_space)
-                    } else {
-                        (cx.type_i8(), AddressSpace::DATA)
-                    };
+                let pointee = if let Some(pointee) = self.pointee_info_at(cx, offset) {
+                    cx.type_pointee_for_align(pointee.align)
+                } else {
+                    cx.type_i8()
+                };
                 cx.type_ptr_to_ext(pointee, address_space)
             }
         }


@@ -1801,8 +1801,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match (src.layout.abi, dst.layout.abi) {
             (abi::Abi::Scalar(src_scalar), abi::Abi::Scalar(dst_scalar)) => {
                 // HACK(eddyb) LLVM doesn't like `bitcast`s between pointers and non-pointers.
-                let src_is_ptr = src_scalar.primitive() == abi::Pointer;
-                let dst_is_ptr = dst_scalar.primitive() == abi::Pointer;
+                let src_is_ptr = matches!(src_scalar.primitive(), abi::Pointer(_));
+                let dst_is_ptr = matches!(dst_scalar.primitive(), abi::Pointer(_));
                 if src_is_ptr == dst_is_ptr {
                     assert_eq!(src.layout.size, dst.layout.size);


@@ -9,7 +9,7 @@ use rustc_middle::mir;
 use rustc_middle::mir::tcx::PlaceTy;
 use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
 use rustc_middle::ty::{self, Ty};
-use rustc_target::abi::{Abi, Align, FieldsShape, Int, TagEncoding};
+use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding};
 use rustc_target::abi::{VariantIdx, Variants};

 #[derive(Copy, Clone, Debug)]
@@ -209,6 +209,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
         bx: &mut Bx,
         cast_to: Ty<'tcx>,
     ) -> V {
+        let dl = &bx.tcx().data_layout;
         let cast_to_layout = bx.cx().layout_of(cast_to);
         let cast_to_size = cast_to_layout.layout.size();
         let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
@@ -250,12 +251,14 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
             TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                 // Cast to an integer so we don't have to treat a pointer as a
                 // special case.
-                let (tag, tag_llty) = if tag_scalar.primitive().is_ptr() {
-                    let t = bx.type_isize();
-                    let tag = bx.ptrtoint(tag_imm, t);
-                    (tag, t)
-                } else {
-                    (tag_imm, bx.cx().immediate_backend_type(tag_op.layout))
+                let (tag, tag_llty) = match tag_scalar.primitive() {
+                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+                    Pointer(_) => {
+                        let t = bx.type_from_integer(dl.ptr_sized_integer());
+                        let tag = bx.ptrtoint(tag_imm, t);
+                        (tag, t)
+                    }
+                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                 };

                 let tag_size = tag_scalar.size(bx.cx());


@@ -319,7 +319,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert_eq!(size, mplace.layout.size, "abi::Scalar size does not match layout size");
                 let scalar = alloc.read_scalar(
                     alloc_range(Size::ZERO, size),
-                    /*read_provenance*/ s.is_ptr(),
+                    /*read_provenance*/ matches!(s, abi::Pointer(_)),
                 )?;
                 Some(ImmTy { imm: scalar.into(), layout: mplace.layout })
             }
@@ -335,11 +335,11 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 assert!(b_offset.bytes() > 0); // in `operand_field` we use the offset to tell apart the fields
                 let a_val = alloc.read_scalar(
                     alloc_range(Size::ZERO, a_size),
-                    /*read_provenance*/ a.is_ptr(),
+                    /*read_provenance*/ matches!(a, abi::Pointer(_)),
                 )?;
                 let b_val = alloc.read_scalar(
                     alloc_range(b_offset, b_size),
-                    /*read_provenance*/ b.is_ptr(),
+                    /*read_provenance*/ matches!(b, abi::Pointer(_)),
                 )?;
                 Some(ImmTy { imm: Immediate::ScalarPair(a_val, b_val), layout: mplace.layout })
             }


@@ -38,6 +38,7 @@ fn unpack_option_like<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
 impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
     pub fn check_transmute(&self, from: Ty<'tcx>, to: Ty<'tcx>, hir_id: HirId) {
         let tcx = self.tcx;
+        let dl = &tcx.data_layout;
         let span = tcx.hir().span(hir_id);
         let normalize = |ty| {
             let ty = self.resolve_vars_if_possible(ty);
@@ -69,7 +70,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> {
         // Special-case transmuting from `typeof(function)` and
         // `Option<typeof(function)>` to present a clearer error.
         let from = unpack_option_like(tcx, from);
-        if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer.size(&tcx) {
+        if let (&ty::FnDef(..), SizeSkeleton::Known(size_to)) = (from.kind(), sk_to) && size_to == Pointer(dl.instruction_address_space).size(&tcx) {
             struct_span_err!(tcx.sess, span, E0591, "can't transmute zero-sized type")
                 .note(&format!("source type: {from}"))
                 .note(&format!("target type: {to}"))


@@ -128,7 +128,8 @@ impl PrimitiveExt for Primitive {
             Int(i, signed) => i.to_ty(tcx, signed),
             F32 => tcx.types.f32,
             F64 => tcx.types.f64,
-            Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
+            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+            Pointer(_) => tcx.mk_mut_ptr(tcx.mk_unit()),
         }
     }
@@ -138,7 +139,11 @@ impl PrimitiveExt for Primitive {
     fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
         match *self {
             Int(i, signed) => i.to_ty(tcx, signed),
-            Pointer => tcx.types.usize,
+            // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+            Pointer(_) => {
+                let signed = false;
+                tcx.data_layout().ptr_sized_integer().to_ty(tcx, signed)
+            }
             F32 | F64 => bug!("floats do not have an int type"),
         }
     }
@@ -812,132 +817,125 @@ where
         let tcx = cx.tcx();
         let param_env = cx.param_env();

-        let addr_space_of_ty = |ty: Ty<'tcx>| {
-            if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
-        };
-
-        let pointee_info = match *this.ty.kind() {
-            ty::RawPtr(mt) if offset.bytes() == 0 => {
-                tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
-                    size: layout.size,
-                    align: layout.align.abi,
-                    safe: None,
-                    address_space: addr_space_of_ty(mt.ty),
-                })
-            }
-            ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
-                tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
-                    size: layout.size,
-                    align: layout.align.abi,
-                    safe: None,
-                    address_space: cx.data_layout().instruction_address_space,
-                })
-            }
-            ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
-                let address_space = addr_space_of_ty(ty);
-                let kind = if tcx.sess.opts.optimize == OptLevel::No {
-                    // Use conservative pointer kind if not optimizing. This saves us the
-                    // Freeze/Unpin queries, and can save time in the codegen backend (noalias
-                    // attributes in LLVM have compile-time cost even in unoptimized builds).
-                    PointerKind::SharedMutable
-                } else {
-                    match mt {
-                        hir::Mutability::Not => {
-                            if ty.is_freeze(tcx, cx.param_env()) {
-                                PointerKind::Frozen
-                            } else {
-                                PointerKind::SharedMutable
-                            }
-                        }
-                        hir::Mutability::Mut => {
-                            // References to self-referential structures should not be considered
-                            // noalias, as another pointer to the structure can be obtained, that
-                            // is not based-on the original reference. We consider all !Unpin
-                            // types to be potentially self-referential here.
-                            if ty.is_unpin(tcx, cx.param_env()) {
-                                PointerKind::UniqueBorrowed
-                            } else {
-                                PointerKind::UniqueBorrowedPinned
-                            }
-                        }
-                    }
-                };
-                tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
-                    size: layout.size,
-                    align: layout.align.abi,
-                    safe: Some(kind),
-                    address_space,
-                })
-            }
-            _ => {
-                let mut data_variant = match this.variants {
-                    // Within the discriminant field, only the niche itself is
-                    // always initialized, so we only check for a pointer at its
-                    // offset.
-                    //
-                    // If the niche is a pointer, it's either valid (according
-                    // to its type), or null (which the niche field's scalar
-                    // validity range encodes). This allows using
-                    // `dereferenceable_or_null` for e.g., `Option<&T>`, and
-                    // this will continue to work as long as we don't start
-                    // using more niches than just null (e.g., the first page of
-                    // the address space, or unaligned pointers).
-                    Variants::Multiple {
-                        tag_encoding: TagEncoding::Niche { untagged_variant, .. },
-                        tag_field,
-                        ..
-                    } if this.fields.offset(tag_field) == offset => {
-                        Some(this.for_variant(cx, untagged_variant))
-                    }
-                    _ => Some(this),
-                };
-                if let Some(variant) = data_variant {
-                    // We're not interested in any unions.
-                    if let FieldsShape::Union(_) = variant.fields {
-                        data_variant = None;
-                    }
-                }
-                let mut result = None;
-                if let Some(variant) = data_variant {
-                    let ptr_end = offset + Pointer.size(cx);
-                    for i in 0..variant.fields.count() {
-                        let field_start = variant.fields.offset(i);
-                        if field_start <= offset {
-                            let field = variant.field(cx, i);
-                            result = field.to_result().ok().and_then(|field| {
-                                if ptr_end <= field_start + field.size {
-                                    // We found the right field, look inside it.
-                                    let field_info =
-                                        field.pointee_info_at(cx, offset - field_start);
-                                    field_info
-                                } else {
-                                    None
-                                }
-                            });
-                            if result.is_some() {
-                                break;
-                            }
-                        }
-                    }
-                }
-                // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
-                if let Some(ref mut pointee) = result {
-                    if let ty::Adt(def, _) = this.ty.kind() {
-                        if def.is_box() && offset.bytes() == 0 {
-                            pointee.safe = Some(PointerKind::UniqueOwned);
-                        }
-                    }
-                }
-                result
-            }
-        };
+        let pointee_info =
+            match *this.ty.kind() {
+                ty::RawPtr(mt) if offset.bytes() == 0 => {
+                    tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
+                        size: layout.size,
+                        align: layout.align.abi,
+                        safe: None,
+                    })
+                }
+                ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
+                    tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| {
+                        PointeeInfo { size: layout.size, align: layout.align.abi, safe: None }
+                    })
+                }
+                ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
+                    let kind = if tcx.sess.opts.optimize == OptLevel::No {
+                        // Use conservative pointer kind if not optimizing. This saves us the
+                        // Freeze/Unpin queries, and can save time in the codegen backend (noalias
+                        // attributes in LLVM have compile-time cost even in unoptimized builds).
+                        PointerKind::SharedMutable
+                    } else {
+                        match mt {
+                            hir::Mutability::Not => {
+                                if ty.is_freeze(tcx, cx.param_env()) {
+                                    PointerKind::Frozen
+                                } else {
+                                    PointerKind::SharedMutable
+                                }
+                            }
+                            hir::Mutability::Mut => {
+                                // References to self-referential structures should not be considered
+                                // noalias, as another pointer to the structure can be obtained, that
+                                // is not based-on the original reference. We consider all !Unpin
+                                // types to be potentially self-referential here.
+                                if ty.is_unpin(tcx, cx.param_env()) {
+                                    PointerKind::UniqueBorrowed
+                                } else {
+                                    PointerKind::UniqueBorrowedPinned
+                                }
+                            }
+                        }
+                    };
+                    tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
+                        size: layout.size,
+                        align: layout.align.abi,
+                        safe: Some(kind),
+                    })
+                }
+                _ => {
+                    let mut data_variant = match this.variants {
+                        // Within the discriminant field, only the niche itself is
+                        // always initialized, so we only check for a pointer at its
+                        // offset.
+                        //
+                        // If the niche is a pointer, it's either valid (according
+                        // to its type), or null (which the niche field's scalar
+                        // validity range encodes). This allows using
+                        // `dereferenceable_or_null` for e.g., `Option<&T>`, and
+                        // this will continue to work as long as we don't start
+                        // using more niches than just null (e.g., the first page of
+                        // the address space, or unaligned pointers).
+                        Variants::Multiple {
+                            tag_encoding: TagEncoding::Niche { untagged_variant, .. },
+                            tag_field,
+                            ..
+                        } if this.fields.offset(tag_field) == offset => {
+                            Some(this.for_variant(cx, untagged_variant))
+                        }
+                        _ => Some(this),
+                    };
+                    if let Some(variant) = data_variant {
+                        // We're not interested in any unions.
+                        if let FieldsShape::Union(_) = variant.fields {
+                            data_variant = None;
+                        }
+                    }
+                    let mut result = None;
+                    if let Some(variant) = data_variant {
+                        // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
+                        // (requires passing in the expected address space from the caller)
+                        let ptr_end = offset + Pointer(AddressSpace::DATA).size(cx);
+                        for i in 0..variant.fields.count() {
+                            let field_start = variant.fields.offset(i);
+                            if field_start <= offset {
+                                let field = variant.field(cx, i);
+                                result = field.to_result().ok().and_then(|field| {
+                                    if ptr_end <= field_start + field.size {
+                                        // We found the right field, look inside it.
+                                        let field_info =
+                                            field.pointee_info_at(cx, offset - field_start);
+                                        field_info
+                                    } else {
+                                        None
+                                    }
+                                });
+                                if result.is_some() {
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                    // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
+                    if let Some(ref mut pointee) = result {
+                        if let ty::Adt(def, _) = this.ty.kind() {
+                            if def.is_box() && offset.bytes() == 0 {
+                                pointee.safe = Some(PointerKind::UniqueOwned);
+                            }
+                        }
+                    }
+                    result
+                }
+            };

         debug!(
             "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",


@@ -39,7 +39,7 @@ where
 {
     match arg_layout.abi {
         Abi::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer => {
+            abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }


@@ -346,7 +346,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> {
             // The primitive for this algorithm.
             Abi::Scalar(scalar) => {
                 let kind = match scalar.primitive() {
-                    abi::Int(..) | abi::Pointer => RegKind::Integer,
+                    abi::Int(..) | abi::Pointer(_) => RegKind::Integer,
                     abi::F32 | abi::F64 => RegKind::Float,
                 };
                 Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size }))


@@ -45,7 +45,7 @@ where
 {
     match arg_layout.abi {
         Abi::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer => {
+            abi::Int(..) | abi::Pointer(_) => {
                 if arg_layout.size.bits() > xlen {
                     return Err(CannotUseFpConv);
                 }


@@ -83,7 +83,7 @@ where
             (abi::F32, _) => offset += Reg::f32().size,
             (_, abi::F64) => offset += Reg::f64().size,
             (abi::Int(i, _signed), _) => offset += i.size(),
-            (abi::Pointer, _) => offset += Reg::i64().size,
+            (abi::Pointer(_), _) => offset += Reg::i64().size,
             _ => {}
         }


@@ -50,7 +50,7 @@ where
 {
         Abi::Uninhabited => return Ok(()),

         Abi::Scalar(scalar) => match scalar.primitive() {
-            abi::Int(..) | abi::Pointer => Class::Int,
+            abi::Int(..) | abi::Pointer(_) => Class::Int,
             abi::F32 | abi::F64 => Class::Sse,
         },

@@ -244,7 +244,7 @@ fn adjust_for_rust_scalar<'tcx>(
     }

     // Only pointer types handled below.
-    let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
+    let Scalar::Initialized { value: Pointer(_), valid_range} = scalar else { return };

     if !valid_range.contains(0) {
         attrs.set(ArgAttribute::NonNull);
@@ -479,7 +479,7 @@ fn fn_abi_adjust_for_abi<'tcx>(
         }

         let size = arg.layout.size;
-        if arg.layout.is_unsized() || size > Pointer.size(cx) {
+        if arg.layout.is_unsized() || size > Pointer(AddressSpace::DATA).size(cx) {
             arg.make_indirect();
         } else {
             // We want to pass small aggregates as immediates, but using


@@ -134,7 +134,7 @@ fn layout_of_uncached<'tcx>(
             ty::FloatTy::F64 => F64,
         }),
         ty::FnPtr(_) => {
-            let mut ptr = scalar_unit(Pointer);
+            let mut ptr = scalar_unit(Pointer(dl.instruction_address_space));
             ptr.valid_range_mut().start = 1;
             tcx.intern_layout(LayoutS::scalar(cx, ptr))
         }
@@ -144,7 +144,7 @@ fn layout_of_uncached<'tcx>(
         // Potentially-wide pointers.
         ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
-            let mut data_ptr = scalar_unit(Pointer);
+            let mut data_ptr = scalar_unit(Pointer(AddressSpace::DATA));
             if !ty.is_unsafe_ptr() {
                 data_ptr.valid_range_mut().start = 1;
             }
@@ -178,7 +178,7 @@ fn layout_of_uncached<'tcx>(
             }
             ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
             ty::Dynamic(..) => {
-                let mut vtable = scalar_unit(Pointer);
+                let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
                 vtable.valid_range_mut().start = 1;
                 vtable
             }
@@ -195,7 +195,7 @@ fn layout_of_uncached<'tcx>(
         ty::Dynamic(_, _, ty::DynStar) => {
             let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
             data.valid_range_mut().start = 0;
-            let mut vtable = scalar_unit(Pointer);
+            let mut vtable = scalar_unit(Pointer(AddressSpace::DATA));
             vtable.valid_range_mut().start = 1;
             tcx.intern_layout(cx.scalar_pair(data, vtable))
         }


@@ -109,3 +109,28 @@ pub unsafe fn transmute_fn_ptr_to_data(x: fn()) -> *const () {
     // as long as it doesn't cause a verifier error by using `bitcast`.
     transmute(x)
 }
+
+pub enum Either<T, U> { A(T), B(U) }
+
+// Previously, we would codegen this as passing/returning a scalar pair of `{ i8, ptr }`,
+// with the `ptr` field representing both `&i32` and `fn()` depending on the variant.
+// This is incorrect, because `fn()` should be `ptr addrspace(1)`, not `ptr`.
+// CHECK: define{{.+}}void @should_not_combine_addrspace({{.+\*|ptr}}{{.+}}sret{{.+}}%0, {{.+\*|ptr}}{{.+}}%x)
+#[no_mangle]
+#[inline(never)]
+pub fn should_not_combine_addrspace(x: Either<&i32, fn()>) -> Either<&i32, fn()> {
+    x
+}
+
+// The incorrectness described above would result in us producing (after optimizations)
+// a `ptrtoint`/`inttoptr` roundtrip to convert from `ptr` to `ptr addrspace(1)`.
+// CHECK-LABEL: @call_with_fn_ptr
+#[no_mangle]
+pub fn call_with_fn_ptr<'a>(f: fn()) -> Either<&'a i32, fn()> {
+    // CHECK-NOT: ptrtoint
+    // CHECK-NOT: inttoptr
+    // CHECK: call addrspace(1) void @should_not_combine_addrspace
+    should_not_combine_addrspace(Either::B(f))
+}
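
The gcc and llvm `const_alloc_to_*` hunks above make the same decision the test exercises: a relocation's address space is derived from the kind of allocation it points to, with functions placed in the target's instruction address space and everything else in the data address space. The following is a simplified sketch of that selection logic only, using hypothetical stand-in types rather than rustc's real `GlobalAlloc` and `AddressSpace`:

    /// Hypothetical stand-in for rustc_abi's `AddressSpace`.
    #[derive(Copy, Clone, Debug, PartialEq, Eq)]
    pub struct AddressSpace(pub u32);

    impl AddressSpace {
        /// The default address space, where ordinary data lives.
        pub const DATA: AddressSpace = AddressSpace(0);
    }

    /// Hypothetical stand-in for the interpreter's `GlobalAlloc`.
    pub enum GlobalAlloc {
        Function,
        Static,
        Memory,
        VTable,
    }

    /// Mirrors the match in the const_alloc_to_gcc/const_alloc_to_llvm hunks:
    /// function pointers live in the instruction address space, all other
    /// allocations in the data address space.
    pub fn address_space_of(
        alloc: &GlobalAlloc,
        instruction_address_space: AddressSpace,
    ) -> AddressSpace {
        match alloc {
            GlobalAlloc::Function => instruction_address_space,
            GlobalAlloc::Static | GlobalAlloc::Memory | GlobalAlloc::VTable => {
                AddressSpace::DATA
            }
        }
    }

    fn main() {
        // On most targets the instruction address space is also 0, so the two
        // coincide; the bug only becomes visible on targets like AVR, where
        // functions live in addrspace(1).
        let avr_instruction_space = AddressSpace(1);
        assert_eq!(
            address_space_of(&GlobalAlloc::Function, avr_instruction_space),
            AddressSpace(1)
        );
        assert_eq!(
            address_space_of(&GlobalAlloc::Static, avr_instruction_space),
            AddressSpace::DATA
        );
    }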