Merge commit '3a9bf72932' into sync_cg_clif-2023-12-19

bjorn3 2023-12-19 12:46:39 +00:00
commit d5c38ded26
13 changed files with 223 additions and 115 deletions


@@ -353,7 +353,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
                         fx,
                         rustc_hir::LangItem::PanicBoundsCheck,
                         &[index, len, location],
-                        source_info.span,
+                        Some(source_info.span),
                     );
                 }
                 AssertKind::MisalignedPointerDereference { ref required, ref found } => {
@@ -365,7 +365,7 @@ fn codegen_fn_body(fx: &mut FunctionCx<'_, '_, '_>, start_block: Block) {
                         fx,
                         rustc_hir::LangItem::PanicMisalignedPointerDereference,
                         &[required, found, location],
-                        source_info.span,
+                        Some(source_info.span),
                     );
                 }
                 _ => {
@@ -945,19 +945,19 @@ pub(crate) fn codegen_panic<'tcx>(
     let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
     let args = [msg_ptr, msg_len, location];

-    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, source_info.span);
+    codegen_panic_inner(fx, rustc_hir::LangItem::Panic, &args, Some(source_info.span));
 }

 pub(crate) fn codegen_panic_nounwind<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     msg_str: &str,
-    source_info: mir::SourceInfo,
+    span: Option<Span>,
 ) {
     let msg_ptr = fx.anonymous_str(msg_str);
     let msg_len = fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(msg_str.len()).unwrap());
     let args = [msg_ptr, msg_len];

-    codegen_panic_inner(fx, rustc_hir::LangItem::PanicNounwind, &args, source_info.span);
+    codegen_panic_inner(fx, rustc_hir::LangItem::PanicNounwind, &args, span);
 }

 pub(crate) fn codegen_unwind_terminate<'tcx>(
@@ -967,16 +967,16 @@ pub(crate) fn codegen_unwind_terminate<'tcx>(
 ) {
     let args = [];

-    codegen_panic_inner(fx, reason.lang_item(), &args, source_info.span);
+    codegen_panic_inner(fx, reason.lang_item(), &args, Some(source_info.span));
 }

 fn codegen_panic_inner<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     lang_item: rustc_hir::LangItem,
     args: &[Value],
-    span: Span,
+    span: Option<Span>,
 ) {
-    let def_id = fx.tcx.require_lang_item(lang_item, Some(span));
+    let def_id = fx.tcx.require_lang_item(lang_item, span);
     let instance = Instance::mono(fx.tcx, def_id).polymorphize(fx.tcx);
     let symbol_name = fx.tcx.symbol_name(instance).name;
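
The hunks above only thread an `Option<Span>` through the panic helpers, so callers that have no MIR source location (such as the new extern-type path further down) can pass `None`. For readers unfamiliar with the panic lang items, here is a rough standalone sketch (ordinary user code, not part of this commit) of the kind of operation that reaches the `LangItem::PanicBoundsCheck` call emitted by `codegen_panic_inner`: an out-of-bounds index panics with a message carrying the length and the index.

    // Standalone illustration (ordinary user code, not part of this commit):
    // an out-of-bounds index is roughly what ends up calling the
    // PanicBoundsCheck lang item that codegen_panic_inner emits a call to.
    fn main() {
        // Silence the default hook so the caught panic is not printed to stderr.
        std::panic::set_hook(Box::new(|_| {}));

        let xs = [1, 2, 3];
        let i = 5;
        let err = std::panic::catch_unwind(|| xs[i]).unwrap_err();

        // The formatted panic payload carries the length and the offending index.
        let msg = err.downcast_ref::<String>().unwrap();
        assert!(msg.contains("len is 3"));
        assert!(msg.contains("index is 5"));
    }
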


@@ -98,11 +98,15 @@ fn clif_pair_type_from_ty<'tcx>(
 /// Is a pointer to this type a fat ptr?
 pub(crate) fn has_ptr_meta<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> bool {
-    let ptr_ty = Ty::new_ptr(tcx, TypeAndMut { ty, mutbl: rustc_hir::Mutability::Not });
-    match &tcx.layout_of(ParamEnv::reveal_all().and(ptr_ty)).unwrap().abi {
-        Abi::Scalar(_) => false,
-        Abi::ScalarPair(_, _) => true,
-        abi => unreachable!("Abi of ptr to {:?} is {:?}???", ty, abi),
+    if ty.is_sized(tcx, ParamEnv::reveal_all()) {
+        return false;
+    }
+
+    let tail = tcx.struct_tail_erasing_lifetimes(ty, ParamEnv::reveal_all());
+    match tail.kind() {
+        ty::Foreign(..) => false,
+        ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
+        _ => bug!("unexpected unsized tail: {:?}", tail),
     }
 }
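
A rough standalone sketch (plain user-level Rust, not part of the diff) of the property the rewritten `has_ptr_meta` answers: references to sized pointees stay thin, while references whose unsized tail is a slice, `str`, or trait object carry metadata and are twice the size of a word. An `extern` type tail (`ty::Foreign`) is the unsized-but-thin case the new match arm returns `false` for; it needs unstable features, so it is not shown here.

    // Standalone illustration (ordinary user code, not part of this commit) of
    // the distinction has_ptr_meta draws.
    use std::mem::size_of;

    fn main() {
        let word = size_of::<usize>();

        // Sized pointee: thin pointer, no metadata.
        assert_eq!(size_of::<&u64>(), word);

        // Slice and str tails: pointer plus length.
        assert_eq!(size_of::<&[u8]>(), 2 * word);
        assert_eq!(size_of::<&str>(), 2 * word);

        // Trait object tail: pointer plus vtable pointer.
        assert_eq!(size_of::<&dyn std::fmt::Debug>(), 2 * word);
    }
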


@@ -487,13 +487,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let layout = fx.layout_of(generic_args.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
-            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
-                let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
-                size
+            let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+                Some(ptr.load_scalar_pair(fx).1)
             } else {
-                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
+                None
             };
+            let (size, _align) = crate::unsize::size_and_align_of(fx, layout, meta);
             ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
         }
         sym::min_align_of_val => {
@@ -502,13 +501,12 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let layout = fx.layout_of(generic_args.type_at(0));
             // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
             // branch
-            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
-                let (_ptr, info) = ptr.load_scalar_pair(fx);
-                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
-                align
+            let meta = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
+                Some(ptr.load_scalar_pair(fx).1)
             } else {
-                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
+                None
             };
+            let (_size, align) = crate::unsize::size_and_align_of(fx, layout, meta);
             ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
         }
@@ -688,7 +686,7 @@ fn codegen_regular_intrinsic_call<'tcx>(
                         }
                     })
                 });
-                crate::base::codegen_panic_nounwind(fx, &msg_str, source_info);
+                crate::base::codegen_panic_nounwind(fx, &msg_str, Some(source_info.span));
                 return;
             }
         }
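
With these hunks, `size_of_val` and `min_align_of_val` just extract the pointer metadata (when the argument is a fat pointer) and defer to the unified `crate::unsize::size_and_align_of` helper, which now also handles the sized case itself. As a rough illustration of the behaviour being lowered, here is ordinary user code (not codegen code) exercising `size_of_val`/`align_of_val` on a sized value, a slice, and a trait object.

    // Standalone illustration (ordinary user code, not part of this commit) of
    // the behaviour size_of_val / min_align_of_val lower to.
    use std::fmt::Debug;
    use std::mem::{align_of, align_of_val, size_of_val};

    fn main() {
        // Sized type: size and alignment are compile-time constants, no metadata.
        assert_eq!(size_of_val(&0u32), 4);

        // Slice: size = length * element size, with the length taken from the
        // fat pointer's metadata at runtime.
        let xs: &[u16] = &[1, 2, 3];
        assert_eq!(size_of_val(xs), 6);
        assert_eq!(align_of_val(xs), 2);

        // Trait object: size and alignment are loaded from the vtable.
        let d: &dyn Debug = &0u64;
        assert_eq!(size_of_val(d), 8);
        assert_eq!(align_of_val(d), align_of::<u64>());
    }
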


@@ -2,6 +2,9 @@
 //!
 //! [`PointerCoercion::Unsize`]: `rustc_middle::ty::adjustment::PointerCoercion::Unsize`

+use rustc_middle::ty::print::{with_no_trimmed_paths, with_no_visible_paths};
+
+use crate::base::codegen_panic_nounwind;
 use crate::prelude::*;

 // Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/base.rs#L159-L307
@@ -187,63 +190,113 @@ pub(crate) fn coerce_dyn_star<'tcx>(
 // Adapted from https://github.com/rust-lang/rust/blob/2a663555ddf36f6b041445894a8c175cd1bc718c/src/librustc_codegen_ssa/glue.rs
-pub(crate) fn size_and_align_of_dst<'tcx>(
+pub(crate) fn size_and_align_of<'tcx>(
     fx: &mut FunctionCx<'_, '_, 'tcx>,
     layout: TyAndLayout<'tcx>,
-    info: Value,
+    info: Option<Value>,
 ) -> (Value, Value) {
-    assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
-    match layout.ty.kind() {
+    if layout.is_sized() {
+        return (
+            fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64),
+            fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64),
+        );
+    }
+
+    let ty = layout.ty;
+    match ty.kind() {
         ty::Dynamic(..) => {
             // load size/align from vtable
-            (crate::vtable::size_of_obj(fx, info), crate::vtable::min_align_of_obj(fx, info))
+            (
+                crate::vtable::size_of_obj(fx, info.unwrap()),
+                crate::vtable::min_align_of_obj(fx, info.unwrap()),
+            )
         }
         ty::Slice(_) | ty::Str => {
             let unit = layout.field(fx, 0);
             // The info in this case is the length of the str, so the size is that
             // times the unit size.
             (
-                fx.bcx.ins().imul_imm(info, unit.size.bytes() as i64),
+                fx.bcx.ins().imul_imm(info.unwrap(), unit.size.bytes() as i64),
                 fx.bcx.ins().iconst(fx.pointer_type, unit.align.abi.bytes() as i64),
             )
         }
-        _ => {
+        ty::Foreign(_) => {
+            let trap_block = fx.bcx.create_block();
+            let true_ = fx.bcx.ins().iconst(types::I8, 1);
+            let next_block = fx.bcx.create_block();
+            fx.bcx.ins().brif(true_, trap_block, &[], next_block, &[]);
+            fx.bcx.seal_block(trap_block);
+            fx.bcx.seal_block(next_block);
+            fx.bcx.switch_to_block(trap_block);
+
+            // `extern` type. We cannot compute the size, so panic.
+            let msg_str = with_no_visible_paths!({
+                with_no_trimmed_paths!({
+                    format!("attempted to compute the size or alignment of extern type `{ty}`")
+                })
+            });
+
+            codegen_panic_nounwind(fx, &msg_str, None);
+
+            fx.bcx.switch_to_block(next_block);
+
+            // This function does not return so we can now return whatever we want.
+            let size = fx.bcx.ins().iconst(fx.pointer_type, 42);
+            let align = fx.bcx.ins().iconst(fx.pointer_type, 42);
+            (size, align)
+        }
+        ty::Adt(..) | ty::Tuple(..) => {
             // First get the size of all statically known fields.
             // Don't use size_of because it also rounds up to alignment, which we
             // want to avoid, as the unsized field's alignment could be smaller.
             assert!(!layout.ty.is_simd());

             let i = layout.fields.count() - 1;
-            let sized_size = layout.fields.offset(i).bytes();
+            let unsized_offset_unadjusted = layout.fields.offset(i).bytes();
+            let unsized_offset_unadjusted =
+                fx.bcx.ins().iconst(fx.pointer_type, unsized_offset_unadjusted as i64);
             let sized_align = layout.align.abi.bytes();
             let sized_align = fx.bcx.ins().iconst(fx.pointer_type, sized_align as i64);

             // Recurse to get the size of the dynamically sized field (must be
             // the last field).
             let field_layout = layout.field(fx, i);
-            let (unsized_size, mut unsized_align) = size_and_align_of_dst(fx, field_layout, info);
+            let (unsized_size, mut unsized_align) = size_and_align_of(fx, field_layout, info);

-            // FIXME (#26403, #27023): We should be adding padding
-            // to `sized_size` (to accommodate the `unsized_align`
-            // required of the unsized field that follows) before
-            // summing it with `sized_size`. (Note that since #26403
-            // is unfixed, we do not yet add the necessary padding
-            // here. But this is where the add would go.)
-
-            // Return the sum of sizes and max of aligns.
-            let size = fx.bcx.ins().iadd_imm(unsized_size, sized_size as i64);
-
-            // Packed types ignore the alignment of their fields.
-            if let ty::Adt(def, _) = layout.ty.kind() {
-                if def.repr().packed() {
-                    unsized_align = sized_align;
+            // # First compute the dynamic alignment
+
+            // For packed types, we need to cap the alignment.
+            if let ty::Adt(def, _) = ty.kind() {
+                if let Some(packed) = def.repr().pack {
+                    if packed.bytes() == 1 {
+                        // We know this will be capped to 1.
+                        unsized_align = fx.bcx.ins().iconst(fx.pointer_type, 1);
+                    } else {
+                        // We have to dynamically compute `min(unsized_align, packed)`.
+                        let packed = fx.bcx.ins().iconst(fx.pointer_type, packed.bytes() as i64);
+                        let cmp = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, unsized_align, packed);
+                        unsized_align = fx.bcx.ins().select(cmp, unsized_align, packed);
+                    }
                 }
             }

             // Choose max of two known alignments (combined value must
             // be aligned according to more restrictive of the two).
             let cmp = fx.bcx.ins().icmp(IntCC::UnsignedGreaterThan, sized_align, unsized_align);
-            let align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+            let full_align = fx.bcx.ins().select(cmp, sized_align, unsized_align);
+
+            // # Then compute the dynamic size
+
+            // The full formula for the size would be:
+            // let unsized_offset_adjusted = unsized_offset_unadjusted.align_to(unsized_align);
+            // let full_size = (unsized_offset_adjusted + unsized_size).align_to(full_align);
+            // However, `unsized_size` is a multiple of `unsized_align`.
+            // Therefore, we can equivalently do the `align_to(unsized_align)` *after* adding `unsized_size`:
+            // let full_size = (unsized_offset_unadjusted + unsized_size).align_to(unsized_align).align_to(full_align);
+            // Furthermore, `align >= unsized_align`, and therefore we only need to do:
+            // let full_size = (unsized_offset_unadjusted + unsized_size).align_to(full_align);
+            let full_size = fx.bcx.ins().iadd(unsized_offset_unadjusted, unsized_size);

             // Issue #27023: must add any necessary padding to `size`
             // (to make it a multiple of `align`) before returning it.
@@ -255,12 +308,13 @@ pub(crate) fn size_and_align_of_dst<'tcx>(
             // emulated via the semi-standard fast bit trick:
             //
             //   `(size + (align-1)) & -align`
-            let addend = fx.bcx.ins().iadd_imm(align, -1);
-            let add = fx.bcx.ins().iadd(size, addend);
-            let neg = fx.bcx.ins().ineg(align);
-            let size = fx.bcx.ins().band(add, neg);
+            let addend = fx.bcx.ins().iadd_imm(full_align, -1);
+            let add = fx.bcx.ins().iadd(full_size, addend);
+            let neg = fx.bcx.ins().ineg(full_align);
+            let full_size = fx.bcx.ins().band(add, neg);

-            (size, align)
+            (full_size, full_align)
         }
+        _ => bug!("size_and_align_of_dst: {ty} not supported"),
     }
 }
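
Besides the new `ty::Foreign` arm, which branches to a block that calls the nounwind panic helper ("attempted to compute the size or alignment of extern type ..."), the interesting part is the reworked `ty::Adt(..) | ty::Tuple(..)` arm. Below is a worked sketch of the arithmetic it emits, done on plain integers (illustrative host code, not compiler code; the example layout `S { header: u32, tail: [u16] }` is hypothetical): add the dynamic tail size to the tail's static offset, round up to the combined alignment with the `(size + (align - 1)) & -align` trick from the comments, and cap the tail alignment for `#[repr(packed(N))]` types.

    // Standalone sketch (host code, not compiler code) of the integer arithmetic
    // the rewritten Adt/Tuple arm emits. The example layout is hypothetical:
    // struct S { header: u32, tail: [u16] } with a 3-element tail.

    /// Round `size` up to a multiple of `align` (a power of two) using the
    /// `(size + (align - 1)) & -align` bit trick from the comments above.
    fn align_up(size: usize, align: usize) -> usize {
        (size + (align - 1)) & align.wrapping_neg()
    }

    /// Mirror of the dynamic size/align computation, on plain usize values.
    fn size_and_align_of_dst_like(
        unsized_offset_unadjusted: usize, // static byte offset of the unsized tail
        sized_align: usize,               // static alignment of the sized prefix
        unsized_size: usize,              // dynamic size of the tail
        unsized_align: usize,             // dynamic alignment of the tail
        packed: Option<usize>,            // Some(n) for #[repr(packed(n))]
    ) -> (usize, usize) {
        // For packed types, cap the tail alignment: min(unsized_align, packed).
        let unsized_align = match packed {
            Some(n) => unsized_align.min(n),
            None => unsized_align,
        };
        // The combined alignment is the more restrictive (larger) of the two.
        let full_align = sized_align.max(unsized_align);
        // full_size = align_to(unsized_offset_unadjusted + unsized_size, full_align)
        let full_size = align_up(unsized_offset_unadjusted + unsized_size, full_align);
        (full_size, full_align)
    }

    fn main() {
        // S { header: u32, tail: [u16; 3] } viewed as S { header: u32, tail: [u16] }:
        // tail offset 4, prefix align 4, tail size 3 * 2 = 6, tail align 2.
        // 4 + 6 = 10, rounded up to align 4 => size 12, align 4.
        assert_eq!(size_and_align_of_dst_like(4, 4, 6, 2, None), (12, 4));

        // The same fields under #[repr(packed)]: prefix align 1, tail align capped
        // from 2 down to 1, so 4 + 6 = 10 stays 10 and the overall align is 1.
        assert_eq!(size_and_align_of_dst_like(4, 1, 6, 2, Some(1)), (10, 1));
    }
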


@@ -20,34 +20,36 @@ fn codegen_field<'tcx>(
         (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
     };

-    if let Some(extra) = extra {
-        if field_layout.is_sized() {
-            return simple(fx);
-        }
-        match field_layout.ty.kind() {
-            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
-            ty::Adt(def, _) if def.repr().packed() => {
-                assert_eq!(layout.align.abi.bytes(), 1);
-                simple(fx)
-            }
-            _ => {
-                // We have to align the offset for DST's
-                let unaligned_offset = field_offset.bytes();
-                let (_, unsized_align) =
-                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
-
-                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
-                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
-                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
-                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
-                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
-                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
-
-                (base.offset_value(fx, offset), field_layout)
-            }
-        }
-    } else {
-        simple(fx)
+    if field_layout.is_sized() {
+        return simple(fx);
+    }
+    match field_layout.ty.kind() {
+        ty::Slice(..) | ty::Str => simple(fx),
+        _ => {
+            let unaligned_offset = field_offset.bytes();
+
+            // Get the alignment of the field
+            let (_, mut unsized_align) = crate::unsize::size_and_align_of(fx, field_layout, extra);
+
+            // For packed types, we need to cap alignment.
+            if let ty::Adt(def, _) = layout.ty.kind() {
+                if let Some(packed) = def.repr().pack {
+                    let packed = fx.bcx.ins().iconst(fx.pointer_type, packed.bytes() as i64);
+                    let cmp = fx.bcx.ins().icmp(IntCC::UnsignedLessThan, unsized_align, packed);
+                    unsized_align = fx.bcx.ins().select(cmp, unsized_align, packed);
+                }
+            }
+
+            // Bump the unaligned offset up to the appropriate alignment
+            let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
+            let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+            let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+            let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
+            let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+            let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+            (base.offset_value(fx, offset), field_layout)
+        }
     }
 }
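
`codegen_field` now applies the same rounding to the field offset itself: the statically known, unaligned offset of the unsized tail is bumped up to the tail's dynamically computed (and packed-capped) alignment. A standalone sketch with a hypothetical generic struct (`WithTail` below is illustrative, not from the diff) shows the offset this has to produce.

    // Standalone illustration (not compiler code): the dynamically aligned field
    // offset that codegen_field has to compute for an unsized tail field.
    // `WithTail` is a hypothetical example type.
    use std::mem::align_of_val;

    struct WithTail<T: ?Sized> {
        header: u8,
        tail: T,
    }

    fn main() {
        let concrete = WithTail { header: 7, tail: [10u32, 20, 30] };
        // Unsizing coercion: [u32; 3] -> [u32], so `tail` becomes a DST field.
        let dst: &WithTail<[u32]> = &concrete;
        assert_eq!(dst.header, 7);

        let base = dst as *const WithTail<[u32]> as *const u8 as usize;
        let tail_addr = dst.tail.as_ptr() as usize;

        // The unaligned offset of `tail` is 1 (right after the u8 header), but it
        // must be rounded up to the tail's alignment of 4, giving offset 4.
        assert_eq!(tail_addr - base, 4);
        assert_eq!(align_of_val(&dst.tail), 4);
    }
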
@@ -731,13 +733,8 @@ impl<'tcx> CPlace<'tcx> {
         };

         let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
-        if field_layout.is_unsized() {
-            if let ty::Foreign(_) = field_layout.ty.kind() {
-                assert!(extra.is_none());
-                CPlace::for_ptr(field_ptr, field_layout)
-            } else {
-                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
-            }
+        if has_ptr_meta(fx.tcx, field_layout.ty) {
+            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
         } else {
             CPlace::for_ptr(field_ptr, field_layout)
         }