
Check CastKind::Transmute sizes in a better way

Fixes #110005
Scott McMurray 2023-04-06 13:53:10 -07:00
parent 7f6edd3f15
commit 454bca514a
3 changed files with 134 additions and 32 deletions
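For context, the new `check_issue_110005` codegen test below pins down the reported shape: a transmute between two scalar pairs of equal total size whose halves differ in size. A compiling user-level sketch of that shape (the function name is illustrative):

// Both sides are 16 bytes on 64-bit targets, but the pair halves differ:
// (usize, bool) vs. the (pointer, length) representation of Option<Box<[u8]>>.
// Component-wise bit-conversion is not possible here, so codegen must go
// through memory instead.
pub unsafe fn repro_110005(x: (usize, bool)) -> Option<Box<[u8]>> {
    std::mem::transmute(x)
}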

View file

@@ -259,6 +259,31 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> {
 }

 impl<'a, 'tcx, V: CodegenObject> OperandValue<V> {
+    /// Returns an `OperandValue` that's generally UB to use in any way.
+    ///
+    /// Depending on the `layout`, returns an `Immediate` or `Pair` containing
+    /// poison value(s), or a `Ref` containing a poison pointer.
+    ///
+    /// Supports sized types only.
+    pub fn poison<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
+        bx: &mut Bx,
+        layout: TyAndLayout<'tcx>,
+    ) -> OperandValue<V> {
+        assert!(layout.is_sized());
+        if bx.cx().is_backend_immediate(layout) {
+            let ibty = bx.cx().immediate_backend_type(layout);
+            OperandValue::Immediate(bx.const_poison(ibty))
+        } else if bx.cx().is_backend_scalar_pair(layout) {
+            let ibty0 = bx.cx().scalar_pair_element_backend_type(layout, 0, true);
+            let ibty1 = bx.cx().scalar_pair_element_backend_type(layout, 1, true);
+            OperandValue::Pair(bx.const_poison(ibty0), bx.const_poison(ibty1))
+        } else {
+            let bty = bx.cx().backend_type(layout);
+            let ptr_bty = bx.cx().type_ptr_to(bty);
+            OperandValue::Ref(bx.const_poison(ptr_bty), None, layout.align.abi)
+        }
+    }
+
     pub fn store<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
         self,
         bx: &mut Bx,
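The three branches of `poison` correspond to the three ways a value can live in codegen. A toy model of which shape a layout gets (invented names, nothing here is rustc API):

// Toy model only: which poison shape a layout gets.
#[derive(Debug)]
enum PoisonShape {
    Immediate, // single scalar, e.g. u32: one poison value
    Pair,      // scalar pair, e.g. (usize, bool): two poison values
    Ref,       // memory-backed, e.g. [u32; 7]: a poison pointer
}

fn shape_for(is_immediate: bool, is_scalar_pair: bool) -> PoisonShape {
    if is_immediate {
        PoisonShape::Immediate
    } else if is_scalar_pair {
        PoisonShape::Pair
    } else {
        PoisonShape::Ref
    }
}

fn main() {
    println!("{:?}", shape_for(true, false));  // u32 -> Immediate
    println!("{:?}", shape_for(false, true));  // (usize, bool) -> Pair
    println!("{:?}", shape_for(false, false)); // [u32; 7] -> Ref
}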

View file

@@ -158,17 +158,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         debug_assert!(src.layout.is_sized());
         debug_assert!(dst.layout.is_sized());

-        if src.layout.size != dst.layout.size
-            || src.layout.abi.is_uninhabited()
-            || dst.layout.abi.is_uninhabited()
-        {
-            // In all of these cases it's UB to run this transmute, but that's
-            // known statically so might as well trap for it, rather than just
-            // making it unreachable.
-            bx.abort();
-            return;
-        }
-
         if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
             val.store(bx, dst);
             return;
@@ -202,8 +191,21 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         operand: OperandRef<'tcx, Bx::Value>,
         cast: TyAndLayout<'tcx>,
     ) -> Option<OperandValue<Bx::Value>> {
-        // Callers already checked that the layout sizes match
-        debug_assert_eq!(operand.layout.size, cast.size);
+        // Check for transmutes that are always UB.
+        if operand.layout.size != cast.size
+            || operand.layout.abi.is_uninhabited()
+            || cast.abi.is_uninhabited()
+        {
+            if !operand.layout.abi.is_uninhabited() {
+                // Since this is known statically and the input could have existed
+                // without already having hit UB, might as well trap for it.
+                bx.abort();
+            }
+
+            // Because this transmute is UB, return something easy to generate,
+            // since it's fine that later uses of the value are probably UB.
+            return Some(OperandValue::poison(bx, cast));
+        }

         let operand_kind = self.value_kind(operand.layout);
         let cast_kind = self.value_kind(cast);
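The asymmetry here (trap only when the input type is inhabited) shows up in the test updates below: transmuting *to* an uninhabited type still hits `llvm.trap`, while transmuting *from* one now just produces poison. A rough user-level sketch of the two cases, assuming a hypothetical same-size uninhabited wrapper; the real tests use `#[custom_mir]` so MIR optimizations cannot simplify the transmute away first:

use std::mem::transmute;

pub enum Void {}

// Hypothetical: two bytes of storage, but uninhabited because of the Void field.
#[repr(C)]
pub struct NeverU16(u16, Void);

// Inhabited -> uninhabited: a `u16` argument can actually exist,
// so codegen emits a trap (the `check_to_uninhabited` case).
pub unsafe fn to_never(x: u16) -> NeverU16 {
    transmute(x)
}

// Uninhabited -> inhabited: the input can never exist, so there is nothing
// to trap for; codegen can return poison directly (the
// `check_from_uninhabited` case, `ret i16 poison` in the test below).
pub unsafe fn from_never(x: NeverU16) -> u16 {
    transmute(x)
}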
@@ -221,11 +223,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let OperandValueKind::Immediate(in_scalar) = operand_kind else {
                 bug!("Found {operand_kind:?} for operand {operand:?}");
             };
-            if let OperandValueKind::Immediate(out_scalar) = cast_kind {
+            if let OperandValueKind::Immediate(out_scalar) = cast_kind
+                && in_scalar.size(self.cx) == out_scalar.size(self.cx)
+            {
                 let cast_bty = bx.backend_type(cast);
-                Some(OperandValue::Immediate(Self::transmute_immediate(
-                    bx, imm, in_scalar, out_scalar, cast_bty,
-                )))
+                Some(OperandValue::Immediate(
+                    self.transmute_immediate(bx, imm, in_scalar, out_scalar, cast_bty),
+                ))
             } else {
                 None
             }
@@ -234,12 +238,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let OperandValueKind::Pair(in_a, in_b) = operand_kind else {
                 bug!("Found {operand_kind:?} for operand {operand:?}");
             };
-            if let OperandValueKind::Pair(out_a, out_b) = cast_kind {
+            if let OperandValueKind::Pair(out_a, out_b) = cast_kind
+                && in_a.size(self.cx) == out_a.size(self.cx)
+                && in_b.size(self.cx) == out_b.size(self.cx)
+            {
                 let out_a_ibty = bx.scalar_pair_element_backend_type(cast, 0, false);
                 let out_b_ibty = bx.scalar_pair_element_backend_type(cast, 1, false);
                 Some(OperandValue::Pair(
-                    Self::transmute_immediate(bx, imm_a, in_a, out_a, out_a_ibty),
-                    Self::transmute_immediate(bx, imm_b, in_b, out_b, out_b_ibty),
+                    self.transmute_immediate(bx, imm_a, in_a, out_a, out_a_ibty),
+                    self.transmute_immediate(bx, imm_b, in_b, out_b, out_b_ibty),
                 ))
             } else {
                 None
@@ -254,12 +261,15 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
     /// `to_backend_ty` must be the *non*-immediate backend type (so it will be
     /// `i8`, not `i1`, for `bool`-like types.)
     fn transmute_immediate(
+        &self,
         bx: &mut Bx,
         mut imm: Bx::Value,
         from_scalar: abi::Scalar,
         to_scalar: abi::Scalar,
         to_backend_ty: Bx::Type,
     ) -> Bx::Value {
+        debug_assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
+
         use abi::Primitive::*;
         imm = bx.from_immediate(imm);
         imm = match (from_scalar.primitive(), to_scalar.primitive()) {
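The `i8`-not-`i1` requirement matters for `bool`-like scalars: LLVM represents an immediate `bool` as `i1`, but its in-memory (and transmute) type is `i8`. A sketch of the user-visible effect; the IR in the comment follows the pattern checked in the tests below rather than being quoted from the commit:

// Transmuting a bool first widens the i1 immediate to its memory type,
// roughly: %wide = zext i1 %b to i8 ; ret i8 %wide
pub unsafe fn bool_to_byte(b: bool) -> u8 {
    std::mem::transmute(b)
}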
@@ -831,14 +841,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         let operand_ty = operand.ty(self.mir, self.cx.tcx());
         let cast_layout = self.cx.layout_of(self.monomorphize(cast_ty));
         let operand_layout = self.cx.layout_of(self.monomorphize(operand_ty));
-        if operand_layout.size != cast_layout.size
-            || operand_layout.abi.is_uninhabited()
-            || cast_layout.abi.is_uninhabited()
-        {
-            // Send UB cases to the full form so the operand version can
-            // `bitcast` without worrying about malformed IR.
-            return false;
-        }

         match (self.value_kind(operand_layout), self.value_kind(cast_layout)) {
             // Can always load from a pointer as needed
@@ -847,9 +849,12 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             // Need to generate an `alloc` to get a pointer from an immediate
             (OperandValueKind::Immediate(..) | OperandValueKind::Pair(..), OperandValueKind::Ref) => false,

-            // When we have scalar immediates, we can convert them as needed
-            (OperandValueKind::Immediate(..), OperandValueKind::Immediate(..)) |
-            (OperandValueKind::Pair(..), OperandValueKind::Pair(..)) => true,
+            // When we have scalar immediates, we can only convert things
+            // where the sizes match, to avoid endianness questions.
+            (OperandValueKind::Immediate(a), OperandValueKind::Immediate(b)) =>
+                a.size(self.cx) == b.size(self.cx),
+            (OperandValueKind::Pair(a0, a1), OperandValueKind::Pair(b0, b1)) =>
+                a0.size(self.cx) == b0.size(self.cx) && a1.size(self.cx) == b1.size(self.cx),

             // Send mixings between scalars and pairs through the memory route
             // FIXME: Maybe this could use insertvalue/extractvalue instead?
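The endianness concern is concrete: reinterpreting a value as differently sized pieces yields different pieces depending on byte order, so such conversions must go through memory, where byte order is well defined. A self-contained illustration (this particular case takes the memory route anyway, since arrays are never immediates):

fn main() {
    // On little-endian targets this prints [5060708, 1020304];
    // on big-endian it would print [1020304, 5060708].
    let halves: [u32; 2] = unsafe { std::mem::transmute(0x0102030405060708u64) };
    println!("{halves:x?}");
}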

View file

@@ -54,6 +54,32 @@ pub unsafe fn check_smaller_size(x: u32) -> u16 {
     }
 }

+// CHECK-LABEL: @check_smaller_array(
+#[no_mangle]
+#[custom_mir(dialect = "runtime", phase = "initial")]
+pub unsafe fn check_smaller_array(x: [u32; 7]) -> [u32; 3] {
+    // CHECK: call void @llvm.trap
+    mir!{
+        {
+            RET = CastTransmute(x);
+            Return()
+        }
+    }
+}
+
+// CHECK-LABEL: @check_bigger_array(
+#[no_mangle]
+#[custom_mir(dialect = "runtime", phase = "initial")]
+pub unsafe fn check_bigger_array(x: [u32; 3]) -> [u32; 7] {
+    // CHECK: call void @llvm.trap
+    mir!{
+        {
+            RET = CastTransmute(x);
+            Return()
+        }
+    }
+}
+
 // CHECK-LABEL: @check_to_uninhabited(
 #[no_mangle]
 #[custom_mir(dialect = "runtime", phase = "initial")]
@@ -71,7 +97,7 @@ pub unsafe fn check_to_uninhabited(x: u16) -> BigNever {
 #[no_mangle]
 #[custom_mir(dialect = "runtime", phase = "initial")]
 pub unsafe fn check_from_uninhabited(x: BigNever) -> u16 {
-    // CHECK: call void @llvm.trap
+    // CHECK: ret i16 poison
     mir!{
         {
             RET = CastTransmute(x);
@@ -301,3 +327,49 @@ pub unsafe fn check_pair_to_array(x: (i64, u64)) -> [u8; 16] {
     // CHECK: store i64 %x.1, ptr %{{.+}}, align 1
     transmute(x)
 }
+
+// CHECK-LABEL: @check_heterogeneous_integer_pair(
+#[no_mangle]
+pub unsafe fn check_heterogeneous_integer_pair(x: (i32, bool)) -> (bool, u32) {
+    // CHECK: store i32 %x.0
+    // CHECK: %[[WIDER:.+]] = zext i1 %x.1 to i8
+    // CHECK: store i8 %[[WIDER]]
+    // CHECK: %[[BYTE:.+]] = load i8
+    // CHECK: trunc i8 %[[BYTE]] to i1
+    // CHECK: load i32
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_heterogeneous_float_pair(
+#[no_mangle]
+pub unsafe fn check_heterogeneous_float_pair(x: (f64, f32)) -> (f32, f64) {
+    // CHECK: store double %x.0
+    // CHECK: store float %x.1
+    // CHECK: %[[A:.+]] = load float
+    // CHECK: %[[B:.+]] = load double
+    // CHECK: %[[P:.+]] = insertvalue { float, double } poison, float %[[A]], 0
+    // CHECK: insertvalue { float, double } %[[P]], double %[[B]], 1
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_issue_110005(
+#[no_mangle]
+pub unsafe fn check_issue_110005(x: (usize, bool)) -> Option<Box<[u8]>> {
+    // CHECK: store i64 %x.0
+    // CHECK: %[[WIDER:.+]] = zext i1 %x.1 to i8
+    // CHECK: store i8 %[[WIDER]]
+    // CHECK: load ptr
+    // CHECK: load i64
+    transmute(x)
+}
+
+// CHECK-LABEL: @check_pair_to_dst_ref(
+#[no_mangle]
+pub unsafe fn check_pair_to_dst_ref<'a>(x: (usize, usize)) -> &'a [u8] {
+    // CHECK: %0 = inttoptr i64 %x.0 to ptr
+    // CHECK: %1 = insertvalue { ptr, i64 } poison, ptr %0, 0
+    // CHECK: %2 = insertvalue { ptr, i64 } %1, i64 %x.1, 1
+    // CHECK: ret { ptr, i64 } %2
+    transmute(x)
+}