
Auto merge of #127995 - workingjubilee:say-turings-prayer, r=BoxyUwU

compiler: Never debug_assert in codegen

In the name of Turing and his Hoarey heralds, assert our truths before creating a monster!

The `rustc_codegen_llvm` and `rustc_codegen_ssa` crates are fairly critical for rustc's correctness. Small mistakes here can easily result in undefined behavior, since a "small mistake" can mean something like "link and execute the wrong code". We should probably run any and all asserts in these crates unconditionally, regardless of whether this is a "debug build", and damn the costs in performance.

...Especially because the costs in performance seem to be *nothing*. It is not clear how much correctness we gain here, but I'll take free correctness improvements.
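
For readers unfamiliar with the distinction: `debug_assert!` compiles to nothing unless `debug-assertions` are enabled (they are off for release builds of the compiler), while `assert!` always performs the check and panics on failure. A minimal standalone sketch, not taken from this PR — the function and values are made up purely for illustration:

```rust
// Hypothetical helper, not rustc code: shows why a check that only runs in
// debug builds is weak protection for a codegen-style invariant.
fn link_symbol(addr: usize) -> usize {
    // In a release build (debug-assertions off) this line is compiled out,
    // so addr == 0 would be silently accepted and produce a bogus address.
    debug_assert!(addr != 0, "tried to link a null symbol");

    // This check runs in every build profile and aborts on violation.
    assert!(addr != 0, "tried to link a null symbol");

    addr + 0x1000 // pretend relocation
}

fn main() {
    println!("{:#x}", link_symbol(0x4000));
}
```
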
Commit 28e684b470 by bors, 2024-07-25 07:52:31 +00:00
16 changed files with 57 additions and 65 deletions

View file

@@ -330,7 +330,7 @@ impl<'ll> CodegenCx<'ll, '_> {
         // If this assertion triggers, there's something wrong with commandline
         // argument validation.
-        debug_assert!(
+        assert!(
             !(self.tcx.sess.opts.cg.linker_plugin_lto.enabled()
                 && self.tcx.sess.target.is_like_windows
                 && self.tcx.sess.opts.cg.prefer_dynamic)

View file

@@ -170,7 +170,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
 ) -> DINodeCreationResult<'ll> {
     // The debuginfo generated by this function is only valid if `ptr_type` is really just
     // a (fat) pointer. Make sure it is not called for e.g. `Box<T, NonZSTAllocator>`.
-    debug_assert_eq!(
+    assert_eq!(
         cx.size_and_align_of(ptr_type),
         cx.size_and_align_of(Ty::new_mut_ptr(cx.tcx, pointee_type))
     );
@@ -185,7 +185,7 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
     match fat_pointer_kind(cx, pointee_type) {
         None => {
             // This is a thin pointer. Create a regular pointer type and give it the correct name.
-            debug_assert_eq!(
+            assert_eq!(
                 (data_layout.pointer_size, data_layout.pointer_align.abi),
                 cx.size_and_align_of(ptr_type),
                 "ptr_type={ptr_type}, pointee_type={pointee_type}",
@@ -240,8 +240,8 @@ fn build_pointer_or_reference_di_node<'ll, 'tcx>(
                 FatPtrKind::Slice => ("data_ptr", "length"),
             };
-            debug_assert_eq!(abi::FAT_PTR_ADDR, 0);
-            debug_assert_eq!(abi::FAT_PTR_EXTRA, 1);
+            assert_eq!(abi::FAT_PTR_ADDR, 0);
+            assert_eq!(abi::FAT_PTR_EXTRA, 1);
             // The data pointer type is a regular, thin pointer, regardless of whether this
             // is a slice or a trait object.
@@ -498,7 +498,7 @@ pub fn type_di_node<'ll, 'tcx>(cx: &CodegenCx<'ll, 'tcx>, t: Ty<'tcx>) -> &'ll D
             }
         };
-        debug_assert_eq!(di_node_for_uid as *const _, di_node as *const _);
+        assert_eq!(di_node_for_uid as *const _, di_node as *const _);
     } else {
         debug_context(cx).type_map.insert(unique_type_id, di_node);
     }
@@ -1060,7 +1060,7 @@ fn build_struct_type_di_node<'ll, 'tcx>(
     let ty::Adt(adt_def, _) = struct_type.kind() else {
         bug!("build_struct_type_di_node() called with non-struct-type: {:?}", struct_type);
     };
-    debug_assert!(adt_def.is_struct());
+    assert!(adt_def.is_struct());
     let containing_scope = get_namespace_for_item(cx, adt_def.did());
     let struct_type_and_layout = cx.layout_of(struct_type);
     let variant_def = adt_def.non_enum_variant();
@@ -1130,7 +1130,7 @@ fn build_upvar_field_di_nodes<'ll, 'tcx>(
         }
     };
-    debug_assert!(
+    assert!(
         up_var_tys.iter().all(|t| t == cx.tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t))
     );

View file

@@ -204,7 +204,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
     let enum_type_and_layout = cx.layout_of(enum_type);
     let enum_type_name = compute_debuginfo_type_name(cx.tcx, enum_type, false);
-    debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+    assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
     type_map::build_type_with_children(
         cx,
@@ -279,7 +279,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
     let coroutine_type_and_layout = cx.layout_of(coroutine_type);
     let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);
-    debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+    assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
     type_map::build_type_with_children(
         cx,
@@ -517,7 +517,7 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
             if is_128_bits {
                 DiscrKind::Exact128(discr_val)
             } else {
-                debug_assert_eq!(discr_val, discr_val as u64 as u128);
+                assert_eq!(discr_val, discr_val as u64 as u128);
                 DiscrKind::Exact(discr_val as u64)
             }
         }
@@ -526,8 +526,8 @@ fn build_variant_struct_wrapper_type_di_node<'ll, 'tcx>(
             if is_128_bits {
                 DiscrKind::Range128(min, max)
             } else {
-                debug_assert_eq!(min, min as u64 as u128);
-                debug_assert_eq!(max, max as u64 as u128);
+                assert_eq!(min, min as u64 as u128);
+                assert_eq!(max, max as u64 as u128);
                 DiscrKind::Range(min as u64, max as u64)
             }
         }
@@ -815,7 +815,7 @@ fn build_union_fields_for_direct_tag_enum_or_coroutine<'ll, 'tcx>(
         }
     }));
-    debug_assert_eq!(
+    assert_eq!(
         cx.size_and_align_of(enum_type_and_layout.field(cx, tag_field).ty),
         cx.size_and_align_of(super::tag_base_type(cx, enum_type_and_layout))
     );

View file

@@ -106,7 +106,7 @@ fn tag_base_type<'ll, 'tcx>(
     cx: &CodegenCx<'ll, 'tcx>,
     enum_type_and_layout: TyAndLayout<'tcx>,
 ) -> Ty<'tcx> {
-    debug_assert!(match enum_type_and_layout.ty.kind() {
+    assert!(match enum_type_and_layout.ty.kind() {
         ty::Coroutine(..) => true,
         ty::Adt(adt_def, _) => adt_def.is_enum(),
         _ => false,
@@ -251,7 +251,7 @@ fn build_enum_variant_struct_type_di_node<'ll, 'tcx>(
     variant_layout: TyAndLayout<'tcx>,
     di_flags: DIFlags,
 ) -> &'ll DIType {
-    debug_assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
+    assert_eq!(variant_layout.ty, enum_type_and_layout.ty);
     type_map::build_type_with_children(
         cx,

View file

@@ -65,7 +65,7 @@ pub(super) fn build_enum_type_di_node<'ll, 'tcx>(
     let visibility_flags = visibility_di_flags(cx, enum_adt_def.did(), enum_adt_def.did());
-    debug_assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
+    assert!(!wants_c_like_enum_debuginfo(enum_type_and_layout));
     type_map::build_type_with_children(
         cx,
@@ -142,7 +142,7 @@ pub(super) fn build_coroutine_di_node<'ll, 'tcx>(
     let containing_scope = get_namespace_for_item(cx, coroutine_def_id);
     let coroutine_type_and_layout = cx.layout_of(coroutine_type);
-    debug_assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
+    assert!(!wants_c_like_enum_debuginfo(coroutine_type_and_layout));
     let coroutine_type_name = compute_debuginfo_type_name(cx.tcx, coroutine_type, false);

View file

@@ -36,7 +36,7 @@ mod private {
 /// A unique identifier for anything that we create a debuginfo node for.
 /// The types it contains are expected to already be normalized (which
-/// is debug_asserted in the constructors).
+/// is asserted in the constructors).
 ///
 /// Note that there are some things that only show up in debuginfo, like
 /// the separate type descriptions for each enum variant. These get an ID
@@ -58,12 +58,12 @@ pub(super) enum UniqueTypeId<'tcx> {
 impl<'tcx> UniqueTypeId<'tcx> {
     pub fn for_ty(tcx: TyCtxt<'tcx>, t: Ty<'tcx>) -> Self {
-        debug_assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
+        assert_eq!(t, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), t));
         UniqueTypeId::Ty(t, private::HiddenZst)
     }
     pub fn for_enum_variant_part(tcx: TyCtxt<'tcx>, enum_ty: Ty<'tcx>) -> Self {
-        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
         UniqueTypeId::VariantPart(enum_ty, private::HiddenZst)
     }
@@ -72,7 +72,7 @@ impl<'tcx> UniqueTypeId<'tcx> {
         enum_ty: Ty<'tcx>,
         variant_idx: VariantIdx,
     ) -> Self {
-        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
         UniqueTypeId::VariantStructType(enum_ty, variant_idx, private::HiddenZst)
     }
@@ -81,7 +81,7 @@ impl<'tcx> UniqueTypeId<'tcx> {
         enum_ty: Ty<'tcx>,
         variant_idx: VariantIdx,
     ) -> Self {
-        debug_assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
+        assert_eq!(enum_ty, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), enum_ty));
         UniqueTypeId::VariantStructTypeCppLikeWrapper(enum_ty, variant_idx, private::HiddenZst)
     }
@@ -90,11 +90,8 @@ impl<'tcx> UniqueTypeId<'tcx> {
         self_type: Ty<'tcx>,
         implemented_trait: Option<PolyExistentialTraitRef<'tcx>>,
     ) -> Self {
-        debug_assert_eq!(
-            self_type,
-            tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type)
-        );
-        debug_assert_eq!(
+        assert_eq!(self_type, tcx.normalize_erasing_regions(ParamEnv::reveal_all(), self_type));
+        assert_eq!(
             implemented_trait,
             tcx.normalize_erasing_regions(ParamEnv::reveal_all(), implemented_trait)
         );
@@ -252,10 +249,7 @@ pub(super) fn build_type_with_children<'ll, 'tcx>(
     members: impl FnOnce(&CodegenCx<'ll, 'tcx>, &'ll DIType) -> SmallVec<&'ll DIType>,
     generics: impl FnOnce(&CodegenCx<'ll, 'tcx>) -> SmallVec<&'ll DIType>,
 ) -> DINodeCreationResult<'ll> {
-    debug_assert_eq!(
-        debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id),
-        None
-    );
+    assert_eq!(debug_context(cx).type_map.di_node_for_unique_id(stub_info.unique_type_id), None);
     debug_context(cx).type_map.insert(stub_info.unique_type_id, stub_info.metadata);

View file

@@ -81,7 +81,7 @@ pub(crate) fn fat_pointer_kind<'ll, 'tcx>(
         ty::Dynamic(..) => Some(FatPtrKind::Dyn),
         ty::Foreign(_) => {
             // Assert that pointers to foreign types really are thin:
-            debug_assert_eq!(
+            assert_eq!(
                 cx.size_of(Ty::new_imm_ptr(cx.tcx, pointee_tail_ty)),
                 cx.size_of(Ty::new_imm_ptr(cx.tcx, cx.tcx.types.u8))
             );

View file

@@ -352,7 +352,7 @@ fn exported_symbols_provider_local(
             }
             MonoItem::Fn(Instance { def: InstanceKind::DropGlue(def_id, Some(ty)), args }) => {
                 // A little sanity-check
-                debug_assert_eq!(
+                assert_eq!(
                     args.non_erasable_generics(tcx, def_id).next(),
                     Some(GenericArgKind::Type(ty))
                 );
@@ -370,7 +370,7 @@ fn exported_symbols_provider_local(
                 args,
             }) => {
                 // A little sanity-check
-                debug_assert_eq!(
+                assert_eq!(
                     args.non_erasable_generics(tcx, def_id).next(),
                     Some(GenericArgKind::Type(ty))
                 );
@@ -462,7 +462,7 @@ fn upstream_monomorphizations_for_provider(
     tcx: TyCtxt<'_>,
     def_id: DefId,
 ) -> Option<&UnordMap<GenericArgsRef<'_>, CrateNum>> {
-    debug_assert!(!def_id.is_local());
+    assert!(!def_id.is_local());
     tcx.upstream_monomorphizations(()).get(&def_id)
 }

View file

@@ -1512,7 +1512,7 @@ fn start_executing_work<B: ExtraBackendMethods>(
                     // We reduce the `running` counter by one. The
                     // `tokens.truncate()` below will take care of
                     // giving the Token back.
-                    debug_assert!(running_with_own_token > 0);
+                    assert!(running_with_own_token > 0);
                     running_with_own_token -= 1;
                     main_thread_state = MainThreadState::Lending;
                 }

View file

@@ -459,7 +459,7 @@ fn push_debuginfo_type_name<'tcx>(
         output: &mut String,
         visited: &mut FxHashSet<Ty<'tcx>>,
     ) {
-        debug_assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
+        assert!(!wants_c_like_enum_debuginfo(ty_and_layout));
         output.push_str("enum2$<");
         push_inner(output, visited);
         push_close_angle_bracket(true, output);
@@ -660,7 +660,7 @@ fn push_generic_params_internal<'tcx>(
     output: &mut String,
     visited: &mut FxHashSet<Ty<'tcx>>,
 ) -> bool {
-    debug_assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
+    assert_eq!(args, tcx.normalize_erasing_regions(ty::ParamEnv::reveal_all(), args));
     let mut args = args.non_erasable_generics(tcx, def_id).peekable();
     if args.peek().is_none() {
         return false;

View file

@@ -84,7 +84,7 @@ impl<'a, 'tcx> TerminatorCodegenHelper<'tcx> {
             }
             if is_cleanupret {
                 // Cross-funclet jump - need a trampoline
-                debug_assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
+                assert!(base::wants_new_eh_instructions(fx.cx.tcx().sess));
                 debug!("llbb_with_cleanup: creating cleanup trampoline for {:?}", target);
                 let name = &format!("{:?}_cleanup_trampoline_{:?}", self.bb, target);
                 let trampoline_llbb = Bx::append_block(fx.cx, fx.llfn, name);

View file

@@ -194,7 +194,7 @@ fn calculate_debuginfo_offset<
             }
             _ => {
                 // Sanity check for `can_use_in_debuginfo`.
-                debug_assert!(!elem.can_use_in_debuginfo());
+                assert!(!elem.can_use_in_debuginfo());
                 bug!("unsupported var debuginfo projection `{:?}`", projection)
             }
         }
@@ -502,7 +502,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             let DebugInfoOffset { direct_offset, indirect_offsets, result: fragment_layout } =
                 calculate_debuginfo_offset(bx, &fragment.projection, var_layout);
-            debug_assert!(indirect_offsets.is_empty());
+            assert!(indirect_offsets.is_empty());
             if fragment_layout.size == Size::ZERO {
                 // Fragment is a ZST, so does not represent anything. Avoid generating anything

View file

@@ -565,7 +565,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         for elem in place_ref.projection.iter() {
             match elem {
                 mir::ProjectionElem::Field(ref f, _) => {
-                    debug_assert!(
+                    assert!(
                         !o.layout.ty.is_any_ptr(),
                         "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
                          but tried to access field {f:?} of pointer {o:?}",

View file

@@ -55,7 +55,7 @@ impl<V: CodegenObject> PlaceValue<V> {
     /// Creates a `PlaceRef` to this location with the given type.
     pub fn with_type<'tcx>(self, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
-        debug_assert!(
+        assert!(
             layout.is_unsized() || layout.abi.is_uninhabited() || self.llextra.is_none(),
             "Had pointer metadata {:?} for sized type {layout:?}",
             self.llextra,
@@ -488,7 +488,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             cg_base = match *elem {
                 mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                 mir::ProjectionElem::Field(ref field, _) => {
-                    debug_assert!(
+                    assert!(
                         !cg_base.layout.ty.is_any_ptr(),
                         "Bad PlaceRef: destructing pointers should use cast/PtrMetadata, \
                          but tried to access field {field:?} of pointer {cg_base:?}",

View file

@@ -168,8 +168,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         dst: PlaceRef<'tcx, Bx::Value>,
     ) {
         // The MIR validator enforces no unsized transmutes.
-        debug_assert!(src.layout.is_sized());
-        debug_assert!(dst.layout.is_sized());
+        assert!(src.layout.is_sized());
+        assert!(dst.layout.is_sized());
         if let Some(val) = self.codegen_transmute_operand(bx, src, dst.layout) {
             val.store(bx, dst);
@@ -223,8 +223,8 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         match operand.val {
             OperandValue::Ref(source_place_val) => {
-                debug_assert_eq!(source_place_val.llextra, None);
-                debug_assert!(matches!(operand_kind, OperandValueKind::Ref));
+                assert_eq!(source_place_val.llextra, None);
+                assert!(matches!(operand_kind, OperandValueKind::Ref));
                 Some(bx.load_operand(source_place_val.with_type(cast)).val)
             }
             OperandValue::ZeroSized => {
@@ -295,7 +295,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         to_scalar: abi::Scalar,
         to_backend_ty: Bx::Type,
     ) -> Bx::Value {
-        debug_assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
+        assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx));
         use abi::Primitive::*;
         imm = bx.from_immediate(imm);
@@ -639,9 +639,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 (OperandValue::Immediate(llval), operand.layout)
             }
             mir::UnOp::PtrMetadata => {
-                debug_assert!(
-                    operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),
-                );
+                assert!(operand.layout.ty.is_unsafe_ptr() || operand.layout.ty.is_ref(),);
                 let (_, meta) = operand.val.pointer_parts();
                 assert_eq!(operand.layout.fields.count() > 1, meta.is_some());
                 if let Some(meta) = meta {
@@ -651,7 +649,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 }
             }
         };
-        debug_assert!(
+        assert!(
            val.is_expected_variant_for_type(self.cx, layout),
            "Made wrong variant {val:?} for type {layout:?}",
        );
@@ -742,7 +740,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     bug!("Field {field_idx:?} is {p:?} making {layout:?}");
                 });
                 let scalars = self.value_kind(op.layout).scalars().unwrap();
-                debug_assert_eq!(values.len(), scalars.len());
+                assert_eq!(values.len(), scalars.len());
                 inputs.extend(values);
                 input_scalars.extend(scalars);
             }
@@ -760,7 +758,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         );
         let val = OperandValue::from_immediates(inputs);
-        debug_assert!(
+        assert!(
             val.is_expected_variant_for_type(self.cx, layout),
             "Made wrong variant {val:?} for type {layout:?}",
         );
@@ -805,7 +803,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                 let val = cg_place.val.address();
                 let ty = cg_place.layout.ty;
-                debug_assert!(
+                assert!(
                     if bx.cx().type_has_metadata(ty) {
                         matches!(val, OperandValue::Pair(..))
                     } else {
@@ -927,7 +925,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
             }
             mir::BinOp::Cmp => {
                 use std::cmp::Ordering;
-                debug_assert!(!is_float);
+                assert!(!is_float);
                 let pred = |op| base::bin_op_to_icmp_predicate(op, is_signed);
                 if bx.cx().tcx().sess.opts.optimize == OptLevel::No {
                     // FIXME: This actually generates tighter assembly, and is a classic trick
@@ -1111,7 +1109,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         if layout.is_zst() {
             OperandValueKind::ZeroSized
         } else if self.cx.is_backend_immediate(layout) {
-            debug_assert!(!self.cx.is_backend_scalar_pair(layout));
+            assert!(!self.cx.is_backend_scalar_pair(layout));
             OperandValueKind::Immediate(match layout.abi {
                 abi::Abi::Scalar(s) => s,
                 abi::Abi::Vector { element, .. } => element,

View file

@@ -165,7 +165,7 @@ pub trait BuilderMethods<'a, 'tcx>:
         size: Size,
     ) -> Self::Value;
     fn load_from_place(&mut self, ty: Self::Type, place: PlaceValue<Self::Value>) -> Self::Value {
-        debug_assert_eq!(place.llextra, None);
+        assert_eq!(place.llextra, None);
         self.load(ty, place.llval, place.align)
     }
     fn load_operand(&mut self, place: PlaceRef<'tcx, Self::Value>)
@@ -184,7 +184,7 @@ pub trait BuilderMethods<'a, 'tcx>:
     fn store(&mut self, val: Self::Value, ptr: Self::Value, align: Align) -> Self::Value;
     fn store_to_place(&mut self, val: Self::Value, place: PlaceValue<Self::Value>) -> Self::Value {
-        debug_assert_eq!(place.llextra, None);
+        assert_eq!(place.llextra, None);
         self.store(val, place.llval, place.align)
     }
     fn store_with_flags(
@@ -200,7 +200,7 @@ pub trait BuilderMethods<'a, 'tcx>:
         place: PlaceValue<Self::Value>,
         flags: MemFlags,
     ) -> Self::Value {
-        debug_assert_eq!(place.llextra, None);
+        assert_eq!(place.llextra, None);
         self.store_with_flags(val, place.llval, place.align, flags)
     }
     fn atomic_store(
@@ -320,9 +320,9 @@ pub trait BuilderMethods<'a, 'tcx>:
         layout: TyAndLayout<'tcx>,
         flags: MemFlags,
     ) {
-        debug_assert!(layout.is_sized(), "cannot typed-copy an unsigned type");
-        debug_assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
-        debug_assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
+        assert!(layout.is_sized(), "cannot typed-copy an unsigned type");
+        assert!(src.llextra.is_none(), "cannot directly copy from unsized values");
+        assert!(dst.llextra.is_none(), "cannot directly copy into unsized values");
         if flags.contains(MemFlags::NONTEMPORAL) {
             // HACK(nox): This is inefficient but there is no nontemporal memcpy.
             let ty = self.backend_type(layout);