diff --git a/example/alloc_system.rs b/example/alloc_system.rs
index e756b347e89..56ff84e4bdf 100644
--- a/example/alloc_system.rs
+++ b/example/alloc_system.rs
@@ -12,6 +12,7 @@
               target_arch = "mips",
               target_arch = "mips32r6",
               target_arch = "powerpc",
+              target_arch = "csky",
               target_arch = "powerpc64"))]
 const MIN_ALIGN: usize = 8;
 #[cfg(any(target_arch = "x86_64",
diff --git a/example/mini_core.rs b/example/mini_core.rs
index 58df29bb625..34328520343 100644
--- a/example/mini_core.rs
+++ b/example/mini_core.rs
@@ -429,6 +429,15 @@ fn panic_cannot_unwind() -> ! {
     }
 }

+#[lang = "panic_in_cleanup"]
+#[rustc_nounwind]
+fn panic_in_cleanup() -> ! {
+    unsafe {
+        libc::printf("panic in a destructor during cleanup\n\0" as *const str as *const i8);
+        intrinsics::abort();
+    }
+}
+
 #[lang = "panic_bounds_check"]
 #[track_caller]
 fn panic_bounds_check(index: usize, len: usize) -> ! {
diff --git a/failing-ui-tests.txt b/failing-ui-tests.txt
index 8ec151f7838..ed56a11a170 100644
--- a/failing-ui-tests.txt
+++ b/failing-ui-tests.txt
@@ -68,3 +68,6 @@ tests/ui/lto/thin-lto-global-allocator.rs
 tests/ui/lto/msvc-imp-present.rs
 tests/ui/lto/lto-thin-rustc-loads-linker-plugin.rs
 tests/ui/lto/all-crates.rs
+tests/ui/async-await/deep-futures-are-freeze.rs
+tests/ui/closures/capture-unsized-by-ref.rs
+tests/ui/generator/resume-after-return.rs
diff --git a/rust-toolchain b/rust-toolchain
index 1b60d708007..25a1cea98cc 100644
--- a/rust-toolchain
+++ b/rust-toolchain
@@ -1,3 +1,3 @@
 [toolchain]
-channel = "nightly-2023-08-12"
+channel = "nightly-2023-10-08"
 components = ["rust-src", "rustc-dev", "llvm-tools-preview"]
diff --git a/src/abi.rs b/src/abi.rs
index 813abaac793..35bb0b6e5f4 100644
--- a/src/abi.rs
+++ b/src/abi.rs
@@ -115,7 +115,7 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         match self.ret.mode {
             PassMode::Ignore => cx.type_void(),
             PassMode::Direct(_) | PassMode::Pair(..) => self.ret.layout.immediate_gcc_type(cx),
-            PassMode::Cast(ref cast, _) => cast.gcc_type(cx),
+            PassMode::Cast { ref cast, .. } => cast.gcc_type(cx),
             PassMode::Indirect { .. } => {
                 argument_tys.push(cx.type_ptr_to(self.ret.memory_ty(cx)));
                 cx.type_void()
@@ -141,11 +141,11 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
         let arg_ty = match arg.mode {
             PassMode::Ignore => continue,
             PassMode::Pair(a, b) => {
-                argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 0, true), &a));
-                argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 1, true), &b));
+                argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 0), &a));
+                argument_tys.push(apply_attrs(arg.layout.scalar_pair_element_gcc_type(cx, 1), &b));
                 continue;
             }
-            PassMode::Cast(ref cast, pad_i32) => {
+            PassMode::Cast { ref cast, pad_i32 } => {
                 // add padding
                 if pad_i32 {
                     argument_tys.push(Reg::i32().gcc_type(cx));
@@ -153,18 +153,18 @@ impl<'gcc, 'tcx> FnAbiGccExt<'gcc, 'tcx> for FnAbi<'tcx, Ty<'tcx>> {
                 let ty = cast.gcc_type(cx);
                 apply_attrs(ty, &cast.attrs)
             }
-            PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: true } => {
+            PassMode::Indirect { attrs: _, meta_attrs: None, on_stack: true } => {
                 // This is a "byval" argument, so we don't apply the `restrict` attribute on it.
                 on_stack_param_indices.insert(argument_tys.len());
                 arg.memory_ty(cx)
             },
             PassMode::Direct(attrs) => apply_attrs(arg.layout.immediate_gcc_type(cx), &attrs),
-            PassMode::Indirect { attrs, extra_attrs: None, on_stack: false } => {
+            PassMode::Indirect { attrs, meta_attrs: None, on_stack: false } => {
                 apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs)
             }
-            PassMode::Indirect { attrs, extra_attrs: Some(extra_attrs), on_stack } => {
+            PassMode::Indirect { attrs, meta_attrs: Some(meta_attrs), on_stack } => {
                 assert!(!on_stack);
-                apply_attrs(apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs), &extra_attrs)
+                apply_attrs(apply_attrs(cx.type_ptr_to(arg.memory_ty(cx)), &attrs), &meta_attrs)
             }
         };
         argument_tys.push(arg_ty);
diff --git a/src/asm.rs b/src/asm.rs
index b0e615d2de2..f3a9ca77a67 100644
--- a/src/asm.rs
+++ b/src/asm.rs
@@ -107,7 +107,7 @@ enum ConstraintOrRegister {

 impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
-    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], _instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
+    fn codegen_inline_asm(&mut self, template: &[InlineAsmTemplatePiece], rust_operands: &[InlineAsmOperandRef<'tcx, Self>], options: InlineAsmOptions, span: &[Span], instance: Instance<'_>, _dest_catch_funclet: Option<(Self::BasicBlock, Self::BasicBlock, Option<&Self::Funclet>)>) {
         if options.contains(InlineAsmOptions::MAY_UNWIND) {
             self.sess()
                 .create_err(UnwindingInlineAsm { span: span[0] })
@@ -173,7 +173,7 @@ impl<'a, 'gcc, 'tcx> AsmBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
         let is_target_supported = reg.reg_class().supported_types(asm_arch).iter()
             .any(|&(_, feature)| {
                 if let Some(feature) = feature {
-                    self.tcx.sess.target_features.contains(&feature)
+                    self.tcx.asm_target_features(instance.def_id()).contains(&feature)
                 } else {
                     true // Register class is unconditionally supported
                 }
@@ -593,6 +593,8 @@ fn reg_to_gcc(reg: InlineAsmRegOrRegClass) -> ConstraintOrRegister {
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => "r",
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => "a",
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => "d",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => "r",
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => "f",
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => "d", // more specific than "r"
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => "f",
         InlineAsmRegClass::Msp430(Msp430InlineAsmRegClass::reg) => "r",
@@ -669,6 +671,8 @@ fn dummy_output_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, reg: InlineAsmRegClass) -> Type<'gcc> {
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_addr) => cx.type_i32(),
         InlineAsmRegClass::M68k(M68kInlineAsmRegClass::reg_data) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::reg) => cx.type_i32(),
+        InlineAsmRegClass::CSKY(CSKYInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::reg) => cx.type_i32(),
         InlineAsmRegClass::Mips(MipsInlineAsmRegClass::freg) => cx.type_f32(),
         InlineAsmRegClass::Msp430(_) => unimplemented!(),
@@ -856,6 +860,7 @@ fn modifier_to_gcc(arch: InlineAsmArch, reg: InlineAsmRegClass, modifier: Option<char>) -> Option<char> {
         InlineAsmRegClass::S390x(_) => None,
         InlineAsmRegClass::Msp430(_) => None,
         InlineAsmRegClass::M68k(_) => None,
+        InlineAsmRegClass::CSKY(_) => None,
         InlineAsmRegClass::SpirV(SpirVInlineAsmRegClass::reg) => {
             bug!("LLVM backend does not support SPIR-V")
         }
diff --git a/src/builder.rs b/src/builder.rs
index 04100f2ad2e..b7841808934 100644
--- a/src/builder.rs
+++ b/src/builder.rs
@@ -656,7 +656,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn unchecked_sadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a + b
+        self.gcc_add(a, b)
     }

     fn unchecked_uadd(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -664,7 +664,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn unchecked_ssub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a - b
+        self.gcc_sub(a, b)
     }

     fn unchecked_usub(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
@@ -673,11 +673,11 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
     }

     fn unchecked_smul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a * b
+        self.gcc_mul(a, b)
     }

     fn unchecked_umul(&mut self, a: RValue<'gcc>, b: RValue<'gcc>) -> RValue<'gcc> {
-        a * b
+        self.gcc_mul(a, b)
     }

     fn fadd_fast(&mut self, lhs: RValue<'gcc>, rhs: RValue<'gcc>) -> RValue<'gcc> {
@@ -814,7 +814,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         let mut load = |i, scalar: &abi::Scalar, align| {
             let llptr = self.struct_gep(pair_type, place.llval, i as u64);
-            let llty = place.layout.scalar_pair_element_gcc_type(self, i, false);
+            let llty = place.layout.scalar_pair_element_gcc_type(self, i);
             let load = self.load(llty, llptr, align);
             scalar_load_metadata(self, load, scalar);
             if scalar.is_bool() { self.trunc(load, self.type_i1()) } else { load }
@@ -1421,7 +1421,7 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> {
         self.cx
     }

-    fn do_not_inline(&mut self, _llret: RValue<'gcc>) {
+    fn apply_attrs_to_cleanup_callsite(&mut self, _llret: RValue<'gcc>) {
         // FIXME(bjorn3): implement
     }
diff --git a/src/callee.rs b/src/callee.rs
index a96bd66ba79..9fc77627b1b 100644
--- a/src/callee.rs
+++ b/src/callee.rs
@@ -100,7 +100,7 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) -> RValue<'gcc> {
             // whether we are sharing generics or not. The important thing here is
             // that the visibility we apply to the declaration is the same one that
             // has been applied to the definition (wherever that definition may be).
-            let is_generic = instance.args.non_erasable_generics().next().is_some();
+            let is_generic = instance.args.non_erasable_generics(tcx, instance.def_id()).next().is_some();

             if is_generic {
                 // This is a monomorphization. Its expected visibility depends
diff --git a/src/context.rs b/src/context.rs
index 88dcafa7370..dcebd92a61c 100644
--- a/src/context.rs
+++ b/src/context.rs
@@ -7,6 +7,7 @@ use rustc_codegen_ssa::traits::{
     BaseTypeMethods,
     MiscMethods,
 };
+use rustc_codegen_ssa::errors as ssa_errors;
 use rustc_data_structures::base_n;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_middle::span_bug;
@@ -479,7 +480,7 @@ impl<'gcc, 'tcx> LayoutOfHelpers<'tcx> for CodegenCx<'gcc, 'tcx> {
         if let LayoutError::SizeOverflow(_) | LayoutError::ReferencesError(_) = err {
             self.sess().emit_fatal(respan(span, err.into_diagnostic()))
         } else {
-            span_bug!(span, "failed to get layout for `{}`: {}", ty, err)
+            self.tcx.sess.emit_fatal(ssa_errors::FailedToGetLayout { span, ty, err })
         }
     }
 }
diff --git a/src/debuginfo.rs b/src/debuginfo.rs
index a81585d4128..d1bfd833cd8 100644
--- a/src/debuginfo.rs
+++ b/src/debuginfo.rs
@@ -55,7 +55,7 @@ impl<'gcc, 'tcx> DebugInfoMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         _fn_abi: &FnAbi<'tcx, Ty<'tcx>>,
         _llfn: RValue<'gcc>,
         _mir: &mir::Body<'tcx>,
-    ) -> Option<FunctionDebugContext<Self::DIScope, Self::DILocation>> {
+    ) -> Option<FunctionDebugContext<'tcx, Self::DIScope, Self::DILocation>> {
         // TODO(antoyo)
         None
     }
diff --git a/src/intrinsic/mod.rs b/src/intrinsic/mod.rs
index fab5cba6476..9caed459a29 100644
--- a/src/intrinsic/mod.rs
+++ b/src/intrinsic/mod.rs
@@ -144,7 +144,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             sym::volatile_load | sym::unaligned_volatile_load => {
                 let tp_ty = fn_args.type_at(0);
                 let mut ptr = args[0].immediate();
-                if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                     ptr = self.pointercast(ptr, self.type_ptr_to(ty.gcc_type(self)));
                 }
                 let load = self.volatile_load(ptr.get_type(), ptr);
@@ -353,7 +353,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'a, 'gcc, 'tcx> {
             };

             if !fn_abi.ret.is_ignore() {
-                if let PassMode::Cast(ty, _) = &fn_abi.ret.mode {
+                if let PassMode::Cast { cast: ty, .. } = &fn_abi.ret.mode {
                     let ptr_llty = self.type_ptr_to(ty.gcc_type(self));
                     let ptr = self.pointercast(result.llval, ptr_llty);
                     self.store(llval, ptr, result.align);
@@ -449,7 +449,7 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
         else if self.is_unsized_indirect() {
             bug!("unsized `ArgAbi` must be handled through `store_fn_arg`");
         }
-        else if let PassMode::Cast(ref cast, _) = self.mode {
+        else if let PassMode::Cast { ref cast, .. } = self.mode {
             // FIXME(eddyb): Figure out when the simpler Store is safe, clang
             // uses it for i16 -> {i8, i8}, but not for i24 -> {i8, i8, i8}.
             let can_store_through_cast_ptr = false;
@@ -511,10 +511,10 @@ impl<'gcc, 'tcx> ArgAbiExt<'gcc, 'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
             PassMode::Pair(..) => {
                 OperandValue::Pair(next(), next()).store(bx, dst);
             },
-            PassMode::Indirect { extra_attrs: Some(_), .. } => {
+            PassMode::Indirect { meta_attrs: Some(_), .. } => {
                 OperandValue::Ref(next(), Some(next()), self.layout.align.abi).store(bx, dst);
             },
-            PassMode::Direct(_) | PassMode::Indirect { extra_attrs: None, .. } | PassMode::Cast(..) => {
+            PassMode::Direct(_) | PassMode::Indirect { meta_attrs: None, .. } | PassMode::Cast { .. } => {
                 let next_arg = next();
                 self.store(bx, next_arg, dst);
             },
diff --git a/src/lib.rs b/src/lib.rs
index df33e6cbd61..fe233930560 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -98,7 +98,7 @@ use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
-use rustc_middle::query::Providers;
+use rustc_middle::util::Providers;
 use rustc_middle::ty::TyCtxt;
 use rustc_session::config::{Lto, OptLevel, OutputFilenames};
 use rustc_session::Session;
diff --git a/src/type_of.rs b/src/type_of.rs
index 84d57838512..c2eab295acd 100644
--- a/src/type_of.rs
+++ b/src/type_of.rs
@@ -4,7 +4,7 @@ use gccjit::{Struct, Type};
 use crate::rustc_codegen_ssa::traits::{BaseTypeMethods, DerivedTypeMethods, LayoutTypeMethods};
 use rustc_middle::bug;
 use rustc_middle::ty::{self, Ty, TypeVisitableExt};
-use rustc_middle::ty::layout::{FnAbiOf, LayoutOf, TyAndLayout};
+use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
 use rustc_middle::ty::print::with_no_trimmed_paths;
 use rustc_target::abi::{self, Abi, Align, F32, F64, FieldsShape, Int, Integer, Pointer, PointeeInfo, Size, TyAbiInterface, Variants};
 use rustc_target::abi::call::{CastTarget, FnAbi, Reg};
@@ -74,8 +74,8 @@ fn uncached_gcc_type<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, layout: TyAndLayout<'tcx>, defer: &mut Option<(Struct<'gcc>, TyAndLayout<'tcx>)>) -> Type<'gcc> {
         Abi::ScalarPair(..) => {
             return cx.type_struct(
                 &[
-                    layout.scalar_pair_element_gcc_type(cx, 0, false),
-                    layout.scalar_pair_element_gcc_type(cx, 1, false),
+                    layout.scalar_pair_element_gcc_type(cx, 0),
+                    layout.scalar_pair_element_gcc_type(cx, 1),
                 ],
                 false,
             );
@@ -150,7 +150,7 @@ pub trait LayoutGccExt<'tcx> {
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn immediate_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc>;
     fn scalar_gcc_type_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, scalar: &abi::Scalar, offset: Size) -> Type<'gcc>;
-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc>;
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc>;
     fn gcc_field_index(&self, index: usize) -> u64;
     fn pointee_info_at<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, offset: Size) -> Option<PointeeInfo>;
 }
@@ -182,6 +182,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
     /// of that field's type - this is useful for taking the address of
     /// that field and ensuring the struct has the right alignment.
     fn gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>) -> Type<'gcc> {
+        use crate::rustc_middle::ty::layout::FnAbiOf;
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         if let Abi::Scalar(ref scalar) = self.abi {
             // Use a different cache for scalars because pointers to DSTs
             // can be either fat or thin (data pointers of fat pointers).
@@ -190,12 +194,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         }
         let ty = match *self.ty.kind() {
-            ty::Ref(_, ty, _) | ty::RawPtr(ty::TypeAndMut { ty, .. }) => {
-                cx.type_ptr_to(cx.layout_of(ty).gcc_type(cx))
-            }
-            ty::Adt(def, _) if def.is_box() => {
-                cx.type_ptr_to(cx.layout_of(self.ty.boxed_ty()).gcc_type(cx))
-            }
+            // NOTE: we cannot remove this match like in the LLVM codegen because the call
+            // to fn_ptr_backend_type handles the on-stack attribute.
+            // TODO(antoyo): find a less hackish way to handle the on-stack attribute.
             ty::FnPtr(sig) => cx.fn_ptr_backend_type(&cx.fn_abi_of_fn_ptr(sig, ty::List::empty())),
             _ => self.scalar_gcc_type_at(cx, scalar, Size::ZERO),
         };
@@ -272,23 +273,10 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> {
         }
     }

-    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
-        // TODO(antoyo): remove llvm hack:
-        // HACK(eddyb) special-case fat pointers until LLVM removes
-        // pointee types, to avoid bitcasting every `OperandRef::deref`.
-        match self.ty.kind() {
-            ty::Ref(..) | ty::RawPtr(_) => {
-                return self.field(cx, index).gcc_type(cx);
-            }
-            // only wide pointer boxes are handled as pointers
-            // thin pointer boxes with scalar allocators are handled by the general logic below
-            ty::Adt(def, args) if def.is_box() && cx.layout_of(args.type_at(1)).is_zst() => {
-                let ptr_ty = Ty::new_mut_ptr(cx.tcx, self.ty.boxed_ty());
-                return cx.layout_of(ptr_ty).scalar_pair_element_gcc_type(cx, index, immediate);
-            }
-            _ => {}
-        }
-
+    fn scalar_pair_element_gcc_type<'gcc>(&self, cx: &CodegenCx<'gcc, 'tcx>, index: usize) -> Type<'gcc> {
+        // This must produce the same result for `repr(transparent)` wrappers as for the inner type!
+        // In other words, this should generally not look at the type at all, but only at the
+        // layout.
         let (a, b) = match self.abi {
             Abi::ScalarPair(ref a, ref b) => (a, b),
             _ => bug!("TyAndLayout::scalar_pair_element_llty({:?}): not applicable", self),
@@ -367,8 +355,8 @@ impl<'gcc, 'tcx> LayoutTypeMethods<'tcx> for CodegenCx<'gcc, 'tcx> {
         layout.gcc_field_index(index)
     }

-    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, immediate: bool) -> Type<'gcc> {
-        layout.scalar_pair_element_gcc_type(self, index, immediate)
+    fn scalar_pair_element_backend_type(&self, layout: TyAndLayout<'tcx>, index: usize, _immediate: bool) -> Type<'gcc> {
+        layout.scalar_pair_element_gcc_type(self, index)
     }

     fn cast_backend_type(&self, ty: &CastTarget) -> Type<'gcc> {
diff --git a/test.sh b/test.sh
index 5b7ef7ab101..e4cbd6fbcaf 100755
--- a/test.sh
+++ b/test.sh
@@ -215,7 +215,7 @@ function setup_rustc() {
     rm config.toml || true

     cat > config.toml <<EOF