Auto merge of #99745 - JohnTitor:rollup-lvrie64, r=JohnTitor

Rollup of 7 pull requests

Successful merges:

 - #98211 (Implement `fs::get_path` for FreeBSD.)
 - #99353 (Slightly improve mismatched GAT where clause error)
 - #99593 (Suggest removing the tuple struct field for the unwrapped value)
 - #99615 (Remove some explicit `self.infcx` for `FnCtxt`, which already derefs into `InferCtxt`)
 - #99711 (Remove reachable coverage without counters)
 - #99718 (Avoid `&str`/`Symbol` to `String` conversions)
 - #99720 (Sync rustc_codegen_cranelift)

Failed merges:

r? `@ghost`
`@rustbot` modify labels: rollup
bors 2022-07-26 04:28:41 +00:00
commit b629c85bd7
74 changed files with 843 additions and 550 deletions

View file

@@ -4,6 +4,7 @@ mod comments;
mod pass_mode;
mod returning;
use cranelift_module::ModuleError;
use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use rustc_middle::ty::layout::FnAbiOf;
use rustc_target::abi::call::{Conv, FnAbi};
@@ -69,7 +70,17 @@ pub(crate) fn import_function<'tcx>(
) -> FuncId {
let name = tcx.symbol_name(inst).name;
let sig = get_function_sig(tcx, module.isa().triple(), inst);
module.declare_function(name, Linkage::Import, &sig).unwrap()
match module.declare_function(name, Linkage::Import, &sig) {
Ok(func_id) => func_id,
Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
"attempt to declare `{name}` as function, but it was already declared as static"
)),
Err(ModuleError::IncompatibleSignature(_, prev_sig, new_sig)) => tcx.sess.fatal(&format!(
"attempt to declare `{name}` with signature {new_sig:?}, \
but it was already declared with signature {prev_sig:?}"
)),
Err(err) => Err::<_, _>(err).unwrap(),
}
}
impl<'tcx> FunctionCx<'_, '_, 'tcx> {
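The hunk above converts two `ModuleError` variants into user-facing fatal diagnostics while still ICEing on anything unexpected. A minimal standalone sketch of that pattern, using a hypothetical `DeclError` enum and `fatal` helper rather than the real cranelift-module and rustc session APIs:

```rust
// Hypothetical stand-ins for cranelift's ModuleError and rustc's tcx.sess.fatal,
// used only to illustrate the error-handling pattern above.
#[allow(dead_code)]
#[derive(Debug)]
enum DeclError {
    IncompatibleDeclaration(String),
    Other(String),
}

fn fatal(msg: &str) -> ! {
    eprintln!("error: {msg}");
    std::process::exit(1)
}

// The one error a user can plausibly trigger becomes a readable diagnostic; every other
// error keeps the old unwrap-style "internal compiler error" behavior.
fn declare_function(name: &str, result: Result<u32, DeclError>) -> u32 {
    match result {
        Ok(func_id) => func_id,
        Err(DeclError::IncompatibleDeclaration(_)) => fatal(&format!(
            "attempt to declare `{name}` as function, but it was already declared as static"
        )),
        Err(err) => Err::<u32, _>(err).unwrap(), // ICE path: keeps the panic message
    }
}

fn main() {
    println!("declared as id {}", declare_function("foo", Ok(42)));
}
```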
@@ -182,6 +193,15 @@ pub(crate) fn codegen_fn_prelude<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, start_
}
let fn_abi = fx.fn_abi.take().unwrap();
// FIXME implement variadics in cranelift
if fn_abi.c_variadic {
fx.tcx.sess.span_fatal(
fx.mir.span,
"Defining variadic functions is not yet supported by Cranelift",
);
}
let mut arg_abis_iter = fn_abi.args.iter();
let func_params = fx
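The `c_variadic` check above rejects Rust definitions of C-variadic functions. A nightly-only sketch (not part of this PR) of the kind of item that would now hit this fatal error under cg_clif:

```rust
#![feature(c_variadic)]

// Defining a C-variadic function in Rust needs the nightly `c_variadic` feature; under
// cg_clif such a definition would now be rejected with the "Defining variadic functions
// is not yet supported by Cranelift" fatal error emitted in the prelude above.
pub unsafe extern "C" fn log_call(_tag: i32, _args: ...) {}

fn main() {}
```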
@@ -376,9 +396,15 @@ pub(crate) fn codegen_terminator_call<'tcx>(
RevealAllLayoutCx(fx.tcx).fn_abi_of_fn_ptr(fn_ty.fn_sig(fx.tcx), extra_args)
};
let is_cold = instance
.map(|inst| fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD))
.unwrap_or(false);
let is_cold = if fn_sig.abi == Abi::RustCold {
true
} else {
instance
.map(|inst| {
fx.tcx.codegen_fn_attrs(inst.def_id()).flags.contains(CodegenFnAttrFlags::COLD)
})
.unwrap_or(false)
};
if is_cold {
fx.bcx.set_cold_block(fx.bcx.current_block().unwrap());
if let Some(destination_block) = target {
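A simplified, hypothetical model of the cold-call decision introduced above: a call site is cold either because its ABI is `rust-cold` (which also covers function pointers, where no callee instance is known) or because the known callee carries `#[cold]`:

```rust
// Toy types standing in for rustc's Abi and CodegenFnAttrs; illustration only.
#[derive(PartialEq)]
enum CallAbi {
    Rust,
    RustCold,
}

struct CodegenFnAttrs {
    is_cold: bool,
}

fn is_cold_call(abi: &CallAbi, callee_attrs: Option<&CodegenFnAttrs>) -> bool {
    if *abi == CallAbi::RustCold {
        // extern "rust-cold" marks the call cold even through a function pointer,
        // where no callee instance (and thus no #[cold] attribute) is available.
        true
    } else {
        // Otherwise fall back to the callee's #[cold] attribute, if the callee is known.
        callee_attrs.map(|attrs| attrs.is_cold).unwrap_or(false)
    }
}

fn main() {
    assert!(is_cold_call(&CallAbi::RustCold, None));
    assert!(is_cold_call(&CallAbi::Rust, Some(&CodegenFnAttrs { is_cold: true })));
    assert!(!is_cold_call(&CallAbi::Rust, None));
}
```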

View file

@@ -18,9 +18,9 @@ fn reg_to_abi_param(reg: Reg) -> AbiParam {
let clif_ty = match (reg.kind, reg.size.bytes()) {
(RegKind::Integer, 1) => types::I8,
(RegKind::Integer, 2) => types::I16,
(RegKind::Integer, 4) => types::I32,
(RegKind::Integer, 8) => types::I64,
(RegKind::Integer, 16) => types::I128,
(RegKind::Integer, 3..=4) => types::I32,
(RegKind::Integer, 5..=8) => types::I64,
(RegKind::Integer, 9..=16) => types::I128,
(RegKind::Float, 4) => types::F32,
(RegKind::Float, 8) => types::F64,
(RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
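The widened match arms above pick the smallest Cranelift integer type that can hold an irregularly sized register piece. A standalone sketch of that mapping, with a hypothetical `ClifType` enum standing in for `cranelift_codegen::ir::types`:

```rust
// Pick the smallest integer type that can hold `size_bytes` bytes, as in the hunk above.
#[derive(Debug, PartialEq)]
enum ClifType {
    I8,
    I16,
    I32,
    I64,
    I128,
}

fn int_reg_type(size_bytes: u64) -> ClifType {
    match size_bytes {
        1 => ClifType::I8,
        2 => ClifType::I16,
        3..=4 => ClifType::I32, // e.g. a 3-byte aggregate piece is widened to i32
        5..=8 => ClifType::I64,
        9..=16 => ClifType::I128,
        _ => unreachable!("unsupported integer register size: {size_bytes}"),
    }
}

fn main() {
    assert_eq!(int_reg_type(3), ClifType::I32);
    assert_eq!(int_reg_type(12), ClifType::I128);
}
```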
@@ -48,23 +48,9 @@ fn cast_target_to_abi_params(cast: CastTarget) -> SmallVec<[AbiParam; 2]> {
)
};
if cast.prefix.iter().all(|x| x.is_none()) {
// Simplify to a single unit when there is no prefix and size <= unit size
if cast.rest.total <= cast.rest.unit.size {
let clif_ty = match (cast.rest.unit.kind, cast.rest.unit.size.bytes()) {
(RegKind::Integer, 1) => types::I8,
(RegKind::Integer, 2) => types::I16,
(RegKind::Integer, 3..=4) => types::I32,
(RegKind::Integer, 5..=8) => types::I64,
(RegKind::Integer, 9..=16) => types::I128,
(RegKind::Float, 4) => types::F32,
(RegKind::Float, 8) => types::F64,
(RegKind::Vector, size) => types::I8.by(u16::try_from(size).unwrap()).unwrap(),
_ => unreachable!("{:?}", cast.rest.unit),
};
return smallvec![AbiParam::new(clif_ty)];
}
}
// Note: Unlike the LLVM equivalent of this code we don't have separate branches for when there
// is no prefix as a single unit, an array and a heterogeneous struct are not represented using
// different types in Cranelift IR. Instead a single array of primitive types is used.
// Create list of fields in the main structure
let mut args = cast
@@ -230,7 +216,7 @@ pub(super) fn adjust_arg_for_abi<'tcx>(
arg_abi: &ArgAbi<'tcx, Ty<'tcx>>,
is_owned: bool,
) -> SmallVec<[Value; 2]> {
assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty);
assert_assignable(fx, arg.layout().ty, arg_abi.layout.ty, 16);
match arg_abi.mode {
PassMode::Ignore => smallvec![],
PassMode::Direct(_) => smallvec![arg.load_scalar(fx)],

View file

@@ -86,7 +86,7 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
let mut entries = Vec::new();
for (entry_name, entry) in self.entries {
for (mut entry_name, entry) in self.entries {
// FIXME only read the symbol table of the object files to avoid having to keep all
// object files in memory at once, or read them twice.
let data = match entry {
@@ -109,6 +109,23 @@ impl<'a> ArchiveBuilder<'a> for ArArchiveBuilder<'a> {
};
if !self.no_builtin_ranlib {
if symbol_table.contains_key(&entry_name) {
// The ar crate can't handle creating a symbol table in case of multiple archive
// members with the same name. Work around this by prepending a number until we
// get a unique name.
for i in 1.. {
let new_name = format!("{}_", i)
.into_bytes()
.into_iter()
.chain(entry_name.iter().copied())
.collect::<Vec<_>>();
if !symbol_table.contains_key(&new_name) {
entry_name = new_name;
break;
}
}
}
match object::File::parse(&*data) {
Ok(object) => {
symbol_table.insert(

View file

@@ -175,10 +175,37 @@ fn compile_fn<'tcx>(
);
});
#[cfg(any())] // This is never true
let _clif_guard = {
use std::fmt::Write;
let func_clone = context.func.clone();
let clif_comments_clone = clif_comments.clone();
let mut clif = String::new();
for flag in module.isa().flags().iter() {
writeln!(clif, "set {}", flag).unwrap();
}
write!(clif, "target {}", module.isa().triple().architecture.to_string()).unwrap();
for isa_flag in module.isa().isa_flags().iter() {
write!(clif, " {}", isa_flag).unwrap();
}
writeln!(clif, "\n").unwrap();
crate::PrintOnPanic(move || {
let mut clif = clif.clone();
::cranelift_codegen::write::decorate_function(
&mut &clif_comments_clone,
&mut clif,
&func_clone,
)
.unwrap();
clif
})
};
// Define function
tcx.sess.time("define function", || {
context.want_disasm = crate::pretty_clif::should_write_ir(tcx);
module.define_function(func_id, context).unwrap()
module.define_function(func_id, context).unwrap();
});
// Write optimized function to file for debugging
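The `#[cfg(any())]`-disabled guard above relies on printing the in-progress CLIF only if `define_function` panics. A small sketch of that idea, mirroring cg_clif's `PrintOnPanic` helper (names and messages here are illustrative):

```rust
// A guard whose Drop impl dumps diagnostic text only while the thread is panicking,
// so the captured CLIF is printed exactly when function definition fails.
struct PrintOnPanic<F: Fn() -> String>(F);

impl<F: Fn() -> String> Drop for PrintOnPanic<F> {
    fn drop(&mut self) {
        if std::thread::panicking() {
            println!("{}", (self.0)());
        }
    }
}

fn main() {
    let _guard = PrintOnPanic(|| "CLIF of the failing function would be dumped here".to_string());
    // No panic happens below, so the guard stays silent and is simply dropped.
    println!("define function succeeded");
}
```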
@@ -815,15 +842,7 @@ pub(crate) fn codegen_place<'tcx>(
for elem in place.projection {
match elem {
PlaceElem::Deref => {
if cplace.layout().ty.is_box() {
cplace = cplace
.place_field(fx, Field::new(0)) // Box<T> -> Unique<T>
.place_field(fx, Field::new(0)) // Unique<T> -> NonNull<T>
.place_field(fx, Field::new(0)) // NonNull<T> -> *mut T
.place_deref(fx);
} else {
cplace = cplace.place_deref(fx);
}
cplace = cplace.place_deref(fx);
}
PlaceElem::Field(field, _ty) => {
cplace = cplace.place_field(fx, field);

View file

@@ -149,17 +149,8 @@ pub(crate) fn clif_int_or_float_cast(
}
let is_not_nan = fx.bcx.ins().fcmp(FloatCC::Equal, from, from);
if to_ty == types::I128 {
// FIXME(bytecodealliance/wasmtime#3963): select.i128 on fcmp eq miscompiles
let (lsb, msb) = fx.bcx.ins().isplit(val);
let zero = fx.bcx.ins().iconst(types::I64, 0);
let lsb = fx.bcx.ins().select(is_not_nan, lsb, zero);
let msb = fx.bcx.ins().select(is_not_nan, msb, zero);
fx.bcx.ins().iconcat(lsb, msb)
} else {
let zero = fx.bcx.ins().iconst(to_ty, 0);
fx.bcx.ins().select(is_not_nan, val, zero)
}
let zero = fx.bcx.ins().iconst(to_ty, 0);
fx.bcx.ins().select(is_not_nan, val, zero)
} else if from_ty.is_float() && to_ty.is_float() {
// float -> float
match (from_ty, to_ty) {
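The simplified `fcmp`/`select` sequence above keeps Rust's saturating float-to-int cast semantics, where NaN maps to 0. A quick surface-level check of that behavior in plain Rust:

```rust
fn main() {
    // NaN inputs select the zero operand.
    assert_eq!(f32::NAN as i32, 0);
    assert_eq!(f64::NAN as i128, 0);
    // The non-NaN edge cases saturate to the integer type's bounds.
    assert_eq!(f32::INFINITY as i32, i32::MAX);
    assert_eq!(f32::NEG_INFINITY as u32, 0);
}
```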

View file

@@ -328,14 +328,18 @@ fn data_id_for_static(
let attrs = tcx.codegen_fn_attrs(def_id);
let data_id = module
.declare_data(
&*symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
)
.unwrap();
let data_id = match module.declare_data(
&*symbol_name,
linkage,
is_mutable,
attrs.flags.contains(CodegenFnAttrFlags::THREAD_LOCAL),
) {
Ok(data_id) => data_id,
Err(ModuleError::IncompatibleDeclaration(_)) => tcx.sess.fatal(&format!(
"attempt to declare `{symbol_name}` as static, but it was already declared as function"
)),
Err(err) => Err::<_, _>(err).unwrap(),
};
if rlinkage.is_some() {
// Comment copied from https://github.com/rust-lang/rust/blob/45060c2a66dfd667f88bd8b94261b28a58d85bd5/src/librustc_codegen_llvm/consts.rs#L141
@@ -441,7 +445,8 @@ fn define_all_allocs(tcx: TyCtxt<'_>, module: &mut dyn Module, cx: &mut Constant
let data_id = match reloc_target_alloc {
GlobalAlloc::Function(instance) => {
assert_eq!(addend, 0);
let func_id = crate::abi::import_function(tcx, module, instance);
let func_id =
crate::abi::import_function(tcx, module, instance.polymorphize(tcx));
let local_func_id = module.declare_func_in_data(func_id, &mut data_ctx);
data_ctx.write_function_addr(offset.bytes() as u32, local_func_id);
continue;

View file

@@ -18,86 +18,96 @@ pub(crate) fn codegen_inline_asm<'tcx>(
) {
// FIXME add .eh_frame unwind info directives
if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(1));
return;
} else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
&& matches!(
template[1],
InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
)
&& template[2] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
&& template[4] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
&& matches!(
template[6],
InlineAsmTemplatePiece::Placeholder { operand_idx: 0, modifier: Some('r'), span: _ }
)
{
assert_eq!(operands.len(), 4);
let (leaf, eax_place) = match operands[1] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let ebx_place = match operands[0] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
X86InlineAsmRegClass::reg
))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (sub_leaf, ecx_place) = match operands[2] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let edx_place = match operands[3] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
if !template.is_empty() {
if template[0] == InlineAsmTemplatePiece::String("int $$0x29".to_string()) {
let true_ = fx.bcx.ins().iconst(types::I32, 1);
fx.bcx.ins().trapnz(true_, TrapCode::User(1));
return;
} else if template[0] == InlineAsmTemplatePiece::String("movq %rbx, ".to_string())
&& matches!(
template[1],
InlineAsmTemplatePiece::Placeholder {
operand_idx: 0,
modifier: Some('r'),
span: _
}
)
&& template[2] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[3] == InlineAsmTemplatePiece::String("cpuid".to_string())
&& template[4] == InlineAsmTemplatePiece::String("\n".to_string())
&& template[5] == InlineAsmTemplatePiece::String("xchgq %rbx, ".to_string())
&& matches!(
template[6],
InlineAsmTemplatePiece::Placeholder {
operand_idx: 0,
modifier: Some('r'),
span: _
}
)
{
assert_eq!(operands.len(), 4);
let (leaf, eax_place) = match operands[1] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::ax))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let ebx_place = match operands[0] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::RegClass(InlineAsmRegClass::X86(
X86InlineAsmRegClass::reg
))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (sub_leaf, ecx_place) = match operands[2] {
InlineAsmOperand::InOut { reg, late: true, ref in_value, out_place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::cx))
);
(
crate::base::codegen_operand(fx, in_value).load_scalar(fx),
crate::base::codegen_place(fx, out_place.unwrap()),
)
}
_ => unreachable!(),
};
let edx_place = match operands[3] {
InlineAsmOperand::Out { reg, late: true, place } => {
assert_eq!(
reg,
InlineAsmRegOrRegClass::Reg(InlineAsmReg::X86(X86InlineAsmReg::dx))
);
crate::base::codegen_place(fx, place.unwrap())
}
_ => unreachable!(),
};
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
let (eax, ebx, ecx, edx) = crate::intrinsics::codegen_cpuid_call(fx, leaf, sub_leaf);
eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
return;
} else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
} else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
eax_place.write_cvalue(fx, CValue::by_val(eax, fx.layout_of(fx.tcx.types.u32)));
ebx_place.write_cvalue(fx, CValue::by_val(ebx, fx.layout_of(fx.tcx.types.u32)));
ecx_place.write_cvalue(fx, CValue::by_val(ecx, fx.layout_of(fx.tcx.types.u32)));
edx_place.write_cvalue(fx, CValue::by_val(edx, fx.layout_of(fx.tcx.types.u32)));
return;
} else if fx.tcx.symbol_name(fx.instance).name.starts_with("___chkstk") {
// ___chkstk, ___chkstk_ms and __alloca are only used on Windows
crate::trap::trap_unimplemented(fx, "Stack probes are not supported");
} else if fx.tcx.symbol_name(fx.instance).name == "__alloca" {
crate::trap::trap_unimplemented(fx, "Alloca is not supported");
}
}
let mut inputs = Vec::new();

View file

@@ -404,7 +404,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
};
size_of_val, (c ptr) {
let layout = fx.layout_of(substs.type_at(0));
let size = if layout.is_unsized() {
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
size
@@ -418,7 +420,9 @@ fn codegen_regular_intrinsic_call<'tcx>(
};
min_align_of_val, (c ptr) {
let layout = fx.layout_of(substs.type_at(0));
let align = if layout.is_unsized() {
// Note: Can't use is_unsized here as truly unsized types need to take the fixed size
// branch
let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
let (_ptr, info) = ptr.load_scalar_pair(fx);
let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
align
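The `ScalarPair` branch above computes the size and alignment of an unsized value from the fat pointer's metadata at runtime. The same observable behavior, expressed through the standard-library wrappers:

```rust
fn main() {
    // For a slice, size comes from the length metadata of the fat pointer: 3 * 4 bytes.
    let slice: &[u32] = &[1, 2, 3];
    assert_eq!(std::mem::size_of_val(slice), 12);
    assert_eq!(std::mem::align_of_val(slice), 4);

    // For str the metadata is the byte length.
    let s: &str = "hello";
    assert_eq!(std::mem::size_of_val(s), 5);
}
```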
@@ -1145,6 +1149,20 @@ fn codegen_regular_intrinsic_call<'tcx>(
// FIXME implement black_box semantics
ret.write_cvalue(fx, a);
};
// FIXME implement variadics in cranelift
va_copy, (o _dest, o _src) {
fx.tcx.sess.span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",
);
};
va_arg | va_end, (o _valist) {
fx.tcx.sess.span_fatal(
source_info.span,
"Defining variadic functions is not yet supported by Cranelift",
);
};
}
let ret_block = fx.get_block(destination.unwrap());

View file

@@ -141,7 +141,11 @@ impl<'tcx> CodegenCx<'tcx> {
let unwind_context =
UnwindContext::new(isa, matches!(backend_config.codegen_mode, CodegenMode::Aot));
let debug_context = if debug_info { Some(DebugContext::new(tcx, isa)) } else { None };
let debug_context = if debug_info && !tcx.sess.target.options.is_like_windows {
Some(DebugContext::new(tcx, isa))
} else {
None
};
CodegenCx {
tcx,
global_asm: String::new(),
@@ -243,6 +247,7 @@ fn build_isa(sess: &Session, backend_config: &BackendConfig) -> Box<dyn isa::Tar
flags_builder.set("enable_probestack", "false").unwrap(); // __cranelift_probestack is not provided
let enable_verifier = if backend_config.enable_verifier { "true" } else { "false" };
flags_builder.set("enable_verifier", enable_verifier).unwrap();
flags_builder.set("regalloc_checker", enable_verifier).unwrap();
let tls_model = match target_triple.binary_format {
BinaryFormat::Elf => "elf_gd",

View file

@@ -109,7 +109,8 @@ pub(crate) fn maybe_create_entry_wrapper(
tcx.mk_substs([GenericArg::from(main_ret_ty)].iter()),
)
.unwrap()
.unwrap();
.unwrap()
.polymorphize(tcx);
let report_name = tcx.symbol_name(report).name;
let report_sig = get_function_sig(tcx, m.isa().triple(), report);

View file

@@ -66,7 +66,7 @@ use rustc_session::config::OutputType;
use crate::prelude::*;
#[derive(Debug)]
#[derive(Clone, Debug)]
pub(crate) struct CommentWriter {
enabled: bool,
global_comments: Vec<String>,
@@ -237,6 +237,7 @@ pub(crate) fn write_clif_file<'tcx>(
func: &cranelift_codegen::ir::Function,
mut clif_comments: &CommentWriter,
) {
// FIXME work around filename too long errors
write_ir_file(
tcx,
|| format!("{}.{}.clif", tcx.symbol_name(instance).name, postfix),

View file

@@ -153,11 +153,7 @@ pub(crate) fn size_and_align_of_dst<'tcx>(
layout: TyAndLayout<'tcx>,
info: Value,
) -> (Value, Value) {
if !layout.is_unsized() {
let size = fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64);
let align = fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64);
return (size, align);
}
assert!(layout.is_unsized() || layout.abi == Abi::Uninhabited);
match layout.ty.kind() {
ty::Dynamic(..) => {
// load size/align from vtable

View file

@@ -324,6 +324,12 @@ impl<'tcx> CPlace<'tcx> {
};
}
if layout.size.bytes() >= u64::from(u32::MAX - 16) {
fx.tcx
.sess
.fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
}
let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
@@ -420,7 +426,7 @@ impl<'tcx> CPlace<'tcx> {
}
pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
assert_assignable(fx, from.layout().ty, self.layout().ty);
assert_assignable(fx, from.layout().ty, self.layout().ty, 16);
self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
}
@@ -774,18 +780,25 @@ pub(crate) fn assert_assignable<'tcx>(
fx: &FunctionCx<'_, '_, 'tcx>,
from_ty: Ty<'tcx>,
to_ty: Ty<'tcx>,
limit: usize,
) {
if limit == 0 {
// assert_assignable exists solely to catch bugs in cg_clif. it isn't necessary for
// soundness. don't attempt to check deep types to avoid exponential behavior in certain
// cases.
return;
}
match (from_ty.kind(), to_ty.kind()) {
(ty::Ref(_, a, _), ty::Ref(_, b, _))
| (
ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
) => {
assert_assignable(fx, *a, *b);
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
| (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
assert_assignable(fx, *a, *b);
assert_assignable(fx, *a, *b, limit - 1);
}
(ty::FnPtr(_), ty::FnPtr(_)) => {
let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
@@ -815,6 +828,17 @@ pub(crate) fn assert_assignable<'tcx>(
}
// dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
}
(&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
let mut types_a = types_a.iter();
let mut types_b = types_b.iter();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
if adt_def_a.did() == adt_def_b.did() =>
{
@@ -822,18 +846,37 @@ pub(crate) fn assert_assignable<'tcx>(
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b),
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b),
(ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
(&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
if def_id_a == def_id_b =>
{
let mut types_a = substs_a.types();
let mut types_b = substs_b.types();
loop {
match (types_a.next(), types_b.next()) {
(Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
(None, None) => return,
(Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
}
}
}
(ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
// No way to check if it is correct or not with polymorphization enabled
}
_ => {
assert_eq!(
from_ty, to_ty,
from_ty,
to_ty,
"Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
from_ty, to_ty, fx,
from_ty.kind(),
to_ty.kind(),
fx,
);
}
}
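The `limit` parameter threaded through `assert_assignable` above caps recursion depth so deeply nested types cannot make this debug-only check exponential. A minimal sketch of the pattern with a hypothetical toy `Ty` enum (not rustc's):

```rust
// Toy type representation; only the recursion-limit pattern matters here.
#[derive(Debug)]
enum Ty {
    Int,
    Ref(Box<Ty>),
    Tuple(Vec<Ty>),
}

fn assert_assignable(from: &Ty, to: &Ty, limit: usize) {
    if limit == 0 {
        // The check only catches backend bugs and is not needed for soundness,
        // so giving up on very deep types is fine.
        return;
    }
    match (from, to) {
        (Ty::Int, Ty::Int) => {}
        (Ty::Ref(a), Ty::Ref(b)) => assert_assignable(a, b, limit - 1),
        (Ty::Tuple(a), Ty::Tuple(b)) => {
            assert_eq!(a.len(), b.len(), "{from:?}/{to:?}");
            for (a, b) in a.iter().zip(b) {
                assert_assignable(a, b, limit - 1);
            }
        }
        _ => panic!("can't assign {from:?} to {to:?}"),
    }
}

fn main() {
    let pair = Ty::Tuple(vec![Ty::Int, Ty::Ref(Box::new(Ty::Int))]);
    assert_assignable(&pair, &pair, 16);
}
```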