Merge commit 'ef07e8e60f' into sync_cg_clif-2023-04-29

commit a8697f9565
Author: bjorn3
Date:   2023-04-29 12:00:43 +00:00
51 changed files with 1469 additions and 675 deletions


@@ -6,8 +6,6 @@ use std::borrow::Cow;
use rustc_middle::mir;
-use rustc_target::abi::call::PassMode;
-use cranelift_codegen::entity::EntityRef;
use crate::prelude::*;
pub(super) fn add_args_header_comment(fx: &mut FunctionCx<'_, '_, '_>) {
@@ -91,35 +89,7 @@ pub(super) fn add_local_place_comments<'tcx>(
largest_niche: _,
} = layout.0.0;
-let (kind, extra) = match *place.inner() {
-CPlaceInner::Var(place_local, var) => {
-assert_eq!(local, place_local);
-("ssa", Cow::Owned(format!(",var={}", var.index())))
-}
-CPlaceInner::VarPair(place_local, var1, var2) => {
-assert_eq!(local, place_local);
-("ssa", Cow::Owned(format!("var=({}, {})", var1.index(), var2.index())))
-}
-CPlaceInner::VarLane(_local, _var, _lane) => unreachable!(),
-CPlaceInner::Addr(ptr, meta) => {
-let meta = if let Some(meta) = meta {
-Cow::Owned(format!("meta={}", meta))
-} else {
-Cow::Borrowed("")
-};
-match ptr.debug_base_and_offset() {
-(crate::pointer::PointerBase::Addr(addr), offset) => {
-("reuse", format!("storage={}{}{}", addr, offset, meta).into())
-}
-(crate::pointer::PointerBase::Stack(stack_slot), offset) => {
-("stack", format!("storage={}{}{}", stack_slot, offset, meta).into())
-}
-(crate::pointer::PointerBase::Dangling(align), offset) => {
-("zst", format!("align={},offset={}", align.bytes(), offset).into())
-}
-}
-}
-};
+let (kind, extra) = place.debug_comment();
fx.add_global_comment(format!(
"{:<5} {:5} {:30} {:4}b {}, {}{}{}",


@@ -605,9 +605,9 @@ pub(crate) fn codegen_drop<'tcx>(
// | ... |
// \-------/
//
-let (ptr, vtable) = drop_place.to_ptr_maybe_unsized();
+let (ptr, vtable) = drop_place.to_ptr_unsized();
let ptr = ptr.get_addr(fx);
-let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable.unwrap());
+let drop_fn = crate::vtable::drop_fn_of_obj(fx, vtable);
// FIXME(eddyb) perhaps move some of this logic into
// `Instance::resolve_drop_in_place`?


@@ -84,7 +84,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
attrs
)],
Abi::Vector { .. } => {
-let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
smallvec![AbiParam::new(vector_ty)]
}
_ => unreachable!("{:?}", self.layout.abi),
@@ -135,7 +135,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> {
(None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))])
}
Abi::Vector { .. } => {
-let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout).unwrap();
+let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout);
(None, vec![AbiParam::new(vector_ty)])
}
_ => unreachable!("{:?}", self.layout.abi),


@@ -63,11 +63,11 @@ pub(super) fn codegen_with_call_return_arg<'tcx>(
let (ret_temp_place, return_ptr) = match ret_arg_abi.mode {
PassMode::Ignore => (None, None),
PassMode::Indirect { attrs: _, extra_attrs: None, on_stack: _ } => {
-if matches!(ret_place.inner(), CPlaceInner::Addr(_, None)) {
+if let Some(ret_ptr) = ret_place.try_to_ptr() {
// This is an optimization to prevent unnecessary copies of the return value when
// the return place is already a memory place as opposed to a register.
// This match arm can be safely removed.
-(None, Some(ret_place.to_ptr().get_addr(fx)))
+(None, Some(ret_ptr.get_addr(fx)))
} else {
let place = CPlace::new_stack_slot(fx, ret_arg_abi.layout);
(Some(place), Some(place.to_ptr().get_addr(fx)))


@@ -141,16 +141,6 @@ pub(crate) fn compile_fn(
context.clear();
context.func = codegened_func.func;
-// If the return block is not reachable, then the SSA builder may have inserted an `iconst.i128`
-// instruction, which doesn't have an encoding.
-context.compute_cfg();
-context.compute_domtree();
-context.eliminate_unreachable_code(module.isa()).unwrap();
-context.dce(module.isa()).unwrap();
-// Some Cranelift optimizations expect the domtree to not yet be computed and as such don't
-// invalidate it when it would change.
-context.domtree.clear();
#[cfg(any())] // This is never true
let _clif_guard = {
use std::fmt::Write;
@@ -182,27 +172,6 @@ pub(crate) fn compile_fn(
cx.profiler.generic_activity("define function").run(|| {
context.want_disasm = cx.should_write_ir;
module.define_function(codegened_func.func_id, context).unwrap();
-if cx.profiler.enabled() {
-let mut recording_args = false;
-cx.profiler
-.generic_activity_with_arg_recorder(
-"define function (clif pass timings)",
-|recorder| {
-let pass_times = cranelift_codegen::timing::take_current();
-// Replace newlines with | as measureme doesn't allow control characters like
-// newlines inside strings.
-recorder.record_arg(format!("{}", pass_times).replace('\n', " | "));
-recording_args = true;
-},
-)
-.run(|| {
-if recording_args {
-// Wait a tiny bit to ensure chrome's profiler doesn't hide the event
-std::thread::sleep(std::time::Duration::from_nanos(2))
-}
-});
-}
});
if cx.should_write_ir {
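The block removed above existed because measureme event arguments may not contain control characters, so the multi-line Cranelift pass-timing report had to be flattened before recording; the per-thread profiler hook added further down (see the MeasuremeProfiler hunk) supersedes it. A minimal sketch of that flattening step, with a hypothetical helper name:

// Flatten a multi-line timing report into a single measureme-safe line,
// mirroring the `.replace('\n', " | ")` in the removed code above.
fn flatten_report(report: &str) -> String {
    report.replace('\n', " | ")
}

fn main() {
    let report = "isel: 1.2ms\nregalloc: 3.4ms";
    assert_eq!(flatten_report(report), "isel: 1.2ms | regalloc: 3.4ms");
}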
@@ -216,7 +185,7 @@ pub(crate) fn compile_fn(
&clif_comments,
);
-if let Some(disasm) = &context.compiled_code().unwrap().disasm {
+if let Some(disasm) = &context.compiled_code().unwrap().vcode {
crate::pretty_clif::write_ir_file(
&cx.output_filenames,
&format!("{}.vcode", codegened_func.symbol_name),
@@ -524,13 +493,14 @@ fn codegen_stmt<'tcx>(
fx.set_debug_loc(stmt.source_info);
-#[cfg(any())] // This is never true
match &stmt.kind {
StatementKind::StorageLive(..) | StatementKind::StorageDead(..) => {} // Those are not very useful
_ => {
if fx.clif_comments.enabled() {
let inst = fx.bcx.func.layout.last_inst(cur_block).unwrap();
-fx.add_comment(inst, format!("{:?}", stmt));
+with_no_trimmed_paths!({
+fx.add_comment(inst, format!("{:?}", stmt));
+});
}
}
}
@@ -715,11 +685,11 @@ fn codegen_stmt<'tcx>(
}
Rvalue::Cast(CastKind::Pointer(PointerCast::Unsize), ref operand, _to_ty) => {
let operand = codegen_operand(fx, operand);
-operand.unsize_value(fx, lval);
+crate::unsize::coerce_unsized_into(fx, operand, lval);
}
Rvalue::Cast(CastKind::DynStar, ref operand, _) => {
let operand = codegen_operand(fx, operand);
-operand.coerce_dyn_star(fx, lval);
+crate::unsize::coerce_dyn_star(fx, operand, lval);
}
Rvalue::Cast(CastKind::Transmute, ref operand, _to_ty) => {
let operand = codegen_operand(fx, operand);
@@ -791,7 +761,10 @@ fn codegen_stmt<'tcx>(
layout.offset_of_subfield(fx, fields.iter().map(|f| f.index())).bytes()
}
};
-let val = CValue::const_val(fx, fx.layout_of(fx.tcx.types.usize), val.into());
+let val = CValue::by_val(
+fx.bcx.ins().iconst(fx.pointer_type, i64::try_from(val).unwrap()),
+fx.layout_of(fx.tcx.types.usize),
+);
lval.write_cvalue(fx, val);
}
Rvalue::Aggregate(ref kind, ref operands) => {
@@ -866,9 +839,7 @@ fn codegen_array_len<'tcx>(fx: &mut FunctionCx<'_, '_, 'tcx>, place: CPlace<'tcx
let len = fx.monomorphize(len).eval_target_usize(fx.tcx, ParamEnv::reveal_all()) as i64;
fx.bcx.ins().iconst(fx.pointer_type, len)
}
-ty::Slice(_elem_ty) => {
-place.to_ptr_maybe_unsized().1.expect("Length metadata for slice place")
-}
+ty::Slice(_elem_ty) => place.to_ptr_unsized().1,
_ => bug!("Rvalue::Len({:?})", place),
}
}
@@ -922,8 +893,7 @@ pub(crate) fn codegen_place<'tcx>(
ty::Slice(elem_ty) => {
assert!(from_end, "slice subslices should be `from_end`");
let elem_layout = fx.layout_of(*elem_ty);
-let (ptr, len) = cplace.to_ptr_maybe_unsized();
-let len = len.unwrap();
+let (ptr, len) = cplace.to_ptr_unsized();
cplace = CPlace::for_ptr_with_extra(
ptr.offset_i64(fx, elem_layout.size.bytes() as i64 * (from as i64)),
fx.bcx.ins().iadd_imm(len, -(from as i64 + to as i64)),


@@ -103,7 +103,7 @@ pub(crate) fn clif_int_or_float_cast(
vec![AbiParam::new(types::I64X2)],
&[from],
)[0];
-// FIXME use bitcast instead of store to get from i64x2 to i128
+// FIXME(bytecodealliance/wasmtime#6104) use bitcast instead of store to get from i64x2 to i128
let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
size: 16,
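Context for the FIXME: Cranelift at this version has no direct bitcast between `i64x2` and `i128` (bytecodealliance/wasmtime#6104), so the value takes a round trip through a stack slot. A sketch of that round trip against the Cranelift API this diff uses; the helper name is hypothetical and the exact `StackSlotData` shape is version-dependent:

// Move a 16-byte vector value into an i128 by spilling it and reloading,
// in place of the missing direct bitcast.
fn i64x2_to_i128(
    bcx: &mut cranelift_frontend::FunctionBuilder,
) -> impl FnMut(cranelift_codegen::ir::Value) -> cranelift_codegen::ir::Value + '_ {
    use cranelift_codegen::ir::{types, InstBuilder, StackSlotData, StackSlotKind};
    move |val| {
        let slot = bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            size: 16, // both i64x2 and i128 are 16 bytes
        });
        bcx.ins().stack_store(val, slot, 0);
        bcx.ins().stack_load(types::I128, slot, 0)
    }
}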


@@ -7,7 +7,6 @@ use crate::prelude::*;
pub(crate) fn maybe_codegen<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
-checked: bool,
lhs: CValue<'tcx>,
rhs: CValue<'tcx>,
) -> Option<CValue<'tcx>> {
@@ -22,47 +21,97 @@ pub(crate) fn maybe_codegen<'tcx>(
let is_signed = type_sign(lhs.layout().ty);
match bin_op {
-BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => {
-assert!(!checked);
-None
+BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => None,
+BinOp::Add | BinOp::Sub => None,
+BinOp::Mul => {
let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
let ret_val = fx.lib_call(
"__multi3",
vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
vec![AbiParam::new(types::I128)],
&args,
)[0];
Some(CValue::by_val(
ret_val,
fx.layout_of(if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 }),
))
}
-BinOp::Add | BinOp::Sub if !checked => None,
-BinOp::Mul if !checked || is_signed => {
-if !checked {
+BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
+BinOp::Div | BinOp::Rem => {
+let name = match (bin_op, is_signed) {
+(BinOp::Div, false) => "__udivti3",
+(BinOp::Div, true) => "__divti3",
+(BinOp::Rem, false) => "__umodti3",
+(BinOp::Rem, true) => "__modti3",
+_ => unreachable!(),
+};
+if fx.tcx.sess.target.is_like_windows {
+let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
+let ret = fx.lib_call(
+name,
+vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
+vec![AbiParam::new(types::I64X2)],
+&args,
+)[0];
+// FIXME(bytecodealliance/wasmtime#6104) use bitcast instead of store to get from i64x2 to i128
+let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
+ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
+Some(ret_place.to_cvalue(fx))
} else {
let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
let ret_val = fx.lib_call(
"__multi3",
name,
vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
vec![AbiParam::new(types::I128)],
&args,
)[0];
-Some(CValue::by_val(
-ret_val,
-fx.layout_of(if is_signed { fx.tcx.types.i128 } else { fx.tcx.types.u128 }),
-))
-} else {
-let out_ty = fx.tcx.mk_tup(&[lhs.layout().ty, fx.tcx.types.bool]);
-let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
-let lhs = lhs.load_scalar(fx);
-let rhs = rhs.load_scalar(fx);
-let oflow_ptr = oflow.to_ptr().get_addr(fx);
-let res = fx.lib_call_unadjusted(
-"__muloti4",
-vec![
-AbiParam::new(types::I128),
-AbiParam::new(types::I128),
-AbiParam::new(fx.pointer_type),
-],
-vec![AbiParam::new(types::I128)],
-&[lhs, rhs, oflow_ptr],
-)[0];
-let oflow = oflow.to_cvalue(fx).load_scalar(fx);
-let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
-Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
+Some(CValue::by_val(ret_val, lhs.layout()))
}
}
+BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => None,
+BinOp::Shl | BinOp::Shr => None,
}
}
+pub(crate) fn maybe_codegen_checked<'tcx>(
+fx: &mut FunctionCx<'_, '_, 'tcx>,
+bin_op: BinOp,
+lhs: CValue<'tcx>,
+rhs: CValue<'tcx>,
+) -> Option<CValue<'tcx>> {
+if lhs.layout().ty != fx.tcx.types.u128
+&& lhs.layout().ty != fx.tcx.types.i128
+&& rhs.layout().ty != fx.tcx.types.u128
+&& rhs.layout().ty != fx.tcx.types.i128
+{
+return None;
+}
+let is_signed = type_sign(lhs.layout().ty);
+match bin_op {
+BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => unreachable!(),
+BinOp::Mul if is_signed => {
+let out_ty = fx.tcx.mk_tup(&[lhs.layout().ty, fx.tcx.types.bool]);
+let oflow = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32));
+let lhs = lhs.load_scalar(fx);
+let rhs = rhs.load_scalar(fx);
+let oflow_ptr = oflow.to_ptr().get_addr(fx);
+let res = fx.lib_call_unadjusted(
+"__muloti4",
+vec![
+AbiParam::new(types::I128),
+AbiParam::new(types::I128),
+AbiParam::new(fx.pointer_type),
+],
+vec![AbiParam::new(types::I128)],
+&[lhs, rhs, oflow_ptr],
+)[0];
+let oflow = oflow.to_cvalue(fx).load_scalar(fx);
+let oflow = fx.bcx.ins().ireduce(types::I8, oflow);
+Some(CValue::by_val_pair(res, oflow, fx.layout_of(out_ty)))
+}
BinOp::Add | BinOp::Sub | BinOp::Mul => {
-assert!(checked);
let out_ty = fx.tcx.mk_tup(&[lhs.layout().ty, fx.tcx.types.bool]);
let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty));
let param_types = vec![
@@ -83,42 +132,8 @@ pub(crate) fn maybe_codegen<'tcx>(
Some(out_place.to_cvalue(fx))
}
BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"),
-BinOp::Div | BinOp::Rem => {
-assert!(!checked);
-let name = match (bin_op, is_signed) {
-(BinOp::Div, false) => "__udivti3",
-(BinOp::Div, true) => "__divti3",
-(BinOp::Rem, false) => "__umodti3",
-(BinOp::Rem, true) => "__modti3",
-_ => unreachable!(),
-};
-if fx.tcx.sess.target.is_like_windows {
-let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
-let ret = fx.lib_call(
-name,
-vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
-vec![AbiParam::new(types::I64X2)],
-&args,
-)[0];
-// FIXME use bitcast instead of store to get from i64x2 to i128
-let ret_place = CPlace::new_stack_slot(fx, lhs.layout());
-ret_place.to_ptr().store(fx, ret, MemFlags::trusted());
-Some(ret_place.to_cvalue(fx))
-} else {
-let args = [lhs.load_scalar(fx), rhs.load_scalar(fx)];
-let ret_val = fx.lib_call(
-name,
-vec![AbiParam::new(types::I128), AbiParam::new(types::I128)],
-vec![AbiParam::new(types::I128)],
-&args,
-)[0];
-Some(CValue::by_val(ret_val, lhs.layout()))
-}
-}
-BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => {
-assert!(!checked);
-None
-}
-BinOp::Shl | BinOp::Shr => None,
+BinOp::Div | BinOp::Rem => unreachable!(),
+BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => unreachable!(),
+BinOp::Shl | BinOp::Shr => unreachable!(),
}
}
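The net effect of this refactor: the old `checked: bool` parameter is split into two entry points, with `maybe_codegen` keeping the wrapping operations and the new `maybe_codegen_checked` keeping the overflow-reporting ones. The contract of the `__muloti4` libcall used for signed checked multiplication can be modeled in plain Rust (the real call returns the product and writes the flag through the `i32` out-pointer); `muloti4_model` is a hypothetical name:

// Plain-Rust model of __muloti4: a 128-bit product plus an overflow flag.
fn muloti4_model(lhs: i128, rhs: i128) -> (i128, bool) {
    lhs.overflowing_mul(rhs)
}

fn main() {
    assert_eq!(muloti4_model(2, 3), (6, false));
    // On overflow the product wraps: i128::MAX * 2 wraps to -2.
    assert_eq!(muloti4_model(i128::MAX, 2), (-2, true));
}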


@@ -72,19 +72,6 @@ fn clif_type_from_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<types::Typ
pointer_ty(tcx)
}
}
-ty::Adt(adt_def, _) if adt_def.repr().simd() => {
-let (element, count) = match &tcx.layout_of(ParamEnv::reveal_all().and(ty)).unwrap().abi
-{
-Abi::Vector { element, count } => (*element, *count),
-_ => unreachable!(),
-};
-match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
-// Cranelift currently only implements icmp for 128bit vectors.
-Some(vector_ty) if vector_ty.bits() == 128 => vector_ty,
-_ => return None,
-}
-}
ty::Param(_) => bug!("ty param {:?}", ty),
_ => return None,
})
@@ -96,12 +83,7 @@ fn clif_pair_type_from_ty<'tcx>(
) -> Option<(types::Type, types::Type)> {
Some(match ty.kind() {
ty::Tuple(types) if types.len() == 2 => {
-let a = clif_type_from_ty(tcx, types[0])?;
-let b = clif_type_from_ty(tcx, types[1])?;
-if a.is_vector() || b.is_vector() {
-return None;
-}
-(a, b)
+(clif_type_from_ty(tcx, types[0])?, clif_type_from_ty(tcx, types[1])?)
}
ty::RawPtr(TypeAndMut { ty: pointee_ty, mutbl: _ }) | ty::Ref(_, pointee_ty, _) => {
if has_ptr_meta(tcx, *pointee_ty) {
@@ -431,7 +413,11 @@ impl<'tcx> FunctionCx<'_, '_, 'tcx> {
// Note: must be kept in sync with get_caller_location from cg_ssa
pub(crate) fn get_caller_location(&mut self, mut source_info: mir::SourceInfo) -> CValue<'tcx> {
-let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, span: Span| {
+let span_to_caller_location = |fx: &mut FunctionCx<'_, '_, 'tcx>, mut span: Span| {
+// Remove `Inlined` marks as they pollute `expansion_cause`.
+while span.is_inlined() {
+span.remove_mark();
+}
let topmost = span.ctxt().outer_expn().expansion_cause().unwrap_or(span);
let caller = fx.tcx.sess.source_map().lookup_char_pos(topmost.lo());
let const_loc = fx.tcx.const_caller_location((


@@ -25,8 +25,18 @@ impl ConcurrencyLimiter {
.clone()
.into_helper_thread(move |token| {
let mut state = state_helper.lock().unwrap();
-state.add_new_token(token.unwrap());
-available_token_condvar_helper.notify_one();
+match token {
+Ok(token) => {
+state.add_new_token(token);
+available_token_condvar_helper.notify_one();
+}
+Err(err) => {
+state.poison(format!("failed to acquire jobserver token: {}", err));
+// Notify all threads waiting for a token to give them a chance to
+// gracefully exit.
+available_token_condvar_helper.notify_all();
+}
+}
})
.unwrap();
ConcurrencyLimiter {
@@ -37,16 +47,31 @@
}
}
-pub(super) fn acquire(&mut self) -> ConcurrencyLimiterToken {
+pub(super) fn acquire(&mut self, handler: &rustc_errors::Handler) -> ConcurrencyLimiterToken {
let mut state = self.state.lock().unwrap();
loop {
state.assert_invariants();
-if state.try_start_job() {
-return ConcurrencyLimiterToken {
-state: self.state.clone(),
-available_token_condvar: self.available_token_condvar.clone(),
-};
+match state.try_start_job() {
+Ok(true) => {
+return ConcurrencyLimiterToken {
+state: self.state.clone(),
+available_token_condvar: self.available_token_condvar.clone(),
+};
+}
+Ok(false) => {}
+Err(err) => {
+// An error happened when acquiring the token. Raise it as fatal error.
+// Make sure to drop the mutex guard first to prevent poisoning the mutex.
+drop(state);
+if let Some(err) = err {
+handler.fatal(&err).raise();
+} else {
+// The error was already emitted, but compilation continued. Raise a silent
+// fatal error.
+rustc_errors::FatalError.raise();
+}
+}
+}
self.helper_thread.as_mut().unwrap().request_token();
@@ -100,13 +125,22 @@ mod state {
pending_jobs: usize,
active_jobs: usize,
+poisoned: bool,
+stored_error: Option<String>,
// None is used to represent the implicit token, Some to represent explicit tokens
tokens: Vec<Option<Acquired>>,
}
impl ConcurrencyLimiterState {
pub(super) fn new(pending_jobs: usize) -> Self {
-ConcurrencyLimiterState { pending_jobs, active_jobs: 0, tokens: vec![None] }
+ConcurrencyLimiterState {
+pending_jobs,
+active_jobs: 0,
+poisoned: false,
+stored_error: None,
+tokens: vec![None],
+}
}
pub(super) fn assert_invariants(&self) {
@@ -127,14 +161,18 @@ mod state {
self.drop_excess_capacity();
}
-pub(super) fn try_start_job(&mut self) -> bool {
+pub(super) fn try_start_job(&mut self) -> Result<bool, Option<String>> {
+if self.poisoned {
+return Err(self.stored_error.take());
+}
if self.active_jobs < self.tokens.len() {
// Using existing token
self.job_started();
-return true;
+return Ok(true);
}
-false
+Ok(false)
}
pub(super) fn job_started(&mut self) {
@@ -161,6 +199,11 @@ mod state {
self.assert_invariants();
}
+pub(super) fn poison(&mut self, error: String) {
+self.poisoned = true;
+self.stored_error = Some(error);
+}
fn drop_excess_capacity(&mut self) {
self.assert_invariants();
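The poisoning scheme added in these hunks has two halves: the helper thread records the error and wakes every waiter (`notify_all`, not `notify_one`), and each waiter re-checks the poison flag on wakeup. A self-contained sketch of the pattern with standard-library types only; names and the `tokens` counter are hypothetical simplifications of the diff:

use std::sync::{Condvar, Mutex};

struct Shared {
    poisoned: bool,
    error: Option<String>,
    tokens: usize,
}

// Helper-thread side: store the error, then wake *all* waiters so none of
// them blocks forever on a token that will never arrive.
fn poison(pair: &(Mutex<Shared>, Condvar), err: String) {
    let mut s = pair.0.lock().unwrap();
    s.poisoned = true;
    s.error = Some(err);
    pair.1.notify_all();
}

// Waiter side: re-check the poison flag after every wakeup.
fn acquire(pair: &(Mutex<Shared>, Condvar)) -> Result<(), Option<String>> {
    let mut s = pair.0.lock().unwrap();
    loop {
        if s.poisoned {
            return Err(s.error.take());
        }
        if s.tokens > 0 {
            s.tokens -= 1;
            return Ok(());
        }
        s = pair.1.wait(s).unwrap();
    }
}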


@@ -159,6 +159,8 @@ pub(crate) fn codegen_const_value<'tcx>(
_ => unreachable!(),
};
+// FIXME avoid this extra copy to the stack and directly write to the final
+// destination
let place = CPlace::new_stack_slot(fx, layout);
place.to_ptr().store(fx, val, MemFlags::trusted());
place.to_cvalue(fx)


@@ -324,6 +324,10 @@ fn module_codegen(
OngoingModuleCodegen::Async(std::thread::spawn(move || {
cx.profiler.clone().verbose_generic_activity_with_arg("compile functions", &*cgu_name).run(
|| {
+cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
+cx.profiler.clone(),
+)));
let mut cached_context = Context::new();
for codegened_func in codegened_functions {
crate::base::compile_fn(
@@ -407,7 +411,7 @@ pub(crate) fn run_aot(
backend_config.clone(),
global_asm_config.clone(),
cgu.name(),
-concurrency_limiter.acquire(),
+concurrency_limiter.acquire(tcx.sess.diagnostic()),
),
module_codegen,
Some(rustc_middle::dep_graph::hash_result),


@@ -224,6 +224,10 @@ pub(crate) fn codegen_and_compile_fn<'tcx>(
module: &mut dyn Module,
instance: Instance<'tcx>,
) {
+cranelift_codegen::timing::set_thread_profiler(Box::new(super::MeasuremeProfiler(
+cx.profiler.clone(),
+)));
tcx.prof.generic_activity("codegen and compile fn").run(|| {
let _inst_guard =
crate::PrintOnPanic(|| format!("{:?} {}", instance, tcx.symbol_name(instance).name));


@@ -4,6 +4,7 @@
//! [`codegen_fn`]: crate::base::codegen_fn
//! [`codegen_static`]: crate::constant::codegen_static
+use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_middle::mir::mono::{Linkage as RLinkage, MonoItem, Visibility};
use crate::prelude::*;
@@ -39,3 +40,31 @@ fn predefine_mono_items<'tcx>(
}
});
}
+struct MeasuremeProfiler(SelfProfilerRef);
+struct TimingGuard {
+profiler: std::mem::ManuallyDrop<SelfProfilerRef>,
+inner: Option<rustc_data_structures::profiling::TimingGuard<'static>>,
+}
+impl Drop for TimingGuard {
+fn drop(&mut self) {
+self.inner.take();
+unsafe {
+std::mem::ManuallyDrop::drop(&mut self.profiler);
+}
+}
+}
+impl cranelift_codegen::timing::Profiler for MeasuremeProfiler {
+fn start_pass(&self, pass: cranelift_codegen::timing::Pass) -> Box<dyn std::any::Any> {
+let mut timing_guard =
+TimingGuard { profiler: std::mem::ManuallyDrop::new(self.0.clone()), inner: None };
+timing_guard.inner = Some(
+unsafe { &*(&*timing_guard.profiler as &SelfProfilerRef as *const SelfProfilerRef) }
+.generic_activity(pass.description()),
+);
+Box::new(timing_guard)
+}
+}
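The unsafe block above manufactures a `'static` borrow of the profiler clone; that is sound only because `TimingGuard::drop` ends the inner activity before the `ManuallyDrop` owner is released. For comparison, a minimal `Profiler` implementation that avoids the self-borrow entirely, using only the trait surface visible in this diff (`start_pass`, `Pass::description`, `set_thread_profiler`); `PrintProfiler` and `PassGuard` are hypothetical names:

use std::any::Any;
use std::time::Instant;

// A stand-in profiler that just prints each Cranelift pass duration.
struct PrintProfiler;

struct PassGuard {
    name: &'static str,
    start: Instant,
}

impl Drop for PassGuard {
    fn drop(&mut self) {
        // Cranelift drops the boxed guard when the pass ends.
        eprintln!("{}: {:?}", self.name, self.start.elapsed());
    }
}

impl cranelift_codegen::timing::Profiler for PrintProfiler {
    fn start_pass(&self, pass: cranelift_codegen::timing::Pass) -> Box<dyn Any> {
        Box::new(PassGuard { name: pass.description(), start: Instant::now() })
    }
}

// Registered once per codegen thread, as the driver hunks above do:
// cranelift_codegen::timing::set_thread_profiler(Box::new(PrintProfiler));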


@@ -104,7 +104,6 @@ pub(crate) fn compile_global_asm(
return Ok(None);
}
-// FIXME fix linker error on macOS
if cfg!(not(feature = "inline_asm")) {
return Err(
"asm! and global_asm! support is disabled while compiling rustc_codegen_cranelift"


@@ -51,17 +51,13 @@ fn report_atomic_type_validation_error<'tcx>(
fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
-pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
+pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type {
let (element, count) = match layout.abi {
Abi::Vector { element, count } => (element, count),
_ => unreachable!(),
};
-match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
-// Cranelift currently only implements icmp for 128bit vectors.
-Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
-_ => None,
-}
+scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()).unwrap()
}
fn simd_for_each_lane<'tcx>(
@@ -1107,8 +1103,8 @@ fn codegen_regular_intrinsic_call<'tcx>(
fx.bcx.ins().call_indirect(f_sig, f, &[data]);
-let layout = ret.layout();
-let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
+let layout = fx.layout_of(fx.tcx.types.i32);
+let ret_val = CValue::by_val(fx.bcx.ins().iconst(types::I32, 0), layout);
ret.write_cvalue(fx, ret_val);
}


@@ -253,7 +253,7 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>(
}
ret.write_cvalue(fx, base);
-let ret_lane = ret.place_field(fx, FieldIdx::new(idx.try_into().unwrap()));
+let ret_lane = ret.place_lane(fx, idx.try_into().unwrap());
ret_lane.write_cvalue(fx, val);
}


@@ -110,7 +110,7 @@ mod prelude {
pub(crate) use crate::common::*;
pub(crate) use crate::debuginfo::{DebugContext, UnwindContext};
pub(crate) use crate::pointer::Pointer;
-pub(crate) use crate::value_and_place::{CPlace, CPlaceInner, CValue};
+pub(crate) use crate::value_and_place::{CPlace, CValue};
}
struct PrintOnPanic<F: Fn() -> String>(F);


@@ -118,7 +118,7 @@ pub(crate) fn codegen_int_binop<'tcx>(
);
}
-if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, false, in_lhs, in_rhs) {
+if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, in_lhs, in_rhs) {
return res;
}
@@ -173,7 +173,7 @@ pub(crate) fn codegen_checked_int_binop<'tcx>(
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
-if let Some(res) = crate::codegen_i128::maybe_codegen(fx, bin_op, true, in_lhs, in_rhs) {
+if let Some(res) = crate::codegen_i128::maybe_codegen_checked(fx, bin_op, in_lhs, in_rhs) {
return res;
}


@@ -7,48 +7,51 @@
//! test compile
//! target x86_64
//!
-//! function u0:0(i64, i64, i64) system_v {
-//! ; symbol _ZN119_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$u27$a$u20$$RF$$u27$b$u20$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17he85059d5e6a760a0E
-//! ; instance Instance { def: Item(DefId(0/0:29 ~ example[8787]::{{impl}}[0]::call_once[0])), substs: [ReErased, ReErased] }
-//! ; sig ([IsNotEmpty, (&&[u16],)]; c_variadic: false)->(u8, u8)
+//! function u0:22(i64) -> i8, i8 system_v {
+//! ; symbol _ZN97_$LT$example..IsNotEmpty$u20$as$u20$mini_core..FnOnce$LT$$LP$$RF$$RF$$u5b$u16$u5d$$C$$RP$$GT$$GT$9call_once17hd517c453d67c0915E
+//! ; instance Instance { def: Item(WithOptConstParam { did: DefId(0:42 ~ example[4e51]::{impl#0}::call_once), const_param_did: None }), substs: [ReErased, ReErased] }
+//! ; abi FnAbi { args: [ArgAbi { layout: TyAndLayout { ty: IsNotEmpty, layout: Layout { size: Size(0 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: Aggregate { sized: true }, fields: Arbitrary { offsets: [], memory_index: [] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Ignore }, ArgAbi { layout: TyAndLayout { ty: &&[u16], layout: Layout { size: Size(8 bytes), align: AbiAndPrefAlign { abi: Align(8 bytes), pref: Align(8 bytes) }, abi: Scalar(Initialized { value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), fields: Primitive, largest_niche: Some(Niche { offset: Size(0 bytes), value: Pointer(AddressSpace(0)), valid_range: 1..=18446744073709551615 }), variants: Single { index: 0 } } }, mode: Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) }], ret: ArgAbi { layout: TyAndLayout { ty: (u8, u8), layout: Layout { size: Size(2 bytes), align: AbiAndPrefAlign { abi: Align(1 bytes), pref: Align(8 bytes) }, abi: ScalarPair(Initialized { value: Int(I8, false), valid_range: 0..=255 }, Initialized { value: Int(I8, false), valid_range: 0..=255 }), fields: Arbitrary { offsets: [Size(0 bytes), Size(1 bytes)], memory_index: [0, 1] }, largest_niche: None, variants: Single { index: 0 } } }, mode: Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) }, c_variadic: false, fixed_count: 1, conv: Rust, can_unwind: false }
//!
-//! ; ssa {_2: NOT_SSA, _4: NOT_SSA, _0: NOT_SSA, _3: (empty), _1: NOT_SSA}
-//! ; msg loc.idx param pass mode ssa flags ty
-//! ; ret _0 = v0 ByRef NOT_SSA (u8, u8)
-//! ; arg _1 = v1 ByRef NOT_SSA IsNotEmpty
-//! ; arg _2.0 = v2 ByVal(types::I64) NOT_SSA &&[u16]
+//! ; kind loc.idx param pass mode ty
+//! ; ssa _0 (u8, u8) 2b 1, 8 var=(0, 1)
+//! ; ret _0 - Pair(ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }, ArgAttributes { regular: NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: None }) (u8, u8)
+//! ; arg _1 - Ignore IsNotEmpty
+//! ; arg _2.0 = v0 Direct(ArgAttributes { regular: NonNull | NoUndef, arg_ext: None, pointee_size: Size(0 bytes), pointee_align: Some(Align(8 bytes)) }) &&[u16]
//!
-//! ss0 = explicit_slot 0 ; _1: IsNotEmpty size=0 align=1,8
-//! ss1 = explicit_slot 8 ; _2: (&&[u16],) size=8 align=8,8
-//! ss2 = explicit_slot 8 ; _4: (&&[u16],) size=8 align=8,8
-//! sig0 = (i64, i64, i64) system_v
-//! sig1 = (i64, i64, i64) system_v
-//! fn0 = colocated u0:6 sig1 ; Instance { def: Item(DefId(0/0:31 ~ example[8787]::{{impl}}[1]::call_mut[0])), substs: [ReErased, ReErased] }
+//! ; kind local ty size align (abi,pref)
+//! ; zst _1 IsNotEmpty 0b 1, 8 align=8,offset=
+//! ; stack _2 (&&[u16],) 8b 8, 8 storage=ss0
+//! ; ssa _3 &mut IsNotEmpty 8b 8, 8 var=2
//!
-//! block0(v0: i64, v1: i64, v2: i64):
-//! v3 = stack_addr.i64 ss0
-//! v4 = stack_addr.i64 ss1
-//! store v2, v4
-//! v5 = stack_addr.i64 ss2
+//! ss0 = explicit_slot 16
+//! sig0 = (i64, i64) -> i8, i8 system_v
+//! fn0 = colocated u0:23 sig0 ; Instance { def: Item(WithOptConstParam { did: DefId(0:46 ~ example[4e51]::{impl#1}::call_mut), const_param_did: None }), substs: [ReErased, ReErased] }
//!
+//! block0(v0: i64):
//! nop
+//! ; write_cvalue: Addr(Pointer { base: Stack(ss0), offset: Offset32(0) }, None): &&[u16] <- ByVal(v0): &&[u16]
+//! stack_store v0, ss0
//! jump block1
//!
//! block1:
//! nop
//! ; _3 = &mut _1
-//! ; _4 = _2
-//! v6 = load.i64 v4
-//! store v6, v5
+//! v1 = iconst.i64 8
+//! ; write_cvalue: Var(_3, var2): &mut IsNotEmpty <- ByVal(v1): &mut IsNotEmpty
//! ;
-//! ; _0 = const mini_core::FnMut::call_mut(move _3, move _4)
-//! v7 = load.i64 v5
-//! call fn0(v0, v3, v7)
+//! ; _0 = <IsNotEmpty as mini_core::FnMut<(&&[u16],)>>::call_mut(move _3, _2)
+//! v2 = stack_load.i64 ss0
+//! v3, v4 = call fn0(v1, v2) ; v1 = 8
+//! v5 -> v3
+//! v6 -> v4
+//! ; write_cvalue: VarPair(_0, var0, var1): (u8, u8) <- ByValPair(v3, v4): (u8, u8)
//! jump block2
//!
//! block2:
//! nop
//! ;
//! ; return
-//! return
+//! return v5, v6
//! }
//! ```


@@ -2,8 +2,8 @@
use crate::prelude::*;
+use cranelift_codegen::entity::EntityRef;
use cranelift_codegen::ir::immediates::Offset32;
-use cranelift_codegen::ir::{InstructionData, Opcode};
fn codegen_field<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -214,17 +214,7 @@ impl<'tcx> CValue<'tcx> {
) -> CValue<'tcx> {
let layout = self.1;
match self.0 {
-CValueInner::ByVal(val) => match layout.abi {
-Abi::Vector { element: _, count } => {
-let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
-let field = u8::try_from(field.index()).unwrap();
-assert!(field < count);
-let lane = fx.bcx.ins().extractlane(val, field);
-let field_layout = layout.field(&*fx, usize::from(field));
-CValue::by_val(lane, field_layout)
-}
-_ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
-},
+CValueInner::ByVal(_) => unreachable!(),
CValueInner::ByValPair(val1, val2) => match layout.abi {
Abi::ScalarPair(_, _) => {
let val = match field.as_u32() {
@@ -258,16 +248,7 @@ impl<'tcx> CValue<'tcx> {
let lane_layout = fx.layout_of(lane_ty);
assert!(lane_idx < lane_count);
match self.0 {
-CValueInner::ByVal(val) => match layout.abi {
-Abi::Vector { element: _, count: _ } => {
-assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
-let lane_idx = u8::try_from(lane_idx).unwrap();
-let lane = fx.bcx.ins().extractlane(val, lane_idx);
-CValue::by_val(lane, lane_layout)
-}
-_ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
-},
-CValueInner::ByValPair(_, _) => unreachable!(),
+CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => unreachable!(),
CValueInner::ByRef(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
@@ -277,14 +258,6 @@ impl<'tcx> CValue<'tcx> {
}
}
-pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
-crate::unsize::coerce_unsized_into(fx, self, dest);
-}
-pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
-crate::unsize::coerce_dyn_star(fx, self, dest);
-}
/// If `ty` is signed, `const_val` must already be sign extended.
pub(crate) fn const_val(
fx: &mut FunctionCx<'_, '_, 'tcx>,
@@ -345,10 +318,9 @@ pub(crate) struct CPlace<'tcx> {
}
#[derive(Debug, Copy, Clone)]
-pub(crate) enum CPlaceInner {
+enum CPlaceInner {
Var(Local, Variable),
VarPair(Local, Variable, Variable),
-VarLane(Local, Variable, u8),
Addr(Pointer, Option<Value>),
}
@@ -357,10 +329,6 @@ impl<'tcx> CPlace<'tcx> {
self.layout
}
-pub(crate) fn inner(&self) -> &CPlaceInner {
-&self.inner
-}
pub(crate) fn new_stack_slot(
fx: &mut FunctionCx<'_, '_, 'tcx>,
layout: TyAndLayout<'tcx>,
@@ -442,12 +410,6 @@ impl<'tcx> CPlace<'tcx> {
//fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
CValue::by_val_pair(val1, val2, layout)
}
-CPlaceInner::VarLane(_local, var, lane) => {
-let val = fx.bcx.use_var(var);
-//fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
-let val = fx.bcx.ins().extractlane(val, lane);
-CValue::by_val(val, layout)
-}
CPlaceInner::Addr(ptr, extra) => {
if let Some(extra) = extra {
CValue::by_ref_unsized(ptr, extra, layout)
@@ -458,21 +420,56 @@ impl<'tcx> CPlace<'tcx> {
}
}
#[track_caller]
-pub(crate) fn to_ptr(self) -> Pointer {
-match self.to_ptr_maybe_unsized() {
-(ptr, None) => ptr,
-(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+pub(crate) fn debug_comment(self) -> (&'static str, String) {
+match self.inner {
+CPlaceInner::Var(_local, var) => ("ssa", format!("var={}", var.index())),
+CPlaceInner::VarPair(_local, var1, var2) => {
+("ssa", format!("var=({}, {})", var1.index(), var2.index()))
+}
+CPlaceInner::Addr(ptr, meta) => {
+let meta =
+if let Some(meta) = meta { format!(",meta={}", meta) } else { String::new() };
+match ptr.debug_base_and_offset() {
+(crate::pointer::PointerBase::Addr(addr), offset) => {
+("reuse", format!("storage={}{}{}", addr, offset, meta))
+}
+(crate::pointer::PointerBase::Stack(stack_slot), offset) => {
+("stack", format!("storage={}{}{}", stack_slot, offset, meta))
+}
+(crate::pointer::PointerBase::Dangling(align), offset) => {
+("zst", format!("align={},offset={}", align.bytes(), offset))
+}
+}
+}
+}
}
#[track_caller]
-pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
+pub(crate) fn to_ptr(self) -> Pointer {
match self.inner {
-CPlaceInner::Addr(ptr, extra) => (ptr, extra),
-CPlaceInner::Var(_, _)
-| CPlaceInner::VarPair(_, _, _)
-| CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
+CPlaceInner::Addr(ptr, None) => ptr,
+CPlaceInner::Addr(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
+bug!("Expected CPlace::Addr, found {:?}", self)
+}
}
}
+#[track_caller]
+pub(crate) fn to_ptr_unsized(self) -> (Pointer, Value) {
+match self.inner {
+CPlaceInner::Addr(ptr, Some(extra)) => (ptr, extra),
+CPlaceInner::Addr(_, None) | CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
+bug!("Expected unsized cplace, found {:?}", self)
+}
+}
+}
+pub(crate) fn try_to_ptr(self) -> Option<Pointer> {
+match self.inner {
+CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => None,
+CPlaceInner::Addr(ptr, None) => Some(ptr),
+CPlaceInner::Addr(_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
+}
+}
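A toy model of the accessor split introduced here, with `usize` standing in for `Pointer`/`Value` and `PlaceInner` a hypothetical stand-in for `CPlaceInner`: `to_ptr` serves sized memory places, `to_ptr_unsized` serves fat places carrying metadata, and `try_to_ptr` is the non-panicking query that lets callers such as `codegen_with_call_return_arg` (earlier in this diff) opportunistically reuse existing storage:

#[derive(Debug, Clone, Copy)]
enum PlaceInner {
    Var,                        // SSA variable, no memory address
    Addr(usize, Option<usize>), // pointer plus optional metadata (length/vtable)
}

impl PlaceInner {
    fn to_ptr(self) -> usize {
        match self {
            PlaceInner::Addr(ptr, None) => ptr,
            _ => panic!("expected sized place, found {self:?}"),
        }
    }
    fn to_ptr_unsized(self) -> (usize, usize) {
        match self {
            PlaceInner::Addr(ptr, Some(meta)) => (ptr, meta),
            _ => panic!("expected unsized place, found {self:?}"),
        }
    }
    fn try_to_ptr(self) -> Option<usize> {
        match self {
            PlaceInner::Var => None,
            PlaceInner::Addr(ptr, None) => Some(ptr),
            PlaceInner::Addr(_, Some(_)) => panic!("expected sized place"),
        }
    }
}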
@@ -496,7 +493,7 @@ impl<'tcx> CPlace<'tcx> {
from: CValue<'tcx>,
method: &'static str,
) {
-fn transmute_value<'tcx>(
+fn transmute_scalar<'tcx>(
fx: &mut FunctionCx<'_, '_, 'tcx>,
var: Variable,
data: Value,
@@ -520,7 +517,7 @@ impl<'tcx> CPlace<'tcx> {
| (types::F64, types::I64) => codegen_bitcast(fx, dst_ty, data),
_ if src_ty.is_vector() && dst_ty.is_vector() => codegen_bitcast(fx, dst_ty, data),
_ if src_ty.is_vector() || dst_ty.is_vector() => {
-// FIXME do something more efficient for transmutes between vectors and integers.
+// FIXME(bytecodealliance/wasmtime#6104) do something more efficient for transmutes between vectors and integers.
let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
kind: StackSlotKind::ExplicitSlot,
// FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
@@ -554,7 +551,7 @@ impl<'tcx> CPlace<'tcx> {
format!(
"{}: {:?}: {:?} <- {:?}: {:?}",
method,
-self.inner(),
+self.inner,
self.layout().ty,
from.0,
from.layout().ty
@@ -563,32 +560,11 @@ impl<'tcx> CPlace<'tcx> {
}
let dst_layout = self.layout();
-let to_ptr = match self.inner {
+match self.inner {
CPlaceInner::Var(_local, var) => {
-if let ty::Array(element, len) = dst_layout.ty.kind() {
-// Can only happen for vector types
-let len = u32::try_from(len.eval_target_usize(fx.tcx, ParamEnv::reveal_all()))
-.unwrap();
-let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();
-let data = match from.0 {
-CValueInner::ByRef(ptr, None) => {
-let mut flags = MemFlags::new();
-flags.set_notrap();
-ptr.load(fx, vector_ty, flags)
-}
-CValueInner::ByVal(_)
-| CValueInner::ByValPair(_, _)
-| CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
-};
-fx.bcx.def_var(var, data);
-return;
-}
let data = CValue(from.0, dst_layout).load_scalar(fx);
let dst_ty = fx.clif_type(self.layout().ty).unwrap();
-transmute_value(fx, var, data, dst_ty);
-return;
+transmute_scalar(fx, var, data, dst_ty);
}
CPlaceInner::VarPair(_local, var1, var2) => {
let (data1, data2) = if from.layout().ty == dst_layout.ty {
@@ -599,80 +575,61 @@ impl<'tcx> CPlace<'tcx> {
CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx)
};
let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
-transmute_value(fx, var1, data1, dst_ty1);
-transmute_value(fx, var2, data2, dst_ty2);
-return;
+transmute_scalar(fx, var1, data1, dst_ty1);
+transmute_scalar(fx, var2, data2, dst_ty2);
}
-CPlaceInner::VarLane(_local, var, lane) => {
-let data = from.load_scalar(fx);
-// First get the old vector
-let vector = fx.bcx.use_var(var);
-//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
-// Next insert the written lane into the vector
-let vector = fx.bcx.ins().insertlane(vector, data, lane);
-// Finally write the new vector
-//fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
-fx.bcx.def_var(var, vector);
-return;
-}
-CPlaceInner::Addr(ptr, None) => {
+CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
+CPlaceInner::Addr(to_ptr, None) => {
if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
return;
}
-ptr
-}
-CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
-};
-let mut flags = MemFlags::new();
-flags.set_notrap();
-match from.layout().abi {
-// FIXME make Abi::Vector work too
-Abi::Scalar(_) => {
-let val = from.load_scalar(fx);
-to_ptr.store(fx, val, flags);
-return;
-}
-Abi::ScalarPair(a_scalar, b_scalar) => {
-let (value, extra) = from.load_scalar_pair(fx);
-let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-to_ptr.store(fx, value, flags);
-to_ptr.offset(fx, b_offset).store(fx, extra, flags);
-return;
-}
-_ => {}
-}
+let mut flags = MemFlags::new();
+flags.set_notrap();
+match from.layout().abi {
+Abi::Scalar(_) => {
+let val = from.load_scalar(fx);
+to_ptr.store(fx, val, flags);
+return;
+}
+Abi::ScalarPair(a_scalar, b_scalar) => {
+let (value, extra) = from.load_scalar_pair(fx);
+let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+to_ptr.store(fx, value, flags);
+to_ptr.offset(fx, b_offset).store(fx, extra, flags);
+return;
+}
+_ => {}
+}
-match from.0 {
-CValueInner::ByVal(val) => {
-to_ptr.store(fx, val, flags);
+match from.0 {
+CValueInner::ByVal(val) => {
+to_ptr.store(fx, val, flags);
+}
+CValueInner::ByValPair(_, _) => {
+bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
+}
+CValueInner::ByRef(from_ptr, None) => {
+let from_addr = from_ptr.get_addr(fx);
+let to_addr = to_ptr.get_addr(fx);
+let src_layout = from.1;
+let size = dst_layout.size.bytes();
+let src_align = src_layout.align.abi.bytes() as u8;
+let dst_align = dst_layout.align.abi.bytes() as u8;
+fx.bcx.emit_small_memory_copy(
+fx.target_config,
+to_addr,
+from_addr,
+size,
+dst_align,
+src_align,
+true,
+flags,
+);
+}
+CValueInner::ByRef(_, Some(_)) => todo!(),
+}
-}
-CValueInner::ByValPair(_, _) => {
-bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
-}
-CValueInner::ByRef(from_ptr, None) => {
-let from_addr = from_ptr.get_addr(fx);
-let to_addr = to_ptr.get_addr(fx);
-let src_layout = from.1;
-let size = dst_layout.size.bytes();
-let src_align = src_layout.align.abi.bytes() as u8;
-let dst_align = dst_layout.align.abi.bytes() as u8;
-fx.bcx.emit_small_memory_copy(
-fx.target_config,
-to_addr,
-from_addr,
-size,
-dst_align,
-src_align,
-true,
-flags,
-);
-}
-CValueInner::ByRef(_, Some(_)) => todo!(),
}
}
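For intuition on what `transmute_scalar`'s bitcast arms (e.g. `(types::F32, types::I32)`, seen earlier in this file) compute: a bit-level reinterpretation of the same value, not a numeric conversion. In plain Rust:

fn main() {
    // Same 32 bits, two interpretations: 1.0f32 is 0x3f80_0000.
    let bits: u32 = 1.0f32.to_bits();
    assert_eq!(bits, 0x3f80_0000);
    assert_eq!(f32::from_bits(bits), 1.0);
    // A numeric cast gives a different result entirely.
    assert_eq!(1.0f32 as u32, 1);
}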
@@ -692,40 +649,6 @@ impl<'tcx> CPlace<'tcx> {
let layout = self.layout();
match self.inner {
-CPlaceInner::Var(local, var) => match layout.ty.kind() {
-ty::Array(_, _) => {
-// Can only happen for vector types
-return CPlace {
-inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
-layout: layout.field(fx, field.as_u32().try_into().unwrap()),
-};
-}
-ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
-let f0 = &adt_def.non_enum_variant().fields[FieldIdx::from_u32(0)];
-let f0_ty = f0.ty(fx.tcx, substs);
-match f0_ty.kind() {
-ty::Array(_, _) => {
-assert_eq!(field.as_u32(), 0);
-return CPlace {
-inner: CPlaceInner::Var(local, var),
-layout: layout.field(fx, field.as_u32().try_into().unwrap()),
-};
-}
-_ => {
-return CPlace {
-inner: CPlaceInner::VarLane(
-local,
-var,
-field.as_u32().try_into().unwrap(),
-),
-layout: layout.field(fx, field.as_u32().try_into().unwrap()),
-};
-}
-}
-}
-_ => {}
-},
CPlaceInner::VarPair(local, var1, var2) => {
let layout = layout.field(&*fx, field.index());
@@ -738,7 +661,12 @@ impl<'tcx> CPlace<'tcx> {
_ => {}
}
-let (base, extra) = self.to_ptr_maybe_unsized();
+let (base, extra) = match self.inner {
+CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+CPlaceInner::Var(_, _) | CPlaceInner::VarPair(_, _, _) => {
+bug!("Expected CPlace::Addr, found {:?}", self)
+}
+};
let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
if field_layout.is_unsized() {
@@ -767,15 +695,8 @@ impl<'tcx> CPlace<'tcx> {
assert!(lane_idx < lane_count);
match self.inner {
-CPlaceInner::Var(local, var) => {
-assert!(matches!(layout.abi, Abi::Vector { .. }));
-CPlace {
-inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
-layout: lane_layout,
-}
-}
+CPlaceInner::Var(_, _) => unreachable!(),
CPlaceInner::VarPair(_, _, _) => unreachable!(),
-CPlaceInner::VarLane(_, _, _) => unreachable!(),
CPlaceInner::Addr(ptr, None) => {
let field_offset = lane_layout.size * lane_idx;
let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
@@ -794,34 +715,13 @@ impl<'tcx> CPlace<'tcx> {
ty::Array(elem_ty, _) => {
let elem_layout = fx.layout_of(*elem_ty);
match self.inner {
-CPlaceInner::Var(local, var) => {
-// This is a hack to handle `vector_val.0[1]`. It doesn't allow dynamic
-// indexing.
-let lane_idx = match fx.bcx.func.dfg.insts
-[fx.bcx.func.dfg.value_def(index).unwrap_inst()]
-{
-InstructionData::UnaryImm { opcode: Opcode::Iconst, imm } => imm,
-_ => bug!(
-"Dynamic indexing into a vector type is not supported: {self:?}[{index}]"
-),
-};
-return CPlace {
-inner: CPlaceInner::VarLane(
-local,
-var,
-lane_idx.bits().try_into().unwrap(),
-),
-layout: elem_layout,
-};
-}
CPlaceInner::Addr(addr, None) => (elem_layout, addr),
-CPlaceInner::Addr(_, Some(_))
-| CPlaceInner::VarPair(_, _, _)
-| CPlaceInner::VarLane(_, _, _) => bug!("Can't index into {self:?}"),
+CPlaceInner::Var(_, _)
+| CPlaceInner::Addr(_, Some(_))
+| CPlaceInner::VarPair(_, _, _) => bug!("Can't index into {self:?}"),
}
-// FIXME use VarLane in case of Var with simd type
}
-ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
+ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_unsized().0),
_ => bug!("place_index({:?})", self.layout().ty),
};
@@ -846,12 +746,8 @@
layout: TyAndLayout<'tcx>,
) -> CValue<'tcx> {
if has_ptr_meta(fx.tcx, self.layout().ty) {
-let (ptr, extra) = self.to_ptr_maybe_unsized();
-CValue::by_val_pair(
-ptr.get_addr(fx),
-extra.expect("unsized type without metadata"),
-layout,
-)
+let (ptr, extra) = self.to_ptr_unsized();
+CValue::by_val_pair(ptr.get_addr(fx), extra, layout)
} else {
CValue::by_val(self.to_ptr().get_addr(fx), layout)
}