use crate::base;
use crate::traits::*;
use rustc_middle::mir;
use rustc_middle::mir::interpret::ErrorHandled;
use rustc_middle::mir::traversal;
use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Instance, Ty, TyCtxt, TypeFoldable, TypeVisitableExt};
use rustc_target::abi::call::{FnAbi, PassMode};

use std::iter;

use rustc_index::bit_set::BitSet;
use rustc_index::IndexVec;

use self::debuginfo::{FunctionDebugContext, PerLocalVarDebugInfo};
use self::operand::{OperandRef, OperandValue};
use self::place::PlaceRef;

// Used for tracking the state of generated basic blocks.
enum CachedLlbb<T> {
    /// Nothing created yet.
    None,

    /// Has been created.
    Some(T),

    /// Nothing created yet, and nothing should be.
    Skip,
}

/// Master context for codegenning from MIR.
pub struct FunctionCx<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> {
    instance: Instance<'tcx>,

    mir: &'tcx mir::Body<'tcx>,

    debug_context: Option<FunctionDebugContext<Bx::DIScope, Bx::DILocation>>,

    llfn: Bx::Function,

    cx: &'a Bx::CodegenCx,

    fn_abi: &'tcx FnAbi<'tcx, Ty<'tcx>>,

    /// When unwinding is initiated, we have to store this personality
    /// value somewhere so that we can load it and re-use it in the
    /// resume instruction. The personality is (afaik) some kind of
    /// value used for C++ unwinding, which must filter by type: we
    /// don't really care about it very much. Anyway, this value
    /// contains an alloca into which the personality is stored and
    /// then later loaded when generating the DIVERGE_BLOCK.
    personality_slot: Option<PlaceRef<'tcx, Bx::Value>>,

    /// A backend `BasicBlock` for each MIR `BasicBlock`, created lazily
    /// as-needed (e.g. RPO reaching it or another block branching to it).
    // FIXME(eddyb) rename `llbbs` and other `ll`-prefixed things to use a
    // more backend-agnostic prefix such as `cg` (i.e. this would be `cgbbs`).
    cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>>,

    /// The funclet status of each basic block.
    cleanup_kinds: Option<IndexVec<mir::BasicBlock, analyze::CleanupKind>>,

    /// When targeting MSVC, this stores the cleanup info for each funclet BB.
    /// This is initialized at the same time as the `landing_pads` entry for the
    /// funclets' head block, i.e. when needed by an unwind / `cleanup_ret` edge.
    funclets: IndexVec<mir::BasicBlock, Option<Bx::Funclet>>,

    /// This stores the cached landing/cleanup pad block for a given BB.
    // FIXME(eddyb) rename this to `eh_pads`.
    landing_pads: IndexVec<mir::BasicBlock, Option<Bx::BasicBlock>>,

    /// Cached unreachable block.
    unreachable_block: Option<Bx::BasicBlock>,

    /// Cached block that terminates the program upon unwinding into it.
    terminate_block: Option<Bx::BasicBlock>,

    /// The location where each MIR arg/var/tmp/ret is stored. This is
    /// usually a `PlaceRef` representing an alloca, but not always:
    /// sometimes we can skip the alloca and just store the value
    /// directly using an `OperandRef`, which makes for tighter LLVM
    /// IR. The conditions for using an `OperandRef` are as follows:
    ///
    /// - the type of the local must be judged "immediate" by `is_llvm_immediate`
    /// - the operand must never be referenced indirectly
    ///     - we should not take its address using the `&` operator
    ///     - nor should it appear in a place path like `tmp.a`
    /// - the operand must be defined by an rvalue that can generate immediate
    ///   values
    ///
    /// Avoiding allocs can also be important for certain intrinsics,
    /// notably `expect`.
    locals: IndexVec<mir::Local, LocalRef<'tcx, Bx::Value>>,

    /// All `VarDebugInfo` from the MIR body, partitioned by `Local`.
    /// This is `None` if no variable debuginfo/names are needed.
    per_local_var_debug_info:
        Option<IndexVec<mir::Local, Vec<PerLocalVarDebugInfo<'tcx, Bx::DIVariable>>>>,

    /// Caller location propagated if this function has `#[track_caller]`.
    caller_location: Option<OperandRef<'tcx, Bx::Value>>,
}
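
// Illustrative note (editorial addition): given MIR along the lines of
// `_1 = Add(_2, _3); _4 = &_1;`, local `_1` must be a `LocalRef::Place`,
// since its address is taken, while a local that is only ever read by value
// (e.g. a plain `i32` temporary) can stay a `LocalRef::Operand` and never
// be spilled to an alloca.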

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn monomorphize<T>(&self, value: T) -> T
    where
        T: Copy + TypeFoldable<TyCtxt<'tcx>>,
    {
        debug!("monomorphize: self.instance={:?}", self.instance);
        self.instance.subst_mir_and_normalize_erasing_regions(
            self.cx.tcx(),
            ty::ParamEnv::reveal_all(),
            ty::EarlyBinder::bind(value),
        )
    }
}
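
// Illustrative note (editorial addition): when codegenning `fn foo<T>(x: T)`
// for the instance with `T = u32`, `fx.monomorphize(ty)` substitutes the
// instance's generic arguments into `ty` and normalizes away projections, so
// everything downstream (layouts, ABIs) only ever sees concrete types.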

enum LocalRef<'tcx, V> {
    Place(PlaceRef<'tcx, V>),
    /// `UnsizedPlace(p)`: `p` itself is a thin pointer (indirect place).
    /// `*p` is the fat pointer that references the actual unsized place.
    /// Every time it is initialized, we have to reallocate the place
    /// and update the fat pointer. That's the reason why it is indirect.
    UnsizedPlace(PlaceRef<'tcx, V>),
    /// The backend [`OperandValue`] has already been generated.
    Operand(OperandRef<'tcx, V>),
    /// Will be a `Self::Operand` once we get to its definition.
    PendingOperand,
}
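
// Illustrative note (editorial addition): for an unsized local of type
// `[u8]`, `UnsizedPlace(p)` means `p` is an alloca holding the fat pointer
// `(data_ptr, len)`; the bytes themselves live wherever that fat pointer
// points, and re-initializing the local rewrites the fat pointer in `p`.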

impl<'tcx, V: CodegenObject> LocalRef<'tcx, V> {
    fn new_operand(layout: TyAndLayout<'tcx>) -> LocalRef<'tcx, V> {
        if layout.is_zst() {
            // Zero-size temporaries aren't always initialized, which
            // doesn't matter because they don't contain data, but
            // we need something in the operand.
            LocalRef::Operand(OperandRef::zero_sized(layout))
        } else {
            LocalRef::PendingOperand
        }
    }
}

///////////////////////////////////////////////////////////////////////////

#[instrument(level = "debug", skip(cx))]
pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    cx: &'a Bx::CodegenCx,
    instance: Instance<'tcx>,
) {
    assert!(!instance.substs.has_infer());

    let llfn = cx.get_fn(instance);

    let mir = cx.tcx().instance_mir(instance.def);

    let fn_abi = cx.fn_abi_of_instance(instance, ty::List::empty());
    debug!("fn_abi: {:?}", fn_abi);

    let debug_context = cx.create_function_debug_context(instance, &fn_abi, llfn, &mir);

    let start_llbb = Bx::append_block(cx, llfn, "start");
    let mut start_bx = Bx::build(cx, start_llbb);

    if mir.basic_blocks.iter().any(|bb| {
        bb.is_cleanup || matches!(bb.terminator().unwind(), Some(mir::UnwindAction::Terminate))
    }) {
        start_bx.set_personality_fn(cx.eh_personality());
    }

    let cleanup_kinds = base::wants_msvc_seh(cx.tcx().sess).then(|| analyze::cleanup_kinds(&mir));
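
    // Illustrative note (editorial addition): cleanup kinds are only needed
    // for funclet-based exception handling (MSVC SEH); landing-pad-based
    // unwinding does not classify blocks up front, hence the `Option`.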

    let cached_llbbs: IndexVec<mir::BasicBlock, CachedLlbb<Bx::BasicBlock>> =
        mir.basic_blocks
            .indices()
            .map(|bb| {
                if bb == mir::START_BLOCK { CachedLlbb::Some(start_llbb) } else { CachedLlbb::None }
            })
            .collect();

    let mut fx = FunctionCx {
        instance,
        mir,
        llfn,
        fn_abi,
        cx,
        personality_slot: None,
        cached_llbbs,
        unreachable_block: None,
        terminate_block: None,
        cleanup_kinds,
        landing_pads: IndexVec::from_elem(None, &mir.basic_blocks),
        funclets: IndexVec::from_fn_n(|_| None, mir.basic_blocks.len()),
        locals: IndexVec::new(),
        debug_context,
        per_local_var_debug_info: None,
        caller_location: None,
    };

    fx.per_local_var_debug_info = fx.compute_per_local_var_debug_info(&mut start_bx);

    // Evaluate all required consts; codegen later assumes that CTFE will never fail.
    let mut all_consts_ok = true;
    for const_ in &mir.required_consts {
        if let Err(err) = fx.eval_mir_constant(const_) {
            all_consts_ok = false;
            match err {
                // errored or at least linted
                ErrorHandled::Reported(_) => {}
                ErrorHandled::TooGeneric => {
                    span_bug!(const_.span, "codegen encountered polymorphic constant: {:?}", err)
                }
            }
        }
    }
    if !all_consts_ok {
        // We leave the IR in some half-built state here, and rely on this code not even being
        // submitted to LLVM once an error was raised.
        return;
    }
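
    // Illustrative note (editorial addition): a required const that fails
    // CTFE (e.g. an associated `const` whose evaluation panics) has already
    // been reported as `ErrorHandled::Reported` above, so bailing out here
    // means no later codegen path can observe an unevaluated constant.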

    let memory_locals = analyze::non_ssa_locals(&fx);

    // Allocate variable and temp allocas
    fx.locals = {
        let args = arg_local_refs(&mut start_bx, &mut fx, &memory_locals);

        let mut allocate_local = |local| {
            let decl = &mir.local_decls[local];
            let layout = start_bx.layout_of(fx.monomorphize(decl.ty));
            assert!(!layout.ty.has_erasable_regions());

            if local == mir::RETURN_PLACE && fx.fn_abi.ret.is_indirect() {
                debug!("alloc: {:?} (return place) -> place", local);
                let llretptr = start_bx.get_param(0);
                return LocalRef::Place(PlaceRef::new_sized(llretptr, layout));
            }

            if memory_locals.contains(local) {
                debug!("alloc: {:?} -> place", local);
                if layout.is_unsized() {
                    LocalRef::UnsizedPlace(PlaceRef::alloca_unsized_indirect(&mut start_bx, layout))
                } else {
                    LocalRef::Place(PlaceRef::alloca(&mut start_bx, layout))
                }
            } else {
                debug!("alloc: {:?} -> operand", local);
                LocalRef::new_operand(layout)
            }
        };

        let retptr = allocate_local(mir::RETURN_PLACE);
        iter::once(retptr)
            .chain(args.into_iter())
            .chain(mir.vars_and_temps_iter().map(allocate_local))
            .collect()
    };
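
    // Illustrative note (editorial addition): MIR numbers locals as `_0`
    // (the return place), then one local per argument, then user variables
    // and temporaries; the `iter::once(retptr).chain(..)` above reproduces
    // exactly that order, so `fx.locals` can be indexed directly by
    // `mir::Local`.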

    // Apply debuginfo to the newly allocated locals.
    fx.debug_introduce_locals(&mut start_bx);

    // The builders will be created separately for each basic block at `codegen_block`.
    // So drop the builder of `start_llbb` to avoid having two at the same time.
    drop(start_bx);

    // Codegen the body of each block using reverse postorder
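    // Illustrative note (editorial addition): in reverse postorder a block's
    // dominators always come first, so a local's defining statement is
    // codegenned before any use of its `LocalRef::Operand` value.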
    for (bb, _) in traversal::reverse_postorder(&mir) {
        fx.codegen_block(bb);
    }
}

/// Produces, for each argument, a `Value` pointing at the
/// argument's value. As arguments are places, these are always
/// indirect.
fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    fx: &mut FunctionCx<'a, 'tcx, Bx>,
    memory_locals: &BitSet<mir::Local>,
) -> Vec<LocalRef<'tcx, Bx::Value>> {
    let mir = fx.mir;
    let mut idx = 0;
    let mut llarg_idx = fx.fn_abi.ret.is_indirect() as usize;

    let mut num_untupled = None;

    let args = mir
        .args_iter()
        .enumerate()
        .map(|(arg_index, local)| {
            let arg_decl = &mir.local_decls[local];

            if Some(local) == mir.spread_arg {
                // This argument (e.g., the last argument in the "rust-call" ABI)
                // is a tuple that was spread at the ABI level and now we have
                // to reconstruct it into a tuple local variable, from multiple
                // individual LLVM function arguments.
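                //
                // Illustrative example (editorial addition): for a "rust-call"
                // function whose MIR signature is `fn(self, args: (i32, f64))`,
                // the backend signature is `fn(self, i32, f64)`; the code below
                // allocates a stack slot for the `(i32, f64)` tuple and stores
                // the two separate backend arguments back into its fields.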

                let arg_ty = fx.monomorphize(arg_decl.ty);
                let ty::Tuple(tupled_arg_tys) = arg_ty.kind() else {
                    bug!("spread argument isn't a tuple?!");
                };

                let layout = bx.layout_of(arg_ty);

                // FIXME: support unsized params in "rust-call" ABI
                if layout.is_unsized() {
                    span_bug!(
                        arg_decl.source_info.span,
                        "\"rust-call\" ABI does not support unsized params",
                    );
                }

                let place = PlaceRef::alloca(bx, layout);
                for i in 0..tupled_arg_tys.len() {
                    let arg = &fx.fn_abi.args[idx];
                    idx += 1;
                    if let PassMode::Cast(_, true) = arg.mode {
                        llarg_idx += 1;
                    }
                    let pr_field = place.project_field(bx, i);
                    bx.store_fn_arg(arg, &mut llarg_idx, pr_field);
                }
                assert_eq!(
                    None,
                    num_untupled.replace(tupled_arg_tys.len()),
                    "Replaced existing num_untupled"
                );

                return LocalRef::Place(place);
            }

            if fx.fn_abi.c_variadic && arg_index == fx.fn_abi.args.len() {
                let arg_ty = fx.monomorphize(arg_decl.ty);

                let va_list = PlaceRef::alloca(bx, bx.layout_of(arg_ty));
                bx.va_start(va_list.llval);

                return LocalRef::Place(va_list);
            }
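
            // Illustrative note (editorial addition): for a C-variadic
            // function the trailing `...` arguments have no fixed backend
            // parameters; instead a `VaList` slot is allocated above and
            // `va_start` captures the variadic state for later `va_arg` reads.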

            let arg = &fx.fn_abi.args[idx];
            idx += 1;
            if let PassMode::Cast(_, true) = arg.mode {
                llarg_idx += 1;
            }

            if !memory_locals.contains(local) {
                // We don't have to cast or keep the argument in the alloca.
                // FIXME(eddyb): We should figure out how to use llvm.dbg.value instead
                // of putting everything in allocas just so we can use llvm.dbg.declare.
                let local = |op| LocalRef::Operand(op);
                match arg.mode {
                    PassMode::Ignore => {
                        return local(OperandRef::zero_sized(arg.layout));
                    }
                    PassMode::Direct(_) => {
                        let llarg = bx.get_param(llarg_idx);
                        llarg_idx += 1;
                        return local(OperandRef::from_immediate_or_packed_pair(
                            bx, llarg, arg.layout,
                        ));
                    }
                    PassMode::Pair(..) => {
                        let (a, b) = (bx.get_param(llarg_idx), bx.get_param(llarg_idx + 1));
                        llarg_idx += 2;

                        return local(OperandRef {
                            val: OperandValue::Pair(a, b),
                            layout: arg.layout,
                        });
                    }
                    _ => {}
                }
            }

            if arg.is_sized_indirect() {
                // Don't copy an indirect argument to an alloca, the caller
                // already put it in a temporary alloca and gave it up.
                // FIXME: lifetimes
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                LocalRef::Place(PlaceRef::new_sized(llarg, arg.layout))
            } else if arg.is_unsized_indirect() {
                // As the storage for the indirect argument lives during
                // the whole function call, we just copy the fat pointer.
                let llarg = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let llextra = bx.get_param(llarg_idx);
                llarg_idx += 1;
                let indirect_operand = OperandValue::Pair(llarg, llextra);

                let tmp = PlaceRef::alloca_unsized_indirect(bx, arg.layout);
                indirect_operand.store(bx, tmp);
                LocalRef::UnsizedPlace(tmp)
            } else {
                let tmp = PlaceRef::alloca(bx, arg.layout);
                bx.store_fn_arg(arg, &mut llarg_idx, tmp);
                LocalRef::Place(tmp)
            }
        })
        .collect::<Vec<_>>();

    if fx.instance.def.requires_caller_location(bx.tcx()) {
        let mir_args = if let Some(num_untupled) = num_untupled {
            // Subtract off the tupled argument that gets 'expanded'
            args.len() - 1 + num_untupled
        } else {
            args.len()
        };
        assert_eq!(
            fx.fn_abi.args.len(),
            mir_args + 1,
            "#[track_caller] instance {:?} must have 1 more argument in its ABI than in its MIR",
            fx.instance
        );

        let arg = fx.fn_abi.args.last().unwrap();
        match arg.mode {
            PassMode::Direct(_) => (),
            _ => bug!("caller location must be PassMode::Direct, found {:?}", arg.mode),
        }
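
        // Illustrative note (editorial addition): the implicit caller-location
        // argument is a `&'static Location<'static>`, i.e. a single immediate
        // pointer, which is why only `PassMode::Direct` is accepted just above.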

        fx.caller_location = Some(OperandRef {
            val: OperandValue::Immediate(bx.get_param(llarg_idx)),
            layout: arg.layout,
        });
    }

    args
}

mod analyze;
mod block;
pub mod constant;
pub mod coverageinfo;
pub mod debuginfo;
mod intrinsic;
pub mod operand;
pub mod place;
mod rvalue;
mod statement;