use crate::dep_graph::{self, DepConstructor, DepNode, DepNodeParams};
use crate::hir::exports::Export;
use crate::hir::map;
use crate::hir::{HirOwner, HirOwnerItems};
use crate::infer::canonical::{self, Canonical};
use crate::lint::LintLevelMap;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::cstore::{CrateSource, DepKind, NativeLibraryKind};
use crate::middle::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLibrary};
use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportLevel};
use crate::middle::lang_items::{LangItem, LanguageItems};
use crate::middle::lib_features::LibFeatures;
use crate::middle::privacy::AccessLevels;
use crate::middle::region;
use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
use crate::mir::interpret::GlobalId;
use crate::mir::interpret::{ConstEvalRawResult, ConstEvalResult, ConstValue};
use crate::mir::interpret::{LitToConstError, LitToConstInput};
use crate::mir::mono::CodegenUnit;
use crate::session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use crate::session::CrateDisambiguator;
use crate::traits::query::{
    CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
    CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
    CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution,
};
use crate::traits::query::{
    DropckOutlivesResult, DtorckConstraint, MethodAutoderefStepsResult, NormalizationResult,
    OutlivesBound,
};
use crate::traits::specialization_graph;
use crate::traits::Clauses;
use crate::traits::{self, Vtable};
use crate::ty::steal::Steal;
use crate::ty::subst::SubstsRef;
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
use crate::util::common::ErrorReported;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
use rustc_data_structures::profiling::ProfileCategory::*;
use rustc_data_structures::stable_hasher::StableVec;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, DefIndex};
use rustc_hir::{Crate, HirIdSet, ItemLocalId, TraitCandidate};
use rustc_index::vec::IndexVec;
use rustc_target::spec::PanicStrategy;

use rustc_ast::ast;
use rustc_attr as attr;
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
use std::borrow::Cow;
use std::collections::BTreeMap;
use std::convert::TryFrom;
use std::ops::Deref;
use std::sync::Arc;

#[macro_use]
mod plumbing;
pub use self::plumbing::CycleError;
use self::plumbing::*;

mod stats;
pub use self::stats::print_stats;

mod job;
#[cfg(parallel_compiler)]
pub use self::job::handle_deadlock;
use self::job::QueryJobInfo;
pub use self::job::{QueryInfo, QueryJob, QueryJobId};

mod keys;
use self::keys::Key;

mod values;
use self::values::Value;

mod caches;
use self::caches::CacheSelector;

mod config;
use self::config::QueryAccessors;
pub use self::config::QueryConfig;
pub(crate) use self::config::QueryDescription;

mod on_disk_cache;
pub use self::on_disk_cache::OnDiskCache;

mod profiling_support;
pub use self::profiling_support::{IntoSelfProfilingString, QueryKeyStringBuilder};

// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
// which memoizes and does dep-graph tracking, wrapping around the actual
// `Providers` that the driver creates (using several `rustc_*` crates).
//
// The result type of each query must implement `Clone`, and additionally
// `ty::query::values::Value`, which produces an appropriate placeholder
// (error) value if the query resulted in a query cycle.
// Queries marked with `fatal_cycle` do not need the latter implementation,
// as they will raise a fatal error on query cycles instead.
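//
// A rough sketch of that pattern (illustrative only; `type_of` is just one
// existing `DefId`-keyed query used as an example here):
//
//     // A provider crate supplies the actual computation as a plain function:
//     providers.type_of = |tcx, def_id| { /* compute the type of the item */ };
//
//     // Callers go through the generated, memoizing wrapper instead, which
//     // records the dependency edge and caches the result:
//     let ty = tcx.type_of(def_id);
//     let ty = tcx.at(span).type_of(def_id);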

rustc_query_append! { [define_queries!][<'tcx>] }

/// The red/green evaluation system will try to mark a specific DepNode in the
/// dependency graph as green by recursively trying to mark the dependencies of
/// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
/// where we don't know if it is red or green and we therefore actually have
/// to recompute its value in order to find out. Since the only piece of
/// information that we have at that point is the `DepNode` we are trying to
/// re-evaluate, we need some way to re-run a query from just that. This is what
/// `force_from_dep_node()` implements.
///
/// In the general case, a `DepNode` consists of a `DepKind` and an opaque
/// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
/// is usually constructed by computing a stable hash of the query-key that the
/// `DepNode` corresponds to. Consequently, it is not in general possible to go
/// back from hash to query-key (since hash functions are not reversible). For
/// this reason `force_from_dep_node()` is expected to fail from time to time
/// because we just cannot find out, from the `DepNode` alone, what the
/// corresponding query-key is and therefore cannot re-run the query.
///
/// The system deals with this case by letting `try_mark_green` fail, which forces
/// the root query to be re-evaluated.
///
/// Now, if `force_from_dep_node()` always failed, it would be pretty useless.
/// Fortunately, we can use some contextual information that will allow us to
/// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
/// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
/// valid `DefPathHash`. Since we also always build a huge table that maps every
/// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
/// everything we need to re-run the query.
///
/// Take the `mir_validated` query as an example. Like many other queries, it
/// just has a single parameter: the `DefId` of the item it will compute the
/// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
/// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
/// is actually a `DefPathHash`, and can therefore just look up the corresponding
/// `DefId` in `tcx.def_path_hash_to_def_id`.
///
/// When you implement a new query, it will likely have a corresponding new
/// `DepKind`, and you'll have to support it here in `force_from_dep_node()`. As
/// a rule of thumb, if your query takes a `DefId` or `DefIndex` as sole parameter,
/// then `force_from_dep_node()` should not fail for it. Otherwise, you can just
/// add it to the "We don't have enough information to reconstruct..." group in
/// the match below.
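///
/// As a rough sketch of that idea (illustrative only, not the literal expansion
/// of the macro used below), forcing a `DefId`-keyed query conceptually works
/// like this:
///
/// ```ignore
/// DepKind::mir_validated => {
///     if let Some(def_id) = dep_node.extract_def_id(tcx) {
///         // Re-run the query through its usual memoizing entry point.
///         tcx.ensure().mir_validated(def_id);
///         true
///     } else {
///         // The `DefId` could not be reconstructed, so the query
///         // cannot be re-run from this `DepNode` alone.
///         false
///     }
/// }
/// ```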
pub fn force_from_dep_node<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> bool {
    use crate::dep_graph::DepKind;

    // We must avoid ever having to call `force_from_dep_node()` for a
    // `DepNode::codegen_unit`:
    // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
    // would always end up having to evaluate the first caller of the
    // `codegen_unit` query that *is* reconstructible. This might very well be
    // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
    // to re-trigger calling the `codegen_unit` query with the right key. At
    // that point we would already have re-done all the work we are trying to
    // avoid doing in the first place.
    // The solution is simple: Just explicitly call the `codegen_unit` query for
    // each CGU, right after partitioning. This way `try_mark_green` will always
    // hit the cache instead of having to go through `force_from_dep_node`.
    // This assertion makes sure we actually keep applying the solution above.
    debug_assert!(
        dep_node.kind != DepKind::codegen_unit,
        "calling force_from_dep_node() on DepKind::codegen_unit"
    );

    if !dep_node.kind.can_reconstruct_query_key() {
        return false;
    }

    rustc_dep_node_force!([dep_node, tcx]
        // These are inputs that are expected to be pre-allocated and that
        // should therefore always be red or green already.
        DepKind::CrateMetadata |

        // These are anonymous nodes.
        DepKind::TraitSelect |

        // We don't have enough information to reconstruct the query key of
        // these.
        DepKind::CompileCodegenUnit => {
            bug!("force_from_dep_node: encountered {:?}", dep_node)
        }
    );

    false
}

impl DepNode {
    /// Check whether the query invocation corresponding to the given
    /// `DepNode` is eligible for on-disk-caching. If so, this method
    /// will execute the query corresponding to the given `DepNode`.
    /// Also, as a sanity check, it expects that the corresponding query
    /// invocation has been marked as green already.
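    ///
    /// As a hedged sketch (illustrative only, not the literal expansion of the
    /// macro used below), the generated arm for a disk-cacheable, `DefId`-keyed
    /// query looks conceptually like this:
    ///
    /// ```ignore
    /// DepKind::typeck_tables_of => {
    ///     if let Some(def_id) = self.extract_def_id(tcx) {
    ///         // Going through the normal query path will find the value in
    ///         // the on-disk cache, since the node is already green.
    ///         let _ = tcx.typeck_tables_of(def_id);
    ///     }
    /// }
    /// ```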
    pub fn try_load_from_on_disk_cache<'tcx>(&self, tcx: TyCtxt<'tcx>) {
        use crate::dep_graph::DepKind;

        rustc_dep_node_try_load_from_on_disk_cache!(self, tcx)
    }
}