
Auto merge of #110040 - ndrewxie:issue-84447-partial-1, r=lcnr,michaelwoerister

Removed iteration over HashMap/HashSet in rustc_incremental and replaced those collections with IndexMap/IndexSet

This allows the `#[allow(rustc::potential_query_instability)]` in rustc_incremental to be removed, moving towards fixing #84447 (although a LOT more modules have to be changed to fully resolve it). Only HashMaps/HashSets that are iterated over have been modified, although many structs and traits outside rustc_incremental had to be modified as well, as they had fields or methods involving a HashMap/HashSet that would be iterated over.
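For context, here is a minimal standalone sketch (not part of the diff) of the instability being fixed: iterating a `HashMap` visits entries in an arbitrary, hasher-dependent order, while an insertion-ordered map such as `IndexMap` (the type behind rustc's `FxIndexMap` alias) iterates deterministically. The `indexmap` dependency is assumed only for this illustration; inside rustc the `rustc_data_structures::fx` aliases are used instead.

```rust
use std::collections::HashMap;

use indexmap::IndexMap; // assumed dependency, for illustration only

fn main() {
    let pairs = [("b", 2), ("a", 1), ("c", 3)];

    // HashMap: iteration order is tied to the hasher, not to insertion
    // order, so anything derived from iteration (diagnostics, encoded
    // dep-graph data, ...) can differ between compilation sessions.
    let hashed: HashMap<_, _> = pairs.into_iter().collect();
    println!("{:?}", hashed.keys().collect::<Vec<_>>()); // arbitrary order

    // IndexMap: iteration follows insertion order, so the output is
    // deterministic without an extra sort at every use site.
    let indexed: IndexMap<_, _> = pairs.into_iter().collect();
    println!("{:?}", indexed.keys().collect::<Vec<_>>()); // ["b", "a", "c"]
}
```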

I'm making a PR for just one changed module to test for performance regressions and the like. For future changes, I'll either edit this PR to reflect additional converted modules, or batch multiple modules together and make a PR for each group.
bors 2023-06-08 07:30:03 +00:00
commit a0df04c0f2
25 changed files with 259 additions and 190 deletions


@@ -414,7 +414,9 @@ pub struct Size {
 // Safety: Ord is implement as just comparing numerical values and numerical values
 // are not changed by (de-)serialization.
 #[cfg(feature = "nightly")]
-unsafe impl StableOrd for Size {}
+unsafe impl StableOrd for Size {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}

 // This is debug-printed a lot in larger structs, don't waste too much space there
 impl fmt::Debug for Size {


@@ -54,8 +54,8 @@ impl OngoingCodegen {
         self,
         sess: &Session,
         backend_config: &BackendConfig,
-    ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
-        let mut work_products = FxHashMap::default();
+    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+        let mut work_products = FxIndexMap::default();
         let mut modules = vec![];

         for module_codegen in self.modules {


@@ -88,7 +88,7 @@ mod prelude {
     };
     pub(crate) use rustc_target::abi::{Abi, FieldIdx, Scalar, Size, VariantIdx, FIRST_VARIANT};

-    pub(crate) use rustc_data_structures::fx::FxHashMap;
+    pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
     pub(crate) use rustc_index::Idx;

@@ -223,7 +223,7 @@ impl CodegenBackend for CraneliftCodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         _outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         Ok(ongoing_codegen
             .downcast::<driver::aot::OngoingCodegen>()
             .unwrap()


@@ -75,7 +75,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig,
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
 use rustc_codegen_ssa::target_features::supported_target_features;
 use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;

@@ -137,7 +137,7 @@ impl CodegenBackend for GccCodegenBackend {
         Box::new(res)
     }

-    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
             .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")


@@ -34,7 +34,7 @@ use rustc_codegen_ssa::back::write::{
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;

@@ -356,7 +356,7 @@ impl CodegenBackend for LlvmCodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
             .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")


@@ -9,7 +9,7 @@ use crate::{
 };
 use jobserver::{Acquired, Client};
 use rustc_ast::attr;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 use rustc_data_structures::memmap::Mmap;
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::profiling::TimingGuard;

@@ -498,8 +498,8 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
 fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
     sess: &Session,
     compiled_modules: &CompiledModules,
-) -> FxHashMap<WorkProductId, WorkProduct> {
-    let mut work_products = FxHashMap::default();
+) -> FxIndexMap<WorkProductId, WorkProduct> {
+    let mut work_products = FxIndexMap::default();
     if sess.opts.incremental.is_none() {
         return work_products;

@@ -1885,7 +1885,7 @@ pub struct OngoingCodegen<B: ExtraBackendMethods> {
 }

 impl<B: ExtraBackendMethods> OngoingCodegen<B> {
-    pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+    pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
         let _timer = sess.timer("finish_ongoing_codegen");
         self.shared_emitter_main.check(sess, true);


@@ -6,7 +6,7 @@ use crate::back::write::TargetMachineFactoryFn;
 use crate::{CodegenResults, ModuleCodegen};
 use rustc_ast::expand::allocator::AllocatorKind;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::{DynSend, DynSync};
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;

@@ -101,7 +101,7 @@ pub trait CodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;

     /// This is called on the returned `Box<dyn Any>` from `join_codegen`
     ///


@@ -233,7 +233,17 @@ pub trait ToStableHashKey<HCX> {
 /// - `DefIndex`, `CrateNum`, `LocalDefId`, because their concrete
 ///   values depend on state that might be different between
 ///   compilation sessions.
-pub unsafe trait StableOrd: Ord {}
+///
+/// The associated constant `CAN_USE_UNSTABLE_SORT` denotes whether
+/// unstable sorting can be used for this type. Set to true if and
+/// only if `a == b` implies `a` and `b` are fully indistinguishable.
+pub unsafe trait StableOrd: Ord {
+    const CAN_USE_UNSTABLE_SORT: bool;
+}
+
+unsafe impl<T: StableOrd> StableOrd for &T {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}

 /// Implement HashStable by just calling `Hash::hash()`. Also implement `StableOrd` for the type since
 /// that has the same requirements.

@@ -253,7 +263,9 @@ macro_rules! impl_stable_traits_for_trivial_type {
             }
         }

-        unsafe impl $crate::stable_hasher::StableOrd for $t {}
+        unsafe impl $crate::stable_hasher::StableOrd for $t {
+            const CAN_USE_UNSTABLE_SORT: bool = true;
+        }
     };
 }

@@ -339,6 +351,10 @@ impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2)
     }
 }

+unsafe impl<T1: StableOrd, T2: StableOrd> StableOrd for (T1, T2) {
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
 where
     T1: HashStable<CTX>,

@@ -353,6 +369,11 @@ where
     }
 }

+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd> StableOrd for (T1, T2, T3) {
+    const CAN_USE_UNSTABLE_SORT: bool =
+        T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT && T3::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
 where
     T1: HashStable<CTX>,

@@ -369,6 +390,15 @@ where
     }
 }

+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd, T4: StableOrd> StableOrd
+    for (T1, T2, T3, T4)
+{
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT
+        && T2::CAN_USE_UNSTABLE_SORT
+        && T3::CAN_USE_UNSTABLE_SORT
+        && T4::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
     default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
         self.len().hash_stable(ctx, hasher);

@@ -459,6 +489,10 @@ impl<CTX> HashStable<CTX> for str {
     }
 }

+unsafe impl StableOrd for &str {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
 impl<CTX> HashStable<CTX> for String {
     #[inline]
     fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {

@@ -468,7 +502,9 @@ impl<CTX> HashStable<CTX> for String {
 // Safety: String comparison only depends on their contents and the
 // contents are not changed by (de-)serialization.
-unsafe impl StableOrd for String {}
+unsafe impl StableOrd for String {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}

 impl<HCX> ToStableHashKey<HCX> for String {
     type KeyType = String;

@@ -494,7 +530,9 @@ impl<CTX> HashStable<CTX> for bool {
 }

 // Safety: sort order of bools is not changed by (de-)serialization.
-unsafe impl StableOrd for bool {}
+unsafe impl StableOrd for bool {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}

 impl<T, CTX> HashStable<CTX> for Option<T>
 where

@@ -512,7 +550,9 @@ where
 }

 // Safety: the Option wrapper does not add instability to comparison.
-unsafe impl<T: StableOrd> StableOrd for Option<T> {}
+unsafe impl<T: StableOrd> StableOrd for Option<T> {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}

 impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
 where


@@ -140,12 +140,12 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
     }

     #[inline]
-    pub fn into_sorted_stable_ord(self, use_stable_sort: bool) -> Vec<T>
+    pub fn into_sorted_stable_ord(self) -> Vec<T>
     where
         T: Ord + StableOrd,
     {
         let mut items: Vec<T> = self.0.collect();
-        if use_stable_sort {
+        if !T::CAN_USE_UNSTABLE_SORT {
             items.sort();
         } else {
             items.sort_unstable()

@@ -161,6 +161,10 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
         items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
         items
     }
+
+    pub fn collect<C: From<UnordItems<T, I>>>(self) -> C {
+        self.into()
+    }
 }

 /// This is a set collection type that tries very hard to not expose

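The `CAN_USE_UNSTABLE_SORT` constant above exists so that `into_sorted_stable_ord` can pick the faster unstable sort at compile time instead of every caller threading a boolean through. A minimal standalone re-creation of the pattern (toy names, not rustc's actual items):

```rust
// Toy version of the StableOrd / CAN_USE_UNSTABLE_SORT pattern.
trait DemoStableOrd: Ord {
    // True only if `a == b` means `a` and `b` are fully indistinguishable,
    // in which case an unstable sort cannot change observable output.
    const CAN_USE_UNSTABLE_SORT: bool;
}

impl DemoStableOrd for u32 {
    // Equal u32s are identical, so unstable sorting is safe.
    const CAN_USE_UNSTABLE_SORT: bool = true;
}

fn into_sorted<T: DemoStableOrd>(mut items: Vec<T>) -> Vec<T> {
    if !T::CAN_USE_UNSTABLE_SORT {
        items.sort(); // stable: preserves relative order of equal items
    } else {
        items.sort_unstable(); // faster; equal items are interchangeable
    }
    items
}

fn main() {
    assert_eq!(into_sorted(vec![3u32, 1, 2]), vec![1, 2, 3]);
}
```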

@@ -166,7 +166,9 @@ impl ItemLocalId {
 // Safety: Ord is implement as just comparing the ItemLocalId's numerical
 // values and these are not changed by (de-)serialization.
-unsafe impl StableOrd for ItemLocalId {}
+unsafe impl StableOrd for ItemLocalId {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}

 /// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
 pub const CRATE_HIR_ID: HirId =


@@ -35,7 +35,7 @@
 use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::graph::implementation::{Direction, NodeIndex, INCOMING, OUTGOING};
 use rustc_graphviz as dot;
 use rustc_hir as hir;

@@ -258,7 +258,7 @@ fn dump_graph(query: &DepGraphQuery) {
 }

 #[allow(missing_docs)]
-pub struct GraphvizDepGraph(FxHashSet<DepKind>, Vec<(DepKind, DepKind)>);
+pub struct GraphvizDepGraph(FxIndexSet<DepKind>, Vec<(DepKind, DepKind)>);

 impl<'a> dot::GraphWalk<'a> for GraphvizDepGraph {
     type Node = DepKind;

@@ -303,7 +303,7 @@ impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
 fn node_set<'q>(
     query: &'q DepGraphQuery,
     filter: &DepNodeFilter,
-) -> Option<FxHashSet<&'q DepNode>> {
+) -> Option<FxIndexSet<&'q DepNode>> {
     debug!("node_set(filter={:?})", filter);

     if filter.accepts_all() {

@@ -315,9 +315,9 @@ fn node_set<'q>(
 fn filter_nodes<'q>(
     query: &'q DepGraphQuery,
-    sources: &Option<FxHashSet<&'q DepNode>>,
-    targets: &Option<FxHashSet<&'q DepNode>>,
-) -> FxHashSet<DepKind> {
+    sources: &Option<FxIndexSet<&'q DepNode>>,
+    targets: &Option<FxIndexSet<&'q DepNode>>,
+) -> FxIndexSet<DepKind> {
     if let Some(sources) = sources {
         if let Some(targets) = targets {
             walk_between(query, sources, targets)

@@ -333,10 +333,10 @@ fn filter_nodes<'q>(
 fn walk_nodes<'q>(
     query: &'q DepGraphQuery,
-    starts: &FxHashSet<&'q DepNode>,
+    starts: &FxIndexSet<&'q DepNode>,
     direction: Direction,
-) -> FxHashSet<DepKind> {
-    let mut set = FxHashSet::default();
+) -> FxIndexSet<DepKind> {
+    let mut set = FxIndexSet::default();
     for &start in starts {
         debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
         if set.insert(start.kind) {

@@ -357,9 +357,9 @@ fn walk_nodes<'q>(
 fn walk_between<'q>(
     query: &'q DepGraphQuery,
-    sources: &FxHashSet<&'q DepNode>,
-    targets: &FxHashSet<&'q DepNode>,
-) -> FxHashSet<DepKind> {
+    sources: &FxIndexSet<&'q DepNode>,
+    targets: &FxIndexSet<&'q DepNode>,
+) -> FxIndexSet<DepKind> {
     // This is a bit tricky. We want to include a node only if it is:
     // (a) reachable from a source and (b) will reach a target. And we
     // have to be careful about cycles etc. Luckily efficiency is not

@@ -426,8 +426,8 @@ fn walk_between<'q>(
     }
 }

-fn filter_edges(query: &DepGraphQuery, nodes: &FxHashSet<DepKind>) -> Vec<(DepKind, DepKind)> {
-    let uniq: FxHashSet<_> = query
+fn filter_edges(query: &DepGraphQuery, nodes: &FxIndexSet<DepKind>) -> Vec<(DepKind, DepKind)> {
+    let uniq: FxIndexSet<_> = query
         .edges()
         .into_iter()
         .map(|(s, t)| (s.kind, t.kind))


@@ -24,7 +24,7 @@
 use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::mir::mono::CodegenUnitNameBuilder;
 use rustc_middle::ty::TyCtxt;

@@ -52,7 +52,7 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>) {
 struct AssertModuleSource<'tcx> {
     tcx: TyCtxt<'tcx>,
-    available_cgus: FxHashSet<Symbol>,
+    available_cgus: UnordSet<Symbol>,
 }

 impl<'tcx> AssertModuleSource<'tcx> {

@@ -118,9 +118,8 @@ impl<'tcx> AssertModuleSource<'tcx> {
         debug!("mapping '{}' to cgu name '{}'", self.field(attr, sym::module), cgu_name);

         if !self.available_cgus.contains(&cgu_name) {
-            let mut cgu_names: Vec<&str> =
-                self.available_cgus.iter().map(|cgu| cgu.as_str()).collect();
-            cgu_names.sort();
+            let cgu_names: Vec<&str> =
+                self.available_cgus.items().map(|cgu| cgu.as_str()).into_sorted_stable_ord();
             self.tcx.sess.emit_err(errors::NoModuleNamed {
                 span: attr.span,
                 user_path,


@@ -4,7 +4,6 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(never_type)]
 #![recursion_limit = "256"]
-#![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]


@@ -22,6 +22,7 @@
 use crate::errors;
 use rustc_ast::{self as ast, Attribute, NestedMetaItem};
 use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit;
 use rustc_hir::Node as HirNode;

@@ -125,7 +126,7 @@ const LABELS_ADT: &[&[&str]] = &[BASE_HIR, BASE_STRUCT];
 //
 // type_of for these.

-type Labels = FxHashSet<String>;
+type Labels = UnordSet<String>;

 /// Represents the requested configuration by rustc_clean/dirty
 struct Assertion {

@@ -197,7 +198,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
         let (name, mut auto) = self.auto_labels(item_id, attr);
         let except = self.except(attr);
         let loaded_from_disk = self.loaded_from_disk(attr);
-        for e in except.iter() {
+        for e in except.items().map(|x| x.as_str()).into_sorted_stable_ord() {
             if !auto.remove(e) {
                 self.tcx.sess.emit_fatal(errors::AssertionAuto { span: attr.span, name, e });
             }

@@ -376,15 +377,17 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
                 continue;
             };
             self.checked_attrs.insert(attr.id);
-            for label in assertion.clean {
+            for label in assertion.clean.items().map(|x| x.as_str()).into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_clean(item_span, dep_node);
             }
-            for label in assertion.dirty {
+            for label in assertion.dirty.items().map(|x| x.as_str()).into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_dirty(item_span, dep_node);
             }
-            for label in assertion.loaded_from_disk {
+            for label in
+                assertion.loaded_from_disk.items().map(|x| x.as_str()).into_sorted_stable_ord()
+            {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_loaded_from_disk(item_span, dep_node);
             }


@@ -104,8 +104,9 @@
 //! implemented.

 use crate::errors;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
 use rustc_data_structures::svh::Svh;
+use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_data_structures::{base_n, flock};
 use rustc_errors::ErrorGuaranteed;
 use rustc_fs_util::{link_or_copy, try_canonicalize, LinkOrCopy};
@@ -635,8 +636,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     // First do a pass over the crate directory, collecting lock files and
     // session directories
-    let mut session_directories = FxHashSet::default();
-    let mut lock_files = FxHashSet::default();
+    let mut session_directories = FxIndexSet::default();
+    let mut lock_files = UnordSet::default();

     for dir_entry in crate_directory.read_dir()? {
         let Ok(dir_entry) = dir_entry else {
@@ -657,10 +658,11 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             // This is something we don't know, leave it alone
         }
     }
+    session_directories.sort();

     // Now map from lock files to session directories
-    let lock_file_to_session_dir: FxHashMap<String, Option<String>> = lock_files
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, Option<String>> = lock_files
+        .into_items()
         .map(|lock_file_name| {
             assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
             let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
@@ -670,11 +672,13 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             };
             (lock_file_name, session_dir.map(String::clone))
         })
-        .collect();
+        .into();

     // Delete all lock files, that don't have an associated directory. They must
     // be some kind of leftover
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+    for (lock_file_name, directory_name) in
+        lock_file_to_session_dir.items().into_sorted_stable_ord()
+    {
         if directory_name.is_none() {
             let Ok(timestamp) = extract_timestamp_from_session_dir(lock_file_name) else {
                 debug!(
@@ -685,19 +689,19 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
                 continue;
             };

-            let lock_file_path = crate_directory.join(&**lock_file_name);
+            let lock_file_path = crate_directory.join(&*lock_file_name);

             if is_old_enough_to_be_collected(timestamp) {
                 debug!(
                     "garbage_collect_session_directories() - deleting \
                 garbage lock file: {}",
                     lock_file_path.display()
                 );
                 delete_session_dir_lock_file(sess, &lock_file_path);
             } else {
                 debug!(
                     "garbage_collect_session_directories() - lock file with \
                 no session dir not old enough to be collected: {}",
                     lock_file_path.display()
                 );
             }
@@ -705,14 +709,14 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }

     // Filter out `None` directories
-    let lock_file_to_session_dir: FxHashMap<String, String> = lock_file_to_session_dir
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, String> = lock_file_to_session_dir
+        .into_items()
         .filter_map(|(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)))
-        .collect();
+        .into();

     // Delete all session directories that don't have a lock file.
     for directory_name in session_directories {
-        if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
+        if !lock_file_to_session_dir.items().any(|(_, dir)| *dir == directory_name) {
             let path = crate_directory.join(directory_name);
             if let Err(err) = safe_remove_dir_all(&path) {
                 sess.emit_warning(errors::InvalidGcFailed { path: &path, err });
@@ -721,103 +725,103 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }

     // Now garbage collect the valid session directories.
-    let mut deletion_candidates = vec![];
-
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
-        debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
-
-        let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
-            debug!(
-                "found session-dir with malformed timestamp: {}",
-                crate_directory.join(directory_name).display()
-            );
-            // Ignore it
-            continue;
-        };
-
-        if is_finalized(directory_name) {
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    successfully acquired lock"
-                    );
-                    debug!(
-                        "garbage_collect_session_directories() - adding \
-                    deletion candidate: {}",
-                        directory_name
-                    );
-
-                    // Note that we are holding on to the lock
-                    deletion_candidates.push((
-                        timestamp,
-                        crate_directory.join(directory_name),
-                        Some(lock),
-                    ));
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    not collecting, still in use"
-                    );
-                }
-            }
-        } else if is_old_enough_to_be_collected(timestamp) {
-            // When cleaning out "-working" session directories, i.e.
-            // session directories that might still be in use by another
-            // compiler instance, we only look a directories that are
-            // at least ten seconds old. This is supposed to reduce the
-            // chance of deleting a directory in the time window where
-            // the process has allocated the directory but has not yet
-            // acquired the file-lock on it.
-
-            // Try to acquire the directory lock. If we can't, it
-            // means that the owning process is still alive and we
-            // leave this directory alone.
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    successfully acquired lock"
-                    );
-
-                    delete_old(sess, &crate_directory.join(directory_name));
-
-                    // Let's make it explicit that the file lock is released at this point,
-                    // or rather, that we held on to it until here
-                    drop(lock);
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
-                    not collecting, still in use"
-                    );
-                }
-            }
-        } else {
-            debug!(
-                "garbage_collect_session_directories() - not finalized, not \
-            old enough"
-            );
-        }
-    }
+    let deletion_candidates =
+        lock_file_to_session_dir.items().filter_map(|(lock_file_name, directory_name)| {
+            debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
+
+            let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
+                debug!(
+                    "found session-dir with malformed timestamp: {}",
+                    crate_directory.join(directory_name).display()
+                );
+                // Ignore it
+                return None;
+            };
+
+            if is_finalized(directory_name) {
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        successfully acquired lock"
+                        );
+                        debug!(
+                            "garbage_collect_session_directories() - adding \
+                        deletion candidate: {}",
+                            directory_name
+                        );
+
+                        // Note that we are holding on to the lock
+                        return Some((
+                            (timestamp, crate_directory.join(directory_name)),
+                            Some(lock),
+                        ));
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        not collecting, still in use"
+                        );
+                    }
+                }
+            } else if is_old_enough_to_be_collected(timestamp) {
+                // When cleaning out "-working" session directories, i.e.
+                // session directories that might still be in use by another
+                // compiler instance, we only look a directories that are
+                // at least ten seconds old. This is supposed to reduce the
+                // chance of deleting a directory in the time window where
+                // the process has allocated the directory but has not yet
+                // acquired the file-lock on it.
+
+                // Try to acquire the directory lock. If we can't, it
+                // means that the owning process is still alive and we
+                // leave this directory alone.
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        successfully acquired lock"
+                        );
+
+                        delete_old(sess, &crate_directory.join(directory_name));
+
+                        // Let's make it explicit that the file lock is released at this point,
+                        // or rather, that we held on to it until here
+                        drop(lock);
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
+                        not collecting, still in use"
+                        );
+                    }
+                }
+            } else {
+                debug!(
+                    "garbage_collect_session_directories() - not finalized, not \
+                old enough"
+                );
+            }
+
+            None
+        });
+    let deletion_candidates = deletion_candidates.into();

     // Delete all but the most recent of the candidates
-    for (path, lock) in all_except_most_recent(deletion_candidates) {
+    all_except_most_recent(deletion_candidates).into_items().all(|(path, lock)| {
         debug!("garbage_collect_session_directories() - deleting `{}`", path.display());

         if let Err(err) = safe_remove_dir_all(&path) {

@@ -829,7 +833,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
         // Let's make it explicit that the file lock is released at this point,
         // or rather, that we held on to it until here
         drop(lock);
-    }
+        true
+    });

     Ok(())
 }
@@ -845,18 +850,19 @@ fn delete_old(sess: &Session, path: &Path) {
 }

 fn all_except_most_recent(
-    deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
-) -> FxHashMap<PathBuf, Option<flock::Lock>> {
-    let most_recent = deletion_candidates.iter().map(|&(timestamp, ..)| timestamp).max();
+    deletion_candidates: UnordMap<(SystemTime, PathBuf), Option<flock::Lock>>,
+) -> UnordMap<PathBuf, Option<flock::Lock>> {
+    let most_recent = deletion_candidates.items().map(|(&(timestamp, _), _)| timestamp).max();

     if let Some(most_recent) = most_recent {
-        deletion_candidates
-            .into_iter()
-            .filter(|&(timestamp, ..)| timestamp != most_recent)
-            .map(|(_, path, lock)| (path, lock))
-            .collect()
+        UnordMap::from(
+            deletion_candidates
+                .into_items()
+                .filter(|&((timestamp, _), _)| timestamp != most_recent)
+                .map(|((_, path), lock)| (path, lock)),
+        )
     } else {
-        FxHashMap::default()
+        UnordMap::default()
     }
 }


@@ -2,26 +2,19 @@ use super::*;

 #[test]
 fn test_all_except_most_recent() {
+    let input: UnordMap<_, Option<flock::Lock>> = UnordMap::from_iter([
+        ((UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4")), None),
+        ((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1")), None),
+        ((UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5")), None),
+        ((UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3")), None),
+        ((UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2")), None),
+    ]);
     assert_eq!(
-        all_except_most_recent(vec![
-            (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
-            (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
-            (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
-            (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
-            (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
-        ])
-        .keys()
-        .cloned()
-        .collect::<FxHashSet<PathBuf>>(),
-        [PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4"),]
-            .into_iter()
-            .collect::<FxHashSet<PathBuf>>()
+        all_except_most_recent(input).into_items().map(|(path, _)| path).into_sorted_stable_ord(),
+        vec![PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4")]
     );

-    assert_eq!(
-        all_except_most_recent(vec![]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
-        FxHashSet::default()
-    );
+    assert!(all_except_most_recent(UnordMap::default()).is_empty());
 }

 #[test]


@@ -1,8 +1,8 @@
 //! Code to save/load the dep-graph from files.

 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::query::on_disk_cache::OnDiskCache;
 use rustc_serialize::opaque::MemDecoder;

@@ -16,7 +16,7 @@ use super::file_format;
 use super::fs::*;
 use super::work_product;

-type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;
+type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;

 #[derive(Debug)]
 /// Represents the result of an attempt to load incremental compilation data.

@@ -147,7 +147,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
     let report_incremental_info = sess.opts.unstable_opts.incremental_info;
     let expected_hash = sess.opts.dep_tracking_hash(false);

-    let mut prev_work_products = FxHashMap::default();
+    let mut prev_work_products = UnordMap::default();

     // If we are only building with -Zquery-dep-graph but without an actual
     // incr. comp. session directory, we skip this. Otherwise we'd fail

@@ -163,7 +163,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
             Decodable::decode(&mut work_product_decoder);

         for swp in work_products {
-            let all_files_exist = swp.work_product.saved_files.iter().all(|(_, path)| {
+            let all_files_exist = swp.work_product.saved_files.items().all(|(_, path)| {
                 let exists = in_incr_comp_dir_sess(sess, path).exists();
                 if !exists && sess.opts.unstable_opts.incremental_info {
                     eprintln!("incremental: could not find file for work product: {path}",);


@@ -1,5 +1,5 @@
 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::join;
 use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;

@@ -79,7 +79,7 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
 pub fn save_work_product_index(
     sess: &Session,
     dep_graph: &DepGraph,
-    new_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    new_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) {
     if sess.opts.incremental.is_none() {
         return;

@@ -105,7 +105,7 @@ pub fn save_work_product_index(
         if !new_work_products.contains_key(id) {
             work_product::delete_workproduct_files(sess, wp);
             debug_assert!(
-                !wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+                !wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
             );
         }
     }

@@ -113,13 +113,13 @@ pub fn save_work_product_index(
     // Check that we did not delete one of the current work-products:
     debug_assert!({
         new_work_products.iter().all(|(_, wp)| {
-            wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+            wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
         })
     });
 }

 fn encode_work_product_index(
-    work_products: &FxHashMap<WorkProductId, WorkProduct>,
+    work_products: &FxIndexMap<WorkProductId, WorkProduct>,
     encoder: &mut FileEncoder,
 ) {
     let serialized_products: Vec<_> = work_products

@@ -146,7 +146,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
 pub fn build_dep_graph(
     sess: &Session,
     prev_graph: SerializedDepGraph,
-    prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) -> Option<DepGraph> {
     if sess.opts.incremental.is_none() {
         // No incremental compilation.


@@ -4,7 +4,7 @@
 use crate::errors;
 use crate::persist::fs::*;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_fs_util::link_or_copy;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_session::Session;

@@ -20,7 +20,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
     debug!(?cgu_name, ?files);
     sess.opts.incremental.as_ref()?;

-    let mut saved_files = FxHashMap::default();
+    let mut saved_files = UnordMap::default();
     for (ext, path) in files {
         let file_name = format!("{cgu_name}.{ext}");
         let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name);

@@ -46,7 +46,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
 /// Removes files for a given work product.
 pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) {
-    for (_, path) in &work_product.saved_files {
+    for (_, path) in work_product.saved_files.items().into_sorted_stable_ord() {
         let path = in_incr_comp_dir_sess(sess, path);
         if let Err(err) = std_fs::remove_file(&path) {
             sess.emit_warning(errors::DeleteWorkProduct { path: &path, err });


@@ -5,6 +5,7 @@ use crate::passes;
 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal};

@@ -193,9 +194,15 @@ impl<'tcx> Queries<'tcx> {
             let future_opt = self.dep_graph_future()?.steal();
             let dep_graph = future_opt
                 .and_then(|future| {
-                    let (prev_graph, prev_work_products) =
+                    let (prev_graph, mut prev_work_products) =
                         sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
+                    // Convert from UnordMap to FxIndexMap by sorting
+                    let prev_work_product_ids =
+                        prev_work_products.items().map(|x| *x.0).into_sorted_stable_ord();
+                    let prev_work_products = prev_work_product_ids
+                        .into_iter()
+                        .map(|x| (x, prev_work_products.remove(&x).unwrap()))
+                        .collect::<FxIndexMap<_, _>>();
                     rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
                 })
                 .unwrap_or_else(DepGraph::new_disabled);


@@ -46,7 +46,7 @@ use super::{DepContext, DepKind, FingerprintStyle};
 use crate::ich::StableHashingContext;
 use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
 use rustc_hir::definitions::DefPathHash;
 use std::fmt;
 use std::hash::Hash;

@@ -247,3 +247,14 @@ impl<HCX> HashStable<HCX> for WorkProductId {
         self.hash.hash_stable(hcx, hasher)
     }
 }
+
+impl<HCX> ToStableHashKey<HCX> for WorkProductId {
+    type KeyType = Fingerprint;
+    #[inline]
+    fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+        self.hash
+    }
+}
+
+unsafe impl StableOrd for WorkProductId {
+    // Fingerprint can use unstable (just a tuple of `u64`s), so WorkProductId can as well
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}


@@ -1,11 +1,12 @@
 use parking_lot::Mutex;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
 use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
 use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
+use rustc_data_structures::unord::UnordMap;
 use rustc_index::IndexVec;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use smallvec::{smallvec, SmallVec};

@@ -93,7 +94,7 @@ pub struct DepGraphData<K: DepKind> {
     /// things available to us. If we find that they are not dirty, we
     /// load the path to the file storing those work-products here into
     /// this map. We can later look for and extract that data.
-    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    previous_work_products: FxIndexMap<WorkProductId, WorkProduct>,

     dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,

@@ -116,7 +117,7 @@ impl<K: DepKind> DepGraph<K> {
     pub fn new(
         profiler: &SelfProfilerRef,
         prev_graph: SerializedDepGraph<K>,
-        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+        prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
         encoder: FileEncoder,
         record_graph: bool,
         record_stats: bool,

@@ -688,7 +689,7 @@ impl<K: DepKind> DepGraph<K> {
     /// Access the map of work-products created during the cached run. Only
     /// used during saving of the dep-graph.
-    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
+    pub fn previous_work_products(&self) -> &FxIndexMap<WorkProductId, WorkProduct> {
         &self.data.as_ref().unwrap().previous_work_products
     }

@@ -1048,7 +1049,7 @@ pub struct WorkProduct {
     ///
     /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
    /// the object file's path, and "dwo" to the dwarf object file's path.
-    pub saved_files: FxHashMap<String, String>,
+    pub saved_files: UnordMap<String, String>,
 }

 // Index type for `DepNodeData`'s edges.


@@ -311,7 +311,9 @@ pub enum OutputType {
 }

 // Safety: Trivial C-Style enums have a stable sort order across compilation sessions.
-unsafe impl StableOrd for OutputType {}
+unsafe impl StableOrd for OutputType {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}

 impl<HCX: HashStableContext> ToStableHashKey<HCX> for OutputType {
     type KeyType = Self;


@@ -160,7 +160,7 @@ impl LateLintPass<'_> for WildcardImports {
             )
         };
-        let mut imports = used_imports.items().map(ToString::to_string).into_sorted_stable_ord(false);
+        let mut imports = used_imports.items().map(ToString::to_string).into_sorted_stable_ord();
         let imports_string = if imports.len() == 1 {
             imports.pop().unwrap()
         } else if braced_glob {


@@ -15,7 +15,7 @@ extern crate rustc_target;
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_codegen_ssa::{CodegenResults, CrateInfo};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};

@@ -49,11 +49,11 @@ impl CodegenBackend for TheBackend {
         ongoing_codegen: Box<dyn Any>,
         _sess: &Session,
         _outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let codegen_results = ongoing_codegen
             .downcast::<CodegenResults>()
             .expect("in join_codegen: ongoing_codegen is not a CodegenResults");
-        Ok((*codegen_results, FxHashMap::default()))
+        Ok((*codegen_results, FxIndexMap::default()))
     }

     fn link(