-rw-r--r--  compiler/rustc_abi/src/lib.rs                                  |   4
-rw-r--r--  compiler/rustc_codegen_cranelift/src/driver/aot.rs             |   4
-rw-r--r--  compiler/rustc_codegen_cranelift/src/lib.rs                    |   4
-rw-r--r--  compiler/rustc_codegen_gcc/src/lib.rs                          |   4
-rw-r--r--  compiler/rustc_codegen_llvm/src/lib.rs                         |   4
-rw-r--r--  compiler/rustc_codegen_ssa/src/back/write.rs                   |   8
-rw-r--r--  compiler/rustc_codegen_ssa/src/traits/backend.rs               |   4
-rw-r--r--  compiler/rustc_const_eval/src/interpret/intrinsics.rs          |   4
-rw-r--r--  compiler/rustc_data_structures/src/stable_hasher.rs            |  50
-rw-r--r--  compiler/rustc_data_structures/src/unord.rs                    |   8
-rw-r--r--  compiler/rustc_hir/src/hir_id.rs                               |   4
-rw-r--r--  compiler/rustc_hir_analysis/src/check/intrinsic.rs             |   2
-rw-r--r--  compiler/rustc_incremental/src/assert_dep_graph.rs             |  28
-rw-r--r--  compiler/rustc_incremental/src/assert_module_sources.rs        |   9
-rw-r--r--  compiler/rustc_incremental/src/lib.rs                          |   1
-rw-r--r--  compiler/rustc_incremental/src/persist/dirty_clean.rs          |  13
-rw-r--r--  compiler/rustc_incremental/src/persist/fs.rs                   | 208
-rw-r--r--  compiler/rustc_incremental/src/persist/fs/tests.rs             |  27
-rw-r--r--  compiler/rustc_incremental/src/persist/load.rs                 |   8
-rw-r--r--  compiler/rustc_incremental/src/persist/save.rs                 |  12
-rw-r--r--  compiler/rustc_incremental/src/persist/work_product.rs         |   6
-rw-r--r--  compiler/rustc_interface/src/queries.rs                        |  11
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs               |   9
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs                           |   4
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/dep_node.rs          |  13
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs             |  11
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs              |  15
-rw-r--r--  compiler/rustc_session/src/config.rs                           |   4
-rw-r--r--  library/core/src/any.rs                                        |  31
-rw-r--r--  library/core/src/intrinsics.rs                                 |  17
-rw-r--r--  library/std/src/sys/common/thread_local/fast_local.rs          |  22
-rw-r--r--  library/std/src/sys/common/thread_local/mod.rs                 |  21
-rw-r--r--  library/std/src/thread/mod.rs                                  |   2
-rw-r--r--  src/bootstrap/compile.rs                                       |   4
-rw-r--r--  src/bootstrap/tool.rs                                          |   2
-rw-r--r--  src/ci/stage-build.py                                          | 150
-rw-r--r--  src/tools/clippy/clippy_lints/src/wildcard_imports.rs          |   2
-rw-r--r--  tests/run-make-fulldeps/hotplug_codegen_backend/the_backend.rs |   6
38 files changed, 464 insertions(+), 272 deletions(-)
diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs
index 2ee63c286ba..e1b9987f578 100644
--- a/compiler/rustc_abi/src/lib.rs
+++ b/compiler/rustc_abi/src/lib.rs
@@ -414,7 +414,9 @@ pub struct Size {
 // Safety: Ord is implemented as just comparing numerical values and numerical values
 // are not changed by (de-)serialization.
 #[cfg(feature = "nightly")]
-unsafe impl StableOrd for Size {}
+unsafe impl StableOrd for Size {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
 
 // This is debug-printed a lot in larger structs, don't waste too much space there
 impl fmt::Debug for Size {
diff --git a/compiler/rustc_codegen_cranelift/src/driver/aot.rs b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
index aad9a9647f8..d143bcc96ef 100644
--- a/compiler/rustc_codegen_cranelift/src/driver/aot.rs
+++ b/compiler/rustc_codegen_cranelift/src/driver/aot.rs
@@ -54,8 +54,8 @@ impl OngoingCodegen {
         self,
         sess: &Session,
         backend_config: &BackendConfig,
-    ) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
-        let mut work_products = FxHashMap::default();
+    ) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
+        let mut work_products = FxIndexMap::default();
         let mut modules = vec![];
 
         for module_codegen in self.modules {
diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs
index 9966cc2ef3c..095fbe62c19 100644
--- a/compiler/rustc_codegen_cranelift/src/lib.rs
+++ b/compiler/rustc_codegen_cranelift/src/lib.rs
@@ -88,7 +88,7 @@ mod prelude {
     };
     pub(crate) use rustc_target::abi::{Abi, FieldIdx, Scalar, Size, VariantIdx, FIRST_VARIANT};
 
-    pub(crate) use rustc_data_structures::fx::FxHashMap;
+    pub(crate) use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 
     pub(crate) use rustc_index::Idx;
 
@@ -223,7 +223,7 @@ impl CodegenBackend for CraneliftCodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         _outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         Ok(ongoing_codegen
             .downcast::<driver::aot::OngoingCodegen>()
             .unwrap()
diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs
index 442ce0ea542..ea013c4428c 100644
--- a/compiler/rustc_codegen_gcc/src/lib.rs
+++ b/compiler/rustc_codegen_gcc/src/lib.rs
@@ -75,7 +75,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLTOInput, ModuleConfig,
 use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule};
 use rustc_codegen_ssa::target_features::supported_target_features;
 use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, ModuleBufferMethods, ThinBufferMethods, WriteBackendMethods};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
@@ -137,7 +137,7 @@ impl CodegenBackend for GccCodegenBackend {
         Box::new(res)
     }
 
-    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    fn join_codegen(&self, ongoing_codegen: Box<dyn Any>, sess: &Session, _outputs: &OutputFilenames) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<GccCodegenBackend>>()
             .expect("Expected GccCodegenBackend's OngoingCodegen, found Box<Any>")
diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs
index ff9909c720e..24968e00cc8 100644
--- a/compiler/rustc_codegen_llvm/src/lib.rs
+++ b/compiler/rustc_codegen_llvm/src/lib.rs
@@ -34,7 +34,7 @@ use rustc_codegen_ssa::back::write::{
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::ModuleCodegen;
 use rustc_codegen_ssa::{CodegenResults, CompiledModule};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::{DiagnosticMessage, ErrorGuaranteed, FatalError, Handler, SubdiagnosticMessage};
 use rustc_fluent_macro::fluent_messages;
 use rustc_metadata::EncodedMetadata;
@@ -356,7 +356,7 @@ impl CodegenBackend for LlvmCodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let (codegen_results, work_products) = ongoing_codegen
             .downcast::<rustc_codegen_ssa::back::write::OngoingCodegen<LlvmCodegenBackend>>()
             .expect("Expected LlvmCodegenBackend's OngoingCodegen, found Box<Any>")
diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs
index 10e9e5588f6..701d0d73ad3 100644
--- a/compiler/rustc_codegen_ssa/src/back/write.rs
+++ b/compiler/rustc_codegen_ssa/src/back/write.rs
@@ -9,7 +9,7 @@ use crate::{
 };
 use jobserver::{Acquired, Client};
 use rustc_ast::attr;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
 use rustc_data_structures::memmap::Mmap;
 use rustc_data_structures::profiling::SelfProfilerRef;
 use rustc_data_structures::profiling::TimingGuard;
@@ -498,8 +498,8 @@ pub fn start_async_codegen<B: ExtraBackendMethods>(
 fn copy_all_cgu_workproducts_to_incr_comp_cache_dir(
     sess: &Session,
     compiled_modules: &CompiledModules,
-) -> FxHashMap<WorkProductId, WorkProduct> {
-    let mut work_products = FxHashMap::default();
+) -> FxIndexMap<WorkProductId, WorkProduct> {
+    let mut work_products = FxIndexMap::default();
 
     if sess.opts.incremental.is_none() {
         return work_products;
@@ -1885,7 +1885,7 @@ pub struct OngoingCodegen<B: ExtraBackendMethods> {
 }
 
 impl<B: ExtraBackendMethods> OngoingCodegen<B> {
-    pub fn join(self, sess: &Session) -> (CodegenResults, FxHashMap<WorkProductId, WorkProduct>) {
+    pub fn join(self, sess: &Session) -> (CodegenResults, FxIndexMap<WorkProductId, WorkProduct>) {
         let _timer = sess.timer("finish_ongoing_codegen");
 
         self.shared_emitter_main.check(sess, true);
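
The `FxHashMap` to `FxIndexMap` swap that runs through this file and the backend diffs above buys determinism: the work-product map is iterated when the index is serialized, and hash-map iteration order varies from run to run. A minimal sketch of the property being relied on, using the `indexmap` crate directly (the `FxIndexMap` alias is rustc-internal; plain `IndexMap` stands in for it here):

```rust
use indexmap::IndexMap;

fn main() {
    let mut work_products: IndexMap<&str, &str> = IndexMap::new();
    work_products.insert("cgu_b", "b.o");
    work_products.insert("cgu_a", "a.o");
    work_products.insert("cgu_c", "c.o");

    // Iteration follows insertion order on every run, so anything derived
    // from it (such as a serialized index) is reproducible. A HashMap would
    // yield these entries in an unpredictable order.
    let keys: Vec<&str> = work_products.keys().copied().collect();
    assert_eq!(keys, ["cgu_b", "cgu_a", "cgu_c"]);
}
```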
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index d83bfc74082..b3c9ecf8b93 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -6,7 +6,7 @@ use crate::back::write::TargetMachineFactoryFn;
 use crate::{CodegenResults, ModuleCodegen};
 
 use rustc_ast::expand::allocator::AllocatorKind;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::{DynSend, DynSync};
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;
@@ -101,7 +101,7 @@ pub trait CodegenBackend {
         ongoing_codegen: Box<dyn Any>,
         sess: &Session,
         outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed>;
 
     /// This is called on the returned `Box<dyn Any>` from `join_codegen`
     ///
diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
index fffb9a7f264..7192bbc00d5 100644
--- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs
+++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs
@@ -77,7 +77,7 @@ pub(crate) fn eval_nullary_intrinsic<'tcx>(
         }
         sym::type_id => {
             ensure_monomorphic_enough(tcx, tp_ty)?;
-            ConstValue::from_u64(tcx.type_id_hash(tp_ty).as_u64())
+            ConstValue::from_u128(tcx.type_id_hash(tp_ty).as_u128())
         }
         sym::variant_count => match tp_ty.kind() {
             // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
@@ -169,7 +169,7 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 let ty = match intrinsic_name {
                     sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                     sym::needs_drop => self.tcx.types.bool,
-                    sym::type_id => self.tcx.types.u64,
+                    sym::type_id => self.tcx.types.u128,
                     sym::type_name => self.tcx.mk_static_str(),
                     _ => bug!(),
                 };
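
With the intrinsic widened, `type_id` now evaluates to a 128-bit scalar during const eval. A sketch of what this surfaces at the language level, assuming a nightly toolchain with the unstable `const_type_id` feature (tracking issue 77125, per the attributes in the diffs below):

```rust
#![feature(const_type_id)]

use std::any::TypeId;

// `TypeId::of` is a const fn under this feature, so a type's identity can be
// computed at compile time and stored in a const.
const U8_ID: TypeId = TypeId::of::<u8>();

fn main() {
    assert_eq!(U8_ID, TypeId::of::<u8>());
    assert_ne!(U8_ID, TypeId::of::<i8>());
}
```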
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index 6d57d81c56a..0c1fb7518fa 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -233,7 +233,17 @@ pub trait ToStableHashKey<HCX> {
 ///  - `DefIndex`, `CrateNum`, `LocalDefId`, because their concrete
 ///    values depend on state that might be different between
 ///    compilation sessions.
-pub unsafe trait StableOrd: Ord {}
+///
+/// The associated constant `CAN_USE_UNSTABLE_SORT` denotes whether
+/// unstable sorting can be used for this type. Set to true if and
+/// only if `a == b` implies `a` and `b` are fully indistinguishable.
+pub unsafe trait StableOrd: Ord {
+    const CAN_USE_UNSTABLE_SORT: bool;
+}
+
+unsafe impl<T: StableOrd> StableOrd for &T {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
 
 /// Implement HashStable by just calling `Hash::hash()`. Also implement `StableOrd` for the type since
 /// that has the same requirements.
@@ -253,7 +263,9 @@ macro_rules! impl_stable_traits_for_trivial_type {
             }
         }
 
-        unsafe impl $crate::stable_hasher::StableOrd for $t {}
+        unsafe impl $crate::stable_hasher::StableOrd for $t {
+            const CAN_USE_UNSTABLE_SORT: bool = true;
+        }
     };
 }
 
@@ -339,6 +351,10 @@ impl<T1: HashStable<CTX>, T2: HashStable<CTX>, CTX> HashStable<CTX> for (T1, T2)
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd> StableOrd for (T1, T2) {
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, CTX> HashStable<CTX> for (T1, T2, T3)
 where
     T1: HashStable<CTX>,
@@ -353,6 +369,11 @@ where
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd> StableOrd for (T1, T2, T3) {
+    const CAN_USE_UNSTABLE_SORT: bool =
+        T1::CAN_USE_UNSTABLE_SORT && T2::CAN_USE_UNSTABLE_SORT && T3::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T1, T2, T3, T4, CTX> HashStable<CTX> for (T1, T2, T3, T4)
 where
     T1: HashStable<CTX>,
@@ -369,6 +390,15 @@ where
     }
 }
 
+unsafe impl<T1: StableOrd, T2: StableOrd, T3: StableOrd, T4: StableOrd> StableOrd
+    for (T1, T2, T3, T4)
+{
+    const CAN_USE_UNSTABLE_SORT: bool = T1::CAN_USE_UNSTABLE_SORT
+        && T2::CAN_USE_UNSTABLE_SORT
+        && T3::CAN_USE_UNSTABLE_SORT
+        && T4::CAN_USE_UNSTABLE_SORT;
+}
+
 impl<T: HashStable<CTX>, CTX> HashStable<CTX> for [T] {
     default fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
         self.len().hash_stable(ctx, hasher);
@@ -459,6 +489,10 @@ impl<CTX> HashStable<CTX> for str {
     }
 }
 
+unsafe impl StableOrd for &str {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
+
 impl<CTX> HashStable<CTX> for String {
     #[inline]
     fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
@@ -468,7 +502,9 @@ impl<CTX> HashStable<CTX> for String {
 
 // Safety: String comparison only depends on their contents and the
 // contents are not changed by (de-)serialization.
-unsafe impl StableOrd for String {}
+unsafe impl StableOrd for String {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
 
 impl<HCX> ToStableHashKey<HCX> for String {
     type KeyType = String;
@@ -494,7 +530,9 @@ impl<CTX> HashStable<CTX> for bool {
 }
 
 // Safety: sort order of bools is not changed by (de-)serialization.
-unsafe impl StableOrd for bool {}
+unsafe impl StableOrd for bool {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
 
 impl<T, CTX> HashStable<CTX> for Option<T>
 where
@@ -512,7 +550,9 @@ where
 }
 
 // Safety: the Option wrapper does not add instability to comparison.
-unsafe impl<T: StableOrd> StableOrd for Option<T> {}
+unsafe impl<T: StableOrd> StableOrd for Option<T> {
+    const CAN_USE_UNSTABLE_SORT: bool = T::CAN_USE_UNSTABLE_SORT;
+}
 
 impl<T1, T2, CTX> HashStable<CTX> for Result<T1, T2>
 where
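
The new associated constant pushes a per-type decision down to whoever sorts: an unstable sort may reorder elements that compare equal, so it only preserves determinism when equal elements are fully indistinguishable. A standalone sketch of the contract; the trait below mirrors `StableOrd` but is redefined so the example compiles on its own:

```rust
// Mirrors rustc's `StableOrd`; `unsafe` because implementors promise that
// their `Ord` is stable across compilation sessions.
unsafe trait StableOrd: Ord {
    const CAN_USE_UNSTABLE_SORT: bool;
}

// Equal `u32`s are fully indistinguishable, so reordering them is harmless.
unsafe impl StableOrd for u32 {
    const CAN_USE_UNSTABLE_SORT: bool = true;
}

// The same branch that `into_sorted_stable_ord` takes in the unord.rs diff below.
fn sort_deterministically<T: StableOrd>(items: &mut [T]) {
    if T::CAN_USE_UNSTABLE_SORT {
        items.sort_unstable(); // faster, no allocation
    } else {
        items.sort(); // stable: equal elements keep their relative order
    }
}

fn main() {
    let mut v = [3u32, 1, 2, 1];
    sort_deterministically(&mut v);
    assert_eq!(v, [1, 1, 2, 3]);
}
```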
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
index 6c8d5414631..e18c7b415f6 100644
--- a/compiler/rustc_data_structures/src/unord.rs
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -140,12 +140,12 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
     }
 
     #[inline]
-    pub fn into_sorted_stable_ord(self, use_stable_sort: bool) -> Vec<T>
+    pub fn into_sorted_stable_ord(self) -> Vec<T>
     where
         T: Ord + StableOrd,
     {
         let mut items: Vec<T> = self.0.collect();
-        if use_stable_sort {
+        if !T::CAN_USE_UNSTABLE_SORT {
             items.sort();
         } else {
             items.sort_unstable()
@@ -161,6 +161,10 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
         items.sort_by_cached_key(|x| x.to_stable_hash_key(hcx));
         items
     }
+
+    pub fn collect<C: From<UnordItems<T, I>>>(self) -> C {
+        self.into()
+    }
 }
 
 /// This is a set collection type that tries very hard to not expose
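
The small `collect` method added above is just a readable alias for `Into::into`: it lets callers write `items.collect::<UnordSet<_>>()` for any target with a `From<UnordItems<...>>` impl. A reduced, hypothetical model of the pattern (`Items` and `SetLike` are stand-ins, not rustc types):

```rust
// Stand-in for `UnordItems`: an opaque wrapper around an iterator.
struct Items<I>(I);

// Stand-in for `UnordSet`.
struct SetLike(Vec<u32>);

impl<I: Iterator<Item = u32>> From<Items<I>> for SetLike {
    fn from(items: Items<I>) -> Self {
        SetLike(items.0.collect())
    }
}

impl<I: Iterator<Item = u32>> Items<I> {
    // The added method: defer to whichever `From` impl the caller names.
    fn collect<C: From<Items<I>>>(self) -> C {
        self.into()
    }
}

fn main() {
    let set: SetLike = Items([1u32, 2, 3].into_iter()).collect();
    assert_eq!(set.0, vec![1, 2, 3]);
}
```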
diff --git a/compiler/rustc_hir/src/hir_id.rs b/compiler/rustc_hir/src/hir_id.rs
index d549f52f873..34c61577936 100644
--- a/compiler/rustc_hir/src/hir_id.rs
+++ b/compiler/rustc_hir/src/hir_id.rs
@@ -166,7 +166,9 @@ impl ItemLocalId {
 
 // Safety: Ord is implemented as just comparing the ItemLocalId's numerical
 // values and these are not changed by (de-)serialization.
-unsafe impl StableOrd for ItemLocalId {}
+unsafe impl StableOrd for ItemLocalId {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
 
 /// The `HirId` corresponding to `CRATE_NODE_ID` and `CRATE_DEF_ID`.
 pub const CRATE_HIR_ID: HirId =
diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
index 1f18017f00b..36c468e7789 100644
--- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs
+++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs
@@ -217,7 +217,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
             sym::needs_drop => (1, Vec::new(), tcx.types.bool),
 
             sym::type_name => (1, Vec::new(), tcx.mk_static_str()),
-            sym::type_id => (1, Vec::new(), tcx.types.u64),
+            sym::type_id => (1, Vec::new(), tcx.types.u128),
             sym::offset => (2, vec![param(0), param(1)], param(0)),
             sym::arith_offset => (
                 1,
diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs
index 22bd12f2e63..52a84b204d0 100644
--- a/compiler/rustc_incremental/src/assert_dep_graph.rs
+++ b/compiler/rustc_incremental/src/assert_dep_graph.rs
@@ -35,7 +35,7 @@
 
 use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::fx::FxIndexSet;
 use rustc_data_structures::graph::implementation::{Direction, NodeIndex, INCOMING, OUTGOING};
 use rustc_graphviz as dot;
 use rustc_hir as hir;
@@ -258,7 +258,7 @@ fn dump_graph(query: &DepGraphQuery) {
 }
 
 #[allow(missing_docs)]
-pub struct GraphvizDepGraph(FxHashSet<DepKind>, Vec<(DepKind, DepKind)>);
+pub struct GraphvizDepGraph(FxIndexSet<DepKind>, Vec<(DepKind, DepKind)>);
 
 impl<'a> dot::GraphWalk<'a> for GraphvizDepGraph {
     type Node = DepKind;
@@ -303,7 +303,7 @@ impl<'a> dot::Labeller<'a> for GraphvizDepGraph {
 fn node_set<'q>(
     query: &'q DepGraphQuery,
     filter: &DepNodeFilter,
-) -> Option<FxHashSet<&'q DepNode>> {
+) -> Option<FxIndexSet<&'q DepNode>> {
     debug!("node_set(filter={:?})", filter);
 
     if filter.accepts_all() {
@@ -315,9 +315,9 @@ fn node_set<'q>(
 
 fn filter_nodes<'q>(
     query: &'q DepGraphQuery,
-    sources: &Option<FxHashSet<&'q DepNode>>,
-    targets: &Option<FxHashSet<&'q DepNode>>,
-) -> FxHashSet<DepKind> {
+    sources: &Option<FxIndexSet<&'q DepNode>>,
+    targets: &Option<FxIndexSet<&'q DepNode>>,
+) -> FxIndexSet<DepKind> {
     if let Some(sources) = sources {
         if let Some(targets) = targets {
             walk_between(query, sources, targets)
@@ -333,10 +333,10 @@ fn filter_nodes<'q>(
 
 fn walk_nodes<'q>(
     query: &'q DepGraphQuery,
-    starts: &FxHashSet<&'q DepNode>,
+    starts: &FxIndexSet<&'q DepNode>,
     direction: Direction,
-) -> FxHashSet<DepKind> {
-    let mut set = FxHashSet::default();
+) -> FxIndexSet<DepKind> {
+    let mut set = FxIndexSet::default();
     for &start in starts {
         debug!("walk_nodes: start={:?} outgoing?={:?}", start, direction == OUTGOING);
         if set.insert(start.kind) {
@@ -357,9 +357,9 @@ fn walk_nodes<'q>(
 
 fn walk_between<'q>(
     query: &'q DepGraphQuery,
-    sources: &FxHashSet<&'q DepNode>,
-    targets: &FxHashSet<&'q DepNode>,
-) -> FxHashSet<DepKind> {
+    sources: &FxIndexSet<&'q DepNode>,
+    targets: &FxIndexSet<&'q DepNode>,
+) -> FxIndexSet<DepKind> {
     // This is a bit tricky. We want to include a node only if it is:
     // (a) reachable from a source and (b) will reach a target. And we
     // have to be careful about cycles etc. Luckily efficiency is not
@@ -426,8 +426,8 @@ fn walk_between<'q>(
     }
 }
 
-fn filter_edges(query: &DepGraphQuery, nodes: &FxHashSet<DepKind>) -> Vec<(DepKind, DepKind)> {
-    let uniq: FxHashSet<_> = query
+fn filter_edges(query: &DepGraphQuery, nodes: &FxIndexSet<DepKind>) -> Vec<(DepKind, DepKind)> {
+    let uniq: FxIndexSet<_> = query
         .edges()
         .into_iter()
         .map(|(s, t)| (s.kind, t.kind))
diff --git a/compiler/rustc_incremental/src/assert_module_sources.rs b/compiler/rustc_incremental/src/assert_module_sources.rs
index c550e553bb0..0111a6d302d 100644
--- a/compiler/rustc_incremental/src/assert_module_sources.rs
+++ b/compiler/rustc_incremental/src/assert_module_sources.rs
@@ -24,7 +24,7 @@
 
 use crate::errors;
 use rustc_ast as ast;
-use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LOCAL_CRATE;
 use rustc_middle::mir::mono::CodegenUnitNameBuilder;
 use rustc_middle::ty::TyCtxt;
@@ -52,7 +52,7 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>) {
 
 struct AssertModuleSource<'tcx> {
     tcx: TyCtxt<'tcx>,
-    available_cgus: FxHashSet<Symbol>,
+    available_cgus: UnordSet<Symbol>,
 }
 
 impl<'tcx> AssertModuleSource<'tcx> {
@@ -118,9 +118,8 @@ impl<'tcx> AssertModuleSource<'tcx> {
         debug!("mapping '{}' to cgu name '{}'", self.field(attr, sym::module), cgu_name);
 
         if !self.available_cgus.contains(&cgu_name) {
-            let mut cgu_names: Vec<&str> =
-                self.available_cgus.iter().map(|cgu| cgu.as_str()).collect();
-            cgu_names.sort();
+            let cgu_names: Vec<&str> =
+                self.available_cgus.items().map(|cgu| cgu.as_str()).into_sorted_stable_ord();
             self.tcx.sess.emit_err(errors::NoModuleNamed {
                 span: attr.span,
                 user_path,
diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs
index 11710c368ce..b9171fad55b 100644
--- a/compiler/rustc_incremental/src/lib.rs
+++ b/compiler/rustc_incremental/src/lib.rs
@@ -4,7 +4,6 @@
 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
 #![feature(never_type)]
 #![recursion_limit = "256"]
-#![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
 
diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs
index 43274091cb8..cbe77e7b16d 100644
--- a/compiler/rustc_incremental/src/persist/dirty_clean.rs
+++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs
@@ -22,6 +22,7 @@
 use crate::errors;
 use rustc_ast::{self as ast, Attribute, NestedMetaItem};
 use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::unord::UnordSet;
 use rustc_hir::def_id::LocalDefId;
 use rustc_hir::intravisit;
 use rustc_hir::Node as HirNode;
@@ -125,7 +126,7 @@ const LABELS_ADT: &[&[&str]] = &[BASE_HIR, BASE_STRUCT];
 //
 //     type_of for these.
 
-type Labels = FxHashSet<String>;
+type Labels = UnordSet<String>;
 
 /// Represents the requested configuration by rustc_clean/dirty
 struct Assertion {
@@ -197,7 +198,7 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
         let (name, mut auto) = self.auto_labels(item_id, attr);
         let except = self.except(attr);
         let loaded_from_disk = self.loaded_from_disk(attr);
-        for e in except.iter() {
+        for e in except.items().map(|x| x.as_str()).into_sorted_stable_ord() {
             if !auto.remove(e) {
                 self.tcx.sess.emit_fatal(errors::AssertionAuto { span: attr.span, name, e });
             }
@@ -376,15 +377,17 @@ impl<'tcx> DirtyCleanVisitor<'tcx> {
                 continue;
             };
             self.checked_attrs.insert(attr.id);
-            for label in assertion.clean {
+            for label in assertion.clean.items().map(|x| x.as_str()).into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_clean(item_span, dep_node);
             }
-            for label in assertion.dirty {
+            for label in assertion.dirty.items().map(|x| x.as_str()).into_sorted_stable_ord() {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_dirty(item_span, dep_node);
             }
-            for label in assertion.loaded_from_disk {
+            for label in
+                assertion.loaded_from_disk.items().map(|x| x.as_str()).into_sorted_stable_ord()
+            {
                 let dep_node = DepNode::from_label_string(self.tcx, &label, def_path_hash).unwrap();
                 self.assert_loaded_from_disk(item_span, dep_node);
             }
diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs
index e3c688b3e98..243057b99bc 100644
--- a/compiler/rustc_incremental/src/persist/fs.rs
+++ b/compiler/rustc_incremental/src/persist/fs.rs
@@ -104,8 +104,9 @@
 //! implemented.
 
 use crate::errors;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashSet, FxIndexSet};
 use rustc_data_structures::svh::Svh;
+use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_data_structures::{base_n, flock};
 use rustc_errors::ErrorGuaranteed;
 use rustc_fs_util::{link_or_copy, try_canonicalize, LinkOrCopy};
@@ -635,8 +636,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
 
     // First do a pass over the crate directory, collecting lock files and
     // session directories
-    let mut session_directories = FxHashSet::default();
-    let mut lock_files = FxHashSet::default();
+    let mut session_directories = FxIndexSet::default();
+    let mut lock_files = UnordSet::default();
 
     for dir_entry in crate_directory.read_dir()? {
         let Ok(dir_entry) = dir_entry else {
@@ -657,10 +658,11 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             // This is something we don't know, leave it alone
         }
     }
+    session_directories.sort();
 
     // Now map from lock files to session directories
-    let lock_file_to_session_dir: FxHashMap<String, Option<String>> = lock_files
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, Option<String>> = lock_files
+        .into_items()
         .map(|lock_file_name| {
             assert!(lock_file_name.ends_with(LOCK_FILE_EXT));
             let dir_prefix_end = lock_file_name.len() - LOCK_FILE_EXT.len();
@@ -670,11 +672,13 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
             };
             (lock_file_name, session_dir.map(String::clone))
         })
-        .collect();
+        .into();
 
     // Delete all lock files that don't have an associated directory. They must
     // be some kind of leftover
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
+    for (lock_file_name, directory_name) in
+        lock_file_to_session_dir.items().into_sorted_stable_ord()
+    {
         if directory_name.is_none() {
             let Ok(timestamp) = extract_timestamp_from_session_dir(lock_file_name) else {
                 debug!(
@@ -685,19 +689,19 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
                 continue;
             };
 
-            let lock_file_path = crate_directory.join(&**lock_file_name);
+            let lock_file_path = crate_directory.join(&*lock_file_name);
 
             if is_old_enough_to_be_collected(timestamp) {
                 debug!(
                     "garbage_collect_session_directories() - deleting \
-                        garbage lock file: {}",
+                    garbage lock file: {}",
                     lock_file_path.display()
                 );
                 delete_session_dir_lock_file(sess, &lock_file_path);
             } else {
                 debug!(
                     "garbage_collect_session_directories() - lock file with \
-                        no session dir not old enough to be collected: {}",
+                    no session dir not old enough to be collected: {}",
                     lock_file_path.display()
                 );
             }
@@ -705,14 +709,14 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Filter out `None` directories
-    let lock_file_to_session_dir: FxHashMap<String, String> = lock_file_to_session_dir
-        .into_iter()
+    let lock_file_to_session_dir: UnordMap<String, String> = lock_file_to_session_dir
+        .into_items()
         .filter_map(|(lock_file_name, directory_name)| directory_name.map(|n| (lock_file_name, n)))
-        .collect();
+        .into();
 
     // Delete all session directories that don't have a lock file.
     for directory_name in session_directories {
-        if !lock_file_to_session_dir.values().any(|dir| *dir == directory_name) {
+        if !lock_file_to_session_dir.items().any(|(_, dir)| *dir == directory_name) {
             let path = crate_directory.join(directory_name);
             if let Err(err) = safe_remove_dir_all(&path) {
                 sess.emit_warning(errors::InvalidGcFailed { path: &path, err });
@@ -721,103 +725,103 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
     }
 
     // Now garbage collect the valid session directories.
-    let mut deletion_candidates = vec![];
+    let deletion_candidates =
+        lock_file_to_session_dir.items().filter_map(|(lock_file_name, directory_name)| {
+            debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
 
-    for (lock_file_name, directory_name) in &lock_file_to_session_dir {
-        debug!("garbage_collect_session_directories() - inspecting: {}", directory_name);
-
-        let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
+            let Ok(timestamp) = extract_timestamp_from_session_dir(directory_name) else {
             debug!(
                 "found session-dir with malformed timestamp: {}",
                 crate_directory.join(directory_name).display()
             );
             // Ignore it
-            continue;
+            return None;
         };
 
-        if is_finalized(directory_name) {
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+            if is_finalized(directory_name) {
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                             successfully acquired lock"
-                    );
-                    debug!(
-                        "garbage_collect_session_directories() - adding \
+                        );
+                        debug!(
+                            "garbage_collect_session_directories() - adding \
                             deletion candidate: {}",
-                        directory_name
-                    );
-
-                    // Note that we are holding on to the lock
-                    deletion_candidates.push((
-                        timestamp,
-                        crate_directory.join(directory_name),
-                        Some(lock),
-                    ));
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+                            directory_name
+                        );
+
+                        // Note that we are holding on to the lock
+                        return Some((
+                            (timestamp, crate_directory.join(directory_name)),
+                            Some(lock),
+                        ));
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                             not collecting, still in use"
-                    );
+                        );
+                    }
                 }
-            }
-        } else if is_old_enough_to_be_collected(timestamp) {
-            // When cleaning out "-working" session directories, i.e.
-            // session directories that might still be in use by another
-            // compiler instance, we only look at directories that are
-            // at least ten seconds old. This is supposed to reduce the
-            // chance of deleting a directory in the time window where
-            // the process has allocated the directory but has not yet
-            // acquired the file-lock on it.
-
-            // Try to acquire the directory lock. If we can't, it
-            // means that the owning process is still alive and we
-            // leave this directory alone.
-            let lock_file_path = crate_directory.join(lock_file_name);
-            match flock::Lock::new(
-                &lock_file_path,
-                false, // don't wait
-                false, // don't create the lock-file
-                true,
-            ) {
-                // get an exclusive lock
-                Ok(lock) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+            } else if is_old_enough_to_be_collected(timestamp) {
+                // When cleaning out "-working" session directories, i.e.
+                // session directories that might still be in use by another
+                // compiler instance, we only look at directories that are
+                // at least ten seconds old. This is supposed to reduce the
+                // chance of deleting a directory in the time window where
+                // the process has allocated the directory but has not yet
+                // acquired the file-lock on it.
+
+                // Try to acquire the directory lock. If we can't, it
+                // means that the owning process is still alive and we
+                // leave this directory alone.
+                let lock_file_path = crate_directory.join(lock_file_name);
+                match flock::Lock::new(
+                    &lock_file_path,
+                    false, // don't wait
+                    false, // don't create the lock-file
+                    true,
+                ) {
+                    // get an exclusive lock
+                    Ok(lock) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                             successfully acquired lock"
-                    );
+                        );
 
-                    delete_old(sess, &crate_directory.join(directory_name));
+                        delete_old(sess, &crate_directory.join(directory_name));
 
-                    // Let's make it explicit that the file lock is released at this point,
-                    // or rather, that we held on to it until here
-                    drop(lock);
-                }
-                Err(_) => {
-                    debug!(
-                        "garbage_collect_session_directories() - \
+                        // Let's make it explicit that the file lock is released at this point,
+                        // or rather, that we held on to it until here
+                        drop(lock);
+                    }
+                    Err(_) => {
+                        debug!(
+                            "garbage_collect_session_directories() - \
                             not collecting, still in use"
-                    );
+                        );
+                    }
                 }
-            }
-        } else {
-            debug!(
-                "garbage_collect_session_directories() - not finalized, not \
+            } else {
+                debug!(
+                    "garbage_collect_session_directories() - not finalized, not \
                     old enough"
-            );
-        }
-    }
+                );
+            }
+            None
+        });
+    let deletion_candidates = deletion_candidates.into();
 
     // Delete all but the most recent of the candidates
-    for (path, lock) in all_except_most_recent(deletion_candidates) {
+    all_except_most_recent(deletion_candidates).into_items().all(|(path, lock)| {
         debug!("garbage_collect_session_directories() - deleting `{}`", path.display());
 
         if let Err(err) = safe_remove_dir_all(&path) {
@@ -829,7 +833,8 @@ pub fn garbage_collect_session_directories(sess: &Session) -> io::Result<()> {
         // Let's make it explicit that the file lock is released at this point,
         // or rather, that we held on to it until here
         drop(lock);
-    }
+        true
+    });
 
     Ok(())
 }
@@ -845,18 +850,19 @@ fn delete_old(sess: &Session, path: &Path) {
 }
 
 fn all_except_most_recent(
-    deletion_candidates: Vec<(SystemTime, PathBuf, Option<flock::Lock>)>,
-) -> FxHashMap<PathBuf, Option<flock::Lock>> {
-    let most_recent = deletion_candidates.iter().map(|&(timestamp, ..)| timestamp).max();
+    deletion_candidates: UnordMap<(SystemTime, PathBuf), Option<flock::Lock>>,
+) -> UnordMap<PathBuf, Option<flock::Lock>> {
+    let most_recent = deletion_candidates.items().map(|(&(timestamp, _), _)| timestamp).max();
 
     if let Some(most_recent) = most_recent {
-        deletion_candidates
-            .into_iter()
-            .filter(|&(timestamp, ..)| timestamp != most_recent)
-            .map(|(_, path, lock)| (path, lock))
-            .collect()
+        UnordMap::from(
+            deletion_candidates
+                .into_items()
+                .filter(|&((timestamp, _), _)| timestamp != most_recent)
+                .map(|((_, path), lock)| (path, lock)),
+        )
     } else {
-        FxHashMap::default()
+        UnordMap::default()
     }
 }
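
The retention rule behind `all_except_most_recent`, restated over plain `std` types: keep the newest candidate, hand everything else back for deletion. This is a minimal sketch; in the real code the value side also carries the held directory lock, for which `()` stands in here.

```rust
use std::collections::HashMap;
use std::path::PathBuf;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

fn all_except_most_recent(
    candidates: HashMap<(SystemTime, PathBuf), ()>,
) -> HashMap<PathBuf, ()> {
    // The single newest timestamp survives; every other candidate is doomed.
    let most_recent = candidates.keys().map(|&(ts, _)| ts).max();
    match most_recent {
        Some(most_recent) => candidates
            .into_iter()
            .filter(|&((ts, _), _)| ts != most_recent)
            .map(|((_, path), lock)| (path, lock))
            .collect(),
        None => HashMap::new(),
    }
}

fn main() {
    let mut c = HashMap::new();
    c.insert((UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("old")), ());
    c.insert((UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("new")), ());
    let doomed = all_except_most_recent(c);
    assert!(doomed.contains_key(&PathBuf::from("old")));
    assert!(!doomed.contains_key(&PathBuf::from("new")));
}
```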
 
diff --git a/compiler/rustc_incremental/src/persist/fs/tests.rs b/compiler/rustc_incremental/src/persist/fs/tests.rs
index 184796948b6..644b8187621 100644
--- a/compiler/rustc_incremental/src/persist/fs/tests.rs
+++ b/compiler/rustc_incremental/src/persist/fs/tests.rs
@@ -2,26 +2,19 @@ use super::*;
 
 #[test]
 fn test_all_except_most_recent() {
+    let input: UnordMap<_, Option<flock::Lock>> = UnordMap::from_iter([
+        ((UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4")), None),
+        ((UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1")), None),
+        ((UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5")), None),
+        ((UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3")), None),
+        ((UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2")), None),
+    ]);
     assert_eq!(
-        all_except_most_recent(vec![
-            (UNIX_EPOCH + Duration::new(4, 0), PathBuf::from("4"), None),
-            (UNIX_EPOCH + Duration::new(1, 0), PathBuf::from("1"), None),
-            (UNIX_EPOCH + Duration::new(5, 0), PathBuf::from("5"), None),
-            (UNIX_EPOCH + Duration::new(3, 0), PathBuf::from("3"), None),
-            (UNIX_EPOCH + Duration::new(2, 0), PathBuf::from("2"), None),
-        ])
-        .keys()
-        .cloned()
-        .collect::<FxHashSet<PathBuf>>(),
-        [PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4"),]
-            .into_iter()
-            .collect::<FxHashSet<PathBuf>>()
+        all_except_most_recent(input).into_items().map(|(path, _)| path).into_sorted_stable_ord(),
+        vec![PathBuf::from("1"), PathBuf::from("2"), PathBuf::from("3"), PathBuf::from("4")]
     );
 
-    assert_eq!(
-        all_except_most_recent(vec![]).keys().cloned().collect::<FxHashSet<PathBuf>>(),
-        FxHashSet::default()
-    );
+    assert!(all_except_most_recent(UnordMap::default()).is_empty());
 }
 
 #[test]
diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs
index a4407a93ff3..bb479b5bdcc 100644
--- a/compiler/rustc_incremental/src/persist/load.rs
+++ b/compiler/rustc_incremental/src/persist/load.rs
@@ -1,8 +1,8 @@
 //! Code to save/load the dep-graph from files.
 
 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_middle::dep_graph::{SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::query::on_disk_cache::OnDiskCache;
 use rustc_serialize::opaque::MemDecoder;
@@ -16,7 +16,7 @@ use super::file_format;
 use super::fs::*;
 use super::work_product;
 
-type WorkProductMap = FxHashMap<WorkProductId, WorkProduct>;
+type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
 
 #[derive(Debug)]
 /// Represents the result of an attempt to load incremental compilation data.
@@ -147,7 +147,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
     let report_incremental_info = sess.opts.unstable_opts.incremental_info;
     let expected_hash = sess.opts.dep_tracking_hash(false);
 
-    let mut prev_work_products = FxHashMap::default();
+    let mut prev_work_products = UnordMap::default();
 
     // If we are only building with -Zquery-dep-graph but without an actual
     // incr. comp. session directory, we skip this. Otherwise we'd fail
@@ -163,7 +163,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture {
                 Decodable::decode(&mut work_product_decoder);
 
             for swp in work_products {
-                let all_files_exist = swp.work_product.saved_files.iter().all(|(_, path)| {
+                let all_files_exist = swp.work_product.saved_files.items().all(|(_, path)| {
                     let exists = in_incr_comp_dir_sess(sess, path).exists();
                     if !exists && sess.opts.unstable_opts.incremental_info {
                         eprintln!("incremental: could not find file for work product: {path}",);
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs
index 7376be6be8b..bfaa52f9c81 100644
--- a/compiler/rustc_incremental/src/persist/save.rs
+++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -1,5 +1,5 @@
 use crate::errors;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::sync::join;
 use rustc_middle::dep_graph::{DepGraph, SerializedDepGraph, WorkProduct, WorkProductId};
 use rustc_middle::ty::TyCtxt;
@@ -79,7 +79,7 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) {
 pub fn save_work_product_index(
     sess: &Session,
     dep_graph: &DepGraph,
-    new_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    new_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) {
     if sess.opts.incremental.is_none() {
         return;
@@ -105,7 +105,7 @@ pub fn save_work_product_index(
         if !new_work_products.contains_key(id) {
             work_product::delete_workproduct_files(sess, wp);
             debug_assert!(
-                !wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+                !wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
             );
         }
     }
@@ -113,13 +113,13 @@ pub fn save_work_product_index(
     // Check that we did not delete one of the current work-products:
     debug_assert!({
         new_work_products.iter().all(|(_, wp)| {
-            wp.saved_files.iter().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
+            wp.saved_files.items().all(|(_, path)| in_incr_comp_dir_sess(sess, path).exists())
         })
     });
 }
 
 fn encode_work_product_index(
-    work_products: &FxHashMap<WorkProductId, WorkProduct>,
+    work_products: &FxIndexMap<WorkProductId, WorkProduct>,
     encoder: &mut FileEncoder,
 ) {
     let serialized_products: Vec<_> = work_products
@@ -146,7 +146,7 @@ fn encode_query_cache(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult
 pub fn build_dep_graph(
     sess: &Session,
     prev_graph: SerializedDepGraph,
-    prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 ) -> Option<DepGraph> {
     if sess.opts.incremental.is_none() {
         // No incremental compilation.
diff --git a/compiler/rustc_incremental/src/persist/work_product.rs b/compiler/rustc_incremental/src/persist/work_product.rs
index dc98fbeb0d1..bce5ca1e16b 100644
--- a/compiler/rustc_incremental/src/persist/work_product.rs
+++ b/compiler/rustc_incremental/src/persist/work_product.rs
@@ -4,7 +4,7 @@
 
 use crate::errors;
 use crate::persist::fs::*;
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::unord::UnordMap;
 use rustc_fs_util::link_or_copy;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
 use rustc_session::Session;
@@ -20,7 +20,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
     debug!(?cgu_name, ?files);
     sess.opts.incremental.as_ref()?;
 
-    let mut saved_files = FxHashMap::default();
+    let mut saved_files = UnordMap::default();
     for (ext, path) in files {
         let file_name = format!("{cgu_name}.{ext}");
         let path_in_incr_dir = in_incr_comp_dir_sess(sess, &file_name);
@@ -46,7 +46,7 @@ pub fn copy_cgu_workproduct_to_incr_comp_cache_dir(
 
 /// Removes files for a given work product.
 pub fn delete_workproduct_files(sess: &Session, work_product: &WorkProduct) {
-    for (_, path) in &work_product.saved_files {
+    for (_, path) in work_product.saved_files.items().into_sorted_stable_ord() {
         let path = in_incr_comp_dir_sess(sess, path);
         if let Err(err) = std_fs::remove_file(&path) {
             sess.emit_warning(errors::DeleteWorkProduct { path: &path, err });
diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs
index c441a8ffd6f..455a8129656 100644
--- a/compiler/rustc_interface/src/queries.rs
+++ b/compiler/rustc_interface/src/queries.rs
@@ -5,6 +5,7 @@ use crate::passes;
 use rustc_ast as ast;
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_codegen_ssa::CodegenResults;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::svh::Svh;
 use rustc_data_structures::sync::{AppendOnlyIndexVec, Lrc, OnceCell, RwLock, WorkerLocal};
@@ -193,9 +194,15 @@ impl<'tcx> Queries<'tcx> {
             let future_opt = self.dep_graph_future()?.steal();
             let dep_graph = future_opt
                 .and_then(|future| {
-                    let (prev_graph, prev_work_products) =
+                    let (prev_graph, mut prev_work_products) =
                         sess.time("blocked_on_dep_graph_loading", || future.open().open(sess));
-
+                    // Convert from UnordMap to FxIndexMap by sorting
+                    let prev_work_product_ids =
+                        prev_work_products.items().map(|x| *x.0).into_sorted_stable_ord();
+                    let prev_work_products = prev_work_product_ids
+                        .into_iter()
+                        .map(|x| (x, prev_work_products.remove(&x).unwrap()))
+                        .collect::<FxIndexMap<_, _>>();
                     rustc_incremental::build_dep_graph(sess, prev_graph, prev_work_products)
                 })
                 .unwrap_or_else(DepGraph::new_disabled);
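
The conversion inserted above follows a general pattern: snapshot the keys of an unordered map, sort them by a stable order, then rebuild an insertion-ordered map so every later iteration is canonical. A standalone sketch, with plain `HashMap`/`IndexMap` standing in for `UnordMap`/`FxIndexMap`:

```rust
use indexmap::IndexMap;
use std::collections::HashMap;

fn into_sorted_index_map(mut unordered: HashMap<u64, String>) -> IndexMap<u64, String> {
    // Sorting the keys fixes a canonical insertion order, independent of the
    // hash map's internal (nondeterministic) iteration order.
    let mut keys: Vec<u64> = unordered.keys().copied().collect();
    keys.sort_unstable();
    keys.into_iter()
        .map(|k| {
            let v = unordered.remove(&k).unwrap();
            (k, v)
        })
        .collect()
}

fn main() {
    let unordered = HashMap::from([(2, "b".to_string()), (1, "a".to_string())]);
    let ordered = into_sorted_index_map(unordered);
    assert_eq!(ordered.keys().copied().collect::<Vec<_>>(), [1, 2]);
}
```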
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
index 91caf9db336..0416411dfe1 100644
--- a/compiler/rustc_middle/src/mir/interpret/value.rs
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -97,6 +97,10 @@ impl<'tcx> ConstValue<'tcx> {
         ConstValue::Scalar(Scalar::from_u64(i))
     }
 
+    pub fn from_u128(i: u128) -> Self {
+        ConstValue::Scalar(Scalar::from_u128(i))
+    }
+
     pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
         ConstValue::Scalar(Scalar::from_target_usize(i, cx))
     }
@@ -241,6 +245,11 @@ impl<Prov> Scalar<Prov> {
     }
 
     #[inline]
+    pub fn from_u128(i: u128) -> Self {
+        Scalar::Int(i.into())
+    }
+
+    #[inline]
     pub fn from_target_usize(i: u64, cx: &impl HasDataLayout) -> Self {
         Self::from_uint(i, cx.data_layout().pointer_size)
     }
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index d45e4d595a7..c9f69c37782 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -11,7 +11,7 @@ use crate::ty::{
 use crate::ty::{GenericArgKind, SubstsRef};
 use rustc_apfloat::Float as _;
 use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher};
+use rustc_data_structures::stable_hasher::{Hash128, HashStable, StableHasher};
 use rustc_errors::ErrorGuaranteed;
 use rustc_hir as hir;
 use rustc_hir::def::{CtorOf, DefKind, Res};
@@ -129,7 +129,7 @@ impl IntTypeExt for IntegerType {
 impl<'tcx> TyCtxt<'tcx> {
     /// Creates a hash of the type `Ty` which will be the same no matter what crate
     /// context it's calculated within. This is used by the `type_id` intrinsic.
-    pub fn type_id_hash(self, ty: Ty<'tcx>) -> Hash64 {
+    pub fn type_id_hash(self, ty: Ty<'tcx>) -> Hash128 {
         // We want the type_id to be independent of the type's free regions, so we
         // erase them. The erase_regions() call will also anonymize bound
         // regions, which is desirable too.
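
Why widen the hash from 64 to 128 bits: by the birthday bound, collisions among n uniformly random k-bit hashes become likely around n ≈ 2^(k/2). A back-of-the-envelope sketch of the two thresholds:

```rust
fn main() {
    // Roughly how many distinct types fit before a collision is ~50% likely.
    let threshold_64 = 2f64.powi(32); // ≈ 4.3e9 for 64-bit type ids
    let threshold_128 = 2f64.powi(64); // ≈ 1.8e19 for 128-bit type ids
    println!("64-bit ids:  ~{threshold_64:e} types");
    println!("128-bit ids: ~{threshold_128:e} types");
}
```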
diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
index 9e1ca6ab515..39a4cb1b179 100644
--- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs
@@ -46,7 +46,7 @@ use super::{DepContext, DepKind, FingerprintStyle};
 use crate::ich::StableHashingContext;
 
 use rustc_data_structures::fingerprint::{Fingerprint, PackedFingerprint};
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, StableOrd, ToStableHashKey};
 use rustc_hir::definitions::DefPathHash;
 use std::fmt;
 use std::hash::Hash;
@@ -247,3 +247,14 @@ impl<HCX> HashStable<HCX> for WorkProductId {
         self.hash.hash_stable(hcx, hasher)
     }
 }
+impl<HCX> ToStableHashKey<HCX> for WorkProductId {
+    type KeyType = Fingerprint;
+    #[inline]
+    fn to_stable_hash_key(&self, _: &HCX) -> Self::KeyType {
+        self.hash
+    }
+}
+unsafe impl StableOrd for WorkProductId {
+    // Fingerprint can use unstable sorting (it is just a tuple of `u64`s), so WorkProductId can as well
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index c0d7386dd6a..c9e80a6d9bc 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -1,11 +1,12 @@
 use parking_lot::Mutex;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
 use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
 use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
+use rustc_data_structures::unord::UnordMap;
 use rustc_index::IndexVec;
 use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
 use smallvec::{smallvec, SmallVec};
@@ -93,7 +94,7 @@ pub struct DepGraphData<K: DepKind> {
     /// things available to us. If we find that they are not dirty, we
     /// load the path to the file storing those work-products here into
     /// this map. We can later look for and extract that data.
-    previous_work_products: FxHashMap<WorkProductId, WorkProduct>,
+    previous_work_products: FxIndexMap<WorkProductId, WorkProduct>,
 
     dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
 
@@ -116,7 +117,7 @@ impl<K: DepKind> DepGraph<K> {
     pub fn new(
         profiler: &SelfProfilerRef,
         prev_graph: SerializedDepGraph<K>,
-        prev_work_products: FxHashMap<WorkProductId, WorkProduct>,
+        prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
         encoder: FileEncoder,
         record_graph: bool,
         record_stats: bool,
@@ -688,7 +689,7 @@ impl<K: DepKind> DepGraph<K> {
 
     /// Access the map of work-products created during the cached run. Only
     /// used during saving of the dep-graph.
-    pub fn previous_work_products(&self) -> &FxHashMap<WorkProductId, WorkProduct> {
+    pub fn previous_work_products(&self) -> &FxIndexMap<WorkProductId, WorkProduct> {
         &self.data.as_ref().unwrap().previous_work_products
     }
 
@@ -1048,7 +1049,7 @@ pub struct WorkProduct {
     ///
     /// By convention, file extensions are currently used as identifiers, i.e. the key "o" maps to
     /// the object file's path, and "dwo" to the dwarf object file's path.
-    pub saved_files: FxHashMap<String, String>,
+    pub saved_files: UnordMap<String, String>,
 }
 
 // Index type for `DepNodeData`'s edges.
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 730e4c8d30d..b2bc33c7e0d 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -69,6 +69,8 @@ where
         make_query: fn(Qcx, K) -> QueryStackFrame<D>,
         jobs: &mut QueryMap<D>,
     ) -> Option<()> {
+        let mut active = Vec::new();
+
         #[cfg(parallel_compiler)]
         {
             // We use try_lock_shards here since we are called from the
@@ -77,8 +79,7 @@ where
             for shard in shards.iter() {
                 for (k, v) in shard.iter() {
                     if let QueryResult::Started(ref job) = *v {
-                        let query = make_query(qcx, *k);
-                        jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+                        active.push((*k, job.clone()));
                     }
                 }
             }
@@ -91,12 +92,18 @@ where
             // really hurt much.)
             for (k, v) in self.active.try_lock()?.iter() {
                 if let QueryResult::Started(ref job) = *v {
-                    let query = make_query(qcx, *k);
-                    jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+                    active.push((*k, job.clone()));
                 }
             }
         }
 
+        // Call `make_query` while we're not holding a `self.active` lock as `make_query` may call
+        // queries leading to a deadlock.
+        for (key, job) in active {
+            let query = make_query(qcx, key);
+            jobs.insert(job.id, QueryJobInfo { query, job });
+        }
+
         Some(())
     }
 }
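
The restructuring above is a classic deadlock-avoidance pattern: snapshot the shared state while the lock is held, release the lock, and only then invoke the callback, which may re-enter the locked structure. A minimal sketch with a `std::sync::Mutex` standing in for the query system's shards:

```rust
use std::sync::Mutex;

struct Registry {
    active: Mutex<Vec<u32>>,
}

impl Registry {
    fn for_each_active<F: FnMut(&Registry, u32)>(&self, mut callback: F) {
        // Phase 1: clone the keys out while holding the lock; the guard is a
        // temporary and is dropped at the end of this statement.
        let snapshot: Vec<u32> = self.active.lock().unwrap().clone();

        // Phase 2: the lock is free again, so the callback may safely call
        // back into `self.active` (as `make_query` may re-enter the query
        // system) without self-deadlocking.
        for key in snapshot {
            callback(self, key);
        }
    }
}

fn main() {
    let registry = Registry { active: Mutex::new(vec![1, 2, 3]) };
    registry.for_each_active(|r, key| {
        // Re-entrant access: fine, because no lock is held here.
        let len = r.active.lock().unwrap().len();
        println!("job {key} of {len}");
    });
}
```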
diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs
index 24291301a32..21491afa942 100644
--- a/compiler/rustc_session/src/config.rs
+++ b/compiler/rustc_session/src/config.rs
@@ -311,7 +311,9 @@ pub enum OutputType {
 }
 
 // Safety: Trivial C-Style enums have a stable sort order across compilation sessions.
-unsafe impl StableOrd for OutputType {}
+unsafe impl StableOrd for OutputType {
+    const CAN_USE_UNSTABLE_SORT: bool = true;
+}
 
 impl<HCX: HashStableContext> ToStableHashKey<HCX> for OutputType {
     type KeyType = Self;
diff --git a/library/core/src/any.rs b/library/core/src/any.rs
index 7969f4055dd..09f52d692d0 100644
--- a/library/core/src/any.rs
+++ b/library/core/src/any.rs
@@ -153,6 +153,7 @@
 #![stable(feature = "rust1", since = "1.0.0")]
 
 use crate::fmt;
+use crate::hash;
 use crate::intrinsics;
 
 ///////////////////////////////////////////////////////////////////////////////
@@ -662,10 +663,10 @@ impl dyn Any + Send + Sync {
 /// While `TypeId` implements `Hash`, `PartialOrd`, and `Ord`, it is worth
 /// noting that the hashes and ordering will vary between Rust releases. Beware
 /// of relying on them inside of your code!
-#[derive(Clone, Copy, Debug, Hash, Eq, PartialOrd, Ord)]
+#[derive(Clone, Copy, Debug, Eq, PartialOrd, Ord)]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub struct TypeId {
-    t: u64,
+    t: u128,
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -696,7 +697,31 @@ impl TypeId {
     #[stable(feature = "rust1", since = "1.0.0")]
     #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
     pub const fn of<T: ?Sized + 'static>() -> TypeId {
-        TypeId { t: intrinsics::type_id::<T>() }
+        #[cfg(bootstrap)]
+        let t = intrinsics::type_id::<T>() as u128;
+        #[cfg(not(bootstrap))]
+        let t: u128 = intrinsics::type_id::<T>();
+        TypeId { t }
+    }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for TypeId {
+    #[inline]
+    fn hash<H: hash::Hasher>(&self, state: &mut H) {
+        // We only hash the lower 64 bits of our (128 bit) internal numeric ID,
+        // because:
+        // - The hashing algorithm which backs `TypeId` is expected to be
+        //   unbiased and high quality, meaning further mixing would be somewhat
+        //   redundant compared to choosing (the lower) 64 bits arbitrarily.
+        // - `Hasher::finish` returns a u64 anyway, so the extra entropy we'd
+        //   get from hashing the full value would probably not be useful
+        //   (especially given the previous point about the lower 64 bits being
+        //   high quality on their own).
+        // - It is correct to do so -- only hashing a subset of `self` is still
+        //   compatible with an `Eq` implementation that considers the entire
+        //   value, as ours does.
+        (self.t as u64).hash(state);
     }
 }
 
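Editor's note: one observable consequence of widening `TypeId` while hashing only its low 64 bits is that any `Hasher` still yields a `u64` digest, and equal `TypeId`s keep hashing equally. A small stable-Rust illustration:

    use std::any::TypeId;
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    fn hash_type_id(id: TypeId) -> u64 {
        let mut hasher = DefaultHasher::new();
        id.hash(&mut hasher);
        hasher.finish() // a u64, regardless of TypeId's internal width
    }

    fn main() {
        // Equal TypeIds must hash equally; distinct types almost surely differ.
        assert_eq!(hash_type_id(TypeId::of::<u32>()), hash_type_id(TypeId::of::<u32>()));
        println!("{:x}", hash_type_id(TypeId::of::<String>()));
    }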
diff --git a/library/core/src/intrinsics.rs b/library/core/src/intrinsics.rs
index 6dca1fe1e69..9b8612485ac 100644
--- a/library/core/src/intrinsics.rs
+++ b/library/core/src/intrinsics.rs
@@ -1057,8 +1057,25 @@ extern "rust-intrinsic" {
     #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
     #[rustc_safe_intrinsic]
     #[rustc_nounwind]
+    #[cfg(bootstrap)]
     pub fn type_id<T: ?Sized + 'static>() -> u64;
 
+    /// Gets an identifier which is globally unique to the specified type. This
+    /// function will return the same value for a type regardless of which
+    /// crate it is invoked in.
+    ///
+    /// Note that, unlike most intrinsics, this is safe to call;
+    /// it does not require an `unsafe` block.
+    /// Therefore, implementations must not require the user to uphold
+    /// any safety invariants.
+    ///
+    /// The stabilized version of this intrinsic is [`core::any::TypeId::of`].
+    #[rustc_const_unstable(feature = "const_type_id", issue = "77125")]
+    #[rustc_safe_intrinsic]
+    #[rustc_nounwind]
+    #[cfg(not(bootstrap))]
+    pub fn type_id<T: ?Sized + 'static>() -> u128;
+
     /// A guard for unsafe functions that cannot ever be executed if `T` is uninhabited:
     /// This will statically either panic, or do nothing.
     ///
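Editor's note: the paired `#[cfg(bootstrap)]` / `#[cfg(not(bootstrap))]` declarations are how a signature change is threaded through rustc's bootstrap cycle: the stage0 compiler only understands the old `u64` form, so both are declared and each call site selects one. A generic sketch of the pattern with illustrative names; outside of rustc the `bootstrap` cfg is never set, so only the second item compiles:

    // Old signature, seen only by the stage0 (bootstrap) compiler.
    #[cfg(bootstrap)]
    fn width_of() -> u64 { 64 }

    // New signature, seen by every later stage.
    #[cfg(not(bootstrap))]
    fn width_of() -> u128 { 128 }

    fn caller() -> u128 {
        // Each call site widens or uses the value as appropriate per cfg.
        #[cfg(bootstrap)]
        let t = width_of() as u128;
        #[cfg(not(bootstrap))]
        let t: u128 = width_of();
        t
    }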
diff --git a/library/std/src/sys/common/thread_local/fast_local.rs b/library/std/src/sys/common/thread_local/fast_local.rs
index 447044a798b..bc5da1a1896 100644
--- a/library/std/src/sys/common/thread_local/fast_local.rs
+++ b/library/std/src/sys/common/thread_local/fast_local.rs
@@ -33,20 +33,21 @@ pub macro thread_local_inner {
             // 1 == dtor registered, dtor not run
             // 2 == dtor registered and is running or has run
             #[thread_local]
-            static mut STATE: $crate::primitive::u8 = 0;
+            static STATE: $crate::cell::Cell<$crate::primitive::u8> = $crate::cell::Cell::new(0);
 
+            // Safety: Performs `drop_in_place(ptr as *mut $t)`, and requires
+            // all that comes with it.
             unsafe extern "C" fn destroy(ptr: *mut $crate::primitive::u8) {
-                let ptr = ptr as *mut $t;
-
-                unsafe {
-                    $crate::debug_assert_eq!(STATE, 1);
-                    STATE = 2;
-                    $crate::ptr::drop_in_place(ptr);
-                }
+                $crate::thread::local_impl::abort_on_dtor_unwind(|| {
+                    let old_state = STATE.replace(2);
+                    $crate::debug_assert_eq!(old_state, 1);
+                    // Safety: safety requirement is passed on to caller.
+                    unsafe { $crate::ptr::drop_in_place(ptr.cast::<$t>()); }
+                });
             }
 
             unsafe {
-                match STATE {
+                match STATE.get() {
                     // 0 == we haven't registered a destructor, so do
                     //   so now.
                     0 => {
@@ -54,7 +55,7 @@ pub macro thread_local_inner {
                             $crate::ptr::addr_of_mut!(VAL) as *mut $crate::primitive::u8,
                             destroy,
                         );
-                        STATE = 1;
+                        STATE.set(1);
                         $crate::option::Option::Some(&VAL)
                     }
                     // 1 == the destructor is registered and the value
@@ -148,7 +149,6 @@ impl<T> fmt::Debug for Key<T> {
         f.debug_struct("Key").finish_non_exhaustive()
     }
 }
-
 impl<T> Key<T> {
     pub const fn new() -> Key<T> {
         Key { inner: LazyKeyInner::new(), dtor_state: Cell::new(DtorState::Unregistered) }
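Editor's note: the macro body now tracks the destructor state through a `Cell<u8>` instead of a `static mut`, so ordinary shared references suffice and the unsafe reads and writes disappear. The same three-state machine, extracted into a standalone sketch with the registration and value details elided:

    use std::cell::Cell;

    // 0 == no destructor registered, 1 == registered but not run,
    // 2 == destructor is running or has run.
    thread_local! {
        static STATE: Cell<u8> = Cell::new(0);
    }

    fn on_access(register_dtor: impl FnOnce()) -> bool {
        STATE.with(|state| match state.get() {
            0 => {
                register_dtor();
                state.set(1);
                true // value is initialized and usable
            }
            1 => true,  // destructor registered, value still alive
            _ => false, // destructor running or already run: deny access
        })
    }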
diff --git a/library/std/src/sys/common/thread_local/mod.rs b/library/std/src/sys/common/thread_local/mod.rs
index 77f64588310..975509bd412 100644
--- a/library/std/src/sys/common/thread_local/mod.rs
+++ b/library/std/src/sys/common/thread_local/mod.rs
@@ -101,3 +101,24 @@ mod lazy {
         }
     }
 }
+
+/// Run a callback in a scenario which must not unwind (such as an `extern "C"
+/// fn` declared in a user crate). If the callback unwinds anyway, then
+/// `rtabort!` with a message about a thread local panicking on drop.
+#[inline]
+pub fn abort_on_dtor_unwind(f: impl FnOnce()) {
+    // Using a drop guard like this is cheaper than wrapping `f` in `catch_unwind`.
+    let guard = DtorUnwindGuard;
+    f();
+    core::mem::forget(guard);
+
+    struct DtorUnwindGuard;
+    impl Drop for DtorUnwindGuard {
+        #[inline]
+        fn drop(&mut self) {
+            // This is not terribly descriptive, but it doesn't need to be as we'll
+            // already have printed a panic message at this point.
+            rtabort!("thread local panicked on drop");
+        }
+    }
+}
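Editor's note: the guard works because `mem::forget` is only reached on the normal return path: if `f` unwinds, the guard is dropped and the process aborts; if `f` returns, the guard is forgotten and its `Drop` never runs. The same pattern outside of std, substituting `std::process::abort` for the internal `rtabort!` macro:

    use std::mem;
    use std::process;

    pub fn abort_on_unwind(f: impl FnOnce()) {
        struct Guard;
        impl Drop for Guard {
            fn drop(&mut self) {
                // Only reached if `f` unwound past us.
                process::abort();
            }
        }

        let guard = Guard;
        f();
        // Normal return: defuse the guard so its Drop (and the abort) never runs.
        mem::forget(guard);
    }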
diff --git a/library/std/src/thread/mod.rs b/library/std/src/thread/mod.rs
index f712c872708..d9973185bc4 100644
--- a/library/std/src/thread/mod.rs
+++ b/library/std/src/thread/mod.rs
@@ -206,7 +206,7 @@ cfg_if::cfg_if! {
         #[doc(hidden)]
         #[unstable(feature = "thread_local_internals", issue = "none")]
         pub mod local_impl {
-            pub use crate::sys::common::thread_local::{thread_local_inner, Key};
+            pub use crate::sys::common::thread_local::{thread_local_inner, Key, abort_on_dtor_unwind};
         }
     }
 }
diff --git a/src/bootstrap/compile.rs b/src/bootstrap/compile.rs
index a9e15d89246..72d791b5434 100644
--- a/src/bootstrap/compile.rs
+++ b/src/bootstrap/compile.rs
@@ -118,6 +118,10 @@ impl Step for Std {
             || builder.config.keep_stage_std.contains(&compiler.stage)
         {
             builder.info("Warning: Using a potentially old libstd. This may not behave well.");
+
+            copy_third_party_objects(builder, &compiler, target);
+            copy_self_contained_objects(builder, &compiler, target);
+
             builder.ensure(StdLink::from_std(self, compiler));
             return;
         }
diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs
index 0f0a3bb8775..962cbf758d4 100644
--- a/src/bootstrap/tool.rs
+++ b/src/bootstrap/tool.rs
@@ -711,7 +711,7 @@ impl Step for RustAnalyzerProcMacroSrv {
             tool: "rust-analyzer-proc-macro-srv",
             mode: Mode::ToolStd,
             path: "src/tools/rust-analyzer/crates/proc-macro-srv-cli",
-            extra_features: vec!["proc-macro-srv/sysroot-abi".to_owned()],
+            extra_features: vec!["sysroot-abi".to_owned()],
             is_optional_tool: false,
             source_type: SourceType::InTree,
             allow_features: RustAnalyzer::ALLOW_FEATURES,
diff --git a/src/ci/stage-build.py b/src/ci/stage-build.py
index 91bd137085e..febc0492b94 100644
--- a/src/ci/stage-build.py
+++ b/src/ci/stage-build.py
@@ -620,11 +620,17 @@ def get_files(directory: Path, filter: Optional[Callable[[Path], bool]] = None)
             yield path
 
 
-def build_rustc(
+def bootstrap_build(
         pipeline: Pipeline,
         args: List[str],
-        env: Optional[Dict[str, str]] = None
+        env: Optional[Dict[str, str]] = None,
+        targets: Iterable[str] = ("library/std", )
 ):
+    if env is None:
+        env = {}
+    else:
+        env = dict(env)
+    env["RUST_BACKTRACE"] = "1"
     arguments = [
                     sys.executable,
                     pipeline.checkout_path() / "x.py",
@@ -632,8 +638,7 @@ def build_rustc(
                     "--target", PGO_HOST,
                     "--host", PGO_HOST,
                     "--stage", "2",
-                    "library/std"
-                ] + args
+                    ] + list(targets) + args
     cmd(arguments, env=env)
 
 
@@ -776,18 +781,18 @@ def record_metrics(pipeline: Pipeline, timer: Timer):
     if metrics is None:
         return
     llvm_steps = tuple(metrics.find_all_by_type("bootstrap::llvm::Llvm"))
-    assert len(llvm_steps) > 0
     llvm_duration = sum(step.duration for step in llvm_steps)
 
     rustc_steps = tuple(metrics.find_all_by_type("bootstrap::compile::Rustc"))
-    assert len(rustc_steps) > 0
     rustc_duration = sum(step.duration for step in rustc_steps)
 
     # The LLVM step is part of the Rustc step
-    rustc_duration -= llvm_duration
+    rustc_duration = max(0, rustc_duration - llvm_duration)
 
-    timer.add_duration("LLVM", llvm_duration)
-    timer.add_duration("Rustc", rustc_duration)
+    if llvm_duration > 0:
+        timer.add_duration("LLVM", llvm_duration)
+    if rustc_duration > 0:
+        timer.add_duration("Rustc", rustc_duration)
 
     log_metrics(metrics)
 
@@ -872,79 +877,114 @@ download-ci-llvm = true
     ))
 
 
-def execute_build_pipeline(timer: Timer, pipeline: Pipeline, runner: BenchmarkRunner, final_build_args: List[str]):
+def execute_build_pipeline(timer: Timer, pipeline: Pipeline, runner: BenchmarkRunner, dist_build_args: List[str]):
     # Clear and prepare tmp directory
     shutil.rmtree(pipeline.opt_artifacts(), ignore_errors=True)
     os.makedirs(pipeline.opt_artifacts(), exist_ok=True)
 
     pipeline.build_rustc_perf()
 
-    # Stage 1: Build rustc + PGO instrumented LLVM
-    with timer.section("Stage 1 (LLVM PGO)") as stage1:
-        with stage1.section("Build rustc and LLVM") as rustc_build:
-            build_rustc(pipeline, args=[
-                "--llvm-profile-generate"
-            ], env=dict(
-                LLVM_PROFILE_DIR=str(pipeline.llvm_profile_dir_root() / "prof-%p")
-            ))
-            record_metrics(pipeline, rustc_build)
+    """
+    Stage 1: Build PGO instrumented rustc
+
+    We use a normal build of LLVM, because gathering PGO profiles for LLVM and `rustc` at the same time
+    can cause issues.
+    """
+    with timer.section("Stage 1 (rustc PGO)") as stage1:
+        with stage1.section("Build PGO instrumented rustc and LLVM") as rustc_pgo_instrument:
+            bootstrap_build(pipeline, args=[
+                "--rust-profile-generate",
+                pipeline.rustc_profile_dir_root()
+            ])
+            record_metrics(pipeline, rustc_pgo_instrument)
 
         with stage1.section("Gather profiles"):
-            gather_llvm_profiles(pipeline, runner)
+            gather_rustc_profiles(pipeline, runner)
         print_free_disk_space(pipeline)
 
-    clear_llvm_files(pipeline)
-    final_build_args += [
-        "--llvm-profile-use",
-        pipeline.llvm_profile_merged_file()
-    ]
-
-    # Stage 2: Build PGO instrumented rustc + LLVM
-    with timer.section("Stage 2 (rustc PGO)") as stage2:
-        with stage2.section("Build rustc and LLVM") as rustc_build:
-            build_rustc(pipeline, args=[
-                "--rust-profile-generate",
-                pipeline.rustc_profile_dir_root()
+        with stage1.section("Build PGO optimized rustc") as rustc_pgo_use:
+            bootstrap_build(pipeline, args=[
+                "--rust-profile-use",
+                pipeline.rustc_profile_merged_file()
             ])
-            record_metrics(pipeline, rustc_build)
+            record_metrics(pipeline, rustc_pgo_use)
+        dist_build_args += [
+            "--rust-profile-use",
+            pipeline.rustc_profile_merged_file()
+        ]
+
+    """
+    Stage 2: Gather LLVM PGO profiles
+    """
+    with timer.section("Stage 2 (LLVM PGO)") as stage2:
+        # Clear normal LLVM artifacts
+        clear_llvm_files(pipeline)
+
+        with stage2.section("Build PGO instrumented LLVM") as llvm_pgo_instrument:
+            bootstrap_build(pipeline, args=[
+                "--llvm-profile-generate",
+                # We want to keep the already built PGO-optimized `rustc`.
+                "--keep-stage", "0",
+                "--keep-stage", "1"
+            ], env=dict(
+                LLVM_PROFILE_DIR=str(pipeline.llvm_profile_dir_root() / "prof-%p")
+            ))
+            record_metrics(pipeline, llvm_pgo_instrument)
 
         with stage2.section("Gather profiles"):
-            gather_rustc_profiles(pipeline, runner)
+            gather_llvm_profiles(pipeline, runner)
+
+        dist_build_args += [
+            "--llvm-profile-use",
+            pipeline.llvm_profile_merged_file(),
+        ]
         print_free_disk_space(pipeline)
 
-    clear_llvm_files(pipeline)
-    final_build_args += [
-        "--rust-profile-use",
-        pipeline.rustc_profile_merged_file()
-    ]
+        # Clear PGO-instrumented LLVM artifacts
+        clear_llvm_files(pipeline)
 
-    # Stage 3: Build rustc + BOLT instrumented LLVM
+    """
+    Stage 3: Build BOLT instrumented LLVM
+
+    We build a PGO optimized LLVM in this step, then instrument it with BOLT and gather BOLT profiles.
+    Note that we don't remove LLVM artifacts after this step, so that they are reused in the final dist build.
+BOLT instrumentation is performed "on-the-fly" when the LLVM library is copied into the sysroot of rustc,
+so the LLVM artifacts on disk are not "tainted" with BOLT instrumentation and can be reused.
+    """
     if pipeline.supports_bolt():
         with timer.section("Stage 3 (LLVM BOLT)") as stage3:
-            with stage3.section("Build rustc and LLVM") as rustc_build:
-                build_rustc(pipeline, args=[
+            with stage3.section("Build BOLT instrumented LLVM") as llvm_bolt_instrument:
+                bootstrap_build(pipeline, args=[
                     "--llvm-profile-use",
                     pipeline.llvm_profile_merged_file(),
                     "--llvm-bolt-profile-generate",
-                    "--rust-profile-use",
-                    pipeline.rustc_profile_merged_file()
+                    # We want to keep the already built PGO-optimized `rustc`.
+                    "--keep-stage", "0",
+                    "--keep-stage", "1"
                 ])
-                record_metrics(pipeline, rustc_build)
+                record_metrics(pipeline, llvm_bolt_instrument)
 
             with stage3.section("Gather profiles"):
                 gather_llvm_bolt_profiles(pipeline, runner)
 
-        # LLVM is not being cleared here, we want to reuse the previous build
-        print_free_disk_space(pipeline)
-        final_build_args += [
-            "--llvm-bolt-profile-use",
-            pipeline.llvm_bolt_profile_merged_file()
-        ]
+            dist_build_args += [
+                "--llvm-bolt-profile-use",
+                pipeline.llvm_bolt_profile_merged_file()
+            ]
+            print_free_disk_space(pipeline)
 
-    # Stage 4: Build PGO optimized rustc + PGO/BOLT optimized LLVM
-    with timer.section("Stage 4 (final build)") as stage4:
-        cmd(final_build_args)
-        record_metrics(pipeline, stage4)
+    # We want to keep the already built PGO-optimized `rustc`.
+    dist_build_args += [
+        "--keep-stage", "0",
+        "--keep-stage", "1"
+    ]
+
+    """
+    Final stage: Build PGO optimized rustc + PGO/BOLT optimized LLVM
+    """
+    with timer.section("Final stage (dist build)") as final_stage:
+        cmd(dist_build_args)
+        record_metrics(pipeline, final_stage)
 
     # Try builds can be in various broken states, so we don't want to gatekeep them with tests
     if not is_try_build():
diff --git a/src/tools/clippy/clippy_lints/src/wildcard_imports.rs b/src/tools/clippy/clippy_lints/src/wildcard_imports.rs
index b6e4cd22789..2a3d86988bb 100644
--- a/src/tools/clippy/clippy_lints/src/wildcard_imports.rs
+++ b/src/tools/clippy/clippy_lints/src/wildcard_imports.rs
@@ -160,7 +160,7 @@ impl LateLintPass<'_> for WildcardImports {
                     )
                 };
 
-                let mut imports = used_imports.items().map(ToString::to_string).into_sorted_stable_ord(false);
+                let mut imports = used_imports.items().map(ToString::to_string).into_sorted_stable_ord();
                 let imports_string = if imports.len() == 1 {
                     imports.pop().unwrap()
                 } else if braced_glob {
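Editor's note: with this change, `into_sorted_stable_ord` no longer takes a flag; whether an unstable sort is permissible is decided by the element type's `CAN_USE_UNSTABLE_SORT` constant (see the `StableOrd` hunks earlier in this diff). A hypothetical call site, assuming the `UnordSet` API from `rustc_data_structures::unord`:

    use rustc_data_structures::unord::UnordSet;

    fn sorted_names(names: &UnordSet<String>) -> Vec<String> {
        // Deterministic output, no flag: the sort strategy comes from
        // <String as StableOrd>::CAN_USE_UNSTABLE_SORT internally.
        names.items().map(ToString::to_string).into_sorted_stable_ord()
    }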
diff --git a/tests/run-make-fulldeps/hotplug_codegen_backend/the_backend.rs b/tests/run-make-fulldeps/hotplug_codegen_backend/the_backend.rs
index 7db100a08a1..8a275751e38 100644
--- a/tests/run-make-fulldeps/hotplug_codegen_backend/the_backend.rs
+++ b/tests/run-make-fulldeps/hotplug_codegen_backend/the_backend.rs
@@ -15,7 +15,7 @@ extern crate rustc_target;
 
 use rustc_codegen_ssa::traits::CodegenBackend;
 use rustc_codegen_ssa::{CodegenResults, CrateInfo};
-use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::fx::FxIndexMap;
 use rustc_errors::ErrorGuaranteed;
 use rustc_metadata::EncodedMetadata;
 use rustc_middle::dep_graph::{WorkProduct, WorkProductId};
@@ -49,11 +49,11 @@ impl CodegenBackend for TheBackend {
         ongoing_codegen: Box<dyn Any>,
         _sess: &Session,
         _outputs: &OutputFilenames,
-    ) -> Result<(CodegenResults, FxHashMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
+    ) -> Result<(CodegenResults, FxIndexMap<WorkProductId, WorkProduct>), ErrorGuaranteed> {
         let codegen_results = ongoing_codegen
             .downcast::<CodegenResults>()
             .expect("in join_codegen: ongoing_codegen is not a CodegenResults");
-        Ok((*codegen_results, FxHashMap::default()))
+        Ok((*codegen_results, FxIndexMap::default()))
     }
 
     fn link(