Diffstat (limited to 'compiler')
32 files changed, 740 insertions, 987 deletions
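One user-visible change in this diff is the rename of the `doc_spotlight` feature and the `#[doc(spotlight)]` attribute to `doc_notable_trait` / `#[doc(notable_trait)]`; the old spelling becomes a no-op and gets a machine-applicable rename suggestion (see rustc_passes/src/check_attr.rs below). A minimal sketch of how the renamed attribute is used on a nightly toolchain after this change (the trait shown is purely illustrative):

    #![feature(doc_notable_trait)]

    // Traits tagged this way are highlighted by rustdoc as "notable traits"
    // on functions whose return types implement them.
    #[doc(notable_trait)]
    pub trait MyNotableTrait {
        type Output;
    }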
diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs
index 2f73e44faf6..3724438cc6e 100644
--- a/compiler/rustc_ast_passes/src/feature_gate.rs
+++ b/compiler/rustc_ast_passes/src/feature_gate.rs
@@ -313,7 +313,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> {
            include => external_doc
            cfg => doc_cfg
            masked => doc_masked
-           spotlight => doc_spotlight
+           notable_trait => doc_notable_trait
            keyword => doc_keyword
        );
    }
diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs
index ce9f711b27e..f006351647e 100644
--- a/compiler/rustc_feature/src/accepted.rs
+++ b/compiler/rustc_feature/src/accepted.rs
@@ -276,7 +276,7 @@ declare_features! (
    /// The smallest useful subset of `const_generics`.
    (accepted, min_const_generics, "1.51.0", Some(74878), None),
    /// The `unsafe_op_in_unsafe_fn` lint (allowed by default): no longer treat an unsafe function as an unsafe block.
-   (accepted, unsafe_block_in_unsafe_fn, "1.51.0", Some(71668), None),
+   (accepted, unsafe_block_in_unsafe_fn, "1.52.0", Some(71668), None),
    /// Allows the use of or-patterns (e.g., `0 | 1`).
    (accepted, or_patterns, "1.53.0", Some(54883), None),

diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs
index fb08a7b8cea..040260f5cf5 100644
--- a/compiler/rustc_feature/src/active.rs
+++ b/compiler/rustc_feature/src/active.rs
@@ -216,6 +216,10 @@ declare_features! (
    /// Renamed from `optin_builtin_traits`.
    (active, auto_traits, "1.50.0", Some(13231), None),

+   /// Allows `#[doc(notable_trait)]`.
+   /// Renamed from `doc_spotlight`.
+   (active, doc_notable_trait, "1.52.0", Some(45040), None),
+
    // no-tracking-issue-end

    // -------------------------------------------------------------------------
@@ -374,9 +378,6 @@ declare_features! (
    /// Allows `#[doc(masked)]`.
    (active, doc_masked, "1.21.0", Some(44027), None),

-   /// Allows `#[doc(spotlight)]`.
-   (active, doc_spotlight, "1.22.0", Some(45040), None),
-
    /// Allows `#[doc(include = "some-file")]`.
    (active, external_doc, "1.22.0", Some(44732), None),

diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs
index aff66053c93..8ba67a00f8d 100644
--- a/compiler/rustc_feature/src/removed.rs
+++ b/compiler/rustc_feature/src/removed.rs
@@ -80,6 +80,11 @@ declare_features! (
        Some("subsumed by `#![feature(allocator_internals)]`")),
    /// Allows identifying crates that contain sanitizer runtimes.
    (removed, sanitizer_runtime, "1.17.0", None, None, None),
+   /// Allows `#[doc(spotlight)]`.
+   /// The attribute was renamed to `#[doc(notable_trait)]`
+   /// and the feature to `doc_notable_trait`.
+ (removed, doc_spotlight, "1.22.0", Some(45040), None, + Some("renamed to `doc_notable_trait`")), (removed, proc_macro_mod, "1.27.0", Some(54727), None, Some("subsumed by `#![feature(proc_macro_hygiene)]`")), (removed, proc_macro_expr, "1.27.0", Some(54727), None, diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs index a080b0ce339..b5680beae14 100644 --- a/compiler/rustc_incremental/src/assert_dep_graph.rs +++ b/compiler/rustc_incremental/src/assert_dep_graph.rs @@ -40,8 +40,9 @@ use rustc_graphviz as dot; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; -use rustc_middle::dep_graph::debug::{DepNodeFilter, EdgeFilter}; -use rustc_middle::dep_graph::{DepGraphQuery, DepKind, DepNode, DepNodeExt}; +use rustc_middle::dep_graph::{ + DepGraphQuery, DepKind, DepNode, DepNodeExt, DepNodeFilter, EdgeFilter, +}; use rustc_middle::hir::map::Map; use rustc_middle::ty::TyCtxt; use rustc_span::symbol::{sym, Symbol}; @@ -54,7 +55,7 @@ use std::io::{BufWriter, Write}; pub fn assert_dep_graph(tcx: TyCtxt<'_>) { tcx.dep_graph.with_ignore(|| { if tcx.sess.opts.debugging_opts.dump_dep_graph { - dump_graph(tcx); + tcx.dep_graph.with_query(dump_graph); } if !tcx.sess.opts.debugging_opts.query_dep_graph { @@ -200,29 +201,29 @@ fn check_paths<'tcx>(tcx: TyCtxt<'tcx>, if_this_changed: &Sources, then_this_wou } return; } - let query = tcx.dep_graph.query(); - for &(_, source_def_id, ref source_dep_node) in if_this_changed { - let dependents = query.transitive_predecessors(source_dep_node); - for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need { - if !dependents.contains(&target_dep_node) { - tcx.sess.span_err( - target_span, - &format!( - "no path from `{}` to `{}`", - tcx.def_path_str(source_def_id), - target_pass - ), - ); - } else { - tcx.sess.span_err(target_span, "OK"); + tcx.dep_graph.with_query(|query| { + for &(_, source_def_id, ref source_dep_node) in if_this_changed { + let dependents = query.transitive_predecessors(source_dep_node); + for &(target_span, ref target_pass, _, ref target_dep_node) in then_this_would_need { + if !dependents.contains(&target_dep_node) { + tcx.sess.span_err( + target_span, + &format!( + "no path from `{}` to `{}`", + tcx.def_path_str(source_def_id), + target_pass + ), + ); + } else { + tcx.sess.span_err(target_span, "OK"); + } } } - } + }); } -fn dump_graph(tcx: TyCtxt<'_>) { +fn dump_graph(query: &DepGraphQuery) { let path: String = env::var("RUST_DEP_GRAPH").unwrap_or_else(|_| "dep_graph".to_string()); - let query = tcx.dep_graph.query(); let nodes = match env::var("RUST_DEP_GRAPH_FILTER") { Ok(string) => { diff --git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs index 95456c07b10..f089cbcfca6 100644 --- a/compiler/rustc_incremental/src/lib.rs +++ b/compiler/rustc_incremental/src/lib.rs @@ -14,7 +14,7 @@ mod assert_dep_graph; pub mod assert_module_sources; mod persist; -pub use assert_dep_graph::assert_dep_graph; +use assert_dep_graph::assert_dep_graph; pub use persist::copy_cgu_workproduct_to_incr_comp_cache_dir; pub use persist::delete_workproduct_files; pub use persist::finalize_session_directory; @@ -26,4 +26,4 @@ pub use persist::prepare_session_directory; pub use persist::save_dep_graph; pub use persist::save_work_product_index; pub use persist::LoadResult; -pub use persist::{load_dep_graph, DepGraphFuture}; +pub use persist::{build_dep_graph, load_dep_graph, 
DepGraphFuture}; diff --git a/compiler/rustc_incremental/src/persist/dirty_clean.rs b/compiler/rustc_incremental/src/persist/dirty_clean.rs index 91b7221f205..e7bd488af8e 100644 --- a/compiler/rustc_incremental/src/persist/dirty_clean.rs +++ b/compiler/rustc_incremental/src/persist/dirty_clean.rs @@ -14,7 +14,6 @@ //! the required condition is not met. use rustc_ast::{self as ast, Attribute, NestedMetaItem}; -use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; @@ -381,10 +380,7 @@ impl DirtyCleanVisitor<'tcx> { fn assert_dirty(&self, item_span: Span, dep_node: DepNode) { debug!("assert_dirty({:?})", dep_node); - let current_fingerprint = self.get_fingerprint(&dep_node); - let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); - - if current_fingerprint == prev_fingerprint { + if self.tcx.dep_graph.is_green(&dep_node) { let dep_node_str = self.dep_node_str(&dep_node); self.tcx .sess @@ -392,28 +388,10 @@ impl DirtyCleanVisitor<'tcx> { } } - fn get_fingerprint(&self, dep_node: &DepNode) -> Option<Fingerprint> { - if self.tcx.dep_graph.dep_node_exists(dep_node) { - let dep_node_index = self.tcx.dep_graph.dep_node_index_of(dep_node); - Some(self.tcx.dep_graph.fingerprint_of(dep_node_index)) - } else { - None - } - } - fn assert_clean(&self, item_span: Span, dep_node: DepNode) { debug!("assert_clean({:?})", dep_node); - let current_fingerprint = self.get_fingerprint(&dep_node); - let prev_fingerprint = self.tcx.dep_graph.prev_fingerprint_of(&dep_node); - - // if the node wasn't previously evaluated and now is (or vice versa), - // then the node isn't actually clean or dirty. - if (current_fingerprint == None) ^ (prev_fingerprint == None) { - return; - } - - if current_fingerprint != prev_fingerprint { + if self.tcx.dep_graph.is_red(&dep_node) { let dep_node_str = self.dep_node_str(&dep_node); self.tcx .sess diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs index c7a6c1195c5..30c6c408bc7 100644 --- a/compiler/rustc_incremental/src/persist/fs.rs +++ b/compiler/rustc_incremental/src/persist/fs.rs @@ -122,6 +122,7 @@ mod tests; const LOCK_FILE_EXT: &str = ".lock"; const DEP_GRAPH_FILENAME: &str = "dep-graph.bin"; +const STAGING_DEP_GRAPH_FILENAME: &str = "dep-graph.part.bin"; const WORK_PRODUCTS_FILENAME: &str = "work-products.bin"; const QUERY_CACHE_FILENAME: &str = "query-cache.bin"; @@ -134,6 +135,9 @@ const INT_ENCODE_BASE: usize = base_n::CASE_INSENSITIVE; pub fn dep_graph_path(sess: &Session) -> PathBuf { in_incr_comp_dir_sess(sess, DEP_GRAPH_FILENAME) } +pub fn staging_dep_graph_path(sess: &Session) -> PathBuf { + in_incr_comp_dir_sess(sess, STAGING_DEP_GRAPH_FILENAME) +} pub fn dep_graph_path_from(incr_comp_session_dir: &Path) -> PathBuf { in_incr_comp_dir(incr_comp_session_dir, DEP_GRAPH_FILENAME) } diff --git a/compiler/rustc_incremental/src/persist/load.rs b/compiler/rustc_incremental/src/persist/load.rs index 2b5649bb059..259e540c612 100644 --- a/compiler/rustc_incremental/src/persist/load.rs +++ b/compiler/rustc_incremental/src/persist/load.rs @@ -5,7 +5,7 @@ use rustc_hir::definitions::Definitions; use rustc_middle::dep_graph::{PreviousDepGraph, SerializedDepGraph, WorkProduct, WorkProductId}; use rustc_middle::ty::query::OnDiskCache; use rustc_serialize::opaque::Decoder; -use rustc_serialize::Decodable as RustcDecodable; +use rustc_serialize::Decodable; use rustc_session::Session; use std::path::Path; @@ 
-120,7 +120,7 @@ pub fn load_dep_graph(sess: &Session) -> DepGraphFuture { // Decode the list of work_products let mut work_product_decoder = Decoder::new(&work_products_data[..], start_pos); let work_products: Vec<SerializedWorkProduct> = - RustcDecodable::decode(&mut work_product_decoder).unwrap_or_else(|e| { + Decodable::decode(&mut work_product_decoder).unwrap_or_else(|e| { let msg = format!( "Error decoding `work-products` from incremental \ compilation session directory: {}", diff --git a/compiler/rustc_incremental/src/persist/mod.rs b/compiler/rustc_incremental/src/persist/mod.rs index 8821b34b502..1336189bc0d 100644 --- a/compiler/rustc_incremental/src/persist/mod.rs +++ b/compiler/rustc_incremental/src/persist/mod.rs @@ -18,6 +18,7 @@ pub use fs::prepare_session_directory; pub use load::load_query_result_cache; pub use load::LoadResult; pub use load::{load_dep_graph, DepGraphFuture}; +pub use save::build_dep_graph; pub use save::save_dep_graph; pub use save::save_work_product_index; pub use work_product::copy_cgu_workproduct_to_incr_comp_cache_dir; diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs index 45d474b89b8..d558af3c1d5 100644 --- a/compiler/rustc_incremental/src/persist/save.rs +++ b/compiler/rustc_incremental/src/persist/save.rs @@ -1,6 +1,6 @@ use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::join; -use rustc_middle::dep_graph::{DepGraph, WorkProduct, WorkProductId}; +use rustc_middle::dep_graph::{DepGraph, PreviousDepGraph, WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use rustc_serialize::Encodable as RustcEncodable; @@ -15,6 +15,9 @@ use super::file_format; use super::fs::*; use super::work_product; +/// Save and dump the DepGraph. +/// +/// No query must be invoked after this function. 
pub fn save_dep_graph(tcx: TyCtxt<'_>) { debug!("save_dep_graph()"); tcx.dep_graph.with_ignore(|| { @@ -29,6 +32,14 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) { let query_cache_path = query_cache_path(sess); let dep_graph_path = dep_graph_path(sess); + let staging_dep_graph_path = staging_dep_graph_path(sess); + + sess.time("assert_dep_graph", || crate::assert_dep_graph(tcx)); + sess.time("check_dirty_clean", || dirty_clean::check_dirty_clean_annotations(tcx)); + + if sess.opts.debugging_opts.incremental_info { + tcx.dep_graph.print_incremental_info() + } join( move || { @@ -36,16 +47,26 @@ pub fn save_dep_graph(tcx: TyCtxt<'_>) { save_in(sess, query_cache_path, "query cache", |e| encode_query_cache(tcx, e)); }); }, - || { + move || { sess.time("incr_comp_persist_dep_graph", || { - save_in(sess, dep_graph_path, "dependency graph", |e| { - sess.time("incr_comp_encode_dep_graph", || encode_dep_graph(tcx, e)) - }); + if let Err(err) = tcx.dep_graph.encode(&tcx.sess.prof) { + sess.err(&format!( + "failed to write dependency graph to `{}`: {}", + staging_dep_graph_path.display(), + err + )); + } + if let Err(err) = fs::rename(&staging_dep_graph_path, &dep_graph_path) { + sess.err(&format!( + "failed to move dependency graph from `{}` to `{}`: {}", + staging_dep_graph_path.display(), + dep_graph_path.display(), + err + )); + } }); }, ); - - dirty_clean::check_dirty_clean_annotations(tcx); }) } @@ -92,7 +113,7 @@ pub fn save_work_product_index( }); } -fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F) +pub(crate) fn save_in<F>(sess: &Session, path_buf: PathBuf, name: &str, encode: F) where F: FnOnce(&mut FileEncoder) -> FileEncodeResult, { @@ -144,21 +165,6 @@ where debug!("save: data written to disk successfully"); } -fn encode_dep_graph(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult { - // First encode the commandline arguments hash - tcx.sess.opts.dep_tracking_hash().encode(encoder)?; - - if tcx.sess.opts.debugging_opts.incremental_info { - tcx.dep_graph.print_incremental_info(); - } - - // There is a tiny window between printing the incremental info above and encoding the dep - // graph below in which the dep graph could change, thus making the printed incremental info - // slightly out of date. If this matters to you, please feel free to submit a patch. :) - - tcx.sess.time("incr_comp_encode_serialized_dep_graph", || tcx.dep_graph.encode(encoder)) -} - fn encode_work_product_index( work_products: &FxHashMap<WorkProductId, WorkProduct>, encoder: &mut FileEncoder, @@ -177,3 +183,56 @@ fn encode_work_product_index( fn encode_query_cache(tcx: TyCtxt<'_>, encoder: &mut FileEncoder) -> FileEncodeResult { tcx.sess.time("incr_comp_serialize_result_cache", || tcx.serialize_query_result_cache(encoder)) } + +pub fn build_dep_graph( + sess: &Session, + prev_graph: PreviousDepGraph, + prev_work_products: FxHashMap<WorkProductId, WorkProduct>, +) -> Option<DepGraph> { + if sess.opts.incremental.is_none() { + // No incremental compilation. + return None; + } + + // Stream the dep-graph to an alternate file, to avoid overwriting anything in case of errors. 
+ let path_buf = staging_dep_graph_path(sess); + + let mut encoder = match FileEncoder::new(&path_buf) { + Ok(encoder) => encoder, + Err(err) => { + sess.err(&format!( + "failed to create dependency graph at `{}`: {}", + path_buf.display(), + err + )); + return None; + } + }; + + if let Err(err) = file_format::write_file_header(&mut encoder, sess.is_nightly_build()) { + sess.err(&format!( + "failed to write dependency graph header to `{}`: {}", + path_buf.display(), + err + )); + return None; + } + + // First encode the commandline arguments hash + if let Err(err) = sess.opts.dep_tracking_hash().encode(&mut encoder) { + sess.err(&format!( + "failed to write dependency graph hash `{}`: {}", + path_buf.display(), + err + )); + return None; + } + + Some(DepGraph::new( + prev_graph, + prev_work_products, + encoder, + sess.opts.debugging_opts.query_dep_graph, + sess.opts.debugging_opts.incremental_info, + )) +} diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 87d00287828..c693155994f 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -1021,9 +1021,6 @@ pub fn start_codegen<'tcx>( rustc_symbol_mangling::test::report_symbol_names(tcx); } - tcx.sess.time("assert_dep_graph", || rustc_incremental::assert_dep_graph(tcx)); - tcx.sess.time("serialize_dep_graph", || rustc_incremental::save_dep_graph(tcx)); - info!("Post-codegen\n{:?}", tcx.debug_stats()); if tcx.sess.opts.output_types.contains_key(&OutputType::Mir) { diff --git a/compiler/rustc_interface/src/queries.rs b/compiler/rustc_interface/src/queries.rs index 9c38d2b91ab..01853eab530 100644 --- a/compiler/rustc_interface/src/queries.rs +++ b/compiler/rustc_interface/src/queries.rs @@ -207,7 +207,13 @@ impl<'tcx> Queries<'tcx> { }) .open(self.session()) }); - DepGraph::new(prev_graph, prev_work_products) + + rustc_incremental::build_dep_graph( + self.session(), + prev_graph, + prev_work_products, + ) + .unwrap_or_else(DepGraph::new_disabled) } }) }) @@ -435,6 +441,9 @@ impl Compiler { if self.session().opts.debugging_opts.query_stats { gcx.enter(rustc_query_impl::print_stats); } + + self.session() + .time("serialize_dep_graph", || gcx.enter(rustc_incremental::save_dep_graph)); } _timer = Some(self.session().timer("free_global_ctxt")); diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs index c688b23be1d..d2fe9af34fb 100644 --- a/compiler/rustc_middle/src/dep_graph/mod.rs +++ b/compiler/rustc_middle/src/dep_graph/mod.rs @@ -8,8 +8,8 @@ use rustc_session::Session; mod dep_node; pub use rustc_query_system::dep_graph::{ - debug, hash_result, DepContext, DepNodeColor, DepNodeIndex, SerializedDepNodeIndex, - WorkProduct, WorkProductId, + debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex, + SerializedDepNodeIndex, WorkProduct, WorkProductId, }; crate use dep_node::make_compile_codegen_unit; @@ -20,6 +20,7 @@ pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>; pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>; pub type PreviousDepGraph = rustc_query_system::dep_graph::PreviousDepGraph<DepKind>; pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>; +pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>; impl rustc_query_system::dep_graph::DepKind for DepKind { const NULL: Self = DepKind::Null; diff --git a/compiler/rustc_middle/src/traits/mod.rs 
b/compiler/rustc_middle/src/traits/mod.rs index 922d14bc2f5..00dec3b355f 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -323,6 +323,9 @@ pub enum ObligationCauseCode<'tcx> { /// #[feature(trivial_bounds)] is not enabled TrivialBound, + + /// If `X` is the concrete type of an opaque type `impl Y`, then `X` must implement `Y` + OpaqueType, } impl ObligationCauseCode<'_> { diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 4767e5743c2..d295b17d902 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -511,13 +511,18 @@ impl<T> Trait<T> for X { "consider constraining the associated type `{}` to `{}`", values.found, values.expected, ); - if !self.suggest_constraint( + if !(self.suggest_constraining_opaque_associated_type( + db, + &msg, + proj_ty, + values.expected, + ) || self.suggest_constraint( db, &msg, body_owner_def_id, proj_ty, values.expected, - ) { + )) { db.help(&msg); db.note( "for more information, visit \ @@ -701,20 +706,7 @@ impl<T> Trait<T> for X { } } - if let ty::Opaque(def_id, _) = *proj_ty.self_ty().kind() { - // When the expected `impl Trait` is not defined in the current item, it will come from - // a return type. This can occur when dealing with `TryStream` (#71035). - if self.constrain_associated_type_structured_suggestion( - db, - self.def_span(def_id), - &assoc, - proj_ty.trait_ref_and_own_substs(self).1, - values.found, - &msg, - ) { - return; - } - } + self.suggest_constraining_opaque_associated_type(db, &msg, proj_ty, values.found); if self.point_at_associated_type(db, body_owner_def_id, values.found) { return; @@ -752,6 +744,30 @@ fn foo(&self) -> Self::T { String::new() } } } + /// When the expected `impl Trait` is not defined in the current item, it will come from + /// a return type. This can occur when dealing with `TryStream` (#71035). + fn suggest_constraining_opaque_associated_type( + self, + db: &mut DiagnosticBuilder<'_>, + msg: &str, + proj_ty: &ty::ProjectionTy<'tcx>, + ty: Ty<'tcx>, + ) -> bool { + let assoc = self.associated_item(proj_ty.item_def_id); + if let ty::Opaque(def_id, _) = *proj_ty.self_ty().kind() { + self.constrain_associated_type_structured_suggestion( + db, + self.def_span(def_id), + &assoc, + proj_ty.trait_ref_and_own_substs(self).1, + ty, + &msg, + ) + } else { + false + } + } + fn point_at_methods_that_satisfy_associated_type( self, db: &mut DiagnosticBuilder<'_>, diff --git a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs index 77d91366224..1bb447d1057 100644 --- a/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs +++ b/compiler/rustc_mir/src/borrow_check/type_check/input_output.rs @@ -70,6 +70,12 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // Equate expected input tys with those in the MIR. for (argument_index, &normalized_input_ty) in normalized_input_tys.iter().enumerate() { + if argument_index + 1 >= body.local_decls.len() { + self.tcx() + .sess + .delay_span_bug(body.span, "found more normalized_input_ty than local_decls"); + break; + } // In MIR, argument N is stored in local N+1. 
let local = Local::new(argument_index + 1); diff --git a/compiler/rustc_mir_build/src/build/expr/into.rs b/compiler/rustc_mir_build/src/build/expr/into.rs index 2097f38c25d..80a5bff8cad 100644 --- a/compiler/rustc_mir_build/src/build/expr/into.rs +++ b/compiler/rustc_mir_build/src/build/expr/into.rs @@ -111,18 +111,17 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ExprKind::LogicalOp { op, lhs, rhs } => { // And: // - // [block: If(lhs)] -true-> [else_block: If(rhs)] -true-> [true_block] - // | | (false) - // +----------false-----------+------------------> [false_block] + // [block: If(lhs)] -true-> [else_block: dest = (rhs)] + // | (false) + // [shortcurcuit_block: dest = false] // // Or: // - // [block: If(lhs)] -false-> [else_block: If(rhs)] -true-> [true_block] - // | (true) | (false) - // [true_block] [false_block] + // [block: If(lhs)] -false-> [else_block: dest = (rhs)] + // | (true) + // [shortcurcuit_block: dest = true] - let (true_block, false_block, mut else_block, join_block) = ( - this.cfg.start_new_block(), + let (shortcircuit_block, mut else_block, join_block) = ( this.cfg.start_new_block(), this.cfg.start_new_block(), this.cfg.start_new_block(), @@ -130,41 +129,31 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let lhs = unpack!(block = this.as_local_operand(block, lhs)); let blocks = match op { - LogicalOp::And => (else_block, false_block), - LogicalOp::Or => (true_block, else_block), + LogicalOp::And => (else_block, shortcircuit_block), + LogicalOp::Or => (shortcircuit_block, else_block), }; let term = TerminatorKind::if_(this.tcx, lhs, blocks.0, blocks.1); this.cfg.terminate(block, source_info, term); - let rhs = unpack!(else_block = this.as_local_operand(else_block, rhs)); - let term = TerminatorKind::if_(this.tcx, rhs, true_block, false_block); - this.cfg.terminate(else_block, source_info, term); - this.cfg.push_assign_constant( - true_block, + shortcircuit_block, source_info, destination, Constant { span: expr_span, user_ty: None, - literal: ty::Const::from_bool(this.tcx, true).into(), + literal: match op { + LogicalOp::And => ty::Const::from_bool(this.tcx, false).into(), + LogicalOp::Or => ty::Const::from_bool(this.tcx, true).into(), + }, }, ); + this.cfg.goto(shortcircuit_block, source_info, join_block); - this.cfg.push_assign_constant( - false_block, - source_info, - destination, - Constant { - span: expr_span, - user_ty: None, - literal: ty::Const::from_bool(this.tcx, false).into(), - }, - ); + let rhs = unpack!(else_block = this.as_local_operand(else_block, rhs)); + this.cfg.push_assign(else_block, source_info, destination, Rvalue::Use(rhs)); + this.cfg.goto(else_block, source_info, join_block); - // Link up both branches: - this.cfg.goto(true_block, source_info, join_block); - this.cfg.goto(false_block, source_info, join_block); join_block.unit() } ExprKind::Loop { body } => { diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 85add83f88b..d1a3971f569 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -9,7 +9,7 @@ use rustc_middle::ty::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_ast::{Attribute, Lit, LitKind, NestedMetaItem}; -use rustc_errors::{pluralize, struct_span_err}; +use rustc_errors::{pluralize, struct_span_err, Applicability}; use rustc_hir as hir; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; @@ -648,10 +648,10 @@ impl CheckAttrVisitor<'tcx> { | sym::masked | sym::no_default_passes | sym::no_inline + | 
sym::notable_trait | sym::passes | sym::plugins | sym::primitive - | sym::spotlight | sym::test => {} _ => { @@ -660,11 +660,23 @@ impl CheckAttrVisitor<'tcx> { hir_id, i_meta.span, |lint| { - let msg = format!( + let mut diag = lint.build(&format!( "unknown `doc` attribute `{}`", rustc_ast_pretty::pprust::path_to_string(&i_meta.path), - ); - lint.build(&msg).emit(); + )); + if i_meta.has_name(sym::spotlight) { + diag.note( + "`doc(spotlight)` was renamed to `doc(notable_trait)`", + ); + diag.span_suggestion_short( + i_meta.span, + "use `notable_trait` instead", + String::from("notable_trait"), + Applicability::MachineApplicable, + ); + diag.note("`doc(spotlight)` is now a no-op"); + } + diag.emit(); }, ); is_valid = false; diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index d958b3c18cd..4194b28dc7d 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -477,10 +477,7 @@ macro_rules! define_queries { return } - debug_assert!(tcx.dep_graph - .node_color(dep_node) - .map(|c| c.is_green()) - .unwrap_or(false)); + debug_assert!(tcx.dep_graph.is_green(dep_node)); let key = recover(*tcx, dep_node).unwrap_or_else(|| panic!("Failed to recover key for {:?} with hash {}", dep_node, dep_node.hash)); if queries::$name::cache_on_disk(tcx, &key, None) { diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs index 43429cd11a2..a544ac2c343 100644 --- a/compiler/rustc_query_system/src/dep_graph/debug.rs +++ b/compiler/rustc_query_system/src/dep_graph/debug.rs @@ -1,6 +1,8 @@ //! Code for debugging the dep-graph. -use super::{DepKind, DepNode}; +use super::{DepKind, DepNode, DepNodeIndex}; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::sync::Lock; use std::error::Error; /// A dep-node filter goes from a user-defined string to a query over @@ -34,13 +36,14 @@ impl DepNodeFilter { /// A filter like `F -> G` where `F` and `G` are valid dep-node /// filters. This can be used to test the source/target independently. 
-pub struct EdgeFilter { +pub struct EdgeFilter<K: DepKind> { pub source: DepNodeFilter, pub target: DepNodeFilter, + pub index_to_node: Lock<FxHashMap<DepNodeIndex, DepNode<K>>>, } -impl EdgeFilter { - pub fn new(test: &str) -> Result<EdgeFilter, Box<dyn Error>> { +impl<K: DepKind> EdgeFilter<K> { + pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> { let parts: Vec<_> = test.split("->").collect(); if parts.len() != 2 { Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into()) @@ -48,12 +51,13 @@ impl EdgeFilter { Ok(EdgeFilter { source: DepNodeFilter::new(parts[0]), target: DepNodeFilter::new(parts[1]), + index_to_node: Lock::new(FxHashMap::default()), }) } } #[cfg(debug_assertions)] - pub fn test<K: DepKind>(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool { + pub fn test(&self, source: &DepNode<K>, target: &DepNode<K>) -> bool { self.source.test(source) && self.target.test(target) } } diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 9fe2497a57b..7a0fc320663 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1,31 +1,33 @@ use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::profiling::QueryInvocationId; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sharded::{self, Sharded}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, LockGuard, Lrc, Ordering}; +use rustc_data_structures::steal::Steal; +use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering}; use rustc_data_structures::unlikely; use rustc_errors::Diagnostic; -use rustc_index::vec::{Idx, IndexVec}; -use rustc_serialize::{Encodable, Encoder}; +use rustc_index::vec::IndexVec; +use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use parking_lot::{Condvar, Mutex}; use smallvec::{smallvec, SmallVec}; use std::collections::hash_map::Entry; -use std::env; use std::hash::Hash; use std::marker::PhantomData; use std::mem; -use std::ops::Range; use std::sync::atomic::Ordering::Relaxed; -use super::debug::EdgeFilter; use super::prev::PreviousDepGraph; use super::query::DepGraphQuery; -use super::serialized::SerializedDepNodeIndex; +use super::serialized::{GraphEncoder, SerializedDepNodeIndex}; use super::{DepContext, DepKind, DepNode, HasDepContext, WorkProductId}; use crate::query::QueryContext; +#[cfg(debug_assertions)] +use {super::debug::EdgeFilter, std::env}; + #[derive(Clone)] pub struct DepGraph<K: DepKind> { data: Option<Lrc<DepGraphData<K>>>, @@ -109,6 +111,9 @@ impl<K: DepKind> DepGraph<K> { pub fn new( prev_graph: PreviousDepGraph<K>, prev_work_products: FxHashMap<WorkProductId, WorkProduct>, + encoder: FileEncoder, + record_graph: bool, + record_stats: bool, ) -> DepGraph<K> { let prev_graph_node_count = prev_graph.node_count(); @@ -116,7 +121,12 @@ impl<K: DepKind> DepGraph<K> { data: Some(Lrc::new(DepGraphData { previous_work_products: prev_work_products, dep_node_debug: Default::default(), - current: CurrentDepGraph::new(prev_graph_node_count), + current: CurrentDepGraph::new( + prev_graph_node_count, + encoder, + record_graph, + record_stats, + ), emitting_diagnostics: Default::default(), emitting_diagnostics_cond_var: Condvar::new(), previous: prev_graph, @@ -136,62 +146,10 @@ impl<K: DepKind> DepGraph<K> { 
self.data.is_some() } - pub fn query(&self) -> DepGraphQuery<K> { - let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - - // Note locking order: `prev_index_to_index`, then `data`. - let prev_index_to_index = data.current.prev_index_to_index.lock(); - let data = data.current.data.lock(); - let node_count = data.hybrid_indices.len(); - let edge_count = self.edge_count(&data); - - let mut nodes = Vec::with_capacity(node_count); - let mut edge_list_indices = Vec::with_capacity(node_count); - let mut edge_list_data = Vec::with_capacity(edge_count); - - // See `DepGraph`'s `Encodable` implementation for notes on the approach used here. - - edge_list_data.extend(data.unshared_edges.iter().map(|i| i.index())); - - for &hybrid_index in data.hybrid_indices.iter() { - match hybrid_index.into() { - HybridIndex::New(new_index) => { - nodes.push(data.new.nodes[new_index]); - let edges = &data.new.edges[new_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::Red(red_index) => { - nodes.push(previous.index_to_node(data.red.node_indices[red_index])); - let edges = &data.red.edges[red_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::LightGreen(lg_index) => { - nodes.push(previous.index_to_node(data.light_green.node_indices[lg_index])); - let edges = &data.light_green.edges[lg_index]; - edge_list_indices.push((edges.start.index(), edges.end.index())); - } - HybridIndex::DarkGreen(prev_index) => { - nodes.push(previous.index_to_node(prev_index)); - - let edges_iter = previous - .edge_targets_from(prev_index) - .iter() - .map(|&dst| prev_index_to_index[dst].unwrap().index()); - - let start = edge_list_data.len(); - edge_list_data.extend(edges_iter); - let end = edge_list_data.len(); - edge_list_indices.push((start, end)); - } - } + pub fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) { + if let Some(data) = &self.data { + data.current.encoder.borrow().with_query(f) } - - debug_assert_eq!(nodes.len(), node_count); - debug_assert_eq!(edge_list_indices.len(), node_count); - debug_assert_eq!(edge_list_data.len(), edge_count); - - DepGraphQuery::new(&nodes[..], &edge_list_indices[..], &edge_list_data[..]) } pub fn assert_ignored(&self) { @@ -283,56 +241,16 @@ impl<K: DepKind> DepGraph<K> { let print_status = cfg!(debug_assertions) && dcx.sess().opts.debugging_opts.dep_tasks; // Intern the new `DepNode`. - let dep_node_index = if let Some(prev_index) = data.previous.node_to_index_opt(&key) { - // Determine the color and index of the new `DepNode`. - let (color, dep_node_index) = if let Some(current_fingerprint) = current_fingerprint - { - if current_fingerprint == data.previous.fingerprint_by_index(prev_index) { - if print_status { - eprintln!("[task::green] {:?}", key); - } - - // This is a light green node: it existed in the previous compilation, - // its query was re-executed, and it has the same result as before. - let dep_node_index = - data.current.intern_light_green_node(&data.previous, prev_index, edges); - - (DepNodeColor::Green(dep_node_index), dep_node_index) - } else { - if print_status { - eprintln!("[task::red] {:?}", key); - } - - // This is a red node: it existed in the previous compilation, its query - // was re-executed, but it has a different result from before. 
- let dep_node_index = data.current.intern_red_node( - &data.previous, - prev_index, - edges, - current_fingerprint, - ); - - (DepNodeColor::Red, dep_node_index) - } - } else { - if print_status { - eprintln!("[task::unknown] {:?}", key); - } - - // This is a red node, effectively: it existed in the previous compilation - // session, its query was re-executed, but it doesn't compute a result hash - // (i.e. it represents a `no_hash` query), so we have no way of determining - // whether or not the result was the same as before. - let dep_node_index = data.current.intern_red_node( - &data.previous, - prev_index, - edges, - Fingerprint::ZERO, - ); - - (DepNodeColor::Red, dep_node_index) - }; + let (dep_node_index, prev_and_color) = data.current.intern_node( + dcx.profiler(), + &data.previous, + key, + edges, + current_fingerprint, + print_status, + ); + if let Some((prev_index, color)) = prev_and_color { debug_assert!( data.colors.get(prev_index).is_none(), "DepGraph::with_task() - Duplicate DepNodeColor \ @@ -341,20 +259,7 @@ impl<K: DepKind> DepGraph<K> { ); data.colors.insert(prev_index, color); - dep_node_index - } else { - if print_status { - eprintln!("[task::new] {:?}", key); - } - - // This is a new node: it didn't exist in the previous compilation session. - data.current.intern_new_node( - &data.previous, - key, - edges, - current_fingerprint.unwrap_or(Fingerprint::ZERO), - ) - }; + } (result, dep_node_index) } else { @@ -368,7 +273,12 @@ impl<K: DepKind> DepGraph<K> { /// Executes something within an "anonymous" task, that is, a task the /// `DepNode` of which is determined by the list of inputs it read from. - pub fn with_anon_task<OP, R>(&self, dep_kind: K, op: OP) -> (R, DepNodeIndex) + pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>( + &self, + cx: Ctxt, + dep_kind: K, + op: OP, + ) -> (R, DepNodeIndex) where OP: FnOnce() -> R, { @@ -396,7 +306,7 @@ impl<K: DepKind> DepGraph<K> { }; let dep_node_index = data.current.intern_new_node( - &data.previous, + cx.profiler(), target_dep_node, task_deps.reads, Fingerprint::ZERO, @@ -451,7 +361,7 @@ impl<K: DepKind> DepGraph<K> { { if let Some(target) = task_deps.node { if let Some(ref forbidden_edge) = data.current.forbidden_edge { - let src = self.dep_node_of(dep_node_index); + let src = forbidden_edge.index_to_node.lock()[&dep_node_index]; if forbidden_edge.test(&src, &target) { panic!("forbidden edge {:?} -> {:?} created", src, target) } @@ -488,38 +398,6 @@ impl<K: DepKind> DepGraph<K> { self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some() } - #[cfg(debug_assertions)] - fn dep_node_of(&self, dep_node_index: DepNodeIndex) -> DepNode<K> { - let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - let data = data.current.data.lock(); - - match data.hybrid_indices[dep_node_index].into() { - HybridIndex::New(new_index) => data.new.nodes[new_index], - HybridIndex::Red(red_index) => previous.index_to_node(data.red.node_indices[red_index]), - HybridIndex::LightGreen(light_green_index) => { - previous.index_to_node(data.light_green.node_indices[light_green_index]) - } - HybridIndex::DarkGreen(prev_index) => previous.index_to_node(prev_index), - } - } - - #[inline] - pub fn fingerprint_of(&self, dep_node_index: DepNodeIndex) -> Fingerprint { - let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - let data = data.current.data.lock(); - - match data.hybrid_indices[dep_node_index].into() { - HybridIndex::New(new_index) => data.new.fingerprints[new_index], - 
HybridIndex::Red(red_index) => data.red.fingerprints[red_index], - HybridIndex::LightGreen(light_green_index) => { - previous.fingerprint_by_index(data.light_green.node_indices[light_green_index]) - } - HybridIndex::DarkGreen(prev_index) => previous.fingerprint_by_index(prev_index), - } - } - pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> { self.data.as_ref().unwrap().previous.fingerprint_of(dep_node) } @@ -554,29 +432,13 @@ impl<K: DepKind> DepGraph<K> { self.data.as_ref()?.dep_node_debug.borrow().get(&dep_node).cloned() } - fn edge_count(&self, node_data: &LockGuard<'_, DepNodeData<K>>) -> usize { - let data = self.data.as_ref().unwrap(); - let previous = &data.previous; - - let mut edge_count = node_data.unshared_edges.len(); - - for &hybrid_index in node_data.hybrid_indices.iter() { - if let HybridIndex::DarkGreen(prev_index) = hybrid_index.into() { - edge_count += previous.edge_targets_from(prev_index).len() - } - } - - edge_count - } - - pub fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { + fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> { if let Some(ref data) = self.data { if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) { return data.colors.get(prev_index); } else { - // This is a node that did not exist in the previous compilation - // session, so we consider it to be red. - return Some(DepNodeColor::Red); + // This is a node that did not exist in the previous compilation session. + return None; } } @@ -775,11 +637,13 @@ impl<K: DepKind> DepGraph<K> { // There may be multiple threads trying to mark the same dep node green concurrently - let dep_node_index = { - // We allocating an entry for the node in the current dependency graph and - // adding all the appropriate edges imported from the previous graph - data.current.intern_dark_green_node(&data.previous, prev_dep_node_index) - }; + // We allocating an entry for the node in the current dependency graph and + // adding all the appropriate edges imported from the previous graph + let dep_node_index = data.current.promote_node_and_deps_to_current( + tcx.dep_context().profiler(), + &data.previous, + prev_dep_node_index, + ); // ... emitting any stored diagnostic ... @@ -862,6 +726,12 @@ impl<K: DepKind> DepGraph<K> { } } + // Returns true if the given node has been marked as red during the + // current compilation session. Used in various assertions + pub fn is_red(&self, dep_node: &DepNode<K>) -> bool { + self.node_color(dep_node) == Some(DepNodeColor::Red) + } + // Returns true if the given node has been marked as green during the // current compilation session. 
Used in various assertions pub fn is_green(&self, dep_node: &DepNode<K>) -> bool { @@ -911,106 +781,20 @@ impl<K: DepKind> DepGraph<K> { } pub fn print_incremental_info(&self) { - #[derive(Clone)] - struct Stat<Kind: DepKind> { - kind: Kind, - node_counter: u64, - edge_counter: u64, + if let Some(data) = &self.data { + data.current.encoder.borrow().print_incremental_info( + data.current.total_read_count.load(Relaxed), + data.current.total_duplicate_read_count.load(Relaxed), + ) } + } - let data = self.data.as_ref().unwrap(); - let prev = &data.previous; - let current = &data.current; - let data = current.data.lock(); - - let mut stats: FxHashMap<_, Stat<K>> = FxHashMap::with_hasher(Default::default()); - - for &hybrid_index in data.hybrid_indices.iter() { - let (kind, edge_count) = match hybrid_index.into() { - HybridIndex::New(new_index) => { - let kind = data.new.nodes[new_index].kind; - let edge_range = &data.new.edges[new_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::Red(red_index) => { - let kind = prev.index_to_node(data.red.node_indices[red_index]).kind; - let edge_range = &data.red.edges[red_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::LightGreen(lg_index) => { - let kind = prev.index_to_node(data.light_green.node_indices[lg_index]).kind; - let edge_range = &data.light_green.edges[lg_index]; - (kind, edge_range.end.as_usize() - edge_range.start.as_usize()) - } - HybridIndex::DarkGreen(prev_index) => { - let kind = prev.index_to_node(prev_index).kind; - let edge_count = prev.edge_targets_from(prev_index).len(); - (kind, edge_count) - } - }; - - let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 }); - stat.node_counter += 1; - stat.edge_counter += edge_count as u64; - } - - let total_node_count = data.hybrid_indices.len(); - let total_edge_count = self.edge_count(&data); - - // Drop the lock guard. - std::mem::drop(data); - - let mut stats: Vec<_> = stats.values().cloned().collect(); - stats.sort_by_key(|s| -(s.node_counter as i64)); - - const SEPARATOR: &str = "[incremental] --------------------------------\ - ----------------------------------------------\ - ------------"; - - eprintln!("[incremental]"); - eprintln!("[incremental] DepGraph Statistics"); - eprintln!("{}", SEPARATOR); - eprintln!("[incremental]"); - eprintln!("[incremental] Total Node Count: {}", total_node_count); - eprintln!("[incremental] Total Edge Count: {}", total_edge_count); - - if cfg!(debug_assertions) { - let total_edge_reads = current.total_read_count.load(Relaxed); - let total_duplicate_edge_reads = current.total_duplicate_read_count.load(Relaxed); - - eprintln!("[incremental] Total Edge Reads: {}", total_edge_reads); - eprintln!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads); - } - - eprintln!("[incremental]"); - - eprintln!( - "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|", - "Node Kind", "Node Frequency", "Node Count", "Avg. 
Edge Count" - ); - - eprintln!( - "[incremental] -------------------------------------\ - |------------------\ - |-------------\ - |------------------|" - ); - - for stat in stats { - let node_kind_ratio = (100.0 * (stat.node_counter as f64)) / (total_node_count as f64); - let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64); - - eprintln!( - "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |", - format!("{:?}", stat.kind), - node_kind_ratio, - stat.node_counter, - node_kind_avg_edges, - ); + pub fn encode(&self, profiler: &SelfProfilerRef) -> FileEncodeResult { + if let Some(data) = &self.data { + data.current.encoder.steal().finish(profiler) + } else { + Ok(()) } - - eprintln!("{}", SEPARATOR); - eprintln!("[incremental]"); } fn next_virtual_depnode_index(&self) -> DepNodeIndex { @@ -1019,142 +803,6 @@ impl<K: DepKind> DepGraph<K> { } } -impl<E: Encoder, K: DepKind + Encodable<E>> Encodable<E> for DepGraph<K> { - fn encode(&self, e: &mut E) -> Result<(), E::Error> { - // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph` - // using data copied from the `DepGraph`. But copying created a large memory spike, so we - // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we - // deserialize that data into a `SerializedDepGraph` in the next compilation session, we - // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to - // be in sync. If you update this encoding, be sure to update the decoding, and vice-versa. - - let data = self.data.as_ref().unwrap(); - let prev = &data.previous; - - // Note locking order: `prev_index_to_index`, then `data`. - let prev_index_to_index = data.current.prev_index_to_index.lock(); - let data = data.current.data.lock(); - let new = &data.new; - let red = &data.red; - let lg = &data.light_green; - - let node_count = data.hybrid_indices.len(); - let edge_count = self.edge_count(&data); - - // `rustc_middle::ty::query::OnDiskCache` expects nodes to be encoded in `DepNodeIndex` - // order. The edges in `edge_list_data` don't need to be in a particular order, as long as - // each node references its edges as a contiguous range within it. Therefore, we can encode - // `edge_list_data` directly from `unshared_edges`. It meets the above requirements, as - // each non-dark-green node already knows the range of edges to reference within it, which - // they'll encode in `edge_list_indices`. Dark green nodes, however, don't have their edges - // in `unshared_edges`, so need to add them to `edge_list_data`. - - use HybridIndex::*; - - // Encoded values (nodes, etc.) are explicitly typed below to avoid inadvertently - // serializing data in the wrong format (i.e. one incompatible with `SerializedDepGraph`). - e.emit_struct("SerializedDepGraph", 4, |e| { - e.emit_struct_field("nodes", 0, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `DepNode`s. - e.emit_seq(node_count, |e| { - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let node: DepNode<K> = match hybrid_index.into() { - New(i) => new.nodes[i], - Red(i) => prev.index_to_node(red.node_indices[i]), - LightGreen(i) => prev.index_to_node(lg.node_indices[i]), - DarkGreen(prev_index) => prev.index_to_node(prev_index), - }; - - e.emit_seq_elt(seq_index, |e| node.encode(e))?; - } - - Ok(()) - }) - })?; - - e.emit_struct_field("fingerprints", 1, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `Fingerprints`s. 
- e.emit_seq(node_count, |e| { - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let fingerprint: Fingerprint = match hybrid_index.into() { - New(i) => new.fingerprints[i], - Red(i) => red.fingerprints[i], - LightGreen(i) => prev.fingerprint_by_index(lg.node_indices[i]), - DarkGreen(prev_index) => prev.fingerprint_by_index(prev_index), - }; - - e.emit_seq_elt(seq_index, |e| fingerprint.encode(e))?; - } - - Ok(()) - }) - })?; - - e.emit_struct_field("edge_list_indices", 2, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of `(u32, u32)`s. - e.emit_seq(node_count, |e| { - // Dark green node edges start after the unshared (all other nodes') edges. - let mut dark_green_edge_index = data.unshared_edges.len(); - - for (seq_index, &hybrid_index) in data.hybrid_indices.iter().enumerate() { - let edge_indices: (u32, u32) = match hybrid_index.into() { - New(i) => (new.edges[i].start.as_u32(), new.edges[i].end.as_u32()), - Red(i) => (red.edges[i].start.as_u32(), red.edges[i].end.as_u32()), - LightGreen(i) => (lg.edges[i].start.as_u32(), lg.edges[i].end.as_u32()), - DarkGreen(prev_index) => { - let edge_count = prev.edge_targets_from(prev_index).len(); - let start = dark_green_edge_index as u32; - dark_green_edge_index += edge_count; - let end = dark_green_edge_index as u32; - (start, end) - } - }; - - e.emit_seq_elt(seq_index, |e| edge_indices.encode(e))?; - } - - assert_eq!(dark_green_edge_index, edge_count); - - Ok(()) - }) - })?; - - e.emit_struct_field("edge_list_data", 3, |e| { - // `SerializedDepGraph` expects this to be encoded as a sequence of - // `SerializedDepNodeIndex`. - e.emit_seq(edge_count, |e| { - for (seq_index, &edge) in data.unshared_edges.iter().enumerate() { - let serialized_edge = SerializedDepNodeIndex::new(edge.index()); - e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?; - } - - let mut seq_index = data.unshared_edges.len(); - - for &hybrid_index in data.hybrid_indices.iter() { - if let DarkGreen(prev_index) = hybrid_index.into() { - for &edge in prev.edge_targets_from(prev_index) { - // Dark green node edges are stored in the previous graph - // and must be converted to edges in the current graph, - // and then serialized as `SerializedDepNodeIndex`. - let serialized_edge = SerializedDepNodeIndex::new( - prev_index_to_index[edge].as_ref().unwrap().index(), - ); - - e.emit_seq_elt(seq_index, |e| serialized_edge.encode(e))?; - seq_index += 1; - } - } - } - - assert_eq!(seq_index, edge_count); - - Ok(()) - }) - }) - }) - } -} - /// A "work product" is an intermediate result that we save into the /// incremental directory for later re-use. The primary example are /// the object files that we save for each partition at code @@ -1193,216 +841,20 @@ pub struct WorkProduct { pub saved_file: Option<String>, } -// The maximum value of the follow index types leaves the upper two bits unused -// so that we can store multiple index types in `CompressedHybridIndex`, and use -// those bits to encode which index type it contains. - -// Index type for `NewDepNodeData`. -rustc_index::newtype_index! { - struct NewDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -// Index type for `RedDepNodeData`. -rustc_index::newtype_index! { - struct RedDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -// Index type for `LightGreenDepNodeData`. -rustc_index::newtype_index! { - struct LightGreenDepNodeIndex { - MAX = 0x7FFF_FFFF - } -} - -/// Compressed representation of `HybridIndex` enum. 
Bits unused by the -/// contained index types are used to encode which index type it contains. -#[derive(Copy, Clone)] -struct CompressedHybridIndex(u32); - -impl CompressedHybridIndex { - const NEW_TAG: u32 = 0b0000_0000_0000_0000_0000_0000_0000_0000; - const RED_TAG: u32 = 0b0100_0000_0000_0000_0000_0000_0000_0000; - const LIGHT_GREEN_TAG: u32 = 0b1000_0000_0000_0000_0000_0000_0000_0000; - const DARK_GREEN_TAG: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000; - - const TAG_MASK: u32 = 0b1100_0000_0000_0000_0000_0000_0000_0000; - const INDEX_MASK: u32 = !Self::TAG_MASK; -} - -impl From<NewDepNodeIndex> for CompressedHybridIndex { - #[inline] - fn from(index: NewDepNodeIndex) -> Self { - CompressedHybridIndex(Self::NEW_TAG | index.as_u32()) - } -} - -impl From<RedDepNodeIndex> for CompressedHybridIndex { - #[inline] - fn from(index: RedDepNodeIndex) -> Self { - CompressedHybridIndex(Self::RED_TAG | index.as_u32()) - } -} - -impl From<LightGreenDepNodeIndex> for CompressedHybridIndex { - #[inline] - fn from(index: LightGreenDepNodeIndex) -> Self { - CompressedHybridIndex(Self::LIGHT_GREEN_TAG | index.as_u32()) - } -} - -impl From<SerializedDepNodeIndex> for CompressedHybridIndex { - #[inline] - fn from(index: SerializedDepNodeIndex) -> Self { - CompressedHybridIndex(Self::DARK_GREEN_TAG | index.as_u32()) - } -} - -/// Contains an index into one of several node data collections. Elsewhere, we -/// store `CompressedHyridIndex` instead of this to save space, but convert to -/// this type during processing to take advantage of the enum match ergonomics. -enum HybridIndex { - New(NewDepNodeIndex), - Red(RedDepNodeIndex), - LightGreen(LightGreenDepNodeIndex), - DarkGreen(SerializedDepNodeIndex), -} - -impl From<CompressedHybridIndex> for HybridIndex { - #[inline] - fn from(hybrid_index: CompressedHybridIndex) -> Self { - let index = hybrid_index.0 & CompressedHybridIndex::INDEX_MASK; - - match hybrid_index.0 & CompressedHybridIndex::TAG_MASK { - CompressedHybridIndex::NEW_TAG => HybridIndex::New(NewDepNodeIndex::from_u32(index)), - CompressedHybridIndex::RED_TAG => HybridIndex::Red(RedDepNodeIndex::from_u32(index)), - CompressedHybridIndex::LIGHT_GREEN_TAG => { - HybridIndex::LightGreen(LightGreenDepNodeIndex::from_u32(index)) - } - CompressedHybridIndex::DARK_GREEN_TAG => { - HybridIndex::DarkGreen(SerializedDepNodeIndex::from_u32(index)) - } - _ => unreachable!(), - } - } -} - // Index type for `DepNodeData`'s edges. rustc_index::newtype_index! { struct EdgeIndex { .. } } -/// Data for nodes in the current graph, divided into different collections -/// based on their presence in the previous graph, and if present, their color. -/// We divide nodes this way because different types of nodes are able to share -/// more or less data with the previous graph. -/// -/// To enable more sharing, we distinguish between two kinds of green nodes. -/// Light green nodes are nodes in the previous graph that have been marked -/// green because we re-executed their queries and the results were the same as -/// in the previous session. Dark green nodes are nodes in the previous graph -/// that have been marked green because we were able to mark all of their -/// dependencies green. 
-/// -/// Both light and dark green nodes can share the dep node and fingerprint with -/// the previous graph, but for light green nodes, we can't be sure that the -/// edges may be shared without comparing them against the previous edges, so we -/// store them directly (an approach in which we compare edges with the previous -/// edges to see if they can be shared was evaluated, but was not found to be -/// very profitable). -/// -/// For dark green nodes, we can share everything with the previous graph, which -/// is why the `HybridIndex::DarkGreen` enum variant contains the index of the -/// node in the previous graph, and why we don't have a separate collection for -/// dark green node data--the collection is the `PreviousDepGraph` itself. -/// -/// (Note that for dark green nodes, the edges in the previous graph -/// (`SerializedDepNodeIndex`s) must be converted to edges in the current graph -/// (`DepNodeIndex`s). `CurrentDepGraph` contains `prev_index_to_index`, which -/// can perform this conversion. It should always be possible, as by definition, -/// a dark green node is one whose dependencies from the previous session have -/// all been marked green--which means `prev_index_to_index` contains them.) -/// -/// Node data is stored in parallel vectors to eliminate the padding between -/// elements that would be needed to satisfy alignment requirements of the -/// structure that would contain all of a node's data. We could group tightly -/// packing subsets of node data together and use fewer vectors, but for -/// consistency's sake, we use separate vectors for each piece of data. -struct DepNodeData<K> { - /// Data for nodes not in previous graph. - new: NewDepNodeData<K>, - - /// Data for nodes in previous graph that have been marked red. - red: RedDepNodeData, - - /// Data for nodes in previous graph that have been marked light green. - light_green: LightGreenDepNodeData, - - // Edges for all nodes other than dark-green ones. Edges for each node - // occupy a contiguous region of this collection, which a node can reference - // using two indices. Storing edges this way rather than using an `EdgesVec` - // for each node reduces memory consumption by a not insignificant amount - // when compiling large crates. The downside is that we have to copy into - // this collection the edges from the `EdgesVec`s that are built up during - // query execution. But this is mostly balanced out by the more efficient - // implementation of `DepGraph::serialize` enabled by this representation. - unshared_edges: IndexVec<EdgeIndex, DepNodeIndex>, - - /// Mapping from `DepNodeIndex` to an index into a collection above. - /// Indicates which of the above collections contains a node's data. - /// - /// This collection is wasteful in time and space during incr-full builds, - /// because for those, all nodes are new. However, the waste is relatively - /// small, and the maintenance cost of avoiding using this for incr-full - /// builds is somewhat high and prone to bugginess. It does not seem worth - /// it at the time of this writing, but we may want to revisit the idea. - hybrid_indices: IndexVec<DepNodeIndex, CompressedHybridIndex>, -} - -/// Data for nodes not in previous graph. Since we cannot share any data with -/// the previous graph, so we must store all of such a node's data here. 
-struct NewDepNodeData<K> { - nodes: IndexVec<NewDepNodeIndex, DepNode<K>>, - edges: IndexVec<NewDepNodeIndex, Range<EdgeIndex>>, - fingerprints: IndexVec<NewDepNodeIndex, Fingerprint>, -} - -/// Data for nodes in previous graph that have been marked red. We can share the -/// dep node with the previous graph, but the edges may be different, and the -/// fingerprint is known to be different, so we store the latter two directly. -struct RedDepNodeData { - node_indices: IndexVec<RedDepNodeIndex, SerializedDepNodeIndex>, - edges: IndexVec<RedDepNodeIndex, Range<EdgeIndex>>, - fingerprints: IndexVec<RedDepNodeIndex, Fingerprint>, -} - -/// Data for nodes in previous graph that have been marked green because we -/// re-executed their queries and the results were the same as in the previous -/// session. We can share the dep node and the fingerprint with the previous -/// graph, but the edges may be different, so we store them directly. -struct LightGreenDepNodeData { - node_indices: IndexVec<LightGreenDepNodeIndex, SerializedDepNodeIndex>, - edges: IndexVec<LightGreenDepNodeIndex, Range<EdgeIndex>>, -} - /// `CurrentDepGraph` stores the dependency graph for the current session. It /// will be populated as we run queries or tasks. We never remove nodes from the /// graph: they are only added. /// -/// The nodes in it are identified by a `DepNodeIndex`. Internally, this maps to -/// a `HybridIndex`, which identifies which collection in the `data` field -/// contains a node's data. Which collection is used for a node depends on -/// whether the node was present in the `PreviousDepGraph`, and if so, the color -/// of the node. Each type of node can share more or less data with the previous -/// graph. When possible, we can store just the index of the node in the -/// previous graph, rather than duplicating its data in our own collections. -/// This is important, because these graph structures are some of the largest in -/// the compiler. +/// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes +/// in memory. This is important, because these graph structures are some of the +/// largest in the compiler. /// -/// For the same reason, we also avoid storing `DepNode`s more than once as map +/// For this reason, we avoid storing `DepNode`s more than once as map /// keys. The `new_node_to_index` map only contains nodes not in the previous /// graph, and we map nodes in the previous graph to indices via a two-step /// mapping. `PreviousDepGraph` maps from `DepNode` to `SerializedDepNodeIndex`, @@ -1417,15 +869,15 @@ struct LightGreenDepNodeData { /// `new_node_to_index` and `data`, or `prev_index_to_index` and `data`. When /// manipulating both, we acquire `new_node_to_index` or `prev_index_to_index` /// first, and `data` second. -pub(super) struct CurrentDepGraph<K> { - data: Lock<DepNodeData<K>>, +pub(super) struct CurrentDepGraph<K: DepKind> { + encoder: Steal<GraphEncoder<K>>, new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>, prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>, /// Used to trap when a specific edge is added to the graph. /// This is used for debug purposes and is only active with `debug_assertions`. - #[allow(dead_code)] - forbidden_edge: Option<EdgeFilter>, + #[cfg(debug_assertions)] + forbidden_edge: Option<EdgeFilter<K>>, /// Anonymous `DepNode`s are nodes whose IDs we compute from the list of /// their edges. 
This has the beneficial side-effect that multiple anonymous @@ -1447,7 +899,12 @@ pub(super) struct CurrentDepGraph<K> { } impl<K: DepKind> CurrentDepGraph<K> { - fn new(prev_graph_node_count: usize) -> CurrentDepGraph<K> { + fn new( + prev_graph_node_count: usize, + encoder: FileEncoder, + record_graph: bool, + record_stats: bool, + ) -> CurrentDepGraph<K> { use std::time::{SystemTime, UNIX_EPOCH}; let duration = SystemTime::now().duration_since(UNIX_EPOCH).unwrap(); @@ -1455,70 +912,29 @@ impl<K: DepKind> CurrentDepGraph<K> { let mut stable_hasher = StableHasher::new(); nanos.hash(&mut stable_hasher); - let forbidden_edge = if cfg!(debug_assertions) { - match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { - Ok(s) => match EdgeFilter::new(&s) { - Ok(f) => Some(f), - Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), - }, - Err(_) => None, - } - } else { - None + #[cfg(debug_assertions)] + let forbidden_edge = match env::var("RUST_FORBID_DEP_GRAPH_EDGE") { + Ok(s) => match EdgeFilter::new(&s) { + Ok(f) => Some(f), + Err(err) => panic!("RUST_FORBID_DEP_GRAPH_EDGE invalid: {}", err), + }, + Err(_) => None, }; - // Pre-allocate the dep node structures. We over-allocate a little so - // that we hopefully don't have to re-allocate during this compilation - // session. The over-allocation for new nodes is 2% plus a small - // constant to account for the fact that in very small crates 2% might - // not be enough. The allocation for red and green node data doesn't - // include a constant, as we don't want to allocate anything for these - // structures during full incremental builds, where they aren't used. - // - // These estimates are based on the distribution of node and edge counts - // seen in rustc-perf benchmarks, adjusted somewhat to account for the - // fact that these benchmarks aren't perfectly representative. - // - // FIXME Use a collection type that doesn't copy node and edge data and - // grow multiplicatively on reallocation. Without such a collection or - // solution having the same effect, there is a performance hazard here - // in both time and space, as growing these collections means copying a - // large amount of data and doubling already large buffer capacities. A - // solution for this will also mean that it's less important to get - // these estimates right. - let new_node_count_estimate = (prev_graph_node_count * 2) / 100 + 200; - let red_node_count_estimate = (prev_graph_node_count * 3) / 100; - let light_green_node_count_estimate = (prev_graph_node_count * 25) / 100; - let total_node_count_estimate = prev_graph_node_count + new_node_count_estimate; - - let average_edges_per_node_estimate = 6; - let unshared_edge_count_estimate = average_edges_per_node_estimate - * (new_node_count_estimate + red_node_count_estimate + light_green_node_count_estimate); - // We store a large collection of these in `prev_index_to_index` during // non-full incremental builds, and want to ensure that the element size // doesn't inadvertently increase. 
static_assert_size!(Option<DepNodeIndex>, 4); + let new_node_count_estimate = 102 * prev_graph_node_count / 100 + 200; + CurrentDepGraph { - data: Lock::new(DepNodeData { - new: NewDepNodeData { - nodes: IndexVec::with_capacity(new_node_count_estimate), - edges: IndexVec::with_capacity(new_node_count_estimate), - fingerprints: IndexVec::with_capacity(new_node_count_estimate), - }, - red: RedDepNodeData { - node_indices: IndexVec::with_capacity(red_node_count_estimate), - edges: IndexVec::with_capacity(red_node_count_estimate), - fingerprints: IndexVec::with_capacity(red_node_count_estimate), - }, - light_green: LightGreenDepNodeData { - node_indices: IndexVec::with_capacity(light_green_node_count_estimate), - edges: IndexVec::with_capacity(light_green_node_count_estimate), - }, - unshared_edges: IndexVec::with_capacity(unshared_edge_count_estimate), - hybrid_indices: IndexVec::with_capacity(total_node_count_estimate), - }), + encoder: Steal::new(GraphEncoder::new( + encoder, + prev_graph_node_count, + record_graph, + record_stats, + )), new_node_to_index: Sharded::new(|| { FxHashMap::with_capacity_and_hasher( new_node_count_estimate / sharded::SHARDS, @@ -1527,89 +943,143 @@ impl<K: DepKind> CurrentDepGraph<K> { }), prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)), anon_id_seed: stable_hasher.finish(), + #[cfg(debug_assertions)] forbidden_edge, total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), } } + #[cfg(debug_assertions)] + fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) { + if let Some(forbidden_edge) = &self.forbidden_edge { + forbidden_edge.index_to_node.lock().insert(dep_node_index, key); + } + } + + /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it. + /// Assumes that this is a node that has no equivalent in the previous dep-graph. fn intern_new_node( &self, - prev_graph: &PreviousDepGraph<K>, - dep_node: DepNode<K>, + profiler: &SelfProfilerRef, + key: DepNode<K>, edges: EdgesVec, - fingerprint: Fingerprint, + current_fingerprint: Fingerprint, ) -> DepNodeIndex { - debug_assert!( - prev_graph.node_to_index_opt(&dep_node).is_none(), - "node in previous graph should be interned using one \ - of `intern_red_node`, `intern_light_green_node`, etc." 
- ); - - match self.new_node_to_index.get_shard_by_value(&dep_node).lock().entry(dep_node) { + match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => { - let data = &mut *self.data.lock(); - let new_index = data.new.nodes.push(dep_node); - add_edges(&mut data.unshared_edges, &mut data.new.edges, edges); - data.new.fingerprints.push(fingerprint); - let dep_node_index = data.hybrid_indices.push(new_index.into()); + let dep_node_index = + self.encoder.borrow().send(profiler, key, current_fingerprint, edges); entry.insert(dep_node_index); + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key); dep_node_index } } } - fn intern_red_node( + fn intern_node( &self, + profiler: &SelfProfilerRef, prev_graph: &PreviousDepGraph<K>, - prev_index: SerializedDepNodeIndex, + key: DepNode<K>, edges: EdgesVec, - fingerprint: Fingerprint, - ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); + fingerprint: Option<Fingerprint>, + print_status: bool, + ) -> (DepNodeIndex, Option<(SerializedDepNodeIndex, DepNodeColor)>) { + let print_status = cfg!(debug_assertions) && print_status; + + if let Some(prev_index) = prev_graph.node_to_index_opt(&key) { + // Determine the color and index of the new `DepNode`. + if let Some(fingerprint) = fingerprint { + if fingerprint == prev_graph.fingerprint_by_index(prev_index) { + if print_status { + eprintln!("[task::green] {:?}", key); + } - let mut prev_index_to_index = self.prev_index_to_index.lock(); + // This is a green node: it existed in the previous compilation, + // its query was re-executed, and it has the same result as before. + let mut prev_index_to_index = self.prev_index_to_index.lock(); + + let dep_node_index = match prev_index_to_index[prev_index] { + Some(dep_node_index) => dep_node_index, + None => { + let dep_node_index = + self.encoder.borrow().send(profiler, key, fingerprint, edges); + prev_index_to_index[prev_index] = Some(dep_node_index); + dep_node_index + } + }; - match prev_index_to_index[prev_index] { - Some(dep_node_index) => dep_node_index, - None => { - let data = &mut *self.data.lock(); - let red_index = data.red.node_indices.push(prev_index); - add_edges(&mut data.unshared_edges, &mut data.red.edges, edges); - data.red.fingerprints.push(fingerprint); - let dep_node_index = data.hybrid_indices.push(red_index.into()); - prev_index_to_index[prev_index] = Some(dep_node_index); - dep_node_index - } - } - } + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key); + (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index)))) + } else { + if print_status { + eprintln!("[task::red] {:?}", key); + } - fn intern_light_green_node( - &self, - prev_graph: &PreviousDepGraph<K>, - prev_index: SerializedDepNodeIndex, - edges: EdgesVec, - ) -> DepNodeIndex { - self.debug_assert_not_in_new_nodes(prev_graph, prev_index); + // This is a red node: it existed in the previous compilation, its query + // was re-executed, but it has a different result from before. 
+ let mut prev_index_to_index = self.prev_index_to_index.lock(); + + let dep_node_index = match prev_index_to_index[prev_index] { + Some(dep_node_index) => dep_node_index, + None => { + let dep_node_index = + self.encoder.borrow().send(profiler, key, fingerprint, edges); + prev_index_to_index[prev_index] = Some(dep_node_index); + dep_node_index + } + }; - let mut prev_index_to_index = self.prev_index_to_index.lock(); + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key); + (dep_node_index, Some((prev_index, DepNodeColor::Red))) + } + } else { + if print_status { + eprintln!("[task::unknown] {:?}", key); + } - match prev_index_to_index[prev_index] { - Some(dep_node_index) => dep_node_index, - None => { - let data = &mut *self.data.lock(); - let light_green_index = data.light_green.node_indices.push(prev_index); - add_edges(&mut data.unshared_edges, &mut data.light_green.edges, edges); - let dep_node_index = data.hybrid_indices.push(light_green_index.into()); - prev_index_to_index[prev_index] = Some(dep_node_index); - dep_node_index + // This is a red node, effectively: it existed in the previous compilation + // session, its query was re-executed, but it doesn't compute a result hash + // (i.e. it represents a `no_hash` query), so we have no way of determining + // whether or not the result was the same as before. + let mut prev_index_to_index = self.prev_index_to_index.lock(); + + let dep_node_index = match prev_index_to_index[prev_index] { + Some(dep_node_index) => dep_node_index, + None => { + let dep_node_index = + self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges); + prev_index_to_index[prev_index] = Some(dep_node_index); + dep_node_index + } + }; + + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key); + (dep_node_index, Some((prev_index, DepNodeColor::Red))) + } + } else { + if print_status { + eprintln!("[task::new] {:?}", key); } + + let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO); + + // This is a new node: it didn't exist in the previous compilation session. 
+ let dep_node_index = self.intern_new_node(profiler, key, edges, fingerprint); + + (dep_node_index, None) } } - fn intern_dark_green_node( + fn promote_node_and_deps_to_current( &self, + profiler: &SelfProfilerRef, prev_graph: &PreviousDepGraph<K>, prev_index: SerializedDepNodeIndex, ) -> DepNodeIndex { @@ -1620,9 +1090,20 @@ impl<K: DepKind> CurrentDepGraph<K> { match prev_index_to_index[prev_index] { Some(dep_node_index) => dep_node_index, None => { - let mut data = self.data.lock(); - let dep_node_index = data.hybrid_indices.push(prev_index.into()); + let key = prev_graph.index_to_node(prev_index); + let dep_node_index = self.encoder.borrow().send( + profiler, + key, + prev_graph.fingerprint_by_index(prev_index), + prev_graph + .edge_targets_from(prev_index) + .iter() + .map(|i| prev_index_to_index[*i].unwrap()) + .collect(), + ); prev_index_to_index[prev_index] = Some(dep_node_index); + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key); dep_node_index } } @@ -1642,18 +1123,6 @@ impl<K: DepKind> CurrentDepGraph<K> { } } -#[inline] -fn add_edges<I: Idx>( - edges: &mut IndexVec<EdgeIndex, DepNodeIndex>, - edge_indices: &mut IndexVec<I, Range<EdgeIndex>>, - new_edges: EdgesVec, -) { - let start = edges.next_index(); - edges.extend(new_edges); - let end = edges.next_index(); - edge_indices.push(start..end); -} - /// The capacity of the `reads` field `SmallVec` const TASK_DEPS_READS_CAP: usize = 8; type EdgesVec = SmallVec<[DepNodeIndex; TASK_DEPS_READS_CAP]>; diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs index e8fb71be3e0..1b6ecf3e637 100644 --- a/compiler/rustc_query_system/src/dep_graph/mod.rs +++ b/compiler/rustc_query_system/src/dep_graph/mod.rs @@ -13,6 +13,7 @@ pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sync::Lock; +use rustc_serialize::{opaque::FileEncoder, Encodable}; use rustc_session::Session; use std::fmt; @@ -59,7 +60,7 @@ impl<T: DepContext> HasDepContext for T { } /// Describe the different families of dependency nodes. -pub trait DepKind: Copy + fmt::Debug + Eq + Hash { +pub trait DepKind: Copy + fmt::Debug + Eq + Hash + Send + Encodable<FileEncoder> + 'static { const NULL: Self; /// Return whether this kind always require evaluation. 
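The `intern_node` path above reduces to a three-way decision: a node that already exists in the previous graph is green when its freshly computed fingerprint matches the stored one and red otherwise (a query with no fingerprint, i.e. a `no_hash` query, is also treated as red), while a node with no previous counterpart is simply new and uncoloured. In every branch the record is streamed straight to the encoder rather than kept in memory. The sketch below restates that decision outside the compiler; `NodeKey`, `Fingerprint`, and `Encoder` are simplified stand-ins, not rustc's `DepNode<K>`, `Fingerprint`, or `GraphEncoder`, and the deduplication through `new_node_to_index`/`prev_index_to_index` is omitted.

// Minimal sketch of the colour decision made by `intern_node`, with stand-in types.
use std::collections::HashMap;

type Fingerprint = u64;
type NodeKey = &'static str;
type NodeIndex = usize;

#[derive(Debug, PartialEq)]
enum Color {
    Green,
    Red,
}

struct Encoder {
    records: Vec<(NodeKey, Fingerprint, Vec<NodeIndex>)>,
}

impl Encoder {
    // Streams one node record out and hands back its index; nothing else is retained.
    fn send(&mut self, key: NodeKey, fp: Fingerprint, edges: Vec<NodeIndex>) -> NodeIndex {
        self.records.push((key, fp, edges));
        self.records.len() - 1
    }
}

fn intern_node(
    prev_fingerprints: &HashMap<NodeKey, Fingerprint>,
    encoder: &mut Encoder,
    key: NodeKey,
    edges: Vec<NodeIndex>,
    fingerprint: Option<Fingerprint>,
) -> (NodeIndex, Option<Color>) {
    match (prev_fingerprints.get(key), fingerprint) {
        // Present in the previous graph with an identical result: green.
        (Some(&prev), Some(cur)) if prev == cur => {
            (encoder.send(key, cur, edges), Some(Color::Green))
        }
        // Present but the result changed, or the query carries no hash: red.
        (Some(_), cur) => (encoder.send(key, cur.unwrap_or(0), edges), Some(Color::Red)),
        // Not in the previous graph at all: a new node, with no colour.
        (None, cur) => (encoder.send(key, cur.unwrap_or(0), edges), None),
    }
}

fn main() {
    let prev = HashMap::from([("type_of", 41u64), ("mir_built", 7)]);
    let mut enc = Encoder { records: Vec::new() };

    assert_eq!(intern_node(&prev, &mut enc, "type_of", vec![], Some(41)).1, Some(Color::Green));
    assert_eq!(intern_node(&prev, &mut enc, "mir_built", vec![0], Some(9)).1, Some(Color::Red));
    assert_eq!(intern_node(&prev, &mut enc, "trait_impls_of", vec![1], Some(3)).1, None);
}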
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index e678a16249b..27b3b5e1366 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -1,34 +1,43 @@
 use rustc_data_structures::fx::FxHashMap;
 use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
+use rustc_index::vec::IndexVec;

-use super::{DepKind, DepNode};
+use super::{DepKind, DepNode, DepNodeIndex};

 pub struct DepGraphQuery<K> {
     pub graph: Graph<DepNode<K>, ()>,
     pub indices: FxHashMap<DepNode<K>, NodeIndex>,
+    pub dep_index_to_index: IndexVec<DepNodeIndex, Option<NodeIndex>>,
 }

 impl<K: DepKind> DepGraphQuery<K> {
-    pub fn new(
-        nodes: &[DepNode<K>],
-        edge_list_indices: &[(usize, usize)],
-        edge_list_data: &[usize],
-    ) -> DepGraphQuery<K> {
-        let mut graph = Graph::with_capacity(nodes.len(), edge_list_data.len());
-        let mut indices = FxHashMap::default();
-        for node in nodes {
-            indices.insert(*node, graph.add_node(*node));
+    pub fn new(prev_node_count: usize) -> DepGraphQuery<K> {
+        let node_count = prev_node_count + prev_node_count / 4;
+        let edge_count = 6 * node_count;
+
+        let graph = Graph::with_capacity(node_count, edge_count);
+        let indices = FxHashMap::default();
+        let dep_index_to_index = IndexVec::new();
+
+        DepGraphQuery { graph, indices, dep_index_to_index }
+    }
+
+    pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
+        let source = self.graph.add_node(node);
+        if index.index() >= self.dep_index_to_index.len() {
+            self.dep_index_to_index.resize(index.index() + 1, None);
         }
+        self.dep_index_to_index[index] = Some(source);
+        self.indices.insert(node, source);

-        for (source, &(start, end)) in edge_list_indices.iter().enumerate() {
-            for &target in &edge_list_data[start..end] {
-                let source = indices[&nodes[source]];
-                let target = indices[&nodes[target]];
-                graph.add_edge(source, target, ());
+        for &target in edges.iter() {
+            let target = self.dep_index_to_index[target];
+            // We may miss the edges that are pushed while the `DepGraphQuery` is being accessed.
+            // Skip them to avoid issues.
+            if let Some(target) = target {
+                self.graph.add_edge(source, target, ());
             }
         }
-
-        DepGraphQuery { graph, indices }
     }

     pub fn nodes(&self) -> Vec<&DepNode<K>> {
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 9bb922b0a90..6f3d1fb7199 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -1,9 +1,28 @@
 //! The data that we will serialize and deserialize.
+//!
+//! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies
+//! specified inline. The total numbers of nodes and edges are stored as the last
+//! 16 bytes of the file, so we can find them easily at decoding time.
+//!
+//! The serialisation is performed on-demand when each node is emitted. Using this
+//! scheme, we do not need to keep the current graph in memory.
+//!
+//! The deserialisation is performed manually, in order to convert from the stored
+//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the
+//! node and edge counts are stored at the end of the file, all the arrays can be
+//! pre-allocated with the right length.
-use super::{DepKind, DepNode}; +use super::query::DepGraphQuery; +use super::{DepKind, DepNode, DepNodeIndex}; use rustc_data_structures::fingerprint::Fingerprint; -use rustc_index::vec::IndexVec; -use rustc_serialize::{Decodable, Decoder}; +use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::profiling::SelfProfilerRef; +use rustc_data_structures::sync::Lock; +use rustc_index::vec::{Idx, IndexVec}; +use rustc_serialize::opaque::{self, FileEncodeResult, FileEncoder, IntEncodedWithFixedSize}; +use rustc_serialize::{Decodable, Decoder, Encodable}; +use smallvec::SmallVec; +use std::convert::TryInto; // The maximum value of `SerializedDepNodeIndex` leaves the upper two bits // unused so that we can store multiple index types in `CompressedHybridIndex`, @@ -50,78 +69,239 @@ impl<K: DepKind> SerializedDepGraph<K> { } } -impl<D: Decoder, K: DepKind + Decodable<D>> Decodable<D> for SerializedDepGraph<K> { - fn decode(d: &mut D) -> Result<SerializedDepGraph<K>, D::Error> { - // We used to serialize the dep graph by creating and serializing a `SerializedDepGraph` - // using data copied from the `DepGraph`. But copying created a large memory spike, so we - // now serialize directly from the `DepGraph` as if it's a `SerializedDepGraph`. Because we - // deserialize that data into a `SerializedDepGraph` in the next compilation session, we - // need `DepGraph`'s `Encodable` and `SerializedDepGraph`'s `Decodable` implementations to - // be in sync. If you update this decoding, be sure to update the encoding, and vice-versa. - // - // We mimic the sequence of `Encode` and `Encodable` method calls used by the `DepGraph`'s - // `Encodable` implementation with the corresponding sequence of `Decode` and `Decodable` - // method calls. E.g. `Decode::read_struct` pairs with `Encode::emit_struct`, `DepNode`'s - // `decode` pairs with `DepNode`'s `encode`, and so on. Any decoding methods not associated - // with corresponding encoding methods called in `DepGraph`'s `Encodable` implementation - // are off limits, because we'd be relying on their implementation details. - // - // For example, because we know it happens to do the right thing, its tempting to just use - // `IndexVec`'s `Decodable` implementation to decode into some of the collections below, - // even though `DepGraph` doesn't use its `Encodable` implementation. But the `IndexVec` - // implementation could change, and we'd have a bug. - // - // Variables below are explicitly typed so that anyone who changes the `SerializedDepGraph` - // representation without updating this function will encounter a compilation error, and - // know to update this and possibly the `DepGraph` `Encodable` implementation accordingly - // (the latter should serialize data in a format compatible with our representation). 
- - d.read_struct("SerializedDepGraph", 4, |d| { - let nodes: IndexVec<SerializedDepNodeIndex, DepNode<K>> = - d.read_struct_field("nodes", 0, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; +impl<'a, K: DepKind + Decodable<opaque::Decoder<'a>>> Decodable<opaque::Decoder<'a>> + for SerializedDepGraph<K> +{ + #[instrument(skip(d))] + fn decode(d: &mut opaque::Decoder<'a>) -> Result<SerializedDepGraph<K>, String> { + let start_position = d.position(); - let fingerprints: IndexVec<SerializedDepNodeIndex, Fingerprint> = - d.read_struct_field("fingerprints", 1, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; + // The last 16 bytes are the node count and edge count. + debug!("position: {:?}", d.position()); + d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE); + debug!("position: {:?}", d.position()); - let edge_list_indices: IndexVec<SerializedDepNodeIndex, (u32, u32)> = d - .read_struct_field("edge_list_indices", 2, |d| { - d.read_seq(|d, len| { - let mut v = IndexVec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); - } - Ok(v) - }) - })?; + let node_count = IntEncodedWithFixedSize::decode(d)?.0 as usize; + let edge_count = IntEncodedWithFixedSize::decode(d)?.0 as usize; + debug!(?node_count, ?edge_count); + + debug!("position: {:?}", d.position()); + d.set_position(start_position); + debug!("position: {:?}", d.position()); + + let mut nodes = IndexVec::with_capacity(node_count); + let mut fingerprints = IndexVec::with_capacity(node_count); + let mut edge_list_indices = IndexVec::with_capacity(node_count); + let mut edge_list_data = Vec::with_capacity(edge_count); + + for _index in 0..node_count { + d.read_struct("NodeInfo", 3, |d| { + let dep_node: DepNode<K> = d.read_struct_field("node", 0, Decodable::decode)?; + let _i: SerializedDepNodeIndex = nodes.push(dep_node); + debug_assert_eq!(_i.index(), _index); - let edge_list_data: Vec<SerializedDepNodeIndex> = - d.read_struct_field("edge_list_data", 3, |d| { + let fingerprint: Fingerprint = + d.read_struct_field("fingerprint", 1, Decodable::decode)?; + let _i: SerializedDepNodeIndex = fingerprints.push(fingerprint); + debug_assert_eq!(_i.index(), _index); + + d.read_struct_field("edges", 2, |d| { d.read_seq(|d, len| { - let mut v = Vec::with_capacity(len); - for i in 0..len { - v.push(d.read_seq_elt(i, |d| Decodable::decode(d))?); + let start = edge_list_data.len().try_into().unwrap(); + for e in 0..len { + let edge = d.read_seq_elt(e, Decodable::decode)?; + edge_list_data.push(edge); } - Ok(v) + let end = edge_list_data.len().try_into().unwrap(); + let _i: SerializedDepNodeIndex = edge_list_indices.push((start, end)); + debug_assert_eq!(_i.index(), _index); + Ok(()) }) - })?; + }) + })?; + } + + Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) + } +} + +#[derive(Debug, Encodable, Decodable)] +pub struct NodeInfo<K: DepKind> { + node: DepNode<K>, + fingerprint: Fingerprint, + edges: SmallVec<[DepNodeIndex; 8]>, +} + +struct Stat<K: DepKind> { + kind: K, + node_counter: u64, + edge_counter: u64, +} + +struct EncoderState<K: DepKind> { + encoder: FileEncoder, + total_node_count: usize, + total_edge_count: usize, + result: FileEncodeResult, + stats: Option<FxHashMap<K, Stat<K>>>, +} + +impl<K: 
DepKind> EncoderState<K> { + fn new(encoder: FileEncoder, record_stats: bool) -> Self { + Self { + encoder, + total_edge_count: 0, + total_node_count: 0, + result: Ok(()), + stats: if record_stats { Some(FxHashMap::default()) } else { None }, + } + } + + #[instrument(skip(self, record_graph))] + fn encode_node( + &mut self, + node: &NodeInfo<K>, + record_graph: &Option<Lock<DepGraphQuery<K>>>, + ) -> DepNodeIndex { + let index = DepNodeIndex::new(self.total_node_count); + self.total_node_count += 1; + + let edge_count = node.edges.len(); + self.total_edge_count += edge_count; + + if let Some(record_graph) = &record_graph { + // Do not ICE when a query is called from within `with_query`. + if let Some(record_graph) = &mut record_graph.try_lock() { + record_graph.push(index, node.node, &node.edges); + } + } + + if let Some(stats) = &mut self.stats { + let kind = node.node.kind; + + let stat = stats.entry(kind).or_insert(Stat { kind, node_counter: 0, edge_counter: 0 }); + stat.node_counter += 1; + stat.edge_counter += edge_count as u64; + } + + debug!(?index, ?node); + let encoder = &mut self.encoder; + if self.result.is_ok() { + self.result = node.encode(encoder); + } + index + } + + fn finish(self) -> FileEncodeResult { + let Self { mut encoder, total_node_count, total_edge_count, result, stats: _ } = self; + let () = result?; + + let node_count = total_node_count.try_into().unwrap(); + let edge_count = total_edge_count.try_into().unwrap(); + + debug!(?node_count, ?edge_count); + debug!("position: {:?}", encoder.position()); + IntEncodedWithFixedSize(node_count).encode(&mut encoder)?; + IntEncodedWithFixedSize(edge_count).encode(&mut encoder)?; + debug!("position: {:?}", encoder.position()); + // Drop the encoder so that nothing is written after the counts. 
+ encoder.flush() + } +} + +pub struct GraphEncoder<K: DepKind> { + status: Lock<EncoderState<K>>, + record_graph: Option<Lock<DepGraphQuery<K>>>, +} + +impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> { + pub fn new( + encoder: FileEncoder, + prev_node_count: usize, + record_graph: bool, + record_stats: bool, + ) -> Self { + let record_graph = + if record_graph { Some(Lock::new(DepGraphQuery::new(prev_node_count))) } else { None }; + let status = Lock::new(EncoderState::new(encoder, record_stats)); + GraphEncoder { status, record_graph } + } + + pub(crate) fn with_query(&self, f: impl Fn(&DepGraphQuery<K>)) { + if let Some(record_graph) = &self.record_graph { + f(&record_graph.lock()) + } + } + + pub(crate) fn print_incremental_info( + &self, + total_read_count: u64, + total_duplicate_read_count: u64, + ) { + let status = self.status.lock(); + if let Some(record_stats) = &status.stats { + let mut stats: Vec<_> = record_stats.values().collect(); + stats.sort_by_key(|s| -(s.node_counter as i64)); + + const SEPARATOR: &str = "[incremental] --------------------------------\ + ----------------------------------------------\ + ------------"; + + eprintln!("[incremental]"); + eprintln!("[incremental] DepGraph Statistics"); + eprintln!("{}", SEPARATOR); + eprintln!("[incremental]"); + eprintln!("[incremental] Total Node Count: {}", status.total_node_count); + eprintln!("[incremental] Total Edge Count: {}", status.total_edge_count); + + if cfg!(debug_assertions) { + eprintln!("[incremental] Total Edge Reads: {}", total_read_count); + eprintln!( + "[incremental] Total Duplicate Edge Reads: {}", + total_duplicate_read_count + ); + } + + eprintln!("[incremental]"); + eprintln!( + "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|", + "Node Kind", "Node Frequency", "Node Count", "Avg. 
Edge Count" + ); + eprintln!("{}", SEPARATOR); + + for stat in stats { + let node_kind_ratio = + (100.0 * (stat.node_counter as f64)) / (status.total_node_count as f64); + let node_kind_avg_edges = (stat.edge_counter as f64) / (stat.node_counter as f64); + + eprintln!( + "[incremental] {:<36}|{:>16.1}% |{:>12} |{:>17.1} |", + format!("{:?}", stat.kind), + node_kind_ratio, + stat.node_counter, + node_kind_avg_edges, + ); + } + + eprintln!("{}", SEPARATOR); + eprintln!("[incremental]"); + } + } + + pub(crate) fn send( + &self, + profiler: &SelfProfilerRef, + node: DepNode<K>, + fingerprint: Fingerprint, + edges: SmallVec<[DepNodeIndex; 8]>, + ) -> DepNodeIndex { + let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph"); + let node = NodeInfo { node, fingerprint, edges }; + self.status.lock().encode_node(&node, &self.record_graph) + } - Ok(SerializedDepGraph { nodes, fingerprints, edge_list_indices, edge_list_data }) - }) + pub fn finish(self, profiler: &SelfProfilerRef) -> FileEncodeResult { + let _prof_timer = profiler.generic_activity("incr_comp_encode_dep_graph"); + self.status.into_inner().finish() } } diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs index 3db57c0ab3a..071144f38e7 100644 --- a/compiler/rustc_query_system/src/lib.rs +++ b/compiler/rustc_query_system/src/lib.rs @@ -2,6 +2,7 @@ #![feature(const_fn)] #![feature(const_panic)] #![feature(core_intrinsics)] +#![feature(drain_filter)] #![feature(hash_raw_entry)] #![feature(iter_zip)] #![feature(min_specialization)] diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 77267489a75..fb8a53048fa 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -449,9 +449,11 @@ where let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| { tcx.start_query(job.id, diagnostics, || { - tcx.dep_context() - .dep_graph() - .with_anon_task(query.dep_kind, || query.compute(tcx, key)) + tcx.dep_context().dep_graph().with_anon_task( + *tcx.dep_context(), + query.dep_kind, + || query.compute(tcx, key), + ) }) }); @@ -537,7 +539,7 @@ where // If `-Zincremental-verify-ich` is specified, re-hash results from // the cache and make sure that they have the expected fingerprint. 
if unlikely!(tcx.dep_context().sess().opts.debugging_opts.incremental_verify_ich) { - incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query); + incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query); } result @@ -560,7 +562,7 @@ where // // See issue #82920 for an example of a miscompilation that would get turned into // an ICE by this check - incremental_verify_ich(*tcx.dep_context(), &result, dep_node, dep_node_index, query); + incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query); result } @@ -570,14 +572,12 @@ fn incremental_verify_ich<CTX, K, V: Debug>( tcx: CTX::DepContext, result: &V, dep_node: &DepNode<CTX::DepKind>, - dep_node_index: DepNodeIndex, query: &QueryVtable<CTX, K, V>, ) where CTX: QueryContext, { assert!( - Some(tcx.dep_graph().fingerprint_of(dep_node_index)) - == tcx.dep_graph().prev_fingerprint_of(dep_node), + tcx.dep_graph().is_green(dep_node), "fingerprint for green query instance not loaded from cache: {:?}", dep_node, ); @@ -588,9 +588,15 @@ fn incremental_verify_ich<CTX, K, V: Debug>( let new_hash = query.hash_result(&mut hcx, result).unwrap_or(Fingerprint::ZERO); debug!("END verify_ich({:?})", dep_node); - let old_hash = tcx.dep_graph().fingerprint_of(dep_node_index); + let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node); - assert!(new_hash == old_hash, "found unstable fingerprints for {:?}: {:?}", dep_node, result); + assert_eq!( + Some(new_hash), + old_hash, + "found unstable fingerprints for {:?}: {:?}", + dep_node, + result + ); } fn force_query_with_job<C, CTX>( diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 116519855d7..c5f929ab183 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -477,6 +477,7 @@ symbols! { doc_cfg, doc_keyword, doc_masked, + doc_notable_trait, doc_spotlight, doctest, document_private_items, @@ -801,6 +802,7 @@ symbols! { noreturn, nostack, not, + notable_trait, note, object_safe_for_dispatch, of, diff --git a/compiler/rustc_trait_selection/src/opaque_types.rs b/compiler/rustc_trait_selection/src/opaque_types.rs index b6c3768d887..fb4a8ce687c 100644 --- a/compiler/rustc_trait_selection/src/opaque_types.rs +++ b/compiler/rustc_trait_selection/src/opaque_types.rs @@ -1171,7 +1171,7 @@ impl<'a, 'tcx> Instantiator<'a, 'tcx> { // This also instantiates nested instances of `impl Trait`. let predicate = self.instantiate_opaque_types_in_map(predicate); - let cause = traits::ObligationCause::new(span, self.body_id, traits::MiscObligation); + let cause = traits::ObligationCause::new(span, self.body_id, traits::OpaqueType); // Require that the predicate holds for the concrete type. 
debug!("instantiate_opaque_types: predicate={:?}", predicate); diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs index 93a37bd4090..15a3d3ddd8d 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs @@ -1226,10 +1226,11 @@ impl<'a, 'tcx> InferCtxtPrivExt<'tcx> for InferCtxt<'a, 'tcx> { ); let is_normalized_ty_expected = !matches!( - obligation.cause.code, + obligation.cause.code.peel_derives(), ObligationCauseCode::ItemObligation(_) | ObligationCauseCode::BindingObligation(_, _) | ObligationCauseCode::ObjectCastObligation(_) + | ObligationCauseCode::OpaqueType ); if let Err(error) = self.at(&obligation.cause, obligation.param_env).eq_exp( diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs index 326b85e1013..0549db06806 100644 --- a/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs +++ b/compiler/rustc_trait_selection/src/traits/error_reporting/suggestions.rs @@ -1840,6 +1840,7 @@ impl<'a, 'tcx> InferCtxtExt<'tcx> for InferCtxt<'a, 'tcx> { | ObligationCauseCode::MethodReceiver | ObligationCauseCode::ReturnNoExpression | ObligationCauseCode::UnifyReceiver(..) + | ObligationCauseCode::OpaqueType | ObligationCauseCode::MiscObligation => {} ObligationCauseCode::SliceOrArrayElem => { err.note("slice and array elements must have `Sized` type"); diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 1c51597fff2..f441246909b 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -981,7 +981,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { OP: FnOnce(&mut Self) -> R, { let (result, dep_node) = - self.tcx().dep_graph.with_anon_task(DepKind::TraitSelect, || op(self)); + self.tcx().dep_graph.with_anon_task(self.tcx(), DepKind::TraitSelect, || op(self)); self.tcx().dep_graph.read_index(dep_node); (result, dep_node) } |
