| author | bors <bors@rust-lang.org> | 2023-04-29 21:58:13 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2023-04-29 21:58:13 +0000 |
| commit | f5adff6bd8b29ac7dd173b36f0c8c35bb1c593c5 (patch) | |
| tree | 0af42fdb67cd5ee8e9100915fda59e84efec4029 /compiler/rustc_query_impl/src | |
| parent | 87b1f891ea76713462cfc5a15137a8fe2b24ecc2 (diff) | |
| parent | b6943736bd37e0e932089c27dd0638f0a7ddc3fe (diff) | |
Auto merge of #109611 - Zoxc:query-engine-rem, r=cjgillot
Remove `QueryEngine` trait

This removes the `QueryEngine` trait and `Queries` from `rustc_query_impl` and replaces them with function pointers and fields in `QuerySystem`. As a side effect, `OnDiskCache` is moved back into `rustc_middle` and the `OnDiskCache` trait is also removed.

This has a couple of benefits:

- `TyCtxt` is used in the query system instead of `QueryCtxt`, which is larger.
- Function pointers are more flexible to work with. A variant of https://github.com/rust-lang/rust/pull/107802 is included which avoids the double indirection. For https://github.com/rust-lang/rust/pull/108938 we can name the entry point `__rust_end_short_backtrace` to avoid some overhead. For https://github.com/rust-lang/rust/pull/108062 it avoids the duplicate `QueryEngine` structs.
- `QueryContext` now implements `DepContext`, which avoids many `dep_context()` calls in `rustc_query_system`.
- The `rustc_driver` size is reduced by 0.33%; hopefully that means some bootstrap improvements.
- This avoids the unsafe code around the `QueryEngine` trait.

r? `@cjgillot`
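For readers unfamiliar with the refactor, here is a minimal, self-contained sketch of the pattern it applies: a plain struct of function pointers instead of a boxed trait object with a downcast. The types and names below (`Tcx`, `QuerySystemFns`, the provider functions) are illustrative stand-ins, not the actual `rustc_middle`/`rustc_query_impl` definitions.

```rust
// Illustrative sketch of the trait-object -> function-pointer pattern.
// These are simplified stand-in types, not the real rustc definitions.

struct Tcx {
    system: QuerySystemFns,
}

// Instead of holding `Box<dyn QueryEngine>` and downcasting via `Any`,
// the context holds a struct of plain function pointers filled in by the
// crate that defines the providers.
struct QuerySystemFns {
    type_of: fn(&Tcx, u32) -> String,
    try_mark_green: fn(&Tcx, u32) -> bool,
}

// Hypothetical provider implementations living in the "impl" crate.
fn type_of_provider(_tcx: &Tcx, def_id: u32) -> String {
    format!("type of {def_id}")
}

fn try_mark_green_impl(_tcx: &Tcx, _dep_node: u32) -> bool {
    false
}

fn query_system_fns() -> QuerySystemFns {
    QuerySystemFns { type_of: type_of_provider, try_mark_green: try_mark_green_impl }
}

fn main() {
    let tcx = Tcx { system: query_system_fns() };
    // A single indirect call through a function pointer: no vtable lookup,
    // no `Any` downcast, no unsafe lifetime transmute.
    println!("{}", (tcx.system.type_of)(&tcx, 7));
    println!("{}", (tcx.system.try_mark_green)(&tcx, 3));
}
```

Compared with a `dyn` trait stored behind a `Box`, this layout gives one indirection per call instead of two (pointer to the box, then vtable), and each pointer can be set to a named concrete function, which is what makes tricks like naming a backtrace entry point possible.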
Diffstat (limited to 'compiler/rustc_query_impl/src')
| -rw-r--r-- | compiler/rustc_query_impl/src/lib.rs | 30 |
| -rw-r--r-- | compiler/rustc_query_impl/src/on_disk_cache.rs | 1064 |
| -rw-r--r-- | compiler/rustc_query_impl/src/plumbing.rs | 303 |
| -rw-r--r-- | compiler/rustc_query_impl/src/profiling_support.rs | 16 |
4 files changed, 144 insertions(+), 1269 deletions(-)
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 7001a1eed57..82b335f4b4b 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -12,11 +12,9 @@ #![deny(rustc::diagnostic_outside_of_impl)] #[macro_use] -extern crate rustc_macros; -#[macro_use] extern crate rustc_middle; -use rustc_data_structures::sync::AtomicU64; +use crate::plumbing::{encode_all_query_results, try_mark_green}; use rustc_middle::arena::Arena; use rustc_middle::dep_graph::{self, DepKind, DepKindStruct}; use rustc_middle::query::erase::{erase, restore, Erase}; @@ -24,7 +22,7 @@ use rustc_middle::query::AsLocalKey; use rustc_middle::ty::query::{ query_keys, query_provided, query_provided_to_value, query_storage, query_values, }; -use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine}; +use rustc_middle::ty::query::{ExternProviders, Providers, QueryEngine, QuerySystemFns}; use rustc_middle::ty::TyCtxt; use rustc_query_system::dep_graph::SerializedDepNodeIndex; use rustc_query_system::Value; @@ -32,15 +30,10 @@ use rustc_span::Span; #[macro_use] mod plumbing; -pub use plumbing::QueryCtxt; -use rustc_query_system::query::*; -#[cfg(parallel_compiler)] -pub use rustc_query_system::query::{deadlock, QueryContext}; +pub use crate::plumbing::QueryCtxt; pub use rustc_query_system::query::QueryConfig; - -mod on_disk_cache; -pub use on_disk_cache::OnDiskCache; +use rustc_query_system::query::*; mod profiling_support; pub use self::profiling_support::alloc_self_profile_query_strings; @@ -54,9 +47,16 @@ trait QueryConfigRestored<'tcx>: QueryConfig<QueryCtxt<'tcx>> + Default { rustc_query_append! { define_queries! } -impl<'tcx> Queries<'tcx> { - // Force codegen in the dyn-trait transformation in this crate. 
- pub fn as_dyn(&'tcx self) -> &'tcx dyn QueryEngine<'tcx> { - self +pub fn query_system_fns<'tcx>( + local_providers: Providers, + extern_providers: ExternProviders, +) -> QuerySystemFns<'tcx> { + QuerySystemFns { + engine: engine(), + local_providers, + extern_providers, + query_structs: make_dep_kind_array!(query_structs).to_vec(), + encode_query_results: encode_all_query_results, + try_mark_green: try_mark_green, } } diff --git a/compiler/rustc_query_impl/src/on_disk_cache.rs b/compiler/rustc_query_impl/src/on_disk_cache.rs deleted file mode 100644 index c0f2d7803d4..00000000000 --- a/compiler/rustc_query_impl/src/on_disk_cache.rs +++ /dev/null @@ -1,1064 +0,0 @@ -use crate::QueryCtxt; -use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; -use rustc_data_structures::memmap::Mmap; -use rustc_data_structures::stable_hasher::Hash64; -use rustc_data_structures::sync::{HashMapExt, Lock, Lrc, RwLock}; -use rustc_data_structures::unhash::UnhashMap; -use rustc_data_structures::unord::UnordSet; -use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, StableCrateId, LOCAL_CRATE}; -use rustc_hir::definitions::DefPathHash; -use rustc_index::{Idx, IndexVec}; -use rustc_middle::dep_graph::{DepNodeIndex, SerializedDepNodeIndex}; -use rustc_middle::mir::interpret::{AllocDecodingSession, AllocDecodingState}; -use rustc_middle::mir::{self, interpret}; -use rustc_middle::ty::codec::{RefDecodable, TyDecoder, TyEncoder}; -use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_query_system::dep_graph::DepContext; -use rustc_query_system::query::{QueryCache, QuerySideEffects}; -use rustc_serialize::{ - opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder}, - Decodable, Decoder, Encodable, Encoder, -}; -use rustc_session::Session; -use rustc_span::hygiene::{ - ExpnId, HygieneDecodeContext, HygieneEncodeContext, SyntaxContext, SyntaxContextData, -}; -use rustc_span::source_map::{SourceMap, StableSourceFileId}; -use rustc_span::{BytePos, ExpnData, ExpnHash, Pos, SourceFile, Span}; -use rustc_span::{CachingSourceMapView, Symbol}; -use std::collections::hash_map::Entry; -use std::io; -use std::mem; - -const TAG_FILE_FOOTER: u128 = 0xC0FFEE_C0FFEE_C0FFEE_C0FFEE_C0FFEE; - -// A normal span encoded with both location information and a `SyntaxContext` -const TAG_FULL_SPAN: u8 = 0; -// A partial span with no location information, encoded only with a `SyntaxContext` -const TAG_PARTIAL_SPAN: u8 = 1; -const TAG_RELATIVE_SPAN: u8 = 2; - -const TAG_SYNTAX_CONTEXT: u8 = 0; -const TAG_EXPN_DATA: u8 = 1; - -// Tags for encoding Symbol's -const SYMBOL_STR: u8 = 0; -const SYMBOL_OFFSET: u8 = 1; -const SYMBOL_PREINTERNED: u8 = 2; - -/// Provides an interface to incremental compilation data cached from the -/// previous compilation session. This data will eventually include the results -/// of a few selected queries (like `typeck` and `mir_optimized`) and -/// any side effects that have been emitted during a query. -pub struct OnDiskCache<'sess> { - // The complete cache data in serialized form. - serialized_data: RwLock<Option<Mmap>>, - - // Collects all `QuerySideEffects` created during the current compilation - // session. - current_side_effects: Lock<FxHashMap<DepNodeIndex, QuerySideEffects>>, - - source_map: &'sess SourceMap, - file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, - - // Caches that are populated lazily during decoding. 
- file_index_to_file: Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>, - - // A map from dep-node to the position of the cached query result in - // `serialized_data`. - query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - - // A map from dep-node to the position of any associated `QuerySideEffects` in - // `serialized_data`. - prev_side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - - alloc_decoding_state: AllocDecodingState, - - // A map from syntax context ids to the position of their associated - // `SyntaxContextData`. We use a `u32` instead of a `SyntaxContext` - // to represent the fact that we are storing *encoded* ids. When we decode - // a `SyntaxContext`, a new id will be allocated from the global `HygieneData`, - // which will almost certainly be different than the serialized id. - syntax_contexts: FxHashMap<u32, AbsoluteBytePos>, - // A map from the `DefPathHash` of an `ExpnId` to the position - // of their associated `ExpnData`. Ideally, we would store a `DefId`, - // but we need to decode this before we've constructed a `TyCtxt` (which - // makes it difficult to decode a `DefId`). - - // Note that these `DefPathHashes` correspond to both local and foreign - // `ExpnData` (e.g `ExpnData.krate` may not be `LOCAL_CRATE`). Alternatively, - // we could look up the `ExpnData` from the metadata of foreign crates, - // but it seemed easier to have `OnDiskCache` be independent of the `CStore`. - expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>, - // Additional information used when decoding hygiene data. - hygiene_context: HygieneDecodeContext, - // Maps `ExpnHash`es to their raw value from the *previous* - // compilation session. This is used as an initial 'guess' when - // we try to map an `ExpnHash` to its value in the current - // compilation session. - foreign_expn_data: UnhashMap<ExpnHash, u32>, -} - -// This type is used only for serialization and deserialization. -#[derive(Encodable, Decodable)] -struct Footer { - file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, - query_result_index: EncodedDepNodeIndex, - side_effects_index: EncodedDepNodeIndex, - // The location of all allocations. - interpret_alloc_index: Vec<u32>, - // See `OnDiskCache.syntax_contexts` - syntax_contexts: FxHashMap<u32, AbsoluteBytePos>, - // See `OnDiskCache.expn_data` - expn_data: UnhashMap<ExpnHash, AbsoluteBytePos>, - foreign_expn_data: UnhashMap<ExpnHash, u32>, -} - -pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>; - -#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)] -struct SourceFileIndex(u32); - -#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq, Encodable, Decodable)] -pub struct AbsoluteBytePos(u64); - -impl AbsoluteBytePos { - fn new(pos: usize) -> AbsoluteBytePos { - AbsoluteBytePos(pos.try_into().expect("Incremental cache file size overflowed u64.")) - } - - fn to_usize(self) -> usize { - self.0 as usize - } -} - -/// An `EncodedSourceFileId` is the same as a `StableSourceFileId` except that -/// the source crate is represented as a [StableCrateId] instead of as a -/// `CrateNum`. This way `EncodedSourceFileId` can be encoded and decoded -/// without any additional context, i.e. with a simple `opaque::Decoder` (which -/// is the only thing available when decoding the cache's [Footer]. 
-#[derive(Encodable, Decodable, Clone, Debug)] -struct EncodedSourceFileId { - file_name_hash: Hash64, - stable_crate_id: StableCrateId, -} - -impl EncodedSourceFileId { - fn translate(&self, tcx: TyCtxt<'_>) -> StableSourceFileId { - let cnum = tcx.stable_crate_id_to_crate_num(self.stable_crate_id); - StableSourceFileId { file_name_hash: self.file_name_hash, cnum } - } - - fn new(tcx: TyCtxt<'_>, file: &SourceFile) -> EncodedSourceFileId { - let source_file_id = StableSourceFileId::new(file); - EncodedSourceFileId { - file_name_hash: source_file_id.file_name_hash, - stable_crate_id: tcx.stable_crate_id(source_file_id.cnum), - } - } -} - -impl<'sess> rustc_middle::ty::OnDiskCache<'sess> for OnDiskCache<'sess> { - /// Creates a new `OnDiskCache` instance from the serialized data in `data`. - fn new(sess: &'sess Session, data: Mmap, start_pos: usize) -> Self { - debug_assert!(sess.opts.incremental.is_some()); - - // Wrap in a scope so we can borrow `data`. - let footer: Footer = { - let mut decoder = MemDecoder::new(&data, start_pos); - - // Decode the *position* of the footer, which can be found in the - // last 8 bytes of the file. - let footer_pos = decoder - .with_position(decoder.len() - IntEncodedWithFixedSize::ENCODED_SIZE, |decoder| { - IntEncodedWithFixedSize::decode(decoder).0 as usize - }); - // Decode the file footer, which contains all the lookup tables, etc. - decoder.with_position(footer_pos, |decoder| decode_tagged(decoder, TAG_FILE_FOOTER)) - }; - - Self { - serialized_data: RwLock::new(Some(data)), - file_index_to_stable_id: footer.file_index_to_stable_id, - file_index_to_file: Default::default(), - source_map: sess.source_map(), - current_side_effects: Default::default(), - query_result_index: footer.query_result_index.into_iter().collect(), - prev_side_effects_index: footer.side_effects_index.into_iter().collect(), - alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index), - syntax_contexts: footer.syntax_contexts, - expn_data: footer.expn_data, - foreign_expn_data: footer.foreign_expn_data, - hygiene_context: Default::default(), - } - } - - fn new_empty(source_map: &'sess SourceMap) -> Self { - Self { - serialized_data: RwLock::new(None), - file_index_to_stable_id: Default::default(), - file_index_to_file: Default::default(), - source_map, - current_side_effects: Default::default(), - query_result_index: Default::default(), - prev_side_effects_index: Default::default(), - alloc_decoding_state: AllocDecodingState::new(Vec::new()), - syntax_contexts: FxHashMap::default(), - expn_data: UnhashMap::default(), - foreign_expn_data: UnhashMap::default(), - hygiene_context: Default::default(), - } - } - - /// Execute all cache promotions and release the serialized backing Mmap. - /// - /// Cache promotions require invoking queries, which needs to read the serialized data. - /// In order to serialize the new on-disk cache, the former on-disk cache file needs to be - /// deleted, hence we won't be able to refer to its memmapped data. - fn drop_serialized_data(&self, tcx: TyCtxt<'_>) { - // Load everything into memory so we can write it out to the on-disk - // cache. The vast majority of cacheable query results should already - // be in memory, so this should be a cheap operation. 
- // Do this *before* we clone 'latest_foreign_def_path_hashes', since - // loading existing queries may cause us to create new DepNodes, which - // may in turn end up invoking `store_foreign_def_id_hash` - tcx.dep_graph.exec_cache_promotions(tcx); - - *self.serialized_data.write() = None; - } - - fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult { - // Serializing the `DepGraph` should not modify it. - tcx.dep_graph.with_ignore(|| { - // Allocate `SourceFileIndex`es. - let (file_to_file_index, file_index_to_stable_id) = { - let files = tcx.sess.source_map().files(); - let mut file_to_file_index = - FxHashMap::with_capacity_and_hasher(files.len(), Default::default()); - let mut file_index_to_stable_id = - FxHashMap::with_capacity_and_hasher(files.len(), Default::default()); - - for (index, file) in files.iter().enumerate() { - let index = SourceFileIndex(index as u32); - let file_ptr: *const SourceFile = &**file as *const _; - file_to_file_index.insert(file_ptr, index); - let source_file_id = EncodedSourceFileId::new(tcx, &file); - file_index_to_stable_id.insert(index, source_file_id); - } - - (file_to_file_index, file_index_to_stable_id) - }; - - let hygiene_encode_context = HygieneEncodeContext::default(); - - let mut encoder = CacheEncoder { - tcx, - encoder, - type_shorthands: Default::default(), - predicate_shorthands: Default::default(), - interpret_allocs: Default::default(), - source_map: CachingSourceMapView::new(tcx.sess.source_map()), - file_to_file_index, - hygiene_context: &hygiene_encode_context, - symbol_table: Default::default(), - }; - - // Encode query results. - let mut query_result_index = EncodedDepNodeIndex::new(); - - tcx.sess.time("encode_query_results", || { - let enc = &mut encoder; - let qri = &mut query_result_index; - QueryCtxt::from_tcx(tcx).encode_query_results(enc, qri); - }); - - // Encode side effects. - let side_effects_index: EncodedDepNodeIndex = self - .current_side_effects - .borrow() - .iter() - .map(|(dep_node_index, side_effects)| { - let pos = AbsoluteBytePos::new(encoder.position()); - let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index()); - encoder.encode_tagged(dep_node_index, side_effects); - - (dep_node_index, pos) - }) - .collect(); - - let interpret_alloc_index = { - let mut interpret_alloc_index = Vec::new(); - let mut n = 0; - loop { - let new_n = encoder.interpret_allocs.len(); - // If we have found new IDs, serialize those too. - if n == new_n { - // Otherwise, abort. - break; - } - interpret_alloc_index.reserve(new_n - n); - for idx in n..new_n { - let id = encoder.interpret_allocs[idx]; - let pos: u32 = encoder.position().try_into().unwrap(); - interpret_alloc_index.push(pos); - interpret::specialized_encode_alloc_id(&mut encoder, tcx, id); - } - n = new_n; - } - interpret_alloc_index - }; - - let mut syntax_contexts = FxHashMap::default(); - let mut expn_data = UnhashMap::default(); - let mut foreign_expn_data = UnhashMap::default(); - - // Encode all hygiene data (`SyntaxContextData` and `ExpnData`) from the current - // session. 
- - hygiene_encode_context.encode( - &mut encoder, - |encoder, index, ctxt_data| { - let pos = AbsoluteBytePos::new(encoder.position()); - encoder.encode_tagged(TAG_SYNTAX_CONTEXT, ctxt_data); - syntax_contexts.insert(index, pos); - }, - |encoder, expn_id, data, hash| { - if expn_id.krate == LOCAL_CRATE { - let pos = AbsoluteBytePos::new(encoder.position()); - encoder.encode_tagged(TAG_EXPN_DATA, data); - expn_data.insert(hash, pos); - } else { - foreign_expn_data.insert(hash, expn_id.local_id.as_u32()); - } - }, - ); - - // Encode the file footer. - let footer_pos = encoder.position() as u64; - encoder.encode_tagged( - TAG_FILE_FOOTER, - &Footer { - file_index_to_stable_id, - query_result_index, - side_effects_index, - interpret_alloc_index, - syntax_contexts, - expn_data, - foreign_expn_data, - }, - ); - - // Encode the position of the footer as the last 8 bytes of the - // file so we know where to look for it. - IntEncodedWithFixedSize(footer_pos).encode(&mut encoder.encoder); - - // DO NOT WRITE ANYTHING TO THE ENCODER AFTER THIS POINT! The address - // of the footer must be the last thing in the data stream. - - encoder.finish() - }) - } -} - -impl<'sess> OnDiskCache<'sess> { - pub fn as_dyn(&self) -> &dyn rustc_middle::ty::OnDiskCache<'sess> { - self as _ - } - - /// Loads a `QuerySideEffects` created during the previous compilation session. - pub fn load_side_effects( - &self, - tcx: TyCtxt<'_>, - dep_node_index: SerializedDepNodeIndex, - ) -> QuerySideEffects { - let side_effects: Option<QuerySideEffects> = - self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index); - - side_effects.unwrap_or_default() - } - - /// Stores a `QuerySideEffects` emitted during the current compilation session. - /// Anything stored like this will be available via `load_side_effects` in - /// the next compilation session. - #[inline(never)] - #[cold] - pub fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) { - let mut current_side_effects = self.current_side_effects.borrow_mut(); - let prev = current_side_effects.insert(dep_node_index, side_effects); - debug_assert!(prev.is_none()); - } - - /// Return whether the cached query result can be decoded. - pub fn loadable_from_disk(&self, dep_node_index: SerializedDepNodeIndex) -> bool { - self.query_result_index.contains_key(&dep_node_index) - // with_decoder is infallible, so we can stop here - } - - /// Returns the cached query result if there is something in the cache for - /// the given `SerializedDepNodeIndex`; otherwise returns `None`. - pub fn try_load_query_result<'tcx, T>( - &self, - tcx: TyCtxt<'tcx>, - dep_node_index: SerializedDepNodeIndex, - ) -> Option<T> - where - T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, - { - let opt_value = self.load_indexed(tcx, dep_node_index, &self.query_result_index); - debug_assert_eq!(opt_value.is_some(), self.loadable_from_disk(dep_node_index)); - opt_value - } - - /// Stores side effect emitted during computation of an anonymous query. - /// Since many anonymous queries can share the same `DepNode`, we aggregate - /// them -- as opposed to regular queries where we assume that there is a - /// 1:1 relationship between query-key and `DepNode`. 
- #[inline(never)] - #[cold] - pub fn store_side_effects_for_anon_node( - &self, - dep_node_index: DepNodeIndex, - side_effects: QuerySideEffects, - ) { - let mut current_side_effects = self.current_side_effects.borrow_mut(); - - let x = current_side_effects.entry(dep_node_index).or_default(); - x.append(side_effects); - } - - fn load_indexed<'tcx, T>( - &self, - tcx: TyCtxt<'tcx>, - dep_node_index: SerializedDepNodeIndex, - index: &FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - ) -> Option<T> - where - T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, - { - let pos = index.get(&dep_node_index).cloned()?; - let value = self.with_decoder(tcx, pos, |decoder| decode_tagged(decoder, dep_node_index)); - Some(value) - } - - fn with_decoder<'a, 'tcx, T, F: for<'s> FnOnce(&mut CacheDecoder<'s, 'tcx>) -> T>( - &'sess self, - tcx: TyCtxt<'tcx>, - pos: AbsoluteBytePos, - f: F, - ) -> T - where - T: Decodable<CacheDecoder<'a, 'tcx>>, - { - let serialized_data = self.serialized_data.read(); - let mut decoder = CacheDecoder { - tcx, - opaque: MemDecoder::new(serialized_data.as_deref().unwrap_or(&[]), pos.to_usize()), - source_map: self.source_map, - file_index_to_file: &self.file_index_to_file, - file_index_to_stable_id: &self.file_index_to_stable_id, - alloc_decoding_session: self.alloc_decoding_state.new_decoding_session(), - syntax_contexts: &self.syntax_contexts, - expn_data: &self.expn_data, - foreign_expn_data: &self.foreign_expn_data, - hygiene_context: &self.hygiene_context, - }; - f(&mut decoder) - } -} - -//- DECODING ------------------------------------------------------------------- - -/// A decoder that can read from the incremental compilation cache. It is similar to the one -/// we use for crate metadata decoding in that it can rebase spans and eventually -/// will also handle things that contain `Ty` instances. -pub struct CacheDecoder<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - opaque: MemDecoder<'a>, - source_map: &'a SourceMap, - file_index_to_file: &'a Lock<FxHashMap<SourceFileIndex, Lrc<SourceFile>>>, - file_index_to_stable_id: &'a FxHashMap<SourceFileIndex, EncodedSourceFileId>, - alloc_decoding_session: AllocDecodingSession<'a>, - syntax_contexts: &'a FxHashMap<u32, AbsoluteBytePos>, - expn_data: &'a UnhashMap<ExpnHash, AbsoluteBytePos>, - foreign_expn_data: &'a UnhashMap<ExpnHash, u32>, - hygiene_context: &'a HygieneDecodeContext, -} - -impl<'a, 'tcx> CacheDecoder<'a, 'tcx> { - fn file_index_to_file(&self, index: SourceFileIndex) -> Lrc<SourceFile> { - let CacheDecoder { - tcx, - ref file_index_to_file, - ref file_index_to_stable_id, - ref source_map, - .. - } = *self; - - file_index_to_file - .borrow_mut() - .entry(index) - .or_insert_with(|| { - let stable_id = file_index_to_stable_id[&index].translate(tcx); - - // If this `SourceFile` is from a foreign crate, then make sure - // that we've imported all of the source files from that crate. - // This has usually already been done during macro invocation. - // However, when encoding query results like `TypeckResults`, - // we might encode an `AdtDef` for a foreign type (because it - // was referenced in the body of the function). There is no guarantee - // that we will load the source files from that crate during macro - // expansion, so we use `import_source_files` to ensure that the foreign - // source files are actually imported before we call `source_file_by_stable_id`. 
- if stable_id.cnum != LOCAL_CRATE { - self.tcx.cstore_untracked().import_source_files(self.tcx.sess, stable_id.cnum); - } - - source_map - .source_file_by_stable_id(stable_id) - .expect("failed to lookup `SourceFile` in new context") - }) - .clone() - } -} - -// Decodes something that was encoded with `encode_tagged()` and verify that the -// tag matches and the correct amount of bytes was read. -fn decode_tagged<D, T, V>(decoder: &mut D, expected_tag: T) -> V -where - T: Decodable<D> + Eq + std::fmt::Debug, - V: Decodable<D>, - D: Decoder, -{ - let start_pos = decoder.position(); - - let actual_tag = T::decode(decoder); - assert_eq!(actual_tag, expected_tag); - let value = V::decode(decoder); - let end_pos = decoder.position(); - - let expected_len: u64 = Decodable::decode(decoder); - assert_eq!((end_pos - start_pos) as u64, expected_len); - - value -} - -impl<'a, 'tcx> TyDecoder for CacheDecoder<'a, 'tcx> { - type I = TyCtxt<'tcx>; - const CLEAR_CROSS_CRATE: bool = false; - - #[inline] - fn interner(&self) -> TyCtxt<'tcx> { - self.tcx - } - - fn cached_ty_for_shorthand<F>(&mut self, shorthand: usize, or_insert_with: F) -> Ty<'tcx> - where - F: FnOnce(&mut Self) -> Ty<'tcx>, - { - let tcx = self.tcx; - - let cache_key = ty::CReaderCacheKey { cnum: None, pos: shorthand }; - - if let Some(&ty) = tcx.ty_rcache.borrow().get(&cache_key) { - return ty; - } - - let ty = or_insert_with(self); - // This may overwrite the entry, but it should overwrite with the same value. - tcx.ty_rcache.borrow_mut().insert_same(cache_key, ty); - ty - } - - fn with_position<F, R>(&mut self, pos: usize, f: F) -> R - where - F: FnOnce(&mut Self) -> R, - { - debug_assert!(pos < self.opaque.len()); - - let new_opaque = MemDecoder::new(self.opaque.data(), pos); - let old_opaque = mem::replace(&mut self.opaque, new_opaque); - let r = f(self); - self.opaque = old_opaque; - r - } - - fn decode_alloc_id(&mut self) -> interpret::AllocId { - let alloc_decoding_session = self.alloc_decoding_session; - alloc_decoding_session.decode_alloc_id(self) - } -} - -rustc_middle::implement_ty_decoder!(CacheDecoder<'a, 'tcx>); - -// This ensures that the `Decodable<opaque::Decoder>::decode` specialization for `Vec<u8>` is used -// when a `CacheDecoder` is passed to `Decodable::decode`. Unfortunately, we have to manually opt -// into specializations this way, given how `CacheDecoder` and the decoding traits currently work. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Vec<u8> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - Decodable::decode(&mut d.opaque) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for SyntaxContext { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let syntax_contexts = decoder.syntax_contexts; - rustc_span::hygiene::decode_syntax_context(decoder, decoder.hygiene_context, |this, id| { - // This closure is invoked if we haven't already decoded the data for the `SyntaxContext` we are deserializing. - // We look up the position of the associated `SyntaxData` and decode it. 
- let pos = syntax_contexts.get(&id).unwrap(); - this.with_position(pos.to_usize(), |decoder| { - let data: SyntaxContextData = decode_tagged(decoder, TAG_SYNTAX_CONTEXT); - data - }) - }) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for ExpnId { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let hash = ExpnHash::decode(decoder); - if hash.is_root() { - return ExpnId::root(); - } - - if let Some(expn_id) = ExpnId::from_hash(hash) { - return expn_id; - } - - let krate = decoder.tcx.stable_crate_id_to_crate_num(hash.stable_crate_id()); - - let expn_id = if krate == LOCAL_CRATE { - // We look up the position of the associated `ExpnData` and decode it. - let pos = decoder - .expn_data - .get(&hash) - .unwrap_or_else(|| panic!("Bad hash {:?} (map {:?})", hash, decoder.expn_data)); - - let data: ExpnData = decoder - .with_position(pos.to_usize(), |decoder| decode_tagged(decoder, TAG_EXPN_DATA)); - let expn_id = rustc_span::hygiene::register_local_expn_id(data, hash); - - #[cfg(debug_assertions)] - { - use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; - let local_hash = decoder.tcx.with_stable_hashing_context(|mut hcx| { - let mut hasher = StableHasher::new(); - expn_id.expn_data().hash_stable(&mut hcx, &mut hasher); - hasher.finish() - }); - debug_assert_eq!(hash.local_hash(), local_hash); - } - - expn_id - } else { - let index_guess = decoder.foreign_expn_data[&hash]; - decoder.tcx.cstore_untracked().expn_hash_to_expn_id( - decoder.tcx.sess, - krate, - index_guess, - hash, - ) - }; - - debug_assert_eq!(expn_id.krate, krate); - expn_id - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Span { - fn decode(decoder: &mut CacheDecoder<'a, 'tcx>) -> Self { - let ctxt = SyntaxContext::decode(decoder); - let parent = Option::<LocalDefId>::decode(decoder); - let tag: u8 = Decodable::decode(decoder); - - if tag == TAG_PARTIAL_SPAN { - return Span::new(BytePos(0), BytePos(0), ctxt, parent); - } else if tag == TAG_RELATIVE_SPAN { - let dlo = u32::decode(decoder); - let dto = u32::decode(decoder); - - let enclosing = decoder.tcx.source_span_untracked(parent.unwrap()).data_untracked(); - let span = Span::new( - enclosing.lo + BytePos::from_u32(dlo), - enclosing.lo + BytePos::from_u32(dto), - ctxt, - parent, - ); - - return span; - } else { - debug_assert_eq!(tag, TAG_FULL_SPAN); - } - - let file_lo_index = SourceFileIndex::decode(decoder); - let line_lo = usize::decode(decoder); - let col_lo = BytePos::decode(decoder); - let len = BytePos::decode(decoder); - - let file_lo = decoder.file_index_to_file(file_lo_index); - let lo = file_lo.lines(|lines| lines[line_lo - 1] + col_lo); - let hi = lo + len; - - Span::new(lo, hi, ctxt, parent) - } -} - -// copy&paste impl from rustc_metadata -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for Symbol { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - let tag = d.read_u8(); - - match tag { - SYMBOL_STR => { - let s = d.read_str(); - Symbol::intern(s) - } - SYMBOL_OFFSET => { - // read str offset - let pos = d.read_usize(); - - // move to str offset and read - d.opaque.with_position(pos, |d| { - let s = d.read_str(); - Symbol::intern(s) - }) - } - SYMBOL_PREINTERNED => { - let symbol_index = d.read_u32(); - Symbol::new_from_decoded(symbol_index) - } - _ => unreachable!(), - } - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for CrateNum { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - let stable_id = StableCrateId::decode(d); - let cnum = 
d.tcx.stable_crate_id_to_crate_num(stable_id); - cnum - } -} - -// This impl makes sure that we get a runtime error when we try decode a -// `DefIndex` that is not contained in a `DefId`. Such a case would be problematic -// because we would not know how to transform the `DefIndex` to the current -// context. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefIndex { - fn decode(_d: &mut CacheDecoder<'a, 'tcx>) -> DefIndex { - panic!("trying to decode `DefIndex` outside the context of a `DefId`") - } -} - -// Both the `CrateNum` and the `DefIndex` of a `DefId` can change in between two -// compilation sessions. We use the `DefPathHash`, which is stable across -// sessions, to map the old `DefId` to the new one. -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for DefId { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - // Load the `DefPathHash` which is was we encoded the `DefId` as. - let def_path_hash = DefPathHash::decode(d); - - // Using the `DefPathHash`, we can lookup the new `DefId`. - // Subtle: We only encode a `DefId` as part of a query result. - // If we get to this point, then all of the query inputs were green, - // which means that the definition with this hash is guaranteed to - // still exist in the current compilation session. - d.tcx.def_path_hash_to_def_id(def_path_hash, &mut || { - panic!("Failed to convert DefPathHash {def_path_hash:?}") - }) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx UnordSet<LocalDefId> { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> - for &'tcx FxHashMap<DefId, ty::EarlyBinder<Ty<'tcx>>> -{ - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> - for &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> -{ - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Predicate<'tcx>, Span)] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [(ty::Clause<'tcx>, Span)] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -impl<'a, 'tcx> Decodable<CacheDecoder<'a, 'tcx>> for &'tcx [rustc_ast::InlineAsmTemplatePiece] { - fn decode(d: &mut CacheDecoder<'a, 'tcx>) -> Self { - RefDecodable::decode(d) - } -} - -macro_rules! impl_ref_decoder { - (<$tcx:tt> $($ty:ty,)*) => { - $(impl<'a, $tcx> Decodable<CacheDecoder<'a, $tcx>> for &$tcx [$ty] { - fn decode(d: &mut CacheDecoder<'a, $tcx>) -> Self { - RefDecodable::decode(d) - } - })* - }; -} - -impl_ref_decoder! {<'tcx> - Span, - rustc_ast::Attribute, - rustc_span::symbol::Ident, - ty::Variance, - rustc_span::def_id::DefId, - rustc_span::def_id::LocalDefId, - (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo), - ty::DeducedParamAttrs, -} - -//- ENCODING ------------------------------------------------------------------- - -/// An encoder that can write to the incremental compilation cache. 
-pub struct CacheEncoder<'a, 'tcx> { - tcx: TyCtxt<'tcx>, - encoder: FileEncoder, - type_shorthands: FxHashMap<Ty<'tcx>, usize>, - predicate_shorthands: FxHashMap<ty::PredicateKind<'tcx>, usize>, - interpret_allocs: FxIndexSet<interpret::AllocId>, - source_map: CachingSourceMapView<'tcx>, - file_to_file_index: FxHashMap<*const SourceFile, SourceFileIndex>, - hygiene_context: &'a HygieneEncodeContext, - symbol_table: FxHashMap<Symbol, usize>, -} - -impl<'a, 'tcx> CacheEncoder<'a, 'tcx> { - fn source_file_index(&mut self, source_file: Lrc<SourceFile>) -> SourceFileIndex { - self.file_to_file_index[&(&*source_file as *const SourceFile)] - } - - /// Encode something with additional information that allows to do some - /// sanity checks when decoding the data again. This method will first - /// encode the specified tag, then the given value, then the number of - /// bytes taken up by tag and value. On decoding, we can then verify that - /// we get the expected tag and read the expected number of bytes. - fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) { - let start_pos = self.position(); - - tag.encode(self); - value.encode(self); - - let end_pos = self.position(); - ((end_pos - start_pos) as u64).encode(self); - } - - fn finish(self) -> Result<usize, io::Error> { - self.encoder.finish() - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for SyntaxContext { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - rustc_span::hygiene::raw_encode_syntax_context(*self, s.hygiene_context, s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for ExpnId { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.hygiene_context.schedule_expn_data_for_encoding(*self); - self.expn_hash().encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Span { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - let span_data = self.data_untracked(); - span_data.ctxt.encode(s); - span_data.parent.encode(s); - - if span_data.is_dummy() { - return TAG_PARTIAL_SPAN.encode(s); - } - - if let Some(parent) = span_data.parent { - let enclosing = s.tcx.source_span(parent).data_untracked(); - if enclosing.contains(span_data) { - TAG_RELATIVE_SPAN.encode(s); - (span_data.lo - enclosing.lo).to_u32().encode(s); - (span_data.hi - enclosing.lo).to_u32().encode(s); - return; - } - } - - let pos = s.source_map.byte_pos_to_line_and_col(span_data.lo); - let partial_span = match &pos { - Some((file_lo, _, _)) => !file_lo.contains(span_data.hi), - None => true, - }; - - if partial_span { - return TAG_PARTIAL_SPAN.encode(s); - } - - let (file_lo, line_lo, col_lo) = pos.unwrap(); - - let len = span_data.hi - span_data.lo; - - let source_file_index = s.source_file_index(file_lo); - - TAG_FULL_SPAN.encode(s); - source_file_index.encode(s); - line_lo.encode(s); - col_lo.encode(s); - len.encode(s); - } -} - -// copy&paste impl from rustc_metadata -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for Symbol { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - // if symbol preinterned, emit tag and symbol index - if self.is_preinterned() { - s.encoder.emit_u8(SYMBOL_PREINTERNED); - s.encoder.emit_u32(self.as_u32()); - } else { - // otherwise write it as string or as offset to it - match s.symbol_table.entry(*self) { - Entry::Vacant(o) => { - s.encoder.emit_u8(SYMBOL_STR); - let pos = s.encoder.position(); - o.insert(pos); - s.emit_str(self.as_str()); - } - Entry::Occupied(o) => { - let x = *o.get(); - s.emit_u8(SYMBOL_OFFSET); - s.emit_usize(x); - } - } - } - } -} 
- -impl<'a, 'tcx> TyEncoder for CacheEncoder<'a, 'tcx> { - type I = TyCtxt<'tcx>; - const CLEAR_CROSS_CRATE: bool = false; - - fn position(&self) -> usize { - self.encoder.position() - } - fn type_shorthands(&mut self) -> &mut FxHashMap<Ty<'tcx>, usize> { - &mut self.type_shorthands - } - fn predicate_shorthands(&mut self) -> &mut FxHashMap<ty::PredicateKind<'tcx>, usize> { - &mut self.predicate_shorthands - } - fn encode_alloc_id(&mut self, alloc_id: &interpret::AllocId) { - let (index, _) = self.interpret_allocs.insert_full(*alloc_id); - - index.encode(self); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for CrateNum { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.tcx.stable_crate_id(*self).encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefId { - fn encode(&self, s: &mut CacheEncoder<'a, 'tcx>) { - s.tcx.def_path_hash(*self).encode(s); - } -} - -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for DefIndex { - fn encode(&self, _: &mut CacheEncoder<'a, 'tcx>) { - bug!("encoding `DefIndex` without context"); - } -} - -macro_rules! encoder_methods { - ($($name:ident($ty:ty);)*) => { - #[inline] - $(fn $name(&mut self, value: $ty) { - self.encoder.$name(value) - })* - } -} - -impl<'a, 'tcx> Encoder for CacheEncoder<'a, 'tcx> { - encoder_methods! { - emit_usize(usize); - emit_u128(u128); - emit_u64(u64); - emit_u32(u32); - emit_u16(u16); - emit_u8(u8); - - emit_isize(isize); - emit_i128(i128); - emit_i64(i64); - emit_i32(i32); - emit_i16(i16); - emit_i8(i8); - - emit_bool(bool); - emit_char(char); - emit_str(&str); - emit_raw_bytes(&[u8]); - } -} - -// This ensures that the `Encodable<opaque::FileEncoder>::encode` specialization for byte slices -// is used when a `CacheEncoder` having an `opaque::FileEncoder` is passed to `Encodable::encode`. -// Unfortunately, we have to manually opt into specializations this way, given how `CacheEncoder` -// and the encoding traits currently work. -impl<'a, 'tcx> Encodable<CacheEncoder<'a, 'tcx>> for [u8] { - fn encode(&self, e: &mut CacheEncoder<'a, 'tcx>) { - self.encode(&mut e.encoder); - } -} - -pub(crate) fn encode_query_results<'a, 'tcx, Q>( - query: Q, - qcx: QueryCtxt<'tcx>, - encoder: &mut CacheEncoder<'a, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, -) where - Q: super::QueryConfigRestored<'tcx>, - Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>, -{ - let _timer = qcx - .tcx - .profiler() - .verbose_generic_activity_with_arg("encode_query_results_for", query.name()); - - assert!(query.query_state(qcx).all_inactive()); - let cache = query.query_cache(qcx); - cache.iter(&mut |key, value, dep_node| { - if query.cache_on_disk(qcx.tcx, &key) { - let dep_node = SerializedDepNodeIndex::new(dep_node.index()); - - // Record position of the cache entry. - query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.encoder.position()))); - - // Encode the type check tables with the `SerializedDepNodeIndex` - // as tag. - encoder.encode_tagged(dep_node, &Q::restore(*value)); - } - }); -} diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 32222df25d4..9f8ac7ccd0b 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -2,35 +2,44 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. 
-use crate::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex}; -use crate::profiling_support::QueryKeyStringCache; -use crate::{on_disk_cache, Queries}; +use crate::rustc_middle::dep_graph::DepContext; +use crate::rustc_middle::ty::TyEncoder; use rustc_data_structures::stable_hasher::{Hash64, HashStable, StableHasher}; -use rustc_data_structures::sync::{AtomicU64, Lock}; -use rustc_errors::{Diagnostic, Handler}; +use rustc_data_structures::sync::Lock; +use rustc_errors::Diagnostic; +use rustc_index::Idx; use rustc_middle::dep_graph::{ self, DepKind, DepKindStruct, DepNode, DepNodeIndex, SerializedDepNodeIndex, }; +use rustc_middle::query::on_disk_cache::AbsoluteBytePos; +use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder, EncodedDepNodeIndex}; use rustc_middle::query::Key; use rustc_middle::ty::tls::{self, ImplicitCtxt}; use rustc_middle::ty::{self, TyCtxt}; use rustc_query_system::dep_graph::{DepNodeParams, HasDepContext}; use rustc_query_system::ich::StableHashingContext; use rustc_query_system::query::{ - force_query, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, QueryStackFrame, + force_query, QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, + QueryStackFrame, }; use rustc_query_system::{LayoutOfDepth, QueryOverflow}; use rustc_serialize::Decodable; +use rustc_serialize::Encodable; use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; -use std::any::Any; use std::num::NonZeroU64; use thin_vec::ThinVec; #[derive(Copy, Clone)] pub struct QueryCtxt<'tcx> { pub tcx: TyCtxt<'tcx>, - pub queries: &'tcx Queries<'tcx>, +} + +impl<'tcx> QueryCtxt<'tcx> { + #[inline] + pub fn new(tcx: TyCtxt<'tcx>) -> Self { + QueryCtxt { tcx } + } } impl<'tcx> std::ops::Deref for QueryCtxt<'tcx> { @@ -53,44 +62,56 @@ impl<'tcx> HasDepContext for QueryCtxt<'tcx> { } impl QueryContext for QueryCtxt<'_> { + #[inline] fn next_job_id(self) -> QueryJobId { QueryJobId( NonZeroU64::new( - self.queries.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed), + self.query_system.jobs.fetch_add(1, rustc_data_structures::sync::Ordering::Relaxed), ) .unwrap(), ) } + #[inline] fn current_query_job(self) -> Option<QueryJobId> { - tls::with_related_context(*self, |icx| icx.query) + tls::with_related_context(self.tcx, |icx| icx.query) } fn try_collect_active_jobs(self) -> Option<QueryMap<DepKind>> { - self.queries.try_collect_active_jobs(*self) + let mut jobs = QueryMap::default(); + + for query in &self.query_system.fns.query_structs { + (query.try_collect_active_jobs)(self.tcx, &mut jobs); + } + + Some(jobs) } // Interactions with on_disk_cache fn load_side_effects(self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects { - self.queries + self.query_system .on_disk_cache .as_ref() - .map(|c| c.load_side_effects(*self, prev_dep_node_index)) + .map(|c| c.load_side_effects(self.tcx, prev_dep_node_index)) .unwrap_or_default() } + #[inline(never)] + #[cold] fn store_side_effects(self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects) { - if let Some(c) = self.queries.on_disk_cache.as_ref() { + if let Some(c) = self.query_system.on_disk_cache.as_ref() { c.store_side_effects(dep_node_index, side_effects) } } + #[inline(never)] + #[cold] fn store_side_effects_for_anon_node( self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects, ) { - if let Some(c) = self.queries.on_disk_cache.as_ref() { + if let Some(c) = self.query_system.on_disk_cache.as_ref() { 
c.store_side_effects_for_anon_node(dep_node_index, side_effects) } } @@ -109,14 +130,14 @@ impl QueryContext for QueryCtxt<'_> { // The `TyCtxt` stored in TLS has the same global interner lifetime // as `self`, so we use `with_related_context` to relate the 'tcx lifetimes // when accessing the `ImplicitCtxt`. - tls::with_related_context(*self, move |current_icx| { + tls::with_related_context(self.tcx, move |current_icx| { if depth_limit && !self.recursion_limit().value_within_limit(current_icx.query_depth) { self.depth_limit_error(token); } // Update the `ImplicitCtxt` to point to our new query job. let new_icx = ImplicitCtxt { - tcx: *self, + tcx: self.tcx, query: Some(token), diagnostics, query_depth: current_icx.query_depth + depth_limit as usize, @@ -152,51 +173,20 @@ impl QueryContext for QueryCtxt<'_> { } } -impl<'tcx> QueryCtxt<'tcx> { - #[inline] - pub fn from_tcx(tcx: TyCtxt<'tcx>) -> Self { - let queries = tcx.queries.as_any(); - let queries = unsafe { - let queries = std::mem::transmute::<&dyn Any, &dyn Any>(queries); - let queries = queries.downcast_ref().unwrap(); - let queries = std::mem::transmute::<&Queries<'_>, &Queries<'_>>(queries); - queries - }; - QueryCtxt { tcx, queries } - } - - pub(crate) fn on_disk_cache(self) -> Option<&'tcx on_disk_cache::OnDiskCache<'tcx>> { - self.queries.on_disk_cache.as_ref() - } +pub(super) fn try_mark_green<'tcx>(tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool { + tcx.dep_graph.try_mark_green(QueryCtxt::new(tcx), dep_node).is_some() +} - pub(super) fn encode_query_results( - self, - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, - ) { - for query in &self.queries.query_structs { - if let Some(encode) = query.encode_query_results { - encode(self, encoder, query_result_index); - } +pub(super) fn encode_all_query_results<'tcx>( + tcx: TyCtxt<'tcx>, + encoder: &mut CacheEncoder<'_, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex, +) { + for query in &tcx.query_system.fns.query_structs { + if let Some(encode) = query.encode_query_results { + encode(tcx, encoder, query_result_index); } } - - pub fn try_print_query_stack( - self, - query: Option<QueryJobId>, - handler: &Handler, - num_frames: Option<usize>, - ) -> usize { - rustc_query_system::query::print_query_stack(self, query, handler, num_frames) - } -} - -#[derive(Clone, Copy)] -pub(crate) struct QueryStruct<'tcx> { - pub try_collect_active_jobs: fn(QueryCtxt<'tcx>, &mut QueryMap<DepKind>) -> Option<()>, - pub alloc_self_profile_query_strings: fn(TyCtxt<'tcx>, &mut QueryKeyStringCache), - pub encode_query_results: - Option<fn(QueryCtxt<'tcx>, &mut CacheEncoder<'_, 'tcx>, &mut EncodedDepNodeIndex)>, } macro_rules! handle_cycle_error { @@ -276,13 +266,13 @@ macro_rules! hash_result { macro_rules! 
call_provider { ([][$qcx:expr, $name:ident, $key:expr]) => {{ - ($qcx.queries.local_providers.$name)($qcx.tcx, $key) + ($qcx.query_system.fns.local_providers.$name)($qcx, $key) }}; ([(separate_provide_extern) $($rest:tt)*][$qcx:expr, $name:ident, $key:expr]) => {{ if let Some(key) = $key.as_local_key() { - ($qcx.queries.local_providers.$name)($qcx.tcx, key) + ($qcx.query_system.fns.local_providers.$name)($qcx, key) } else { - ($qcx.queries.extern_providers.$name)($qcx.tcx, $key) + ($qcx.query_system.fns.extern_providers.$name)($qcx, $key) } }}; ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { @@ -306,7 +296,7 @@ pub(crate) fn create_query_frame< 'tcx, K: Copy + Key + for<'a> HashStable<StableHashingContext<'a>>, >( - tcx: QueryCtxt<'tcx>, + tcx: TyCtxt<'tcx>, do_describe: fn(TyCtxt<'tcx>, K) -> String, key: K, kind: DepKind, @@ -318,7 +308,7 @@ pub(crate) fn create_query_frame< // Showing visible path instead of any path is not that important in production. ty::print::with_no_visible_paths!( // Force filename-line mode to avoid invoking `type_of` query. - ty::print::with_forced_impl_filename_line!(do_describe(tcx.tcx, key)) + ty::print::with_forced_impl_filename_line!(do_describe(tcx, key)) ) ); let description = @@ -328,7 +318,7 @@ pub(crate) fn create_query_frame< // so exit to avoid infinite recursion. None } else { - Some(key.default_span(*tcx)) + Some(key.default_span(tcx)) }; let def_id = key.key_as_def_id(); let def_kind = if kind == dep_graph::DepKind::opt_def_kind { @@ -350,6 +340,34 @@ pub(crate) fn create_query_frame< QueryStackFrame::new(description, span, def_id, def_kind, kind, ty_adt_id, hash) } +pub(crate) fn encode_query_results<'a, 'tcx, Q>( + query: Q, + qcx: QueryCtxt<'tcx>, + encoder: &mut CacheEncoder<'a, 'tcx>, + query_result_index: &mut EncodedDepNodeIndex, +) where + Q: super::QueryConfigRestored<'tcx>, + Q::RestoredValue: Encodable<CacheEncoder<'a, 'tcx>>, +{ + let _timer = + qcx.profiler().verbose_generic_activity_with_arg("encode_query_results_for", query.name()); + + assert!(query.query_state(qcx).all_inactive()); + let cache = query.query_cache(qcx); + cache.iter(&mut |key, value, dep_node| { + if query.cache_on_disk(qcx.tcx, &key) { + let dep_node = SerializedDepNodeIndex::new(dep_node.index()); + + // Record position of the cache entry. + query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position()))); + + // Encode the type check tables with the `SerializedDepNodeIndex` + // as tag. 
+ encoder.encode_tagged(dep_node, &Q::restore(*value)); + } + }); +} + fn try_load_from_on_disk_cache<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) where Q: QueryConfig<QueryCtxt<'tcx>>, @@ -364,8 +382,8 @@ where } } -pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool { - if let Some(cache) = tcx.on_disk_cache().as_ref() { +pub(crate) fn loadable_from_disk<'tcx>(tcx: TyCtxt<'tcx>, id: SerializedDepNodeIndex) -> bool { + if let Some(cache) = tcx.query_system.on_disk_cache.as_ref() { cache.loadable_from_disk(id) } else { false @@ -373,13 +391,13 @@ pub(crate) fn loadable_from_disk<'tcx>(tcx: QueryCtxt<'tcx>, id: SerializedDepNo } pub(crate) fn try_load_from_disk<'tcx, V>( - tcx: QueryCtxt<'tcx>, + tcx: TyCtxt<'tcx>, id: SerializedDepNodeIndex, ) -> Option<V> where V: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { - tcx.on_disk_cache().as_ref()?.try_load_query_result(*tcx, id) + tcx.query_system.on_disk_cache.as_ref()?.try_load_query_result(tcx, id) } fn force_from_dep_node<'tcx, Q>(query: Q, tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool @@ -407,8 +425,7 @@ where if let Some(key) = Q::Key::recover(tcx, &dep_node) { #[cfg(debug_assertions)] let _guard = tracing::span!(tracing::Level::TRACE, stringify!($name), ?key).entered(); - let tcx = QueryCtxt::from_tcx(tcx); - force_query(query, tcx, key, dep_node); + force_query(query, QueryCtxt::new(tcx), key, dep_node); true } else { false @@ -461,8 +478,33 @@ macro_rules! define_queries { ( $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => { - define_queries_struct! { - input: ($(([$($modifiers)*] [$($attr)*] [$name]))*) + mod get_query { + use super::*; + + $( + #[inline(always)] + #[tracing::instrument(level = "trace", skip(tcx))] + pub(super) fn $name<'tcx>( + tcx: TyCtxt<'tcx>, + span: Span, + key: query_keys::$name<'tcx>, + mode: QueryMode, + ) -> Option<Erase<query_values::$name<'tcx>>> { + get_query( + queries::$name::default(), + QueryCtxt::new(tcx), + span, + key, + mode + ) + } + )* + } + + pub(crate) fn engine() -> QueryEngine { + QueryEngine { + $($name: get_query::$name,)* + } } #[allow(nonstandard_style)] @@ -502,7 +544,7 @@ macro_rules! define_queries { fn query_state<'a>(self, tcx: QueryCtxt<'tcx>) -> &'a QueryState<Self::Key, crate::dep_graph::DepKind> where QueryCtxt<'tcx>: 'a { - &tcx.queries.$name + &tcx.query_system.states.$name } #[inline(always)] @@ -521,7 +563,7 @@ macro_rules! define_queries { fn compute(self, qcx: QueryCtxt<'tcx>, key: Self::Key) -> Self::Value { query_provided_to_value::$name( qcx.tcx, - call_provider!([$($modifiers)*][qcx, $name, key]) + call_provider!([$($modifiers)*][qcx.tcx, $name, key]) ) } @@ -535,7 +577,7 @@ macro_rules! define_queries { if ::rustc_middle::query::cached::$name(_qcx.tcx, _key) { Some(|qcx: QueryCtxt<'tcx>, dep_node| { let value = $crate::plumbing::try_load_from_disk::<query_provided::$name<'tcx>>( - qcx, + qcx.tcx, dep_node ); value.map(|value| query_provided_to_value::$name(qcx.tcx, value)) @@ -557,7 +599,7 @@ macro_rules! define_queries { ) -> bool { should_ever_cache_on_disk!([$($modifiers)*] { self.cache_on_disk(_qcx.tcx, _key) && - $crate::plumbing::loadable_from_disk(_qcx, _index) + $crate::plumbing::loadable_from_disk(_qcx.tcx, _index) } { false }) @@ -684,14 +726,13 @@ macro_rules! 
define_queries { } mod query_structs { - use rustc_middle::ty::TyCtxt; - use $crate::plumbing::{QueryStruct, QueryCtxt}; - use $crate::profiling_support::QueryKeyStringCache; - use rustc_query_system::query::QueryMap; + use super::*; + use rustc_middle::ty::query::QueryStruct; + use rustc_middle::ty::query::QueryKeyStringCache; use rustc_middle::dep_graph::DepKind; pub(super) const fn dummy_query_struct<'tcx>() -> QueryStruct<'tcx> { - fn noop_try_collect_active_jobs(_: QueryCtxt<'_>, _: &mut QueryMap<DepKind>) -> Option<()> { + fn noop_try_collect_active_jobs(_: TyCtxt<'_>, _: &mut QueryMap<DepKind>) -> Option<()> { None } fn noop_alloc_self_profile_query_strings(_: TyCtxt<'_>, _: &mut QueryKeyStringCache) {} @@ -717,7 +758,7 @@ macro_rules! define_queries { let name = stringify!($name); $crate::plumbing::create_query_frame(tcx, rustc_middle::query::descs::$name, key, kind, name) }; - tcx.queries.$name.try_collect_active_jobs( + tcx.query_system.states.$name.try_collect_active_jobs( tcx, make_query, qmap, @@ -731,10 +772,10 @@ macro_rules! define_queries { string_cache, ) }, - encode_query_results: expand_if_cached!([$($modifiers)*], |qcx, encoder, query_result_index| - $crate::on_disk_cache::encode_query_results::<super::queries::$name<'tcx>>( + encode_query_results: expand_if_cached!([$($modifiers)*], |tcx, encoder, query_result_index| + $crate::plumbing::encode_query_results::<super::queries::$name<'tcx>>( super::queries::$name::default(), - qcx, + QueryCtxt::new(tcx), encoder, query_result_index, ) @@ -747,93 +788,3 @@ macro_rules! define_queries { } } } - -use crate::{ExternProviders, OnDiskCache, Providers}; - -impl<'tcx> Queries<'tcx> { - pub fn new( - local_providers: Providers, - extern_providers: ExternProviders, - on_disk_cache: Option<OnDiskCache<'tcx>>, - ) -> Self { - use crate::query_structs; - Queries { - local_providers: Box::new(local_providers), - extern_providers: Box::new(extern_providers), - query_structs: make_dep_kind_array!(query_structs).to_vec(), - on_disk_cache, - jobs: AtomicU64::new(1), - ..Queries::default() - } - } -} - -macro_rules! 
define_queries_struct { - ( - input: ($(([$($modifiers:tt)*] [$($attr:tt)*] [$name:ident]))*)) => { - #[derive(Default)] - pub struct Queries<'tcx> { - local_providers: Box<Providers>, - extern_providers: Box<ExternProviders>, - query_structs: Vec<$crate::plumbing::QueryStruct<'tcx>>, - pub on_disk_cache: Option<OnDiskCache<'tcx>>, - jobs: AtomicU64, - - $( - $(#[$attr])* - $name: QueryState< - <queries::$name<'tcx> as QueryConfig<QueryCtxt<'tcx>>>::Key, - rustc_middle::dep_graph::DepKind, - >, - )* - } - - impl<'tcx> Queries<'tcx> { - pub(crate) fn try_collect_active_jobs( - &'tcx self, - tcx: TyCtxt<'tcx>, - ) -> Option<QueryMap<rustc_middle::dep_graph::DepKind>> { - let tcx = QueryCtxt { tcx, queries: self }; - let mut jobs = QueryMap::default(); - - for query in &self.query_structs { - (query.try_collect_active_jobs)(tcx, &mut jobs); - } - - Some(jobs) - } - } - - impl<'tcx> QueryEngine<'tcx> for Queries<'tcx> { - fn as_any(&'tcx self) -> &'tcx dyn std::any::Any { - let this = unsafe { std::mem::transmute::<&Queries<'_>, &Queries<'_>>(self) }; - this as _ - } - - fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool { - let qcx = QueryCtxt { tcx, queries: self }; - tcx.dep_graph.try_mark_green(qcx, dep_node).is_some() - } - - $($(#[$attr])* - #[inline(always)] - #[tracing::instrument(level = "trace", skip(self, tcx))] - fn $name( - &'tcx self, - tcx: TyCtxt<'tcx>, - span: Span, - key: query_keys::$name<'tcx>, - mode: QueryMode, - ) -> Option<Erase<query_values::$name<'tcx>>> { - let qcx = QueryCtxt { tcx, queries: self }; - get_query( - queries::$name::default(), - qcx, - span, - key, - mode - ) - })* - } - }; -} diff --git a/compiler/rustc_query_impl/src/profiling_support.rs b/compiler/rustc_query_impl/src/profiling_support.rs index 08b588a8c94..7d9306f8087 100644 --- a/compiler/rustc_query_impl/src/profiling_support.rs +++ b/compiler/rustc_query_impl/src/profiling_support.rs @@ -1,24 +1,13 @@ -use crate::QueryCtxt; use measureme::{StringComponent, StringId}; -use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::profiling::SelfProfiler; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LocalDefId, LOCAL_CRATE}; use rustc_hir::definitions::DefPathData; +use rustc_middle::ty::query::QueryKeyStringCache; use rustc_middle::ty::TyCtxt; use rustc_query_system::query::QueryCache; use std::fmt::Debug; use std::io::Write; -pub(crate) struct QueryKeyStringCache { - def_id_cache: FxHashMap<DefId, StringId>, -} - -impl QueryKeyStringCache { - fn new() -> QueryKeyStringCache { - QueryKeyStringCache { def_id_cache: Default::default() } - } -} - struct QueryKeyStringBuilder<'p, 'tcx> { profiler: &'p SelfProfiler, tcx: TyCtxt<'tcx>, @@ -253,9 +242,8 @@ pub fn alloc_self_profile_query_strings(tcx: TyCtxt<'_>) { } let mut string_cache = QueryKeyStringCache::new(); - let queries = QueryCtxt::from_tcx(tcx); - for query in &queries.queries.query_structs { + for query in &tcx.query_system.fns.query_structs { (query.alloc_self_profile_query_strings)(tcx, &mut string_cache); } } |
