Diffstat (limited to 'compiler/rustc_middle/src')
28 files changed, 613 insertions, 435 deletions
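The largest change in the diff below reworks how CTFE/Miri track pointer provenance: per-byte provenance is now stored as a (provenance, fragment index) pair, where the index records which byte of the original pointer a fragment came from, so a pointer that was copied or overwritten byte-by-byte can later be reassembled (see `ProvenanceMap::merge_bytes` and the per-byte join logic in allocation.rs). A minimal standalone sketch of that reassembly idea follows — illustration only: `ProvId`, `merge_fragments`, and the plain `BTreeMap` are made up here, whereas the compiler uses `SortedMap<Size, (Prov, u8)>` and the `Provenance` trait.

    use std::collections::BTreeMap;

    type ProvId = u32;

    /// Try to merge byte-sized provenance fragments back into whole-pointer entries.
    /// Returns `None` if any fragment cannot be reassembled into a full pointer.
    fn merge_fragments(
        bytes: &BTreeMap<u64, (ProvId, u8)>,
        ptr_size: u64,
    ) -> Option<BTreeMap<u64, ProvId>> {
        let mut ptrs = BTreeMap::new();
        let mut iter = bytes.iter();
        while let Some((&start, &(prov, first_idx))) = iter.next() {
            // A fragment can only start a pointer if it is byte 0 of that pointer.
            if first_idx != 0 {
                return None;
            }
            // The following ptr_size - 1 fragments must continue this pointer,
            // at consecutive offsets, with matching provenance and indices.
            for expected in 1..ptr_size {
                match iter.next() {
                    Some((&off, &(p, idx)))
                        if off == start + expected && p == prov && u64::from(idx) == expected => {}
                    _ => return None,
                }
            }
            // All fragments present and in order: record a whole-pointer entry.
            ptrs.insert(start, prov);
        }
        Some(ptrs)
    }

    fn main() {
        // Two 4-byte "pointers" that were split into byte fragments.
        let mut bytes = BTreeMap::new();
        for i in 0..4u64 {
            bytes.insert(i, (7, i as u8));     // pointer with provenance 7 at offset 0
            bytes.insert(8 + i, (9, i as u8)); // pointer with provenance 9 at offset 8
        }
        assert_eq!(merge_fragments(&bytes, 4), Some(BTreeMap::from([(0, 7), (8, 9)])));
    }

The real map additionally allows wildcard provenance fragments to match regardless of index (needed for Miri's native-lib mode); the sketch above omits that case.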
| diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index e5cc23c213d..cf0549fa668 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -60,6 +60,7 @@ #![feature(try_trait_v2_residual)] #![feature(try_trait_v2_yeet)] #![feature(type_alias_impl_trait)] +#![feature(unwrap_infallible)] #![feature(yeet_expr)] #![recursion_limit = "256"] // tidy-alphabetical-end diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs index 341a735f88f..f70b7b6fad7 100644 --- a/compiler/rustc_middle/src/lint.rs +++ b/compiler/rustc_middle/src/lint.rs @@ -211,11 +211,28 @@ impl LintExpectation { } fn explain_lint_level_source( + sess: &Session, lint: &'static Lint, level: Level, src: LintLevelSource, err: &mut Diag<'_, ()>, ) { + // Find the name of the lint group that contains the given lint. + // Assumes the lint only belongs to one group. + let lint_group_name = |lint| { + let lint_groups_iter = sess.lint_groups_iter(); + let lint_id = LintId::of(lint); + lint_groups_iter + .filter(|lint_group| !lint_group.is_externally_loaded) + .find(|lint_group| { + lint_group + .lints + .iter() + .find(|lint_group_lint| **lint_group_lint == lint_id) + .is_some() + }) + .map(|lint_group| lint_group.name) + }; let name = lint.name_lower(); if let Level::Allow = level { // Do not point at `#[allow(compat_lint)]` as the reason for a compatibility lint @@ -224,7 +241,15 @@ fn explain_lint_level_source( } match src { LintLevelSource::Default => { - err.note_once(format!("`#[{}({})]` on by default", level.as_str(), name)); + let level_str = level.as_str(); + match lint_group_name(lint) { + Some(group_name) => { + err.note_once(format!("`#[{level_str}({name})]` (part of `#[{level_str}({group_name})]`) on by default")); + } + None => { + err.note_once(format!("`#[{level_str}({name})]` on by default")); + } + } } LintLevelSource::CommandLine(lint_flag_val, orig_level) => { let flag = orig_level.to_cmd_flag(); @@ -427,7 +452,7 @@ pub fn lint_level( decorate(&mut err); } - explain_lint_level_source(lint, level, src, &mut err); + explain_lint_level_source(sess, lint, level, src, &mut err); err.emit() } lint_level_impl(sess, lint, level, span, Box::new(decorate)) diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs index 94384e64afd..78cafe8566b 100644 --- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs +++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs @@ -1,13 +1,12 @@ use std::borrow::Cow; use rustc_abi::Align; -use rustc_ast::expand::autodiff_attrs::AutoDiffAttrs; -use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr}; +use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, Linkage, OptimizeAttr}; +use rustc_hir::def_id::DefId; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; use rustc_span::Symbol; use rustc_target::spec::SanitizerSet; -use crate::mir::mono::Linkage; use crate::ty::{InstanceKind, TyCtxt}; impl<'tcx> TyCtxt<'tcx> { @@ -36,14 +35,10 @@ pub struct CodegenFnAttrs { pub inline: InlineAttr, /// Parsed representation of the `#[optimize]` attribute pub optimize: OptimizeAttr, - /// The `#[export_name = "..."]` attribute, indicating a custom symbol a - /// function should be exported under - pub export_name: Option<Symbol>, - /// The `#[link_name = "..."]` attribute, indicating a custom symbol an - /// imported function should be imported as. 
Note that `export_name` - /// probably isn't set when this is set, this is for foreign items while - /// `#[export_name]` is for Rust-defined functions. - pub link_name: Option<Symbol>, + /// The name this function will be imported/exported under. This can be set + /// using the `#[export_name = "..."]` or `#[link_name = "..."]` attribute + /// depending on if this is a function definition or foreign function. + pub symbol_name: Option<Symbol>, /// The `#[link_ordinal = "..."]` attribute, indicating an ordinal an /// imported function has in the dynamic library. Note that this must not /// be set when `link_name` is set. This is for foreign items with the @@ -62,8 +57,8 @@ pub struct CodegenFnAttrs { /// The `#[link_section = "..."]` attribute, or what executable section this /// should be placed in. pub link_section: Option<Symbol>, - /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which - /// instrumentation should be disabled inside the annotated function. + /// The `#[sanitize(xyz = "off")]` attribute. Indicates sanitizers for which + /// instrumentation should be disabled inside the function. pub no_sanitize: SanitizerSet, /// The `#[instruction_set(set)]` attribute. Indicates if the generated code should /// be generated against a specific instruction set. Only usable on architectures which allow @@ -75,17 +70,25 @@ pub struct CodegenFnAttrs { /// The `#[patchable_function_entry(...)]` attribute. Indicates how many nops should be around /// the function entry. pub patchable_function_entry: Option<PatchableFunctionEntry>, - /// For the `#[autodiff]` macros. - pub autodiff_item: Option<AutoDiffAttrs>, +} + +#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable, PartialEq, Eq)] +pub enum TargetFeatureKind { + /// The feature is implied by another feature, rather than explicitly added by the + /// `#[target_feature]` attribute + Implied, + /// The feature is added by the regular `target_feature` attribute. + Enabled, + /// The feature is added by the unsafe `force_target_feature` attribute. + Forced, } #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] pub struct TargetFeature { /// The name of the target feature (e.g. "avx") pub name: Symbol, - /// The feature is implied by another feature, rather than explicitly added by the - /// `#[target_feature]` attribute - pub implied: bool, + /// The way this feature was enabled. + pub kind: TargetFeatureKind, } #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] @@ -170,8 +173,7 @@ impl CodegenFnAttrs { flags: CodegenFnAttrFlags::empty(), inline: InlineAttr::None, optimize: OptimizeAttr::Default, - export_name: None, - link_name: None, + symbol_name: None, link_ordinal: None, target_features: vec![], safe_target_features: false, @@ -182,7 +184,6 @@ impl CodegenFnAttrs { instruction_set: None, alignment: None, patchable_function_entry: None, - autodiff_item: None, } } @@ -193,10 +194,14 @@ impl CodegenFnAttrs { /// * `#[linkage]` is present /// /// Keep this in sync with the logic for the unused_attributes for `#[inline]` lint. 
- pub fn contains_extern_indicator(&self) -> bool { + pub fn contains_extern_indicator(&self, tcx: TyCtxt<'_>, did: DefId) -> bool { + if tcx.is_foreign_item(did) { + return false; + } + self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) - || self.export_name.is_some() + || self.symbol_name.is_some() || match self.linkage { // These are private, so make sure we don't try to consider // them external. diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs index 785ddd1ee29..e3e04c9d180 100644 --- a/compiler/rustc_middle/src/middle/privacy.rs +++ b/compiler/rustc_middle/src/middle/privacy.rs @@ -181,11 +181,7 @@ impl EffectiveVisibilities { // nominal visibility. For some items nominal visibility doesn't make sense so we // don't check this condition for them. let is_impl = matches!(tcx.def_kind(def_id), DefKind::Impl { .. }); - let is_associated_item_in_trait_impl = tcx - .impl_of_assoc(def_id.to_def_id()) - .and_then(|impl_id| tcx.trait_id_of_impl(impl_id)) - .is_some(); - if !is_impl && !is_associated_item_in_trait_impl { + if !is_impl && tcx.trait_impl_of_assoc(def_id.to_def_id()).is_none() { let nominal_vis = tcx.visibility(def_id); if !nominal_vis.is_at_least(ev.reachable, tcx) { span_bug!( diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 4a19cf1563c..18520089e3e 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -4,7 +4,7 @@ use std::num::NonZero; use rustc_ast::NodeId; -use rustc_errors::{Applicability, Diag, EmissionGuarantee}; +use rustc_errors::{Applicability, Diag, EmissionGuarantee, LintBuffer}; use rustc_feature::GateIssue; use rustc_hir::attrs::{DeprecatedSince, Deprecation}; use rustc_hir::def_id::{DefId, LocalDefId}; @@ -12,7 +12,7 @@ use rustc_hir::{self as hir, ConstStability, DefaultBodyStability, HirId, Stabil use rustc_macros::{Decodable, Encodable, HashStable, Subdiagnostic}; use rustc_session::Session; use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE}; -use rustc_session::lint::{BuiltinLintDiag, DeprecatedSinceKind, Level, Lint, LintBuffer}; +use rustc_session::lint::{BuiltinLintDiag, DeprecatedSinceKind, Level, Lint}; use rustc_session::parse::feature_err_issue; use rustc_span::{Span, Symbol, sym}; use tracing::debug; diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 96131d47a17..164433aede2 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -448,6 +448,11 @@ impl<'tcx> Const<'tcx> { Self::Val(val, ty) } + #[inline] + pub fn from_ty_value(tcx: TyCtxt<'tcx>, val: ty::Value<'tcx>) -> Self { + Self::Ty(val.ty, ty::Const::new_value(tcx, val.valtree, val.ty)) + } + pub fn from_bits( tcx: TyCtxt<'tcx>, bits: u128, diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 27ead514531..2ea92a39d48 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -306,8 +306,6 @@ pub enum AllocError { ScalarSizeMismatch(ScalarSizeMismatch), /// Encountered a pointer where we needed raw bytes. ReadPointerAsInt(Option<BadBytesAccess>), - /// Partially overwriting a pointer. - OverwritePartialPointer(Size), /// Partially copying a pointer. 
ReadPartialPointer(Size), /// Using uninitialized data where it is not allowed. @@ -331,9 +329,6 @@ impl AllocError { ReadPointerAsInt(info) => InterpErrorKind::Unsupported( UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))), ), - OverwritePartialPointer(offset) => InterpErrorKind::Unsupported( - UnsupportedOpInfo::OverwritePartialPointer(Pointer::new(alloc_id, offset)), - ), ReadPartialPointer(offset) => InterpErrorKind::Unsupported( UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)), ), @@ -633,11 +628,11 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> &mut self, cx: &impl HasDataLayout, range: AllocRange, - ) -> AllocResult<&mut [u8]> { + ) -> &mut [u8] { self.mark_init(range, true); - self.provenance.clear(range, cx)?; + self.provenance.clear(range, cx); - Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]) + &mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()] } /// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating existing immutable aliases @@ -646,15 +641,15 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> &mut self, cx: &impl HasDataLayout, range: AllocRange, - ) -> AllocResult<*mut [u8]> { + ) -> *mut [u8] { self.mark_init(range, true); - self.provenance.clear(range, cx)?; + self.provenance.clear(range, cx); assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check // Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`. let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize()); let len = range.end().bytes_usize() - range.start.bytes_usize(); - Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len)) + ptr::slice_from_raw_parts_mut(begin_ptr, len) } /// This gives direct mutable access to the entire buffer, just exposing their internal state @@ -723,26 +718,45 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> let ptr = Pointer::new(prov, Size::from_bytes(bits)); return Ok(Scalar::from_pointer(ptr, cx)); } - - // If we can work on pointers byte-wise, join the byte-wise provenances. - if Prov::OFFSET_IS_ADDR { - let mut prov = self.provenance.get(range.start, cx); + // The other easy case is total absence of provenance. + if self.provenance.range_empty(range, cx) { + return Ok(Scalar::from_uint(bits, range.size)); + } + // If we get here, we have to check per-byte provenance, and join them together. + let prov = 'prov: { + // Initialize with first fragment. Must have index 0. + let Some((mut joint_prov, 0)) = self.provenance.get_byte(range.start, cx) else { + break 'prov None; + }; + // Update with the remaining fragments. for offset in Size::from_bytes(1)..range.size { - let this_prov = self.provenance.get(range.start + offset, cx); - prov = Prov::join(prov, this_prov); + // Ensure there is provenance here and it has the right index. + let Some((frag_prov, frag_idx)) = + self.provenance.get_byte(range.start + offset, cx) + else { + break 'prov None; + }; + // Wildcard provenance is allowed to come with any index (this is needed + // for Miri's native-lib mode to work). + if u64::from(frag_idx) != offset.bytes() && Some(frag_prov) != Prov::WILDCARD { + break 'prov None; + } + // Merge this byte's provenance with the previous ones. + joint_prov = match Prov::join(joint_prov, frag_prov) { + Some(prov) => prov, + None => break 'prov None, + }; } - // Now use this provenance. 
- let ptr = Pointer::new(prov, Size::from_bytes(bits)); - return Ok(Scalar::from_maybe_pointer(ptr, cx)); - } else { - // Without OFFSET_IS_ADDR, the only remaining case we can handle is total absence of - // provenance. - if self.provenance.range_empty(range, cx) { - return Ok(Scalar::from_uint(bits, range.size)); - } - // Else we have mixed provenance, that doesn't work. + break 'prov Some(joint_prov); + }; + if prov.is_none() && !Prov::OFFSET_IS_ADDR { + // There are some bytes with provenance here but overall the provenance does not add up. + // We need `OFFSET_IS_ADDR` to fall back to no-provenance here; without that option, we must error. return Err(AllocError::ReadPartialPointer(range.start)); } + // We can use this provenance. + let ptr = Pointer::new(prov, Size::from_bytes(bits)); + return Ok(Scalar::from_maybe_pointer(ptr, cx)); } else { // We are *not* reading a pointer. // If we can just ignore provenance or there is none, that's easy. @@ -782,7 +796,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> let endian = cx.data_layout().endian; // Yes we do overwrite all the bytes in `dst`. - let dst = self.get_bytes_unchecked_for_overwrite(cx, range)?; + let dst = self.get_bytes_unchecked_for_overwrite(cx, range); write_target_uint(endian, dst, bytes).unwrap(); // See if we have to also store some provenance. @@ -795,10 +809,9 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> } /// Write "uninit" to the given memory range. - pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { + pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) { self.mark_init(range, false); - self.provenance.clear(range, cx)?; - Ok(()) + self.provenance.clear(range, cx); } /// Mark all bytes in the given range as initialised and reset the provenance @@ -817,9 +830,12 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> } /// Remove all provenance in the given memory range. - pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { - self.provenance.clear(range, cx)?; - return Ok(()); + pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) { + self.provenance.clear(range, cx); + } + + pub fn provenance_merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool { + self.provenance.merge_bytes(cx) } /// Applies a previously prepared provenance copy. diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs index 119d4be64e6..ea8596ea286 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -10,7 +10,7 @@ use rustc_macros::HashStable; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use tracing::trace; -use super::{AllocError, AllocRange, AllocResult, CtfeProvenance, Provenance, alloc_range}; +use super::{AllocRange, CtfeProvenance, Provenance, alloc_range}; /// Stores the provenance information of pointers stored in memory. #[derive(Clone, PartialEq, Eq, Hash, Debug)] @@ -19,25 +19,25 @@ pub struct ProvenanceMap<Prov = CtfeProvenance> { /// `Provenance` in this map applies from the given offset for an entire pointer-size worth of /// bytes. Two entries in this map are always at least a pointer size apart. 
ptrs: SortedMap<Size, Prov>, - /// Provenance in this map only applies to the given single byte. - /// This map is disjoint from the previous. It will always be empty when - /// `Prov::OFFSET_IS_ADDR` is false. - bytes: Option<Box<SortedMap<Size, Prov>>>, + /// This stores byte-sized provenance fragments. + /// The `u8` indicates the position of this byte inside its original pointer. + /// If the bytes are re-assembled in their original order, the pointer can be used again. + /// Wildcard provenance is allowed to have index 0 everywhere. + bytes: Option<Box<SortedMap<Size, (Prov, u8)>>>, } // These impls are generic over `Prov` since `CtfeProvenance` is only decodable/encodable // for some particular `D`/`S`. impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> { fn decode(d: &mut D) -> Self { - assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized + // `bytes` is not in the serialized format Self { ptrs: Decodable::decode(d), bytes: None } } } impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> { fn encode(&self, s: &mut S) { let Self { ptrs, bytes } = self; - assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized - debug_assert!(bytes.is_none()); // without `OFFSET_IS_ADDR`, this is always empty + assert!(bytes.is_none()); // interning refuses allocations with pointer fragments ptrs.encode(s) } } @@ -58,10 +58,10 @@ impl ProvenanceMap { /// Give access to the ptr-sized provenances (which can also be thought of as relocations, and /// indeed that is how codegen treats them). /// - /// Only exposed with `CtfeProvenance` provenance, since it panics if there is bytewise provenance. + /// Only use on interned allocations, as other allocations may have per-byte provenance! #[inline] pub fn ptrs(&self) -> &SortedMap<Size, CtfeProvenance> { - debug_assert!(self.bytes.is_none()); // `CtfeProvenance::OFFSET_IS_ADDR` is false so this cannot fail + assert!(self.bytes.is_none(), "`ptrs()` called on non-interned allocation"); &self.ptrs } } @@ -88,12 +88,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } /// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster. - pub(super) fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { + fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx)) } /// Returns all byte-wise provenance in the given range. - fn range_bytes_get(&self, range: AllocRange) -> &[(Size, Prov)] { + fn range_bytes_get(&self, range: AllocRange) -> &[(Size, (Prov, u8))] { if let Some(bytes) = self.bytes.as_ref() { bytes.range(range.start..range.end()) } else { @@ -107,19 +107,47 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } /// Get the provenance of a single byte. - pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> { + pub fn get_byte(&self, offset: Size, cx: &impl HasDataLayout) -> Option<(Prov, u8)> { let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx); debug_assert!(prov.len() <= 1); if let Some(entry) = prov.first() { // If it overlaps with this byte, it is on this byte. debug_assert!(self.bytes.as_ref().is_none_or(|b| !b.contains_key(&offset))); - Some(entry.1) + Some((entry.1, (offset - entry.0).bytes() as u8)) } else { // Look up per-byte provenance. 
self.bytes.as_ref().and_then(|b| b.get(&offset).copied()) } } + /// Attempt to merge per-byte provenance back into ptr chunks, if the right fragments + /// sit next to each other. Return `false` is that is not possible due to partial pointers. + pub fn merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool { + let Some(bytes) = self.bytes.as_deref_mut() else { + return true; + }; + let ptr_size = cx.data_layout().pointer_size(); + while let Some((offset, (prov, _))) = bytes.iter().next().copied() { + // Check if this fragment starts a pointer. + let range = offset..offset + ptr_size; + let frags = bytes.range(range.clone()); + if frags.len() != ptr_size.bytes_usize() { + return false; + } + for (idx, (_offset, (frag_prov, frag_idx))) in frags.iter().copied().enumerate() { + if frag_prov != prov || frag_idx != idx as u8 { + return false; + } + } + // Looks like a pointer! Move it over to the ptr provenance map. + bytes.remove_range(range); + self.ptrs.insert(offset, prov); + } + // We managed to convert everything into whole pointers. + self.bytes = None; + true + } + /// Check if there is ptr-sized provenance at the given index. /// Does not mean anything for bytewise provenance! But can be useful as an optimization. pub fn get_ptr(&self, offset: Size) -> Option<Prov> { @@ -137,7 +165,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Yields all the provenances stored in this map. pub fn provenances(&self) -> impl Iterator<Item = Prov> { - let bytes = self.bytes.iter().flat_map(|b| b.values()); + let bytes = self.bytes.iter().flat_map(|b| b.values().map(|(p, _i)| p)); self.ptrs.values().chain(bytes).copied() } @@ -148,16 +176,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Removes all provenance inside the given range. /// If there is provenance overlapping with the edges, might result in an error. - pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult { + pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) { let start = range.start; let end = range.end(); // Clear the bytewise part -- this is easy. - if Prov::OFFSET_IS_ADDR { - if let Some(bytes) = self.bytes.as_mut() { - bytes.remove_range(start..end); - } - } else { - debug_assert!(self.bytes.is_none()); + if let Some(bytes) = self.bytes.as_mut() { + bytes.remove_range(start..end); } let pointer_size = cx.data_layout().pointer_size(); @@ -168,7 +192,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Find all provenance overlapping the given range. if self.range_ptrs_is_empty(range, cx) { // No provenance in this range, we are done. This is the common case. - return Ok(()); + return; } // This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much @@ -179,28 +203,20 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // We need to handle clearing the provenance from parts of a pointer. if first < start { - if !Prov::OFFSET_IS_ADDR { - // We can't split up the provenance into less than a pointer. - return Err(AllocError::OverwritePartialPointer(first)); - } // Insert the remaining part in the bytewise provenance. let prov = self.ptrs[&first]; let bytes = self.bytes.get_or_insert_with(Box::default); for offset in first..start { - bytes.insert(offset, prov); + bytes.insert(offset, (prov, (offset - first).bytes() as u8)); } } if last > end { let begin_of_last = last - pointer_size; - if !Prov::OFFSET_IS_ADDR { - // We can't split up the provenance into less than a pointer. 
- return Err(AllocError::OverwritePartialPointer(begin_of_last)); - } // Insert the remaining part in the bytewise provenance. let prov = self.ptrs[&begin_of_last]; let bytes = self.bytes.get_or_insert_with(Box::default); for offset in end..last { - bytes.insert(offset, prov); + bytes.insert(offset, (prov, (offset - begin_of_last).bytes() as u8)); } } @@ -208,8 +224,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine, // i.e., this will not remove any other provenance just after the ones we care about. self.ptrs.remove_range(first..last); - - Ok(()) } /// Overwrites all provenance in the given range with wildcard provenance. @@ -218,10 +232,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// /// Provided for usage in Miri and panics otherwise. pub fn write_wildcards(&mut self, cx: &impl HasDataLayout, range: AllocRange) { - assert!( - Prov::OFFSET_IS_ADDR, - "writing wildcard provenance is not supported when `OFFSET_IS_ADDR` is false" - ); let wildcard = Prov::WILDCARD.unwrap(); let bytes = self.bytes.get_or_insert_with(Box::default); @@ -229,21 +239,22 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Remove pointer provenances that overlap with the range, then readd the edge ones bytewise. let ptr_range = Self::adjusted_range_ptrs(range, cx); let ptrs = self.ptrs.range(ptr_range.clone()); - if let Some((offset, prov)) = ptrs.first() { - for byte_ofs in *offset..range.start { - bytes.insert(byte_ofs, *prov); + if let Some((offset, prov)) = ptrs.first().copied() { + for byte_ofs in offset..range.start { + bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8)); } } - if let Some((offset, prov)) = ptrs.last() { - for byte_ofs in range.end()..*offset + cx.data_layout().pointer_size() { - bytes.insert(byte_ofs, *prov); + if let Some((offset, prov)) = ptrs.last().copied() { + for byte_ofs in range.end()..offset + cx.data_layout().pointer_size() { + bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8)); } } self.ptrs.remove_range(ptr_range); // Overwrite bytewise provenance. for offset in range.start..range.end() { - bytes.insert(offset, wildcard); + // The fragment index does not matter for wildcard provenance. + bytes.insert(offset, (wildcard, 0)); } } } @@ -253,7 +264,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Offsets are already adjusted to the destination allocation. pub struct ProvenanceCopy<Prov> { dest_ptrs: Option<Box<[(Size, Prov)]>>, - dest_bytes: Option<Box<[(Size, Prov)]>>, + dest_bytes: Option<Box<[(Size, (Prov, u8))]>>, } impl<Prov: Provenance> ProvenanceMap<Prov> { @@ -263,7 +274,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest: Size, count: u64, cx: &impl HasDataLayout, - ) -> AllocResult<ProvenanceCopy<Prov>> { + ) -> ProvenanceCopy<Prov> { let shift_offset = move |idx, offset| { // compute offset for current repetition let dest_offset = dest + src.size * idx; // `Size` operations @@ -301,24 +312,16 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { let mut dest_bytes_box = None; let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first(); let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first(); - if !Prov::OFFSET_IS_ADDR { - // There can't be any bytewise provenance, and we cannot split up the begin/end overlap. 
- if let Some(entry) = begin_overlap { - return Err(AllocError::ReadPartialPointer(entry.0)); - } - if let Some(entry) = end_overlap { - return Err(AllocError::ReadPartialPointer(entry.0)); - } - debug_assert!(self.bytes.is_none()); - } else { - let mut bytes = Vec::new(); + // We only need to go here if there is some overlap or some bytewise provenance. + if begin_overlap.is_some() || end_overlap.is_some() || self.bytes.is_some() { + let mut bytes: Vec<(Size, (Prov, u8))> = Vec::new(); // First, if there is a part of a pointer at the start, add that. if let Some(entry) = begin_overlap { trace!("start overlapping entry: {entry:?}"); // For really small copies, make sure we don't run off the end of the `src` range. let entry_end = cmp::min(entry.0 + ptr_size, src.end()); for offset in src.start..entry_end { - bytes.push((offset, entry.1)); + bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8))); } } else { trace!("no start overlapping entry"); @@ -334,8 +337,9 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { let entry_start = cmp::max(entry.0, src.start); for offset in entry_start..src.end() { if bytes.last().is_none_or(|bytes_entry| bytes_entry.0 < offset) { - // The last entry, if it exists, has a lower offset than us. - bytes.push((offset, entry.1)); + // The last entry, if it exists, has a lower offset than us, so we + // can add it at the end and remain sorted. + bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8))); } else { // There already is an entry for this offset in there! This can happen when the // start and end range checks actually end up hitting the same pointer, so we @@ -358,7 +362,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest_bytes_box = Some(dest_bytes.into_boxed_slice()); } - Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box }) + ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box } } /// Applies a provenance copy. @@ -368,14 +372,10 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { if let Some(dest_ptrs) = copy.dest_ptrs { self.ptrs.insert_presorted(dest_ptrs.into()); } - if Prov::OFFSET_IS_ADDR { - if let Some(dest_bytes) = copy.dest_bytes - && !dest_bytes.is_empty() - { - self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into()); - } - } else { - debug_assert!(copy.dest_bytes.is_none()); + if let Some(dest_bytes) = copy.dest_bytes + && !dest_bytes.is_empty() + { + self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into()); } } } diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 77427a12d11..7c72a8ec243 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -582,9 +582,6 @@ pub enum UnsupportedOpInfo { // // The variants below are only reachable from CTFE/const prop, miri will never emit them. // - /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state - /// cannot be represented by the CTFE interpreter. - OverwritePartialPointer(Pointer<AllocId>), /// Attempting to read or copy parts of a pointer to somewhere else; without knowing absolute /// addresses, the resulting state cannot be represented by the CTFE interpreter. 
ReadPartialPointer(Pointer<AllocId>), diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index e25ff7651f6..c581b2ebf09 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -56,7 +56,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {} /// mostly opaque; the `Machine` trait extends it with some more operations that also have access to /// some global state. /// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`. -pub trait Provenance: Copy + fmt::Debug + 'static { +pub trait Provenance: Copy + PartialEq + fmt::Debug + 'static { /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address. /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are /// different from what the Abstract Machine prescribes, so the interpreter must prevent any @@ -79,7 +79,7 @@ pub trait Provenance: Copy + fmt::Debug + 'static { fn get_alloc_id(self) -> Option<AllocId>; /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance. - fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>; + fn join(left: Self, right: Self) -> Option<Self>; } /// The type of provenance in the compile-time interpreter. @@ -192,8 +192,8 @@ impl Provenance for CtfeProvenance { Some(self.alloc_id()) } - fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> { - panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false") + fn join(left: Self, right: Self) -> Option<Self> { + if left == right { Some(left) } else { None } } } @@ -224,8 +224,8 @@ impl Provenance for AllocId { Some(self) } - fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> { - panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false") + fn join(_left: Self, _right: Self) -> Option<Self> { + unreachable!() } } diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index c55c7fc6002..c977e5329c2 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -115,48 +115,6 @@ impl MirPhase { MirPhase::Runtime(runtime_phase) => (3, 1 + runtime_phase as usize), } } - - /// Parses a `MirPhase` from a pair of strings. Panics if this isn't possible for any reason. 
- pub fn parse(dialect: String, phase: Option<String>) -> Self { - match &*dialect.to_ascii_lowercase() { - "built" => { - assert!(phase.is_none(), "Cannot specify a phase for `Built` MIR"); - MirPhase::Built - } - "analysis" => Self::Analysis(AnalysisPhase::parse(phase)), - "runtime" => Self::Runtime(RuntimePhase::parse(phase)), - _ => bug!("Unknown MIR dialect: '{}'", dialect), - } - } -} - -impl AnalysisPhase { - pub fn parse(phase: Option<String>) -> Self { - let Some(phase) = phase else { - return Self::Initial; - }; - - match &*phase.to_ascii_lowercase() { - "initial" => Self::Initial, - "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup, - _ => bug!("Unknown analysis phase: '{}'", phase), - } - } -} - -impl RuntimePhase { - pub fn parse(phase: Option<String>) -> Self { - let Some(phase) = phase else { - return Self::Initial; - }; - - match &*phase.to_ascii_lowercase() { - "initial" => Self::Initial, - "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup, - "optimized" => Self::Optimized, - _ => bug!("Unknown runtime phase: '{}'", phase), - } - } } /// Where a specific `mir::Body` comes from. diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index e5864660575..533066bdef9 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -2,7 +2,6 @@ use std::borrow::Cow; use std::fmt; use std::hash::Hash; -use rustc_ast::expand::autodiff_attrs::AutoDiffItem; use rustc_data_structures::base_n::{BaseNString, CASE_INSENSITIVE, ToBaseN}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxIndexMap; @@ -10,9 +9,8 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHas use rustc_data_structures::unord::UnordMap; use rustc_hashes::Hash128; use rustc_hir::ItemId; -use rustc_hir::attrs::InlineAttr; +use rustc_hir::attrs::{InlineAttr, Linkage}; use rustc_hir::def_id::{CrateNum, DefId, DefIdSet, LOCAL_CRATE}; -use rustc_index::Idx; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; use rustc_query_system::ich::StableHashingContext; use rustc_session::config::OptLevel; @@ -151,7 +149,7 @@ impl<'tcx> MonoItem<'tcx> { // instantiation: // We emit an unused_attributes lint for this case, which should be kept in sync if possible. let codegen_fn_attrs = tcx.codegen_instance_attrs(instance.def); - if codegen_fn_attrs.contains_extern_indicator() + if codegen_fn_attrs.contains_extern_indicator(tcx, instance.def.def_id()) || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { return InstantiationMode::GloballyShared { may_conflict: false }; @@ -337,7 +335,6 @@ impl ToStableHashKey<StableHashingContext<'_>> for MonoItem<'_> { pub struct MonoItemPartitions<'tcx> { pub codegen_units: &'tcx [CodegenUnit<'tcx>], pub all_mono_items: &'tcx DefIdSet, - pub autodiff_items: &'tcx [AutoDiffItem], } #[derive(Debug, HashStable)] @@ -369,22 +366,6 @@ pub struct MonoItemData { pub size_estimate: usize, } -/// Specifies the linkage type for a `MonoItem`. -/// -/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants. -#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)] -pub enum Linkage { - External, - AvailableExternally, - LinkOnceAny, - LinkOnceODR, - WeakAny, - WeakODR, - Internal, - ExternalWeak, - Common, -} - /// Specifies the symbol visibility with regards to dynamic linking. /// /// Visibility doesn't have any effect when linkage is internal. 
@@ -526,45 +507,68 @@ impl<'tcx> CodegenUnit<'tcx> { tcx: TyCtxt<'tcx>, ) -> Vec<(MonoItem<'tcx>, MonoItemData)> { // The codegen tests rely on items being process in the same order as - // they appear in the file, so for local items, we sort by node_id first + // they appear in the file, so for local items, we sort by span first #[derive(PartialEq, Eq, PartialOrd, Ord)] - struct ItemSortKey<'tcx>(Option<usize>, SymbolName<'tcx>); - + struct ItemSortKey<'tcx>(Option<Span>, SymbolName<'tcx>); + + // We only want to take HirIds of user-defines instances into account. + // The others don't matter for the codegen tests and can even make item + // order unstable. + fn local_item_id<'tcx>(item: MonoItem<'tcx>) -> Option<DefId> { + match item { + MonoItem::Fn(ref instance) => match instance.def { + InstanceKind::Item(def) => def.as_local().map(|_| def), + InstanceKind::VTableShim(..) + | InstanceKind::ReifyShim(..) + | InstanceKind::Intrinsic(..) + | InstanceKind::FnPtrShim(..) + | InstanceKind::Virtual(..) + | InstanceKind::ClosureOnceShim { .. } + | InstanceKind::ConstructCoroutineInClosureShim { .. } + | InstanceKind::DropGlue(..) + | InstanceKind::CloneShim(..) + | InstanceKind::ThreadLocalShim(..) + | InstanceKind::FnPtrAddrShim(..) + | InstanceKind::AsyncDropGlue(..) + | InstanceKind::FutureDropPollShim(..) + | InstanceKind::AsyncDropGlueCtorShim(..) => None, + }, + MonoItem::Static(def_id) => def_id.as_local().map(|_| def_id), + MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id.to_def_id()), + } + } fn item_sort_key<'tcx>(tcx: TyCtxt<'tcx>, item: MonoItem<'tcx>) -> ItemSortKey<'tcx> { ItemSortKey( - match item { - MonoItem::Fn(ref instance) => { - match instance.def { - // We only want to take HirIds of user-defined - // instances into account. The others don't matter for - // the codegen tests and can even make item order - // unstable. - InstanceKind::Item(def) => def.as_local().map(Idx::index), - InstanceKind::VTableShim(..) - | InstanceKind::ReifyShim(..) - | InstanceKind::Intrinsic(..) - | InstanceKind::FnPtrShim(..) - | InstanceKind::Virtual(..) - | InstanceKind::ClosureOnceShim { .. } - | InstanceKind::ConstructCoroutineInClosureShim { .. } - | InstanceKind::DropGlue(..) - | InstanceKind::CloneShim(..) - | InstanceKind::ThreadLocalShim(..) - | InstanceKind::FnPtrAddrShim(..) - | InstanceKind::AsyncDropGlue(..) - | InstanceKind::FutureDropPollShim(..) - | InstanceKind::AsyncDropGlueCtorShim(..) => None, - } - } - MonoItem::Static(def_id) => def_id.as_local().map(Idx::index), - MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id.index()), - }, + local_item_id(item) + .map(|def_id| tcx.def_span(def_id).find_ancestor_not_from_macro()) + .flatten(), item.symbol_name(tcx), ) } let mut items: Vec<_> = self.items().iter().map(|(&i, &data)| (i, data)).collect(); - items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i)); + if !tcx.sess.opts.unstable_opts.codegen_source_order { + // In this case, we do not need to keep the items in any specific order, as the input + // is already deterministic. + // + // However, it seems that moving related things (such as different + // monomorphizations of the same function) close to one another is actually beneficial + // for LLVM performance. + // LLVM will codegen the items in the order we pass them to it, and when it handles + // similar things in succession, it seems that it leads to better cache utilization, + // less branch mispredictions and in general to better performance. 
+ // For example, if we have functions `a`, `c::<u32>`, `b`, `c::<i16>`, `d` and + // `c::<bool>`, it seems that it helps LLVM's performance to codegen the three `c` + // instantiations right after one another, as they will likely reference similar types, + // call similar functions, etc. + // + // See https://github.com/rust-lang/rust/pull/145358 for more details. + // + // Sorting by symbol name should not incur any new non-determinism. + items.sort_by_cached_key(|&(i, _)| i.symbol_name(tcx)); + } else { + items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i)); + } items } diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 84abcf550d2..128ae8549f7 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -929,7 +929,10 @@ impl<'tcx> TerminatorKind<'tcx> { } Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"), Unreachable => write!(fmt, "unreachable"), - Drop { place, .. } => write!(fmt, "drop({place:?})"), + Drop { place, async_fut: None, .. } => write!(fmt, "drop({place:?})"), + Drop { place, async_fut: Some(async_fut), .. } => { + write!(fmt, "async drop({place:?}; poll={async_fut:?})") + } Call { func, args, destination, .. } => { write!(fmt, "{destination:?} = ")?; write!(fmt, "{func:?}(")?; @@ -1789,7 +1792,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( ascii.push('╼'); i += ptr_size; } - } else if let Some(prov) = alloc.provenance().get(i, &tcx) { + } else if let Some((prov, idx)) = alloc.provenance().get_byte(i, &tcx) { // Memory with provenance must be defined assert!( alloc.init_mask().is_range_initialized(alloc_range(i, Size::from_bytes(1))).is_ok() @@ -1799,7 +1802,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( // Format is similar to "oversized" above. let j = i.bytes_usize(); let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0]; - write!(w, "╾{c:02x}{prov:#?} (1 ptr byte)╼")?; + write!(w, "╾{c:02x}{prov:#?} (ptr fragment {idx})╼")?; i += Size::from_bytes(1); } else if alloc .init_mask() @@ -1932,7 +1935,7 @@ fn pretty_print_const_value_tcx<'tcx>( let args = tcx.lift(args).unwrap(); let mut p = FmtPrinter::new(tcx, Namespace::ValueNS); p.print_alloc_ids = true; - p.print_value_path(variant_def.def_id, args)?; + p.pretty_print_value_path(variant_def.def_id, args)?; fmt.write_str(&p.into_buffer())?; match variant_def.ctor_kind() { @@ -1974,7 +1977,7 @@ fn pretty_print_const_value_tcx<'tcx>( (ConstValue::ZeroSized, ty::FnDef(d, s)) => { let mut p = FmtPrinter::new(tcx, Namespace::ValueNS); p.print_alloc_ids = true; - p.print_value_path(*d, s)?; + p.pretty_print_value_path(*d, s)?; fmt.write_str(&p.into_buffer())?; return Ok(()); } diff --git a/compiler/rustc_middle/src/mir/statement.rs b/compiler/rustc_middle/src/mir/statement.rs index 683d7b48617..6e52bc775ef 100644 --- a/compiler/rustc_middle/src/mir/statement.rs +++ b/compiler/rustc_middle/src/mir/statement.rs @@ -160,7 +160,11 @@ impl<'tcx> PlaceTy<'tcx> { /// Convenience wrapper around `projection_ty_core` for `PlaceElem`, /// where we can just use the `Ty` that is already stored inline on /// field projection elems. 
- pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> { + pub fn projection_ty<V: ::std::fmt::Debug>( + self, + tcx: TyCtxt<'tcx>, + elem: ProjectionElem<V, Ty<'tcx>>, + ) -> PlaceTy<'tcx> { self.projection_ty_core(tcx, &elem, |ty| ty, |_, _, _, ty| ty, |ty| ty) } @@ -290,6 +294,36 @@ impl<V, T> ProjectionElem<V, T> { Self::UnwrapUnsafeBinder(..) => false, } } + + /// Returns the `ProjectionKind` associated to this projection. + pub fn kind(self) -> ProjectionKind { + self.try_map(|_| Some(()), |_| ()).unwrap() + } + + /// Apply functions to types and values in this projection and return the result. + pub fn try_map<V2, T2>( + self, + v: impl FnOnce(V) -> Option<V2>, + t: impl FnOnce(T) -> T2, + ) -> Option<ProjectionElem<V2, T2>> { + Some(match self { + ProjectionElem::Deref => ProjectionElem::Deref, + ProjectionElem::Downcast(name, read_variant) => { + ProjectionElem::Downcast(name, read_variant) + } + ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, t(ty)), + ProjectionElem::ConstantIndex { offset, min_length, from_end } => { + ProjectionElem::ConstantIndex { offset, min_length, from_end } + } + ProjectionElem::Subslice { from, to, from_end } => { + ProjectionElem::Subslice { from, to, from_end } + } + ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(t(ty)), + ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(t(ty)), + ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(t(ty)), + ProjectionElem::Index(val) => ProjectionElem::Index(v(val)?), + }) + } } /// Alias for projections as they appear in `UserTypeProjection`, where we diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 42a68b29ec7..904d78d69b6 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -531,13 +531,20 @@ macro_rules! make_mir_visitor { unwind: _, replace: _, drop: _, - async_fut: _, + async_fut, } => { self.visit_place( place, PlaceContext::MutatingUse(MutatingUseContext::Drop), location ); + if let Some(async_fut) = async_fut { + self.visit_local( + $(&$mutability)? *async_fut, + PlaceContext::MutatingUse(MutatingUseContext::Borrow), + location + ); + } } TerminatorKind::Call { diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index a8b357bf105..ea62461ebeb 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -343,6 +343,7 @@ trivial! { rustc_span::Symbol, rustc_span::Ident, rustc_target::spec::PanicStrategy, + rustc_target::spec::SanitizerSet, rustc_type_ir::Variance, u32, usize, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index a4589576594..d4f88c458a8 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -100,7 +100,7 @@ use rustc_session::lint::LintExpectationId; use rustc_span::def_id::LOCAL_CRATE; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, Span, Symbol}; -use rustc_target::spec::PanicStrategy; +use rustc_target::spec::{PanicStrategy, SanitizerSet}; use {rustc_abi as abi, rustc_ast as ast, rustc_hir as hir}; use crate::infer::canonical::{self, Canonical}; @@ -1116,6 +1116,11 @@ rustc_queries! { } /// Unsafety-check this `LocalDefId`. + query check_transmutes(key: LocalDefId) { + desc { |tcx| "check transmute calls inside `{}`", tcx.def_path_str(key) } + } + + /// Unsafety-check this `LocalDefId`. 
query check_unsafety(key: LocalDefId) { desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key) } } @@ -1864,6 +1869,12 @@ rustc_queries! { feedable } + /// Returns whether the field corresponding to the `DefId` has a default field value. + query default_field(def_id: DefId) -> Option<DefId> { + desc { |tcx| "looking up the `const` corresponding to the default for `{}`", tcx.def_path_str(def_id) } + separate_provide_extern + } + query check_well_formed(key: LocalDefId) -> Result<(), ErrorGuaranteed> { desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key) } return_result_from_ensure_ok @@ -2680,6 +2691,16 @@ rustc_queries! { desc { |tcx| "looking up anon const kind of `{}`", tcx.def_path_str(def_id) } separate_provide_extern } + + /// Checks for the nearest `#[sanitize(xyz = "off")]` or + /// `#[sanitize(xyz = "on")]` on this def and any enclosing defs, up to the + /// crate root. + /// + /// Returns the set of sanitizers that is explicitly disabled for this def. + query disabled_sanitizers_for(key: LocalDefId) -> SanitizerSet { + desc { |tcx| "checking what set of sanitizers are enabled on `{}`", tcx.def_path_str(key) } + feedable + } } rustc_with_all_queries! { define_callbacks! } diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs index a7ac3442898..546791135ba 100644 --- a/compiler/rustc_middle/src/query/on_disk_cache.rs +++ b/compiler/rustc_middle/src/query/on_disk_cache.rs @@ -645,34 +645,29 @@ impl<'a, 'tcx> SpanDecoder for CacheDecoder<'a, 'tcx> { let parent = Option::<LocalDefId>::decode(self); let tag: u8 = Decodable::decode(self); - if tag == TAG_PARTIAL_SPAN { - return Span::new(BytePos(0), BytePos(0), ctxt, parent); - } else if tag == TAG_RELATIVE_SPAN { - let dlo = u32::decode(self); - let dto = u32::decode(self); - - let enclosing = self.tcx.source_span_untracked(parent.unwrap()).data_untracked(); - let span = Span::new( - enclosing.lo + BytePos::from_u32(dlo), - enclosing.lo + BytePos::from_u32(dto), - ctxt, - parent, - ); - - return span; - } else { - debug_assert_eq!(tag, TAG_FULL_SPAN); - } - - let file_lo_index = SourceFileIndex::decode(self); - let line_lo = usize::decode(self); - let col_lo = RelativeBytePos::decode(self); - let len = BytePos::decode(self); - - let file_lo = self.file_index_to_file(file_lo_index); - let lo = file_lo.lines()[line_lo - 1] + col_lo; - let lo = file_lo.absolute_position(lo); - let hi = lo + len; + let (lo, hi) = match tag { + TAG_PARTIAL_SPAN => (BytePos(0), BytePos(0)), + TAG_RELATIVE_SPAN => { + let dlo = u32::decode(self); + let dto = u32::decode(self); + + let enclosing = self.tcx.source_span_untracked(parent.unwrap()).data_untracked(); + (enclosing.lo + BytePos::from_u32(dlo), enclosing.lo + BytePos::from_u32(dto)) + } + TAG_FULL_SPAN => { + let file_lo_index = SourceFileIndex::decode(self); + let line_lo = usize::decode(self); + let col_lo = RelativeBytePos::decode(self); + let len = BytePos::decode(self); + + let file_lo = self.file_index_to_file(file_lo_index); + let lo = file_lo.lines()[line_lo - 1] + col_lo; + let lo = file_lo.absolute_position(lo); + let hi = lo + len; + (lo, hi) + } + _ => unreachable!(), + }; Span::new(lo, hi, ctxt, parent) } diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index 3dd6d2c8928..f1d19800a78 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -832,17 +832,17 @@ pub enum PatKind<'tcx> { }, /// One of the following: - /// * `&str` (represented as 
a valtree), which will be handled as a string pattern and thus + /// * `&str`, which will be handled as a string pattern and thus /// exhaustiveness checking will detect if you use the same string twice in different /// patterns. - /// * integer, bool, char or float (represented as a valtree), which will be handled by + /// * integer, bool, char or float, which will be handled by /// exhaustiveness to cover exactly its own value, similar to `&str`, but these values are /// much simpler. /// * raw pointers derived from integers, other raw pointers will have already resulted in an // error. /// * `String`, if `string_deref_patterns` is enabled. Constant { - value: mir::Const<'tcx>, + value: ty::Value<'tcx>, }, /// Pattern obtained by converting a constant (inline or named) to its pattern @@ -935,7 +935,7 @@ impl<'tcx> PatRange<'tcx> { let lo_is_min = match self.lo { PatRangeBoundary::NegInfinity => true, PatRangeBoundary::Finite(value) => { - let lo = value.try_to_bits(size).unwrap() ^ bias; + let lo = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias; lo <= min } PatRangeBoundary::PosInfinity => false, @@ -944,7 +944,7 @@ impl<'tcx> PatRange<'tcx> { let hi_is_max = match self.hi { PatRangeBoundary::NegInfinity => false, PatRangeBoundary::Finite(value) => { - let hi = value.try_to_bits(size).unwrap() ^ bias; + let hi = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias; hi > max || hi == max && self.end == RangeEnd::Included } PatRangeBoundary::PosInfinity => true, @@ -957,22 +957,17 @@ impl<'tcx> PatRange<'tcx> { } #[inline] - pub fn contains( - &self, - value: mir::Const<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<bool> { + pub fn contains(&self, value: ty::Value<'tcx>, tcx: TyCtxt<'tcx>) -> Option<bool> { use Ordering::*; - debug_assert_eq!(self.ty, value.ty()); + debug_assert_eq!(value.ty, self.ty); let ty = self.ty; - let value = PatRangeBoundary::Finite(value); + let value = PatRangeBoundary::Finite(value.valtree); // For performance, it's important to only do the second comparison if necessary. Some( - match self.lo.compare_with(value, ty, tcx, typing_env)? { + match self.lo.compare_with(value, ty, tcx)? { Less | Equal => true, Greater => false, - } && match value.compare_with(self.hi, ty, tcx, typing_env)? { + } && match value.compare_with(self.hi, ty, tcx)? { Less => true, Equal => self.end == RangeEnd::Included, Greater => false, @@ -981,21 +976,16 @@ impl<'tcx> PatRange<'tcx> { } #[inline] - pub fn overlaps( - &self, - other: &Self, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<bool> { + pub fn overlaps(&self, other: &Self, tcx: TyCtxt<'tcx>) -> Option<bool> { use Ordering::*; debug_assert_eq!(self.ty, other.ty); // For performance, it's important to only do the second comparison if necessary. Some( - match other.lo.compare_with(self.hi, self.ty, tcx, typing_env)? { + match other.lo.compare_with(self.hi, self.ty, tcx)? { Less => true, Equal => self.end == RangeEnd::Included, Greater => false, - } && match self.lo.compare_with(other.hi, self.ty, tcx, typing_env)? { + } && match self.lo.compare_with(other.hi, self.ty, tcx)? 
{ Less => true, Equal => other.end == RangeEnd::Included, Greater => false, @@ -1006,11 +996,13 @@ impl<'tcx> PatRange<'tcx> { impl<'tcx> fmt::Display for PatRange<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let PatRangeBoundary::Finite(value) = &self.lo { + if let &PatRangeBoundary::Finite(valtree) = &self.lo { + let value = ty::Value { ty: self.ty, valtree }; write!(f, "{value}")?; } - if let PatRangeBoundary::Finite(value) = &self.hi { + if let &PatRangeBoundary::Finite(valtree) = &self.hi { write!(f, "{}", self.end)?; + let value = ty::Value { ty: self.ty, valtree }; write!(f, "{value}")?; } else { // `0..` is parsed as an inclusive range, we must display it correctly. @@ -1024,7 +1016,8 @@ impl<'tcx> fmt::Display for PatRange<'tcx> { /// If present, the const must be of a numeric type. #[derive(Copy, Clone, Debug, PartialEq, HashStable, TypeVisitable)] pub enum PatRangeBoundary<'tcx> { - Finite(mir::Const<'tcx>), + /// The type of this valtree is stored in the surrounding `PatRange`. + Finite(ty::ValTree<'tcx>), NegInfinity, PosInfinity, } @@ -1035,20 +1028,15 @@ impl<'tcx> PatRangeBoundary<'tcx> { matches!(self, Self::Finite(..)) } #[inline] - pub fn as_finite(self) -> Option<mir::Const<'tcx>> { + pub fn as_finite(self) -> Option<ty::ValTree<'tcx>> { match self { Self::Finite(value) => Some(value), Self::NegInfinity | Self::PosInfinity => None, } } - pub fn eval_bits( - self, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> u128 { + pub fn to_bits(self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> u128 { match self { - Self::Finite(value) => value.eval_bits(tcx, typing_env), + Self::Finite(value) => value.try_to_scalar_int().unwrap().to_bits_unchecked(), Self::NegInfinity => { // Unwrap is ok because the type is known to be numeric. ty.numeric_min_and_max_as_bits(tcx).unwrap().0 @@ -1060,14 +1048,8 @@ impl<'tcx> PatRangeBoundary<'tcx> { } } - #[instrument(skip(tcx, typing_env), level = "debug", ret)] - pub fn compare_with( - self, - other: Self, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<Ordering> { + #[instrument(skip(tcx), level = "debug", ret)] + pub fn compare_with(self, other: Self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<Ordering> { use PatRangeBoundary::*; match (self, other) { // When comparing with infinities, we must remember that `0u8..` and `0u8..=255` @@ -1095,8 +1077,8 @@ impl<'tcx> PatRangeBoundary<'tcx> { _ => {} } - let a = self.eval_bits(ty, tcx, typing_env); - let b = other.eval_bits(ty, tcx, typing_env); + let a = self.to_bits(ty, tcx); + let b = other.to_bits(ty, tcx); match ty.kind() { ty::Float(ty::FloatTy::F16) => { diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs index 5bdde3a514e..32f91bfba6b 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -760,6 +760,9 @@ pub enum DynCompatibilityViolation { // Supertrait has a non-lifetime `for<T>` binder. SupertraitNonLifetimeBinder(SmallVec<[Span; 1]>), + // Trait has a `const Trait` supertrait. + SupertraitConst(SmallVec<[Span; 1]>), + /// Method has something illegal. 
Method(Symbol, MethodViolationCode, Span), @@ -785,6 +788,9 @@ impl DynCompatibilityViolation { DynCompatibilityViolation::SupertraitNonLifetimeBinder(_) => { "where clause cannot reference non-lifetime `for<...>` variables".into() } + DynCompatibilityViolation::SupertraitConst(_) => { + "it cannot have a `const` supertrait".into() + } DynCompatibilityViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => { format!("associated function `{name}` has no `self` parameter").into() } @@ -842,7 +848,8 @@ impl DynCompatibilityViolation { match self { DynCompatibilityViolation::SizedSelf(_) | DynCompatibilityViolation::SupertraitSelf(_) - | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..) => { + | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..) + | DynCompatibilityViolation::SupertraitConst(_) => { DynCompatibilityViolationSolution::None } DynCompatibilityViolation::Method( @@ -873,15 +880,17 @@ impl DynCompatibilityViolation { match self { DynCompatibilityViolation::SupertraitSelf(spans) | DynCompatibilityViolation::SizedSelf(spans) - | DynCompatibilityViolation::SupertraitNonLifetimeBinder(spans) => spans.clone(), + | DynCompatibilityViolation::SupertraitNonLifetimeBinder(spans) + | DynCompatibilityViolation::SupertraitConst(spans) => spans.clone(), DynCompatibilityViolation::AssocConst(_, span) | DynCompatibilityViolation::GAT(_, span) - | DynCompatibilityViolation::Method(_, _, span) - if *span != DUMMY_SP => - { - smallvec![*span] + | DynCompatibilityViolation::Method(_, _, span) => { + if *span != DUMMY_SP { + smallvec![*span] + } else { + smallvec![] + } } - _ => smallvec![], } } } diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index d95006dcf4a..a14e47d7082 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -2,10 +2,12 @@ use std::fmt; use std::ops::Deref; use rustc_data_structures::intern::Interned; +use rustc_hir::def::Namespace; use rustc_macros::{HashStable, Lift, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable}; use super::ScalarInt; use crate::mir::interpret::{ErrorHandled, Scalar}; +use crate::ty::print::{FmtPrinter, PrettyPrinter}; use crate::ty::{self, Ty, TyCtxt}; /// This datastructure is used to represent the value of constants used in the type system. @@ -133,6 +135,8 @@ pub type ConstToValTreeResult<'tcx> = Result<Result<ValTree<'tcx>, Ty<'tcx>>, Er /// A type-level constant value. /// /// Represents a typed, fully evaluated constant. +/// Note that this is also used by pattern elaboration to represent values which cannot occur in types, +/// such as raw pointers and floats. 
#[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)] #[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable, Lift)] pub struct Value<'tcx> { @@ -203,3 +207,14 @@ impl<'tcx> rustc_type_ir::inherent::ValueConst<TyCtxt<'tcx>> for Value<'tcx> { self.valtree } } + +impl<'tcx> fmt::Display for Value<'tcx> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ty::tls::with(move |tcx| { + let cv = tcx.lift(*self).unwrap(); + let mut p = FmtPrinter::new(tcx, Namespace::ValueNS); + p.pretty_print_const_valtree(cv, /*print_ty*/ true)?; + f.write_str(&p.into_buffer()) + }) + } +} diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs index b3042904a29..b9a6f67ab0d 100644 --- a/compiler/rustc_middle/src/ty/diagnostics.rs +++ b/compiler/rustc_middle/src/ty/diagnostics.rs @@ -23,7 +23,7 @@ impl IntoDiagArg for Ty<'_> { fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { ty::tls::with(|tcx| { let ty = tcx.short_string(self, path); - rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(ty)) + DiagArgValue::Str(std::borrow::Cow::Owned(ty)) }) } } @@ -32,7 +32,7 @@ impl IntoDiagArg for Instance<'_> { fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { ty::tls::with(|tcx| { let instance = tcx.short_string_namespace(self, path, Namespace::ValueNS); - rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(instance)) + DiagArgValue::Str(std::borrow::Cow::Owned(instance)) }) } } diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 73e1661106e..a7298af502e 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -32,7 +32,7 @@ use rustc_data_structures::intern::Interned; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; use rustc_data_structures::unord::{UnordMap, UnordSet}; -use rustc_errors::{Diag, ErrorGuaranteed}; +use rustc_errors::{Diag, ErrorGuaranteed, LintBuffer}; use rustc_hir::attrs::{AttributeKind, StrippedCfgItem}; use rustc_hir::def::{CtorKind, CtorOf, DefKind, DocLinkResMap, LifetimeRes, Res}; use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LocalDefIdMap}; @@ -46,7 +46,6 @@ use rustc_macros::{ }; use rustc_query_system::ich::StableHashingContext; use rustc_serialize::{Decodable, Encodable}; -use rustc_session::lint::LintBuffer; pub use rustc_session::lint::RegisteredTools; use rustc_span::hygiene::MacroKind; use rustc_span::{DUMMY_SP, ExpnId, ExpnKind, Ident, Span, Symbol, sym}; @@ -1925,21 +1924,50 @@ impl<'tcx> TyCtxt<'tcx> { self.impl_trait_ref(def_id).map(|tr| tr.skip_binder().def_id) } - /// If the given `DefId` is an associated item, returns the `DefId` of the parent trait or impl. - pub fn assoc_parent(self, def_id: DefId) -> Option<DefId> { - self.def_kind(def_id).is_assoc().then(|| self.parent(def_id)) + /// If the given `DefId` is an associated item, returns the `DefId` and `DefKind` of the parent trait or impl. + pub fn assoc_parent(self, def_id: DefId) -> Option<(DefId, DefKind)> { + if !self.def_kind(def_id).is_assoc() { + return None; + } + let parent = self.parent(def_id); + let def_kind = self.def_kind(parent); + Some((parent, def_kind)) } /// If the given `DefId` is an associated item of a trait, /// returns the `DefId` of the trait; otherwise, returns `None`. 
pub fn trait_of_assoc(self, def_id: DefId) -> Option<DefId> { - self.assoc_parent(def_id).filter(|id| self.def_kind(id) == DefKind::Trait) + match self.assoc_parent(def_id) { + Some((id, DefKind::Trait)) => Some(id), + _ => None, + } } /// If the given `DefId` is an associated item of an impl, /// returns the `DefId` of the impl; otherwise returns `None`. pub fn impl_of_assoc(self, def_id: DefId) -> Option<DefId> { - self.assoc_parent(def_id).filter(|id| matches!(self.def_kind(id), DefKind::Impl { .. })) + match self.assoc_parent(def_id) { + Some((id, DefKind::Impl { .. })) => Some(id), + _ => None, + } + } + + /// If the given `DefId` is an associated item of an inherent impl, + /// returns the `DefId` of the impl; otherwise, returns `None`. + pub fn inherent_impl_of_assoc(self, def_id: DefId) -> Option<DefId> { + match self.assoc_parent(def_id) { + Some((id, DefKind::Impl { of_trait: false })) => Some(id), + _ => None, + } + } + + /// If the given `DefId` is an associated item of a trait impl, + /// returns the `DefId` of the impl; otherwise, returns `None`. + pub fn trait_impl_of_assoc(self, def_id: DefId) -> Option<DefId> { + match self.assoc_parent(def_id) { + Some((id, DefKind::Impl { of_trait: true })) => Some(id), + _ => None, + } } pub fn is_exportable(self, def_id: DefId) -> bool { diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index efa017074db..9e6f277ef77 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -19,18 +19,16 @@ pub trait Print<'tcx, P> { fn print(&self, p: &mut P) -> Result<(), PrintError>; } -/// Interface for outputting user-facing "type-system entities" -/// (paths, types, lifetimes, constants, etc.) as a side-effect -/// (e.g. formatting, like `PrettyPrinter` implementors do) or by -/// constructing some alternative representation (e.g. an AST), -/// which the associated types allow passing through the methods. -/// -/// For pretty-printing/formatting in particular, see `PrettyPrinter`. -// -// FIXME(eddyb) find a better name; this is more general than "printing". +/// A trait that "prints" user-facing type system entities: paths, types, lifetimes, constants, +/// etc. "Printing" here means building up a representation of the entity's path, usually as a +/// `String` (e.g. "std::io::Read") or a `Vec<Symbol>` (e.g. `[sym::std, sym::io, sym::Read]`). The +/// representation is built up by appending one or more pieces. The specific details included in +/// the built-up representation depend on the purpose of the printer. The more advanced printers +/// also rely on the `PrettyPrinter` sub-trait. pub trait Printer<'tcx>: Sized { fn tcx<'a>(&'a self) -> TyCtxt<'tcx>; + /// Appends a representation of an entity with a normal path, e.g. "std::io::Read". fn print_def_path( &mut self, def_id: DefId, @@ -39,6 +37,7 @@ pub trait Printer<'tcx>: Sized { self.default_print_def_path(def_id, args) } + /// Like `print_def_path`, but for `DefPathData::Impl`. fn print_impl_path( &mut self, impl_def_id: DefId, @@ -64,46 +63,74 @@ pub trait Printer<'tcx>: Sized { self.default_print_impl_path(impl_def_id, self_ty, impl_trait_ref) } + /// Appends a representation of a region. fn print_region(&mut self, region: ty::Region<'tcx>) -> Result<(), PrintError>; + /// Appends a representation of a type. fn print_type(&mut self, ty: Ty<'tcx>) -> Result<(), PrintError>; + /// Appends a representation of a list of `PolyExistentialPredicate`s. 
fn print_dyn_existential( &mut self, predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>, ) -> Result<(), PrintError>; + /// Appends a representation of a const. fn print_const(&mut self, ct: ty::Const<'tcx>) -> Result<(), PrintError>; - fn path_crate(&mut self, cnum: CrateNum) -> Result<(), PrintError>; + /// Appends a representation of a crate name, e.g. `std`, or even ``. + fn print_crate_name(&mut self, cnum: CrateNum) -> Result<(), PrintError>; - fn path_qualified( + /// Appends a representation of a (full or partial) simple path, in two parts. `print_prefix`, + /// when called, appends the representation of the leading segments. The rest of the method + /// appends the representation of the final segment, the details of which are in + /// `disambiguated_data`. + /// + /// E.g. `std::io` + `Read` -> `std::io::Read`. + fn print_path_with_simple( &mut self, - self_ty: Ty<'tcx>, - trait_ref: Option<ty::TraitRef<'tcx>>, + print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, + disambiguated_data: &DisambiguatedDefPathData, ) -> Result<(), PrintError>; - fn path_append_impl( + /// Similar to `print_path_with_simple`, but the final segment is an `impl` segment. + /// + /// E.g. `slice` + `<impl [T]>` -> `slice::<impl [T]>`, which may then be further appended to, + /// giving a longer path representation such as `slice::<impl [T]>::to_vec_in::ConvertVec`. + fn print_path_with_impl( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError>; - fn path_append( + /// Appends a representation of a path ending in generic args, in two parts. `print_prefix`, + /// when called, appends the leading segments. The rest of the method appends the + /// representation of the generic args. (Some printers choose to skip appending the generic + /// args.) + /// + /// E.g. `ImplementsTraitForUsize` + `<usize>` -> `ImplementsTraitForUsize<usize>`. + fn print_path_with_generic_args( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, - disambiguated_data: &DisambiguatedDefPathData, + args: &[GenericArg<'tcx>], ) -> Result<(), PrintError>; - fn path_generic_args( + /// Appends a representation of a qualified path segment, e.g. `<OsString as From<&T>>`. + /// If `trait_ref` is `None`, it may fall back to simpler forms, e.g. `<Vec<T>>` or just `Foo`. 
+ fn print_path_with_qualified( &mut self, - print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, - args: &[GenericArg<'tcx>], + self_ty: Ty<'tcx>, + trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError>; - fn should_truncate(&mut self) -> bool { - false + fn print_coroutine_with_kind( + &mut self, + def_id: DefId, + parent_args: &'tcx [GenericArg<'tcx>], + kind: Ty<'tcx>, + ) -> Result<(), PrintError> { + self.print_path_with_generic_args(|p| p.print_def_path(def_id, parent_args), &[kind.into()]) } // Defaults (should not be overridden): @@ -120,7 +147,7 @@ pub trait Printer<'tcx>: Sized { match key.disambiguated_data.data { DefPathData::CrateRoot => { assert!(key.parent.is_none()); - self.path_crate(def_id.krate) + self.print_crate_name(def_id.krate) } DefPathData::Impl => self.print_impl_path(def_id, args), @@ -144,9 +171,10 @@ pub trait Printer<'tcx>: Sized { )) = self.tcx().coroutine_kind(def_id) && args.len() > parent_args.len() { - return self.path_generic_args( - |p| p.print_def_path(def_id, parent_args), - &args[..parent_args.len() + 1][..1], + return self.print_coroutine_with_kind( + def_id, + parent_args, + args[parent_args.len()].expect_ty(), ); } else { // Closures' own generics are only captures, don't print them. @@ -166,7 +194,7 @@ pub trait Printer<'tcx>: Sized { _ => { if !generics.is_own_empty() && args.len() >= generics.count() { let args = generics.own_args_no_defaults(self.tcx(), args); - return self.path_generic_args( + return self.print_path_with_generic_args( |p| p.print_def_path(def_id, parent_args), args, ); @@ -182,7 +210,7 @@ pub trait Printer<'tcx>: Sized { && self.tcx().generics_of(parent_def_id).parent_count == 0; } - self.path_append( + self.print_path_with_simple( |p: &mut Self| { if trait_qualify_parent { let trait_ref = ty::TraitRef::new( @@ -190,7 +218,7 @@ pub trait Printer<'tcx>: Sized { parent_def_id, parent_args.iter().copied(), ); - p.path_qualified(trait_ref.self_ty(), Some(trait_ref)) + p.print_path_with_qualified(trait_ref.self_ty(), Some(trait_ref)) } else { p.print_def_path(parent_def_id, parent_args) } @@ -233,11 +261,15 @@ pub trait Printer<'tcx>: Sized { // If the impl is not co-located with either self-type or // trait-type, then fallback to a format that identifies // the module more clearly. - self.path_append_impl(|p| p.print_def_path(parent_def_id, &[]), self_ty, impl_trait_ref) + self.print_path_with_impl( + |p| p.print_def_path(parent_def_id, &[]), + self_ty, + impl_trait_ref, + ) } else { // Otherwise, try to give a good form that would be valid language // syntax. Preferably using associated item notation. - self.path_qualified(self_ty, impl_trait_ref) + self.print_path_with_qualified(self_ty, impl_trait_ref) } } } diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 67244e767cb..74caee7336a 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -245,7 +245,7 @@ impl<'tcx> RegionHighlightMode<'tcx> { /// Trait for printers that pretty-print using `fmt::Write` to the printer. pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { /// Like `print_def_path` but for value paths. 
- fn print_value_path( + fn pretty_print_value_path( &mut self, def_id: DefId, args: &'tcx [GenericArg<'tcx>], @@ -253,7 +253,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { self.print_def_path(def_id, args) } - fn print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> + fn pretty_print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> where T: Print<'tcx, Self> + TypeFoldable<TyCtxt<'tcx>>, { @@ -333,10 +333,14 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { f: impl FnOnce(&mut Self) -> Result<(), PrintError>, ) -> Result<(), PrintError>; - /// Returns `true` if the region should be printed in - /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`. - /// This is typically the case for all non-`'_` regions. - fn should_print_region(&self, region: ty::Region<'tcx>) -> bool; + fn should_truncate(&mut self) -> bool { + false + } + + /// Returns `true` if the region should be printed in optional positions, + /// e.g., `&'a T` or `dyn Tr + 'b`. (Regions like the one in `Cow<'static, T>` + /// will always be printed.) + fn should_print_optional_region(&self, region: ty::Region<'tcx>) -> bool; fn reset_type_limit(&mut self) {} @@ -470,7 +474,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { // path to the crate followed by the path to the item within the crate. if let Some(cnum) = def_id.as_crate_root() { if cnum == LOCAL_CRATE { - self.path_crate(cnum)?; + self.print_crate_name(cnum)?; return Ok(true); } @@ -494,7 +498,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { // or avoid ending up with `ExternCrateSource::Extern`, // for the injected `std`/`core`. if span.is_dummy() { - self.path_crate(cnum)?; + self.print_crate_name(cnum)?; return Ok(true); } @@ -508,13 +512,13 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { return Ok(true); } (ExternCrateSource::Path, LOCAL_CRATE) => { - self.path_crate(cnum)?; + self.print_crate_name(cnum)?; return Ok(true); } _ => {} }, None => { - self.path_crate(cnum)?; + self.print_crate_name(cnum)?; return Ok(true); } } @@ -624,7 +628,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { return Ok(false); } callers.push(visible_parent); - // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid + // HACK(eddyb) this bypasses `print_path_with_simple`'s prefix printing to avoid // knowing ahead of time whether the entire path will succeed or not. // To support printers that do not implement `PrettyPrinter`, a `Vec` or // linked list on the stack would need to be built, before any printing. 
@@ -633,11 +637,14 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { true => {} } callers.pop(); - self.path_append(|_| Ok(()), &DisambiguatedDefPathData { data, disambiguator: 0 })?; + self.print_path_with_simple( + |_| Ok(()), + &DisambiguatedDefPathData { data, disambiguator: 0 }, + )?; Ok(true) } - fn pretty_path_qualified( + fn pretty_print_path_with_qualified( &mut self, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, @@ -672,7 +679,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { }) } - fn pretty_path_append_impl( + fn pretty_print_path_with_impl( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, self_ty: Ty<'tcx>, @@ -710,7 +717,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } ty::Ref(r, ty, mutbl) => { write!(self, "&")?; - if self.should_print_region(r) { + if self.should_print_optional_region(r) { r.print(self)?; write!(self, " ")?; } @@ -739,7 +746,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } sig.print(self)?; write!(self, " {{")?; - self.print_value_path(def_id, args)?; + self.pretty_print_value_path(def_id, args)?; write!(self, "}}")?; } } @@ -778,7 +785,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { }, ty::Adt(def, args) => self.print_def_path(def.did(), args)?, ty::Dynamic(data, r, repr) => { - let print_r = self.should_print_region(r); + let print_r = self.should_print_optional_region(r); if print_r { write!(self, "(")?; } @@ -1308,10 +1315,10 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { alias_ty: ty::AliasTerm<'tcx>, ) -> Result<(), PrintError> { let def_key = self.tcx().def_key(alias_ty.def_id); - self.path_generic_args( + self.print_path_with_generic_args( |p| { - p.path_append( - |p| p.path_qualified(alias_ty.self_ty(), None), + p.print_path_with_simple( + |p| p.print_path_with_qualified(alias_ty.self_ty(), None), &def_key.disambiguated_data, ) }, @@ -1386,7 +1393,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { if let ty::Tuple(tys) = principal.args.type_at(0).kind() { let mut projections = predicates.projection_bounds(); if let (Some(proj), None) = (projections.next(), projections.next()) { - p.pretty_fn_sig( + p.pretty_print_fn_sig( tys, false, proj.skip_binder().term.as_type().expect("Return type was a const"), @@ -1396,7 +1403,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } } - // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`, + // HACK(eddyb) this duplicates `FmtPrinter`'s `print_path_with_generic_args`, // in order to place the projections inside the `<...>`. if !resugared { let principal_with_self = @@ -1488,7 +1495,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { Ok(()) } - fn pretty_fn_sig( + fn pretty_print_fn_sig( &mut self, inputs: &[Ty<'tcx>], c_variadic: bool, @@ -1525,7 +1532,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args }) => { match self.tcx().def_kind(def) { DefKind::Const | DefKind::AssocConst => { - self.print_value_path(def, args)?; + self.pretty_print_value_path(def, args)?; } DefKind::AnonConst => { if def.is_local() @@ -1534,13 +1541,13 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { { write!(self, "{snip}")?; } else { - // Do not call `print_value_path` as if a parent of this anon const is - // an impl it will attempt to print out the impl trait ref i.e. `<T as - // Trait>::{constant#0}`. 
This would cause printing to enter an - // infinite recursion if the anon const is in the self type i.e. - // `impl<T: Default> Default for [T; 32 - 1 - 1 - 1] {` where we would - // try to print - // `<[T; /* print constant#0 again */] as // Default>::{constant#0}`. + // Do not call `pretty_print_value_path` as if a parent of this anon + // const is an impl it will attempt to print out the impl trait ref + // i.e. `<T as Trait>::{constant#0}`. This would cause printing to + // enter an infinite recursion if the anon const is in the self type + // i.e. `impl<T: Default> Default for [T; 32 - 1 - 1 - 1] {` where we + // would try to print `<[T; /* print constant#0 again */] as // + // Default>::{constant#0}`. write!( self, "{}::{}", @@ -1742,7 +1749,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { self.tcx().try_get_global_alloc(prov.alloc_id()) { self.typed_value( - |this| this.print_value_path(instance.def_id(), instance.args), + |this| this.pretty_print_value_path(instance.def_id(), instance.args), |this| this.print_type(ty), " as ", )?; @@ -1936,7 +1943,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { let variant_idx = contents.variant.expect("destructed const of adt without variant idx"); let variant_def = &def.variant(variant_idx); - self.print_value_path(variant_def.def_id, args)?; + self.pretty_print_value_path(variant_def.def_id, args)?; match variant_def.ctor_kind() { Some(CtorKind::Const) => {} Some(CtorKind::Fn) => { @@ -1972,7 +1979,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } (_, ty::FnDef(def_id, args)) => { // Never allowed today, but we still encounter them in invalid const args. - self.print_value_path(def_id, args)?; + self.pretty_print_value_path(def_id, args)?; return Ok(()); } // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading @@ -1993,7 +2000,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { Ok(()) } - fn pretty_closure_as_impl( + fn pretty_print_closure_as_impl( &mut self, closure: ty::ClosureArgs<TyCtxt<'tcx>>, ) -> Result<(), PrintError> { @@ -2131,8 +2138,6 @@ impl<'a, 'tcx> FmtPrinter<'a, 'tcx> { } } -// HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always -// (but also some things just print a `DefId` generally so maybe we need this?) fn guess_def_namespace(tcx: TyCtxt<'_>, def_id: DefId) -> Namespace { match tcx.def_key(def_id).disambiguated_data.data { DefPathData::TypeNs(..) | DefPathData::CrateRoot | DefPathData::OpaqueTy => { @@ -2157,6 +2162,7 @@ impl<'t> TyCtxt<'t> { self.def_path_str_with_args(def_id, &[]) } + /// For this one we determine the appropriate namespace for the `def_id`. pub fn def_path_str_with_args( self, def_id: impl IntoQueryParam<DefId>, @@ -2169,16 +2175,17 @@ impl<'t> TyCtxt<'t> { FmtPrinter::print_string(self, ns, |p| p.print_def_path(def_id, args)).unwrap() } + /// For this one we always use value namespace. 
pub fn value_path_str_with_args( self, def_id: impl IntoQueryParam<DefId>, args: &'t [GenericArg<'t>], ) -> String { let def_id = def_id.into_query_param(); - let ns = guess_def_namespace(self, def_id); + let ns = Namespace::ValueNS; debug!("value_path_str: def_id={:?}, ns={:?}", def_id, ns); - FmtPrinter::print_string(self, ns, |p| p.print_value_path(def_id, args)).unwrap() + FmtPrinter::print_string(self, ns, |p| p.print_def_path(def_id, args)).unwrap() } } @@ -2230,7 +2237,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { self.print_def_path(parent_def_id, &[])?; - // HACK(eddyb) copy of `path_append` to avoid + // HACK(eddyb) copy of `print_path_with_simple` to avoid // constructing a `DisambiguatedDefPathData`. if !self.empty_path { write!(self, "::")?; @@ -2295,10 +2302,6 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { } } - fn should_truncate(&mut self) -> bool { - !self.type_length_limit.value_within_limit(self.printed_type_count) - } - fn print_dyn_existential( &mut self, predicates: &'tcx ty::List<ty::PolyExistentialPredicate<'tcx>>, @@ -2310,7 +2313,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { self.pretty_print_const(ct, false) } - fn path_crate(&mut self, cnum: CrateNum) -> Result<(), PrintError> { + fn print_crate_name(&mut self, cnum: CrateNum) -> Result<(), PrintError> { self.empty_path = true; if cnum == LOCAL_CRATE { if self.tcx.sess.at_least_rust_2018() { @@ -2327,23 +2330,23 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn path_qualified( + fn print_path_with_qualified( &mut self, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - self.pretty_path_qualified(self_ty, trait_ref)?; + self.pretty_print_path_with_qualified(self_ty, trait_ref)?; self.empty_path = false; Ok(()) } - fn path_append_impl( + fn print_path_with_impl( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, self_ty: Ty<'tcx>, trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - self.pretty_path_append_impl( + self.pretty_print_path_with_impl( |p| { print_prefix(p)?; if !p.empty_path { @@ -2359,7 +2362,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn path_append( + fn print_path_with_simple( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, disambiguated_data: &DisambiguatedDefPathData, @@ -2390,7 +2393,7 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn path_generic_args( + fn print_path_with_generic_args( &mut self, print_prefix: impl FnOnce(&mut Self) -> Result<(), PrintError>, args: &[GenericArg<'tcx>], @@ -2421,7 +2424,7 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> { self.0.const_infer_name_resolver.as_ref().and_then(|func| func(id)) } - fn print_value_path( + fn pretty_print_value_path( &mut self, def_id: DefId, args: &'tcx [GenericArg<'tcx>], @@ -2433,7 +2436,7 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> + fn pretty_print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> where T: Print<'tcx, Self> + TypeFoldable<TyCtxt<'tcx>>, { @@ -2487,7 +2490,11 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn should_print_region(&self, region: ty::Region<'tcx>) -> bool { + fn should_truncate(&mut self) -> bool { + !self.type_length_limit.value_within_limit(self.printed_type_count) + } + + fn should_print_optional_region(&self, region: 
ty::Region<'tcx>) -> bool { let highlight = self.region_highlight_mode; if highlight.region_highlighted(region).is_some() { return true; @@ -2892,7 +2899,7 @@ where T: Print<'tcx, P> + TypeFoldable<TyCtxt<'tcx>>, { fn print(&self, p: &mut P) -> Result<(), PrintError> { - p.print_in_binder(self) + p.pretty_print_in_binder(self) } } @@ -3090,7 +3097,7 @@ define_print! { } write!(p, "fn")?; - p.pretty_fn_sig(self.inputs(), self.c_variadic, self.output())?; + p.pretty_print_fn_sig(self.inputs(), self.c_variadic, self.output())?; } ty::TraitRef<'tcx> { @@ -3225,7 +3232,7 @@ define_print! { // The args don't contain the self ty (as it has been erased) but the corresp. // generics do as the trait always has a self ty param. We need to offset. let args = &self.args[p.tcx().generics_of(self.def_id).parent_count - 1..]; - p.path_generic_args(|p| write!(p, "{name}"), args)?; + p.print_path_with_generic_args(|p| write!(p, "{name}"), args)?; write!(p, " = ")?; self.term.print(p)?; } @@ -3314,7 +3321,7 @@ define_print_and_forward_display! { } PrintClosureAsImpl<'tcx> { - p.pretty_closure_as_impl(self.closure)?; + p.pretty_print_closure_as_impl(self.closure)?; } ty::ParamTy { diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 0e2aff6f9bd..89ef46b1ae5 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -12,7 +12,6 @@ use rustc_hir::def_id::LocalDefId; use rustc_span::source_map::Spanned; use rustc_type_ir::{ConstKind, TypeFolder, VisitorResult, try_visit}; -use super::print::PrettyPrinter; use super::{GenericArg, GenericArgKind, Pattern, Region}; use crate::mir::PlaceElem; use crate::ty::print::{FmtPrinter, Printer, with_no_trimmed_paths}; @@ -168,15 +167,11 @@ impl<'tcx> fmt::Debug for ty::Const<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { // If this is a value, we spend some effort to make it look nice. if let ConstKind::Value(cv) = self.kind() { - return ty::tls::with(move |tcx| { - let cv = tcx.lift(cv).unwrap(); - let mut p = FmtPrinter::new(tcx, Namespace::ValueNS); - p.pretty_print_const_valtree(cv, /*print_ty*/ true)?; - f.write_str(&p.into_buffer()) - }); + write!(f, "{}", cv) + } else { + // Fall back to something verbose. + write!(f, "{:?}", self.kind()) } - // Fall back to something verbose. - write!(f, "{:?}", self.kind()) } } diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 72474a60566..755fc68d86f 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -821,10 +821,38 @@ impl<'tcx> Ty<'tcx> { #[inline] pub fn new_coroutine_witness( tcx: TyCtxt<'tcx>, - id: DefId, + def_id: DefId, args: GenericArgsRef<'tcx>, ) -> Ty<'tcx> { - Ty::new(tcx, CoroutineWitness(id, args)) + if cfg!(debug_assertions) { + tcx.debug_assert_args_compatible(tcx.typeck_root_def_id(def_id), args); + } + Ty::new(tcx, CoroutineWitness(def_id, args)) + } + + pub fn new_coroutine_witness_for_coroutine( + tcx: TyCtxt<'tcx>, + def_id: DefId, + coroutine_args: GenericArgsRef<'tcx>, + ) -> Ty<'tcx> { + tcx.debug_assert_args_compatible(def_id, coroutine_args); + // HACK: Coroutine witness types are lifetime erased, so they + // never reference any lifetime args from the coroutine. We erase + // the regions here since we may get into situations where a + // coroutine is recursively contained within itself, leading to + // witness types that differ by region args. 
This means that + // cycle detection in fulfillment will not kick in, which leads + // to unnecessary overflows in async code. See the issue: + // <https://github.com/rust-lang/rust/issues/145151>. + let args = + ty::GenericArgs::for_item(tcx, tcx.typeck_root_def_id(def_id), |def, _| { + match def.kind { + ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(), + ty::GenericParamDefKind::Type { .. } + | ty::GenericParamDefKind::Const { .. } => coroutine_args[def.index as usize], + } + }); + Ty::new_coroutine_witness(tcx, def_id, args) } // misc @@ -985,6 +1013,14 @@ impl<'tcx> rustc_type_ir::inherent::Ty<TyCtxt<'tcx>> for Ty<'tcx> { Ty::new_coroutine_witness(interner, def_id, args) } + fn new_coroutine_witness_for_coroutine( + interner: TyCtxt<'tcx>, + def_id: DefId, + coroutine_args: ty::GenericArgsRef<'tcx>, + ) -> Self { + Ty::new_coroutine_witness_for_coroutine(interner, def_id, coroutine_args) + } + fn new_ptr(interner: TyCtxt<'tcx>, ty: Self, mutbl: hir::Mutability) -> Self { Ty::new_ptr(interner, ty, mutbl) } diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs index 6b187c5325a..f42dbbd2ac3 100644 --- a/compiler/rustc_middle/src/ty/typeck_results.rs +++ b/compiler/rustc_middle/src/ty/typeck_results.rs @@ -210,6 +210,11 @@ pub struct TypeckResults<'tcx> { /// on closure size. pub closure_size_eval: LocalDefIdMap<ClosureSizeProfileData<'tcx>>, + /// Stores the types involved in calls to `transmute` intrinsic. These are meant to be checked + /// outside of typeck and borrowck to avoid cycles with opaque types and coroutine layout + /// computation. + pub transmutes_to_check: Vec<(Ty<'tcx>, Ty<'tcx>, HirId)>, + /// Container types and field indices of `offset_of!` expressions offset_of_data: ItemLocalMap<(Ty<'tcx>, Vec<(VariantIdx, FieldIdx)>)>, } @@ -241,6 +246,7 @@ impl<'tcx> TypeckResults<'tcx> { rvalue_scopes: Default::default(), coroutine_stalled_predicates: Default::default(), closure_size_eval: Default::default(), + transmutes_to_check: Default::default(), offset_of_data: Default::default(), } } | 
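
For context on the `PatRange` changes above: `contains` and `overlaps` no longer take a `ty::TypingEnv`, because the range boundaries are now fully evaluated valtrees, so checking a value is just two ordered bit comparisons, with the second one skipped when the first already decides the answer. The following standalone sketch mirrors that short-circuit structure; it is not compiler code, and `Boundary`, `Range`, and the `u128` payload are simplifications invented for illustration.

use std::cmp::Ordering;

#[derive(Clone, Copy, PartialEq)]
enum RangeEnd {
    Included,
    Excluded,
}

#[derive(Clone, Copy)]
enum Boundary {
    NegInfinity,
    Finite(u128),
    PosInfinity,
}

impl Boundary {
    // Compare two boundaries; `None` models the cases the real code cannot decide.
    fn compare_with(self, other: Boundary) -> Option<Ordering> {
        use Boundary::*;
        Some(match (self, other) {
            (NegInfinity, NegInfinity) | (PosInfinity, PosInfinity) => return None,
            (NegInfinity, _) | (_, PosInfinity) => Ordering::Less,
            (_, NegInfinity) | (PosInfinity, _) => Ordering::Greater,
            (Finite(a), Finite(b)) => a.cmp(&b),
        })
    }
}

struct Range {
    lo: Boundary,
    hi: Boundary,
    end: RangeEnd,
}

impl Range {
    fn contains(&self, value: u128) -> Option<bool> {
        use Ordering::*;
        let value = Boundary::Finite(value);
        // For performance, only do the second comparison if the first one did not
        // already rule the value out (the `&&` short-circuits).
        Some(
            match self.lo.compare_with(value)? {
                Less | Equal => true,
                Greater => false,
            } && match value.compare_with(self.hi)? {
                Less => true,
                Equal => self.end == RangeEnd::Included,
                Greater => false,
            },
        )
    }
}

fn main() {
    let r = Range { lo: Boundary::Finite(1), hi: Boundary::Finite(5), end: RangeEnd::Excluded };
    assert_eq!(r.contains(4), Some(true));
    assert_eq!(r.contains(5), Some(false));

    let open = Range { lo: Boundary::NegInfinity, hi: Boundary::Finite(5), end: RangeEnd::Included };
    assert_eq!(open.contains(0), Some(true));

    let unbounded = Range { lo: Boundary::Finite(0), hi: Boundary::PosInfinity, end: RangeEnd::Included };
    assert_eq!(unbounded.contains(123), Some(true));
}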
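
For context on the `Printer` renames in `ty/print/mod.rs`: the `path_*` hooks become `print_path_with_*`, and each still takes a `print_prefix` closure that emits the leading segments before the method appends its own final piece (a plain segment, an impl segment, or generic args). The sketch below is a toy, self-contained illustration of that prefix-closure pattern only; `ToyPrinter` and its string-based methods are invented for the example and are not the rustc trait, whose methods operate on `DefId`s and return `Result<(), PrintError>`.

use std::fmt::Write;

struct ToyPrinter {
    out: String,
}

impl ToyPrinter {
    // Loose analogue of `print_crate_name`: starts a path with the crate segment.
    fn print_crate_name(&mut self, krate: &str) -> std::fmt::Result {
        self.out.write_str(krate)
    }

    // Loose analogue of `print_path_with_simple`: prefix first, then `::segment`.
    fn print_path_with_simple(
        &mut self,
        print_prefix: impl FnOnce(&mut Self) -> std::fmt::Result,
        segment: &str,
    ) -> std::fmt::Result {
        print_prefix(self)?;
        write!(self.out, "::{segment}")
    }

    // Loose analogue of `print_path_with_generic_args`: prefix first, then `<args>`.
    fn print_path_with_generic_args(
        &mut self,
        print_prefix: impl FnOnce(&mut Self) -> std::fmt::Result,
        args: &[&str],
    ) -> std::fmt::Result {
        print_prefix(self)?;
        if !args.is_empty() {
            write!(self.out, "<{}>", args.join(", "))?;
        }
        Ok(())
    }
}

fn main() -> std::fmt::Result {
    let mut p = ToyPrinter { out: String::new() };
    // Builds the example from the doc comment: a simple path plus generic args.
    p.print_path_with_generic_args(
        |p| p.print_path_with_simple(|p| p.print_crate_name("crate_a"), "ImplementsTraitForUsize"),
        &["usize"],
    )?;
    assert_eq!(p.out, "crate_a::ImplementsTraitForUsize<usize>");
    Ok(())
}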
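
For context on the `assoc_parent` change in `ty/mod.rs`: it now returns the parent's `DefKind` together with its `DefId`, so `trait_of_assoc`, `impl_of_assoc`, and the new `inherent_impl_of_assoc`/`trait_impl_of_assoc` each reduce to a single `match` on the returned pair instead of a separate `def_kind` lookup plus `filter`. The standalone model below shows that shape only; the `Table` lookup, the `DefId(u32)` newtype, and the trimmed `DefKind` enum are invented stand-ins for the query system, not compiler APIs.

#[derive(Clone, Copy, Debug)]
enum DefKind {
    Trait,
    Impl { of_trait: bool },
}

#[derive(Clone, Copy, PartialEq, Debug)]
struct DefId(u32);

struct Table {
    // (item, parent, parent's kind, item is an associated item)
    items: Vec<(DefId, DefId, DefKind, bool)>,
}

impl Table {
    // Returns the parent and its kind in one step, like the refactored helper.
    fn assoc_parent(&self, def_id: DefId) -> Option<(DefId, DefKind)> {
        self.items
            .iter()
            .find(|(id, _, _, is_assoc)| *id == def_id && *is_assoc)
            .map(|&(_, parent, kind, _)| (parent, kind))
    }

    fn trait_of_assoc(&self, def_id: DefId) -> Option<DefId> {
        match self.assoc_parent(def_id) {
            Some((id, DefKind::Trait)) => Some(id),
            _ => None,
        }
    }

    fn inherent_impl_of_assoc(&self, def_id: DefId) -> Option<DefId> {
        match self.assoc_parent(def_id) {
            Some((id, DefKind::Impl { of_trait: false })) => Some(id),
            _ => None,
        }
    }
}

fn main() {
    let table = Table {
        items: vec![
            (DefId(10), DefId(1), DefKind::Trait, true),
            (DefId(20), DefId(2), DefKind::Impl { of_trait: false }, true),
        ],
    };
    assert_eq!(table.trait_of_assoc(DefId(10)), Some(DefId(1)));
    assert_eq!(table.inherent_impl_of_assoc(DefId(20)), Some(DefId(2)));
    assert_eq!(table.trait_of_assoc(DefId(20)), None);
}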
