Diffstat (limited to 'compiler/rustc_middle/src')
32 files changed, 527 insertions, 340 deletions
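Much of this patch reworks byte-level provenance in the interpreter: `ProvenanceMap::bytes` now stores `(Prov, u8)` pairs, where the `u8` records which byte of its original pointer a fragment was, and a load only re-assembles a pointer when all fragments line up in order. A rough, self-contained model of that bookkeeping follows (the names `Prov`, `Fragments`, and `reassembles` are illustrative, and a plain `BTreeMap` with `u64` offsets stands in for the compiler's `SortedMap<Size, (Prov, u8)>`; this is a sketch, not the actual API):

use std::collections::BTreeMap;

// Toy provenance: just an allocation id.
#[derive(Copy, Clone, PartialEq, Debug)]
struct Prov(u32);

// Byte-wise fragments: offset -> (provenance, index of this byte within its pointer).
struct Fragments {
    bytes: BTreeMap<u64, (Prov, u8)>,
}

impl Fragments {
    // A whole pointer is recovered from `ptr_size` fragments only if they all share
    // one provenance and their indices run 0, 1, 2, ... in order, which is the same
    // rule the `read_scalar` and `merge_bytes` hunks below apply.
    fn reassembles(&self, start: u64, ptr_size: u64) -> Option<Prov> {
        // The first fragment must have index 0.
        let (first, 0) = *self.bytes.get(&start)? else { return None };
        for i in 1..ptr_size {
            let (prov, idx) = *self.bytes.get(&(start + i))?;
            if prov != first || u64::from(idx) != i {
                return None;
            }
        }
        Some(first)
    }
}

fn main() {
    let mut f = Fragments { bytes: BTreeMap::new() };
    // Copy an 8-byte pointer with provenance Prov(1), byte by byte, to offset 16.
    for i in 0..8u64 {
        f.bytes.insert(16 + i, (Prov(1), i as u8));
    }
    assert_eq!(f.reassembles(16, 8), Some(Prov(1)));
    // Overwrite one byte with an out-of-order fragment: no longer a valid pointer.
    f.bytes.insert(20, (Prov(1), 0));
    assert_eq!(f.reassembles(16, 8), None);
}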
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index e5cc23c213d..cf0549fa668 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -60,6 +60,7 @@ #![feature(try_trait_v2_residual)] #![feature(try_trait_v2_yeet)] #![feature(type_alias_impl_trait)] +#![feature(unwrap_infallible)] #![feature(yeet_expr)] #![recursion_limit = "256"] // tidy-alphabetical-end diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs index 341a735f88f..f70b7b6fad7 100644 --- a/compiler/rustc_middle/src/lint.rs +++ b/compiler/rustc_middle/src/lint.rs @@ -211,11 +211,28 @@ impl LintExpectation { } fn explain_lint_level_source( + sess: &Session, lint: &'static Lint, level: Level, src: LintLevelSource, err: &mut Diag<'_, ()>, ) { + // Find the name of the lint group that contains the given lint. + // Assumes the lint only belongs to one group. + let lint_group_name = |lint| { + let lint_groups_iter = sess.lint_groups_iter(); + let lint_id = LintId::of(lint); + lint_groups_iter + .filter(|lint_group| !lint_group.is_externally_loaded) + .find(|lint_group| { + lint_group + .lints + .iter() + .find(|lint_group_lint| **lint_group_lint == lint_id) + .is_some() + }) + .map(|lint_group| lint_group.name) + }; let name = lint.name_lower(); if let Level::Allow = level { // Do not point at `#[allow(compat_lint)]` as the reason for a compatibility lint @@ -224,7 +241,15 @@ fn explain_lint_level_source( } match src { LintLevelSource::Default => { - err.note_once(format!("`#[{}({})]` on by default", level.as_str(), name)); + let level_str = level.as_str(); + match lint_group_name(lint) { + Some(group_name) => { + err.note_once(format!("`#[{level_str}({name})]` (part of `#[{level_str}({group_name})]`) on by default")); + } + None => { + err.note_once(format!("`#[{level_str}({name})]` on by default")); + } + } } LintLevelSource::CommandLine(lint_flag_val, orig_level) => { let flag = orig_level.to_cmd_flag(); @@ -427,7 +452,7 @@ pub fn lint_level( decorate(&mut err); } - explain_lint_level_source(lint, level, src, &mut err); + explain_lint_level_source(sess, lint, level, src, &mut err); err.emit() } lint_level_impl(sess, lint, level, span, Box::new(decorate)) diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs index 52341df0740..866736f74a0 100644 --- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs +++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs @@ -1,14 +1,11 @@ use std::borrow::Cow; use rustc_abi::Align; -use rustc_ast::expand::autodiff_attrs::AutoDiffAttrs; -use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, OptimizeAttr}; -use rustc_hir::def_id::DefId; +use rustc_hir::attrs::{InlineAttr, InstructionSetAttr, Linkage, OptimizeAttr}; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; use rustc_span::Symbol; use rustc_target::spec::SanitizerSet; -use crate::mir::mono::Linkage; use crate::ty::{InstanceKind, TyCtxt}; impl<'tcx> TyCtxt<'tcx> { @@ -37,14 +34,10 @@ pub struct CodegenFnAttrs { pub inline: InlineAttr, /// Parsed representation of the `#[optimize]` attribute pub optimize: OptimizeAttr, - /// The `#[export_name = "..."]` attribute, indicating a custom symbol a - /// function should be exported under - pub export_name: Option<Symbol>, - /// The `#[link_name = "..."]` attribute, indicating a custom symbol an - /// imported function should be imported as. 
Note that `export_name` - /// probably isn't set when this is set, this is for foreign items while - /// `#[export_name]` is for Rust-defined functions. - pub link_name: Option<Symbol>, + /// The name this function will be imported/exported under. This can be set + /// using the `#[export_name = "..."]` or `#[link_name = "..."]` attribute + /// depending on if this is a function definition or foreign function. + pub symbol_name: Option<Symbol>, /// The `#[link_ordinal = "..."]` attribute, indicating an ordinal an /// imported function has in the dynamic library. Note that this must not /// be set when `link_name` is set. This is for foreign items with the @@ -63,8 +56,8 @@ pub struct CodegenFnAttrs { /// The `#[link_section = "..."]` attribute, or what executable section this /// should be placed in. pub link_section: Option<Symbol>, - /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which - /// instrumentation should be disabled inside the annotated function. + /// The `#[sanitize(xyz = "off")]` attribute. Indicates sanitizers for which + /// instrumentation should be disabled inside the function. pub no_sanitize: SanitizerSet, /// The `#[instruction_set(set)]` attribute. Indicates if the generated code should /// be generated against a specific instruction set. Only usable on architectures which allow @@ -76,17 +69,25 @@ pub struct CodegenFnAttrs { /// The `#[patchable_function_entry(...)]` attribute. Indicates how many nops should be around /// the function entry. pub patchable_function_entry: Option<PatchableFunctionEntry>, - /// For the `#[autodiff]` macros. - pub autodiff_item: Option<AutoDiffAttrs>, +} + +#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable, PartialEq, Eq)] +pub enum TargetFeatureKind { + /// The feature is implied by another feature, rather than explicitly added by the + /// `#[target_feature]` attribute + Implied, + /// The feature is added by the regular `target_feature` attribute. + Enabled, + /// The feature is added by the unsafe `force_target_feature` attribute. + Forced, } #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] pub struct TargetFeature { /// The name of the target feature (e.g. "avx") pub name: Symbol, - /// The feature is implied by another feature, rather than explicitly added by the - /// `#[target_feature]` attribute - pub implied: bool, + /// The way this feature was enabled. + pub kind: TargetFeatureKind, } #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] @@ -159,6 +160,8 @@ bitflags::bitflags! { const ALLOCATOR_ZEROED = 1 << 14; /// `#[no_builtins]`: indicates that disable implicit builtin knowledge of functions for the function. const NO_BUILTINS = 1 << 15; + /// Marks foreign items, to make `contains_extern_indicator` cheaper. + const FOREIGN_ITEM = 1 << 16; } } rustc_data_structures::external_bitflags_debug! { CodegenFnAttrFlags } @@ -171,8 +174,7 @@ impl CodegenFnAttrs { flags: CodegenFnAttrFlags::empty(), inline: InlineAttr::None, optimize: OptimizeAttr::Default, - export_name: None, - link_name: None, + symbol_name: None, link_ordinal: None, target_features: vec![], safe_target_features: false, @@ -183,7 +185,6 @@ impl CodegenFnAttrs { instruction_set: None, alignment: None, patchable_function_entry: None, - autodiff_item: None, } } @@ -194,14 +195,14 @@ impl CodegenFnAttrs { /// * `#[linkage]` is present /// /// Keep this in sync with the logic for the unused_attributes for `#[inline]` lint. 
- pub fn contains_extern_indicator(&self, tcx: TyCtxt<'_>, did: DefId) -> bool { - if tcx.is_foreign_item(did) { + pub fn contains_extern_indicator(&self) -> bool { + if self.flags.contains(CodegenFnAttrFlags::FOREIGN_ITEM) { return false; } self.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || self.flags.contains(CodegenFnAttrFlags::RUSTC_STD_INTERNAL_SYMBOL) - || self.export_name.is_some() + || self.symbol_name.is_some() || match self.linkage { // These are private, so make sure we don't try to consider // them external. diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs index 785ddd1ee29..e3e04c9d180 100644 --- a/compiler/rustc_middle/src/middle/privacy.rs +++ b/compiler/rustc_middle/src/middle/privacy.rs @@ -181,11 +181,7 @@ impl EffectiveVisibilities { // nominal visibility. For some items nominal visibility doesn't make sense so we // don't check this condition for them. let is_impl = matches!(tcx.def_kind(def_id), DefKind::Impl { .. }); - let is_associated_item_in_trait_impl = tcx - .impl_of_assoc(def_id.to_def_id()) - .and_then(|impl_id| tcx.trait_id_of_impl(impl_id)) - .is_some(); - if !is_impl && !is_associated_item_in_trait_impl { + if !is_impl && tcx.trait_impl_of_assoc(def_id.to_def_id()).is_none() { let nominal_vis = tcx.visibility(def_id); if !nominal_vis.is_at_least(ev.reachable, tcx) { span_bug!( diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 4a19cf1563c..18520089e3e 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -4,7 +4,7 @@ use std::num::NonZero; use rustc_ast::NodeId; -use rustc_errors::{Applicability, Diag, EmissionGuarantee}; +use rustc_errors::{Applicability, Diag, EmissionGuarantee, LintBuffer}; use rustc_feature::GateIssue; use rustc_hir::attrs::{DeprecatedSince, Deprecation}; use rustc_hir::def_id::{DefId, LocalDefId}; @@ -12,7 +12,7 @@ use rustc_hir::{self as hir, ConstStability, DefaultBodyStability, HirId, Stabil use rustc_macros::{Decodable, Encodable, HashStable, Subdiagnostic}; use rustc_session::Session; use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE}; -use rustc_session::lint::{BuiltinLintDiag, DeprecatedSinceKind, Level, Lint, LintBuffer}; +use rustc_session::lint::{BuiltinLintDiag, DeprecatedSinceKind, Level, Lint}; use rustc_session::parse::feature_err_issue; use rustc_span::{Span, Symbol, sym}; use tracing::debug; diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 96131d47a17..164433aede2 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -448,6 +448,11 @@ impl<'tcx> Const<'tcx> { Self::Val(val, ty) } + #[inline] + pub fn from_ty_value(tcx: TyCtxt<'tcx>, val: ty::Value<'tcx>) -> Self { + Self::Ty(val.ty, ty::Const::new_value(tcx, val.valtree, val.ty)) + } + pub fn from_bits( tcx: TyCtxt<'tcx>, bits: u128, diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 27ead514531..2ea92a39d48 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -306,8 +306,6 @@ pub enum AllocError { ScalarSizeMismatch(ScalarSizeMismatch), /// Encountered a pointer where we needed raw bytes. ReadPointerAsInt(Option<BadBytesAccess>), - /// Partially overwriting a pointer. 
- OverwritePartialPointer(Size), /// Partially copying a pointer. ReadPartialPointer(Size), /// Using uninitialized data where it is not allowed. @@ -331,9 +329,6 @@ impl AllocError { ReadPointerAsInt(info) => InterpErrorKind::Unsupported( UnsupportedOpInfo::ReadPointerAsInt(info.map(|b| (alloc_id, b))), ), - OverwritePartialPointer(offset) => InterpErrorKind::Unsupported( - UnsupportedOpInfo::OverwritePartialPointer(Pointer::new(alloc_id, offset)), - ), ReadPartialPointer(offset) => InterpErrorKind::Unsupported( UnsupportedOpInfo::ReadPartialPointer(Pointer::new(alloc_id, offset)), ), @@ -633,11 +628,11 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> &mut self, cx: &impl HasDataLayout, range: AllocRange, - ) -> AllocResult<&mut [u8]> { + ) -> &mut [u8] { self.mark_init(range, true); - self.provenance.clear(range, cx)?; + self.provenance.clear(range, cx); - Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]) + &mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()] } /// A raw pointer variant of `get_bytes_unchecked_for_overwrite` that avoids invalidating existing immutable aliases @@ -646,15 +641,15 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> &mut self, cx: &impl HasDataLayout, range: AllocRange, - ) -> AllocResult<*mut [u8]> { + ) -> *mut [u8] { self.mark_init(range, true); - self.provenance.clear(range, cx)?; + self.provenance.clear(range, cx); assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check // Crucially, we go via `AllocBytes::as_mut_ptr`, not `AllocBytes::deref_mut`. let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize()); let len = range.end().bytes_usize() - range.start.bytes_usize(); - Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len)) + ptr::slice_from_raw_parts_mut(begin_ptr, len) } /// This gives direct mutable access to the entire buffer, just exposing their internal state @@ -723,26 +718,45 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> let ptr = Pointer::new(prov, Size::from_bytes(bits)); return Ok(Scalar::from_pointer(ptr, cx)); } - - // If we can work on pointers byte-wise, join the byte-wise provenances. - if Prov::OFFSET_IS_ADDR { - let mut prov = self.provenance.get(range.start, cx); + // The other easy case is total absence of provenance. + if self.provenance.range_empty(range, cx) { + return Ok(Scalar::from_uint(bits, range.size)); + } + // If we get here, we have to check per-byte provenance, and join them together. + let prov = 'prov: { + // Initialize with first fragment. Must have index 0. + let Some((mut joint_prov, 0)) = self.provenance.get_byte(range.start, cx) else { + break 'prov None; + }; + // Update with the remaining fragments. for offset in Size::from_bytes(1)..range.size { - let this_prov = self.provenance.get(range.start + offset, cx); - prov = Prov::join(prov, this_prov); + // Ensure there is provenance here and it has the right index. + let Some((frag_prov, frag_idx)) = + self.provenance.get_byte(range.start + offset, cx) + else { + break 'prov None; + }; + // Wildcard provenance is allowed to come with any index (this is needed + // for Miri's native-lib mode to work). + if u64::from(frag_idx) != offset.bytes() && Some(frag_prov) != Prov::WILDCARD { + break 'prov None; + } + // Merge this byte's provenance with the previous ones. 
+ joint_prov = match Prov::join(joint_prov, frag_prov) { + Some(prov) => prov, + None => break 'prov None, + }; } - // Now use this provenance. - let ptr = Pointer::new(prov, Size::from_bytes(bits)); - return Ok(Scalar::from_maybe_pointer(ptr, cx)); - } else { - // Without OFFSET_IS_ADDR, the only remaining case we can handle is total absence of - // provenance. - if self.provenance.range_empty(range, cx) { - return Ok(Scalar::from_uint(bits, range.size)); - } - // Else we have mixed provenance, that doesn't work. + break 'prov Some(joint_prov); + }; + if prov.is_none() && !Prov::OFFSET_IS_ADDR { + // There are some bytes with provenance here but overall the provenance does not add up. + // We need `OFFSET_IS_ADDR` to fall back to no-provenance here; without that option, we must error. return Err(AllocError::ReadPartialPointer(range.start)); } + // We can use this provenance. + let ptr = Pointer::new(prov, Size::from_bytes(bits)); + return Ok(Scalar::from_maybe_pointer(ptr, cx)); } else { // We are *not* reading a pointer. // If we can just ignore provenance or there is none, that's easy. @@ -782,7 +796,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> let endian = cx.data_layout().endian; // Yes we do overwrite all the bytes in `dst`. - let dst = self.get_bytes_unchecked_for_overwrite(cx, range)?; + let dst = self.get_bytes_unchecked_for_overwrite(cx, range); write_target_uint(endian, dst, bytes).unwrap(); // See if we have to also store some provenance. @@ -795,10 +809,9 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> } /// Write "uninit" to the given memory range. - pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { + pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) { self.mark_init(range, false); - self.provenance.clear(range, cx)?; - Ok(()) + self.provenance.clear(range, cx); } /// Mark all bytes in the given range as initialised and reset the provenance @@ -817,9 +830,12 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> } /// Remove all provenance in the given memory range. - pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { - self.provenance.clear(range, cx)?; - return Ok(()); + pub fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) { + self.provenance.clear(range, cx); + } + + pub fn provenance_merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool { + self.provenance.merge_bytes(cx) } /// Applies a previously prepared provenance copy. diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs index 119d4be64e6..dbbd95408c8 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -10,7 +10,7 @@ use rustc_macros::HashStable; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; use tracing::trace; -use super::{AllocError, AllocRange, AllocResult, CtfeProvenance, Provenance, alloc_range}; +use super::{AllocRange, CtfeProvenance, Provenance, alloc_range}; /// Stores the provenance information of pointers stored in memory. #[derive(Clone, PartialEq, Eq, Hash, Debug)] @@ -19,25 +19,25 @@ pub struct ProvenanceMap<Prov = CtfeProvenance> { /// `Provenance` in this map applies from the given offset for an entire pointer-size worth of /// bytes. 
Two entries in this map are always at least a pointer size apart. ptrs: SortedMap<Size, Prov>, - /// Provenance in this map only applies to the given single byte. - /// This map is disjoint from the previous. It will always be empty when - /// `Prov::OFFSET_IS_ADDR` is false. - bytes: Option<Box<SortedMap<Size, Prov>>>, + /// This stores byte-sized provenance fragments. + /// The `u8` indicates the position of this byte inside its original pointer. + /// If the bytes are re-assembled in their original order, the pointer can be used again. + /// Wildcard provenance is allowed to have index 0 everywhere. + bytes: Option<Box<SortedMap<Size, (Prov, u8)>>>, } // These impls are generic over `Prov` since `CtfeProvenance` is only decodable/encodable // for some particular `D`/`S`. impl<D: Decoder, Prov: Provenance + Decodable<D>> Decodable<D> for ProvenanceMap<Prov> { fn decode(d: &mut D) -> Self { - assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized + // `bytes` is not in the serialized format Self { ptrs: Decodable::decode(d), bytes: None } } } impl<S: Encoder, Prov: Provenance + Encodable<S>> Encodable<S> for ProvenanceMap<Prov> { fn encode(&self, s: &mut S) { let Self { ptrs, bytes } = self; - assert!(!Prov::OFFSET_IS_ADDR); // only `CtfeProvenance` is ever serialized - debug_assert!(bytes.is_none()); // without `OFFSET_IS_ADDR`, this is always empty + assert!(bytes.is_none()); // interning refuses allocations with pointer fragments ptrs.encode(s) } } @@ -58,10 +58,10 @@ impl ProvenanceMap { /// Give access to the ptr-sized provenances (which can also be thought of as relocations, and /// indeed that is how codegen treats them). /// - /// Only exposed with `CtfeProvenance` provenance, since it panics if there is bytewise provenance. + /// Only use on interned allocations, as other allocations may have per-byte provenance! #[inline] pub fn ptrs(&self) -> &SortedMap<Size, CtfeProvenance> { - debug_assert!(self.bytes.is_none()); // `CtfeProvenance::OFFSET_IS_ADDR` is false so this cannot fail + assert!(self.bytes.is_none(), "`ptrs()` called on non-interned allocation"); &self.ptrs } } @@ -88,12 +88,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } /// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster. - pub(super) fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { + fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx)) } /// Returns all byte-wise provenance in the given range. - fn range_bytes_get(&self, range: AllocRange) -> &[(Size, Prov)] { + fn range_bytes_get(&self, range: AllocRange) -> &[(Size, (Prov, u8))] { if let Some(bytes) = self.bytes.as_ref() { bytes.range(range.start..range.end()) } else { @@ -107,19 +107,58 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } /// Get the provenance of a single byte. - pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> { + pub fn get_byte(&self, offset: Size, cx: &impl HasDataLayout) -> Option<(Prov, u8)> { let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx); debug_assert!(prov.len() <= 1); if let Some(entry) = prov.first() { // If it overlaps with this byte, it is on this byte. debug_assert!(self.bytes.as_ref().is_none_or(|b| !b.contains_key(&offset))); - Some(entry.1) + Some((entry.1, (offset - entry.0).bytes() as u8)) } else { // Look up per-byte provenance. 
self.bytes.as_ref().and_then(|b| b.get(&offset).copied()) } } + /// Gets the provenances of all bytes (including from pointers) in a range. + pub fn get_range( + &self, + cx: &impl HasDataLayout, + range: AllocRange, + ) -> impl Iterator<Item = Prov> { + let ptr_provs = self.range_ptrs_get(range, cx).iter().map(|(_, p)| *p); + let byte_provs = self.range_bytes_get(range).iter().map(|(_, (p, _))| *p); + ptr_provs.chain(byte_provs) + } + + /// Attempt to merge per-byte provenance back into ptr chunks, if the right fragments + /// sit next to each other. Return `false` if that is not possible due to partial pointers. + pub fn merge_bytes(&mut self, cx: &impl HasDataLayout) -> bool { + let Some(bytes) = self.bytes.as_deref_mut() else { + return true; + }; + let ptr_size = cx.data_layout().pointer_size(); + while let Some((offset, (prov, _))) = bytes.iter().next().copied() { + // Check if this fragment starts a pointer. + let range = offset..offset + ptr_size; + let frags = bytes.range(range.clone()); + if frags.len() != ptr_size.bytes_usize() { + return false; + } + for (idx, (_offset, (frag_prov, frag_idx))) in frags.iter().copied().enumerate() { + if frag_prov != prov || frag_idx != idx as u8 { + return false; + } + } + // Looks like a pointer! Move it over to the ptr provenance map. + bytes.remove_range(range); + self.ptrs.insert(offset, prov); + } + // We managed to convert everything into whole pointers. + self.bytes = None; + true + } + /// Check if there is ptr-sized provenance at the given index. /// Does not mean anything for bytewise provenance! But can be useful as an optimization. pub fn get_ptr(&self, offset: Size) -> Option<Prov> { @@ -137,7 +176,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Yields all the provenances stored in this map. pub fn provenances(&self) -> impl Iterator<Item = Prov> { - let bytes = self.bytes.iter().flat_map(|b| b.values()); + let bytes = self.bytes.iter().flat_map(|b| b.values().map(|(p, _i)| p)); self.ptrs.values().chain(bytes).copied() } @@ -148,16 +187,12 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Removes all provenance inside the given range. /// If there is provenance overlapping with the edges, might result in an error. - pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) -> AllocResult { + pub fn clear(&mut self, range: AllocRange, cx: &impl HasDataLayout) { let start = range.start; let end = range.end(); // Clear the bytewise part -- this is easy. - if Prov::OFFSET_IS_ADDR { - if let Some(bytes) = self.bytes.as_mut() { - bytes.remove_range(start..end); - } - } else { - debug_assert!(self.bytes.is_none()); + if let Some(bytes) = self.bytes.as_mut() { + bytes.remove_range(start..end); } let pointer_size = cx.data_layout().pointer_size(); @@ -168,7 +203,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Find all provenance overlapping the given range. if self.range_ptrs_is_empty(range, cx) { // No provenance in this range, we are done. This is the common case. - return Ok(()); + return; } // This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much @@ -179,28 +214,20 @@ // We need to handle clearing the provenance from parts of a pointer. if first < start { - if !Prov::OFFSET_IS_ADDR { - // We can't split up the provenance into less than a pointer. - return Err(AllocError::OverwritePartialPointer(first)); - } // Insert the remaining part in the bytewise provenance.
let prov = self.ptrs[&first]; let bytes = self.bytes.get_or_insert_with(Box::default); for offset in first..start { - bytes.insert(offset, prov); + bytes.insert(offset, (prov, (offset - first).bytes() as u8)); } } if last > end { let begin_of_last = last - pointer_size; - if !Prov::OFFSET_IS_ADDR { - // We can't split up the provenance into less than a pointer. - return Err(AllocError::OverwritePartialPointer(begin_of_last)); - } // Insert the remaining part in the bytewise provenance. let prov = self.ptrs[&begin_of_last]; let bytes = self.bytes.get_or_insert_with(Box::default); for offset in end..last { - bytes.insert(offset, prov); + bytes.insert(offset, (prov, (offset - begin_of_last).bytes() as u8)); } } @@ -208,8 +235,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine, // i.e., this will not remove any other provenance just after the ones we care about. self.ptrs.remove_range(first..last); - - Ok(()) } /// Overwrites all provenance in the given range with wildcard provenance. @@ -218,10 +243,6 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// /// Provided for usage in Miri and panics otherwise. pub fn write_wildcards(&mut self, cx: &impl HasDataLayout, range: AllocRange) { - assert!( - Prov::OFFSET_IS_ADDR, - "writing wildcard provenance is not supported when `OFFSET_IS_ADDR` is false" - ); let wildcard = Prov::WILDCARD.unwrap(); let bytes = self.bytes.get_or_insert_with(Box::default); @@ -229,21 +250,22 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // Remove pointer provenances that overlap with the range, then readd the edge ones bytewise. let ptr_range = Self::adjusted_range_ptrs(range, cx); let ptrs = self.ptrs.range(ptr_range.clone()); - if let Some((offset, prov)) = ptrs.first() { - for byte_ofs in *offset..range.start { - bytes.insert(byte_ofs, *prov); + if let Some((offset, prov)) = ptrs.first().copied() { + for byte_ofs in offset..range.start { + bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8)); } } - if let Some((offset, prov)) = ptrs.last() { - for byte_ofs in range.end()..*offset + cx.data_layout().pointer_size() { - bytes.insert(byte_ofs, *prov); + if let Some((offset, prov)) = ptrs.last().copied() { + for byte_ofs in range.end()..offset + cx.data_layout().pointer_size() { + bytes.insert(byte_ofs, (prov, (byte_ofs - offset).bytes() as u8)); } } self.ptrs.remove_range(ptr_range); // Overwrite bytewise provenance. for offset in range.start..range.end() { - bytes.insert(offset, wildcard); + // The fragment index does not matter for wildcard provenance. + bytes.insert(offset, (wildcard, 0)); } } } @@ -253,7 +275,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// Offsets are already adjusted to the destination allocation. 
pub struct ProvenanceCopy<Prov> { dest_ptrs: Option<Box<[(Size, Prov)]>>, - dest_bytes: Option<Box<[(Size, Prov)]>>, + dest_bytes: Option<Box<[(Size, (Prov, u8))]>>, } impl<Prov: Provenance> ProvenanceMap<Prov> { @@ -263,7 +285,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest: Size, count: u64, cx: &impl HasDataLayout, - ) -> AllocResult<ProvenanceCopy<Prov>> { + ) -> ProvenanceCopy<Prov> { let shift_offset = move |idx, offset| { // compute offset for current repetition let dest_offset = dest + src.size * idx; // `Size` operations @@ -301,24 +323,16 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { let mut dest_bytes_box = None; let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first(); let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first(); - if !Prov::OFFSET_IS_ADDR { - // There can't be any bytewise provenance, and we cannot split up the begin/end overlap. - if let Some(entry) = begin_overlap { - return Err(AllocError::ReadPartialPointer(entry.0)); - } - if let Some(entry) = end_overlap { - return Err(AllocError::ReadPartialPointer(entry.0)); - } - debug_assert!(self.bytes.is_none()); - } else { - let mut bytes = Vec::new(); + // We only need to go here if there is some overlap or some bytewise provenance. + if begin_overlap.is_some() || end_overlap.is_some() || self.bytes.is_some() { + let mut bytes: Vec<(Size, (Prov, u8))> = Vec::new(); // First, if there is a part of a pointer at the start, add that. if let Some(entry) = begin_overlap { trace!("start overlapping entry: {entry:?}"); // For really small copies, make sure we don't run off the end of the `src` range. let entry_end = cmp::min(entry.0 + ptr_size, src.end()); for offset in src.start..entry_end { - bytes.push((offset, entry.1)); + bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8))); } } else { trace!("no start overlapping entry"); @@ -334,8 +348,9 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { let entry_start = cmp::max(entry.0, src.start); for offset in entry_start..src.end() { if bytes.last().is_none_or(|bytes_entry| bytes_entry.0 < offset) { - // The last entry, if it exists, has a lower offset than us. - bytes.push((offset, entry.1)); + // The last entry, if it exists, has a lower offset than us, so we + // can add it at the end and remain sorted. + bytes.push((offset, (entry.1, (offset - entry.0).bytes() as u8))); } else { // There already is an entry for this offset in there! This can happen when the // start and end range checks actually end up hitting the same pointer, so we @@ -358,7 +373,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { dest_bytes_box = Some(dest_bytes.into_boxed_slice()); } - Ok(ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box }) + ProvenanceCopy { dest_ptrs: dest_ptrs_box, dest_bytes: dest_bytes_box } } /// Applies a provenance copy. 
@@ -368,14 +383,10 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { if let Some(dest_ptrs) = copy.dest_ptrs { self.ptrs.insert_presorted(dest_ptrs.into()); } - if Prov::OFFSET_IS_ADDR { - if let Some(dest_bytes) = copy.dest_bytes - && !dest_bytes.is_empty() - { - self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into()); - } - } else { - debug_assert!(copy.dest_bytes.is_none()); + if let Some(dest_bytes) = copy.dest_bytes + && !dest_bytes.is_empty() + { + self.bytes.get_or_insert_with(Box::default).insert_presorted(dest_bytes.into()); } } } diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 77427a12d11..7c72a8ec243 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -582,9 +582,6 @@ pub enum UnsupportedOpInfo { // // The variants below are only reachable from CTFE/const prop, miri will never emit them. // - /// Overwriting parts of a pointer; without knowing absolute addresses, the resulting state - /// cannot be represented by the CTFE interpreter. - OverwritePartialPointer(Pointer<AllocId>), /// Attempting to read or copy parts of a pointer to somewhere else; without knowing absolute /// addresses, the resulting state cannot be represented by the CTFE interpreter. ReadPartialPointer(Pointer<AllocId>), diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index e25ff7651f6..c581b2ebf09 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -56,7 +56,7 @@ impl<T: HasDataLayout> PointerArithmetic for T {} /// mostly opaque; the `Machine` trait extends it with some more operations that also have access to /// some global state. /// The `Debug` rendering is used to display bare provenance, and for the default impl of `fmt`. -pub trait Provenance: Copy + fmt::Debug + 'static { +pub trait Provenance: Copy + PartialEq + fmt::Debug + 'static { /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address. /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are /// different from what the Abstract Machine prescribes, so the interpreter must prevent any @@ -79,7 +79,7 @@ pub trait Provenance: Copy + fmt::Debug + 'static { fn get_alloc_id(self) -> Option<AllocId>; /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance. - fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>; + fn join(left: Self, right: Self) -> Option<Self>; } /// The type of provenance in the compile-time interpreter. 
@@ -192,8 +192,8 @@ impl Provenance for CtfeProvenance { Some(self.alloc_id()) } - fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> { - panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false") + fn join(left: Self, right: Self) -> Option<Self> { + if left == right { Some(left) } else { None } } } @@ -224,8 +224,8 @@ impl Provenance for AllocId { Some(self) } - fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> { - panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false") + fn join(_left: Self, _right: Self) -> Option<Self> { + unreachable!() } } diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index c55c7fc6002..c977e5329c2 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -115,48 +115,6 @@ impl MirPhase { MirPhase::Runtime(runtime_phase) => (3, 1 + runtime_phase as usize), } } - - /// Parses a `MirPhase` from a pair of strings. Panics if this isn't possible for any reason. - pub fn parse(dialect: String, phase: Option<String>) -> Self { - match &*dialect.to_ascii_lowercase() { - "built" => { - assert!(phase.is_none(), "Cannot specify a phase for `Built` MIR"); - MirPhase::Built - } - "analysis" => Self::Analysis(AnalysisPhase::parse(phase)), - "runtime" => Self::Runtime(RuntimePhase::parse(phase)), - _ => bug!("Unknown MIR dialect: '{}'", dialect), - } - } -} - -impl AnalysisPhase { - pub fn parse(phase: Option<String>) -> Self { - let Some(phase) = phase else { - return Self::Initial; - }; - - match &*phase.to_ascii_lowercase() { - "initial" => Self::Initial, - "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup, - _ => bug!("Unknown analysis phase: '{}'", phase), - } - } -} - -impl RuntimePhase { - pub fn parse(phase: Option<String>) -> Self { - let Some(phase) = phase else { - return Self::Initial; - }; - - match &*phase.to_ascii_lowercase() { - "initial" => Self::Initial, - "post_cleanup" | "post-cleanup" | "postcleanup" => Self::PostCleanup, - "optimized" => Self::Optimized, - _ => bug!("Unknown runtime phase: '{}'", phase), - } - } } /// Where a specific `mir::Body` comes from. diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index 0d98e055d95..6b45b2dc3ff 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -2,7 +2,6 @@ use std::borrow::Cow; use std::fmt; use std::hash::Hash; -use rustc_ast::expand::autodiff_attrs::AutoDiffItem; use rustc_data_structures::base_n::{BaseNString, CASE_INSENSITIVE, ToBaseN}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxIndexMap; @@ -10,7 +9,7 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHas use rustc_data_structures::unord::UnordMap; use rustc_hashes::Hash128; use rustc_hir::ItemId; -use rustc_hir::attrs::InlineAttr; +use rustc_hir::attrs::{InlineAttr, Linkage}; use rustc_hir::def_id::{CrateNum, DefId, DefIdSet, LOCAL_CRATE}; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; use rustc_query_system::ich::StableHashingContext; @@ -150,7 +149,7 @@ impl<'tcx> MonoItem<'tcx> { // instantiation: // We emit an unused_attributes lint for this case, which should be kept in sync if possible. 
let codegen_fn_attrs = tcx.codegen_instance_attrs(instance.def); - if codegen_fn_attrs.contains_extern_indicator(tcx, instance.def.def_id()) + if codegen_fn_attrs.contains_extern_indicator() || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::NAKED) { return InstantiationMode::GloballyShared { may_conflict: false }; @@ -336,7 +335,6 @@ impl ToStableHashKey<StableHashingContext<'_>> for MonoItem<'_> { pub struct MonoItemPartitions<'tcx> { pub codegen_units: &'tcx [CodegenUnit<'tcx>], pub all_mono_items: &'tcx DefIdSet, - pub autodiff_items: &'tcx [AutoDiffItem], } #[derive(Debug, HashStable)] @@ -368,22 +366,6 @@ pub struct MonoItemData { pub size_estimate: usize, } -/// Specifies the linkage type for a `MonoItem`. -/// -/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants. -#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)] -pub enum Linkage { - External, - AvailableExternally, - LinkOnceAny, - LinkOnceODR, - WeakAny, - WeakODR, - Internal, - ExternalWeak, - Common, -} - /// Specifies the symbol visibility with regards to dynamic linking. /// /// Visibility doesn't have any effect when linkage is internal. @@ -566,10 +548,27 @@ impl<'tcx> CodegenUnit<'tcx> { let mut items: Vec<_> = self.items().iter().map(|(&i, &data)| (i, data)).collect(); if !tcx.sess.opts.unstable_opts.codegen_source_order { - // It's already deterministic, so we can just use it. - return items; + // In this case, we do not need to keep the items in any specific order, as the input + // is already deterministic. + // + // However, it seems that moving related things (such as different + // monomorphizations of the same function) close to one another is actually beneficial + // for LLVM performance. + // LLVM will codegen the items in the order we pass them to it, and when it handles + // similar things in succession, it seems that it leads to better cache utilization, + // less branch mispredictions and in general to better performance. + // For example, if we have functions `a`, `c::<u32>`, `b`, `c::<i16>`, `d` and + // `c::<bool>`, it seems that it helps LLVM's performance to codegen the three `c` + // instantiations right after one another, as they will likely reference similar types, + // call similar functions, etc. + // + // See https://github.com/rust-lang/rust/pull/145358 for more details. + // + // Sorting by symbol name should not incur any new non-determinism. + items.sort_by_cached_key(|&(i, _)| i.symbol_name(tcx)); + } else { + items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i)); } - items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i)); items } diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index d4d925d2057..128ae8549f7 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -929,7 +929,10 @@ impl<'tcx> TerminatorKind<'tcx> { } Yield { value, resume_arg, .. } => write!(fmt, "{resume_arg:?} = yield({value:?})"), Unreachable => write!(fmt, "unreachable"), - Drop { place, .. } => write!(fmt, "drop({place:?})"), + Drop { place, async_fut: None, .. } => write!(fmt, "drop({place:?})"), + Drop { place, async_fut: Some(async_fut), .. } => { + write!(fmt, "async drop({place:?}; poll={async_fut:?})") + } Call { func, args, destination, .. 
} => { write!(fmt, "{destination:?} = ")?; write!(fmt, "{func:?}(")?; @@ -1789,7 +1792,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( ascii.push('╼'); i += ptr_size; } - } else if let Some(prov) = alloc.provenance().get(i, &tcx) { + } else if let Some((prov, idx)) = alloc.provenance().get_byte(i, &tcx) { // Memory with provenance must be defined assert!( alloc.init_mask().is_range_initialized(alloc_range(i, Size::from_bytes(1))).is_ok() @@ -1799,7 +1802,7 @@ pub fn write_allocation_bytes<'tcx, Prov: Provenance, Extra, Bytes: AllocBytes>( // Format is similar to "oversized" above. let j = i.bytes_usize(); let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0]; - write!(w, "╾{c:02x}{prov:#?} (1 ptr byte)╼")?; + write!(w, "╾{c:02x}{prov:#?} (ptr fragment {idx})╼")?; i += Size::from_bytes(1); } else if alloc .init_mask() diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs index a8a95c699d8..466b9c7a3c2 100644 --- a/compiler/rustc_middle/src/mir/query.rs +++ b/compiler/rustc_middle/src/mir/query.rs @@ -149,8 +149,15 @@ pub enum ConstraintCategory<'tcx> { /// A constraint that doesn't correspond to anything the user sees. Internal, - /// An internal constraint derived from an illegal universe relation. - IllegalUniverse, + /// An internal constraint added when a region outlives a placeholder + /// it cannot name and therefore has to outlive `'static`. The argument + /// is the unnameable placeholder and the constraint is always between + /// an SCC representative and `'static`. + OutlivesUnnameablePlaceholder( + #[type_foldable(identity)] + #[type_visitable(ignore)] + ty::RegionVid, + ), } #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] diff --git a/compiler/rustc_middle/src/mir/statement.rs b/compiler/rustc_middle/src/mir/statement.rs index 683d7b48617..6e52bc775ef 100644 --- a/compiler/rustc_middle/src/mir/statement.rs +++ b/compiler/rustc_middle/src/mir/statement.rs @@ -160,7 +160,11 @@ impl<'tcx> PlaceTy<'tcx> { /// Convenience wrapper around `projection_ty_core` for `PlaceElem`, /// where we can just use the `Ty` that is already stored inline on /// field projection elems. - pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> { + pub fn projection_ty<V: ::std::fmt::Debug>( + self, + tcx: TyCtxt<'tcx>, + elem: ProjectionElem<V, Ty<'tcx>>, + ) -> PlaceTy<'tcx> { self.projection_ty_core(tcx, &elem, |ty| ty, |_, _, _, ty| ty, |ty| ty) } @@ -290,6 +294,36 @@ impl<V, T> ProjectionElem<V, T> { Self::UnwrapUnsafeBinder(..) => false, } } + + /// Returns the `ProjectionKind` associated to this projection. + pub fn kind(self) -> ProjectionKind { + self.try_map(|_| Some(()), |_| ()).unwrap() + } + + /// Apply functions to types and values in this projection and return the result. 
+ pub fn try_map<V2, T2>( + self, + v: impl FnOnce(V) -> Option<V2>, + t: impl FnOnce(T) -> T2, + ) -> Option<ProjectionElem<V2, T2>> { + Some(match self { + ProjectionElem::Deref => ProjectionElem::Deref, + ProjectionElem::Downcast(name, read_variant) => { + ProjectionElem::Downcast(name, read_variant) + } + ProjectionElem::Field(f, ty) => ProjectionElem::Field(f, t(ty)), + ProjectionElem::ConstantIndex { offset, min_length, from_end } => { + ProjectionElem::ConstantIndex { offset, min_length, from_end } + } + ProjectionElem::Subslice { from, to, from_end } => { + ProjectionElem::Subslice { from, to, from_end } + } + ProjectionElem::OpaqueCast(ty) => ProjectionElem::OpaqueCast(t(ty)), + ProjectionElem::Subtype(ty) => ProjectionElem::Subtype(t(ty)), + ProjectionElem::UnwrapUnsafeBinder(ty) => ProjectionElem::UnwrapUnsafeBinder(t(ty)), + ProjectionElem::Index(val) => ProjectionElem::Index(v(val)?), + }) + } } /// Alias for projections as they appear in `UserTypeProjection`, where we diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index 6039a03aa29..3b8def67f92 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -1371,12 +1371,7 @@ pub enum Rvalue<'tcx> { /// Creates an array where each element is the value of the operand. /// - /// This is the cause of a bug in the case where the repetition count is zero because the value - /// is not dropped, see [#74836]. - /// /// Corresponds to source code like `[x; 32]`. - /// - /// [#74836]: https://github.com/rust-lang/rust/issues/74836 Repeat(Operand<'tcx>, ty::Const<'tcx>), /// Creates a reference of the indicated kind to the place. diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 42a68b29ec7..904d78d69b6 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -531,13 +531,20 @@ macro_rules! make_mir_visitor { unwind: _, replace: _, drop: _, - async_fut: _, + async_fut, } => { self.visit_place( place, PlaceContext::MutatingUse(MutatingUseContext::Drop), location ); + if let Some(async_fut) = async_fut { + self.visit_local( + $(&$mutability)? *async_fut, + PlaceContext::MutatingUse(MutatingUseContext::Borrow), + location + ); + } } TerminatorKind::Call { diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index a8b357bf105..ea62461ebeb 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -343,6 +343,7 @@ trivial! { rustc_span::Symbol, rustc_span::Ident, rustc_target::spec::PanicStrategy, + rustc_target::spec::SanitizerSet, rustc_type_ir::Variance, u32, usize, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index f4d0120a2e7..7bd8a0525a2 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -100,7 +100,7 @@ use rustc_session::lint::LintExpectationId; use rustc_span::def_id::LOCAL_CRATE; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, Span, Symbol}; -use rustc_target::spec::PanicStrategy; +use rustc_target::spec::{PanicStrategy, SanitizerSet}; use {rustc_abi as abi, rustc_ast as ast, rustc_hir as hir}; use crate::infer::canonical::{self, Canonical}; @@ -696,6 +696,22 @@ rustc_queries! { return_result_from_ensure_ok } + /// Used in case `mir_borrowck` fails to prove an obligation. 
We generally assume that + /// all goals we prove in MIR type check hold as we've already checked them in HIR typeck. + /// + /// However, we replace each free region in the MIR body with a unique region inference + /// variable. As we may rely on structural identity when proving goals this may cause a + /// goal to no longer hold. We store obligations for which this may happen during HIR + /// typeck in the `TypeckResults`. We then uniquify and reprove them in case MIR typeck + /// encounters an unexpected error. We expect this to result in an error when used and + /// delay a bug if it does not. + query check_potentially_region_dependent_goals(key: LocalDefId) -> Result<(), ErrorGuaranteed> { + desc { + |tcx| "reproving potentially region dependent HIR typeck goals for `{}", + tcx.def_path_str(key) + } + } + /// MIR after our optimization passes have run. This is MIR that is ready /// for codegen. This is also the only query that can fetch non-local MIR, at present. query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> { @@ -1116,6 +1132,11 @@ rustc_queries! { } /// Unsafety-check this `LocalDefId`. + query check_transmutes(key: LocalDefId) { + desc { |tcx| "check transmute calls inside `{}`", tcx.def_path_str(key) } + } + + /// Unsafety-check this `LocalDefId`. query check_unsafety(key: LocalDefId) { desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key) } } @@ -2686,6 +2707,16 @@ rustc_queries! { desc { |tcx| "looking up anon const kind of `{}`", tcx.def_path_str(def_id) } separate_provide_extern } + + /// Checks for the nearest `#[sanitize(xyz = "off")]` or + /// `#[sanitize(xyz = "on")]` on this def and any enclosing defs, up to the + /// crate root. + /// + /// Returns the set of sanitizers that is explicitly disabled for this def. + query disabled_sanitizers_for(key: LocalDefId) -> SanitizerSet { + desc { |tcx| "checking what set of sanitizers are enabled on `{}`", tcx.def_path_str(key) } + feedable + } } rustc_with_all_queries! { define_callbacks! 
} diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs index a7ac3442898..546791135ba 100644 --- a/compiler/rustc_middle/src/query/on_disk_cache.rs +++ b/compiler/rustc_middle/src/query/on_disk_cache.rs @@ -645,34 +645,29 @@ impl<'a, 'tcx> SpanDecoder for CacheDecoder<'a, 'tcx> { let parent = Option::<LocalDefId>::decode(self); let tag: u8 = Decodable::decode(self); - if tag == TAG_PARTIAL_SPAN { - return Span::new(BytePos(0), BytePos(0), ctxt, parent); - } else if tag == TAG_RELATIVE_SPAN { - let dlo = u32::decode(self); - let dto = u32::decode(self); - - let enclosing = self.tcx.source_span_untracked(parent.unwrap()).data_untracked(); - let span = Span::new( - enclosing.lo + BytePos::from_u32(dlo), - enclosing.lo + BytePos::from_u32(dto), - ctxt, - parent, - ); - - return span; - } else { - debug_assert_eq!(tag, TAG_FULL_SPAN); - } - - let file_lo_index = SourceFileIndex::decode(self); - let line_lo = usize::decode(self); - let col_lo = RelativeBytePos::decode(self); - let len = BytePos::decode(self); - - let file_lo = self.file_index_to_file(file_lo_index); - let lo = file_lo.lines()[line_lo - 1] + col_lo; - let lo = file_lo.absolute_position(lo); - let hi = lo + len; + let (lo, hi) = match tag { + TAG_PARTIAL_SPAN => (BytePos(0), BytePos(0)), + TAG_RELATIVE_SPAN => { + let dlo = u32::decode(self); + let dto = u32::decode(self); + + let enclosing = self.tcx.source_span_untracked(parent.unwrap()).data_untracked(); + (enclosing.lo + BytePos::from_u32(dlo), enclosing.lo + BytePos::from_u32(dto)) + } + TAG_FULL_SPAN => { + let file_lo_index = SourceFileIndex::decode(self); + let line_lo = usize::decode(self); + let col_lo = RelativeBytePos::decode(self); + let len = BytePos::decode(self); + + let file_lo = self.file_index_to_file(file_lo_index); + let lo = file_lo.lines()[line_lo - 1] + col_lo; + let lo = file_lo.absolute_position(lo); + let hi = lo + len; + (lo, hi) + } + _ => unreachable!(), + }; Span::new(lo, hi, ctxt, parent) } diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index 3dd6d2c8928..f1d19800a78 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -832,17 +832,17 @@ pub enum PatKind<'tcx> { }, /// One of the following: - /// * `&str` (represented as a valtree), which will be handled as a string pattern and thus + /// * `&str`, which will be handled as a string pattern and thus /// exhaustiveness checking will detect if you use the same string twice in different /// patterns. - /// * integer, bool, char or float (represented as a valtree), which will be handled by + /// * integer, bool, char or float, which will be handled by /// exhaustiveness to cover exactly its own value, similar to `&str`, but these values are /// much simpler. /// * raw pointers derived from integers, other raw pointers will have already resulted in an // error. /// * `String`, if `string_deref_patterns` is enabled. 
Constant { - value: mir::Const<'tcx>, + value: ty::Value<'tcx>, }, /// Pattern obtained by converting a constant (inline or named) to its pattern @@ -935,7 +935,7 @@ impl<'tcx> PatRange<'tcx> { let lo_is_min = match self.lo { PatRangeBoundary::NegInfinity => true, PatRangeBoundary::Finite(value) => { - let lo = value.try_to_bits(size).unwrap() ^ bias; + let lo = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias; lo <= min } PatRangeBoundary::PosInfinity => false, @@ -944,7 +944,7 @@ impl<'tcx> PatRange<'tcx> { let hi_is_max = match self.hi { PatRangeBoundary::NegInfinity => false, PatRangeBoundary::Finite(value) => { - let hi = value.try_to_bits(size).unwrap() ^ bias; + let hi = value.try_to_scalar_int().unwrap().to_bits(size) ^ bias; hi > max || hi == max && self.end == RangeEnd::Included } PatRangeBoundary::PosInfinity => true, @@ -957,22 +957,17 @@ impl<'tcx> PatRange<'tcx> { } #[inline] - pub fn contains( - &self, - value: mir::Const<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<bool> { + pub fn contains(&self, value: ty::Value<'tcx>, tcx: TyCtxt<'tcx>) -> Option<bool> { use Ordering::*; - debug_assert_eq!(self.ty, value.ty()); + debug_assert_eq!(value.ty, self.ty); let ty = self.ty; - let value = PatRangeBoundary::Finite(value); + let value = PatRangeBoundary::Finite(value.valtree); // For performance, it's important to only do the second comparison if necessary. Some( - match self.lo.compare_with(value, ty, tcx, typing_env)? { + match self.lo.compare_with(value, ty, tcx)? { Less | Equal => true, Greater => false, - } && match value.compare_with(self.hi, ty, tcx, typing_env)? { + } && match value.compare_with(self.hi, ty, tcx)? { Less => true, Equal => self.end == RangeEnd::Included, Greater => false, @@ -981,21 +976,16 @@ impl<'tcx> PatRange<'tcx> { } #[inline] - pub fn overlaps( - &self, - other: &Self, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<bool> { + pub fn overlaps(&self, other: &Self, tcx: TyCtxt<'tcx>) -> Option<bool> { use Ordering::*; debug_assert_eq!(self.ty, other.ty); // For performance, it's important to only do the second comparison if necessary. Some( - match other.lo.compare_with(self.hi, self.ty, tcx, typing_env)? { + match other.lo.compare_with(self.hi, self.ty, tcx)? { Less => true, Equal => self.end == RangeEnd::Included, Greater => false, - } && match self.lo.compare_with(other.hi, self.ty, tcx, typing_env)? { + } && match self.lo.compare_with(other.hi, self.ty, tcx)? { Less => true, Equal => other.end == RangeEnd::Included, Greater => false, @@ -1006,11 +996,13 @@ impl<'tcx> PatRange<'tcx> { impl<'tcx> fmt::Display for PatRange<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let PatRangeBoundary::Finite(value) = &self.lo { + if let &PatRangeBoundary::Finite(valtree) = &self.lo { + let value = ty::Value { ty: self.ty, valtree }; write!(f, "{value}")?; } - if let PatRangeBoundary::Finite(value) = &self.hi { + if let &PatRangeBoundary::Finite(valtree) = &self.hi { write!(f, "{}", self.end)?; + let value = ty::Value { ty: self.ty, valtree }; write!(f, "{value}")?; } else { // `0..` is parsed as an inclusive range, we must display it correctly. @@ -1024,7 +1016,8 @@ impl<'tcx> fmt::Display for PatRange<'tcx> { /// If present, the const must be of a numeric type. #[derive(Copy, Clone, Debug, PartialEq, HashStable, TypeVisitable)] pub enum PatRangeBoundary<'tcx> { - Finite(mir::Const<'tcx>), + /// The type of this valtree is stored in the surrounding `PatRange`. 
+ Finite(ty::ValTree<'tcx>), NegInfinity, PosInfinity, } @@ -1035,20 +1028,15 @@ impl<'tcx> PatRangeBoundary<'tcx> { matches!(self, Self::Finite(..)) } #[inline] - pub fn as_finite(self) -> Option<mir::Const<'tcx>> { + pub fn as_finite(self) -> Option<ty::ValTree<'tcx>> { match self { Self::Finite(value) => Some(value), Self::NegInfinity | Self::PosInfinity => None, } } - pub fn eval_bits( - self, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> u128 { + pub fn to_bits(self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> u128 { match self { - Self::Finite(value) => value.eval_bits(tcx, typing_env), + Self::Finite(value) => value.try_to_scalar_int().unwrap().to_bits_unchecked(), Self::NegInfinity => { // Unwrap is ok because the type is known to be numeric. ty.numeric_min_and_max_as_bits(tcx).unwrap().0 @@ -1060,14 +1048,8 @@ impl<'tcx> PatRangeBoundary<'tcx> { } } - #[instrument(skip(tcx, typing_env), level = "debug", ret)] - pub fn compare_with( - self, - other: Self, - ty: Ty<'tcx>, - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ) -> Option<Ordering> { + #[instrument(skip(tcx), level = "debug", ret)] + pub fn compare_with(self, other: Self, ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<Ordering> { use PatRangeBoundary::*; match (self, other) { // When comparing with infinities, we must remember that `0u8..` and `0u8..=255` @@ -1095,8 +1077,8 @@ impl<'tcx> PatRangeBoundary<'tcx> { _ => {} } - let a = self.eval_bits(ty, tcx, typing_env); - let b = other.eval_bits(ty, tcx, typing_env); + let a = self.to_bits(ty, tcx); + let b = other.to_bits(ty, tcx); match ty.kind() { ty::Float(ty::FloatTy::F16) => { diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs index 5bdde3a514e..32f91bfba6b 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -760,6 +760,9 @@ pub enum DynCompatibilityViolation { // Supertrait has a non-lifetime `for<T>` binder. SupertraitNonLifetimeBinder(SmallVec<[Span; 1]>), + // Trait has a `const Trait` supertrait. + SupertraitConst(SmallVec<[Span; 1]>), + /// Method has something illegal. Method(Symbol, MethodViolationCode, Span), @@ -785,6 +788,9 @@ impl DynCompatibilityViolation { DynCompatibilityViolation::SupertraitNonLifetimeBinder(_) => { "where clause cannot reference non-lifetime `for<...>` variables".into() } + DynCompatibilityViolation::SupertraitConst(_) => { + "it cannot have a `const` supertrait".into() + } DynCompatibilityViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => { format!("associated function `{name}` has no `self` parameter").into() } @@ -842,7 +848,8 @@ impl DynCompatibilityViolation { match self { DynCompatibilityViolation::SizedSelf(_) | DynCompatibilityViolation::SupertraitSelf(_) - | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..) => { + | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..) 
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 5bdde3a514e..32f91bfba6b 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -760,6 +760,9 @@ pub enum DynCompatibilityViolation {
     // Supertrait has a non-lifetime `for<T>` binder.
     SupertraitNonLifetimeBinder(SmallVec<[Span; 1]>),

+    // Trait has a `const Trait` supertrait.
+    SupertraitConst(SmallVec<[Span; 1]>),
+
     /// Method has something illegal.
     Method(Symbol, MethodViolationCode, Span),

@@ -785,6 +788,9 @@ impl DynCompatibilityViolation {
             DynCompatibilityViolation::SupertraitNonLifetimeBinder(_) => {
                 "where clause cannot reference non-lifetime `for<...>` variables".into()
             }
+            DynCompatibilityViolation::SupertraitConst(_) => {
+                "it cannot have a `const` supertrait".into()
+            }
             DynCompatibilityViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
                 format!("associated function `{name}` has no `self` parameter").into()
             }
@@ -842,7 +848,8 @@ impl DynCompatibilityViolation {
         match self {
             DynCompatibilityViolation::SizedSelf(_)
             | DynCompatibilityViolation::SupertraitSelf(_)
-            | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..) => {
+            | DynCompatibilityViolation::SupertraitNonLifetimeBinder(..)
+            | DynCompatibilityViolation::SupertraitConst(_) => {
                 DynCompatibilityViolationSolution::None
             }
             DynCompatibilityViolation::Method(
@@ -873,15 +880,17 @@ impl DynCompatibilityViolation {
         match self {
             DynCompatibilityViolation::SupertraitSelf(spans)
             | DynCompatibilityViolation::SizedSelf(spans)
-            | DynCompatibilityViolation::SupertraitNonLifetimeBinder(spans) => spans.clone(),
+            | DynCompatibilityViolation::SupertraitNonLifetimeBinder(spans)
+            | DynCompatibilityViolation::SupertraitConst(spans) => spans.clone(),
             DynCompatibilityViolation::AssocConst(_, span)
             | DynCompatibilityViolation::GAT(_, span)
-            | DynCompatibilityViolation::Method(_, _, span)
-                if *span != DUMMY_SP =>
-            {
-                smallvec![*span]
+            | DynCompatibilityViolation::Method(_, _, span) => {
+                if *span != DUMMY_SP {
+                    smallvec![*span]
+                } else {
+                    smallvec![]
+                }
             }
-            _ => smallvec![],
         }
     }
 }
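The new `SupertraitConst` variant reports traits with a `const` supertrait as dyn-incompatible. A hedged sketch of the shape it targets, written against the unstable `const_trait_impl` feature (the exact surface syntax may differ on current nightlies):

#![feature(const_trait_impl)]

#[const_trait]
trait Base {
    fn f(&self);
}

// A trait with a `const` supertrait...
trait Sub: const Base {}

// ...cannot be used as a trait object; code like the following would
// now be rejected with "it cannot have a `const` supertrait":
// fn take(_: &dyn Sub) {}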
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
index d95006dcf4a..a14e47d7082 100644
--- a/compiler/rustc_middle/src/ty/consts/valtree.rs
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -2,10 +2,12 @@ use std::fmt;
 use std::ops::Deref;

 use rustc_data_structures::intern::Interned;
+use rustc_hir::def::Namespace;
 use rustc_macros::{HashStable, Lift, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable};

 use super::ScalarInt;
 use crate::mir::interpret::{ErrorHandled, Scalar};
+use crate::ty::print::{FmtPrinter, PrettyPrinter};
 use crate::ty::{self, Ty, TyCtxt};

 /// This data structure is used to represent the value of constants used in the type system.
@@ -133,6 +135,8 @@ pub type ConstToValTreeResult<'tcx> = Result<Result<ValTree<'tcx>, Ty<'tcx>>, Er
 /// A type-level constant value.
 ///
 /// Represents a typed, fully evaluated constant.
+/// Note that this is also used by pattern elaboration to represent values which cannot occur in
+/// types, such as raw pointers and floats.
 #[derive(Copy, Clone, Debug, Hash, Eq, PartialEq)]
 #[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable, Lift)]
 pub struct Value<'tcx> {
@@ -203,3 +207,14 @@ impl<'tcx> rustc_type_ir::inherent::ValueConst<TyCtxt<'tcx>> for Value<'tcx> {
         self.valtree
     }
 }
+
+impl<'tcx> fmt::Display for Value<'tcx> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        ty::tls::with(move |tcx| {
+            let cv = tcx.lift(*self).unwrap();
+            let mut p = FmtPrinter::new(tcx, Namespace::ValueNS);
+            p.pretty_print_const_valtree(cv, /*print_ty*/ true)?;
+            f.write_str(&p.into_buffer())
+        })
+    }
+}
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 49a4733de3b..4990a85466e 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -33,7 +33,7 @@ use rustc_errors::{
     Applicability, Diag, DiagCtxtHandle, ErrorGuaranteed, LintDiagnostic, LintEmitter, MultiSpan,
 };
 use rustc_hir::attrs::AttributeKind;
-use rustc_hir::def::{CtorKind, DefKind};
+use rustc_hir::def::{CtorKind, CtorOf, DefKind};
 use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE, LocalDefId};
 use rustc_hir::definitions::{DefPathData, Definitions, DisambiguatorState};
 use rustc_hir::intravisit::VisitorExt;
@@ -445,7 +445,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> {
     }

     fn fn_is_const(self, def_id: DefId) -> bool {
-        debug_assert_matches!(self.def_kind(def_id), DefKind::Fn | DefKind::AssocFn);
+        debug_assert_matches!(
+            self.def_kind(def_id),
+            DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(CtorOf::Struct, CtorKind::Fn)
+        );
         self.is_conditionally_const(def_id)
     }

@@ -1418,6 +1421,8 @@ pub struct TyCtxt<'tcx> {
 }

 impl<'tcx> LintEmitter for TyCtxt<'tcx> {
+    type Id = HirId;
+
     fn emit_node_span_lint(
         self,
         lint: &'static Lint,
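The extended debug assertion in `fn_is_const` accounts for tuple-struct constructors, which the language already treats like `const fn`s. A stable-Rust illustration of why that `DefKind::Ctor` can reach this query:

struct Wrap(u8);

// The constructor `Wrap` is callable in const contexts, exactly as if
// it were declared `const fn Wrap(x: u8) -> Wrap`:
const W: Wrap = Wrap(3);

fn main() {
    // It also coerces to an ordinary function pointer.
    let ctor: fn(u8) -> Wrap = Wrap;
    let Wrap(x) = ctor(7);
    let Wrap(y) = W;
    assert_eq!(x + y, 10);
}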
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index b3042904a29..b9a6f67ab0d 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -23,7 +23,7 @@ impl IntoDiagArg for Ty<'_> {
     fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue {
         ty::tls::with(|tcx| {
             let ty = tcx.short_string(self, path);
-            rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(ty))
+            DiagArgValue::Str(std::borrow::Cow::Owned(ty))
         })
     }
 }
@@ -32,7 +32,7 @@ impl IntoDiagArg for Instance<'_> {
     fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue {
         ty::tls::with(|tcx| {
             let instance = tcx.short_string_namespace(self, path, Namespace::ValueNS);
-            rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(instance))
+            DiagArgValue::Str(std::borrow::Cow::Owned(instance))
         })
     }
 }
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 73e1661106e..e567ba05f61 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -32,7 +32,7 @@ use rustc_data_structures::intern::Interned;
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_data_structures::steal::Steal;
 use rustc_data_structures::unord::{UnordMap, UnordSet};
-use rustc_errors::{Diag, ErrorGuaranteed};
+use rustc_errors::{Diag, ErrorGuaranteed, LintBuffer};
 use rustc_hir::attrs::{AttributeKind, StrippedCfgItem};
 use rustc_hir::def::{CtorKind, CtorOf, DefKind, DocLinkResMap, LifetimeRes, Res};
 use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LocalDefIdMap};
@@ -46,7 +46,6 @@ use rustc_macros::{
 };
 use rustc_query_system::ich::StableHashingContext;
 use rustc_serialize::{Decodable, Encodable};
-use rustc_session::lint::LintBuffer;
 pub use rustc_session::lint::RegisteredTools;
 use rustc_span::hygiene::MacroKind;
 use rustc_span::{DUMMY_SP, ExpnId, ExpnKind, Ident, Span, Symbol, sym};
@@ -830,14 +829,15 @@ impl<'tcx> OpaqueHiddenType<'tcx> {
         // Convert the type from the function into a type valid outside by mapping generic
         // parameters into the context of the opaque.
         //
-        // We erase regions when doing this during HIR typeck.
+        // We erase regions when doing this during HIR typeck. We manually use `fold_regions`
+        // here as we do not want to anonymize bound variables.
         let this = match defining_scope_kind {
-            DefiningScopeKind::HirTypeck => tcx.erase_regions(self),
+            DefiningScopeKind::HirTypeck => fold_regions(tcx, self, |_, _| tcx.lifetimes.re_erased),
             DefiningScopeKind::MirBorrowck => self,
         };
         let result = this.fold_with(&mut opaque_types::ReverseMapper::new(tcx, map, self.span));
         if cfg!(debug_assertions) && matches!(defining_scope_kind, DefiningScopeKind::HirTypeck) {
-            assert_eq!(result.ty, tcx.erase_regions(result.ty));
+            assert_eq!(result.ty, fold_regions(tcx, result.ty, |_, _| tcx.lifetimes.re_erased));
         }
         result
     }
@@ -1925,21 +1925,50 @@ impl<'tcx> TyCtxt<'tcx> {
         self.impl_trait_ref(def_id).map(|tr| tr.skip_binder().def_id)
     }

-    /// If the given `DefId` is an associated item, returns the `DefId` of the parent trait or impl.
-    pub fn assoc_parent(self, def_id: DefId) -> Option<DefId> {
-        self.def_kind(def_id).is_assoc().then(|| self.parent(def_id))
+    /// If the given `DefId` is an associated item, returns the `DefId` and `DefKind` of the parent trait or impl.
+    pub fn assoc_parent(self, def_id: DefId) -> Option<(DefId, DefKind)> {
+        if !self.def_kind(def_id).is_assoc() {
+            return None;
+        }
+        let parent = self.parent(def_id);
+        let def_kind = self.def_kind(parent);
+        Some((parent, def_kind))
     }

     /// If the given `DefId` is an associated item of a trait,
     /// returns the `DefId` of the trait; otherwise, returns `None`.
     pub fn trait_of_assoc(self, def_id: DefId) -> Option<DefId> {
-        self.assoc_parent(def_id).filter(|id| self.def_kind(id) == DefKind::Trait)
+        match self.assoc_parent(def_id) {
+            Some((id, DefKind::Trait)) => Some(id),
+            _ => None,
+        }
     }

     /// If the given `DefId` is an associated item of an impl,
     /// returns the `DefId` of the impl; otherwise returns `None`.
     pub fn impl_of_assoc(self, def_id: DefId) -> Option<DefId> {
-        self.assoc_parent(def_id).filter(|id| matches!(self.def_kind(id), DefKind::Impl { .. }))
+        match self.assoc_parent(def_id) {
+            Some((id, DefKind::Impl { .. })) => Some(id),
+            _ => None,
+        }
+    }
+
+    /// If the given `DefId` is an associated item of an inherent impl,
+    /// returns the `DefId` of the impl; otherwise, returns `None`.
+    pub fn inherent_impl_of_assoc(self, def_id: DefId) -> Option<DefId> {
+        match self.assoc_parent(def_id) {
+            Some((id, DefKind::Impl { of_trait: false })) => Some(id),
+            _ => None,
+        }
+    }
+
+    /// If the given `DefId` is an associated item of a trait impl,
+    /// returns the `DefId` of the impl; otherwise, returns `None`.
+    pub fn trait_impl_of_assoc(self, def_id: DefId) -> Option<DefId> {
+        match self.assoc_parent(def_id) {
+            Some((id, DefKind::Impl { of_trait: true })) => Some(id),
+            _ => None,
+        }
     }

     pub fn is_exportable(self, def_id: DefId) -> bool {
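The new `inherent_impl_of_assoc` and `trait_impl_of_assoc` helpers split the old `impl_of_assoc` behaviour by the parent's `DefKind`. In user-level terms, the three parents they distinguish look like this (function names are illustrative):

trait Tr {
    fn required(&self);   // trait_of_assoc          -> the trait `Tr`
}

struct S;

impl S {
    fn helper(&self) {}   // inherent_impl_of_assoc  -> the `impl S` block
}

impl Tr for S {
    fn required(&self) {} // trait_impl_of_assoc     -> the `impl Tr for S` block
}

fn main() {
    S.helper();
    Tr::required(&S);
}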
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index e6feafea122..9e6f277ef77 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -124,6 +124,15 @@ pub trait Printer<'tcx>: Sized {
         trait_ref: Option<ty::TraitRef<'tcx>>,
     ) -> Result<(), PrintError>;

+    fn print_coroutine_with_kind(
+        &mut self,
+        def_id: DefId,
+        parent_args: &'tcx [GenericArg<'tcx>],
+        kind: Ty<'tcx>,
+    ) -> Result<(), PrintError> {
+        self.print_path_with_generic_args(|p| p.print_def_path(def_id, parent_args), &[kind.into()])
+    }
+
     // Defaults (should not be overridden):

     #[instrument(skip(self), level = "debug")]
@@ -162,9 +171,10 @@ pub trait Printer<'tcx>: Sized {
             )) = self.tcx().coroutine_kind(def_id)
             && args.len() > parent_args.len()
         {
-            return self.print_path_with_generic_args(
-                |p| p.print_def_path(def_id, parent_args),
-                &args[..parent_args.len() + 1][..1],
+            return self.print_coroutine_with_kind(
+                def_id,
+                parent_args,
+                args[parent_args.len()].expect_ty(),
             );
         } else {
             // Closures' own generics are only captures, don't print them.
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 0a976f3a0ac..74caee7336a 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -337,10 +337,10 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
         false
     }

-    /// Returns `true` if the region should be printed in
-    /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`.
-    /// This is typically the case for all non-`'_` regions.
-    fn should_print_region(&self, region: ty::Region<'tcx>) -> bool;
+    /// Returns `true` if the region should be printed in optional positions,
+    /// e.g., `&'a T` or `dyn Tr + 'b`. (Regions like the one in `Cow<'static, T>`
+    /// will always be printed.)
+    fn should_print_optional_region(&self, region: ty::Region<'tcx>) -> bool;

     fn reset_type_limit(&mut self) {}

@@ -717,7 +717,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
             }
             ty::Ref(r, ty, mutbl) => {
                 write!(self, "&")?;
-                if self.should_print_region(r) {
+                if self.should_print_optional_region(r) {
                     r.print(self)?;
                     write!(self, " ")?;
                 }
@@ -785,7 +785,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write {
             },
             ty::Adt(def, args) => self.print_def_path(def.did(), args)?,
             ty::Dynamic(data, r, repr) => {
-                let print_r = self.should_print_region(r);
+                let print_r = self.should_print_optional_region(r);
                 if print_r {
                     write!(self, "(")?;
                 }
@@ -2494,7 +2494,7 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
         !self.type_length_limit.value_within_limit(self.printed_type_count)
     }

-    fn should_print_region(&self, region: ty::Region<'tcx>) -> bool {
+    fn should_print_optional_region(&self, region: ty::Region<'tcx>) -> bool {
         let highlight = self.region_highlight_mode;
         if highlight.region_highlighted(region).is_some() {
             return true;
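The rename to `should_print_optional_region` makes the hook's scope explicit: it only guards positions where surface syntax allows the region to be omitted, while regions that are ordinary generic arguments are always printed. Roughly, in user syntax:

use std::borrow::Cow;

// Optional position: the lifetime in `&'a u8` can be elided, so a
// printer may render the type as plain `&u8`.
fn optional<'a>(x: &'a u8) -> &'a u8 {
    x
}

// Non-optional position: the lifetime is a generic argument of `Cow`
// and always shows up in the printed path `Cow<'static, str>`.
fn always(c: Cow<'static, str>) -> usize {
    c.len()
}

fn main() {
    assert_eq!(*optional(&1), 1);
    assert_eq!(always(Cow::Borrowed("hi")), 2);
}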
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 0e2aff6f9bd..89ef46b1ae5 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -12,7 +12,6 @@ use rustc_hir::def_id::LocalDefId;
 use rustc_span::source_map::Spanned;
 use rustc_type_ir::{ConstKind, TypeFolder, VisitorResult, try_visit};

-use super::print::PrettyPrinter;
 use super::{GenericArg, GenericArgKind, Pattern, Region};
 use crate::mir::PlaceElem;
 use crate::ty::print::{FmtPrinter, Printer, with_no_trimmed_paths};
@@ -168,15 +167,11 @@ impl<'tcx> fmt::Debug for ty::Const<'tcx> {
     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
         // If this is a value, we spend some effort to make it look nice.
         if let ConstKind::Value(cv) = self.kind() {
-            return ty::tls::with(move |tcx| {
-                let cv = tcx.lift(cv).unwrap();
-                let mut p = FmtPrinter::new(tcx, Namespace::ValueNS);
-                p.pretty_print_const_valtree(cv, /*print_ty*/ true)?;
-                f.write_str(&p.into_buffer())
-            });
+            write!(f, "{}", cv)
+        } else {
+            // Fall back to something verbose.
+            write!(f, "{:?}", self.kind())
         }
-        // Fall back to something verbose.
-        write!(f, "{:?}", self.kind())
     }
 }
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 72474a60566..755fc68d86f 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -821,10 +821,38 @@ impl<'tcx> Ty<'tcx> {
     #[inline]
     pub fn new_coroutine_witness(
         tcx: TyCtxt<'tcx>,
-        id: DefId,
+        def_id: DefId,
         args: GenericArgsRef<'tcx>,
     ) -> Ty<'tcx> {
-        Ty::new(tcx, CoroutineWitness(id, args))
+        if cfg!(debug_assertions) {
+            tcx.debug_assert_args_compatible(tcx.typeck_root_def_id(def_id), args);
+        }
+        Ty::new(tcx, CoroutineWitness(def_id, args))
+    }
+
+    pub fn new_coroutine_witness_for_coroutine(
+        tcx: TyCtxt<'tcx>,
+        def_id: DefId,
+        coroutine_args: GenericArgsRef<'tcx>,
+    ) -> Ty<'tcx> {
+        tcx.debug_assert_args_compatible(def_id, coroutine_args);
+        // HACK: Coroutine witness types are lifetime erased, so they
+        // never reference any lifetime args from the coroutine. We erase
+        // the regions here since we may get into situations where a
+        // coroutine is recursively contained within itself, leading to
+        // witness types that differ by region args. This means that
+        // cycle detection in fulfillment will not kick in, which leads
+        // to unnecessary overflows in async code. See the issue:
+        // <https://github.com/rust-lang/rust/issues/145151>.
+        let args =
+            ty::GenericArgs::for_item(tcx, tcx.typeck_root_def_id(def_id), |def, _| {
+                match def.kind {
+                    ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+                    ty::GenericParamDefKind::Type { .. }
+                    | ty::GenericParamDefKind::Const { .. } => coroutine_args[def.index as usize],
+                }
+            });
+        Ty::new_coroutine_witness(tcx, def_id, args)
     }

     // misc
@@ -985,6 +1013,14 @@ impl<'tcx> rustc_type_ir::inherent::Ty<TyCtxt<'tcx>> for Ty<'tcx> {
         Ty::new_coroutine_witness(interner, def_id, args)
     }

+    fn new_coroutine_witness_for_coroutine(
+        interner: TyCtxt<'tcx>,
+        def_id: DefId,
+        coroutine_args: ty::GenericArgsRef<'tcx>,
+    ) -> Self {
+        Ty::new_coroutine_witness_for_coroutine(interner, def_id, coroutine_args)
+    }
+
     fn new_ptr(interner: TyCtxt<'tcx>, ty: Self, mutbl: hir::Mutability) -> Self {
         Ty::new_ptr(interner, ty, mutbl)
     }
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
index 59e2b2a034d..4e38d969192 100644
--- a/compiler/rustc_middle/src/ty/trait_def.rs
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -6,6 +6,7 @@ use rustc_hir as hir;
 use rustc_hir::def::DefKind;
 use rustc_hir::def_id::{DefId, LOCAL_CRATE};
 use rustc_macros::{Decodable, Encodable, HashStable};
+use rustc_span::symbol::sym;
 use tracing::debug;

 use crate::query::LocalCrate;
@@ -239,6 +240,12 @@ pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> Trait

 /// Query provider for `incoherent_impls`.
 pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] {
+    if let Some(def_id) = simp.def()
+        && !tcx.has_attr(def_id, sym::rustc_has_incoherent_inherent_impls)
+    {
+        return &[];
+    }
+
     let mut impls = Vec::new();
     for cnum in iter::once(LOCAL_CRATE).chain(tcx.crates(()).iter().copied()) {
         for &impl_def_id in tcx.crate_incoherent_impls((cnum, simp)) {
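The early return above makes `incoherent_impls` come back empty unless the self type opted in via the perma-unstable `rustc_has_incoherent_inherent_impls` attribute. A hedged sketch of how the standard library uses this attribute pair (internal only, gated behind `#![feature(rustc_attrs)]`):

// In the crate that defines the type:
#![feature(rustc_attrs)]

#[rustc_has_incoherent_inherent_impls]
pub struct Widget;

// In a *different* crate, an inherent impl is then permitted as long
// as each item also opts in:
//
// impl defining_crate::Widget {
//     #[rustc_allow_incoherent_impl]
//     pub fn helper() {}
// }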
diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs
index 6b187c5325a..8dd80aab946 100644
--- a/compiler/rustc_middle/src/ty/typeck_results.rs
+++ b/compiler/rustc_middle/src/ty/typeck_results.rs
@@ -206,10 +206,25 @@ pub struct TypeckResults<'tcx> {
     /// formatting modified file tests/ui/coroutine/retain-resume-ref.rs
     pub coroutine_stalled_predicates: FxIndexSet<(ty::Predicate<'tcx>, ObligationCause<'tcx>)>,

+    /// Goals proven during HIR typeck which may be region dependent.
+    ///
+    /// Borrowck *uniquifies* regions, which may cause these goals to be ambiguous in MIR
+    /// type check. We ICE if goals fail in borrowck to detect bugs during MIR building or
+    /// missed checks in HIR typeck. To avoid ICEs due to region dependence, we store all
+    /// goals which may be region dependent and reprove them in case borrowck encounters
+    /// an error.
+    pub potentially_region_dependent_goals:
+        FxIndexSet<(ty::Predicate<'tcx>, ObligationCause<'tcx>)>,
+
     /// Contains the data for evaluating the effect of feature `capture_disjoint_fields`
     /// on closure size.
     pub closure_size_eval: LocalDefIdMap<ClosureSizeProfileData<'tcx>>,

+    /// Stores the types involved in calls to the `transmute` intrinsic. These are meant to be
+    /// checked outside of typeck and borrowck to avoid cycles with opaque types and coroutine
+    /// layout computation.
+    pub transmutes_to_check: Vec<(Ty<'tcx>, Ty<'tcx>, HirId)>,
+
     /// Container types and field indices of `offset_of!` expressions
     offset_of_data: ItemLocalMap<(Ty<'tcx>, Vec<(VariantIdx, FieldIdx)>)>,
 }
@@ -240,7 +255,9 @@ impl<'tcx> TypeckResults<'tcx> {
             closure_fake_reads: Default::default(),
             rvalue_scopes: Default::default(),
             coroutine_stalled_predicates: Default::default(),
+            potentially_region_dependent_goals: Default::default(),
             closure_size_eval: Default::default(),
+            transmutes_to_check: Default::default(),
             offset_of_data: Default::default(),
         }
     }
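A hedged illustration of the cycle that `transmutes_to_check` avoids: checking a `transmute` during typeck can require layout information (the sizes of the source and target types), and when one of the types involved is the enclosing coroutine's own opaque future, that layout computation would feed back into the very typeck results being built. Deferring the check breaks the cycle:

async fn example() {
    // `transmute` requires size_of::<u32>() == size_of::<[u8; 4]>().
    // Instead of checking this while `example` is still being type
    // checked, the pair of types is recorded in `transmutes_to_check`
    // and verified after typeck and borrowck have finished.
    let bytes: [u8; 4] = unsafe { std::mem::transmute(1u32) };
    let _ = bytes;
}

fn main() {
    let _fut = example();
}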
