Diffstat (limited to 'compiler')
575 files changed, 11504 insertions, 8819 deletions
diff --git a/compiler/rustc_abi/Cargo.toml b/compiler/rustc_abi/Cargo.toml index 86dc84e2016..5f9afc46a1a 100644 --- a/compiler/rustc_abi/Cargo.toml +++ b/compiler/rustc_abi/Cargo.toml @@ -6,13 +6,13 @@ edition = "2024" [dependencies] # tidy-alphabetical-start bitflags = "2.4.1" -rand = { version = "0.8.4", default-features = false, optional = true } -rand_xoshiro = { version = "0.6.0", optional = true } -rustc_data_structures = { path = "../rustc_data_structures", optional = true } +rand = { version = "0.9.0", default-features = false, optional = true } +rand_xoshiro = { version = "0.7.0", optional = true } +rustc_data_structures = { path = "../rustc_data_structures", optional = true } rustc_hashes = { path = "../rustc_hashes" } rustc_index = { path = "../rustc_index", default-features = false } rustc_macros = { path = "../rustc_macros", optional = true } -rustc_serialize = { path = "../rustc_serialize", optional = true } +rustc_serialize = { path = "../rustc_serialize", optional = true } rustc_span = { path = "../rustc_span", optional = true } tracing = "0.1" # tidy-alphabetical-end diff --git a/compiler/rustc_abi/src/callconv.rs b/compiler/rustc_abi/src/callconv.rs index 4529ab8058e..a21e1aee9b0 100644 --- a/compiler/rustc_abi/src/callconv.rs +++ b/compiler/rustc_abi/src/callconv.rs @@ -74,7 +74,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { Ok(HomogeneousAggregate::Homogeneous(Reg { kind, size: self.size })) } - BackendRepr::Vector { .. } => { + BackendRepr::SimdVector { .. } => { assert!(!self.is_zst()); Ok(HomogeneousAggregate::Homogeneous(Reg { kind: RegKind::Vector, diff --git a/compiler/rustc_abi/src/extern_abi.rs b/compiler/rustc_abi/src/extern_abi.rs index 543c2f8ab12..4d70afd4e0b 100644 --- a/compiler/rustc_abi/src/extern_abi.rs +++ b/compiler/rustc_abi/src/extern_abi.rs @@ -191,6 +191,17 @@ impl StableOrd for ExternAbi { } impl ExternAbi { + /// An ABI "like Rust" + /// + /// These ABIs are fully controlled by the Rust compiler, which means they + /// - support unwinding with `-Cpanic=unwind`, unlike `extern "C"` + /// - often diverge from the C ABI + /// - are subject to change between compiler versions + pub fn is_rustic_abi(self) -> bool { + use ExternAbi::*; + matches!(self, Rust | RustCall | RustIntrinsic | RustCold) + } + pub fn supports_varargs(self) -> bool { // * C and Cdecl obviously support varargs. // * C can be based on Aapcs, SysV64 or Win64, so they must support varargs. 
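The `is_rustic_abi` predicate added above groups the ABIs that rustc itself defines (`Rust`, `RustCall`, `RustIntrinsic`, `RustCold`), as opposed to foreign ABIs such as `extern "C"`. Below is a minimal standalone sketch of the same classification; `MyAbi` is a hypothetical, pared-down stand-in for `ExternAbi`, not the real compiler type.

// Illustrative only: `MyAbi` is a hypothetical stand-in for rustc's `ExternAbi`.
#[derive(Clone, Copy, PartialEq, Debug)]
enum MyAbi {
    Rust,
    RustCall,
    RustIntrinsic,
    RustCold,
    C { unwind: bool },
}

impl MyAbi {
    // Mirrors the shape of the new `ExternAbi::is_rustic_abi`: ABIs fully controlled
    // by the Rust compiler, which may diverge from the C ABI and change between
    // compiler versions.
    fn is_rustic_abi(self) -> bool {
        use MyAbi::*;
        matches!(self, Rust | RustCall | RustIntrinsic | RustCold)
    }
}

fn main() {
    assert!(MyAbi::Rust.is_rustic_abi());
    assert!(MyAbi::RustCold.is_rustic_abi());
    assert!(!MyAbi::C { unwind: false }.is_rustic_abi());
    println!("rustic-ABI checks passed");
}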
diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 3f83787ea37..7bffeaf4cc9 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -4,6 +4,7 @@ use std::{cmp, iter}; use rustc_hashes::Hash64; use rustc_index::Idx; +use rustc_index::bit_set::BitMatrix; use tracing::debug; use crate::{ @@ -12,6 +13,9 @@ use crate::{ Variants, WrappingRange, }; +mod coroutine; +mod simple; + #[cfg(feature = "nightly")] mod ty; @@ -60,17 +64,28 @@ pub enum LayoutCalculatorError<F> { /// The fields or variants have irreconcilable reprs ReprConflict, + + /// The length of an SIMD type is zero + ZeroLengthSimdType, + + /// The length of an SIMD type exceeds the maximum number of lanes + OversizedSimdType { max_lanes: u64 }, + + /// An element type of an SIMD type isn't a primitive + NonPrimitiveSimdType(F), } impl<F> LayoutCalculatorError<F> { pub fn without_payload(&self) -> LayoutCalculatorError<()> { - match self { - LayoutCalculatorError::UnexpectedUnsized(_) => { - LayoutCalculatorError::UnexpectedUnsized(()) - } - LayoutCalculatorError::SizeOverflow => LayoutCalculatorError::SizeOverflow, - LayoutCalculatorError::EmptyUnion => LayoutCalculatorError::EmptyUnion, - LayoutCalculatorError::ReprConflict => LayoutCalculatorError::ReprConflict, + use LayoutCalculatorError::*; + match *self { + UnexpectedUnsized(_) => UnexpectedUnsized(()), + SizeOverflow => SizeOverflow, + EmptyUnion => EmptyUnion, + ReprConflict => ReprConflict, + ZeroLengthSimdType => ZeroLengthSimdType, + OversizedSimdType { max_lanes } => OversizedSimdType { max_lanes }, + NonPrimitiveSimdType(_) => NonPrimitiveSimdType(()), } } @@ -78,13 +93,15 @@ impl<F> LayoutCalculatorError<F> { /// /// Intended for use by rust-analyzer, as neither it nor `rustc_abi` depend on fluent infra. pub fn fallback_fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + use LayoutCalculatorError::*; f.write_str(match self { - LayoutCalculatorError::UnexpectedUnsized(_) => { - "an unsized type was found where a sized type was expected" + UnexpectedUnsized(_) => "an unsized type was found where a sized type was expected", + SizeOverflow => "size overflow", + EmptyUnion => "type is a union with no fields", + ReprConflict => "type has an invalid repr", + ZeroLengthSimdType | OversizedSimdType { .. 
} | NonPrimitiveSimdType(_) => { + "invalid simd type definition" } - LayoutCalculatorError::SizeOverflow => "size overflow", - LayoutCalculatorError::EmptyUnion => "type is a union with no fields", - LayoutCalculatorError::ReprConflict => "type has an invalid repr", }) } } @@ -102,41 +119,115 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Self { cx } } - pub fn scalar_pair<FieldIdx: Idx, VariantIdx: Idx>( + pub fn array_like<FieldIdx: Idx, VariantIdx: Idx, F>( &self, - a: Scalar, - b: Scalar, - ) -> LayoutData<FieldIdx, VariantIdx> { - let dl = self.cx.data_layout(); - let b_align = b.align(dl); - let align = a.align(dl).max(b_align).max(dl.aggregate_align); - let b_offset = a.size(dl).align_to(b_align.abi); - let size = (b_offset + b.size(dl)).align_to(align.abi); + element: &LayoutData<FieldIdx, VariantIdx>, + count_if_sized: Option<u64>, // None for slices + ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> { + let count = count_if_sized.unwrap_or(0); + let size = + element.size.checked_mul(count, &self.cx).ok_or(LayoutCalculatorError::SizeOverflow)?; + + Ok(LayoutData { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldsShape::Array { stride: element.size, count }, + backend_repr: BackendRepr::Memory { sized: count_if_sized.is_some() }, + largest_niche: element.largest_niche.filter(|_| count != 0), + uninhabited: element.uninhabited && count != 0, + align: element.align, + size, + max_repr_align: None, + unadjusted_abi_align: element.align.abi, + randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)), + }) + } - // HACK(nox): We iter on `b` and then `a` because `max_by_key` - // returns the last maximum. - let largest_niche = Niche::from_scalar(dl, b_offset, b) - .into_iter() - .chain(Niche::from_scalar(dl, Size::ZERO, a)) - .max_by_key(|niche| niche.available(dl)); + pub fn simd_type< + FieldIdx: Idx, + VariantIdx: Idx, + F: AsRef<LayoutData<FieldIdx, VariantIdx>> + fmt::Debug, + >( + &self, + element: F, + count: u64, + repr_packed: bool, + ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> { + let elt = element.as_ref(); + if count == 0 { + return Err(LayoutCalculatorError::ZeroLengthSimdType); + } else if count > crate::MAX_SIMD_LANES { + return Err(LayoutCalculatorError::OversizedSimdType { + max_lanes: crate::MAX_SIMD_LANES, + }); + } - let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes()); + let BackendRepr::Scalar(e_repr) = elt.backend_repr else { + return Err(LayoutCalculatorError::NonPrimitiveSimdType(element)); + }; + + // Compute the size and alignment of the vector + let dl = self.cx.data_layout(); + let size = + elt.size.checked_mul(count, dl).ok_or_else(|| LayoutCalculatorError::SizeOverflow)?; + let (repr, align) = if repr_packed && !count.is_power_of_two() { + // Non-power-of-two vectors have padding up to the next power-of-two. + // If we're a packed repr, remove the padding while keeping the alignment as close + // to a vector as possible. 
+ ( + BackendRepr::Memory { sized: true }, + AbiAndPrefAlign { + abi: Align::max_aligned_factor(size), + pref: dl.llvmlike_vector_align(size).pref, + }, + ) + } else { + (BackendRepr::SimdVector { element: e_repr, count }, dl.llvmlike_vector_align(size)) + }; + let size = size.align_to(align.abi); - LayoutData { + Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Arbitrary { - offsets: [Size::ZERO, b_offset].into(), - memory_index: [0, 1].into(), + offsets: [Size::ZERO].into(), + memory_index: [0].into(), }, - backend_repr: BackendRepr::ScalarPair(a, b), - largest_niche, + backend_repr: repr, + largest_niche: elt.largest_niche, uninhabited: false, - align, size, + align, max_repr_align: None, - unadjusted_abi_align: align.abi, - randomization_seed: Hash64::new(combined_seed), - } + unadjusted_abi_align: elt.align.abi, + randomization_seed: elt.randomization_seed.wrapping_add(Hash64::new(count)), + }) + } + + /// Compute the layout for a coroutine. + /// + /// This uses dedicated code instead of [`Self::layout_of_struct_or_enum`], as coroutine + /// fields may be shared between multiple variants (see the [`coroutine`] module for details). + pub fn coroutine< + 'a, + F: Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + fmt::Debug + Copy, + VariantIdx: Idx, + FieldIdx: Idx, + LocalIdx: Idx, + >( + &self, + local_layouts: &IndexSlice<LocalIdx, F>, + prefix_layouts: IndexVec<FieldIdx, F>, + variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>, + storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>, + tag_to_layout: impl Fn(Scalar) -> F, + ) -> LayoutCalculatorResult<FieldIdx, VariantIdx, F> { + coroutine::layout( + self, + local_layouts, + prefix_layouts, + variant_fields, + storage_conflicts, + tag_to_layout, + ) } pub fn univariant< @@ -214,25 +305,6 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { layout } - pub fn layout_of_never_type<FieldIdx: Idx, VariantIdx: Idx>( - &self, - ) -> LayoutData<FieldIdx, VariantIdx> { - let dl = self.cx.data_layout(); - // This is also used for uninhabited enums, so we use `Variants::Empty`. - LayoutData { - variants: Variants::Empty, - fields: FieldsShape::Primitive, - backend_repr: BackendRepr::Memory { sized: true }, - largest_niche: None, - uninhabited: true, - align: dl.i8_align, - size: Size::ZERO, - max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, - randomization_seed: Hash64::ZERO, - } - } - pub fn layout_of_struct_or_enum< 'a, FieldIdx: Idx, @@ -260,7 +332,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { Some(present_first) => present_first, // Uninhabited because it has no variants, or only absent ones. None if is_enum => { - return Ok(self.layout_of_never_type()); + return Ok(LayoutData::never_type(&self.cx)); } // If it's a struct, still compute a layout so that we can still compute the // field offsets. @@ -386,13 +458,15 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { BackendRepr::Memory { sized: true } } // Vectors require at least element alignment, else disable the opt - BackendRepr::Vector { element, count: _ } if element.align(dl).abi > align.abi => { + BackendRepr::SimdVector { element, count: _ } + if element.align(dl).abi > align.abi => + { BackendRepr::Memory { sized: true } } // the alignment tests passed and we can use this BackendRepr::Scalar(..) | BackendRepr::ScalarPair(..) - | BackendRepr::Vector { .. } + | BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. 
} => repr, }, }; @@ -464,7 +538,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { hide_niches(a); hide_niches(b); } - BackendRepr::Vector { element, count: _ } => hide_niches(element), + BackendRepr::SimdVector { element, count: _ } => hide_niches(element), BackendRepr::Memory { sized: _ } => {} } st.largest_niche = None; @@ -947,7 +1021,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { // Common prim might be uninit. Scalar::Union { value: prim } }; - let pair = self.scalar_pair::<FieldIdx, VariantIdx>(tag, prim_scalar); + let pair = + LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, tag, prim_scalar); let pair_offsets = match pair.fields { FieldsShape::Arbitrary { ref offsets, ref memory_index } => { assert_eq!(memory_index.raw, [0, 1]); @@ -1314,7 +1389,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { match field.backend_repr { // For plain scalars, or vectors of them, we can't unpack // newtypes for `#[repr(C)]`, as that affects C ABIs. - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } if optimize_abi => { + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } + if optimize_abi => + { abi = field.backend_repr; } // But scalar pairs are Rust-specific and get @@ -1337,7 +1414,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { } else { ((j, b), (i, a)) }; - let pair = self.scalar_pair::<FieldIdx, VariantIdx>(a, b); + let pair = + LayoutData::<FieldIdx, VariantIdx>::scalar_pair(&self.cx, a, b); let pair_offsets = match pair.fields { FieldsShape::Arbitrary { ref offsets, ref memory_index } => { assert_eq!(memory_index.raw, [0, 1]); diff --git a/compiler/rustc_abi/src/layout/coroutine.rs b/compiler/rustc_abi/src/layout/coroutine.rs new file mode 100644 index 00000000000..27e704d538c --- /dev/null +++ b/compiler/rustc_abi/src/layout/coroutine.rs @@ -0,0 +1,320 @@ +//! Coroutine layout logic. +//! +//! When laying out coroutines, we divide our saved local fields into two +//! categories: overlap-eligible and overlap-ineligible. +//! +//! Those fields which are ineligible for overlap go in a "prefix" at the +//! beginning of the layout, and always have space reserved for them. +//! +//! Overlap-eligible fields are only assigned to one variant, so we lay +//! those fields out for each variant and put them right after the +//! prefix. +//! +//! Finally, in the layout details, we point to the fields from the +//! variants they are assigned to. It is possible for some fields to be +//! included in multiple variants. No field ever "moves around" in the +//! layout; its offset is always the same. +//! +//! Also included in the layout are the upvars and the discriminant. +//! These are included as fields on the "outer" layout; they are not part +//! of any variant. + +use std::iter; + +use rustc_index::bit_set::{BitMatrix, DenseBitSet}; +use rustc_index::{Idx, IndexSlice, IndexVec}; +use tracing::{debug, trace}; + +use crate::{ + BackendRepr, FieldsShape, HasDataLayout, Integer, LayoutData, Primitive, ReprOptions, Scalar, + StructKind, TagEncoding, Variants, WrappingRange, +}; + +/// Overlap eligibility and variant assignment for each CoroutineSavedLocal. +#[derive(Clone, Debug, PartialEq)] +enum SavedLocalEligibility<VariantIdx, FieldIdx> { + Unassigned, + Assigned(VariantIdx), + Ineligible(Option<FieldIdx>), +} + +/// Compute the eligibility and assignment of each local. 
+fn coroutine_saved_local_eligibility<VariantIdx: Idx, FieldIdx: Idx, LocalIdx: Idx>( + nb_locals: usize, + variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>, + storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>, +) -> (DenseBitSet<LocalIdx>, IndexVec<LocalIdx, SavedLocalEligibility<VariantIdx, FieldIdx>>) { + use SavedLocalEligibility::*; + + let mut assignments: IndexVec<LocalIdx, _> = IndexVec::from_elem_n(Unassigned, nb_locals); + + // The saved locals not eligible for overlap. These will get + // "promoted" to the prefix of our coroutine. + let mut ineligible_locals = DenseBitSet::new_empty(nb_locals); + + // Figure out which of our saved locals are fields in only + // one variant. The rest are deemed ineligible for overlap. + for (variant_index, fields) in variant_fields.iter_enumerated() { + for local in fields { + match assignments[*local] { + Unassigned => { + assignments[*local] = Assigned(variant_index); + } + Assigned(idx) => { + // We've already seen this local at another suspension + // point, so it is no longer a candidate. + trace!( + "removing local {:?} in >1 variant ({:?}, {:?})", + local, variant_index, idx + ); + ineligible_locals.insert(*local); + assignments[*local] = Ineligible(None); + } + Ineligible(_) => {} + } + } + } + + // Next, check every pair of eligible locals to see if they + // conflict. + for local_a in storage_conflicts.rows() { + let conflicts_a = storage_conflicts.count(local_a); + if ineligible_locals.contains(local_a) { + continue; + } + + for local_b in storage_conflicts.iter(local_a) { + // local_a and local_b are storage live at the same time, therefore they + // cannot overlap in the coroutine layout. The only way to guarantee + // this is if they are in the same variant, or one is ineligible + // (which means it is stored in every variant). + if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] { + continue; + } + + // If they conflict, we will choose one to make ineligible. + // This is not always optimal; it's just a greedy heuristic that + // seems to produce good results most of the time. + let conflicts_b = storage_conflicts.count(local_b); + let (remove, other) = + if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) }; + ineligible_locals.insert(remove); + assignments[remove] = Ineligible(None); + trace!("removing local {:?} due to conflict with {:?}", remove, other); + } + } + + // Count the number of variants in use. If only one of them, then it is + // impossible to overlap any locals in our layout. In this case it's + // always better to make the remaining locals ineligible, so we can + // lay them out with the other locals in the prefix and eliminate + // unnecessary padding bytes. + { + let mut used_variants = DenseBitSet::new_empty(variant_fields.len()); + for assignment in &assignments { + if let Assigned(idx) = assignment { + used_variants.insert(*idx); + } + } + if used_variants.count() < 2 { + for assignment in assignments.iter_mut() { + *assignment = Ineligible(None); + } + ineligible_locals.insert_all(); + } + } + + // Write down the order of our locals that will be promoted to the prefix. + { + for (idx, local) in ineligible_locals.iter().enumerate() { + assignments[local] = Ineligible(Some(FieldIdx::new(idx))); + } + } + debug!("coroutine saved local assignments: {:?}", assignments); + + (ineligible_locals, assignments) +} + +/// Compute the full coroutine layout. 
+pub(super) fn layout< + 'a, + F: core::ops::Deref<Target = &'a LayoutData<FieldIdx, VariantIdx>> + core::fmt::Debug + Copy, + VariantIdx: Idx, + FieldIdx: Idx, + LocalIdx: Idx, +>( + calc: &super::LayoutCalculator<impl HasDataLayout>, + local_layouts: &IndexSlice<LocalIdx, F>, + mut prefix_layouts: IndexVec<FieldIdx, F>, + variant_fields: &IndexSlice<VariantIdx, IndexVec<FieldIdx, LocalIdx>>, + storage_conflicts: &BitMatrix<LocalIdx, LocalIdx>, + tag_to_layout: impl Fn(Scalar) -> F, +) -> super::LayoutCalculatorResult<FieldIdx, VariantIdx, F> { + use SavedLocalEligibility::*; + + let (ineligible_locals, assignments) = + coroutine_saved_local_eligibility(local_layouts.len(), variant_fields, storage_conflicts); + + // Build a prefix layout, including "promoting" all ineligible + // locals as part of the prefix. We compute the layout of all of + // these fields at once to get optimal packing. + let tag_index = prefix_layouts.len(); + + // `variant_fields` already accounts for the reserved variants, so no need to add them. + let max_discr = (variant_fields.len() - 1) as u128; + let discr_int = Integer::fit_unsigned(max_discr); + let tag = Scalar::Initialized { + value: Primitive::Int(discr_int, /* signed = */ false), + valid_range: WrappingRange { start: 0, end: max_discr }, + }; + + let promoted_layouts = ineligible_locals.iter().map(|local| local_layouts[local]); + prefix_layouts.push(tag_to_layout(tag)); + prefix_layouts.extend(promoted_layouts); + let prefix = + calc.univariant(&prefix_layouts, &ReprOptions::default(), StructKind::AlwaysSized)?; + + let (prefix_size, prefix_align) = (prefix.size, prefix.align); + + // Split the prefix layout into the "outer" fields (upvars and + // discriminant) and the "promoted" fields. Promoted fields will + // get included in each variant that requested them in + // CoroutineLayout. + debug!("prefix = {:#?}", prefix); + let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { + FieldsShape::Arbitrary { mut offsets, memory_index } => { + let mut inverse_memory_index = memory_index.invert_bijective_mapping(); + + // "a" (`0..b_start`) and "b" (`b_start..`) correspond to + // "outer" and "promoted" fields respectively. + let b_start = FieldIdx::new(tag_index + 1); + let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.index())); + let offsets_a = offsets; + + // Disentangle the "a" and "b" components of `inverse_memory_index` + // by preserving the order but keeping only one disjoint "half" each. + // FIXME(eddyb) build a better abstraction for permutations, if possible. 
+ let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index + .iter() + .filter_map(|&i| i.index().checked_sub(b_start.index()).map(FieldIdx::new)) + .collect(); + inverse_memory_index.raw.retain(|&i| i.index() < b_start.index()); + let inverse_memory_index_a = inverse_memory_index; + + // Since `inverse_memory_index_{a,b}` each only refer to their + // respective fields, they can be safely inverted + let memory_index_a = inverse_memory_index_a.invert_bijective_mapping(); + let memory_index_b = inverse_memory_index_b.invert_bijective_mapping(); + + let outer_fields = + FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a }; + (outer_fields, offsets_b, memory_index_b) + } + _ => unreachable!(), + }; + + let mut size = prefix.size; + let mut align = prefix.align; + let variants = variant_fields + .iter_enumerated() + .map(|(index, variant_fields)| { + // Only include overlap-eligible fields when we compute our variant layout. + let variant_only_tys = variant_fields + .iter() + .filter(|local| match assignments[**local] { + Unassigned => unreachable!(), + Assigned(v) if v == index => true, + Assigned(_) => unreachable!("assignment does not match variant"), + Ineligible(_) => false, + }) + .map(|local| local_layouts[*local]); + + let mut variant = calc.univariant( + &variant_only_tys.collect::<IndexVec<_, _>>(), + &ReprOptions::default(), + StructKind::Prefixed(prefix_size, prefix_align.abi), + )?; + variant.variants = Variants::Single { index }; + + let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else { + unreachable!(); + }; + + // Now, stitch the promoted and variant-only fields back together in + // the order they are mentioned by our CoroutineLayout. + // Because we only use some subset (that can differ between variants) + // of the promoted fields, we can't just pick those elements of the + // `promoted_memory_index` (as we'd end up with gaps). + // So instead, we build an "inverse memory_index", as if all of the + // promoted fields were being used, but leave the elements not in the + // subset as `invalid_field_idx`, which we can filter out later to + // obtain a valid (bijective) mapping. + let invalid_field_idx = promoted_memory_index.len() + memory_index.len(); + let mut combined_inverse_memory_index = + IndexVec::from_elem_n(FieldIdx::new(invalid_field_idx), invalid_field_idx); + + let mut offsets_and_memory_index = iter::zip(offsets, memory_index); + let combined_offsets = variant_fields + .iter_enumerated() + .map(|(i, local)| { + let (offset, memory_index) = match assignments[*local] { + Unassigned => unreachable!(), + Assigned(_) => { + let (offset, memory_index) = offsets_and_memory_index.next().unwrap(); + (offset, promoted_memory_index.len() as u32 + memory_index) + } + Ineligible(field_idx) => { + let field_idx = field_idx.unwrap(); + (promoted_offsets[field_idx], promoted_memory_index[field_idx]) + } + }; + combined_inverse_memory_index[memory_index] = i; + offset + }) + .collect(); + + // Remove the unused slots and invert the mapping to obtain the + // combined `memory_index` (also see previous comment). 
+ combined_inverse_memory_index.raw.retain(|&i| i.index() != invalid_field_idx); + let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping(); + + variant.fields = FieldsShape::Arbitrary { + offsets: combined_offsets, + memory_index: combined_memory_index, + }; + + size = size.max(variant.size); + align = align.max(variant.align); + Ok(variant) + }) + .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; + + size = size.align_to(align.abi); + + let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited()); + let abi = BackendRepr::Memory { sized: true }; + + Ok(LayoutData { + variants: Variants::Multiple { + tag, + tag_encoding: TagEncoding::Direct, + tag_field: tag_index, + variants, + }, + fields: outer_fields, + backend_repr: abi, + // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to + // self-referentiality), getting the discriminant can cause aliasing violations. + // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that + // would do the same for us here. + // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>. + // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`. + largest_niche: None, + uninhabited, + size, + align, + max_repr_align: None, + unadjusted_abi_align: align.abi, + randomization_seed: Default::default(), + }) +} diff --git a/compiler/rustc_abi/src/layout/simple.rs b/compiler/rustc_abi/src/layout/simple.rs new file mode 100644 index 00000000000..0d0706defc2 --- /dev/null +++ b/compiler/rustc_abi/src/layout/simple.rs @@ -0,0 +1,148 @@ +use std::num::NonZero; + +use rustc_hashes::Hash64; +use rustc_index::{Idx, IndexVec}; + +use crate::{ + BackendRepr, FieldsShape, HasDataLayout, LayoutData, Niche, Primitive, Scalar, Size, Variants, +}; + +/// "Simple" layout constructors that cannot fail. +impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { + pub fn unit<C: HasDataLayout>(cx: &C, sized: bool) -> Self { + let dl = cx.data_layout(); + LayoutData { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldsShape::Arbitrary { + offsets: IndexVec::new(), + memory_index: IndexVec::new(), + }, + backend_repr: BackendRepr::Memory { sized }, + largest_niche: None, + uninhabited: false, + align: dl.i8_align, + size: Size::ZERO, + max_repr_align: None, + unadjusted_abi_align: dl.i8_align.abi, + randomization_seed: Hash64::new(0), + } + } + + pub fn never_type<C: HasDataLayout>(cx: &C) -> Self { + let dl = cx.data_layout(); + // This is also used for uninhabited enums, so we use `Variants::Empty`. + LayoutData { + variants: Variants::Empty, + fields: FieldsShape::Primitive, + backend_repr: BackendRepr::Memory { sized: true }, + largest_niche: None, + uninhabited: true, + align: dl.i8_align, + size: Size::ZERO, + max_repr_align: None, + unadjusted_abi_align: dl.i8_align.abi, + randomization_seed: Hash64::ZERO, + } + } + + pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { + let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); + let size = scalar.size(cx); + let align = scalar.align(cx); + + let range = scalar.valid_range(cx); + + // All primitive types for which we don't have subtype coercions should get a distinct seed, + // so that types wrapping them can use randomization to arrive at distinct layouts. 
+ // + // Some type information is already lost at this point, so as an approximation we derive + // the seed from what remains. For example on 64-bit targets usize and u64 can no longer + // be distinguished. + let randomization_seed = size + .bytes() + .wrapping_add( + match scalar.primitive() { + Primitive::Int(_, true) => 1, + Primitive::Int(_, false) => 2, + Primitive::Float(_) => 3, + Primitive::Pointer(_) => 4, + } << 32, + ) + // distinguishes references from pointers + .wrapping_add((range.start as u64).rotate_right(16)) + // distinguishes char from u32 and bool from u8 + .wrapping_add((range.end as u64).rotate_right(16)); + + LayoutData { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldsShape::Primitive, + backend_repr: BackendRepr::Scalar(scalar), + largest_niche, + uninhabited: false, + size, + align, + max_repr_align: None, + unadjusted_abi_align: align.abi, + randomization_seed: Hash64::new(randomization_seed), + } + } + + pub fn scalar_pair<C: HasDataLayout>(cx: &C, a: Scalar, b: Scalar) -> Self { + let dl = cx.data_layout(); + let b_align = b.align(dl); + let align = a.align(dl).max(b_align).max(dl.aggregate_align); + let b_offset = a.size(dl).align_to(b_align.abi); + let size = (b_offset + b.size(dl)).align_to(align.abi); + + // HACK(nox): We iter on `b` and then `a` because `max_by_key` + // returns the last maximum. + let largest_niche = Niche::from_scalar(dl, b_offset, b) + .into_iter() + .chain(Niche::from_scalar(dl, Size::ZERO, a)) + .max_by_key(|niche| niche.available(dl)); + + let combined_seed = a.size(dl).bytes().wrapping_add(b.size(dl).bytes()); + + LayoutData { + variants: Variants::Single { index: VariantIdx::new(0) }, + fields: FieldsShape::Arbitrary { + offsets: [Size::ZERO, b_offset].into(), + memory_index: [0, 1].into(), + }, + backend_repr: BackendRepr::ScalarPair(a, b), + largest_niche, + uninhabited: false, + align, + size, + max_repr_align: None, + unadjusted_abi_align: align.abi, + randomization_seed: Hash64::new(combined_seed), + } + } + + /// Returns a dummy layout for an uninhabited variant. + /// + /// Uninhabited variants get pruned as part of the layout calculation, + /// so this can be used after the fact to reconstitute a layout. + pub fn uninhabited_variant<C: HasDataLayout>(cx: &C, index: VariantIdx, fields: usize) -> Self { + let dl = cx.data_layout(); + LayoutData { + variants: Variants::Single { index }, + fields: match NonZero::new(fields) { + Some(fields) => FieldsShape::Union(fields), + None => FieldsShape::Arbitrary { + offsets: IndexVec::new(), + memory_index: IndexVec::new(), + }, + }, + backend_repr: BackendRepr::Memory { sized: true }, + largest_niche: None, + uninhabited: true, + align: dl.i8_align, + size: Size::ZERO, + max_repr_align: None, + unadjusted_abi_align: dl.i8_align.abi, + randomization_seed: Hash64::ZERO, + } + } +} diff --git a/compiler/rustc_abi/src/layout/ty.rs b/compiler/rustc_abi/src/layout/ty.rs index 221e990ae86..4f43c0e6f8e 100644 --- a/compiler/rustc_abi/src/layout/ty.rs +++ b/compiler/rustc_abi/src/layout/ty.rs @@ -150,6 +150,12 @@ impl<'a, Ty> Deref for TyAndLayout<'a, Ty> { } } +impl<'a, Ty> AsRef<LayoutData<FieldIdx, VariantIdx>> for TyAndLayout<'a, Ty> { + fn as_ref(&self) -> &LayoutData<FieldIdx, VariantIdx> { + &*self.layout.0.0 + } +} + /// Trait that needs to be implemented by the higher-level type representation /// (e.g. `rustc_middle::ty::Ty`), to provide `rustc_target::abi` functionality. 
pub trait TyAbiInterface<'a, C>: Sized + std::fmt::Debug { @@ -219,7 +225,7 @@ impl<'a, Ty> TyAndLayout<'a, Ty> { C: HasDataLayout, { match self.backend_repr { - BackendRepr::Vector { .. } => self.size == expected_size, + BackendRepr::SimdVector { .. } => self.size == expected_size, BackendRepr::Memory { .. } => { if self.fields.count() == 1 && self.fields.offset(0).bytes() == 0 { self.field(cx, 0).is_single_vector_element(cx, expected_size) diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 81e4e255f37..078b676e40e 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -5,7 +5,6 @@ #![cfg_attr(feature = "nightly", feature(rustc_attrs))] #![cfg_attr(feature = "nightly", feature(rustdoc_internals))] #![cfg_attr(feature = "nightly", feature(step_trait))] -#![warn(unreachable_pub)] // tidy-alphabetical-end /*! ABI handling for rustc @@ -205,6 +204,13 @@ impl ReprOptions { } } +/// The maximum supported number of lanes in a SIMD vector. +/// +/// This value is selected based on backend support: +/// * LLVM does not appear to have a vector width limit. +/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer. +pub const MAX_SIMD_LANES: u64 = 1 << 0xF; + /// Parsed [Data layout](https://llvm.org/docs/LangRef.html#data-layout) /// for a target, which contains everything needed to compute layouts. #[derive(Debug, PartialEq, Eq)] @@ -1410,7 +1416,7 @@ impl AddressSpace { pub enum BackendRepr { Scalar(Scalar), ScalarPair(Scalar, Scalar), - Vector { + SimdVector { element: Scalar, count: u64, }, @@ -1426,9 +1432,9 @@ impl BackendRepr { #[inline] pub fn is_unsized(&self) -> bool { match *self { - BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => { - false - } + BackendRepr::Scalar(_) + | BackendRepr::ScalarPair(..) + | BackendRepr::SimdVector { .. } => false, BackendRepr::Memory { sized } => !sized, } } @@ -1467,7 +1473,7 @@ impl BackendRepr { BackendRepr::Scalar(s) => Some(s.align(cx).abi), BackendRepr::ScalarPair(s1, s2) => Some(s1.align(cx).max(s2.align(cx)).abi), // The align of a Vector can vary in surprising ways - BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None, + BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None, } } @@ -1489,7 +1495,7 @@ impl BackendRepr { Some(size) } // The size of a Vector can vary in surprising ways - BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => None, + BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => None, } } @@ -1500,8 +1506,8 @@ impl BackendRepr { BackendRepr::ScalarPair(s1, s2) => { BackendRepr::ScalarPair(s1.to_union(), s2.to_union()) } - BackendRepr::Vector { element, count } => { - BackendRepr::Vector { element: element.to_union(), count } + BackendRepr::SimdVector { element, count } => { + BackendRepr::SimdVector { element: element.to_union(), count } } BackendRepr::Memory { .. } => BackendRepr::Memory { sized: true }, } @@ -1513,8 +1519,8 @@ impl BackendRepr { // We do *not* ignore the sign since it matters for some ABIs (e.g. s390x). 
(BackendRepr::Scalar(l), BackendRepr::Scalar(r)) => l.primitive() == r.primitive(), ( - BackendRepr::Vector { element: element_l, count: count_l }, - BackendRepr::Vector { element: element_r, count: count_r }, + BackendRepr::SimdVector { element: element_l, count: count_l }, + BackendRepr::SimdVector { element: element_r, count: count_r }, ) => element_l.primitive() == element_r.primitive() && count_l == count_r, (BackendRepr::ScalarPair(l1, l2), BackendRepr::ScalarPair(r1, r2)) => { l1.primitive() == r1.primitive() && l2.primitive() == r2.primitive() @@ -1735,7 +1741,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// Returns `true` if this is an aggregate type (including a ScalarPair!) pub fn is_aggregate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => false, + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => false, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => true, } } @@ -1744,48 +1750,6 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { pub fn is_uninhabited(&self) -> bool { self.uninhabited } - - pub fn scalar<C: HasDataLayout>(cx: &C, scalar: Scalar) -> Self { - let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); - let size = scalar.size(cx); - let align = scalar.align(cx); - - let range = scalar.valid_range(cx); - - // All primitive types for which we don't have subtype coercions should get a distinct seed, - // so that types wrapping them can use randomization to arrive at distinct layouts. - // - // Some type information is already lost at this point, so as an approximation we derive - // the seed from what remains. For example on 64-bit targets usize and u64 can no longer - // be distinguished. - let randomization_seed = size - .bytes() - .wrapping_add( - match scalar.primitive() { - Primitive::Int(_, true) => 1, - Primitive::Int(_, false) => 2, - Primitive::Float(_) => 3, - Primitive::Pointer(_) => 4, - } << 32, - ) - // distinguishes references from pointers - .wrapping_add((range.start as u64).rotate_right(16)) - // distinguishes char from u32 and bool from u8 - .wrapping_add((range.end as u64).rotate_right(16)); - - LayoutData { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Primitive, - backend_repr: BackendRepr::Scalar(scalar), - largest_niche, - uninhabited: false, - size, - align, - max_repr_align: None, - unadjusted_abi_align: align.abi, - randomization_seed: Hash64::new(randomization_seed), - } - } } impl<FieldIdx: Idx, VariantIdx: Idx> fmt::Debug for LayoutData<FieldIdx, VariantIdx> @@ -1812,7 +1776,7 @@ where f.debug_struct("Layout") .field("size", size) .field("align", align) - .field("abi", backend_repr) + .field("backend_repr", backend_repr) .field("fields", fields) .field("largest_niche", largest_niche) .field("uninhabited", uninhabited) @@ -1877,9 +1841,9 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { /// non-trivial alignment constraints. You probably want to use `is_1zst` instead. pub fn is_zst(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) | BackendRepr::Vector { .. } => { - false - } + BackendRepr::Scalar(_) + | BackendRepr::ScalarPair(..) + | BackendRepr::SimdVector { .. 
} => false, BackendRepr::Memory { sized } => sized && self.size.bytes() == 0, } } diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index b21ccba93bb..6aaac072e4b 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -23,7 +23,6 @@ #![feature(maybe_uninit_slice)] #![feature(rustc_attrs)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::alloc::Layout; @@ -93,7 +92,7 @@ impl<T> ArenaChunk<T> { #[inline] fn end(&mut self) -> *mut T { unsafe { - if mem::size_of::<T>() == 0 { + if size_of::<T>() == 0 { // A pointer as large as possible for zero-sized elements. ptr::without_provenance_mut(!0) } else { @@ -151,7 +150,7 @@ impl<T> TypedArena<T> { } unsafe { - if mem::size_of::<T>() == 0 { + if size_of::<T>() == 0 { self.ptr.set(self.ptr.get().wrapping_byte_add(1)); let ptr = ptr::NonNull::<T>::dangling().as_ptr(); // Don't drop the object. This `write` is equivalent to `forget`. @@ -173,13 +172,13 @@ impl<T> TypedArena<T> { // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). let available_bytes = self.end.get().addr() - self.ptr.get().addr(); - let additional_bytes = additional.checked_mul(mem::size_of::<T>()).unwrap(); + let additional_bytes = additional.checked_mul(size_of::<T>()).unwrap(); available_bytes >= additional_bytes } #[inline] fn alloc_raw_slice(&self, len: usize) -> *mut T { - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); assert!(len != 0); // Ensure the current chunk can fit `len` objects. @@ -213,7 +212,7 @@ impl<T> TypedArena<T> { // So we collect all the elements beforehand, which takes care of reentrancy and panic // safety. This function is much less hot than `DroplessArena::alloc_from_iter`, so it // doesn't need to be hyper-optimized. - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect(); if vec.is_empty() { @@ -236,7 +235,7 @@ impl<T> TypedArena<T> { unsafe { // We need the element size to convert chunk sizes (ranging from // PAGE to HUGE_PAGE bytes) to element counts. - let elem_size = cmp::max(1, mem::size_of::<T>()); + let elem_size = cmp::max(1, size_of::<T>()); let mut chunks = self.chunks.borrow_mut(); let mut new_cap; if let Some(last_chunk) = chunks.last_mut() { @@ -246,7 +245,7 @@ impl<T> TypedArena<T> { // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). let used_bytes = self.ptr.get().addr() - last_chunk.start().addr(); - last_chunk.entries = used_bytes / mem::size_of::<T>(); + last_chunk.entries = used_bytes / size_of::<T>(); } // If the previous chunk's len is less than HUGE_PAGE @@ -276,7 +275,7 @@ impl<T> TypedArena<T> { let end = self.ptr.get().addr(); // We then calculate the number of elements to be dropped in the last chunk, // which is the filled area's length. - let diff = if mem::size_of::<T>() == 0 { + let diff = if size_of::<T>() == 0 { // `T` is ZST. It can't have a drop flag, so the value here doesn't matter. We get // the number of zero-sized values in the last and only chunk, just out of caution. // Recall that `end` was incremented for each allocated value. @@ -284,7 +283,7 @@ impl<T> TypedArena<T> { } else { // FIXME: this should *likely* use `offset_from`, but more // investigation is needed (including running tests in miri). 
- (end - start) / mem::size_of::<T>() + (end - start) / size_of::<T>() }; // Pass that to the `destroy` method. unsafe { @@ -329,7 +328,7 @@ fn align_up(val: usize, align: usize) -> usize { // Pointer alignment is common in compiler types, so keep `DroplessArena` aligned to them // to optimize away alignment code. -const DROPLESS_ALIGNMENT: usize = mem::align_of::<usize>(); +const DROPLESS_ALIGNMENT: usize = align_of::<usize>(); /// An arena that can hold objects of multiple different types that impl `Copy` /// and/or satisfy `!mem::needs_drop`. @@ -447,7 +446,7 @@ impl DroplessArena { #[inline] pub fn alloc<T>(&self, object: T) -> &mut T { assert!(!mem::needs_drop::<T>()); - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); let mem = self.alloc_raw(Layout::new::<T>()) as *mut T; @@ -471,7 +470,7 @@ impl DroplessArena { T: Copy, { assert!(!mem::needs_drop::<T>()); - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); assert!(!slice.is_empty()); let mem = self.alloc_raw(Layout::for_value::<[T]>(slice)) as *mut T; @@ -546,7 +545,7 @@ impl DroplessArena { // Warning: this function is reentrant: `iter` could hold a reference to `&self` and // allocate additional elements while we're iterating. let iter = iter.into_iter(); - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); assert!(!mem::needs_drop::<T>()); let size_hint = iter.size_hint(); diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index 29c1d34a125..1b831c454e6 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -39,7 +39,7 @@ pub use crate::format::*; use crate::ptr::P; use crate::token::{self, CommentKind, Delimiter}; use crate::tokenstream::{DelimSpan, LazyAttrTokenStream, TokenStream}; -use crate::util::parser::{AssocOp, ExprPrecedence}; +use crate::util::parser::{ExprPrecedence, Fixity}; /// A "Label" is an identifier of some point in sources, /// e.g. in the following code: @@ -124,10 +124,19 @@ impl Path { self.segments.first().is_some_and(|segment| segment.ident.name == kw::PathRoot) } - /// If this path is a single identifier with no arguments, does not ensure - /// that the path resolves to a const param, the caller should check this. - pub fn is_potential_trivial_const_arg(&self) -> bool { - matches!(self.segments[..], [PathSegment { args: None, .. }]) + /// Check if this path is potentially a trivial const arg, i.e., one that can _potentially_ + /// be represented without an anon const in the HIR. + /// + /// If `allow_mgca_arg` is true (as should be the case in most situations when + /// `#![feature(min_generic_const_args)]` is enabled), then this always returns true + /// because all paths are valid. + /// + /// Otherwise, it returns true iff the path has exactly one segment, and it has no generic args + /// (i.e., it is _potentially_ a const parameter). + #[tracing::instrument(level = "debug", ret)] + pub fn is_potential_trivial_const_arg(&self, allow_mgca_arg: bool) -> bool { + allow_mgca_arg + || self.segments.len() == 1 && self.segments.iter().all(|seg| seg.args.is_none()) } } @@ -417,9 +426,11 @@ impl WhereClause { /// A single predicate in a where-clause. #[derive(Clone, Encodable, Decodable, Debug)] pub struct WherePredicate { + pub attrs: AttrVec, pub kind: WherePredicateKind, pub id: NodeId, pub span: Span, + pub is_placeholder: bool, } /// Predicate kind in where-clause. 
@@ -937,8 +948,37 @@ impl BinOpKind { matches!(self, BinOpKind::And | BinOpKind::Or) } + pub fn precedence(&self) -> ExprPrecedence { + use BinOpKind::*; + match *self { + Mul | Div | Rem => ExprPrecedence::Product, + Add | Sub => ExprPrecedence::Sum, + Shl | Shr => ExprPrecedence::Shift, + BitAnd => ExprPrecedence::BitAnd, + BitXor => ExprPrecedence::BitXor, + BitOr => ExprPrecedence::BitOr, + Lt | Gt | Le | Ge | Eq | Ne => ExprPrecedence::Compare, + And => ExprPrecedence::LAnd, + Or => ExprPrecedence::LOr, + } + } + + pub fn fixity(&self) -> Fixity { + use BinOpKind::*; + match self { + Eq | Ne | Lt | Le | Gt | Ge => Fixity::None, + Add | Sub | Mul | Div | Rem | And | Or | BitXor | BitAnd | BitOr | Shl | Shr => { + Fixity::Left + } + } + } + pub fn is_comparison(self) -> bool { - crate::util::parser::AssocOp::from_ast_binop(self).is_comparison() + use BinOpKind::*; + match self { + Eq | Ne | Lt | Le | Gt | Ge => true, + Add | Sub | Mul | Div | Rem | And | Or | BitXor | BitAnd | BitOr | Shl | Shr => false, + } } /// Returns `true` if the binary operator takes its arguments by value. @@ -1177,22 +1217,31 @@ pub struct Expr { } impl Expr { - /// Could this expr be either `N`, or `{ N }`, where `N` is a const parameter. + /// Check if this expression is potentially a trivial const arg, i.e., one that can _potentially_ + /// be represented without an anon const in the HIR. + /// + /// This will unwrap at most one block level (curly braces). After that, if the expression + /// is a path, it mostly dispatches to [`Path::is_potential_trivial_const_arg`]. + /// See there for more info about `allow_mgca_arg`. /// - /// If this is not the case, name resolution does not resolve `N` when using - /// `min_const_generics` as more complex expressions are not supported. + /// The only additional thing to note is that when `allow_mgca_arg` is false, this function + /// will only allow paths with no qself, before dispatching to the `Path` function of + /// the same name. /// - /// Does not ensure that the path resolves to a const param, the caller should check this. + /// Does not ensure that the path resolves to a const param/item, the caller should check this. /// This also does not consider macros, so it's only correct after macro-expansion. - pub fn is_potential_trivial_const_arg(&self) -> bool { + pub fn is_potential_trivial_const_arg(&self, allow_mgca_arg: bool) -> bool { let this = self.maybe_unwrap_block(); - - if let ExprKind::Path(None, path) = &this.kind - && path.is_potential_trivial_const_arg() - { - true + if allow_mgca_arg { + matches!(this.kind, ExprKind::Path(..)) } else { - false + if let ExprKind::Path(None, path) = &this.kind + && path.is_potential_trivial_const_arg(allow_mgca_arg) + { + true + } else { + false + } } } @@ -1332,7 +1381,7 @@ impl Expr { ExprKind::Range(..) => ExprPrecedence::Range, // Binop-like expr kinds, handled by `AssocOp`. - ExprKind::Binary(op, ..) => AssocOp::from_ast_binop(op.node).precedence(), + ExprKind::Binary(op, ..) => op.node.precedence(), ExprKind::Cast(..) => ExprPrecedence::Cast, ExprKind::Assign(..) | @@ -1350,6 +1399,7 @@ impl Expr { // Never need parens ExprKind::Array(_) | ExprKind::Await(..) + | ExprKind::Use(..) | ExprKind::Block(..) | ExprKind::Call(..) | ExprKind::ConstBlock(_) @@ -1424,6 +1474,15 @@ pub enum RangeLimits { Closed, } +impl RangeLimits { + pub fn as_str(&self) -> &'static str { + match self { + RangeLimits::HalfOpen => "..", + RangeLimits::Closed => "..=", + } + } +} + /// A method call (e.g. `x.foo::<Bar, Baz>(a, b, c)`). 
#[derive(Clone, Encodable, Decodable, Debug)] pub struct MethodCall { @@ -1530,6 +1589,8 @@ pub enum ExprKind { Gen(CaptureBy, P<Block>, GenBlockKind, Span), /// An await expression (`my_future.await`). Span is of await keyword. Await(P<Expr>, Span), + /// A use expression (`x.use`). Span is of use keyword. + Use(P<Expr>, Span), /// A try block (`try { ... }`). TryBlock(P<Block>), @@ -1699,8 +1760,17 @@ pub enum CaptureBy { /// The span of the `move` keyword. move_kw: Span, }, - /// `move` keyword was not specified. + /// `move` or `use` keywords were not specified. Ref, + /// `use |x| y + x`. + /// + /// Note that if you have a regular closure like `|| x.use`, this will *not* result + /// in a `Use` capture. Instead, the `ExprUseVisitor` will look at the type + /// of `x` and treat `x.use` as either a copy/clone/move as appropriate. + Use { + /// The span of the `use` keyword. + use_kw: Span, + }, } /// Closure lifetime binder, `for<'a, 'b>` in `for<'a, 'b> |_: &'a (), _: &'b ()|`. @@ -2583,6 +2653,8 @@ pub enum SelfKind { Value(Mutability), /// `&'lt self`, `&'lt mut self` Region(Option<Lifetime>, Mutability), + /// `&'lt pin const self`, `&'lt pin mut self` + Pinned(Option<Lifetime>, Mutability), /// `self: TYPE`, `mut self: TYPE` Explicit(P<Ty>, Mutability), } @@ -2592,6 +2664,8 @@ impl SelfKind { match self { SelfKind::Region(None, mutbl) => mutbl.ref_prefix_str().to_string(), SelfKind::Region(Some(lt), mutbl) => format!("&{lt} {}", mutbl.prefix_str()), + SelfKind::Pinned(None, mutbl) => format!("&pin {}", mutbl.ptr_str()), + SelfKind::Pinned(Some(lt), mutbl) => format!("&{lt} pin {}", mutbl.ptr_str()), SelfKind::Value(_) | SelfKind::Explicit(_, _) => { unreachable!("if we had an explicit self, we wouldn't be here") } @@ -2608,11 +2682,13 @@ impl Param { if ident.name == kw::SelfLower { return match self.ty.kind { TyKind::ImplicitSelf => Some(respan(self.pat.span, SelfKind::Value(mutbl))), - TyKind::Ref(lt, MutTy { ref ty, mutbl }) - | TyKind::PinnedRef(lt, MutTy { ref ty, mutbl }) + TyKind::Ref(lt, MutTy { ref ty, mutbl }) if ty.kind.is_implicit_self() => { + Some(respan(self.pat.span, SelfKind::Region(lt, mutbl))) + } + TyKind::PinnedRef(lt, MutTy { ref ty, mutbl }) if ty.kind.is_implicit_self() => { - Some(respan(self.pat.span, SelfKind::Region(lt, mutbl))) + Some(respan(self.pat.span, SelfKind::Pinned(lt, mutbl))) } _ => Some(respan( self.pat.span.to(self.ty.span), @@ -2654,6 +2730,15 @@ impl Param { tokens: None, }), ), + SelfKind::Pinned(lt, mutbl) => ( + mutbl, + P(Ty { + id: DUMMY_NODE_ID, + kind: TyKind::PinnedRef(lt, MutTy { ty: infer_ty, mutbl }), + span, + tokens: None, + }), + ), }; Param { attrs, @@ -3381,6 +3466,7 @@ pub struct Fn { pub generics: Generics, pub sig: FnSig, pub contract: Option<P<FnContract>>, + pub define_opaque: Option<ThinVec<(NodeId, Path)>>, pub body: Option<P<Block>>, } @@ -3678,7 +3764,7 @@ mod size_asserts { static_assert_size!(Block, 32); static_assert_size!(Expr, 72); static_assert_size!(ExprKind, 40); - static_assert_size!(Fn, 168); + static_assert_size!(Fn, 176); static_assert_size!(ForeignItem, 88); static_assert_size!(ForeignItemKind, 16); static_assert_size!(GenericArg, 24); diff --git a/compiler/rustc_ast/src/ast_traits.rs b/compiler/rustc_ast/src/ast_traits.rs index 346edc87c86..849cc650e9d 100644 --- a/compiler/rustc_ast/src/ast_traits.rs +++ b/compiler/rustc_ast/src/ast_traits.rs @@ -11,7 +11,7 @@ use crate::tokenstream::LazyAttrTokenStream; use crate::{ Arm, AssocItem, AttrItem, AttrKind, AttrVec, Attribute, Block, Crate, Expr, 
ExprField, FieldDef, ForeignItem, GenericParam, Item, NodeId, Param, Pat, PatField, Path, Stmt, StmtKind, - Ty, Variant, Visibility, + Ty, Variant, Visibility, WherePredicate, }; /// A utility trait to reduce boilerplate. @@ -79,6 +79,7 @@ impl_has_node_id!( Stmt, Ty, Variant, + WherePredicate, ); impl<T: AstDeref<Target: HasNodeId>> HasNodeId for T { @@ -127,7 +128,16 @@ macro_rules! impl_has_tokens_none { } impl_has_tokens!(AssocItem, AttrItem, Block, Expr, ForeignItem, Item, Pat, Path, Ty, Visibility); -impl_has_tokens_none!(Arm, ExprField, FieldDef, GenericParam, Param, PatField, Variant); +impl_has_tokens_none!( + Arm, + ExprField, + FieldDef, + GenericParam, + Param, + PatField, + Variant, + WherePredicate +); impl<T: AstDeref<Target: HasTokens>> HasTokens for T { fn tokens(&self) -> Option<&LazyAttrTokenStream> { @@ -199,23 +209,13 @@ impl HasTokens for Attribute { impl HasTokens for Nonterminal { fn tokens(&self) -> Option<&LazyAttrTokenStream> { match self { - Nonterminal::NtItem(item) => item.tokens(), - Nonterminal::NtStmt(stmt) => stmt.tokens(), Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => expr.tokens(), - Nonterminal::NtPat(pat) => pat.tokens(), - Nonterminal::NtMeta(attr_item) => attr_item.tokens(), - Nonterminal::NtPath(path) => path.tokens(), Nonterminal::NtBlock(block) => block.tokens(), } } fn tokens_mut(&mut self) -> Option<&mut Option<LazyAttrTokenStream>> { match self { - Nonterminal::NtItem(item) => item.tokens_mut(), - Nonterminal::NtStmt(stmt) => stmt.tokens_mut(), Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => expr.tokens_mut(), - Nonterminal::NtPat(pat) => pat.tokens_mut(), - Nonterminal::NtMeta(attr_item) => attr_item.tokens_mut(), - Nonterminal::NtPath(path) => path.tokens_mut(), Nonterminal::NtBlock(block) => block.tokens_mut(), } } @@ -285,6 +285,7 @@ impl_has_attrs!( Param, PatField, Variant, + WherePredicate, ); impl_has_attrs_none!(Attribute, AttrItem, Block, Pat, Path, Ty, Visibility); diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs index df2f4b88712..4d613085d79 100644 --- a/compiler/rustc_ast/src/attr/mod.rs +++ b/compiler/rustc_ast/src/attr/mod.rs @@ -14,7 +14,7 @@ use crate::ast::{ PathSegment, Safety, }; use crate::ptr::P; -use crate::token::{self, CommentKind, Delimiter, Token}; +use crate::token::{self, CommentKind, Delimiter, InvisibleOrigin, MetaVarKind, Token}; use crate::tokenstream::{ DelimSpan, LazyAttrTokenStream, Spacing, TokenStream, TokenStreamIter, TokenTree, }; @@ -405,11 +405,17 @@ impl MetaItem { let span = span.with_hi(segments.last().unwrap().ident.span.hi()); Path { span, segments, tokens: None } } - Some(TokenTree::Token(Token { kind: token::Interpolated(nt), .. }, _)) => match &**nt { - token::Nonterminal::NtMeta(item) => return item.meta(item.path.span), - token::Nonterminal::NtPath(path) => (**path).clone(), - _ => return None, - }, + Some(TokenTree::Delimited( + _span, + _spacing, + Delimiter::Invisible(InvisibleOrigin::MetaVar( + MetaVarKind::Meta { .. } | MetaVarKind::Path, + )), + _stream, + )) => { + // This path is currently unreachable in the test suite. + unreachable!() + } Some(TokenTree::Token( Token { kind: token::OpenDelim(_) | token::CloseDelim(_), .. 
}, _, diff --git a/compiler/rustc_ast/src/format.rs b/compiler/rustc_ast/src/format.rs index b93846c1fe6..b611ddea1d9 100644 --- a/compiler/rustc_ast/src/format.rs +++ b/compiler/rustc_ast/src/format.rs @@ -266,7 +266,7 @@ pub enum FormatAlignment { #[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq)] pub enum FormatCount { /// `{:5}` or `{:.5}` - Literal(usize), + Literal(u16), /// `{:.*}`, `{:.5$}`, or `{:a$}`, etc. Argument(FormatArgPosition), } diff --git a/compiler/rustc_ast/src/lib.rs b/compiler/rustc_ast/src/lib.rs index 6372c66050e..da510e4967d 100644 --- a/compiler/rustc_ast/src/lib.rs +++ b/compiler/rustc_ast/src/lib.rs @@ -6,6 +6,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc( html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/", test(attr(deny(warnings))) @@ -19,7 +20,6 @@ #![feature(never_type)] #![feature(rustdoc_internals)] #![feature(stmt_expr_attributes)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod util { diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs index 40b29fdba25..4a1636e6aec 100644 --- a/compiler/rustc_ast/src/mut_visit.rs +++ b/compiler/rustc_ast/src/mut_visit.rs @@ -338,8 +338,11 @@ pub trait MutVisitor: Sized { walk_where_clause(self, where_clause); } - fn visit_where_predicate(&mut self, where_predicate: &mut WherePredicate) { - walk_where_predicate(self, where_predicate) + fn flat_map_where_predicate( + &mut self, + where_predicate: WherePredicate, + ) -> SmallVec<[WherePredicate; 1]> { + walk_flat_map_where_predicate(self, where_predicate) } fn visit_where_predicate_kind(&mut self, kind: &mut WherePredicateKind) { @@ -892,29 +895,9 @@ pub fn visit_token<T: MutVisitor>(vis: &mut T, t: &mut Token) { // multiple items there.... fn visit_nonterminal<T: MutVisitor>(vis: &mut T, nt: &mut token::Nonterminal) { match nt { - token::NtItem(item) => visit_clobber(item, |item| { - // This is probably okay, because the only visitors likely to - // peek inside interpolated nodes will be renamings/markings, - // which map single items to single items. - vis.flat_map_item(item).expect_one("expected visitor to produce exactly one item") - }), token::NtBlock(block) => vis.visit_block(block), - token::NtStmt(stmt) => visit_clobber(stmt, |stmt| { - // See reasoning above. - stmt.map(|stmt| { - vis.flat_map_stmt(stmt).expect_one("expected visitor to produce exactly one item") - }) - }), - token::NtPat(pat) => vis.visit_pat(pat), token::NtExpr(expr) => vis.visit_expr(expr), token::NtLiteral(expr) => vis.visit_expr(expr), - token::NtMeta(item) => { - let AttrItem { unsafety: _, path, args, tokens } = item.deref_mut(); - vis.visit_path(path); - visit_attr_args(vis, args); - visit_lazy_tts(vis, tokens); - } - token::NtPath(path) => vis.visit_path(path), } } @@ -978,7 +961,14 @@ fn walk_fn<T: MutVisitor>(vis: &mut T, kind: FnKind<'_>) { _ctxt, _ident, _vis, - Fn { defaultness, generics, contract, body, sig: FnSig { header, decl, span } }, + Fn { + defaultness, + generics, + contract, + body, + sig: FnSig { header, decl, span }, + define_opaque, + }, ) => { // Identifier and visibility are visited as a part of the item. 
visit_defaultness(vis, defaultness); @@ -992,6 +982,11 @@ fn walk_fn<T: MutVisitor>(vis: &mut T, kind: FnKind<'_>) { vis.visit_block(body); } vis.visit_span(span); + + for (id, path) in define_opaque.iter_mut().flatten() { + vis.visit_id(id); + vis.visit_path(path) + } } FnKind::Closure(binder, coroutine_kind, decl, body) => { vis.visit_closure_binder(binder); @@ -1105,15 +1100,20 @@ fn walk_ty_alias_where_clauses<T: MutVisitor>(vis: &mut T, tawcs: &mut TyAliasWh fn walk_where_clause<T: MutVisitor>(vis: &mut T, wc: &mut WhereClause) { let WhereClause { has_where_token: _, predicates, span } = wc; - visit_thin_vec(predicates, |predicate| vis.visit_where_predicate(predicate)); + predicates.flat_map_in_place(|predicate| vis.flat_map_where_predicate(predicate)); vis.visit_span(span); } -pub fn walk_where_predicate<T: MutVisitor>(vis: &mut T, pred: &mut WherePredicate) { - let WherePredicate { kind, id, span } = pred; +pub fn walk_flat_map_where_predicate<T: MutVisitor>( + vis: &mut T, + mut pred: WherePredicate, +) -> SmallVec<[WherePredicate; 1]> { + let WherePredicate { attrs, kind, id, span, is_placeholder: _ } = &mut pred; vis.visit_id(id); + visit_attrs(vis, attrs); vis.visit_where_predicate_kind(kind); vis.visit_span(span); + smallvec![pred] } pub fn walk_where_predicate_kind<T: MutVisitor>(vis: &mut T, kind: &mut WherePredicateKind) { @@ -1745,6 +1745,10 @@ pub fn walk_expr<T: MutVisitor>(vis: &mut T, Expr { kind, id, span, attrs, token vis.visit_expr(expr); vis.visit_span(await_kw_span); } + ExprKind::Use(expr, use_kw_span) => { + vis.visit_expr(expr); + vis.visit_span(use_kw_span); + } ExprKind::Assign(el, er, span) => { vis.visit_expr(el); vis.visit_expr(er); @@ -1895,6 +1899,9 @@ fn walk_capture_by<T: MutVisitor>(vis: &mut T, capture_by: &mut CaptureBy) { CaptureBy::Value { move_kw } => { vis.visit_span(move_kw); } + CaptureBy::Use { use_kw } => { + vis.visit_span(use_kw); + } } } @@ -1929,7 +1936,7 @@ impl DummyAstNode for Item { span: Default::default(), tokens: Default::default(), }, - ident: Ident::empty(), + ident: Ident::dummy(), kind: ItemKind::ExternCrate(None), tokens: Default::default(), } diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs index 97d121909f8..f7cd63aaaf8 100644 --- a/compiler/rustc_ast/src/token.rs +++ b/compiler/rustc_ast/src/token.rs @@ -2,7 +2,6 @@ use std::borrow::Cow; use std::fmt; use std::sync::Arc; -pub use BinOpToken::*; pub use LitKind::*; pub use Nonterminal::*; pub use NtExprKind::*; @@ -26,21 +25,6 @@ pub enum CommentKind { Block, } -#[derive(Clone, PartialEq, Encodable, Decodable, Hash, Debug, Copy)] -#[derive(HashStable_Generic)] -pub enum BinOpToken { - Plus, - Minus, - Star, - Slash, - Percent, - Caret, - And, - Or, - Shl, - Shr, -} - // This type must not implement `Hash` due to the unusual `PartialEq` impl below. #[derive(Copy, Clone, Debug, Encodable, Decodable, HashStable_Generic)] pub enum InvisibleOrigin { @@ -90,7 +74,10 @@ pub enum MetaVarKind { Ident, Lifetime, Literal, - Meta, + Meta { + /// Will `AttrItem::meta` succeed on this, if reparsed? + has_meta_form: bool, + }, Path, Vis, TT, @@ -110,7 +97,7 @@ impl fmt::Display for MetaVarKind { MetaVarKind::Ident => sym::ident, MetaVarKind::Lifetime => sym::lifetime, MetaVarKind::Literal => sym::literal, - MetaVarKind::Meta => sym::meta, + MetaVarKind::Meta { .. 
} => sym::meta, MetaVarKind::Path => sym::path, MetaVarKind::Vis => sym::vis, MetaVarKind::TT => sym::tt, @@ -373,11 +360,49 @@ pub enum TokenKind { /// `||` OrOr, /// `!` - Not, + Bang, /// `~` Tilde, - BinOp(BinOpToken), - BinOpEq(BinOpToken), + // `+` + Plus, + // `-` + Minus, + // `*` + Star, + // `/` + Slash, + // `%` + Percent, + // `^` + Caret, + // `&` + And, + // `|` + Or, + // `<<` + Shl, + // `>>` + Shr, + // `+=` + PlusEq, + // `-=` + MinusEq, + // `*=` + StarEq, + // `/=` + SlashEq, + // `%=` + PercentEq, + // `^=` + CaretEq, + // `&=` + AndEq, + // `|=` + OrEq, + // `<<=` + ShlEq, + // `>>=` + ShrEq, /* Structural symbols */ /// `@` @@ -497,31 +522,31 @@ impl TokenKind { Some(match (self, n) { (Le, 1) => (Lt, Eq), (EqEq, 1) => (Eq, Eq), - (Ne, 1) => (Not, Eq), + (Ne, 1) => (Bang, Eq), (Ge, 1) => (Gt, Eq), - (AndAnd, 1) => (BinOp(And), BinOp(And)), - (OrOr, 1) => (BinOp(Or), BinOp(Or)), - (BinOp(Shl), 1) => (Lt, Lt), - (BinOp(Shr), 1) => (Gt, Gt), - (BinOpEq(Plus), 1) => (BinOp(Plus), Eq), - (BinOpEq(Minus), 1) => (BinOp(Minus), Eq), - (BinOpEq(Star), 1) => (BinOp(Star), Eq), - (BinOpEq(Slash), 1) => (BinOp(Slash), Eq), - (BinOpEq(Percent), 1) => (BinOp(Percent), Eq), - (BinOpEq(Caret), 1) => (BinOp(Caret), Eq), - (BinOpEq(And), 1) => (BinOp(And), Eq), - (BinOpEq(Or), 1) => (BinOp(Or), Eq), - (BinOpEq(Shl), 1) => (Lt, Le), // `<` + `<=` - (BinOpEq(Shl), 2) => (BinOp(Shl), Eq), // `<<` + `=` - (BinOpEq(Shr), 1) => (Gt, Ge), // `>` + `>=` - (BinOpEq(Shr), 2) => (BinOp(Shr), Eq), // `>>` + `=` + (AndAnd, 1) => (And, And), + (OrOr, 1) => (Or, Or), + (Shl, 1) => (Lt, Lt), + (Shr, 1) => (Gt, Gt), + (PlusEq, 1) => (Plus, Eq), + (MinusEq, 1) => (Minus, Eq), + (StarEq, 1) => (Star, Eq), + (SlashEq, 1) => (Slash, Eq), + (PercentEq, 1) => (Percent, Eq), + (CaretEq, 1) => (Caret, Eq), + (AndEq, 1) => (And, Eq), + (OrEq, 1) => (Or, Eq), + (ShlEq, 1) => (Lt, Le), // `<` + `<=` + (ShlEq, 2) => (Shl, Eq), // `<<` + `=` + (ShrEq, 1) => (Gt, Ge), // `>` + `>=` + (ShrEq, 2) => (Shr, Eq), // `>>` + `=` (DotDot, 1) => (Dot, Dot), (DotDotDot, 1) => (Dot, DotDot), // `.` + `..` (DotDotDot, 2) => (DotDot, Dot), // `..` + `.` (DotDotEq, 2) => (DotDot, Eq), (PathSep, 1) => (Colon, Colon), - (RArrow, 1) => (BinOp(Minus), Gt), - (LArrow, 1) => (Lt, BinOp(Minus)), + (RArrow, 1) => (Minus, Gt), + (LArrow, 1) => (Lt, Minus), (FatArrow, 1) => (Eq, Gt), _ => return None, }) @@ -540,7 +565,7 @@ impl TokenKind { } pub fn should_end_const_arg(&self) -> bool { - matches!(self, Gt | Ge | BinOp(Shr) | BinOpEq(Shr)) + matches!(self, Gt | Ge | Shr | ShrEq) } } @@ -579,11 +604,11 @@ impl Token { pub fn is_punct(&self) -> bool { match self.kind { - Eq | Lt | Le | EqEq | Ne | Ge | Gt | AndAnd | OrOr | Not | Tilde | BinOp(_) - | BinOpEq(_) | At | Dot | DotDot | DotDotDot | DotDotEq | Comma | Semi | Colon - | PathSep | RArrow | LArrow | FatArrow | Pound | Dollar | Question | SingleQuote => { - true - } + Eq | Lt | Le | EqEq | Ne | Ge | Gt | AndAnd | OrOr | Bang | Tilde | Plus | Minus + | Star | Slash | Percent | Caret | And | Or | Shl | Shr | PlusEq | MinusEq | StarEq + | SlashEq | PercentEq | CaretEq | AndEq | OrEq | ShlEq | ShrEq | At | Dot | DotDot + | DotDotDot | DotDotEq | Comma | Semi | Colon | PathSep | RArrow | LArrow + | FatArrow | Pound | Dollar | Question | SingleQuote => true, OpenDelim(..) | CloseDelim(..) | Literal(..) | DocComment(..) | Ident(..) | NtIdent(..) | Lifetime(..) | NtLifetime(..) | Interpolated(..) 
| Eof => false, @@ -591,7 +616,7 @@ impl Token { } pub fn is_like_plus(&self) -> bool { - matches!(self.kind, BinOp(Plus) | BinOpEq(Plus)) + matches!(self.kind, Plus | PlusEq) } /// Returns `true` if the token can appear at the start of an expression. @@ -605,15 +630,15 @@ impl Token { ident_can_begin_expr(name, self.span, is_raw), // value name or keyword OpenDelim(Parenthesis | Brace | Bracket) | // tuple, array or block Literal(..) | // literal - Not | // operator not - BinOp(Minus) | // unary minus - BinOp(Star) | // dereference - BinOp(Or) | OrOr | // closure - BinOp(And) | // reference + Bang | // operator not + Minus | // unary minus + Star | // dereference + Or | OrOr | // closure + And | // reference AndAnd | // double reference // DotDotDot is no longer supported, but we need some way to display the error DotDot | DotDotDot | DotDotEq | // range notation - Lt | BinOp(Shl) | // associated path + Lt | Shl | // associated path PathSep | // global path Lifetime(..) | // labeled loop Pound => true, // expression attributes @@ -621,8 +646,7 @@ impl Token { matches!(&**nt, NtBlock(..) | NtExpr(..) | - NtLiteral(..) | - NtPath(..) + NtLiteral(..) ), OpenDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar( MetaVarKind::Block | @@ -643,29 +667,25 @@ impl Token { Ident(..) | NtIdent(..) | OpenDelim(Delimiter::Parenthesis) | // tuple pattern OpenDelim(Delimiter::Bracket) | // slice pattern - BinOp(And) | // reference - BinOp(Minus) | // negative literal - AndAnd | // double reference - Literal(_) | // literal - DotDot | // range pattern (future compat) - DotDotDot | // range pattern (future compat) - PathSep | // path - Lt | // path (UFCS constant) - BinOp(Shl) => true, // path (double UFCS) - // leading vert `|` or-pattern - BinOp(Or) => matches!(pat_kind, PatWithOr), + And | // reference + Minus | // negative literal + AndAnd | // double reference + Literal(_) | // literal + DotDot | // range pattern (future compat) + DotDotDot | // range pattern (future compat) + PathSep | // path + Lt | // path (UFCS constant) + Shl => true, // path (double UFCS) + Or => matches!(pat_kind, PatWithOr), // leading vert `|` or-pattern Interpolated(nt) => matches!(&**nt, | NtExpr(..) | NtLiteral(..) - | NtMeta(..) - | NtPat(..) - | NtPath(..) ), OpenDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar( MetaVarKind::Expr { .. } | MetaVarKind::Literal | - MetaVarKind::Meta | + MetaVarKind::Meta { .. } | MetaVarKind::Pat(_) | MetaVarKind::Path | MetaVarKind::Ty { .. } @@ -677,19 +697,18 @@ impl Token { /// Returns `true` if the token can appear at the start of a type. pub fn can_begin_type(&self) -> bool { match self.uninterpolate().kind { - Ident(name, is_raw) => + Ident(name, is_raw) => ident_can_begin_type(name, self.span, is_raw), // type name or keyword OpenDelim(Delimiter::Parenthesis) | // tuple OpenDelim(Delimiter::Bracket) | // array - Not | // never - BinOp(Star) | // raw pointer - BinOp(And) | // reference - AndAnd | // double reference - Question | // maybe bound in trait object - Lifetime(..) | // lifetime bound in trait object - Lt | BinOp(Shl) | // associated path - PathSep => true, // global path - Interpolated(ref nt) => matches!(&**nt, NtPath(..)), + Bang | // never + Star | // raw pointer + And | // reference + AndAnd | // double reference + Question | // maybe bound in trait object + Lifetime(..) | // lifetime bound in trait object + Lt | Shl | // associated path + PathSep => true, // global path OpenDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar( MetaVarKind::Ty { .. 
} | MetaVarKind::Path @@ -703,7 +722,7 @@ impl Token { /// Returns `true` if the token can appear at the start of a const param. pub fn can_begin_const_arg(&self) -> bool { match self.kind { - OpenDelim(Delimiter::Brace) | Literal(..) | BinOp(Minus) => true, + OpenDelim(Delimiter::Brace) | Literal(..) | Minus => true, Ident(name, IdentIsRaw::No) if name.is_bool_lit() => true, Interpolated(ref nt) => matches!(&**nt, NtExpr(..) | NtBlock(..) | NtLiteral(..)), OpenDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar( @@ -752,7 +771,7 @@ impl Token { /// Keep this in sync with and `Lit::from_token`, excluding unary negation. pub fn can_begin_literal_maybe_minus(&self) -> bool { match self.uninterpolate().kind { - Literal(..) | BinOp(Minus) => true, + Literal(..) | Minus => true, Ident(name, IdentIsRaw::No) if name.is_bool_lit() => true, Interpolated(ref nt) => match &**nt { NtLiteral(_) => true, @@ -848,27 +867,17 @@ impl Token { self.ident().is_some_and(|(ident, _)| ident.name == name) } - /// Returns `true` if the token is an interpolated path. - fn is_whole_path(&self) -> bool { - if let Interpolated(nt) = &self.kind - && let NtPath(..) = &**nt - { - return true; - } - - false - } - /// Is this a pre-parsed expression dropped into the token stream /// (which happens while parsing the result of macro expansion)? pub fn is_whole_expr(&self) -> bool { + #[allow(irrefutable_let_patterns)] // FIXME: temporary if let Interpolated(nt) = &self.kind - && let NtExpr(_) | NtLiteral(_) | NtPath(_) | NtBlock(_) = &**nt + && let NtExpr(_) | NtLiteral(_) | NtBlock(_) = &**nt { - return true; + true + } else { + matches!(self.is_metavar_seq(), Some(MetaVarKind::Path)) } - - false } /// Is the token an interpolated block (`$b:block`)? @@ -888,13 +897,13 @@ impl Token { } pub fn is_qpath_start(&self) -> bool { - self == &Lt || self == &BinOp(Shl) + self == &Lt || self == &Shl } pub fn is_path_start(&self) -> bool { self == &PathSep || self.is_qpath_start() - || self.is_whole_path() + || matches!(self.is_metavar_seq(), Some(MetaVarKind::Path)) || self.is_path_segment_keyword() || self.is_ident() && !self.is_reserved_ident() } @@ -980,59 +989,82 @@ impl Token { } pub fn glue(&self, joint: &Token) -> Option<Token> { - let kind = match self.kind { - Eq => match joint.kind { - Eq => EqEq, - Gt => FatArrow, - _ => return None, - }, - Lt => match joint.kind { - Eq => Le, - Lt => BinOp(Shl), - Le => BinOpEq(Shl), - BinOp(Minus) => LArrow, - _ => return None, - }, - Gt => match joint.kind { - Eq => Ge, - Gt => BinOp(Shr), - Ge => BinOpEq(Shr), - _ => return None, - }, - Not => match joint.kind { - Eq => Ne, - _ => return None, - }, - BinOp(op) => match joint.kind { - Eq => BinOpEq(op), - BinOp(And) if op == And => AndAnd, - BinOp(Or) if op == Or => OrOr, - Gt if op == Minus => RArrow, - _ => return None, - }, - Dot => match joint.kind { - Dot => DotDot, - DotDot => DotDotDot, - _ => return None, - }, - DotDot => match joint.kind { - Dot => DotDotDot, - Eq => DotDotEq, - _ => return None, - }, - Colon => match joint.kind { - Colon => PathSep, - _ => return None, - }, - SingleQuote => match joint.kind { - Ident(name, is_raw) => Lifetime(Symbol::intern(&format!("'{name}")), is_raw), - _ => return None, - }, + let kind = match (&self.kind, &joint.kind) { + (Eq, Eq) => EqEq, + (Eq, Gt) => FatArrow, + (Eq, _) => return None, + + (Lt, Eq) => Le, + (Lt, Lt) => Shl, + (Lt, Le) => ShlEq, + (Lt, Minus) => LArrow, + (Lt, _) => return None, - Le | EqEq | Ne | Ge | AndAnd | OrOr | Tilde | BinOpEq(..) 
| At | DotDotDot - | DotDotEq | Comma | Semi | PathSep | RArrow | LArrow | FatArrow | Pound | Dollar - | Question | OpenDelim(..) | CloseDelim(..) | Literal(..) | Ident(..) | NtIdent(..) - | Lifetime(..) | NtLifetime(..) | Interpolated(..) | DocComment(..) | Eof => { + (Gt, Eq) => Ge, + (Gt, Gt) => Shr, + (Gt, Ge) => ShrEq, + (Gt, _) => return None, + + (Bang, Eq) => Ne, + (Bang, _) => return None, + + (Plus, Eq) => PlusEq, + (Plus, _) => return None, + + (Minus, Eq) => MinusEq, + (Minus, Gt) => RArrow, + (Minus, _) => return None, + + (Star, Eq) => StarEq, + (Star, _) => return None, + + (Slash, Eq) => SlashEq, + (Slash, _) => return None, + + (Percent, Eq) => PercentEq, + (Percent, _) => return None, + + (Caret, Eq) => CaretEq, + (Caret, _) => return None, + + (And, Eq) => AndEq, + (And, And) => AndAnd, + (And, _) => return None, + + (Or, Eq) => OrEq, + (Or, Or) => OrOr, + (Or, _) => return None, + + (Shl, Eq) => ShlEq, + (Shl, _) => return None, + + (Shr, Eq) => ShrEq, + (Shr, _) => return None, + + (Dot, Dot) => DotDot, + (Dot, DotDot) => DotDotDot, + (Dot, _) => return None, + + (DotDot, Dot) => DotDotDot, + (DotDot, Eq) => DotDotEq, + (DotDot, _) => return None, + + (Colon, Colon) => PathSep, + (Colon, _) => return None, + + (SingleQuote, Ident(name, is_raw)) => { + Lifetime(Symbol::intern(&format!("'{name}")), *is_raw) + } + (SingleQuote, _) => return None, + + ( + Le | EqEq | Ne | Ge | AndAnd | OrOr | Tilde | PlusEq | MinusEq | StarEq | SlashEq + | PercentEq | CaretEq | AndEq | OrEq | ShlEq | ShrEq | At | DotDotDot | DotDotEq + | Comma | Semi | PathSep | RArrow | LArrow | FatArrow | Pound | Dollar | Question + | OpenDelim(..) | CloseDelim(..) | Literal(..) | Ident(..) | NtIdent(..) + | Lifetime(..) | NtLifetime(..) | Interpolated(..) | DocComment(..) | Eof, + _, + ) => { return None; } }; @@ -1072,15 +1104,9 @@ pub enum NtExprKind { #[derive(Clone, Encodable, Decodable)] /// For interpolation during macro expansion. pub enum Nonterminal { - NtItem(P<ast::Item>), NtBlock(P<ast::Block>), - NtStmt(P<ast::Stmt>), - NtPat(P<ast::Pat>), NtExpr(P<ast::Expr>), NtLiteral(P<ast::Expr>), - /// Stuff inside brackets for attributes - NtMeta(P<ast::AttrItem>), - NtPath(P<ast::Path>), } #[derive(Debug, Copy, Clone, PartialEq, Eq, Encodable, Decodable, Hash, HashStable_Generic)] @@ -1169,26 +1195,16 @@ impl fmt::Display for NonterminalKind { impl Nonterminal { pub fn use_span(&self) -> Span { match self { - NtItem(item) => item.span, NtBlock(block) => block.span, - NtStmt(stmt) => stmt.span, - NtPat(pat) => pat.span, NtExpr(expr) | NtLiteral(expr) => expr.span, - NtMeta(attr_item) => attr_item.span(), - NtPath(path) => path.span, } } pub fn descr(&self) -> &'static str { match self { - NtItem(..) => "item", NtBlock(..) => "block", - NtStmt(..) => "statement", - NtPat(..) => "pattern", NtExpr(..) => "expression", NtLiteral(..) => "literal", - NtMeta(..) => "attribute", - NtPath(..) => "path", } } } @@ -1206,14 +1222,9 @@ impl PartialEq for Nonterminal { impl fmt::Debug for Nonterminal { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - NtItem(..) => f.pad("NtItem(..)"), NtBlock(..) => f.pad("NtBlock(..)"), - NtStmt(..) => f.pad("NtStmt(..)"), - NtPat(..) => f.pad("NtPat(..)"), NtExpr(..) => f.pad("NtExpr(..)"), NtLiteral(..) => f.pad("NtLiteral(..)"), - NtMeta(..) => f.pad("NtMeta(..)"), - NtPath(..) 
=> f.pad("NtPath(..)"), } } } diff --git a/compiler/rustc_ast/src/tokenstream.rs b/compiler/rustc_ast/src/tokenstream.rs index 1123ea3a449..bdd244be6d1 100644 --- a/compiler/rustc_ast/src/tokenstream.rs +++ b/compiler/rustc_ast/src/tokenstream.rs @@ -23,7 +23,7 @@ use rustc_macros::{Decodable, Encodable, HashStable_Generic}; use rustc_serialize::{Decodable, Encodable}; use rustc_span::{DUMMY_SP, Span, SpanDecoder, SpanEncoder, Symbol, sym}; -use crate::ast::{AttrStyle, StmtKind}; +use crate::ast::AttrStyle; use crate::ast_traits::{HasAttrs, HasTokens}; use crate::token::{self, Delimiter, InvisibleOrigin, Nonterminal, Token, TokenKind}; use crate::{AttrVec, Attribute}; @@ -461,16 +461,7 @@ impl TokenStream { pub fn from_nonterminal_ast(nt: &Nonterminal) -> TokenStream { match nt { - Nonterminal::NtItem(item) => TokenStream::from_ast(item), Nonterminal::NtBlock(block) => TokenStream::from_ast(block), - Nonterminal::NtStmt(stmt) if let StmtKind::Empty = stmt.kind => { - // FIXME: Properly collect tokens for empty statements. - TokenStream::token_alone(token::Semi, stmt.span) - } - Nonterminal::NtStmt(stmt) => TokenStream::from_ast(stmt), - Nonterminal::NtPat(pat) => TokenStream::from_ast(pat), - Nonterminal::NtMeta(attr) => TokenStream::from_ast(attr), - Nonterminal::NtPath(path) => TokenStream::from_ast(path), Nonterminal::NtExpr(expr) | Nonterminal::NtLiteral(expr) => TokenStream::from_ast(expr), } } @@ -654,7 +645,7 @@ impl TokenStream { if attr_style == AttrStyle::Inner { vec![ TokenTree::token_joint(token::Pound, span), - TokenTree::token_joint_hidden(token::Not, span), + TokenTree::token_joint_hidden(token::Bang, span), body, ] } else { diff --git a/compiler/rustc_ast/src/util/classify.rs b/compiler/rustc_ast/src/util/classify.rs index 64f2a98b8a6..e43d78f6e72 100644 --- a/compiler/rustc_ast/src/util/classify.rs +++ b/compiler/rustc_ast/src/util/classify.rs @@ -108,6 +108,7 @@ pub fn leading_labeled_expr(mut expr: &ast::Expr) -> bool { Assign(e, _, _) | AssignOp(_, e, _) | Await(e, _) + | Use(e, _) | Binary(_, e, _) | Call(e, _) | Cast(e, _) @@ -224,6 +225,7 @@ pub fn expr_trailing_brace(mut expr: &ast::Expr) -> Option<TrailingBrace<'_>> { | Lit(_) | Type(_, _) | Await(_, _) + | Use(_, _) | Field(_, _) | Index(_, _, _) | Underscore diff --git a/compiler/rustc_ast/src/util/parser.rs b/compiler/rustc_ast/src/util/parser.rs index 8f2b7a23c01..98b1fc52ed7 100644 --- a/compiler/rustc_ast/src/util/parser.rs +++ b/compiler/rustc_ast/src/util/parser.rs @@ -1,59 +1,21 @@ use rustc_span::kw; -use crate::ast::{self, BinOpKind}; -use crate::token::{self, BinOpToken, Token}; +use crate::ast::{self, BinOpKind, RangeLimits}; +use crate::token::{self, Token}; -/// Associative operator with precedence. -/// -/// This is the enum which specifies operator precedence and fixity to the parser. +/// Associative operator. #[derive(Copy, Clone, PartialEq, Debug)] pub enum AssocOp { - /// `+` - Add, - /// `-` - Subtract, - /// `*` - Multiply, - /// `/` - Divide, - /// `%` - Modulus, - /// `&&` - LAnd, - /// `||` - LOr, - /// `^` - BitXor, - /// `&` - BitAnd, - /// `|` - BitOr, - /// `<<` - ShiftLeft, - /// `>>` - ShiftRight, - /// `==` - Equal, - /// `<` - Less, - /// `<=` - LessEqual, - /// `!=` - NotEqual, - /// `>` - Greater, - /// `>=` - GreaterEqual, + /// A binary op. + Binary(BinOpKind), + /// `?=` where ? is one of the assignable BinOps + AssignOp(BinOpKind), /// `=` Assign, - /// `?=` where ? 
is one of the BinOpToken - AssignOp(BinOpToken), /// `as` - As, - /// `..` range - DotDot, - /// `..=` range - DotDotEq, + Cast, + /// `..` or `..=` range + Range(RangeLimits), } #[derive(PartialEq, Debug)] @@ -67,81 +29,56 @@ pub enum Fixity { } impl AssocOp { - /// Creates a new AssocOP from a token + /// Creates a new AssocOp from a token. pub fn from_token(t: &Token) -> Option<AssocOp> { use AssocOp::*; match t.kind { - token::BinOpEq(k) => Some(AssignOp(k)), token::Eq => Some(Assign), - token::BinOp(BinOpToken::Star) => Some(Multiply), - token::BinOp(BinOpToken::Slash) => Some(Divide), - token::BinOp(BinOpToken::Percent) => Some(Modulus), - token::BinOp(BinOpToken::Plus) => Some(Add), - token::BinOp(BinOpToken::Minus) => Some(Subtract), - token::BinOp(BinOpToken::Shl) => Some(ShiftLeft), - token::BinOp(BinOpToken::Shr) => Some(ShiftRight), - token::BinOp(BinOpToken::And) => Some(BitAnd), - token::BinOp(BinOpToken::Caret) => Some(BitXor), - token::BinOp(BinOpToken::Or) => Some(BitOr), - token::Lt => Some(Less), - token::Le => Some(LessEqual), - token::Ge => Some(GreaterEqual), - token::Gt => Some(Greater), - token::EqEq => Some(Equal), - token::Ne => Some(NotEqual), - token::AndAnd => Some(LAnd), - token::OrOr => Some(LOr), - token::DotDot => Some(DotDot), - token::DotDotEq => Some(DotDotEq), + token::Plus => Some(Binary(BinOpKind::Add)), + token::Minus => Some(Binary(BinOpKind::Sub)), + token::Star => Some(Binary(BinOpKind::Mul)), + token::Slash => Some(Binary(BinOpKind::Div)), + token::Percent => Some(Binary(BinOpKind::Rem)), + token::Caret => Some(Binary(BinOpKind::BitXor)), + token::And => Some(Binary(BinOpKind::BitAnd)), + token::Or => Some(Binary(BinOpKind::BitOr)), + token::Shl => Some(Binary(BinOpKind::Shl)), + token::Shr => Some(Binary(BinOpKind::Shr)), + token::PlusEq => Some(AssignOp(BinOpKind::Add)), + token::MinusEq => Some(AssignOp(BinOpKind::Sub)), + token::StarEq => Some(AssignOp(BinOpKind::Mul)), + token::SlashEq => Some(AssignOp(BinOpKind::Div)), + token::PercentEq => Some(AssignOp(BinOpKind::Rem)), + token::CaretEq => Some(AssignOp(BinOpKind::BitXor)), + token::AndEq => Some(AssignOp(BinOpKind::BitAnd)), + token::OrEq => Some(AssignOp(BinOpKind::BitOr)), + token::ShlEq => Some(AssignOp(BinOpKind::Shl)), + token::ShrEq => Some(AssignOp(BinOpKind::Shr)), + token::Lt => Some(Binary(BinOpKind::Lt)), + token::Le => Some(Binary(BinOpKind::Le)), + token::Ge => Some(Binary(BinOpKind::Ge)), + token::Gt => Some(Binary(BinOpKind::Gt)), + token::EqEq => Some(Binary(BinOpKind::Eq)), + token::Ne => Some(Binary(BinOpKind::Ne)), + token::AndAnd => Some(Binary(BinOpKind::And)), + token::OrOr => Some(Binary(BinOpKind::Or)), + token::DotDot => Some(Range(RangeLimits::HalfOpen)), // DotDotDot is no longer supported, but we need some way to display the error - token::DotDotDot => Some(DotDotEq), + token::DotDotEq | token::DotDotDot => Some(Range(RangeLimits::Closed)), // `<-` should probably be `< -` - token::LArrow => Some(Less), - _ if t.is_keyword(kw::As) => Some(As), + token::LArrow => Some(Binary(BinOpKind::Lt)), + _ if t.is_keyword(kw::As) => Some(Cast), _ => None, } } - /// Creates a new AssocOp from ast::BinOpKind. 
- pub fn from_ast_binop(op: BinOpKind) -> Self { - use AssocOp::*; - match op { - BinOpKind::Lt => Less, - BinOpKind::Gt => Greater, - BinOpKind::Le => LessEqual, - BinOpKind::Ge => GreaterEqual, - BinOpKind::Eq => Equal, - BinOpKind::Ne => NotEqual, - BinOpKind::Mul => Multiply, - BinOpKind::Div => Divide, - BinOpKind::Rem => Modulus, - BinOpKind::Add => Add, - BinOpKind::Sub => Subtract, - BinOpKind::Shl => ShiftLeft, - BinOpKind::Shr => ShiftRight, - BinOpKind::BitAnd => BitAnd, - BinOpKind::BitXor => BitXor, - BinOpKind::BitOr => BitOr, - BinOpKind::And => LAnd, - BinOpKind::Or => LOr, - } - } - /// Gets the precedence of this operator pub fn precedence(&self) -> ExprPrecedence { use AssocOp::*; match *self { - As => ExprPrecedence::Cast, - Multiply | Divide | Modulus => ExprPrecedence::Product, - Add | Subtract => ExprPrecedence::Sum, - ShiftLeft | ShiftRight => ExprPrecedence::Shift, - BitAnd => ExprPrecedence::BitAnd, - BitXor => ExprPrecedence::BitXor, - BitOr => ExprPrecedence::BitOr, - Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => ExprPrecedence::Compare, - LAnd => ExprPrecedence::LAnd, - LOr => ExprPrecedence::LOr, - DotDot | DotDotEq => ExprPrecedence::Range, + Cast => ExprPrecedence::Cast, + Binary(bin_op) => bin_op.precedence(), + Range(_) => ExprPrecedence::Range, Assign | AssignOp(_) => ExprPrecedence::Assign, } } @@ -152,22 +89,17 @@ impl AssocOp { // NOTE: it is a bug to have an operators that has same precedence but different fixities! match *self { Assign | AssignOp(_) => Fixity::Right, - As | Multiply | Divide | Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd - | BitXor | BitOr | LAnd | LOr => Fixity::Left, - Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual | DotDot | DotDotEq => { - Fixity::None - } + Binary(binop) => binop.fixity(), + Cast => Fixity::Left, + Range(_) => Fixity::None, } } pub fn is_comparison(&self) -> bool { use AssocOp::*; match *self { - Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual => true, - Assign | AssignOp(_) | As | Multiply | Divide | Modulus | Add | Subtract - | ShiftLeft | ShiftRight | BitAnd | BitXor | BitOr | LAnd | LOr | DotDot | DotDotEq => { - false - } + Binary(binop) => binop.is_comparison(), + Assign | AssignOp(_) | Cast | Range(_) => false, } } @@ -175,34 +107,7 @@ impl AssocOp { use AssocOp::*; match *self { Assign | AssignOp(_) => true, - Less | Greater | LessEqual | GreaterEqual | Equal | NotEqual | As | Multiply - | Divide | Modulus | Add | Subtract | ShiftLeft | ShiftRight | BitAnd | BitXor - | BitOr | LAnd | LOr | DotDot | DotDotEq => false, - } - } - - pub fn to_ast_binop(&self) -> Option<BinOpKind> { - use AssocOp::*; - match *self { - Less => Some(BinOpKind::Lt), - Greater => Some(BinOpKind::Gt), - LessEqual => Some(BinOpKind::Le), - GreaterEqual => Some(BinOpKind::Ge), - Equal => Some(BinOpKind::Eq), - NotEqual => Some(BinOpKind::Ne), - Multiply => Some(BinOpKind::Mul), - Divide => Some(BinOpKind::Div), - Modulus => Some(BinOpKind::Rem), - Add => Some(BinOpKind::Add), - Subtract => Some(BinOpKind::Sub), - ShiftLeft => Some(BinOpKind::Shl), - ShiftRight => Some(BinOpKind::Shr), - BitAnd => Some(BinOpKind::BitAnd), - BitXor => Some(BinOpKind::BitXor), - BitOr => Some(BinOpKind::BitOr), - LAnd => Some(BinOpKind::And), - LOr => Some(BinOpKind::Or), - Assign | AssignOp(_) | As | DotDot | DotDotEq => None, + Cast | Binary(_) | Range(_) => false, } } @@ -212,20 +117,23 @@ impl AssocOp { /// parentheses while having a high degree of confidence on the correctness of the 
suggestion. pub fn can_continue_expr_unambiguously(&self) -> bool { use AssocOp::*; + use BinOpKind::*; matches!( self, - BitXor | // `{ 42 } ^ 3` Assign | // `{ 42 } = { 42 }` - Divide | // `{ 42 } / 42` - Modulus | // `{ 42 } % 2` - ShiftRight | // `{ 42 } >> 2` - LessEqual | // `{ 42 } <= 3` - Greater | // `{ 42 } > 3` - GreaterEqual | // `{ 42 } >= 3` + Binary( + BitXor | // `{ 42 } ^ 3` + Div | // `{ 42 } / 42` + Rem | // `{ 42 } % 2` + Shr | // `{ 42 } >> 2` + Le | // `{ 42 } <= 3` + Gt | // `{ 42 } > 3` + Ge // `{ 42 } >= 3` + ) | AssignOp(_) | // `{ 42 } +=` // Equal | // `{ 42 } == { 42 }` Accepting these here would regress incorrect // NotEqual | // `{ 42 } != { 42 } struct literals parser recovery. - As // `{ 42 } as usize` + Cast // `{ 42 } as usize` ) } } diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs index 1cb32b56875..cfcb0e23cb5 100644 --- a/compiler/rustc_ast/src/visit.rs +++ b/compiler/rustc_ast/src/visit.rs @@ -833,7 +833,8 @@ pub fn walk_where_predicate<'a, V: Visitor<'a>>( visitor: &mut V, predicate: &'a WherePredicate, ) -> V::Result { - let WherePredicate { kind, id: _, span: _ } = predicate; + let WherePredicate { attrs, kind, id: _, span: _, is_placeholder: _ } = predicate; + walk_list!(visitor, visit_attribute, attrs); visitor.visit_where_predicate_kind(kind) } @@ -891,7 +892,14 @@ pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>) -> V::Resu _ctxt, _ident, _vis, - Fn { defaultness: _, sig: FnSig { header, decl, span: _ }, generics, contract, body }, + Fn { + defaultness: _, + sig: FnSig { header, decl, span: _ }, + generics, + contract, + body, + define_opaque, + }, ) => { // Identifier and visibility are visited as a part of the item. try_visit!(visitor.visit_fn_header(header)); @@ -899,6 +907,9 @@ pub fn walk_fn<'a, V: Visitor<'a>>(visitor: &mut V, kind: FnKind<'a>) -> V::Resu try_visit!(visitor.visit_fn_decl(decl)); visit_opt!(visitor, visit_contract, contract); visit_opt!(visitor, visit_block, body); + for (id, path) in define_opaque.iter().flatten() { + try_visit!(visitor.visit_path(path, *id)) + } } FnKind::Closure(binder, coroutine_kind, decl, body) => { try_visit!(visitor.visit_closure_binder(binder)); @@ -1202,7 +1213,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) -> V FnKind::Closure(binder, coroutine_kind, fn_decl, body), *span, *id - )) + )); } ExprKind::Block(block, opt_label) => { visit_opt!(visitor, visit_label, opt_label); @@ -1210,6 +1221,7 @@ pub fn walk_expr<'a, V: Visitor<'a>>(visitor: &mut V, expression: &'a Expr) -> V } ExprKind::Gen(_capt, body, _kind, _decl_span) => try_visit!(visitor.visit_block(body)), ExprKind::Await(expr, _span) => try_visit!(visitor.visit_expr(expr)), + ExprKind::Use(expr, _span) => try_visit!(visitor.visit_expr(expr)), ExprKind::Assign(lhs, rhs, _span) => { try_visit!(visitor.visit_expr(lhs)); try_visit!(visitor.visit_expr(rhs)); diff --git a/compiler/rustc_ast_ir/src/lib.rs b/compiler/rustc_ast_ir/src/lib.rs index 9884e191ea7..6d05cd18cec 100644 --- a/compiler/rustc_ast_ir/src/lib.rs +++ b/compiler/rustc_ast_ir/src/lib.rs @@ -9,7 +9,6 @@ #![cfg_attr(feature = "nightly", allow(internal_features))] #![cfg_attr(feature = "nightly", feature(never_type))] #![cfg_attr(feature = "nightly", feature(rustc_attrs))] -#![warn(unreachable_pub)] // tidy-alphabetical-end #[cfg(feature = "nightly")] diff --git a/compiler/rustc_ast_lowering/messages.ftl b/compiler/rustc_ast_lowering/messages.ftl index 1b91c33742d..906fb213ee7 100644 --- 
a/compiler/rustc_ast_lowering/messages.ftl +++ b/compiler/rustc_ast_lowering/messages.ftl @@ -37,11 +37,11 @@ ast_lowering_bad_return_type_notation_inputs = .suggestion = remove the input types ast_lowering_bad_return_type_notation_needs_dots = return type notation arguments must be elided with `..` - .suggestion = add `..` + .suggestion = use the correct syntax by adding `..` to the arguments ast_lowering_bad_return_type_notation_output = return type not allowed with return type notation - .suggestion = remove the return type +ast_lowering_bad_return_type_notation_output_suggestion = use the right argument notation and remove the return type ast_lowering_bad_return_type_notation_position = return type notation not allowed in this position yet @@ -88,7 +88,7 @@ ast_lowering_invalid_abi_clobber_abi = invalid ABI for `clobber_abi` .note = the following ABIs are supported on this target: {$supported_abis} -ast_lowering_invalid_abi_suggestion = did you mean +ast_lowering_invalid_abi_suggestion = there's a similarly named valid ABI `{$suggestion}` ast_lowering_invalid_asm_template_modifier_const = asm template modifiers are not allowed for `const` arguments diff --git a/compiler/rustc_ast_lowering/src/asm.rs b/compiler/rustc_ast_lowering/src/asm.rs index cfd32fc066f..87af7959a88 100644 --- a/compiler/rustc_ast_lowering/src/asm.rs +++ b/compiler/rustc_ast_lowering/src/asm.rs @@ -195,7 +195,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } } InlineAsmOperand::Const { anon_const } => hir::InlineAsmOperand::Const { - anon_const: self.lower_anon_const_to_anon_const(anon_const), + anon_const: self.lower_const_block(anon_const), }, InlineAsmOperand::Sym { sym } => { let static_def_id = self diff --git a/compiler/rustc_ast_lowering/src/delegation.rs b/compiler/rustc_ast_lowering/src/delegation.rs index d8b7cb0c322..946f2cafa15 100644 --- a/compiler/rustc_ast_lowering/src/delegation.rs +++ b/compiler/rustc_ast_lowering/src/delegation.rs @@ -60,25 +60,27 @@ pub(crate) struct DelegationResults<'hir> { } impl<'hir> LoweringContext<'_, 'hir> { - pub(crate) fn delegation_has_self(&self, item_id: NodeId, path_id: NodeId, span: Span) -> bool { + /// Defines whether the delegatee is an associated function whose first parameter is `self`. + pub(crate) fn delegatee_is_method(&self, item_id: NodeId, path_id: NodeId, span: Span) -> bool { let sig_id = self.get_delegation_sig_id(item_id, path_id, span); let Ok(sig_id) = sig_id else { return false; }; - self.has_self(sig_id, span) + self.is_method(sig_id, span) } - fn has_self(&self, def_id: DefId, span: Span) -> bool { - if let Some(local_sig_id) = def_id.as_local() { - // The value may be missing due to recursive delegation. - // Error will be emitted later during HIR ty lowering. 
- self.resolver.delegation_fn_sigs.get(&local_sig_id).is_some_and(|sig| sig.has_self) - } else { - match self.tcx.def_kind(def_id) { - DefKind::Fn => false, - DefKind::AssocFn => self.tcx.associated_item(def_id).fn_has_self_parameter, - _ => span_bug!(span, "unexpected DefKind for delegation item"), - } + fn is_method(&self, def_id: DefId, span: Span) -> bool { + match self.tcx.def_kind(def_id) { + DefKind::Fn => false, + DefKind::AssocFn => match def_id.as_local() { + Some(local_def_id) => self + .resolver + .delegation_fn_sigs + .get(&local_def_id) + .is_some_and(|sig| sig.has_self), + None => self.tcx.associated_item(def_id).fn_has_self_parameter, + }, + _ => span_bug!(span, "unexpected DefKind for delegation item"), } } @@ -324,10 +326,11 @@ impl<'hir> LoweringContext<'_, 'hir> { let call = if self .get_resolution_id(delegation.id, span) - .and_then(|def_id| Ok(self.has_self(def_id, span))) + .and_then(|def_id| Ok(self.is_method(def_id, span))) .unwrap_or_default() && delegation.qself.is_none() && !has_generic_args + && !args.is_empty() { let ast_segment = delegation.path.segments.last().unwrap(); let segment = self.lower_path_segment( diff --git a/compiler/rustc_ast_lowering/src/errors.rs b/compiler/rustc_ast_lowering/src/errors.rs index f31e2db051d..ceeb5dffbea 100644 --- a/compiler/rustc_ast_lowering/src/errors.rs +++ b/compiler/rustc_ast_lowering/src/errors.rs @@ -46,8 +46,9 @@ pub(crate) struct TupleStructWithDefault { #[derive(Subdiagnostic)] #[suggestion( ast_lowering_invalid_abi_suggestion, - code = "{suggestion}", - applicability = "maybe-incorrect" + code = "\"{suggestion}\"", + applicability = "maybe-incorrect", + style = "verbose" )] pub(crate) struct InvalidAbiSuggestion { #[primary_span] @@ -372,24 +373,39 @@ pub(crate) struct InclusiveRangeWithNoEnd { pub span: Span, } +#[derive(Subdiagnostic)] +#[multipart_suggestion( + ast_lowering_bad_return_type_notation_output_suggestion, + applicability = "machine-applicable", + style = "verbose" +)] +/// Given `T: Tr<m() -> Ret>` or `T: Tr<m(Ty) -> Ret>`, suggest `T: Tr<m(..)>`. 
+pub(crate) struct RTNSuggestion { + #[suggestion_part(code = "")] + pub output: Span, + #[suggestion_part(code = "(..)")] + pub input: Span, +} + #[derive(Diagnostic)] pub(crate) enum BadReturnTypeNotation { #[diag(ast_lowering_bad_return_type_notation_inputs)] Inputs { #[primary_span] - #[suggestion(code = "()", applicability = "maybe-incorrect")] + #[suggestion(code = "(..)", applicability = "machine-applicable", style = "verbose")] span: Span, }, #[diag(ast_lowering_bad_return_type_notation_output)] Output { #[primary_span] - #[suggestion(code = "", applicability = "maybe-incorrect")] span: Span, + #[subdiagnostic] + suggestion: RTNSuggestion, }, #[diag(ast_lowering_bad_return_type_notation_needs_dots)] NeedsDots { #[primary_span] - #[suggestion(code = "(..)", applicability = "maybe-incorrect")] + #[suggestion(code = "(..)", applicability = "machine-applicable", style = "verbose")] span: Span, }, #[diag(ast_lowering_bad_return_type_notation_position)] diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index efbd1711daa..5bb6704dde4 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -13,7 +13,7 @@ use rustc_middle::span_bug; use rustc_middle::ty::TyCtxt; use rustc_session::errors::report_lit_error; use rustc_span::source_map::{Spanned, respan}; -use rustc_span::{DUMMY_SP, DesugaringKind, Ident, Span, Symbol, kw, sym}; +use rustc_span::{DUMMY_SP, DesugaringKind, Ident, Span, Symbol, sym}; use thin_vec::{ThinVec, thin_vec}; use visit::{Visitor, walk_expr}; @@ -207,6 +207,7 @@ impl<'hir> LoweringContext<'_, 'hir> { }, ), ExprKind::Await(expr, await_kw_span) => self.lower_expr_await(*await_kw_span, expr), + ExprKind::Use(expr, use_kw_span) => self.lower_expr_use(*use_kw_span, expr), ExprKind::Closure(box Closure { binder, capture_clause, @@ -483,7 +484,7 @@ impl<'hir> LoweringContext<'_, 'hir> { if legacy_args_idx.contains(&idx) { let parent_def_id = self.current_hir_id_owner.def_id; let node_id = self.next_node_id(); - self.create_def(parent_def_id, node_id, kw::Empty, DefKind::AnonConst, f.span); + self.create_def(parent_def_id, node_id, None, DefKind::AnonConst, f.span); let mut visitor = WillCreateDefIdsVisitor {}; let const_value = if let ControlFlow::Break(span) = visitor.visit_expr(&arg) { AstP(Expr { @@ -671,10 +672,13 @@ impl<'hir> LoweringContext<'_, 'hir> { let span = self.lower_span(arm.span); self.lower_attrs(hir_id, &arm.attrs, arm.span); let is_never_pattern = pat.is_never_pattern(); - let body = if let Some(body) = &arm.body + // We need to lower the body even if it's unneeded for never pattern in match, + // ensure that we can get HirId for DefId if need (issue #137708). + let body = arm.body.as_ref().map(|x| self.lower_expr(x)); + let body = if let Some(body) = body && !is_never_pattern { - self.lower_expr(body) + body } else { // Either `body.is_none()` or `is_never_pattern` here. 
if !is_never_pattern { @@ -1064,6 +1068,10 @@ impl<'hir> LoweringContext<'_, 'hir> { ) } + fn lower_expr_use(&mut self, use_kw_span: Span, expr: &Expr) -> hir::ExprKind<'hir> { + hir::ExprKind::Use(self.lower_expr(expr), use_kw_span) + } + fn lower_expr_closure( &mut self, binder: &ClosureBinder, @@ -1687,6 +1695,19 @@ impl<'hir> LoweringContext<'_, 'hir> { let yielded = opt_expr.as_ref().map(|x| self.lower_expr(x)).unwrap_or_else(|| self.expr_unit(span)); + if !self.tcx.features().yield_expr() + && !self.tcx.features().coroutines() + && !self.tcx.features().gen_blocks() + { + rustc_session::parse::feature_err( + &self.tcx.sess, + sym::yield_expr, + span, + fluent_generated::ast_lowering_yield, + ) + .emit(); + } + let is_async_gen = match self.coroutine_kind { Some(hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::Gen, _)) => false, Some(hir::CoroutineKind::Desugared(hir::CoroutineDesugaring::AsyncGen, _)) => true, @@ -1711,28 +1732,8 @@ impl<'hir> LoweringContext<'_, 'hir> { None, ); } - Some(hir::CoroutineKind::Coroutine(_)) => { - if !self.tcx.features().coroutines() { - rustc_session::parse::feature_err( - &self.tcx.sess, - sym::coroutines, - span, - fluent_generated::ast_lowering_yield, - ) - .emit(); - } - false - } + Some(hir::CoroutineKind::Coroutine(_)) => false, None => { - if !self.tcx.features().coroutines() { - rustc_session::parse::feature_err( - &self.tcx.sess, - sym::coroutines, - span, - fluent_generated::ast_lowering_yield, - ) - .emit(); - } let suggestion = self.current_item.map(|s| s.shrink_to_lo()); self.dcx().emit_err(YieldInClosure { span, suggestion }); self.coroutine_kind = Some(hir::CoroutineKind::Coroutine(Movability::Movable)); @@ -2129,26 +2130,24 @@ impl<'hir> LoweringContext<'_, 'hir> { self.arena.alloc(self.expr(sp, hir::ExprKind::Tup(&[]))) } - pub(super) fn expr_usize(&mut self, sp: Span, value: usize) -> hir::Expr<'hir> { + fn expr_uint(&mut self, sp: Span, ty: ast::UintTy, value: u128) -> hir::Expr<'hir> { let lit = self.arena.alloc(hir::Lit { span: sp, - node: ast::LitKind::Int( - (value as u128).into(), - ast::LitIntType::Unsigned(ast::UintTy::Usize), - ), + node: ast::LitKind::Int(value.into(), ast::LitIntType::Unsigned(ty)), }); self.expr(sp, hir::ExprKind::Lit(lit)) } + pub(super) fn expr_usize(&mut self, sp: Span, value: usize) -> hir::Expr<'hir> { + self.expr_uint(sp, ast::UintTy::Usize, value as u128) + } + pub(super) fn expr_u32(&mut self, sp: Span, value: u32) -> hir::Expr<'hir> { - let lit = self.arena.alloc(hir::Lit { - span: sp, - node: ast::LitKind::Int( - u128::from(value).into(), - ast::LitIntType::Unsigned(ast::UintTy::U32), - ), - }); - self.expr(sp, hir::ExprKind::Lit(lit)) + self.expr_uint(sp, ast::UintTy::U32, value as u128) + } + + pub(super) fn expr_u16(&mut self, sp: Span, value: u16) -> hir::Expr<'hir> { + self.expr_uint(sp, ast::UintTy::U16, value as u128) } pub(super) fn expr_char(&mut self, sp: Span, value: char) -> hir::Expr<'hir> { diff --git a/compiler/rustc_ast_lowering/src/format.rs b/compiler/rustc_ast_lowering/src/format.rs index 07b94dbc2ae..faa47274f96 100644 --- a/compiler/rustc_ast_lowering/src/format.rs +++ b/compiler/rustc_ast_lowering/src/format.rs @@ -292,7 +292,7 @@ fn make_count<'hir>( hir::LangItem::FormatCount, sym::Is, )); - let value = ctx.arena.alloc_from_iter([ctx.expr_usize(sp, *n)]); + let value = ctx.arena.alloc_from_iter([ctx.expr_u16(sp, *n)]); ctx.expr_call_mut(sp, count_is, value) } Some(FormatCount::Argument(arg)) => { diff --git a/compiler/rustc_ast_lowering/src/item.rs 
b/compiler/rustc_ast_lowering/src/item.rs index a4fc4b3e3a1..9adc8bdd361 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -3,10 +3,9 @@ use rustc_ast::ptr::P; use rustc_ast::visit::AssocCtxt; use rustc_ast::*; use rustc_errors::ErrorGuaranteed; -use rustc_hir as hir; -use rustc_hir::PredicateOrigin; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{CRATE_DEF_ID, LocalDefId}; +use rustc_hir::{self as hir, HirId, PredicateOrigin}; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::span_bug; use rustc_middle::ty::{ResolverAstLowering, TyCtxt}; @@ -209,6 +208,7 @@ impl<'hir> LoweringContext<'_, 'hir> { generics, body, contract, + define_opaque, .. }) => { self.with_new_scopes(*fn_sig_span, |this| { @@ -237,6 +237,7 @@ impl<'hir> LoweringContext<'_, 'hir> { header: this.lower_fn_header(*header, hir::Safety::Safe, attrs), span: this.lower_span(*fn_sig_span), }; + this.lower_define_opaque(hir_id, &define_opaque); hir::ItemKind::Fn { sig, generics, body: body_id, has_body: body.is_some() } }) } @@ -779,7 +780,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ); (generics, kind, expr.is_some()) } - AssocItemKind::Fn(box Fn { sig, generics, body: None, .. }) => { + AssocItemKind::Fn(box Fn { sig, generics, body: None, define_opaque, .. }) => { // FIXME(contracts): Deny contract here since it won't apply to // any impl method or callees. let names = self.lower_fn_params_to_names(&sig.decl); @@ -791,9 +792,22 @@ impl<'hir> LoweringContext<'_, 'hir> { sig.header.coroutine_kind, attrs, ); + if define_opaque.is_some() { + self.dcx().span_err( + i.span, + "only trait methods with default bodies can define opaque types", + ); + } (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false) } - AssocItemKind::Fn(box Fn { sig, generics, body: Some(body), contract, .. }) => { + AssocItemKind::Fn(box Fn { + sig, + generics, + body: Some(body), + contract, + define_opaque, + .. + }) => { let body_id = self.lower_maybe_coroutine_body( sig.span, i.span, @@ -812,6 +826,7 @@ impl<'hir> LoweringContext<'_, 'hir> { sig.header.coroutine_kind, attrs, ); + self.lower_define_opaque(hir_id, &define_opaque); (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true) } AssocItemKind::Type(box TyAlias { generics, where_clauses, bounds, ty, .. }) => { @@ -871,7 +886,7 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::AssocItemKind::Fn { has_self: sig.decl.has_self() } } AssocItemKind::Delegation(box delegation) => hir::AssocItemKind::Fn { - has_self: self.delegation_has_self(i.id, delegation.id, i.span), + has_self: self.delegatee_is_method(i.id, delegation.id, i.span), }, AssocItemKind::MacCall(..) | AssocItemKind::DelegationMac(..) => { panic!("macros should have been expanded by now") @@ -911,7 +926,7 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::ImplItemKind::Const(ty, body) }, ), - AssocItemKind::Fn(box Fn { sig, generics, body, contract, .. }) => { + AssocItemKind::Fn(box Fn { sig, generics, body, contract, define_opaque, .. 
}) => { let body_id = self.lower_maybe_coroutine_body( sig.span, i.span, @@ -930,6 +945,7 @@ impl<'hir> LoweringContext<'_, 'hir> { sig.header.coroutine_kind, attrs, ); + self.lower_define_opaque(hir_id, &define_opaque); (generics, hir::ImplItemKind::Fn(sig, body_id)) } @@ -1000,7 +1016,7 @@ impl<'hir> LoweringContext<'_, 'hir> { hir::AssocItemKind::Fn { has_self: sig.decl.has_self() } } AssocItemKind::Delegation(box delegation) => hir::AssocItemKind::Fn { - has_self: self.delegation_has_self(i.id, delegation.id, i.span), + has_self: self.delegatee_is_method(i.id, delegation.id, i.span), }, AssocItemKind::MacCall(..) | AssocItemKind::DelegationMac(..) => { panic!("macros should have been expanded by now") @@ -1510,7 +1526,7 @@ impl<'hir> LoweringContext<'_, 'hir> { span: abi.span, suggestion: suggested_name.map(|suggested_name| InvalidAbiSuggestion { span: abi.span, - suggestion: format!("\"{suggested_name}\""), + suggestion: suggested_name.to_string(), }), command: "rustc --print=calling-conventions".to_string(), }); @@ -1657,6 +1673,35 @@ impl<'hir> LoweringContext<'_, 'hir> { (lowered_generics, res) } + pub(super) fn lower_define_opaque( + &mut self, + hir_id: HirId, + define_opaque: &Option<ThinVec<(NodeId, Path)>>, + ) { + assert_eq!(self.define_opaque, None); + assert!(hir_id.is_owner()); + let Some(define_opaque) = define_opaque.as_ref() else { + return; + }; + let define_opaque = define_opaque.iter().filter_map(|(id, path)| { + let res = self.resolver.get_partial_res(*id).unwrap(); + let Some(did) = res.expect_full_res().opt_def_id() else { + self.dcx().span_delayed_bug(path.span, "should have errored in resolve"); + return None; + }; + let Some(did) = did.as_local() else { + self.dcx().span_err( + path.span, + "only opaque types defined in the local crate can be defined", + ); + return None; + }; + Some((self.lower_span(path.span), did)) + }); + let define_opaque = self.arena.alloc_from_iter(define_opaque); + self.define_opaque = Some(define_opaque); + } + pub(super) fn lower_generic_bound_predicate( &mut self, ident: Ident, @@ -1728,6 +1773,7 @@ impl<'hir> LoweringContext<'_, 'hir> { fn lower_where_predicate(&mut self, pred: &WherePredicate) -> hir::WherePredicate<'hir> { let hir_id = self.lower_node_id(pred.id); let span = self.lower_span(pred.span); + self.lower_attrs(hir_id, &pred.attrs, span); let kind = self.arena.alloc(match &pred.kind { WherePredicateKind::BoundPredicate(WhereBoundPredicate { bound_generic_params, diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index 1c69937eed0..c7d37e2704d 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -32,13 +32,14 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(rust_logo)] #![feature(assert_matches)] #![feature(box_patterns)] +#![feature(exact_size_is_empty)] #![feature(if_let_guard)] #![feature(let_chains)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::sync::Arc; @@ -98,6 +99,8 @@ struct LoweringContext<'a, 'hir> { /// Bodies inside the owner being lowered. bodies: Vec<(hir::ItemLocalId, &'hir hir::Body<'hir>)>, + /// `#[define_opaque]` attributes + define_opaque: Option<&'hir [(Span, LocalDefId)]>, /// Attributes inside the owner being lowered. attrs: SortedMap<hir::ItemLocalId, &'hir [hir::Attribute]>, /// Collect items that were created by lowering the current owner. 
@@ -136,6 +139,7 @@ struct LoweringContext<'a, 'hir> { allow_try_trait: Arc<[Symbol]>, allow_gen_future: Arc<[Symbol]>, + allow_pattern_type: Arc<[Symbol]>, allow_async_iterator: Arc<[Symbol]>, allow_for_await: Arc<[Symbol]>, allow_async_fn_traits: Arc<[Symbol]>, @@ -154,6 +158,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // HirId handling. bodies: Vec::new(), + define_opaque: None, attrs: SortedMap::default(), children: Vec::default(), contract_ensures: None, @@ -176,6 +181,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { impl_trait_defs: Vec::new(), impl_trait_bounds: Vec::new(), allow_try_trait: [sym::try_trait_v2, sym::yeet_desugar_details].into(), + allow_pattern_type: [sym::pattern_types, sym::pattern_type_range_trait].into(), allow_gen_future: if tcx.features().async_fn_track_caller() { [sym::gen_future, sym::closure_track_caller].into() } else { @@ -492,7 +498,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { &mut self, parent: LocalDefId, node_id: ast::NodeId, - name: Symbol, + name: Option<Symbol>, def_kind: DefKind, span: Span, ) -> LocalDefId { @@ -545,6 +551,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let current_attrs = std::mem::take(&mut self.attrs); let current_bodies = std::mem::take(&mut self.bodies); + let current_define_opaque = std::mem::take(&mut self.define_opaque); let current_ident_and_label_to_local_id = std::mem::take(&mut self.ident_and_label_to_local_id); @@ -578,6 +585,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { self.attrs = current_attrs; self.bodies = current_bodies; + self.define_opaque = current_define_opaque; self.ident_and_label_to_local_id = current_ident_and_label_to_local_id; #[cfg(debug_assertions)] @@ -597,6 +605,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { fn make_owner_info(&mut self, node: hir::OwnerNode<'hir>) -> &'hir hir::OwnerInfo<'hir> { let attrs = std::mem::take(&mut self.attrs); let mut bodies = std::mem::take(&mut self.bodies); + let define_opaque = std::mem::take(&mut self.define_opaque); let trait_map = std::mem::take(&mut self.trait_map); #[cfg(debug_assertions)] @@ -616,7 +625,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let num_nodes = self.item_local_id_counter.as_usize(); let (nodes, parenting) = index::index_hir(self.tcx, node, &bodies, num_nodes); let nodes = hir::OwnerNodes { opt_hash_including_bodies, nodes, bodies }; - let attrs = hir::AttributeMap { map: attrs, opt_hash: attrs_hash }; + let attrs = hir::AttributeMap { map: attrs, opt_hash: attrs_hash, define_opaque }; self.arena.alloc(hir::OwnerInfo { nodes, parenting, attrs, trait_map }) } @@ -772,7 +781,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { let _def_id = self.create_def( self.current_hir_id_owner.def_id, param, - kw::UnderscoreLifetime, + Some(kw::UnderscoreLifetime), DefKind::LifetimeParam, ident.span, ); @@ -926,19 +935,26 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { if let Some(first_char) = constraint.ident.as_str().chars().next() && first_char.is_ascii_lowercase() { - let mut err = if !data.inputs.is_empty() { - self.dcx().create_err(errors::BadReturnTypeNotation::Inputs { - span: data.inputs_span, - }) - } else if let FnRetTy::Ty(ty) = &data.output { - self.dcx().create_err(errors::BadReturnTypeNotation::Output { - span: data.inputs_span.shrink_to_hi().to(ty.span), - }) - } else { - self.dcx().create_err(errors::BadReturnTypeNotation::NeedsDots { - span: data.inputs_span, - }) + let err = match (&data.inputs[..], &data.output) { + ([_, ..], FnRetTy::Default(_)) => { + errors::BadReturnTypeNotation::Inputs { span: data.inputs_span } + } + 
([], FnRetTy::Default(_)) => { + errors::BadReturnTypeNotation::NeedsDots { span: data.inputs_span } + } + // The case `T: Trait<method(..) -> Ret>` is handled in the parser. + (_, FnRetTy::Ty(ty)) => { + let span = data.inputs_span.shrink_to_hi().to(ty.span); + errors::BadReturnTypeNotation::Output { + span, + suggestion: errors::RTNSuggestion { + output: span, + input: data.inputs_span, + }, + } + } }; + let mut err = self.dcx().create_err(err); if !self.tcx.features().return_type_notation() && self.tcx.sess.is_nightly_build() { @@ -1086,7 +1102,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { .and_then(|partial_res| partial_res.full_res()) { if !res.matches_ns(Namespace::TypeNS) - && path.is_potential_trivial_const_arg() + && path.is_potential_trivial_const_arg(false) { debug!( "lower_generic_arg: Lowering type argument as const argument: {:?}", @@ -1357,7 +1373,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } } TyKind::Pat(ty, pat) => { - hir::TyKind::Pat(self.lower_ty(ty, itctx), self.lower_ty_pat(pat)) + hir::TyKind::Pat(self.lower_ty(ty, itctx), self.lower_ty_pat(pat, ty.span)) } TyKind::MacCall(_) => { span_bug!(t.span, "`TyKind::MacCall` should have been expanded by now") @@ -2053,8 +2069,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { ) -> &'hir hir::ConstArg<'hir> { let tcx = self.tcx; - // FIXME(min_generic_const_args): we only allow one-segment const paths for now - let ct_kind = if path.is_potential_trivial_const_arg() + let ct_kind = if path + .is_potential_trivial_const_arg(tcx.features().min_generic_const_args()) && (tcx.features().min_generic_const_args() || matches!(res, Res::Def(DefKind::ConstParam, _))) { @@ -2064,7 +2080,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { path, ParamMode::Optional, AllowReturnTypeNotation::No, - // FIXME(min_generic_const_args): update for `fn foo() -> Bar<FOO<impl Trait>>` support + // FIXME(mgca): update for `fn foo() -> Bar<FOO<impl Trait>>` support ImplTraitContext::Disallowed(ImplTraitPosition::Path), None, ); @@ -2080,8 +2096,7 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // We're lowering a const argument that was originally thought to be a type argument, // so the def collector didn't create the def ahead of time. That's why we have to do // it here. 
- let def_id = - self.create_def(parent_def_id, node_id, kw::Empty, DefKind::AnonConst, span); + let def_id = self.create_def(parent_def_id, node_id, None, DefKind::AnonConst, span); let hir_id = self.lower_node_id(node_id); let path_expr = Expr { @@ -2128,19 +2143,18 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { }; let maybe_res = self.resolver.get_partial_res(expr.id).and_then(|partial_res| partial_res.full_res()); - // FIXME(min_generic_const_args): we only allow one-segment const paths for now - if let ExprKind::Path(None, path) = &expr.kind - && path.is_potential_trivial_const_arg() + if let ExprKind::Path(qself, path) = &expr.kind + && path.is_potential_trivial_const_arg(tcx.features().min_generic_const_args()) && (tcx.features().min_generic_const_args() || matches!(maybe_res, Some(Res::Def(DefKind::ConstParam, _)))) { let qpath = self.lower_qpath( expr.id, - &None, + qself, path, ParamMode::Optional, AllowReturnTypeNotation::No, - // FIXME(min_generic_const_args): update for `fn foo() -> Bar<FOO<impl Trait>>` support + // FIXME(mgca): update for `fn foo() -> Bar<FOO<impl Trait>>` support ImplTraitContext::Disallowed(ImplTraitPosition::Path), None, ); diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs index 2dcfe7c745d..07cc64a1358 100644 --- a/compiler/rustc_ast_lowering/src/pat.rs +++ b/compiler/rustc_ast_lowering/src/pat.rs @@ -3,11 +3,11 @@ use std::sync::Arc; use rustc_ast::ptr::P; use rustc_ast::*; use rustc_data_structures::stack::ensure_sufficient_stack; -use rustc_hir as hir; -use rustc_hir::def::Res; +use rustc_hir::def::{DefKind, Res}; +use rustc_hir::{self as hir, LangItem}; use rustc_middle::span_bug; use rustc_span::source_map::{Spanned, respan}; -use rustc_span::{Ident, Span}; +use rustc_span::{DesugaringKind, Ident, Span}; use super::errors::{ ArbitraryExpressionInPattern, ExtraDoubleDot, MisplacedDoubleDot, SubTupleBinding, @@ -430,22 +430,124 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { self.arena.alloc(hir::PatExpr { hir_id: self.lower_node_id(expr.id), span, kind }) } - pub(crate) fn lower_ty_pat(&mut self, pattern: &TyPat) -> &'hir hir::TyPat<'hir> { - self.arena.alloc(self.lower_ty_pat_mut(pattern)) + pub(crate) fn lower_ty_pat( + &mut self, + pattern: &TyPat, + base_type: Span, + ) -> &'hir hir::TyPat<'hir> { + self.arena.alloc(self.lower_ty_pat_mut(pattern, base_type)) } - fn lower_ty_pat_mut(&mut self, pattern: &TyPat) -> hir::TyPat<'hir> { + fn lower_ty_pat_mut(&mut self, pattern: &TyPat, base_type: Span) -> hir::TyPat<'hir> { // loop here to avoid recursion let pat_hir_id = self.lower_node_id(pattern.id); let node = match &pattern.kind { - TyPatKind::Range(e1, e2, Spanned { node: end, .. }) => hir::TyPatKind::Range( - e1.as_deref().map(|e| self.lower_anon_const_to_const_arg(e)), - e2.as_deref().map(|e| self.lower_anon_const_to_const_arg(e)), - self.lower_range_end(end, e2.is_some()), + TyPatKind::Range(e1, e2, Spanned { node: end, span }) => hir::TyPatKind::Range( + e1.as_deref().map(|e| self.lower_anon_const_to_const_arg(e)).unwrap_or_else(|| { + self.lower_ty_pat_range_end( + hir::LangItem::RangeMin, + span.shrink_to_lo(), + base_type, + ) + }), + e2.as_deref() + .map(|e| match end { + RangeEnd::Included(..) 
=> self.lower_anon_const_to_const_arg(e), + RangeEnd::Excluded => self.lower_excluded_range_end(e), + }) + .unwrap_or_else(|| { + self.lower_ty_pat_range_end( + hir::LangItem::RangeMax, + span.shrink_to_hi(), + base_type, + ) + }), ), TyPatKind::Err(guar) => hir::TyPatKind::Err(*guar), }; hir::TyPat { hir_id: pat_hir_id, kind: node, span: self.lower_span(pattern.span) } } + + /// Lowers the range end of an exclusive range (`2..5`) to an inclusive range 2..=(5 - 1). + /// This way the type system doesn't have to handle the distinction between inclusive/exclusive ranges. + fn lower_excluded_range_end(&mut self, e: &AnonConst) -> &'hir hir::ConstArg<'hir> { + let span = self.lower_span(e.value.span); + let unstable_span = self.mark_span_with_reason( + DesugaringKind::PatTyRange, + span, + Some(Arc::clone(&self.allow_pattern_type)), + ); + let anon_const = self.with_new_scopes(span, |this| { + let def_id = this.local_def_id(e.id); + let hir_id = this.lower_node_id(e.id); + let body = this.lower_body(|this| { + // Need to use a custom function as we can't just subtract `1` from a `char`. + let kind = hir::ExprKind::Path(this.make_lang_item_qpath( + hir::LangItem::RangeSub, + unstable_span, + None, + )); + let fn_def = this.arena.alloc(hir::Expr { hir_id: this.next_id(), kind, span }); + let args = this.arena.alloc([this.lower_expr_mut(&e.value)]); + ( + &[], + hir::Expr { + hir_id: this.next_id(), + kind: hir::ExprKind::Call(fn_def, args), + span, + }, + ) + }); + hir::AnonConst { def_id, hir_id, body, span } + }); + self.arena.alloc(hir::ConstArg { + hir_id: self.next_id(), + kind: hir::ConstArgKind::Anon(self.arena.alloc(anon_const)), + }) + } + + /// When a range has no end specified (`1..` or `1..=`) or no start specified (`..5` or `..=5`), + /// we instead use a constant of the MAX/MIN of the type. + /// This way the type system does not have to handle the lack of a start/end. + fn lower_ty_pat_range_end( + &mut self, + lang_item: LangItem, + span: Span, + base_type: Span, + ) -> &'hir hir::ConstArg<'hir> { + let parent_def_id = self.current_hir_id_owner.def_id; + let node_id = self.next_node_id(); + + // Add a definition for the in-band const def. + // We're generating a range end that didn't exist in the AST, + // so the def collector didn't create the def ahead of time. That's why we have to do + // it here. 
+ let def_id = self.create_def(parent_def_id, node_id, None, DefKind::AnonConst, span); + let hir_id = self.lower_node_id(node_id); + + let unstable_span = self.mark_span_with_reason( + DesugaringKind::PatTyRange, + self.lower_span(span), + Some(Arc::clone(&self.allow_pattern_type)), + ); + let span = self.lower_span(base_type); + + let path_expr = hir::Expr { + hir_id: self.next_id(), + kind: hir::ExprKind::Path(self.make_lang_item_qpath(lang_item, unstable_span, None)), + span, + }; + + let ct = self.with_new_scopes(span, |this| { + self.arena.alloc(hir::AnonConst { + def_id, + hir_id, + body: this.lower_body(|_this| (&[], path_expr)), + span, + }) + }); + let hir_id = self.next_id(); + self.arena.alloc(hir::ConstArg { kind: hir::ConstArgKind::Anon(ct), hir_id }) + } } diff --git a/compiler/rustc_ast_lowering/src/path.rs b/compiler/rustc_ast_lowering/src/path.rs index 55d06477313..d00c797755f 100644 --- a/compiler/rustc_ast_lowering/src/path.rs +++ b/compiler/rustc_ast_lowering/src/path.rs @@ -13,7 +13,7 @@ use tracing::{debug, instrument}; use super::errors::{ AsyncBoundNotOnTrait, AsyncBoundOnlyForFnTraits, BadReturnTypeNotation, - GenericTypeWithParentheses, UseAngleBrackets, + GenericTypeWithParentheses, RTNSuggestion, UseAngleBrackets, }; use super::{ AllowReturnTypeNotation, GenericArgsCtor, GenericArgsMode, ImplTraitContext, ImplTraitPosition, @@ -268,19 +268,26 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { } GenericArgs::Parenthesized(data) => match generic_args_mode { GenericArgsMode::ReturnTypeNotation => { - let mut err = if !data.inputs.is_empty() { - self.dcx().create_err(BadReturnTypeNotation::Inputs { - span: data.inputs_span, - }) - } else if let FnRetTy::Ty(ty) = &data.output { - self.dcx().create_err(BadReturnTypeNotation::Output { - span: data.inputs_span.shrink_to_hi().to(ty.span), - }) - } else { - self.dcx().create_err(BadReturnTypeNotation::NeedsDots { - span: data.inputs_span, - }) + let err = match (&data.inputs[..], &data.output) { + ([_, ..], FnRetTy::Default(_)) => { + BadReturnTypeNotation::Inputs { span: data.inputs_span } + } + ([], FnRetTy::Default(_)) => { + BadReturnTypeNotation::NeedsDots { span: data.inputs_span } + } + // The case `T: Trait<method(..) -> Ret>` is handled in the parser. + (_, FnRetTy::Ty(ty)) => { + let span = data.inputs_span.shrink_to_hi().to(ty.span); + BadReturnTypeNotation::Output { + span, + suggestion: RTNSuggestion { + output: span, + input: data.inputs_span, + }, + } + } }; + let mut err = self.dcx().create_err(err); if !self.tcx.features().return_type_notation() && self.tcx.sess.is_nightly_build() { diff --git a/compiler/rustc_ast_passes/src/ast_validation.rs b/compiler/rustc_ast_passes/src/ast_validation.rs index f9f4035cb22..232d60be4eb 100644 --- a/compiler/rustc_ast_passes/src/ast_validation.rs +++ b/compiler/rustc_ast_passes/src/ast_validation.rs @@ -917,7 +917,10 @@ impl<'a> Visitor<'a> for AstValidator<'a> { walk_list!(self, visit_attribute, &item.attrs); return; // Avoid visiting again. 
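The pattern-type range lowering in `rustc_ast_lowering/src/pat.rs` above always hands the type system a fully specified inclusive range: a missing start is filled with the type's minimum (`RangeMin`), a missing end with its maximum (`RangeMax`), and an exclusive end is rewritten to `end - 1` through the `RangeSub` lang item. A minimal stand-alone sketch of that normalization, using plain `u8` arithmetic in place of the lang-item constants and anonymous consts:

```rust
// Sketch only: models the bound-filling described above for a hypothetical `u8` base type;
// the real lowering emits lang-item paths inside anonymous consts instead of doing arithmetic.
fn normalize_range(start: Option<u8>, end: Option<u8>, end_inclusive: bool) -> (u8, u8) {
    let lo = start.unwrap_or(u8::MIN); // no start written: the type's MIN (RangeMin)
    let hi = match end {
        Some(e) if end_inclusive => e, // `..=e` is already in the desired form
        Some(e) => e - 1,              // `..e` becomes `..=e - 1` (RangeSub)
        None => u8::MAX,               // no end written: the type's MAX (RangeMax)
    };
    (lo, hi)
}
```

For example, `1..5` normalizes to `(1, 4)` and `..=9` to `(0, 9)`, so later stages only ever see inclusive ranges with both endpoints present.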
} - ItemKind::Fn(func @ box Fn { defaultness, generics: _, sig, contract: _, body }) => { + ItemKind::Fn( + func + @ box Fn { defaultness, generics: _, sig, contract: _, body, define_opaque: _ }, + ) => { self.check_defaultness(item.span, *defaultness); let is_intrinsic = diff --git a/compiler/rustc_ast_passes/src/errors.rs b/compiler/rustc_ast_passes/src/errors.rs index 9f0d2325475..8e53e600f7a 100644 --- a/compiler/rustc_ast_passes/src/errors.rs +++ b/compiler/rustc_ast_passes/src/errors.rs @@ -535,7 +535,6 @@ pub(crate) struct WhereClauseBeforeTypeAlias { } #[derive(Subdiagnostic)] - pub(crate) enum WhereClauseBeforeTypeAliasSugg { #[suggestion(ast_passes_remove_suggestion, applicability = "machine-applicable", code = "")] Remove { diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs index 0f80e49320e..31ff102c127 100644 --- a/compiler/rustc_ast_passes/src/feature_gate.rs +++ b/compiler/rustc_ast_passes/src/feature_gate.rs @@ -489,6 +489,7 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) { gate_all!(dyn_star, "`dyn*` trait objects are experimental"); gate_all!(const_closures, "const closures are experimental"); gate_all!(builtin_syntax, "`builtin #` syntax is unstable"); + gate_all!(ergonomic_clones, "ergonomic clones are experimental"); gate_all!(explicit_tail_calls, "`become` expression is experimental"); gate_all!(generic_const_items, "generic const items are experimental"); gate_all!(guard_patterns, "guard patterns are experimental", "consider using match arm guards"); @@ -503,6 +504,7 @@ pub fn check_crate(krate: &ast::Crate, sess: &Session, features: &Features) { gate_all!(unsafe_binders, "unsafe binder types are experimental"); gate_all!(contracts, "contracts are incomplete"); gate_all!(contracts_internals, "contract internal machinery is for internal use only"); + gate_all!(where_clause_attrs, "attributes in `where` clause are unstable"); if !visitor.features.never_patterns() { if let Some(spans) = spans.get(&sym::never_patterns) { diff --git a/compiler/rustc_ast_passes/src/lib.rs b/compiler/rustc_ast_passes/src/lib.rs index b4ed70d83e5..093199cf342 100644 --- a/compiler/rustc_ast_passes/src/lib.rs +++ b/compiler/rustc_ast_passes/src/lib.rs @@ -10,7 +10,6 @@ #![feature(iter_is_partitioned)] #![feature(let_chains)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod ast_validation; diff --git a/compiler/rustc_ast_pretty/src/lib.rs b/compiler/rustc_ast_pretty/src/lib.rs index 602ab69ee5b..84d9ce278a2 100644 --- a/compiler/rustc_ast_pretty/src/lib.rs +++ b/compiler/rustc_ast_pretty/src/lib.rs @@ -3,7 +3,6 @@ #![doc(rust_logo)] #![feature(box_patterns)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod helpers; diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 44e956dc37f..a8eaff7346b 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -11,9 +11,7 @@ use std::sync::Arc; use rustc_ast::attr::AttrIdGenerator; use rustc_ast::ptr::P; -use rustc_ast::token::{ - self, BinOpToken, CommentKind, Delimiter, IdentIsRaw, Nonterminal, Token, TokenKind, -}; +use rustc_ast::token::{self, CommentKind, Delimiter, IdentIsRaw, Nonterminal, Token, TokenKind}; use rustc_ast::tokenstream::{Spacing, TokenStream, TokenTree}; use rustc_ast::util::classify; use rustc_ast::util::comments::{Comment, CommentStyle}; @@ -26,7 +24,6 
@@ use rustc_span::edition::Edition; use rustc_span::source_map::{SourceMap, Spanned}; use rustc_span::symbol::IdentPrinter; use rustc_span::{BytePos, CharPos, DUMMY_SP, FileName, Ident, Pos, Span, Symbol, kw, sym}; -use thin_vec::ThinVec; use crate::pp::Breaks::{Consistent, Inconsistent}; use crate::pp::{self, Breaks}; @@ -319,7 +316,7 @@ fn space_between(tt1: &TokenTree, tt2: &TokenTree) -> bool { (tt1, Tok(Token { kind: Comma | Semi | Dot, .. }, _)) if !is_punct(tt1) => false, // IDENT + `!`: `println!()`, but `if !x { ... }` needs a space after the `if` - (Tok(Token { kind: Ident(sym, is_raw), span }, _), Tok(Token { kind: Not, .. }, _)) + (Tok(Token { kind: Ident(sym, is_raw), span }, _), Tok(Token { kind: Bang, .. }, _)) if !Ident::new(*sym, *span).is_reserved() || matches!(is_raw, IdentIsRaw::Yes) => { false @@ -344,21 +341,6 @@ fn space_between(tt1: &TokenTree, tt2: &TokenTree) -> bool { } } -fn binop_to_string(op: BinOpToken) -> &'static str { - match op { - token::Plus => "+", - token::Minus => "-", - token::Star => "*", - token::Slash => "/", - token::Percent => "%", - token::Caret => "^", - token::And => "&", - token::Or => "|", - token::Shl => "<<", - token::Shr => ">>", - } -} - pub fn doc_comment_to_string( comment_kind: CommentKind, attr_style: ast::AttrStyle, @@ -913,12 +895,30 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere token::Ne => "!=".into(), token::Ge => ">=".into(), token::Gt => ">".into(), - token::Not => "!".into(), + token::Bang => "!".into(), token::Tilde => "~".into(), token::OrOr => "||".into(), token::AndAnd => "&&".into(), - token::BinOp(op) => binop_to_string(op).into(), - token::BinOpEq(op) => format!("{}=", binop_to_string(op)).into(), + token::Plus => "+".into(), + token::Minus => "-".into(), + token::Star => "*".into(), + token::Slash => "/".into(), + token::Percent => "%".into(), + token::Caret => "^".into(), + token::And => "&".into(), + token::Or => "|".into(), + token::Shl => "<<".into(), + token::Shr => ">>".into(), + token::PlusEq => "+=".into(), + token::MinusEq => "-=".into(), + token::StarEq => "*=".into(), + token::SlashEq => "/=".into(), + token::PercentEq => "%=".into(), + token::CaretEq => "^=".into(), + token::AndEq => "&=".into(), + token::OrEq => "|=".into(), + token::ShlEq => "<<=".into(), + token::ShrEq => ">>=".into(), /* Structural symbols */ token::At => "@".into(), @@ -1782,6 +1782,13 @@ impl<'a> State<'a> { self.print_mutability(*m, false); self.word("self") } + SelfKind::Pinned(lt, m) => { + self.word("&"); + self.print_opt_lifetime(lt); + self.word("pin "); + self.print_mutability(*m, true); + self.word("self") + } SelfKind::Explicit(typ, m) => { self.print_mutability(*m, false); self.word("self"); @@ -1970,15 +1977,7 @@ impl<'a> State<'a> { ) { self.ibox(INDENT_UNIT); self.print_formal_generic_params(generic_params); - let generics = ast::Generics { - params: ThinVec::new(), - where_clause: ast::WhereClause { - has_where_token: false, - predicates: ThinVec::new(), - span: DUMMY_SP, - }, - span: DUMMY_SP, - }; + let generics = ast::Generics::default(); let header = ast::FnHeader { safety, ext, ..ast::FnHeader::default() }; self.print_fn(decl, header, name, &generics); self.end(); diff --git a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs index 4b1374ceef3..e3c41f117ab 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/expr.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/expr.rs @@ -5,7 +5,7 @@ use itertools::{Itertools, 
Position}; use rustc_ast::ptr::P; use rustc_ast::util::classify; use rustc_ast::util::literal::escape_byte_str_symbol; -use rustc_ast::util::parser::{self, AssocOp, ExprPrecedence, Fixity}; +use rustc_ast::util::parser::{self, ExprPrecedence, Fixity}; use rustc_ast::{ self as ast, BlockCheckMode, FormatAlignment, FormatArgPosition, FormatArgsPiece, FormatCount, FormatDebugHex, FormatSign, FormatTrait, token, @@ -279,12 +279,11 @@ impl<'a> State<'a> { rhs: &ast::Expr, fixup: FixupContext, ) { - let assoc_op = AssocOp::from_ast_binop(op.node); - let binop_prec = assoc_op.precedence(); + let binop_prec = op.node.precedence(); let left_prec = lhs.precedence(); let right_prec = rhs.precedence(); - let (mut left_needs_paren, right_needs_paren) = match assoc_op.fixity() { + let (mut left_needs_paren, right_needs_paren) = match op.node.fixity() { Fixity::Left => (left_prec < binop_prec, right_prec <= binop_prec), Fixity::Right => (left_prec <= binop_prec, right_prec < binop_prec), Fixity::None => (left_prec <= binop_prec, right_prec <= binop_prec), @@ -575,6 +574,14 @@ impl<'a> State<'a> { ); self.word(".await"); } + ast::ExprKind::Use(expr, _) => { + self.print_expr_cond_paren( + expr, + expr.precedence() < ExprPrecedence::Unambiguous, + fixup, + ); + self.word(".use"); + } ast::ExprKind::Assign(lhs, rhs, _) => { self.print_expr_cond_paren( lhs, @@ -886,6 +893,7 @@ impl<'a> State<'a> { fn print_capture_clause(&mut self, capture_clause: ast::CaptureBy) { match capture_clause { ast::CaptureBy::Value { .. } => self.word_space("move"), + ast::CaptureBy::Use { .. } => self.word_space("use"), ast::CaptureBy::Ref => {} } } diff --git a/compiler/rustc_ast_pretty/src/pprust/state/item.rs b/compiler/rustc_ast_pretty/src/pprust/state/item.rs index c10b5ad34e1..6236f8ecfb5 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state/item.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state/item.rs @@ -650,7 +650,16 @@ impl<'a> State<'a> { attrs: &[ast::Attribute], func: &ast::Fn, ) { - let ast::Fn { defaultness, generics, sig, contract, body } = func; + let ast::Fn { defaultness, generics, sig, contract, body, define_opaque } = func; + + if let Some(define_opaque) = define_opaque { + for (_, path) in define_opaque { + self.word("define opaques from "); + self.print_path(path, false, 0); + self.word(","); + } + } + if body.is_some() { self.head(""); } @@ -698,7 +707,7 @@ impl<'a> State<'a> { } self.print_generic_params(&generics.params); self.print_fn_params_and_ret(decl, false); - self.print_where_clause(&generics.where_clause) + self.print_where_clause(&generics.where_clause); } pub(crate) fn print_fn_params_and_ret(&mut self, decl: &ast::FnDecl, is_closure: bool) { @@ -735,7 +744,8 @@ impl<'a> State<'a> { } pub fn print_where_predicate(&mut self, predicate: &ast::WherePredicate) { - let ast::WherePredicate { kind, id: _, span: _ } = predicate; + let ast::WherePredicate { attrs, kind, id: _, span: _, is_placeholder: _ } = predicate; + self.print_outer_attributes(attrs); match kind { ast::WherePredicateKind::BoundPredicate(where_bound_predicate) => { self.print_where_bound_predicate(where_bound_predicate); diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs index 9ac8de0227d..d2d1285b075 100644 --- a/compiler/rustc_attr_data_structures/src/attributes.rs +++ b/compiler/rustc_attr_data_structures/src/attributes.rs @@ -138,13 +138,14 @@ impl Deprecation { } } -/// Attributes represent parsed, *built in*, inert attributes. 
That means, -/// attributes that are not actually ever expanded. -/// For more information on this, see the module docs on the rustc_attr_parsing crate. +/// Represents parsed, *built-in*, inert attributes. +/// +/// That means attributes that are not actually ever expanded. +/// For more information on this, see the module docs on the [`rustc_attr_parsing`] crate. /// They're instead used as markers, to guide the compilation process in various ways in almost every stage of the compiler. /// These are kept around after the AST, into the HIR and further on. /// -/// The word parsed could be a little misleading here, because the parser already parses +/// The word "parsed" could be a little misleading here, because the parser already parses /// attributes early on. However, the result, an [`ast::Attribute`], /// is only parsed at a high level, still containing a token stream in many cases. That is /// because the structure of the contents varies from attribute to attribute. @@ -153,7 +154,9 @@ impl Deprecation { /// the place where `must_use` is checked) little to no extra parsing or validating needs to /// happen. /// -/// For more docs, look in [`rustc_attr`](https://doc.rust-lang.org/stable/nightly-rustc/rustc_attr/index.html) +/// For more docs, look in [`rustc_attr_parsing`]. +/// +/// [`rustc_attr_parsing`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_attr_parsing/index.html #[derive(Clone, Debug, HashStable_Generic, Encodable, Decodable, PrintAttribute)] pub enum AttributeKind { // tidy-alphabetical-start diff --git a/compiler/rustc_attr_data_structures/src/lib.rs b/compiler/rustc_attr_data_structures/src/lib.rs index e4bb459e6df..bbd3308809c 100644 --- a/compiler/rustc_attr_data_structures/src/lib.rs +++ b/compiler/rustc_attr_data_structures/src/lib.rs @@ -3,7 +3,6 @@ #![doc(rust_logo)] #![feature(let_chains)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod attributes; @@ -35,13 +34,17 @@ pub trait HashStableContext: rustc_ast::HashStableContext + rustc_abi::HashStabl /// like [`Span`]s and empty tuples, are gracefully skipped so they don't clutter the /// representation much. pub trait PrintAttribute { - fn print_something(&self) -> bool; + /// Whether or not this will render as something meaningful, or if it's skipped + /// (which will force the containing struct to also skip printing a comma + /// and the field name).
+ fn should_render(&self) -> bool; + fn print_attribute(&self, p: &mut Printer); } impl<T: PrintAttribute> PrintAttribute for &T { - fn print_something(&self) -> bool { - T::print_something(self) + fn should_render(&self) -> bool { + T::should_render(self) } fn print_attribute(&self, p: &mut Printer) { @@ -49,9 +52,10 @@ impl<T: PrintAttribute> PrintAttribute for &T { } } impl<T: PrintAttribute> PrintAttribute for Option<T> { - fn print_something(&self) -> bool { - self.as_ref().is_some_and(|x| x.print_something()) + fn should_render(&self) -> bool { + self.as_ref().is_some_and(|x| x.should_render()) } + fn print_attribute(&self, p: &mut Printer) { if let Some(i) = self { T::print_attribute(i, p) @@ -59,9 +63,10 @@ impl<T: PrintAttribute> PrintAttribute for Option<T> { } } impl<T: PrintAttribute> PrintAttribute for ThinVec<T> { - fn print_something(&self) -> bool { - self.is_empty() || self[0].print_something() + fn should_render(&self) -> bool { + self.is_empty() || self[0].should_render() } + fn print_attribute(&self, p: &mut Printer) { let mut last_printed = false; p.word("["); @@ -70,7 +75,7 @@ impl<T: PrintAttribute> PrintAttribute for ThinVec<T> { p.word_space(","); } i.print_attribute(p); - last_printed = i.print_something(); + last_printed = i.should_render(); } p.word("]"); } @@ -78,7 +83,7 @@ impl<T: PrintAttribute> PrintAttribute for ThinVec<T> { macro_rules! print_skip { ($($t: ty),* $(,)?) => {$( impl PrintAttribute for $t { - fn print_something(&self) -> bool { false } + fn should_render(&self) -> bool { false } fn print_attribute(&self, _: &mut Printer) { } })* }; @@ -87,7 +92,7 @@ macro_rules! print_skip { macro_rules! print_disp { ($($t: ty),* $(,)?) => {$( impl PrintAttribute for $t { - fn print_something(&self) -> bool { true } + fn should_render(&self) -> bool { true } fn print_attribute(&self, p: &mut Printer) { p.word(format!("{}", self)); } @@ -97,7 +102,7 @@ macro_rules! print_disp { macro_rules! print_debug { ($($t: ty),* $(,)?) => {$( impl PrintAttribute for $t { - fn print_something(&self) -> bool { true } + fn should_render(&self) -> bool { true } fn print_attribute(&self, p: &mut Printer) { p.word(format!("{:?}", self)); } @@ -106,37 +111,39 @@ macro_rules! print_debug { } macro_rules! print_tup { - (num_print_something $($ts: ident)*) => { 0 $(+ $ts.print_something() as usize)* }; + (num_should_render $($ts: ident)*) => { 0 $(+ $ts.should_render() as usize)* }; () => {}; ($t: ident $($ts: ident)*) => { #[allow(non_snake_case, unused)] impl<$t: PrintAttribute, $($ts: PrintAttribute),*> PrintAttribute for ($t, $($ts),*) { - fn print_something(&self) -> bool { + fn should_render(&self) -> bool { let ($t, $($ts),*) = self; - print_tup!(num_print_something $t $($ts)*) != 0 + print_tup!(num_should_render $t $($ts)*) != 0 } fn print_attribute(&self, p: &mut Printer) { let ($t, $($ts),*) = self; - let parens = print_tup!(num_print_something $t $($ts)*) > 1; + let parens = print_tup!(num_should_render $t $($ts)*) > 1; if parens { - p.word("("); + p.popen(); } - let mut printed_anything = $t.print_something(); + let mut printed_anything = $t.should_render(); $t.print_attribute(p); $( - if printed_anything && $ts.print_something() { - p.word_space(","); + if $ts.should_render() { + if printed_anything { + p.word_space(","); + } printed_anything = true; } $ts.print_attribute(p); )* if parens { - p.word(")"); + p.pclose(); } } } @@ -147,5 +154,49 @@ macro_rules! 
print_tup { print_tup!(A B C D E F G H); print_skip!(Span, ()); -print_disp!(Symbol, u16, bool, NonZero<u32>); -print_debug!(UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency); +print_disp!(u16, bool, NonZero<u32>); +print_debug!(Symbol, UintTy, IntTy, Align, AttrStyle, CommentKind, Transparency); + +/// Finds attributes in sequences of attributes by pattern matching. +/// +/// A little like `matches` but for attributes. +/// +/// ```rust,ignore (illustrative) +/// // finds the repr attribute +/// if let Some(r) = find_attr!(attrs, AttributeKind::Repr(r) => r) { +/// +/// } +/// +/// // checks if one has matched +/// if find_attr!(attrs, AttributeKind::Repr(_)) { +/// +/// } +/// ``` +/// +/// Often this requires you to first end up with a list of attributes. +/// A common way to get those is through `tcx.get_all_attrs(did)` +#[macro_export] +macro_rules! find_attr { + ($attributes_list: expr, $pattern: pat $(if $guard: expr)?) => {{ + $crate::find_attr!($attributes_list, $pattern $(if $guard)? => ()).is_some() + }}; + + ($attributes_list: expr, $pattern: pat $(if $guard: expr)? => $e: expr) => {{ + fn check_attribute_iterator<'a>(_: &'_ impl IntoIterator<Item = &'a rustc_hir::Attribute>) {} + check_attribute_iterator(&$attributes_list); + + let find_attribute = |iter| { + for i in $attributes_list { + match i { + rustc_hir::Attribute::Parsed($pattern) $(if $guard)? => { + return Some($e); + } + _ => {} + } + } + + None + }; + find_attribute($attributes_list) + }}; +} diff --git a/compiler/rustc_attr_parsing/Cargo.toml b/compiler/rustc_attr_parsing/Cargo.toml index c335eeb5f71..6b4ce957630 100644 --- a/compiler/rustc_attr_parsing/Cargo.toml +++ b/compiler/rustc_attr_parsing/Cargo.toml @@ -16,6 +16,7 @@ rustc_fluent_macro = { path = "../rustc_fluent_macro" } rustc_hir = { path = "../rustc_hir" } rustc_lexer = { path = "../rustc_lexer" } rustc_macros = { path = "../rustc_macros" } +rustc_middle = { path = "../rustc_middle" } rustc_serialize = { path = "../rustc_serialize" } rustc_session = { path = "../rustc_session" } rustc_span = { path = "../rustc_span" } diff --git a/compiler/rustc_attr_parsing/src/context.rs b/compiler/rustc_attr_parsing/src/context.rs index 99eee0d3c4a..35541bb04bd 100644 --- a/compiler/rustc_attr_parsing/src/context.rs +++ b/compiler/rustc_attr_parsing/src/context.rs @@ -9,7 +9,6 @@ use rustc_errors::{DiagCtxtHandle, Diagnostic}; use rustc_feature::Features; use rustc_hir::{AttrArgs, AttrItem, AttrPath, Attribute, HashIgnoredAttrId}; use rustc_session::Session; -use rustc_span::symbol::kw; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym}; use crate::attributes::allow_unstable::{AllowConstFnUnstableParser, AllowInternalUnstableParser}; @@ -333,9 +332,12 @@ impl<'sess> AttributeParser<'sess> { { lit } else { - let guar = self.dcx().has_errors().unwrap(); + let guar = self.dcx().span_delayed_bug( + args.span().unwrap_or(DUMMY_SP), + "expr in place where literal is expected (builtin attr parsing)", + ); ast::MetaItemLit { - symbol: kw::Empty, + symbol: sym::dummy, suffix: None, kind: ast::LitKind::Err(guar), span: DUMMY_SP, diff --git a/compiler/rustc_attr_parsing/src/lib.rs b/compiler/rustc_attr_parsing/src/lib.rs index 9841166b37d..a7465847e18 100644 --- a/compiler/rustc_attr_parsing/src/lib.rs +++ b/compiler/rustc_attr_parsing/src/lib.rs @@ -77,10 +77,10 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(rust_logo)] #![feature(let_chains)] 
#![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end #[macro_use] @@ -95,47 +95,3 @@ pub use context::{AttributeParser, OmitDoc}; pub use rustc_attr_data_structures::*; rustc_fluent_macro::fluent_messages! { "../messages.ftl" } - -/// Finds attributes in sequences of attributes by pattern matching. -/// -/// A little like `matches` but for attributes. -/// -/// ```rust,ignore (illustrative) -/// // finds the repr attribute -/// if let Some(r) = find_attr!(attrs, AttributeKind::Repr(r) => r) { -/// -/// } -/// -/// // checks if one has matched -/// if find_attr!(attrs, AttributeKind::Repr(_)) { -/// -/// } -/// ``` -/// -/// Often this requires you to first end up with a list of attributes. -/// A common way to get those is through `tcx.get_all_attrs(did)` -#[macro_export] -macro_rules! find_attr { - ($attributes_list: expr, $pattern: pat $(if $guard: expr)?) => {{ - $crate::find_attr!($attributes_list, $pattern $(if $guard)? => ()).is_some() - }}; - - ($attributes_list: expr, $pattern: pat $(if $guard: expr)? => $e: expr) => {{ - fn check_attribute_iterator<'a>(_: &'_ impl IntoIterator<Item = &'a rustc_hir::Attribute>) {} - check_attribute_iterator(&$attributes_list); - - let find_attribute = |iter| { - for i in $attributes_list { - match i { - rustc_hir::Attribute::Parsed($pattern) $(if $guard)? => { - return Some($e); - } - _ => {} - } - } - - None - }; - find_attribute($attributes_list) - }}; -} diff --git a/compiler/rustc_attr_parsing/src/parser.rs b/compiler/rustc_attr_parsing/src/parser.rs index 0ee0ea4ea59..a8a1460591c 100644 --- a/compiler/rustc_attr_parsing/src/parser.rs +++ b/compiler/rustc_attr_parsing/src/parser.rs @@ -12,8 +12,8 @@ use rustc_ast::{AttrArgs, DelimArgs, Expr, ExprKind, LitKind, MetaItemLit, Norma use rustc_ast_pretty::pprust; use rustc_errors::DiagCtxtHandle; use rustc_hir::{self as hir, AttrPath}; -use rustc_span::symbol::{Ident, kw}; -use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol}; +use rustc_span::symbol::{Ident, kw, sym}; +use rustc_span::{ErrorGuaranteed, Span, Symbol}; pub struct SegmentIterator<'a> { offset: usize, @@ -127,7 +127,7 @@ impl<'a> ArgParser<'a> { } AttrArgs::Eq { eq_span, expr } => Self::NameValue(NameValueParser { eq_span: *eq_span, - value: expr_to_lit(dcx, &expr), + value: expr_to_lit(dcx, &expr, *eq_span), value_span: expr.span, }), } @@ -348,7 +348,7 @@ impl NameValueParser { } } -fn expr_to_lit(dcx: DiagCtxtHandle<'_>, expr: &Expr) -> MetaItemLit { +fn expr_to_lit(dcx: DiagCtxtHandle<'_>, expr: &Expr, span: Span) -> MetaItemLit { // In valid code the value always ends up as a single literal. Otherwise, a dummy // literal suffices because the error is handled elsewhere. 
if let ExprKind::Lit(token_lit) = expr.kind @@ -356,8 +356,11 @@ fn expr_to_lit(dcx: DiagCtxtHandle<'_>, expr: &Expr) -> MetaItemLit { { lit } else { - let guar = dcx.has_errors().unwrap(); - MetaItemLit { symbol: kw::Empty, suffix: None, kind: LitKind::Err(guar), span: DUMMY_SP } + let guar = dcx.span_delayed_bug( + span, + "expr in place where literal is expected (builtin attr parsing)", + ); + MetaItemLit { symbol: sym::dummy, suffix: None, kind: LitKind::Err(guar), span } } } @@ -470,45 +473,34 @@ impl<'a> MetaItemListParserContext<'a> { { self.inside_delimiters.next(); return Some(MetaItemOrLitParser::Lit(lit)); + } else if let Some(TokenTree::Delimited(.., Delimiter::Invisible(_), inner_tokens)) = + self.inside_delimiters.peek() + { + self.inside_delimiters.next(); + return MetaItemListParserContext { + inside_delimiters: inner_tokens.iter().peekable(), + dcx: self.dcx, + } + .next(); } // or a path. let path = - if let Some(TokenTree::Token(Token { kind: token::Interpolated(nt), span, .. }, _)) = + if let Some(TokenTree::Token(Token { kind: token::Interpolated(_), span, .. }, _)) = self.inside_delimiters.peek() { - match &**nt { - // or maybe a full nt meta including the path but we return immediately - token::Nonterminal::NtMeta(item) => { - self.inside_delimiters.next(); - - return Some(MetaItemOrLitParser::MetaItemParser(MetaItemParser { - path: PathParser::Ast(&item.path), - args: ArgParser::from_attr_args(&item.args, self.dcx), - })); - } - // an already interpolated path from a macro expansion is a path, no need to parse - // one from tokens - token::Nonterminal::NtPath(path) => { - self.inside_delimiters.next(); - - AttrPath::from_ast(path) - } - _ => { - self.inside_delimiters.next(); - // we go into this path if an expr ended up in an attribute that - // expansion did not turn into a literal. Say, `#[repr(align(macro!()))]` - // where the macro didn't expand to a literal. An error is already given - // for this at this point, and then we do continue. This makes this path - // reachable... - let e = self.dcx.span_delayed_bug( - *span, - "expr in place where literal is expected (builtin attr parsing)", - ); - - return Some(MetaItemOrLitParser::Err(*span, e)); - } - } + self.inside_delimiters.next(); + // We go into this path if an expr ended up in an attribute that + // expansion did not turn into a literal. Say, `#[repr(align(macro!()))]` + // where the macro didn't expand to a literal. An error is already given + // for this at this point, and then we do continue. This makes this path + // reachable... + let e = self.dcx.span_delayed_bug( + *span, + "expr in place where literal is expected (builtin attr parsing)", + ); + + return Some(MetaItemOrLitParser::Err(*span, e)); } else { self.next_path()? 
}; diff --git a/compiler/rustc_baked_icu_data/src/lib.rs b/compiler/rustc_baked_icu_data/src/lib.rs index f86a9db61c6..f3f6522f575 100644 --- a/compiler/rustc_baked_icu_data/src/lib.rs +++ b/compiler/rustc_baked_icu_data/src/lib.rs @@ -23,6 +23,7 @@ // tidy-alphabetical-start #![allow(elided_lifetimes_in_paths)] #![allow(internal_features)] +#![allow(unreachable_pub)] // because this crate is mostly generated code #![doc(rust_logo)] #![feature(rustdoc_internals)] // #![warn(unreachable_pub)] // don't use because this crate is mostly generated code diff --git a/compiler/rustc_borrowck/src/borrowck_errors.rs b/compiler/rustc_borrowck/src/borrowck_errors.rs index 30e94b0bec7..c9be5575da5 100644 --- a/compiler/rustc_borrowck/src/borrowck_errors.rs +++ b/compiler/rustc_borrowck/src/borrowck_errors.rs @@ -403,6 +403,7 @@ impl<'infcx, 'tcx> crate::MirBorrowckCtxt<'_, 'infcx, 'tcx> { .expect_closure(); let span = match capture_clause { rustc_hir::CaptureBy::Value { move_kw } => move_kw.shrink_to_lo(), + rustc_hir::CaptureBy::Use { use_kw } => use_kw.shrink_to_lo(), rustc_hir::CaptureBy::Ref => fn_decl_span.shrink_to_lo(), }; diag.span_suggestion_verbose( diff --git a/compiler/rustc_borrowck/src/constraints/graph.rs b/compiler/rustc_borrowck/src/constraints/graph.rs index 9d500586f08..1209d8bf596 100644 --- a/compiler/rustc_borrowck/src/constraints/graph.rs +++ b/compiler/rustc_borrowck/src/constraints/graph.rs @@ -1,11 +1,8 @@ use rustc_data_structures::graph; use rustc_index::IndexVec; -use rustc_middle::mir::ConstraintCategory; -use rustc_middle::ty::{RegionVid, VarianceDiagInfo}; -use rustc_span::DUMMY_SP; +use rustc_middle::ty::RegionVid; use crate::constraints::{OutlivesConstraint, OutlivesConstraintIndex, OutlivesConstraintSet}; -use crate::type_check::Locations; /// The construct graph organizes the constraints by their end-points. /// It can be used to view a `R1: R2` constraint as either an edge `R1 @@ -23,8 +20,8 @@ pub(crate) type ReverseConstraintGraph = ConstraintGraph<Reverse>; /// Marker trait that controls whether a `R1: R2` constraint /// represents an edge `R1 -> R2` or `R2 -> R1`. 
pub(crate) trait ConstraintGraphDirection: Copy + 'static { - fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid; - fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid; + fn start_region(sup: RegionVid, sub: RegionVid) -> RegionVid; + fn end_region(sup: RegionVid, sub: RegionVid) -> RegionVid; fn is_normal() -> bool; } @@ -36,12 +33,12 @@ pub(crate) trait ConstraintGraphDirection: Copy + 'static { pub(crate) struct Normal; impl ConstraintGraphDirection for Normal { - fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid { - c.sup + fn start_region(sup: RegionVid, _sub: RegionVid) -> RegionVid { + sup } - fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid { - c.sub + fn end_region(_sup: RegionVid, sub: RegionVid) -> RegionVid { + sub } fn is_normal() -> bool { @@ -57,12 +54,12 @@ impl ConstraintGraphDirection for Normal { pub(crate) struct Reverse; impl ConstraintGraphDirection for Reverse { - fn start_region(c: &OutlivesConstraint<'_>) -> RegionVid { - c.sub + fn start_region(_sup: RegionVid, sub: RegionVid) -> RegionVid { + sub } - fn end_region(c: &OutlivesConstraint<'_>) -> RegionVid { - c.sup + fn end_region(sup: RegionVid, _sub: RegionVid) -> RegionVid { + sup } fn is_normal() -> bool { @@ -84,7 +81,7 @@ impl<D: ConstraintGraphDirection> ConstraintGraph<D> { let mut next_constraints = IndexVec::from_elem(None, &set.outlives); for (idx, constraint) in set.outlives.iter_enumerated().rev() { - let head = &mut first_constraints[D::start_region(constraint)]; + let head = &mut first_constraints[D::start_region(constraint.sup, constraint.sub)]; let next = &mut next_constraints[idx]; debug_assert!(next.is_none()); *next = *head; @@ -105,63 +102,57 @@ impl<D: ConstraintGraphDirection> ConstraintGraph<D> { RegionGraph::new(set, self, static_region) } + pub(crate) fn is_normal(&self) -> bool { + D::is_normal() + } + /// Given a region `R`, iterate over all constraints `R: R1`. - pub(crate) fn outgoing_edges<'a, 'tcx>( + pub(crate) fn outgoing_edges_from_graph<'a, 'tcx>( &'a self, region_sup: RegionVid, constraints: &'a OutlivesConstraintSet<'tcx>, - static_region: RegionVid, - ) -> Edges<'a, 'tcx, D> { - //if this is the `'static` region and the graph's direction is normal, - //then setup the Edges iterator to return all regions #53178 - if region_sup == static_region && D::is_normal() { - Edges { - graph: self, - constraints, - pointer: None, - next_static_idx: Some(0), - static_region, - } - } else { - //otherwise, just setup the iterator as normal - let first = self.first_constraints[region_sup]; - Edges { graph: self, constraints, pointer: first, next_static_idx: None, static_region } - } + ) -> EdgesFromGraph<'a, 'tcx, D> { + EdgesFromGraph { graph: self, constraints, pointer: self.first_constraints[region_sup] } + } + + /// Returns all regions (#53178). 
+ pub(crate) fn outgoing_edges_from_static(&self) -> EdgesFromStatic { + EdgesFromStatic { next_static_idx: 0, end_static_idx: self.first_constraints.len() } } } -pub(crate) struct Edges<'a, 'tcx, D: ConstraintGraphDirection> { +pub(crate) struct EdgesFromGraph<'a, 'tcx, D: ConstraintGraphDirection> { graph: &'a ConstraintGraph<D>, constraints: &'a OutlivesConstraintSet<'tcx>, pointer: Option<OutlivesConstraintIndex>, - next_static_idx: Option<usize>, - static_region: RegionVid, } -impl<'a, 'tcx, D: ConstraintGraphDirection> Iterator for Edges<'a, 'tcx, D> { - type Item = OutlivesConstraint<'tcx>; +impl<'a, 'tcx, D: ConstraintGraphDirection> Iterator for EdgesFromGraph<'a, 'tcx, D> { + type Item = &'a OutlivesConstraint<'tcx>; fn next(&mut self) -> Option<Self::Item> { if let Some(p) = self.pointer { self.pointer = self.graph.next_constraints[p]; + Some(&self.constraints[p]) + } else { + None + } + } +} + +pub(crate) struct EdgesFromStatic { + next_static_idx: usize, + end_static_idx: usize, +} + +impl Iterator for EdgesFromStatic { + type Item = RegionVid; - Some(self.constraints[p]) - } else if let Some(next_static_idx) = self.next_static_idx { - self.next_static_idx = if next_static_idx == (self.graph.first_constraints.len() - 1) { - None - } else { - Some(next_static_idx + 1) - }; - - Some(OutlivesConstraint { - sup: self.static_region, - sub: next_static_idx.into(), - locations: Locations::All(DUMMY_SP), - span: DUMMY_SP, - category: ConstraintCategory::Internal, - variance_info: VarianceDiagInfo::default(), - from_closure: false, - }) + fn next(&mut self) -> Option<Self::Item> { + if self.next_static_idx < self.end_static_idx { + let ret = RegionVid::from_usize(self.next_static_idx); + self.next_static_idx += 1; + Some(ret) } else { None } @@ -193,21 +184,38 @@ impl<'a, 'tcx, D: ConstraintGraphDirection> RegionGraph<'a, 'tcx, D> { /// Given a region `R`, iterate over all regions `R1` such that /// there exists a constraint `R: R1`. pub(crate) fn outgoing_regions(&self, region_sup: RegionVid) -> Successors<'a, 'tcx, D> { - Successors { - edges: self.constraint_graph.outgoing_edges(region_sup, self.set, self.static_region), + // If this is the `'static` region and the graph's direction is normal, + // then setup the Edges iterator to return all regions (#53178). + if region_sup == self.static_region && D::is_normal() { + Successors::FromStatic(self.constraint_graph.outgoing_edges_from_static()) + } else { + // Otherwise, just setup the iterator as normal. + Successors::FromGraph( + self.constraint_graph.outgoing_edges_from_graph(region_sup, self.set), + ) } } } -pub(crate) struct Successors<'a, 'tcx, D: ConstraintGraphDirection> { - edges: Edges<'a, 'tcx, D>, +pub(crate) enum Successors<'a, 'tcx, D: ConstraintGraphDirection> { + FromStatic(EdgesFromStatic), + FromGraph(EdgesFromGraph<'a, 'tcx, D>), } impl<'a, 'tcx, D: ConstraintGraphDirection> Iterator for Successors<'a, 'tcx, D> { type Item = RegionVid; fn next(&mut self) -> Option<Self::Item> { - self.edges.next().map(|c| D::end_region(&c)) + match self { + Successors::FromStatic(edges) => { + // No `D::end_region` call needed here: static successors are only possible when + // the direction is `Normal`, so we can directly use what would be the `sub` value. 
+ edges.next() + } + Successors::FromGraph(edges) => { + edges.next().map(|constraint| D::end_region(constraint.sup, constraint.sub)) + } + } } } diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs index f33f2ab58e0..2694a1eda78 100644 --- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs @@ -287,7 +287,6 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { None => "value".to_owned(), }; if needs_note { - let ty = self.infcx.tcx.short_string(ty, err.long_ty_path()); if let Some(local) = place.as_local() { let span = self.body.local_decls[local].source_info.span; err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label { diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs index d1f238331d5..208d510db2e 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mod.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs @@ -505,7 +505,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { let var_id = self.infcx.tcx.closure_captures(def_id)[field.index()].get_root_variable(); - Some(self.infcx.tcx.hir().name(var_id).to_string()) + Some(self.infcx.tcx.hir_name(var_id).to_string()) } _ => { // Might need a revision when the fields in trait RFC is implemented @@ -1124,7 +1124,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { def_id, target_place, places ); let hir_id = self.infcx.tcx.local_def_id_to_hir_id(def_id); - let expr = &self.infcx.tcx.hir().expect_expr(hir_id).kind; + let expr = &self.infcx.tcx.hir_expect_expr(hir_id).kind; debug!("closure_span: hir_id={:?} expr={:?}", hir_id, expr); if let &hir::ExprKind::Closure(&hir::Closure { kind, fn_decl_span, .. 
}) = expr { for (captured_place, place) in diff --git a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs index 5e83d2ffa97..29cc749877b 100644 --- a/compiler/rustc_borrowck/src/diagnostics/move_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/move_errors.rs @@ -596,10 +596,9 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { self.suggest_cloning(err, place_ty, expr, None); } - let ty = self.infcx.tcx.short_string(place_ty, err.long_ty_path()); err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label { is_partial_move: false, - ty, + ty: place_ty, place: &place_desc, span, }); @@ -629,10 +628,9 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { self.suggest_cloning(err, place_ty, expr, Some(use_spans)); } - let ty = self.infcx.tcx.short_string(place_ty, err.long_ty_path()); err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label { is_partial_move: false, - ty, + ty: place_ty, place: &place_desc, span: use_span, }); @@ -833,10 +831,9 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { self.suggest_cloning(err, bind_to.ty, expr, None); } - let ty = self.infcx.tcx.short_string(bind_to.ty, err.long_ty_path()); err.subdiagnostic(crate::session_diagnostics::TypeNoCopy::Label { is_partial_move: false, - ty, + ty: bind_to.ty, place: place_desc, span: binding_span, }); diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs index be4a7736b1c..3951b467961 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs @@ -698,7 +698,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { if !matches!(k, hir::AssocItemKind::Fn { .. }) { continue; } - if self.infcx.tcx.hir().name(hi) != self.infcx.tcx.hir().name(my_hir) { + if self.infcx.tcx.hir_name(hi) != self.infcx.tcx.hir_name(my_hir) { continue; } f_in_trait_opt = Some(hi); @@ -823,7 +823,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { ) => { capture_reason = format!("mutable borrow of `{upvar}`"); } - ty::UpvarCapture::ByValue => { + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => { capture_reason = format!("possible mutation of `{upvar}`"); } _ => bug!("upvar `{upvar}` borrowed, but not mutably"), diff --git a/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs b/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs index 876b8f214b0..7192a889adc 100644 --- a/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs +++ b/compiler/rustc_borrowck/src/diagnostics/opaque_suggestions.rs @@ -105,7 +105,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { if let Some(opaque_def_id) = opaque_def_id.as_local() && let hir::OpaqueTyOrigin::FnReturn { parent, .. 
} = - tcx.hir().expect_opaque_ty(opaque_def_id).origin + tcx.hir_expect_opaque_ty(opaque_def_id).origin { if let Some(sugg) = impl_trait_overcapture_suggestion( tcx, diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs index be28f84debd..412aaf70c3f 100644 --- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs @@ -194,8 +194,8 @@ impl Display for RegionName { } impl rustc_errors::IntoDiagArg for RegionName { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + self.to_string().into_diag_arg(path) } } @@ -343,7 +343,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> { } }; let hir::ExprKind::Closure(&hir::Closure { fn_decl_span, .. }) = - tcx.hir().expect_expr(self.mir_hir_id()).kind + tcx.hir_expect_expr(self.mir_hir_id()).kind else { bug!("Closure is not defined by a closure expr"); }; diff --git a/compiler/rustc_borrowck/src/diagnostics/var_name.rs b/compiler/rustc_borrowck/src/diagnostics/var_name.rs index 191c0444c74..693d22abbe6 100644 --- a/compiler/rustc_borrowck/src/diagnostics/var_name.rs +++ b/compiler/rustc_borrowck/src/diagnostics/var_name.rs @@ -69,7 +69,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { let upvar_hir_id = upvars[upvar_index].get_root_variable(); debug!("get_upvar_name_and_span_for_region: upvar_hir_id={upvar_hir_id:?}"); - let upvar_name = tcx.hir().name(upvar_hir_id); + let upvar_name = tcx.hir_name(upvar_hir_id); let upvar_span = tcx.hir().span(upvar_hir_id); debug!( "get_upvar_name_and_span_for_region: upvar_name={upvar_name:?} upvar_span={upvar_span:?}", diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs index a98984a4b4c..7ae7936a2a2 100644 --- a/compiler/rustc_borrowck/src/lib.rs +++ b/compiler/rustc_borrowck/src/lib.rs @@ -2,6 +2,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(rust_logo)] #![feature(assert_matches)] #![feature(box_patterns)] @@ -13,7 +14,6 @@ #![feature(rustdoc_internals)] #![feature(stmt_expr_attributes)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::borrow::Cow; @@ -648,7 +648,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt< | StatementKind::StorageLive(..) 
=> {} // This does not affect borrowck StatementKind::BackwardIncompatibleDropHint { place, reason: BackwardIncompatibleDropReason::Edition2024 } => { - self.check_backward_incompatible_drop(location, (**place, span), state); + self.check_backward_incompatible_drop(location, **place, state); } StatementKind::StorageDead(local) => { self.access_place( @@ -1174,7 +1174,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { fn check_backward_incompatible_drop( &mut self, location: Location, - (place, place_span): (Place<'tcx>, Span), + place: Place<'tcx>, state: &BorrowckDomain, ) { let tcx = self.infcx.tcx; @@ -1490,14 +1490,20 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { let stmt = &bbd.statements[loc.statement_index]; debug!("temporary assigned in: stmt={:?}", stmt); - if let StatementKind::Assign(box (_, Rvalue::Ref(_, _, source))) = stmt.kind - { - propagate_closure_used_mut_place(self, source); - } else { - bug!( - "closures should only capture user variables \ + match stmt.kind { + StatementKind::Assign(box ( + _, + Rvalue::Ref(_, _, source) + | Rvalue::Use(Operand::Copy(source) | Operand::Move(source)), + )) => { + propagate_closure_used_mut_place(self, source); + } + _ => { + bug!( + "closures should only capture user variables \ or references to user variables" - ); + ); + } } } _ => propagate_closure_used_mut_place(self, place), diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index a00fce08c9b..bb209a95711 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -21,8 +21,8 @@ use rustc_middle::traits::{ObligationCause, ObligationCauseCode}; use rustc_middle::ty::fold::fold_regions; use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, UniverseIndex}; use rustc_mir_dataflow::points::DenseLocationMap; -use rustc_span::Span; use rustc_span::hygiene::DesugaringKind; +use rustc_span::{DUMMY_SP, Span}; use tracing::{debug, instrument, trace}; use crate::BorrowckInferCtxt; @@ -311,9 +311,11 @@ enum RegionRelationCheckResult { } #[derive(Clone, PartialEq, Eq, Debug)] -enum Trace<'tcx> { +enum Trace<'a, 'tcx> { StartRegion, - FromOutlivesConstraint(OutlivesConstraint<'tcx>), + FromGraph(&'a OutlivesConstraint<'tcx>), + FromStatic(RegionVid), + FromMember(RegionVid, RegionVid, Span), NotVisited, } @@ -1764,6 +1766,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { let mut context = IndexVec::from_elem(Trace::NotVisited, &self.definitions); context[from_region] = Trace::StartRegion; + let fr_static = self.universal_regions().fr_static; + // Use a deque so that we do a breadth-first search. We will // stop at the first match, which ought to be the shortest // path (fewest constraints). @@ -1783,13 +1787,39 @@ impl<'tcx> RegionInferenceContext<'tcx> { if target_test(r) { let mut result = vec![]; let mut p = r; + // This loop is cold and runs at the end, which is why we delay + // `OutlivesConstraint` construction until now. 
loop { - match context[p].clone() { - Trace::NotVisited => { - bug!("found unvisited region {:?} on path to {:?}", p, r) + match context[p] { + Trace::FromGraph(c) => { + p = c.sup; + result.push(*c); } - Trace::FromOutlivesConstraint(c) => { + Trace::FromStatic(sub) => { + let c = OutlivesConstraint { + sup: fr_static, + sub, + locations: Locations::All(DUMMY_SP), + span: DUMMY_SP, + category: ConstraintCategory::Internal, + variance_info: ty::VarianceDiagInfo::default(), + from_closure: false, + }; + p = c.sup; + result.push(c); + } + + Trace::FromMember(sup, sub, span) => { + let c = OutlivesConstraint { + sup, + sub, + locations: Locations::All(span), + span, + category: ConstraintCategory::OpaqueType, + variance_info: ty::VarianceDiagInfo::default(), + from_closure: false, + }; p = c.sup; result.push(c); } @@ -1798,6 +1828,10 @@ impl<'tcx> RegionInferenceContext<'tcx> { result.reverse(); return Some((result, r)); } + + Trace::NotVisited => { + bug!("found unvisited region {:?} on path to {:?}", p, r) + } } } } @@ -1808,45 +1842,42 @@ impl<'tcx> RegionInferenceContext<'tcx> { // A constraint like `'r: 'x` can come from our constraint // graph. - let fr_static = self.universal_regions().fr_static; - let outgoing_edges_from_graph = - self.constraint_graph.outgoing_edges(r, &self.constraints, fr_static); // Always inline this closure because it can be hot. - let mut handle_constraint = #[inline(always)] - |constraint: OutlivesConstraint<'tcx>| { - debug_assert_eq!(constraint.sup, r); - let sub_region = constraint.sub; - if let Trace::NotVisited = context[sub_region] { - context[sub_region] = Trace::FromOutlivesConstraint(constraint); - deque.push_back(sub_region); + let mut handle_trace = #[inline(always)] + |sub, trace| { + if let Trace::NotVisited = context[sub] { + context[sub] = trace; + deque.push_back(sub); } }; - // This loop can be hot. - for constraint in outgoing_edges_from_graph { - if matches!(constraint.category, ConstraintCategory::IllegalUniverse) { - debug!("Ignoring illegal universe constraint: {constraint:?}"); - continue; + // If this is the `'static` region and the graph's direction is normal, then set up the + // Edges iterator to return all regions (#53178). + if r == fr_static && self.constraint_graph.is_normal() { + for sub in self.constraint_graph.outgoing_edges_from_static() { + handle_trace(sub, Trace::FromStatic(sub)); + } + } else { + let edges = self.constraint_graph.outgoing_edges_from_graph(r, &self.constraints); + // This loop can be hot. + for constraint in edges { + if matches!(constraint.category, ConstraintCategory::IllegalUniverse) { + debug!("Ignoring illegal universe constraint: {constraint:?}"); + continue; + } + debug_assert_eq!(constraint.sup, r); + handle_trace(constraint.sub, Trace::FromGraph(constraint)); } - handle_constraint(constraint); } // Member constraints can also give rise to `'r: 'x` edges that // were not part of the graph initially, so watch out for those. // (But they are extremely rare; this loop is very cold.) 
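The reworked path search above is a breadth-first walk over the outlives constraints that records, for each region, how it was reached (`Trace::FromGraph`, `FromStatic`, or `FromMember`), and only materializes `OutlivesConstraint`s on the cold path once a matching region is found and the route is rebuilt. A generic, self-contained sketch of that back-pointer pattern, with plain `usize` nodes rather than rustc's region and constraint types:

```rust
use std::collections::VecDeque;

// Breadth-first search that stores a per-node back-pointer and reconstructs the
// path only for the single node that matches; the reconstruction cost is paid once.
fn bfs_path(adj: &[Vec<usize>], from: usize, target: usize) -> Option<Vec<usize>> {
    let mut prev: Vec<Option<usize>> = vec![None; adj.len()];
    let mut seen = vec![false; adj.len()];
    let mut queue = VecDeque::from([from]);
    seen[from] = true;
    while let Some(node) = queue.pop_front() {
        if node == target {
            // Cold path: walk the back-pointers to rebuild the route, then reverse it.
            let mut path = vec![node];
            let mut cur = node;
            while let Some(p) = prev[cur] {
                path.push(p);
                cur = p;
            }
            path.reverse();
            return Some(path);
        }
        for &next in &adj[node] {
            if !seen[next] {
                seen[next] = true;
                prev[next] = Some(node);
                queue.push_back(next);
            }
        }
    }
    None
}
```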
for constraint in self.applied_member_constraints(self.constraint_sccs.scc(r)) { + let sub = constraint.min_choice; let p_c = &self.member_constraints[constraint.member_constraint_index]; - let constraint = OutlivesConstraint { - sup: r, - sub: constraint.min_choice, - locations: Locations::All(p_c.definition_span), - span: p_c.definition_span, - category: ConstraintCategory::OpaqueType, - variance_info: ty::VarianceDiagInfo::default(), - from_closure: false, - }; - handle_constraint(constraint); + handle_trace(sub, Trace::FromMember(r, sub, p_c.definition_span)); } } diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs index 11b30c145c2..4be5d0dbf42 100644 --- a/compiler/rustc_borrowck/src/session_diagnostics.rs +++ b/compiler/rustc_borrowck/src/session_diagnostics.rs @@ -459,17 +459,17 @@ pub(crate) enum OnClosureNote<'a> { } #[derive(Subdiagnostic)] -pub(crate) enum TypeNoCopy<'a> { +pub(crate) enum TypeNoCopy<'a, 'tcx> { #[label(borrowck_ty_no_impl_copy)] Label { is_partial_move: bool, - ty: String, + ty: Ty<'tcx>, place: &'a str, #[primary_span] span: Span, }, #[note(borrowck_ty_no_impl_copy)] - Note { is_partial_move: bool, ty: String, place: &'a str }, + Note { is_partial_move: bool, ty: Ty<'tcx>, place: &'a str }, } #[derive(Diagnostic)] diff --git a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs index 3b48ca305c4..8dff40ba6ac 100644 --- a/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs +++ b/compiler/rustc_borrowck/src/type_check/constraint_conversion.rs @@ -1,4 +1,4 @@ -use rustc_hir::def_id::DefId; +use rustc_hir::def_id::LocalDefId; use rustc_infer::infer::canonical::QueryRegionConstraints; use rustc_infer::infer::outlives::env::RegionBoundPairs; use rustc_infer::infer::outlives::obligations::{TypeOutlives, TypeOutlivesDelegate}; @@ -88,7 +88,7 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> { pub(crate) fn apply_closure_requirements( &mut self, closure_requirements: &ClosureRegionRequirements<'tcx>, - closure_def_id: DefId, + closure_def_id: LocalDefId, closure_args: ty::GenericArgsRef<'tcx>, ) { // Extract the values of the free regions in `closure_args` @@ -98,7 +98,7 @@ impl<'a, 'tcx> ConstraintConversion<'a, 'tcx> { self.tcx, closure_args, closure_requirements.num_external_vids, - closure_def_id.expect_local(), + closure_def_id, ); debug!(?closure_mapping); diff --git a/compiler/rustc_borrowck/src/type_check/input_output.rs b/compiler/rustc_borrowck/src/type_check/input_output.rs index f70b17e3362..c6b29fe36fd 100644 --- a/compiler/rustc_borrowck/src/type_check/input_output.rs +++ b/compiler/rustc_borrowck/src/type_check/input_output.rs @@ -24,9 +24,9 @@ use crate::universal_regions::DefiningTy; impl<'a, 'tcx> TypeChecker<'a, 'tcx> { /// Check explicit closure signature annotation, /// e.g., `|x: FxIndexMap<_, &'static u32>| ...`. - #[instrument(skip(self, body), level = "debug")] - pub(super) fn check_signature_annotation(&mut self, body: &Body<'tcx>) { - let mir_def_id = body.source.def_id().expect_local(); + #[instrument(skip(self), level = "debug")] + pub(super) fn check_signature_annotation(&mut self) { + let mir_def_id = self.body.source.def_id().expect_local(); if !self.tcx().is_closure_like(mir_def_id.to_def_id()) { return; @@ -38,9 +38,9 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // (e.g., the `_` in the code above) with fresh variables. 
// Then replace the bound items in the fn sig with fresh variables, // so that they represent the view from "inside" the closure. - let user_provided_sig = self.instantiate_canonical(body.span, &user_provided_poly_sig); + let user_provided_sig = self.instantiate_canonical(self.body.span, &user_provided_poly_sig); let mut user_provided_sig = self.infcx.instantiate_binder_with_fresh_vars( - body.span, + self.body.span, BoundRegionConversionTime::FnCall, user_provided_sig, ); @@ -66,12 +66,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { Ty::new_tup(self.tcx(), user_provided_sig.inputs()), args.tupled_upvars_ty(), args.coroutine_captures_by_ref_ty(), - self.infcx.next_region_var(RegionVariableOrigin::MiscVariable(body.span), || { - RegionCtxt::Unknown - }), + self.infcx + .next_region_var(RegionVariableOrigin::MiscVariable(self.body.span), || { + RegionCtxt::Unknown + }), ); - let next_ty_var = || self.infcx.next_ty_var(body.span); + let next_ty_var = || self.infcx.next_ty_var(self.body.span); let output_ty = Ty::new_coroutine( self.tcx(), self.tcx().coroutine_for_closure(mir_def_id), @@ -107,9 +108,10 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { for (&user_ty, arg_decl) in user_provided_sig.inputs().iter().zip_eq( // In MIR, closure args begin with an implicit `self`. // Also, coroutines have a resume type which may be implicitly `()`. - body.args_iter() + self.body + .args_iter() .skip(1 + if is_coroutine_with_implicit_resume_ty { 1 } else { 0 }) - .map(|local| &body.local_decls[local]), + .map(|local| &self.body.local_decls[local]), ) { self.ascribe_user_type_skip_wf( arg_decl.ty, @@ -119,7 +121,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } // If the user explicitly annotated the output type, enforce it. - let output_decl = &body.local_decls[RETURN_PLACE]; + let output_decl = &self.body.local_decls[RETURN_PLACE]; self.ascribe_user_type_skip_wf( output_decl.ty, ty::UserType::new(ty::UserTypeKind::Ty(user_provided_sig.output())), @@ -127,12 +129,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ); } - #[instrument(skip(self, body), level = "debug")] - pub(super) fn equate_inputs_and_outputs( - &mut self, - body: &Body<'tcx>, - normalized_inputs_and_output: &[Ty<'tcx>], - ) { + #[instrument(skip(self), level = "debug")] + pub(super) fn equate_inputs_and_outputs(&mut self, normalized_inputs_and_output: &[Ty<'tcx>]) { let (&normalized_output_ty, normalized_input_tys) = normalized_inputs_and_output.split_last().unwrap(); @@ -141,18 +139,18 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // Equate expected input tys with those in the MIR. for (argument_index, &normalized_input_ty) in normalized_input_tys.iter().enumerate() { - if argument_index + 1 >= body.local_decls.len() { + if argument_index + 1 >= self.body.local_decls.len() { self.tcx() .dcx() - .span_bug(body.span, "found more normalized_input_ty than local_decls"); + .span_bug(self.body.span, "found more normalized_input_ty than local_decls"); } // In MIR, argument N is stored in local N+1. 
let local = Local::from_usize(argument_index + 1); - let mir_input_ty = body.local_decls[local].ty; + let mir_input_ty = self.body.local_decls[local].ty; - let mir_input_span = body.local_decls[local].source_info.span; + let mir_input_span = self.body.local_decls[local].source_info.span; self.equate_normalized_input_or_output( normalized_input_ty, mir_input_ty, @@ -160,8 +158,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ); } - if let Some(mir_yield_ty) = body.yield_ty() { - let yield_span = body.local_decls[RETURN_PLACE].source_info.span; + if let Some(mir_yield_ty) = self.body.yield_ty() { + let yield_span = self.body.local_decls[RETURN_PLACE].source_info.span; self.equate_normalized_input_or_output( self.universal_regions.yield_ty.unwrap(), mir_yield_ty, @@ -169,8 +167,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ); } - if let Some(mir_resume_ty) = body.resume_ty() { - let yield_span = body.local_decls[RETURN_PLACE].source_info.span; + if let Some(mir_resume_ty) = self.body.resume_ty() { + let yield_span = self.body.local_decls[RETURN_PLACE].source_info.span; self.equate_normalized_input_or_output( self.universal_regions.resume_ty.unwrap(), mir_resume_ty, @@ -179,8 +177,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } // Return types are a bit more complex. They may contain opaque `impl Trait` types. - let mir_output_ty = body.local_decls[RETURN_PLACE].ty; - let output_span = body.local_decls[RETURN_PLACE].source_info.span; + let mir_output_ty = self.body.local_decls[RETURN_PLACE].ty; + let output_span = self.body.local_decls[RETURN_PLACE].source_info.span; self.equate_normalized_input_or_output(normalized_output_ty, mir_output_ty, output_span); } diff --git a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs index dcc17903002..f17ad23f4bf 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs @@ -31,7 +31,6 @@ mod trace; /// performed before pub(super) fn generate<'a, 'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, - body: &Body<'tcx>, location_map: &DenseLocationMap, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, @@ -51,23 +50,16 @@ pub(super) fn generate<'a, 'tcx>( // We do record these regions in the polonius context, since they're used to differentiate // relevant and boring locals, which is a key distinction used later in diagnostics. if typeck.tcx().sess.opts.unstable_opts.polonius.is_next_enabled() { - let (_, boring_locals) = compute_relevant_live_locals(typeck.tcx(), &free_regions, body); + let (_, boring_locals) = + compute_relevant_live_locals(typeck.tcx(), &free_regions, typeck.body); typeck.polonius_liveness.as_mut().unwrap().boring_nll_locals = boring_locals.into_iter().collect(); free_regions = typeck.universal_regions.universal_regions_iter().collect(); } let (relevant_live_locals, boring_locals) = - compute_relevant_live_locals(typeck.tcx(), &free_regions, body); - - trace::trace( - typeck, - body, - location_map, - flow_inits, - move_data, - relevant_live_locals, - boring_locals, - ); + compute_relevant_live_locals(typeck.tcx(), &free_regions, typeck.body); + + trace::trace(typeck, location_map, flow_inits, move_data, relevant_live_locals, boring_locals); // Mark regions that should be live where they appear within rvalues or within a call: like // args, regions, and types. 
@@ -76,7 +68,7 @@ pub(super) fn generate<'a, 'tcx>( &mut typeck.constraints.liveness_constraints, &typeck.universal_regions, &mut typeck.polonius_liveness, - body, + typeck.body, ); } diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs index dc35d5eb89c..7718644b9a9 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs @@ -39,17 +39,15 @@ use crate::type_check::{NormalizeLocation, TypeChecker}; /// this respects `#[may_dangle]` annotations). pub(super) fn trace<'a, 'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, - body: &Body<'tcx>, location_map: &DenseLocationMap, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, relevant_live_locals: Vec<Local>, boring_locals: Vec<Local>, ) { - let local_use_map = &LocalUseMap::build(&relevant_live_locals, location_map, body); + let local_use_map = &LocalUseMap::build(&relevant_live_locals, location_map, typeck.body); let cx = LivenessContext { typeck, - body, flow_inits, location_map, local_use_map, @@ -69,14 +67,13 @@ pub(super) fn trace<'a, 'tcx>( /// Contextual state for the type-liveness coroutine. struct LivenessContext<'a, 'typeck, 'b, 'tcx> { /// Current type-checker, giving us our inference context etc. + /// + /// This also stores the body we're currently analyzing. typeck: &'a mut TypeChecker<'typeck, 'tcx>, /// Defines the `PointIndex` mapping location_map: &'a DenseLocationMap, - /// MIR we are analyzing. - body: &'a Body<'tcx>, - /// Mapping to/from the various indices used for initialization tracking. move_data: &'a MoveData<'tcx>, @@ -139,7 +136,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { self.compute_use_live_points_for(local); self.compute_drop_live_points_for(local); - let local_ty = self.cx.body.local_decls[local].ty; + let local_ty = self.cx.body().local_decls[local].ty; if !self.use_live_at.is_empty() { self.cx.add_use_live_facts_for(local_ty, &self.use_live_at); @@ -164,8 +161,8 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { /// and can therefore safely be dropped. 
fn dropck_boring_locals(&mut self, boring_locals: Vec<Local>) { for local in boring_locals { - let local_ty = self.cx.body.local_decls[local].ty; - let local_span = self.cx.body.local_decls[local].source_info.span; + let local_ty = self.cx.body().local_decls[local].ty; + let local_span = self.cx.body().local_decls[local].source_info.span; let drop_data = self.cx.drop_data.entry(local_ty).or_insert_with({ let typeck = &self.cx.typeck; move || LivenessContext::compute_drop_data(typeck, local_ty, local_span) @@ -173,7 +170,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { drop_data.dropck_result.report_overflows( self.cx.typeck.infcx.tcx, - self.cx.body.local_decls[local].source_info.span, + self.cx.typeck.body.local_decls[local].source_info.span, local_ty, ); } @@ -202,7 +199,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { .var_dropped_at .iter() .filter_map(|&(local, location_index)| { - let local_ty = self.cx.body.local_decls[local].ty; + let local_ty = self.cx.body().local_decls[local].ty; if relevant_live_locals.contains(&local) || !local_ty.has_free_regions() { return None; } @@ -278,9 +275,9 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { let block = self.cx.location_map.to_location(block_start).block; self.stack.extend( - self.cx.body.basic_blocks.predecessors()[block] + self.cx.body().basic_blocks.predecessors()[block] .iter() - .map(|&pred_bb| self.cx.body.terminator_loc(pred_bb)) + .map(|&pred_bb| self.cx.body().terminator_loc(pred_bb)) .map(|pred_loc| self.cx.location_map.point_from_location(pred_loc)), ); } @@ -305,7 +302,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // Find the drops where `local` is initialized. for drop_point in self.cx.local_use_map.drops(local) { let location = self.cx.location_map.to_location(drop_point); - debug_assert_eq!(self.cx.body.terminator_loc(location.block), location,); + debug_assert_eq!(self.cx.body().terminator_loc(location.block), location,); if self.cx.initialized_at_terminator(location.block, mpi) && self.drop_live_at.insert(drop_point) @@ -351,7 +348,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // block. One of them may be either a definition or use // live point. 
let term_location = self.cx.location_map.to_location(term_point); - debug_assert_eq!(self.cx.body.terminator_loc(term_location.block), term_location,); + debug_assert_eq!(self.cx.body().terminator_loc(term_location.block), term_location,); let block = term_location.block; let entry_point = self.cx.location_map.entry_point(term_location.block); for p in (entry_point..term_point).rev() { @@ -376,7 +373,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { } } - let body = self.cx.body; + let body = self.cx.typeck.body; for &pred_block in body.basic_blocks.predecessors()[block].iter() { debug!("compute_drop_live_points_for_block: pred_block = {:?}", pred_block,); @@ -403,7 +400,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { continue; } - let pred_term_loc = self.cx.body.terminator_loc(pred_block); + let pred_term_loc = self.cx.body().terminator_loc(pred_block); let pred_term_point = self.cx.location_map.point_from_location(pred_term_loc); // If the terminator of this predecessor either *assigns* @@ -463,6 +460,9 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { } impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { + fn body(&self) -> &Body<'tcx> { + self.typeck.body + } /// Returns `true` if the local variable (or some part of it) is initialized at the current /// cursor position. Callers should call one of the `seek` methods immediately before to point /// the cursor to the desired location. @@ -481,7 +481,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { /// DROP of some local variable will have an effect -- note that /// drops, as they may unwind, are always terminators. fn initialized_at_terminator(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool { - self.flow_inits.seek_before_primary_effect(self.body.terminator_loc(block)); + self.flow_inits.seek_before_primary_effect(self.body().terminator_loc(block)); self.initialized_at_curr_loc(mpi) } @@ -491,7 +491,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { /// **Warning:** Does not account for the result of `Call` /// instructions. fn initialized_at_exit(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool { - self.flow_inits.seek_after_primary_effect(self.body.terminator_loc(block)); + self.flow_inits.seek_after_primary_effect(self.body().terminator_loc(block)); self.initialized_at_curr_loc(mpi) } @@ -526,7 +526,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { values::pretty_print_points(self.location_map, live_at.iter()), ); - let local_span = self.body.local_decls()[dropped_local].source_info.span; + let local_span = self.body().local_decls()[dropped_local].source_info.span; let drop_data = self.drop_data.entry(dropped_ty).or_insert_with({ let typeck = &self.typeck; move || Self::compute_drop_data(typeck, dropped_ty, local_span) @@ -544,7 +544,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { drop_data.dropck_result.report_overflows( self.typeck.infcx.tcx, - self.body.source_info(*drop_locations.first().unwrap()).span, + self.typeck.body.source_info(*drop_locations.first().unwrap()).span, dropped_ty, ); @@ -610,7 +610,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { Err(ErrorGuaranteed { .. }) => { // We don't run dropck on HIR, and dropck looks inside fields of // types, so there's no guarantee that it succeeds. We also - // can't rely on the the `ErrorGuaranteed` from `fully_perform` here + // can't rely on the `ErrorGuaranteed` from `fully_perform` here // because it comes from delay_span_bug. 
// // Do this inside of a probe because we don't particularly care (or want) diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index bd0d98028ae..d2eb7a52f78 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -38,7 +38,7 @@ use rustc_mir_dataflow::move_paths::MoveData; use rustc_mir_dataflow::points::DenseLocationMap; use rustc_span::def_id::CRATE_DEF_ID; use rustc_span::source_map::Spanned; -use rustc_span::{DUMMY_SP, Span, sym}; +use rustc_span::{Span, sym}; use rustc_trait_selection::traits::query::type_op::custom::scrape_region_constraints; use rustc_trait_selection::traits::query::type_op::{TypeOp, TypeOpOutput}; use tracing::{debug, instrument, trace}; @@ -51,7 +51,6 @@ use crate::polonius::legacy::{PoloniusFacts, PoloniusLocationTable}; use crate::polonius::{PoloniusContext, PoloniusLivenessContext}; use crate::region_infer::TypeTest; use crate::region_infer::values::{LivenessValues, PlaceholderIndex, PlaceholderIndices}; -use crate::renumber::RegionCtxt; use crate::session_diagnostics::{MoveUnsized, SimdIntrinsicArgConst}; use crate::type_check::free_region_relations::{CreateResult, UniversalRegionRelations}; use crate::universal_regions::{DefiningTy, UniversalRegions}; @@ -157,6 +156,7 @@ pub(crate) fn type_check<'a, 'tcx>( infcx, last_span: body.span, body, + promoted, user_type_annotations: &body.user_type_annotations, region_bound_pairs, known_type_outlives_obligations, @@ -171,15 +171,11 @@ pub(crate) fn type_check<'a, 'tcx>( }; typeck.check_user_type_annotations(); + typeck.visit_body(body); + typeck.equate_inputs_and_outputs(&normalized_inputs_and_output); + typeck.check_signature_annotation(); - let mut verifier = TypeVerifier { typeck: &mut typeck, promoted, last_span: body.span }; - verifier.visit_body(body); - - typeck.typeck_mir(body); - typeck.equate_inputs_and_outputs(body, &normalized_inputs_and_output); - typeck.check_signature_annotation(body); - - liveness::generate(&mut typeck, body, &location_map, flow_inits, move_data); + liveness::generate(&mut typeck, &location_map, flow_inits, move_data); let opaque_type_values = opaque_types::take_opaques_and_register_member_constraints(&mut typeck); @@ -213,358 +209,6 @@ enum FieldAccessError { OutOfRange { field_count: usize }, } -/// Verifies that MIR types are sane. -/// -/// FIXME: This should be merged with the actual `TypeChecker`. -struct TypeVerifier<'a, 'b, 'tcx> { - typeck: &'a mut TypeChecker<'b, 'tcx>, - promoted: &'b IndexSlice<Promoted, Body<'tcx>>, - last_span: Span, -} - -impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> { - fn visit_span(&mut self, span: Span) { - if !span.is_dummy() { - self.last_span = span; - } - } - - fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) { - self.super_place(place, context, location); - let tcx = self.tcx(); - let place_ty = place.ty(self.body(), tcx); - if let PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) = context { - let trait_ref = ty::TraitRef::new( - tcx, - tcx.require_lang_item(LangItem::Copy, Some(self.last_span)), - [place_ty.ty], - ); - - // To have a `Copy` operand, the type `T` of the - // value must be `Copy`. Note that we prove that `T: Copy`, - // rather than using the `is_copy_modulo_regions` - // test. This is important because - // `is_copy_modulo_regions` ignores the resulting region - // obligations and assumes they pass. 
This can result in - // bounds from `Copy` impls being unsoundly ignored (e.g., - // #29149). Note that we decide to use `Copy` before knowing - // whether the bounds fully apply: in effect, the rule is - // that if a value of some type could implement `Copy`, then - // it must. - self.typeck.prove_trait_ref( - trait_ref, - location.to_locations(), - ConstraintCategory::CopyBound, - ); - } - } - - fn visit_projection_elem( - &mut self, - place: PlaceRef<'tcx>, - elem: PlaceElem<'tcx>, - context: PlaceContext, - location: Location, - ) { - let tcx = self.tcx(); - let base_ty = place.ty(self.body(), tcx); - match elem { - // All these projections don't add any constraints, so there's nothing to - // do here. We check their invariants in the MIR validator after all. - ProjectionElem::Deref - | ProjectionElem::Index(_) - | ProjectionElem::ConstantIndex { .. } - | ProjectionElem::Subslice { .. } - | ProjectionElem::Downcast(..) => {} - ProjectionElem::Field(field, fty) => { - let fty = self.typeck.normalize(fty, location); - let ty = base_ty.field_ty(tcx, field); - let ty = self.typeck.normalize(ty, location); - debug!(?fty, ?ty); - - if let Err(terr) = self.typeck.relate_types( - ty, - context.ambient_variance(), - fty, - location.to_locations(), - ConstraintCategory::Boring, - ) { - span_mirbug!(self, place, "bad field access ({:?}: {:?}): {:?}", ty, fty, terr); - } - } - ProjectionElem::OpaqueCast(ty) => { - let ty = self.typeck.normalize(ty, location); - self.typeck - .relate_types( - ty, - context.ambient_variance(), - base_ty.ty, - location.to_locations(), - ConstraintCategory::TypeAnnotation(AnnotationSource::OpaqueCast), - ) - .unwrap(); - } - ProjectionElem::UnwrapUnsafeBinder(ty) => { - let ty::UnsafeBinder(binder_ty) = *base_ty.ty.kind() else { - unreachable!(); - }; - let found_ty = self.typeck.infcx.instantiate_binder_with_fresh_vars( - self.body().source_info(location).span, - BoundRegionConversionTime::HigherRankedType, - binder_ty.into(), - ); - self.typeck - .relate_types( - ty, - context.ambient_variance(), - found_ty, - location.to_locations(), - ConstraintCategory::Boring, - ) - .unwrap(); - } - ProjectionElem::Subtype(_) => { - bug!("ProjectionElem::Subtype shouldn't exist in borrowck") - } - } - } - - fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, location: Location) { - debug!(?constant, ?location, "visit_const_operand"); - - self.super_const_operand(constant, location); - let ty = constant.const_.ty(); - - self.typeck.infcx.tcx.for_each_free_region(&ty, |live_region| { - let live_region_vid = self.typeck.universal_regions.to_region_vid(live_region); - self.typeck.constraints.liveness_constraints.add_location(live_region_vid, location); - }); - - // HACK(compiler-errors): Constants that are gathered into Body.required_consts - // have their locations erased... 
- let locations = if location != Location::START { - location.to_locations() - } else { - Locations::All(constant.span) - }; - - if let Some(annotation_index) = constant.user_ty { - if let Err(terr) = self.typeck.relate_type_and_user_type( - constant.const_.ty(), - ty::Invariant, - &UserTypeProjection { base: annotation_index, projs: vec![] }, - locations, - ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg), - ) { - let annotation = &self.typeck.user_type_annotations[annotation_index]; - span_mirbug!( - self, - constant, - "bad constant user type {:?} vs {:?}: {:?}", - annotation, - constant.const_.ty(), - terr, - ); - } - } else { - let tcx = self.tcx(); - let maybe_uneval = match constant.const_ { - Const::Ty(_, ct) => match ct.kind() { - ty::ConstKind::Unevaluated(uv) => { - Some(UnevaluatedConst { def: uv.def, args: uv.args, promoted: None }) - } - _ => None, - }, - Const::Unevaluated(uv, _) => Some(uv), - _ => None, - }; - - if let Some(uv) = maybe_uneval { - if let Some(promoted) = uv.promoted { - let check_err = |verifier: &mut TypeVerifier<'a, 'b, 'tcx>, - promoted: &Body<'tcx>, - ty, - san_ty| { - if let Err(terr) = verifier.typeck.eq_types( - ty, - san_ty, - locations, - ConstraintCategory::Boring, - ) { - span_mirbug!( - verifier, - promoted, - "bad promoted type ({:?}: {:?}): {:?}", - ty, - san_ty, - terr - ); - }; - }; - - let promoted_body = &self.promoted[promoted]; - self.verify_promoted(promoted_body, location); - - let promoted_ty = promoted_body.return_ty(); - check_err(self, promoted_body, ty, promoted_ty); - } else { - self.typeck.ascribe_user_type( - constant.const_.ty(), - ty::UserType::new(ty::UserTypeKind::TypeOf( - uv.def, - UserArgs { args: uv.args, user_self_ty: None }, - )), - locations.span(self.typeck.body), - ); - } - } else if let Some(static_def_id) = constant.check_static_ptr(tcx) { - let unnormalized_ty = tcx.type_of(static_def_id).instantiate_identity(); - let normalized_ty = self.typeck.normalize(unnormalized_ty, locations); - let literal_ty = constant.const_.ty().builtin_deref(true).unwrap(); - - if let Err(terr) = self.typeck.eq_types( - literal_ty, - normalized_ty, - locations, - ConstraintCategory::Boring, - ) { - span_mirbug!(self, constant, "bad static type {:?} ({:?})", constant, terr); - } - } - - if let ty::FnDef(def_id, args) = *constant.const_.ty().kind() { - let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args); - self.typeck.normalize_and_prove_instantiated_predicates( - def_id, - instantiated_predicates, - locations, - ); - - assert!(!matches!( - tcx.impl_of_method(def_id).map(|imp| tcx.def_kind(imp)), - Some(DefKind::Impl { of_trait: true }) - )); - self.typeck.prove_predicates( - args.types().map(|ty| ty::ClauseKind::WellFormed(ty.into())), - locations, - ConstraintCategory::Boring, - ); - } - } - } - - fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) { - self.super_local_decl(local, local_decl); - - for user_ty in - local_decl.user_ty.as_deref().into_iter().flat_map(UserTypeProjections::projections) - { - let span = self.typeck.user_type_annotations[user_ty.base].span; - - let ty = if local_decl.is_nonref_binding() { - local_decl.ty - } else if let &ty::Ref(_, rty, _) = local_decl.ty.kind() { - // If we have a binding of the form `let ref x: T = ..` - // then remove the outermost reference so we can check the - // type annotation for the remaining type. 
- rty - } else { - bug!("{:?} with ref binding has wrong type {}", local, local_decl.ty); - }; - - if let Err(terr) = self.typeck.relate_type_and_user_type( - ty, - ty::Invariant, - user_ty, - Locations::All(span), - ConstraintCategory::TypeAnnotation(AnnotationSource::Declaration), - ) { - span_mirbug!( - self, - local, - "bad user type on variable {:?}: {:?} != {:?} ({:?})", - local, - local_decl.ty, - local_decl.user_ty, - terr, - ); - } - } - } - - fn visit_body(&mut self, body: &Body<'tcx>) { - // The types of local_decls are checked above which is called in super_body. - self.super_body(body); - } -} - -impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { - fn body(&self) -> &Body<'tcx> { - self.typeck.body - } - - fn tcx(&self) -> TyCtxt<'tcx> { - self.typeck.infcx.tcx - } - - fn verify_promoted(&mut self, promoted_body: &'b Body<'tcx>, location: Location) { - // Determine the constraints from the promoted MIR by running the type - // checker on the promoted MIR, then transfer the constraints back to - // the main MIR, changing the locations to the provided location. - - let parent_body = mem::replace(&mut self.typeck.body, promoted_body); - - // Use new sets of constraints and closure bounds so that we can - // modify their locations. - let polonius_facts = &mut None; - let mut constraints = Default::default(); - let mut liveness_constraints = - LivenessValues::without_specific_points(Rc::new(DenseLocationMap::new(promoted_body))); - // Don't try to add borrow_region facts for the promoted MIR - - let mut swap_constraints = |this: &mut Self| { - mem::swap(this.typeck.polonius_facts, polonius_facts); - mem::swap(&mut this.typeck.constraints.outlives_constraints, &mut constraints); - mem::swap(&mut this.typeck.constraints.liveness_constraints, &mut liveness_constraints); - }; - - swap_constraints(self); - - self.visit_body(promoted_body); - - self.typeck.typeck_mir(promoted_body); - - self.typeck.body = parent_body; - // Merge the outlives constraints back in, at the given location. - swap_constraints(self); - - let locations = location.to_locations(); - for constraint in constraints.outlives().iter() { - let mut constraint = *constraint; - constraint.locations = locations; - if let ConstraintCategory::Return(_) - | ConstraintCategory::UseAsConst - | ConstraintCategory::UseAsStatic = constraint.category - { - // "Returning" from a promoted is an assignment to a - // temporary from the user's point of view. - constraint.category = ConstraintCategory::Boring; - } - self.typeck.constraints.outlives_constraints.push(constraint) - } - // If the region is live at least one location in the promoted MIR, - // then add a liveness constraint to the main MIR for this region - // at the location provided as an argument to this method - // - // add_location doesn't care about ordering so not a problem for the live regions to be - // unordered. - #[allow(rustc::potential_query_instability)] - for region in liveness_constraints.live_regions_unordered() { - self.typeck.constraints.liveness_constraints.add_location(region, location); - } - } -} - /// The MIR type checker. Visits the MIR and enforces all the /// constraints needed for it to be valid and well-typed. Along the /// way, it accrues region constraints -- these can later be used by @@ -573,6 +217,9 @@ struct TypeChecker<'a, 'tcx> { infcx: &'a BorrowckInferCtxt<'tcx>, last_span: Span, body: &'a Body<'tcx>, + /// The bodies of all promoteds. 
As promoteds have a completely separate CFG + /// recursing into them may corrupt your data structures if you're not careful. + promoted: &'a IndexSlice<Promoted, Body<'tcx>>, /// User type annotations are shared between the main MIR and the MIR of /// all of the promoted items. user_type_annotations: &'a CanonicalUserTypeAnnotations<'tcx>, @@ -721,6 +368,10 @@ impl Locations { } impl<'a, 'tcx> TypeChecker<'a, 'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.infcx.tcx + } + fn body(&self) -> &Body<'tcx> { self.body } @@ -848,6 +499,62 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { Ok(()) } + fn check_promoted(&mut self, promoted_body: &'a Body<'tcx>, location: Location) { + // Determine the constraints from the promoted MIR by running the type + // checker on the promoted MIR, then transfer the constraints back to + // the main MIR, changing the locations to the provided location. + + let parent_body = mem::replace(&mut self.body, promoted_body); + + // Use new sets of constraints and closure bounds so that we can + // modify their locations. + let polonius_facts = &mut None; + let mut constraints = Default::default(); + let mut liveness_constraints = + LivenessValues::without_specific_points(Rc::new(DenseLocationMap::new(promoted_body))); + + // Don't try to add borrow_region facts for the promoted MIR as they refer + // to the wrong locations. + let mut swap_constraints = |this: &mut Self| { + mem::swap(this.polonius_facts, polonius_facts); + mem::swap(&mut this.constraints.outlives_constraints, &mut constraints); + mem::swap(&mut this.constraints.liveness_constraints, &mut liveness_constraints); + }; + + swap_constraints(self); + + self.visit_body(promoted_body); + + self.body = parent_body; + + // Merge the outlives constraints back in, at the given location. + swap_constraints(self); + let locations = location.to_locations(); + for constraint in constraints.outlives().iter() { + let mut constraint = *constraint; + constraint.locations = locations; + if let ConstraintCategory::Return(_) + | ConstraintCategory::UseAsConst + | ConstraintCategory::UseAsStatic = constraint.category + { + // "Returning" from a promoted is an assignment to a + // temporary from the user's point of view. + constraint.category = ConstraintCategory::Boring; + } + self.constraints.outlives_constraints.push(constraint) + } + // If the region is live at least one location in the promoted MIR, + // then add a liveness constraint to the main MIR for this region + // at the location provided as an argument to this method + // + // add_location doesn't care about ordering so not a problem for the live regions to be + // unordered. 
+ #[allow(rustc::potential_query_instability)] + for region in liveness_constraints.live_regions_unordered() { + self.constraints.liveness_constraints.add_location(region, location); + } + } + fn check_inline_const( &mut self, inferred_ty: Ty<'tcx>, @@ -877,15 +584,40 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { Locations::All(span), ); } +} - fn tcx(&self) -> TyCtxt<'tcx> { - self.infcx.tcx +impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { + fn visit_span(&mut self, span: Span) { + if !span.is_dummy() { + debug!(?span); + self.last_span = span; + } } #[instrument(skip(self, body), level = "debug")] - fn check_stmt(&mut self, body: &Body<'tcx>, stmt: &Statement<'tcx>, location: Location) { + fn visit_body(&mut self, body: &Body<'tcx>) { + debug_assert!(std::ptr::eq(self.body, body)); + + for (local, local_decl) in body.local_decls.iter_enumerated() { + self.visit_local_decl(local, local_decl); + } + + for (block, block_data) in body.basic_blocks.iter_enumerated() { + let mut location = Location { block, statement_index: 0 }; + for stmt in &block_data.statements { + self.visit_statement(stmt, location); + location.statement_index += 1; + } + + self.visit_terminator(block_data.terminator(), location); + self.check_iscleanup(block_data); + } + } + + #[instrument(skip(self), level = "debug")] + fn visit_statement(&mut self, stmt: &Statement<'tcx>, location: Location) { + self.super_statement(stmt, location); let tcx = self.tcx(); - debug!("stmt kind: {:?}", stmt.kind); match &stmt.kind { StatementKind::Assign(box (place, rv)) => { // Assignments to temporaries are not "interesting"; @@ -906,11 +638,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } Some(l) - if matches!(body.local_decls[l].local_info(), LocalInfo::AggregateTemp) => + if matches!( + self.body.local_decls[l].local_info(), + LocalInfo::AggregateTemp + ) => { ConstraintCategory::Usage } - Some(l) if !body.local_decls[l].is_user_variable() => { + Some(l) if !self.body.local_decls[l].is_user_variable() => { ConstraintCategory::Boring } _ => ConstraintCategory::Assignment, @@ -918,14 +653,14 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { debug!( "assignment category: {:?} {:?}", category, - place.as_local().map(|l| &body.local_decls[l]) + place.as_local().map(|l| &self.body.local_decls[l]) ); - let place_ty = place.ty(body, tcx).ty; + let place_ty = place.ty(self.body, tcx).ty; debug!(?place_ty); let place_ty = self.normalize(place_ty, location); debug!("place_ty normalized: {:?}", place_ty); - let rv_ty = rv.ty(body, tcx); + let rv_ty = rv.ty(self.body, tcx); debug!(?rv_ty); let rv_ty = self.normalize(rv_ty, location); debug!("normalized rv_ty: {:?}", rv_ty); @@ -962,7 +697,6 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } - self.check_rvalue(body, rv, location); if !self.unsized_feature_enabled() { let trait_ref = ty::TraitRef::new( tcx, @@ -977,7 +711,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } StatementKind::AscribeUserType(box (place, projection), variance) => { - let place_ty = place.ty(body, tcx).ty; + let place_ty = place.ty(self.body, tcx).ty; if let Err(terr) = self.relate_type_and_user_type( place_ty, *variance, @@ -997,14 +731,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ); } } - StatementKind::Intrinsic(box kind) => match kind { - NonDivergingIntrinsic::Assume(op) => self.check_operand(op, location), - NonDivergingIntrinsic::CopyNonOverlapping(..) 
=> span_bug!( - stmt.source_info.span, - "Unexpected NonDivergingIntrinsic::CopyNonOverlapping, should only appear after lowering_intrinsics", - ), - }, - StatementKind::FakeRead(..) + StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(..)) + | StatementKind::FakeRead(..) | StatementKind::StorageLive(..) | StatementKind::StorageDead(..) | StatementKind::Retag { .. } @@ -1013,19 +741,17 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { | StatementKind::PlaceMention(..) | StatementKind::BackwardIncompatibleDropHint { .. } | StatementKind::Nop => {} - StatementKind::Deinit(..) | StatementKind::SetDiscriminant { .. } => { + StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(..)) + | StatementKind::Deinit(..) + | StatementKind::SetDiscriminant { .. } => { bug!("Statement not allowed in this MIR phase") } } } - #[instrument(skip(self, body, term_location), level = "debug")] - fn check_terminator( - &mut self, - body: &Body<'tcx>, - term: &Terminator<'tcx>, - term_location: Location, - ) { + #[instrument(skip(self), level = "debug")] + fn visit_terminator(&mut self, term: &Terminator<'tcx>, term_location: Location) { + self.super_terminator(term, term_location); let tcx = self.tcx(); debug!("terminator kind: {:?}", term.kind); match &term.kind { @@ -1043,9 +769,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } TerminatorKind::SwitchInt { discr, .. } => { - self.check_operand(discr, term_location); - - let switch_ty = discr.ty(body, tcx); + let switch_ty = discr.ty(self.body, tcx); if !switch_ty.is_integral() && !switch_ty.is_char() && !switch_ty.is_bool() { span_mirbug!(self, term, "bad SwitchInt discr ty {:?}", switch_ty); } @@ -1059,12 +783,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { _ => unreachable!(), }; - self.check_operand(func, term_location); - for arg in args { - self.check_operand(&arg.node, term_location); - } - - let func_ty = func.ty(body, tcx); + let func_ty = func.ty(self.body, tcx); debug!("func_ty.kind: {:?}", func_ty.kind()); let sig = match func_ty.kind() { @@ -1132,7 +851,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } if let TerminatorKind::Call { destination, target, .. } = term.kind { - self.check_call_dest(body, term, &sig, destination, target, term_location); + self.check_call_dest(term, &sig, destination, target, term_location); } // The ordinary liveness rules will ensure that all @@ -1147,32 +866,28 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { self.constraints.liveness_constraints.add_location(region_vid, term_location); } - self.check_call_inputs(body, term, func, &sig, args, term_location, call_source); + self.check_call_inputs(term, func, &sig, args, term_location, call_source); } TerminatorKind::Assert { cond, msg, .. } => { - self.check_operand(cond, term_location); - - let cond_ty = cond.ty(body, tcx); + let cond_ty = cond.ty(self.body, tcx); if cond_ty != tcx.types.bool { span_mirbug!(self, term, "bad Assert ({:?}, not bool", cond_ty); } if let AssertKind::BoundsCheck { len, index } = &**msg { - if len.ty(body, tcx) != tcx.types.usize { + if len.ty(self.body, tcx) != tcx.types.usize { span_mirbug!(self, len, "bounds-check length non-usize {:?}", len) } - if index.ty(body, tcx) != tcx.types.usize { + if index.ty(self.body, tcx) != tcx.types.usize { span_mirbug!(self, index, "bounds-check index non-usize {:?}", index) } } } TerminatorKind::Yield { value, resume_arg, .. 
} => { - self.check_operand(value, term_location); - - match body.yield_ty() { + match self.body.yield_ty() { None => span_mirbug!(self, term, "yield in non-coroutine"), Some(ty) => { - let value_ty = value.ty(body, tcx); + let value_ty = value.ty(self.body, tcx); if let Err(terr) = self.sub_types( value_ty, ty, @@ -1191,10 +906,10 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } - match body.resume_ty() { + match self.body.resume_ty() { None => span_mirbug!(self, term, "yield in non-coroutine"), Some(ty) => { - let resume_ty = resume_arg.ty(body, tcx); + let resume_ty = resume_arg.ty(self.body, tcx); if let Err(terr) = self.sub_types( ty, resume_ty.ty, @@ -1216,395 +931,76 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } - fn check_call_dest( - &mut self, - body: &Body<'tcx>, - term: &Terminator<'tcx>, - sig: &ty::FnSig<'tcx>, - destination: Place<'tcx>, - target: Option<BasicBlock>, - term_location: Location, - ) { - let tcx = self.tcx(); - match target { - Some(_) => { - let dest_ty = destination.ty(body, tcx).ty; - let dest_ty = self.normalize(dest_ty, term_location); - let category = match destination.as_local() { - Some(RETURN_PLACE) => { - if let DefiningTy::Const(def_id, _) | DefiningTy::InlineConst(def_id, _) = - self.universal_regions.defining_ty - { - if tcx.is_static(def_id) { - ConstraintCategory::UseAsStatic - } else { - ConstraintCategory::UseAsConst - } - } else { - ConstraintCategory::Return(ReturnConstraint::Normal) - } - } - Some(l) if !body.local_decls[l].is_user_variable() => { - ConstraintCategory::Boring - } - // The return type of a call is interesting for diagnostics. - _ => ConstraintCategory::Assignment, - }; - - let locations = term_location.to_locations(); - - if let Err(terr) = self.sub_types(sig.output(), dest_ty, locations, category) { - span_mirbug!( - self, - term, - "call dest mismatch ({:?} <- {:?}): {:?}", - dest_ty, - sig.output(), - terr - ); - } - - // When `unsized_fn_params` and `unsized_locals` are both not enabled, - // this check is done at `check_local`. - if self.unsized_feature_enabled() { - let span = term.source_info.span; - self.ensure_place_sized(dest_ty, span); - } - } - None => { - // The signature in this call can reference region variables, - // so erase them before calling a query. - let output_ty = self.tcx().erase_regions(sig.output()); - if !output_ty.is_privately_uninhabited( - self.tcx(), - self.infcx.typing_env(self.infcx.param_env), - ) { - span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig); - } - } - } - } - - #[instrument(level = "debug", skip(self, body, term, func, term_location, call_source))] - fn check_call_inputs( - &mut self, - body: &Body<'tcx>, - term: &Terminator<'tcx>, - func: &Operand<'tcx>, - sig: &ty::FnSig<'tcx>, - args: &[Spanned<Operand<'tcx>>], - term_location: Location, - call_source: CallSource, - ) { - if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) { - span_mirbug!(self, term, "call to {:?} with wrong # of args", sig); - } - - let func_ty = func.ty(body, self.infcx.tcx); - if let ty::FnDef(def_id, _) = *func_ty.kind() { - // Some of the SIMD intrinsics are special: they need a particular argument to be a - // constant. (Eventually this should use const-generics, but those are not up for the - // task yet: https://github.com/rust-lang/rust/issues/85229.) 
- if let Some(name @ (sym::simd_shuffle | sym::simd_insert | sym::simd_extract)) = - self.tcx().intrinsic(def_id).map(|i| i.name) - { - let idx = match name { - sym::simd_shuffle => 2, - _ => 1, - }; - if !matches!(args[idx], Spanned { node: Operand::Constant(_), .. }) { - self.tcx().dcx().emit_err(SimdIntrinsicArgConst { - span: term.source_info.span, - arg: idx + 1, - intrinsic: name.to_string(), - }); - } - } - } - debug!(?func_ty); + fn visit_local_decl(&mut self, local: Local, local_decl: &LocalDecl<'tcx>) { + self.super_local_decl(local, local_decl); - for (n, (fn_arg, op_arg)) in iter::zip(sig.inputs(), args).enumerate() { - let op_arg_ty = op_arg.node.ty(body, self.tcx()); + for user_ty in + local_decl.user_ty.as_deref().into_iter().flat_map(UserTypeProjections::projections) + { + let span = self.user_type_annotations[user_ty.base].span; - let op_arg_ty = self.normalize(op_arg_ty, term_location); - let category = if call_source.from_hir_call() { - ConstraintCategory::CallArgument(Some(self.infcx.tcx.erase_regions(func_ty))) + let ty = if local_decl.is_nonref_binding() { + local_decl.ty + } else if let &ty::Ref(_, rty, _) = local_decl.ty.kind() { + // If we have a binding of the form `let ref x: T = ..` + // then remove the outermost reference so we can check the + // type annotation for the remaining type. + rty } else { - ConstraintCategory::Boring + bug!("{:?} with ref binding has wrong type {}", local, local_decl.ty); }; - if let Err(terr) = - self.sub_types(op_arg_ty, *fn_arg, term_location.to_locations(), category) - { + + if let Err(terr) = self.relate_type_and_user_type( + ty, + ty::Invariant, + user_ty, + Locations::All(span), + ConstraintCategory::TypeAnnotation(AnnotationSource::Declaration), + ) { span_mirbug!( self, - term, - "bad arg #{:?} ({:?} <- {:?}): {:?}", - n, - fn_arg, - op_arg_ty, - terr + local, + "bad user type on variable {:?}: {:?} != {:?} ({:?})", + local, + local_decl.ty, + local_decl.user_ty, + terr, ); } } - } - - fn check_iscleanup(&mut self, body: &Body<'tcx>, block_data: &BasicBlockData<'tcx>) { - let is_cleanup = block_data.is_cleanup; - self.last_span = block_data.terminator().source_info.span; - match block_data.terminator().kind { - TerminatorKind::Goto { target } => { - self.assert_iscleanup(body, block_data, target, is_cleanup) - } - TerminatorKind::SwitchInt { ref targets, .. } => { - for target in targets.all_targets() { - self.assert_iscleanup(body, block_data, *target, is_cleanup); - } - } - TerminatorKind::UnwindResume => { - if !is_cleanup { - span_mirbug!(self, block_data, "resume on non-cleanup block!") - } - } - TerminatorKind::UnwindTerminate(_) => { - if !is_cleanup { - span_mirbug!(self, block_data, "terminate on non-cleanup block!") - } - } - TerminatorKind::Return => { - if is_cleanup { - span_mirbug!(self, block_data, "return on cleanup block") - } - } - TerminatorKind::TailCall { .. } => { - if is_cleanup { - span_mirbug!(self, block_data, "tailcall on cleanup block") - } - } - TerminatorKind::CoroutineDrop { .. } => { - if is_cleanup { - span_mirbug!(self, block_data, "coroutine_drop in cleanup block") - } - } - TerminatorKind::Yield { resume, drop, .. } => { - if is_cleanup { - span_mirbug!(self, block_data, "yield in cleanup block") - } - self.assert_iscleanup(body, block_data, resume, is_cleanup); - if let Some(drop) = drop { - self.assert_iscleanup(body, block_data, drop, is_cleanup); - } - } - TerminatorKind::Unreachable => {} - TerminatorKind::Drop { target, unwind, .. } - | TerminatorKind::Assert { target, unwind, .. 
} => { - self.assert_iscleanup(body, block_data, target, is_cleanup); - self.assert_iscleanup_unwind(body, block_data, unwind, is_cleanup); - } - TerminatorKind::Call { ref target, unwind, .. } => { - if let &Some(target) = target { - self.assert_iscleanup(body, block_data, target, is_cleanup); - } - self.assert_iscleanup_unwind(body, block_data, unwind, is_cleanup); - } - TerminatorKind::FalseEdge { real_target, imaginary_target } => { - self.assert_iscleanup(body, block_data, real_target, is_cleanup); - self.assert_iscleanup(body, block_data, imaginary_target, is_cleanup); - } - TerminatorKind::FalseUnwind { real_target, unwind } => { - self.assert_iscleanup(body, block_data, real_target, is_cleanup); - self.assert_iscleanup_unwind(body, block_data, unwind, is_cleanup); - } - TerminatorKind::InlineAsm { ref targets, unwind, .. } => { - for &target in targets { - self.assert_iscleanup(body, block_data, target, is_cleanup); - } - self.assert_iscleanup_unwind(body, block_data, unwind, is_cleanup); - } - } - } - - fn assert_iscleanup( - &mut self, - body: &Body<'tcx>, - ctxt: &dyn fmt::Debug, - bb: BasicBlock, - iscleanuppad: bool, - ) { - if body[bb].is_cleanup != iscleanuppad { - span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", bb, iscleanuppad); - } - } - - fn assert_iscleanup_unwind( - &mut self, - body: &Body<'tcx>, - ctxt: &dyn fmt::Debug, - unwind: UnwindAction, - is_cleanup: bool, - ) { - match unwind { - UnwindAction::Cleanup(unwind) => { - if is_cleanup { - span_mirbug!(self, ctxt, "unwind on cleanup block") - } - self.assert_iscleanup(body, ctxt, unwind, true); - } - UnwindAction::Continue => { - if is_cleanup { - span_mirbug!(self, ctxt, "unwind on cleanup block") - } - } - UnwindAction::Unreachable | UnwindAction::Terminate(_) => (), - } - } - - fn check_local(&mut self, body: &Body<'tcx>, local: Local, local_decl: &LocalDecl<'tcx>) { - match body.local_kind(local) { - LocalKind::ReturnPointer | LocalKind::Arg => { - // return values of normal functions are required to be - // sized by typeck, but return values of ADT constructors are - // not because we don't include a `Self: Sized` bounds on them. - // - // Unbound parts of arguments were never required to be Sized - // - maybe we should make that a warning. - return; - } - LocalKind::Temp => {} - } // When `unsized_fn_params` or `unsized_locals` is enabled, only function calls // and nullary ops are checked in `check_call_dest`. if !self.unsized_feature_enabled() { - let span = local_decl.source_info.span; - let ty = local_decl.ty; - self.ensure_place_sized(ty, span); - } - } - - fn ensure_place_sized(&mut self, ty: Ty<'tcx>, span: Span) { - let tcx = self.tcx(); - - // Erase the regions from `ty` to get a global type. The - // `Sized` bound in no way depends on precise regions, so this - // shouldn't affect `is_sized`. - let erased_ty = tcx.erase_regions(ty); - // FIXME(#132279): Using `Ty::is_sized` causes us to incorrectly handle opaques here. - if !erased_ty.is_sized(tcx, self.infcx.typing_env(self.infcx.param_env)) { - // in current MIR construction, all non-control-flow rvalue - // expressions evaluate through `as_temp` or `into` a return - // slot or local, so to find all unsized rvalues it is enough - // to check all temps, return slots and locals. - if self.reported_errors.replace((ty, span)).is_none() { - // While this is located in `nll::typeck` this error is not - // an NLL error, it's a required check to prevent creation - // of unsized rvalues in a call expression. 
- self.tcx().dcx().emit_err(MoveUnsized { ty, span }); - } - } - } - - fn aggregate_field_ty( - &mut self, - ak: &AggregateKind<'tcx>, - field_index: FieldIdx, - location: Location, - ) -> Result<Ty<'tcx>, FieldAccessError> { - let tcx = self.tcx(); - - match *ak { - AggregateKind::Adt(adt_did, variant_index, args, _, active_field_index) => { - let def = tcx.adt_def(adt_did); - let variant = &def.variant(variant_index); - let adj_field_index = active_field_index.unwrap_or(field_index); - if let Some(field) = variant.fields.get(adj_field_index) { - Ok(self.normalize(field.ty(tcx, args), location)) - } else { - Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() }) - } - } - AggregateKind::Closure(_, args) => { - match args.as_closure().upvar_tys().get(field_index.as_usize()) { - Some(ty) => Ok(*ty), - None => Err(FieldAccessError::OutOfRange { - field_count: args.as_closure().upvar_tys().len(), - }), - } - } - AggregateKind::Coroutine(_, args) => { - // It doesn't make sense to look at a field beyond the prefix; - // these require a variant index, and are not initialized in - // aggregate rvalues. - match args.as_coroutine().prefix_tys().get(field_index.as_usize()) { - Some(ty) => Ok(*ty), - None => Err(FieldAccessError::OutOfRange { - field_count: args.as_coroutine().prefix_tys().len(), - }), + match self.body.local_kind(local) { + LocalKind::ReturnPointer | LocalKind::Arg => { + // return values of normal functions are required to be + // sized by typeck, but return values of ADT constructors are + // not because we don't include a `Self: Sized` bounds on them. + // + // Unbound parts of arguments were never required to be Sized + // - maybe we should make that a warning. + return; } - } - AggregateKind::CoroutineClosure(_, args) => { - match args.as_coroutine_closure().upvar_tys().get(field_index.as_usize()) { - Some(ty) => Ok(*ty), - None => Err(FieldAccessError::OutOfRange { - field_count: args.as_coroutine_closure().upvar_tys().len(), - }), - } - } - AggregateKind::Array(ty) => Ok(ty), - AggregateKind::Tuple | AggregateKind::RawPtr(..) => { - unreachable!("This should have been covered in check_rvalues"); - } - } - } - - fn check_operand(&mut self, op: &Operand<'tcx>, location: Location) { - debug!(?op, ?location, "check_operand"); - - if let Operand::Constant(constant) = op { - let maybe_uneval = match constant.const_ { - Const::Val(..) 
| Const::Ty(_, _) => None, - Const::Unevaluated(uv, _) => Some(uv), - }; - - if let Some(uv) = maybe_uneval { - if uv.promoted.is_none() { - let tcx = self.tcx(); - let def_id = uv.def; - if tcx.def_kind(def_id) == DefKind::InlineConst { - let def_id = def_id.expect_local(); - let predicates = self.prove_closure_bounds( - tcx, - def_id, - uv.args, - location.to_locations(), - ); - self.normalize_and_prove_instantiated_predicates( - def_id.to_def_id(), - predicates, - location.to_locations(), - ); - } + LocalKind::Temp => { + let span = local_decl.source_info.span; + let ty = local_decl.ty; + self.ensure_place_sized(ty, span); } } } } - #[instrument(skip(self, body), level = "debug")] - fn check_rvalue(&mut self, body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) { + #[instrument(skip(self), level = "debug")] + fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) { + self.super_rvalue(rvalue, location); let tcx = self.tcx(); - let span = body.source_info(location).span; - + let span = self.body.source_info(location).span; match rvalue { - Rvalue::Aggregate(ak, ops) => { - for op in ops { - self.check_operand(op, location); - } - self.check_aggregate_rvalue(body, rvalue, ak, ops, location) - } + Rvalue::Aggregate(ak, ops) => self.check_aggregate_rvalue(rvalue, ak, ops, location), Rvalue::Repeat(operand, len) => { - self.check_operand(operand, location); - - let array_ty = rvalue.ty(body.local_decls(), tcx); + let array_ty = rvalue.ty(self.body.local_decls(), tcx); self.prove_predicate( ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(array_ty.into())), Locations::Single(location), @@ -1623,7 +1019,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } Operand::Move(place) => { // Make sure that repeated elements implement `Copy`. 
- let ty = place.ty(body, tcx).ty; + let ty = place.ty(self.body, tcx).ty; let trait_ref = ty::TraitRef::new( tcx, tcx.require_lang_item(LangItem::Copy, Some(span)), @@ -1656,9 +1052,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { &Rvalue::NullaryOp(NullOp::ContractChecks, _) => {} &Rvalue::NullaryOp(NullOp::UbChecks, _) => {} - Rvalue::ShallowInitBox(operand, ty) => { - self.check_operand(operand, location); - + Rvalue::ShallowInitBox(_operand, ty) => { let trait_ref = ty::TraitRef::new( tcx, tcx.require_lang_item(LangItem::Sized, Some(span)), @@ -1673,12 +1067,10 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } Rvalue::Cast(cast_kind, op, ty) => { - self.check_operand(op, location); - match *cast_kind { CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, coercion_source) => { let is_implicit_coercion = coercion_source == CoercionSource::Implicit; - let src_ty = op.ty(body, tcx); + let src_ty = op.ty(self.body, tcx); let mut src_sig = src_ty.fn_sig(tcx); if let ty::FnDef(def_id, _) = src_ty.kind() && let ty::FnPtr(_, target_hdr) = *ty.kind() @@ -1687,7 +1079,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { && let Some(safe_sig) = tcx.adjust_target_feature_sig( *def_id, src_sig, - body.source.def_id(), + self.body.source.def_id(), ) { src_sig = safe_sig; @@ -1780,7 +1172,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { PointerCoercion::ClosureFnPointer(safety), coercion_source, ) => { - let sig = match op.ty(body, tcx).kind() { + let sig = match op.ty(self.body, tcx).kind() { ty::Closure(_, args) => args.as_closure().sig(), _ => bug!(), }; @@ -1809,7 +1201,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { PointerCoercion::UnsafeFnPointer, coercion_source, ) => { - let fn_sig = op.ty(body, tcx).fn_sig(tcx); + let fn_sig = op.ty(self.body, tcx).fn_sig(tcx); // The type that we see in the fcx is like // `foo::<'a, 'b>`, where `foo` is the path to a @@ -1843,7 +1235,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { let trait_ref = ty::TraitRef::new( tcx, tcx.require_lang_item(LangItem::CoerceUnsized, Some(span)), - [op.ty(body, tcx), ty], + [op.ty(self.body, tcx), ty], ); let is_implicit_coercion = coercion_source == CoercionSource::Implicit; @@ -1869,7 +1261,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { _ => panic!("Invalid dyn* cast_ty"), }; - let self_ty = op.ty(body, tcx); + let self_ty = op.ty(self.body, tcx); let is_implicit_coercion = coercion_source == CoercionSource::Implicit; self.prove_predicates( @@ -1896,7 +1288,8 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { PointerCoercion::MutToConstPointer, coercion_source, ) => { - let ty::RawPtr(ty_from, hir::Mutability::Mut) = op.ty(body, tcx).kind() + let ty::RawPtr(ty_from, hir::Mutability::Mut) = + op.ty(self.body, tcx).kind() else { span_mirbug!(self, rvalue, "unexpected base type for cast {:?}", ty,); return; @@ -1924,7 +1317,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } CastKind::PointerCoercion(PointerCoercion::ArrayToPointer, coercion_source) => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let opt_ty_elem_mut = match ty_from.kind() { ty::RawPtr(array_ty, array_mut) => match array_ty.kind() { @@ -1987,7 +1380,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } CastKind::PointerExposeProvenance => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2005,7 +1398,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } CastKind::PointerWithExposedProvenance => { - let ty_from = op.ty(body, tcx); + let ty_from 
= op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2022,7 +1415,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::IntToInt => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2039,7 +1432,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::IntToFloat => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2056,7 +1449,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::FloatToInt => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2073,7 +1466,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::FloatToFloat => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2090,7 +1483,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::FnPtrToPtr => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2107,7 +1500,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } CastKind::PtrToPtr => { - let ty_from = op.ty(body, tcx); + let ty_from = op.ty(self.body, tcx); let cast_ty_from = CastTy::from_ty(ty_from); let cast_ty_to = CastTy::from_ty(*ty); match (cast_ty_from, cast_ty_to) { @@ -2121,35 +1514,31 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // // Note that other checks (such as denying `dyn Send` -> `dyn // Debug`) are in `rustc_hir_typeck`. - if let ty::Dynamic(src_tty, ..) = src_tail.kind() - && let ty::Dynamic(dst_tty, ..) = dst_tail.kind() + if let ty::Dynamic(src_tty, _src_lt, ty::Dyn) = *src_tail.kind() + && let ty::Dynamic(dst_tty, dst_lt, ty::Dyn) = *dst_tail.kind() && src_tty.principal().is_some() && dst_tty.principal().is_some() { // Remove auto traits. // Auto trait checks are handled in `rustc_hir_typeck` as FCW. - let src_obj = tcx.mk_ty_from_kind(ty::Dynamic( + let src_obj = Ty::new_dynamic( + tcx, tcx.mk_poly_existential_predicates( &src_tty.without_auto_traits().collect::<Vec<_>>(), ), - tcx.lifetimes.re_static, + // FIXME: Once we disallow casting `*const dyn Trait + 'short` + // to `*const dyn Trait + 'long`, then this can just be `src_lt`. 
+ dst_lt, ty::Dyn, - )); - let dst_obj = tcx.mk_ty_from_kind(ty::Dynamic( + ); + let dst_obj = Ty::new_dynamic( + tcx, tcx.mk_poly_existential_predicates( &dst_tty.without_auto_traits().collect::<Vec<_>>(), ), - tcx.lifetimes.re_static, + dst_lt, ty::Dyn, - )); - - // Replace trait object lifetimes with fresh vars, to allow - // casts like - // `*mut dyn FnOnce() + 'a` -> `*mut dyn FnOnce() + 'static` - let src_obj = - freshen_single_trait_object_lifetime(self.infcx, src_obj); - let dst_obj = - freshen_single_trait_object_lifetime(self.infcx, dst_obj); + ); debug!(?src_tty, ?dst_tty, ?src_obj, ?dst_obj); @@ -2187,22 +1576,20 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } Rvalue::Ref(region, _borrow_kind, borrowed_place) => { - self.add_reborrow_constraint(body, location, *region, borrowed_place); + self.add_reborrow_constraint(location, *region, borrowed_place); } Rvalue::BinaryOp( BinOp::Eq | BinOp::Ne | BinOp::Lt | BinOp::Le | BinOp::Gt | BinOp::Ge, box (left, right), ) => { - self.check_operand(left, location); - self.check_operand(right, location); - - let ty_left = left.ty(body, tcx); + let ty_left = left.ty(self.body, tcx); match ty_left.kind() { // Types with regions are comparable if they have a common super-type. ty::RawPtr(_, _) | ty::FnPtr(..) => { - let ty_right = right.ty(body, tcx); - let common_ty = self.infcx.next_ty_var(body.source_info(location).span); + let ty_right = right.ty(self.body, tcx); + let common_ty = + self.infcx.next_ty_var(self.body.source_info(location).span); self.sub_types( ty_left, common_ty, @@ -2231,7 +1618,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { // For types with no regions we can just check that the // both operands have the same type. ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_) - if ty_left == right.ty(body, tcx) => {} + if ty_left == right.ty(self.body, tcx) => {} // Other types are compared by trait methods, not by // `Rvalue::BinaryOp`. _ => span_mirbug!( @@ -2239,28 +1626,13 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { rvalue, "unexpected comparison types {:?} and {:?}", ty_left, - right.ty(body, tcx) + right.ty(self.body, tcx) ), } } - Rvalue::Use(operand) | Rvalue::UnaryOp(_, operand) => { - self.check_operand(operand, location); - } - Rvalue::CopyForDeref(place) => { - let op = &Operand::Copy(*place); - self.check_operand(op, location); - } - - Rvalue::BinaryOp(_, box (left, right)) => { - self.check_operand(left, location); - self.check_operand(right, location); - } - Rvalue::WrapUnsafeBinder(op, ty) => { - self.check_operand(op, location); let operand_ty = op.ty(self.body, self.tcx()); - let ty::UnsafeBinder(binder_ty) = *ty.kind() else { unreachable!(); }; @@ -2278,7 +1650,11 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { .unwrap(); } - Rvalue::RawPtr(..) + Rvalue::Use(_) + | Rvalue::UnaryOp(_, _) + | Rvalue::CopyForDeref(_) + | Rvalue::BinaryOp(..) + | Rvalue::RawPtr(..) | Rvalue::ThreadLocalRef(..) | Rvalue::Len(..) | Rvalue::Discriminant(..) @@ -2286,6 +1662,543 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { } } + #[instrument(level = "debug", skip(self))] + fn visit_operand(&mut self, op: &Operand<'tcx>, location: Location) { + self.super_operand(op, location); + if let Operand::Constant(constant) = op { + let maybe_uneval = match constant.const_ { + Const::Val(..) 
| Const::Ty(_, _) => None, + Const::Unevaluated(uv, _) => Some(uv), + }; + + if let Some(uv) = maybe_uneval { + if uv.promoted.is_none() { + let tcx = self.tcx(); + let def_id = uv.def; + if tcx.def_kind(def_id) == DefKind::InlineConst { + let def_id = def_id.expect_local(); + let predicates = self.prove_closure_bounds( + tcx, + def_id, + uv.args, + location.to_locations(), + ); + self.normalize_and_prove_instantiated_predicates( + def_id.to_def_id(), + predicates, + location.to_locations(), + ); + } + } + } + } + } + + #[instrument(level = "debug", skip(self))] + fn visit_const_operand(&mut self, constant: &ConstOperand<'tcx>, location: Location) { + self.super_const_operand(constant, location); + let ty = constant.const_.ty(); + + self.infcx.tcx.for_each_free_region(&ty, |live_region| { + let live_region_vid = self.universal_regions.to_region_vid(live_region); + self.constraints.liveness_constraints.add_location(live_region_vid, location); + }); + + let locations = location.to_locations(); + if let Some(annotation_index) = constant.user_ty { + if let Err(terr) = self.relate_type_and_user_type( + constant.const_.ty(), + ty::Invariant, + &UserTypeProjection { base: annotation_index, projs: vec![] }, + locations, + ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg), + ) { + let annotation = &self.user_type_annotations[annotation_index]; + span_mirbug!( + self, + constant, + "bad constant user type {:?} vs {:?}: {:?}", + annotation, + constant.const_.ty(), + terr, + ); + } + } else { + let tcx = self.tcx(); + let maybe_uneval = match constant.const_ { + Const::Ty(_, ct) => match ct.kind() { + ty::ConstKind::Unevaluated(uv) => { + Some(UnevaluatedConst { def: uv.def, args: uv.args, promoted: None }) + } + _ => None, + }, + Const::Unevaluated(uv, _) => Some(uv), + _ => None, + }; + + if let Some(uv) = maybe_uneval { + if let Some(promoted) = uv.promoted { + let promoted_body = &self.promoted[promoted]; + self.check_promoted(promoted_body, location); + let promoted_ty = promoted_body.return_ty(); + if let Err(terr) = + self.eq_types(ty, promoted_ty, locations, ConstraintCategory::Boring) + { + span_mirbug!( + self, + promoted, + "bad promoted type ({:?}: {:?}): {:?}", + ty, + promoted_ty, + terr + ); + }; + } else { + self.ascribe_user_type( + constant.const_.ty(), + ty::UserType::new(ty::UserTypeKind::TypeOf( + uv.def, + UserArgs { args: uv.args, user_self_ty: None }, + )), + locations.span(self.body), + ); + } + } else if let Some(static_def_id) = constant.check_static_ptr(tcx) { + let unnormalized_ty = tcx.type_of(static_def_id).instantiate_identity(); + let normalized_ty = self.normalize(unnormalized_ty, locations); + let literal_ty = constant.const_.ty().builtin_deref(true).unwrap(); + + if let Err(terr) = + self.eq_types(literal_ty, normalized_ty, locations, ConstraintCategory::Boring) + { + span_mirbug!(self, constant, "bad static type {:?} ({:?})", constant, terr); + } + } + + if let ty::FnDef(def_id, args) = *constant.const_.ty().kind() { + let instantiated_predicates = tcx.predicates_of(def_id).instantiate(tcx, args); + self.normalize_and_prove_instantiated_predicates( + def_id, + instantiated_predicates, + locations, + ); + + assert!(!matches!( + tcx.impl_of_method(def_id).map(|imp| tcx.def_kind(imp)), + Some(DefKind::Impl { of_trait: true }) + )); + self.prove_predicates( + args.types().map(|ty| ty::ClauseKind::WellFormed(ty.into())), + locations, + ConstraintCategory::Boring, + ); + } + } + } + + fn visit_place(&mut self, place: &Place<'tcx>, context: PlaceContext, 
location: Location) { + self.super_place(place, context, location); + let tcx = self.tcx(); + let place_ty = place.ty(self.body, tcx); + if let PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy) = context { + let trait_ref = ty::TraitRef::new( + tcx, + tcx.require_lang_item(LangItem::Copy, Some(self.last_span)), + [place_ty.ty], + ); + + // To have a `Copy` operand, the type `T` of the + // value must be `Copy`. Note that we prove that `T: Copy`, + // rather than using the `is_copy_modulo_regions` + // test. This is important because + // `is_copy_modulo_regions` ignores the resulting region + // obligations and assumes they pass. This can result in + // bounds from `Copy` impls being unsoundly ignored (e.g., + // #29149). Note that we decide to use `Copy` before knowing + // whether the bounds fully apply: in effect, the rule is + // that if a value of some type could implement `Copy`, then + // it must. + self.prove_trait_ref(trait_ref, location.to_locations(), ConstraintCategory::CopyBound); + } + } + + fn visit_projection_elem( + &mut self, + place: PlaceRef<'tcx>, + elem: PlaceElem<'tcx>, + context: PlaceContext, + location: Location, + ) { + let tcx = self.tcx(); + let base_ty = place.ty(self.body(), tcx); + match elem { + // All these projections don't add any constraints, so there's nothing to + // do here. We check their invariants in the MIR validator after all. + ProjectionElem::Deref + | ProjectionElem::Index(_) + | ProjectionElem::ConstantIndex { .. } + | ProjectionElem::Subslice { .. } + | ProjectionElem::Downcast(..) => {} + ProjectionElem::Field(field, fty) => { + let fty = self.normalize(fty, location); + let ty = base_ty.field_ty(tcx, field); + let ty = self.normalize(ty, location); + debug!(?fty, ?ty); + + if let Err(terr) = self.relate_types( + ty, + context.ambient_variance(), + fty, + location.to_locations(), + ConstraintCategory::Boring, + ) { + span_mirbug!(self, place, "bad field access ({:?}: {:?}): {:?}", ty, fty, terr); + } + } + ProjectionElem::OpaqueCast(ty) => { + let ty = self.normalize(ty, location); + self.relate_types( + ty, + context.ambient_variance(), + base_ty.ty, + location.to_locations(), + ConstraintCategory::TypeAnnotation(AnnotationSource::OpaqueCast), + ) + .unwrap(); + } + ProjectionElem::UnwrapUnsafeBinder(ty) => { + let ty::UnsafeBinder(binder_ty) = *base_ty.ty.kind() else { + unreachable!(); + }; + let found_ty = self.infcx.instantiate_binder_with_fresh_vars( + self.body.source_info(location).span, + BoundRegionConversionTime::HigherRankedType, + binder_ty.into(), + ); + self.relate_types( + ty, + context.ambient_variance(), + found_ty, + location.to_locations(), + ConstraintCategory::Boring, + ) + .unwrap(); + } + ProjectionElem::Subtype(_) => { + bug!("ProjectionElem::Subtype shouldn't exist in borrowck") + } + } + } +} + +impl<'a, 'tcx> TypeChecker<'a, 'tcx> { + fn check_call_dest( + &mut self, + term: &Terminator<'tcx>, + sig: &ty::FnSig<'tcx>, + destination: Place<'tcx>, + target: Option<BasicBlock>, + term_location: Location, + ) { + let tcx = self.tcx(); + match target { + Some(_) => { + let dest_ty = destination.ty(self.body, tcx).ty; + let dest_ty = self.normalize(dest_ty, term_location); + let category = match destination.as_local() { + Some(RETURN_PLACE) => { + if let DefiningTy::Const(def_id, _) | DefiningTy::InlineConst(def_id, _) = + self.universal_regions.defining_ty + { + if tcx.is_static(def_id) { + ConstraintCategory::UseAsStatic + } else { + ConstraintCategory::UseAsConst + } + } else { + 
ConstraintCategory::Return(ReturnConstraint::Normal) + } + } + Some(l) if !self.body.local_decls[l].is_user_variable() => { + ConstraintCategory::Boring + } + // The return type of a call is interesting for diagnostics. + _ => ConstraintCategory::Assignment, + }; + + let locations = term_location.to_locations(); + + if let Err(terr) = self.sub_types(sig.output(), dest_ty, locations, category) { + span_mirbug!( + self, + term, + "call dest mismatch ({:?} <- {:?}): {:?}", + dest_ty, + sig.output(), + terr + ); + } + + // When `unsized_fn_params` and `unsized_locals` are both not enabled, + // this check is done at `check_local`. + if self.unsized_feature_enabled() { + let span = term.source_info.span; + self.ensure_place_sized(dest_ty, span); + } + } + None => { + // The signature in this call can reference region variables, + // so erase them before calling a query. + let output_ty = self.tcx().erase_regions(sig.output()); + if !output_ty.is_privately_uninhabited( + self.tcx(), + self.infcx.typing_env(self.infcx.param_env), + ) { + span_mirbug!(self, term, "call to converging function {:?} w/o dest", sig); + } + } + } + } + + #[instrument(level = "debug", skip(self, term, func, term_location, call_source))] + fn check_call_inputs( + &mut self, + term: &Terminator<'tcx>, + func: &Operand<'tcx>, + sig: &ty::FnSig<'tcx>, + args: &[Spanned<Operand<'tcx>>], + term_location: Location, + call_source: CallSource, + ) { + if args.len() < sig.inputs().len() || (args.len() > sig.inputs().len() && !sig.c_variadic) { + span_mirbug!(self, term, "call to {:?} with wrong # of args", sig); + } + + let func_ty = func.ty(self.body, self.infcx.tcx); + if let ty::FnDef(def_id, _) = *func_ty.kind() { + // Some of the SIMD intrinsics are special: they need a particular argument to be a + // constant. (Eventually this should use const-generics, but those are not up for the + // task yet: https://github.com/rust-lang/rust/issues/85229.) + if let Some(name @ (sym::simd_shuffle | sym::simd_insert | sym::simd_extract)) = + self.tcx().intrinsic(def_id).map(|i| i.name) + { + let idx = match name { + sym::simd_shuffle => 2, + _ => 1, + }; + if !matches!(args[idx], Spanned { node: Operand::Constant(_), .. }) { + self.tcx().dcx().emit_err(SimdIntrinsicArgConst { + span: term.source_info.span, + arg: idx + 1, + intrinsic: name.to_string(), + }); + } + } + } + debug!(?func_ty); + + for (n, (fn_arg, op_arg)) in iter::zip(sig.inputs(), args).enumerate() { + let op_arg_ty = op_arg.node.ty(self.body, self.tcx()); + + let op_arg_ty = self.normalize(op_arg_ty, term_location); + let category = if call_source.from_hir_call() { + ConstraintCategory::CallArgument(Some(self.infcx.tcx.erase_regions(func_ty))) + } else { + ConstraintCategory::Boring + }; + if let Err(terr) = + self.sub_types(op_arg_ty, *fn_arg, term_location.to_locations(), category) + { + span_mirbug!( + self, + term, + "bad arg #{:?} ({:?} <- {:?}): {:?}", + n, + fn_arg, + op_arg_ty, + terr + ); + } + } + } + + fn check_iscleanup(&mut self, block_data: &BasicBlockData<'tcx>) { + let is_cleanup = block_data.is_cleanup; + match block_data.terminator().kind { + TerminatorKind::Goto { target } => { + self.assert_iscleanup(block_data, target, is_cleanup) + } + TerminatorKind::SwitchInt { ref targets, .. 
} => { + for target in targets.all_targets() { + self.assert_iscleanup(block_data, *target, is_cleanup); + } + } + TerminatorKind::UnwindResume => { + if !is_cleanup { + span_mirbug!(self, block_data, "resume on non-cleanup block!") + } + } + TerminatorKind::UnwindTerminate(_) => { + if !is_cleanup { + span_mirbug!(self, block_data, "terminate on non-cleanup block!") + } + } + TerminatorKind::Return => { + if is_cleanup { + span_mirbug!(self, block_data, "return on cleanup block") + } + } + TerminatorKind::TailCall { .. } => { + if is_cleanup { + span_mirbug!(self, block_data, "tailcall on cleanup block") + } + } + TerminatorKind::CoroutineDrop { .. } => { + if is_cleanup { + span_mirbug!(self, block_data, "coroutine_drop in cleanup block") + } + } + TerminatorKind::Yield { resume, drop, .. } => { + if is_cleanup { + span_mirbug!(self, block_data, "yield in cleanup block") + } + self.assert_iscleanup(block_data, resume, is_cleanup); + if let Some(drop) = drop { + self.assert_iscleanup(block_data, drop, is_cleanup); + } + } + TerminatorKind::Unreachable => {} + TerminatorKind::Drop { target, unwind, .. } + | TerminatorKind::Assert { target, unwind, .. } => { + self.assert_iscleanup(block_data, target, is_cleanup); + self.assert_iscleanup_unwind(block_data, unwind, is_cleanup); + } + TerminatorKind::Call { ref target, unwind, .. } => { + if let &Some(target) = target { + self.assert_iscleanup(block_data, target, is_cleanup); + } + self.assert_iscleanup_unwind(block_data, unwind, is_cleanup); + } + TerminatorKind::FalseEdge { real_target, imaginary_target } => { + self.assert_iscleanup(block_data, real_target, is_cleanup); + self.assert_iscleanup(block_data, imaginary_target, is_cleanup); + } + TerminatorKind::FalseUnwind { real_target, unwind } => { + self.assert_iscleanup(block_data, real_target, is_cleanup); + self.assert_iscleanup_unwind(block_data, unwind, is_cleanup); + } + TerminatorKind::InlineAsm { ref targets, unwind, .. } => { + for &target in targets { + self.assert_iscleanup(block_data, target, is_cleanup); + } + self.assert_iscleanup_unwind(block_data, unwind, is_cleanup); + } + } + } + + fn assert_iscleanup(&mut self, ctxt: &dyn fmt::Debug, bb: BasicBlock, iscleanuppad: bool) { + if self.body[bb].is_cleanup != iscleanuppad { + span_mirbug!(self, ctxt, "cleanuppad mismatch: {:?} should be {:?}", bb, iscleanuppad); + } + } + + fn assert_iscleanup_unwind( + &mut self, + ctxt: &dyn fmt::Debug, + unwind: UnwindAction, + is_cleanup: bool, + ) { + match unwind { + UnwindAction::Cleanup(unwind) => { + if is_cleanup { + span_mirbug!(self, ctxt, "unwind on cleanup block") + } + self.assert_iscleanup(ctxt, unwind, true); + } + UnwindAction::Continue => { + if is_cleanup { + span_mirbug!(self, ctxt, "unwind on cleanup block") + } + } + UnwindAction::Unreachable | UnwindAction::Terminate(_) => (), + } + } + + fn ensure_place_sized(&mut self, ty: Ty<'tcx>, span: Span) { + let tcx = self.tcx(); + + // Erase the regions from `ty` to get a global type. The + // `Sized` bound in no way depends on precise regions, so this + // shouldn't affect `is_sized`. + let erased_ty = tcx.erase_regions(ty); + // FIXME(#132279): Using `Ty::is_sized` causes us to incorrectly handle opaques here. + if !erased_ty.is_sized(tcx, self.infcx.typing_env(self.infcx.param_env)) { + // in current MIR construction, all non-control-flow rvalue + // expressions evaluate through `as_temp` or `into` a return + // slot or local, so to find all unsized rvalues it is enough + // to check all temps, return slots and locals. 
+ if self.reported_errors.replace((ty, span)).is_none() { + // While this is located in `nll::typeck` this error is not + // an NLL error, it's a required check to prevent creation + // of unsized rvalues in a call expression. + self.tcx().dcx().emit_err(MoveUnsized { ty, span }); + } + } + } + + fn aggregate_field_ty( + &mut self, + ak: &AggregateKind<'tcx>, + field_index: FieldIdx, + location: Location, + ) -> Result<Ty<'tcx>, FieldAccessError> { + let tcx = self.tcx(); + + match *ak { + AggregateKind::Adt(adt_did, variant_index, args, _, active_field_index) => { + let def = tcx.adt_def(adt_did); + let variant = &def.variant(variant_index); + let adj_field_index = active_field_index.unwrap_or(field_index); + if let Some(field) = variant.fields.get(adj_field_index) { + Ok(self.normalize(field.ty(tcx, args), location)) + } else { + Err(FieldAccessError::OutOfRange { field_count: variant.fields.len() }) + } + } + AggregateKind::Closure(_, args) => { + match args.as_closure().upvar_tys().get(field_index.as_usize()) { + Some(ty) => Ok(*ty), + None => Err(FieldAccessError::OutOfRange { + field_count: args.as_closure().upvar_tys().len(), + }), + } + } + AggregateKind::Coroutine(_, args) => { + // It doesn't make sense to look at a field beyond the prefix; + // these require a variant index, and are not initialized in + // aggregate rvalues. + match args.as_coroutine().prefix_tys().get(field_index.as_usize()) { + Some(ty) => Ok(*ty), + None => Err(FieldAccessError::OutOfRange { + field_count: args.as_coroutine().prefix_tys().len(), + }), + } + } + AggregateKind::CoroutineClosure(_, args) => { + match args.as_coroutine_closure().upvar_tys().get(field_index.as_usize()) { + Some(ty) => Ok(*ty), + None => Err(FieldAccessError::OutOfRange { + field_count: args.as_coroutine_closure().upvar_tys().len(), + }), + } + } + AggregateKind::Array(ty) => Ok(ty), + AggregateKind::Tuple | AggregateKind::RawPtr(..) => { + unreachable!("This should have been covered in check_rvalues"); + } + } + } + /// If this rvalue supports a user-given type annotation, then /// extract and return it. This represents the final type of the /// rvalue and will be unified with the inferred type. 
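// Editor's illustration (not part of this patch): the shape of MIR aggregate that the
// `aggregate_field_ty` helper above resolves field types for. A hedged sketch with
// made-up names (`Pair`, `build`): for `let p = Pair { a: x, b: y };` MIR builds an
// `AggregateKind::Adt` rvalue with one operand per field, and the type check normalizes
// each declared field type (here `u32` and `&'a str`) and requires the corresponding
// operand's type to be a subtype of it.
struct Pair<'a> { a: u32, b: &'a str }
fn build<'a>(x: u32, y: &'a str) -> Pair<'a> {
    Pair { a: x, b: y }
}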
@@ -2320,7 +2233,6 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { fn check_aggregate_rvalue( &mut self, - body: &Body<'tcx>, rvalue: &Rvalue<'tcx>, aggregate_kind: &AggregateKind<'tcx>, operands: &IndexSlice<FieldIdx, Operand<'tcx>>, @@ -2353,7 +2265,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { continue; } }; - let operand_ty = operand.ty(body, tcx); + let operand_ty = operand.ty(self.body, tcx); let operand_ty = self.normalize(operand_ty, location); if let Err(terr) = self.sub_types( @@ -2383,7 +2295,6 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { /// - `borrowed_place`: the place `P` being borrowed fn add_reborrow_constraint( &mut self, - body: &Body<'tcx>, location: Location, borrow_region: ty::Region<'tcx>, borrowed_place: &Place<'tcx>, @@ -2422,7 +2333,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { let def = self.body.source.def_id().expect_local(); let upvars = tcx.closure_captures(def); let field = - path_utils::is_upvar_field_projection(tcx, upvars, borrowed_place.as_ref(), body); + path_utils::is_upvar_field_projection(tcx, upvars, borrowed_place.as_ref(), self.body); let category = if let Some(field) = field { ConstraintCategory::ClosureUpvar(field) } else { @@ -2434,7 +2345,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { match elem { ProjectionElem::Deref => { - let base_ty = base.ty(body, tcx).ty; + let base_ty = base.ty(self.body, tcx).ty; debug!("add_reborrow_constraint - base_ty = {:?}", base_ty); match base_ty.kind() { @@ -2443,7 +2354,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { sup: ref_region.as_var(), sub: borrow_region.as_var(), locations: location.to_locations(), - span: location.to_locations().span(body), + span: location.to_locations().span(self.body), category, variance_info: ty::VarianceDiagInfo::default(), from_closure: false, @@ -2587,7 +2498,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ConstraintCategory::Boring, // same as above. self.constraints, ) - .apply_closure_requirements(closure_requirements, def_id.to_def_id(), args); + .apply_closure_requirements(closure_requirements, def_id, args); } // Now equate closure args to regions inherited from `typeck_root_def_id`. Fixes #98589. 
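// Editor's illustration (not part of this patch): the constraint that
// `add_reborrow_constraint` above generates for a `Deref` projection. A hedged sketch;
// for `&*p`, the region of the reference being dereferenced must outlive the region of
// the new borrow (the `sup`/`sub` pair pushed above, here `'long` and `'short`).
fn reborrow<'long, 'short>(p: &'long u32) -> &'short u32
where
    'long: 'short, // the outlives relation the reborrow constraint demands
{
    &*p
}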
@@ -2627,30 +2538,6 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { tcx.predicates_of(def_id).instantiate(tcx, args) } - - #[instrument(skip(self, body), level = "debug")] - fn typeck_mir(&mut self, body: &Body<'tcx>) { - self.last_span = body.span; - debug!(?body.span); - - for (local, local_decl) in body.local_decls.iter_enumerated() { - self.check_local(body, local, local_decl); - } - - for (block, block_data) in body.basic_blocks.iter_enumerated() { - let mut location = Location { block, statement_index: 0 }; - for stmt in &block_data.statements { - if !stmt.source_info.span.is_dummy() { - self.last_span = stmt.source_info.span; - } - self.check_stmt(body, stmt, location); - location.statement_index += 1; - } - - self.check_terminator(body, block_data.terminator(), location); - self.check_iscleanup(body, block_data); - } - } } trait NormalizeLocation: fmt::Debug + Copy { @@ -2707,16 +2594,3 @@ impl<'tcx> TypeOp<'tcx> for InstantiateOpaqueType<'tcx> { Ok(output) } } - -fn freshen_single_trait_object_lifetime<'tcx>( - infcx: &BorrowckInferCtxt<'tcx>, - ty: Ty<'tcx>, -) -> Ty<'tcx> { - let &ty::Dynamic(tty, _, dyn_kind @ ty::Dyn) = ty.kind() else { bug!("expected trait object") }; - - let fresh = infcx - .next_region_var(rustc_infer::infer::RegionVariableOrigin::MiscVariable(DUMMY_SP), || { - RegionCtxt::Unknown - }); - infcx.tcx.mk_ty_from_kind(ty::Dynamic(tty, fresh, dyn_kind)) -} diff --git a/compiler/rustc_borrowck/src/type_check/opaque_types.rs b/compiler/rustc_borrowck/src/type_check/opaque_types.rs index 17482cc0cbd..94b3d0c2bbf 100644 --- a/compiler/rustc_borrowck/src/type_check/opaque_types.rs +++ b/compiler/rustc_borrowck/src/type_check/opaque_types.rs @@ -180,6 +180,7 @@ pub(super) fn take_opaques_and_register_member_constraints<'tcx>( /// // Equivalent to: /// # mod dummy { use super::*; /// type FooReturn<'a, T> = impl Foo<'a>; +/// #[define_opaque(FooReturn)] /// fn foo<'a, T>(x: &'a u32, y: T) -> FooReturn<'a, T> { /// (x, y) /// } diff --git a/compiler/rustc_builtin_macros/Cargo.toml b/compiler/rustc_builtin_macros/Cargo.toml index b5f4f2efd1f..1289d21308b 100644 --- a/compiler/rustc_builtin_macros/Cargo.toml +++ b/compiler/rustc_builtin_macros/Cargo.toml @@ -3,10 +3,6 @@ name = "rustc_builtin_macros" version = "0.0.0" edition = "2024" - -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(llvm_enzyme)'] } - [lib] doctest = false diff --git a/compiler/rustc_builtin_macros/messages.ftl b/compiler/rustc_builtin_macros/messages.ftl index 4cac7cb93f5..3f03834f8d7 100644 --- a/compiler/rustc_builtin_macros/messages.ftl +++ b/compiler/rustc_builtin_macros/messages.ftl @@ -75,9 +75,10 @@ builtin_macros_autodiff_mode = unknown Mode: `{$mode}`. 
Use `Forward` or `Revers builtin_macros_autodiff_mode_activity = {$act} can not be used in {$mode} Mode builtin_macros_autodiff_not_build = this rustc version does not support autodiff builtin_macros_autodiff_number_activities = expected {$expected} activities, but found {$found} +builtin_macros_autodiff_ret_activity = invalid return activity {$act} in {$mode} Mode builtin_macros_autodiff_ty_activity = {$act} can not be used for this type - builtin_macros_autodiff_unknown_activity = did not recognize Activity: `{$act}` + builtin_macros_bad_derive_target = `derive` may only be applied to `struct`s, `enum`s and `union`s .label = not applicable here .label2 = not a `struct`, `enum` or `union` diff --git a/compiler/rustc_builtin_macros/src/alloc_error_handler.rs b/compiler/rustc_builtin_macros/src/alloc_error_handler.rs index 78bf2195975..1c1b2c88f76 100644 --- a/compiler/rustc_builtin_macros/src/alloc_error_handler.rs +++ b/compiler/rustc_builtin_macros/src/alloc_error_handler.rs @@ -88,6 +88,7 @@ fn generate_handler(cx: &ExtCtxt<'_>, handler: Ident, span: Span, sig_span: Span generics: Generics::default(), contract: None, body, + define_opaque: None, })); let attrs = thin_vec![cx.attr_word(sym::rustc_std_internal_symbol, span)]; diff --git a/compiler/rustc_builtin_macros/src/assert/context.rs b/compiler/rustc_builtin_macros/src/assert/context.rs index bb9dc651cec..a949ab94f3a 100644 --- a/compiler/rustc_builtin_macros/src/assert/context.rs +++ b/compiler/rustc_builtin_macros/src/assert/context.rs @@ -297,6 +297,7 @@ impl<'cx, 'a> Context<'cx, 'a> { | ExprKind::AssignOp(_, _, _) | ExprKind::Gen(_, _, _, _) | ExprKind::Await(_, _) + | ExprKind::Use(_, _) | ExprKind::Block(_, _) | ExprKind::Break(_, _) | ExprKind::Closure(_) diff --git a/compiler/rustc_builtin_macros/src/autodiff.rs b/compiler/rustc_builtin_macros/src/autodiff.rs index 8d7ac6de801..dcd3c1ce8d9 100644 --- a/compiler/rustc_builtin_macros/src/autodiff.rs +++ b/compiler/rustc_builtin_macros/src/autodiff.rs @@ -3,13 +3,13 @@ //! configs (autodiff enabled or disabled), so we have to add cfg's to each import. //! FIXME(ZuseZ4): Remove this once we have a smarter linter. -#[cfg(llvm_enzyme)] mod llvm_enzyme { use std::str::FromStr; use std::string::String; use rustc_ast::expand::autodiff_attrs::{ - AutoDiffAttrs, DiffActivity, DiffMode, valid_input_activity, valid_ty_for_activity, + AutoDiffAttrs, DiffActivity, DiffMode, valid_input_activity, valid_ret_activity, + valid_ty_for_activity, }; use rustc_ast::ptr::P; use rustc_ast::token::{Token, TokenKind}; @@ -130,10 +130,14 @@ mod llvm_enzyme { meta_item: &ast::MetaItem, mut item: Annotatable, ) -> Vec<Annotatable> { + if cfg!(not(llvm_enzyme)) { + ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span }); + return vec![item]; + } let dcx = ecx.sess.dcx(); // first get the annotable item: let (sig, is_impl): (FnSig, bool) = match &item { - Annotatable::Item(ref iitem) => { + Annotatable::Item(iitem) => { let sig = match &iitem.kind { ItemKind::Fn(box ast::Fn { sig, .. }) => sig, _ => { @@ -143,7 +147,7 @@ mod llvm_enzyme { }; (sig.clone(), false) } - Annotatable::AssocItem(ref assoc_item, _) => { + Annotatable::AssocItem(assoc_item, _) => { let sig = match &assoc_item.kind { ast::AssocItemKind::Fn(box ast::Fn { sig, .. 
}) => sig, _ => { @@ -171,8 +175,8 @@ mod llvm_enzyme { let sig_span = ecx.with_call_site_ctxt(sig.span); let (vis, primal) = match &item { - Annotatable::Item(ref iitem) => (iitem.vis.clone(), iitem.ident.clone()), - Annotatable::AssocItem(ref assoc_item, _) => { + Annotatable::Item(iitem) => (iitem.vis.clone(), iitem.ident.clone()), + Annotatable::AssocItem(assoc_item, _) => { (assoc_item.vis.clone(), assoc_item.ident.clone()) } _ => { @@ -244,6 +248,7 @@ mod llvm_enzyme { generics: Generics::default(), contract: None, body: Some(d_body), + define_opaque: None, }); let mut rustc_ad_attr = P(ast::NormalAttr::from_ident(Ident::with_dummy_span(sym::rustc_autodiff))); @@ -283,7 +288,7 @@ mod llvm_enzyme { let orig_annotatable: Annotatable = match item { Annotatable::Item(ref mut iitem) => { if !iitem.attrs.iter().any(|a| a.id == attr.id) { - iitem.attrs.push(attr.clone()); + iitem.attrs.push(attr); } if !iitem.attrs.iter().any(|a| a.id == inline_never.id) { iitem.attrs.push(inline_never.clone()); @@ -292,7 +297,7 @@ mod llvm_enzyme { } Annotatable::AssocItem(ref mut assoc_item, i @ Impl) => { if !assoc_item.attrs.iter().any(|a| a.id == attr.id) { - assoc_item.attrs.push(attr.clone()); + assoc_item.attrs.push(attr); } if !assoc_item.attrs.iter().any(|a| a.id == inline_never.id) { assoc_item.attrs.push(inline_never.clone()); @@ -319,7 +324,7 @@ mod llvm_enzyme { let d_annotatable = if is_impl { let assoc_item: AssocItemKind = ast::AssocItemKind::Fn(asdf); let d_fn = P(ast::AssocItem { - attrs: thin_vec![d_attr.clone(), inline_never], + attrs: thin_vec![d_attr, inline_never], id: ast::DUMMY_NODE_ID, span, vis, @@ -329,12 +334,8 @@ mod llvm_enzyme { }); Annotatable::AssocItem(d_fn, Impl) } else { - let mut d_fn = ecx.item( - span, - d_ident, - thin_vec![d_attr.clone(), inline_never], - ItemKind::Fn(asdf), - ); + let mut d_fn = + ecx.item(span, d_ident, thin_vec![d_attr, inline_never], ItemKind::Fn(asdf)); d_fn.vis = vis; Annotatable::Item(d_fn) }; @@ -443,7 +444,7 @@ mod llvm_enzyme { if primal_ret && n_active == 0 && x.mode.is_rev() { // We only have the primal ret. - body.stmts.push(ecx.stmt_expr(black_box_primal_call.clone())); + body.stmts.push(ecx.stmt_expr(black_box_primal_call)); return body; } @@ -468,7 +469,7 @@ mod llvm_enzyme { if primal_ret { // We have both primal ret and active floats. // primal ret is first, by construction. - exprs.push(primal_call.clone()); + exprs.push(primal_call); } // Now construct default placeholder for each active float. 
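// Editor's illustration (not part of this patch): roughly the user-facing input that the
// expansion code above operates on. A hedged sketch assuming the nightly `autodiff`
// feature; the generated name (`d_square`) is user-chosen, and the exact expanded
// signature depends on the mode and activities and may differ between compiler versions.
// #![feature(autodiff)]
// use std::autodiff::autodiff;
//
// #[autodiff(d_square, Reverse, Duplicated, Active)]
// fn square(x: &f64) -> f64 { x * x }
//
// The macro keeps `square` as the primal and emits a sibling `d_square` whose body calls
// the primal (wrapped in `black_box` where needed, as handled above) and carries the
// extra shadow/seed arguments implied by the chosen activities.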
@@ -535,16 +536,11 @@ mod llvm_enzyme { return body; } [arg] => { - ret = ecx.expr_call( - new_decl_span, - blackbox_call_expr.clone(), - thin_vec![arg.clone()], - ); + ret = ecx.expr_call(new_decl_span, blackbox_call_expr, thin_vec![arg.clone()]); } args => { let ret_tuple: P<ast::Expr> = ecx.expr_tuple(span, args.into()); - ret = - ecx.expr_call(new_decl_span, blackbox_call_expr.clone(), thin_vec![ret_tuple]); + ret = ecx.expr_call(new_decl_span, blackbox_call_expr, thin_vec![ret_tuple]); } } assert!(has_ret(&d_sig.decl.output)); @@ -564,7 +560,7 @@ mod llvm_enzyme { let args: ThinVec<_> = idents[1..].iter().map(|arg| ecx.expr_path(ecx.path_ident(span, *arg))).collect(); let self_expr = ecx.expr_self(span); - ecx.expr_method_call(span, self_expr, primal, args.clone()) + ecx.expr_method_call(span, self_expr, primal, args) } else { let args: ThinVec<_> = idents.iter().map(|arg| ecx.expr_path(ecx.path_ident(span, *arg))).collect(); @@ -582,6 +578,8 @@ mod llvm_enzyme { // // Error handling: If the user provides an invalid configuration (incorrect numbers, types, or // both), we emit an error and return the original signature. This allows us to continue parsing. + // FIXME(Sa4dUs): make individual activities' span available so errors + // can point to only the activity instead of the entire attribute fn gen_enzyme_decl( ecx: &ExtCtxt<'_>, sig: &ast::FnSig, @@ -629,10 +627,22 @@ mod llvm_enzyme { errors = true; } } + + if has_ret && !valid_ret_activity(x.mode, x.ret_activity) { + dcx.emit_err(errors::AutoDiffInvalidRetAct { + span, + mode: x.mode.to_string(), + act: x.ret_activity.to_string(), + }); + // We don't set `errors = true` to avoid annoying type errors relative + // to the expanded macro type signature + } + if errors { // This is not the right signature, but we can continue parsing. return (sig.clone(), new_inputs, idents, true); } + let unsafe_activities = x .input_activity .iter() @@ -801,25 +811,4 @@ mod llvm_enzyme { } } -#[cfg(not(llvm_enzyme))] -mod ad_fallback { - use rustc_ast::ast; - use rustc_expand::base::{Annotatable, ExtCtxt}; - use rustc_span::Span; - - use crate::errors; - pub(crate) fn expand( - ecx: &mut ExtCtxt<'_>, - _expand_span: Span, - meta_item: &ast::MetaItem, - item: Annotatable, - ) -> Vec<Annotatable> { - ecx.sess.dcx().emit_err(errors::AutoDiffSupportNotBuild { span: meta_item.span }); - return vec![item]; - } -} - -#[cfg(not(llvm_enzyme))] -pub(crate) use ad_fallback::expand; -#[cfg(llvm_enzyme)] pub(crate) use llvm_enzyme::expand; diff --git a/compiler/rustc_builtin_macros/src/cfg_eval.rs b/compiler/rustc_builtin_macros/src/cfg_eval.rs index 53c61831b42..5788966b6e7 100644 --- a/compiler/rustc_builtin_macros/src/cfg_eval.rs +++ b/compiler/rustc_builtin_macros/src/cfg_eval.rs @@ -140,8 +140,9 @@ impl CfgEval<'_> { Annotatable::ForeignItem(self.flat_map_foreign_item(item).pop().unwrap()) } Annotatable::Stmt(_) => { - let stmt = - parser.parse_stmt_without_recovery(false, ForceCollect::Yes)?.unwrap(); + let stmt = parser + .parse_stmt_without_recovery(false, ForceCollect::Yes, false)? 
+ .unwrap(); Annotatable::Stmt(P(self.flat_map_stmt(stmt).pop().unwrap())) } Annotatable::Expr(_) => { diff --git a/compiler/rustc_builtin_macros/src/define_opaque.rs b/compiler/rustc_builtin_macros/src/define_opaque.rs new file mode 100644 index 00000000000..9777e772cf2 --- /dev/null +++ b/compiler/rustc_builtin_macros/src/define_opaque.rs @@ -0,0 +1,54 @@ +use rustc_ast::{DUMMY_NODE_ID, ast}; +use rustc_expand::base::{Annotatable, ExtCtxt}; +use rustc_span::Span; + +pub(crate) fn expand( + ecx: &mut ExtCtxt<'_>, + _expand_span: Span, + meta_item: &ast::MetaItem, + mut item: Annotatable, +) -> Vec<Annotatable> { + let define_opaque = match &mut item { + Annotatable::Item(p) => match &mut p.kind { + ast::ItemKind::Fn(f) => Some(&mut f.define_opaque), + _ => None, + }, + Annotatable::AssocItem(i, _assoc_ctxt) => match &mut i.kind { + ast::AssocItemKind::Fn(func) => Some(&mut func.define_opaque), + _ => None, + }, + Annotatable::Stmt(s) => match &mut s.kind { + ast::StmtKind::Item(p) => match &mut p.kind { + ast::ItemKind::Fn(f) => Some(&mut f.define_opaque), + _ => None, + }, + _ => None, + }, + _ => None, + }; + + let Some(list) = meta_item.meta_item_list() else { + ecx.dcx().span_err(meta_item.span, "expected list of type aliases"); + return vec![item]; + }; + + if let Some(define_opaque) = define_opaque { + *define_opaque = Some( + list.iter() + .filter_map(|entry| match entry { + ast::MetaItemInner::MetaItem(meta_item) if meta_item.is_word() => { + Some((DUMMY_NODE_ID, meta_item.path.clone())) + } + _ => { + ecx.dcx().span_err(entry.span(), "expected path to type alias"); + None + } + }) + .collect(), + ); + } else { + ecx.dcx().span_err(meta_item.span, "only functions and methods can define opaque types"); + } + + vec![item] +} diff --git a/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs b/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs index 5aed9f76f14..46b79e09780 100644 --- a/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs +++ b/compiler/rustc_builtin_macros/src/deriving/coerce_pointee.rs @@ -300,13 +300,16 @@ pub(crate) fn expand_deriving_coerce_pointee( to_ty: &s_ty, rewritten: false, }; - let mut predicate = ast::WherePredicate { - kind: ast::WherePredicateKind::BoundPredicate(bound.clone()), - span: predicate.span, - id: ast::DUMMY_NODE_ID, - }; - substitution.visit_where_predicate(&mut predicate); + let mut kind = ast::WherePredicateKind::BoundPredicate(bound.clone()); + substitution.visit_where_predicate_kind(&mut kind); if substitution.rewritten { + let predicate = ast::WherePredicate { + attrs: predicate.attrs.clone(), + kind, + span: predicate.span, + id: ast::DUMMY_NODE_ID, + is_placeholder: false, + }; impl_generics.where_clause.predicates.push(predicate); } } @@ -388,8 +391,8 @@ impl<'a> ast::mut_visit::MutVisitor for TypeSubstitution<'a> { } } - fn visit_where_predicate(&mut self, where_predicate: &mut ast::WherePredicate) { - match &mut where_predicate.kind { + fn visit_where_predicate_kind(&mut self, kind: &mut ast::WherePredicateKind) { + match kind { rustc_ast::WherePredicateKind::BoundPredicate(bound) => { bound .bound_generic_params diff --git a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs index 6b59ac25827..03ee59de70e 100644 --- a/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs +++ b/compiler/rustc_builtin_macros/src/deriving/generic/mod.rs @@ -687,9 +687,11 @@ impl<'a> TraitDef<'a> { // and similarly for where clauses 
where_clause.predicates.extend(generics.where_clause.predicates.iter().map(|clause| { ast::WherePredicate { + attrs: clause.attrs.clone(), kind: clause.kind.clone(), id: ast::DUMMY_NODE_ID, span: clause.span.with_ctxt(ctxt), + is_placeholder: false, } })); @@ -744,8 +746,13 @@ impl<'a> TraitDef<'a> { }; let kind = ast::WherePredicateKind::BoundPredicate(predicate); - let predicate = - ast::WherePredicate { kind, id: ast::DUMMY_NODE_ID, span: self.span }; + let predicate = ast::WherePredicate { + attrs: ThinVec::new(), + kind, + id: ast::DUMMY_NODE_ID, + span: self.span, + is_placeholder: false, + }; where_clause.predicates.push(predicate); } } @@ -1033,6 +1040,7 @@ impl<'a> MethodDef<'a> { generics: fn_generics, contract: None, body: Some(body_block), + define_opaque: None, })), tokens: None, }) diff --git a/compiler/rustc_builtin_macros/src/errors.rs b/compiler/rustc_builtin_macros/src/errors.rs index 6213bd802c7..30597944124 100644 --- a/compiler/rustc_builtin_macros/src/errors.rs +++ b/compiler/rustc_builtin_macros/src/errors.rs @@ -144,10 +144,8 @@ pub(crate) struct AllocMustStatics { pub(crate) span: Span, } -#[cfg(llvm_enzyme)] pub(crate) use autodiff::*; -#[cfg(llvm_enzyme)] mod autodiff { use super::*; #[derive(Diagnostic)] @@ -188,6 +186,15 @@ mod autodiff { } #[derive(Diagnostic)] + #[diag(builtin_macros_autodiff_ret_activity)] + pub(crate) struct AutoDiffInvalidRetAct { + #[primary_span] + pub(crate) span: Span, + pub(crate) mode: String, + pub(crate) act: String, + } + + #[derive(Diagnostic)] #[diag(builtin_macros_autodiff_mode)] pub(crate) struct AutoDiffInvalidMode { #[primary_span] @@ -203,9 +210,7 @@ mod autodiff { } } -#[cfg(not(llvm_enzyme))] pub(crate) use ad_fallback::*; -#[cfg(not(llvm_enzyme))] mod ad_fallback { use super::*; #[derive(Diagnostic)] diff --git a/compiler/rustc_builtin_macros/src/format.rs b/compiler/rustc_builtin_macros/src/format.rs index 90447da6680..12654001a1e 100644 --- a/compiler/rustc_builtin_macros/src/format.rs +++ b/compiler/rustc_builtin_macros/src/format.rs @@ -190,7 +190,8 @@ fn make_format_args( && let [stmt] = block.stmts.as_slice() && let StmtKind::Expr(expr) = &stmt.kind && let ExprKind::Path(None, path) = &expr.kind - && path.is_potential_trivial_const_arg() + && path.segments.len() == 1 + && path.segments[0].args.is_none() { err.multipart_suggestion( "quote your inlined format argument to use as string literal", @@ -710,11 +711,9 @@ fn report_missing_placeholders( }; let pos = sub.position(); - let sub = String::from(sub.as_str()); - if explained.contains(&sub) { + if !explained.insert(sub.to_string()) { continue; } - explained.insert(sub); if !found_foreign { found_foreign = true; diff --git a/compiler/rustc_builtin_macros/src/format_foreign.rs b/compiler/rustc_builtin_macros/src/format_foreign.rs index 866ec72f116..13d5b42942a 100644 --- a/compiler/rustc_builtin_macros/src/format_foreign.rs +++ b/compiler/rustc_builtin_macros/src/format_foreign.rs @@ -12,14 +12,16 @@ pub(crate) mod printf { Escape((usize, usize)), } - impl<'a> Substitution<'a> { - pub(crate) fn as_str(&self) -> &str { + impl ToString for Substitution<'_> { + fn to_string(&self) -> String { match self { - Substitution::Format(fmt) => fmt.span, - Substitution::Escape(_) => "%%", + Substitution::Format(fmt) => fmt.span.into(), + Substitution::Escape(_) => "%%".into(), } } + } + impl Substitution<'_> { pub(crate) fn position(&self) -> InnerSpan { match self { Substitution::Format(fmt) => fmt.position, @@ -627,15 +629,17 @@ pub(crate) mod shell { Escape((usize, usize)), 
} - impl Substitution<'_> { - pub(crate) fn as_str(&self) -> String { + impl ToString for Substitution<'_> { + fn to_string(&self) -> String { match self { Substitution::Ordinal(n, _) => format!("${n}"), Substitution::Name(n, _) => format!("${n}"), Substitution::Escape(_) => "$$".into(), } } + } + impl Substitution<'_> { pub(crate) fn position(&self) -> InnerSpan { let (Self::Ordinal(_, pos) | Self::Name(_, pos) | Self::Escape(pos)) = self; InnerSpan::new(pos.0, pos.1) diff --git a/compiler/rustc_builtin_macros/src/global_allocator.rs b/compiler/rustc_builtin_macros/src/global_allocator.rs index 8fdbbf8e704..90d79235820 100644 --- a/compiler/rustc_builtin_macros/src/global_allocator.rs +++ b/compiler/rustc_builtin_macros/src/global_allocator.rs @@ -83,6 +83,7 @@ impl AllocFnFactory<'_, '_> { generics: Generics::default(), contract: None, body, + define_opaque: None, })); let item = self.cx.item( self.span, diff --git a/compiler/rustc_builtin_macros/src/lib.rs b/compiler/rustc_builtin_macros/src/lib.rs index ca16583a45d..dab3053d8b0 100644 --- a/compiler/rustc_builtin_macros/src/lib.rs +++ b/compiler/rustc_builtin_macros/src/lib.rs @@ -5,6 +5,7 @@ #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(assert_matches)] @@ -18,7 +19,6 @@ #![feature(rustdoc_internals)] #![feature(string_from_utf8_lossy_owned)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate proc_macro; @@ -39,6 +39,7 @@ mod compile_error; mod concat; mod concat_bytes; mod concat_idents; +mod define_opaque; mod derive; mod deriving; mod edition_panic; @@ -114,6 +115,7 @@ pub fn register_builtin_macros(resolver: &mut dyn ResolverExpand) { bench: test::expand_bench, cfg_accessible: cfg_accessible::Expander, cfg_eval: cfg_eval::expand, + define_opaque: define_opaque::expand, derive: derive::Expander { is_const: false }, derive_const: derive::Expander { is_const: true }, global_allocator: global_allocator::expand, diff --git a/compiler/rustc_builtin_macros/src/standard_library_imports.rs b/compiler/rustc_builtin_macros/src/standard_library_imports.rs index 6933ca09349..ba63b185e09 100644 --- a/compiler/rustc_builtin_macros/src/standard_library_imports.rs +++ b/compiler/rustc_builtin_macros/src/standard_library_imports.rs @@ -60,6 +60,7 @@ pub fn inject( Edition2018 => sym::rust_2018, Edition2021 => sym::rust_2021, Edition2024 => sym::rust_2024, + EditionFuture => sym::rust_future, }]) .map(|&symbol| Ident::new(symbol, span)) .collect(); diff --git a/compiler/rustc_builtin_macros/src/test_harness.rs b/compiler/rustc_builtin_macros/src/test_harness.rs index 472e16e62d5..768b459ec5e 100644 --- a/compiler/rustc_builtin_macros/src/test_harness.rs +++ b/compiler/rustc_builtin_macros/src/test_harness.rs @@ -346,6 +346,7 @@ fn mk_main(cx: &mut TestCtxt<'_>) -> P<ast::Item> { generics: ast::Generics::default(), contract: None, body: Some(main_body), + define_opaque: None, })); // Honor the reexport_test_harness_main attribute diff --git a/compiler/rustc_codegen_cranelift/example/issue-72793.rs b/compiler/rustc_codegen_cranelift/example/issue-72793.rs index 2e08fbca8ef..95d58b90e79 100644 --- a/compiler/rustc_codegen_cranelift/example/issue-72793.rs +++ b/compiler/rustc_codegen_cranelift/example/issue-72793.rs @@ -2,23 +2,21 @@ 
#![feature(type_alias_impl_trait)] -mod helper { - pub trait T { - type Item; - } +pub trait T { + type Item; +} - pub type Alias<'a> = impl T<Item = &'a ()>; +pub type Alias<'a> = impl T<Item = &'a ()>; - struct S; - impl<'a> T for &'a S { - type Item = &'a (); - } +struct S; +impl<'a> T for &'a S { + type Item = &'a (); +} - pub fn filter_positive<'a>() -> Alias<'a> { - &S - } +#[define_opaque(Alias)] +pub fn filter_positive<'a>() -> Alias<'a> { + &S } -use helper::*; fn with_positive(fun: impl Fn(Alias<'_>)) { fun(filter_positive()); diff --git a/compiler/rustc_codegen_cranelift/example/mini_core.rs b/compiler/rustc_codegen_cranelift/example/mini_core.rs index 72c9df59d83..6e345b2a6fd 100644 --- a/compiler/rustc_codegen_cranelift/example/mini_core.rs +++ b/compiler/rustc_codegen_cranelift/example/mini_core.rs @@ -624,25 +624,25 @@ pub mod intrinsics { #[rustc_intrinsic] pub fn size_of<T>() -> usize; #[rustc_intrinsic] - pub unsafe fn size_of_val<T: ?::Sized>(_val: *const T) -> usize; + pub unsafe fn size_of_val<T: ?::Sized>(val: *const T) -> usize; #[rustc_intrinsic] pub fn min_align_of<T>() -> usize; #[rustc_intrinsic] - pub unsafe fn min_align_of_val<T: ?::Sized>(_val: *const T) -> usize; + pub unsafe fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize; #[rustc_intrinsic] - pub unsafe fn copy<T>(_src: *const T, _dst: *mut T, _count: usize); + pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize); #[rustc_intrinsic] - pub unsafe fn transmute<T, U>(_e: T) -> U; + pub unsafe fn transmute<T, U>(e: T) -> U; #[rustc_intrinsic] - pub unsafe fn ctlz_nonzero<T>(_x: T) -> u32; + pub unsafe fn ctlz_nonzero<T>(x: T) -> u32; #[rustc_intrinsic] pub fn needs_drop<T: ?::Sized>() -> bool; #[rustc_intrinsic] - pub fn bitreverse<T>(_x: T) -> T; + pub fn bitreverse<T>(x: T) -> T; #[rustc_intrinsic] - pub fn bswap<T>(_x: T) -> T; + pub fn bswap<T>(x: T) -> T; #[rustc_intrinsic] - pub unsafe fn write_bytes<T>(_dst: *mut T, _val: u8, _count: usize); + pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize); #[rustc_intrinsic] pub unsafe fn unreachable() -> !; } diff --git a/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch b/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch index bedc6ca11b3..754025ff49d 100644 --- a/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch +++ b/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch @@ -12,15 +12,15 @@ index 7165c3e48af..968552ad435 100644 --- a/library/alloc/Cargo.toml +++ b/library/alloc/Cargo.toml @@ -11,7 +11,7 @@ test = { path = "../test" } - edition = "2021" + bench = false [dependencies] core = { path = "../core", public = true } --compiler_builtins = { version = "=0.1.148", features = ['rustc-dep-of-std'] } -+compiler_builtins = { version = "=0.1.148", features = ['rustc-dep-of-std', 'no-f16-f128'] } +-compiler_builtins = { version = "=0.1.151", features = ['rustc-dep-of-std'] } ++compiler_builtins = { version = "=0.1.151", features = ['rustc-dep-of-std', 'no-f16-f128'] } - [dev-dependencies] - rand = { version = "0.8.5", default-features = false, features = ["alloc"] } + [features] + compiler-builtins-mem = ['compiler_builtins/mem'] -- 2.34.1 diff --git a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs index b28c4c9539c..06d89bc9ea7 100644 --- 
a/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/pass_mode.rs @@ -84,7 +84,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { AbiParam::new(scalar_to_clif_type(tcx, scalar)), attrs )], - BackendRepr::Vector { .. } => { + BackendRepr::SimdVector { .. } => { let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout); smallvec![AbiParam::new(vector_ty)] } @@ -135,7 +135,7 @@ impl<'tcx> ArgAbiExt<'tcx> for ArgAbi<'tcx, Ty<'tcx>> { BackendRepr::Scalar(scalar) => { (None, vec![AbiParam::new(scalar_to_clif_type(tcx, scalar))]) } - BackendRepr::Vector { .. } => { + BackendRepr::SimdVector { .. } => { let vector_ty = crate::intrinsics::clif_vector_type(tcx, self.layout); (None, vec![AbiParam::new(vector_ty)]) } diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs index 4d9bed8652c..6735ae024d1 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs @@ -53,7 +53,7 @@ fn report_atomic_type_validation_error<'tcx>( pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Type { let (element, count) = match layout.backend_repr { - BackendRepr::Vector { element, count } => (element, count), + BackendRepr::SimdVector { element, count } => (element, count), _ => unreachable!(), }; diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs index 0929218ea2b..dd6d8dbb6f5 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs @@ -460,64 +460,6 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( }); } - sym::simd_fpow => { - intrinsic_args!(fx, args => (a, b); intrinsic); - - if !a.layout().ty.is_simd() { - report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty); - return; - } - - simd_pair_for_each_lane(fx, a, b, ret, &|fx, lane_ty, _ret_lane_ty, a_lane, b_lane| { - match lane_ty.kind() { - ty::Float(FloatTy::F32) => fx.lib_call( - "powf", - vec![AbiParam::new(types::F32), AbiParam::new(types::F32)], - vec![AbiParam::new(types::F32)], - &[a_lane, b_lane], - )[0], - ty::Float(FloatTy::F64) => fx.lib_call( - "pow", - vec![AbiParam::new(types::F64), AbiParam::new(types::F64)], - vec![AbiParam::new(types::F64)], - &[a_lane, b_lane], - )[0], - _ => unreachable!("{:?}", lane_ty), - } - }); - } - - sym::simd_fpowi => { - intrinsic_args!(fx, args => (a, exp); intrinsic); - let exp = exp.load_scalar(fx); - - if !a.layout().ty.is_simd() { - report_simd_type_validation_error(fx, intrinsic, span, a.layout().ty); - return; - } - - simd_for_each_lane( - fx, - a, - ret, - &|fx, lane_ty, _ret_lane_ty, lane| match lane_ty.kind() { - ty::Float(FloatTy::F32) => fx.lib_call( - "__powisf2", // compiler-builtins - vec![AbiParam::new(types::F32), AbiParam::new(types::I32)], - vec![AbiParam::new(types::F32)], - &[lane, exp], - )[0], - ty::Float(FloatTy::F64) => fx.lib_call( - "__powidf2", // compiler-builtins - vec![AbiParam::new(types::F64), AbiParam::new(types::I32)], - vec![AbiParam::new(types::F64)], - &[lane, exp], - )[0], - _ => unreachable!("{:?}", lane_ty), - }, - ); - } - sym::simd_fsin | sym::simd_fcos | sym::simd_fexp diff --git a/compiler/rustc_codegen_cranelift/src/lib.rs b/compiler/rustc_codegen_cranelift/src/lib.rs index a3f43744875..06939beb374 100644 --- a/compiler/rustc_codegen_cranelift/src/lib.rs +++ 
b/compiler/rustc_codegen_cranelift/src/lib.rs @@ -176,13 +176,9 @@ impl CodegenBackend for CraneliftCodegenBackend { } } - fn target_features_cfg( - &self, - sess: &Session, - _allow_unstable: bool, - ) -> Vec<rustc_span::Symbol> { + fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { // FIXME return the actually used target features. this is necessary for #[cfg(target_feature)] - if sess.target.arch == "x86_64" && sess.target.os != "none" { + let target_features = if sess.target.arch == "x86_64" && sess.target.os != "none" { // x86_64 mandates SSE2 support and rustc requires the x87 feature to be enabled vec![sym::fsxr, sym::sse, sym::sse2, Symbol::intern("x87")] } else if sess.target.arch == "aarch64" { @@ -196,7 +192,10 @@ impl CodegenBackend for CraneliftCodegenBackend { } } else { vec![] - } + }; + // FIXME do `unstable_target_features` properly + let unstable_target_features = target_features.clone(); + (target_features, unstable_target_features) } fn print_version(&self) { diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index 1b3f86c8405..cc739fefcd0 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -173,9 +173,11 @@ impl<'tcx> CValue<'tcx> { CValueInner::ByRef(ptr, None) => { let clif_ty = match layout.backend_repr { BackendRepr::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar), - BackendRepr::Vector { element, count } => scalar_to_clif_type(fx.tcx, element) - .by(u32::try_from(count).unwrap()) - .unwrap(), + BackendRepr::SimdVector { element, count } => { + scalar_to_clif_type(fx.tcx, element) + .by(u32::try_from(count).unwrap()) + .unwrap() + } _ => unreachable!("{:?}", layout.ty), }; let mut flags = MemFlags::new(); diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs index 3dad35bc4ce..5544aee9eaf 100644 --- a/compiler/rustc_codegen_gcc/example/mini_core.rs +++ b/compiler/rustc_codegen_gcc/example/mini_core.rs @@ -595,25 +595,25 @@ pub mod intrinsics { #[rustc_intrinsic] pub fn size_of<T>() -> usize; #[rustc_intrinsic] - pub unsafe fn size_of_val<T: ?::Sized>(_val: *const T) -> usize; + pub unsafe fn size_of_val<T: ?::Sized>(val: *const T) -> usize; #[rustc_intrinsic] pub fn min_align_of<T>() -> usize; #[rustc_intrinsic] - pub unsafe fn min_align_of_val<T: ?::Sized>(_val: *const T) -> usize; + pub unsafe fn min_align_of_val<T: ?::Sized>(val: *const T) -> usize; #[rustc_intrinsic] - pub unsafe fn copy<T>(_src: *const T, _dst: *mut T, _count: usize); + pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize); #[rustc_intrinsic] - pub unsafe fn transmute<T, U>(_e: T) -> U; + pub unsafe fn transmute<T, U>(e: T) -> U; #[rustc_intrinsic] - pub unsafe fn ctlz_nonzero<T>(_x: T) -> u32; + pub unsafe fn ctlz_nonzero<T>(x: T) -> u32; #[rustc_intrinsic] pub fn needs_drop<T: ?::Sized>() -> bool; #[rustc_intrinsic] - pub fn bitreverse<T>(_x: T) -> T; + pub fn bitreverse<T>(x: T) -> T; #[rustc_intrinsic] - pub fn bswap<T>(_x: T) -> T; + pub fn bswap<T>(x: T) -> T; #[rustc_intrinsic] - pub unsafe fn write_bytes<T>(_dst: *mut T, _val: u8, _count: usize); + pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize); #[rustc_intrinsic] pub unsafe fn unreachable() -> !; } diff --git a/compiler/rustc_codegen_gcc/src/abi.rs b/compiler/rustc_codegen_gcc/src/abi.rs index 717baebcd8c..9fe6baa3d25 100644 --- 
a/compiler/rustc_codegen_gcc/src/abi.rs +++ b/compiler/rustc_codegen_gcc/src/abi.rs @@ -16,7 +16,7 @@ use crate::context::CodegenCx; use crate::intrinsic::ArgAbiExt; use crate::type_of::LayoutGccExt; -impl<'a, 'gcc, 'tcx> AbiBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tcx> { +impl AbiBuilderMethods for Builder<'_, '_, '_> { fn get_param(&mut self, index: usize) -> Self::Value { let func = self.current_func(); let param = func.get_param(index as i32); diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index cb4caec8c32..e5221c7da31 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -632,17 +632,16 @@ pub unsafe fn optimize_thin_module( Arc::new(SyncContext::new(context)) } }; - let module = ModuleCodegen { - module_llvm: GccContext { + let module = ModuleCodegen::new_regular( + thin_module.name().to_string(), + GccContext { context, should_combine_object_files, // TODO(antoyo): use the correct relocation model here. relocation_model: RelocModel::Pic, temp_dir: None, }, - name: thin_module.name().to_string(), - kind: ModuleKind::Regular, - }; + ); /*{ let target = &*module.module_llvm.tm; let llmod = module.module_llvm.llmod(); diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs index 962f4b161d7..9b495174a3f 100644 --- a/compiler/rustc_codegen_gcc/src/base.rs +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -4,10 +4,10 @@ use std::sync::Arc; use std::time::Instant; use gccjit::{CType, Context, FunctionType, GlobalKind}; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::DebugInfoCodegenMethods; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_middle::dep_graph; use rustc_middle::mir::mono::Linkage; #[cfg(feature = "master")] @@ -237,16 +237,15 @@ pub fn compile_codegen_unit( } } - ModuleCodegen { - name: cgu_name.to_string(), - module_llvm: GccContext { + ModuleCodegen::new_regular( + cgu_name.to_string(), + GccContext { context: Arc::new(SyncContext::new(context)), relocation_model: tcx.sess.relocation_model(), should_combine_object_files: false, temp_dir: None, }, - kind: ModuleKind::Regular, - } + ) } (module, cost) diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index c8b7616e645..6573b5b165e 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -2439,9 +2439,5 @@ fn get_maybe_pointer_size(value: RValue<'_>) -> u32 { #[cfg(not(feature = "master"))] fn get_maybe_pointer_size(value: RValue<'_>) -> u32 { let type_ = value.get_type(); - if type_.get_pointee().is_some() { - std::mem::size_of::<*const ()>() as _ - } else { - type_.get_size() - } + if type_.get_pointee().is_some() { size_of::<*const ()>() as _ } else { type_.get_size() } } diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 20a3482aaa2..a63da6b6e27 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -59,16 +59,11 @@ pub fn type_is_pointer(typ: Type<'_>) -> bool { typ.get_pointee().is_some() } -impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { +impl<'gcc, 'tcx> ConstCodegenMethods for CodegenCx<'gcc, 'tcx> { fn const_null(&self, typ: Type<'gcc>) -> RValue<'gcc> { if type_is_pointer(typ) { self.context.new_null(typ) } else { 
self.const_int(typ, 0) } } - fn is_undef(&self, _val: RValue<'gcc>) -> bool { - // FIXME: actually check for undef - false - } - fn const_undef(&self, typ: Type<'gcc>) -> RValue<'gcc> { let local = self.current_func.borrow().expect("func").new_local(None, typ, "undefined"); if typ.is_struct().is_some() { @@ -146,13 +141,12 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } fn const_str(&self, s: &str) -> (RValue<'gcc>, RValue<'gcc>) { - let str_global = *self - .const_str_cache - .borrow_mut() - .raw_entry_mut() - .from_key(s) - .or_insert_with(|| (s.to_owned(), self.global_string(s))) - .1; + let mut const_str_cache = self.const_str_cache.borrow_mut(); + let str_global = const_str_cache.get(s).copied().unwrap_or_else(|| { + let g = self.global_string(s); + const_str_cache.insert(s.to_owned(), g); + g + }); let len = s.len(); let cs = self.const_ptrcast( str_global.get_address(None), @@ -263,7 +257,7 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } } - fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value { + fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value { const_alloc_to_gcc(self, alloc) } diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index fb0ca31c543..c514b7a428b 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -302,9 +302,9 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -pub fn const_alloc_to_gcc<'gcc, 'tcx>( - cx: &CodegenCx<'gcc, 'tcx>, - alloc: ConstAllocation<'tcx>, +pub fn const_alloc_to_gcc<'gcc>( + cx: &CodegenCx<'gcc, '_>, + alloc: ConstAllocation<'_>, ) -> RValue<'gcc> { let alloc = alloc.inner(); let mut llvals = Vec::with_capacity(alloc.provenance().ptrs().len() + 1); diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs index 4e8c8aaaf5c..6eae0c24f48 100644 --- a/compiler/rustc_codegen_gcc/src/gcc_util.rs +++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs @@ -48,7 +48,7 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri for feature in sess.opts.cg.target_feature.split(',') { if let Some(feature) = feature.strip_prefix('+') { all_rust_features.extend( - UnordSet::from(sess.target.implied_target_features(std::iter::once(feature))) + UnordSet::from(sess.target.implied_target_features(feature)) .to_sorted_stable_ord() .iter() .map(|&&s| (true, s)), diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index f8672c07299..f38622074f1 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -312,7 +312,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc let layout = self.layout_of(tp_ty).layout; let _use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, - Vector { .. } => false, + SimdVector { .. } => false, Memory { .. 
} => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 84cd5b002fb..8b454ab2a42 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -772,8 +772,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( sym::simd_floor => "floor", sym::simd_fma => "fma", sym::simd_relaxed_fma => "fma", // FIXME: this should relax to non-fused multiply-add when necessary - sym::simd_fpowi => "__builtin_powi", - sym::simd_fpow => "pow", sym::simd_fsin => "sin", sym::simd_fsqrt => "sqrt", sym::simd_round => "round", @@ -788,24 +786,16 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let mut vector_elements = vec![]; for i in 0..in_len { let index = bx.context.new_rvalue_from_long(bx.ulong_type, i as i64); - // we have to treat fpowi specially, since fpowi's second argument is always an i32 let mut arguments = vec![]; - if name == sym::simd_fpowi { - arguments = vec![ - bx.extract_element(args[0].immediate(), index).to_rvalue(), - args[1].immediate(), - ]; - } else { - for arg in args { - let mut element = bx.extract_element(arg.immediate(), index).to_rvalue(); - // FIXME: it would probably be better to not have casts here and use the proper - // instructions. - if let Some(typ) = cast_type { - element = bx.context.new_cast(None, element, typ); - } - arguments.push(element); + for arg in args { + let mut element = bx.extract_element(arg.immediate(), index).to_rvalue(); + // FIXME: it would probably be better to not have casts here and use the proper + // instructions. + if let Some(typ) = cast_type { + element = bx.context.new_cast(None, element, typ); } - }; + arguments.push(element); + } let mut result = bx.context.new_call(None, function, &arguments); if cast_type.is_some() { result = bx.context.new_cast(None, result, elem_ty); @@ -829,8 +819,6 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( | sym::simd_floor | sym::simd_fma | sym::simd_relaxed_fma - | sym::simd_fpow - | sym::simd_fpowi | sym::simd_fsin | sym::simd_fsqrt | sym::simd_round diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 6455bcec685..d478b2af46c 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -16,7 +16,7 @@ #![allow(internal_features)] #![doc(rust_logo)] #![feature(rustdoc_internals)] -#![feature(rustc_private, decl_macro, never_type, trusted_len, hash_raw_entry, let_chains)] +#![feature(rustc_private, decl_macro, never_type, trusted_len, let_chains)] #![allow(broken_intra_doc_links)] #![recursion_limit = "256"] #![warn(rust_2018_idioms)] @@ -259,8 +259,8 @@ impl CodegenBackend for GccCodegenBackend { .join(sess) } - fn target_features_cfg(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - target_features_cfg(sess, allow_unstable, &self.target_info) + fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { + target_features_cfg(sess, &self.target_info) } } @@ -393,7 +393,7 @@ impl WriteBackendMethods for GccCodegenBackend { unsafe fn optimize( _cgcx: &CodegenContext<Self>, _dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<Self::Module>, + module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, ) -> Result<(), FatalError> { module.module_llvm.context.set_optimization_level(to_gcc_opt_level(config.opt_level)); @@ -486,35 +486,41 @@ fn to_gcc_opt_level(optlevel: 
Option<OptLevel>) -> OptimizationLevel { /// Returns the features that should be set in `cfg(target_feature)`. fn target_features_cfg( sess: &Session, - allow_unstable: bool, target_info: &LockedTargetInfo, -) -> Vec<Symbol> { +) -> (Vec<Symbol>, Vec<Symbol>) { // TODO(antoyo): use global_gcc_features. - sess.target - .rust_target_features() - .iter() - .filter_map(|&(feature, gate, _)| { - if allow_unstable - || (gate.in_cfg() && (sess.is_nightly_build() || gate.requires_nightly().is_none())) - { - Some(feature) - } else { - None - } - }) - .filter(|feature| { - // TODO: we disable Neon for now since we don't support the LLVM intrinsics for it. - if *feature == "neon" { - return false; - } - target_info.cpu_supports(feature) - /* - adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma, - avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq, - bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, - sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves - */ - }) - .map(Symbol::intern) - .collect() + let f = |allow_unstable| { + sess.target + .rust_target_features() + .iter() + .filter_map(|&(feature, gate, _)| { + if allow_unstable + || (gate.in_cfg() + && (sess.is_nightly_build() || gate.requires_nightly().is_none())) + { + Some(feature) + } else { + None + } + }) + .filter(|feature| { + // TODO: we disable Neon for now since we don't support the LLVM intrinsics for it. + if *feature == "neon" { + return false; + } + target_info.cpu_supports(feature) + /* + adx, aes, avx, avx2, avx512bf16, avx512bitalg, avx512bw, avx512cd, avx512dq, avx512er, avx512f, avx512fp16, avx512ifma, + avx512pf, avx512vbmi, avx512vbmi2, avx512vl, avx512vnni, avx512vp2intersect, avx512vpopcntdq, + bmi1, bmi2, cmpxchg16b, ermsb, f16c, fma, fxsr, gfni, lzcnt, movbe, pclmulqdq, popcnt, rdrand, rdseed, rtm, + sha, sse, sse2, sse3, sse4.1, sse4.2, sse4a, ssse3, tbm, vaes, vpclmulqdq, xsave, xsavec, xsaveopt, xsaves + */ + }) + .map(Symbol::intern) + .collect() + }; + + let target_features = f(false); + let unstable_target_features = f(true); + (target_features, unstable_target_features) } diff --git a/compiler/rustc_codegen_gcc/src/type_.rs b/compiler/rustc_codegen_gcc/src/type_.rs index cb08723431a..4e0a250b550 100644 --- a/compiler/rustc_codegen_gcc/src/type_.rs +++ b/compiler/rustc_codegen_gcc/src/type_.rs @@ -123,7 +123,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { } } -impl<'gcc, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { +impl<'gcc, 'tcx> BaseTypeCodegenMethods for CodegenCx<'gcc, 'tcx> { fn type_i8(&self) -> Type<'gcc> { self.i8_type } diff --git a/compiler/rustc_codegen_gcc/src/type_of.rs b/compiler/rustc_codegen_gcc/src/type_of.rs index bac4fc51300..ae98b3d0b56 100644 --- a/compiler/rustc_codegen_gcc/src/type_of.rs +++ b/compiler/rustc_codegen_gcc/src/type_of.rs @@ -63,7 +63,7 @@ fn uncached_gcc_type<'gcc, 'tcx>( ) -> Type<'gcc> { match layout.backend_repr { BackendRepr::Scalar(_) => bug!("handled elsewhere"), - BackendRepr::Vector { ref element, count } => { + BackendRepr::SimdVector { ref element, count } => { let element = layout.scalar_gcc_type_at(cx, element, Size::ZERO); let element = // NOTE: gcc doesn't allow pointer types in vectors. 
@@ -178,7 +178,7 @@ pub trait LayoutGccExt<'tcx> { impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_immediate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => true, + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false, } } @@ -186,9 +186,9 @@ impl<'tcx> LayoutGccExt<'tcx> for TyAndLayout<'tcx> { fn is_gcc_scalar_pair(&self) -> bool { match self.backend_repr { BackendRepr::ScalarPair(..) => true, - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => { - false - } + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 8c75125e009..71059338151 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -654,7 +654,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } -impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { +impl AbiBuilderMethods for Builder<'_, '_, '_> { fn get_param(&mut self, index: usize) -> Self::Value { llvm::get_param(self.llfn(), index as c_uint) } diff --git a/compiler/rustc_codegen_llvm/src/allocator.rs b/compiler/rustc_codegen_llvm/src/allocator.rs index 66723cbf882..e614115f64b 100644 --- a/compiler/rustc_codegen_llvm/src/allocator.rs +++ b/compiler/rustc_codegen_llvm/src/allocator.rs @@ -3,33 +3,31 @@ use rustc_ast::expand::allocator::{ ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE, alloc_error_handler_name, default_fn_name, global_fn_name, }; +use rustc_codegen_ssa::traits::BaseTypeCodegenMethods as _; use rustc_middle::bug; use rustc_middle::ty::TyCtxt; use rustc_session::config::{DebugInfo, OomStrategy}; -use crate::common::AsCCharPtr; -use crate::llvm::{self, Context, False, Module, True, Type}; -use crate::{ModuleLlvm, attributes, debuginfo}; +use crate::builder::SBuilder; +use crate::declare::declare_simple_fn; +use crate::llvm::{self, False, True, Type}; +use crate::{SimpleCx, attributes, debuginfo}; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, - module_llvm: &mut ModuleLlvm, + cx: SimpleCx<'_>, module_name: &str, kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind, ) { - let llcx = &*module_llvm.llcx; - let llmod = module_llvm.llmod(); - let usize = unsafe { - match tcx.sess.target.pointer_width { - 16 => llvm::LLVMInt16TypeInContext(llcx), - 32 => llvm::LLVMInt32TypeInContext(llcx), - 64 => llvm::LLVMInt64TypeInContext(llcx), - tws => bug!("Unsupported target word size for int: {}", tws), - } + let usize = match tcx.sess.target.pointer_width { + 16 => cx.type_i16(), + 32 => cx.type_i32(), + 64 => cx.type_i64(), + tws => bug!("Unsupported target word size for int: {}", tws), }; - let i8 = unsafe { llvm::LLVMInt8TypeInContext(llcx) }; - let i8p = unsafe { llvm::LLVMPointerTypeInContext(llcx, 0) }; + let i8 = cx.type_i8(); + let i8p = cx.type_ptr(); if kind == AllocatorKind::Default { for method in ALLOCATOR_METHODS { @@ -58,15 +56,14 @@ pub(crate) unsafe fn codegen( let from_name = global_fn_name(method.name); let to_name = default_fn_name(method.name); - create_wrapper_function(tcx, llcx, llmod, &from_name, &to_name, &args, output, false); + create_wrapper_function(tcx, &cx, &from_name, &to_name, &args, output, false); } } // rust alloc error handler create_wrapper_function( tcx, - llcx, - llmod, + &cx, 
"__rust_alloc_error_handler", alloc_error_handler_name(alloc_error_handler_kind), &[usize, usize], // size, align @@ -77,21 +74,21 @@ pub(crate) unsafe fn codegen( unsafe { // __rust_alloc_error_handler_should_panic let name = OomStrategy::SYMBOL; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); + let ll_g = cx.declare_global(name, i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let val = tcx.sess.opts.unstable_opts.oom.should_panic(); let llval = llvm::LLVMConstInt(i8, val as u64, False); llvm::set_initializer(ll_g, llval); let name = NO_ALLOC_SHIM_IS_UNSTABLE; - let ll_g = llvm::LLVMRustGetOrInsertGlobal(llmod, name.as_c_char_ptr(), name.len(), i8); + let ll_g = cx.declare_global(name, i8); llvm::set_visibility(ll_g, llvm::Visibility::from_generic(tcx.sess.default_visibility())); let llval = llvm::LLVMConstInt(i8, 0, False); llvm::set_initializer(ll_g, llval); } if tcx.sess.opts.debuginfo != DebugInfo::None { - let dbg_cx = debuginfo::CodegenUnitDebugContext::new(llmod); + let dbg_cx = debuginfo::CodegenUnitDebugContext::new(cx.llmod); debuginfo::metadata::build_compile_unit_di_node(tcx, module_name, &dbg_cx); dbg_cx.finalize(tcx.sess); } @@ -99,77 +96,64 @@ pub(crate) unsafe fn codegen( fn create_wrapper_function( tcx: TyCtxt<'_>, - llcx: &Context, - llmod: &Module, + cx: &SimpleCx<'_>, from_name: &str, to_name: &str, args: &[&Type], output: Option<&Type>, no_return: bool, ) { - unsafe { - let ty = llvm::LLVMFunctionType( - output.unwrap_or_else(|| llvm::LLVMVoidTypeInContext(llcx)), - args.as_ptr(), - args.len() as c_uint, - False, - ); - let llfn = llvm::LLVMRustGetOrInsertFunction( - llmod, - from_name.as_c_char_ptr(), - from_name.len(), - ty, - ); - let no_return = if no_return { - // -> ! DIFlagNoReturn - let no_return = llvm::AttributeKind::NoReturn.create_attr(llcx); - attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]); - Some(no_return) - } else { - None - }; - - llvm::set_visibility(llfn, llvm::Visibility::from_generic(tcx.sess.default_visibility())); - - if tcx.sess.must_emit_unwind_tables() { - let uwtable = - attributes::uwtable_attr(llcx, tcx.sess.opts.unstable_opts.use_sync_unwind); - attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]); - } + let ty = cx.type_func(args, output.unwrap_or_else(|| cx.type_void())); + let llfn = declare_simple_fn( + &cx, + from_name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::Global, + llvm::Visibility::from_generic(tcx.sess.default_visibility()), + ty, + ); + let no_return = if no_return { + // -> ! DIFlagNoReturn + let no_return = llvm::AttributeKind::NoReturn.create_attr(cx.llcx); + attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[no_return]); + Some(no_return) + } else { + None + }; - let callee = - llvm::LLVMRustGetOrInsertFunction(llmod, to_name.as_c_char_ptr(), to_name.len(), ty); - if let Some(no_return) = no_return { - // -> ! 
DIFlagNoReturn - attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); - } - llvm::set_visibility(callee, llvm::Visibility::Hidden); - - let llbb = llvm::LLVMAppendBasicBlockInContext(llcx, llfn, c"entry".as_ptr()); - - let llbuilder = llvm::LLVMCreateBuilderInContext(llcx); - llvm::LLVMPositionBuilderAtEnd(llbuilder, llbb); - let args = args - .iter() - .enumerate() - .map(|(i, _)| llvm::LLVMGetParam(llfn, i as c_uint)) - .collect::<Vec<_>>(); - let ret = llvm::LLVMBuildCallWithOperandBundles( - llbuilder, - ty, - callee, - args.as_ptr(), - args.len() as c_uint, - [].as_ptr(), - 0 as c_uint, - c"".as_ptr(), - ); - llvm::LLVMSetTailCall(ret, True); - if output.is_some() { - llvm::LLVMBuildRet(llbuilder, ret); - } else { - llvm::LLVMBuildRetVoid(llbuilder); - } - llvm::LLVMDisposeBuilder(llbuilder); + if tcx.sess.must_emit_unwind_tables() { + let uwtable = + attributes::uwtable_attr(cx.llcx, tcx.sess.opts.unstable_opts.use_sync_unwind); + attributes::apply_to_llfn(llfn, llvm::AttributePlace::Function, &[uwtable]); + } + + let callee = declare_simple_fn( + &cx, + to_name, + llvm::CallConv::CCallConv, + llvm::UnnamedAddr::Global, + llvm::Visibility::Hidden, + ty, + ); + if let Some(no_return) = no_return { + // -> ! DIFlagNoReturn + attributes::apply_to_llfn(callee, llvm::AttributePlace::Function, &[no_return]); + } + llvm::set_visibility(callee, llvm::Visibility::Hidden); + + let llbb = unsafe { llvm::LLVMAppendBasicBlockInContext(cx.llcx, llfn, c"entry".as_ptr()) }; + + let mut bx = SBuilder::build(&cx, llbb); + let args = args + .iter() + .enumerate() + .map(|(i, _)| llvm::get_param(llfn, i as c_uint)) + .collect::<Vec<_>>(); + let ret = bx.call(ty, callee, &args, None); + llvm::LLVMSetTailCall(ret, True); + if output.is_some() { + bx.ret(ret); + } else { + bx.ret_void() } } diff --git a/compiler/rustc_codegen_llvm/src/asm.rs b/compiler/rustc_codegen_llvm/src/asm.rs index be5673eddf9..88daa025740 100644 --- a/compiler/rustc_codegen_llvm/src/asm.rs +++ b/compiler/rustc_codegen_llvm/src/asm.rs @@ -1,6 +1,5 @@ use std::assert_matches::assert_matches; -use libc::{c_char, c_uint}; use rustc_abi::{BackendRepr, Float, Integer, Primitive, Scalar}; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; use rustc_codegen_ssa::mir::operand::OperandValue; @@ -483,12 +482,13 @@ pub(crate) fn inline_asm_call<'ll>( debug!("Asm Output Type: {:?}", output); let fty = bx.cx.type_func(&argtys, output); - unsafe { - // Ask LLVM to verify that the constraints are well-formed. - let constraints_ok = llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()); - debug!("constraint verification result: {:?}", constraints_ok); - if constraints_ok { - let v = llvm::LLVMRustInlineAsm( + // Ask LLVM to verify that the constraints are well-formed. 
+ let constraints_ok = + unsafe { llvm::LLVMRustInlineAsmVerify(fty, cons.as_c_char_ptr(), cons.len()) }; + debug!("constraint verification result: {:?}", constraints_ok); + if constraints_ok { + let v = unsafe { + llvm::LLVMRustInlineAsm( fty, asm.as_c_char_ptr(), asm.len(), @@ -498,54 +498,50 @@ pub(crate) fn inline_asm_call<'ll>( alignstack, dia, can_throw, - ); - - let call = if !labels.is_empty() { - assert!(catch_funclet.is_none()); - bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) - } else if let Some((catch, funclet)) = catch_funclet { - bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) - } else { - bx.call(fty, None, None, v, inputs, None, None) - }; + ) + }; - // Store mark in a metadata node so we can map LLVM errors - // back to source locations. See #17552. - let key = "srcloc"; - let kind = llvm::LLVMGetMDKindIDInContext( - bx.llcx, - key.as_ptr().cast::<c_char>(), - key.len() as c_uint, - ); + let call = if !labels.is_empty() { + assert!(catch_funclet.is_none()); + bx.callbr(fty, None, None, v, inputs, dest.unwrap(), labels, None, None) + } else if let Some((catch, funclet)) = catch_funclet { + bx.invoke(fty, None, None, v, inputs, dest.unwrap(), catch, funclet, None) + } else { + bx.call(fty, None, None, v, inputs, None, None) + }; - // `srcloc` contains one 64-bit integer for each line of assembly code, - // where the lower 32 bits hold the lo byte position and the upper 32 bits - // hold the hi byte position. - let mut srcloc = vec![]; - if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 { - // LLVM inserts an extra line to add the ".intel_syntax", so add - // a dummy srcloc entry for it. - // - // Don't do this if we only have 1 line span since that may be - // due to the asm template string coming from a macro. LLVM will - // default to the first srcloc for lines that don't have an - // associated srcloc. - srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0))); - } - srcloc.extend(line_spans.iter().map(|span| { - llvm::LLVMValueAsMetadata(bx.const_u64( - u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32), - )) - })); - let md = llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()); - let md = llvm::LLVMMetadataAsValue(&bx.llcx, md); - llvm::LLVMSetMetadata(call, kind, md); + // Store mark in a metadata node so we can map LLVM errors + // back to source locations. See #17552. + let key = "srcloc"; + let kind = bx.get_md_kind_id(key); - Some(call) - } else { - // LLVM has detected an issue with our constraints, bail out - None + // `srcloc` contains one 64-bit integer for each line of assembly code, + // where the lower 32 bits hold the lo byte position and the upper 32 bits + // hold the hi byte position. + let mut srcloc = vec![]; + if dia == llvm::AsmDialect::Intel && line_spans.len() > 1 { + // LLVM inserts an extra line to add the ".intel_syntax", so add + // a dummy srcloc entry for it. + // + // Don't do this if we only have 1 line span since that may be + // due to the asm template string coming from a macro. LLVM will + // default to the first srcloc for lines that don't have an + // associated srcloc. 
+ srcloc.push(llvm::LLVMValueAsMetadata(bx.const_u64(0))); } + srcloc.extend(line_spans.iter().map(|span| { + llvm::LLVMValueAsMetadata( + bx.const_u64(u64::from(span.lo().to_u32()) | (u64::from(span.hi().to_u32()) << 32)), + ) + })); + let md = unsafe { llvm::LLVMMDNodeInContext2(bx.llcx, srcloc.as_ptr(), srcloc.len()) }; + let md = bx.get_metadata_value(md); + llvm::LLVMSetMetadata(call, kind, md); + + Some(call) + } else { + // LLVM has detected an issue with our constraints, bail out + None } } @@ -939,9 +935,10 @@ fn llvm_fixup_input<'ll, 'tcx>( } bx.insert_element(bx.const_undef(vec_ty), value, bx.const_i32(0)) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count); let indices: Vec<_> = (0..count * 2).map(|x| bx.const_i32(x as i32)).collect(); @@ -954,7 +951,7 @@ fn llvm_fixup_input<'ll, 'tcx>( } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. }, ) if layout.size.bytes() == 64 => bx.bitcast(value, bx.cx.type_vector(bx.cx.type_f64(), 8)), ( X86( @@ -989,7 +986,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } @@ -1026,7 +1023,7 @@ fn llvm_fixup_input<'ll, 'tcx>( | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_i16(), count)) } @@ -1099,9 +1096,10 @@ fn llvm_fixup_output<'ll, 'tcx>( } value } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(bx.cx, element); let vec_ty = bx.cx.type_vector(elem_ty, count * 2); let indices: Vec<_> = (0..count).map(|x| bx.const_i32(x as i32)).collect(); @@ -1114,7 +1112,7 @@ fn llvm_fixup_output<'ll, 'tcx>( } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. 
}, ) if layout.size.bytes() == 64 => bx.bitcast(value, layout.llvm_type(bx.cx)), ( X86( @@ -1145,7 +1143,7 @@ | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } @@ -1182,7 +1180,7 @@ | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { bx.bitcast(value, bx.type_vector(bx.type_f16(), count)) } @@ -1243,9 +1241,10 @@ fn llvm_fixup_output_type<'ll, 'tcx>( let count = 16 / layout.size.bytes(); cx.type_vector(elem_ty, count) } - (AArch64(AArch64InlineAsmRegClass::vreg_low16), BackendRepr::Vector { element, count }) - if layout.size.bytes() == 8 => - { + ( + AArch64(AArch64InlineAsmRegClass::vreg_low16), + BackendRepr::SimdVector { element, count }, + ) if layout.size.bytes() == 8 => { let elem_ty = llvm_asm_scalar_type(cx, element); cx.type_vector(elem_ty, count * 2) } @@ -1256,7 +1255,7 @@ } ( X86(X86InlineAsmRegClass::xmm_reg | X86InlineAsmRegClass::zmm_reg), - BackendRepr::Vector { .. }, + BackendRepr::SimdVector { .. }, ) if layout.size.bytes() == 64 => cx.type_vector(cx.type_f64(), 8), ( X86( @@ -1284,7 +1283,7 @@ | X86InlineAsmRegClass::ymm_reg | X86InlineAsmRegClass::zmm_reg, ), - BackendRepr::Vector { element, count: count @ (8 | 16) }, + BackendRepr::SimdVector { element, count: count @ (8 | 16) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } @@ -1321,7 +1320,7 @@ | ArmInlineAsmRegClass::qreg_low4 | ArmInlineAsmRegClass::qreg_low8, ), - BackendRepr::Vector { element, count: count @ (4 | 8) }, + BackendRepr::SimdVector { element, count: count @ (4 | 8) }, ) if element.primitive() == Primitive::Float(Float::F16) => { cx.type_vector(cx.type_i16(), count) } diff --git a/compiler/rustc_codegen_llvm/src/back/archive.rs b/compiler/rustc_codegen_llvm/src/back/archive.rs index 93553f3f364..0a161442933 100644 --- a/compiler/rustc_codegen_llvm/src/back/archive.rs +++ b/compiler/rustc_codegen_llvm/src/back/archive.rs @@ -132,14 +132,33 @@ fn get_llvm_object_symbols( if err.is_null() { return Ok(true); } else { - return Err(unsafe { *Box::from_raw(err as *mut io::Error) }); + let error = unsafe { *Box::from_raw(err as *mut io::Error) }; + // These are the magic constants for LLVM bitcode files: + // https://github.com/llvm/llvm-project/blob/7eadc1960d199676f04add402bb0aa6f65b7b234/llvm/lib/BinaryFormat/Magic.cpp#L90-L97 + if buf.starts_with(&[0xDE, 0xCE, 0x17, 0x0B]) || buf.starts_with(&[b'B', b'C', 0xC0, 0xDE]) + { + // For LLVM bitcode, failure to read the symbols is not fatal. The bitcode may have been + // produced by a newer LLVM version than the one linked to rustc. This is fine provided + // that the linker does use said newer LLVM version. We skip writing the symbols for the + // bitcode to the symbol table of the archive.
Traditional linkers don't like this, but + // newer linkers like lld, mold and wild ignore the symbol table anyway, so if they link + // against a new enough LLVM it will work out in the end. + // LLVM's archive writer also has this same behavior of only warning about invalid + // bitcode since https://github.com/llvm/llvm-project/pull/96848 + + // We don't have access to the DiagCtxt here to produce a nice warning in the correct format. + eprintln!("warning: Failed to read symbol table from LLVM bitcode: {}", error); + return Ok(true); + } else { + return Err(error); + } } unsafe extern "C" fn callback(state: *mut c_void, symbol_name: *const c_char) -> *mut c_void { let f = unsafe { &mut *(state as *mut &mut dyn FnMut(&[u8]) -> io::Result<()>) }; match f(unsafe { CStr::from_ptr(symbol_name) }.to_bytes()) { Ok(()) => std::ptr::null_mut(), - Err(err) => Box::into_raw(Box::new(err)) as *mut c_void, + Err(err) => Box::into_raw(Box::new(err) as Box<io::Error>) as *mut c_void, } } @@ -148,7 +167,7 @@ fn get_llvm_object_symbols( Box::into_raw(Box::new(io::Error::new( io::ErrorKind::Other, format!("LLVM error: {}", error.to_string_lossy()), - ))) as *mut c_void + )) as Box<io::Error>) as *mut c_void } } diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 99906ea7bce..668795191a2 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -2,6 +2,7 @@ use std::collections::BTreeMap; use std::ffi::{CStr, CString}; use std::fs::File; use std::path::Path; +use std::ptr::NonNull; use std::sync::Arc; use std::{io, iter, slice}; @@ -305,11 +306,8 @@ fn fat_lto( assert!(!serialized_modules.is_empty(), "must have at least one serialized module"); let (buffer, name) = serialized_modules.remove(0); info!("no in-memory regular modules to choose from, parsing {:?}", name); - ModuleCodegen { - module_llvm: ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?, - name: name.into_string().unwrap(), - kind: ModuleKind::Regular, - } + let llvm_module = ModuleLlvm::parse(cgcx, &name, buffer.data(), dcx)?; + ModuleCodegen::new_regular(name.into_string().unwrap(), llvm_module) } }; { @@ -655,14 +653,14 @@ pub(crate) fn run_pass_manager( } unsafe { - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; } if cfg!(llvm_enzyme) && enable_ad { let opt_stage = llvm::OptStage::FatLTO; let stage = write::AutodiffStage::PostAD; unsafe { - write::llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, stage)?; + write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage)?; } // This is the final IR, so people should be able to inspect the optimized autodiff output. @@ -729,6 +727,11 @@ impl ThinBuffer { ThinBuffer(buffer) } } + + pub unsafe fn from_raw_ptr(ptr: *mut llvm::ThinLTOBuffer) -> ThinBuffer { + let mut ptr = NonNull::new(ptr).unwrap(); + ThinBuffer(unsafe { ptr.as_mut() }) + } } impl ThinBufferMethods for ThinBuffer { @@ -772,11 +775,11 @@ pub(crate) unsafe fn optimize_thin_module( // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. 
let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?; - let mut module = ModuleCodegen { - module_llvm, - name: thin_module.name().to_string(), - kind: ModuleKind::Regular, - }; + let mut module = ModuleCodegen::new_regular(thin_module.name(), module_llvm); + // Given that the newly created module lacks a thinlto buffer for embedding, we need to re-add it here. + if cgcx.config(ModuleKind::Regular).embed_bitcode() { + module.thin_lto_buffer = Some(thin_module.data().to_vec()); + } { let target = &*module.module_llvm.tm; let llmod = module.module_llvm.llmod(); @@ -793,7 +796,9 @@ pub(crate) unsafe fn optimize_thin_module( { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); - unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) }; + unsafe { + llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target.raw()) + }; save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); } @@ -823,7 +828,7 @@ pub(crate) unsafe fn optimize_thin_module( let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name()); if unsafe { - !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target) + !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw()) } { return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); } diff --git a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs index f075f332462..dfde4595590 100644 --- a/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs +++ b/compiler/rustc_codegen_llvm/src/back/owned_target_machine.rs @@ -1,6 +1,5 @@ use std::ffi::{CStr, c_char}; use std::marker::PhantomData; -use std::ops::Deref; use std::ptr::NonNull; use rustc_data_structures::small_c_str::SmallCStr; @@ -80,12 +79,12 @@ impl OwnedTargetMachine { .map(|tm_unique| Self { tm_unique, phantom: PhantomData }) .ok_or_else(|| LlvmError::CreateTargetMachine { triple: SmallCStr::from(triple) }) } -} - -impl Deref for OwnedTargetMachine { - type Target = llvm::TargetMachine; - fn deref(&self) -> &Self::Target { + /// Returns inner `llvm::TargetMachine` type. + /// + /// This could be a `Deref` implementation, but `llvm::TargetMachine` is an extern type and + /// `Deref::Target: ?Sized`. + pub fn raw(&self) -> &llvm::TargetMachine { // SAFETY: constructing ensures we have a valid pointer created by // llvm::LLVMRustCreateTargetMachine. 
unsafe { self.tm_unique.as_ref() } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index b67890c0465..bead4c82a81 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -1,6 +1,7 @@ use std::ffi::{CStr, CString}; use std::io::{self, Write}; use std::path::{Path, PathBuf}; +use std::ptr::null_mut; use std::sync::Arc; use std::{fs, slice, str}; @@ -15,7 +16,7 @@ use rustc_codegen_ssa::back::write::{ TargetMachineFactoryFn, }; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; +use rustc_codegen_ssa::{CompiledModule, ModuleCodegen, ModuleKind}; use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::small_c_str::SmallCStr; use rustc_errors::{DiagCtxtHandle, FatalError, Level}; @@ -551,6 +552,7 @@ pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, module: &ModuleCodegen<ModuleLlvm>, + thin_lto_buffer: Option<&mut *mut llvm::ThinLTOBuffer>, config: &ModuleConfig, opt_level: config::OptLevel, opt_stage: llvm::OptStage, @@ -584,7 +586,17 @@ pub(crate) unsafe fn llvm_optimize( vectorize_loop = config.vectorize_loop; } trace!(?unroll_loops, ?vectorize_slp, ?vectorize_loop, ?run_enzyme); - let using_thin_buffers = opt_stage == llvm::OptStage::PreLinkThinLTO || config.bitcode_needed(); + if thin_lto_buffer.is_some() { + assert!( + matches!( + opt_stage, + llvm::OptStage::PreLinkNoLTO + | llvm::OptStage::PreLinkFatLTO + | llvm::OptStage::PreLinkThinLTO + ), + "the bitcode for LTO can only be obtained at the pre-link stage" + ); + } let pgo_gen_path = get_pgo_gen_path(config); let pgo_use_path = get_pgo_use_path(config); let pgo_sample_use_path = get_pgo_sample_use_path(config); @@ -637,14 +649,16 @@ pub(crate) unsafe fn llvm_optimize( let result = unsafe { llvm::LLVMRustOptimize( module.module_llvm.llmod(), - &*module.module_llvm.tm, + &*module.module_llvm.tm.raw(), to_pass_builder_opt_level(opt_level), opt_stage, cgcx.opts.cg.linker_plugin_lto.enabled(), config.no_prepopulate_passes, config.verify_llvm_ir, config.lint_llvm_ir, - using_thin_buffers, + thin_lto_buffer, + config.emit_thin_lto, + config.emit_thin_lto_summary, config.merge_functions, unroll_loops, vectorize_slp, @@ -675,7 +689,7 @@ pub(crate) unsafe fn llvm_optimize( pub(crate) unsafe fn optimize( cgcx: &CodegenContext<LlvmCodegenBackend>, dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<ModuleLlvm>, + module: &mut ModuleCodegen<ModuleLlvm>, config: &ModuleConfig, ) -> Result<(), FatalError> { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); @@ -705,9 +719,53 @@ pub(crate) unsafe fn optimize( // Otherwise we pretend AD is already done and run the normal opt pipeline (=PostAD). let consider_ad = cfg!(llvm_enzyme) && config.autodiff.contains(&config::AutoDiff::Enable); let autodiff_stage = if consider_ad { AutodiffStage::PreAD } else { AutodiffStage::PostAD }; - return unsafe { - llvm_optimize(cgcx, dcx, module, config, opt_level, opt_stage, autodiff_stage) + // The embedded bitcode is used to run LTO/ThinLTO. + // The bitcode obtained during the `codegen` phase is no longer suitable for performing LTO. + // It may have undergone LTO due to ThinLocal, so we need to obtain the embedded bitcode at + // this point. 
+ let mut thin_lto_buffer = if (module.kind == ModuleKind::Regular + && config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full)) + || config.emit_thin_lto_summary + { + Some(null_mut()) + } else { + None }; + unsafe { + llvm_optimize( + cgcx, + dcx, + module, + thin_lto_buffer.as_mut(), + config, + opt_level, + opt_stage, + autodiff_stage, + ) + }?; + if let Some(thin_lto_buffer) = thin_lto_buffer { + let thin_lto_buffer = unsafe { ThinBuffer::from_raw_ptr(thin_lto_buffer) }; + module.thin_lto_buffer = Some(thin_lto_buffer.data().to_vec()); + let bc_summary_out = + cgcx.output_filenames.temp_path(OutputType::ThinLinkBitcode, module_name); + if config.emit_thin_lto_summary + && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name() + { + let summary_data = thin_lto_buffer.thin_link_data(); + cgcx.prof.artifact_size( + "llvm_bitcode_summary", + thin_link_bitcode_filename.to_string_lossy(), + summary_data.len() as u64, + ); + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_emit_bitcode_summary", + &*module.name, + ); + if let Err(err) = fs::write(&bc_summary_out, summary_data) { + dcx.emit_err(WriteBytecode { path: &bc_summary_out, err }); + } + } + } } Ok(()) } @@ -760,59 +818,41 @@ pub(crate) unsafe fn codegen( // otherwise requested. let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); - let bc_summary_out = - cgcx.output_filenames.temp_path(OutputType::ThinLinkBitcode, module_name); let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); if config.bitcode_needed() { - let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_module_codegen_make_bitcode", &*module.name); - let thin = ThinBuffer::new(llmod, config.emit_thin_lto, config.emit_thin_lto_summary); - let data = thin.data(); - - if let Some(bitcode_filename) = bc_out.file_name() { - cgcx.prof.artifact_size( - "llvm_bitcode", - bitcode_filename.to_string_lossy(), - data.len() as u64, - ); - } - - if config.emit_thin_lto_summary - && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name() - { - let summary_data = thin.thin_link_data(); - cgcx.prof.artifact_size( - "llvm_bitcode_summary", - thin_link_bitcode_filename.to_string_lossy(), - summary_data.len() as u64, - ); - - let _timer = cgcx.prof.generic_activity_with_arg( - "LLVM_module_codegen_emit_bitcode_summary", - &*module.name, - ); - if let Err(err) = fs::write(&bc_summary_out, summary_data) { - dcx.emit_err(WriteBytecode { path: &bc_summary_out, err }); - } - } - if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + let thin = { + let _timer = cgcx.prof.generic_activity_with_arg( + "LLVM_module_codegen_make_bitcode", + &*module.name, + ); + ThinBuffer::new(llmod, config.emit_thin_lto, false) + }; + let data = thin.data(); let _timer = cgcx .prof .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name); + if let Some(bitcode_filename) = bc_out.file_name() { + cgcx.prof.artifact_size( + "llvm_bitcode", + bitcode_filename.to_string_lossy(), + data.len() as u64, + ); + } if let Err(err) = fs::write(&bc_out, data) { dcx.emit_err(WriteBytecode { path: &bc_out, err }); } } - if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { + if config.embed_bitcode() && module.kind == ModuleKind::Regular { let _timer = cgcx .prof .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name); + let thin_bc = + module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode"); unsafe { - embed_bitcode(cgcx, llcx, llmod, 
&config.bc_cmdline, data); + embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, &thin_bc); } } } @@ -875,7 +915,7 @@ pub(crate) unsafe fn codegen( }; write_output_file( dcx, - tm, + tm.raw(), config.no_builtins, llmod, &path, @@ -909,7 +949,7 @@ pub(crate) unsafe fn codegen( write_output_file( dcx, - tm, + tm.raw(), config.no_builtins, llmod, &obj_out, diff --git a/compiler/rustc_codegen_llvm/src/base.rs b/compiler/rustc_codegen_llvm/src/base.rs index d35c7945bae..6bd27914dbd 100644 --- a/compiler/rustc_codegen_llvm/src/base.rs +++ b/compiler/rustc_codegen_llvm/src/base.rs @@ -13,10 +13,10 @@ use std::time::Instant; +use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::*; -use rustc_codegen_ssa::{ModuleCodegen, ModuleKind}; use rustc_data_structures::small_c_str::SmallCStr; use rustc_middle::dep_graph; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; @@ -133,11 +133,7 @@ pub(crate) fn compile_codegen_unit( } } - ModuleCodegen { - name: cgu_name.to_string(), - module_llvm: llvm_module, - kind: ModuleKind::Regular, - } + ModuleCodegen::new_regular(cgu_name.to_string(), llvm_module) } (module, cost) diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index 677a9cd3e90..55d34f5f2ef 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -30,7 +30,7 @@ use tracing::{debug, instrument}; use crate::abi::FnAbiLlvmExt; use crate::common::Funclet; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, FullCx, GenericCx, SCx}; use crate::llvm::{ self, AtomicOrdering, AtomicRmwBinOp, BasicBlock, False, GEPNoWrapFlags, Metadata, True, }; @@ -40,15 +40,15 @@ use crate::value::Value; use crate::{attributes, llvm_util}; #[must_use] -pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SimpleCx<'ll>>> { +pub(crate) struct GenericBuilder<'a, 'll, CX: Borrow<SCx<'ll>>> { pub llbuilder: &'ll mut llvm::Builder<'ll>, - pub cx: &'a CX, + pub cx: &'a GenericCx<'ll, CX>, } -pub(crate) type SBuilder<'a, 'll> = GenericBuilder<'a, 'll, SimpleCx<'ll>>; -pub(crate) type Builder<'a, 'll, 'tcx> = GenericBuilder<'a, 'll, CodegenCx<'ll, 'tcx>>; +pub(crate) type SBuilder<'a, 'll> = GenericBuilder<'a, 'll, SCx<'ll>>; +pub(crate) type Builder<'a, 'll, 'tcx> = GenericBuilder<'a, 'll, FullCx<'ll, 'tcx>>; -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { fn drop(&mut self) { unsafe { llvm::LLVMDisposeBuilder(&mut *(self.llbuilder as *mut _)); @@ -57,7 +57,7 @@ impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> Drop for GenericBuilder<'a, 'll, CX> { } impl<'a, 'll> SBuilder<'a, 'll> { - fn call( + pub(crate) fn call( &mut self, llty: &'ll Type, llfn: &'ll Value, @@ -87,79 +87,36 @@ impl<'a, 'll> SBuilder<'a, 'll> { }; call } +} - fn with_scx(scx: &'a SimpleCx<'ll>) -> Self { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { + fn with_cx(scx: &'a GenericCx<'ll, CX>) -> Self { // Create a fresh builder from the simple context. 
- let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(scx.llcx) }; - SBuilder { llbuilder, cx: scx } + let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(scx.deref().borrow().llcx) }; + GenericBuilder { llbuilder, cx: scx } } -} -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { + pub(crate) fn bitcast(&mut self, val: &'ll Value, dest_ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildBitCast(self.llbuilder, val, dest_ty, UNNAMED) } } - fn ret_void(&mut self) { - unsafe { - llvm::LLVMBuildRetVoid(self.llbuilder); - } + pub(crate) fn ret_void(&mut self) { + llvm::LLVMBuildRetVoid(self.llbuilder); } - fn ret(&mut self, v: &'ll Value) { + pub(crate) fn ret(&mut self, v: &'ll Value) { unsafe { llvm::LLVMBuildRet(self.llbuilder, v); } } -} -impl<'a, 'll> SBuilder<'a, 'll> { - fn build(cx: &'a SimpleCx<'ll>, llbb: &'ll BasicBlock) -> SBuilder<'a, 'll> { - let bx = SBuilder::with_scx(cx); + + pub(crate) fn build(cx: &'a GenericCx<'ll, CX>, llbb: &'ll BasicBlock) -> Self { + let bx = Self::with_cx(cx); unsafe { llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb); } bx } - - fn check_call<'b>( - &mut self, - typ: &str, - fn_ty: &'ll Type, - llfn: &'ll Value, - args: &'b [&'ll Value], - ) -> Cow<'b, [&'ll Value]> { - assert!( - self.cx.type_kind(fn_ty) == TypeKind::Function, - "builder::{typ} not passed a function, but {fn_ty:?}" - ); - - let param_tys = self.cx.func_params_types(fn_ty); - - let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.cx.val_ty(v))) - .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); - - if all_args_match { - return Cow::Borrowed(args); - } - - let casted_args: Vec<_> = iter::zip(param_tys, args) - .enumerate() - .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.cx.val_ty(actual_val); - if expected_ty != actual_ty { - debug!( - "type mismatch in function call of {:?}. \ - Expected {:?} for param {}, got {:?}; injecting bitcast", - llfn, expected_ty, i, actual_ty - ); - self.bitcast(actual_val, expected_ty) - } else { - actual_val - } - }) - .collect(); - - Cow::Owned(casted_args) - } } /// Empty string, to be used where LLVM expects an instruction name, indicating @@ -167,17 +124,17 @@ impl<'a, 'll> SBuilder<'a, 'll> { // FIXME(eddyb) pass `&CStr` directly to FFI once it's a thin pointer. 
const UNNAMED: *const c_char = c"".as_ptr(); -impl<'ll, 'tcx> BackendTypes for Builder<'_, 'll, 'tcx> { - type Value = <CodegenCx<'ll, 'tcx> as BackendTypes>::Value; - type Metadata = <CodegenCx<'ll, 'tcx> as BackendTypes>::Metadata; - type Function = <CodegenCx<'ll, 'tcx> as BackendTypes>::Function; - type BasicBlock = <CodegenCx<'ll, 'tcx> as BackendTypes>::BasicBlock; - type Type = <CodegenCx<'ll, 'tcx> as BackendTypes>::Type; - type Funclet = <CodegenCx<'ll, 'tcx> as BackendTypes>::Funclet; - - type DIScope = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIScope; - type DILocation = <CodegenCx<'ll, 'tcx> as BackendTypes>::DILocation; - type DIVariable = <CodegenCx<'ll, 'tcx> as BackendTypes>::DIVariable; +impl<'ll, CX: Borrow<SCx<'ll>>> BackendTypes for GenericBuilder<'_, 'll, CX> { + type Value = <GenericCx<'ll, CX> as BackendTypes>::Value; + type Metadata = <GenericCx<'ll, CX> as BackendTypes>::Metadata; + type Function = <GenericCx<'ll, CX> as BackendTypes>::Function; + type BasicBlock = <GenericCx<'ll, CX> as BackendTypes>::BasicBlock; + type Type = <GenericCx<'ll, CX> as BackendTypes>::Type; + type Funclet = <GenericCx<'ll, CX> as BackendTypes>::Funclet; + + type DIScope = <GenericCx<'ll, CX> as BackendTypes>::DIScope; + type DILocation = <GenericCx<'ll, CX> as BackendTypes>::DILocation; + type DIVariable = <GenericCx<'ll, CX> as BackendTypes>::DIVariable; } impl abi::HasDataLayout for Builder<'_, '_, '_> { @@ -293,9 +250,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { } fn ret_void(&mut self) { - unsafe { - llvm::LLVMBuildRetVoid(self.llbuilder); - } + llvm::LLVMBuildRetVoid(self.llbuilder); } fn ret(&mut self, v: &'ll Value) { @@ -356,8 +311,8 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { // This function handles switch instructions with more than 2 targets and it needs to // emit branch weights metadata instead of using the intrinsic. // The values 1 and 2000 are the same as the values used by the `llvm.expect` intrinsic. 
- let cold_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(1)) }; - let hot_weight = unsafe { llvm::LLVMValueAsMetadata(self.cx.const_u32(2000)) }; + let cold_weight = llvm::LLVMValueAsMetadata(self.cx.const_u32(1)); + let hot_weight = llvm::LLVMValueAsMetadata(self.cx.const_u32(2000)); let weight = |is_cold: bool| -> &Metadata { if is_cold { cold_weight } else { hot_weight } }; @@ -405,7 +360,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { // Emit KCFI operand bundle let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); - if let Some(kcfi_bundle) = kcfi_bundle.as_deref() { + if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) { bundles.push(kcfi_bundle); } @@ -1433,7 +1388,7 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { // Emit KCFI operand bundle let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); - if let Some(kcfi_bundle) = kcfi_bundle.as_deref() { + if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) { bundles.push(kcfi_bundle); } @@ -1476,26 +1431,12 @@ impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> { } impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { - fn build(cx: &'a CodegenCx<'ll, 'tcx>, llbb: &'ll BasicBlock) -> Builder<'a, 'll, 'tcx> { - let bx = Builder::with_cx(cx); - unsafe { - llvm::LLVMPositionBuilderAtEnd(bx.llbuilder, llbb); - } - bx - } - - fn with_cx(cx: &'a CodegenCx<'ll, 'tcx>) -> Self { - // Create a fresh builder from the crate context. - let llbuilder = unsafe { llvm::LLVMCreateBuilderInContext(cx.llcx) }; - Builder { llbuilder, cx } - } - pub(crate) fn llfn(&self) -> &'ll Value { unsafe { llvm::LLVMGetBasicBlockParent(self.llbb()) } } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { fn position_at_start(&mut self, llbb: &'ll BasicBlock) { unsafe { llvm::LLVMRustPositionBuilderAtStart(self.llbuilder, llbb); @@ -1525,7 +1466,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { } } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { pub(crate) fn minnum(&mut self, lhs: &'ll Value, rhs: &'ll Value) -> &'ll Value { unsafe { llvm::LLVMRustBuildMinNum(self.llbuilder, lhs, rhs) } } @@ -1626,9 +1567,7 @@ impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { let ret = unsafe { llvm::LLVMBuildCatchRet(self.llbuilder, funclet.cleanuppad(), unwind) }; ret.expect("LLVM does not have support for catchret") } -} -impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { fn check_call<'b>( &mut self, typ: &str, @@ -1643,7 +1582,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { let param_tys = self.cx.func_params_types(fn_ty); - let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.val_ty(v))) + let all_args_match = iter::zip(¶m_tys, args.iter().map(|&v| self.cx.val_ty(v))) .all(|(expected_ty, actual_ty)| *expected_ty == actual_ty); if all_args_match { @@ -1653,7 +1592,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { let casted_args: Vec<_> = iter::zip(param_tys, args) .enumerate() .map(|(i, (expected_ty, &actual_val))| { - let actual_ty = self.val_ty(actual_val); + let actual_ty = self.cx.val_ty(actual_val); if expected_ty != actual_ty { debug!( "type mismatch in function call of {:?}. 
\ @@ -1669,12 +1608,12 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { Cow::Owned(casted_args) } -} -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { + pub(crate) fn va_arg(&mut self, list: &'ll Value, ty: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMBuildVAArg(self.llbuilder, list, ty, UNNAMED) } } } + impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { pub(crate) fn call_intrinsic(&mut self, intrinsic: &str, args: &[&'ll Value]) -> &'ll Value { let (ty, f) = self.cx.get_intrinsic(intrinsic); @@ -1694,7 +1633,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { self.call_intrinsic(intrinsic, &[self.cx.const_u64(size), ptr]); } } -impl<'a, 'll, CX: Borrow<SimpleCx<'ll>>> GenericBuilder<'a, 'll, CX> { +impl<'a, 'll, CX: Borrow<SCx<'ll>>> GenericBuilder<'a, 'll, CX> { pub(crate) fn phi( &mut self, ty: &'ll Type, @@ -1782,7 +1721,7 @@ impl<'a, 'll, 'tcx> Builder<'a, 'll, 'tcx> { // Emit KCFI operand bundle let kcfi_bundle = self.kcfi_operand_bundle(fn_attrs, fn_abi, instance, llfn); - if let Some(kcfi_bundle) = kcfi_bundle.as_deref() { + if let Some(kcfi_bundle) = kcfi_bundle.as_ref().map(|b| b.raw()) { bundles.push(kcfi_bundle); } diff --git a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs index 2c7899975e3..71705ecb4d0 100644 --- a/compiler/rustc_codegen_llvm/src/builder/autodiff.rs +++ b/compiler/rustc_codegen_llvm/src/builder/autodiff.rs @@ -3,6 +3,7 @@ use std::ptr; use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, AutoDiffItem, DiffActivity, DiffMode}; use rustc_codegen_ssa::ModuleCodegen; use rustc_codegen_ssa::back::write::ModuleConfig; +use rustc_codegen_ssa::traits::BaseTypeCodegenMethods as _; use rustc_errors::FatalError; use tracing::{debug, trace}; @@ -286,7 +287,8 @@ pub(crate) fn differentiate<'ll>( } let diag_handler = cgcx.create_dcx(); - let cx = SimpleCx { llmod: module.module_llvm.llmod(), llcx: module.module_llvm.llcx }; + + let cx = SimpleCx::new(module.module_llvm.llmod(), module.module_llvm.llcx, cgcx.pointer_size); // First of all, did the user try to use autodiff without using the -Zautodiff=Enable flag? if !diff_items.is_empty() diff --git a/compiler/rustc_codegen_llvm/src/common.rs b/compiler/rustc_codegen_llvm/src/common.rs index f17d98fa242..457e5452ce9 100644 --- a/compiler/rustc_codegen_llvm/src/common.rs +++ b/compiler/rustc_codegen_llvm/src/common.rs @@ -1,5 +1,7 @@ //! Code that is useful in various codegen modules. +use std::borrow::Borrow; + use libc::{c_char, c_uint}; use rustc_abi as abi; use rustc_abi::Primitive::Pointer; @@ -18,6 +20,7 @@ use tracing::debug; use crate::consts::const_alloc_to_llvm; pub(crate) use crate::context::CodegenCx; +use crate::context::{GenericCx, SCx}; use crate::llvm::{self, BasicBlock, Bool, ConstantInt, False, Metadata, True}; use crate::type_::Type; use crate::value::Value; @@ -77,11 +80,11 @@ impl<'ll> Funclet<'ll> { } pub(crate) fn bundle(&self) -> &llvm::OperandBundle<'ll> { - &self.operand + self.operand.raw() } } -impl<'ll> BackendTypes for CodegenCx<'ll, '_> { +impl<'ll, CX: Borrow<SCx<'ll>>> BackendTypes for GenericCx<'ll, CX> { type Value = &'ll Value; type Metadata = &'ll Metadata; // FIXME(eddyb) replace this with a `Function` "subclass" of `Value`. 
@@ -118,7 +121,7 @@ impl<'ll> CodegenCx<'ll, '_> { } } -impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> ConstCodegenMethods for CodegenCx<'ll, 'tcx> { fn const_null(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMConstNull(t) } } @@ -127,10 +130,6 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { unsafe { llvm::LLVMGetUndef(t) } } - fn is_undef(&self, v: &'ll Value) -> bool { - unsafe { llvm::LLVMIsUndef(v) == True } - } - fn const_poison(&self, t: &'ll Type) -> &'ll Value { unsafe { llvm::LLVMGetPoison(t) } } @@ -209,28 +208,24 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn const_str(&self, s: &str) -> (&'ll Value, &'ll Value) { - let str_global = *self - .const_str_cache - .borrow_mut() - .raw_entry_mut() - .from_key(s) - .or_insert_with(|| { - let sc = self.const_bytes(s.as_bytes()); - let sym = self.generate_local_symbol_name("str"); - let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| { - bug!("symbol `{}` is already defined", sym); - }); - llvm::set_initializer(g, sc); - unsafe { - llvm::LLVMSetGlobalConstant(g, True); - llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); - } - llvm::set_linkage(g, llvm::Linkage::InternalLinkage); - // Cast to default address space if globals are in a different addrspace - let g = self.const_pointercast(g, self.type_ptr()); - (s.to_owned(), g) - }) - .1; + let mut const_str_cache = self.const_str_cache.borrow_mut(); + let str_global = const_str_cache.get(s).copied().unwrap_or_else(|| { + let sc = self.const_bytes(s.as_bytes()); + let sym = self.generate_local_symbol_name("str"); + let g = self.define_global(&sym, self.val_ty(sc)).unwrap_or_else(|| { + bug!("symbol `{}` is already defined", sym); + }); + llvm::set_initializer(g, sc); + unsafe { + llvm::LLVMSetGlobalConstant(g, True); + llvm::LLVMSetUnnamedAddress(g, llvm::UnnamedAddr::Global); + } + llvm::set_linkage(g, llvm::Linkage::InternalLinkage); + // Cast to default address space if globals are in a different addrspace + let g = self.const_pointercast(g, self.type_ptr()); + const_str_cache.insert(s.to_owned(), g); + g + }); let len = s.len(); (str_global, self.const_usize(len as u64)) } @@ -350,7 +345,7 @@ impl<'ll, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } } - fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value { + fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value { const_alloc_to_llvm(self, alloc, /*static*/ false) } diff --git a/compiler/rustc_codegen_llvm/src/consts.rs b/compiler/rustc_codegen_llvm/src/consts.rs index 330e8a8f406..a4e5749b3ac 100644 --- a/compiler/rustc_codegen_llvm/src/consts.rs +++ b/compiler/rustc_codegen_llvm/src/consts.rs @@ -16,7 +16,6 @@ use rustc_middle::mir::mono::MonoItem; use rustc_middle::ty::Instance; use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; use rustc_middle::{bug, span_bug}; -use rustc_session::config::Lto; use tracing::{debug, instrument, trace}; use crate::common::{AsCCharPtr, CodegenCx}; @@ -344,11 +343,11 @@ impl<'ll> CodegenCx<'ll, '_> { // Local definitions can never be imported, so we must not apply // the DLLImport annotation. && !dso_local - // ThinLTO can't handle this workaround in all cases, so we don't - // emit the attrs. Instead we make them unnecessary by disallowing - // dynamic linking when linker plugin based LTO is enabled. 
- && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled() - && self.tcx.sess.lto() != Lto::Thin; + // Linker plugin ThinLTO doesn't create the self-dllimport Rust uses for rlibs + // as the code generation happens out of process. Instead we assume static linkage + // and disallow dynamic linking when linker plugin based LTO is enabled. + // Regular in-process ThinLTO doesn't need this workaround. + && !self.tcx.sess.opts.cg.linker_plugin_lto.enabled(); // If this assertion triggers, there's something wrong with commandline // argument validation. @@ -490,7 +489,7 @@ impl<'ll> CodegenCx<'ll, '_> { llvm::LLVMMDStringInContext2(self.llcx, bytes.as_c_char_ptr(), bytes.len()); let data = [section, alloc]; let meta = llvm::LLVMMDNodeInContext2(self.llcx, data.as_ptr(), data.len()); - let val = llvm::LLVMMetadataAsValue(self.llcx, meta); + let val = self.get_metadata_value(meta); llvm::LLVMAddNamedMetadataOperand( self.llmod, c"wasm.custom_sections".as_ptr(), diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index e7952bc95e7..9f8ec0ea526 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -1,13 +1,13 @@ use std::borrow::Borrow; use std::cell::{Cell, RefCell}; use std::ffi::{CStr, c_char, c_uint}; +use std::marker::PhantomData; use std::ops::Deref; use std::str; -use rustc_abi::{HasDataLayout, TargetDataLayout, VariantIdx}; +use rustc_abi::{HasDataLayout, Size, TargetDataLayout, VariantIdx}; use rustc_codegen_ssa::back::versioned_llvm_target; use rustc_codegen_ssa::base::{wants_msvc_seh, wants_wasm_eh}; -use rustc_codegen_ssa::common::TypeKind; use rustc_codegen_ssa::errors as ssa_errors; use rustc_codegen_ssa::traits::*; use rustc_data_structures::base_n::{ALPHANUMERIC_ONLY, ToBaseN}; @@ -32,9 +32,9 @@ use smallvec::SmallVec; use crate::back::write::to_llvm_code_model; use crate::callee::get_fn; -use crate::common::{self, AsCCharPtr}; +use crate::common::AsCCharPtr; use crate::debuginfo::metadata::apply_vcall_visibility_metadata; -use crate::llvm::{Metadata, MetadataType}; +use crate::llvm::Metadata; use crate::type_::Type; use crate::value::Value; use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util}; @@ -43,18 +43,19 @@ use crate::{attributes, coverageinfo, debuginfo, llvm, llvm_util}; /// However, there are various cx related functions which we want to be available to the builder and /// other compiler pieces. Here we define a small subset which has enough information and can be /// moved around more freely. -pub(crate) struct SimpleCx<'ll> { +pub(crate) struct SCx<'ll> { pub llmod: &'ll llvm::Module, pub llcx: &'ll llvm::Context, + pub isize_ty: &'ll Type, } -impl<'ll> Borrow<SimpleCx<'ll>> for CodegenCx<'ll, '_> { - fn borrow(&self) -> &SimpleCx<'ll> { +impl<'ll> Borrow<SCx<'ll>> for FullCx<'ll, '_> { + fn borrow(&self) -> &SCx<'ll> { &self.scx } } -impl<'ll, 'tcx> Deref for CodegenCx<'ll, 'tcx> { +impl<'ll, 'tcx> Deref for FullCx<'ll, 'tcx> { type Target = SimpleCx<'ll>; #[inline] @@ -63,10 +64,25 @@ impl<'ll, 'tcx> Deref for CodegenCx<'ll, 'tcx> { } } +pub(crate) struct GenericCx<'ll, T: Borrow<SCx<'ll>>>(T, PhantomData<SCx<'ll>>); + +impl<'ll, T: Borrow<SCx<'ll>>> Deref for GenericCx<'ll, T> { + type Target = T; + + #[inline] + fn deref(&self) -> &Self::Target { + &self.0 + } +} + +pub(crate) type SimpleCx<'ll> = GenericCx<'ll, SCx<'ll>>; + /// There is one `CodegenCx` per codegen unit. 
Each one has its own LLVM /// `llvm::Context` so that several codegen units may be processed in parallel. /// All other LLVM data structures in the `CodegenCx` are tied to that `llvm::Context`. -pub(crate) struct CodegenCx<'ll, 'tcx> { +pub(crate) type CodegenCx<'ll, 'tcx> = GenericCx<'ll, FullCx<'ll, 'tcx>>; + +pub(crate) struct FullCx<'ll, 'tcx> { pub tcx: TyCtxt<'tcx>, pub scx: SimpleCx<'ll>, pub use_dll_storage_attrs: bool, @@ -104,8 +120,6 @@ pub(crate) struct CodegenCx<'ll, 'tcx> { /// Mapping of scalar types to llvm types. pub scalar_lltypes: RefCell<FxHashMap<Ty<'tcx>, &'ll Type>>, - pub isize_ty: &'ll Type, - /// Extra per-CGU codegen state needed when coverage instrumentation is enabled. pub coverage_cx: Option<coverageinfo::CguCoverageContext<'ll, 'tcx>>, pub dbg_cx: Option<debuginfo::CodegenUnitDebugContext<'ll, 'tcx>>, @@ -205,7 +219,7 @@ pub(crate) unsafe fn create_module<'ll>( { let tm = crate::back::write::create_informational_target_machine(tcx.sess, false); unsafe { - llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, &tm); + llvm::LLVMRustSetDataLayoutFromTargetMachine(llmod, tm.raw()); } let llvm_data_layout = unsafe { llvm::LLVMGetDataLayoutStr(llmod) }; @@ -579,33 +593,33 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { None }; - let isize_ty = Type::ix_llcx(llcx, tcx.data_layout.pointer_size.bits()); - - CodegenCx { - tcx, - scx: SimpleCx { llcx, llmod }, - use_dll_storage_attrs, - tls_model, - codegen_unit, - instances: Default::default(), - vtables: Default::default(), - const_str_cache: Default::default(), - const_globals: Default::default(), - statics_to_rauw: RefCell::new(Vec::new()), - used_statics: RefCell::new(Vec::new()), - compiler_used_statics: RefCell::new(Vec::new()), - type_lowering: Default::default(), - scalar_lltypes: Default::default(), - isize_ty, - coverage_cx, - dbg_cx, - eh_personality: Cell::new(None), - eh_catch_typeinfo: Cell::new(None), - rust_try_fn: Cell::new(None), - intrinsics: Default::default(), - local_gen_sym_counter: Cell::new(0), - renamed_statics: Default::default(), - } + GenericCx( + FullCx { + tcx, + scx: SimpleCx::new(llmod, llcx, tcx.data_layout.pointer_size), + use_dll_storage_attrs, + tls_model, + codegen_unit, + instances: Default::default(), + vtables: Default::default(), + const_str_cache: Default::default(), + const_globals: Default::default(), + statics_to_rauw: RefCell::new(Vec::new()), + used_statics: RefCell::new(Vec::new()), + compiler_used_statics: RefCell::new(Vec::new()), + type_lowering: Default::default(), + scalar_lltypes: Default::default(), + coverage_cx, + dbg_cx, + eh_personality: Cell::new(None), + eh_catch_typeinfo: Cell::new(None), + rust_try_fn: Cell::new(None), + intrinsics: Default::default(), + local_gen_sym_counter: Cell::new(0), + renamed_statics: Default::default(), + }, + PhantomData, + ) } pub(crate) fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> { @@ -628,24 +642,32 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { llvm::set_section(g, c"llvm.metadata"); } } + impl<'ll> SimpleCx<'ll> { - pub(crate) fn val_ty(&self, v: &'ll Value) -> &'ll Type { - common::val_ty(v) + pub(crate) fn new( + llmod: &'ll llvm::Module, + llcx: &'ll llvm::Context, + pointer_size: Size, + ) -> Self { + let isize_ty = llvm::Type::ix_llcx(llcx, pointer_size.bits()); + Self(SCx { llmod, llcx, isize_ty }, PhantomData) } +} +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { pub(crate) fn get_metadata_value(&self, metadata: &'ll Metadata) -> &'ll Value { - unsafe { llvm::LLVMMetadataAsValue(self.llcx, metadata) } 
+ llvm::LLVMMetadataAsValue(self.llcx(), metadata) } pub(crate) fn get_function(&self, name: &str) -> Option<&'ll Value> { let name = SmallCStr::new(name); - unsafe { llvm::LLVMGetNamedFunction(self.llmod, name.as_ptr()) } + unsafe { llvm::LLVMGetNamedFunction((**self).borrow().llmod, name.as_ptr()) } } - pub(crate) fn get_md_kind_id(&self, name: &str) -> u32 { + pub(crate) fn get_md_kind_id(&self, name: &str) -> llvm::MetadataKindId { unsafe { llvm::LLVMGetMDKindIDInContext( - self.llcx, + self.llcx(), name.as_ptr() as *const c_char, name.len() as c_uint, ) @@ -654,13 +676,9 @@ impl<'ll> SimpleCx<'ll> { pub(crate) fn create_metadata(&self, name: String) -> Option<&'ll Metadata> { Some(unsafe { - llvm::LLVMMDStringInContext2(self.llcx, name.as_ptr() as *const c_char, name.len()) + llvm::LLVMMDStringInContext2(self.llcx(), name.as_ptr() as *const c_char, name.len()) }) } - - pub(crate) fn type_kind(&self, ty: &'ll Type) -> TypeKind { - unsafe { llvm::LLVMRustGetTypeKind(ty).to_generic() } - } } impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { @@ -1203,27 +1221,18 @@ impl CodegenCx<'_, '_> { name.push_str(&(idx as u64).to_base(ALPHANUMERIC_ONLY)); name } - - /// A wrapper for [`llvm::LLVMSetMetadata`], but it takes `Metadata` as a parameter instead of `Value`. - pub(crate) fn set_metadata<'a>(&self, val: &'a Value, kind_id: MetadataType, md: &'a Metadata) { - unsafe { - let node = llvm::LLVMMetadataAsValue(&self.llcx, md); - llvm::LLVMSetMetadata(val, kind_id as c_uint, node); - } - } } -// This is a duplication of the set_metadata function above. However, so far it's the only one -// shared between both contexts, so it doesn't seem worth it to make the Cx generic like we did it -// for the Builder. -impl SimpleCx<'_> { - #[allow(unused)] +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { /// A wrapper for [`llvm::LLVMSetMetadata`], but it takes `Metadata` as a parameter instead of `Value`. - pub(crate) fn set_metadata<'a>(&self, val: &'a Value, kind_id: MetadataType, md: &'a Metadata) { - unsafe { - let node = llvm::LLVMMetadataAsValue(&self.llcx, md); - llvm::LLVMSetMetadata(val, kind_id as c_uint, node); - } + pub(crate) fn set_metadata<'a>( + &self, + val: &'a Value, + kind_id: impl Into<llvm::MetadataKindId>, + md: &'ll Metadata, + ) { + let node = self.get_metadata_value(md); + llvm::LLVMSetMetadata(val, kind_id.into(), node); } } diff --git a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs index c53ea6d4666..80e54bf045e 100644 --- a/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs +++ b/compiler/rustc_codegen_llvm/src/coverageinfo/mapgen/covfun.rs @@ -8,7 +8,7 @@ use std::ffi::CString; use rustc_abi::Align; use rustc_codegen_ssa::traits::{ - BaseTypeCodegenMethods, ConstCodegenMethods, StaticCodegenMethods, + BaseTypeCodegenMethods as _, ConstCodegenMethods, StaticCodegenMethods, }; use rustc_middle::mir::coverage::{ BasicCoverageBlock, CovTerm, CoverageIdsInfo, Expression, FunctionCoverageInfo, Mapping, diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index e79662ebc64..2419ec1f888 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -11,6 +11,8 @@ //! * Use define_* family of methods when you might be defining the Value. //! * When in doubt, define. 
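The `SimpleCx`/`CodegenCx` split above (and the `impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX>` blocks that follow) hinge on one pattern: a generic wrapper whose shared methods only require borrowing the common core state. A stripped-down sketch of that pattern, with placeholder types instead of the real LLVM module/context state, might look like:

    use std::borrow::Borrow;
    use std::ops::Deref;

    /// Placeholder for the shared core state (`SCx` holds `llmod`/`llcx`/`isize_ty` in the real code).
    struct Core {
        name: String,
    }

    /// Anything that can lend out a `Core` gets the shared helper methods.
    struct GenericCx<T: Borrow<Core>>(T);

    impl<T: Borrow<Core>> Deref for GenericCx<T> {
        type Target = T;
        fn deref(&self) -> &T {
            &self.0
        }
    }

    impl<T: Borrow<Core>> GenericCx<T> {
        fn name(&self) -> &str {
            &(**self).borrow().name
        }
    }

    /// The "simple" flavor wraps the core directly...
    type SimpleCx = GenericCx<Core>;

    /// ...while the "full" flavor wraps a larger struct that can still lend the core out.
    struct Full {
        core: Core,
        extra: u32,
    }

    impl Borrow<Core> for Full {
        fn borrow(&self) -> &Core {
            &self.core
        }
    }

    type FullCx = GenericCx<Full>;

    fn main() {
        let simple: SimpleCx = GenericCx(Core { name: "simple".into() });
        let full: FullCx = GenericCx(Full { core: Core { name: "full".into() }, extra: 7 });
        assert_eq!(simple.name(), "simple");
        assert_eq!(full.name(), "full"); // the same helper works for both flavors
        assert_eq!(full.extra, 7); // fields of the full context stay reachable via Deref
    }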
+use std::borrow::Borrow; + use itertools::Itertools; use rustc_codegen_ssa::traits::TypeMembershipCodegenMethods; use rustc_data_structures::fx::FxIndexSet; @@ -22,7 +24,7 @@ use tracing::debug; use crate::abi::FnAbiLlvmExt; use crate::common::AsCCharPtr; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, GenericCx, SCx, SimpleCx}; use crate::llvm::AttributePlace::Function; use crate::llvm::Visibility; use crate::type_::Type; @@ -81,16 +83,25 @@ pub(crate) fn declare_raw_fn<'ll, 'tcx>( llfn } -impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { /// Declare a global value. /// /// If there’s a value with the same name already declared, the function will /// return its Value instead. pub(crate) fn declare_global(&self, name: &str, ty: &'ll Type) -> &'ll Value { debug!("declare_global(name={:?})", name); - unsafe { llvm::LLVMRustGetOrInsertGlobal(self.llmod, name.as_c_char_ptr(), name.len(), ty) } + unsafe { + llvm::LLVMRustGetOrInsertGlobal( + (**self).borrow().llmod, + name.as_c_char_ptr(), + name.len(), + ty, + ) + } } +} +impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { /// Declare a C ABI function. /// /// Only use this for foreign function ABIs and glue. For Rust functions use diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 56fae135e55..660fc7ec4c4 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -470,7 +470,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { let layout = self.layout_of(tp_ty).layout; let use_integer_compare = match layout.backend_repr() { Scalar(_) | ScalarPair(_, _) => true, - Vector { .. } => false, + SimdVector { .. } => false, Memory { .. } => { // For rusty ABIs, small aggregates are actually passed // as `RegKind::Integer` (see `FnAbi::adjust_for_abi`), @@ -649,7 +649,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { fn type_test(&mut self, pointer: Self::Value, typeid: Self::Metadata) -> Self::Value { // Test the called operand using llvm.type.test intrinsic. The LowerTypeTests link-time // optimization pass replaces calls to this intrinsic with code to test type membership. 
- let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) }; + let typeid = self.get_metadata_value(typeid); self.call_intrinsic("llvm.type.test", &[pointer, typeid]) } @@ -659,7 +659,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { vtable_byte_offset: u64, typeid: &'ll Metadata, ) -> Self::Value { - let typeid = unsafe { llvm::LLVMMetadataAsValue(&self.llcx, typeid) }; + let typeid = self.get_metadata_value(typeid); let vtable_byte_offset = self.const_i32(vtable_byte_offset as i32); let type_checked_load = self.call_intrinsic("llvm.type.checked.load", &[llvtable, vtable_byte_offset, typeid]); @@ -1581,8 +1581,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>( sym::simd_floor => ("floor", bx.type_func(&[vec_ty], vec_ty)), sym::simd_fma => ("fma", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), sym::simd_relaxed_fma => ("fmuladd", bx.type_func(&[vec_ty, vec_ty, vec_ty], vec_ty)), - sym::simd_fpowi => ("powi", bx.type_func(&[vec_ty, bx.type_i32()], vec_ty)), - sym::simd_fpow => ("pow", bx.type_func(&[vec_ty, vec_ty], vec_ty)), sym::simd_fsin => ("sin", bx.type_func(&[vec_ty], vec_ty)), sym::simd_fsqrt => ("sqrt", bx.type_func(&[vec_ty], vec_ty)), sym::simd_round => ("round", bx.type_func(&[vec_ty], vec_ty)), @@ -1615,8 +1613,6 @@ fn generic_simd_intrinsic<'ll, 'tcx>( | sym::simd_flog | sym::simd_floor | sym::simd_fma - | sym::simd_fpow - | sym::simd_fpowi | sym::simd_fsin | sym::simd_fsqrt | sym::simd_relaxed_fma diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index e9e1b644f18..f622646a5d9 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -12,7 +12,6 @@ #![feature(exact_size_is_empty)] #![feature(extern_types)] #![feature(file_buffered)] -#![feature(hash_raw_entry)] #![feature(if_let_guard)] #![feature(impl_trait_in_assoc_type)] #![feature(iter_intersperse)] @@ -20,7 +19,6 @@ #![feature(rustdoc_internals)] #![feature(slice_as_array)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::any::Any; @@ -29,6 +27,7 @@ use std::mem::ManuallyDrop; use back::owned_target_machine::OwnedTargetMachine; use back::write::{create_informational_target_machine, create_target_machine}; +use context::SimpleCx; use errors::{AutoDiffWithoutLTO, ParseTargetMachineConfig}; pub(crate) use llvm_util::target_features_cfg; use rustc_ast::expand::allocator::AllocatorKind; @@ -116,9 +115,11 @@ impl ExtraBackendMethods for LlvmCodegenBackend { kind: AllocatorKind, alloc_error_handler_kind: AllocatorKind, ) -> ModuleLlvm { - let mut module_llvm = ModuleLlvm::new_metadata(tcx, module_name); + let module_llvm = ModuleLlvm::new_metadata(tcx, module_name); + let cx = + SimpleCx::new(module_llvm.llmod(), &module_llvm.llcx, tcx.data_layout.pointer_size); unsafe { - allocator::codegen(tcx, &mut module_llvm, module_name, kind, alloc_error_handler_kind); + allocator::codegen(tcx, cx, module_name, kind, alloc_error_handler_kind); } module_llvm } @@ -194,7 +195,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { unsafe fn optimize( cgcx: &CodegenContext<Self>, dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<Self::Module>, + module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, ) -> Result<(), FatalError> { unsafe { back::write::optimize(cgcx, dcx, module, config) } @@ -339,8 +340,8 @@ impl CodegenBackend for LlvmCodegenBackend { llvm_util::print_version(); } - fn target_features_cfg(&self, sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - 
target_features_cfg(sess, allow_unstable) + fn target_features_cfg(&self, sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { + target_features_cfg(sess) } fn codegen_crate<'tcx>( diff --git a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs index 3c2c6964a3d..f6b23862907 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/enzyme_ffi.rs @@ -3,13 +3,14 @@ use libc::{c_char, c_uint}; +use super::MetadataKindId; use super::ffi::{BasicBlock, Metadata, Module, Type, Value}; use crate::llvm::Bool; #[link(name = "llvm-wrapper", kind = "static")] unsafe extern "C" { // Enzyme - pub(crate) fn LLVMRustHasMetadata(I: &Value, KindID: c_uint) -> bool; + pub(crate) safe fn LLVMRustHasMetadata(I: &Value, KindID: MetadataKindId) -> bool; pub(crate) fn LLVMRustEraseInstUntilInclusive(BB: &BasicBlock, I: &Value); pub(crate) fn LLVMRustGetLastInstruction<'a>(BB: &BasicBlock) -> Option<&'a Value>; pub(crate) fn LLVMRustDIGetInstMetadata(I: &Value) -> Option<&Metadata>; @@ -42,10 +43,10 @@ pub use self::Enzyme_AD::*; #[cfg(llvm_enzyme)] pub mod Enzyme_AD { use libc::c_void; - extern "C" { + unsafe extern "C" { pub fn EnzymeSetCLBool(arg1: *mut ::std::os::raw::c_void, arg2: u8); } - extern "C" { + unsafe extern "C" { static mut EnzymePrintPerf: c_void; static mut EnzymePrintActivity: c_void; static mut EnzymePrintType: c_void; diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index da91e6edbcf..39087a4d6f4 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -976,6 +976,16 @@ pub type SelfProfileAfterPassCallback = unsafe extern "C" fn(*mut c_void); pub type GetSymbolsCallback = unsafe extern "C" fn(*mut c_void, *const c_char) -> *mut c_void; pub type GetSymbolsErrorCallback = unsafe extern "C" fn(*const c_char) -> *mut c_void; +#[derive(Copy, Clone)] +#[repr(transparent)] +pub struct MetadataKindId(c_uint); + +impl From<MetadataType> for MetadataKindId { + fn from(value: MetadataType) -> Self { + Self(value as c_uint) + } +} + unsafe extern "C" { // Create and destroy contexts. pub(crate) fn LLVMContextDispose(C: &'static mut Context); @@ -983,7 +993,7 @@ unsafe extern "C" { C: &Context, Name: *const c_char, SLen: c_uint, - ) -> c_uint; + ) -> MetadataKindId; // Create modules. 
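The `MetadataKindId` type introduced above is a `#[repr(transparent)]` newtype: it has exactly the ABI of the `c_uint` it wraps, so it can appear directly in the FFI signatures, while Rust callers can no longer pass an arbitrary integer where a metadata kind id is expected. A self-contained sketch of the idiom (with an illustrative enum and a stub function standing in for the real LLVM declarations) could be:

    use std::os::raw::c_uint;

    #[derive(Copy, Clone, Debug, PartialEq)]
    #[repr(transparent)]
    pub struct MetadataKindId(c_uint);

    /// Well-known kinds; the discriminant values here are illustrative, not LLVM's.
    #[allow(dead_code)]
    #[derive(Copy, Clone)]
    #[repr(C)]
    pub enum MetadataType {
        Dbg = 0,
        Tbaa = 1,
        Range = 4,
    }

    impl From<MetadataType> for MetadataKindId {
        fn from(value: MetadataType) -> Self {
            MetadataKindId(value as c_uint)
        }
    }

    /// Stub standing in for an `unsafe extern "C"` declaration that takes the typed id.
    fn set_metadata(kind: MetadataKindId) -> c_uint {
        kind.0
    }

    fn main() {
        // Call sites convert explicitly (or via `impl Into<MetadataKindId>` parameters),
        // so a random integer can no longer be passed by accident.
        let id: MetadataKindId = MetadataType::Range.into();
        assert_eq!(set_metadata(id), 4);
    }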
pub(crate) fn LLVMModuleCreateWithNameInContext( @@ -1046,14 +1056,13 @@ unsafe extern "C" { pub(crate) fn LLVMMetadataTypeInContext(C: &Context) -> &Type; // Operations on all values - pub(crate) fn LLVMIsUndef(Val: &Value) -> Bool; pub(crate) fn LLVMTypeOf(Val: &Value) -> &Type; pub(crate) fn LLVMGetValueName2(Val: &Value, Length: *mut size_t) -> *const c_char; pub(crate) fn LLVMSetValueName2(Val: &Value, Name: *const c_char, NameLen: size_t); pub(crate) fn LLVMReplaceAllUsesWith<'a>(OldVal: &'a Value, NewVal: &'a Value); - pub(crate) fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Node: &'a Value); + pub(crate) safe fn LLVMSetMetadata<'a>(Val: &'a Value, KindID: MetadataKindId, Node: &'a Value); pub(crate) fn LLVMGlobalSetMetadata<'a>(Val: &'a Value, KindID: c_uint, Metadata: &'a Metadata); - pub(crate) fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; + pub(crate) safe fn LLVMValueAsMetadata(Node: &Value) -> &Metadata; // Operations on constants of any type pub(crate) fn LLVMConstNull(Ty: &Type) -> &Value; @@ -1147,7 +1156,7 @@ unsafe extern "C" { pub(crate) fn LLVMSetThreadLocalMode(GlobalVar: &Value, Mode: ThreadLocalMode); pub(crate) fn LLVMIsGlobalConstant(GlobalVar: &Value) -> Bool; pub(crate) fn LLVMSetGlobalConstant(GlobalVar: &Value, IsConstant: Bool); - pub(crate) fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); + pub(crate) safe fn LLVMSetTailCall(CallInst: &Value, IsTailCall: Bool); // Operations on attributes pub(crate) fn LLVMCreateStringAttribute( @@ -1204,7 +1213,7 @@ unsafe extern "C" { pub(crate) fn LLVMGetCurrentDebugLocation2<'a>(Builder: &Builder<'a>) -> Option<&'a Metadata>; // Terminators - pub(crate) fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; + pub(crate) safe fn LLVMBuildRetVoid<'a>(B: &Builder<'a>) -> &'a Value; pub(crate) fn LLVMBuildRet<'a>(B: &Builder<'a>, V: &'a Value) -> &'a Value; pub(crate) fn LLVMBuildBr<'a>(B: &Builder<'a>, Dest: &'a BasicBlock) -> &'a Value; pub(crate) fn LLVMBuildCondBr<'a>( @@ -1680,7 +1689,7 @@ unsafe extern "C" { Packed: Bool, ); - pub(crate) fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; + pub(crate) safe fn LLVMMetadataAsValue<'a>(C: &'a Context, MD: &'a Metadata) -> &'a Value; pub(crate) fn LLVMSetUnnamedAddress(Global: &Value, UnnamedAddr: UnnamedAddr); @@ -2425,7 +2434,9 @@ unsafe extern "C" { NoPrepopulatePasses: bool, VerifyIR: bool, LintIR: bool, - UseThinLTOBuffers: bool, + ThinLTOBuffer: Option<&mut *mut ThinLTOBuffer>, + EmitThinLTO: bool, + EmitThinLTOSummary: bool, MergeFunctions: bool, UnrollLoops: bool, SLPVectorize: bool, diff --git a/compiler/rustc_codegen_llvm/src/llvm/mod.rs b/compiler/rustc_codegen_llvm/src/llvm/mod.rs index 5ec93424131..a36226b25a2 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/mod.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/mod.rs @@ -1,7 +1,6 @@ #![allow(non_snake_case)] use std::ffi::{CStr, CString}; -use std::ops::Deref; use std::ptr; use std::str::FromStr; use std::string::FromUtf8Error; @@ -355,6 +354,16 @@ impl<'a> OperandBundleOwned<'a> { }; OperandBundleOwned { raw: ptr::NonNull::new(raw).unwrap() } } + + /// Returns inner `OperandBundle` type. + /// + /// This could be a `Deref` implementation, but `OperandBundle` contains an extern type and + /// `Deref::Target: ?Sized`. + pub(crate) fn raw(&self) -> &OperandBundle<'a> { + // SAFETY: The returned reference is opaque and can only used for FFI. + // It is valid for as long as `&self` is. 
+ unsafe { self.raw.as_ref() } + } } impl Drop for OperandBundleOwned<'_> { @@ -365,16 +374,6 @@ impl Drop for OperandBundleOwned<'_> { } } -impl<'a> Deref for OperandBundleOwned<'a> { - type Target = OperandBundle<'a>; - - fn deref(&self) -> &Self::Target { - // SAFETY: The returned reference is opaque and can only used for FFI. - // It is valid for as long as `&self` is. - unsafe { self.raw.as_ref() } - } -} - pub(crate) fn add_module_flag_u32( module: &Module, merge_behavior: ModuleFlagMergeBehavior, diff --git a/compiler/rustc_codegen_llvm/src/llvm_util.rs b/compiler/rustc_codegen_llvm/src/llvm_util.rs index 1fcb20e0d7b..4a166b0872d 100644 --- a/compiler/rustc_codegen_llvm/src/llvm_util.rs +++ b/compiler/rustc_codegen_llvm/src/llvm_util.rs @@ -306,44 +306,44 @@ pub(crate) fn to_llvm_features<'a>(sess: &Session, s: &'a str) -> Option<LLVMFea /// Must express features in the way Rust understands them. /// /// We do not have to worry about RUSTC_SPECIFIC_FEATURES here, those are handled outside codegen. -pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<Symbol> { - let mut features: FxHashSet<Symbol> = Default::default(); - +pub(crate) fn target_features_cfg(sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { // Add base features for the target. // We do *not* add the -Ctarget-features there, and instead duplicate the logic for that below. // The reason is that if LLVM considers a feature implied but we do not, we don't want that to // show up in `cfg`. That way, `cfg` is entirely under our control -- except for the handling of - // the target CPU, that is still expanded to target features (with all their implied features) by - // LLVM. + // the target CPU, that is still expanded to target features (with all their implied features) + // by LLVM. let target_machine = create_informational_target_machine(sess, true); - // Compute which of the known target features are enabled in the 'base' target machine. - // We only consider "supported" features; "forbidden" features are not reflected in `cfg` as of now. - features.extend( - sess.target - .rust_target_features() - .iter() - .filter(|(feature, _, _)| { - // skip checking special features, as LLVM may not understand them - if RUSTC_SPECIAL_FEATURES.contains(feature) { - return true; - } - // check that all features in a given smallvec are enabled - if let Some(feat) = to_llvm_features(sess, feature) { - for llvm_feature in feat { - let cstr = SmallCStr::new(llvm_feature); - if !unsafe { llvm::LLVMRustHasFeature(&target_machine, cstr.as_ptr()) } { - return false; - } + // Compute which of the known target features are enabled in the 'base' target machine. We only + // consider "supported" features; "forbidden" features are not reflected in `cfg` as of now. + let mut features: FxHashSet<Symbol> = sess + .target + .rust_target_features() + .iter() + .filter(|(feature, _, _)| { + // skip checking special features, as LLVM may not understand them + if RUSTC_SPECIAL_FEATURES.contains(feature) { + return true; + } + if let Some(feat) = to_llvm_features(sess, feature) { + for llvm_feature in feat { + let cstr = SmallCStr::new(llvm_feature); + // `LLVMRustHasFeature` is moderately expensive. On targets with many + // features (e.g. x86) these calls take a non-trivial fraction of runtime + // when compiling very small programs. 
+ if !unsafe { llvm::LLVMRustHasFeature(target_machine.raw(), cstr.as_ptr()) } { + return false; } - true - } else { - false } - }) - .map(|(feature, _, _)| Symbol::intern(feature)), - ); + true + } else { + false + } + }) + .map(|(feature, _, _)| Symbol::intern(feature)) + .collect(); - // Add enabled features + // Add enabled and remove disabled features. for (enabled, feature) in sess.opts.cg.target_feature.split(',').filter_map(|s| match s.chars().next() { Some('+') => Some((true, Symbol::intern(&s[1..]))), @@ -359,7 +359,7 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S #[allow(rustc::potential_query_instability)] features.extend( sess.target - .implied_target_features(std::iter::once(feature.as_str())) + .implied_target_features(feature.as_str()) .iter() .map(|s| Symbol::intern(s)), ); @@ -370,11 +370,7 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S // `features.contains` below. #[allow(rustc::potential_query_instability)] features.retain(|f| { - if sess - .target - .implied_target_features(std::iter::once(f.as_str())) - .contains(&feature.as_str()) - { + if sess.target.implied_target_features(f.as_str()).contains(&feature.as_str()) { // If `f` if implies `feature`, then `!feature` implies `!f`, so we have to // remove `f`. (This is the standard logical contraposition principle.) false @@ -386,25 +382,31 @@ pub(crate) fn target_features_cfg(sess: &Session, allow_unstable: bool) -> Vec<S } } - // Filter enabled features based on feature gates - sess.target - .rust_target_features() - .iter() - .filter_map(|(feature, gate, _)| { - // The `allow_unstable` set is used by rustc internally to determined which target - // features are truly available, so we want to return even perma-unstable "forbidden" - // features. - if allow_unstable - || (gate.in_cfg() && (sess.is_nightly_build() || gate.requires_nightly().is_none())) - { - Some(*feature) - } else { - None - } - }) - .filter(|feature| features.contains(&Symbol::intern(feature))) - .map(|feature| Symbol::intern(feature)) - .collect() + // Filter enabled features based on feature gates. + let f = |allow_unstable| { + sess.target + .rust_target_features() + .iter() + .filter_map(|(feature, gate, _)| { + // The `allow_unstable` set is used by rustc internally to determined which target + // features are truly available, so we want to return even perma-unstable + // "forbidden" features. 
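The feature-set computation above has two steps: seed the set from what the baseline target machine reports, then apply the `-Ctarget-feature` requests, where disabling a feature must also drop every feature that implies it (the contraposition noted in the comments). A small sketch of that second step with plain std collections (the feature names and `implied` table below are made up, not rustc's real tables):

    use std::collections::{HashMap, HashSet};

    /// `implied` maps a feature to the features it (transitively) implies.
    fn apply_cli_features(
        mut features: HashSet<&'static str>,
        implied: &HashMap<&'static str, HashSet<&'static str>>,
        cli: &[(bool, &'static str)], // (enable, feature) in command-line order
    ) -> HashSet<&'static str> {
        for &(enable, feature) in cli {
            if enable {
                // Enabling a feature also enables everything it implies.
                features.insert(feature);
                if let Some(more) = implied.get(feature) {
                    features.extend(more.iter().copied());
                }
            } else {
                // Disabling a feature also disables every feature that implies it:
                // if `f` implies `feature`, then `!feature` implies `!f`.
                features.retain(|f| {
                    *f != feature && !implied.get(f).is_some_and(|set| set.contains(feature))
                });
            }
        }
        features
    }

    fn main() {
        let mut implied = HashMap::new();
        implied.insert("avx2", HashSet::from(["avx", "sse2"]));
        implied.insert("avx", HashSet::from(["sse2"]));

        let base = HashSet::from(["sse2", "avx", "avx2"]);
        // Disabling `avx` must also drop `avx2`, which implies it.
        let out = apply_cli_features(base, &implied, &[(false, "avx")]);
        assert!(out.contains("sse2"));
        assert!(!out.contains("avx") && !out.contains("avx2"));
    }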
+ if allow_unstable + || (gate.in_cfg() + && (sess.is_nightly_build() || gate.requires_nightly().is_none())) + { + Some(Symbol::intern(feature)) + } else { + None + } + }) + .filter(|feature| features.contains(&feature)) + .collect() + }; + + let target_features = f(false); + let unstable_target_features = f(true); + (target_features, unstable_target_features) } pub(crate) fn print_version() { @@ -453,8 +455,8 @@ pub(crate) fn print(req: &PrintRequest, out: &mut String, sess: &Session) { require_inited(); let tm = create_informational_target_machine(sess, false); match req.kind { - PrintKind::TargetCPUs => print_target_cpus(sess, &tm, out), - PrintKind::TargetFeatures => print_target_features(sess, &tm, out), + PrintKind::TargetCPUs => print_target_cpus(sess, tm.raw(), out), + PrintKind::TargetFeatures => print_target_features(sess, tm.raw(), out), _ => bug!("rustc_codegen_llvm can't handle print request: {:?}", req), } } @@ -681,7 +683,7 @@ pub(crate) fn global_llvm_features( for feature in sess.opts.cg.target_feature.split(',') { if let Some(feature) = feature.strip_prefix('+') { all_rust_features.extend( - UnordSet::from(sess.target.implied_target_features(std::iter::once(feature))) + UnordSet::from(sess.target.implied_target_features(feature)) .to_sorted_stable_ord() .iter() .map(|&&s| (true, s)), diff --git a/compiler/rustc_codegen_llvm/src/type_.rs b/compiler/rustc_codegen_llvm/src/type_.rs index d61ce417562..b89ce90d1a1 100644 --- a/compiler/rustc_codegen_llvm/src/type_.rs +++ b/compiler/rustc_codegen_llvm/src/type_.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::{fmt, ptr}; use libc::{c_char, c_uint}; @@ -11,7 +12,7 @@ use rustc_middle::ty::{self, Ty}; use rustc_target::callconv::{CastTarget, FnAbi}; use crate::abi::{FnAbiLlvmExt, LlvmType}; -use crate::context::{CodegenCx, SimpleCx}; +use crate::context::{CodegenCx, GenericCx, SCx}; pub(crate) use crate::llvm::Type; use crate::llvm::{Bool, False, Metadata, True}; use crate::type_of::LayoutLlvmExt; @@ -36,29 +37,29 @@ impl fmt::Debug for Type { } impl<'ll> CodegenCx<'ll, '_> {} -impl<'ll> SimpleCx<'ll> { +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { pub(crate) fn type_named_struct(&self, name: &str) -> &'ll Type { let name = SmallCStr::new(name); - unsafe { llvm::LLVMStructCreateNamed(self.llcx, name.as_ptr()) } + unsafe { llvm::LLVMStructCreateNamed(self.llcx(), name.as_ptr()) } } pub(crate) fn set_struct_body(&self, ty: &'ll Type, els: &[&'ll Type], packed: bool) { unsafe { llvm::LLVMStructSetBody(ty, els.as_ptr(), els.len() as c_uint, packed as Bool) } } pub(crate) fn type_void(&self) -> &'ll Type { - unsafe { llvm::LLVMVoidTypeInContext(self.llcx) } + unsafe { llvm::LLVMVoidTypeInContext(self.llcx()) } } pub(crate) fn type_token(&self) -> &'ll Type { - unsafe { llvm::LLVMTokenTypeInContext(self.llcx) } + unsafe { llvm::LLVMTokenTypeInContext(self.llcx()) } } pub(crate) fn type_metadata(&self) -> &'ll Type { - unsafe { llvm::LLVMMetadataTypeInContext(self.llcx) } + unsafe { llvm::LLVMMetadataTypeInContext(self.llcx()) } } ///x Creates an integer type with the given number of bits, e.g., i24 pub(crate) fn type_ix(&self, num_bits: u64) -> &'ll Type { - unsafe { llvm::LLVMIntTypeInContext(self.llcx, num_bits as c_uint) } + unsafe { llvm::LLVMIntTypeInContext(self.llcx(), num_bits as c_uint) } } pub(crate) fn type_vector(&self, ty: &'ll Type, len: u64) -> &'ll Type { @@ -121,19 +122,28 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { self.type_array(self.type_from_integer(unit), size / unit_size) } } -impl<'ll> SimpleCx<'ll> 
{ + +impl<'ll, CX: Borrow<SCx<'ll>>> GenericCx<'ll, CX> { + pub(crate) fn llcx(&self) -> &'ll llvm::Context { + (**self).borrow().llcx + } + + pub(crate) fn isize_ty(&self) -> &'ll Type { + (**self).borrow().isize_ty + } + pub(crate) fn type_variadic_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { unsafe { llvm::LLVMFunctionType(ret, args.as_ptr(), args.len() as c_uint, True) } } pub(crate) fn type_i1(&self) -> &'ll Type { - unsafe { llvm::LLVMInt1TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt1TypeInContext(self.llcx()) } } pub(crate) fn type_struct(&self, els: &[&'ll Type], packed: bool) -> &'ll Type { unsafe { llvm::LLVMStructTypeInContext( - self.llcx, + self.llcx(), els.as_ptr(), els.len() as c_uint, packed as Bool, @@ -142,45 +152,45 @@ impl<'ll> SimpleCx<'ll> { } } -impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { +impl<'ll, CX: Borrow<SCx<'ll>>> BaseTypeCodegenMethods for GenericCx<'ll, CX> { fn type_i8(&self) -> &'ll Type { - unsafe { llvm::LLVMInt8TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt8TypeInContext(self.llcx()) } } fn type_i16(&self) -> &'ll Type { - unsafe { llvm::LLVMInt16TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt16TypeInContext(self.llcx()) } } fn type_i32(&self) -> &'ll Type { - unsafe { llvm::LLVMInt32TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt32TypeInContext(self.llcx()) } } fn type_i64(&self) -> &'ll Type { - unsafe { llvm::LLVMInt64TypeInContext(self.llcx) } + unsafe { llvm::LLVMInt64TypeInContext(self.llcx()) } } fn type_i128(&self) -> &'ll Type { - unsafe { llvm::LLVMIntTypeInContext(self.llcx, 128) } + unsafe { llvm::LLVMIntTypeInContext(self.llcx(), 128) } } fn type_isize(&self) -> &'ll Type { - self.isize_ty + self.isize_ty() } fn type_f16(&self) -> &'ll Type { - unsafe { llvm::LLVMHalfTypeInContext(self.llcx) } + unsafe { llvm::LLVMHalfTypeInContext(self.llcx()) } } fn type_f32(&self) -> &'ll Type { - unsafe { llvm::LLVMFloatTypeInContext(self.llcx) } + unsafe { llvm::LLVMFloatTypeInContext(self.llcx()) } } fn type_f64(&self) -> &'ll Type { - unsafe { llvm::LLVMDoubleTypeInContext(self.llcx) } + unsafe { llvm::LLVMDoubleTypeInContext(self.llcx()) } } fn type_f128(&self) -> &'ll Type { - unsafe { llvm::LLVMFP128TypeInContext(self.llcx) } + unsafe { llvm::LLVMFP128TypeInContext(self.llcx()) } } fn type_func(&self, args: &[&'ll Type], ret: &'ll Type) -> &'ll Type { @@ -196,7 +206,7 @@ impl<'ll, 'tcx> BaseTypeCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { } fn type_ptr_ext(&self, address_space: AddressSpace) -> &'ll Type { - unsafe { llvm::LLVMPointerTypeInContext(self.llcx, address_space.0) } + unsafe { llvm::LLVMPointerTypeInContext(self.llcx(), address_space.0) } } fn element_type(&self, ty: &'ll Type) -> &'ll Type { diff --git a/compiler/rustc_codegen_llvm/src/type_of.rs b/compiler/rustc_codegen_llvm/src/type_of.rs index ba01fbff385..4e7096da502 100644 --- a/compiler/rustc_codegen_llvm/src/type_of.rs +++ b/compiler/rustc_codegen_llvm/src/type_of.rs @@ -19,7 +19,7 @@ fn uncached_llvm_type<'a, 'tcx>( ) -> &'a Type { match layout.backend_repr { BackendRepr::Scalar(_) => bug!("handled elsewhere"), - BackendRepr::Vector { element, count } => { + BackendRepr::SimdVector { element, count } => { let element = layout.scalar_llvm_type_at(cx, element); return cx.type_vector(element, count); } @@ -171,7 +171,7 @@ pub(crate) trait LayoutLlvmExt<'tcx> { impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_immediate(&self) -> bool { match self.backend_repr { - BackendRepr::Scalar(_) | 
BackendRepr::Vector { .. } => true, + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => true, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => false, } } @@ -179,9 +179,9 @@ impl<'tcx> LayoutLlvmExt<'tcx> for TyAndLayout<'tcx> { fn is_llvm_scalar_pair(&self) -> bool { match self.backend_repr { BackendRepr::ScalarPair(..) => true, - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => { - false - } + BackendRepr::Scalar(_) + | BackendRepr::SimdVector { .. } + | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_codegen_ssa/Cargo.toml b/compiler/rustc_codegen_ssa/Cargo.toml index 83d847f2d15..1346efcb87c 100644 --- a/compiler/rustc_codegen_ssa/Cargo.toml +++ b/compiler/rustc_codegen_ssa/Cargo.toml @@ -11,7 +11,7 @@ bitflags = "2.4.1" bstr = "1.11.3" # Pinned so `cargo update` bumps don't cause breakage. Please also update the # `cc` in `rustc_llvm` if you update the `cc` here. -cc = "=1.2.13" +cc = "=1.2.16" either = "1.5.0" itertools = "0.12" pathdiff = "0.2.0" diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl index 22e262546c3..95912b01600 100644 --- a/compiler/rustc_codegen_ssa/messages.ftl +++ b/compiler/rustc_codegen_ssa/messages.ftl @@ -18,6 +18,8 @@ codegen_ssa_atomic_compare_exchange = Atomic compare-exchange intrinsic missing codegen_ssa_autodiff_without_lto = using the autodiff feature requires using fat-lto +codegen_ssa_bare_instruction_set = `#[instruction_set]` requires an argument + codegen_ssa_binary_output_to_tty = option `-o` or `--emit` is used to write binary output type `{$shorthand}` to stdout, but stdout is a tty codegen_ssa_cgu_not_recorded = @@ -52,6 +54,10 @@ codegen_ssa_error_creating_remark_dir = failed to create remark directory: {$err codegen_ssa_error_writing_def_file = Error writing .DEF file: {$error} +codegen_ssa_expected_name_value_pair = expected name value pair + +codegen_ssa_expected_one_argument = expected one argument + codegen_ssa_expected_used_symbol = expected `used`, `used(compiler)` or `used(linker)` codegen_ssa_extern_funcs_not_found = some `extern` functions couldn't be found; some native libraries may need to be installed or have their path specified @@ -88,9 +94,17 @@ codegen_ssa_incorrect_cgu_reuse_type = codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient. 
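The new `codegen_ssa_*` messages added to this file back attribute-argument checks that now live in codegen (wrong number of arguments, non-name-value pairs, integer literals restricted to `0..=255`, and so on). As a rough illustration of the kind of validation these diagnostics describe (the names below are hypothetical, not the actual rustc checking code, which emits structured diagnostics keyed to these Fluent slugs):

    /// Hypothetical error kinds mirroring the message slugs in this file.
    #[derive(Debug, PartialEq)]
    enum AttrError {
        ExpectedOneArgument,
        InvalidLiteralValue,
        OutOfRangeInteger,
    }

    /// Parse something like a nop-count attribute argument: exactly one argument,
    /// an integer literal, and it must fit in `0..=255`.
    fn parse_nop_count(args: &[&str]) -> Result<u8, AttrError> {
        let [arg] = args else { return Err(AttrError::ExpectedOneArgument) };
        let value: u128 = arg.parse().map_err(|_| AttrError::InvalidLiteralValue)?;
        u8::try_from(value).map_err(|_| AttrError::OutOfRangeInteger)
    }

    fn main() {
        assert_eq!(parse_nop_count(&["16"]), Ok(16));
        assert_eq!(parse_nop_count(&["300"]), Err(AttrError::OutOfRangeInteger));
        assert_eq!(parse_nop_count(&["a", "b"]), Err(AttrError::ExpectedOneArgument));
    }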
+codegen_ssa_invalid_argument = invalid argument + .help = valid inline arguments are `always` and `never` + +codegen_ssa_invalid_instruction_set = invalid instruction set specified + codegen_ssa_invalid_link_ordinal_nargs = incorrect number of arguments to `#[link_ordinal]` .note = the attribute requires exactly one argument +codegen_ssa_invalid_literal_value = invalid literal value + .label = value must be an integer between `0` and `255` + codegen_ssa_invalid_monomorphization_basic_float_type = invalid monomorphization of `{$name}` intrinsic: expected basic float type, found `{$ty}` codegen_ssa_invalid_monomorphization_basic_integer_type = invalid monomorphization of `{$name}` intrinsic: expected basic integer type, found `{$ty}` @@ -119,7 +133,8 @@ codegen_ssa_invalid_monomorphization_inserted_type = invalid monomorphization of codegen_ssa_invalid_monomorphization_invalid_bitmask = invalid monomorphization of `{$name}` intrinsic: invalid bitmask `{$mask_ty}`, expected `u{$expected_int_bits}` or `[u8; {$expected_bytes}]` -codegen_ssa_invalid_monomorphization_mask_type = invalid monomorphization of `{$name}` intrinsic: mask element type is `{$ty}`, expected `i_` +codegen_ssa_invalid_monomorphization_mask_type = invalid monomorphization of `{$name}` intrinsic: found mask element type is `{$ty}`, expected a signed integer type + .note = the mask may be widened, which only has the correct behavior for signed integers codegen_ssa_invalid_monomorphization_mismatched_lengths = invalid monomorphization of `{$name}` intrinsic: mismatched lengths: mask length `{$m_len}` != other vector length `{$v_len}` @@ -216,6 +231,8 @@ codegen_ssa_msvc_missing_linker = the msvc targets depend on the msvc linker but codegen_ssa_multiple_external_func_decl = multiple declarations of external function `{$function}` from library `{$library_name}` have different calling conventions +codegen_ssa_multiple_instruction_set = cannot specify more than one instruction set + codegen_ssa_multiple_main_functions = entry symbol `main` declared multiple times .help = did you use `#[no_mangle]` on `fn main`? 
Use `#![no_main]` to suppress the usual Rust-generated entry point @@ -228,6 +245,11 @@ codegen_ssa_no_natvis_directory = error enumerating natvis directory: {$error} codegen_ssa_no_saved_object_file = cached cgu {$cgu_name} should have an object file, but doesn't +codegen_ssa_null_on_export = `export_name` may not contain null characters + +codegen_ssa_out_of_range_integer = integer value out of range + .label = value must be between `0` and `255` + codegen_ssa_processing_dymutil_failed = processing debug info with `dsymutil` failed: {$status} .note = {$output} @@ -235,6 +257,8 @@ codegen_ssa_read_file = failed to read file: {$message} codegen_ssa_repair_vs_build_tools = the Visual Studio build tools may need to be repaired using the Visual Studio installer +codegen_ssa_requires_rust_abi = `#[track_caller]` requires Rust ABI + codegen_ssa_rlib_archive_build_failure = failed to build archive from rlib at `{$path}`: {$error} codegen_ssa_rlib_incompatible_dependency_formats = `{$ty1}` and `{$ty2}` do not have equivalent dependency formats (`{$list1}` vs `{$list2}`) @@ -355,6 +379,9 @@ codegen_ssa_unable_to_run_dsymutil = unable to run `dsymutil`: {$error} codegen_ssa_unable_to_write_debugger_visualizer = Unable to write debugger visualizer file `{$path}`: {$error} +codegen_ssa_unexpected_parameter_name = unexpected parameter name + .label = expected `{$prefix_nops}` or `{$entry_nops}` + codegen_ssa_unknown_archive_kind = Don't know how to build archive of type: {$kind} @@ -366,6 +393,8 @@ codegen_ssa_unknown_reuse_kind = unknown cgu-reuse-kind `{$kind}` specified codegen_ssa_unsupported_arch = unsupported arch `{$arch}` for os `{$os}` +codegen_ssa_unsupported_instruction_set = target does not support `#[instruction_set]` + codegen_ssa_unsupported_link_self_contained = option `-C link-self-contained` is not supported on this target codegen_ssa_use_cargo_directive = use the `cargo:rustc-link-lib` directive to specify the native libraries to link with Cargo (see https://doc.rust-lang.org/cargo/reference/build-scripts.html#rustc-link-lib) diff --git a/compiler/rustc_codegen_ssa/src/assert_module_sources.rs b/compiler/rustc_codegen_ssa/src/assert_module_sources.rs index da9f8d69297..3710625ac12 100644 --- a/compiler/rustc_codegen_ssa/src/assert_module_sources.rs +++ b/compiler/rustc_codegen_ssa/src/assert_module_sources.rs @@ -63,7 +63,7 @@ pub fn assert_module_sources(tcx: TyCtxt<'_>, set_reuse: &dyn Fn(&mut CguReuseTr }, }; - for attr in tcx.hir().attrs(rustc_hir::CRATE_HIR_ID) { + for attr in tcx.hir_attrs(rustc_hir::CRATE_HIR_ID) { ams.check_attr(attr); } @@ -211,7 +211,7 @@ impl fmt::Display for CguReuse { } impl IntoDiagArg for CguReuse { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.to_string())) } } diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index b090730ac6b..7d9971c021d 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -1,3 +1,5 @@ +mod raw_dylib; + use std::collections::BTreeSet; use std::ffi::OsString; use std::fs::{File, OpenOptions, read}; @@ -12,7 +14,7 @@ use itertools::Itertools; use regex::Regex; use rustc_arena::TypedArena; use rustc_ast::CRATE_NODE_ID; -use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; +use rustc_data_structures::fx::FxIndexSet; use rustc_data_structures::memmap::Mmap; use rustc_data_structures::temp_dir::MaybeTempDir; use 
rustc_errors::{DiagCtxtHandle, LintDiagnostic}; @@ -30,7 +32,6 @@ use rustc_session::config::{ self, CFGuard, CrateType, DebugInfo, LinkerFeaturesCli, OutFileName, OutputFilenames, OutputType, PrintKind, SplitDwarfKind, Strip, }; -use rustc_session::cstore::DllImport; use rustc_session::lint::builtin::LINKER_MESSAGES; use rustc_session::output::{check_file_is_writeable, invalid_output_for_target, out_filename}; use rustc_session::search_paths::PathKind; @@ -41,22 +42,21 @@ use rustc_session::{Session, filesearch}; use rustc_span::Symbol; use rustc_target::spec::crt_objects::CrtObjects; use rustc_target::spec::{ - Cc, LinkOutputKind, LinkSelfContainedComponents, LinkSelfContainedDefault, LinkerFeatures, - LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy, RelocModel, RelroLevel, SanitizerSet, - SplitDebuginfo, + BinaryFormat, Cc, LinkOutputKind, LinkSelfContainedComponents, LinkSelfContainedDefault, + LinkerFeatures, LinkerFlavor, LinkerFlavorCli, Lld, PanicStrategy, RelocModel, RelroLevel, + SanitizerSet, SplitDebuginfo, }; use tempfile::Builder as TempFileBuilder; use tracing::{debug, info, warn}; -use super::archive::{ArchiveBuilder, ArchiveBuilderBuilder, ImportLibraryItem}; +use super::archive::{ArchiveBuilder, ArchiveBuilderBuilder}; use super::command::Command; use super::linker::{self, Linker}; use super::metadata::{MetadataPosition, create_wrapper_file}; use super::rpath::{self, RPathConfig}; use super::{apple, versioned_llvm_target}; use crate::{ - CodegenResults, CompiledModule, CrateInfo, NativeLib, common, errors, - looks_like_rust_object_file, + CodegenResults, CompiledModule, CrateInfo, NativeLib, errors, looks_like_rust_object_file, }; pub fn ensure_removed(dcx: DiagCtxtHandle<'_>, path: &Path) { @@ -376,16 +376,22 @@ fn link_rlib<'a>( } } - for output_path in create_dll_import_libs( - sess, - archive_builder_builder, - codegen_results.crate_info.used_libraries.iter(), - tmpdir.as_ref(), - true, - ) { - ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|error| { - sess.dcx().emit_fatal(errors::AddNativeLibrary { library_path: output_path, error }); - }); + // On Windows, we add the raw-dylib import libraries to the rlibs already. + // But on ELF, this is not possible, as a shared object cannot be a member of a static library. + // Instead, we add all raw-dylibs to the final link on ELF. + if sess.target.is_like_windows { + for output_path in raw_dylib::create_raw_dylib_dll_import_libs( + sess, + archive_builder_builder, + codegen_results.crate_info.used_libraries.iter(), + tmpdir.as_ref(), + true, + ) { + ab.add_archive(&output_path, Box::new(|_| false)).unwrap_or_else(|error| { + sess.dcx() + .emit_fatal(errors::AddNativeLibrary { library_path: output_path, error }); + }); + } } if let Some(trailing_metadata) = trailing_metadata { @@ -426,108 +432,6 @@ fn link_rlib<'a>( ab } -/// Extract all symbols defined in raw-dylib libraries, collated by library name. -/// -/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library, -/// then the CodegenResults value contains one NativeLib instance for each block. However, the -/// linker appears to expect only a single import library for each library used, so we need to -/// collate the symbols together by library name before generating the import libraries. -fn collate_raw_dylibs<'a>( - sess: &Session, - used_libraries: impl IntoIterator<Item = &'a NativeLib>, -) -> Vec<(String, Vec<DllImport>)> { - // Use index maps to preserve original order of imports and libraries. 
- let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default(); - - for lib in used_libraries { - if lib.kind == NativeLibKind::RawDylib { - let ext = if lib.verbatim { "" } else { ".dll" }; - let name = format!("{}{}", lib.name, ext); - let imports = dylib_table.entry(name.clone()).or_default(); - for import in &lib.dll_imports { - if let Some(old_import) = imports.insert(import.name, import) { - // FIXME: when we add support for ordinals, figure out if we need to do anything - // if we have two DllImport values with the same name but different ordinals. - if import.calling_convention != old_import.calling_convention { - sess.dcx().emit_err(errors::MultipleExternalFuncDecl { - span: import.span, - function: import.name, - library_name: &name, - }); - } - } - } - } - } - sess.dcx().abort_if_errors(); - dylib_table - .into_iter() - .map(|(name, imports)| { - (name, imports.into_iter().map(|(_, import)| import.clone()).collect()) - }) - .collect() -} - -fn create_dll_import_libs<'a>( - sess: &Session, - archive_builder_builder: &dyn ArchiveBuilderBuilder, - used_libraries: impl IntoIterator<Item = &'a NativeLib>, - tmpdir: &Path, - is_direct_dependency: bool, -) -> Vec<PathBuf> { - collate_raw_dylibs(sess, used_libraries) - .into_iter() - .map(|(raw_dylib_name, raw_dylib_imports)| { - let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" }; - let output_path = tmpdir.join(format!("{raw_dylib_name}{name_suffix}.lib")); - - let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(&sess.target); - - let items: Vec<ImportLibraryItem> = raw_dylib_imports - .iter() - .map(|import: &DllImport| { - if sess.target.arch == "x86" { - ImportLibraryItem { - name: common::i686_decorated_name( - import, - mingw_gnu_toolchain, - false, - false, - ), - ordinal: import.ordinal(), - symbol_name: import.is_missing_decorations().then(|| { - common::i686_decorated_name( - import, - mingw_gnu_toolchain, - false, - true, - ) - }), - is_data: !import.is_fn, - } - } else { - ImportLibraryItem { - name: import.name.to_string(), - ordinal: import.ordinal(), - symbol_name: None, - is_data: !import.is_fn, - } - } - }) - .collect(); - - archive_builder_builder.create_dll_import_lib( - sess, - &raw_dylib_name, - items, - &output_path, - ); - - output_path - }) - .collect() -} - /// Create a static archive. /// /// This is essentially the same thing as an rlib, but it also involves adding all of the upstream @@ -1273,7 +1177,7 @@ mod win { let mut cp: u32 = 0; // We're using the `LOCALE_RETURN_NUMBER` flag to return a u32. // But the API requires us to pass the data as though it's a [u16] string. - let len = std::mem::size_of::<u32>() / std::mem::size_of::<u16>(); + let len = size_of::<u32>() / size_of::<u16>(); let data = std::slice::from_raw_parts_mut(&mut cp as *mut u32 as *mut u16, len); let len_written = GetLocaleInfoEx( LOCALE_NAME_SYSTEM_DEFAULT, @@ -1726,8 +1630,12 @@ fn exec_linker( args.push_str( &Escape { arg: arg.to_str().unwrap(), - // LLD also uses MSVC-like parsing for @-files by default when running on windows hosts - is_like_msvc: sess.target.is_like_msvc || (cfg!(windows) && flavor.uses_lld()), + // Windows-style escaping for @-files is used by + // - all linkers targeting MSVC-like targets, including LLD + // - all LLD flavors running on Windows hosts + // С/С++ compilers use Posix-style escaping (except clang-cl, which we do not use). 
+ is_like_msvc: sess.target.is_like_msvc + || (cfg!(windows) && flavor.uses_lld() && !flavor.uses_cc()), } .to_string(), ); @@ -2418,15 +2326,39 @@ fn linker_with_args( link_output_kind, ); + // Raw-dylibs from all crates. + let raw_dylib_dir = tmpdir.join("raw-dylibs"); + if sess.target.binary_format == BinaryFormat::Elf { + // On ELF we can't pass the raw-dylibs stubs to the linker as a path, + // instead we need to pass them via -l. To find the stub, we need to add + // the directory of the stub to the linker search path. + // We make an extra directory for this to avoid polluting the search path. + if let Err(error) = fs::create_dir(&raw_dylib_dir) { + sess.dcx().emit_fatal(errors::CreateTempDir { error }) + } + cmd.include_path(&raw_dylib_dir); + } + // Link with the import library generated for any raw-dylib functions. - for output_path in create_dll_import_libs( - sess, - archive_builder_builder, - codegen_results.crate_info.used_libraries.iter(), - tmpdir, - true, - ) { - cmd.add_object(&output_path); + if sess.target.is_like_windows { + for output_path in raw_dylib::create_raw_dylib_dll_import_libs( + sess, + archive_builder_builder, + codegen_results.crate_info.used_libraries.iter(), + tmpdir, + true, + ) { + cmd.add_object(&output_path); + } + } else { + for link_path in raw_dylib::create_raw_dylib_elf_stub_shared_objects( + sess, + codegen_results.crate_info.used_libraries.iter(), + &raw_dylib_dir, + ) { + // Always use verbatim linkage, see comments in create_raw_dylib_elf_stub_shared_objects. + cmd.link_dylib_by_name(&link_path, true, false); + } } // As with add_upstream_native_libraries, we need to add the upstream raw-dylib symbols in case // they are used within inlined functions or instantiated generic functions. We do this *after* @@ -2445,19 +2377,35 @@ fn linker_with_args( .native_libraries .iter() .filter_map(|(&cnum, libraries)| { - (dependency_linkage[cnum] != Linkage::Static).then_some(libraries) + if sess.target.is_like_windows { + (dependency_linkage[cnum] != Linkage::Static).then_some(libraries) + } else { + Some(libraries) + } }) .flatten() .collect::<Vec<_>>(); native_libraries_from_nonstatics.sort_unstable_by(|a, b| a.name.as_str().cmp(b.name.as_str())); - for output_path in create_dll_import_libs( - sess, - archive_builder_builder, - native_libraries_from_nonstatics, - tmpdir, - false, - ) { - cmd.add_object(&output_path); + + if sess.target.is_like_windows { + for output_path in raw_dylib::create_raw_dylib_dll_import_libs( + sess, + archive_builder_builder, + native_libraries_from_nonstatics, + tmpdir, + false, + ) { + cmd.add_object(&output_path); + } + } else { + for link_path in raw_dylib::create_raw_dylib_elf_stub_shared_objects( + sess, + native_libraries_from_nonstatics, + &raw_dylib_dir, + ) { + // Always use verbatim linkage, see comments in create_raw_dylib_elf_stub_shared_objects. + cmd.link_dylib_by_name(&link_path, true, false); + } } // Library linking above uses some global state for things like `-Bstatic`/`-Bdynamic` to make @@ -3434,6 +3382,35 @@ fn add_lld_args( // this, `wasm-component-ld`, which is overridden if this option is passed. if !sess.target.is_like_wasm { cmd.cc_arg("-fuse-ld=lld"); + + // On ELF platforms like at least x64 linux, GNU ld and LLD have opposite defaults on some + // section garbage-collection features. 
For example, the somewhat popular `linkme` crate and + // its dependents rely in practice on this difference: when using lld, they need `-z + // nostart-stop-gc` to prevent encapsulation symbols and sections from being + // garbage-collected. + // + // More information about all this can be found in: + // - https://maskray.me/blog/2021-01-31-metadata-sections-comdat-and-shf-link-order + // - https://lld.llvm.org/ELF/start-stop-gc + // + // So when using lld, we restore, for now, the traditional behavior to help migration, but + // will remove it in the future. + // Since this only disables an optimization, it shouldn't create issues, but is in theory + // slightly suboptimal. However, it: + // - doesn't have any visible impact on our benchmarks + // - reduces the need to disable lld for the crates that depend on this + // + // Note that lld can detect some cases where this difference is relied on, and emits a + // dedicated error to add this link arg. We could make use of this error to emit an FCW. As + // of writing this, we don't do it, because lld is already enabled by default on nightly + // without this mitigation: no working project would see the FCW, so we do this to help + // stabilization. + // + // FIXME: emit an FCW if linking fails due its absence, and then remove this link-arg in the + // future. + if sess.target.llvm_target == "x86_64-unknown-linux-gnu" { + cmd.link_arg("-znostart-stop-gc"); + } } if !flavor.is_gnu() { diff --git a/compiler/rustc_codegen_ssa/src/back/link/raw_dylib.rs b/compiler/rustc_codegen_ssa/src/back/link/raw_dylib.rs new file mode 100644 index 00000000000..2c24378afe1 --- /dev/null +++ b/compiler/rustc_codegen_ssa/src/back/link/raw_dylib.rs @@ -0,0 +1,373 @@ +use std::fs; +use std::io::{BufWriter, Write}; +use std::path::{Path, PathBuf}; + +use rustc_abi::Endian; +use rustc_data_structures::base_n::{CASE_INSENSITIVE, ToBaseN}; +use rustc_data_structures::fx::FxIndexMap; +use rustc_data_structures::stable_hasher::StableHasher; +use rustc_hashes::Hash128; +use rustc_session::Session; +use rustc_session::cstore::DllImport; +use rustc_session::utils::NativeLibKind; +use rustc_span::Symbol; + +use crate::back::archive::ImportLibraryItem; +use crate::back::link::ArchiveBuilderBuilder; +use crate::errors::ErrorCreatingImportLibrary; +use crate::{NativeLib, common, errors}; + +/// Extract all symbols defined in raw-dylib libraries, collated by library name. +/// +/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library, +/// then the CodegenResults value contains one NativeLib instance for each block. However, the +/// linker appears to expect only a single import library for each library used, so we need to +/// collate the symbols together by library name before generating the import libraries. +fn collate_raw_dylibs_windows<'a>( + sess: &Session, + used_libraries: impl IntoIterator<Item = &'a NativeLib>, +) -> Vec<(String, Vec<DllImport>)> { + // Use index maps to preserve original order of imports and libraries. 
+ let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default(); + + for lib in used_libraries { + if lib.kind == NativeLibKind::RawDylib { + let ext = if lib.verbatim { "" } else { ".dll" }; + let name = format!("{}{}", lib.name, ext); + let imports = dylib_table.entry(name.clone()).or_default(); + for import in &lib.dll_imports { + if let Some(old_import) = imports.insert(import.name, import) { + // FIXME: when we add support for ordinals, figure out if we need to do anything + // if we have two DllImport values with the same name but different ordinals. + if import.calling_convention != old_import.calling_convention { + sess.dcx().emit_err(errors::MultipleExternalFuncDecl { + span: import.span, + function: import.name, + library_name: &name, + }); + } + } + } + } + } + sess.dcx().abort_if_errors(); + dylib_table + .into_iter() + .map(|(name, imports)| { + (name, imports.into_iter().map(|(_, import)| import.clone()).collect()) + }) + .collect() +} + +pub(super) fn create_raw_dylib_dll_import_libs<'a>( + sess: &Session, + archive_builder_builder: &dyn ArchiveBuilderBuilder, + used_libraries: impl IntoIterator<Item = &'a NativeLib>, + tmpdir: &Path, + is_direct_dependency: bool, +) -> Vec<PathBuf> { + collate_raw_dylibs_windows(sess, used_libraries) + .into_iter() + .map(|(raw_dylib_name, raw_dylib_imports)| { + let name_suffix = if is_direct_dependency { "_imports" } else { "_imports_indirect" }; + let output_path = tmpdir.join(format!("{raw_dylib_name}{name_suffix}.lib")); + + let mingw_gnu_toolchain = common::is_mingw_gnu_toolchain(&sess.target); + + let items: Vec<ImportLibraryItem> = raw_dylib_imports + .iter() + .map(|import: &DllImport| { + if sess.target.arch == "x86" { + ImportLibraryItem { + name: common::i686_decorated_name( + import, + mingw_gnu_toolchain, + false, + false, + ), + ordinal: import.ordinal(), + symbol_name: import.is_missing_decorations().then(|| { + common::i686_decorated_name( + import, + mingw_gnu_toolchain, + false, + true, + ) + }), + is_data: !import.is_fn, + } + } else { + ImportLibraryItem { + name: import.name.to_string(), + ordinal: import.ordinal(), + symbol_name: None, + is_data: !import.is_fn, + } + } + }) + .collect(); + + archive_builder_builder.create_dll_import_lib( + sess, + &raw_dylib_name, + items, + &output_path, + ); + + output_path + }) + .collect() +} + +/// Extract all symbols defined in raw-dylib libraries, collated by library name. +/// +/// If we have multiple extern blocks that specify symbols defined in the same raw-dylib library, +/// then the CodegenResults value contains one NativeLib instance for each block. However, the +/// linker appears to expect only a single import library for each library used, so we need to +/// collate the symbols together by library name before generating the import libraries. +fn collate_raw_dylibs_elf<'a>( + sess: &Session, + used_libraries: impl IntoIterator<Item = &'a NativeLib>, +) -> Vec<(String, Vec<DllImport>)> { + // Use index maps to preserve original order of imports and libraries. 
+ let mut dylib_table = FxIndexMap::<String, FxIndexMap<Symbol, &DllImport>>::default(); + + for lib in used_libraries { + if lib.kind == NativeLibKind::RawDylib { + let filename = if lib.verbatim { + lib.name.as_str().to_owned() + } else { + let ext = sess.target.dll_suffix.as_ref(); + let prefix = sess.target.dll_prefix.as_ref(); + format!("{prefix}{}{ext}", lib.name) + }; + + let imports = dylib_table.entry(filename.clone()).or_default(); + for import in &lib.dll_imports { + imports.insert(import.name, import); + } + } + } + sess.dcx().abort_if_errors(); + dylib_table + .into_iter() + .map(|(name, imports)| { + (name, imports.into_iter().map(|(_, import)| import.clone()).collect()) + }) + .collect() +} + +pub(super) fn create_raw_dylib_elf_stub_shared_objects<'a>( + sess: &Session, + used_libraries: impl IntoIterator<Item = &'a NativeLib>, + raw_dylib_so_dir: &Path, +) -> Vec<String> { + collate_raw_dylibs_elf(sess, used_libraries) + .into_iter() + .map(|(load_filename, raw_dylib_imports)| { + use std::hash::Hash; + + // `load_filename` is the *target/loader* filename that will end up in NEEDED. + // Usually this will be something like `libc.so` or `libc.so.6` but with + // verbatim it might also be an absolute path. + // To be able to support this properly, we always put this load filename + // into the SONAME of the library and link it via a temporary file with a random name. + // This also avoids naming conflicts with non-raw-dylib linkage of the same library. + + let shared_object = create_elf_raw_dylib_stub(sess, &load_filename, &raw_dylib_imports); + + let mut file_name_hasher = StableHasher::new(); + load_filename.hash(&mut file_name_hasher); + for raw_dylib in raw_dylib_imports { + raw_dylib.name.as_str().hash(&mut file_name_hasher); + } + + let library_filename: Hash128 = file_name_hasher.finish(); + let temporary_lib_name = format!( + "{}{}{}", + sess.target.dll_prefix, + library_filename.as_u128().to_base_fixed_len(CASE_INSENSITIVE), + sess.target.dll_suffix + ); + let link_path = raw_dylib_so_dir.join(&temporary_lib_name); + + let file = match fs::File::create_new(&link_path) { + Ok(file) => file, + Err(error) => sess.dcx().emit_fatal(ErrorCreatingImportLibrary { + lib_name: &load_filename, + error: error.to_string(), + }), + }; + if let Err(error) = BufWriter::new(file).write_all(&shared_object) { + sess.dcx().emit_fatal(ErrorCreatingImportLibrary { + lib_name: &load_filename, + error: error.to_string(), + }); + }; + + temporary_lib_name + }) + .collect() +} + +/// Create an ELF .so stub file for raw-dylib. +/// It exports all the provided symbols, but is otherwise empty. +fn create_elf_raw_dylib_stub(sess: &Session, soname: &str, symbols: &[DllImport]) -> Vec<u8> { + use object::write::elf as write; + use object::{Architecture, elf}; + + let mut stub_buf = Vec::new(); + + // Build the stub ELF using the object crate. + // The high-level portable API does not allow for the fine-grained control we need, + // so this uses the low-level object::write::elf API. + // The low-level API consists of two stages: reservation and writing. + // We first reserve space for all the things in the binary and then write them. + // It is important that the order of reservation matches the order of writing. + // The object crate contains many debug asserts that fire if you get this wrong. 
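`create_elf_raw_dylib_stub` below drives `object`'s low-level ELF writer, which is strictly two-phase: reserve every piece first (so file offsets are known), then write the pieces in exactly the reservation order. A toy illustration of that discipline with a hand-rolled writer (not the `object` crate's actual API) could be:

    /// Toy two-phase writer: `reserve` records what will be written, `write` must then
    /// follow the same order and sizes, mirroring the debug asserts in the real writer.
    #[derive(Default)]
    struct TwoPhaseWriter {
        reserved: Vec<(&'static str, usize)>, // (piece name, size in bytes)
        written: usize,                       // how many reserved pieces were written so far
        buf: Vec<u8>,
    }

    impl TwoPhaseWriter {
        fn reserve(&mut self, name: &'static str, size: usize) {
            assert_eq!(self.written, 0, "all reservations must happen before any write");
            self.reserved.push((name, size));
        }

        fn write(&mut self, name: &'static str, bytes: &[u8]) {
            let (expected_name, expected_size) = self.reserved[self.written];
            assert_eq!(name, expected_name, "writes must follow reservation order");
            assert_eq!(bytes.len(), expected_size, "written size must match reserved size");
            self.buf.extend_from_slice(bytes);
            self.written += 1;
        }
    }

    fn main() {
        let mut w = TwoPhaseWriter::default();
        // Phase 1: reserve (this is where the real writer computes file offsets).
        w.reserve("file header", 4);
        w.reserve("dynstr", 6);
        // Phase 2: write, in the same order.
        w.write("file header", b"\x7fELF");
        w.write("dynstr", b"libc.6");
        assert_eq!(w.buf.len(), 10);
    }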
+
+    let endianness = match sess.target.options.endian {
+        Endian::Little => object::Endianness::Little,
+        Endian::Big => object::Endianness::Big,
+    };
+    let mut stub = write::Writer::new(endianness, true, &mut stub_buf);
+
+    // These initial reservations don't reserve any bytes in the binary yet,
+    // they just allocate in the internal data structures.
+
+    // First, we create the dynamic symbol table. It starts with a null symbol
+    // and then all the symbols and their dynamic strings.
+    stub.reserve_null_dynamic_symbol_index();
+
+    let dynstrs = symbols
+        .iter()
+        .map(|sym| {
+            stub.reserve_dynamic_symbol_index();
+            (sym, stub.add_dynamic_string(sym.name.as_str().as_bytes()))
+        })
+        .collect::<Vec<_>>();
+
+    let soname = stub.add_dynamic_string(soname.as_bytes());
+
+    // Reserve the sections.
+    // We have the minimal sections for a dynamic SO and .text where we point our dummy symbols to.
+    stub.reserve_shstrtab_section_index();
+    let text_section_name = stub.add_section_name(".text".as_bytes());
+    let text_section = stub.reserve_section_index();
+    stub.reserve_dynstr_section_index();
+    stub.reserve_dynsym_section_index();
+    stub.reserve_dynamic_section_index();
+
+    // These reservations now determine the actual layout order of the object file.
+    stub.reserve_file_header();
+    stub.reserve_shstrtab();
+    stub.reserve_section_headers();
+    stub.reserve_dynstr();
+    stub.reserve_dynsym();
+    stub.reserve_dynamic(2); // DT_SONAME, DT_NULL
+
+    // First write the ELF header with the arch information.
+    let Some((arch, sub_arch)) = sess.target.object_architecture(&sess.unstable_target_features)
+    else {
+        sess.dcx().fatal(format!(
+            "raw-dylib is not supported for the architecture `{}`",
+            sess.target.arch
+        ));
+    };
+    let e_machine = match (arch, sub_arch) {
+        (Architecture::Aarch64, None) => elf::EM_AARCH64,
+        (Architecture::Aarch64_Ilp32, None) => elf::EM_AARCH64,
+        (Architecture::Arm, None) => elf::EM_ARM,
+        (Architecture::Avr, None) => elf::EM_AVR,
+        (Architecture::Bpf, None) => elf::EM_BPF,
+        (Architecture::Csky, None) => elf::EM_CSKY,
+        (Architecture::E2K32, None) => elf::EM_MCST_ELBRUS,
+        (Architecture::E2K64, None) => elf::EM_MCST_ELBRUS,
+        (Architecture::I386, None) => elf::EM_386,
+        (Architecture::X86_64, None) => elf::EM_X86_64,
+        (Architecture::X86_64_X32, None) => elf::EM_X86_64,
+        (Architecture::Hexagon, None) => elf::EM_HEXAGON,
+        (Architecture::LoongArch64, None) => elf::EM_LOONGARCH,
+        (Architecture::M68k, None) => elf::EM_68K,
+        (Architecture::Mips, None) => elf::EM_MIPS,
+        (Architecture::Mips64, None) => elf::EM_MIPS,
+        (Architecture::Mips64_N32, None) => elf::EM_MIPS,
+        (Architecture::Msp430, None) => elf::EM_MSP430,
+        (Architecture::PowerPc, None) => elf::EM_PPC,
+        (Architecture::PowerPc64, None) => elf::EM_PPC64,
+        (Architecture::Riscv32, None) => elf::EM_RISCV,
+        (Architecture::Riscv64, None) => elf::EM_RISCV,
+        (Architecture::S390x, None) => elf::EM_S390,
+        (Architecture::Sbf, None) => elf::EM_SBF,
+        (Architecture::Sharc, None) => elf::EM_SHARC,
+        (Architecture::Sparc, None) => elf::EM_SPARC,
+        (Architecture::Sparc32Plus, None) => elf::EM_SPARC32PLUS,
+        (Architecture::Sparc64, None) => elf::EM_SPARCV9,
+        (Architecture::Xtensa, None) => elf::EM_XTENSA,
+        _ => {
+            sess.dcx().fatal(format!(
+                "raw-dylib is not supported for the architecture `{}`",
+                sess.target.arch
+            ));
+        }
+    };
+
+    stub.write_file_header(&write::FileHeader {
+        os_abi: crate::back::metadata::elf_os_abi(sess),
+        abi_version: 0,
+        e_type: object::elf::ET_DYN,
+        e_machine,
+        e_entry: 0,
+        e_flags:
crate::back::metadata::elf_e_flags(arch, sess), + }) + .unwrap(); + + // .shstrtab + stub.write_shstrtab(); + + // Section headers + stub.write_null_section_header(); + stub.write_shstrtab_section_header(); + // Create a dummy .text section for our dummy symbols. + stub.write_section_header(&write::SectionHeader { + name: Some(text_section_name), + sh_type: elf::SHT_PROGBITS, + sh_flags: 0, + sh_addr: 0, + sh_offset: 0, + sh_size: 0, + sh_link: 0, + sh_info: 0, + sh_addralign: 1, + sh_entsize: 0, + }); + stub.write_dynstr_section_header(0); + stub.write_dynsym_section_header(0, 1); + stub.write_dynamic_section_header(0); + + // .dynstr + stub.write_dynstr(); + + // .dynsym + stub.write_null_dynamic_symbol(); + for (_, name) in dynstrs { + stub.write_dynamic_symbol(&write::Sym { + name: Some(name), + st_info: (elf::STB_GLOBAL << 4) | elf::STT_NOTYPE, + st_other: elf::STV_DEFAULT, + section: Some(text_section), + st_shndx: 0, // ignored by object in favor of the `section` field + st_value: 0, + st_size: 0, + }); + } + + // .dynamic + // the DT_SONAME will be used by the linker to populate DT_NEEDED + // which the loader uses to find the library. + // DT_NULL terminates the .dynamic table. + stub.write_dynamic_string(elf::DT_SONAME, soname); + stub.write_dynamic(elf::DT_NULL, 0); + + stub_buf +} diff --git a/compiler/rustc_codegen_ssa/src/back/linker.rs b/compiler/rustc_codegen_ssa/src/back/linker.rs index 8900405c1b8..a8405a2aec9 100644 --- a/compiler/rustc_codegen_ssa/src/back/linker.rs +++ b/compiler/rustc_codegen_ssa/src/back/linker.rs @@ -1655,9 +1655,9 @@ impl<'a> Linker for AixLinker<'a> { } } - fn link_dylib_by_name(&mut self, name: &str, _verbatim: bool, _as_needed: bool) { + fn link_dylib_by_name(&mut self, name: &str, verbatim: bool, _as_needed: bool) { self.hint_dynamic(); - self.link_or_cc_arg(format!("-l{name}")); + self.link_or_cc_arg(if verbatim { String::from(name) } else { format!("-l{name}") }); } fn link_dylib_by_path(&mut self, path: &Path, _as_needed: bool) { @@ -1668,7 +1668,7 @@ impl<'a> Linker for AixLinker<'a> { fn link_staticlib_by_name(&mut self, name: &str, verbatim: bool, whole_archive: bool) { self.hint_static(); if !whole_archive { - self.link_or_cc_arg(format!("-l{name}")); + self.link_or_cc_arg(if verbatim { String::from(name) } else { format!("-l{name}") }); } else { let mut arg = OsString::from("-bkeepfile:"); arg.push(find_native_static_library(name, verbatim, self.sess)); @@ -1939,14 +1939,14 @@ impl<'a> Linker for LlbcLinker<'a> { } fn optimize(&mut self) { - match self.sess.opts.optimize { + self.link_arg(match self.sess.opts.optimize { OptLevel::No => "-O0", OptLevel::Less => "-O1", OptLevel::More => "-O2", OptLevel::Aggressive => "-O3", OptLevel::Size => "-Os", OptLevel::SizeMin => "-Oz", - }; + }); } fn full_relro(&mut self) {} diff --git a/compiler/rustc_codegen_ssa/src/back/metadata.rs b/compiler/rustc_codegen_ssa/src/back/metadata.rs index 236507ac0cd..68b453ff424 100644 --- a/compiler/rustc_codegen_ssa/src/back/metadata.rs +++ b/compiler/rustc_codegen_ssa/src/back/metadata.rs @@ -9,8 +9,7 @@ use itertools::Itertools; use object::write::{self, StandardSegment, Symbol, SymbolSection}; use object::{ Architecture, BinaryFormat, Endianness, FileFlags, Object, ObjectSection, ObjectSymbol, - SectionFlags, SectionKind, SubArchitecture, SymbolFlags, SymbolKind, SymbolScope, elf, pe, - xcoff, + SectionFlags, SectionKind, SymbolFlags, SymbolKind, SymbolScope, elf, pe, xcoff, }; use rustc_abi::Endian; use rustc_data_structures::memmap::Mmap; @@ -206,51 
+205,10 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static Endian::Little => Endianness::Little, Endian::Big => Endianness::Big, }; - let (architecture, sub_architecture) = match &sess.target.arch[..] { - "arm" => (Architecture::Arm, None), - "aarch64" => ( - if sess.target.pointer_width == 32 { - Architecture::Aarch64_Ilp32 - } else { - Architecture::Aarch64 - }, - None, - ), - "x86" => (Architecture::I386, None), - "s390x" => (Architecture::S390x, None), - "mips" | "mips32r6" => (Architecture::Mips, None), - "mips64" | "mips64r6" => (Architecture::Mips64, None), - "x86_64" => ( - if sess.target.pointer_width == 32 { - Architecture::X86_64_X32 - } else { - Architecture::X86_64 - }, - None, - ), - "powerpc" => (Architecture::PowerPc, None), - "powerpc64" => (Architecture::PowerPc64, None), - "riscv32" => (Architecture::Riscv32, None), - "riscv64" => (Architecture::Riscv64, None), - "sparc" => { - if sess.unstable_target_features.contains(&sym::v8plus) { - // Target uses V8+, aka EM_SPARC32PLUS, aka 64-bit V9 but in 32-bit mode - (Architecture::Sparc32Plus, None) - } else { - // Target uses V7 or V8, aka EM_SPARC - (Architecture::Sparc, None) - } - } - "sparc64" => (Architecture::Sparc64, None), - "avr" => (Architecture::Avr, None), - "msp430" => (Architecture::Msp430, None), - "hexagon" => (Architecture::Hexagon, None), - "bpf" => (Architecture::Bpf, None), - "loongarch64" => (Architecture::LoongArch64, None), - "csky" => (Architecture::Csky, None), - "arm64ec" => (Architecture::Aarch64, Some(SubArchitecture::Arm64EC)), - // Unsupported architecture. - _ => return None, + let Some((architecture, sub_architecture)) = + sess.target.object_architecture(&sess.unstable_target_features) + else { + return None; }; let binary_format = sess.target.binary_format.to_object(); @@ -292,7 +250,26 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static file.set_mangling(original_mangling); } - let e_flags = match architecture { + let e_flags = elf_e_flags(architecture, sess); + // adapted from LLVM's `MCELFObjectTargetWriter::getOSABI` + let os_abi = elf_os_abi(sess); + let abi_version = 0; + add_gnu_property_note(&mut file, architecture, binary_format, endianness); + file.flags = FileFlags::Elf { os_abi, abi_version, e_flags }; + Some(file) +} + +pub(super) fn elf_os_abi(sess: &Session) -> u8 { + match sess.target.options.os.as_ref() { + "hermit" => elf::ELFOSABI_STANDALONE, + "freebsd" => elf::ELFOSABI_FREEBSD, + "solaris" => elf::ELFOSABI_SOLARIS, + _ => elf::ELFOSABI_NONE, + } +} + +pub(super) fn elf_e_flags(architecture: Architecture, sess: &Session) -> u32 { + match architecture { Architecture::Mips => { let arch = match sess.target.options.cpu.as_ref() { "mips1" => elf::EF_MIPS_ARCH_1, @@ -373,7 +350,11 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static Architecture::Avr => { // Resolve the ISA revision and set // the appropriate EF_AVR_ARCH flag. 
- ef_avr_arch(&sess.target.options.cpu) + if let Some(ref cpu) = sess.opts.cg.target_cpu { + ef_avr_arch(cpu) + } else { + bug!("AVR CPU not explicitly specified") + } } Architecture::Csky => { let e_flags = match sess.target.options.abi.as_ref() { @@ -383,18 +364,7 @@ pub(crate) fn create_object_file(sess: &Session) -> Option<write::Object<'static e_flags } _ => 0, - }; - // adapted from LLVM's `MCELFObjectTargetWriter::getOSABI` - let os_abi = match sess.target.options.os.as_ref() { - "hermit" => elf::ELFOSABI_STANDALONE, - "freebsd" => elf::ELFOSABI_FREEBSD, - "solaris" => elf::ELFOSABI_SOLARIS, - _ => elf::ELFOSABI_NONE, - }; - let abi_version = 0; - add_gnu_property_note(&mut file, architecture, binary_format, endianness); - file.flags = FileFlags::Elf { os_abi, abi_version, e_flags }; - Some(file) + } } /// Mach-O files contain information about: diff --git a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index f008bd12ed8..9cc737d194c 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -6,6 +6,7 @@ use std::sync::Arc; use std::sync::mpsc::{Receiver, Sender, channel}; use std::{fs, io, mem, str, thread}; +use rustc_abi::Size; use rustc_ast::attr; use rustc_ast::expand::autodiff_attrs::AutoDiffItem; use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; @@ -278,6 +279,10 @@ impl ModuleConfig { || self.emit_obj == EmitObj::Bitcode || self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) } + + pub fn embed_bitcode(&self) -> bool { + self.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) + } } /// Configuration passed to the function returned by the `target_machine_factory`. @@ -351,6 +356,7 @@ pub struct CodegenContext<B: WriteBackendMethods> { pub target_is_like_aix: bool, pub split_debuginfo: rustc_target::spec::SplitDebuginfo, pub split_dwarf_kind: rustc_session::config::SplitDwarfKind, + pub pointer_size: Size, /// All commandline args used to invoke the compiler, with @file args fully expanded. /// This will only be used within debug info, e.g. in the pdb file on windows @@ -469,7 +475,7 @@ pub(crate) fn start_async_codegen<B: ExtraBackendMethods>( ) -> OngoingCodegen<B> { let (coordinator_send, coordinator_receive) = channel(); - let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID); + let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID); let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins); let crate_info = CrateInfo::new(tcx, target_cpu); @@ -877,14 +883,14 @@ pub(crate) fn compute_per_cgu_lto_type( fn execute_optimize_work_item<B: ExtraBackendMethods>( cgcx: &CodegenContext<B>, - module: ModuleCodegen<B::Module>, + mut module: ModuleCodegen<B::Module>, module_config: &ModuleConfig, ) -> Result<WorkItemResult<B>, FatalError> { let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); unsafe { - B::optimize(cgcx, dcx, &module, module_config)?; + B::optimize(cgcx, dcx, &mut module, module_config)?; } // After we've done the initial round of optimizations we need to @@ -1212,6 +1218,7 @@ fn start_executing_work<B: ExtraBackendMethods>( split_debuginfo: tcx.sess.split_debuginfo(), split_dwarf_kind: tcx.sess.opts.unstable_opts.split_dwarf_kind, parallel: backend.supports_parallel() && !sess.opts.unstable_opts.no_parallel_backend, + pointer_size: tcx.data_layout.pointer_size, }; // This is the "main loop" of parallel work happening for parallel codegen. 
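
Note on the `LlbcLinker::optimize` hunk earlier in this diff: the old body chose the optimization flag with a `match` used as a statement, so the selected string was computed and then discarded instead of being passed to `link_arg`. A minimal standalone Rust sketch of the pitfall and the fix (illustrative only, not code from this patch; `push_opt_flag` is a made-up helper):

// Illustrative sketch, not part of the patch above.
fn push_opt_flag(args: &mut Vec<String>, level: u32) {
    // Before the fix (conceptually): the `match` is a statement, so the chosen
    // flag string is evaluated and silently dropped; no argument is added.
    match level {
        0 => "-O0",
        _ => "-O2",
    };

    // After the fix: the `match` result is actually fed into the argument list.
    args.push(match level { 0 => "-O0", _ => "-O2" }.to_string());
}
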
diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 73a97d32c2d..e9c886d28ed 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -687,7 +687,7 @@ pub fn codegen_crate<B: ExtraBackendMethods>( submit_codegened_module_to_llvm( &backend, &ongoing_codegen.coordinator.sender, - ModuleCodegen { name: llmod_id, module_llvm, kind: ModuleKind::Allocator }, + ModuleCodegen::new_allocator(llmod_id, module_llvm), cost, ); } @@ -876,7 +876,7 @@ impl CrateInfo { let linked_symbols = crate_types.iter().map(|&c| (c, crate::back::linker::linked_symbols(tcx, c))).collect(); let local_crate_name = tcx.crate_name(LOCAL_CRATE); - let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID); + let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID); let subsystem = ast::attr::first_attr_value_str_by_name(crate_attrs, sym::windows_subsystem); let windows_subsystem = subsystem.map(|subsystem| { diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs index 673740b4aab..998a4ff727e 100644 --- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs +++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs @@ -1,15 +1,11 @@ use std::str::FromStr; use rustc_abi::ExternAbi; -use rustc_ast::expand::autodiff_attrs::{ - AutoDiffAttrs, DiffActivity, DiffMode, valid_input_activity, valid_ret_activity, -}; +use rustc_ast::expand::autodiff_attrs::{AutoDiffAttrs, DiffActivity, DiffMode}; use rustc_ast::{MetaItem, MetaItemInner, attr}; use rustc_attr_parsing::ReprAttr::ReprAlign; use rustc_attr_parsing::{AttributeKind, InlineAttr, InstructionSetAttr, OptimizeAttr}; use rustc_data_structures::fx::FxHashMap; -use rustc_errors::codes::*; -use rustc_errors::{DiagMessage, SubdiagMessage, struct_span_code_err}; use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, LOCAL_CRATE, LocalDefId}; use rustc_hir::weak_lang_items::WEAK_LANG_ITEMS; @@ -64,7 +60,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { ); } - let attrs = tcx.hir().attrs(tcx.local_def_id_to_hir_id(did)); + let attrs = tcx.hir_attrs(tcx.local_def_id_to_hir_id(did)); let mut codegen_fn_attrs = CodegenFnAttrs::new(); if tcx.should_inherit_track_caller(did) { codegen_fn_attrs.flags |= CodegenFnAttrFlags::TRACK_CALLER; @@ -79,7 +75,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { // When `no_builtins` is applied at the crate level, we should add the // `no-builtins` attribute to each function to ensure it takes effect in LTO. - let crate_attrs = tcx.hir().attrs(rustc_hir::CRATE_HIR_ID); + let crate_attrs = tcx.hir_attrs(rustc_hir::CRATE_HIR_ID); let no_builtins = attr::contains_name(crate_attrs, sym::no_builtins); if no_builtins { codegen_fn_attrs.flags |= CodegenFnAttrFlags::NO_BUILTINS; @@ -236,13 +232,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { && let Some(fn_sig) = fn_sig() && fn_sig.skip_binder().abi() != ExternAbi::Rust { - struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0737, - "`#[track_caller]` requires Rust ABI" - ) - .emit(); + tcx.dcx().emit_err(errors::RequiresRustAbi { span: attr.span() }); } if is_closure && !tcx.features().closure_track_caller() @@ -263,13 +253,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { if s.as_str().contains('\0') { // `#[export_name = ...]` will be converted to a null-terminated string, // so it may not contain any null characters. 
- struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0648, - "`export_name` may not contain null characters" - ) - .emit(); + tcx.dcx().emit_err(errors::NullOnExport { span: attr.span() }); } codegen_fn_attrs.export_name = Some(s); mixed_export_name_no_mangle_lint_state.track_export_name(attr.span()); @@ -394,47 +378,28 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { [sym::arm, sym::a32 | sym::t32] if !tcx.sess.target.has_thumb_interworking => { - struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0779, - "target does not support `#[instruction_set]`" - ) - .emit(); + tcx.dcx().emit_err(errors::UnsuportedInstructionSet { + span: attr.span(), + }); None } [sym::arm, sym::a32] => Some(InstructionSetAttr::ArmA32), [sym::arm, sym::t32] => Some(InstructionSetAttr::ArmT32), _ => { - struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0779, - "invalid instruction set specified", - ) - .emit(); + tcx.dcx().emit_err(errors::InvalidInstructionSet { + span: attr.span(), + }); None } } } [] => { - struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0778, - "`#[instruction_set]` requires an argument" - ) - .emit(); + tcx.dcx().emit_err(errors::BareInstructionSet { span: attr.span() }); None } _ => { - struct_span_code_err!( - tcx.dcx(), - attr.span(), - E0779, - "cannot specify more than one instruction set" - ) - .emit(); + tcx.dcx() + .emit_err(errors::MultipleInstructionSet { span: attr.span() }); None } }) @@ -445,58 +410,38 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { let mut entry = None; for item in l { let Some(meta_item) = item.meta_item() else { - tcx.dcx().span_err(item.span(), "expected name value pair"); + tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() }); continue; }; let Some(name_value_lit) = meta_item.name_value_literal() else { - tcx.dcx().span_err(item.span(), "expected name value pair"); + tcx.dcx().emit_err(errors::ExpectedNameValuePair { span: item.span() }); continue; }; - fn emit_error_with_label( - tcx: TyCtxt<'_>, - span: Span, - error: impl Into<DiagMessage>, - label: impl Into<SubdiagMessage>, - ) { - let mut err: rustc_errors::Diag<'_, _> = - tcx.dcx().struct_span_err(span, error); - err.span_label(span, label); - err.emit(); - } - let attrib_to_write = match meta_item.name_or_empty() { sym::prefix_nops => &mut prefix, sym::entry_nops => &mut entry, _ => { - emit_error_with_label( - tcx, - item.span(), - "unexpected parameter name", - format!("expected {} or {}", sym::prefix_nops, sym::entry_nops), - ); + tcx.dcx().emit_err(errors::UnexpectedParameterName { + span: item.span(), + prefix_nops: sym::prefix_nops, + entry_nops: sym::entry_nops, + }); continue; } }; let rustc_ast::LitKind::Int(val, _) = name_value_lit.kind else { - emit_error_with_label( - tcx, - name_value_lit.span, - "invalid literal value", - "value must be an integer between `0` and `255`", - ); + tcx.dcx().emit_err(errors::InvalidLiteralValue { + span: name_value_lit.span, + }); continue; }; let Ok(val) = val.get().try_into() else { - emit_error_with_label( - tcx, - name_value_lit.span, - "integer value out of range", - "value must be between `0` and `255`", - ); + tcx.dcx() + .emit_err(errors::OutOfRangeInteger { span: name_value_lit.span }); continue; }; @@ -533,7 +478,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { inline_span = Some(attr.span()); let [item] = &items[..] 
else { - struct_span_code_err!(tcx.dcx(), attr.span(), E0534, "expected one argument").emit(); + tcx.dcx().emit_err(errors::ExpectedOneArgument { span: attr.span() }); return InlineAttr::None; }; @@ -542,9 +487,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { } else if item.has_name(sym::never) { InlineAttr::Never } else { - struct_span_code_err!(tcx.dcx(), items[0].span(), E0535, "invalid argument") - .with_help("valid inline arguments are `always` and `never`") - .emit(); + tcx.dcx().emit_err(errors::InvalidArgument { span: items[0].span() }); InlineAttr::None } @@ -575,9 +518,8 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { if !attr.has_name(sym::optimize) { return ia; } - let err = |sp, s| struct_span_code_err!(tcx.dcx(), sp, E0722, "{}", s).emit(); if attr.is_word() { - err(attr.span(), "expected one argument"); + tcx.dcx().emit_err(errors::ExpectedOneArgumentOptimize { span: attr.span() }); return ia; } let Some(ref items) = attr.meta_item_list() else { @@ -586,7 +528,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { inline_span = Some(attr.span()); let [item] = &items[..] else { - err(attr.span(), "expected one argument"); + tcx.dcx().emit_err(errors::ExpectedOneArgumentOptimize { span: attr.span() }); return OptimizeAttr::Default; }; if item.has_name(sym::size) { @@ -596,7 +538,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { } else if item.has_name(sym::none) { OptimizeAttr::DoNotOptimize } else { - err(item.span(), "invalid argument"); + tcx.dcx().emit_err(errors::InvalidArgumentOptimize { span: item.span() }); OptimizeAttr::Default } }); @@ -930,15 +872,6 @@ fn autodiff_attrs(tcx: TyCtxt<'_>, id: DefId) -> Option<AutoDiffAttrs> { } } - for &input in &arg_activities { - if !valid_input_activity(mode, input) { - span_bug!(attr.span(), "Invalid input activity {} for {} mode", input, mode); - } - } - if !valid_ret_activity(mode, ret_activity) { - span_bug!(attr.span(), "Invalid return activity {} for {} mode", ret_activity, mode); - } - Some(AutoDiffAttrs { mode, ret_activity, input_activity: arg_activities }) } diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index 5e25de02a77..394c80fcfbd 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -136,6 +136,110 @@ pub(crate) struct NoSavedObjectFile<'a> { } #[derive(Diagnostic)] +#[diag(codegen_ssa_requires_rust_abi, code = E0737)] +pub(crate) struct RequiresRustAbi { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_null_on_export, code = E0648)] +pub(crate) struct NullOnExport { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unsupported_instruction_set, code = E0779)] +pub(crate) struct UnsuportedInstructionSet { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_invalid_instruction_set, code = E0779)] +pub(crate) struct InvalidInstructionSet { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_bare_instruction_set, code = E0778)] +pub(crate) struct BareInstructionSet { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_multiple_instruction_set, code = E0779)] +pub(crate) struct MultipleInstructionSet { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_expected_name_value_pair)] +pub(crate) struct ExpectedNameValuePair 
{ + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_unexpected_parameter_name)] +pub(crate) struct UnexpectedParameterName { + #[primary_span] + #[label] + pub span: Span, + pub prefix_nops: Symbol, + pub entry_nops: Symbol, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_invalid_literal_value)] +pub(crate) struct InvalidLiteralValue { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_out_of_range_integer)] +pub(crate) struct OutOfRangeInteger { + #[primary_span] + #[label] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_expected_one_argument, code = E0534)] +pub(crate) struct ExpectedOneArgument { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_expected_one_argument, code = E0722)] +pub(crate) struct ExpectedOneArgumentOptimize { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_invalid_argument, code = E0535)] +#[help] +pub(crate) struct InvalidArgument { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(codegen_ssa_invalid_argument, code = E0722)] +pub(crate) struct InvalidArgumentOptimize { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(codegen_ssa_copy_path_buf)] pub(crate) struct CopyPathBuf { pub source_file: PathBuf, @@ -161,7 +265,7 @@ impl<'a> CopyPath<'a> { struct DebugArgPath<'a>(pub &'a Path); impl IntoDiagArg for DebugArgPath<'_> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { DiagArgValue::Str(Cow::Owned(format!("{:?}", self.0))) } } @@ -957,6 +1061,7 @@ pub enum InvalidMonomorphization<'tcx> { }, #[diag(codegen_ssa_invalid_monomorphization_mask_type, code = E0511)] + #[note] MaskType { #[primary_span] span: Span, @@ -1087,7 +1192,7 @@ pub enum ExpectedPointerMutability { } impl IntoDiagArg for ExpectedPointerMutability { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { match self { ExpectedPointerMutability::Mut => DiagArgValue::Str(Cow::Borrowed("*mut")), ExpectedPointerMutability::Not => DiagArgValue::Str(Cow::Borrowed("*_")), diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index 9d2ac219d59..44b8cc459cf 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -2,6 +2,7 @@ #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(assert_matches)] @@ -14,7 +15,6 @@ #![feature(rustdoc_internals)] #![feature(trait_alias)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end //! This crate contains codegen code that is used by all codegen backends (LLVM and others). @@ -75,9 +75,29 @@ pub struct ModuleCodegen<M> { pub name: String, pub module_llvm: M, pub kind: ModuleKind, + /// Saving the ThinLTO buffer for embedding in the object file. 
+ pub thin_lto_buffer: Option<Vec<u8>>, } impl<M> ModuleCodegen<M> { + pub fn new_regular(name: impl Into<String>, module: M) -> Self { + Self { + name: name.into(), + module_llvm: module, + kind: ModuleKind::Regular, + thin_lto_buffer: None, + } + } + + pub fn new_allocator(name: impl Into<String>, module: M) -> Self { + Self { + name: name.into(), + module_llvm: module, + kind: ModuleKind::Allocator, + thin_lto_buffer: None, + } + } + pub fn into_compiled_module( self, emit_obj: bool, diff --git a/compiler/rustc_codegen_ssa/src/mir/block.rs b/compiler/rustc_codegen_ssa/src/mir/block.rs index e2a9b540d30..6d1930a402d 100644 --- a/compiler/rustc_codegen_ssa/src/mir/block.rs +++ b/compiler/rustc_codegen_ssa/src/mir/block.rs @@ -3,6 +3,7 @@ use std::cmp; use rustc_abi::{BackendRepr, ExternAbi, HasDataLayout, Reg, WrappingRange}; use rustc_ast as ast; use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece}; +use rustc_data_structures::packed::Pu128; use rustc_hir::lang_items::LangItem; use rustc_middle::mir::{self, AssertKind, InlineAsmMacro, SwitchTargets, UnwindTerminateReason}; use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, ValidityRequirement}; @@ -406,6 +407,39 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let cmp = bx.icmp(IntPredicate::IntEQ, discr_value, llval); bx.cond_br_with_expect(cmp, lltarget, llotherwise, expect); } + } else if target_iter.len() == 2 + && self.mir[targets.otherwise()].is_empty_unreachable() + && targets.all_values().contains(&Pu128(0)) + && targets.all_values().contains(&Pu128(1)) + { + // This is the really common case for `bool`, `Option`, etc. + // By using `trunc nuw` we communicate that other values are + // impossible without needing `switch` or `assume`s. + let true_bb = targets.target_for_value(1); + let false_bb = targets.target_for_value(0); + let true_ll = helper.llbb_with_cleanup(self, true_bb); + let false_ll = helper.llbb_with_cleanup(self, false_bb); + + let expected_cond_value = if self.cx.sess().opts.optimize == OptLevel::No { + None + } else { + match (self.cold_blocks[true_bb], self.cold_blocks[false_bb]) { + // Same coldness, no expectation + (true, true) | (false, false) => None, + // Different coldness, expect the non-cold one + (true, false) => Some(false), + (false, true) => Some(true), + } + }; + + let bool_ty = bx.tcx().types.bool; + let cond = if switch_ty == bool_ty { + discr_value + } else { + let bool_llty = bx.immediate_backend_type(bx.layout_of(bool_ty)); + bx.unchecked_utrunc(discr_value, bool_llty) + }; + bx.cond_br_with_expect(cond, true_ll, false_ll, expected_cond_value); } else if self.cx.sess().opts.optimize == OptLevel::No && target_iter.len() == 2 && self.mir[targets.otherwise()].is_empty_unreachable() diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs index 0593fb420c3..96d1ab018f6 100644 --- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs +++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs @@ -349,7 +349,7 @@ fn wasm_type<'tcx>( PassMode::Direct(_) => { let direct_type = match arg_abi.layout.backend_repr { BackendRepr::Scalar(scalar) => wasm_primitive(scalar.primitive(), ptr_type), - BackendRepr::Vector { .. } => "v128", + BackendRepr::SimdVector { .. } => "v128", BackendRepr::Memory { .. 
} => { // FIXME: remove this branch once the wasm32-unknown-unknown ABI is fixed let _ = WasmCAbi::Legacy; diff --git a/compiler/rustc_codegen_ssa/src/mir/operand.rs b/compiler/rustc_codegen_ssa/src/mir/operand.rs index 461cf1b8acd..acae09b2c25 100644 --- a/compiler/rustc_codegen_ssa/src/mir/operand.rs +++ b/compiler/rustc_codegen_ssa/src/mir/operand.rs @@ -203,30 +203,14 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let alloc_align = alloc.inner().align; assert!(alloc_align >= layout.align.abi); - // Returns `None` when the value is partially undefined or any byte of it has provenance. - // Otherwise returns the value or (if the entire value is undef) returns an undef. let read_scalar = |start, size, s: abi::Scalar, ty| { - let range = alloc_range(start, size); match alloc.0.read_scalar( bx, - range, + alloc_range(start, size), /*read_provenance*/ matches!(s.primitive(), abi::Primitive::Pointer(_)), ) { - Ok(val) => Some(bx.scalar_to_backend(val, s, ty)), - Err(_) => { - // We may have failed due to partial provenance or unexpected provenance, - // continue down the normal code path if so. - if alloc.0.provenance().range_empty(range, &bx.tcx()) - // Since `read_scalar` failed, but there were no relocations involved, the - // bytes must be partially or fully uninitialized. Thus we can now unwrap the - // information about the range of uninit bytes and check if it's the full range. - && alloc.0.init_mask().is_range_initialized(range).unwrap_err() == range - { - Some(bx.const_undef(ty)) - } else { - None - } - } + Ok(val) => bx.scalar_to_backend(val, s, ty), + Err(_) => bx.const_poison(ty), } }; @@ -237,14 +221,16 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { // check that walks over the type of `mplace` to make sure it is truly correct to treat this // like a `Scalar` (or `ScalarPair`). match layout.backend_repr { - BackendRepr::Scalar(s) => { + BackendRepr::Scalar(s @ abi::Scalar::Initialized { .. }) => { let size = s.size(bx); assert_eq!(size, layout.size, "abi::Scalar size does not match layout size"); - if let Some(val) = read_scalar(offset, size, s, bx.immediate_backend_type(layout)) { - return OperandRef { val: OperandValue::Immediate(val), layout }; - } + let val = read_scalar(offset, size, s, bx.immediate_backend_type(layout)); + OperandRef { val: OperandValue::Immediate(val), layout } } - BackendRepr::ScalarPair(a, b) => { + BackendRepr::ScalarPair( + a @ abi::Scalar::Initialized { .. }, + b @ abi::Scalar::Initialized { .. }, + ) => { let (a_size, b_size) = (a.size(bx), b.size(bx)); let b_offset = (offset + a_size).align_to(b.align(bx).abi); assert!(b_offset.bytes() > 0); @@ -260,21 +246,20 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { b, bx.scalar_pair_element_backend_type(layout, 1, true), ); - if let (Some(a_val), Some(b_val)) = (a_val, b_val) { - return OperandRef { val: OperandValue::Pair(a_val, b_val), layout }; - } + OperandRef { val: OperandValue::Pair(a_val, b_val), layout } + } + _ if layout.is_zst() => OperandRef::zero_sized(layout), + _ => { + // Neither a scalar nor scalar pair. Load from a place + // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the + // same `ConstAllocation`? 
+ let init = bx.const_data_from_alloc(alloc); + let base_addr = bx.static_addr_of(init, alloc_align, None); + + let llval = bx.const_ptr_byte_offset(base_addr, offset); + bx.load_operand(PlaceRef::new_sized(llval, layout)) } - _ if layout.is_zst() => return OperandRef::zero_sized(layout), - _ => {} } - // Neither a scalar nor scalar pair. Load from a place - // FIXME: should we cache `const_data_from_alloc` to avoid repeating this for the - // same `ConstAllocation`? - let init = bx.const_data_from_alloc(alloc); - let base_addr = bx.static_addr_of(init, alloc_align, None); - - let llval = bx.const_ptr_byte_offset(base_addr, offset); - bx.load_operand(PlaceRef::new_sized(llval, layout)) } /// Asserts that this operand refers to a scalar and returns @@ -359,7 +344,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { let offset = self.layout.fields.offset(i); if !bx.is_backend_ref(self.layout) && bx.is_backend_ref(field) { - if let BackendRepr::Vector { count, .. } = self.layout.backend_repr + if let BackendRepr::SimdVector { count, .. } = self.layout.backend_repr && let BackendRepr::Memory { sized: true } = field.backend_repr && count.is_power_of_two() { @@ -404,7 +389,7 @@ impl<'a, 'tcx, V: CodegenObject> OperandRef<'tcx, V> { } }; OperandValue::Immediate(match field.backend_repr { - BackendRepr::Vector { .. } => imm, + BackendRepr::SimdVector { .. } => imm, BackendRepr::Scalar(out_scalar) => { let Some(in_scalar) = in_scalar else { span_bug!( @@ -666,7 +651,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { // However, some SIMD types do not actually use the vector ABI // (in particular, packed SIMD types do not). Ensure we exclude those. let layout = bx.layout_of(constant_ty); - if let BackendRepr::Vector { .. } = layout.backend_repr { + if let BackendRepr::SimdVector { .. } = layout.backend_repr { let (llval, ty) = self.immediate_const_vector(bx, constant); return OperandRef { val: OperandValue::Immediate(llval), diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index edd09b9c3c5..6988724b421 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -2,7 +2,7 @@ use rustc_abi::Primitive::{Int, Pointer}; use rustc_abi::{Align, BackendRepr, FieldsShape, Size, TagEncoding, VariantIdx, Variants}; use rustc_middle::mir::PlaceTy; use rustc_middle::mir::interpret::Scalar; -use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, Ty}; use rustc_middle::{bug, mir}; use tracing::{debug, instrument}; @@ -133,7 +133,7 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { Self::alloca(bx, ptr_layout) } - pub fn len<Cx: ConstCodegenMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V { + pub fn len<Cx: ConstCodegenMethods<Value = V>>(&self, cx: &Cx) -> V { if let FieldsShape::Array { count, .. 
} = self.layout.fields { if self.layout.is_unsized() { assert_eq!(count, 0); @@ -168,7 +168,11 @@ impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> { }; let val = PlaceValue { llval, - llextra: if bx.cx().type_has_metadata(field.ty) { self.val.llextra } else { None }, + llextra: if bx.cx().tcx().type_has_metadata(field.ty, bx.cx().typing_env()) { + self.val.llextra + } else { + None + }, align: effective_field_align, }; val.with_type(field) diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 1eebe04225b..95b108b1d33 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -3,12 +3,12 @@ use std::assert_matches::assert_matches; use arrayvec::ArrayVec; use rustc_abi::{self as abi, FIRST_VARIANT, FieldIdx}; use rustc_middle::ty::adjustment::PointerCoercion; -use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout}; +use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, LayoutOf, TyAndLayout}; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; use rustc_middle::{bug, mir, span_bug}; use rustc_session::config::OptLevel; use rustc_span::{DUMMY_SP, Span}; -use tracing::{debug, instrument, trace}; +use tracing::{debug, instrument}; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; @@ -93,8 +93,6 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return; } - // If `v` is an integer constant whose value is just a single byte repeated N times, - // emit a `memset` filling the entire `dest` with that byte. let try_init_all_same = |bx: &mut Bx, v| { let start = dest.val.llval; let size = bx.const_usize(dest.layout.size.bytes()); @@ -119,33 +117,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { false }; - trace!(?cg_elem.val); match cg_elem.val { OperandValue::Immediate(v) => { if try_init_all_same(bx, v) { return; } } - OperandValue::Pair(a, b) => { - let a_is_undef = bx.cx().is_undef(a); - match (a_is_undef, bx.cx().is_undef(b)) { - // Can happen for uninit unions - (true, true) => { - // FIXME: can we produce better output here? - } - (false, true) | (true, false) => { - let val = if a_is_undef { b } else { a }; - if try_init_all_same(bx, val) { - return; - } - } - (false, false) => { - // FIXME: if both are the same value, use try_init_all_same - } - } - } - OperandValue::ZeroSized => unreachable!("checked above"), - OperandValue::Ref(..) => {} + _ => (), } let count = self @@ -386,6 +364,13 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { ) -> Bx::Value { assert_eq!(from_scalar.size(self.cx), to_scalar.size(self.cx)); + // While optimizations will remove no-op transmutes, they might still be + // there in debug or things that aren't no-op in MIR because they change + // the Rust type but not the underlying layout/niche. 
+ if from_scalar == to_scalar && from_backend_ty == to_backend_ty { + return imm; + } + use abi::Primitive::*; imm = bx.from_immediate(imm); @@ -878,7 +863,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { let ty = cg_place.layout.ty; assert!( - if bx.cx().type_has_metadata(ty) { + if bx.cx().tcx().type_has_metadata(ty, bx.cx().typing_env()) { matches!(val, OperandValue::Pair(..)) } else { matches!(val, OperandValue::Immediate(..)) @@ -1190,7 +1175,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { assert!(!self.cx.is_backend_scalar_pair(layout)); OperandValueKind::Immediate(match layout.backend_repr { abi::BackendRepr::Scalar(s) => s, - abi::BackendRepr::Vector { element, .. } => element, + abi::BackendRepr::SimdVector { element, .. } => element, x => span_bug!(self.mir.span, "Couldn't translate {x:?} as backend immediate"), }) } else if self.cx.is_backend_scalar_pair(layout) { diff --git a/compiler/rustc_codegen_ssa/src/target_features.rs b/compiler/rustc_codegen_ssa/src/target_features.rs index d8b9bdb55da..8058cd1b178 100644 --- a/compiler/rustc_codegen_ssa/src/target_features.rs +++ b/compiler/rustc_codegen_ssa/src/target_features.rs @@ -10,7 +10,7 @@ use rustc_middle::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_session::parse::feature_err; use rustc_span::{Span, Symbol, sym}; -use rustc_target::target_features; +use rustc_target::target_features::{self, Stability}; use crate::errors; @@ -87,12 +87,17 @@ pub(crate) fn from_target_feature_attr( // But ensure the ABI does not forbid enabling this. // Here we do assume that LLVM doesn't add even more implied features // we don't know about, at least no features that would have ABI effects! - if abi_feature_constraints.incompatible.contains(&name.as_str()) { - tcx.dcx().emit_err(errors::ForbiddenTargetFeatureAttr { - span: item.span(), - feature: name.as_str(), - reason: "this feature is incompatible with the target ABI", - }); + // We skip this logic in rustdoc, where we want to allow all target features of + // all targets, so we can't check their ABI compatibility and anyway we are not + // generating code so "it's fine". + if !tcx.sess.opts.actually_rustdoc { + if abi_feature_constraints.incompatible.contains(&name.as_str()) { + tcx.dcx().emit_err(errors::ForbiddenTargetFeatureAttr { + span: item.span(), + feature: name.as_str(), + reason: "this feature is incompatible with the target ABI", + }); + } } target_features.push(TargetFeature { name, implied: name != feature_sym }) } @@ -142,11 +147,38 @@ pub(crate) fn provide(providers: &mut Providers) { rust_target_features: |tcx, cnum| { assert_eq!(cnum, LOCAL_CRATE); if tcx.sess.opts.actually_rustdoc { - // rustdoc needs to be able to document functions that use all the features, so - // whitelist them all - rustc_target::target_features::all_rust_features() - .map(|(a, b)| (a.to_string(), b)) - .collect() + // HACK: rustdoc would like to pretend that we have all the target features, so we + // have to merge all the lists into one. To ensure an unstable target never prevents + // a stable one from working, we merge the stability info of all instances of the + // same target feature name, with the "most stable" taking precedence. And then we + // hope that this doesn't cause issues anywhere else in the compiler... 
+                let mut result: UnordMap<String, Stability> = Default::default();
+                for (name, stability) in rustc_target::target_features::all_rust_features() {
+                    use std::collections::hash_map::Entry;
+                    match result.entry(name.to_owned()) {
+                        Entry::Vacant(vacant_entry) => {
+                            vacant_entry.insert(stability);
+                        }
+                        Entry::Occupied(mut occupied_entry) => {
+                            // Merge the two stabilities, "more stable" taking precedence.
+                            match (occupied_entry.get(), stability) {
+                                (Stability::Stable, _)
+                                | (
+                                    Stability::Unstable { .. },
+                                    Stability::Unstable { .. } | Stability::Forbidden { .. },
+                                )
+                                | (Stability::Forbidden { .. }, Stability::Forbidden { .. }) => {
+                                    // The stability in the entry is at least as good as the new one, just keep it.
+                                }
+                                _ => {
+                                    // Overwrite stability.
+                                    occupied_entry.insert(stability);
+                                }
+                            }
+                        }
+                    }
+                }
+                result
             } else {
                 tcx.sess
                     .target
@@ -158,7 +190,7 @@ pub(crate) fn provide(providers: &mut Providers) {
         },
         implied_target_features: |tcx, feature: Symbol| {
             let feature = feature.as_str();
-            UnordSet::from(tcx.sess.target.implied_target_features(std::iter::once(feature)))
+            UnordSet::from(tcx.sess.target.implied_target_features(feature))
                 .into_sorted_stable_ord()
                 .into_iter()
                 .map(|s| Symbol::intern(s))
diff --git a/compiler/rustc_codegen_ssa/src/traits/abi.rs b/compiler/rustc_codegen_ssa/src/traits/abi.rs
index 60d8f2a9ece..49c51caa996 100644
--- a/compiler/rustc_codegen_ssa/src/traits/abi.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/abi.rs
@@ -1,5 +1,5 @@
 use super::BackendTypes;
-pub trait AbiBuilderMethods<'tcx>: BackendTypes {
+pub trait AbiBuilderMethods: BackendTypes {
     fn get_param(&mut self, index: usize) -> Self::Value;
 }
diff --git a/compiler/rustc_codegen_ssa/src/traits/backend.rs b/compiler/rustc_codegen_ssa/src/traits/backend.rs
index ebcf118b903..65fd843e7a5 100644
--- a/compiler/rustc_codegen_ssa/src/traits/backend.rs
+++ b/compiler/rustc_codegen_ssa/src/traits/backend.rs
@@ -45,10 +45,13 @@ pub trait CodegenBackend {
     fn print(&self, _req: &PrintRequest, _out: &mut String, _sess: &Session) {}
-    /// Returns the features that should be set in `cfg(target_features)`.
+    /// Returns two feature sets:
+    /// - The first has the features that should be set in `cfg(target_features)`.
+    /// - The second is like the first, but also includes unstable features.
+    ///
     /// RUSTC_SPECIFIC_FEATURES should be skipped here, those are handled outside codegen.
- fn target_features_cfg(&self, _sess: &Session, _allow_unstable: bool) -> Vec<Symbol> { - vec![] + fn target_features_cfg(&self, _sess: &Session) -> (Vec<Symbol>, Vec<Symbol>) { + (vec![], vec![]) } fn print_passes(&self) {} diff --git a/compiler/rustc_codegen_ssa/src/traits/builder.rs b/compiler/rustc_codegen_ssa/src/traits/builder.rs index 99fd6b6510f..5f91133d5b4 100644 --- a/compiler/rustc_codegen_ssa/src/traits/builder.rs +++ b/compiler/rustc_codegen_ssa/src/traits/builder.rs @@ -40,7 +40,7 @@ pub trait BuilderMethods<'a, 'tcx>: + CoverageInfoBuilderMethods<'tcx> + DebugInfoBuilderMethods + ArgAbiBuilderMethods<'tcx> - + AbiBuilderMethods<'tcx> + + AbiBuilderMethods + IntrinsicCallBuilderMethods<'tcx> + AsmBuilderMethods<'tcx> + StaticBuilderMethods diff --git a/compiler/rustc_codegen_ssa/src/traits/consts.rs b/compiler/rustc_codegen_ssa/src/traits/consts.rs index 5cfb56ebace..d83a04d814b 100644 --- a/compiler/rustc_codegen_ssa/src/traits/consts.rs +++ b/compiler/rustc_codegen_ssa/src/traits/consts.rs @@ -3,13 +3,12 @@ use rustc_middle::mir::interpret::{ConstAllocation, Scalar}; use super::BackendTypes; -pub trait ConstCodegenMethods<'tcx>: BackendTypes { +pub trait ConstCodegenMethods: BackendTypes { // Constant constructors fn const_null(&self, t: Self::Type) -> Self::Value; /// Generate an uninitialized value (matching uninitialized memory in MIR). /// Whether memory is initialized or not is tracked byte-for-byte. fn const_undef(&self, t: Self::Type) -> Self::Value; - fn is_undef(&self, v: Self::Value) -> bool; /// Generate a fake value. Poison always affects the entire value, even if just a single byte is /// poison. This can only be used in codepaths that are already UB, i.e., UB-free Rust code /// (including code that e.g. copies uninit memory with `MaybeUninit`) can never encounter a @@ -38,7 +37,7 @@ pub trait ConstCodegenMethods<'tcx>: BackendTypes { fn const_to_opt_uint(&self, v: Self::Value) -> Option<u64>; fn const_to_opt_u128(&self, v: Self::Value, sign_ext: bool) -> Option<u128>; - fn const_data_from_alloc(&self, alloc: ConstAllocation<'tcx>) -> Self::Value; + fn const_data_from_alloc(&self, alloc: ConstAllocation<'_>) -> Self::Value; fn scalar_to_backend(&self, cv: Scalar, layout: abi::Scalar, llty: Self::Type) -> Self::Value; diff --git a/compiler/rustc_codegen_ssa/src/traits/mod.rs b/compiler/rustc_codegen_ssa/src/traits/mod.rs index 90fcfbe4da7..239857a4298 100644 --- a/compiler/rustc_codegen_ssa/src/traits/mod.rs +++ b/compiler/rustc_codegen_ssa/src/traits/mod.rs @@ -55,7 +55,7 @@ pub trait CodegenObject = Copy + PartialEq + fmt::Debug; pub trait CodegenMethods<'tcx> = LayoutOf<'tcx, LayoutOfResult = TyAndLayout<'tcx>> + FnAbiOf<'tcx, FnAbiOfResult = &'tcx FnAbi<'tcx, Ty<'tcx>>> + TypeCodegenMethods<'tcx> - + ConstCodegenMethods<'tcx> + + ConstCodegenMethods + StaticCodegenMethods + DebugInfoCodegenMethods<'tcx> + AsmCodegenMethods<'tcx> diff --git a/compiler/rustc_codegen_ssa/src/traits/type_.rs b/compiler/rustc_codegen_ssa/src/traits/type_.rs index c178ebc596e..32d9f27d32d 100644 --- a/compiler/rustc_codegen_ssa/src/traits/type_.rs +++ b/compiler/rustc_codegen_ssa/src/traits/type_.rs @@ -1,7 +1,7 @@ use rustc_abi::{AddressSpace, Float, Integer, Reg}; use rustc_middle::bug; +use rustc_middle::ty::Ty; use rustc_middle::ty::layout::{HasTyCtxt, HasTypingEnv, TyAndLayout}; -use rustc_middle::ty::{self, Ty}; use rustc_target::callconv::{ArgAbi, CastTarget, FnAbi}; use super::BackendTypes; @@ -9,7 +9,7 @@ use super::misc::MiscCodegenMethods; use crate::common::TypeKind; 
use crate::mir::place::PlaceRef; -pub trait BaseTypeCodegenMethods<'tcx>: BackendTypes { +pub trait BaseTypeCodegenMethods: BackendTypes { fn type_i8(&self) -> Self::Type; fn type_i16(&self) -> Self::Type; fn type_i32(&self) -> Self::Type; @@ -41,7 +41,7 @@ pub trait BaseTypeCodegenMethods<'tcx>: BackendTypes { } pub trait DerivedTypeCodegenMethods<'tcx>: - BaseTypeCodegenMethods<'tcx> + MiscCodegenMethods<'tcx> + HasTyCtxt<'tcx> + HasTypingEnv<'tcx> + BaseTypeCodegenMethods + MiscCodegenMethods<'tcx> + HasTyCtxt<'tcx> + HasTypingEnv<'tcx> { fn type_int(&self) -> Self::Type { match &self.sess().target.c_int_width[..] { @@ -84,26 +84,10 @@ pub trait DerivedTypeCodegenMethods<'tcx>: fn type_is_freeze(&self, ty: Ty<'tcx>) -> bool { ty.is_freeze(self.tcx(), self.typing_env()) } - - fn type_has_metadata(&self, ty: Ty<'tcx>) -> bool { - if ty.is_sized(self.tcx(), self.typing_env()) { - return false; - } - - let tail = self.tcx().struct_tail_for_codegen(ty, self.typing_env()); - match tail.kind() { - ty::Foreign(..) => false, - ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, - _ => bug!("unexpected unsized tail: {:?}", tail), - } - } } impl<'tcx, T> DerivedTypeCodegenMethods<'tcx> for T where - Self: BaseTypeCodegenMethods<'tcx> - + MiscCodegenMethods<'tcx> - + HasTyCtxt<'tcx> - + HasTypingEnv<'tcx> + Self: BaseTypeCodegenMethods + MiscCodegenMethods<'tcx> + HasTyCtxt<'tcx> + HasTypingEnv<'tcx> { } diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index 97fe614aa10..c77efdd1728 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -40,7 +40,7 @@ pub trait WriteBackendMethods: 'static + Sized + Clone { unsafe fn optimize( cgcx: &CodegenContext<Self>, dcx: DiagCtxtHandle<'_>, - module: &ModuleCodegen<Self::Module>, + module: &mut ModuleCodegen<Self::Module>, config: &ModuleConfig, ) -> Result<(), FatalError>; fn optimize_fat( diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index eecc6690f51..ccf9b240d40 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -109,6 +109,8 @@ const_eval_frame_note_inner = inside {$where_ -> *[other] {""} } +const_eval_frame_note_last = the failure occurred here + const_eval_in_bounds_test = out-of-bounds pointer use const_eval_incompatible_calling_conventions = calling a function with calling convention {$callee_conv} using calling convention {$caller_conv} @@ -300,8 +302,7 @@ const_eval_overflow_arith = const_eval_overflow_shift = overflowing shift by {$shift_amount} in `{$intrinsic}` -const_eval_panic = - the evaluated program panicked at '{$msg}', {$file}:{$line}:{$col} +const_eval_panic = evaluation panicked: {$msg} const_eval_panic_non_str = argument to `panic!()` in a const context must have type `&str` diff --git a/compiler/rustc_const_eval/src/check_consts/mod.rs b/compiler/rustc_const_eval/src/check_consts/mod.rs index 659d4a30456..06ee7075170 100644 --- a/compiler/rustc_const_eval/src/check_consts/mod.rs +++ b/compiler/rustc_const_eval/src/check_consts/mod.rs @@ -81,7 +81,7 @@ pub fn rustc_allow_const_fn_unstable( def_id: LocalDefId, feature_gate: Symbol, ) -> bool { - let attrs = tcx.hir().attrs(tcx.local_def_id_to_hir_id(def_id)); + let attrs = tcx.hir_attrs(tcx.local_def_id_to_hir_id(def_id)); find_attr!(attrs, AttributeKind::AllowConstFnUnstable(syms) if syms.contains(&feature_gate)) } @@ -94,6 +94,11 @@ pub fn rustc_allow_const_fn_unstable( 
/// world into two functions: those that are safe to expose on stable (and hence may not use /// unstable features, not even recursively), and those that are not. pub fn is_fn_or_trait_safe_to_expose_on_stable(tcx: TyCtxt<'_>, def_id: DefId) -> bool { + // A default body in a `#[const_trait]` is const-stable when the trait is const-stable. + if tcx.is_const_default_method(def_id) { + return is_fn_or_trait_safe_to_expose_on_stable(tcx, tcx.parent(def_id)); + } + match tcx.lookup_const_stability(def_id) { None => { // In a `staged_api` crate, we do enforce recursive const stability for all unmarked diff --git a/compiler/rustc_const_eval/src/const_eval/error.rs b/compiler/rustc_const_eval/src/const_eval/error.rs index cc21a18af3a..ffb32fa41eb 100644 --- a/compiler/rustc_const_eval/src/const_eval/error.rs +++ b/compiler/rustc_const_eval/src/const_eval/error.rs @@ -48,11 +48,8 @@ impl MachineStopType for ConstEvalErrKind { | ModifiedGlobal | WriteThroughImmutablePointer => {} AssertFailure(kind) => kind.add_args(adder), - Panic { msg, line, col, file } => { - adder("msg".into(), msg.into_diag_arg()); - adder("file".into(), file.into_diag_arg()); - adder("line".into(), line.into_diag_arg()); - adder("col".into(), col.into_diag_arg()); + Panic { msg, .. } => { + adder("msg".into(), msg.into_diag_arg(&mut None)); } } } @@ -72,7 +69,7 @@ pub fn get_span_and_frames<'tcx>( let mut stacktrace = Frame::generate_stacktrace_from_stack(stack); // Filter out `requires_caller_location` frames. stacktrace.retain(|frame| !frame.instance.def.requires_caller_location(*tcx)); - let span = stacktrace.first().map(|f| f.span).unwrap_or(tcx.span); + let span = stacktrace.last().map(|f| f.span).unwrap_or(tcx.span); let mut frames = Vec::new(); @@ -115,6 +112,20 @@ pub fn get_span_and_frames<'tcx>( } } + // In `rustc`, we present const-eval errors from the outer-most place first to the inner-most. + // So we reverse the frames here. The first frame will be the same as the span from the current + // `TyCtxtAt<'_>`, so we remove it as it would be redundant. + frames.reverse(); + if frames.len() > 0 { + frames.remove(0); + } + if let Some(last) = frames.last_mut() + // If the span is not going to be printed, we don't want the span label for `is_last`. 
+ && tcx.sess.source_map().span_to_snippet(last.span.source_callsite()).is_ok() + { + last.has_label = true; + } + (span, frames) } diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index c08495c012f..b020eeccf71 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -6,6 +6,7 @@ use rustc_abi::WrappingRange; use rustc_errors::codes::*; use rustc_errors::{ Diag, DiagArgValue, DiagCtxtHandle, DiagMessage, Diagnostic, EmissionGuarantee, Level, + MultiSpan, SubdiagMessageOp, Subdiagnostic, }; use rustc_hir::ConstContext; use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; @@ -17,6 +18,7 @@ use rustc_middle::mir::interpret::{ use rustc_middle::ty::{self, Mutability, Ty}; use rustc_span::{Span, Symbol}; +use crate::fluent_generated as fluent; use crate::interpret::InternKind; #[derive(Diagnostic)] @@ -278,14 +280,31 @@ pub(crate) struct NonConstImplNote { pub span: Span, } -#[derive(Subdiagnostic, Clone)] -#[note(const_eval_frame_note)] +#[derive(Clone)] pub struct FrameNote { - #[primary_span] pub span: Span, pub times: i32, pub where_: &'static str, pub instance: String, + pub has_label: bool, +} + +impl Subdiagnostic for FrameNote { + fn add_to_diag_with<G: EmissionGuarantee, F: SubdiagMessageOp<G>>( + self, + diag: &mut Diag<'_, G>, + f: &F, + ) { + diag.arg("times", self.times); + diag.arg("where_", self.where_); + diag.arg("instance", self.instance); + let mut span: MultiSpan = self.span.into(); + if self.has_label && !self.span.is_dummy() { + span.push_span_label(self.span, fluent::const_eval_frame_note_last); + } + let msg = f(diag, fluent::const_eval_frame_note.into()); + diag.span_note(span, msg); + } } #[derive(Subdiagnostic)] @@ -967,7 +986,7 @@ impl ReportErrorExt for ResourceExhaustionInfo { } impl rustc_errors::IntoDiagArg for InternKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(match self { InternKind::Static(Mutability::Not) => "static", InternKind::Static(Mutability::Mut) => "static_mut", diff --git a/compiler/rustc_const_eval/src/interpret/intern.rs b/compiler/rustc_const_eval/src/interpret/intern.rs index 43631330f89..e4b2fe5d153 100644 --- a/compiler/rustc_const_eval/src/interpret/intern.rs +++ b/compiler/rustc_const_eval/src/interpret/intern.rs @@ -104,7 +104,7 @@ fn intern_as_new_static<'tcx>( ) { let feed = tcx.create_def( static_id, - sym::nested, + Some(sym::nested), DefKind::Static { safety: hir::Safety::Safe, mutability: alloc.0.mutability, nested: true }, ); tcx.set_nested_alloc_id_static(alloc_id, feed.def_id()); diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 4f4b6785844..e5af0673629 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -955,18 +955,13 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Handle the effect an FFI call might have on the state of allocations. /// This overapproximates the modifications which external code might make to memory: - /// We set all reachable allocations as initialized, mark all provenances as exposed + /// We set all reachable allocations as initialized, mark all reachable provenances as exposed /// and overwrite them with `Provenance::WILDCARD`. 
- pub fn prepare_for_native_call( - &mut self, - id: AllocId, - initial_prov: M::Provenance, - ) -> InterpResult<'tcx> { - // Expose provenance of the root allocation. - M::expose_provenance(self, initial_prov)?; - + /// + /// The allocations in `ids` are assumed to be already exposed. + pub fn prepare_for_native_call(&mut self, ids: Vec<AllocId>) -> InterpResult<'tcx> { let mut done = FxHashSet::default(); - let mut todo = vec![id]; + let mut todo = ids; while let Some(id) = todo.pop() { if !done.insert(id) { // We already saw this allocation before, don't process it again. @@ -987,6 +982,10 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { todo.push(id); } } + // Also expose the provenance of the interpreter-level allocation, so it can + // be read by FFI. The `black_box` is defensive programming as LLVM likes + // to (incorrectly) optimize away ptr2int casts whose result is unused. + std::hint::black_box(alloc.get_bytes_unchecked_raw().expose_provenance()); // Prepare for possible write from native code if mutable. if info.mutbl.is_mut() { diff --git a/compiler/rustc_const_eval/src/interpret/stack.rs b/compiler/rustc_const_eval/src/interpret/stack.rs index 7d0e0492792..d7b03776bc4 100644 --- a/compiler/rustc_const_eval/src/interpret/stack.rs +++ b/compiler/rustc_const_eval/src/interpret/stack.rs @@ -231,13 +231,19 @@ impl<'tcx> FrameInfo<'tcx> { pub fn as_note(&self, tcx: TyCtxt<'tcx>) -> errors::FrameNote { let span = self.span; if tcx.def_key(self.instance.def_id()).disambiguated_data.data == DefPathData::Closure { - errors::FrameNote { where_: "closure", span, instance: String::new(), times: 0 } + errors::FrameNote { + where_: "closure", + span, + instance: String::new(), + times: 0, + has_label: false, + } } else { let instance = format!("{}", self.instance); // Note: this triggers a `must_produce_diag` state, which means that if we ever get // here we must emit a diagnostic. We should never display a `FrameInfo` unless we // actually want to emit a warning or error to the user. - errors::FrameNote { where_: "instance", span, instance, times: 0 } + errors::FrameNote { where_: "instance", span, instance, times: 0, has_label: false } } } } diff --git a/compiler/rustc_const_eval/src/interpret/validity.rs b/compiler/rustc_const_eval/src/interpret/validity.rs index 40dec0cb39e..eb3f552cd27 100644 --- a/compiler/rustc_const_eval/src/interpret/validity.rs +++ b/compiler/rustc_const_eval/src/interpret/validity.rs @@ -1296,7 +1296,7 @@ impl<'rt, 'tcx, M: Machine<'tcx>> ValueVisitor<'tcx, M> for ValidityVisitor<'rt, self.visit_scalar(b, b_layout)?; } } - BackendRepr::Vector { .. } => { + BackendRepr::SimdVector { .. } => { // No checks here, we assume layout computation gets this right. // (This is harder to check since Miri does not represent these as `Immediate`. We // also cannot use field projections since this might be a newtype around a vector.) 
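With this change `prepare_for_native_call` takes the already-exposed root allocations as a `Vec<AllocId>` and walks everything reachable from them exactly once, exposing provenance as it goes (the `black_box` call exists only to keep the pointer-to-int cast from being optimized away). A toy version of the worklist traversal, with stand-in types rather than the interpreter's:

```rust
use std::collections::{HashMap, HashSet};

// Illustrative stand-in: `AllocId` and the pointer graph are toy types.
type AllocId = u64;

fn visit_reachable(roots: Vec<AllocId>, pointees: &HashMap<AllocId, Vec<AllocId>>) -> Vec<AllocId> {
    let mut done = HashSet::new();
    let mut todo = roots;
    let mut visited = Vec::new();
    while let Some(id) = todo.pop() {
        if !done.insert(id) {
            // Already processed this allocation; don't expose or initialize it twice.
            continue;
        }
        visited.push(id);
        // Every pointer stored in the allocation makes its target reachable as well.
        if let Some(targets) = pointees.get(&id) {
            todo.extend(targets.iter().copied());
        }
    }
    visited
}

fn main() {
    let pointees = HashMap::from([(1, vec![2, 3]), (2, vec![3]), (3, vec![])]);
    let order = visit_reachable(vec![1], &pointees);
    assert_eq!(order.len(), 3); // each of 1, 2, 3 is visited exactly once
}
```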
diff --git a/compiler/rustc_const_eval/src/lib.rs b/compiler/rustc_const_eval/src/lib.rs index ed5489652fb..e03849c32f9 100644 --- a/compiler/rustc_const_eval/src/lib.rs +++ b/compiler/rustc_const_eval/src/lib.rs @@ -1,6 +1,7 @@ // tidy-alphabetical-start #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(rust_logo)] #![feature(assert_matches)] #![feature(box_patterns)] @@ -16,7 +17,6 @@ #![feature(unqualified_local_imports)] #![feature(yeet_expr)] #![warn(unqualified_local_imports)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod check_consts; diff --git a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs index 6426bca2332..d7e97f32bae 100644 --- a/compiler/rustc_const_eval/src/util/check_validity_requirement.rs +++ b/compiler/rustc_const_eval/src/util/check_validity_requirement.rs @@ -117,7 +117,7 @@ fn check_validity_requirement_lax<'tcx>( BackendRepr::ScalarPair(s1, s2) => { scalar_allows_raw_init(s1) && scalar_allows_raw_init(s2) } - BackendRepr::Vector { element: s, count } => count == 0 || scalar_allows_raw_init(s), + BackendRepr::SimdVector { element: s, count } => count == 0 || scalar_allows_raw_init(s), BackendRepr::Memory { .. } => true, // Fields are checked below. }; if !valid { diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml index bdf5494f210..b9a5fc3a1fe 100644 --- a/compiler/rustc_data_structures/Cargo.toml +++ b/compiler/rustc_data_structures/Cargo.toml @@ -29,6 +29,11 @@ thin-vec = "0.2.12" tracing = "0.1" # tidy-alphabetical-end +[dependencies.hashbrown] +version = "0.15.2" +default-features = false +features = ["nightly"] # for may_dangle + [dependencies.parking_lot] version = "0.12" diff --git a/compiler/rustc_data_structures/src/aligned.rs b/compiler/rustc_data_structures/src/aligned.rs index 0e5ecfd9bff..a636d09fcae 100644 --- a/compiler/rustc_data_structures/src/aligned.rs +++ b/compiler/rustc_data_structures/src/aligned.rs @@ -2,10 +2,8 @@ use std::ptr::Alignment; /// Returns the ABI-required minimum alignment of a type in bytes. /// -/// This is equivalent to [`mem::align_of`], but also works for some unsized +/// This is equivalent to [`align_of`], but also works for some unsized /// types (e.g. slices or rustc's `List`s). -/// -/// [`mem::align_of`]: std::mem::align_of pub const fn align_of<T: ?Sized + Aligned>() -> Alignment { T::ALIGN } @@ -15,10 +13,10 @@ pub const fn align_of<T: ?Sized + Aligned>() -> Alignment { /// # Safety /// /// `Self::ALIGN` must be equal to the alignment of `Self`. For sized types it -/// is [`mem::align_of<Self>()`], for unsized types it depends on the type, for +/// is [`align_of::<Self>()`], for unsized types it depends on the type, for /// example `[T]` has alignment of `T`. /// -/// [`mem::align_of<Self>()`]: std::mem::align_of +/// [`align_of::<Self>()`]: align_of pub unsafe trait Aligned { /// Alignment of `Self`. const ALIGN: Alignment; diff --git a/compiler/rustc_data_structures/src/captures.rs b/compiler/rustc_data_structures/src/captures.rs deleted file mode 100644 index 677ccb31454..00000000000 --- a/compiler/rustc_data_structures/src/captures.rs +++ /dev/null @@ -1,8 +0,0 @@ -/// "Signaling" trait used in impl trait to tag lifetimes that you may -/// need to capture but don't really need for other reasons. 
-/// Basically a workaround; see [this comment] for details. -/// -/// [this comment]: https://github.com/rust-lang/rust/issues/34511#issuecomment-373423999 -pub trait Captures<'a> {} - -impl<'a, T: ?Sized> Captures<'a> for T {} diff --git a/compiler/rustc_data_structures/src/flat_map_in_place.rs b/compiler/rustc_data_structures/src/flat_map_in_place.rs index e66b00b7557..6d718059f9c 100644 --- a/compiler/rustc_data_structures/src/flat_map_in_place.rs +++ b/compiler/rustc_data_structures/src/flat_map_in_place.rs @@ -1,4 +1,4 @@ -use std::ptr; +use std::{mem, ptr}; use smallvec::{Array, SmallVec}; use thin_vec::ThinVec; @@ -13,39 +13,44 @@ pub trait FlatMapInPlace<T>: Sized { // The implementation of this method is syntactically identical for all the // different vector types. macro_rules! flat_map_in_place { - () => { + ($vec:ident $( where T: $bound:path)?) => { fn flat_map_in_place<F, I>(&mut self, mut f: F) where F: FnMut(T) -> I, I: IntoIterator<Item = T>, { + struct LeakGuard<'a, T $(: $bound)?>(&'a mut $vec<T>); + + impl<'a, T $(: $bound)?> Drop for LeakGuard<'a, T> { + fn drop(&mut self) { + unsafe { + self.0.set_len(0); // make sure we just leak elements in case of panic + } + } + } + + let this = LeakGuard(self); + let mut read_i = 0; let mut write_i = 0; unsafe { - let mut old_len = self.len(); - self.set_len(0); // make sure we just leak elements in case of panic - - while read_i < old_len { + while read_i < this.0.len() { // move the read_i'th item out of the vector and map it // to an iterator - let e = ptr::read(self.as_ptr().add(read_i)); + let e = ptr::read(this.0.as_ptr().add(read_i)); let iter = f(e).into_iter(); read_i += 1; for e in iter { if write_i < read_i { - ptr::write(self.as_mut_ptr().add(write_i), e); + ptr::write(this.0.as_mut_ptr().add(write_i), e); write_i += 1; } else { // If this is reached we ran out of space // in the middle of the vector. // However, the vector is in a valid state here, // so we just do a somewhat inefficient insert. - self.set_len(old_len); - self.insert(write_i, e); - - old_len = self.len(); - self.set_len(0); + this.0.insert(write_i, e); read_i += 1; write_i += 1; @@ -54,20 +59,23 @@ macro_rules! flat_map_in_place { } // write_i tracks the number of actually written new items. - self.set_len(write_i); + this.0.set_len(write_i); + + // The ThinVec is in a sane state again. Prevent the LeakGuard from leaking the data. 
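The `flat_map_in_place` rewrite above replaces the manual `set_len(0)` bookkeeping with a drop guard: while elements are moved out with raw reads, a panic must not let the vector drop an already-moved element a second time, so the guard zeroes the length (leaking the unvisited tail) unless it is defused with `mem::forget`. A reduced, self-contained sketch of that pattern on a plain `Vec`:

```rust
use std::mem;
use std::ptr;

// If `f` panics mid-iteration, dropping the guard zeroes the length so the vector
// never re-drops an element that was already moved out; the not-yet-visited tail
// is leaked, which is safe (if wasteful).
struct LeakGuard<'a, T>(&'a mut Vec<T>);

impl<T> Drop for LeakGuard<'_, T> {
    fn drop(&mut self) {
        unsafe { self.0.set_len(0) }
    }
}

fn consume_in_place<T>(v: &mut Vec<T>, mut f: impl FnMut(T)) {
    let guard = LeakGuard(v);
    unsafe {
        for i in 0..guard.0.len() {
            // Move the i-th element out; ownership passes to `f`.
            f(ptr::read(guard.0.as_ptr().add(i)));
        }
        // Everything was moved out, so the vector must end up empty either way.
        guard.0.set_len(0);
    }
    // The Vec is in a sane state again; defuse the guard.
    mem::forget(guard);
}

fn main() {
    let mut v = vec![String::from("a"), String::from("b")];
    consume_in_place(&mut v, |s| println!("{s}"));
    assert!(v.is_empty());
}
```

The design choice is leak-on-panic rather than abort or double-drop: leaking is always sound, and the happy path pays only for one `mem::forget`.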
+ mem::forget(this); } } }; } impl<T> FlatMapInPlace<T> for Vec<T> { - flat_map_in_place!(); + flat_map_in_place!(Vec); } impl<T, A: Array<Item = T>> FlatMapInPlace<T> for SmallVec<A> { - flat_map_in_place!(); + flat_map_in_place!(SmallVec where T: Array); } impl<T> FlatMapInPlace<T> for ThinVec<T> { - flat_map_in_place!(); + flat_map_in_place!(ThinVec); } diff --git a/compiler/rustc_data_structures/src/graph/tests.rs b/compiler/rustc_data_structures/src/graph/tests.rs index b69b9dbc4a8..e48b9686c26 100644 --- a/compiler/rustc_data_structures/src/graph/tests.rs +++ b/compiler/rustc_data_structures/src/graph/tests.rs @@ -3,7 +3,7 @@ use std::cmp::max; use super::*; use crate::fx::FxHashMap; -pub struct TestGraph { +pub(super) struct TestGraph { num_nodes: usize, start_node: usize, successors: FxHashMap<usize, Vec<usize>>, @@ -11,7 +11,7 @@ pub struct TestGraph { } impl TestGraph { - pub fn new(start_node: usize, edges: &[(usize, usize)]) -> Self { + pub(super) fn new(start_node: usize, edges: &[(usize, usize)]) -> Self { let mut graph = TestGraph { num_nodes: start_node + 1, start_node, diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs index 66d3834d857..865424fd6bb 100644 --- a/compiler/rustc_data_structures/src/lib.rs +++ b/compiler/rustc_data_structures/src/lib.rs @@ -24,7 +24,6 @@ #![feature(dropck_eyepatch)] #![feature(extend_one)] #![feature(file_buffered)] -#![feature(hash_raw_entry)] #![feature(macro_metavar_expr)] #![feature(map_try_insert)] #![feature(min_specialization)] @@ -48,7 +47,6 @@ pub use rustc_index::static_assert_size; pub mod aligned; pub mod base_n; pub mod binary_search_util; -pub mod captures; pub mod fingerprint; pub mod flat_map_in_place; pub mod flock; diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs index 6ae97222f77..64c64bfa3c2 100644 --- a/compiler/rustc_data_structures/src/marker.rs +++ b/compiler/rustc_data_structures/src/marker.rs @@ -1,3 +1,5 @@ +use std::alloc::Allocator; + #[rustc_on_unimplemented(message = "`{Self}` doesn't implement `DynSend`. 
\ Add it to `rustc_data_structures::marker` or use `IntoDynSyncSend` if it's already `Send`")] // This is an auto trait for types which can be sent across threads if `sync::is_dyn_thread_safe()` @@ -28,8 +30,8 @@ impls_dyn_send_neg!( [*const T where T: ?Sized] [*mut T where T: ?Sized] [std::ptr::NonNull<T> where T: ?Sized] - [std::rc::Rc<T> where T: ?Sized] - [std::rc::Weak<T> where T: ?Sized] + [std::rc::Rc<T, A> where T: ?Sized, A: Allocator] + [std::rc::Weak<T, A> where T: ?Sized, A: Allocator] [std::sync::MutexGuard<'_, T> where T: ?Sized] [std::sync::RwLockReadGuard<'_, T> where T: ?Sized] [std::sync::RwLockWriteGuard<'_, T> where T: ?Sized] @@ -74,6 +76,7 @@ impl_dyn_send!( [crate::sync::RwLock<T> where T: DynSend] [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Send + crate::tagged_ptr::Tag] [rustc_arena::TypedArena<T> where T: DynSend] + [hashbrown::HashTable<T> where T: DynSend] [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend] [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend] [thin_vec::ThinVec<T> where T: DynSend] @@ -96,8 +99,8 @@ impls_dyn_sync_neg!( [std::cell::RefCell<T> where T: ?Sized] [std::cell::UnsafeCell<T> where T: ?Sized] [std::ptr::NonNull<T> where T: ?Sized] - [std::rc::Rc<T> where T: ?Sized] - [std::rc::Weak<T> where T: ?Sized] + [std::rc::Rc<T, A> where T: ?Sized, A: Allocator] + [std::rc::Weak<T, A> where T: ?Sized, A: Allocator] [std::cell::OnceCell<T> where T] [std::sync::mpsc::Receiver<T> where T] [std::sync::mpsc::Sender<T> where T] @@ -151,6 +154,7 @@ impl_dyn_sync!( [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Sync + crate::tagged_ptr::Tag] [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend] [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync] + [hashbrown::HashTable<T> where T: DynSync] [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync] [indexmap::IndexMap<K, V, S> where K: DynSync, V: DynSync, S: DynSync] [smallvec::SmallVec<A> where A: smallvec::Array + DynSync] diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs index 78d69a66edc..f63b201742d 100644 --- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs +++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs @@ -313,8 +313,9 @@ pub struct Error<O, E> { mod helper { use super::*; - pub type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>; + pub(super) type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>; impl<O: ForestObligation> ObligationForest<O> { + #[cfg_attr(not(bootstrap), define_opaque(ObligationTreeIdGenerator))] pub fn new() -> ObligationForest<O> { ObligationForest { nodes: vec![], diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs index 39db551adfb..60f007083ba 100644 --- a/compiler/rustc_data_structures/src/profiling.rs +++ b/compiler/rustc_data_structures/src/profiling.rs @@ -863,15 +863,13 @@ fn get_thread_id() -> u32 { cfg_match! { windows => { pub fn get_resident_set_size() -> Option<usize> { - use std::mem; - use windows::{ Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS}, Win32::System::Threading::GetCurrentProcess, }; let mut pmc = PROCESS_MEMORY_COUNTERS::default(); - let pmc_size = mem::size_of_val(&pmc); + let pmc_size = size_of_val(&pmc); unsafe { K32GetProcessMemoryInfo( GetCurrentProcess(), @@ -889,7 +887,7 @@ cfg_match! 
{ pub fn get_resident_set_size() -> Option<usize> { use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO}; use std::mem; - const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int; + const PROC_TASKINFO_SIZE: c_int = size_of::<proc_taskinfo>() as c_int; unsafe { let mut info: proc_taskinfo = mem::zeroed(); diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs index 65488c73d3c..49cafcb17a0 100644 --- a/compiler/rustc_data_structures/src/sharded.rs +++ b/compiler/rustc_data_structures/src/sharded.rs @@ -1,11 +1,11 @@ use std::borrow::Borrow; -use std::collections::hash_map::RawEntryMut; use std::hash::{Hash, Hasher}; use std::{iter, mem}; use either::Either; +use hashbrown::hash_table::{Entry, HashTable}; -use crate::fx::{FxHashMap, FxHasher}; +use crate::fx::FxHasher; use crate::sync::{CacheAligned, Lock, LockGuard, Mode, is_dyn_thread_safe}; // 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700, @@ -43,10 +43,10 @@ impl<T> Sharded<T> { /// The shard is selected by hashing `val` with `FxHasher`. #[inline] - pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> { + pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> { match self { Self::Single(single) => single, - Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)), + Self::Shards(..) => self.get_shard_by_hash(make_hash(val)), } } @@ -56,12 +56,12 @@ impl<T> Sharded<T> { } #[inline] - pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> { + pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> { match self { Self::Single(single) => single, Self::Shards(shards) => { // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds. - unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 } + unsafe { &shards.get_unchecked(i & (SHARDS - 1)).0 } } } } @@ -69,7 +69,7 @@ impl<T> Sharded<T> { /// The shard is selected by hashing `val` with `FxHasher`. #[inline] #[track_caller] - pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> { + pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> LockGuard<'_, T> { match self { Self::Single(single) => { // Synchronization is disabled so use the `lock_assume_no_sync` method optimized @@ -79,7 +79,7 @@ impl<T> Sharded<T> { // `might_be_dyn_thread_safe` was also false. unsafe { single.lock_assume(Mode::NoSync) } } - Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)), + Self::Shards(..) => self.lock_shard_by_hash(make_hash(val)), } } @@ -91,7 +91,7 @@ impl<T> Sharded<T> { #[inline] #[track_caller] - pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> { + pub fn lock_shard_by_index(&self, i: usize) -> LockGuard<'_, T> { match self { Self::Single(single) => { // Synchronization is disabled so use the `lock_assume_no_sync` method optimized @@ -109,7 +109,7 @@ impl<T> Sharded<T> { // always inbounds. // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating // the lock thus `might_be_dyn_thread_safe` was also true. 
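Several hunks in this section drop the `mem::` prefix (and the corresponding `use std::mem;` imports) from `size_of`, `size_of_val` and `align_of`. To the best of my knowledge these functions have been re-exported from the prelude since Rust 1.80, so they resolve without any import:

```rust
// No `use std::mem;` needed on a sufficiently recent toolchain.
fn main() {
    let pair = (1u32, 2u64);
    println!("size_of::<u64>()   = {}", size_of::<u64>());
    println!("size_of_val(&pair) = {}", size_of_val(&pair));
    println!("align_of::<u64>()  = {}", align_of::<u64>());
}
```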
- unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) } + unsafe { shards.get_unchecked(i & (SHARDS - 1)).0.lock_assume(Mode::Sync) } } } } @@ -140,14 +140,67 @@ pub fn shards() -> usize { 1 } -pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>; +pub type ShardedHashMap<K, V> = Sharded<HashTable<(K, V)>>; impl<K: Eq, V> ShardedHashMap<K, V> { + pub fn with_capacity(cap: usize) -> Self { + Self::new(|| HashTable::with_capacity(cap)) + } pub fn len(&self) -> usize { self.lock_shards().map(|shard| shard.len()).sum() } } +impl<K: Eq + Hash, V> ShardedHashMap<K, V> { + #[inline] + pub fn get<Q>(&self, key: &Q) -> Option<V> + where + K: Borrow<Q>, + Q: Hash + Eq, + V: Clone, + { + let hash = make_hash(key); + let shard = self.lock_shard_by_hash(hash); + let (_, value) = shard.find(hash, |(k, _)| k.borrow() == key)?; + Some(value.clone()) + } + + #[inline] + pub fn get_or_insert_with(&self, key: K, default: impl FnOnce() -> V) -> V + where + V: Copy, + { + let hash = make_hash(&key); + let mut shard = self.lock_shard_by_hash(hash); + + match table_entry(&mut shard, hash, &key) { + Entry::Occupied(e) => e.get().1, + Entry::Vacant(e) => { + let value = default(); + e.insert((key, value)); + value + } + } + } + + #[inline] + pub fn insert(&self, key: K, value: V) -> Option<V> { + let hash = make_hash(&key); + let mut shard = self.lock_shard_by_hash(hash); + + match table_entry(&mut shard, hash, &key) { + Entry::Occupied(e) => { + let previous = mem::replace(&mut e.into_mut().1, value); + Some(previous) + } + Entry::Vacant(e) => { + e.insert((key, value)); + None + } + } + } +} + impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> { #[inline] pub fn intern_ref<Q: ?Sized>(&self, value: &Q, make: impl FnOnce() -> K) -> K @@ -157,13 +210,12 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> { { let hash = make_hash(value); let mut shard = self.lock_shard_by_hash(hash); - let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value); - match entry { - RawEntryMut::Occupied(e) => *e.key(), - RawEntryMut::Vacant(e) => { + match table_entry(&mut shard, hash, value) { + Entry::Occupied(e) => e.get().0, + Entry::Vacant(e) => { let v = make(); - e.insert_hashed_nocheck(hash, v, ()); + e.insert((v, ())); v } } @@ -177,13 +229,12 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> { { let hash = make_hash(&value); let mut shard = self.lock_shard_by_hash(hash); - let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value); - match entry { - RawEntryMut::Occupied(e) => *e.key(), - RawEntryMut::Vacant(e) => { + match table_entry(&mut shard, hash, &value) { + Entry::Occupied(e) => e.get().0, + Entry::Vacant(e) => { let v = make(value); - e.insert_hashed_nocheck(hash, v, ()); + e.insert((v, ())); v } } @@ -200,17 +251,30 @@ impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> { let hash = make_hash(&value); let shard = self.lock_shard_by_hash(hash); let value = value.into_pointer(); - shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some() + shard.find(hash, |(k, ())| k.into_pointer() == value).is_some() } } #[inline] -pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 { +fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 { let mut state = FxHasher::default(); val.hash(&mut state); state.finish() } +#[inline] +fn table_entry<'a, K, V, Q>( + table: &'a mut HashTable<(K, V)>, + hash: u64, + key: &Q, +) -> Entry<'a, (K, V)> +where + K: Hash + Borrow<Q>, + Q: ?Sized + Eq, +{ + table.entry(hash, move |(k, _)| k.borrow() == key, |(k, _)| 
make_hash(k)) +} + /// Get a shard with a pre-computed hash value. If `get_shard_by_value` is /// ever used in combination with `get_shard_by_hash` on a single `Sharded` /// instance, then `hash` must be computed with `FxHasher`. Otherwise, @@ -218,7 +282,7 @@ pub fn make_hash<K: Hash + ?Sized>(val: &K) -> u64 { /// consistently for each `Sharded` instance. #[inline] fn get_shard_hash(hash: u64) -> usize { - let hash_len = mem::size_of::<usize>(); + let hash_len = size_of::<usize>(); // Ignore the top 7 bits as hashbrown uses these and get the next SHARD_BITS highest bits. // hashbrown also uses the lowest bits, so we can't use those (hash >> (hash_len * 8 - 7 - SHARD_BITS)) as usize diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs index 066ea03b4ac..a01a420dfbd 100644 --- a/compiler/rustc_data_structures/src/sorted_map.rs +++ b/compiler/rustc_data_structures/src/sorted_map.rs @@ -1,4 +1,5 @@ use std::borrow::Borrow; +use std::cmp::Ordering; use std::fmt::Debug; use std::mem; use std::ops::{Bound, Index, IndexMut, RangeBounds}; @@ -156,6 +157,38 @@ impl<K: Ord, V> SortedMap<K, V> { &self.data[start..end] } + /// `sm.range_is_empty(r)` == `sm.range(r).is_empty()`, but is faster. + #[inline] + pub fn range_is_empty<R>(&self, range: R) -> bool + where + R: RangeBounds<K>, + { + // `range` must (via `range_slice_indices`) search for the start and + // end separately. But here we can do a single binary search for the + // entire range. If a single `x` matching `range` is found then the + // range is *not* empty. + self.data + .binary_search_by(|(x, _)| { + // Is `x` below `range`? + match range.start_bound() { + Bound::Included(start) if x < start => return Ordering::Less, + Bound::Excluded(start) if x <= start => return Ordering::Less, + _ => {} + }; + + // Is `x` above `range`? + match range.end_bound() { + Bound::Included(end) if x > end => return Ordering::Greater, + Bound::Excluded(end) if x >= end => return Ordering::Greater, + _ => {} + }; + + // `x` must be within `range`. + Ordering::Equal + }) + .is_err() + } + #[inline] pub fn remove_range<R>(&mut self, range: R) where diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs index 37b54fe38ff..616a18a72ab 100644 --- a/compiler/rustc_data_structures/src/sync.rs +++ b/compiler/rustc_data_structures/src/sync.rs @@ -18,42 +18,54 @@ //! //! | Type | Serial version | Parallel version | //! | ----------------------- | ------------------- | ------------------------------- | -//! | `LRef<'a, T>` [^2] | `&'a mut T` | `&'a T` | -//! | | | | //! | `Lock<T>` | `RefCell<T>` | `RefCell<T>` or | //! | | | `parking_lot::Mutex<T>` | //! | `RwLock<T>` | `RefCell<T>` | `parking_lot::RwLock<T>` | //! | `MTLock<T>` [^1] | `T` | `Lock<T>` | -//! | `MTLockRef<'a, T>` [^2] | `&'a mut MTLock<T>` | `&'a MTLock<T>` | //! | | | | //! | `ParallelIterator` | `Iterator` | `rayon::iter::ParallelIterator` | //! //! [^1]: `MTLock` is similar to `Lock`, but the serial version avoids the cost //! of a `RefCell`. This is appropriate when interior mutability is not //! required. -//! -//! [^2]: `MTRef`, `MTLockRef` are type aliases. 
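The `ShardedHashMap` rework above moves from the removed nightly `hash_raw_entry` API to hashbrown's `HashTable`, where the caller supplies the hash, the equality closure, and a re-hasher used when the table grows. A self-contained sketch of that entry-API pattern; it assumes a `hashbrown = "0.15"` dependency, and the key/value shapes are illustrative rather than rustc's:

```rust
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash};

use hashbrown::hash_table::{Entry, HashTable};

fn make_hash<K: Hash + ?Sized>(s: &RandomState, k: &K) -> u64 {
    s.hash_one(k)
}

// Insert-or-lookup keyed only on `K`, even though the table stores `(K, V)` pairs.
fn get_or_insert_with<K: Eq + Hash, V: Copy>(
    table: &mut HashTable<(K, V)>,
    s: &RandomState,
    key: K,
    default: impl FnOnce() -> V,
) -> V {
    let hash = make_hash(s, &key);
    match table.entry(hash, |(k, _)| *k == key, |(k, _)| make_hash(s, k)) {
        Entry::Occupied(e) => e.get().1,
        Entry::Vacant(e) => {
            let value = default();
            e.insert((key, value));
            value
        }
    }
}

fn main() {
    let s = RandomState::new();
    let mut table: HashTable<(String, u32)> = HashTable::new();
    assert_eq!(get_or_insert_with(&mut table, &s, "a".to_string(), || 1), 1);
    // Second lookup hits the existing entry and ignores the new default.
    assert_eq!(get_or_insert_with(&mut table, &s, "a".to_string(), || 2), 1);
}
```

Storing `(K, V)` pairs and hashing only the key keeps the behaviour of the old raw-entry code without the unstable feature gate.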
use std::collections::HashMap; use std::hash::{BuildHasher, Hash}; -pub use crate::marker::*; +pub use parking_lot::{ + MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard, + RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard, +}; -mod lock; +pub use self::atomic::AtomicU64; +pub use self::freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard}; #[doc(no_inline)] -pub use lock::{Lock, LockGuard, Mode}; - -mod worker_local; -pub use worker_local::{Registry, WorkerLocal}; +pub use self::lock::{Lock, LockGuard, Mode}; +pub use self::mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; +pub use self::parallel::{ + join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in, +}; +pub use self::vec::{AppendOnlyIndexVec, AppendOnlyVec}; +pub use self::worker_local::{Registry, WorkerLocal}; +pub use crate::marker::*; +mod freeze; +mod lock; mod parallel; -pub use parallel::{join, par_for_each_in, par_map, parallel_guard, scope, try_par_for_each_in}; -pub use vec::{AppendOnlyIndexVec, AppendOnlyVec}; - mod vec; +mod worker_local; -mod freeze; -pub use freeze::{FreezeLock, FreezeReadGuard, FreezeWriteGuard}; +/// Keep the conditional imports together in a submodule, so that import-sorting +/// doesn't split them up. +mod atomic { + // Most hosts can just use a regular AtomicU64. + #[cfg(target_has_atomic = "64")] + pub use std::sync::atomic::AtomicU64; + + // Some 32-bit hosts don't have AtomicU64, so use a fallback. + #[cfg(not(target_has_atomic = "64"))] + pub use portable_atomic::AtomicU64; +} mod mode { use std::sync::atomic::{AtomicU8, Ordering}; @@ -76,7 +88,7 @@ mod mode { // Whether thread safety might be enabled. #[inline] - pub fn might_be_dyn_thread_safe() -> bool { + pub(super) fn might_be_dyn_thread_safe() -> bool { DYN_THREAD_SAFE_MODE.load(Ordering::Relaxed) != DYN_NOT_THREAD_SAFE } @@ -97,21 +109,6 @@ mod mode { // FIXME(parallel_compiler): Get rid of these aliases across the compiler. -pub use std::sync::OnceLock; -// Use portable AtomicU64 for targets without native 64-bit atomics -#[cfg(target_has_atomic = "64")] -pub use std::sync::atomic::AtomicU64; - -pub use mode::{is_dyn_thread_safe, set_dyn_thread_safe_mode}; -pub use parking_lot::{ - MappedRwLockReadGuard as MappedReadGuard, MappedRwLockWriteGuard as MappedWriteGuard, - RwLockReadGuard as ReadGuard, RwLockWriteGuard as WriteGuard, -}; -#[cfg(not(target_has_atomic = "64"))] -pub use portable_atomic::AtomicU64; - -pub type LRef<'a, T> = &'a T; - #[derive(Debug, Default)] pub struct MTLock<T>(Lock<T>); @@ -142,14 +139,10 @@ impl<T> MTLock<T> { } } -use parking_lot::RwLock as InnerRwLock; - /// This makes locks panic if they are already held. 
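Stepping back one hunk, `SortedMap::range_is_empty` avoids the two binary searches that `range()` needs by searching for any element inside the bounds: if the comparator never returns `Equal`, the range is empty. A standalone version of the same comparator over a sorted slice (toy data, same logic):

```rust
use std::cmp::Ordering;
use std::ops::{Bound, RangeBounds};

// `data` must be sorted by key for the binary search to be meaningful.
fn range_is_empty<K: Ord, V>(data: &[(K, V)], range: impl RangeBounds<K>) -> bool {
    data.binary_search_by(|(x, _)| {
        // Is `x` below the range?
        match range.start_bound() {
            Bound::Included(start) if x < start => return Ordering::Less,
            Bound::Excluded(start) if x <= start => return Ordering::Less,
            _ => {}
        }
        // Is `x` above the range?
        match range.end_bound() {
            Bound::Included(end) if x > end => return Ordering::Greater,
            Bound::Excluded(end) if x >= end => return Ordering::Greater,
            _ => {}
        }
        // Otherwise `x` lies inside the range.
        Ordering::Equal
    })
    .is_err()
}

fn main() {
    let data = [(1, "a"), (3, "b"), (7, "c")];
    assert!(!range_is_empty(&data, 2..4)); // key 3 is inside
    assert!(range_is_empty(&data, 4..7)); // nothing between 4 and 6
}
```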
/// It is only useful when you are running in a single thread const ERROR_CHECKING: bool = false; -pub type MTLockRef<'a, T> = LRef<'a, MTLock<T>>; - #[derive(Default)] #[repr(align(64))] pub struct CacheAligned<T>(pub T); @@ -167,12 +160,12 @@ impl<K: Eq + Hash, V: Eq, S: BuildHasher> HashMapExt<K, V> for HashMap<K, V, S> } #[derive(Debug, Default)] -pub struct RwLock<T>(InnerRwLock<T>); +pub struct RwLock<T>(parking_lot::RwLock<T>); impl<T> RwLock<T> { #[inline(always)] pub fn new(inner: T) -> Self { - RwLock(InnerRwLock::new(inner)) + RwLock(parking_lot::RwLock::new(inner)) } #[inline(always)] diff --git a/compiler/rustc_data_structures/src/sync/parallel.rs b/compiler/rustc_data_structures/src/sync/parallel.rs index 1ba631b8623..8ef8a3f3585 100644 --- a/compiler/rustc_data_structures/src/sync/parallel.rs +++ b/compiler/rustc_data_structures/src/sync/parallel.rs @@ -46,7 +46,7 @@ pub fn parallel_guard<R>(f: impl FnOnce(&ParallelGuard) -> R) -> R { ret } -pub fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB) +fn serial_join<A, B, RA, RB>(oper_a: A, oper_b: B) -> (RA, RB) where A: FnOnce() -> RA, B: FnOnce() -> RB, diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs index 402ec9827bb..d75af009850 100644 --- a/compiler/rustc_data_structures/src/sync/worker_local.rs +++ b/compiler/rustc_data_structures/src/sync/worker_local.rs @@ -106,6 +106,12 @@ pub struct WorkerLocal<T> { registry: Registry, } +// This is safe because the `deref` call will return a reference to a `T` unique to each thread +// or it will panic for threads without an associated local. So there isn't a need for `T` to do +// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id +// can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse. +unsafe impl<T: Send> Sync for WorkerLocal<T> {} + impl<T> WorkerLocal<T> { /// Creates a new worker local where the `initial` closure computes the /// value this worker local should take for each thread in the registry. @@ -132,11 +138,6 @@ impl<T> Deref for WorkerLocal<T> { fn deref(&self) -> &T { // This is safe because `verify` will only return values less than // `self.registry.thread_limit` which is the size of the `self.locals` array. - - // The `deref` call will return a reference to a `T` unique to each thread - // or it will panic for threads without an associated local. So there isn't a need for `T` to do - // it's own synchronization. The `verify` method on `RegistryId` has an issue where the id - // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse. unsafe { &self.locals.get_unchecked(self.registry.id().verify()).0 } } } diff --git a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs index 9c1e4cefa69..85b21a7c8ec 100644 --- a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs +++ b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs @@ -7,7 +7,7 @@ use crate::stable_hasher::{HashStable, StableHasher}; /// A tag type used in [`TaggedRef`] tests. 
#[derive(Copy, Clone, Debug, PartialEq, Eq)] -pub enum Tag2 { +enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, diff --git a/compiler/rustc_driver/src/lib.rs b/compiler/rustc_driver/src/lib.rs index a03834c519d..381309f83b2 100644 --- a/compiler/rustc_driver/src/lib.rs +++ b/compiler/rustc_driver/src/lib.rs @@ -3,6 +3,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(rust_logo)] #![feature(rustdoc_internals)] // tidy-alphabetical-end diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index a2ddff7183e..ed5662da16d 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -7,6 +7,7 @@ // tidy-alphabetical-start #![allow(internal_features)] #![allow(rustc::untranslatable_diagnostic)] // FIXME: make this translatable +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(decl_macro)] @@ -16,7 +17,6 @@ #![feature(result_flattening)] #![feature(rustdoc_internals)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::cmp::max; diff --git a/compiler/rustc_driver_impl/src/pretty.rs b/compiler/rustc_driver_impl/src/pretty.rs index 828a14e707c..16d70af7e05 100644 --- a/compiler/rustc_driver_impl/src/pretty.rs +++ b/compiler/rustc_driver_impl/src/pretty.rs @@ -268,8 +268,7 @@ pub fn print<'tcx>(sess: &Session, ppm: PpMode, ex: PrintExtra<'tcx>) { let tcx = ex.tcx(); let f = |annotation: &dyn pprust_hir::PpAnn| { let sm = sess.source_map(); - let hir_map = tcx.hir(); - let attrs = |id| hir_map.attrs(id); + let attrs = |id| tcx.hir_attrs(id); pprust_hir::print_crate( sm, tcx.hir_root_module(), diff --git a/compiler/rustc_error_codes/src/error_codes/E0133.md b/compiler/rustc_error_codes/src/error_codes/E0133.md index 8ca3f03ce15..854cca3d10f 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0133.md +++ b/compiler/rustc_error_codes/src/error_codes/E0133.md @@ -45,6 +45,7 @@ unsafe fn g() { ``` Linting against this is controlled via the `unsafe_op_in_unsafe_fn` lint, which -is `allow` by default but will be upgraded to `warn` in a future edition. +is `warn` by default in the 2024 edition and `allow` by default in earlier +editions. [unsafe-section]: https://doc.rust-lang.org/book/ch19-01-unsafe-rust.html diff --git a/compiler/rustc_error_codes/src/error_codes/E0373.md b/compiler/rustc_error_codes/src/error_codes/E0373.md index d4d26007aa5..b9db8072597 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0373.md +++ b/compiler/rustc_error_codes/src/error_codes/E0373.md @@ -3,7 +3,7 @@ A captured variable in a closure may not live long enough. Erroneous code example: ```compile_fail,E0373 -fn foo() -> Box<Fn(u32) -> u32> { +fn foo() -> Box<dyn Fn(u32) -> u32> { let x = 0u32; Box::new(|y| x + y) } @@ -42,7 +42,7 @@ This approach moves (or copies, where possible) data into the closure, rather than taking references to it. 
For example: ``` -fn foo() -> Box<Fn(u32) -> u32> { +fn foo() -> Box<dyn Fn(u32) -> u32> { let x = 0u32; Box::new(move |y| x + y) } diff --git a/compiler/rustc_error_codes/src/error_codes/E0792.md b/compiler/rustc_error_codes/src/error_codes/E0792.md index 5e3dcc4aa72..033e1c65192 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0792.md +++ b/compiler/rustc_error_codes/src/error_codes/E0792.md @@ -7,6 +7,7 @@ This means type Foo<T> = impl std::fmt::Debug; +#[define_opaque(Foo)] fn foo() -> Foo<u32> { 5u32 } @@ -19,6 +20,7 @@ is not accepted. If it were accepted, one could create unsound situations like type Foo<T> = impl Default; +#[define_opaque(Foo)] fn foo() -> Foo<u32> { 5u32 } @@ -36,6 +38,7 @@ Instead you need to make the function generic: type Foo<T> = impl std::fmt::Debug; +#[define_opaque(Foo)] fn foo<U>() -> Foo<U> { 5u32 } @@ -56,6 +59,7 @@ use std::fmt::Debug; type Foo<T: Debug> = impl Debug; +#[define_opaque(Foo)] fn foo<U: Debug>() -> Foo<U> { Vec::<U>::new() } diff --git a/compiler/rustc_error_codes/src/error_codes/E0804.md b/compiler/rustc_error_codes/src/error_codes/E0804.md new file mode 100644 index 00000000000..9a6937c0b52 --- /dev/null +++ b/compiler/rustc_error_codes/src/error_codes/E0804.md @@ -0,0 +1,41 @@ +An auto trait cannot be added to the bounds of a `dyn Trait` type via +a pointer cast. + +Erroneous code example: + +```rust,edition2021,compile_fail,E0804 +let ptr: *const dyn core::any::Any = &(); +_ = ptr as *const (dyn core::any::Any + Send); +``` + +Adding an auto trait can make the vtable invalid, potentially causing +UB in safe code afterwards. For example: + +```rust,edition2021,no_run +use core::{mem::transmute, ptr::NonNull}; + +trait Trait { + fn f(&self) + where + Self: Send; +} + +impl Trait for NonNull<()> { + fn f(&self) { + unreachable!() + } +} + +fn main() { + let unsend: &dyn Trait = &NonNull::dangling(); + let bad: &(dyn Trait + Send) = unsafe { transmute(unsend) }; + // This crashes, since the vtable for `NonNull as dyn Trait` does + // not have an entry for `Trait::f`. + bad.f(); +} +``` + +To fix this error, you can use `transmute` rather than pointer casts, +but you must ensure that the vtable is valid for the pointer's type +before calling a method on the trait object or allowing other code to +do so. diff --git a/compiler/rustc_error_codes/src/lib.rs b/compiler/rustc_error_codes/src/lib.rs index 098ca42be2b..dfeef5a957d 100644 --- a/compiler/rustc_error_codes/src/lib.rs +++ b/compiler/rustc_error_codes/src/lib.rs @@ -6,7 +6,6 @@ #![deny(rustdoc::invalid_codeblock_attributes)] #![doc(rust_logo)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end // This higher-order macro defines the error codes that are in use. It is used @@ -547,6 +546,7 @@ E0800: 0800, E0801: 0801, E0802: 0802, E0803: 0803, +E0804: 0804, ); ) } diff --git a/compiler/rustc_error_messages/src/lib.rs b/compiler/rustc_error_messages/src/lib.rs index ba1c3e185c2..066546ecf74 100644 --- a/compiler/rustc_error_messages/src/lib.rs +++ b/compiler/rustc_error_messages/src/lib.rs @@ -4,7 +4,6 @@ #![feature(rustc_attrs)] #![feature(rustdoc_internals)] #![feature(type_alias_impl_trait)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::borrow::Cow; @@ -209,6 +208,7 @@ pub type LazyFallbackBundle = Arc<LazyLock<FluentBundle, impl FnOnce() -> Fluent /// Return the default `FluentBundle` with standard "en-US" diagnostic messages. 
#[instrument(level = "trace", skip(resources))] +#[cfg_attr(not(bootstrap), define_opaque(LazyFallbackBundle))] pub fn fallback_fluent_bundle( resources: Vec<&'static str>, with_directionality_markers: bool, diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index 29a74ed3f4e..9f4d2ea5c1a 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -148,11 +148,17 @@ where /// converted rather than on `DiagArgValue`, which enables types from other `rustc_*` crates to /// implement this. pub trait IntoDiagArg { - fn into_diag_arg(self) -> DiagArgValue; + /// Convert `Self` into a `DiagArgValue` suitable for rendering in a diagnostic. + /// + /// It takes a `path` where "long values" could be written to, if the `DiagArgValue` is too big + /// for displaying on the terminal. This path comes from the `Diag` itself. When rendering + /// values that come from `TyCtxt`, like `Ty<'_>`, they can use `TyCtxt::short_string`. If a + /// value has no shortening logic that could be used, the argument can be safely ignored. + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue; } impl IntoDiagArg for DiagArgValue { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { self } } @@ -395,7 +401,7 @@ impl DiagInner { } pub(crate) fn arg(&mut self, name: impl Into<DiagArgName>, arg: impl IntoDiagArg) { - self.args.insert(name.into(), arg.into_diag_arg()); + self.args.insert(name.into(), arg.into_diag_arg(&mut self.long_ty_path)); } /// Fields used for Hash, and PartialEq trait. @@ -484,7 +490,7 @@ pub struct Diag<'a, G: EmissionGuarantee = ErrorGuaranteed> { // would be bad. impl<G> !Clone for Diag<'_, G> {} -rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * std::mem::size_of::<usize>()); +rustc_data_structures::static_assert_size!(Diag<'_, ()>, 3 * size_of::<usize>()); impl<G: EmissionGuarantee> Deref for Diag<'_, G> { type Target = DiagInner; diff --git a/compiler/rustc_errors/src/diagnostic_impls.rs b/compiler/rustc_errors/src/diagnostic_impls.rs index db6532f41ea..cb2e1769fa1 100644 --- a/compiler/rustc_errors/src/diagnostic_impls.rs +++ b/compiler/rustc_errors/src/diagnostic_impls.rs @@ -25,8 +25,8 @@ use crate::{ pub struct DiagArgFromDisplay<'a>(pub &'a dyn fmt::Display); impl IntoDiagArg for DiagArgFromDisplay<'_> { - fn into_diag_arg(self) -> DiagArgValue { - self.0.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.0.to_string().into_diag_arg(path) } } @@ -43,8 +43,8 @@ impl<'a, T: fmt::Display> From<&'a T> for DiagArgFromDisplay<'a> { } impl<'a, T: Clone + IntoDiagArg> IntoDiagArg for &'a T { - fn into_diag_arg(self) -> DiagArgValue { - self.clone().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.clone().into_diag_arg(path) } } @@ -53,8 +53,8 @@ macro_rules! into_diag_arg_using_display { ($( $ty:ty ),+ $(,)?) => { $( impl IntoDiagArg for $ty { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(path) } } )+ @@ -65,13 +65,13 @@ macro_rules! into_diag_arg_for_number { ($( $ty:ty ),+ $(,)?) 
=> { $( impl IntoDiagArg for $ty { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { // Convert to a string if it won't fit into `Number`. #[allow(irrefutable_let_patterns)] if let Ok(n) = TryInto::<i32>::try_into(self) { DiagArgValue::Number(n) } else { - self.to_string().into_diag_arg() + self.to_string().into_diag_arg(path) } } } @@ -98,32 +98,32 @@ into_diag_arg_using_display!( ); impl IntoDiagArg for RustcVersion { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.to_string())) } } impl<I: rustc_type_ir::Interner> IntoDiagArg for rustc_type_ir::TraitRef<I> { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(path) } } impl<I: rustc_type_ir::Interner> IntoDiagArg for rustc_type_ir::ExistentialTraitRef<I> { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(path) } } impl<I: rustc_type_ir::Interner> IntoDiagArg for rustc_type_ir::UnevaluatedConst<I> { - fn into_diag_arg(self) -> DiagArgValue { - format!("{self:?}").into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + format!("{self:?}").into_diag_arg(path) } } impl<I: rustc_type_ir::Interner> IntoDiagArg for rustc_type_ir::FnSig<I> { - fn into_diag_arg(self) -> DiagArgValue { - format!("{self:?}").into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + format!("{self:?}").into_diag_arg(path) } } @@ -131,15 +131,15 @@ impl<I: rustc_type_ir::Interner, T> IntoDiagArg for rustc_type_ir::Binder<I, T> where T: IntoDiagArg, { - fn into_diag_arg(self) -> DiagArgValue { - self.skip_binder().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.skip_binder().into_diag_arg(path) } } into_diag_arg_for_number!(i8, u8, i16, u16, i32, u32, i64, u64, i128, u128, isize, usize); impl IntoDiagArg for bool { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { if self { DiagArgValue::Str(Cow::Borrowed("true")) } else { @@ -149,13 +149,13 @@ impl IntoDiagArg for bool { } impl IntoDiagArg for char { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(format!("{self:?}"))) } } impl IntoDiagArg for Vec<char> { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::StrListSepByAnd( self.into_iter().map(|c| Cow::Owned(format!("{c:?}"))).collect(), ) @@ -163,49 +163,49 @@ impl IntoDiagArg for Vec<char> { } impl IntoDiagArg for Symbol { - fn into_diag_arg(self) -> DiagArgValue { - self.to_ident_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_ident_string().into_diag_arg(path) } } impl<'a> IntoDiagArg for &'a str { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(path) } } impl IntoDiagArg for String { - fn 
into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self)) } } impl<'a> IntoDiagArg for Cow<'a, str> { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.into_owned())) } } impl<'a> IntoDiagArg for &'a Path { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.display().to_string())) } } impl IntoDiagArg for PathBuf { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.display().to_string())) } } impl IntoDiagArg for PanicStrategy { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.desc().to_string())) } } impl IntoDiagArg for hir::ConstContext { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(match self { hir::ConstContext::ConstFn => "const_fn", hir::ConstContext::Static(_) => "static", @@ -215,49 +215,49 @@ impl IntoDiagArg for hir::ConstContext { } impl IntoDiagArg for ast::Expr { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(pprust::expr_to_string(&self))) } } impl IntoDiagArg for ast::Path { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(pprust::path_to_string(&self))) } } impl IntoDiagArg for ast::token::Token { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(pprust::token_to_string(&self)) } } impl IntoDiagArg for ast::token::TokenKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(pprust::token_kind_to_string(&self)) } } impl IntoDiagArg for FloatTy { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(self.name_str())) } } impl IntoDiagArg for std::ffi::CString { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.to_string_lossy().into_owned())) } } impl IntoDiagArg for rustc_data_structures::small_c_str::SmallCStr { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(self.to_string_lossy().into_owned())) } } impl IntoDiagArg for ast::Visibility { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { let s = pprust::vis_to_string(&self); let s = s.trim_end().to_string(); DiagArgValue::Str(Cow::Owned(s)) @@ -265,49 +265,49 @@ impl IntoDiagArg for ast::Visibility { } impl IntoDiagArg for rustc_lint_defs::Level { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(self.to_cmd_flag())) } } impl<Id> IntoDiagArg for 
hir::def::Res<Id> { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(self.descr())) } } impl IntoDiagArg for DiagLocation { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::from(self.to_string())) } } impl IntoDiagArg for Backtrace { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::from(self.to_string())) } } impl IntoDiagArg for Level { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::from(self.to_string())) } } impl IntoDiagArg for ClosureKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(self.as_str().into()) } } impl IntoDiagArg for hir::def::Namespace { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(self.descr())) } } impl IntoDiagArg for ExprPrecedence { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Number(self as i32) } } @@ -328,7 +328,7 @@ impl<S> FromIterator<S> for DiagSymbolList<S> { } impl<S: std::fmt::Display> IntoDiagArg for DiagSymbolList<S> { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::StrListSepByAnd( self.0.into_iter().map(|sym| Cow::Owned(format!("`{sym}`"))).collect(), ) diff --git a/compiler/rustc_errors/src/emitter.rs b/compiler/rustc_errors/src/emitter.rs index f7f84239308..21255fcca96 100644 --- a/compiler/rustc_errors/src/emitter.rs +++ b/compiler/rustc_errors/src/emitter.rs @@ -113,24 +113,11 @@ impl Margin { self.computed_left > 0 } - fn was_cut_right(&self, line_len: usize) -> bool { - let right = - if self.computed_right == self.span_right || self.computed_right == self.label_right { - // FIXME: This comment refers to the only callsite of this method. - // Rephrase it or refactor it, so it can stand on its own. - // Account for the "..." padding given above. Otherwise we end up with code lines - // that do fit but end in "..." as if they were trimmed. - // FIXME: Don't hard-code this offset. Is this meant to represent - // `2 * str_width(self.margin())`? - self.computed_right - 6 - } else { - self.computed_right - }; - right < line_len && self.computed_left + self.column_width < line_len - } - fn compute(&mut self, max_line_len: usize) { // When there's a lot of whitespace (>20), we want to trim it as it is useless. + // FIXME: this doesn't account for '\t', but to do so correctly we need to perform that + // calculation later, right before printing in order to be accurate with both unicode + // handling and trimming of long lines. self.computed_left = if self.whitespace_left > 20 { self.whitespace_left - 16 // We want some padding. 
} else { @@ -616,7 +603,6 @@ pub struct HumanEmitter { #[setters(skip)] fallback_bundle: LazyFallbackBundle, short_message: bool, - teach: bool, ui_testing: bool, ignored_directories_in_source_blocks: Vec<String>, diagnostic_width: Option<usize>, @@ -642,7 +628,6 @@ impl HumanEmitter { fluent_bundle: None, fallback_bundle, short_message: false, - teach: false, ui_testing: false, ignored_directories_in_source_blocks: Vec::new(), diagnostic_width: None, @@ -670,43 +655,43 @@ impl HumanEmitter { width_offset: usize, code_offset: usize, margin: Margin, - ) { - // Tabs are assumed to have been replaced by spaces in calling code. - debug_assert!(!source_string.contains('\t')); + ) -> usize { let line_len = source_string.len(); // Create the source line we will highlight. let left = margin.left(line_len); let right = margin.right(line_len); // FIXME: The following code looks fishy. See #132860. // On long lines, we strip the source line, accounting for unicode. - let mut taken = 0; let code: String = source_string .chars() - .skip(left) - .take_while(|ch| { - // Make sure that the trimming on the right will fall within the terminal width. - let next = char_width(*ch); - if taken + next > right - left { - return false; - } - taken += next; - true - }) + .enumerate() + .skip_while(|(i, _)| *i < left) + .take_while(|(i, _)| *i < right) + .map(|(_, c)| c) .collect(); + let code = normalize_whitespace(&code); + let was_cut_right = + source_string.chars().enumerate().skip_while(|(i, _)| *i < right).next().is_some(); buffer.puts(line_offset, code_offset, &code, Style::Quotation); let placeholder = self.margin(); if margin.was_cut_left() { // We have stripped some code/whitespace from the beginning, make it clear. buffer.puts(line_offset, code_offset, placeholder, Style::LineNumber); } - if margin.was_cut_right(line_len) { + if was_cut_right { let padding = str_width(placeholder); // We have stripped some code after the rightmost span end, make it clear we did so. - buffer.puts(line_offset, code_offset + taken - padding, placeholder, Style::LineNumber); + buffer.puts( + line_offset, + code_offset + str_width(&code) - padding, + placeholder, + Style::LineNumber, + ); } buffer.puts(line_offset, 0, &self.maybe_anonymized(line_index), Style::LineNumber); self.draw_col_separator_no_space(buffer, line_offset, width_offset - 2); + left } #[instrument(level = "trace", skip(self), ret)] @@ -738,22 +723,16 @@ impl HumanEmitter { return Vec::new(); } - let source_string = match file.get_line(line.line_index - 1) { - Some(s) => normalize_whitespace(&s), - None => return Vec::new(), + let Some(source_string) = file.get_line(line.line_index - 1) else { + return Vec::new(); }; trace!(?source_string); let line_offset = buffer.num_lines(); - // Left trim - let left = margin.left(source_string.len()); - + // Left trim. // FIXME: This looks fishy. See #132860. - // Account for unicode characters of width !=0 that were removed. 
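The `draw_line` rework above stops tracking display widths while collecting the visible window and instead slices by character index, checking separately whether anything was cut off on the right. A small sketch of that windowing:

```rust
// Keep the characters whose file index falls in [left, right) and report whether
// the line continued past the right edge.
fn window(source: &str, left: usize, right: usize) -> (String, bool) {
    let code: String = source
        .chars()
        .enumerate()
        .skip_while(|(i, _)| *i < left)
        .take_while(|(i, _)| *i < right)
        .map(|(_, c)| c)
        .collect();
    let was_cut_right = source.chars().nth(right).is_some();
    (code, was_cut_right)
}

fn main() {
    let line = "0123456789abcdefghij";
    let (code, cut) = window(line, 2, 10);
    assert_eq!(code, "23456789");
    assert!(cut); // "abcdefghij" was trimmed away on the right
}
```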
- let left = source_string.chars().take(left).map(|ch| char_width(ch)).sum(); - - self.draw_line( + let left = self.draw_line( buffer, &source_string, line.line_index, @@ -784,7 +763,7 @@ impl HumanEmitter { let mut short_start = true; for ann in &line.annotations { if let AnnotationType::MultilineStart(depth) = ann.annotation_type { - if source_string.chars().take(ann.start_col.display).all(|c| c.is_whitespace()) { + if source_string.chars().take(ann.start_col.file).all(|c| c.is_whitespace()) { let uline = self.underline(ann.is_primary); let chr = uline.multiline_whole_line; annotations.push((depth, uline.style)); @@ -903,11 +882,16 @@ impl HumanEmitter { // | x_span // <EMPTY LINE> // + let mut overlap = vec![false; annotations.len()]; let mut annotations_position = vec![]; let mut line_len: usize = 0; let mut p = 0; for (i, annotation) in annotations.iter().enumerate() { for (j, next) in annotations.iter().enumerate() { + if overlaps(next, annotation, 0) && j > i { + overlap[i] = true; + overlap[j] = true; + } if overlaps(next, annotation, 0) // This label overlaps with another one and both && annotation.has_label() // take space (they have text and are not && j > i // multiline lines). @@ -1035,24 +1019,21 @@ impl HumanEmitter { let pos = pos + 1; match annotation.annotation_type { AnnotationType::MultilineStart(depth) | AnnotationType::MultilineEnd(depth) => { + let pre: usize = source_string + .chars() + .take(annotation.start_col.file) + .skip(left) + .map(|c| char_width(c)) + .sum(); self.draw_range( buffer, underline.multiline_horizontal, line_offset + pos, width_offset + depth, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset + pre, underline.style, ); } - _ if self.teach => { - buffer.set_style_range( - line_offset, - (code_offset + annotation.start_col.display).saturating_sub(left), - (code_offset + annotation.end_col.display).saturating_sub(left), - underline.style, - annotation.is_primary, - ); - } _ => {} } } @@ -1072,11 +1053,18 @@ impl HumanEmitter { let underline = self.underline(annotation.is_primary); let pos = pos + 1; + let code_offset = code_offset + + source_string + .chars() + .take(annotation.start_col.file) + .skip(left) + .map(|c| char_width(c)) + .sum::<usize>(); if pos > 1 && (annotation.has_label() || annotation.takes_space()) { for p in line_offset + 1..=line_offset + pos { buffer.putc( p, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset, match annotation.annotation_type { AnnotationType::MultilineLine(_) => underline.multiline_vertical, _ => underline.vertical_text_line, @@ -1087,7 +1075,7 @@ impl HumanEmitter { if let AnnotationType::MultilineStart(_) = annotation.annotation_type { buffer.putc( line_offset + pos, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset, underline.bottom_right, underline.style, ); @@ -1097,7 +1085,7 @@ impl HumanEmitter { { buffer.putc( line_offset + pos, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset, underline.multiline_bottom_right_with_text, underline.style, ); @@ -1155,13 +1143,30 @@ impl HumanEmitter { let style = if annotation.is_primary { Style::LabelPrimary } else { Style::LabelSecondary }; let (pos, col) = if pos == 0 { - if annotation.end_col.display == 0 { - (pos + 1, (annotation.end_col.display + 2).saturating_sub(left)) + let pre: usize = source_string + .chars() + .take(annotation.end_col.file) + .skip(left) + .map(|c| char_width(c)) + .sum(); + if annotation.end_col.file == 0 { + 
(pos + 1, (pre + 2)) } else { - (pos + 1, (annotation.end_col.display + 1).saturating_sub(left)) + let pad = if annotation.end_col.file - annotation.start_col.file == 0 { + 2 + } else { + 1 + }; + (pos + 1, (pre + pad)) } } else { - (pos + 2, annotation.start_col.display.saturating_sub(left)) + let pre: usize = source_string + .chars() + .take(annotation.start_col.file) + .skip(left) + .map(|c| char_width(c)) + .sum(); + (pos + 2, pre) }; if let Some(ref label) = annotation.label { buffer.puts(line_offset + pos, code_offset + col, label, style); @@ -1194,14 +1199,35 @@ impl HumanEmitter { // | _^ test for &(pos, annotation) in &annotations_position { let uline = self.underline(annotation.is_primary); - for p in annotation.start_col.display..annotation.end_col.display { + let width = annotation.end_col.file - annotation.start_col.file; + let previous: String = + source_string.chars().take(annotation.start_col.file).skip(left).collect(); + let underlined: String = + source_string.chars().skip(annotation.start_col.file).take(width).collect(); + debug!(?previous, ?underlined); + let code_offset = code_offset + + source_string + .chars() + .take(annotation.start_col.file) + .skip(left) + .map(|c| char_width(c)) + .sum::<usize>(); + let ann_width: usize = source_string + .chars() + .skip(annotation.start_col.file) + .take(width) + .map(|c| char_width(c)) + .sum(); + let ann_width = if ann_width == 0 + && matches!(annotation.annotation_type, AnnotationType::Singleline) + { + 1 + } else { + ann_width + }; + for p in 0..ann_width { // The default span label underline. - buffer.putc( - line_offset + 1, - (code_offset + p).saturating_sub(left), - uline.underline, - uline.style, - ); + buffer.putc(line_offset + 1, code_offset + p, uline.underline, uline.style); } if pos == 0 @@ -1213,7 +1239,7 @@ impl HumanEmitter { // The beginning of a multiline span with its leftward moving line on the same line. buffer.putc( line_offset + 1, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset, match annotation.annotation_type { AnnotationType::MultilineStart(_) => uline.top_right_flat, AnnotationType::MultilineEnd(_) => uline.multiline_end_same_line, @@ -1231,7 +1257,7 @@ impl HumanEmitter { // so we start going down first. buffer.putc( line_offset + 1, - (code_offset + annotation.start_col.display).saturating_sub(left), + code_offset, match annotation.annotation_type { AnnotationType::MultilineStart(_) => uline.multiline_start_down, AnnotationType::MultilineEnd(_) => uline.multiline_end_up, @@ -1241,11 +1267,37 @@ impl HumanEmitter { ); } else if pos != 0 && annotation.has_label() { // The beginning of a span label with an actual label, we'll point down. - buffer.putc( + buffer.putc(line_offset + 1, code_offset, uline.label_start, uline.style); + } + } + + // We look for individual *long* spans, and we trim the *middle*, so that we render + // LL | ...= [0, 0, 0, ..., 0, 0]; + // | ^^^^^^^^^^...^^^^^^^ expected `&[u8]`, found `[{integer}; 1680]` + for (i, (_pos, annotation)) in annotations_position.iter().enumerate() { + // Skip cases where multiple spans overlap each other. + if overlap[i] { + continue; + }; + let AnnotationType::Singleline = annotation.annotation_type else { continue }; + let width = annotation.end_col.display - annotation.start_col.display; + if width > margin.column_width * 2 && width > 10 { + // If the terminal is *too* small, we keep at least a tiny bit of the span for + // display. 
+ let pad = max(margin.column_width / 3, 5); + // Code line + buffer.replace( + line_offset, + annotation.start_col.file + pad, + annotation.end_col.file - pad, + self.margin(), + ); + // Underline line + buffer.replace( line_offset + 1, - (code_offset + annotation.start_col.display).saturating_sub(left), - uline.label_start, - uline.style, + annotation.start_col.file + pad, + annotation.end_col.file - pad, + self.margin(), ); } } @@ -1702,17 +1754,11 @@ impl HumanEmitter { // non-rustc_lexer::is_whitespace() chars are reported as an // error (ex. no-break-spaces \u{a0}), and thus can't be considered // for removal during error reporting. + // FIXME: doesn't account for '\t' properly. let leading_whitespace = source_string .chars() .take_while(|c| rustc_lexer::is_whitespace(*c)) - .map(|c| { - match c { - // Tabs are displayed as 4 spaces - '\t' => 4, - _ => 1, - } - }) - .sum(); + .count(); if source_string.chars().any(|c| !rustc_lexer::is_whitespace(c)) { whitespace_margin = min(whitespace_margin, leading_whitespace); } @@ -1726,8 +1772,8 @@ impl HumanEmitter { let mut span_left_margin = usize::MAX; for line in &annotated_file.lines { for ann in &line.annotations { - span_left_margin = min(span_left_margin, ann.start_col.display); - span_left_margin = min(span_left_margin, ann.end_col.display); + span_left_margin = min(span_left_margin, ann.start_col.file); + span_left_margin = min(span_left_margin, ann.end_col.file); } } if span_left_margin == usize::MAX { @@ -1747,12 +1793,12 @@ impl HumanEmitter { .map_or(0, |s| s.len()), ); for ann in &line.annotations { - span_right_margin = max(span_right_margin, ann.start_col.display); - span_right_margin = max(span_right_margin, ann.end_col.display); + span_right_margin = max(span_right_margin, ann.start_col.file); + span_right_margin = max(span_right_margin, ann.end_col.file); // FIXME: account for labels not in the same line let label_right = ann.label.as_ref().map_or(0, |l| l.len() + 1); label_right_margin = - max(label_right_margin, ann.end_col.display + label_right); + max(label_right_margin, ann.end_col.file + label_right); } } @@ -1763,15 +1809,7 @@ impl HumanEmitter { width_offset + annotated_file.multiline_depth + 1 }; - let column_width = if let Some(width) = self.diagnostic_width { - width.saturating_sub(code_offset) - } else if self.ui_testing || cfg!(miri) { - DEFAULT_COLUMN_WIDTH - } else { - termize::dimensions() - .map(|(w, _)| w.saturating_sub(code_offset)) - .unwrap_or(DEFAULT_COLUMN_WIDTH) - }; + let column_width = self.column_width(code_offset); let margin = Margin::new( whitespace_margin, @@ -1928,6 +1966,18 @@ impl HumanEmitter { Ok(()) } + fn column_width(&self, code_offset: usize) -> usize { + if let Some(width) = self.diagnostic_width { + width.saturating_sub(code_offset) + } else if self.ui_testing || cfg!(miri) { + DEFAULT_COLUMN_WIDTH + } else { + termize::dimensions() + .map(|(w, _)| w.saturating_sub(code_offset)) + .unwrap_or(DEFAULT_COLUMN_WIDTH) + } + } + fn emit_suggestion_default( &mut self, span: &MultiSpan, diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index f2b133f5677..80e43ede445 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -14,6 +14,7 @@ #![feature(associated_type_defaults)] #![feature(box_into_inner)] #![feature(box_patterns)] +#![feature(default_field_values)] #![feature(error_reporter)] #![feature(if_let_guard)] #![feature(let_chains)] @@ -24,7 +25,6 @@ #![feature(trait_alias)] #![feature(try_blocks)] #![feature(yeet_expr)] 
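Two ideas recur in the HumanEmitter changes above: measuring on-screen width per character (summing char_width over the prefix) instead of using raw column counts, and collapsing the middle of a single overly long span. A minimal standalone sketch of both follows; display_width and the "..." filler are simplified stand-ins for the compiler's char_width and self.margin() helpers, not the actual implementation.

// Rough display width: tabs count as 4 columns, everything else as 1.
// (The real emitter also has to deal with full-width and zero-width characters.)
fn display_width(s: &str) -> usize {
    s.chars().map(|c| if c == '\t' { 4 } else { 1 }).sum()
}

// Keep `pad` characters on each side of a long span and collapse the middle,
// so the annotated code still fits in the available column width.
fn trim_middle(span_text: &str, max_width: usize, pad: usize) -> String {
    let chars: Vec<char> = span_text.chars().collect();
    if display_width(span_text) <= max_width || chars.len() <= 2 * pad {
        return span_text.to_string();
    }
    let head: String = chars[..pad].iter().collect();
    let tail: String = chars[chars.len() - pad..].iter().collect();
    format!("{head}...{tail}")
}

fn main() {
    let long = "[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]";
    assert_eq!(trim_middle(long, 20, 5), "[0, 0...0, 0]");
}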
-#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate self as rustc_errors; @@ -626,7 +626,6 @@ pub enum StashKey { MaybeFruTypo, CallAssocMethod, AssociatedTypeSuggestion, - MaybeForgetReturn, /// Query cycle detected, stashing in favor of a better error. Cycle, UndeterminedMacroResolution, diff --git a/compiler/rustc_errors/src/markdown/parse.rs b/compiler/rustc_errors/src/markdown/parse.rs index 7a991a2ace7..f02387d8335 100644 --- a/compiler/rustc_errors/src/markdown/parse.rs +++ b/compiler/rustc_errors/src/markdown/parse.rs @@ -40,11 +40,13 @@ type ParseResult<'a> = Option<Parsed<'a>>; /// Parsing context #[derive(Clone, Copy, Debug, PartialEq)] +// The default values are the most common setting for non top-level parsing: not top block, not at +// line start (yes leading whitespace, not escaped). struct Context { /// If true, we are at a the topmost level (not recursing a nested tt) - top_block: bool, + top_block: bool = false, /// Previous character - prev: Prev, + prev: Prev = Prev::Whitespace, } /// Character class preceding this one @@ -57,14 +59,6 @@ enum Prev { Any, } -impl Default for Context { - /// Most common setting for non top-level parsing: not top block, not at - /// line start (yes leading whitespace, not escaped) - fn default() -> Self { - Self { top_block: false, prev: Prev::Whitespace } - } -} - /// Flags to simple parser function #[derive(Clone, Copy, Debug, PartialEq)] enum ParseOpt { @@ -248,7 +242,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> { } let (txt, rest) = parse_to_newline(&buf[1..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. }; let stream = parse_recursive(txt, ctx); Some((MdTree::Heading(level.try_into().unwrap(), stream), rest)) @@ -257,7 +251,7 @@ fn parse_heading(buf: &[u8]) -> ParseResult<'_> { /// Bulleted list fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> { let (txt, rest) = get_indented_section(&buf[2..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. }; let stream = parse_recursive(trim_ascii_start(txt), ctx); (MdTree::UnorderedListItem(stream), rest) } @@ -266,7 +260,7 @@ fn parse_unordered_li(buf: &[u8]) -> Parsed<'_> { fn parse_ordered_li(buf: &[u8]) -> Parsed<'_> { let (num, pos) = ord_list_start(buf).unwrap(); // success tested in caller let (txt, rest) = get_indented_section(&buf[pos..]); - let ctx = Context { top_block: false, prev: Prev::Whitespace }; + let ctx = Context { .. 
}; let stream = parse_recursive(trim_ascii_start(txt), ctx); (MdTree::OrderedListItem(num, stream), rest) } diff --git a/compiler/rustc_errors/src/styled_buffer.rs b/compiler/rustc_errors/src/styled_buffer.rs index 5ca9e9b18f3..790efd0286e 100644 --- a/compiler/rustc_errors/src/styled_buffer.rs +++ b/compiler/rustc_errors/src/styled_buffer.rs @@ -89,6 +89,19 @@ impl StyledBuffer { } } + pub(crate) fn replace(&mut self, line: usize, start: usize, end: usize, string: &str) { + if start == end { + return; + } + if start > self.lines[line].len() || end > self.lines[line].len() { + return; + } + let _ = self.lines[line].drain(start..(end - string.chars().count())); + for (i, c) in string.chars().enumerate() { + self.lines[line][start + i] = StyledChar::new(c, Style::LineNumber); + } + } + /// For given `line` inserts `string` with `style` before old content of that line, /// adding lines if needed pub(crate) fn prepend(&mut self, line: usize, string: &str, style: Style) { diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index 4a250145308..895efe05e11 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -7,7 +7,7 @@ use std::sync::Arc; use rustc_ast::attr::{AttributeExt, MarkedAttrs}; use rustc_ast::ptr::P; -use rustc_ast::token::Nonterminal; +use rustc_ast::token::MetaVarKind; use rustc_ast::tokenstream::TokenStream; use rustc_ast::visit::{AssocCtxt, Visitor}; use rustc_ast::{self as ast, AttrVec, Attribute, HasAttrs, Item, NodeId, PatKind}; @@ -19,7 +19,7 @@ use rustc_feature::Features; use rustc_hir as hir; use rustc_lint_defs::{BufferedEarlyLint, RegisteredTools}; use rustc_parse::MACRO_ARGUMENTS; -use rustc_parse::parser::Parser; +use rustc_parse::parser::{ForceCollect, Parser}; use rustc_session::config::CollapseMacroDebuginfo; use rustc_session::parse::ParseSess; use rustc_session::{Limit, Session}; @@ -53,6 +53,7 @@ pub enum Annotatable { Param(ast::Param), FieldDef(ast::FieldDef), Variant(ast::Variant), + WherePredicate(ast::WherePredicate), Crate(ast::Crate), } @@ -71,6 +72,7 @@ impl Annotatable { Annotatable::Param(p) => p.span, Annotatable::FieldDef(sf) => sf.span, Annotatable::Variant(v) => v.span, + Annotatable::WherePredicate(wp) => wp.span, Annotatable::Crate(c) => c.spans.inner_span, } } @@ -89,6 +91,7 @@ impl Annotatable { Annotatable::Param(p) => p.visit_attrs(f), Annotatable::FieldDef(sf) => sf.visit_attrs(f), Annotatable::Variant(v) => v.visit_attrs(f), + Annotatable::WherePredicate(wp) => wp.visit_attrs(f), Annotatable::Crate(c) => c.visit_attrs(f), } } @@ -107,6 +110,7 @@ impl Annotatable { Annotatable::Param(p) => visitor.visit_param(p), Annotatable::FieldDef(sf) => visitor.visit_field_def(sf), Annotatable::Variant(v) => visitor.visit_variant(v), + Annotatable::WherePredicate(wp) => visitor.visit_where_predicate(wp), Annotatable::Crate(c) => visitor.visit_crate(c), } } @@ -128,6 +132,7 @@ impl Annotatable { | Annotatable::Param(..) | Annotatable::FieldDef(..) | Annotatable::Variant(..) + | Annotatable::WherePredicate(..) | Annotatable::Crate(..) 
=> panic!("unexpected annotatable"), } } @@ -223,6 +228,13 @@ impl Annotatable { } } + pub fn expect_where_predicate(self) -> ast::WherePredicate { + match self { + Annotatable::WherePredicate(wp) => wp, + _ => panic!("expected where predicate"), + } + } + pub fn expect_crate(self) -> ast::Crate { match self { Annotatable::Crate(krate) => krate, @@ -446,6 +458,10 @@ pub trait MacResult { None } + fn make_where_predicates(self: Box<Self>) -> Option<SmallVec<[ast::WherePredicate; 1]>> { + None + } + fn make_crate(self: Box<Self>) -> Option<ast::Crate> { // Fn-like macros cannot produce a crate. unreachable!() @@ -1389,13 +1405,13 @@ pub fn parse_macro_name_and_helper_attrs( /// If this item looks like a specific enums from `rental`, emit a fatal error. /// See #73345 and #83125 for more details. /// FIXME(#73933): Remove this eventually. -fn pretty_printing_compatibility_hack(item: &Item, sess: &Session) { +fn pretty_printing_compatibility_hack(item: &Item, psess: &ParseSess) { let name = item.ident.name; if name == sym::ProceduralMasqueradeDummyType && let ast::ItemKind::Enum(enum_def, _) = &item.kind && let [variant] = &*enum_def.variants && variant.ident.name == sym::Input - && let FileName::Real(real) = sess.source_map().span_to_filename(item.ident.span) + && let FileName::Real(real) = psess.source_map().span_to_filename(item.ident.span) && let Some(c) = real .local_path() .unwrap_or(Path::new("")) @@ -1413,7 +1429,7 @@ fn pretty_printing_compatibility_hack(item: &Item, sess: &Session) { }; if crate_matches { - sess.dcx().emit_fatal(errors::ProcMacroBackCompat { + psess.dcx().emit_fatal(errors::ProcMacroBackCompat { crate_name: "rental".to_string(), fixed_version: "0.5.6".to_string(), }); @@ -1421,7 +1437,7 @@ fn pretty_printing_compatibility_hack(item: &Item, sess: &Session) { } } -pub(crate) fn ann_pretty_printing_compatibility_hack(ann: &Annotatable, sess: &Session) { +pub(crate) fn ann_pretty_printing_compatibility_hack(ann: &Annotatable, psess: &ParseSess) { let item = match ann { Annotatable::Item(item) => item, Annotatable::Stmt(stmt) => match &stmt.kind { @@ -1430,17 +1446,36 @@ pub(crate) fn ann_pretty_printing_compatibility_hack(ann: &Annotatable, sess: &S }, _ => return, }; - pretty_printing_compatibility_hack(item, sess) + pretty_printing_compatibility_hack(item, psess) } -pub(crate) fn nt_pretty_printing_compatibility_hack(nt: &Nonterminal, sess: &Session) { - let item = match nt { - Nonterminal::NtItem(item) => item, - Nonterminal::NtStmt(stmt) => match &stmt.kind { - ast::StmtKind::Item(item) => item, - _ => return, - }, +pub(crate) fn stream_pretty_printing_compatibility_hack( + kind: MetaVarKind, + stream: &TokenStream, + psess: &ParseSess, +) { + let item = match kind { + MetaVarKind::Item => { + let mut parser = Parser::new(psess, stream.clone(), None); + // No need to collect tokens for this simple check. + parser + .parse_item(ForceCollect::No) + .expect("failed to reparse item") + .expect("an actual item") + } + MetaVarKind::Stmt => { + let mut parser = Parser::new(psess, stream.clone(), None); + // No need to collect tokens for this simple check. 
+ let stmt = parser + .parse_stmt(ForceCollect::No) + .expect("failed to reparse") + .expect("an actual stmt"); + match &stmt.kind { + ast::StmtKind::Item(item) => item.clone(), + _ => return, + } + } _ => return, }; - pretty_printing_compatibility_hack(item, sess) + pretty_printing_compatibility_hack(&item, psess) } diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs index 83255b82017..5570c0c38e8 100644 --- a/compiler/rustc_expand/src/config.rs +++ b/compiler/rustc_expand/src/config.rs @@ -328,7 +328,7 @@ impl<'a> StripUnconfigured<'a> { // For inner attributes, we do the same thing for the `!` in `#![attr]`. let mut trees = if cfg_attr.style == AttrStyle::Inner { - let Some(TokenTree::Token(bang_token @ Token { kind: TokenKind::Not, .. }, _)) = + let Some(TokenTree::Token(bang_token @ Token { kind: TokenKind::Bang, .. }, _)) = orig_trees.next() else { panic!("Bad tokens for attribute {cfg_attr:?}"); diff --git a/compiler/rustc_expand/src/expand.rs b/compiler/rustc_expand/src/expand.rs index e3f31ebeca3..87f01be26c2 100644 --- a/compiler/rustc_expand/src/expand.rs +++ b/compiler/rustc_expand/src/expand.rs @@ -227,6 +227,12 @@ ast_fragments! { Variants(SmallVec<[ast::Variant; 1]>) { "variant"; many fn flat_map_variant; fn visit_variant(); fn make_variants; } + WherePredicates(SmallVec<[ast::WherePredicate; 1]>) { + "where predicate"; + many fn flat_map_where_predicate; + fn visit_where_predicate(); + fn make_where_predicates; + } Crate(ast::Crate) { "crate"; one fn visit_crate; fn visit_crate; fn make_crate; } } @@ -259,7 +265,8 @@ impl AstFragmentKind { | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::FieldDefs - | AstFragmentKind::Variants => SupportsMacroExpansion::No, + | AstFragmentKind::Variants + | AstFragmentKind::WherePredicates => SupportsMacroExpansion::No, } } @@ -290,6 +297,9 @@ impl AstFragmentKind { AstFragmentKind::Variants => { AstFragment::Variants(items.map(Annotatable::expect_variant).collect()) } + AstFragmentKind::WherePredicates => AstFragment::WherePredicates( + items.map(Annotatable::expect_where_predicate).collect(), + ), AstFragmentKind::Items => { AstFragment::Items(items.map(Annotatable::expect_item).collect()) } @@ -770,8 +780,8 @@ impl<'a, 'b> MacroExpander<'a, 'b> { } } Err(err) => { - let guar = err.emit(); - fragment_kind.dummy(span, guar) + let _guar = err.emit(); + fragment_kind.expect_from_annotatables(iter::once(item)) } } } @@ -865,7 +875,8 @@ impl<'a, 'b> MacroExpander<'a, 'b> { | Annotatable::GenericParam(..) | Annotatable::Param(..) | Annotatable::FieldDef(..) - | Annotatable::Variant(..) => panic!("unexpected annotatable"), + | Annotatable::Variant(..) + | Annotatable::WherePredicate(..) 
=> panic!("unexpected annotatable"), }; if self.cx.ecfg.features.proc_macro_hygiene() { return; @@ -1002,7 +1013,8 @@ pub fn parse_ast_fragment<'a>( | AstFragmentKind::GenericParams | AstFragmentKind::Params | AstFragmentKind::FieldDefs - | AstFragmentKind::Variants => panic!("unexpected AST fragment kind"), + | AstFragmentKind::Variants + | AstFragmentKind::WherePredicates => panic!("unexpected AST fragment kind"), }) } @@ -1414,6 +1426,19 @@ impl InvocationCollectorNode for ast::Variant { } } +impl InvocationCollectorNode for ast::WherePredicate { + const KIND: AstFragmentKind = AstFragmentKind::WherePredicates; + fn to_annotatable(self) -> Annotatable { + Annotatable::WherePredicate(self) + } + fn fragment_to_output(fragment: AstFragment) -> Self::OutputTy { + fragment.make_where_predicates() + } + fn walk_flat_map<V: MutVisitor>(self, visitor: &mut V) -> Self::OutputTy { + walk_flat_map_where_predicate(visitor, self) + } +} + impl InvocationCollectorNode for ast::FieldDef { const KIND: AstFragmentKind = AstFragmentKind::FieldDefs; fn to_annotatable(self) -> Annotatable { @@ -2116,6 +2141,13 @@ impl<'a, 'b> MutVisitor for InvocationCollector<'a, 'b> { self.flat_map_node(node) } + fn flat_map_where_predicate( + &mut self, + node: ast::WherePredicate, + ) -> SmallVec<[ast::WherePredicate; 1]> { + self.flat_map_node(node) + } + fn flat_map_field_def(&mut self, node: ast::FieldDef) -> SmallVec<[ast::FieldDef; 1]> { self.flat_map_node(node) } diff --git a/compiler/rustc_expand/src/lib.rs b/compiler/rustc_expand/src/lib.rs index 777044e3f33..4222c9fe906 100644 --- a/compiler/rustc_expand/src/lib.rs +++ b/compiler/rustc_expand/src/lib.rs @@ -13,7 +13,6 @@ #![feature(rustdoc_internals)] #![feature(try_blocks)] #![feature(yeet_expr)] -#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate proc_macro as pm; diff --git a/compiler/rustc_expand/src/mbe/macro_check.rs b/compiler/rustc_expand/src/mbe/macro_check.rs index 729dec2bfbd..1a2db233b7a 100644 --- a/compiler/rustc_expand/src/mbe/macro_check.rs +++ b/compiler/rustc_expand/src/mbe/macro_check.rs @@ -432,7 +432,7 @@ fn check_nested_occurrences( } ( NestedMacroState::MacroRules, - &TokenTree::Token(Token { kind: TokenKind::Not, .. }), + &TokenTree::Token(Token { kind: TokenKind::Bang, .. }), ) => { state = NestedMacroState::MacroRulesNot; } diff --git a/compiler/rustc_expand/src/mbe/macro_rules.rs b/compiler/rustc_expand/src/mbe/macro_rules.rs index cc7e3e65105..5577c8902da 100644 --- a/compiler/rustc_expand/src/mbe/macro_rules.rs +++ b/compiler/rustc_expand/src/mbe/macro_rules.rs @@ -690,7 +690,7 @@ fn has_compile_error_macro(rhs: &mbe::TokenTree) -> bool { && let TokenKind::Ident(ident, _) = ident.kind && ident == sym::compile_error && let mbe::TokenTree::Token(bang) = bang - && let TokenKind::Not = bang.kind + && let TokenKind::Bang = bang.kind && let mbe::TokenTree::Delimited(.., del) = args && !del.delim.skip() { @@ -1135,7 +1135,7 @@ fn check_matcher_core<'tt>( && matches!(kind, NonterminalKind::Pat(PatParam { inferred: true })) && matches!( next_token, - TokenTree::Token(token) if *token == BinOp(token::BinOpToken::Or) + TokenTree::Token(token) if *token == token::Or ) { // It is suggestion to use pat_param, for example: $x:pat -> $x:pat_param. 
@@ -1177,7 +1177,7 @@ fn check_matcher_core<'tt>( if kind == NonterminalKind::Pat(PatWithOr) && sess.psess.edition.at_least_rust_2021() - && next_token.is_token(&BinOp(token::BinOpToken::Or)) + && next_token.is_token(&token::Or) { let suggestion = quoted_tt_to_string(&TokenTree::MetaVarDecl( span, @@ -1296,7 +1296,7 @@ fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow { const TOKENS: &[&str] = &["`=>`", "`,`", "`=`", "`|`", "`if`", "`in`"]; match tok { TokenTree::Token(token) => match token.kind { - FatArrow | Comma | Eq | BinOp(token::Or) => IsInFollow::Yes, + FatArrow | Comma | Eq | Or => IsInFollow::Yes, Ident(name, IdentIsRaw::No) if name == kw::If || name == kw::In => { IsInFollow::Yes } @@ -1332,9 +1332,9 @@ fn is_in_follow(tok: &mbe::TokenTree, kind: NonterminalKind) -> IsInFollow { | Colon | Eq | Gt - | BinOp(token::Shr) + | Shr | Semi - | BinOp(token::Or) => IsInFollow::Yes, + | Or => IsInFollow::Yes, Ident(name, IdentIsRaw::No) if name == kw::As || name == kw::Where => { IsInFollow::Yes } diff --git a/compiler/rustc_expand/src/mbe/quoted.rs b/compiler/rustc_expand/src/mbe/quoted.rs index b9a06157907..0ea53627fe7 100644 --- a/compiler/rustc_expand/src/mbe/quoted.rs +++ b/compiler/rustc_expand/src/mbe/quoted.rs @@ -274,7 +274,7 @@ fn parse_tree<'a>( let msg = format!("expected identifier, found `{}`", pprust::token_to_string(token),); sess.dcx().span_err(token.span, msg); - TokenTree::MetaVar(token.span, Ident::empty()) + TokenTree::MetaVar(token.span, Ident::dummy()) } // There are no more tokens. Just return the `$` we already have. @@ -302,8 +302,8 @@ fn parse_tree<'a>( /// `None`. fn kleene_op(token: &Token) -> Option<KleeneOp> { match token.kind { - token::BinOp(token::Star) => Some(KleeneOp::ZeroOrMore), - token::BinOp(token::Plus) => Some(KleeneOp::OneOrMore), + token::Star => Some(KleeneOp::ZeroOrMore), + token::Plus => Some(KleeneOp::OneOrMore), token::Question => Some(KleeneOp::ZeroOrOne), _ => None, } diff --git a/compiler/rustc_expand/src/mbe/transcribe.rs b/compiler/rustc_expand/src/mbe/transcribe.rs index 7fe9fa7937d..cffa4af6ac3 100644 --- a/compiler/rustc_expand/src/mbe/transcribe.rs +++ b/compiler/rustc_expand/src/mbe/transcribe.rs @@ -7,7 +7,7 @@ use rustc_ast::token::{ TokenKind, }; use rustc_ast::tokenstream::{DelimSpacing, DelimSpan, Spacing, TokenStream, TokenTree}; -use rustc_ast::{ExprKind, TyKind}; +use rustc_ast::{ExprKind, StmtKind, TyKind}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{Diag, DiagCtxtHandle, PResult, pluralize}; use rustc_parse::lexer::nfc_normalize; @@ -279,9 +279,9 @@ pub(super) fn transcribe<'a>( if let Some(cur_matched) = lookup_cur_matched(ident, interp, &repeats) { // We wrap the tokens in invisible delimiters, unless they are already wrapped // in invisible delimiters with the same `MetaVarKind`. Because some proc - // macros can't multiple layers of invisible delimiters of the same + // macros can't handle multiple layers of invisible delimiters of the same // `MetaVarKind`. This loses some span info, though it hopefully won't matter. - let mut mk_delimited = |mv_kind, mut stream: TokenStream| { + let mut mk_delimited = |mk_span, mv_kind, mut stream: TokenStream| { if stream.len() == 1 { let tree = stream.iter().next().unwrap(); if let TokenTree::Delimited(_, _, delim, inner) = tree @@ -295,6 +295,7 @@ pub(super) fn transcribe<'a>( // Emit as a token stream within `Delimiter::Invisible` to maintain // parsing priorities. 
marker.visit_span(&mut sp); + with_metavar_spans(|mspans| mspans.insert(mk_span, sp)); // Both the open delim and close delim get the same span, which covers the // `$foo` in the decl macro RHS. TokenTree::Delimited( @@ -322,12 +323,44 @@ pub(super) fn transcribe<'a>( let kind = token::NtLifetime(*ident, *is_raw); TokenTree::token_alone(kind, sp) } + MatchedSingle(ParseNtResult::Item(item)) => { + mk_delimited(item.span, MetaVarKind::Item, TokenStream::from_ast(item)) + } + MatchedSingle(ParseNtResult::Stmt(stmt)) => { + let stream = if let StmtKind::Empty = stmt.kind { + // FIXME: Properly collect tokens for empty statements. + TokenStream::token_alone(token::Semi, stmt.span) + } else { + TokenStream::from_ast(stmt) + }; + mk_delimited(stmt.span, MetaVarKind::Stmt, stream) + } + MatchedSingle(ParseNtResult::Pat(pat, pat_kind)) => mk_delimited( + pat.span, + MetaVarKind::Pat(*pat_kind), + TokenStream::from_ast(pat), + ), MatchedSingle(ParseNtResult::Ty(ty)) => { let is_path = matches!(&ty.kind, TyKind::Path(None, _path)); - mk_delimited(MetaVarKind::Ty { is_path }, TokenStream::from_ast(ty)) + mk_delimited( + ty.span, + MetaVarKind::Ty { is_path }, + TokenStream::from_ast(ty), + ) + } + MatchedSingle(ParseNtResult::Meta(attr_item)) => { + let has_meta_form = attr_item.meta_kind().is_some(); + mk_delimited( + attr_item.span(), + MetaVarKind::Meta { has_meta_form }, + TokenStream::from_ast(attr_item), + ) + } + MatchedSingle(ParseNtResult::Path(path)) => { + mk_delimited(path.span, MetaVarKind::Path, TokenStream::from_ast(path)) } MatchedSingle(ParseNtResult::Vis(vis)) => { - mk_delimited(MetaVarKind::Vis, TokenStream::from_ast(vis)) + mk_delimited(vis.span, MetaVarKind::Vis, TokenStream::from_ast(vis)) } MatchedSingle(ParseNtResult::Nt(nt)) => { // Other variables are emitted into the output stream as groups with diff --git a/compiler/rustc_expand/src/placeholders.rs b/compiler/rustc_expand/src/placeholders.rs index e969f2d4fb5..3a470924c7f 100644 --- a/compiler/rustc_expand/src/placeholders.rs +++ b/compiler/rustc_expand/src/placeholders.rs @@ -188,6 +188,19 @@ pub(crate) fn placeholder( vis, is_placeholder: true, }]), + AstFragmentKind::WherePredicates => { + AstFragment::WherePredicates(smallvec![ast::WherePredicate { + attrs: Default::default(), + id, + span, + kind: ast::WherePredicateKind::BoundPredicate(ast::WhereBoundPredicate { + bound_generic_params: Default::default(), + bounded_ty: ty(), + bounds: Default::default(), + }), + is_placeholder: true, + }]) + } } } @@ -267,6 +280,17 @@ impl MutVisitor for PlaceholderExpander { } } + fn flat_map_where_predicate( + &mut self, + predicate: ast::WherePredicate, + ) -> SmallVec<[ast::WherePredicate; 1]> { + if predicate.is_placeholder { + self.remove(predicate.id).make_where_predicates() + } else { + walk_flat_map_where_predicate(self, predicate) + } + } + fn flat_map_item(&mut self, item: P<ast::Item>) -> SmallVec<[P<ast::Item>; 1]> { match item.kind { ast::ItemKind::MacCall(_) => self.remove(item.id).make_items(), diff --git a/compiler/rustc_expand/src/proc_macro.rs b/compiler/rustc_expand/src/proc_macro.rs index a6cdeaee176..d5af9849e75 100644 --- a/compiler/rustc_expand/src/proc_macro.rs +++ b/compiler/rustc_expand/src/proc_macro.rs @@ -122,7 +122,7 @@ impl MultiItemModifier for DeriveProcMacro { // We had a lint for a long time, but now we just emit a hard error. // Eventually we might remove the special case hard error check // altogether. See #73345. 
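The mk_delimited calls above wrap each matched fragment in Delimiter::Invisible so it keeps behaving as a single unit when reparsed. The observable effect of that grouping, shown with a plain macro_rules! macro on stable Rust:

macro_rules! double {
    // `$e` is substituted as one grouped expression, not as loose tokens,
    // so the expansion behaves like `(1 + 2) * 2` rather than `1 + 2 * 2`.
    ($e:expr) => { $e * 2 };
}

fn main() {
    assert_eq!(double!(1 + 2), 6);
}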
- crate::base::ann_pretty_printing_compatibility_hack(&item, &ecx.sess); + crate::base::ann_pretty_printing_compatibility_hack(&item, &ecx.sess.psess); let input = item.to_tokens(); let stream = { let _timer = diff --git a/compiler/rustc_expand/src/proc_macro_server.rs b/compiler/rustc_expand/src/proc_macro_server.rs index 5581a6e6508..e0269eb6903 100644 --- a/compiler/rustc_expand/src/proc_macro_server.rs +++ b/compiler/rustc_expand/src/proc_macro_server.rs @@ -115,11 +115,43 @@ impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStre while let Some(tree) = iter.next() { let (Token { kind, span }, joint) = match tree.clone() { - tokenstream::TokenTree::Delimited(span, _, delim, tts) => { - let delimiter = pm::Delimiter::from_internal(delim); + tokenstream::TokenTree::Delimited(span, _, mut delim, mut stream) => { + // We used to have an alternative behaviour for crates that + // needed it: a hack used to pass AST fragments to + // attribute and derive macros as a single nonterminal + // token instead of a token stream. Such token needs to be + // "unwrapped" and not represented as a delimited group. We + // had a lint for a long time, but now we just emit a hard + // error. Eventually we might remove the special case hard + // error check altogether. See #73345. + if let Delimiter::Invisible(InvisibleOrigin::MetaVar(kind)) = delim { + crate::base::stream_pretty_printing_compatibility_hack( + kind, + &stream, + rustc.psess(), + ); + } + + // In `mk_delimited` we avoid nesting invisible delimited + // of the same `MetaVarKind`. Here we do the same but + // ignore the `MetaVarKind` because it is discarded when we + // convert it to a `Group`. + while let Delimiter::Invisible(InvisibleOrigin::MetaVar(_)) = delim { + if stream.len() == 1 + && let tree = stream.iter().next().unwrap() + && let tokenstream::TokenTree::Delimited(_, _, delim2, stream2) = tree + && let Delimiter::Invisible(InvisibleOrigin::MetaVar(_)) = delim2 + { + delim = *delim2; + stream = stream2.clone(); + } else { + break; + } + } + trees.push(TokenTree::Group(Group { - delimiter, - stream: Some(tts), + delimiter: pm::Delimiter::from_internal(delim), + stream: Some(stream), span: DelimSpan { open: span.open, close: span.close, @@ -180,28 +212,28 @@ impl FromInternal<(TokenStream, &mut Rustc<'_, '_>)> for Vec<TokenTree<TokenStre Gt => op(">"), AndAnd => op("&&"), OrOr => op("||"), - Not => op("!"), + Bang => op("!"), Tilde => op("~"), - BinOp(Plus) => op("+"), - BinOp(Minus) => op("-"), - BinOp(Star) => op("*"), - BinOp(Slash) => op("/"), - BinOp(Percent) => op("%"), - BinOp(Caret) => op("^"), - BinOp(And) => op("&"), - BinOp(Or) => op("|"), - BinOp(Shl) => op("<<"), - BinOp(Shr) => op(">>"), - BinOpEq(Plus) => op("+="), - BinOpEq(Minus) => op("-="), - BinOpEq(Star) => op("*="), - BinOpEq(Slash) => op("/="), - BinOpEq(Percent) => op("%="), - BinOpEq(Caret) => op("^="), - BinOpEq(And) => op("&="), - BinOpEq(Or) => op("|="), - BinOpEq(Shl) => op("<<="), - BinOpEq(Shr) => op(">>="), + Plus => op("+"), + Minus => op("-"), + Star => op("*"), + Slash => op("/"), + Percent => op("%"), + Caret => op("^"), + And => op("&"), + Or => op("|"), + Shl => op("<<"), + Shr => op(">>"), + PlusEq => op("+="), + MinusEq => op("-="), + StarEq => op("*="), + SlashEq => op("/="), + PercentEq => op("%="), + CaretEq => op("^="), + AndEq => op("&="), + OrEq => op("|="), + ShlEq => op("<<="), + ShrEq => op(">>="), At => op("@"), Dot => op("."), DotDot => op(".."), @@ -279,15 +311,6 @@ impl FromInternal<(TokenStream, &mut 
Rustc<'_, '_>)> for Vec<TokenTree<TokenStre Interpolated(nt) => { let stream = TokenStream::from_nonterminal_ast(&nt); - // We used to have an alternative behaviour for crates that - // needed it: a hack used to pass AST fragments to - // attribute and derive macros as a single nonterminal - // token instead of a token stream. Such token needs to be - // "unwrapped" and not represented as a delimited group. We - // had a lint for a long time, but now we just emit a hard - // error. Eventually we might remove the special case hard - // error check altogether. See #73345. - crate::base::nt_pretty_printing_compatibility_hack(&nt, rustc.ecx.sess); trees.push(TokenTree::Group(Group { delimiter: pm::Delimiter::None, stream: Some(stream), @@ -322,16 +345,16 @@ impl ToInternal<SmallVec<[tokenstream::TokenTree; 2]>> b'=' => Eq, b'<' => Lt, b'>' => Gt, - b'!' => Not, + b'!' => Bang, b'~' => Tilde, - b'+' => BinOp(Plus), - b'-' => BinOp(Minus), - b'*' => BinOp(Star), - b'/' => BinOp(Slash), - b'%' => BinOp(Percent), - b'^' => BinOp(Caret), - b'&' => BinOp(And), - b'|' => BinOp(Or), + b'+' => Plus, + b'-' => Minus, + b'*' => Star, + b'/' => Slash, + b'%' => Percent, + b'^' => Caret, + b'&' => And, + b'|' => Or, b'@' => At, b'.' => Dot, b',' => Comma, @@ -372,10 +395,9 @@ impl ToInternal<SmallVec<[tokenstream::TokenTree; 2]>> suffix, span, }) if symbol.as_str().starts_with('-') => { - let minus = BinOp(BinOpToken::Minus); let symbol = Symbol::intern(&symbol.as_str()[1..]); let integer = TokenKind::lit(token::Integer, symbol, suffix); - let a = tokenstream::TokenTree::token_joint_hidden(minus, span); + let a = tokenstream::TokenTree::token_joint_hidden(Minus, span); let b = tokenstream::TokenTree::token_alone(integer, span); smallvec![a, b] } @@ -385,10 +407,9 @@ impl ToInternal<SmallVec<[tokenstream::TokenTree; 2]>> suffix, span, }) if symbol.as_str().starts_with('-') => { - let minus = BinOp(BinOpToken::Minus); let symbol = Symbol::intern(&symbol.as_str()[1..]); let float = TokenKind::lit(token::Float, symbol, suffix); - let a = tokenstream::TokenTree::token_joint_hidden(minus, span); + let a = tokenstream::TokenTree::token_joint_hidden(Minus, span); let b = tokenstream::TokenTree::token_alone(float, span); smallvec![a, b] } @@ -599,10 +620,7 @@ impl server::TokenStream for Rustc<'_, '_> { Ok(Self::TokenStream::from_iter([ // FIXME: The span of the `-` token is lost when // parsing, so we cannot faithfully recover it here. - tokenstream::TokenTree::token_joint_hidden( - token::BinOp(token::Minus), - e.span, - ), + tokenstream::TokenTree::token_joint_hidden(token::Minus, e.span), tokenstream::TokenTree::token_alone(token::Literal(*token_lit), e.span), ])) } diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 6d16dc00ef4..40857e0066e 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -576,6 +576,13 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ EncodeCrossCrate::Yes, experimental!(patchable_function_entry) ), + // Probably temporary component of min_generic_const_args. 
+ // `#[type_const] const ASSOC: usize;` + gated!( + type_const, Normal, template!(Word), ErrorFollowing, + EncodeCrossCrate::Yes, min_generic_const_args, experimental!(type_const), + ), + // ========================================================================== // Internal attributes: Stability, deprecation, and unsafe: // ========================================================================== @@ -1011,7 +1018,7 @@ pub static BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ ), rustc_attr!( rustc_force_inline, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing, EncodeCrossCrate::Yes, - "#![rustc_force_inline] forces a free function to be inlined" + "#[rustc_force_inline] forces a free function to be inlined" ), // ========================================================================== diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs index 0b034a2ae10..25764755a8f 100644 --- a/compiler/rustc_feature/src/lib.rs +++ b/compiler/rustc_feature/src/lib.rs @@ -15,7 +15,6 @@ #![allow(internal_features)] #![doc(rust_logo)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod accepted; diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs index fc58b66ad35..47e4f9a2fe8 100644 --- a/compiler/rustc_feature/src/removed.rs +++ b/compiler/rustc_feature/src/removed.rs @@ -244,6 +244,8 @@ declare_features! ( /// Allows unnamed fields of struct and union type (removed, unnamed_fields, "1.83.0", Some(49804), Some("feature needs redesign")), (removed, unsafe_no_drop_flag, "1.0.0", None, None), + (removed, unsized_tuple_coercion, "CURRENT_RUSTC_VERSION", Some(42877), + Some("The feature restricts possible layouts for tuples, and this restriction is not worth it.")), /// Allows `union` fields that don't implement `Copy` as long as they don't have any drop glue. (removed, untagged_unions, "1.13.0", Some(55149), Some("unions with `Copy` and `ManuallyDrop` fields are stable; there is no intent to stabilize more")), diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs index 66c26a541f1..3c61bfd1c93 100644 --- a/compiler/rustc_feature/src/unstable.rs +++ b/compiler/rustc_feature/src/unstable.rs @@ -1,6 +1,7 @@ //! List of the unstable feature gates. use std::path::PathBuf; +use std::time::{SystemTime, UNIX_EPOCH}; use rustc_data_structures::fx::FxHashSet; use rustc_span::{Span, Symbol, sym}; @@ -239,7 +240,7 @@ declare_features! ( /// Added for testing unstable lints; perma-unstable. (internal, test_unstable_lint, "1.60.0", None), /// Helps with formatting for `group_imports = "StdExternalCrate"`. - (unstable, unqualified_local_imports, "1.83.0", None), + (unstable, unqualified_local_imports, "1.83.0", Some(138299)), /// Use for stable + negative coherence and strict coherence depending on trait's /// rustc_strict_coherence value. (unstable, with_negative_coherence, "1.60.0", None), @@ -473,6 +474,8 @@ declare_features! ( (unstable, doc_masked, "1.21.0", Some(44027)), /// Allows `dyn* Trait` objects. (incomplete, dyn_star, "1.65.0", Some(102425)), + /// Allows the .use postfix syntax `x.use` and use closures `use |x| { ... }` + (incomplete, ergonomic_clones, "CURRENT_RUSTC_VERSION", Some(132290)), /// Allows exhaustive pattern matching on types that contain uninhabited types. (unstable, exhaustive_patterns, "1.13.0", Some(51085)), /// Allows explicit tail calls via `become` expression. @@ -508,6 +511,8 @@ declare_features! 
( (incomplete, generic_const_exprs, "1.56.0", Some(76560)), /// Allows generic parameters and where-clauses on free & associated const items. (incomplete, generic_const_items, "1.73.0", Some(113521)), + /// Allows the type of const generics to depend on generic parameters + (incomplete, generic_const_parameter_types, "CURRENT_RUSTC_VERSION", Some(137626)), /// Allows any generic constants being used as pattern type range ends (incomplete, generic_pattern_types, "1.86.0", Some(136574)), /// Allows registering static items globally, possibly across crates, to iterate over at runtime. @@ -599,6 +604,8 @@ declare_features! ( (unstable, precise_capturing_in_traits, "1.83.0", Some(130044)), /// Allows macro attributes on expressions, statements and non-inline modules. (unstable, proc_macro_hygiene, "1.30.0", Some(54727)), + /// Allows the use of raw-dylibs on ELF platforms + (incomplete, raw_dylib_elf, "CURRENT_RUSTC_VERSION", Some(135694)), /// Makes `&` and `&mut` patterns eat only one layer of references in Rust 2024. (incomplete, ref_pat_eat_one_layer_2024, "1.79.0", Some(123076)), /// Makes `&` and `&mut` patterns eat only one layer of references in Rust 2024—structural variant @@ -655,16 +662,17 @@ declare_features! ( (internal, unsized_fn_params, "1.49.0", Some(48055)), /// Allows unsized rvalues at arguments and parameters. (incomplete, unsized_locals, "1.30.0", Some(48055)), - /// Allows unsized tuple coercion. - (unstable, unsized_tuple_coercion, "1.20.0", Some(42877)), /// Allows using the `#[used(linker)]` (or `#[used(compiler)]`) attribute. (unstable, used_with_arg, "1.60.0", Some(93798)), + /// Allows use of attributes in `where` clauses. + (unstable, where_clause_attrs, "CURRENT_RUSTC_VERSION", Some(115590)), /// Allows use of x86 `AMX` target-feature attributes and intrinsics (unstable, x86_amx_intrinsics, "1.81.0", Some(126622)), /// Allows use of the `xop` target-feature (unstable, xop_target_feature, "1.81.0", Some(127208)), /// Allows `do yeet` expressions (unstable, yeet_expr, "1.62.0", Some(96373)), + (unstable, yield_expr, "CURRENT_RUSTC_VERSION", Some(43122)), // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way. // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! @@ -681,11 +689,13 @@ impl Features { ) -> Result<(), Box<dyn std::error::Error>> { #[derive(serde::Serialize)] struct LibFeature { + timestamp: u128, symbol: String, } #[derive(serde::Serialize)] struct LangFeature { + timestamp: u128, symbol: String, since: Option<String>, } @@ -699,10 +709,20 @@ impl Features { let metrics_file = std::fs::File::create(metrics_path)?; let metrics_file = std::io::BufWriter::new(metrics_file); + let now = || { + SystemTime::now() + .duration_since(UNIX_EPOCH) + .expect("system time should always be greater than the unix epoch") + .as_nanos() + }; + let lib_features = self .enabled_lib_features .iter() - .map(|EnabledLibFeature { gate_name, .. }| LibFeature { symbol: gate_name.to_string() }) + .map(|EnabledLibFeature { gate_name, .. }| LibFeature { + symbol: gate_name.to_string(), + timestamp: now(), + }) .collect(); let lang_features = self @@ -711,6 +731,7 @@ impl Features { .map(|EnabledLangFeature { gate_name, stable_since, .. 
}| LangFeature { symbol: gate_name.to_string(), since: stable_since.map(|since| since.to_string()), + timestamp: now(), }) .collect(); diff --git a/compiler/rustc_fluent_macro/src/fluent.rs b/compiler/rustc_fluent_macro/src/fluent.rs index d4604c27e6d..b04fd1b48f7 100644 --- a/compiler/rustc_fluent_macro/src/fluent.rs +++ b/compiler/rustc_fluent_macro/src/fluent.rs @@ -78,8 +78,8 @@ fn failed(crate_name: &Ident) -> proc_macro::TokenStream { /// See [rustc_fluent_macro::fluent_messages]. pub(crate) fn fluent_messages(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let crate_name = std::env::var("CARGO_PKG_NAME") - // If `CARGO_PKG_NAME` is missing, then we're probably running in a test, so use + let crate_name = std::env::var("CARGO_CRATE_NAME") + // If `CARGO_CRATE_NAME` is missing, then we're probably running in a test, so use // `no_crate`. .unwrap_or_else(|_| "no_crate".to_string()) .replace("rustc_", ""); diff --git a/compiler/rustc_fluent_macro/src/lib.rs b/compiler/rustc_fluent_macro/src/lib.rs index 3ad51fa1e64..c6e0484b921 100644 --- a/compiler/rustc_fluent_macro/src/lib.rs +++ b/compiler/rustc_fluent_macro/src/lib.rs @@ -7,7 +7,6 @@ #![feature(proc_macro_span)] #![feature(rustdoc_internals)] #![feature(track_path)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use proc_macro::TokenStream; diff --git a/compiler/rustc_graphviz/src/lib.rs b/compiler/rustc_graphviz/src/lib.rs index b5774f64b66..c8f8fd5be02 100644 --- a/compiler/rustc_graphviz/src/lib.rs +++ b/compiler/rustc_graphviz/src/lib.rs @@ -277,7 +277,6 @@ )] #![doc(rust_logo)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::borrow::Cow; diff --git a/compiler/rustc_hir/src/def.rs b/compiler/rustc_hir/src/def.rs index 7e3a8561da0..5f8941d4754 100644 --- a/compiler/rustc_hir/src/def.rs +++ b/compiler/rustc_hir/src/def.rs @@ -253,7 +253,9 @@ impl DefKind { } } - pub fn def_path_data(self, name: Symbol) -> DefPathData { + // Some `DefKind`s require a name, some don't. Panics if one is needed but + // not provided. (`AssocTy` is an exception, see below.) + pub fn def_path_data(self, name: Option<Symbol>) -> DefPathData { match self { DefKind::Mod | DefKind::Struct @@ -264,9 +266,13 @@ impl DefKind { | DefKind::TyAlias | DefKind::ForeignTy | DefKind::TraitAlias - | DefKind::AssocTy | DefKind::TyParam - | DefKind::ExternCrate => DefPathData::TypeNs(name), + | DefKind::ExternCrate => DefPathData::TypeNs(Some(name.unwrap())), + + // An associated type names will be missing for an RPITIT. It will + // later be given a name with `synthetic` in it, if necessary. + DefKind::AssocTy => DefPathData::TypeNs(name), + // It's not exactly an anon const, but wrt DefPathData, there // is no difference. DefKind::Static { nested: true, .. } => DefPathData::AnonConst, @@ -276,9 +282,9 @@ impl DefKind { | DefKind::Static { .. } | DefKind::AssocFn | DefKind::AssocConst - | DefKind::Field => DefPathData::ValueNs(name), - DefKind::Macro(..) => DefPathData::MacroNs(name), - DefKind::LifetimeParam => DefPathData::LifetimeNs(name), + | DefKind::Field => DefPathData::ValueNs(name.unwrap()), + DefKind::Macro(..) => DefPathData::MacroNs(name.unwrap()), + DefKind::LifetimeParam => DefPathData::LifetimeNs(name.unwrap()), DefKind::Ctor(..) 
=> DefPathData::Ctor, DefKind::Use => DefPathData::Use, DefKind::ForeignMod => DefPathData::ForeignMod, @@ -429,7 +435,7 @@ pub enum Res<Id = hir::HirId> { /// mention any generic parameters to allow the following with `min_const_generics`: /// ``` /// # struct Foo; - /// impl Foo { fn test() -> [u8; std::mem::size_of::<Self>()] { todo!() } } + /// impl Foo { fn test() -> [u8; size_of::<Self>()] { todo!() } } /// /// struct Bar([u8; baz::<Self>()]); /// const fn baz<T>() -> usize { 10 } @@ -439,7 +445,7 @@ pub enum Res<Id = hir::HirId> { /// compat lint: /// ``` /// fn foo<T>() { - /// let _bar = [1_u8; std::mem::size_of::<*mut T>()]; + /// let _bar = [1_u8; size_of::<*mut T>()]; /// } /// ``` // FIXME(generic_const_exprs): Remove this bodge once that feature is stable. diff --git a/compiler/rustc_hir/src/definitions.rs b/compiler/rustc_hir/src/definitions.rs index c4c309e77e1..61f5efd9978 100644 --- a/compiler/rustc_hir/src/definitions.rs +++ b/compiler/rustc_hir/src/definitions.rs @@ -271,8 +271,9 @@ pub enum DefPathData { Use, /// A global asm item. GlobalAsm, - /// Something in the type namespace. - TypeNs(Symbol), + /// Something in the type namespace. Will be empty for RPITIT associated + /// types, which are given a synthetic name later, if necessary. + TypeNs(Option<Symbol>), /// Something in the value namespace. ValueNs(Symbol), /// Something in the macro namespace. @@ -410,8 +411,9 @@ impl DefPathData { pub fn get_opt_name(&self) -> Option<Symbol> { use self::DefPathData::*; match *self { - TypeNs(name) if name == kw::Empty => None, - TypeNs(name) | ValueNs(name) | MacroNs(name) | LifetimeNs(name) => Some(name), + TypeNs(name) => name, + + ValueNs(name) | MacroNs(name) | LifetimeNs(name) => Some(name), Impl | ForeignMod | CrateRoot | Use | GlobalAsm | Closure | Ctor | AnonConst | OpaqueTy => None, @@ -421,12 +423,14 @@ impl DefPathData { pub fn name(&self) -> DefPathDataName { use self::DefPathData::*; match *self { - TypeNs(name) if name == kw::Empty => { - DefPathDataName::Anon { namespace: sym::synthetic } - } - TypeNs(name) | ValueNs(name) | MacroNs(name) | LifetimeNs(name) => { - DefPathDataName::Named(name) + TypeNs(name) => { + if let Some(name) = name { + DefPathDataName::Named(name) + } else { + DefPathDataName::Anon { namespace: sym::synthetic } + } } + ValueNs(name) | MacroNs(name) | LifetimeNs(name) => DefPathDataName::Named(name), // Note that this does not show up in user print-outs. 
CrateRoot => DefPathDataName::Anon { namespace: kw::Crate }, Impl => DefPathDataName::Anon { namespace: kw::Impl }, diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index f0eaec55dbd..d4dfb9f2973 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -4,7 +4,7 @@ use std::fmt; use rustc_abi::ExternAbi; use rustc_ast::attr::AttributeExt; use rustc_ast::token::CommentKind; -use rustc_ast::util::parser::{AssocOp, ExprPrecedence}; +use rustc_ast::util::parser::ExprPrecedence; use rustc_ast::{ self as ast, FloatTy, InlineAsmOptions, InlineAsmTemplatePiece, IntTy, Label, LitIntType, LitKind, TraitObjectSyntax, UintTy, UnsafeBinderCastKind, @@ -243,7 +243,7 @@ impl<'hir> PathSegment<'hir> { } pub fn invalid() -> Self { - Self::new(Ident::empty(), HirId::INVALID, Res::Err) + Self::new(Ident::dummy(), HirId::INVALID, Res::Err) } pub fn args(&self) -> &GenericArgs<'hir> { @@ -1307,13 +1307,18 @@ impl Attribute { #[derive(Debug)] pub struct AttributeMap<'tcx> { pub map: SortedMap<ItemLocalId, &'tcx [Attribute]>, + /// Preprocessed `#[define_opaque]` attribute. + pub define_opaque: Option<&'tcx [(Span, LocalDefId)]>, // Only present when the crate hash is needed. pub opt_hash: Option<Fingerprint>, } impl<'tcx> AttributeMap<'tcx> { - pub const EMPTY: &'static AttributeMap<'static> = - &AttributeMap { map: SortedMap::new(), opt_hash: Some(Fingerprint::ZERO) }; + pub const EMPTY: &'static AttributeMap<'static> = &AttributeMap { + map: SortedMap::new(), + opt_hash: Some(Fingerprint::ZERO), + define_opaque: None, + }; #[inline] pub fn get(&self, id: ItemLocalId) -> &'tcx [Attribute] { @@ -1600,7 +1605,7 @@ pub struct PatField<'hir> { pub span: Span, } -#[derive(Copy, Clone, PartialEq, Debug, HashStable_Generic)] +#[derive(Copy, Clone, PartialEq, Debug, HashStable_Generic, Hash, Eq, Encodable, Decodable)] pub enum RangeEnd { Included, Excluded, @@ -1668,7 +1673,7 @@ pub enum PatExprKind<'hir> { #[derive(Debug, Clone, Copy, HashStable_Generic)] pub enum TyPatKind<'hir> { /// A range pattern (e.g., `1..=2` or `1..2`). - Range(Option<&'hir ConstArg<'hir>>, Option<&'hir ConstArg<'hir>>, RangeEnd), + Range(&'hir ConstArg<'hir>, &'hir ConstArg<'hir>), /// A placeholder for a pattern that wasn't well formed in some way. Err(ErrorGuaranteed), @@ -1683,6 +1688,12 @@ pub enum PatKind<'hir> { /// The `HirId` is the canonical ID for the variable being bound, /// (e.g., in `Ok(x) | Err(x)`, both `x` use the same canonical ID), /// which is the pattern ID of the first `x`. + /// + /// The `BindingMode` is what's provided by the user, before match + /// ergonomics are applied. For the binding mode actually in use, + /// see [`TypeckResults::extract_binding_mode`]. + /// + /// [`TypeckResults::extract_binding_mode`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.extract_binding_mode Binding(BindingMode, HirId, Ident, Option<&'hir Pat<'hir>>), /// A struct or struct variant pattern (e.g., `Variant {x, y, ..}`). @@ -2124,7 +2135,7 @@ impl Expr<'_> { | ExprKind::Become(..) => ExprPrecedence::Jump, // Binop-like expr kinds, handled by `AssocOp`. - ExprKind::Binary(op, ..) => AssocOp::from_ast_binop(op.node).precedence(), + ExprKind::Binary(op, ..) => op.node.precedence(), ExprKind::Cast(..) => ExprPrecedence::Cast, ExprKind::Assign(..) | @@ -2160,6 +2171,7 @@ impl Expr<'_> { | ExprKind::Tup(_) | ExprKind::Type(..) | ExprKind::UnsafeBinderCast(..) + | ExprKind::Use(..) | ExprKind::Err(_) => ExprPrecedence::Unambiguous, ExprKind::DropTemps(expr, ..) 
=> expr.precedence(), @@ -2206,6 +2218,7 @@ impl Expr<'_> { ExprKind::Path(QPath::TypeRelative(..)) | ExprKind::Call(..) | ExprKind::MethodCall(..) + | ExprKind::Use(..) | ExprKind::Struct(..) | ExprKind::Tup(..) | ExprKind::If(..) @@ -2279,7 +2292,9 @@ impl Expr<'_> { pub fn can_have_side_effects(&self) -> bool { match self.peel_drop_temps().kind { - ExprKind::Path(_) | ExprKind::Lit(_) | ExprKind::OffsetOf(..) => false, + ExprKind::Path(_) | ExprKind::Lit(_) | ExprKind::OffsetOf(..) | ExprKind::Use(..) => { + false + } ExprKind::Type(base, _) | ExprKind::Unary(_, base) | ExprKind::Field(base, _) @@ -2541,6 +2556,8 @@ pub enum ExprKind<'hir> { /// /// [`type_dependent_def_id`]: ../../rustc_middle/ty/struct.TypeckResults.html#method.type_dependent_def_id MethodCall(&'hir PathSegment<'hir>, &'hir Expr<'hir>, &'hir [Expr<'hir>], Span), + /// An use expression (e.g., `var.use`). + Use(&'hir Expr<'hir>, Span), /// A tuple (e.g., `(a, b, c, d)`). Tup(&'hir [Expr<'hir>]), /// A binary operation (e.g., `a + b`, `a * b`). @@ -3506,7 +3523,7 @@ pub enum InlineAsmOperand<'hir> { out_expr: Option<&'hir Expr<'hir>>, }, Const { - anon_const: &'hir AnonConst, + anon_const: ConstBlock, }, SymFn { expr: &'hir Expr<'hir>, @@ -4315,16 +4332,6 @@ pub enum OwnerNode<'hir> { } impl<'hir> OwnerNode<'hir> { - pub fn ident(&self) -> Option<Ident> { - match self { - OwnerNode::Item(Item { ident, .. }) - | OwnerNode::ForeignItem(ForeignItem { ident, .. }) - | OwnerNode::ImplItem(ImplItem { ident, .. }) - | OwnerNode::TraitItem(TraitItem { ident, .. }) => Some(*ident), - OwnerNode::Crate(..) | OwnerNode::Synthetic => None, - } - } - pub fn span(&self) -> Span { match self { OwnerNode::Item(Item { span, .. }) diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs index d5fa7ec366b..3ef645a5f61 100644 --- a/compiler/rustc_hir/src/intravisit.rs +++ b/compiler/rustc_hir/src/intravisit.rs @@ -316,8 +316,8 @@ pub trait Visitor<'v>: Sized { fn visit_ident(&mut self, ident: Ident) -> Self::Result { walk_ident(self, ident) } - fn visit_mod(&mut self, m: &'v Mod<'v>, _s: Span, n: HirId) -> Self::Result { - walk_mod(self, m, n) + fn visit_mod(&mut self, m: &'v Mod<'v>, _s: Span, _n: HirId) -> Self::Result { + walk_mod(self, m) } fn visit_foreign_item(&mut self, i: &'v ForeignItem<'v>) -> Self::Result { walk_foreign_item(self, i) @@ -363,7 +363,7 @@ pub trait Visitor<'v>: Sized { /// See the doc comments on [`Ty`] for an explanation of what it means for a type to be /// ambiguous. /// - /// The [`Visitor::visit_infer`] method should be overriden in order to handle infer vars. + /// The [`Visitor::visit_infer`] method should be overridden in order to handle infer vars. fn visit_ty(&mut self, t: &'v Ty<'v, AmbigArg>) -> Self::Result { walk_ty(self, t) } @@ -374,7 +374,7 @@ pub trait Visitor<'v>: Sized { /// See the doc comments on [`ConstArg`] for an explanation of what it means for a const to be /// ambiguous. /// - /// The [`Visitor::visit_infer`] method should be overriden in order to handle infer vars. + /// The [`Visitor::visit_infer`] method should be overridden in order to handle infer vars. 
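Stepping back to the PatKind::Binding documentation added a little further up (the binding mode as written versus the one in effect after match ergonomics): a small stable-Rust illustration of the distinction that documentation draws.

fn main() {
    let opt: &Option<i32> = &Some(5);

    // The pattern is written with no explicit binding mode, but because a
    // reference is matched against a non-reference pattern, match ergonomics
    // switch the default binding mode to by-reference: `x` is a `&i32`.
    match opt {
        Some(x) => { let _: &i32 = x; }
        None => {}
    }

    // Dereferencing first restores the by-value default: `x` is an `i32`.
    match *opt {
        Some(x) => { let _: i32 = x; }
        None => {}
    }
}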
fn visit_const_arg(&mut self, c: &'v ConstArg<'v, AmbigArg>) -> Self::Result { walk_ambig_const_arg(self, c) } @@ -464,8 +464,8 @@ pub trait Visitor<'v>: Sized { fn visit_field_def(&mut self, s: &'v FieldDef<'v>) -> Self::Result { walk_field_def(self, s) } - fn visit_enum_def(&mut self, enum_definition: &'v EnumDef<'v>, item_id: HirId) -> Self::Result { - walk_enum_def(self, enum_definition, item_id) + fn visit_enum_def(&mut self, enum_definition: &'v EnumDef<'v>) -> Self::Result { + walk_enum_def(self, enum_definition) } fn visit_variant(&mut self, v: &'v Variant<'v>) -> Self::Result { walk_variant(self, v) @@ -532,28 +532,25 @@ pub fn walk_param<'v, V: Visitor<'v>>(visitor: &mut V, param: &'v Param<'v>) -> } pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V::Result { + try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_ident(item.ident)); match item.kind { ItemKind::ExternCrate(orig_name) => { - try_visit!(visitor.visit_id(item.hir_id())); visit_opt!(visitor, visit_name, orig_name); } ItemKind::Use(ref path, _) => { try_visit!(visitor.visit_use(path, item.hir_id())); } ItemKind::Static(ref typ, _, body) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_ty_unambig(typ)); try_visit!(visitor.visit_nested_body(body)); } ItemKind::Const(ref typ, ref generics, body) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_ty_unambig(typ)); try_visit!(visitor.visit_generics(generics)); try_visit!(visitor.visit_nested_body(body)); } ItemKind::Fn { sig, generics, body: body_id, .. } => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_fn( FnKind::ItemFn(item.ident, generics, sig.header), sig.decl, @@ -562,19 +559,14 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: item.owner_id.def_id, )); } - ItemKind::Macro(..) => { - try_visit!(visitor.visit_id(item.hir_id())); - } + ItemKind::Macro(..) => {} ItemKind::Mod(ref module) => { - // `visit_mod()` takes care of visiting the `Item`'s `HirId`. try_visit!(visitor.visit_mod(module, item.span, item.hir_id())); } ItemKind::ForeignMod { abi: _, items } => { - try_visit!(visitor.visit_id(item.hir_id())); walk_list!(visitor, visit_foreign_item_ref, items); } ItemKind::GlobalAsm { asm: _, fake_body } => { - try_visit!(visitor.visit_id(item.hir_id())); // Visit the fake body, which contains the asm statement. // Therefore we should not visit the asm statement again // outside of the body, or some visitors won't have their @@ -582,14 +574,12 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: try_visit!(visitor.visit_nested_body(fake_body)); } ItemKind::TyAlias(ref ty, ref generics) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_ty_unambig(ty)); try_visit!(visitor.visit_generics(generics)); } ItemKind::Enum(ref enum_definition, ref generics) => { try_visit!(visitor.visit_generics(generics)); - // `visit_enum_def()` takes care of visiting the `Item`'s `HirId`. 
- try_visit!(visitor.visit_enum_def(enum_definition, item.hir_id())); + try_visit!(visitor.visit_enum_def(enum_definition)); } ItemKind::Impl(Impl { constness: _, @@ -602,7 +592,6 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: self_ty, items, }) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_generics(generics)); visit_opt!(visitor, visit_trait_ref, of_trait); try_visit!(visitor.visit_ty_unambig(self_ty)); @@ -611,17 +600,14 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: ItemKind::Struct(ref struct_definition, ref generics) | ItemKind::Union(ref struct_definition, ref generics) => { try_visit!(visitor.visit_generics(generics)); - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_variant_data(struct_definition)); } ItemKind::Trait(.., ref generics, bounds, trait_item_refs) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_generics(generics)); walk_list!(visitor, visit_param_bound, bounds); walk_list!(visitor, visit_trait_item_ref, trait_item_refs); } ItemKind::TraitAlias(ref generics, bounds) => { - try_visit!(visitor.visit_id(item.hir_id())); try_visit!(visitor.visit_generics(generics)); walk_list!(visitor, visit_param_bound, bounds); } @@ -638,12 +624,7 @@ pub fn walk_ident<'v, V: Visitor<'v>>(visitor: &mut V, ident: Ident) -> V::Resul visitor.visit_name(ident.name) } -pub fn walk_mod<'v, V: Visitor<'v>>( - visitor: &mut V, - module: &'v Mod<'v>, - mod_hir_id: HirId, -) -> V::Result { - try_visit!(visitor.visit_id(mod_hir_id)); +pub fn walk_mod<'v, V: Visitor<'v>>(visitor: &mut V, module: &'v Mod<'v>) -> V::Result { walk_list!(visitor, visit_nested_item, module.item_ids.iter().copied()); V::Result::output() } @@ -708,9 +689,9 @@ pub fn walk_arm<'v, V: Visitor<'v>>(visitor: &mut V, arm: &'v Arm<'v>) -> V::Res pub fn walk_ty_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v TyPat<'v>) -> V::Result { try_visit!(visitor.visit_id(pattern.hir_id)); match pattern.kind { - TyPatKind::Range(lower_bound, upper_bound, _) => { - visit_opt!(visitor, visit_const_arg_unambig, lower_bound); - visit_opt!(visitor, visit_const_arg_unambig, upper_bound); + TyPatKind::Range(lower_bound, upper_bound) => { + try_visit!(visitor.visit_const_arg_unambig(lower_bound)); + try_visit!(visitor.visit_const_arg_unambig(upper_bound)); } TyPatKind::Err(_) => (), } @@ -821,6 +802,9 @@ pub fn walk_expr<'v, V: Visitor<'v>>(visitor: &mut V, expression: &'v Expr<'v>) try_visit!(visitor.visit_expr(receiver)); walk_list!(visitor, visit_expr, arguments); } + ExprKind::Use(expr, _) => { + try_visit!(visitor.visit_expr(expr)); + } ExprKind::Binary(_, ref left_expression, ref right_expression) => { try_visit!(visitor.visit_expr(left_expression)); try_visit!(visitor.visit_expr(right_expression)); @@ -1145,7 +1129,6 @@ pub fn walk_use<'v, V: Visitor<'v>>( path: &'v UsePath<'v>, hir_id: HirId, ) -> V::Result { - try_visit!(visitor.visit_id(hir_id)); let UsePath { segments, ref res, span } = *path; for &res in res { try_visit!(visitor.visit_path(&Path { segments, res, span }, hir_id)); @@ -1326,9 +1309,7 @@ pub fn walk_field_def<'v, V: Visitor<'v>>( pub fn walk_enum_def<'v, V: Visitor<'v>>( visitor: &mut V, enum_definition: &'v EnumDef<'v>, - item_id: HirId, ) -> V::Result { - try_visit!(visitor.visit_id(item_id)); walk_list!(visitor, visit_variant, enum_definition.variants); V::Result::output() } @@ -1447,7 +1428,7 @@ pub fn walk_inline_asm<'v, V: Visitor<'v>>( visit_opt!(visitor, 
visit_expr, out_expr); } InlineAsmOperand::Const { anon_const, .. } => { - try_visit!(visitor.visit_anon_const(anon_const)); + try_visit!(visitor.visit_inline_const(anon_const)); } InlineAsmOperand::SymFn { expr, .. } => { try_visit!(visitor.visit_expr(expr)); diff --git a/compiler/rustc_hir/src/lang_items.rs b/compiler/rustc_hir/src/lang_items.rs index f5626937ec4..29f4d5b8076 100644 --- a/compiler/rustc_hir/src/lang_items.rs +++ b/compiler/rustc_hir/src/lang_items.rs @@ -171,6 +171,7 @@ language_item_table! { Copy, sym::copy, copy_trait, Target::Trait, GenericRequirement::Exact(0); Clone, sym::clone, clone_trait, Target::Trait, GenericRequirement::None; CloneFn, sym::clone_fn, clone_fn, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::None; + UseCloned, sym::use_cloned, use_cloned_trait, Target::Trait, GenericRequirement::None; Sync, sym::sync, sync_trait, Target::Trait, GenericRequirement::Exact(0); DiscriminantKind, sym::discriminant_kind, discriminant_kind_trait, Target::Trait, GenericRequirement::None; /// The associated item of the `DiscriminantKind` trait. @@ -418,6 +419,9 @@ language_item_table! { Range, sym::Range, range_struct, Target::Struct, GenericRequirement::None; RangeToInclusive, sym::RangeToInclusive, range_to_inclusive_struct, Target::Struct, GenericRequirement::None; RangeTo, sym::RangeTo, range_to_struct, Target::Struct, GenericRequirement::None; + RangeMax, sym::RangeMax, range_max, Target::AssocConst, GenericRequirement::Exact(0); + RangeMin, sym::RangeMin, range_min, Target::AssocConst, GenericRequirement::Exact(0); + RangeSub, sym::RangeSub, range_sub, Target::Method(MethodKind::Trait { body: false }), GenericRequirement::Exact(0); // `new_range` types that are `Copy + IntoIterator` RangeFromCopy, sym::RangeFromCopy, range_from_copy_struct, Target::Struct, GenericRequirement::None; diff --git a/compiler/rustc_hir/src/lib.rs b/compiler/rustc_hir/src/lib.rs index 270d4fbec30..4a839d40571 100644 --- a/compiler/rustc_hir/src/lib.rs +++ b/compiler/rustc_hir/src/lib.rs @@ -4,6 +4,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(associated_type_defaults)] #![feature(box_patterns)] #![feature(closure_track_caller)] @@ -13,7 +14,6 @@ #![feature(never_type)] #![feature(rustc_attrs)] #![feature(variant_count)] -#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate self as rustc_hir; diff --git a/compiler/rustc_hir/src/stable_hash_impls.rs b/compiler/rustc_hir/src/stable_hash_impls.rs index 2709a826549..91ea88cae47 100644 --- a/compiler/rustc_hir/src/stable_hash_impls.rs +++ b/compiler/rustc_hir/src/stable_hash_impls.rs @@ -106,7 +106,7 @@ impl<'tcx, HirCtx: crate::HashStableContext> HashStable<HirCtx> for AttributeMap fn hash_stable(&self, hcx: &mut HirCtx, hasher: &mut StableHasher) { // We ignore the `map` since it refers to information included in `opt_hash` which is // hashed in the collector and used for the crate hash. 
- let AttributeMap { opt_hash, map: _ } = *self; + let AttributeMap { opt_hash, define_opaque: _, map: _ } = *self; opt_hash.unwrap().hash_stable(hcx, hasher); } } diff --git a/compiler/rustc_hir/src/target.rs b/compiler/rustc_hir/src/target.rs index 70e95a84e68..601898023fc 100644 --- a/compiler/rustc_hir/src/target.rs +++ b/compiler/rustc_hir/src/target.rs @@ -56,6 +56,7 @@ pub enum Target { Param, PatField, ExprField, + WherePredicate, } impl Display for Target { @@ -96,7 +97,8 @@ impl Target { | Target::MacroDef | Target::Param | Target::PatField - | Target::ExprField => false, + | Target::ExprField + | Target::WherePredicate => false, } } @@ -217,6 +219,7 @@ impl Target { Target::Param => "function param", Target::PatField => "pattern field", Target::ExprField => "struct field", + Target::WherePredicate => "where predicate", } } } diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl index 3f75cce0092..194f2cd04e4 100644 --- a/compiler/rustc_hir_analysis/messages.ftl +++ b/compiler/rustc_hir_analysis/messages.ftl @@ -244,9 +244,6 @@ hir_analysis_inherent_ty_outside_relevant = cannot define inherent `impl` for a .help = consider moving this inherent impl into the crate defining the type if possible .span_help = alternatively add `#[rustc_allow_incoherent_impl]` to the relevant impl items -hir_analysis_invalid_base_type = `{$ty}` is not a valid base type for range patterns - .note = range patterns only support integers - hir_analysis_invalid_generic_receiver_ty = invalid generic `self` parameter type: `{$receiver_ty}` .note = type of `self` must not be a method generic parameter type @@ -278,13 +275,6 @@ hir_analysis_invalid_union_field = hir_analysis_invalid_union_field_sugg = wrap the field type in `ManuallyDrop<...>` -hir_analysis_invalid_unsafe_field = - field must implement `Copy` or be wrapped in `ManuallyDrop<...>` to be unsafe - .note = unsafe fields must not have drop side-effects, which is currently enforced via either `Copy` or `ManuallyDrop<...>` - -hir_analysis_invalid_unsafe_field_sugg = - wrap the field type in `ManuallyDrop<...>` - hir_analysis_late_bound_const_in_apit = `impl Trait` can only mention const parameters from an fn or impl .label = const parameter declared here @@ -514,12 +504,9 @@ hir_analysis_supertrait_item_shadowee = item from `{$supertrait}` is shadowed by hir_analysis_supertrait_item_shadowing = trait item `{$item}` from `{$subtrait}` shadows identically named item from supertrait -hir_analysis_tait_forward_compat = item constrains opaque type that is not in its signature - .note = this item must mention the opaque type in its signature in order to be able to register hidden types - -hir_analysis_tait_forward_compat2 = item does not constrain `{$opaque_type}`, but has it in its signature - .note = consider moving the opaque type's declaration and defining uses into a separate module - .opaque = this opaque type is in the signature +hir_analysis_tait_forward_compat2 = item does not constrain `{$opaque_type}` + .note = consider removing `#[define_opaque]` or adding an empty `#[define_opaque()]` + .opaque = this opaque type is supposed to be constrained hir_analysis_target_feature_on_main = `main` function is not allowed to have `#[target_feature]` @@ -620,6 +607,8 @@ hir_analysis_variances_of = {$variances} hir_analysis_where_clause_on_main = `main` function is not allowed to have a `where` clause .label = `main` cannot have a `where` clause +hir_analysis_within_macro = due to this macro variable + 
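
For reference, a rough sketch of the situation the reworded `hir_analysis_tait_forward_compat2` message describes; the attribute syntax and feature gate below are assumptions based on the `#[define_opaque]` attribute the note refers to, not something this diff defines:

    #![feature(type_alias_impl_trait)]

    type Answer = impl Sized;

    // A defining use: the attribute opts the item in, and the body actually
    // constrains the hidden type of `Answer` (here to `u32`).
    #[define_opaque(Answer)]
    fn define() -> Answer {
        42u32
    }

    // An item that carries `#[define_opaque(Answer)]` but never constrains
    // `Answer` would now get "item does not constrain `Answer`", with the note
    // suggesting to remove the attribute or use an empty `#[define_opaque()]`.
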
hir_analysis_wrong_number_of_generic_arguments_to_intrinsic = intrinsic has wrong number of {$descr} parameters: found {$found}, expected {$expected} .label = expected {$expected} {$descr} {$expected -> diff --git a/compiler/rustc_hir_analysis/src/check/dropck.rs b/compiler/rustc_hir_analysis/src/check/always_applicable.rs index d7dfe482da4..8a841a11556 100644 --- a/compiler/rustc_hir_analysis/src/check/dropck.rs +++ b/compiler/rustc_hir_analysis/src/check/always_applicable.rs @@ -1,8 +1,15 @@ +//! This module contains methods that assist in checking that impls are general +//! enough, i.e. that they always apply to every valid instantiation of the ADT +//! they're implemented for. +//! +//! This is necessary for `Drop` and negative impls to be well-formed. + use rustc_data_structures::fx::FxHashSet; use rustc_errors::codes::*; use rustc_errors::{ErrorGuaranteed, struct_span_code_err}; use rustc_infer::infer::{RegionResolutionError, TyCtxtInferExt}; use rustc_infer::traits::{ObligationCause, ObligationCauseCode}; +use rustc_middle::span_bug; use rustc_middle::ty::util::CheckRegions; use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypingMode}; use rustc_trait_selection::regions::InferCtxtRegionExt; @@ -27,11 +34,12 @@ use crate::hir::def_id::{DefId, LocalDefId}; /// 3. Any bounds on the generic parameters must be reflected in the /// struct/enum definition for the nominal type itself (i.e. /// cannot do `struct S<T>; impl<T:Clone> Drop for S<T> { ... }`). -/// pub(crate) fn check_drop_impl( tcx: TyCtxt<'_>, drop_impl_did: DefId, ) -> Result<(), ErrorGuaranteed> { + let drop_impl_did = drop_impl_did.expect_local(); + match tcx.impl_polarity(drop_impl_did) { ty::ImplPolarity::Positive => {} ty::ImplPolarity::Negative => { @@ -45,55 +53,110 @@ pub(crate) fn check_drop_impl( })); } } - let dtor_self_type = tcx.type_of(drop_impl_did).instantiate_identity(); - match dtor_self_type.kind() { + + tcx.ensure_ok().orphan_check_impl(drop_impl_did)?; + + let dtor_impl_trait_ref = tcx.impl_trait_ref(drop_impl_did).unwrap().instantiate_identity(); + + match dtor_impl_trait_ref.self_ty().kind() { ty::Adt(adt_def, adt_to_impl_args) => { - ensure_drop_params_and_item_params_correspond( + ensure_impl_params_and_item_params_correspond( tcx, - drop_impl_did.expect_local(), + drop_impl_did, adt_def.did(), adt_to_impl_args, )?; - ensure_drop_predicates_are_implied_by_item_defn( + ensure_impl_predicates_are_implied_by_item_defn( tcx, - drop_impl_did.expect_local(), - adt_def.did().expect_local(), + drop_impl_did, + adt_def.did(), adt_to_impl_args, ) } _ => { - // Destructors only work on nominal types. This was - // already checked by coherence, but compilation may - // not have been terminated.
- let span = tcx.def_span(drop_impl_did); - let reported = tcx.dcx().span_delayed_bug( - span, - format!("should have been rejected by coherence check: {dtor_self_type}"), - ); - Err(reported) + span_bug!(tcx.def_span(drop_impl_did), "incoherent impl of Drop"); } } } -fn ensure_drop_params_and_item_params_correspond<'tcx>( +pub(crate) fn check_negative_auto_trait_impl<'tcx>( tcx: TyCtxt<'tcx>, - drop_impl_did: LocalDefId, - self_type_did: DefId, + impl_def_id: LocalDefId, + impl_trait_ref: ty::TraitRef<'tcx>, + polarity: ty::ImplPolarity, +) -> Result<(), ErrorGuaranteed> { + let ty::ImplPolarity::Negative = polarity else { + return Ok(()); + }; + + if !tcx.trait_is_auto(impl_trait_ref.def_id) { + return Ok(()); + } + + if tcx.defaultness(impl_def_id).is_default() { + tcx.dcx().span_delayed_bug(tcx.def_span(impl_def_id), "default impl cannot be negative"); + } + + tcx.ensure_ok().orphan_check_impl(impl_def_id)?; + + match impl_trait_ref.self_ty().kind() { + ty::Adt(adt_def, adt_to_impl_args) => { + ensure_impl_params_and_item_params_correspond( + tcx, + impl_def_id, + adt_def.did(), + adt_to_impl_args, + )?; + + ensure_impl_predicates_are_implied_by_item_defn( + tcx, + impl_def_id, + adt_def.did(), + adt_to_impl_args, + ) + } + _ => { + if tcx.features().auto_traits() { + // NOTE: We ignore the applicability check for negative auto impls + // defined in libcore. In the (almost impossible) future where we + // stabilize auto impls, then the proper applicability check MUST + // be implemented here to handle non-ADT rigid types. + Ok(()) + } else { + Err(tcx.dcx().span_delayed_bug( + tcx.def_span(impl_def_id), + "incoherent impl of negative auto trait", + )) + } + } + } +} + +fn ensure_impl_params_and_item_params_correspond<'tcx>( + tcx: TyCtxt<'tcx>, + impl_def_id: LocalDefId, + adt_def_id: DefId, adt_to_impl_args: GenericArgsRef<'tcx>, ) -> Result<(), ErrorGuaranteed> { let Err(arg) = tcx.uses_unique_generic_params(adt_to_impl_args, CheckRegions::OnlyParam) else { return Ok(()); }; - let drop_impl_span = tcx.def_span(drop_impl_did); - let item_span = tcx.def_span(self_type_did); - let self_descr = tcx.def_descr(self_type_did); + let impl_span = tcx.def_span(impl_def_id); + let item_span = tcx.def_span(adt_def_id); + let self_descr = tcx.def_descr(adt_def_id); + let polarity = match tcx.impl_polarity(impl_def_id) { + ty::ImplPolarity::Positive | ty::ImplPolarity::Reservation => "", + ty::ImplPolarity::Negative => "!", + }; + let trait_name = tcx + .item_name(tcx.trait_id_of_impl(impl_def_id.to_def_id()).expect("expected impl of trait")); let mut err = struct_span_code_err!( tcx.dcx(), - drop_impl_span, + impl_span, E0366, - "`Drop` impls cannot be specialized" + "`{polarity}{trait_name}` impls cannot be specialized", ); match arg { ty::util::NotUniqueParam::DuplicateParam(arg) => { @@ -116,17 +179,22 @@ fn ensure_drop_params_and_item_params_correspond<'tcx>( /// Confirms that all predicates defined on the `Drop` impl (`drop_impl_def_id`) are able to be /// proven from within `adt_def_id`'s environment. I.e. all the predicates on the impl are /// implied by the ADT being well formed. 
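
To make the renamed checks concrete, a small sketch of the impls this module accepts and rejects; the error wording is taken from the hunks above, and the negative-impl half assumes the unstable `auto_traits`/`negative_impls` features:

    struct S<T>(T);

    // OK: exactly as general as the type definition, so it is always applicable.
    impl<T> Drop for S<T> {
        fn drop(&mut self) {}
    }

    // Rejected with E0366, "`Drop` impls cannot be specialized":
    //     impl Drop for S<u8> { fn drop(&mut self) {} }
    //
    // Rejected with E0367, because `T: Clone` is not required by `S` itself:
    //     impl<T: Clone> Drop for S<T> { fn drop(&mut self) {} }
    //
    // The same "always applicable" rules now also run for negative impls of
    // auto traits (nightly-only sketch):
    //     #![feature(auto_traits, negative_impls)]
    //     auto trait Scary {}
    //     impl !Scary for S<u8> {}   // "`!Scary` impls cannot be specialized"
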
-fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( +fn ensure_impl_predicates_are_implied_by_item_defn<'tcx>( tcx: TyCtxt<'tcx>, - drop_impl_def_id: LocalDefId, - adt_def_id: LocalDefId, + impl_def_id: LocalDefId, + adt_def_id: DefId, adt_to_impl_args: GenericArgsRef<'tcx>, ) -> Result<(), ErrorGuaranteed> { let infcx = tcx.infer_ctxt().build(TypingMode::non_body_analysis()); let ocx = ObligationCtxt::new_with_diagnostics(&infcx); - let impl_span = tcx.def_span(drop_impl_def_id.to_def_id()); - + let impl_span = tcx.def_span(impl_def_id.to_def_id()); + let trait_name = tcx + .item_name(tcx.trait_id_of_impl(impl_def_id.to_def_id()).expect("expected impl of trait")); + let polarity = match tcx.impl_polarity(impl_def_id) { + ty::ImplPolarity::Positive | ty::ImplPolarity::Reservation => "", + ty::ImplPolarity::Negative => "!", + }; // Take the param-env of the adt and instantiate the args that show up in // the implementation's self type. This gives us the assumptions that the // self ty of the implementation is allowed to know just from it being a @@ -145,17 +213,21 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( let adt_env = ty::EarlyBinder::bind(tcx.param_env(adt_def_id)).instantiate(tcx, adt_to_impl_args); - let fresh_impl_args = infcx.fresh_args_for_item(impl_span, drop_impl_def_id.to_def_id()); + let fresh_impl_args = infcx.fresh_args_for_item(impl_span, impl_def_id.to_def_id()); let fresh_adt_ty = - tcx.impl_trait_ref(drop_impl_def_id).unwrap().instantiate(tcx, fresh_impl_args).self_ty(); + tcx.impl_trait_ref(impl_def_id).unwrap().instantiate(tcx, fresh_impl_args).self_ty(); ocx.eq(&ObligationCause::dummy_with_span(impl_span), adt_env, fresh_adt_ty, impl_adt_ty) - .unwrap(); + .expect("equating fully generic trait ref should never fail"); - for (clause, span) in tcx.predicates_of(drop_impl_def_id).instantiate(tcx, fresh_impl_args) { - let normalize_cause = traits::ObligationCause::misc(span, adt_def_id); + for (clause, span) in tcx.predicates_of(impl_def_id).instantiate(tcx, fresh_impl_args) { + let normalize_cause = traits::ObligationCause::misc(span, impl_def_id); let pred = ocx.normalize(&normalize_cause, adt_env, clause); - let cause = traits::ObligationCause::new(span, adt_def_id, ObligationCauseCode::DropImpl); + let cause = traits::ObligationCause::new( + span, + impl_def_id, + ObligationCauseCode::AlwaysApplicableImpl, + ); ocx.register_obligation(traits::Obligation::new(tcx, cause, adt_env, pred)); } @@ -173,13 +245,13 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( let root_predicate = error.root_obligation.predicate; if root_predicates.insert(root_predicate) { let item_span = tcx.def_span(adt_def_id); - let self_descr = tcx.def_descr(adt_def_id.to_def_id()); + let self_descr = tcx.def_descr(adt_def_id); guar = Some( struct_span_code_err!( tcx.dcx(), error.root_obligation.cause.span, E0367, - "`Drop` impl requires `{root_predicate}` \ + "`{polarity}{trait_name}` impl requires `{root_predicate}` \ but the {self_descr} it is implemented for does not", ) .with_span_note(item_span, "the implementor must specify the same requirement") @@ -190,12 +262,12 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( return Err(guar.unwrap()); } - let errors = ocx.infcx.resolve_regions(adt_def_id, adt_env, []); + let errors = ocx.infcx.resolve_regions(impl_def_id, adt_env, []); if !errors.is_empty() { let mut guar = None; for error in errors { let item_span = tcx.def_span(adt_def_id); - let self_descr = tcx.def_descr(adt_def_id.to_def_id()); + let 
self_descr = tcx.def_descr(adt_def_id); let outlives = match error { RegionResolutionError::ConcreteFailure(_, a, b) => format!("{b}: {a}"), RegionResolutionError::GenericBoundFailure(_, generic, r) => { @@ -212,7 +284,7 @@ fn ensure_drop_predicates_are_implied_by_item_defn<'tcx>( tcx.dcx(), error.origin().span(), E0367, - "`Drop` impl requires `{outlives}` \ + "`{polarity}{trait_name}` impl requires `{outlives}` \ but the {self_descr} it is implemented for does not", ) .with_span_note(item_span, "the implementor must specify the same requirement") diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index f2331f3fd8e..a266286664c 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -70,7 +70,6 @@ fn check_struct(tcx: TyCtxt<'_>, def_id: LocalDefId) { check_transparent(tcx, def); check_packed(tcx, span, def); - check_unsafe_fields(tcx, def_id); } fn check_union(tcx: TyCtxt<'_>, def_id: LocalDefId) { @@ -144,36 +143,6 @@ fn check_union_fields(tcx: TyCtxt<'_>, span: Span, item_def_id: LocalDefId) -> b true } -/// Check that the unsafe fields do not need dropping. -fn check_unsafe_fields(tcx: TyCtxt<'_>, item_def_id: LocalDefId) { - let span = tcx.def_span(item_def_id); - let def = tcx.adt_def(item_def_id); - - let typing_env = ty::TypingEnv::non_body_analysis(tcx, item_def_id); - let args = ty::GenericArgs::identity_for_item(tcx, item_def_id); - - for field in def.all_fields() { - if !field.safety.is_unsafe() { - continue; - } - - if !allowed_union_or_unsafe_field(tcx, field.ty(tcx, args), typing_env, span) { - let hir::Node::Field(field) = tcx.hir_node_by_def_id(field.did.expect_local()) else { - unreachable!("field has to correspond to hir field") - }; - let ty_span = field.ty.span; - tcx.dcx().emit_err(errors::InvalidUnsafeField { - field_span: field.span, - sugg: errors::InvalidUnsafeFieldSuggestion { - lo: ty_span.shrink_to_lo(), - hi: ty_span.shrink_to_hi(), - }, - note: (), - }); - } - } -} - /// Check that a `static` is inhabited. fn check_static_inhabited(tcx: TyCtxt<'_>, def_id: LocalDefId) { // Make sure statics are inhabited. @@ -216,7 +185,7 @@ fn check_static_inhabited(tcx: TyCtxt<'_>, def_id: LocalDefId) { /// Checks that an opaque type does not contain cycles and does not use `Self` or `T::Foo` /// projections that would result in "inheriting lifetimes". fn check_opaque(tcx: TyCtxt<'_>, def_id: LocalDefId) { - let hir::OpaqueTy { origin, .. } = *tcx.hir().expect_opaque_ty(def_id); + let hir::OpaqueTy { origin, .. } = *tcx.hir_expect_opaque_ty(def_id); // HACK(jynelson): trying to infer the type of `impl trait` breaks documenting // `async-std` (and `pub async fn` in general). @@ -423,6 +392,12 @@ fn best_definition_site_of_opaque<'tcx>( return ControlFlow::Continue(()); } + let opaque_types_defined_by = self.tcx.opaque_types_defined_by(item_def_id); + // Don't try to check items that cannot possibly constrain the type. + if !opaque_types_defined_by.contains(&self.opaque_def_id) { + return ControlFlow::Continue(()); + } + if let Some(hidden_ty) = self.tcx.mir_borrowck(item_def_id).concrete_opaque_types.get(&self.opaque_def_id) { @@ -482,19 +457,7 @@ fn best_definition_site_of_opaque<'tcx>( None } hir::OpaqueTyOrigin::TyAlias { in_assoc_ty: false, .. 
} => { - let scope = tcx.hir_get_defining_scope(tcx.local_def_id_to_hir_id(opaque_def_id)); - let found = if scope == hir::CRATE_HIR_ID { - tcx.hir_walk_toplevel_module(&mut locator) - } else { - match tcx.hir_node(scope) { - Node::Item(it) => locator.visit_item(it), - Node::ImplItem(it) => locator.visit_impl_item(it), - Node::TraitItem(it) => locator.visit_trait_item(it), - Node::ForeignItem(it) => locator.visit_foreign_item(it), - other => bug!("{:?} is not a valid scope for an opaque type item", other), - } - }; - found.break_value() + tcx.hir_walk_toplevel_module(&mut locator).break_value() } } } @@ -745,14 +708,10 @@ fn check_static_linkage(tcx: TyCtxt<'_>, def_id: LocalDefId) { pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) { match tcx.def_kind(def_id) { DefKind::Static { .. } => { - tcx.ensure_ok().typeck(def_id); - maybe_check_static_with_link_section(tcx, def_id); check_static_inhabited(tcx, def_id); check_static_linkage(tcx, def_id); } - DefKind::Const => { - tcx.ensure_ok().typeck(def_id); - } + DefKind::Const => {} DefKind::Enum => { check_enum(tcx, def_id); } @@ -766,7 +725,6 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) { ExternAbi::Rust, ) } - // Everything else is checked entirely within check_item_body } DefKind::Impl { of_trait } => { if of_trait && let Some(impl_trait_header) = tcx.impl_trait_header(def_id) { @@ -827,7 +785,7 @@ pub(crate) fn check_item_type(tcx: TyCtxt<'_>, def_id: LocalDefId) { check_type_alias_type_params_are_used(tcx, def_id); } DefKind::ForeignMod => { - let it = tcx.hir().expect_item(def_id); + let it = tcx.hir_expect_item(def_id); let hir::ItemKind::ForeignMod { abi, items } = it.kind else { return; }; @@ -1517,7 +1475,6 @@ fn check_enum(tcx: TyCtxt<'_>, def_id: LocalDefId) { detect_discriminant_duplicate(tcx, def); check_transparent(tcx, def); - check_unsafe_fields(tcx, def_id); } /// Part of enum check. Given the discriminants of an enum, errors if two or more discriminants are equal diff --git a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs index c193aad2afd..0a37a27b35b 100644 --- a/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs +++ b/compiler/rustc_hir_analysis/src/check/compare_impl_item.rs @@ -1031,7 +1031,7 @@ fn report_trait_method_mismatch<'tcx>( // When the `impl` receiver is an arbitrary self type, like `self: Box<Self>`, the // span points only at the type `Box<Self`>, but we want to cover the whole // argument pattern and type. - let (sig, body) = tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); + let (sig, body) = tcx.hir_expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); let span = tcx .hir_body_param_names(body) .zip(sig.decl.inputs.iter()) @@ -1051,7 +1051,7 @@ fn report_trait_method_mismatch<'tcx>( // Suggestion to change output type. We do not suggest in `async` functions // to avoid complex logic or incorrect output. 
if let ImplItemKind::Fn(sig, _) = - &tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).kind + &tcx.hir_expect_impl_item(impl_m.def_id.expect_local()).kind && !sig.header.asyncness.is_async() { let msg = "change the output type to match the trait"; @@ -1190,12 +1190,12 @@ fn extract_spans_for_error_reporting<'tcx>( ) -> (Span, Option<Span>) { let tcx = infcx.tcx; let mut impl_args = { - let (sig, _) = tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); + let (sig, _) = tcx.hir_expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span())) }; let trait_args = trait_m.def_id.as_local().map(|def_id| { - let (sig, _) = tcx.hir().expect_trait_item(def_id).expect_fn(); + let (sig, _) = tcx.hir_expect_trait_item(def_id).expect_fn(); sig.decl.inputs.iter().map(|t| t.span).chain(iter::once(sig.decl.output.span())) }); @@ -1371,7 +1371,7 @@ fn compare_number_of_generics<'tcx>( spans }; let (trait_spans, impl_trait_spans) = if let Some(def_id) = trait_.def_id.as_local() { - let trait_item = tcx.hir().expect_trait_item(def_id); + let trait_item = tcx.hir_expect_trait_item(def_id); let arg_spans: Vec<Span> = arg_spans(trait_.kind, trait_item.generics); let impl_trait_spans: Vec<Span> = trait_item .generics @@ -1388,7 +1388,7 @@ fn compare_number_of_generics<'tcx>( (trait_span.map(|s| vec![s]), vec![]) }; - let impl_item = tcx.hir().expect_impl_item(impl_.def_id.expect_local()); + let impl_item = tcx.hir_expect_impl_item(impl_.def_id.expect_local()); let impl_item_impl_trait_spans: Vec<Span> = impl_item .generics .params @@ -1466,7 +1466,7 @@ fn compare_number_of_method_arguments<'tcx>( .def_id .as_local() .and_then(|def_id| { - let (trait_m_sig, _) = &tcx.hir().expect_trait_item(def_id).expect_fn(); + let (trait_m_sig, _) = &tcx.hir_expect_trait_item(def_id).expect_fn(); let pos = trait_number_args.saturating_sub(1); trait_m_sig.decl.inputs.get(pos).map(|arg| { if pos == 0 { @@ -1478,7 +1478,7 @@ fn compare_number_of_method_arguments<'tcx>( }) .or_else(|| tcx.hir().span_if_local(trait_m.def_id)); - let (impl_m_sig, _) = &tcx.hir().expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); + let (impl_m_sig, _) = &tcx.hir_expect_impl_item(impl_m.def_id.expect_local()).expect_fn(); let pos = impl_number_args.saturating_sub(1); let impl_span = impl_m_sig .decl @@ -1580,10 +1580,10 @@ fn compare_synthetic_generics<'tcx>( // as another generic argument let new_name = tcx.opt_item_name(trait_def_id)?; let trait_m = trait_m.def_id.as_local()?; - let trait_m = tcx.hir().expect_trait_item(trait_m); + let trait_m = tcx.hir_expect_trait_item(trait_m); let impl_m = impl_m.def_id.as_local()?; - let impl_m = tcx.hir().expect_impl_item(impl_m); + let impl_m = tcx.hir_expect_impl_item(impl_m); // in case there are no generics, take the spot between the function name // and the opening paren of the argument list @@ -1613,7 +1613,7 @@ fn compare_synthetic_generics<'tcx>( err.span_label(impl_span, "expected `impl Trait`, found generic parameter"); let _: Option<_> = try { let impl_m = impl_m.def_id.as_local()?; - let impl_m = tcx.hir().expect_impl_item(impl_m); + let impl_m = tcx.hir_expect_impl_item(impl_m); let (sig, _) = impl_m.expect_fn(); let input_tys = sig.decl.inputs; @@ -1855,7 +1855,7 @@ fn compare_const_predicate_entailment<'tcx>( debug!(?impl_ty, ?trait_ty); // Locate the Span containing just the type of the offending impl - let (ty, _) = tcx.hir().expect_impl_item(impl_ct_def_id).expect_const(); + let (ty, 
_) = tcx.hir_expect_impl_item(impl_ct_def_id).expect_const(); cause.span = ty.span; let mut diag = struct_span_code_err!( @@ -1868,7 +1868,7 @@ fn compare_const_predicate_entailment<'tcx>( let trait_c_span = trait_ct.def_id.as_local().map(|trait_ct_def_id| { // Add a label to the Span containing just the type of the const - let (ty, _) = tcx.hir().expect_trait_item(trait_ct_def_id).expect_const(); + let (ty, _) = tcx.hir_expect_trait_item(trait_ct_def_id).expect_const(); ty.span }); diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index fedc197e7ef..42d785c8dd0 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -645,7 +645,6 @@ pub fn check_intrinsic_type( | sym::simd_xor | sym::simd_fmin | sym::simd_fmax - | sym::simd_fpow | sym::simd_saturating_add | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)), sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)), @@ -668,7 +667,6 @@ pub fn check_intrinsic_type( | sym::simd_floor | sym::simd_round | sym::simd_trunc => (1, 0, vec![param(0)], param(0)), - sym::simd_fpowi => (1, 0, vec![param(0), tcx.types.i32], param(0)), sym::simd_fma | sym::simd_relaxed_fma => { (1, 0, vec![param(0), param(0), param(0)], param(0)) } diff --git a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs index 51194740450..d63165f0f16 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsicck.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsicck.rs @@ -1,12 +1,13 @@ -use std::assert_matches::debug_assert_matches; - use rustc_abi::FieldIdx; use rustc_ast::InlineAsmTemplatePiece; use rustc_data_structures::fx::FxIndexSet; use rustc_hir::def_id::DefId; use rustc_hir::{self as hir, LangItem}; +use rustc_infer::infer::InferCtxt; use rustc_middle::bug; -use rustc_middle::ty::{self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitableExt, UintTy}; +use rustc_middle::ty::{ + self, Article, FloatTy, IntTy, Ty, TyCtxt, TypeVisitableExt, TypeckResults, UintTy, +}; use rustc_session::lint; use rustc_span::def_id::LocalDefId; use rustc_span::{Symbol, sym}; @@ -16,11 +17,11 @@ use rustc_target::asm::{ use crate::errors::RegisterTypeUnstable; -pub struct InlineAsmCtxt<'a, 'tcx: 'a> { - tcx: TyCtxt<'tcx>, +pub struct InlineAsmCtxt<'a, 'tcx> { typing_env: ty::TypingEnv<'tcx>, target_features: &'tcx FxIndexSet<Symbol>, - expr_ty: Box<dyn Fn(&hir::Expr<'tcx>) -> Ty<'tcx> + 'a>, + infcx: &'a InferCtxt<'tcx>, + typeck_results: &'a TypeckResults<'tcx>, } enum NonAsmTypeReason<'tcx> { @@ -32,28 +33,38 @@ enum NonAsmTypeReason<'tcx> { impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { pub fn new( - tcx: TyCtxt<'tcx>, def_id: LocalDefId, + infcx: &'a InferCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, - get_operand_ty: impl Fn(&hir::Expr<'tcx>) -> Ty<'tcx> + 'a, + typeck_results: &'a TypeckResults<'tcx>, ) -> Self { InlineAsmCtxt { - tcx, typing_env, - target_features: tcx.asm_target_features(def_id), - expr_ty: Box::new(get_operand_ty), + target_features: infcx.tcx.asm_target_features(def_id), + infcx, + typeck_results, } } + fn tcx(&self) -> TyCtxt<'tcx> { + self.infcx.tcx + } + fn expr_ty(&self, expr: &hir::Expr<'tcx>) -> Ty<'tcx> { - (self.expr_ty)(expr) + let ty = self.typeck_results.expr_ty_adjusted(expr); + let ty = self.infcx.resolve_vars_if_possible(ty); + if ty.has_non_region_infer() { + Ty::new_misc_error(self.tcx()) + } else { + self.tcx().erase_regions(ty) + } 
} // FIXME(compiler-errors): This could use `<$ty as Pointee>::Metadata == ()` fn is_thin_ptr_ty(&self, ty: Ty<'tcx>) -> bool { // Type still may have region variables, but `Sized` does not depend // on those, so just erase them before querying. - if ty.is_sized(self.tcx, self.typing_env) { + if ty.is_sized(self.tcx(), self.typing_env) { return true; } if let ty::Foreign(..) = ty.kind() { @@ -63,7 +74,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { } fn get_asm_ty(&self, ty: Ty<'tcx>) -> Result<InlineAsmType, NonAsmTypeReason<'tcx>> { - let asm_ty_isize = match self.tcx.sess.target.pointer_width { + let asm_ty_isize = match self.tcx().sess.target.pointer_width { 16 => InlineAsmType::I16, 32 => InlineAsmType::I32, 64 => InlineAsmType::I64, @@ -92,12 +103,12 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { ty::Adt(adt, args) if adt.repr().simd() => { let fields = &adt.non_enum_variant().fields; let field = &fields[FieldIdx::ZERO]; - let elem_ty = field.ty(self.tcx, args); + let elem_ty = field.ty(self.tcx(), args); let (size, ty) = match elem_ty.kind() { ty::Array(ty, len) => { - let len = self.tcx.normalize_erasing_regions(self.typing_env, *len); - if let Some(len) = len.try_to_target_usize(self.tcx) { + let len = self.tcx().normalize_erasing_regions(self.typing_env, *len); + if let Some(len) = len.try_to_target_usize(self.tcx()) { (len, *ty) } else { return Err(NonAsmTypeReason::UnevaluatedSIMDArrayLength( @@ -117,7 +128,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { Ok(InlineAsmType::VecI128(size)) } ty::Int(IntTy::Isize) | ty::Uint(UintTy::Usize) => { - Ok(match self.tcx.sess.target.pointer_width { + Ok(match self.tcx().sess.target.pointer_width { 16 => InlineAsmType::VecI16(size), 32 => InlineAsmType::VecI32(size), 64 => InlineAsmType::VecI64(size), @@ -154,9 +165,9 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { // `!` is allowed for input but not for output (issue #87802) ty::Never if is_input => return None, _ if ty.references_error() => return None, - ty::Adt(adt, args) if self.tcx.is_lang_item(adt.did(), LangItem::MaybeUninit) => { + ty::Adt(adt, args) if self.tcx().is_lang_item(adt.did(), LangItem::MaybeUninit) => { let fields = &adt.non_enum_variant().fields; - let ty = fields[FieldIdx::from_u32(1)].ty(self.tcx, args); + let ty = fields[FieldIdx::from_u32(1)].ty(self.tcx(), args); // FIXME: Are we just trying to map to the `T` in `MaybeUninit<T>`? // If so, just get it from the args. 
let ty::Adt(ty, args) = ty.kind() else { @@ -167,7 +178,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { "expected first field of `MaybeUninit` to be `ManuallyDrop`" ); let fields = &ty.non_enum_variant().fields; - let ty = fields[FieldIdx::ZERO].ty(self.tcx, args); + let ty = fields[FieldIdx::ZERO].ty(self.tcx(), args); self.get_asm_ty(ty) } _ => self.get_asm_ty(ty), @@ -178,9 +189,9 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { match reason { NonAsmTypeReason::UnevaluatedSIMDArrayLength(did, len) => { let msg = format!("cannot evaluate SIMD vector length `{len}`"); - self.tcx + self.infcx .dcx() - .struct_span_err(self.tcx.def_span(did), msg) + .struct_span_err(self.tcx().def_span(did), msg) .with_span_note( expr.span, "SIMD vector length needs to be known statically for use in `asm!`", @@ -189,7 +200,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { } NonAsmTypeReason::Invalid(ty) => { let msg = format!("cannot use value of type `{ty}` for inline assembly"); - self.tcx.dcx().struct_span_err(expr.span, msg).with_note( + self.infcx.dcx().struct_span_err(expr.span, msg).with_note( "only integers, floats, SIMD vectors, pointers and function pointers \ can be used as arguments for inline assembly", ).emit(); @@ -198,7 +209,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { let msg = format!( "cannot use value of unsized pointer type `{ty}` for inline assembly" ); - self.tcx + self.infcx .dcx() .struct_span_err(expr.span, msg) .with_note("only sized pointers can be used in inline assembly") @@ -208,8 +219,8 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { let msg = format!( "cannot use SIMD vector with element type `{ty}` for inline assembly" ); - self.tcx.dcx() - .struct_span_err(self.tcx.def_span(did), msg).with_span_note( + self.infcx.dcx() + .struct_span_err(self.tcx().def_span(did), msg).with_span_note( expr.span, "only integers, floats, SIMD vectors, pointers and function pointers \ can be used as arguments for inline assembly", @@ -222,9 +233,9 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { // Check that the type implements Copy. The only case where this can // possibly fail is for SIMD types which don't #[derive(Copy)]. - if !self.tcx.type_is_copy_modulo_regions(self.typing_env, ty) { + if !self.tcx().type_is_copy_modulo_regions(self.typing_env, ty) { let msg = "arguments for inline assembly must be copyable"; - self.tcx + self.infcx .dcx() .struct_span_err(expr.span, msg) .with_note(format!("`{ty}` does not implement the Copy trait")) @@ -244,7 +255,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { if in_asm_ty != asm_ty { let msg = "incompatible types for asm inout argument"; let in_expr_ty = self.expr_ty(in_expr); - self.tcx + self.infcx .dcx() .struct_span_err(vec![in_expr.span, expr.span], msg) .with_span_label(in_expr.span, format!("type `{in_expr_ty}`")) @@ -263,21 +274,21 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { // Check the type against the list of types supported by the selected // register class. 
- let asm_arch = self.tcx.sess.asm_arch.unwrap(); - let allow_experimental_reg = self.tcx.features().asm_experimental_reg(); + let asm_arch = self.tcx().sess.asm_arch.unwrap(); + let allow_experimental_reg = self.tcx().features().asm_experimental_reg(); let reg_class = reg.reg_class(); let supported_tys = reg_class.supported_types(asm_arch, allow_experimental_reg); let Some((_, feature)) = supported_tys.iter().find(|&&(t, _)| t == asm_ty) else { let mut err = if !allow_experimental_reg && reg_class.supported_types(asm_arch, true).iter().any(|&(t, _)| t == asm_ty) { - self.tcx.sess.create_feature_err( + self.tcx().sess.create_feature_err( RegisterTypeUnstable { span: expr.span, ty }, sym::asm_experimental_reg, ) } else { let msg = format!("type `{ty}` cannot be used with this register class"); - let mut err = self.tcx.dcx().struct_span_err(expr.span, msg); + let mut err = self.infcx.dcx().struct_span_err(expr.span, msg); let supported_tys: Vec<_> = supported_tys.iter().map(|(t, _)| t.to_string()).collect(); err.note(format!( @@ -307,7 +318,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { if let Some(feature) = feature { if !self.target_features.contains(feature) { let msg = format!("`{feature}` target feature is not enabled"); - self.tcx + self.infcx .dcx() .struct_span_err(expr.span, msg) .with_note(format!( @@ -344,7 +355,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { result: default_result, size: default_size, } = reg_class.default_modifier(asm_arch).unwrap(); - self.tcx.node_span_lint( + self.tcx().node_span_lint( lint::builtin::ASM_SUB_REGISTER, expr.hir_id, spans, @@ -366,11 +377,11 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { } pub fn check_asm(&self, asm: &hir::InlineAsm<'tcx>) { - let Some(asm_arch) = self.tcx.sess.asm_arch else { - self.tcx.dcx().delayed_bug("target architecture does not support asm"); + let Some(asm_arch) = self.tcx().sess.asm_arch else { + self.infcx.dcx().delayed_bug("target architecture does not support asm"); return; }; - let allow_experimental_reg = self.tcx.features().asm_experimental_reg(); + let allow_experimental_reg = self.tcx().features().asm_experimental_reg(); for (idx, &(op, op_sp)) in asm.operands.iter().enumerate() { // Validate register classes against currently enabled target // features. We check that at least one type is available for @@ -393,13 +404,13 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { } if let Err(msg) = reg.validate( asm_arch, - self.tcx.sess.relocation_model(), + self.tcx().sess.relocation_model(), self.target_features, - &self.tcx.sess.target, + &self.tcx().sess.target, op.is_clobber(), ) { let msg = format!("cannot use register `{}`: {}", reg.name(), msg); - self.tcx.dcx().span_err(op_sp, msg); + self.infcx.dcx().span_err(op_sp, msg); continue; } } @@ -439,7 +450,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { reg_class.name(), feature ); - self.tcx.dcx().span_err(op_sp, msg); + self.infcx.dcx().span_err(op_sp, msg); // register isn't enabled, don't do more checks continue; } @@ -453,7 +464,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { .intersperse(", ") .collect::<String>(), ); - self.tcx.dcx().span_err(op_sp, msg); + self.infcx.dcx().span_err(op_sp, msg); // register isn't enabled, don't do more checks continue; } @@ -487,12 +498,23 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { ); } } - // Typeck has checked that Const operands are integers. 
hir::InlineAsmOperand::Const { anon_const } => { - debug_assert_matches!( - self.tcx.type_of(anon_const.def_id).instantiate_identity().kind(), - ty::Error(_) | ty::Int(_) | ty::Uint(_) - ); + let ty = self.expr_ty(self.tcx().hir_body(anon_const.body).value); + match ty.kind() { + ty::Error(_) => {} + _ if ty.is_integral() => {} + _ => { + self.infcx + .dcx() + .struct_span_err(op_sp, "invalid type for `const` operand") + .with_span_label( + self.tcx().def_span(anon_const.def_id), + format!("is {} `{}`", ty.kind().article(), ty), + ) + .with_help("`const` operands must be of an integer type") + .emit(); + } + } } // Typeck has checked that SymFn refers to a function. hir::InlineAsmOperand::SymFn { expr } => { @@ -501,7 +523,7 @@ impl<'a, 'tcx> InlineAsmCtxt<'a, 'tcx> { ty::FnDef(..) => {} ty::Error(_) => {} _ => { - self.tcx + self.infcx .dcx() .struct_span_err(op_sp, "invalid `sym` operand") .with_span_label( diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs index 1c5455710db..9c28fac809d 100644 --- a/compiler/rustc_hir_analysis/src/check/mod.rs +++ b/compiler/rustc_hir_analysis/src/check/mod.rs @@ -62,9 +62,9 @@ a type parameter). */ +pub mod always_applicable; mod check; mod compare_impl_item; -pub mod dropck; mod entry; pub mod intrinsic; pub mod intrinsicck; @@ -113,11 +113,11 @@ pub fn provide(providers: &mut Providers) { } fn adt_destructor(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<ty::Destructor> { - tcx.calculate_dtor(def_id.to_def_id(), dropck::check_drop_impl) + tcx.calculate_dtor(def_id.to_def_id(), always_applicable::check_drop_impl) } fn adt_async_destructor(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<ty::AsyncDestructor> { - tcx.calculate_async_dtor(def_id.to_def_id(), dropck::check_drop_impl) + tcx.calculate_async_dtor(def_id.to_def_id(), always_applicable::check_drop_impl) } /// Given a `DefId` for an opaque type in return position, find its parent item's return @@ -145,7 +145,7 @@ pub fn forbid_intrinsic_abi(tcx: TyCtxt<'_>, sp: Span, abi: ExternAbi) { } } -fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) { +pub(super) fn maybe_check_static_with_link_section(tcx: TyCtxt<'_>, id: LocalDefId) { // Only restricted on wasm target for now if !tcx.sess.target.is_like_wasm { return; diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index e6ea6eddcaa..fd5ffdc2d7a 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -126,13 +126,14 @@ where let infcx_compat = infcx.fork(); - // We specifically want to call the non-compat version of `implied_bounds_tys`; we do this always. + // We specifically want to *disable* the implied bounds hack, first, + // so we can detect when failures are due to bevy's implied bounds. let outlives_env = OutlivesEnvironment::new_with_implied_bounds_compat( &infcx, body_def_id, param_env, assumed_wf_types.iter().copied(), - false, + true, ); lint_redundant_lifetimes(tcx, body_def_id, &outlives_env); @@ -142,53 +143,22 @@ where return Ok(()); } - let is_bevy = assumed_wf_types.visit_with(&mut ContainsBevyParamSet { tcx }).is_break(); - - // If we have set `no_implied_bounds_compat`, then do not attempt compatibility. - // We could also just always enter if `is_bevy`, and call `implied_bounds_tys`, - // but that does result in slightly more work when this option is set and - // just obscures what we mean here anyways. Let's just be explicit. 
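
A small sketch of the rule that the removed debug-assert is replaced with as a proper diagnostic: `const` operands to `asm!` must have an integer type (an x86-64 target and the stable `const` operand form are assumed):

    use std::arch::asm;

    pub fn mask_low_byte(x: u64) -> u64 {
        let out: u64;
        unsafe {
            // OK: the `const` operand is an integer and is substituted into the
            // template as an immediate.
            asm!("and {0}, {1}", inout(reg) x => out, const 0xffu64);
        }
        out
    }

    // A non-integer operand such as `const 1.0f32` is now reported up front as
    // "invalid type for `const` operand" with the help
    // "`const` operands must be of an integer type".
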
- if is_bevy && !infcx.tcx.sess.opts.unstable_opts.no_implied_bounds_compat { - let outlives_env = OutlivesEnvironment::new_with_implied_bounds_compat( - &infcx, - body_def_id, - param_env, - assumed_wf_types, - true, - ); - let errors_compat = infcx_compat.resolve_regions_with_outlives_env(&outlives_env); - if errors_compat.is_empty() { - Ok(()) - } else { - Err(infcx_compat.err_ctxt().report_region_errors(body_def_id, &errors_compat)) - } + let outlives_env = OutlivesEnvironment::new_with_implied_bounds_compat( + &infcx_compat, + body_def_id, + param_env, + assumed_wf_types, + // Don't *disable* the implied bounds hack; though this will only apply + // the implied bounds hack if this contains `bevy_ecs`'s `ParamSet` type. + false, + ); + let errors_compat = infcx_compat.resolve_regions_with_outlives_env(&outlives_env); + if errors_compat.is_empty() { + // FIXME: Once we fix bevy, this would be the place to insert a warning + // to upgrade bevy. + Ok(()) } else { - Err(infcx.err_ctxt().report_region_errors(body_def_id, &errors)) - } -} - -struct ContainsBevyParamSet<'tcx> { - tcx: TyCtxt<'tcx>, -} - -impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ContainsBevyParamSet<'tcx> { - type Result = ControlFlow<()>; - - fn visit_ty(&mut self, t: Ty<'tcx>) -> Self::Result { - // We only care to match `ParamSet<T>` or `&ParamSet<T>`. - match t.kind() { - ty::Adt(def, _) => { - if self.tcx.item_name(def.did()) == sym::ParamSet - && self.tcx.crate_name(def.did().krate) == sym::bevy_ecs - { - return ControlFlow::Break(()); - } - } - ty::Ref(_, ty, _) => ty.visit_with(self)?, - _ => {} - } - - ControlFlow::Continue(()) + Err(infcx_compat.err_ctxt().report_region_errors(body_def_id, &errors_compat)) } } @@ -201,7 +171,7 @@ fn check_well_formed(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Result<(), ErrorGua hir::Node::ImplItem(item) => check_impl_item(tcx, item), hir::Node::ForeignItem(item) => check_foreign_item(tcx, item), hir::Node::OpaqueTy(_) => Ok(crate::check::check::check_item_type(tcx, def_id)), - _ => unreachable!(), + _ => unreachable!("{node:?}"), }; if let Some(generics) = node.generics() { @@ -543,7 +513,7 @@ fn check_gat_where_clauses(tcx: TyCtxt<'_>, trait_def_id: LocalDefId) { continue; } - let gat_item_hir = tcx.hir().expect_trait_item(gat_def_id); + let gat_item_hir = tcx.hir_expect_trait_item(gat_def_id); debug!(?required_bounds); let param_env = tcx.param_env(gat_def_id); @@ -966,7 +936,7 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) -> Result<(), let ty = tcx.type_of(param.def_id).instantiate_identity(); if tcx.features().unsized_const_params() { - enter_wf_checking_ctxt(tcx, hir_ty.span, param.def_id, |wfcx| { + enter_wf_checking_ctxt(tcx, hir_ty.span, tcx.local_parent(param.def_id), |wfcx| { wfcx.register_bound( ObligationCause::new( hir_ty.span, @@ -980,7 +950,7 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) -> Result<(), Ok(()) }) } else if tcx.features().adt_const_params() { - enter_wf_checking_ctxt(tcx, hir_ty.span, param.def_id, |wfcx| { + enter_wf_checking_ctxt(tcx, hir_ty.span, tcx.local_parent(param.def_id), |wfcx| { wfcx.register_bound( ObligationCause::new( hir_ty.span, @@ -1108,7 +1078,13 @@ fn check_associated_item( let ty = tcx.type_of(item.def_id).instantiate_identity(); let ty = wfcx.normalize(span, Some(WellFormedLoc::Ty(item_id)), ty); wfcx.register_wf_obligation(span, loc, ty.into()); - check_sized_if_body(wfcx, item.def_id.expect_local(), ty, Some(span)); + check_sized_if_body( + wfcx, + item.def_id.expect_local(), + ty, + 
Some(span), + ObligationCauseCode::SizedConstOrStatic, + ); Ok(()) } ty::AssocKind::Fn => { @@ -1354,7 +1330,7 @@ fn check_item_type( traits::ObligationCause::new( ty_span, wfcx.body_def_id, - ObligationCauseCode::WellFormed(None), + ObligationCauseCode::SizedConstOrStatic, ), wfcx.param_env, item_ty, @@ -1698,6 +1674,7 @@ fn check_fn_or_method<'tcx>( hir::FnRetTy::Return(ty) => Some(ty.span), hir::FnRetTy::DefaultReturn(_) => None, }, + ObligationCauseCode::SizedReturnType, ); } @@ -1706,13 +1683,14 @@ fn check_sized_if_body<'tcx>( def_id: LocalDefId, ty: Ty<'tcx>, maybe_span: Option<Span>, + code: ObligationCauseCode<'tcx>, ) { let tcx = wfcx.tcx(); if let Some(body) = tcx.hir_maybe_body_owned_by(def_id) { let span = maybe_span.unwrap_or(body.value.span); wfcx.register_bound( - ObligationCause::new(span, def_id, traits::ObligationCauseCode::SizedReturnType), + ObligationCause::new(span, def_id, code), wfcx.param_env, ty, tcx.require_lang_item(LangItem::Sized, Some(span)), diff --git a/compiler/rustc_hir_analysis/src/check_unused.rs b/compiler/rustc_hir_analysis/src/check_unused.rs index 750c09887a1..464ffa8711a 100644 --- a/compiler/rustc_hir_analysis/src/check_unused.rs +++ b/compiler/rustc_hir_analysis/src/check_unused.rs @@ -31,7 +31,7 @@ fn check_unused_traits(tcx: TyCtxt<'_>, (): ()) { if used_trait_imports.contains(&id) { continue; } - let item = tcx.hir().expect_item(id); + let item = tcx.hir_expect_item(id); if item.span.is_dummy() { continue; } diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs index cee2f487639..c918abe4c07 100644 --- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs +++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs @@ -82,7 +82,7 @@ fn visit_implementation_of_drop(checker: &Checker<'_>) -> Result<(), ErrorGuaran _ => {} } - let impl_ = tcx.hir().expect_item(impl_did).expect_impl(); + let impl_ = tcx.hir_expect_item(impl_did).expect_impl(); Err(tcx.dcx().emit_err(errors::DropImplOnWrongItem { span: impl_.self_ty.span })) } @@ -109,7 +109,7 @@ fn visit_implementation_of_copy(checker: &Checker<'_>) -> Result<(), ErrorGuaran match type_allowed_to_implement_copy(tcx, param_env, self_type, cause, impl_header.safety) { Ok(()) => Ok(()), Err(CopyImplementationError::InfringingFields(fields)) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(infringing_fields_error( tcx, fields.into_iter().map(|(field, ty, reason)| (tcx.def_span(field.did), ty, reason)), @@ -119,15 +119,15 @@ fn visit_implementation_of_copy(checker: &Checker<'_>) -> Result<(), ErrorGuaran )) } Err(CopyImplementationError::NotAnAdt) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(tcx.dcx().emit_err(errors::CopyImplOnNonAdt { span })) } Err(CopyImplementationError::HasDestructor) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(tcx.dcx().emit_err(errors::CopyImplOnTypeWithDtor { span })) } Err(CopyImplementationError::HasUnsafeFields) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(tcx .dcx() .span_delayed_bug(span, format!("cannot implement `Copy` for `{}`", self_type))) @@ -157,7 +157,7 @@ fn 
visit_implementation_of_const_param_ty( match type_allowed_to_implement_const_param_ty(tcx, param_env, self_type, kind, cause) { Ok(()) => Ok(()), Err(ConstParamTyImplementationError::InfrigingFields(fields)) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(infringing_fields_error( tcx, fields.into_iter().map(|(field, ty, reason)| (tcx.def_span(field.did), ty, reason)), @@ -167,11 +167,11 @@ fn visit_implementation_of_const_param_ty( )) } Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(tcx.dcx().emit_err(errors::ConstParamTyImplOnNonAdt { span })) } Err(ConstParamTyImplementationError::InvalidInnerTyOfBuiltinTy(infringing_tys)) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(infringing_fields_error( tcx, infringing_tys.into_iter().map(|(ty, reason)| (span, ty, reason)), @@ -181,7 +181,7 @@ fn visit_implementation_of_const_param_ty( )) } Err(ConstParamTyImplementationError::UnsizedConstParamsFeatureRequired) => { - let span = tcx.hir().expect_item(impl_did).expect_impl().self_ty.span; + let span = tcx.hir_expect_item(impl_did).expect_impl().self_ty.span; Err(tcx.dcx().emit_err(errors::ConstParamTyImplOnUnsized { span })) } } @@ -526,7 +526,7 @@ pub(crate) fn coerce_unsized_info<'tcx>( note: true, })); } else if diff_fields.len() > 1 { - let item = tcx.hir().expect_item(impl_did); + let item = tcx.hir_expect_item(impl_did); let span = if let ItemKind::Impl(hir::Impl { of_trait: Some(t), .. 
}) = &item.kind { t.path.span } else { diff --git a/compiler/rustc_hir_analysis/src/coherence/mod.rs b/compiler/rustc_hir_analysis/src/coherence/mod.rs index 0245d4c9fe4..b5c6c2f3861 100644 --- a/compiler/rustc_hir_analysis/src/coherence/mod.rs +++ b/compiler/rustc_hir_analysis/src/coherence/mod.rs @@ -16,6 +16,7 @@ use rustc_span::{ErrorGuaranteed, sym}; use rustc_type_ir::elaborate; use tracing::debug; +use crate::check::always_applicable; use crate::errors; mod builtin; @@ -24,11 +25,12 @@ mod inherent_impls_overlap; mod orphan; mod unsafety; -fn check_impl( - tcx: TyCtxt<'_>, +fn check_impl<'tcx>( + tcx: TyCtxt<'tcx>, impl_def_id: LocalDefId, - trait_ref: ty::TraitRef<'_>, - trait_def: &ty::TraitDef, + trait_ref: ty::TraitRef<'tcx>, + trait_def: &'tcx ty::TraitDef, + polarity: ty::ImplPolarity, ) -> Result<(), ErrorGuaranteed> { debug!( "(checking implementation) adding impl for trait '{:?}', item '{}'", @@ -44,6 +46,12 @@ fn check_impl( enforce_trait_manually_implementable(tcx, impl_def_id, trait_ref.def_id, trait_def) .and(enforce_empty_impls_for_marker_traits(tcx, impl_def_id, trait_ref.def_id, trait_def)) + .and(always_applicable::check_negative_auto_trait_impl( + tcx, + impl_def_id, + trait_ref, + polarity, + )) } fn enforce_trait_manually_implementable( @@ -154,16 +162,16 @@ fn coherent_trait(tcx: TyCtxt<'_>, def_id: DefId) -> Result<(), ErrorGuaranteed> let mut res = tcx.ensure_ok().specialization_graph_of(def_id); for &impl_def_id in impls { - let trait_header = tcx.impl_trait_header(impl_def_id).unwrap(); - let trait_ref = trait_header.trait_ref.instantiate_identity(); + let impl_header = tcx.impl_trait_header(impl_def_id).unwrap(); + let trait_ref = impl_header.trait_ref.instantiate_identity(); let trait_def = tcx.trait_def(trait_ref.def_id); res = res - .and(check_impl(tcx, impl_def_id, trait_ref, trait_def)) + .and(check_impl(tcx, impl_def_id, trait_ref, trait_def, impl_header.polarity)) .and(check_object_overlap(tcx, impl_def_id, trait_ref)) - .and(unsafety::check_item(tcx, impl_def_id, trait_header, trait_def)) + .and(unsafety::check_item(tcx, impl_def_id, impl_header, trait_def)) .and(tcx.ensure_ok().orphan_check_impl(impl_def_id)) - .and(builtin::check_trait(tcx, def_id, impl_def_id, trait_header)); + .and(builtin::check_trait(tcx, def_id, impl_def_id, impl_header)); } res diff --git a/compiler/rustc_hir_analysis/src/coherence/orphan.rs b/compiler/rustc_hir_analysis/src/coherence/orphan.rs index dbf7a7378f5..0b7fc44460e 100644 --- a/compiler/rustc_hir_analysis/src/coherence/orphan.rs +++ b/compiler/rustc_hir_analysis/src/coherence/orphan.rs @@ -376,7 +376,7 @@ fn emit_orphan_check_error<'tcx>( ) -> ErrorGuaranteed { match err { traits::OrphanCheckErr::NonLocalInputType(tys) => { - let item = tcx.hir().expect_item(impl_def_id); + let item = tcx.hir_expect_item(impl_def_id); let impl_ = item.expect_impl(); let hir_trait_ref = impl_.of_trait.as_ref().unwrap(); diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index 2a2879c6577..cfb6cf8a287 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -45,6 +45,7 @@ use tracing::{debug, instrument}; use crate::check::intrinsic::intrinsic_operation_unsafety; use crate::errors; +use crate::hir_ty_lowering::errors::assoc_kind_str; use crate::hir_ty_lowering::{FeedConstTy, HirTyLowerer, RegionInferReason}; pub(crate) mod dump; @@ -443,13 +444,14 @@ impl<'tcx> HirTyLowerer<'tcx> for ItemCtxt<'tcx> { 
self.tcx.at(span).type_param_predicates((self.item_def_id, def_id, assoc_name)) } - fn lower_assoc_ty( + fn lower_assoc_shared( &self, span: Span, item_def_id: DefId, - item_segment: &hir::PathSegment<'tcx>, + item_segment: &rustc_hir::PathSegment<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>, - ) -> Ty<'tcx> { + kind: ty::AssocKind, + ) -> Result<(DefId, ty::GenericArgsRef<'tcx>), ErrorGuaranteed> { if let Some(trait_ref) = poly_trait_ref.no_bound_vars() { let item_args = self.lowerer().lower_generic_args_of_assoc_item( span, @@ -457,7 +459,7 @@ impl<'tcx> HirTyLowerer<'tcx> for ItemCtxt<'tcx> { item_segment, trait_ref.args, ); - Ty::new_projection_from_args(self.tcx(), item_def_id, item_args) + Ok((item_def_id, item_args)) } else { // There are no late-bound regions; we can just ignore the binder. let (mut mpart_sugg, mut inferred_sugg) = (None, None); @@ -467,8 +469,7 @@ impl<'tcx> HirTyLowerer<'tcx> for ItemCtxt<'tcx> { hir::Node::Field(_) | hir::Node::Ctor(_) | hir::Node::Variant(_) => { let item = self .tcx - .hir() - .expect_item(self.tcx.hir_get_parent_item(self.hir_id()).def_id); + .hir_expect_item(self.tcx.hir_get_parent_item(self.hir_id()).def_id); match &item.kind { hir::ItemKind::Enum(_, generics) | hir::ItemKind::Struct(_, generics) @@ -518,16 +519,14 @@ impl<'tcx> HirTyLowerer<'tcx> for ItemCtxt<'tcx> { } _ => {} } - Ty::new_error( - self.tcx(), - self.tcx().dcx().emit_err(errors::AssociatedItemTraitUninferredGenericParams { - span, - inferred_sugg, - bound, - mpart_sugg, - what: "type", - }), - ) + + Err(self.tcx().dcx().emit_err(errors::AssociatedItemTraitUninferredGenericParams { + span, + inferred_sugg, + bound, + mpart_sugg, + what: assoc_kind_str(kind), + })) } } @@ -1074,7 +1073,6 @@ fn lower_variant<'tcx>( def.ctor().map(|(kind, _, def_id)| (kind, def_id.to_def_id())), discr, fields, - adt_kind, parent_did.to_def_id(), recovered, adt_kind == AdtKind::Struct && tcx.has_attr(parent_did, sym::non_exhaustive) @@ -1144,7 +1142,7 @@ fn adt_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::AdtDef<'_> { } fn trait_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::TraitDef { - let item = tcx.hir().expect_item(def_id); + let item = tcx.hir_expect_item(def_id); let (is_alias, is_auto, safety, items) = match item.kind { hir::ItemKind::Trait(is_auto, safety, .., items) => { @@ -1343,7 +1341,7 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, ty::PolyFn ), ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(sig, _, _), .. 
}) => { - let abi = tcx.hir().get_foreign_abi(hir_id); + let abi = tcx.hir_get_foreign_abi(hir_id); compute_sig_of_foreign_fn_decl(tcx, def_id, sig.decl, abi, sig.header.safety()) } @@ -1598,7 +1596,7 @@ pub fn suggest_impl_trait<'tcx>( fn impl_trait_header(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Option<ty::ImplTraitHeader<'_>> { let icx = ItemCtxt::new(tcx, def_id); - let item = tcx.hir().expect_item(def_id); + let item = tcx.hir_expect_item(def_id); let impl_ = item.expect_impl(); impl_.of_trait.as_ref().map(|ast_trait_ref| { let selfty = tcx.type_of(def_id).instantiate_identity(); @@ -1823,6 +1821,9 @@ fn const_param_default<'tcx>( ), }; let icx = ItemCtxt::new(tcx, def_id); - let ct = icx.lowerer().lower_const_arg(default_ct, FeedConstTy::Param(def_id.to_def_id())); + let identity_args = ty::GenericArgs::identity_for_item(tcx, def_id); + let ct = icx + .lowerer() + .lower_const_arg(default_ct, FeedConstTy::Param(def_id.to_def_id(), identity_args)); ty::EarlyBinder::bind(ct) } diff --git a/compiler/rustc_hir_analysis/src/collect/dump.rs b/compiler/rustc_hir_analysis/src/collect/dump.rs index 4debd3977f5..7cbd31de6ba 100644 --- a/compiler/rustc_hir_analysis/src/collect/dump.rs +++ b/compiler/rustc_hir_analysis/src/collect/dump.rs @@ -13,7 +13,7 @@ pub(crate) fn opaque_hidden_types(tcx: TyCtxt<'_>) { for id in tcx.hir_crate_items(()).opaques() { if let hir::OpaqueTyOrigin::FnReturn { parent: fn_def_id, .. } | hir::OpaqueTyOrigin::AsyncFn { parent: fn_def_id, .. } = - tcx.hir().expect_opaque_ty(id).origin + tcx.hir_expect_opaque_ty(id).origin && let hir::Node::TraitItem(trait_item) = tcx.hir_node_by_def_id(fn_def_id) && let (_, hir::TraitFn::Required(..)) = trait_item.expect_fn() { diff --git a/compiler/rustc_hir_analysis/src/collect/generics_of.rs b/compiler/rustc_hir_analysis/src/collect/generics_of.rs index 2cdd9a3a934..a153ce8ea90 100644 --- a/compiler/rustc_hir_analysis/src/collect/generics_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/generics_of.rs @@ -186,18 +186,9 @@ pub(super) fn generics_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Generics { { Some(parent_did) } - // Exclude `GlobalAsm` here which cannot have generics. - Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. }) - if asm.operands.iter().any(|(op, _op_sp)| match op { - hir::InlineAsmOperand::Const { anon_const } => { - anon_const.hir_id == hir_id - } - _ => false, - }) => - { - Some(parent_did) - } Node::TyPat(_) => Some(parent_did), + // Field default values inherit the ADT's generics. + Node::Field(_) => Some(parent_did), _ => None, } } diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs index 5b511d27074..2022127a15b 100644 --- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs @@ -223,10 +223,7 @@ fn gather_explicit_predicates_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Gen } hir::GenericParamKind::Const { .. } => { let param_def_id = param.def_id.to_def_id(); - let ct_ty = tcx - .type_of(param_def_id) - .no_bound_vars() - .expect("const parameters cannot be generic"); + let ct_ty = tcx.type_of(param_def_id).instantiate_identity(); let ct = icx.lowerer().lower_const_param(param_def_id, param.hir_id); predicates .insert((ty::ClauseKind::ConstArgHasType(ct, ct_ty).upcast(tcx), param.span)); @@ -940,12 +937,6 @@ impl<'tcx> ItemCtxt<'tcx> { } } -/// Compute the conditions that need to hold for a conditionally-const item to be const. 
-/// That is, compute the set of `~const` where clauses for a given item. -/// -/// This query also computes the `~const` where clauses for associated types, which are -/// not "const", but which have item bounds which may be `~const`. These must hold for -/// the `~const` item bound to hold. pub(super) fn const_conditions<'tcx>( tcx: TyCtxt<'tcx>, def_id: LocalDefId, @@ -1063,7 +1054,9 @@ pub(super) fn explicit_implied_const_bounds<'tcx>( def_id: LocalDefId, ) -> ty::EarlyBinder<'tcx, &'tcx [(ty::PolyTraitRef<'tcx>, Span)]> { if !tcx.is_conditionally_const(def_id) { - bug!("const_conditions invoked for item that is not conditionally const: {def_id:?}"); + bug!( + "explicit_implied_const_bounds invoked for item that is not conditionally const: {def_id:?}" + ); } let bounds = match tcx.opt_rpitit_info(def_id.to_def_id()) { diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs index 6b61d317d3f..883a1acdb30 100644 --- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs +++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs @@ -1462,7 +1462,8 @@ impl<'a, 'tcx> BoundVarContext<'a, 'tcx> { for &(opaque_def_id, captures) in opaque_capture_scopes.iter().rev() { let mut captures = captures.borrow_mut(); let remapped = *captures.entry(lifetime).or_insert_with(|| { - let feed = self.tcx.create_def(opaque_def_id, ident.name, DefKind::LifetimeParam); + let feed = + self.tcx.create_def(opaque_def_id, Some(ident.name), DefKind::LifetimeParam); feed.def_span(ident.span); feed.def_ident_span(Some(ident.span)); feed.def_id() diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index d564dc9699a..16caa4f6874 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -8,7 +8,7 @@ use rustc_middle::query::plumbing::CyclePlaceholder; use rustc_middle::ty::fold::fold_regions; use rustc_middle::ty::print::with_forced_trimmed_paths; use rustc_middle::ty::util::IntTypeExt; -use rustc_middle::ty::{self, Article, IsSuggestable, Ty, TyCtxt, TypeVisitableExt}; +use rustc_middle::ty::{self, IsSuggestable, Ty, TyCtxt, TypeVisitableExt}; use rustc_middle::{bug, span_bug}; use rustc_span::{DUMMY_SP, Ident, Span}; @@ -35,13 +35,6 @@ fn anon_const_type_of<'tcx>(icx: &ItemCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx let parent_node_id = tcx.parent_hir_id(hir_id); let parent_node = tcx.hir_node(parent_node_id); - let find_const = |&(op, op_sp)| match op { - hir::InlineAsmOperand::Const { anon_const } if anon_const.hir_id == hir_id => { - Some((anon_const, op_sp)) - } - _ => None, - }; - match parent_node { // Anon consts "inside" the type system. Node::ConstArg(&ConstArg { @@ -50,31 +43,6 @@ fn anon_const_type_of<'tcx>(icx: &ItemCtxt<'tcx>, def_id: LocalDefId) -> Ty<'tcx .. }) if anon_hir_id == hir_id => const_arg_anon_type_of(icx, arg_hir_id, span), - // Anon consts outside the type system. - Node::Expr(&Expr { kind: ExprKind::InlineAsm(asm), .. }) - | Node::Item(&Item { kind: ItemKind::GlobalAsm { asm, .. }, .. 
}) - if let Some((anon_const, op_sp)) = asm.operands.iter().find_map(find_const) => - { - let ty = tcx.typeck(def_id).node_type(hir_id); - - match ty.kind() { - ty::Error(_) => ty, - ty::Int(_) | ty::Uint(_) => ty, - _ => { - let guar = tcx - .dcx() - .struct_span_err(op_sp, "invalid type for `const` operand") - .with_span_label( - tcx.def_span(anon_const.def_id), - format!("is {} `{}`", ty.kind().article(), ty), - ) - .with_help("`const` operands must be of an integer type") - .emit(); - - Ty::new_error(tcx, guar) - } - } - } Node::Variant(Variant { disr_expr: Some(e), .. }) if e.hir_id == hir_id => { tcx.adt_def(tcx.hir_get_parent_item(hir_id)).repr().discr_type().to_ty(tcx) } @@ -512,5 +480,5 @@ pub(crate) fn type_alias_is_lazy<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> } } } - HasTait.visit_ty_unambig(tcx.hir().expect_item(def_id).expect_ty_alias().0).is_break() + HasTait.visit_ty_unambig(tcx.hir_expect_item(def_id).expect_ty_alias().0).is_break() } diff --git a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs index 399c4fbe55a..142078900f0 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs @@ -1,14 +1,13 @@ use rustc_hir::def::DefKind; use rustc_hir::def_id::LocalDefId; -use rustc_hir::intravisit::{self, Visitor}; -use rustc_hir::{self as hir, Expr, ImplItem, Item, Node, TraitItem, def}; +use rustc_hir::{self as hir, Expr, ImplItem, Item, Node, TraitItem, def, intravisit}; use rustc_middle::bug; use rustc_middle::hir::nested_filter; use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt}; use rustc_span::DUMMY_SP; use tracing::{debug, instrument, trace}; -use crate::errors::{TaitForwardCompat, TaitForwardCompat2, UnconstrainedOpaqueType}; +use crate::errors::{TaitForwardCompat2, UnconstrainedOpaqueType}; /// Checks "defining uses" of opaque `impl Trait` in associated types. /// These can only be defined by associated items of the same trait. @@ -82,38 +81,9 @@ pub(super) fn find_opaque_ty_constraints_for_impl_trait_in_assoc_type( /// ``` #[instrument(skip(tcx), level = "debug")] pub(super) fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: LocalDefId) -> Ty<'_> { - let hir_id = tcx.local_def_id_to_hir_id(def_id); - let scope = tcx.hir_get_defining_scope(hir_id); let mut locator = TaitConstraintLocator { def_id, tcx, found: None, typeck_types: vec![] }; - debug!(?scope); - - if scope == hir::CRATE_HIR_ID { - tcx.hir_walk_toplevel_module(&mut locator); - } else { - trace!("scope={:#?}", tcx.hir_node(scope)); - match tcx.hir_node(scope) { - // We explicitly call `visit_*` methods, instead of using `intravisit::walk_*` methods - // This allows our visitor to process the defining item itself, causing - // it to pick up any 'sibling' defining uses. - // - // For example, this code: - // ``` - // fn foo() { - // type Blah = impl Debug; - // let my_closure = || -> Blah { true }; - // } - // ``` - // - // requires us to explicitly process `foo()` in order - // to notice the defining usage of `Blah`. 
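Written out as a standalone nightly example (the same `foo`/`Blah` shape as the comment above, illustrative only and reflecting behavior of roughly this patch's vintage): the closure's return type is the defining use, which the constraint locator now finds by unconditionally walking the top-level module instead of a computed defining scope.

#![feature(type_alias_impl_trait)]
use std::fmt::Debug;

fn foo() {
    type Blah = impl Debug;
    // The closure's return type is the defining use of `Blah`; visiting `foo` itself
    // is what lets `TaitConstraintLocator` pick it up.
    let my_closure = || -> Blah { true };
    let _ = my_closure();
}

fn main() {
    foo();
}
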
- Node::Item(it) => locator.visit_item(it), - Node::ImplItem(it) => locator.visit_impl_item(it), - Node::TraitItem(it) => locator.visit_trait_item(it), - Node::ForeignItem(it) => locator.visit_foreign_item(it), - other => bug!("{:?} is not a valid scope for an opaque type item", other), - } - } + tcx.hir_walk_toplevel_module(&mut locator); if let Some(hidden) = locator.found { // Only check against typeck if we didn't already error @@ -137,12 +107,7 @@ pub(super) fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: Local let reported = tcx.dcx().emit_err(UnconstrainedOpaqueType { span: tcx.def_span(def_id), name: tcx.item_ident(parent_def_id.to_def_id()), - what: match tcx.hir_node(scope) { - _ if scope == hir::CRATE_HIR_ID => "module", - Node::Item(hir::Item { kind: hir::ItemKind::Mod(_), .. }) => "module", - Node::Item(hir::Item { kind: hir::ItemKind::Impl(_), .. }) => "impl", - _ => "item", - }, + what: "crate", }); Ty::new_error(tcx, reported) } @@ -176,6 +141,13 @@ impl TaitConstraintLocator<'_> { return; } + let opaque_types_defined_by = self.tcx.opaque_types_defined_by(item_def_id); + // Don't try to check items that cannot possibly constrain the type. + if !opaque_types_defined_by.contains(&self.def_id) { + debug!("no constraint: no opaque types defined"); + return; + } + // Function items with `_` in their return type already emit an error, skip any // "non-defining use" errors for them. // Note that we use `Node::fn_sig` instead of `Node::fn_decl` here, because the former @@ -215,8 +187,6 @@ impl TaitConstraintLocator<'_> { return; } - let opaque_types_defined_by = self.tcx.opaque_types_defined_by(item_def_id); - let mut constrained = false; for (&opaque_type_key, &hidden_type) in &tables.concrete_opaque_types { if opaque_type_key.def_id != self.def_id { @@ -224,20 +194,6 @@ impl TaitConstraintLocator<'_> { } constrained = true; - if !opaque_types_defined_by.contains(&self.def_id) { - let guar = self.tcx.dcx().emit_err(TaitForwardCompat { - span: hidden_type.span, - item_span: self - .tcx - .def_ident_span(item_def_id) - .unwrap_or_else(|| self.tcx.def_span(item_def_id)), - }); - // Avoid "opaque type not constrained" errors on the opaque itself. - self.found = Some(ty::OpaqueHiddenType { - span: DUMMY_SP, - ty: Ty::new_error(self.tcx, guar), - }); - } let concrete_type = self.tcx.erase_regions(hidden_type.remap_generic_params_to_declaration_params( opaque_type_key, @@ -309,19 +265,13 @@ impl<'tcx> intravisit::Visitor<'tcx> for TaitConstraintLocator<'tcx> { } fn visit_item(&mut self, it: &'tcx Item<'tcx>) { trace!(?it.owner_id); - // The opaque type itself or its children are not within its reveal scope. - if it.owner_id.def_id != self.def_id { - self.check(it.owner_id.def_id); - intravisit::walk_item(self, it); - } + self.check(it.owner_id.def_id); + intravisit::walk_item(self, it); } fn visit_impl_item(&mut self, it: &'tcx ImplItem<'tcx>) { trace!(?it.owner_id); - // The opaque type itself or its children are not within its reveal scope. 
- if it.owner_id.def_id != self.def_id { - self.check(it.owner_id.def_id); - intravisit::walk_impl_item(self, it); - } + self.check(it.owner_id.def_id); + intravisit::walk_impl_item(self, it); } fn visit_trait_item(&mut self, it: &'tcx TraitItem<'tcx>) { trace!(?it.owner_id); diff --git a/compiler/rustc_hir_analysis/src/delegation.rs b/compiler/rustc_hir_analysis/src/delegation.rs index 4dbdfa3d85a..78b20f9897c 100644 --- a/compiler/rustc_hir_analysis/src/delegation.rs +++ b/compiler/rustc_hir_analysis/src/delegation.rs @@ -404,11 +404,16 @@ fn check_constraints<'tcx>( }; if let Some(local_sig_id) = sig_id.as_local() - && tcx.hir().opt_delegation_sig_id(local_sig_id).is_some() + && tcx.hir_opt_delegation_sig_id(local_sig_id).is_some() { emit("recursive delegation is not supported yet"); } + if tcx.fn_sig(sig_id).skip_binder().skip_binder().c_variadic { + // See issue #127443 for explanation. + emit("delegation to C-variadic functions is not allowed"); + } + ret } @@ -416,7 +421,7 @@ pub(crate) fn inherit_sig_for_delegation_item<'tcx>( tcx: TyCtxt<'tcx>, def_id: LocalDefId, ) -> &'tcx [Ty<'tcx>] { - let sig_id = tcx.hir().opt_delegation_sig_id(def_id).unwrap(); + let sig_id = tcx.hir_opt_delegation_sig_id(def_id).unwrap(); let caller_sig = tcx.fn_sig(sig_id); if let Err(err) = check_constraints(tcx, def_id, sig_id) { let sig_len = caller_sig.instantiate_identity().skip_binder().inputs().len() + 1; diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index 852533ff5c9..f2560f22874 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -11,8 +11,6 @@ use rustc_middle::ty::Ty; use rustc_span::{Ident, Span, Symbol}; use crate::fluent_generated as fluent; -mod pattern_types; -pub(crate) use pattern_types::*; pub(crate) mod wrong_number_of_generic_args; mod precise_captures; @@ -84,6 +82,8 @@ pub(crate) struct AssocItemNotFound<'a> { pub label: Option<AssocItemNotFoundLabel<'a>>, #[subdiagnostic] pub sugg: Option<AssocItemNotFoundSugg<'a>>, + #[label(hir_analysis_within_macro)] + pub within_macro_span: Option<Span>, } #[derive(Subdiagnostic)] @@ -425,16 +425,6 @@ pub(crate) struct UnconstrainedOpaqueType { } #[derive(Diagnostic)] -#[diag(hir_analysis_tait_forward_compat)] -#[note] -pub(crate) struct TaitForwardCompat { - #[primary_span] - pub span: Span, - #[note] - pub item_span: Span, -} - -#[derive(Diagnostic)] #[diag(hir_analysis_tait_forward_compat2)] #[note] pub(crate) struct TaitForwardCompat2 { @@ -711,17 +701,6 @@ pub(crate) struct InvalidUnionField { } #[derive(Diagnostic)] -#[diag(hir_analysis_invalid_unsafe_field, code = E0740)] -pub(crate) struct InvalidUnsafeField { - #[primary_span] - pub field_span: Span, - #[subdiagnostic] - pub sugg: InvalidUnsafeFieldSuggestion, - #[note] - pub note: (), -} - -#[derive(Diagnostic)] #[diag(hir_analysis_return_type_notation_on_non_rpitit)] pub(crate) struct ReturnTypeNotationOnNonRpitit<'tcx> { #[primary_span] @@ -742,18 +721,6 @@ pub(crate) struct InvalidUnionFieldSuggestion { pub hi: Span, } -#[derive(Subdiagnostic)] -#[multipart_suggestion( - hir_analysis_invalid_unsafe_field_sugg, - applicability = "machine-applicable" -)] -pub(crate) struct InvalidUnsafeFieldSuggestion { - #[suggestion_part(code = "std::mem::ManuallyDrop<")] - pub lo: Span, - #[suggestion_part(code = ">")] - pub hi: Span, -} - #[derive(Diagnostic)] #[diag(hir_analysis_return_type_notation_equality_bound)] pub(crate) struct ReturnTypeNotationEqualityBound { diff --git 
a/compiler/rustc_hir_analysis/src/errors/pattern_types.rs b/compiler/rustc_hir_analysis/src/errors/pattern_types.rs deleted file mode 100644 index ec7b3aaa1c1..00000000000 --- a/compiler/rustc_hir_analysis/src/errors/pattern_types.rs +++ /dev/null @@ -1,14 +0,0 @@ -use rustc_macros::Diagnostic; -use rustc_middle::ty::Ty; -use rustc_span::Span; - -#[derive(Diagnostic)] -#[diag(hir_analysis_invalid_base_type)] -pub(crate) struct InvalidBaseType<'tcx> { - pub ty: Ty<'tcx>, - #[primary_span] - pub ty_span: Span, - pub pat: &'static str, - #[note] - pub pat_span: Span, -} diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs index dd346ed1f97..60af36da6e2 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/bounds.rs @@ -468,11 +468,13 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // Good error for `where Trait::method(..): Send`. let Some(self_ty) = opt_self_ty else { - return self.error_missing_qpath_self_ty( + let guar = self.error_missing_qpath_self_ty( trait_def_id, hir_ty.span, item_segment, + ty::AssocKind::Type, ); + return Ty::new_error(tcx, guar); }; let self_ty = self.lower_ty(self_ty); diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs index d644a1f224c..fa061c80618 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs @@ -151,6 +151,9 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { qself: &qself_str, label: None, sugg: None, + // Try to get the span of the identifier within the path's syntax context + // (if that's different). + within_macro_span: assoc_name.span.within_macro(span, tcx.sess.source_map()), }; if is_dummy { @@ -385,14 +388,17 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { }) } - pub(super) fn report_ambiguous_assoc_ty( + pub(super) fn report_ambiguous_assoc( &self, span: Span, types: &[String], traits: &[String], name: Symbol, + kind: ty::AssocKind, ) -> ErrorGuaranteed { - let mut err = struct_span_code_err!(self.dcx(), span, E0223, "ambiguous associated type"); + let kind_str = assoc_kind_str(kind); + let mut err = + struct_span_code_err!(self.dcx(), span, E0223, "ambiguous associated {kind_str}"); if self .tcx() .resolutions(()) @@ -417,7 +423,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { span, format!( "if there were a type named `Type` that implements a trait named \ - `Trait` with associated type `{name}`, you could use the \ + `Trait` with associated {kind_str} `{name}`, you could use the \ fully-qualified path", ), format!("<Type as Trait>::{name}"), @@ -440,7 +446,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { span, format!( "if there were a type named `Example` that implemented one of the \ - traits with associated type `{name}`, you could use the \ + traits with associated {kind_str} `{name}`, you could use the \ fully-qualified path", ), traits.iter().map(|trait_str| format!("<Example as {trait_str}>::{name}")), @@ -451,7 +457,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { err.span_suggestion_verbose( span, format!( - "if there were a trait named `Example` with associated type `{name}` \ + "if there were a trait named `Example` with associated {kind_str} `{name}` \ implemented for `{type_str}`, you could use the fully-qualified path", ), format!("<{type_str} as Example>::{name}"), @@ -462,7 +468,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { err.span_suggestions( 
span, format!( - "if there were a trait named `Example` with associated type `{name}` \ + "if there were a trait named `Example` with associated {kind_str} `{name}` \ implemented for one of the types, you could use the fully-qualified \ path", ), @@ -491,7 +497,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { err.emit() } - pub(crate) fn complain_about_ambiguous_inherent_assoc_ty( + pub(crate) fn complain_about_ambiguous_inherent_assoc( &self, name: Ident, candidates: Vec<DefId>, @@ -552,13 +558,14 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } // FIXME(inherent_associated_types): Find similarly named associated types and suggest them. - pub(crate) fn complain_about_inherent_assoc_ty_not_found( + pub(crate) fn complain_about_inherent_assoc_not_found( &self, name: Ident, self_ty: Ty<'tcx>, candidates: Vec<(DefId, (DefId, DefId))>, fulfillment_errors: Vec<FulfillmentError<'tcx>>, span: Span, + kind: ty::AssocKind, ) -> ErrorGuaranteed { // FIXME(fmease): This was copied in parts from an old version of `rustc_hir_typeck::method::suggest`. // Either @@ -568,12 +575,16 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let tcx = self.tcx(); + let kind_str = assoc_kind_str(kind); let adt_did = self_ty.ty_adt_def().map(|def| def.did()); let add_def_label = |err: &mut Diag<'_>| { if let Some(did) = adt_did { err.span_label( tcx.def_span(did), - format!("associated item `{name}` not found for this {}", tcx.def_descr(did)), + format!( + "associated {kind_str} `{name}` not found for this {}", + tcx.def_descr(did) + ), ); } }; @@ -600,11 +611,11 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { self.dcx(), name.span, E0220, - "associated type `{name}` not found for `{self_ty}` in the current scope" + "associated {kind_str} `{name}` not found for `{self_ty}` in the current scope" ); err.span_label(name.span, format!("associated item not found in `{self_ty}`")); err.note(format!( - "the associated type was found for\n{type_candidates}{additional_types}", + "the associated {kind_str} was found for\n{type_candidates}{additional_types}", )); add_def_label(&mut err); return err.emit(); @@ -685,7 +696,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let mut err = self.dcx().struct_span_err( name.span, - format!("the associated type `{name}` exists for `{self_ty}`, but its trait bounds were not satisfied") + format!("the associated {kind_str} `{name}` exists for `{self_ty}`, but its trait bounds were not satisfied") ); if !bounds.is_empty() { err.note(format!( @@ -695,7 +706,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } err.span_label( name.span, - format!("associated type cannot be referenced on `{self_ty}` due to unsatisfied trait bounds") + format!("associated {kind_str} cannot be referenced on `{self_ty}` due to unsatisfied trait bounds") ); for (span, mut bounds) in bound_spans { @@ -789,7 +800,11 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { Some(args.constraints.iter().filter_map(|constraint| { let ident = constraint.ident; - let trait_def = path.res.def_id(); + + let Res::Def(DefKind::Trait, trait_def) = path.res else { + return None; + }; + let assoc_item = tcx.associated_items(trait_def).find_by_name_and_kind( tcx, ident, @@ -1505,7 +1520,7 @@ fn generics_args_err_extend<'a>( }) .collect(); if args.len() > 1 - && let Some(span) = args.into_iter().last() + && let Some(span) = args.into_iter().next_back() { err.note( "generic arguments are not allowed on both an enum and its variant's path \ @@ -1610,7 +1625,7 @@ fn generics_args_err_extend<'a>( } } -pub(super) fn assoc_kind_str(kind: ty::AssocKind) -> &'static str { 
+pub(crate) fn assoc_kind_str(kind: ty::AssocKind) -> &'static str { match kind { ty::AssocKind::Fn => "function", ty::AssocKind::Const => "constant", diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/generics.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/generics.rs index 8d58a3bfbd3..60a60f6415a 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/generics.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/generics.rs @@ -115,7 +115,7 @@ fn generic_arg_mismatch_err( body.value.kind && let Res::Def(DefKind::Fn { .. }, id) = path.res { - // FIXME(min_generic_const_args): this branch is dead once new const path lowering + // FIXME(mgca): this branch is dead once new const path lowering // (for single-segment paths) is no longer gated err.help(format!("`{}` is a function item, not a type", tcx.item_name(id))); err.help("function item types cannot be named directly"); @@ -273,7 +273,7 @@ pub fn lower_generic_args<'tcx: 'a, 'a>( // We lower to an infer even when the feature gate is not enabled // as it is useful for diagnostics to be able to see a `ConstKind::Infer` - args.push(ctx.provided_kind(param, arg)); + args.push(ctx.provided_kind(&args, param, arg)); args_iter.next(); params.next(); } diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs index f5e075367f3..e78801dd601 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs @@ -78,15 +78,18 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } if self_ty.span.edition().at_least_rust_2021() { - let msg = "expected a type, found a trait"; - let label = "you can add the `dyn` keyword if you want a trait object"; - let mut diag = - rustc_errors::struct_span_code_err!(self.dcx(), self_ty.span, E0782, "{}", msg); + let mut diag = rustc_errors::struct_span_code_err!( + self.dcx(), + self_ty.span, + E0782, + "{}", + "expected a type, found a trait" + ); if self_ty.span.can_be_used_for_suggestions() && !self.maybe_suggest_impl_trait(self_ty, &mut diag) + && !self.maybe_suggest_dyn_trait(self_ty, sugg, &mut diag) { - // FIXME: Only emit this suggestion if the trait is dyn-compatible. - diag.multipart_suggestion_verbose(label, sugg, Applicability::MachineApplicable); + self.maybe_suggest_add_generic_impl_trait(self_ty, &mut diag); } // Check if the impl trait that we are considering is an impl of a local trait. self.maybe_suggest_blanket_trait_impl(self_ty, &mut diag); @@ -123,6 +126,64 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } } + /// For a struct or enum with an invalid bare trait object field, suggest turning + /// it into a generic type bound. + fn maybe_suggest_add_generic_impl_trait( + &self, + self_ty: &hir::Ty<'_>, + diag: &mut Diag<'_>, + ) -> bool { + let tcx = self.tcx(); + + let parent_hir_id = tcx.parent_hir_id(self_ty.hir_id); + let parent_item = tcx.hir_get_parent_item(self_ty.hir_id).def_id; + + let generics = match tcx.hir_node_by_def_id(parent_item) { + hir::Node::Item(hir::Item { + kind: hir::ItemKind::Struct(variant, generics), .. + }) => { + if !variant.fields().iter().any(|field| field.hir_id == parent_hir_id) { + return false; + } + generics + } + hir::Node::Item(hir::Item { kind: hir::ItemKind::Enum(def, generics), .. 
}) => { + if !def + .variants + .iter() + .flat_map(|variant| variant.data.fields().iter()) + .any(|field| field.hir_id == parent_hir_id) + { + return false; + } + generics + } + _ => return false, + }; + + let Ok(rendered_ty) = tcx.sess.source_map().span_to_snippet(self_ty.span) else { + return false; + }; + + let param = "TUV" + .chars() + .map(|c| c.to_string()) + .chain((0..).map(|i| format!("P{i}"))) + .find(|s| !generics.params.iter().any(|param| param.name.ident().as_str() == s)) + .expect("we definitely can find at least one param name to generate"); + let mut sugg = vec![(self_ty.span, param.to_string())]; + if let Some(insertion_span) = generics.span_for_param_suggestion() { + sugg.push((insertion_span, format!(", {param}: {}", rendered_ty))); + } else { + sugg.push((generics.where_clause_span, format!("<{param}: {}>", rendered_ty))); + } + diag.multipart_suggestion_verbose( + "you might be missing a type parameter", + sugg, + Applicability::MachineApplicable, + ); + true + } /// Make sure that we are in the condition to suggest the blanket implementation. fn maybe_suggest_blanket_trait_impl<G: EmissionGuarantee>( &self, @@ -171,6 +232,65 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } } + /// Try our best to approximate when adding `dyn` would be helpful for a bare + /// trait object. + /// + /// Right now, this is if the type is either directly nested in another ty, + /// or if it's in the tail field within a struct. This approximates what the + /// user would've gotten on edition 2015, except for the case where we have + /// an *obvious* knock-on `Sized` error. + fn maybe_suggest_dyn_trait( + &self, + self_ty: &hir::Ty<'_>, + sugg: Vec<(Span, String)>, + diag: &mut Diag<'_>, + ) -> bool { + let tcx = self.tcx(); + + // Look at the direct HIR parent, since we care about the relationship between + // the type and the thing that directly encloses it. + match tcx.parent_hir_node(self_ty.hir_id) { + // These are all generally ok. Namely, when a trait object is nested + // into another expression or ty, it's either very certain that they + // missed the ty (e.g. `&Trait`) or it's not really possible to tell + // what their intention is, so let's not give confusing suggestions and + // just mention `dyn`. The user can make up their mind what to do here. + hir::Node::Ty(_) + | hir::Node::Expr(_) + | hir::Node::PatExpr(_) + | hir::Node::PathSegment(_) + | hir::Node::AssocItemConstraint(_) + | hir::Node::TraitRef(_) + | hir::Node::Item(_) + | hir::Node::WherePredicate(_) => {} + + hir::Node::Field(field) => { + // Enums can't have unsized fields, fields can only have an unsized tail field. + if let hir::Node::Item(hir::Item { + kind: hir::ItemKind::Struct(variant, _), .. + }) = tcx.parent_hir_node(field.hir_id) + && variant + .fields() + .last() + .is_some_and(|tail_field| tail_field.hir_id == field.hir_id) + { + // Ok + } else { + return false; + } + } + _ => return false, + } + + // FIXME: Only emit this suggestion if the trait is dyn-compatible. 
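A rough user-level illustration of what the two new suggestion helpers target (type and field names invented for the example): on edition 2021+ a bare trait written in a field type is an error, and rather than always proposing `dyn`, the diagnostic now reserves that rewrite for positions where it plausibly helps and otherwise suggests introducing a type parameter.

use std::fmt::Debug;

// Rejected on edition 2021+ (E0782): `label: Debug` names a trait where a type is expected.
//
//     struct Labelled { label: Debug, count: usize }
//
// `label` is not the tail field, so a `dyn` rewrite would just trade this for a `Sized`
// error; the new suggestion is to introduce a type parameter instead:
struct Labelled<T: Debug> {
    label: T,
    count: usize,
}

// For a struct's (potentially unsized) tail field, the `dyn` suggestion still applies:
#[allow(dead_code)]
struct Tail {
    count: usize,
    label: dyn Debug,
}

fn main() {
    let l = Labelled { label: 42, count: 1 };
    println!("{:?} x{}", l.label, l.count);
}
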
+ diag.multipart_suggestion_verbose( + "you can add the `dyn` keyword if you want a trait object", + sugg, + Applicability::MachineApplicable, + ); + true + } + fn add_generic_param_suggestion( &self, generics: &hir::Generics<'_>, diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs index 750770178ee..8fff6eb9f6e 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs @@ -44,18 +44,18 @@ use rustc_middle::ty::{ }; use rustc_middle::{bug, span_bug}; use rustc_session::lint::builtin::AMBIGUOUS_ASSOCIATED_ITEMS; +use rustc_session::parse::feature_err; use rustc_span::edit_distance::find_best_match_for_name; -use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw}; +use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym}; use rustc_trait_selection::infer::InferCtxtExt; use rustc_trait_selection::traits::wf::object_region_bounds; use rustc_trait_selection::traits::{self, ObligationCtxt}; use rustc_type_ir::Upcast; use tracing::{debug, instrument}; +use self::errors::assoc_kind_str; use crate::check::check_abi_fn_ptr; -use crate::errors::{ - AmbiguousLifetimeBound, BadReturnTypeNotation, InvalidBaseType, NoVariantNamed, -}; +use crate::errors::{AmbiguousLifetimeBound, BadReturnTypeNotation, NoVariantNamed}; use crate::hir_ty_lowering::errors::{GenericsArgsErrExtend, prohibit_assoc_item_constraint}; use crate::hir_ty_lowering::generics::{check_generic_arg_count, lower_generic_args}; use crate::middle::resolve_bound_vars as rbv; @@ -152,7 +152,7 @@ pub trait HirTyLowerer<'tcx> { assoc_name: Ident, ) -> ty::EarlyBinder<'tcx, &'tcx [(ty::Clause<'tcx>, Span)]>; - /// Lower an associated type to a projection. + /// Lower an associated type/const (from a trait) to a projection. /// /// This method has to be defined by the concrete lowering context because /// dealing with higher-ranked trait references depends on its capabilities: @@ -164,13 +164,14 @@ pub trait HirTyLowerer<'tcx> { /// /// The canonical example of this is associated type `T::P` where `T` is a type /// param constrained by `T: for<'a> Trait<'a>` and where `Trait` defines `P`. - fn lower_assoc_ty( + fn lower_assoc_shared( &self, span: Span, item_def_id: DefId, item_segment: &hir::PathSegment<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>, - ) -> Ty<'tcx>; + kind: ty::AssocKind, + ) -> Result<(DefId, GenericArgsRef<'tcx>), ErrorGuaranteed>; fn lower_fn_sig( &self, @@ -232,16 +233,55 @@ impl AssocItemQSelf { /// Use this enum with `<dyn HirTyLowerer>::lower_const_arg` to instruct it with the /// desired behavior. #[derive(Debug, Clone, Copy)] -pub enum FeedConstTy { +pub enum FeedConstTy<'a, 'tcx> { /// Feed the type. /// /// The `DefId` belongs to the const param that we are supplying /// this (anon) const arg to. - Param(DefId), + /// + /// The list of generic args is used to instantiate the parameters + /// used by the type of the const param specified by `DefId`. + Param(DefId, &'a [ty::GenericArg<'tcx>]), /// Don't feed the type. No, } +#[derive(Debug, Clone, Copy)] +enum LowerAssocMode { + Type { permit_variants: bool }, + Const, +} + +impl LowerAssocMode { + fn kind(self) -> ty::AssocKind { + match self { + LowerAssocMode::Type { .. } => ty::AssocKind::Type, + LowerAssocMode::Const => ty::AssocKind::Const, + } + } + + fn def_kind(self) -> DefKind { + match self { + LowerAssocMode::Type { .. 
} => DefKind::AssocTy, + LowerAssocMode::Const => DefKind::AssocConst, + } + } + + fn permit_variants(self) -> bool { + match self { + LowerAssocMode::Type { permit_variants } => permit_variants, + // FIXME(mgca): Support paths like `Option::<T>::None` or `Option::<T>::Some` which resolve to const ctors/fn items respectively + LowerAssocMode::Const => false, + } + } +} + +#[derive(Debug, Clone, Copy)] +enum LoweredAssoc<'tcx> { + Term(DefId, GenericArgsRef<'tcx>), + Variant { adt: Ty<'tcx>, variant_did: DefId }, +} + /// New-typed boolean indicating whether explicit late-bound lifetimes /// are present in a set of generic arguments. /// @@ -298,6 +338,7 @@ pub trait GenericArgsLowerer<'a, 'tcx> { fn provided_kind( &mut self, + preceding_args: &[ty::GenericArg<'tcx>], param: &ty::GenericParamDef, arg: &GenericArg<'tcx>, ) -> ty::GenericArg<'tcx>; @@ -329,7 +370,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { #[instrument(level = "debug", skip(self), ret)] pub fn lower_resolved_lifetime(&self, resolved: rbv::ResolvedArg) -> ty::Region<'tcx> { let tcx = self.tcx(); - let lifetime_name = |def_id| tcx.hir().name(tcx.local_def_id_to_hir_id(def_id)); + let lifetime_name = |def_id| tcx.hir_name(tcx.local_def_id_to_hir_id(def_id)); match resolved { rbv::ResolvedArg::StaticLifetime => tcx.lifetimes.re_static, @@ -481,6 +522,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { fn provided_kind( &mut self, + preceding_args: &[ty::GenericArg<'tcx>], param: &ty::GenericParamDef, arg: &GenericArg<'tcx>, ) -> ty::GenericArg<'tcx> { @@ -526,7 +568,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => self .lowerer // Ambig portions of `ConstArg` are handled in the match arm below - .lower_const_arg(ct.as_unambig_ct(), FeedConstTy::Param(param.def_id)) + .lower_const_arg( + ct.as_unambig_ct(), + FeedConstTy::Param(param.def_id, preceding_args), + ) .into(), (&GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => { self.lowerer.ct_infer(Some(param), inf.span).into() @@ -582,8 +627,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let ty = tcx .at(self.span) .type_of(param.def_id) - .no_bound_vars() - .expect("const parameter types cannot be generic"); + .instantiate(tcx, preceding_args); if let Err(guar) = ty.error_reported() { return ty::Const::new_error(tcx, guar).into(); } @@ -623,7 +667,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { (args, arg_count) } - #[instrument(level = "debug", skip_all)] + #[instrument(level = "debug", skip(self))] pub fn lower_generic_args_of_assoc_item( &self, span: Span, @@ -631,7 +675,6 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { item_segment: &hir::PathSegment<'tcx>, parent_args: GenericArgsRef<'tcx>, ) -> GenericArgsRef<'tcx> { - debug!(?span, ?item_def_id, ?item_segment); let (args, _) = self.lower_generic_args_of_path(span, item_def_id, parent_args, item_segment, None); if let Some(c) = item_segment.args().constraints.first() { @@ -1111,7 +1154,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // NOTE: When this function starts resolving `Trait::AssocTy` successfully // it should also start reporting the `BARE_TRAIT_OBJECTS` lint. 
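For orientation before the renamed entry points that follow, a stable user-level sketch of the associated paths being lowered (trait and function names invented); the new `*_shared`/`*_const` split reuses the same resolution machinery for associated consts in type position under the in-progress mgca work.

trait Container {
    type Item;
}

impl Container for Vec<u8> {
    type Item = u8;
}

// `C::Item` below is the type-relative form resolved through the `C: Container` bound
// (`lower_assoc_path*`); `<Vec<u8> as Container>::Item` is the fully qualified form
// handled by the qualified-path lowering.
fn first<C: Container>(_c: &C) -> Option<C::Item> {
    None
}

fn main() {
    let v: Vec<u8> = vec![1, 2, 3];
    let _: Option<<Vec<u8> as Container>::Item> = first(&v);
}
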
#[instrument(level = "debug", skip_all, ret)] - pub fn lower_assoc_path( + pub fn lower_assoc_path_ty( &self, hir_ref_id: HirId, span: Span, @@ -1120,6 +1163,72 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { assoc_segment: &'tcx hir::PathSegment<'tcx>, permit_variants: bool, ) -> Result<(Ty<'tcx>, DefKind, DefId), ErrorGuaranteed> { + let tcx = self.tcx(); + match self.lower_assoc_path_shared( + hir_ref_id, + span, + qself_ty, + qself, + assoc_segment, + LowerAssocMode::Type { permit_variants }, + )? { + LoweredAssoc::Term(def_id, args) => { + let alias_ty = ty::AliasTy::new_from_args(tcx, def_id, args); + let ty = Ty::new_alias(tcx, alias_ty.kind(tcx), alias_ty); + Ok((ty, tcx.def_kind(def_id), def_id)) + } + LoweredAssoc::Variant { adt, variant_did } => Ok((adt, DefKind::Variant, variant_did)), + } + } + + #[instrument(level = "debug", skip_all, ret)] + fn lower_assoc_path_const( + &self, + hir_ref_id: HirId, + span: Span, + qself_ty: Ty<'tcx>, + qself: &'tcx hir::Ty<'tcx>, + assoc_segment: &'tcx hir::PathSegment<'tcx>, + ) -> Result<Const<'tcx>, ErrorGuaranteed> { + let tcx = self.tcx(); + let (def_id, args) = match self.lower_assoc_path_shared( + hir_ref_id, + span, + qself_ty, + qself, + assoc_segment, + LowerAssocMode::Const, + )? { + LoweredAssoc::Term(def_id, args) => { + if !tcx.associated_item(def_id).is_type_const_capable(tcx) { + let mut err = tcx.dcx().struct_span_err( + span, + "use of trait associated const without `#[type_const]`", + ); + err.note("the declaration in the trait must be marked with `#[type_const]`"); + return Err(err.emit()); + } + (def_id, args) + } + // FIXME(mgca): implement support for this once ready to support all adt ctor expressions, + // not just const ctors + LoweredAssoc::Variant { .. } => { + span_bug!(span, "unexpected variant res for type associated const path") + } + }; + Ok(Const::new_unevaluated(tcx, ty::UnevaluatedConst::new(def_id, args))) + } + + #[instrument(level = "debug", skip_all, ret)] + fn lower_assoc_path_shared( + &self, + hir_ref_id: HirId, + span: Span, + qself_ty: Ty<'tcx>, + qself: &'tcx hir::Ty<'tcx>, + assoc_segment: &'tcx hir::PathSegment<'tcx>, + mode: LowerAssocMode, + ) -> Result<LoweredAssoc<'tcx>, ErrorGuaranteed> { debug!(%qself_ty, ?assoc_segment.ident); let tcx = self.tcx(); @@ -1134,13 +1243,16 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { .iter() .find(|vd| tcx.hygienic_eq(assoc_ident, vd.ident(tcx), adt_def.did())); if let Some(variant_def) = variant_def { - if permit_variants { + if mode.permit_variants() { tcx.check_stability(variant_def.def_id, Some(hir_ref_id), span, None); let _ = self.prohibit_generic_args( slice::from_ref(assoc_segment).iter(), GenericsArgsErrExtend::EnumVariant { qself, assoc_segment, adt_def }, ); - return Ok((qself_ty, DefKind::Variant, variant_def.def_id)); + return Ok(LoweredAssoc::Variant { + adt: qself_ty, + variant_did: variant_def.def_id, + }); } else { variant_resolution = Some(variant_def.def_id); } @@ -1148,15 +1260,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } // FIXME(inherent_associated_types, #106719): Support self types other than ADTs. - if let Some((ty, did)) = self.probe_inherent_assoc_ty( - assoc_ident, + if let Some((did, args)) = self.probe_inherent_assoc_shared( assoc_segment, adt_def.did(), qself_ty, hir_ref_id, span, + mode.kind(), )? 
{ - return Ok((ty, DefKind::AssocTy, did)); + return Ok(LoweredAssoc::Term(did, args)); } } @@ -1185,7 +1297,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { ) }, AssocItemQSelf::SelfTyAlias, - ty::AssocKind::Type, + mode.kind(), assoc_ident, span, None, @@ -1197,14 +1309,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { ) => self.probe_single_ty_param_bound_for_assoc_item( param_did.expect_local(), qself.span, - ty::AssocKind::Type, + mode.kind(), assoc_ident, span, )?, _ => { + let kind_str = assoc_kind_str(mode.kind()); let reported = if variant_resolution.is_some() { // Variant in type position - let msg = format!("expected type, found variant `{assoc_ident}`"); + let msg = format!("expected {kind_str}, found variant `{assoc_ident}`"); self.dcx().span_err(span, msg) } else if qself_ty.is_enum() { let mut err = self.dcx().create_err(NoVariantNamed { @@ -1303,11 +1416,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { self.probe_traits_that_match_assoc_ty(qself_ty, assoc_ident); // Don't print `ty::Error` to the user. - self.report_ambiguous_assoc_ty( + self.report_ambiguous_assoc( span, &[qself_ty.to_string()], &traits, assoc_ident.name, + mode.kind(), ) }; return Err(reported); @@ -1315,10 +1429,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { }; let trait_did = bound.def_id(); - let assoc_ty = self - .probe_assoc_item(assoc_ident, ty::AssocKind::Type, hir_ref_id, span, trait_did) - .expect("failed to find associated type"); - let ty = self.lower_assoc_ty(span, assoc_ty.def_id, assoc_segment, bound); + let assoc_item = self + .probe_assoc_item(assoc_ident, mode.kind(), hir_ref_id, span, trait_did) + .expect("failed to find associated item"); + let (def_id, args) = + self.lower_assoc_shared(span, assoc_item.def_id, assoc_segment, bound, mode.kind())?; + let result = LoweredAssoc::Term(def_id, args); if let Some(variant_def_id) = variant_resolution { tcx.node_span_lint(AMBIGUOUS_ASSOCIATED_ITEMS, hir_ref_id, span, |lint| { @@ -1334,7 +1450,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { }; could_refer_to(DefKind::Variant, variant_def_id, ""); - could_refer_to(DefKind::AssocTy, assoc_ty.def_id, " also"); + could_refer_to(mode.def_kind(), assoc_item.def_id, " also"); lint.span_suggestion( span, @@ -1344,36 +1460,51 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { ); }); } - Ok((ty, DefKind::AssocTy, assoc_ty.def_id)) + Ok(result) } - fn probe_inherent_assoc_ty( + fn probe_inherent_assoc_shared( &self, - name: Ident, segment: &hir::PathSegment<'tcx>, adt_did: DefId, self_ty: Ty<'tcx>, block: HirId, span: Span, - ) -> Result<Option<(Ty<'tcx>, DefId)>, ErrorGuaranteed> { + kind: ty::AssocKind, + ) -> Result<Option<(DefId, GenericArgsRef<'tcx>)>, ErrorGuaranteed> { let tcx = self.tcx(); - // Don't attempt to look up inherent associated types when the feature is not enabled. - // Theoretically it'd be fine to do so since we feature-gate their definition site. - // However, due to current limitations of the implementation (caused by us performing - // selection during HIR ty lowering instead of in the trait solver), IATs can lead to cycle - // errors (#108491) which mask the feature-gate error, needlessly confusing users - // who use IATs by accident (#113265). if !tcx.features().inherent_associated_types() { - return Ok(None); + match kind { + // Don't attempt to look up inherent associated types when the feature is not enabled. + // Theoretically it'd be fine to do so since we feature-gate their definition site. 
+ // However, due to current limitations of the implementation (caused by us performing + // selection during HIR ty lowering instead of in the trait solver), IATs can lead to cycle + // errors (#108491) which mask the feature-gate error, needlessly confusing users + // who use IATs by accident (#113265). + ty::AssocKind::Type => return Ok(None), + ty::AssocKind::Const => { + // We also gate the mgca codepath for type-level uses of inherent consts + // with the inherent_associated_types feature gate since it relies on the + // same machinery and has similar rough edges. + return Err(feature_err( + &tcx.sess, + sym::inherent_associated_types, + span, + "inherent associated types are unstable", + ) + .emit()); + } + ty::AssocKind::Fn => unreachable!(), + } } + let name = segment.ident; let candidates: Vec<_> = tcx .inherent_impls(adt_did) .iter() .filter_map(|&impl_| { - let (item, scope) = - self.probe_assoc_item_unchecked(name, ty::AssocKind::Type, block, impl_)?; + let (item, scope) = self.probe_assoc_item_unchecked(name, kind, block, impl_)?; Some((impl_, (item.def_id, scope))) }) .collect(); @@ -1388,13 +1519,11 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // In contexts that have no inference context, just make a new one. // We do need a local variable to store it, though. - let infcx_; let infcx = match self.infcx() { Some(infcx) => infcx, None => { assert!(!self_ty.has_infer()); - infcx_ = tcx.infer_ctxt().ignoring_regions().build(TypingMode::non_body_analysis()); - &infcx_ + &tcx.infer_ctxt().ignoring_regions().build(TypingMode::non_body_analysis()) } }; @@ -1413,8 +1542,8 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { &mut universes, self_ty, |self_ty| { - self.select_inherent_assoc_type_candidates( - infcx, name, span, self_ty, param_env, candidates, + self.select_inherent_assoc_candidates( + infcx, name, span, self_ty, param_env, candidates, kind, ) }, )?; @@ -1431,13 +1560,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { .chain(args.into_iter().skip(parent_args.len())), ); - let ty = - Ty::new_alias(tcx, ty::Inherent, ty::AliasTy::new_from_args(tcx, assoc_item, args)); - - Ok(Some((ty, assoc_item))) + Ok(Some((assoc_item, args))) } - fn select_inherent_assoc_type_candidates( + fn select_inherent_assoc_candidates( &self, infcx: &InferCtxt<'tcx>, name: Ident, @@ -1445,6 +1571,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { self_ty: Ty<'tcx>, param_env: ParamEnv<'tcx>, candidates: Vec<(DefId, (DefId, DefId))>, + kind: ty::AssocKind, ) -> Result<(DefId, (DefId, DefId)), ErrorGuaranteed> { let tcx = self.tcx(); let mut fulfillment_errors = Vec::new(); @@ -1489,17 +1616,18 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { .collect(); match &applicable_candidates[..] { - &[] => Err(self.complain_about_inherent_assoc_ty_not_found( + &[] => Err(self.complain_about_inherent_assoc_not_found( name, self_ty, candidates, fulfillment_errors, span, + kind, )), &[applicable_candidate] => Ok(applicable_candidate), - &[_, ..] => Err(self.complain_about_ambiguous_inherent_assoc_ty( + &[_, ..] => Err(self.complain_about_ambiguous_inherent_assoc( name, applicable_candidates.into_iter().map(|(_, (candidate, _))| candidate).collect(), span, @@ -1631,7 +1759,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { /// Lower a qualified path to a type. 
#[instrument(level = "debug", skip_all)] - fn lower_qpath( + fn lower_qpath_ty( &self, span: Span, opt_self_ty: Option<Ty<'tcx>>, @@ -1639,13 +1767,64 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { trait_segment: &hir::PathSegment<'tcx>, item_segment: &hir::PathSegment<'tcx>, ) -> Ty<'tcx> { + match self.lower_qpath_shared( + span, + opt_self_ty, + item_def_id, + trait_segment, + item_segment, + ty::AssocKind::Type, + ) { + Ok((item_def_id, item_args)) => { + Ty::new_projection_from_args(self.tcx(), item_def_id, item_args) + } + Err(guar) => Ty::new_error(self.tcx(), guar), + } + } + + /// Lower a qualified path to a const. + #[instrument(level = "debug", skip_all)] + fn lower_qpath_const( + &self, + span: Span, + opt_self_ty: Option<Ty<'tcx>>, + item_def_id: DefId, + trait_segment: &hir::PathSegment<'tcx>, + item_segment: &hir::PathSegment<'tcx>, + ) -> Const<'tcx> { + match self.lower_qpath_shared( + span, + opt_self_ty, + item_def_id, + trait_segment, + item_segment, + ty::AssocKind::Const, + ) { + Ok((item_def_id, item_args)) => { + let uv = ty::UnevaluatedConst::new(item_def_id, item_args); + Const::new_unevaluated(self.tcx(), uv) + } + Err(guar) => Const::new_error(self.tcx(), guar), + } + } + + #[instrument(level = "debug", skip_all)] + fn lower_qpath_shared( + &self, + span: Span, + opt_self_ty: Option<Ty<'tcx>>, + item_def_id: DefId, + trait_segment: &hir::PathSegment<'tcx>, + item_segment: &hir::PathSegment<'tcx>, + kind: ty::AssocKind, + ) -> Result<(DefId, GenericArgsRef<'tcx>), ErrorGuaranteed> { let tcx = self.tcx(); let trait_def_id = tcx.parent(item_def_id); debug!(?trait_def_id); let Some(self_ty) = opt_self_ty else { - return self.error_missing_qpath_self_ty(trait_def_id, span, item_segment); + return Err(self.error_missing_qpath_self_ty(trait_def_id, span, item_segment, kind)); }; debug!(?self_ty); @@ -1656,7 +1835,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let item_args = self.lower_generic_args_of_assoc_item(span, item_def_id, item_segment, trait_ref.args); - Ty::new_projection_from_args(tcx, item_def_id, item_args) + Ok((item_def_id, item_args)) } fn error_missing_qpath_self_ty( @@ -1664,7 +1843,8 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { trait_def_id: DefId, span: Span, item_segment: &hir::PathSegment<'tcx>, - ) -> Ty<'tcx> { + kind: ty::AssocKind, + ) -> ErrorGuaranteed { let tcx = self.tcx(); let path_str = tcx.def_path_str(trait_def_id); @@ -1700,9 +1880,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // FIXME: also look at `tcx.generics_of(self.item_def_id()).params` any that // references the trait. 
Relevant for the first case in // `src/test/ui/associated-types/associated-types-in-ambiguous-context.rs` - let reported = - self.report_ambiguous_assoc_ty(span, &type_names, &[path_str], item_segment.ident.name); - Ty::new_error(tcx, reported) + self.report_ambiguous_assoc(span, &type_names, &[path_str], item_segment.ident.name, kind) } pub fn prohibit_generic_args<'a>( @@ -2006,7 +2184,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { path.segments[..path.segments.len() - 2].iter(), GenericsArgsErrExtend::None, ); - self.lower_qpath( + self.lower_qpath_ty( span, opt_self_ty, def_id, @@ -2107,14 +2285,59 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { pub fn lower_const_arg( &self, const_arg: &hir::ConstArg<'tcx>, - feed: FeedConstTy, + feed: FeedConstTy<'_, 'tcx>, ) -> Const<'tcx> { let tcx = self.tcx(); - if let FeedConstTy::Param(param_def_id) = feed + if let FeedConstTy::Param(param_def_id, args) = feed && let hir::ConstArgKind::Anon(anon) = &const_arg.kind { - tcx.feed_anon_const_type(anon.def_id, tcx.type_of(param_def_id)); + let anon_const_type = tcx.type_of(param_def_id).instantiate(tcx, args); + + // FIXME(generic_const_parameter_types): Ideally we remove these errors below when + // we have the ability to intermix typeck of anon const const args with the parent + // bodies typeck. + + // We also error if the type contains any regions as effectively any region will wind + // up as a region variable in mir borrowck. It would also be somewhat concerning if + // hir typeck was using equality but mir borrowck wound up using subtyping as that could + // result in a non-infer in hir typeck but a region variable in borrowck. + if tcx.features().generic_const_parameter_types() + && (anon_const_type.has_free_regions() || anon_const_type.has_erased_regions()) + { + let e = tcx.dcx().span_err( + const_arg.span(), + "anonymous constants with lifetimes in their type are not yet supported", + ); + tcx.feed_anon_const_type(anon.def_id, ty::EarlyBinder::bind(Ty::new_error(tcx, e))); + return ty::Const::new_error(tcx, e); + } + // We must error if the instantiated type has any inference variables as we will + // use this type to feed the `type_of` and query results must not contain inference + // variables otherwise we will ICE. + if anon_const_type.has_non_region_infer() { + let e = tcx.dcx().span_err( + const_arg.span(), + "anonymous constants with inferred types are not yet supported", + ); + tcx.feed_anon_const_type(anon.def_id, ty::EarlyBinder::bind(Ty::new_error(tcx, e))); + return ty::Const::new_error(tcx, e); + } + // We error when the type contains unsubstituted generics since we do not currently + // give the anon const any of the generics from the parent. 
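To ground `FeedConstTy::Param` with a stable example (struct name invented): the anonymous const argument's expected type is fed from the const parameter's declaration, which is now instantiated with the surrounding generic arguments rather than assumed to be fully concrete; the errors added in this hunk fence off the lifetime-, inference-, and generic-dependent cases that are not supported yet.

struct Buffer<const N: usize> {
    bytes: [u8; N],
}

fn main() {
    // `{ 3 + 4 }` is an anonymous const; its type (`usize`) is fed from `N`'s declaration.
    let b: Buffer<{ 3 + 4 }> = Buffer { bytes: [0u8; 7] };
    assert_eq!(b.bytes.len(), 7);
}
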
+ if anon_const_type.has_non_region_param() { + let e = tcx.dcx().span_err( + const_arg.span(), + "anonymous constants referencing generics are not yet supported", + ); + tcx.feed_anon_const_type(anon.def_id, ty::EarlyBinder::bind(Ty::new_error(tcx, e))); + return ty::Const::new_error(tcx, e); + } + + tcx.feed_anon_const_type( + anon.def_id, + ty::EarlyBinder::bind(tcx.type_of(param_def_id).instantiate(tcx, args)), + ); } let hir_id = const_arg.hir_id; @@ -2124,11 +2347,19 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let opt_self_ty = maybe_qself.as_ref().map(|qself| self.lower_ty(qself)); self.lower_const_path_resolved(opt_self_ty, path, hir_id) } - hir::ConstArgKind::Path(qpath) => ty::Const::new_error_with_message( - tcx, - qpath.span(), - format!("Const::lower_const_arg: invalid qpath {qpath:?}"), - ), + hir::ConstArgKind::Path(hir::QPath::TypeRelative(qself, segment)) => { + debug!(?qself, ?segment); + let ty = self.lower_ty(qself); + self.lower_assoc_path_const(hir_id, const_arg.span(), ty, qself, segment) + .unwrap_or_else(|guar| Const::new_error(tcx, guar)) + } + hir::ConstArgKind::Path(qpath @ hir::QPath::LangItem(..)) => { + ty::Const::new_error_with_message( + tcx, + qpath.span(), + format!("Const::lower_const_arg: invalid qpath {qpath:?}"), + ) + } hir::ConstArgKind::Anon(anon) => self.lower_anon_const(anon), hir::ConstArgKind::Infer(span, ()) => self.ct_infer(None, span), } @@ -2164,6 +2395,20 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { ); ty::Const::new_unevaluated(tcx, ty::UnevaluatedConst::new(did, args)) } + Res::Def(DefKind::AssocConst, did) => { + debug_assert!(path.segments.len() >= 2); + let _ = self.prohibit_generic_args( + path.segments[..path.segments.len() - 2].iter(), + GenericsArgsErrExtend::None, + ); + self.lower_qpath_const( + span, + opt_self_ty, + did, + &path.segments[path.segments.len() - 2], + path.segments.last().unwrap(), + ) + } Res::Def(DefKind::Static { .. }, _) => { span_bug!(span, "use of bare `static` ConstArgKind::Path's not yet supported") } @@ -2180,7 +2425,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // Exhaustive match to be clear about what exactly we're considering to be // an invalid Res for a const path. - Res::Def( + res @ (Res::Def( DefKind::Mod | DefKind::Enum | DefKind::Variant @@ -2194,7 +2439,6 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { | DefKind::Union | DefKind::Trait | DefKind::ForeignTy - | DefKind::AssocConst | DefKind::TyParam | DefKind::Macro(_) | DefKind::LifetimeParam @@ -2217,12 +2461,15 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { | Res::Local(_) | Res::ToolMod | Res::NonMacroAttr(_) - | Res::Err => Const::new_error_with_message(tcx, span, "invalid Res for const path"), + | Res::Err) => Const::new_error_with_message( + tcx, + span, + format!("invalid Res {res:?} for const path"), + ), } } - /// Literals and const generic parameters are eagerly converted to a constant, everything else - /// becomes `Unevaluated`. + /// Literals are eagerly converted to a constant, everything else becomes `Unevaluated`. #[instrument(skip(self), level = "debug")] fn lower_anon_const(&self, anon: &AnonConst) -> Const<'tcx> { let tcx = self.tcx(); @@ -2230,10 +2477,10 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let expr = &tcx.hir_body(anon.body).value; debug!(?expr); - let ty = tcx - .type_of(anon.def_id) - .no_bound_vars() - .expect("const parameter types cannot be generic"); + // FIXME(generic_const_parameter_types): We should use the proper generic args + // here. 
It's only used as a hint for literals so doesn't matter too much to use the right + // generic arguments, just weaker type inference. + let ty = tcx.type_of(anon.def_id).instantiate_identity(); match self.try_lower_anon_const_lit(ty, expr) { Some(v) => v, @@ -2421,7 +2668,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { hir::TyKind::Path(hir::QPath::TypeRelative(qself, segment)) => { debug!(?qself, ?segment); let ty = self.lower_ty(qself); - self.lower_assoc_path(hir_ty.hir_id, hir_ty.span, ty, qself, segment, false) + self.lower_assoc_path_ty(hir_ty.hir_id, hir_ty.span, ty, qself, segment, false) .map(|(ty, _, _)| ty) .unwrap_or_else(|guar| Ty::new_error(tcx, guar)) } @@ -2452,28 +2699,26 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let ty_span = ty.span; let ty = self.lower_ty(ty); let pat_ty = match pat.kind { - hir::TyPatKind::Range(start, end, include_end) => { - let ty = match ty.kind() { - ty::Int(_) | ty::Uint(_) | ty::Char => ty, - _ => Ty::new_error( - tcx, - self.dcx().emit_err(InvalidBaseType { - ty, - pat: "range", + hir::TyPatKind::Range(start, end) => { + let (ty, start, end) = match ty.kind() { + // Keep this list of types in sync with the list of types that + // the `RangePattern` trait is implemented for. + ty::Int(_) | ty::Uint(_) | ty::Char => { + let start = self.lower_const_arg(start, FeedConstTy::No); + let end = self.lower_const_arg(end, FeedConstTy::No); + (ty, start, end) + } + _ => { + let guar = self.dcx().span_delayed_bug( ty_span, - pat_span: pat.span, - }), - ), - }; - let start = start.map(|expr| self.lower_const_arg(expr, FeedConstTy::No)); - let end = end.map(|expr| self.lower_const_arg(expr, FeedConstTy::No)); - - let include_end = match include_end { - hir::RangeEnd::Included => true, - hir::RangeEnd::Excluded => false, + "invalid base type for range pattern", + ); + let errc = ty::Const::new_error(tcx, guar); + (Ty::new_error(tcx, guar), errc, errc) + } }; - let pat = tcx.mk_pat(ty::PatternKind::Range { start, end, include_end }); + let pat = tcx.mk_pat(ty::PatternKind::Range { start, end }); Ty::new_pat(tcx, ty, pat) } hir::TyPatKind::Err(e) => Ty::new_error(tcx, e), diff --git a/compiler/rustc_hir_analysis/src/lib.rs b/compiler/rustc_hir_analysis/src/lib.rs index bdfbd540e40..cb3cdc5e8d3 100644 --- a/compiler/rustc_hir_analysis/src/lib.rs +++ b/compiler/rustc_hir_analysis/src/lib.rs @@ -59,6 +59,7 @@ This API is completely unstable and subject to change. #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(assert_matches)] @@ -73,7 +74,6 @@ This API is completely unstable and subject to change. #![feature(slice_partition_dedup)] #![feature(try_blocks)] #![feature(unwrap_infallible)] -#![warn(unreachable_pub)] // tidy-alphabetical-end // These are used by Clippy. @@ -212,7 +212,10 @@ pub fn check_crate(tcx: TyCtxt<'_>) { tcx.par_hir_body_owners(|item_def_id| { let def_kind = tcx.def_kind(item_def_id); match def_kind { - DefKind::Static { .. } => tcx.ensure_ok().eval_static_initializer(item_def_id), + DefKind::Static { .. 
} => { + tcx.ensure_ok().eval_static_initializer(item_def_id); + check::maybe_check_static_with_link_section(tcx, item_def_id); + } DefKind::Const if tcx.generics_of(item_def_id).is_empty() => { let instance = ty::Instance::new(item_def_id.into(), ty::GenericArgs::empty()); let cid = GlobalId { instance, promoted: None }; @@ -223,12 +226,9 @@ pub fn check_crate(tcx: TyCtxt<'_>) { } }); - // FIXME: Remove this when we implement creating `DefId`s - // for anon constants during their parents' typeck. - // Typeck all body owners in parallel will produce queries - // cycle errors because it may typeck on anon constants directly. tcx.par_hir_body_owners(|item_def_id| { let def_kind = tcx.def_kind(item_def_id); + // Skip `AnonConst`s because we feed their `type_of`. if !matches!(def_kind, DefKind::AnonConst) { tcx.ensure_ok().typeck(item_def_id); } @@ -260,7 +260,7 @@ pub fn lower_ty<'tcx>(tcx: TyCtxt<'tcx>, hir_ty: &hir::Ty<'tcx>) -> Ty<'tcx> { pub fn lower_const_arg_for_rustdoc<'tcx>( tcx: TyCtxt<'tcx>, hir_ct: &hir::ConstArg<'tcx>, - feed: FeedConstTy, + feed: FeedConstTy<'_, 'tcx>, ) -> Const<'tcx> { let env_def_id = tcx.hir_get_parent_item(hir_ct.hir_id); collect::ItemCtxt::new(tcx, env_def_id.def_id).lowerer().lower_const_arg(hir_ct, feed) diff --git a/compiler/rustc_hir_analysis/src/variance/constraints.rs b/compiler/rustc_hir_analysis/src/variance/constraints.rs index e954d2b9ea4..8475903c68f 100644 --- a/compiler/rustc_hir_analysis/src/variance/constraints.rs +++ b/compiler/rustc_hir_analysis/src/variance/constraints.rs @@ -252,13 +252,9 @@ impl<'a, 'tcx> ConstraintContext<'a, 'tcx> { ty::Pat(typ, pat) => { match *pat { - ty::PatternKind::Range { start, end, include_end: _ } => { - if let Some(start) = start { - self.add_constraints_from_const(current, start, variance); - } - if let Some(end) = end { - self.add_constraints_from_const(current, end, variance); - } + ty::PatternKind::Range { start, end } => { + self.add_constraints_from_const(current, start, variance); + self.add_constraints_from_const(current, end, variance); } } self.add_constraints_from_ty(current, typ, variance); diff --git a/compiler/rustc_hir_analysis/src/variance/mod.rs b/compiler/rustc_hir_analysis/src/variance/mod.rs index a7760326bb4..0800d99e945 100644 --- a/compiler/rustc_hir_analysis/src/variance/mod.rs +++ b/compiler/rustc_hir_analysis/src/variance/mod.rs @@ -198,6 +198,10 @@ fn variance_of_opaque( ty::ClauseKind::Trait(ty::TraitPredicate { trait_ref: ty::TraitRef { def_id: _, args, .. }, polarity: _, + }) + | ty::ClauseKind::HostEffect(ty::HostEffectPredicate { + trait_ref: ty::TraitRef { def_id: _, args, .. }, + constness: _, }) => { for arg in &args[1..] 
{ arg.visit_with(&mut collector); diff --git a/compiler/rustc_hir_pretty/Cargo.toml b/compiler/rustc_hir_pretty/Cargo.toml index 86989d1e55b..91da8cb3fc5 100644 --- a/compiler/rustc_hir_pretty/Cargo.toml +++ b/compiler/rustc_hir_pretty/Cargo.toml @@ -8,7 +8,7 @@ edition = "2024" rustc_abi = { path = "../rustc_abi" } rustc_ast = { path = "../rustc_ast" } rustc_ast_pretty = { path = "../rustc_ast_pretty" } -rustc_attr_parsing = { path = "../rustc_attr_parsing" } +rustc_attr_data_structures = { path = "../rustc_attr_data_structures" } rustc_hir = { path = "../rustc_hir" } rustc_span = { path = "../rustc_span" } # tidy-alphabetical-end diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs index 5c7426d76b3..3067766fb4d 100644 --- a/compiler/rustc_hir_pretty/src/lib.rs +++ b/compiler/rustc_hir_pretty/src/lib.rs @@ -3,20 +3,19 @@ // tidy-alphabetical-start #![recursion_limit = "256"] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::cell::Cell; use std::vec; use rustc_abi::ExternAbi; -use rustc_ast::util::parser::{self, AssocOp, ExprPrecedence, Fixity}; +use rustc_ast::util::parser::{self, ExprPrecedence, Fixity}; use rustc_ast::{AttrStyle, DUMMY_NODE_ID, DelimArgs}; use rustc_ast_pretty::pp::Breaks::{Consistent, Inconsistent}; use rustc_ast_pretty::pp::{self, Breaks}; use rustc_ast_pretty::pprust::state::MacHeader; use rustc_ast_pretty::pprust::{Comments, PrintState}; -use rustc_attr_parsing::{AttributeKind, PrintAttribute}; +use rustc_attr_data_structures::{AttributeKind, PrintAttribute}; use rustc_hir::{ BindingMode, ByRef, ConstArgKind, GenericArg, GenericBound, GenericParam, GenericParamKind, HirId, ImplicitSelfKind, LifetimeParamKind, Node, PatKind, PreciseCapturingArg, RangeEnd, Term, @@ -118,9 +117,9 @@ impl<'a> State<'a> { self.hardbreak() } hir::Attribute::Parsed(pa) => { - self.word("#[attr=\""); + self.word("#[attr = "); pa.print_attribute(self); - self.word("\")]"); + self.word("]"); self.hardbreak() } } @@ -553,24 +552,6 @@ impl<'a> State<'a> { self.word(";") } - fn print_item_type( - &mut self, - item: &hir::Item<'_>, - generics: &hir::Generics<'_>, - inner: impl Fn(&mut Self), - ) { - self.head("type"); - self.print_ident(item.ident); - self.print_generic_params(generics.params); - self.end(); // end the inner ibox - - self.print_where_clause(generics); - self.space(); - inner(self); - self.word(";"); - self.end(); // end the outer ibox - } - fn print_item(&mut self, item: &hir::Item<'_>) { self.hardbreak_if_not_bol(); self.maybe_print_comment(item.span.lo()); @@ -683,10 +664,17 @@ impl<'a> State<'a> { self.end() } hir::ItemKind::TyAlias(ty, generics) => { - self.print_item_type(item, generics, |state| { - state.word_space("="); - state.print_type(ty); - }); + self.head("type"); + self.print_ident(item.ident); + self.print_generic_params(generics.params); + self.end(); // end the inner ibox + + self.print_where_clause(generics); + self.space(); + self.word_space("="); + self.print_type(ty); + self.word(";"); + self.end(); // end the outer ibox } hir::ItemKind::Enum(ref enum_definition, params) => { self.print_enum_def(enum_definition, params, item.ident.name, item.span); @@ -1296,12 +1284,11 @@ impl<'a> State<'a> { } fn print_expr_binary(&mut self, op: hir::BinOp, lhs: &hir::Expr<'_>, rhs: &hir::Expr<'_>) { - let assoc_op = AssocOp::from_ast_binop(op.node); - let binop_prec = assoc_op.precedence(); + let binop_prec = op.node.precedence(); let left_prec = lhs.precedence(); let right_prec = rhs.precedence(); - let (mut left_needs_paren, 
right_needs_paren) = match assoc_op.fixity() { + let (mut left_needs_paren, right_needs_paren) = match op.node.fixity() { Fixity::Left => (left_prec < binop_prec, right_prec <= binop_prec), Fixity::Right => (left_prec <= binop_prec, right_prec < binop_prec), Fixity::None => (left_prec <= binop_prec, right_prec <= binop_prec), @@ -1414,7 +1401,8 @@ impl<'a> State<'a> { hir::InlineAsmOperand::Const { ref anon_const } => { s.word("const"); s.space(); - s.print_anon_const(anon_const); + // Not using `print_inline_const` to avoid additional `const { ... }` + s.ann.nested(s, Nested::Body(anon_const.body)) } hir::InlineAsmOperand::SymFn { ref expr } => { s.word("sym_fn"); @@ -1470,6 +1458,10 @@ impl<'a> State<'a> { hir::ExprKind::MethodCall(segment, receiver, args, _) => { self.print_expr_method_call(segment, receiver, args); } + hir::ExprKind::Use(expr, _) => { + self.print_expr(expr); + self.word(".use"); + } hir::ExprKind::Binary(op, lhs, rhs) => { self.print_expr_binary(op, lhs, rhs); } @@ -1869,17 +1861,10 @@ impl<'a> State<'a> { // Pat isn't normalized, but the beauty of it // is that it doesn't matter match pat.kind { - TyPatKind::Range(begin, end, end_kind) => { - if let Some(expr) = begin { - self.print_const_arg(expr); - } - match end_kind { - RangeEnd::Included => self.word("..."), - RangeEnd::Excluded => self.word(".."), - } - if let Some(expr) = end { - self.print_const_arg(expr); - } + TyPatKind::Range(begin, end) => { + self.print_const_arg(begin); + self.word("..="); + self.print_const_arg(end); } TyPatKind::Err(_) => { self.popen(); @@ -2227,6 +2212,7 @@ impl<'a> State<'a> { fn print_capture_clause(&mut self, capture_clause: hir::CaptureBy) { match capture_clause { hir::CaptureBy::Value { .. } => self.word_space("move"), + hir::CaptureBy::Use { .. 
} => self.word_space("use"), hir::CaptureBy::Ref => {} } } @@ -2386,6 +2372,7 @@ impl<'a> State<'a> { } fn print_where_predicate(&mut self, predicate: &hir::WherePredicate<'_>) { + self.print_attrs_as_outer(self.attrs(predicate.hir_id)); match *predicate.kind { hir::WherePredicateKind::BoundPredicate(hir::WhereBoundPredicate { bound_generic_params, diff --git a/compiler/rustc_hir_typeck/messages.ftl b/compiler/rustc_hir_typeck/messages.ftl index a994b31aeb4..872861d6289 100644 --- a/compiler/rustc_hir_typeck/messages.ftl +++ b/compiler/rustc_hir_typeck/messages.ftl @@ -10,6 +10,7 @@ hir_typeck_address_of_temporary_taken = cannot take address of a temporary hir_typeck_arg_mismatch_indeterminate = argument type mismatch was detected, but rustc had trouble determining where .note = we would appreciate a bug report: https://github.com/rust-lang/rust/issues/new +hir_typeck_as_deref_suggestion = consider using `as_deref` here hir_typeck_base_expression_double_dot = base expression required after `..` hir_typeck_base_expression_double_dot_add_expr = add a base expression here hir_typeck_base_expression_double_dot_enable_default_field_values = @@ -27,6 +28,9 @@ hir_typeck_cannot_cast_to_bool = cannot cast `{$expr_ty}` as `bool` .help = compare with zero instead .label = unsupported cast +hir_typeck_cant_dereference = type `{$ty}` cannot be dereferenced +hir_typeck_cant_dereference_label = can't be dereferenced + hir_typeck_cast_enum_drop = cannot cast enum `{$expr_ty}` into integer `{$cast_ty}` because it implements `Drop` hir_typeck_cast_thin_pointer_to_wide_pointer = cannot cast thin pointer `{$expr_ty}` to wide pointer `{$cast_ty}` @@ -72,6 +76,9 @@ hir_typeck_dependency_on_unit_never_type_fallback = this function depends on nev hir_typeck_deref_is_empty = this expression `Deref`s to `{$deref_ty}` which implements `is_empty` +hir_typeck_expected_array_or_slice = expected an array or slice, found `{$ty}` +hir_typeck_expected_array_or_slice_label = pattern cannot match with input type `{$ty}` + hir_typeck_expected_default_return_type = expected `()` because of default return type hir_typeck_expected_return_type = expected `{$expected}` because of return type @@ -112,7 +119,11 @@ hir_typeck_int_to_fat = cannot cast `{$expr_ty}` to a pointer that {$known_wide hir_typeck_int_to_fat_label = creating a `{$cast_ty}` requires both an address and {$metadata} hir_typeck_int_to_fat_label_nightly = consider casting this expression to `*const ()`, then using `core::ptr::from_raw_parts` -hir_typeck_invalid_callee = expected function, found {$ty} +hir_typeck_invalid_callee = expected function, found {$found} +hir_typeck_invalid_defined = `{$path}` defined here +hir_typeck_invalid_defined_kind = {$kind} `{$path}` defined here +hir_typeck_invalid_fn_defined = `{$func}` defined here returns `{$ty}` +hir_typeck_invalid_local = `{$local_name}` has type `{$ty}` hir_typeck_lossy_provenance_int2ptr = strict provenance disallows casting integer `{$expr_ty}` to pointer `{$cast_ty}` @@ -142,6 +153,12 @@ hir_typeck_no_associated_item = no {$item_kind} named `{$item_name}` found for { *[other] {" "}in the current scope } +hir_typeck_no_field_on_type = no field `{$field}` on type `{$ty}` + +hir_typeck_no_field_on_variant = no field named `{$field}` on enum variant `{$container}::{$ident}` +hir_typeck_no_field_on_variant_enum = this enum variant... 
+hir_typeck_no_field_on_variant_field = ...does not have this field + hir_typeck_note_caller_chooses_ty_for_ty_param = the caller chooses a type for `{$ty_param_name}` which can be different from `{$found_ty}` hir_typeck_note_edition_guide = for more on editions, read https://doc.rust-lang.org/edition-guide @@ -154,10 +171,13 @@ hir_typeck_pass_to_variadic_function = can't pass `{$ty}` to variadic function .suggestion = cast the value to `{$cast_ty}` .teach_help = certain types, like `{$ty}`, must be casted before passing them to a variadic function, because of arcane ABI rules dictated by the C standard -hir_typeck_ptr_cast_add_auto_to_object = adding {$traits_len -> - [1] an auto trait {$traits} +hir_typeck_ptr_cast_add_auto_to_object = cannot add {$traits_len -> + [1] auto trait {$traits} *[other] auto traits {$traits} -} to a trait object in a pointer cast may cause UB later on +} to dyn bound via pointer cast + .note = this could allow UB elsewhere + .help = use `transmute` if you're sure this is sound + .label = unsupported cast hir_typeck_remove_semi_for_coerce = you might have meant to return the `match` expression hir_typeck_remove_semi_for_coerce_expr = this could be implicitly returned but it is a statement, not a tail expression @@ -183,6 +203,8 @@ hir_typeck_self_ctor_from_outer_item = can't reference `Self` constructor from o .label = the inner item doesn't inherit generics from this impl, so `Self` is invalid to reference .suggestion = replace `Self` with the actual type +hir_typeck_slicing_suggestion = consider slicing here + hir_typeck_struct_expr_non_exhaustive = cannot create non-exhaustive {$what} using struct expression diff --git a/compiler/rustc_hir_typeck/src/callee.rs b/compiler/rustc_hir_typeck/src/callee.rs index d18869b6d90..2a24d626ac3 100644 --- a/compiler/rustc_hir_typeck/src/callee.rs +++ b/compiler/rustc_hir_typeck/src/callee.rs @@ -23,7 +23,7 @@ use tracing::{debug, instrument}; use super::method::MethodCallee; use super::method::probe::ProbeScope; use super::{Expectation, FnCtxt, TupleArgumentsFlag}; -use crate::errors; +use crate::{errors, fluent_generated}; /// Checks that it is legal to call methods of the trait corresponding /// to `trait_id` (this only cares about the trait, not the specific @@ -674,13 +674,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } let callee_ty = self.resolve_vars_if_possible(callee_ty); + let mut path = None; let mut err = self.dcx().create_err(errors::InvalidCallee { span: callee_expr.span, - ty: match &unit_variant { + ty: callee_ty, + found: match &unit_variant { Some((_, kind, path)) => format!("{kind} `{path}`"), - None => format!("`{callee_ty}`"), + None => format!("`{}`", self.tcx.short_string(callee_ty, &mut path)), }, }); + *err.long_ty_path() = path; if callee_ty.references_error() { err.downgrade_to_delayed_bug(); } @@ -780,27 +783,33 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let Some(span) = self.tcx.hir().res_span(def) { let callee_ty = callee_ty.to_string(); let label = match (unit_variant, inner_callee_path) { - (Some((_, kind, path)), _) => Some(format!("{kind} `{path}` defined here")), - (_, Some(hir::QPath::Resolved(_, path))) => self - .tcx - .sess - .source_map() - .span_to_snippet(path.span) - .ok() - .map(|p| format!("`{p}` defined here returns `{callee_ty}`")), + (Some((_, kind, path)), _) => { + err.arg("kind", kind); + err.arg("path", path); + Some(fluent_generated::hir_typeck_invalid_defined_kind) + } + (_, Some(hir::QPath::Resolved(_, path))) => { + 
self.tcx.sess.source_map().span_to_snippet(path.span).ok().map(|p| { + err.arg("func", p); + fluent_generated::hir_typeck_invalid_fn_defined + }) + } _ => { match def { // Emit a different diagnostic for local variables, as they are not // type definitions themselves, but rather variables *of* that type. - Res::Local(hir_id) => Some(format!( - "`{}` has type `{}`", - self.tcx.hir().name(hir_id), - callee_ty - )), + Res::Local(hir_id) => { + err.arg("local_name", self.tcx.hir_name(hir_id)); + Some(fluent_generated::hir_typeck_invalid_local) + } Res::Def(kind, def_id) if kind.ns() == Some(Namespace::ValueNS) => { - Some(format!("`{}` defined here", self.tcx.def_path_str(def_id),)) + err.arg("path", self.tcx.def_path_str(def_id)); + Some(fluent_generated::hir_typeck_invalid_defined) + } + _ => { + err.arg("path", callee_ty); + Some(fluent_generated::hir_typeck_invalid_defined) } - _ => Some(format!("`{callee_ty}` defined here")), } } }; diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs index f5f6ada12c3..70b49fea34f 100644 --- a/compiler/rustc_hir_typeck/src/cast.rs +++ b/compiler/rustc_hir_typeck/src/cast.rs @@ -548,17 +548,19 @@ impl<'a, 'tcx> CastCheck<'tcx> { err.emit(); } CastError::SizedUnsizedCast => { + let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty); + let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty); fcx.dcx().emit_err(errors::CastThinPointerToWidePointer { span: self.span, - expr_ty: self.expr_ty, - cast_ty: fcx.ty_to_string(self.cast_ty), + expr_ty, + cast_ty, teach: fcx.tcx.sess.teach(E0607), }); } CastError::IntToWideCast(known_metadata) => { let expr_if_nightly = fcx.tcx.sess.is_nightly_build().then_some(self.expr_span); let cast_ty = fcx.resolve_vars_if_possible(self.cast_ty); - let expr_ty = fcx.ty_to_string(self.expr_ty); + let expr_ty = fcx.resolve_vars_if_possible(self.expr_ty); let metadata = known_metadata.unwrap_or("type-specific metadata"); let known_wide = known_metadata.is_some(); let span = self.cast_span; @@ -893,20 +895,22 @@ impl<'a, 'tcx> CastCheck<'tcx> { // e.g. we want to allow `dyn T -> (dyn T,)`, etc. // // We also need to skip auto traits to emit an FCW and not an error. - let src_obj = tcx.mk_ty_from_kind(ty::Dynamic( + let src_obj = Ty::new_dynamic( + tcx, tcx.mk_poly_existential_predicates( &src_tty.without_auto_traits().collect::<Vec<_>>(), ), tcx.lifetimes.re_erased, ty::Dyn, - )); - let dst_obj = tcx.mk_ty_from_kind(ty::Dynamic( + ); + let dst_obj = Ty::new_dynamic( + tcx, tcx.mk_poly_existential_predicates( &dst_tty.without_auto_traits().collect::<Vec<_>>(), ), tcx.lifetimes.re_erased, ty::Dyn, - )); + ); // `dyn Src = dyn Dst`, this checks for matching traits/generics/projections // This is `fcx.demand_eqtype`, but inlined to give a better error. 
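// Editor's illustration (not part of the diff): the cast that `CastThinPointerToWidePointer`
// (E0607) reports; with this change the diagnostic carries resolved `Ty`s for both sides
// instead of pre-rendered strings. This function is expected to be rejected by rustc.
fn thin_to_wide(p: *const u8) -> *const [u8] {
    // error[E0607]: cannot cast thin pointer `*const u8` to wide pointer `*const [u8]`;
    // an `as` cast cannot invent the missing slice-length metadata.
    p as *const [u8]
}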
@@ -936,23 +940,19 @@ impl<'a, 'tcx> CastCheck<'tcx> { .collect::<Vec<_>>(); if !added.is_empty() { - tcx.emit_node_span_lint( - lint::builtin::PTR_CAST_ADD_AUTO_TO_OBJECT, - self.expr.hir_id, - self.span, - errors::PtrCastAddAutoToObject { - traits_len: added.len(), - traits: { - let mut traits: Vec<_> = added - .into_iter() - .map(|trait_did| tcx.def_path_str(trait_did)) - .collect(); - - traits.sort(); - traits.into() - }, + tcx.dcx().emit_err(errors::PtrCastAddAutoToObject { + span: self.span, + traits_len: added.len(), + traits: { + let mut traits: Vec<_> = added + .into_iter() + .map(|trait_did| tcx.def_path_str(trait_did)) + .collect(); + + traits.sort(); + traits.into() }, - ) + }); } Ok(CastKind::PtrPtrCast) @@ -1164,10 +1164,7 @@ impl<'a, 'tcx> CastCheck<'tcx> { if let Some((deref_ty, _)) = derefed { // Give a note about what the expr derefs to. if deref_ty != self.expr_ty.peel_refs() { - err.subdiagnostic(errors::DerefImplsIsEmpty { - span: self.expr_span, - deref_ty: fcx.ty_to_string(deref_ty), - }); + err.subdiagnostic(errors::DerefImplsIsEmpty { span: self.expr_span, deref_ty }); } // Create a multipart suggestion: add `!` and `.is_empty()` in @@ -1175,7 +1172,7 @@ impl<'a, 'tcx> CastCheck<'tcx> { err.subdiagnostic(errors::UseIsEmpty { lo: self.expr_span.shrink_to_lo(), hi: self.span.with_lo(self.expr_span.hi()), - expr_ty: fcx.ty_to_string(self.expr_ty), + expr_ty: self.expr_ty, }); } } diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs index a5108e7a032..d7b5c1ed5e1 100644 --- a/compiler/rustc_hir_typeck/src/coercion.rs +++ b/compiler/rustc_hir_typeck/src/coercion.rs @@ -41,8 +41,8 @@ use rustc_abi::ExternAbi; use rustc_attr_parsing::InlineAttr; use rustc_errors::codes::*; use rustc_errors::{Applicability, Diag, struct_span_code_err}; -use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::{self as hir, LangItem}; use rustc_hir_analysis::hir_ty_lowering::HirTyLowerer; use rustc_infer::infer::relate::RelateResult; use rustc_infer::infer::{Coercion, DefineOpaqueTypes, InferOk, InferResult}; @@ -51,15 +51,13 @@ use rustc_infer::traits::{ PredicateObligations, }; use rustc_middle::span_bug; -use rustc_middle::traits::BuiltinImplSource; use rustc_middle::ty::adjustment::{ Adjust, Adjustment, AllowTwoPhase, AutoBorrow, AutoBorrowMutability, PointerCoercion, }; use rustc_middle::ty::error::TypeError; use rustc_middle::ty::visit::TypeVisitableExt; -use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt}; -use rustc_session::parse::feature_err; -use rustc_span::{BytePos, DUMMY_SP, DesugaringKind, Span, sym}; +use rustc_middle::ty::{self, AliasTy, GenericArgsRef, Ty, TyCtxt}; +use rustc_span::{BytePos, DUMMY_SP, DesugaringKind, Span}; use rustc_trait_selection::infer::InferCtxtExt as _; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt; use rustc_trait_selection::traits::{ @@ -595,6 +593,63 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // Create an obligation for `Source: CoerceUnsized<Target>`. 
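// Editor's illustration (not part of the diff): the kind of pointer cast covered by
// `PtrCastAddAutoToObject`, which this change turns from the `ptr_cast_add_auto_to_object`
// future-compat lint into a hard error (E0804). This function is expected to be rejected.
trait Trait {}

fn add_auto(p: *const dyn Trait) -> *const (dyn Trait + Send) {
    // rejected: adds the auto trait `Send` to the `dyn` bound via a pointer cast,
    // which could later let code assume `Send` without any check that it actually holds.
    p as *const (dyn Trait + Send)
}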
let cause = self.cause(self.cause.span, ObligationCauseCode::Coercion { source, target }); + let root_obligation = Obligation::new( + self.tcx, + cause.clone(), + self.fcx.param_env, + ty::TraitRef::new(self.tcx, coerce_unsized_did, [coerce_source, coerce_target]), + ); + + // If the root `Source: CoerceUnsized<Target>` obligation can't possibly hold, + // we don't have to assume that this is unsizing coercion (it will always lead to an error) + // + // However, we don't want to bail early all the time, since the unholdable obligations + // may be interesting for diagnostics (such as trying to coerce `&T` to `&dyn Id<This = U>`), + // so we only bail if there (likely) is another way to convert the types. + if !self.infcx.predicate_may_hold(&root_obligation) { + if let Some(dyn_metadata_adt_def_id) = self.tcx.lang_items().get(LangItem::DynMetadata) + && let Some(metadata_type_def_id) = self.tcx.lang_items().get(LangItem::Metadata) + { + self.probe(|_| { + let ocx = ObligationCtxt::new(&self.infcx); + + // returns `true` if `<ty as Pointee>::Metadata` is `DynMetadata<_>` + let has_dyn_trait_metadata = |ty| { + let metadata_ty: Result<_, _> = ocx.structurally_normalize_ty( + &ObligationCause::dummy(), + self.fcx.param_env, + Ty::new_alias( + self.tcx, + ty::AliasTyKind::Projection, + AliasTy::new(self.tcx, metadata_type_def_id, [ty]), + ), + ); + + metadata_ty.is_ok_and(|metadata_ty| { + metadata_ty + .ty_adt_def() + .is_some_and(|d| d.did() == dyn_metadata_adt_def_id) + }) + }; + + // If both types are raw pointers to a (wrapper over a) trait object, + // this might be a cast like `*const W<dyn Trait> -> *const dyn Trait`. + // So it's better to bail and try that. (even if the cast is not possible, for + // example due to vtables not matching, cast diagnostic will likely still be better) + // + // N.B. use `target`, not `coerce_target` (the latter is a var) + if let &ty::RawPtr(source_pointee, _) = coerce_source.kind() + && let &ty::RawPtr(target_pointee, _) = target.kind() + && has_dyn_trait_metadata(source_pointee) + && has_dyn_trait_metadata(target_pointee) + { + return Err(TypeError::Mismatch); + } + + Ok(()) + })?; + } + } // Use a FIFO queue for this custom fulfillment procedure. // @@ -603,14 +658,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // and almost never more than 3. By using a SmallVec we avoid an // allocation, at the (very small) cost of (occasionally) having to // shift subsequent elements down when removing the front element. - let mut queue: SmallVec<[PredicateObligation<'tcx>; 4]> = smallvec![Obligation::new( - self.tcx, - cause, - self.fcx.param_env, - ty::TraitRef::new(self.tcx, coerce_unsized_did, [coerce_source, coerce_target]) - )]; - - let mut has_unsized_tuple_coercion = false; + let mut queue: SmallVec<[PredicateObligation<'tcx>; 4]> = smallvec![root_obligation]; // Keep resolving `CoerceUnsized` and `Unsize` predicates to avoid // emitting a coercion in cases like `Foo<$1>` -> `Foo<$2>`, where @@ -690,31 +738,10 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { // be silent, as it causes a type mismatch later. } - Ok(Some(impl_source)) => { - // Some builtin coercions are still unstable so we detect - // these here and emit a feature error if coercion doesn't fail - // due to another reason. 
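// Editor's illustration (hedged, not from the diff): the shape of conversion the early
// bail-out above is aimed at. Both pointee types have `DynMetadata`, so reporting a failed
// `CoerceUnsized` obligation would be less useful than letting the pointer-cast check run.
// `W` is a hypothetical wrapper type invented for this sketch.
trait Trait {}

struct W<T: ?Sized>(T);

fn unwrap_ptr(p: *const W<dyn Trait>) -> *const dyn Trait {
    // `*const W<dyn Trait>` -> `*const dyn Trait` is not an unsizing coercion; whether the
    // cast is accepted is decided by the pointer-cast rules (matching vtable metadata),
    // and those rules also produce the better diagnostic when it is not.
    p as *const dyn Trait
}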
- match impl_source { - traits::ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => { - has_unsized_tuple_coercion = true; - } - _ => {} - } - queue.extend(impl_source.nested_obligations()) - } + Ok(Some(impl_source)) => queue.extend(impl_source.nested_obligations()), } } - if has_unsized_tuple_coercion && !self.tcx.features().unsized_tuple_coercion() { - feature_err( - &self.tcx.sess, - sym::unsized_tuple_coercion, - self.cause.span, - "unsized tuple coercion is not stable enough for use and is subject to change", - ) - .emit(); - } - Ok(coercion) } diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs index 85131f6195f..aa917ee07ff 100644 --- a/compiler/rustc_hir_typeck/src/demand.rs +++ b/compiler/rustc_hir_typeck/src/demand.rs @@ -1265,7 +1265,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } else { CallableKind::Function }; - maybe_emit_help(def_id, path.segments[0].ident, args, callable_kind); + maybe_emit_help(def_id, path.segments.last().unwrap().ident, args, callable_kind); } hir::ExprKind::MethodCall(method, _receiver, args, _span) => { let Some(def_id) = diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index 1bf8aa4f78d..b73cd26927a 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -91,7 +91,7 @@ pub(crate) enum ReturnLikeStatementKind { } impl IntoDiagArg for ReturnLikeStatementKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { let kind = match self { Self::Return => "return", Self::Become => "become", @@ -373,9 +373,14 @@ pub(crate) struct LossyProvenanceInt2Ptr<'tcx> { pub sugg: LossyProvenanceInt2PtrSuggestion, } -#[derive(LintDiagnostic)] -#[diag(hir_typeck_ptr_cast_add_auto_to_object)] +#[derive(Diagnostic)] +#[diag(hir_typeck_ptr_cast_add_auto_to_object, code = E0804)] +#[note] +#[help] pub(crate) struct PtrCastAddAutoToObject { + #[primary_span] + #[label] + pub span: Span, pub traits_len: usize, pub traits: DiagSymbolList<String>, } @@ -455,11 +460,82 @@ impl HelpUseLatestEdition { } #[derive(Diagnostic)] +#[diag(hir_typeck_no_field_on_type, code = E0609)] +pub(crate) struct NoFieldOnType<'tcx> { + #[primary_span] + pub(crate) span: Span, + pub(crate) ty: Ty<'tcx>, + pub(crate) field: Ident, +} + +#[derive(Diagnostic)] +#[diag(hir_typeck_no_field_on_variant, code = E0609)] +pub(crate) struct NoFieldOnVariant<'tcx> { + #[primary_span] + pub(crate) span: Span, + pub(crate) container: Ty<'tcx>, + pub(crate) ident: Ident, + pub(crate) field: Ident, + #[label(hir_typeck_no_field_on_variant_enum)] + pub(crate) enum_span: Span, + #[label(hir_typeck_no_field_on_variant_field)] + pub(crate) field_span: Span, +} + +#[derive(Diagnostic)] +#[diag(hir_typeck_cant_dereference, code = E0614)] +pub(crate) struct CantDereference<'tcx> { + #[primary_span] + #[label(hir_typeck_cant_dereference_label)] + pub(crate) span: Span, + pub(crate) ty: Ty<'tcx>, +} + +#[derive(Diagnostic)] +#[diag(hir_typeck_expected_array_or_slice, code = E0529)] +pub(crate) struct ExpectedArrayOrSlice<'tcx> { + #[primary_span] + #[label(hir_typeck_expected_array_or_slice_label)] + pub(crate) span: Span, + pub(crate) ty: Ty<'tcx>, + pub(crate) slice_pat_semantics: bool, + #[subdiagnostic] + pub(crate) as_deref: Option<AsDerefSuggestion>, + #[subdiagnostic] + pub(crate) slicing: Option<SlicingSuggestion>, +} + +#[derive(Subdiagnostic)] +#[suggestion( + hir_typeck_as_deref_suggestion, + code = 
".as_deref()", + style = "verbose", + applicability = "maybe-incorrect" +)] +pub(crate) struct AsDerefSuggestion { + #[primary_span] + pub(crate) span: Span, +} + +#[derive(Subdiagnostic)] +#[suggestion( + hir_typeck_slicing_suggestion, + code = "[..]", + style = "verbose", + applicability = "maybe-incorrect" +)] +pub(crate) struct SlicingSuggestion { + #[primary_span] + pub(crate) span: Span, +} + +#[derive(Diagnostic)] #[diag(hir_typeck_invalid_callee, code = E0618)] -pub(crate) struct InvalidCallee { +pub(crate) struct InvalidCallee<'tcx> { #[primary_span] pub span: Span, - pub ty: String, + pub ty: Ty<'tcx>, + pub found: String, } #[derive(Diagnostic)] @@ -469,7 +545,7 @@ pub(crate) struct IntToWide<'tcx> { #[label(hir_typeck_int_to_fat_label)] pub span: Span, pub metadata: &'tcx str, - pub expr_ty: String, + pub expr_ty: Ty<'tcx>, pub cast_ty: Ty<'tcx>, #[label(hir_typeck_int_to_fat_label_nightly)] pub expr_if_nightly: Option<Span>, @@ -581,12 +657,12 @@ pub(crate) struct UnionPatDotDot { applicability = "maybe-incorrect", style = "verbose" )] -pub(crate) struct UseIsEmpty { +pub(crate) struct UseIsEmpty<'tcx> { #[suggestion_part(code = "!")] pub lo: Span, #[suggestion_part(code = ".is_empty()")] pub hi: Span, - pub expr_ty: String, + pub expr_ty: Ty<'tcx>, } #[derive(Diagnostic)] @@ -745,10 +821,10 @@ pub(crate) struct CtorIsPrivate { #[derive(Subdiagnostic)] #[note(hir_typeck_deref_is_empty)] -pub(crate) struct DerefImplsIsEmpty { +pub(crate) struct DerefImplsIsEmpty<'tcx> { #[primary_span] pub span: Span, - pub deref_ty: String, + pub deref_ty: Ty<'tcx>, } #[derive(Subdiagnostic)] @@ -826,7 +902,7 @@ pub(crate) struct CastThinPointerToWidePointer<'tcx> { #[primary_span] pub span: Span, pub expr_ty: Ty<'tcx>, - pub cast_ty: String, + pub cast_ty: Ty<'tcx>, #[note(hir_typeck_teach_help)] pub(crate) teach: bool, } diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs index 4815627a0ce..7c6bb495be3 100644 --- a/compiler/rustc_hir_typeck/src/expr.rs +++ b/compiler/rustc_hir_typeck/src/expr.rs @@ -45,9 +45,10 @@ use crate::coercion::{CoerceMany, DynamicCoerceMany}; use crate::errors::{ AddressOfTemporaryTaken, BaseExpressionDoubleDot, BaseExpressionDoubleDotAddExpr, BaseExpressionDoubleDotEnableDefaultFieldValues, BaseExpressionDoubleDotRemove, - FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct, HelpUseLatestEdition, - ReturnLikeStatementKind, ReturnStmtOutsideOfFnBody, StructExprNonExhaustive, - TypeMismatchFruTypo, YieldExprOutsideOfCoroutine, + CantDereference, FieldMultiplySpecifiedInInitializer, FunctionalRecordUpdateOnNonStruct, + HelpUseLatestEdition, NoFieldOnType, NoFieldOnVariant, ReturnLikeStatementKind, + ReturnStmtOutsideOfFnBody, StructExprNonExhaustive, TypeMismatchFruTypo, + YieldExprOutsideOfCoroutine, }; use crate::{ BreakableCtxt, CoroutineTypes, Diverges, FnCtxt, Needs, cast, fatally_break_rust, @@ -297,7 +298,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Combine the diverging and has_error flags. self.diverges.set(self.diverges.get() | old_diverges); - debug!("type of {} is...", self.tcx.hir().node_to_string(expr.hir_id)); + debug!("type of {} is...", self.tcx.hir_id_to_string(expr.hir_id)); debug!("... {:?}, expected is {:?}", ty, expected); ty @@ -361,6 +362,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Any expression child of these expressions constitute reads. 
ExprKind::Array(_) | ExprKind::Call(_, _) + | ExprKind::Use(_, _) | ExprKind::MethodCall(_, _, _, _) | ExprKind::Tup(_) | ExprKind::Binary(_, _, _) @@ -551,6 +553,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ExprKind::Closure(closure) => self.check_expr_closure(closure, expr.span, expected), ExprKind::Block(body, _) => self.check_expr_block(body, expected), ExprKind::Call(callee, args) => self.check_expr_call(expr, callee, args, expected), + ExprKind::Use(used_expr, _) => self.check_expr_use(used_expr, expected), ExprKind::MethodCall(segment, receiver, args, _) => { self.check_expr_method_call(expr, segment, receiver, args, expected) } @@ -607,13 +610,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let Some(ty) = self.lookup_derefing(expr, oprnd, oprnd_t) { oprnd_t = ty; } else { - let mut err = type_error_struct!( - self.dcx(), - expr.span, - oprnd_t, - E0614, - "type `{oprnd_t}` cannot be dereferenced", - ); + let mut err = + self.dcx().create_err(CantDereference { span: expr.span, ty: oprnd_t }); let sp = tcx.sess.source_map().start_point(expr.span).with_parent(None); if let Some(sp) = tcx.sess.psess.ambiguous_block_expr_parse.borrow().get(&sp) @@ -1620,6 +1618,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) } + /// Checks use `x.use`. + fn check_expr_use( + &self, + used_expr: &'tcx hir::Expr<'tcx>, + expected: Expectation<'tcx>, + ) -> Ty<'tcx> { + self.check_expr_with_expectation(used_expr, expected) + } + fn check_expr_cast( &self, e: &'tcx hir::Expr<'tcx>, @@ -1810,7 +1817,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { crate::GatherLocalsVisitor::new(&fcx).visit_body(body); let ty = fcx.check_expr_with_expectation(body.value, expected); - fcx.require_type_is_sized(ty, body.value.span, ObligationCauseCode::ConstSized); + fcx.require_type_is_sized(ty, body.value.span, ObligationCauseCode::SizedConstOrStatic); fcx.write_ty(block.hir_id, ty); ty } @@ -1857,12 +1864,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { return Ty::new_error(tcx, guar); } + // We defer checking whether the element type is `Copy` as it is possible to have + // an inference variable as a repeat count and it seems unlikely that `Copy` would + // have inference side effects required for type checking to succeed. + if tcx.features().generic_arg_infer() { + self.deferred_repeat_expr_checks.borrow_mut().push((element, element_ty, count)); // If the length is 0, we don't create any elements, so we don't copy any. // If the length is 1, we don't copy that one element, we move it. Only check // for `Copy` if the length is larger, or unevaluated. - // FIXME(min_const_generic_exprs): We could perhaps defer this check so that - // we don't require `<?0t as Tr>::CONST` doesn't unnecessarily require `Copy`. - if count.try_to_target_usize(tcx).is_none_or(|x| x > 1) { + } else if count.try_to_target_usize(self.tcx).is_none_or(|x| x > 1) { self.enforce_repeat_element_needs_copy_bound(element, element_ty); } @@ -1872,7 +1882,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } /// Requires that `element_ty` is `Copy` (unless it's a const expression itself). 
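// Editor's illustration (not part of the diff): the `Copy` requirement on array repeat
// expressions that the deferred check above enforces once the count is structurally known,
// including the 0/1-length exemption described in the diff's own comments.
fn repeat_examples() {
    let _zeros = [0u8; 3]; // fine: `u8: Copy`
    let _one = [String::new(); 1]; // fine: a single element is moved, not copied
    let _none: [String; 0] = [String::new(); 0]; // fine: no element is ever copied
    // let _many = [String::new(); 3]; // error: the trait bound `String: Copy` is not satisfied
}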
- fn enforce_repeat_element_needs_copy_bound( + pub(super) fn enforce_repeat_element_needs_copy_bound( &self, element: &hir::Expr<'_>, element_ty: Ty<'tcx>, @@ -2923,7 +2933,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } let guar = if field.name == kw::Empty { - self.dcx().span_delayed_bug(field.span, "field name with no name") + self.dcx().span_bug(field.span, "field name with no name") } else if self.method_exists_for_diagnostic( field, base_ty, @@ -3070,7 +3080,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { "ban_nonexisting_field: field={:?}, base={:?}, expr={:?}, base_ty={:?}", ident, base, expr, base_ty ); - let mut err = self.no_such_field_err(ident, base_ty, base.hir_id); + let mut err = self.no_such_field_err(ident, base_ty, expr); match *base_ty.peel_refs().kind() { ty::Array(_, len) => { @@ -3283,21 +3293,27 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); } - fn no_such_field_err(&self, field: Ident, expr_t: Ty<'tcx>, id: HirId) -> Diag<'_> { + fn no_such_field_err( + &self, + field: Ident, + base_ty: Ty<'tcx>, + expr: &hir::Expr<'tcx>, + ) -> Diag<'_> { let span = field.span; - debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, expr_t); + debug!("no_such_field_err(span: {:?}, field: {:?}, expr_t: {:?})", span, field, base_ty); - let mut err = type_error_struct!( - self.dcx(), - span, - expr_t, - E0609, - "no field `{field}` on type `{expr_t}`", - ); + let mut err = self.dcx().create_err(NoFieldOnType { span, ty: base_ty, field }); + if base_ty.references_error() { + err.downgrade_to_delayed_bug(); + } + + if let Some(within_macro_span) = span.within_macro(expr.span, self.tcx.sess.source_map()) { + err.span_label(within_macro_span, "due to this macro variable"); + } // try to add a suggestion in case the field is a nested field of a field of the Adt - let mod_id = self.tcx.parent_module(id).to_def_id(); - let (ty, unwrap) = if let ty::Adt(def, args) = expr_t.kind() + let mod_id = self.tcx.parent_module(expr.hir_id).to_def_id(); + let (ty, unwrap) = if let ty::Adt(def, args) = base_ty.kind() && (self.tcx.is_diagnostic_item(sym::Result, def.did()) || self.tcx.is_diagnostic_item(sym::Option, def.did())) && let Some(arg) = args.get(0) @@ -3305,10 +3321,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { { (ty, "unwrap().") } else { - (expr_t, "") + (base_ty, "") }; for (found_fields, args) in - self.get_field_candidates_considering_privacy_for_diag(span, ty, mod_id, id) + self.get_field_candidates_considering_privacy_for_diag(span, ty, mod_id, expr.hir_id) { let field_names = found_fields.iter().map(|field| field.name).collect::<Vec<_>>(); let mut candidate_fields: Vec<_> = found_fields @@ -3321,7 +3337,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { args, vec![], mod_id, - id, + expr.hir_id, ) }) .map(|mut field_path| { @@ -3332,7 +3348,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { candidate_fields.sort(); let len = candidate_fields.len(); - if len > 0 { + // Don't suggest `.field` if the base expr is from a different + // syntax context than the field. + if len > 0 && expr.span.eq_ctxt(field.span) { err.span_suggestions( field.span.shrink_to_lo(), format!( @@ -3778,13 +3796,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.check_expr_asm_operand(out_expr, false); } } + hir::InlineAsmOperand::Const { ref anon_const } => { + self.check_expr_const_block(anon_const, Expectation::NoExpectation); + } hir::InlineAsmOperand::SymFn { expr } => { self.check_expr(expr); } - // `AnonConst`s have their own body and is type-checked separately. 
- // As they don't flow into the type system we don't need them to - // be well-formed. - hir::InlineAsmOperand::Const { .. } => {} hir::InlineAsmOperand::SymStatic { .. } => {} hir::InlineAsmOperand::Label { block } => { let previous_diverges = self.diverges.get(); @@ -3867,16 +3884,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .iter_enumerated() .find(|(_, f)| f.ident(self.tcx).normalize_to_macros_2_0() == subident) else { - type_error_struct!( - self.dcx(), - ident.span, - container, - E0609, - "no field named `{subfield}` on enum variant `{container}::{ident}`", - ) - .with_span_label(field.span, "this enum variant...") - .with_span_label(subident.span, "...does not have this field") - .emit(); + self.dcx() + .create_err(NoFieldOnVariant { + span: ident.span, + container, + ident, + field: subfield, + enum_span: field.span, + field_span: subident.span, + }) + .emit_unless(container.references_error()); break; }; @@ -3968,7 +3985,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { _ => (), }; - self.no_such_field_err(field, container, expr.hir_id).emit(); + self.no_such_field_err(field, container, expr).emit(); break; } diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs index 9b85b2aeec6..63e4a8fb44b 100644 --- a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs +++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs @@ -47,6 +47,21 @@ pub trait Delegate<'tcx> { /// the id of the binding in the pattern `pat`. fn consume(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: HirId); + /// The value found at `place` is used, depending + /// on `mode`. Where `diag_expr_id` is the id used for diagnostics for `place`. + /// + /// Use of a `Copy` type in a ByUse context is considered a use + /// by `ImmBorrow` and `borrow` is called instead. This is because + /// a shared borrow is the "minimum access" that would be needed + /// to perform a copy. + /// + /// + /// The parameter `diag_expr_id` indicates the HIR id that ought to be used for + /// diagnostics. Around pattern matching such as `let pat = expr`, the diagnostic + /// id will be the id of the expression `expr` but the place itself will have + /// the id of the binding in the pattern `pat`. + fn use_cloned(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: HirId); + /// The value found at `place` is being borrowed with kind `bk`. /// `diag_expr_id` is the id used for diagnostics (see `consume` for more details). 
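// Editor's illustration (not part of the diff): the error that the new `NoFieldOnType`
// struct (E0609) renders in place of the old `type_error_struct!` call; `Point` is invented
// for this sketch. Expected to be rejected by rustc.
struct Point {
    x: u8,
    y: u8,
}

fn field(p: Point) -> u8 {
    // error[E0609]: no field `z` on type `Point`
    p.z
}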
fn borrow( @@ -91,6 +106,10 @@ impl<'tcx, D: Delegate<'tcx>> Delegate<'tcx> for &mut D { (**self).consume(place_with_id, diag_expr_id) } + fn use_cloned(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: HirId) { + (**self).use_cloned(place_with_id, diag_expr_id) + } + fn borrow( &mut self, place_with_id: &PlaceWithHirId<'tcx>, @@ -143,6 +162,8 @@ pub trait TypeInformationCtxt<'tcx> { fn type_is_copy_modulo_regions(&self, ty: Ty<'tcx>) -> bool; + fn type_is_use_cloned_modulo_regions(&self, ty: Ty<'tcx>) -> bool; + fn body_owner_def_id(&self) -> LocalDefId; fn tcx(&self) -> TyCtxt<'tcx>; @@ -184,6 +205,10 @@ impl<'tcx> TypeInformationCtxt<'tcx> for &FnCtxt<'_, 'tcx> { self.infcx.type_is_copy_modulo_regions(self.param_env, ty) } + fn type_is_use_cloned_modulo_regions(&self, ty: Ty<'tcx>) -> bool { + self.infcx.type_is_use_cloned_modulo_regions(self.param_env, ty) + } + fn body_owner_def_id(&self) -> LocalDefId { self.body_id } @@ -230,6 +255,10 @@ impl<'tcx> TypeInformationCtxt<'tcx> for (&LateContext<'tcx>, LocalDefId) { self.0.type_is_copy_modulo_regions(ty) } + fn type_is_use_cloned_modulo_regions(&self, ty: Ty<'tcx>) -> bool { + self.0.type_is_use_cloned_modulo_regions(ty) + } + fn body_owner_def_id(&self) -> LocalDefId { self.1 } @@ -295,6 +324,24 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx } } + pub fn consume_clone_or_copy(&self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: HirId) { + debug!("delegate_consume_or_clone(place_with_id={:?})", place_with_id); + + // `x.use` will do one of the following + // * if it implements `Copy`, it will be a copy + // * if it implements `UseCloned`, it will be a call to `clone` + // * otherwise, it is a move + // + // we do a conservative approximation of this, treating it as a move unless we know that it implements copy or `UseCloned` + if self.cx.type_is_copy_modulo_regions(place_with_id.place.ty()) { + self.delegate.borrow_mut().copy(place_with_id, diag_expr_id); + } else if self.cx.type_is_use_cloned_modulo_regions(place_with_id.place.ty()) { + self.delegate.borrow_mut().use_cloned(place_with_id, diag_expr_id); + } else { + self.delegate.borrow_mut().consume(place_with_id, diag_expr_id); + } + } + fn consume_exprs(&self, exprs: &[hir::Expr<'_>]) -> Result<(), Cx::Error> { for expr in exprs { self.consume_expr(expr)?; @@ -313,6 +360,15 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx Ok(()) } + pub fn consume_or_clone_expr(&self, expr: &hir::Expr<'_>) -> Result<(), Cx::Error> { + debug!("consume_or_clone_expr(expr={:?})", expr); + + let place_with_id = self.cat_expr(expr)?; + self.consume_clone_or_copy(&place_with_id, place_with_id.hir_id); + self.walk_expr(expr)?; + Ok(()) + } + fn mutate_expr(&self, expr: &hir::Expr<'_>) -> Result<(), Cx::Error> { let place_with_id = self.cat_expr(expr)?; self.delegate.borrow_mut().mutate(&place_with_id, place_with_id.hir_id); @@ -366,6 +422,10 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx self.consume_exprs(args)?; } + hir::ExprKind::Use(expr, _) => { + self.consume_or_clone_expr(expr)?; + } + hir::ExprKind::MethodCall(.., receiver, args, _) => { // callee.m(args) self.consume_expr(receiver)?; @@ -895,48 +955,44 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx match pat.kind { PatKind::Binding(_, canonical_id, ..) 
=> { debug!("walk_pat: binding place={:?} pat={:?}", place, pat); - if let Some(bm) = self + let bm = self .cx .typeck_results() - .extract_binding_mode(tcx.sess, pat.hir_id, pat.span) - { - debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); + .extract_binding_mode(tcx.sess, pat.hir_id, pat.span); + debug!("walk_pat: pat.hir_id={:?} bm={:?}", pat.hir_id, bm); - // pat_ty: the type of the binding being produced. - let pat_ty = self.node_ty(pat.hir_id)?; - debug!("walk_pat: pat_ty={:?}", pat_ty); + // pat_ty: the type of the binding being produced. + let pat_ty = self.node_ty(pat.hir_id)?; + debug!("walk_pat: pat_ty={:?}", pat_ty); - let def = Res::Local(canonical_id); - if let Ok(ref binding_place) = - self.cat_res(pat.hir_id, pat.span, pat_ty, def) - { - self.delegate.borrow_mut().bind(binding_place, binding_place.hir_id); - } + let def = Res::Local(canonical_id); + if let Ok(ref binding_place) = self.cat_res(pat.hir_id, pat.span, pat_ty, def) { + self.delegate.borrow_mut().bind(binding_place, binding_place.hir_id); + } - // Subtle: MIR desugaring introduces immutable borrows for each pattern - // binding when lowering pattern guards to ensure that the guard does not - // modify the scrutinee. - if has_guard { - self.delegate.borrow_mut().borrow( - place, - discr_place.hir_id, - BorrowKind::Immutable, - ); - } + // Subtle: MIR desugaring introduces immutable borrows for each pattern + // binding when lowering pattern guards to ensure that the guard does not + // modify the scrutinee. + if has_guard { + self.delegate.borrow_mut().borrow( + place, + discr_place.hir_id, + BorrowKind::Immutable, + ); + } - // It is also a borrow or copy/move of the value being matched. - // In a cases of pattern like `let pat = upvar`, don't use the span - // of the pattern, as this just looks confusing, instead use the span - // of the discriminant. - match bm.0 { - hir::ByRef::Yes(m) => { - let bk = ty::BorrowKind::from_mutbl(m); - self.delegate.borrow_mut().borrow(place, discr_place.hir_id, bk); - } - hir::ByRef::No => { - debug!("walk_pat binding consuming pat"); - self.consume_or_copy(place, discr_place.hir_id); - } + // It is also a borrow or copy/move of the value being matched. + // In a cases of pattern like `let pat = upvar`, don't use the span + // of the pattern, as this just looks confusing, instead use the span + // of the discriminant. + match bm.0 { + hir::ByRef::Yes(m) => { + let bk = ty::BorrowKind::from_mutbl(m); + self.delegate.borrow_mut().borrow(place, discr_place.hir_id, bk); + } + hir::ByRef::No => { + debug!("walk_pat binding consuming pat"); + self.consume_or_copy(place, discr_place.hir_id); } } } @@ -1089,6 +1145,9 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx ty::UpvarCapture::ByValue => { self.consume_or_copy(&place_with_id, place_with_id.hir_id); } + ty::UpvarCapture::ByUse => { + self.consume_clone_or_copy(&place_with_id, place_with_id.hir_id); + } ty::UpvarCapture::ByRef(upvar_borrow) => { self.delegate.borrow_mut().borrow( &place_with_id, @@ -1176,7 +1235,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx self.cx.tainted_by_errors()?; bug!( "no type for node {} in mem_categorization", - self.cx.tcx().hir().node_to_string(id) + self.cx.tcx().hir_id_to_string(id) ); } } @@ -1390,6 +1449,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx hir::ExprKind::AddrOf(..) | hir::ExprKind::Call(..) + | hir::ExprKind::Use(..) | hir::ExprKind::Assign(..) 
| hir::ExprKind::AssignOp(..) | hir::ExprKind::Closure { .. } diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs index 2d7d80e39bc..c82f7a91168 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs @@ -85,25 +85,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }) } - /// Resolves type and const variables in `ty` if possible. Unlike the infcx + /// Resolves type and const variables in `t` if possible. Unlike the infcx /// version (resolve_vars_if_possible), this version will /// also select obligations if it seems useful, in an effort /// to get more type information. // FIXME(-Znext-solver): A lot of the calls to this method should // probably be `try_structurally_resolve_type` or `structurally_resolve_type` instead. #[instrument(skip(self), level = "debug", ret)] - pub(crate) fn resolve_vars_with_obligations(&self, mut ty: Ty<'tcx>) -> Ty<'tcx> { + pub(crate) fn resolve_vars_with_obligations<T: TypeFoldable<TyCtxt<'tcx>>>( + &self, + mut t: T, + ) -> T { // No Infer()? Nothing needs doing. - if !ty.has_non_region_infer() { + if !t.has_non_region_infer() { debug!("no inference var, nothing needs doing"); - return ty; + return t; } - // If `ty` is a type variable, see whether we already know what it is. - ty = self.resolve_vars_if_possible(ty); - if !ty.has_non_region_infer() { - debug!(?ty); - return ty; + // If `t` is a type variable, see whether we already know what it is. + t = self.resolve_vars_if_possible(t); + if !t.has_non_region_infer() { + debug!(?t); + return t; } // If not, try resolving pending obligations as much as @@ -111,7 +114,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // indirect dependencies that don't seem worth tracking // precisely. self.select_obligations_where_possible(|_| {}); - self.resolve_vars_if_possible(ty) + self.resolve_vars_if_possible(t) } pub(crate) fn record_deferred_call_resolution( @@ -137,7 +140,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(crate) fn local_ty(&self, span: Span, nid: HirId) -> Ty<'tcx> { self.locals.borrow().get(&nid).cloned().unwrap_or_else(|| { - span_bug!(span, "no type for local variable {}", self.tcx.hir().node_to_string(nid)) + span_bug!(span, "no type for local variable {}", self.tcx.hir_id_to_string(nid)) }) } @@ -217,6 +220,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) { debug!("fcx {}", self.tag()); + // Don't write user type annotations for const param types, since we give them + // identity args just so that we can trivially substitute their `EarlyBinder`. + // We enforce that they match their type in MIR later on. 
+ if matches!(self.tcx.def_kind(def_id), DefKind::ConstParam) { + return; + } + if Self::can_contain_user_lifetime_bounds((args, user_self_ty)) { let canonicalized = self.canonicalize_user_type_annotation(ty::UserType::new( ty::UserTypeKind::TypeOf(def_id, UserArgs { args, user_self_ty }), @@ -519,7 +529,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pub(crate) fn lower_const_arg( &self, const_arg: &'tcx hir::ConstArg<'tcx>, - feed: FeedConstTy, + feed: FeedConstTy<'_, 'tcx>, ) -> ty::Const<'tcx> { let ct = self.lowerer().lower_const_arg(const_arg, feed); self.register_wf_obligation( @@ -549,11 +559,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { Some(&t) => t, None if let Some(e) = self.tainted_by_errors() => Ty::new_error(self.tcx, e), None => { - bug!( - "no type for node {} in fcx {}", - self.tcx.hir().node_to_string(id), - self.tag() - ); + bug!("no type for node {} in fcx {}", self.tcx.hir_id_to_string(id), self.tag()); } } } @@ -666,12 +672,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if !errors.is_empty() { self.adjust_fulfillment_errors_for_expr_obligation(&mut errors); - let errors_causecode = errors - .iter() - .map(|e| (e.obligation.cause.span, e.root_obligation.cause.code().clone())) - .collect::<Vec<_>>(); self.err_ctxt().report_fulfillment_errors(errors); - self.collect_unused_stmts_for_coerce_return_ty(errors_causecode); } } @@ -827,15 +828,14 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let trait_missing_method = matches!(error, method::MethodError::NoMatch(_)) && ty.normalized.is_trait(); - if item_name.name != kw::Empty { - self.report_method_error( - hir_id, - ty.normalized, - error, - Expectation::NoExpectation, - trait_missing_method && span.edition().at_least_rust_2021(), // emits missing method for trait only after edition 2021 - ); - } + assert_ne!(item_name.name, kw::Empty); + self.report_method_error( + hir_id, + ty.normalized, + error, + Expectation::NoExpectation, + trait_missing_method && span.edition().at_least_rust_2021(), // emits missing method for trait only after edition 2021 + ); result }); @@ -1135,7 +1135,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .last() .is_some_and(|GenericPathSegment(def_id, _)| tcx.generics_of(*def_id).has_self); - let (res, self_ctor_args) = if let Res::SelfCtor(impl_def_id) = res { + let (res, implicit_args) = if let Res::Def(DefKind::ConstParam, def) = res { + // types of const parameters are somewhat special as they are part of + // the same environment as the const parameter itself. this means that + // unlike most paths `type-of(N)` can return a type naming parameters + // introduced by the containing item, rather than provided through `N`. + // + // for example given `<T, const M: usize, const N: [T; M]>` and some + // `let a = N;` expression. The path to `N` would wind up with no args + // (as it has no args), but instantiating the early binder on `typeof(N)` + // requires providing generic arguments for `[T, M, N]`. + (res, Some(ty::GenericArgs::identity_for_item(tcx, tcx.parent(def)))) + } else if let Res::SelfCtor(impl_def_id) = res { let ty = LoweredTy::from_raw( self, span, @@ -1261,6 +1272,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fn provided_kind( &mut self, + preceding_args: &[ty::GenericArg<'tcx>], param: &ty::GenericParamDef, arg: &GenericArg<'tcx>, ) -> ty::GenericArg<'tcx> { @@ -1280,7 +1292,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { (GenericParamDefKind::Const { .. 
}, GenericArg::Const(ct)) => self .fcx // Ambiguous parts of `ConstArg` are handled in the match arms below - .lower_const_arg(ct.as_unambig_ct(), FeedConstTy::Param(param.def_id)) + .lower_const_arg( + ct.as_unambig_ct(), + FeedConstTy::Param(param.def_id, preceding_args), + ) .into(), (&GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => { self.fcx.ct_infer(Some(param), inf.span).into() @@ -1320,7 +1335,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } - let args_raw = self_ctor_args.unwrap_or_else(|| { + let args_raw = implicit_args.unwrap_or_else(|| { lower_generic_args( self, def_id, @@ -1454,7 +1469,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sp: Span, ct: ty::Const<'tcx>, ) -> ty::Const<'tcx> { - // FIXME(min_const_generic_exprs): We could process obligations here if `ct` is a var. + let ct = self.resolve_vars_with_obligations(ct); if self.next_trait_solver() && let ty::ConstKind::Unevaluated(..) = ct.kind() @@ -1510,6 +1525,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + pub(crate) fn structurally_resolve_const( + &self, + sp: Span, + ct: ty::Const<'tcx>, + ) -> ty::Const<'tcx> { + let ct = self.try_structurally_resolve_const(sp, ct); + + if !ct.is_ct_infer() { + ct + } else { + let e = self.tainted_by_errors().unwrap_or_else(|| { + self.err_ctxt() + .emit_inference_failure_err( + self.body_id, + sp, + ct.into(), + TypeAnnotationNeeded::E0282, + true, + ) + .emit() + }); + // FIXME: Infer `?ct = {const error}`? + ty::Const::new_error(self.tcx, e) + } + } + pub(crate) fn with_breakable_ctxt<F: FnOnce() -> R, R>( &self, id: HirId, diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs index 63c1c060827..b8517701667 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs @@ -3,9 +3,7 @@ use std::{fmt, iter, mem}; use itertools::Itertools; use rustc_data_structures::fx::FxIndexSet; use rustc_errors::codes::*; -use rustc_errors::{ - Applicability, Diag, ErrorGuaranteed, MultiSpan, StashKey, a_or_an, listify, pluralize, -}; +use rustc_errors::{Applicability, Diag, ErrorGuaranteed, MultiSpan, a_or_an, listify, pluralize}; use rustc_hir::def::{CtorOf, DefKind, Res}; use rustc_hir::def_id::DefId; use rustc_hir::intravisit::Visitor; @@ -101,17 +99,38 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { debug!("FnCtxt::check_asm: {} deferred checks", deferred_asm_checks.len()); for (asm, hir_id) in deferred_asm_checks.drain(..) { let enclosing_id = self.tcx.hir_enclosing_body_owner(hir_id); - let expr_ty = |expr: &hir::Expr<'tcx>| { - let ty = self.typeck_results.borrow().expr_ty_adjusted(expr); - let ty = self.resolve_vars_if_possible(ty); - if ty.has_non_region_infer() { - Ty::new_misc_error(self.tcx) - } else { - self.tcx.erase_regions(ty) - } - }; - InlineAsmCtxt::new(self.tcx, enclosing_id, self.typing_env(self.param_env), expr_ty) - .check_asm(asm); + InlineAsmCtxt::new( + enclosing_id, + &self.infcx, + self.typing_env(self.param_env), + &*self.typeck_results.borrow(), + ) + .check_asm(asm); + } + } + + pub(in super::super) fn check_repeat_exprs(&self) { + let mut deferred_repeat_expr_checks = self.deferred_repeat_expr_checks.borrow_mut(); + debug!("FnCtxt::check_repeat_exprs: {} deferred checks", deferred_repeat_expr_checks.len()); + for (element, element_ty, count) in deferred_repeat_expr_checks.drain(..) 
{ + // We want to emit an error if the const is not structurally resolveable as otherwise + // we can find up conservatively proving `Copy` which may infer the repeat expr count + // to something that never required `Copy` in the first place. + let count = + self.structurally_resolve_const(element.span, self.normalize(element.span, count)); + + // Avoid run on "`NotCopy: Copy` is not implemented" errors when the repeat expr count + // is erroneous/unknown. The user might wind up specifying a repeat count of 0/1. + if count.references_error() { + continue; + } + + // If the length is 0, we don't create any elements, so we don't copy any. + // If the length is 1, we don't copy that one element, we move it. Only check + // for `Copy` if the length is larger. + if count.try_to_target_usize(self.tcx).is_none_or(|x| x > 1) { + self.enforce_repeat_element_needs_copy_bound(element, element_ty); + } } } @@ -1617,7 +1636,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ast::LitKind::Char(_) => tcx.types.char, ast::LitKind::Int(_, ast::LitIntType::Signed(t)) => Ty::new_int(tcx, ty::int_ty(t)), ast::LitKind::Int(_, ast::LitIntType::Unsigned(t)) => Ty::new_uint(tcx, ty::uint_ty(t)), - ast::LitKind::Int(_, ast::LitIntType::Unsuffixed) => { + ast::LitKind::Int(i, ast::LitIntType::Unsuffixed) => { let opt_ty = expected.to_option(self).and_then(|ty| match ty.kind() { ty::Int(_) | ty::Uint(_) => Some(ty), // These exist to direct casts like `0x61 as char` to use @@ -1626,6 +1645,19 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ty::Char => Some(tcx.types.u8), ty::RawPtr(..) => Some(tcx.types.usize), ty::FnDef(..) | ty::FnPtr(..) => Some(tcx.types.usize), + &ty::Pat(base, _) if base.is_integral() => { + let layout = tcx + .layout_of(self.typing_env(self.param_env).as_query_input(ty)) + .ok()?; + assert!(!layout.uninhabited); + + match layout.backend_repr { + rustc_abi::BackendRepr::Scalar(scalar) => { + scalar.valid_range(&tcx).contains(u128::from(i.get())).then_some(ty) + } + _ => unreachable!(), + } + } _ => None, }); opt_ty.unwrap_or_else(|| self.next_int_var()) @@ -2142,7 +2174,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let result = self .lowerer() - .lower_assoc_path(hir_id, path_span, ty.raw, qself, segment, true); + .lower_assoc_path_ty(hir_id, path_span, ty.raw, qself, segment, true); let ty = result .map(|(ty, _, _)| ty) .unwrap_or_else(|guar| Ty::new_error(self.tcx(), guar)); @@ -2161,62 +2193,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } - pub(super) fn collect_unused_stmts_for_coerce_return_ty( - &self, - errors_causecode: Vec<(Span, ObligationCauseCode<'tcx>)>, - ) { - for (span, code) in errors_causecode { - self.dcx().try_steal_modify_and_emit_err(span, StashKey::MaybeForgetReturn, |err| { - if let Some(fn_sig) = self.body_fn_sig() - && let ObligationCauseCode::WhereClauseInExpr(_, _, binding_hir_id, ..) = code - && !fn_sig.output().is_unit() - { - let mut block_num = 0; - let mut found_semi = false; - for (hir_id, node) in self.tcx.hir_parent_iter(binding_hir_id) { - // Don't proceed into parent bodies - if hir_id.owner != binding_hir_id.owner { - break; - } - match node { - hir::Node::Stmt(stmt) => { - if let hir::StmtKind::Semi(expr) = stmt.kind { - let expr_ty = self.typeck_results.borrow().expr_ty(expr); - let return_ty = fn_sig.output(); - if !matches!(expr.kind, hir::ExprKind::Ret(..)) - && self.may_coerce(expr_ty, return_ty) - { - found_semi = true; - } - } - } - hir::Node::Block(_block) => { - if found_semi { - block_num += 1; - } - } - hir::Node::Item(item) => { - if let hir::ItemKind::Fn { .. 
} = item.kind { - break; - } - } - _ => {} - } - } - if block_num > 1 && found_semi { - err.span_suggestion_verbose( - // use the span of the *whole* expr - self.tcx.hir().span(binding_hir_id).shrink_to_lo(), - "you might have meant to return this to infer its type parameters", - "return ", - Applicability::MaybeIncorrect, - ); - } - } - }); - } - } - /// Given a vector of fulfillment errors, try to adjust the spans of the /// errors to more accurately point at the cause of the failure. /// diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs index 42236ac6d80..e14f1528d2c 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/mod.rs @@ -17,7 +17,7 @@ use rustc_infer::infer; use rustc_infer::traits::Obligation; use rustc_middle::ty::{self, Const, Ty, TyCtxt, TypeVisitableExt}; use rustc_session::Session; -use rustc_span::{self, DUMMY_SP, Ident, Span, sym}; +use rustc_span::{self, DUMMY_SP, ErrorGuaranteed, Ident, Span, sym}; use rustc_trait_selection::error_reporting::TypeErrCtxt; use rustc_trait_selection::error_reporting::infer::sub_relations::SubRelations; use rustc_trait_selection::traits::{ObligationCause, ObligationCauseCode, ObligationCtxt}; @@ -308,15 +308,17 @@ impl<'tcx> HirTyLowerer<'tcx> for FnCtxt<'_, 'tcx> { )) } - fn lower_assoc_ty( + fn lower_assoc_shared( &self, span: Span, item_def_id: DefId, - item_segment: &hir::PathSegment<'tcx>, + item_segment: &rustc_hir::PathSegment<'tcx>, poly_trait_ref: ty::PolyTraitRef<'tcx>, - ) -> Ty<'tcx> { + _kind: ty::AssocKind, + ) -> Result<(DefId, ty::GenericArgsRef<'tcx>), ErrorGuaranteed> { let trait_ref = self.instantiate_binder_with_fresh_vars( span, + // FIXME(mgca): this should be assoc const if that is the `kind` infer::BoundRegionConversionTime::AssocTypeProjection(item_def_id), poly_trait_ref, ); @@ -328,7 +330,7 @@ impl<'tcx> HirTyLowerer<'tcx> for FnCtxt<'_, 'tcx> { trait_ref.args, ); - Ty::new_projection_from_args(self.tcx(), item_def_id, item_args) + Ok((item_def_id, item_args)) } fn probe_adt(&self, span: Span, ty: Ty<'tcx>) -> Option<ty::AdtDef<'tcx>> { diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs index f9fc1215936..37aaaed5477 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs @@ -23,7 +23,7 @@ use rustc_middle::ty::{ }; use rustc_session::errors::ExprParenthesesNeeded; use rustc_span::source_map::Spanned; -use rustc_span::{Ident, Span, Symbol, sym}; +use rustc_span::{ExpnKind, Ident, MacroKind, Span, Symbol, sym}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; use rustc_trait_selection::error_reporting::traits::DefIdOrName; use rustc_trait_selection::infer::InferCtxtExt; @@ -613,7 +613,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .iter() .take(4) .map(|(var_hir_id, upvar)| { - let var_name = self.tcx.hir().name(*var_hir_id).to_string(); + let var_name = self.tcx.hir_name(*var_hir_id).to_string(); let msg = format!("`{var_name}` captured here"); (upvar.span, msg) }) @@ -1365,6 +1365,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.param_env, ty::TraitRef::new(self.tcx, into_def_id, [expr_ty, expected_ty]), )) + && !expr + .span + .macro_backtrace() + .any(|x| matches!(x.kind, ExpnKind::Macro(MacroKind::Attr | MacroKind::Derive, ..))) { let span = expr.span.find_oldest_ancestor_in_same_ctxt(); @@ -1380,10 +1384,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { sugg.insert(0, 
(expr.span.shrink_to_lo(), format!("{}: ", name))); } diag.multipart_suggestion( - format!("call `Into::into` on this expression to convert `{expr_ty}` into `{expected_ty}`"), - sugg, - Applicability::MaybeIncorrect - ); + format!("call `Into::into` on this expression to convert `{expr_ty}` into `{expected_ty}`"), + sugg, + Applicability::MaybeIncorrect + ); return true; } diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs index 0130ad775d9..4968998fd51 100644 --- a/compiler/rustc_hir_typeck/src/lib.rs +++ b/compiler/rustc_hir_typeck/src/lib.rs @@ -1,6 +1,7 @@ // tidy-alphabetical-start #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(array_windows)] #![feature(box_patterns)] #![feature(if_let_guard)] @@ -8,7 +9,6 @@ #![feature(let_chains)] #![feature(never_type)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod _match; @@ -199,6 +199,15 @@ fn typeck_with_inspect<'tcx>( fcx.write_ty(id, expected_type); }; + // Whether to check repeat exprs before/after inference fallback is somewhat arbitrary of a decision + // as neither option is strictly more permissive than the other. However, we opt to check repeat exprs + // first as errors from not having inferred array lengths yet seem less confusing than errors from inference + // fallback arbitrarily inferring something incompatible with `Copy` inference side effects. + // + // This should also be forwards compatible with moving repeat expr checks to a custom goal kind or using + // marker traits in the future. + fcx.check_repeat_exprs(); + fcx.type_inference_fallback(); // Even though coercion casts provide type hints, we check casts after fallback for diff --git a/compiler/rustc_hir_typeck/src/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs index 3b107fbf173..0483164ca03 100644 --- a/compiler/rustc_hir_typeck/src/method/confirm.rs +++ b/compiler/rustc_hir_typeck/src/method/confirm.rs @@ -426,6 +426,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { fn provided_kind( &mut self, + preceding_args: &[ty::GenericArg<'tcx>], param: &ty::GenericParamDef, arg: &GenericArg<'tcx>, ) -> ty::GenericArg<'tcx> { @@ -446,7 +447,10 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { (GenericParamDefKind::Const { .. }, GenericArg::Const(ct)) => self .cfcx // We handle the ambig portions of `ConstArg` in the match arms below - .lower_const_arg(ct.as_unambig_ct(), FeedConstTy::Param(param.def_id)) + .lower_const_arg( + ct.as_unambig_ct(), + FeedConstTy::Param(param.def_id, preceding_args), + ) .into(), (GenericParamDefKind::Const { .. }, GenericArg::Infer(inf)) => { self.cfcx.ct_infer(Some(param), inf.span).into() diff --git a/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs b/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs index 69d7a6c97cb..72f8793d783 100644 --- a/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs +++ b/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs @@ -363,7 +363,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let import_items: Vec<_> = applicable_trait .import_ids .iter() - .map(|&import_id| self.tcx.hir().expect_item(import_id)) + .map(|&import_id| self.tcx.hir_expect_item(import_id)) .collect(); // Find an identifier with which this trait was imported (note that `_` doesn't count). 
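(Aside, not part of the diff: a minimal sketch of the repeat-expression rule that the deferred `check_repeat_exprs` pass above enforces, following the comment in that hunk; `NotCopy` is a made-up type.)

fn repeat_expr_copy_rule() {
    struct NotCopy;
    // Length 0: no element is ever created, so no `Copy` bound is needed.
    let _empty = [NotCopy; 0];
    // Length 1: the single element is moved into place rather than copied,
    // so no `Copy` bound is needed either.
    let _single = [NotCopy; 1];
    // Length > 1: the element would have to be duplicated, so this is rejected
    // unless `NotCopy: Copy` holds.
    // let _many = [NotCopy; 2]; // error: the trait bound `NotCopy: Copy` is not satisfied
}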
diff --git a/compiler/rustc_hir_typeck/src/method/probe.rs b/compiler/rustc_hir_typeck/src/method/probe.rs index f87e5b5202a..ee01d78965d 100644 --- a/compiler/rustc_hir_typeck/src/method/probe.rs +++ b/compiler/rustc_hir_typeck/src/method/probe.rs @@ -2344,7 +2344,7 @@ impl<'a, 'tcx> ProbeContext<'a, 'tcx> { return false; }; let hir_id = self.fcx.tcx.local_def_id_to_hir_id(local_def_id); - let attrs = self.fcx.tcx.hir().attrs(hir_id); + let attrs = self.fcx.tcx.hir_attrs(hir_id); for attr in attrs { if sym::doc == attr.name_or_empty() { } else if sym::rustc_confusables == attr.name_or_empty() { diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index 18218a7a0a6..1a1540f505d 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -158,7 +158,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.typeck_results.borrow_mut().used_trait_imports.insert(import_id); } - let (span, sugg_span, source, item_name, args) = match self.tcx.hir_node(call_id) { + let (span, expr_span, source, item_name, args) = match self.tcx.hir_node(call_id) { hir::Node::Expr(&hir::Expr { kind: hir::ExprKind::MethodCall(segment, rcvr, args, _), span, @@ -194,6 +194,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { node => unreachable!("{node:?}"), }; + // Try to get the span of the identifier within the expression's syntax context + // (if that's different). + let within_macro_span = span.within_macro(expr_span, self.tcx.sess.source_map()); + // Avoid suggestions when we don't know what's going on. if let Err(guar) = rcvr_ty.error_reported() { return guar; @@ -207,10 +211,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { call_id, source, args, - sugg_span, + expr_span, &mut no_match_data, expected, trait_missing_method, + within_macro_span, ), MethodError::Ambiguity(mut sources) => { @@ -221,6 +226,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { "multiple applicable items in scope" ); err.span_label(item_name.span, format!("multiple `{item_name}` found")); + if let Some(within_macro_span) = within_macro_span { + err.span_label(within_macro_span, "due to this macro variable"); + } self.note_candidates_on_method_error( rcvr_ty, @@ -230,7 +238,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span, &mut err, &mut sources, - Some(sugg_span), + Some(expr_span), ); err.emit() } @@ -252,6 +260,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .span_if_local(def_id) .unwrap_or_else(|| self.tcx.def_span(def_id)); err.span_label(sp, format!("private {kind} defined here")); + if let Some(within_macro_span) = within_macro_span { + err.span_label(within_macro_span, "due to this macro variable"); + } self.suggest_valid_traits(&mut err, item_name, out_of_scope_traits, true); err.emit() } @@ -268,6 +279,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if !needs_mut { err.span_label(bound_span, "this has a `Sized` requirement"); } + if let Some(within_macro_span) = within_macro_span { + err.span_label(within_macro_span, "due to this macro variable"); + } if !candidates.is_empty() { let help = format!( "{an}other candidate{s} {were} found in the following trait{s}", @@ -581,6 +595,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { no_match_data: &mut NoMatchData<'tcx>, expected: Expectation<'tcx>, trait_missing_method: bool, + within_macro_span: Option<Span>, ) -> ErrorGuaranteed { let mode = no_match_data.mode; let tcx = self.tcx; @@ -721,6 +736,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if tcx.sess.source_map().is_multiline(sugg_span) { err.span_label(sugg_span.with_hi(span.lo()), ""); } + if let 
Some(within_macro_span) = within_macro_span { + err.span_label(within_macro_span, "due to this macro variable"); + } if short_ty_str.len() < ty_str.len() && ty_str.len() > 10 { ty_str = short_ty_str; @@ -773,7 +791,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { && let Ok(pick) = self.lookup_probe_for_diagnostic( item_name, Ty::new_ref(tcx, ty::Region::new_error_misc(tcx), ty, ptr_mutbl), - self.tcx.hir().expect_expr(self.tcx.parent_hir_id(rcvr_expr.hir_id)), + self.tcx.hir_expect_expr(self.tcx.parent_hir_id(rcvr_expr.hir_id)), ProbeScope::TraitsInScope, None, ) @@ -816,8 +834,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let SelfSource::MethodCall(rcvr_expr) = source { self.suggest_fn_call(&mut err, rcvr_expr, rcvr_ty, |output_ty| { - let call_expr = - self.tcx.hir().expect_expr(self.tcx.parent_hir_id(rcvr_expr.hir_id)); + let call_expr = self.tcx.hir_expect_expr(self.tcx.parent_hir_id(rcvr_expr.hir_id)); let probe = self.lookup_probe_for_diagnostic( item_name, output_ty, @@ -2355,7 +2372,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { Applicability::MachineApplicable, ); } else { - let call_expr = tcx.hir().expect_expr(tcx.parent_hir_id(expr.hir_id)); + let call_expr = tcx.hir_expect_expr(tcx.parent_hir_id(expr.hir_id)); if let Some(span) = call_expr.span.trim_start(item_name.span) { err.span_suggestion( @@ -2662,7 +2679,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { mod_id, expr.hir_id, ) { - let call_expr = self.tcx.hir().expect_expr(self.tcx.parent_hir_id(expr.hir_id)); + let call_expr = self.tcx.hir_expect_expr(self.tcx.parent_hir_id(expr.hir_id)); let lang_items = self.tcx.lang_items(); let never_mention_traits = [ @@ -2739,7 +2756,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let SelfSource::MethodCall(expr) = source else { return; }; - let call_expr = tcx.hir().expect_expr(tcx.parent_hir_id(expr.hir_id)); + let call_expr = tcx.hir_expect_expr(tcx.parent_hir_id(expr.hir_id)); let ty::Adt(kind, args) = actual.kind() else { return; diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs index bd0848b9916..7e6973259fe 100644 --- a/compiler/rustc_hir_typeck/src/pat.rs +++ b/compiler/rustc_hir_typeck/src/pat.rs @@ -87,10 +87,10 @@ struct TopInfo<'tcx> { } #[derive(Copy, Clone)] -struct PatInfo<'a, 'tcx> { +struct PatInfo<'tcx> { binding_mode: ByRef, max_ref_mutbl: MutblCap, - top_info: &'a TopInfo<'tcx>, + top_info: TopInfo<'tcx>, decl_origin: Option<DeclOrigin<'tcx>>, /// The depth of current pattern @@ -303,11 +303,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { origin_expr: Option<&'tcx hir::Expr<'tcx>>, decl_origin: Option<DeclOrigin<'tcx>>, ) { - let info = TopInfo { expected, origin_expr, span, hir_id: pat.hir_id }; + let top_info = TopInfo { expected, origin_expr, span, hir_id: pat.hir_id }; let pat_info = PatInfo { binding_mode: ByRef::No, max_ref_mutbl: MutblCap::Mut, - top_info: &info, + top_info, decl_origin, current_depth: 0, }; @@ -320,7 +320,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { /// Outside of this module, `check_pat_top` should always be used. /// Conversely, inside this module, `check_pat_top` should never be used. #[instrument(level = "debug", skip(self, pat_info))] - fn check_pat(&self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, pat_info: PatInfo<'_, 'tcx>) { + fn check_pat(&self, pat: &'tcx Pat<'tcx>, expected: Ty<'tcx>, pat_info: PatInfo<'tcx>) { let PatInfo { binding_mode, max_ref_mutbl, top_info: ti, current_depth, .. 
} = pat_info; let path_res = match pat.kind { @@ -352,13 +352,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { qpath, path_res.unwrap(), expected, - ti, + &pat_info.top_info, ); self.write_ty(*hir_id, ty); ty } - PatKind::Expr(lt) => self.check_pat_lit(pat.span, lt, expected, ti), - PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti), + PatKind::Expr(lt) => self.check_pat_lit(pat.span, lt, expected, &pat_info.top_info), + PatKind::Range(lhs, rhs, _) => { + self.check_pat_range(pat.span, lhs, rhs, expected, &pat_info.top_info) + } PatKind::Binding(ba, var_id, ident, sub) => { self.check_pat_ident(pat, ba, var_id, ident, sub, expected, pat_info) } @@ -818,7 +820,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ident: Ident, sub: Option<&'tcx Pat<'tcx>>, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let PatInfo { binding_mode: def_br, top_info: ti, .. } = pat_info; @@ -914,12 +916,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; // We have a concrete type for the local, so we do not need to taint it and hide follow up errors *using* the local. - let _ = self.demand_eqtype_pat(pat.span, eq_ty, local_ty, ti); + let _ = self.demand_eqtype_pat(pat.span, eq_ty, local_ty, &ti); // If there are multiple arms, make sure they all agree on // what the type of the binding `x` ought to be. if var_id != pat.hir_id { - self.check_binding_alt_eq_ty(user_bind_annot, pat.span, var_id, local_ty, ti); + self.check_binding_alt_eq_ty(user_bind_annot, pat.span, var_id, local_ty, &ti); } if let Some(p) = sub { @@ -1149,7 +1151,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fields: &'tcx [hir::PatField<'tcx>], has_rest_pat: bool, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { // Resolve the path and check the definition for errors. let (variant, pat_ty) = match self.check_struct_path(qpath, pat.hir_id) { @@ -1164,7 +1166,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; // Type-check the path. - let _ = self.demand_eqtype_pat(pat.span, expected, pat_ty, pat_info.top_info); + let _ = self.demand_eqtype_pat(pat.span, expected, pat_ty, &pat_info.top_info); // Type-check subpatterns. match self.check_struct_pat_fields(pat_ty, pat, variant, fields, has_rest_pat, pat_info) { @@ -1353,7 +1355,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { subpats: &'tcx [Pat<'tcx>], ddpos: hir::DotDotPos, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let on_error = |e| { @@ -1403,7 +1405,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let pat_ty = pat_ty.no_bound_vars().expect("expected fn type"); // Type-check the tuple struct pattern against the expected type. - let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, pat_info.top_info); + let diag = self.demand_eqtype_pat_diag(pat.span, expected, pat_ty, &pat_info.top_info); let had_err = diag.map_err(|diag| diag.emit()); // Type-check subpatterns. 
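(Aside, not part of the diff: a hypothetical reproduction of the situation the new `within_macro_span` / "due to this macro variable" labels in `suggest.rs` above are aimed at; the macro and identifiers here are invented, not taken from the PR's tests.)

macro_rules! call {
    ($receiver:expr, $method:ident) => {
        $receiver.$method()
    };
}

fn demo() {
    let s = String::from("hello");
    let _ok = call!(s, len);
    // A typo in the method name surfaces as a method-not-found error inside the
    // macro expansion; the added label points back at the macro variable that
    // supplied the name.
    // let _err = call!(s, lenght);
}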
@@ -1610,7 +1612,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { elements: &'tcx [Pat<'tcx>], ddpos: hir::DotDotPos, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let mut expected_len = elements.len(); @@ -1625,7 +1627,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let element_tys_iter = (0..max_len).map(|_| self.next_ty_var(span)); let element_tys = tcx.mk_type_list_from_iter(element_tys_iter); let pat_ty = Ty::new_tup(tcx, element_tys); - if let Err(reported) = self.demand_eqtype_pat(span, expected, pat_ty, pat_info.top_info) { + if let Err(reported) = self.demand_eqtype_pat(span, expected, pat_ty, &pat_info.top_info) { // Walk subpatterns with an expected type of `err` in this case to silence // further errors being emitted when using the bindings. #50333 let element_tys_iter = (0..max_len).map(|_| Ty::new_error(tcx, reported)); @@ -1648,7 +1650,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { variant: &'tcx ty::VariantDef, fields: &'tcx [hir::PatField<'tcx>], has_rest_pat: bool, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Result<(), ErrorGuaranteed> { let tcx = self.tcx; @@ -2257,7 +2259,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span: Span, inner: &'tcx Pat<'tcx>, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; let (box_ty, inner_ty) = self @@ -2267,7 +2269,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // think any errors can be introduced by using `demand::eqtype`. let inner_ty = self.next_ty_var(inner.span); let box_ty = Ty::new_box(tcx, inner_ty); - self.demand_eqtype_pat(span, expected, box_ty, pat_info.top_info)?; + self.demand_eqtype_pat(span, expected, box_ty, &pat_info.top_info)?; Ok((box_ty, inner_ty)) }) .unwrap_or_else(|guar| { @@ -2283,7 +2285,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span: Span, inner: &'tcx Pat<'tcx>, expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; // Register a `DerefPure` bound, which is required by all `deref!()` pats. @@ -2324,7 +2326,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { inner: &'tcx Pat<'tcx>, pat_mutbl: Mutability, mut expected: Ty<'tcx>, - mut pat_info: PatInfo<'_, 'tcx>, + mut pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let tcx = self.tcx; @@ -2482,7 +2484,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { pat.span, expected, ref_ty, - pat_info.top_info, + &pat_info.top_info, ); // Look for a case like `fn foo(&foo: u32)` and suggest @@ -2605,7 +2607,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { slice: Option<&'tcx Pat<'tcx>>, after: &'tcx [Pat<'tcx>], expected: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> Ty<'tcx> { let expected = self.try_structurally_resolve_type(span, expected); @@ -2767,20 +2769,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { &self, span: Span, expected_ty: Ty<'tcx>, - pat_info: PatInfo<'_, 'tcx>, + pat_info: PatInfo<'tcx>, ) -> ErrorGuaranteed { let PatInfo { top_info: ti, current_depth, .. } = pat_info; - let mut err = struct_span_code_err!( - self.dcx(), - span, - E0529, - "expected an array or slice, found `{expected_ty}`" - ); + let mut slice_pat_semantics = false; + let mut as_deref = None; + let mut slicing = None; if let ty::Ref(_, ty, _) = expected_ty.kind() && let ty::Array(..) | ty::Slice(..) 
= ty.kind() { - err.help("the semantics of slice patterns changed recently; see issue #62254"); + slice_pat_semantics = true; } else if self .autoderef(span, expected_ty) .silence_errors() @@ -2797,28 +2796,23 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { || self.tcx.is_diagnostic_item(sym::Result, adt_def.did()) => { // Slicing won't work here, but `.as_deref()` might (issue #91328). - err.span_suggestion_verbose( - span.shrink_to_hi(), - "consider using `as_deref` here", - ".as_deref()", - Applicability::MaybeIncorrect, - ); + as_deref = Some(errors::AsDerefSuggestion { span: span.shrink_to_hi() }); } _ => (), } let is_top_level = current_depth <= 1; if is_slice_or_array_or_vector && is_top_level { - err.span_suggestion_verbose( - span.shrink_to_hi(), - "consider slicing here", - "[..]", - Applicability::MachineApplicable, - ); + slicing = Some(errors::SlicingSuggestion { span: span.shrink_to_hi() }); } } - err.span_label(span, format!("pattern cannot match with input type `{expected_ty}`")); - err.emit() + self.dcx().emit_err(errors::ExpectedArrayOrSlice { + span, + ty: expected_ty, + slice_pat_semantics, + as_deref, + slicing, + }) } fn is_slice_or_array_or_vector(&self, ty: Ty<'tcx>) -> (bool, Ty<'tcx>) { diff --git a/compiler/rustc_hir_typeck/src/typeck_root_ctxt.rs b/compiler/rustc_hir_typeck/src/typeck_root_ctxt.rs index 903be7e732a..381606a9fb0 100644 --- a/compiler/rustc_hir_typeck/src/typeck_root_ctxt.rs +++ b/compiler/rustc_hir_typeck/src/typeck_root_ctxt.rs @@ -62,6 +62,9 @@ pub(crate) struct TypeckRootCtxt<'tcx> { pub(super) deferred_coroutine_interiors: RefCell<Vec<(LocalDefId, hir::BodyId, Ty<'tcx>)>>, + pub(super) deferred_repeat_expr_checks: + RefCell<Vec<(&'tcx hir::Expr<'tcx>, Ty<'tcx>, ty::Const<'tcx>)>>, + /// Whenever we introduce an adjustment from `!` into a type variable, /// we record that type variable here. This is later used to inform /// fallback. See the `fallback` module for details. @@ -96,6 +99,7 @@ impl<'tcx> TypeckRootCtxt<'tcx> { deferred_transmute_checks: RefCell::new(Vec::new()), deferred_asm_checks: RefCell::new(Vec::new()), deferred_coroutine_interiors: RefCell::new(Vec::new()), + deferred_repeat_expr_checks: RefCell::new(Vec::new()), diverging_type_vars: RefCell::new(Default::default()), infer_var_info: RefCell::new(Default::default()), } diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs index 9a0b2247058..37f3786c00a 100644 --- a/compiler/rustc_hir_typeck/src/upvar.rs +++ b/compiler/rustc_hir_typeck/src/upvar.rs @@ -671,6 +671,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let (place, capture_kind) = match capture_clause { hir::CaptureBy::Value { .. } => adjust_for_move_closure(place, capture_kind), + hir::CaptureBy::Use { .. 
} => adjust_for_use_closure(place, capture_kind), hir::CaptureBy::Ref => adjust_for_non_move_closure(place, capture_kind), }; @@ -780,7 +781,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { PlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id, base => bug!("Expected upvar, found={:?}", base), }; - let var_ident = self.tcx.hir().ident(var_hir_id); + let var_ident = self.tcx.hir_ident(var_hir_id); let Some(min_cap_list) = root_var_min_capture_list.get_mut(&var_hir_id) else { let mutability = self.determine_capture_mutability(&typeck_results, &place); @@ -987,13 +988,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { UpvarMigrationInfo::CapturingPrecise { source_expr: Some(capture_expr_id), var_name: captured_name } => { let cause_span = self.tcx.hir().span(*capture_expr_id); lint.span_label(cause_span, format!("in Rust 2018, this closure captures all of `{}`, but in Rust 2021, it will only capture `{}`", - self.tcx.hir().name(*var_hir_id), + self.tcx.hir_name(*var_hir_id), captured_name, )); } UpvarMigrationInfo::CapturingNothing { use_span } => { lint.span_label(*use_span, format!("in Rust 2018, this causes the closure to capture `{}`, but in Rust 2021, it has no effect", - self.tcx.hir().name(*var_hir_id), + self.tcx.hir_name(*var_hir_id), )); } @@ -1008,13 +1009,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { match &lint_note.captures_info { UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => { lint.span_label(drop_location_span, format!("in Rust 2018, `{}` is dropped here, but in Rust 2021, only `{}` will be dropped here as part of the closure", - self.tcx.hir().name(*var_hir_id), + self.tcx.hir_name(*var_hir_id), captured_name, )); } UpvarMigrationInfo::CapturingNothing { use_span: _ } => { lint.span_label(drop_location_span, format!("in Rust 2018, `{v}` is dropped here along with the closure, but in Rust 2021 `{v}` is not part of the closure", - v = self.tcx.hir().name(*var_hir_id), + v = self.tcx.hir_name(*var_hir_id), )); } } @@ -1025,7 +1026,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // not capturing something anymore cannot cause a trait to fail to be implemented: match &lint_note.captures_info { UpvarMigrationInfo::CapturingPrecise { var_name: captured_name, .. } => { - let var_name = self.tcx.hir().name(*var_hir_id); + let var_name = self.tcx.hir_name(*var_hir_id); lint.span_label(closure_head_span, format!("\ in Rust 2018, this closure implements {missing_trait} \ as `{var_name}` implements {missing_trait}, but in Rust 2021, \ @@ -1165,7 +1166,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let ty = match closure_clause { hir::CaptureBy::Value { .. } => ty, // For move closure the capture kind should be by value - hir::CaptureBy::Ref => { + hir::CaptureBy::Ref | hir::CaptureBy::Use { .. } => { // For non move closure the capture kind is the max capture kind of all captures // according to the ordering ImmBorrow < UniqueImmBorrow < MutBorrow < ByValue let mut max_capture_info = root_var_min_capture_list.first().unwrap().info; @@ -1292,7 +1293,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .insert(UpvarMigrationInfo::CapturingNothing { use_span: upvar.span }); return Some(diagnostics_info); } - hir::CaptureBy::Ref => {} + hir::CaptureBy::Ref | hir::CaptureBy::Use { .. 
} => {} } return None; @@ -1305,7 +1306,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { for captured_place in root_var_min_capture_list.iter() { match captured_place.info.capture_kind { // Only care about captures that are moved into the closure - ty::UpvarCapture::ByValue => { + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => { projections_list.push(captured_place.place.projections.as_slice()); diagnostics_info.insert(UpvarMigrationInfo::CapturingPrecise { source_expr: captured_place.info.path_expr_id, @@ -1689,10 +1690,32 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // // If the data will be moved out of this place, then the place will be truncated // at the first Deref in `adjust_for_move_closure` and then moved into the closure. + // + // For example: + // + // struct Buffer<'a> { + // x: &'a String, + // y: Vec<u8>, + // } + // + // fn get<'a>(b: Buffer<'a>) -> impl Sized + 'a { + // let c = move || b.x; + // drop(b); + // c + // } + // + // Even though the closure is declared as move, when we are capturing borrowed data (in + // this case, *b.x) we prefer to capture by reference. + // Otherwise you'd get an error in 2021 immediately because you'd be trying to take + // ownership of the (borrowed) String or else you'd take ownership of b, as in 2018 and + // before, which is also an error. hir::CaptureBy::Value { .. } if !place.deref_tys().any(Ty::is_ref) => { ty::UpvarCapture::ByValue } - hir::CaptureBy::Value { .. } | hir::CaptureBy::Ref => { + hir::CaptureBy::Use { .. } if !place.deref_tys().any(Ty::is_ref) => { + ty::UpvarCapture::ByUse + } + hir::CaptureBy::Value { .. } | hir::CaptureBy::Use { .. } | hir::CaptureBy::Ref => { ty::UpvarCapture::ByRef(BorrowKind::Immutable) } } @@ -1927,7 +1950,7 @@ fn apply_capture_kind_on_capture_ty<'tcx>( region: ty::Region<'tcx>, ) -> Ty<'tcx> { match capture_kind { - ty::UpvarCapture::ByValue => ty, + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => ty, ty::UpvarCapture::ByRef(kind) => Ty::new_ref(tcx, region, ty, kind.to_mutbl_lossy()), } } @@ -2024,6 +2047,21 @@ impl<'tcx> euv::Delegate<'tcx> for InferBorrowKind<'tcx> { } #[instrument(skip(self), level = "debug")] + fn use_cloned(&mut self, place_with_id: &PlaceWithHirId<'tcx>, diag_expr_id: HirId) { + let PlaceBase::Upvar(upvar_id) = place_with_id.place.base else { return }; + assert_eq!(self.closure_def_id, upvar_id.closure_expr_id); + + self.capture_information.push(( + place_with_id.place.clone(), + ty::CaptureInfo { + capture_kind_expr_id: Some(diag_expr_id), + path_expr_id: Some(diag_expr_id), + capture_kind: ty::UpvarCapture::ByUse, + }, + )); + } + + #[instrument(skip(self), level = "debug")] fn borrow( &mut self, place_with_id: &PlaceWithHirId<'tcx>, @@ -2164,6 +2202,20 @@ fn adjust_for_move_closure( (place, ty::UpvarCapture::ByValue) } +/// Truncate deref of any reference. +fn adjust_for_use_closure( + mut place: Place<'_>, + mut kind: ty::UpvarCapture, +) -> (Place<'_>, ty::UpvarCapture) { + let first_deref = place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref); + + if let Some(idx) = first_deref { + truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx); + } + + (place, ty::UpvarCapture::ByUse) +} + /// Adjust closure capture just that if taking ownership of data, only move data /// from enclosing stack frame. 
fn adjust_for_non_move_closure( @@ -2174,7 +2226,7 @@ fn adjust_for_non_move_closure( place.projections.iter().position(|proj| proj.kind == ProjectionKind::Deref); match kind { - ty::UpvarCapture::ByValue => { + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => { if let Some(idx) = contains_deref { truncate_place_to_len_and_update_capture_kind(&mut place, &mut kind, idx); } @@ -2219,6 +2271,7 @@ fn construct_capture_kind_reason_string<'tcx>( let capture_kind_str = match capture_info.capture_kind { ty::UpvarCapture::ByValue => "ByValue".into(), + ty::UpvarCapture::ByUse => "ByUse".into(), ty::UpvarCapture::ByRef(kind) => format!("{kind:?}"), }; @@ -2240,13 +2293,14 @@ fn construct_capture_info_string<'tcx>( let capture_kind_str = match capture_info.capture_kind { ty::UpvarCapture::ByValue => "ByValue".into(), + ty::UpvarCapture::ByUse => "ByUse".into(), ty::UpvarCapture::ByRef(kind) => format!("{kind:?}"), }; format!("{place_str} -> {capture_kind_str}") } fn var_name(tcx: TyCtxt<'_>, var_hir_id: HirId) -> Symbol { - tcx.hir().name(var_hir_id) + tcx.hir_name(var_hir_id) } #[instrument(level = "debug", skip(tcx))] @@ -2335,8 +2389,11 @@ fn determine_capture_info( // expressions. let eq_capture_kind = match (capture_info_a.capture_kind, capture_info_b.capture_kind) { (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByValue) => true, + (ty::UpvarCapture::ByUse, ty::UpvarCapture::ByUse) => true, (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => ref_a == ref_b, - (ty::UpvarCapture::ByValue, _) | (ty::UpvarCapture::ByRef(_), _) => false, + (ty::UpvarCapture::ByValue, _) + | (ty::UpvarCapture::ByUse, _) + | (ty::UpvarCapture::ByRef(_), _) => false, }; if eq_capture_kind { @@ -2346,10 +2403,20 @@ fn determine_capture_info( } } else { // We select the CaptureKind which ranks higher based the following priority order: - // ByValue > MutBorrow > UniqueImmBorrow > ImmBorrow + // (ByUse | ByValue) > MutBorrow > UniqueImmBorrow > ImmBorrow match (capture_info_a.capture_kind, capture_info_b.capture_kind) { - (ty::UpvarCapture::ByValue, _) => capture_info_a, - (_, ty::UpvarCapture::ByValue) => capture_info_b, + (ty::UpvarCapture::ByUse, ty::UpvarCapture::ByValue) + | (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByUse) => { + bug!("Same capture can't be ByUse and ByValue at the same time") + } + (ty::UpvarCapture::ByValue, ty::UpvarCapture::ByValue) + | (ty::UpvarCapture::ByUse, ty::UpvarCapture::ByUse) + | (ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse, ty::UpvarCapture::ByRef(_)) => { + capture_info_a + } + (ty::UpvarCapture::ByRef(_), ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse) => { + capture_info_b + } (ty::UpvarCapture::ByRef(ref_a), ty::UpvarCapture::ByRef(ref_b)) => { match (ref_a, ref_b) { // Take LHS: @@ -2401,7 +2468,7 @@ fn truncate_place_to_len_and_update_capture_kind<'tcx>( } ty::UpvarCapture::ByRef(..) 
=> {} - ty::UpvarCapture::ByValue => {} + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => {} } place.projections.truncate(len); diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs index 3e9ce0e11e4..d7d7d6a20ac 100644 --- a/compiler/rustc_hir_typeck/src/writeback.rs +++ b/compiler/rustc_hir_typeck/src/writeback.rs @@ -248,13 +248,6 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { } } } - - fn visit_const_block(&mut self, span: Span, anon_const: &hir::ConstBlock) { - self.visit_node_id(span, anon_const.hir_id); - - let body = self.tcx().hir_body(anon_const.body); - self.visit_body(body); - } } /////////////////////////////////////////////////////////////////////////// @@ -284,9 +277,6 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { hir::ExprKind::Field(..) | hir::ExprKind::OffsetOf(..) => { self.visit_field_id(e.hir_id); } - hir::ExprKind::ConstBlock(ref anon_const) => { - self.visit_const_block(e.span, anon_const); - } _ => {} } @@ -297,6 +287,14 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { self.fix_index_builtin_expr(e); } + fn visit_inline_const(&mut self, anon_const: &hir::ConstBlock) { + let span = self.tcx().def_span(anon_const.def_id); + self.visit_node_id(span, anon_const.hir_id); + + let body = self.tcx().hir_body(anon_const.body); + self.visit_body(body); + } + fn visit_generic_param(&mut self, p: &'tcx hir::GenericParam<'tcx>) { match &p.kind { hir::GenericParamKind::Lifetime { .. } => { @@ -319,11 +317,8 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { match p.kind { hir::PatKind::Binding(..) => { let typeck_results = self.fcx.typeck_results.borrow(); - if let Some(bm) = - typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span) - { - self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm); - } + let bm = typeck_results.extract_binding_mode(self.tcx().sess, p.hir_id, p.span); + self.typeck_results.pat_binding_modes_mut().insert(p.hir_id, bm); } hir::PatKind::Struct(_, fields, _) => { for field in fields { @@ -343,9 +338,6 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { fn visit_pat_expr(&mut self, expr: &'tcx hir::PatExpr<'tcx>) { self.visit_node_id(expr.span, expr.hir_id); - if let hir::PatExprKind::ConstBlock(c) = &expr.kind { - self.visit_const_block(expr.span, c); - } intravisit::walk_pat_expr(self, expr); } diff --git a/compiler/rustc_incremental/Cargo.toml b/compiler/rustc_incremental/Cargo.toml index 4939bfb3a1c..db0a5841887 100644 --- a/compiler/rustc_incremental/Cargo.toml +++ b/compiler/rustc_incremental/Cargo.toml @@ -5,7 +5,7 @@ edition = "2024" [dependencies] # tidy-alphabetical-start -rand = "0.8.4" +rand = "0.9.0" rustc_ast = { path = "../rustc_ast" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_errors = { path = "../rustc_errors" } diff --git a/compiler/rustc_incremental/src/assert_dep_graph.rs b/compiler/rustc_incremental/src/assert_dep_graph.rs index 8f2ca6babea..1b2056f541f 100644 --- a/compiler/rustc_incremental/src/assert_dep_graph.rs +++ b/compiler/rustc_incremental/src/assert_dep_graph.rs @@ -123,7 +123,7 @@ impl<'tcx> IfThisChanged<'tcx> { fn process_attrs(&mut self, def_id: LocalDefId) { let def_path_hash = self.tcx.def_path_hash(def_id.to_def_id()); let hir_id = self.tcx.local_def_id_to_hir_id(def_id); - let attrs = self.tcx.hir().attrs(hir_id); + let attrs = self.tcx.hir_attrs(hir_id); for attr in attrs { if attr.has_name(sym::rustc_if_this_changed) { let dep_node_interned = self.argument(attr); diff 
--git a/compiler/rustc_incremental/src/lib.rs b/compiler/rustc_incremental/src/lib.rs index 563ed7614c6..dabfb6a90ca 100644 --- a/compiler/rustc_incremental/src/lib.rs +++ b/compiler/rustc_incremental/src/lib.rs @@ -2,12 +2,12 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![deny(missing_docs)] #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(file_buffered)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod assert_dep_graph; diff --git a/compiler/rustc_incremental/src/persist/file_format.rs b/compiler/rustc_incremental/src/persist/file_format.rs index 4c664ea6ba0..9cec2702a41 100644 --- a/compiler/rustc_incremental/src/persist/file_format.rs +++ b/compiler/rustc_incremental/src/persist/file_format.rs @@ -123,7 +123,7 @@ pub(crate) fn read_file( // Check HEADER_FORMAT_VERSION { - debug_assert!(::std::mem::size_of_val(&HEADER_FORMAT_VERSION) == 2); + debug_assert!(size_of_val(&HEADER_FORMAT_VERSION) == 2); let mut header_format_version = [0u8; 2]; file.read_exact(&mut header_format_version)?; let header_format_version = diff --git a/compiler/rustc_incremental/src/persist/fs.rs b/compiler/rustc_incremental/src/persist/fs.rs index 19cca48af61..76a1ff3cf38 100644 --- a/compiler/rustc_incremental/src/persist/fs.rs +++ b/compiler/rustc_incremental/src/persist/fs.rs @@ -108,7 +108,7 @@ use std::io::{self, ErrorKind}; use std::path::{Path, PathBuf}; use std::time::{Duration, SystemTime, UNIX_EPOCH}; -use rand::{RngCore, thread_rng}; +use rand::{RngCore, rng}; use rustc_data_structures::base_n::{BaseNString, CASE_INSENSITIVE, ToBaseN}; use rustc_data_structures::fx::{FxHashSet, FxIndexSet}; use rustc_data_structures::svh::Svh; @@ -445,7 +445,7 @@ fn copy_files(sess: &Session, target_dir: &Path, source_dir: &Path) -> Result<bo fn generate_session_dir_path(crate_dir: &Path) -> PathBuf { let timestamp = timestamp_to_string(SystemTime::now()); debug!("generate_session_dir_path: timestamp = {}", timestamp); - let random_number = thread_rng().next_u32(); + let random_number = rng().next_u32(); debug!("generate_session_dir_path: random_number = {}", random_number); // Chop the first 3 characters off the timestamp. Those 3 bytes will be zero for a while. diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs index 6dc044a6d38..598409c9051 100644 --- a/compiler/rustc_index/src/bit_set.rs +++ b/compiler/rustc_index/src/bit_set.rs @@ -1,7 +1,9 @@ use std::marker::PhantomData; +#[cfg(not(feature = "nightly"))] +use std::mem; use std::ops::{BitAnd, BitAndAssign, BitOrAssign, Bound, Not, Range, RangeBounds, Shl}; use std::rc::Rc; -use std::{fmt, iter, mem, slice}; +use std::{fmt, iter, slice}; use Chunk::*; #[cfg(feature = "nightly")] @@ -14,7 +16,7 @@ use crate::{Idx, IndexVec}; mod tests; type Word = u64; -const WORD_BYTES: usize = mem::size_of::<Word>(); +const WORD_BYTES: usize = size_of::<Word>(); const WORD_BITS: usize = WORD_BYTES * 8; // The choice of chunk size has some trade-offs. 
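(Aside, not part of the diff: the rand 0.9 rename adopted in `fs.rs` above, sketched against the rand 0.9 API; the function name is made up.)

use rand::RngCore;

fn random_session_suffix() -> u32 {
    // rand 0.9 renames `thread_rng()` to `rng()`; the returned generator still
    // implements `RngCore`, so call sites only change the constructor name.
    rand::rng().next_u32()
}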
diff --git a/compiler/rustc_index/src/lib.rs b/compiler/rustc_index/src/lib.rs index 3441a5f65c7..cc680838e7e 100644 --- a/compiler/rustc_index/src/lib.rs +++ b/compiler/rustc_index/src/lib.rs @@ -4,7 +4,6 @@ #![cfg_attr(feature = "nightly", feature(extend_one, step_trait, test))] #![cfg_attr(feature = "nightly", feature(new_range_api))] #![cfg_attr(feature = "nightly", feature(new_zeroed_alloc))] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod bit_set; diff --git a/compiler/rustc_index/src/slice.rs b/compiler/rustc_index/src/slice.rs index f17ea9e4b59..67ac805c2bf 100644 --- a/compiler/rustc_index/src/slice.rs +++ b/compiler/rustc_index/src/slice.rs @@ -1,6 +1,7 @@ use std::fmt; use std::marker::PhantomData; use std::ops::{Index, IndexMut}; +use std::slice::GetDisjointMutError::*; use std::slice::{self, SliceIndex}; use crate::{Idx, IndexVec, IntoSliceIdx}; @@ -65,6 +66,8 @@ impl<I: Idx, T> IndexSlice<I, T> { #[inline] pub fn iter_enumerated(&self) -> impl DoubleEndedIterator<Item = (I, &T)> + ExactSizeIterator { + // Allow the optimizer to elide the bounds checking when creating each index. + let _ = I::new(self.len()); self.raw.iter().enumerate().map(|(n, t)| (I::new(n), t)) } @@ -72,6 +75,8 @@ impl<I: Idx, T> IndexSlice<I, T> { pub fn indices( &self, ) -> impl DoubleEndedIterator<Item = I> + ExactSizeIterator + Clone + 'static { + // Allow the optimizer to elide the bounds checking when creating each index. + let _ = I::new(self.len()); (0..self.len()).map(|n| I::new(n)) } @@ -84,6 +89,8 @@ impl<I: Idx, T> IndexSlice<I, T> { pub fn iter_enumerated_mut( &mut self, ) -> impl DoubleEndedIterator<Item = (I, &mut T)> + ExactSizeIterator { + // Allow the optimizer to elide the bounds checking when creating each index. + let _ = I::new(self.len()); self.raw.iter_mut().enumerate().map(|(n, t)| (I::new(n), t)) } @@ -115,32 +122,36 @@ impl<I: Idx, T> IndexSlice<I, T> { /// Returns mutable references to two distinct elements, `a` and `b`. /// - /// Panics if `a == b`. + /// Panics if `a == b` or if some of them are out of bounds. #[inline] pub fn pick2_mut(&mut self, a: I, b: I) -> (&mut T, &mut T) { let (ai, bi) = (a.index(), b.index()); - assert!(ai != bi); - - if ai < bi { - let (c1, c2) = self.raw.split_at_mut(bi); - (&mut c1[ai], &mut c2[0]) - } else { - let (c2, c1) = self.pick2_mut(b, a); - (c1, c2) + + match self.raw.get_disjoint_mut([ai, bi]) { + Ok([a, b]) => (a, b), + Err(OverlappingIndices) => panic!("Indices {ai:?} and {bi:?} are not disjoint!"), + Err(IndexOutOfBounds) => { + panic!("Some indices among ({ai:?}, {bi:?}) are out of bounds") + } } } /// Returns mutable references to three distinct elements. /// - /// Panics if the elements are not distinct. + /// Panics if the elements are not distinct or if some of them are out of bounds. 
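(Aside, not part of the diff, sitting between the `pick2_mut` hunk above and `pick3_mut` below: a minimal sketch of the standard-library `get_disjoint_mut` API both now rely on; the sample data is made up.)

fn demo() {
    let mut xs = [1, 2, 3, 4];
    // `get_disjoint_mut` hands back several mutable references at once and
    // reports overlapping or out-of-range indices as an error value, which the
    // rewritten pick*_mut methods turn into panics with their own messages.
    match xs.get_disjoint_mut([0, 2]) {
        Ok([a, b]) => {
            *a += 10;
            *b += 10;
        }
        Err(err) => panic!("indices not usable together: {err:?}"),
    }
    assert_eq!(xs, [11, 2, 13, 4]);
}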
#[inline] pub fn pick3_mut(&mut self, a: I, b: I, c: I) -> (&mut T, &mut T, &mut T) { let (ai, bi, ci) = (a.index(), b.index(), c.index()); - assert!(ai != bi && bi != ci && ci != ai); - let len = self.raw.len(); - assert!(ai < len && bi < len && ci < len); - let ptr = self.raw.as_mut_ptr(); - unsafe { (&mut *ptr.add(ai), &mut *ptr.add(bi), &mut *ptr.add(ci)) } + + match self.raw.get_disjoint_mut([ai, bi, ci]) { + Ok([a, b, c]) => (a, b, c), + Err(OverlappingIndices) => { + panic!("Indices {ai:?}, {bi:?} and {ci:?} are not disjoint!") + } + Err(IndexOutOfBounds) => { + panic!("Some indices among ({ai:?}, {bi:?}, {ci:?}) are out of bounds") + } + } } #[inline] diff --git a/compiler/rustc_index/src/vec.rs b/compiler/rustc_index/src/vec.rs index 7f3f3ead5f2..13f0dda180b 100644 --- a/compiler/rustc_index/src/vec.rs +++ b/compiler/rustc_index/src/vec.rs @@ -93,6 +93,8 @@ impl<I: Idx, T> IndexVec<I, T> { /// be allocated only once, with a capacity of at least `n`.) #[inline] pub fn from_fn_n(func: impl FnMut(I) -> T, n: usize) -> Self { + // Allow the optimizer to elide the bounds checking when creating each index. + let _ = I::new(n); IndexVec::from_raw((0..n).map(I::new).map(func).collect()) } @@ -128,6 +130,8 @@ impl<I: Idx, T> IndexVec<I, T> { pub fn into_iter_enumerated( self, ) -> impl DoubleEndedIterator<Item = (I, T)> + ExactSizeIterator { + // Allow the optimizer to elide the bounds checking when creating each index. + let _ = I::new(self.len()); self.raw.into_iter().enumerate().map(|(n, t)| (I::new(n), t)) } diff --git a/compiler/rustc_index/src/vec/tests.rs b/compiler/rustc_index/src/vec/tests.rs index 381d79c24fc..5b0ca4b2b92 100644 --- a/compiler/rustc_index/src/vec/tests.rs +++ b/compiler/rustc_index/src/vec/tests.rs @@ -9,8 +9,6 @@ crate::newtype_index! { #[test] fn index_size_is_optimized() { - use std::mem::size_of; - assert_eq!(size_of::<MyIdx>(), 4); // Uses 0xFFFF_FFFB assert_eq!(size_of::<Option<MyIdx>>(), 4); diff --git a/compiler/rustc_index_macros/src/newtype.rs b/compiler/rustc_index_macros/src/newtype.rs index 67ec7761133..f0b58eabbff 100644 --- a/compiler/rustc_index_macros/src/newtype.rs +++ b/compiler/rustc_index_macros/src/newtype.rs @@ -305,7 +305,7 @@ impl Parse for Newtype { } } -pub fn newtype(input: proc_macro::TokenStream) -> proc_macro::TokenStream { +pub(crate) fn newtype(input: proc_macro::TokenStream) -> proc_macro::TokenStream { let input = parse_macro_input!(input as Newtype); input.0.into() } diff --git a/compiler/rustc_infer/src/infer/relate/higher_ranked.rs b/compiler/rustc_infer/src/infer/relate/higher_ranked.rs index 0998f3c4790..a12d83db91f 100644 --- a/compiler/rustc_infer/src/infer/relate/higher_ranked.rs +++ b/compiler/rustc_infer/src/infer/relate/higher_ranked.rs @@ -2,6 +2,7 @@ //! the end of the file for details. use rustc_middle::ty::fold::FnMutDelegate; +use rustc_middle::ty::visit::TypeVisitableExt; use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable}; use tracing::{debug, instrument}; @@ -26,8 +27,9 @@ impl<'tcx> InferCtxt<'tcx> { where T: TypeFoldable<TyCtxt<'tcx>>, { - if let Some(inner) = binder.clone().no_bound_vars() { - return inner; + // Inlined `no_bound_vars`. 
+ if !binder.as_ref().skip_binder().has_escaping_bound_vars() { + return binder.skip_binder(); } let next_universe = self.create_next_universe(); diff --git a/compiler/rustc_infer/src/lib.rs b/compiler/rustc_infer/src/lib.rs index a04b2bb2b08..ece18f4ea64 100644 --- a/compiler/rustc_infer/src/lib.rs +++ b/compiler/rustc_infer/src/lib.rs @@ -24,7 +24,6 @@ #![feature(let_chains)] #![feature(rustdoc_internals)] #![recursion_limit = "512"] // For rustdoc -#![warn(unreachable_pub)] // tidy-alphabetical-end mod errors; diff --git a/compiler/rustc_interface/src/lib.rs b/compiler/rustc_interface/src/lib.rs index 54cd341698f..67e0be93523 100644 --- a/compiler/rustc_interface/src/lib.rs +++ b/compiler/rustc_interface/src/lib.rs @@ -4,7 +4,6 @@ #![feature(iter_intersperse)] #![feature(let_chains)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod callbacks; diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 553215ca0af..e47385d0899 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -2,14 +2,14 @@ use std::any::Any; use std::ffi::OsString; use std::io::{self, BufWriter, Write}; use std::path::{Path, PathBuf}; -use std::sync::{Arc, LazyLock}; +use std::sync::{Arc, LazyLock, OnceLock}; use std::{env, fs, iter}; use rustc_ast as ast; use rustc_codegen_ssa::traits::CodegenBackend; use rustc_data_structures::parallel; use rustc_data_structures::steal::Steal; -use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, OnceLock, WorkerLocal}; +use rustc_data_structures::sync::{AppendOnlyIndexVec, FreezeLock, WorkerLocal}; use rustc_expand::base::{ExtCtxt, LintStoreExpand}; use rustc_feature::Features; use rustc_fs_util::try_canonicalize; diff --git a/compiler/rustc_interface/src/proc_macro_decls.rs b/compiler/rustc_interface/src/proc_macro_decls.rs index 00600abb5f1..a2c1f1dbeda 100644 --- a/compiler/rustc_interface/src/proc_macro_decls.rs +++ b/compiler/rustc_interface/src/proc_macro_decls.rs @@ -8,7 +8,7 @@ fn proc_macro_decls_static(tcx: TyCtxt<'_>, (): ()) -> Option<LocalDefId> { let mut decls = None; for id in tcx.hir_free_items() { - let attrs = tcx.hir().attrs(id.hir_id()); + let attrs = tcx.hir_attrs(id.hir_id()); if attr::contains_name(attrs, sym::rustc_proc_macro_decls) { decls = Some(id.owner_id.def_id); } diff --git a/compiler/rustc_interface/src/util.rs b/compiler/rustc_interface/src/util.rs index bc2aae7cd87..5cccab893bb 100644 --- a/compiler/rustc_interface/src/util.rs +++ b/compiler/rustc_interface/src/util.rs @@ -39,11 +39,11 @@ pub(crate) fn add_configuration( ) { let tf = sym::target_feature; - let unstable_target_features = codegen_backend.target_features_cfg(sess, true); - sess.unstable_target_features.extend(unstable_target_features.iter().cloned()); + let (target_features, unstable_target_features) = codegen_backend.target_features_cfg(sess); - let target_features = codegen_backend.target_features_cfg(sess, false); - sess.target_features.extend(target_features.iter().cloned()); + sess.unstable_target_features.extend(unstable_target_features.iter().copied()); + + sess.target_features.extend(target_features.iter().copied()); cfg.extend(target_features.into_iter().map(|feat| (tf, Some(feat)))); diff --git a/compiler/rustc_lexer/src/lib.rs b/compiler/rustc_lexer/src/lib.rs index bf18845a083..61638e45253 100644 --- a/compiler/rustc_lexer/src/lib.rs +++ b/compiler/rustc_lexer/src/lib.rs @@ -23,7 +23,6 @@ // We want to be able to build this crate with a stable 
compiler, // so no `#![feature]` attributes should be added. #![deny(unstable_features)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod cursor; diff --git a/compiler/rustc_lint/Cargo.toml b/compiler/rustc_lint/Cargo.toml index 24e7b40c8a2..d6014f5006a 100644 --- a/compiler/rustc_lint/Cargo.toml +++ b/compiler/rustc_lint/Cargo.toml @@ -24,6 +24,7 @@ rustc_span = { path = "../rustc_span" } rustc_target = { path = "../rustc_target" } rustc_trait_selection = { path = "../rustc_trait_selection" } rustc_type_ir = { path = "../rustc_type_ir" } +smallvec = { version = "1.8.1", features = ["union", "may_dangle"] } tracing = "0.1" unicode-security = "0.1.0" # tidy-alphabetical-end diff --git a/compiler/rustc_lint/messages.ftl b/compiler/rustc_lint/messages.ftl index 67936763427..d51865810b9 100644 --- a/compiler/rustc_lint/messages.ftl +++ b/compiler/rustc_lint/messages.ftl @@ -333,6 +333,11 @@ lint_identifier_uncommon_codepoints = identifier contains {$codepoints_len -> *[other] {" "}{$identifier_type} } Unicode general security profile +lint_if_let_dtor = {$dtor_kind -> + [dyn] value may invoke a custom destructor because it contains a trait object + *[concrete] value invokes this custom destructor + } + lint_if_let_rescope = `if let` assigns a shorter lifetime since Edition 2024 .label = this value has a significant drop implementation which may observe a major change in drop order and requires your discretion .help = the value is now dropped here in Edition 2024 diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index f7be37dc4a2..918a42f3047 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -419,7 +419,7 @@ impl MissingDoc { return; } - let attrs = cx.tcx.hir().attrs(cx.tcx.local_def_id_to_hir_id(def_id)); + let attrs = cx.tcx.hir_attrs(cx.tcx.local_def_id_to_hir_id(def_id)); let has_doc = attrs.iter().any(has_doc); if !has_doc { cx.emit_span_lint( @@ -997,7 +997,7 @@ declare_lint_pass!(InvalidNoMangleItems => [NO_MANGLE_CONST_ITEMS, NO_MANGLE_GEN impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { - let attrs = cx.tcx.hir().attrs(it.hir_id()); + let attrs = cx.tcx.hir_attrs(it.hir_id()); let check_no_mangle_on_generic_fn = |no_mangle_attr: &hir::Attribute, impl_generics: Option<&hir::Generics<'_>>, generics: &hir::Generics<'_>, @@ -1050,7 +1050,7 @@ impl<'tcx> LateLintPass<'tcx> for InvalidNoMangleItems { for it in *items { if let hir::AssocItemKind::Fn { .. 
} = it.kind { if let Some(no_mangle_attr) = - attr::find_by_name(cx.tcx.hir().attrs(it.id.hir_id()), sym::no_mangle) + attr::find_by_name(cx.tcx.hir_attrs(it.id.hir_id()), sym::no_mangle) { check_no_mangle_on_generic_fn( no_mangle_attr, diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs index 74663e6b4bb..017ae943e91 100644 --- a/compiler/rustc_lint/src/context.rs +++ b/compiler/rustc_lint/src/context.rs @@ -6,6 +6,7 @@ use std::cell::Cell; use std::slice; +use rustc_ast::BindingMode; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::sync; use rustc_data_structures::unord::UnordMap; @@ -14,6 +15,7 @@ use rustc_feature::Features; use rustc_hir::def::Res; use rustc_hir::def_id::{CrateNum, DefId}; use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData}; +use rustc_hir::{Pat, PatKind}; use rustc_middle::bug; use rustc_middle::middle::privacy::EffectiveVisibilities; use rustc_middle::ty::layout::{LayoutError, LayoutOfHelpers, TyAndLayout}; @@ -683,6 +685,10 @@ impl<'tcx> LateContext<'tcx> { self.tcx.type_is_copy_modulo_regions(self.typing_env(), ty) } + pub fn type_is_use_cloned_modulo_regions(&self, ty: Ty<'tcx>) -> bool { + self.tcx.type_is_use_cloned_modulo_regions(self.typing_env(), ty) + } + /// Gets the type-checking results for the current body, /// or `None` if outside a body. pub fn maybe_typeck_results(&self) -> Option<&'tcx ty::TypeckResults<'tcx>> { @@ -886,7 +892,12 @@ impl<'tcx> LateContext<'tcx> { } && let Some(init) = match parent_node { hir::Node::Expr(expr) => Some(expr), - hir::Node::LetStmt(hir::LetStmt { init, .. }) => *init, + hir::Node::LetStmt(hir::LetStmt { + init, + // Binding is immutable, init cannot be re-assigned + pat: Pat { kind: PatKind::Binding(BindingMode::NONE, ..), .. }, + .. + }) => *init, _ => None, } { @@ -931,7 +942,12 @@ impl<'tcx> LateContext<'tcx> { } && let Some(init) = match parent_node { hir::Node::Expr(expr) => Some(expr), - hir::Node::LetStmt(hir::LetStmt { init, .. }) => *init, + hir::Node::LetStmt(hir::LetStmt { + init, + // Binding is immutable, init cannot be re-assigned + pat: Pat { kind: PatKind::Binding(BindingMode::NONE, ..), .. }, + .. + }) => *init, hir::Node::Item(item) => match item.kind { hir::ItemKind::Const(.., body_id) | hir::ItemKind::Static(.., body_id) => { Some(self.tcx.hir_body(body_id).value) diff --git a/compiler/rustc_lint/src/dangling.rs b/compiler/rustc_lint/src/dangling.rs index fd6b3e90ada..91c7922638d 100644 --- a/compiler/rustc_lint/src/dangling.rs +++ b/compiler/rustc_lint/src/dangling.rs @@ -159,7 +159,10 @@ fn is_temporary_rvalue(expr: &Expr<'_>) -> bool { ExprKind::Path(..) => false, // Calls return rvalues. - ExprKind::Call(..) | ExprKind::MethodCall(..) | ExprKind::Binary(..) => true, + ExprKind::Call(..) + | ExprKind::MethodCall(..) + | ExprKind::Use(..) + | ExprKind::Binary(..) => true, // Inner blocks are rvalues. ExprKind::If(..) | ExprKind::Loop(..) | ExprKind::Match(..) | ExprKind::Block(..) => true, diff --git a/compiler/rustc_lint/src/default_could_be_derived.rs b/compiler/rustc_lint/src/default_could_be_derived.rs index 59e38a882dd..58efca5e994 100644 --- a/compiler/rustc_lint/src/default_could_be_derived.rs +++ b/compiler/rustc_lint/src/default_could_be_derived.rs @@ -133,7 +133,7 @@ impl<'tcx> LateLintPass<'tcx> for DefaultCouldBeDerived { return; } - // At least one of the fields with a default value have been overriden in + // At least one of the fields with a default value have been overridden in // the `Default` implementation. 
We suggest removing it and relying on `..` // instead. let any_default_field_given = diff --git a/compiler/rustc_lint/src/expect.rs b/compiler/rustc_lint/src/expect.rs index 9ca148e1f25..4c2b82a9a23 100644 --- a/compiler/rustc_lint/src/expect.rs +++ b/compiler/rustc_lint/src/expect.rs @@ -38,7 +38,7 @@ fn check_expectations(tcx: TyCtxt<'_>, tool_filter: Option<Symbol>) { } LintExpectationId::Stable { hir_id, attr_index, lint_index: Some(lint_index) } => { // We are an `eval_always` query, so looking at the attribute's `AttrId` is ok. - let attr_id = tcx.hir().attrs(hir_id)[attr_index as usize].id(); + let attr_id = tcx.hir_attrs(hir_id)[attr_index as usize].id(); (attr_id, lint_index) } diff --git a/compiler/rustc_lint/src/if_let_rescope.rs b/compiler/rustc_lint/src/if_let_rescope.rs index c1aa95667da..39ea8d8e324 100644 --- a/compiler/rustc_lint/src/if_let_rescope.rs +++ b/compiler/rustc_lint/src/if_let_rescope.rs @@ -1,18 +1,23 @@ use std::iter::repeat; use std::ops::ControlFlow; -use hir::intravisit::Visitor; +use hir::intravisit::{self, Visitor}; use rustc_ast::Recovered; use rustc_errors::{ Applicability, Diag, EmissionGuarantee, SubdiagMessageOp, Subdiagnostic, SuggestionStyle, }; use rustc_hir::{self as hir, HirIdSet}; -use rustc_macros::LintDiagnostic; -use rustc_middle::ty::TyCtxt; +use rustc_macros::{LintDiagnostic, Subdiagnostic}; +use rustc_middle::ty::adjustment::Adjust; +use rustc_middle::ty::significant_drop_order::{ + extract_component_with_significant_dtor, ty_dtor_span, +}; +use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_session::lint::{FutureIncompatibilityReason, LintId}; use rustc_session::{declare_lint, impl_lint_pass}; -use rustc_span::Span; use rustc_span::edition::Edition; +use rustc_span::{DUMMY_SP, Span}; +use smallvec::SmallVec; use crate::{LateContext, LateLintPass}; @@ -129,6 +134,7 @@ impl IfLetRescope { hir::ExprKind::If(_cond, _conseq, Some(alt)) => alt.span.shrink_to_hi(), _ => return, }; + let mut seen_dyn = false; let mut add_bracket_to_match_head = match_head_needs_bracket(tcx, expr); let mut significant_droppers = vec![]; let mut lifetime_ends = vec![]; @@ -136,6 +142,7 @@ impl IfLetRescope { let mut alt_heads = vec![]; let mut match_heads = vec![]; let mut consequent_heads = vec![]; + let mut destructors = vec![]; let mut first_if_to_lint = None; let mut first_if_to_rewrite = false; let mut empty_alt = false; @@ -159,11 +166,25 @@ impl IfLetRescope { let before_conseq = conseq.span.shrink_to_lo(); let lifetime_end = source_map.end_point(conseq.span); - if let ControlFlow::Break(significant_dropper) = - (FindSignificantDropper { cx }).visit_expr(init) + if let ControlFlow::Break((drop_span, drop_tys)) = + (FindSignificantDropper { cx }).check_if_let_scrutinee(init) { + destructors.extend(drop_tys.into_iter().filter_map(|ty| { + if let Some(span) = ty_dtor_span(tcx, ty) { + Some(DestructorLabel { span, dtor_kind: "concrete" }) + } else if matches!(ty.kind(), ty::Dynamic(..)) { + if seen_dyn { + None + } else { + seen_dyn = true; + Some(DestructorLabel { span: DUMMY_SP, dtor_kind: "dyn" }) + } + } else { + None + } + })); first_if_to_lint = first_if_to_lint.or_else(|| Some((span, expr.hir_id))); - significant_droppers.push(significant_dropper); + significant_droppers.push(drop_span); lifetime_ends.push(lifetime_end); if ty_ascription.is_some() || !expr.span.can_be_used_for_suggestions() @@ -226,6 +247,7 @@ impl IfLetRescope { hir_id, span, IfLetRescopeLint { + destructors, significant_droppers, lifetime_ends, rewrite: 
first_if_to_rewrite.then_some(IfLetRescopeRewrite { @@ -287,6 +309,8 @@ impl<'tcx> LateLintPass<'tcx> for IfLetRescope { #[derive(LintDiagnostic)] #[diag(lint_if_let_rescope)] struct IfLetRescopeLint { + #[subdiagnostic] + destructors: Vec<DestructorLabel>, #[label] significant_droppers: Vec<Span>, #[help] @@ -346,6 +370,14 @@ impl Subdiagnostic for IfLetRescopeRewrite { } } +#[derive(Subdiagnostic)] +#[note(lint_if_let_dtor)] +struct DestructorLabel { + #[primary_span] + span: Span, + dtor_kind: &'static str, +} + struct AltHead(Span); struct ConsequentRewrite { @@ -363,96 +395,107 @@ enum SingleArmMatchBegin { WithoutOpenBracket(Span), } -struct FindSignificantDropper<'tcx, 'a> { +struct FindSignificantDropper<'a, 'tcx> { cx: &'a LateContext<'tcx>, } -impl<'tcx, 'a> Visitor<'tcx> for FindSignificantDropper<'tcx, 'a> { - type Result = ControlFlow<Span>; +impl<'tcx> FindSignificantDropper<'_, 'tcx> { + /// Check the scrutinee of an `if let` to see if it promotes any temporary values + /// that would change drop order in edition 2024. Specifically, it checks the value + /// of the scrutinee itself, and also recurses into the expression to find any ref + /// exprs (or autoref) which would promote temporaries that would be scoped to the + /// end of this `if`. + fn check_if_let_scrutinee( + &mut self, + init: &'tcx hir::Expr<'tcx>, + ) -> ControlFlow<(Span, SmallVec<[Ty<'tcx>; 4]>)> { + self.check_promoted_temp_with_drop(init)?; + self.visit_expr(init) + } - fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Self::Result { - if self - .cx - .typeck_results() - .expr_ty(expr) - .has_significant_drop(self.cx.tcx, self.cx.typing_env()) - { - return ControlFlow::Break(expr.span); + /// Check that an expression is not a promoted temporary with a significant + /// drop impl. + /// + /// An expression is a promoted temporary if it has an addr taken (i.e. `&expr` or autoref) + /// or is the scrutinee of the `if let`, *and* the expression is not a place + /// expr, and it has a significant drop. 
+ fn check_promoted_temp_with_drop( + &self, + expr: &'tcx hir::Expr<'tcx>, + ) -> ControlFlow<(Span, SmallVec<[Ty<'tcx>; 4]>)> { + if expr.is_place_expr(|base| { + self.cx + .typeck_results() + .adjustments() + .get(base.hir_id) + .is_some_and(|x| x.iter().any(|adj| matches!(adj.kind, Adjust::Deref(_)))) + }) { + return ControlFlow::Continue(()); } - match expr.kind { - hir::ExprKind::ConstBlock(_) - | hir::ExprKind::Lit(_) - | hir::ExprKind::Path(_) - | hir::ExprKind::Assign(_, _, _) - | hir::ExprKind::AssignOp(_, _, _) - | hir::ExprKind::Break(_, _) - | hir::ExprKind::Continue(_) - | hir::ExprKind::Ret(_) - | hir::ExprKind::Become(_) - | hir::ExprKind::InlineAsm(_) - | hir::ExprKind::OffsetOf(_, _) - | hir::ExprKind::Repeat(_, _) - | hir::ExprKind::Err(_) - | hir::ExprKind::Struct(_, _, _) - | hir::ExprKind::Closure(_) - | hir::ExprKind::Block(_, _) - | hir::ExprKind::DropTemps(_) - | hir::ExprKind::Loop(_, _, _, _) => ControlFlow::Continue(()), - hir::ExprKind::Tup(exprs) | hir::ExprKind::Array(exprs) => { - for expr in exprs { - self.visit_expr(expr)?; - } - ControlFlow::Continue(()) - } - hir::ExprKind::Call(callee, args) => { - self.visit_expr(callee)?; - for expr in args { - self.visit_expr(expr)?; - } - ControlFlow::Continue(()) - } - hir::ExprKind::MethodCall(_, receiver, args, _) => { - self.visit_expr(receiver)?; - for expr in args { - self.visit_expr(expr)?; + let drop_tys = extract_component_with_significant_dtor( + self.cx.tcx, + self.cx.typing_env(), + self.cx.typeck_results().expr_ty(expr), + ); + if drop_tys.is_empty() { + return ControlFlow::Continue(()); + } + + ControlFlow::Break((expr.span, drop_tys)) + } +} + +impl<'tcx> Visitor<'tcx> for FindSignificantDropper<'_, 'tcx> { + type Result = ControlFlow<(Span, SmallVec<[Ty<'tcx>; 4]>)>; + + fn visit_block(&mut self, b: &'tcx hir::Block<'tcx>) -> Self::Result { + // Blocks introduce temporary terminating scope for all of its + // statements, so just visit the tail expr, skipping over any + // statements. This prevents false positives like `{ let x = &Drop; }`. + if let Some(expr) = b.expr { self.visit_expr(expr) } else { ControlFlow::Continue(()) } + } + + fn visit_expr(&mut self, expr: &'tcx hir::Expr<'tcx>) -> Self::Result { + // Check for promoted temporaries from autoref, e.g. + // `if let None = TypeWithDrop.as_ref() {} else {}` + // where `fn as_ref(&self) -> Option<...>`. + for adj in self.cx.typeck_results().expr_adjustments(expr) { + match adj.kind { + // Skip when we hit the first deref expr. 
+ Adjust::Deref(_) => break, + Adjust::Borrow(_) => { + self.check_promoted_temp_with_drop(expr)?; } - ControlFlow::Continue(()) - } - hir::ExprKind::Index(left, right, _) | hir::ExprKind::Binary(_, left, right) => { - self.visit_expr(left)?; - self.visit_expr(right) + _ => {} } - hir::ExprKind::Unary(_, expr) - | hir::ExprKind::Cast(expr, _) - | hir::ExprKind::Type(expr, _) - | hir::ExprKind::UnsafeBinderCast(_, expr, _) - | hir::ExprKind::Yield(expr, _) - | hir::ExprKind::AddrOf(_, _, expr) - | hir::ExprKind::Match(expr, _, _) - | hir::ExprKind::Field(expr, _) - | hir::ExprKind::Let(&hir::LetExpr { - init: expr, - span: _, - pat: _, - ty: _, - recovered: Recovered::No, - }) => self.visit_expr(expr), - hir::ExprKind::Let(_) => ControlFlow::Continue(()), + } - hir::ExprKind::If(cond, _, _) => { - if let hir::ExprKind::Let(hir::LetExpr { - init, - span: _, - pat: _, - ty: _, - recovered: Recovered::No, - }) = cond.kind - { - self.visit_expr(init)?; - } - ControlFlow::Continue(()) + match expr.kind { + // Account for cases like `if let None = Some(&Drop) {} else {}`. + hir::ExprKind::AddrOf(_, _, expr) => { + self.check_promoted_temp_with_drop(expr)?; + intravisit::walk_expr(self, expr) + } + // `(Drop, ()).1` introduces a temporary and then moves out of + // part of it, therefore we should check it for temporaries. + // FIXME: This may have false positives if we move the part + // that actually has drop, but oh well. + hir::ExprKind::Index(expr, _, _) | hir::ExprKind::Field(expr, _) => { + self.check_promoted_temp_with_drop(expr)?; + intravisit::walk_expr(self, expr) } + // If always introduces a temporary terminating scope for its cond and arms, + // so don't visit them. + hir::ExprKind::If(..) => ControlFlow::Continue(()), + // Match introduces temporary terminating scopes for arms, so don't visit + // them, and only visit the scrutinee to account for cases like: + // `if let None = match &Drop { _ => Some(1) } {} else {}`. + hir::ExprKind::Match(scrut, _, _) => self.visit_expr(scrut), + // Self explanatory. + hir::ExprKind::DropTemps(_) => ControlFlow::Continue(()), + // Otherwise, walk into the expr's parts. 
+ _ => intravisit::walk_expr(self, expr), } } } diff --git a/compiler/rustc_lint/src/late.rs b/compiler/rustc_lint/src/late.rs index d22515d62d6..852bb01c096 100644 --- a/compiler/rustc_lint/src/late.rs +++ b/compiler/rustc_lint/src/late.rs @@ -48,7 +48,7 @@ impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> { where F: FnOnce(&mut Self), { - let attrs = self.context.tcx.hir().attrs(id); + let attrs = self.context.tcx.hir_attrs(id); let prev = self.context.last_node_with_lint_attrs; self.context.last_node_with_lint_attrs = id; debug!("late context: enter_attrs({:?})", attrs); @@ -74,7 +74,7 @@ impl<'tcx, T: LateLintPass<'tcx>> LateContextAndPass<'tcx, T> { fn process_mod(&mut self, m: &'tcx hir::Mod<'tcx>, n: HirId) { lint_callback!(self, check_mod, m, n); - hir_visit::walk_mod(self, m, n); + hir_visit::walk_mod(self, m); } } diff --git a/compiler/rustc_lint/src/levels.rs b/compiler/rustc_lint/src/levels.rs index 4ede9b44087..aa6eef906ea 100644 --- a/compiler/rustc_lint/src/levels.rs +++ b/compiler/rustc_lint/src/levels.rs @@ -152,7 +152,7 @@ fn lints_that_dont_need_to_run(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LintId> { #[instrument(level = "trace", skip(tcx), ret)] fn shallow_lint_levels_on(tcx: TyCtxt<'_>, owner: hir::OwnerId) -> ShallowLintLevelMap { let store = unerased_lint_store(tcx.sess); - let attrs = tcx.hir_attrs(owner); + let attrs = tcx.hir_attr_map(owner); let mut levels = LintLevelsBuilder { sess: tcx.sess, diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index 5c5b3b350dd..c38a7540018 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -21,6 +21,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(array_windows)] @@ -32,7 +33,6 @@ #![feature(rustc_attrs)] #![feature(rustdoc_internals)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod async_closures; @@ -599,6 +599,11 @@ fn register_builtins(store: &mut LintStore) { "converted into hard error, \ see <https://github.com/rust-lang/rust/issues/73333> for more information", ); + store.register_removed( + "ptr_cast_add_auto_to_object", + "converted into hard error, see issue #127323 \ + <https://github.com/rust-lang/rust/issues/127323> for more information", + ); } fn register_internals(store: &mut LintStore) { diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index 49f9ad39780..722779d3268 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -342,8 +342,8 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase { let crate_ident = if let Some(name) = &cx.tcx.sess.opts.crate_name { Some(Ident::from_str(name)) } else { - ast::attr::find_by_name(cx.tcx.hir().attrs(hir::CRATE_HIR_ID), sym::crate_name) - .and_then(|attr| { + ast::attr::find_by_name(cx.tcx.hir_attrs(hir::CRATE_HIR_ID), sym::crate_name).and_then( + |attr| { if let Attribute::Unparsed(n) = attr && let AttrItem { args: AttrArgs::Eq { eq_span: _, expr: lit }, .. 
} = n.as_ref() @@ -371,7 +371,8 @@ impl<'tcx> LateLintPass<'tcx> for NonSnakeCase { } else { None } - }) + }, + ) }; if let Some(ident) = &crate_ident { @@ -500,7 +501,7 @@ impl NonUpperCaseGlobals { impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { - let attrs = cx.tcx.hir().attrs(it.hir_id()); + let attrs = cx.tcx.hir_attrs(it.hir_id()); match it.kind { hir::ItemKind::Static(..) if !ast::attr::contains_name(attrs, sym::no_mangle) => { NonUpperCaseGlobals::check_upper_case(cx, "static variable", &it.ident); diff --git a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs index 7eaf83f5acf..659f6d98f03 100644 --- a/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs +++ b/compiler/rustc_lint/src/opaque_hidden_inferred_bound.rs @@ -43,6 +43,7 @@ declare_lint! { /// /// type Tait = impl Sized; /// + /// #[define_opaque(Tait)] /// fn test() -> impl Trait<Assoc = Tait> { /// 42 /// } diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index cb83d405cc3..6f3c32af5ef 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -1,7 +1,7 @@ use std::iter; use std::ops::ControlFlow; -use rustc_abi::{BackendRepr, ExternAbi, TagEncoding, VariantIdx, Variants, WrappingRange}; +use rustc_abi::{BackendRepr, TagEncoding, VariantIdx, Variants, WrappingRange}; use rustc_data_structures::fx::FxHashSet; use rustc_errors::DiagMessage; use rustc_hir::intravisit::VisitorExt; @@ -882,27 +882,13 @@ fn ty_is_known_nonnull<'tcx>( || Option::unwrap_or_default( try { match **pat { - ty::PatternKind::Range { start, end, include_end } => { - match (start, end) { - (Some(start), None) => { - start.try_to_value()?.try_to_bits(tcx, typing_env)? > 0 - } - (Some(start), Some(end)) => { - let start = - start.try_to_value()?.try_to_bits(tcx, typing_env)?; - let end = - end.try_to_value()?.try_to_bits(tcx, typing_env)?; - - if include_end { - // This also works for negative numbers, as we just need - // to ensure we aren't wrapping over zero. - start > 0 && end >= start - } else { - start > 0 && end > start - } - } - _ => false, - } + ty::PatternKind::Range { start, end } => { + let start = start.try_to_value()?.try_to_bits(tcx, typing_env)?; + let end = end.try_to_value()?.try_to_bits(tcx, typing_env)?; + + // This also works for negative numbers, as we just need + // to ensure we aren't wrapping over zero. + start > 0 && end >= start } } }, @@ -1349,7 +1335,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { ty::FnPtr(sig_tys, hdr) => { let sig = sig_tys.with(hdr); - if self.is_internal_abi(sig.abi()) { + if sig.abi().is_rustic_abi() { return FfiUnsafe { ty, reason: fluent::lint_improper_ctypes_fnptr_reason, @@ -1552,13 +1538,6 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { self.check_type_for_ffi_and_report_errors(span, ty, true, false); } - fn is_internal_abi(&self, abi: ExternAbi) -> bool { - matches!( - abi, - ExternAbi::Rust | ExternAbi::RustCall | ExternAbi::RustCold | ExternAbi::RustIntrinsic - ) - } - /// Find any fn-ptr types with external ABIs in `ty`. 
/// /// For example, `Option<extern "C" fn()>` returns `extern "C" fn()` @@ -1567,17 +1546,16 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { hir_ty: &hir::Ty<'tcx>, ty: Ty<'tcx>, ) -> Vec<(Ty<'tcx>, Span)> { - struct FnPtrFinder<'a, 'b, 'tcx> { - visitor: &'a ImproperCTypesVisitor<'b, 'tcx>, + struct FnPtrFinder<'tcx> { spans: Vec<Span>, tys: Vec<Ty<'tcx>>, } - impl<'a, 'b, 'tcx> hir::intravisit::Visitor<'_> for FnPtrFinder<'a, 'b, 'tcx> { + impl<'tcx> hir::intravisit::Visitor<'_> for FnPtrFinder<'tcx> { fn visit_ty(&mut self, ty: &'_ hir::Ty<'_, AmbigArg>) { debug!(?ty); if let hir::TyKind::BareFn(hir::BareFnTy { abi, .. }) = ty.kind - && !self.visitor.is_internal_abi(*abi) + && !abi.is_rustic_abi() { self.spans.push(ty.span); } @@ -1586,12 +1564,12 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - impl<'a, 'b, 'tcx> ty::visit::TypeVisitor<TyCtxt<'tcx>> for FnPtrFinder<'a, 'b, 'tcx> { + impl<'tcx> ty::visit::TypeVisitor<TyCtxt<'tcx>> for FnPtrFinder<'tcx> { type Result = (); fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { if let ty::FnPtr(_, hdr) = ty.kind() - && !self.visitor.is_internal_abi(hdr.abi) + && !hdr.abi.is_rustic_abi() { self.tys.push(ty); } @@ -1600,7 +1578,7 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { } } - let mut visitor = FnPtrFinder { visitor: self, spans: Vec::new(), tys: Vec::new() }; + let mut visitor = FnPtrFinder { spans: Vec::new(), tys: Vec::new() }; ty.visit_with(&mut visitor); visitor.visit_ty_unambig(hir_ty); @@ -1611,17 +1589,17 @@ impl<'a, 'tcx> ImproperCTypesVisitor<'a, 'tcx> { impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDeclarations { fn check_foreign_item(&mut self, cx: &LateContext<'tcx>, it: &hir::ForeignItem<'tcx>) { let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Declaration }; - let abi = cx.tcx.hir().get_foreign_abi(it.hir_id()); + let abi = cx.tcx.hir_get_foreign_abi(it.hir_id()); match it.kind { hir::ForeignItemKind::Fn(sig, _, _) => { - if vis.is_internal_abi(abi) { + if abi.is_rustic_abi() { vis.check_fn(it.owner_id.def_id, sig.decl) } else { vis.check_foreign_fn(it.owner_id.def_id, sig.decl); } } - hir::ForeignItemKind::Static(ty, _, _) if !vis.is_internal_abi(abi) => { + hir::ForeignItemKind::Static(ty, _, _) if !abi.is_rustic_abi() => { vis.check_foreign_static(it.owner_id, ty.span); } hir::ForeignItemKind::Static(..) | hir::ForeignItemKind::Type => (), @@ -1775,7 +1753,7 @@ impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions { }; let mut vis = ImproperCTypesVisitor { cx, mode: CItemKind::Definition }; - if vis.is_internal_abi(abi) { + if abi.is_rustic_abi() { vis.check_fn(id, decl); } else { vis.check_foreign_fn(id, decl); diff --git a/compiler/rustc_lint/src/types/literal.rs b/compiler/rustc_lint/src/types/literal.rs index 3e4532a6dbe..7cb00262b6f 100644 --- a/compiler/rustc_lint/src/types/literal.rs +++ b/compiler/rustc_lint/src/types/literal.rs @@ -186,7 +186,7 @@ fn report_bin_hex_error( lit_no_suffix, negative_val: actually.clone(), int_ty: int_ty.name_str(), - uint_ty: int_ty.to_unsigned().name_str(), + uint_ty: Integer::fit_unsigned(val).uint_ty_str(), }) }) .flatten(); diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 97850a2afc1..592c934997c 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -73,14 +73,12 @@ declare_lint_pass! 
{ NEVER_TYPE_FALLBACK_FLOWING_INTO_UNSAFE, NON_CONTIGUOUS_RANGE_ENDPOINTS, NON_EXHAUSTIVE_OMITTED_PATTERNS, - ORDER_DEPENDENT_TRAIT_OBJECTS, OUT_OF_SCOPE_MACRO_CALLS, OVERLAPPING_RANGE_ENDPOINTS, PATTERNS_IN_FNS_WITHOUT_BODY, PRIVATE_BOUNDS, PRIVATE_INTERFACES, PROC_MACRO_DERIVE_RESOLUTION_FALLBACK, - PTR_CAST_ADD_AUTO_TO_OBJECT, PTR_TO_INTEGER_TRANSMUTE_IN_CONSTS, PUB_USE_OF_PRIVATE_EXTERN_CRATE, REDUNDANT_IMPORTS, @@ -1503,42 +1501,6 @@ declare_lint! { } declare_lint! { - /// The `order_dependent_trait_objects` lint detects a trait coherency - /// violation that would allow creating two trait impls for the same - /// dynamic trait object involving marker traits. - /// - /// ### Example - /// - /// ```rust,compile_fail - /// pub trait Trait {} - /// - /// impl Trait for dyn Send + Sync { } - /// impl Trait for dyn Sync + Send { } - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// A previous bug caused the compiler to interpret traits with different - /// orders (such as `Send + Sync` and `Sync + Send`) as distinct types - /// when they were intended to be treated the same. This allowed code to - /// define separate trait implementations when there should be a coherence - /// error. This is a [future-incompatible] lint to transition this to a - /// hard error in the future. See [issue #56484] for more details. - /// - /// [issue #56484]: https://github.com/rust-lang/rust/issues/56484 - /// [future-incompatible]: ../index.md#future-incompatible-lints - pub ORDER_DEPENDENT_TRAIT_OBJECTS, - Deny, - "trait-object types were treated as different depending on marker-trait order", - @future_incompatible = FutureIncompatibleInfo { - reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps, - reference: "issue #56484 <https://github.com/rust-lang/rust/issues/56484>", - }; -} - -declare_lint! { /// The `coherence_leak_check` lint detects conflicting implementations of /// a trait that are only distinguished by the old leak-check code. /// @@ -2711,7 +2673,7 @@ declare_lint! { /// /// ```rust /// const fn foo<T>() -> usize { - /// if std::mem::size_of::<*mut T>() < 8 { // size of *mut T does not depend on T + /// if size_of::<*mut T>() < 8 { // size of *mut T does not depend on T /// 4 /// } else { /// 8 @@ -3782,7 +3744,7 @@ declare_lint! { Warn, "use of unsupported calling convention for function pointer", @future_incompatible = FutureIncompatibleInfo { - reason: FutureIncompatibilityReason::FutureReleaseErrorDontReportInDeps, + reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps, reference: "issue #130260 <https://github.com/rust-lang/rust/issues/130260>", }; } @@ -4828,58 +4790,6 @@ declare_lint! { } declare_lint! { - /// The `ptr_cast_add_auto_to_object` lint detects casts of raw pointers to trait - /// objects, which add auto traits. - /// - /// ### Example - /// - /// ```rust,edition2021,compile_fail - /// let ptr: *const dyn core::any::Any = &(); - /// _ = ptr as *const dyn core::any::Any + Send; - /// ``` - /// - /// {{produces}} - /// - /// ### Explanation - /// - /// Adding an auto trait can make the vtable invalid, potentially causing - /// UB in safe code afterwards. 
For example: - /// - /// ```ignore (causes a warning) - /// #![feature(arbitrary_self_types)] - /// - /// trait Trait { - /// fn f(self: *const Self) - /// where - /// Self: Send; - /// } - /// - /// impl Trait for *const () { - /// fn f(self: *const Self) { - /// unreachable!() - /// } - /// } - /// - /// fn main() { - /// let unsend: *const () = &(); - /// let unsend: *const dyn Trait = &unsend; - /// let send_bad: *const (dyn Trait + Send) = unsend as _; - /// send_bad.f(); // this crashes, since vtable for `*const ()` does not have an entry for `f` - /// } - /// ``` - /// - /// Generally you must ensure that vtable is right for the pointer's type, - /// before passing the pointer to safe code. - pub PTR_CAST_ADD_AUTO_TO_OBJECT, - Warn, - "detects `as` casts from pointers to `dyn Trait` to pointers to `dyn Trait + Auto`", - @future_incompatible = FutureIncompatibleInfo { - reason: FutureIncompatibilityReason::FutureReleaseErrorReportInDeps, - reference: "issue #127323 <https://github.com/rust-lang/rust/issues/127323>", - }; -} - -declare_lint! { /// The `out_of_scope_macro_calls` lint detects `macro_rules` called when they are not in scope, /// above their definition, which may happen in key-value attributes. /// diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs index e564235c41a..46b4b1d4383 100644 --- a/compiler/rustc_lint_defs/src/lib.rs +++ b/compiler/rustc_lint_defs/src/lib.rs @@ -1,7 +1,3 @@ -// tidy-alphabetical-start -#![warn(unreachable_pub)] -// tidy-alphabetical-end - use rustc_abi::ExternAbi; use rustc_ast::AttrId; use rustc_ast::attr::AttributeExt; diff --git a/compiler/rustc_llvm/Cargo.toml b/compiler/rustc_llvm/Cargo.toml index 4f3ce77efc4..061562b2ec5 100644 --- a/compiler/rustc_llvm/Cargo.toml +++ b/compiler/rustc_llvm/Cargo.toml @@ -12,5 +12,5 @@ libc = "0.2.73" # tidy-alphabetical-start # Pinned so `cargo update` bumps don't cause breakage. Please also update the # pinned `cc` in `rustc_codegen_ssa` if you update `cc` here. -cc = "=1.2.13" +cc = "=1.2.16" # tidy-alphabetical-end diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 9ce4abdb432..bc3d4d6f83a 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -7,6 +7,7 @@ #include "llvm/Analysis/Lint.h" #include "llvm/Analysis/TargetLibraryInfo.h" #include "llvm/Bitcode/BitcodeWriter.h" +#include "llvm/Bitcode/BitcodeWriterPass.h" #include "llvm/CodeGen/CommandFlags.h" #include "llvm/IR/AssemblyAnnotationWriter.h" #include "llvm/IR/AutoUpgrade.h" @@ -37,6 +38,7 @@ #include "llvm/Transforms/Instrumentation/InstrProfiling.h" #include "llvm/Transforms/Instrumentation/MemorySanitizer.h" #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h" +#include "llvm/Transforms/Scalar/AnnotationRemarks.h" #include "llvm/Transforms/Utils/CanonicalizeAliases.h" #include "llvm/Transforms/Utils/FunctionImportUtils.h" #include "llvm/Transforms/Utils/NameAnonGlobals.h" @@ -195,6 +197,19 @@ extern "C" void LLVMRustTimeTraceProfilerFinish(const char *FileName) { GEN_SUBTARGETS #undef SUBTARGET +// This struct and various functions are sort of a hack right now, but the +// problem is that we've got in-memory LLVM modules after we generate and +// optimize all codegen-units for one compilation in rustc. To be compatible +// with the LTO support above we need to serialize the modules plus their +// ThinLTO summary into memory. 
+// +// This structure is basically an owned version of a serialize module, with +// a ThinLTO summary attached. +struct LLVMRustThinLTOBuffer { + std::string data; + std::string thin_link_data; +}; + extern "C" bool LLVMRustHasFeature(LLVMTargetMachineRef TM, const char *Feature) { TargetMachine *Target = unwrap(TM); @@ -469,7 +484,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine( if (ArgsCstrBuff != nullptr) { #if LLVM_VERSION_GE(20, 0) - int buffer_offset = 0; + size_t buffer_offset = 0; assert(ArgsCstrBuff[ArgsCstrBuffLen - 1] == '\0'); auto Arg0 = std::string(ArgsCstrBuff); buffer_offset = Arg0.size() + 1; @@ -487,7 +502,7 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine( Options.MCOptions.Argv0 = Arg0; Options.MCOptions.CommandlineArgs = CommandlineArgs; #else - int buffer_offset = 0; + size_t buffer_offset = 0; assert(ArgsCstrBuff[ArgsCstrBuffLen - 1] == '\0'); const size_t arg0_len = std::strlen(ArgsCstrBuff); @@ -496,13 +511,13 @@ extern "C" LLVMTargetMachineRef LLVMRustCreateTargetMachine( arg0[arg0_len] = '\0'; buffer_offset += arg0_len + 1; - const int num_cmd_arg_strings = std::count( + const size_t num_cmd_arg_strings = std::count( &ArgsCstrBuff[buffer_offset], &ArgsCstrBuff[ArgsCstrBuffLen], '\0'); std::string *cmd_arg_strings = new std::string[num_cmd_arg_strings]; - for (int i = 0; i < num_cmd_arg_strings; ++i) { + for (size_t i = 0; i < num_cmd_arg_strings; ++i) { assert(buffer_offset < ArgsCstrBuffLen); - const int len = std::strlen(ArgsCstrBuff + buffer_offset); + const size_t len = std::strlen(ArgsCstrBuff + buffer_offset); cmd_arg_strings[i] = std::string(&ArgsCstrBuff[buffer_offset], len); buffer_offset += len + 1; } @@ -704,7 +719,8 @@ extern "C" LLVMRustResult LLVMRustOptimize( LLVMModuleRef ModuleRef, LLVMTargetMachineRef TMRef, LLVMRustPassBuilderOptLevel OptLevelRust, LLVMRustOptStage OptStage, bool IsLinkerPluginLTO, bool NoPrepopulatePasses, bool VerifyIR, - bool LintIR, bool UseThinLTOBuffers, bool MergeFunctions, bool UnrollLoops, + bool LintIR, LLVMRustThinLTOBuffer **ThinLTOBufferRef, bool EmitThinLTO, + bool EmitThinLTOSummary, bool MergeFunctions, bool UnrollLoops, bool SLPVectorize, bool LoopVectorize, bool DisableSimplifyLibCalls, bool EmitLifetimeMarkers, bool RunEnzyme, LLVMRustSanitizerOptions *SanitizerOptions, const char *PGOGenPath, @@ -952,7 +968,10 @@ extern "C" LLVMRustResult LLVMRustOptimize( } ModulePassManager MPM; - bool NeedThinLTOBufferPasses = UseThinLTOBuffers; + bool NeedThinLTOBufferPasses = EmitThinLTO; + auto ThinLTOBuffer = std::make_unique<LLVMRustThinLTOBuffer>(); + raw_string_ostream ThinLTODataOS(ThinLTOBuffer->data); + raw_string_ostream ThinLinkDataOS(ThinLTOBuffer->thin_link_data); if (!NoPrepopulatePasses) { // The pre-link pipelines don't support O0 and require using // buildO0DefaultPipeline() instead. At the same time, the LTO pipelines do @@ -976,7 +995,25 @@ extern "C" LLVMRustResult LLVMRustOptimize( switch (OptStage) { case LLVMRustOptStage::PreLinkNoLTO: - MPM = PB.buildPerModuleDefaultPipeline(OptLevel); + if (ThinLTOBufferRef) { + // This is similar to LLVM's `buildFatLTODefaultPipeline`, where the + // bitcode for embedding is obtained after performing + // `ThinLTOPreLinkDefaultPipeline`. + MPM.addPass(PB.buildThinLTOPreLinkDefaultPipeline(OptLevel)); + if (EmitThinLTO) { + MPM.addPass(ThinLTOBitcodeWriterPass( + ThinLTODataOS, EmitThinLTOSummary ? 
&ThinLinkDataOS : nullptr)); + } else { + MPM.addPass(BitcodeWriterPass(ThinLTODataOS)); + } + *ThinLTOBufferRef = ThinLTOBuffer.release(); + MPM.addPass(PB.buildModuleOptimizationPipeline( + OptLevel, ThinOrFullLTOPhase::None)); + MPM.addPass( + createModuleToFunctionPassAdaptor(AnnotationRemarksPass())); + } else { + MPM = PB.buildPerModuleDefaultPipeline(OptLevel); + } break; case LLVMRustOptStage::PreLinkThinLTO: MPM = PB.buildThinLTOPreLinkDefaultPipeline(OptLevel); @@ -1022,6 +1059,16 @@ extern "C" LLVMRustResult LLVMRustOptimize( MPM.addPass(CanonicalizeAliasesPass()); MPM.addPass(NameAnonGlobalPass()); } + // For `-Copt-level=0`, ThinLTO, or LTO. + if (ThinLTOBufferRef && *ThinLTOBufferRef == nullptr) { + if (EmitThinLTO) { + MPM.addPass(ThinLTOBitcodeWriterPass( + ThinLTODataOS, EmitThinLTOSummary ? &ThinLinkDataOS : nullptr)); + } else { + MPM.addPass(BitcodeWriterPass(ThinLTODataOS)); + } + *ThinLTOBufferRef = ThinLTOBuffer.release(); + } // now load "-enzyme" pass: #ifdef ENZYME @@ -1500,19 +1547,6 @@ extern "C" bool LLVMRustPrepareThinLTOImport(const LLVMRustThinLTOData *Data, return true; } -// This struct and various functions are sort of a hack right now, but the -// problem is that we've got in-memory LLVM modules after we generate and -// optimize all codegen-units for one compilation in rustc. To be compatible -// with the LTO support above we need to serialize the modules plus their -// ThinLTO summary into memory. -// -// This structure is basically an owned version of a serialize module, with -// a ThinLTO summary attached. -struct LLVMRustThinLTOBuffer { - std::string data; - std::string thin_link_data; -}; - extern "C" LLVMRustThinLTOBuffer * LLVMRustThinLTOBufferCreate(LLVMModuleRef M, bool is_thin, bool emit_summary) { auto Ret = std::make_unique<LLVMRustThinLTOBuffer>(); diff --git a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp index aea2a8dd097..53df59930f4 100644 --- a/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/RustWrapper.cpp @@ -152,8 +152,12 @@ extern "C" LLVMContextRef LLVMRustContextCreate(bool shouldDiscardNames) { } extern "C" void LLVMRustSetNormalizedTarget(LLVMModuleRef M, - const char *Triple) { - unwrap(M)->setTargetTriple(Triple::normalize(Triple)); + const char *Target) { +#if LLVM_VERSION_GE(21, 0) + unwrap(M)->setTargetTriple(Triple(Triple::normalize(Target))); +#else + unwrap(M)->setTargetTriple(Triple::normalize(Target)); +#endif } extern "C" void LLVMRustPrintPassTimings(RustStringRef OutBuf) { diff --git a/compiler/rustc_llvm/src/lib.rs b/compiler/rustc_llvm/src/lib.rs index eda9b2b1fc0..68058250a26 100644 --- a/compiler/rustc_llvm/src/lib.rs +++ b/compiler/rustc_llvm/src/lib.rs @@ -4,7 +4,6 @@ #![doc(rust_logo)] #![feature(extern_types)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::cell::RefCell; diff --git a/compiler/rustc_macros/src/lib.rs b/compiler/rustc_macros/src/lib.rs index 34fc0f00320..44ba064dd82 100644 --- a/compiler/rustc_macros/src/lib.rs +++ b/compiler/rustc_macros/src/lib.rs @@ -6,7 +6,6 @@ #![feature(proc_macro_diagnostic)] #![feature(proc_macro_span)] #![feature(proc_macro_tracked_env)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use proc_macro::TokenStream; diff --git a/compiler/rustc_macros/src/print_attribute.rs b/compiler/rustc_macros/src/print_attribute.rs index 3c6e30b851b..42d94e72ee9 100644 --- a/compiler/rustc_macros/src/print_attribute.rs +++ 
b/compiler/rustc_macros/src/print_attribute.rs @@ -16,12 +16,14 @@ fn print_fields(name: &Ident, fields: &Fields) -> (TokenStream, TokenStream, Tok let name = field.ident.as_ref().unwrap(); let string_name = name.to_string(); disps.push(quote! { - if __printed_anything && #name.print_something() { - __p.word_space(","); + if #name.should_render() { + if __printed_anything { + __p.word_space(","); + } + __p.word(#string_name); + __p.word_space(":"); __printed_anything = true; } - __p.word(#string_name); - __p.word_space(":"); #name.print_attribute(__p); }); field_names.push(name); @@ -31,10 +33,11 @@ fn print_fields(name: &Ident, fields: &Fields) -> (TokenStream, TokenStream, Tok quote! { {#(#field_names),*} }, quote! { __p.word(#string_name); - if true #(&& !#field_names.print_something())* { + if true #(&& !#field_names.should_render())* { return; } + __p.nbsp(); __p.word("{"); #(#disps)* __p.word("}"); @@ -48,8 +51,10 @@ fn print_fields(name: &Ident, fields: &Fields) -> (TokenStream, TokenStream, Tok for idx in 0..fields_unnamed.unnamed.len() { let name = format_ident!("f{idx}"); disps.push(quote! { - if __printed_anything && #name.print_something() { - __p.word_space(","); + if #name.should_render() { + if __printed_anything { + __p.word_space(","); + } __printed_anything = true; } #name.print_attribute(__p); @@ -62,13 +67,13 @@ fn print_fields(name: &Ident, fields: &Fields) -> (TokenStream, TokenStream, Tok quote! { __p.word(#string_name); - if true #(&& !#field_names.print_something())* { + if true #(&& !#field_names.should_render())* { return; } - __p.word("("); + __p.popen(); #(#disps)* - __p.word(")"); + __p.pclose(); }, quote! { true }, ) @@ -138,7 +143,7 @@ pub(crate) fn print_attribute(input: Structure<'_>) -> TokenStream { input.gen_impl(quote! 
{ #[allow(unused)] gen impl PrintAttribute for @Self { - fn print_something(&self) -> bool { #printed } + fn should_render(&self) -> bool { #printed } fn print_attribute(&self, __p: &mut rustc_ast_pretty::pp::Printer) { #code } } }) diff --git a/compiler/rustc_metadata/messages.ftl b/compiler/rustc_metadata/messages.ftl index df0a25712cc..20f66fae5c0 100644 --- a/compiler/rustc_metadata/messages.ftl +++ b/compiler/rustc_metadata/messages.ftl @@ -244,6 +244,9 @@ metadata_prev_alloc_error_handler = metadata_prev_global_alloc = previous global allocator defined here +metadata_raw_dylib_elf_unstable = + link kind `raw-dylib` is unstable on ELF platforms + metadata_raw_dylib_no_nul = link name must not contain NUL characters if link kind is `raw-dylib` diff --git a/compiler/rustc_metadata/src/dependency_format.rs b/compiler/rustc_metadata/src/dependency_format.rs index 582c2215d92..be31aa629c8 100644 --- a/compiler/rustc_metadata/src/dependency_format.rs +++ b/compiler/rustc_metadata/src/dependency_format.rs @@ -84,7 +84,7 @@ pub(crate) fn calculate(tcx: TyCtxt<'_>) -> Dependencies { fn calculate_type(tcx: TyCtxt<'_>, ty: CrateType) -> DependencyList { let sess = &tcx.sess; - if !sess.opts.output_types.should_codegen() { + if !sess.opts.output_types.should_link() { return IndexVec::new(); } diff --git a/compiler/rustc_metadata/src/lib.rs b/compiler/rustc_metadata/src/lib.rs index ebcc0efd5a6..634c82fd1d5 100644 --- a/compiler/rustc_metadata/src/lib.rs +++ b/compiler/rustc_metadata/src/lib.rs @@ -1,5 +1,6 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(coroutines)] @@ -15,7 +16,6 @@ #![feature(proc_macro_internals)] #![feature(rustdoc_internals)] #![feature(trusted_len)] -#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate proc_macro; diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs index 3f3e58384cb..d5dd5059aac 100644 --- a/compiler/rustc_metadata/src/locator.rs +++ b/compiler/rustc_metadata/src/locator.rs @@ -290,7 +290,7 @@ impl fmt::Display for CrateFlavor { } impl IntoDiagArg for CrateFlavor { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { match self { CrateFlavor::Rlib => DiagArgValue::Str(Cow::Borrowed("rlib")), CrateFlavor::Rmeta => DiagArgValue::Str(Cow::Borrowed("rmeta")), diff --git a/compiler/rustc_metadata/src/native_libs.rs b/compiler/rustc_metadata/src/native_libs.rs index e2d043f47c0..b96921a63f3 100644 --- a/compiler/rustc_metadata/src/native_libs.rs +++ b/compiler/rustc_metadata/src/native_libs.rs @@ -17,7 +17,7 @@ use rustc_session::search_paths::PathKind; use rustc_session::utils::NativeLibKind; use rustc_span::def_id::{DefId, LOCAL_CRATE}; use rustc_span::{Symbol, sym}; -use rustc_target::spec::LinkSelfContainedComponents; +use rustc_target::spec::{BinaryFormat, LinkSelfContainedComponents}; use crate::{errors, fluent_generated}; @@ -263,9 +263,26 @@ impl<'tcx> Collector<'tcx> { NativeLibKind::Framework { as_needed: None } } "raw-dylib" => { - if !sess.target.is_like_windows { + if sess.target.is_like_windows { + // raw-dylib is stable and working on Windows + } else if sess.target.binary_format == BinaryFormat::Elf + && features.raw_dylib_elf() + { + // raw-dylib is unstable on ELF, but the user opted in + } 
else if sess.target.binary_format == BinaryFormat::Elf + && sess.is_nightly_build() + { + feature_err( + sess, + sym::raw_dylib_elf, + span, + fluent_generated::metadata_raw_dylib_elf_unstable, + ) + .emit(); + } else { sess.dcx().emit_err(errors::RawDylibOnlyWindows { span }); } + NativeLibKind::RawDylib } "link-arg" => { diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index 16149198303..6ffbebfffd5 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -2,7 +2,7 @@ use std::iter::TrustedLen; use std::path::Path; -use std::sync::Arc; +use std::sync::{Arc, OnceLock}; use std::{io, iter, mem}; pub(super) use cstore_impl::provide; @@ -11,7 +11,7 @@ use rustc_ast as ast; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::owned_slice::OwnedSlice; -use rustc_data_structures::sync::{Lock, OnceLock}; +use rustc_data_structures::sync::Lock; use rustc_data_structures::unhash::UnhashMap; use rustc_expand::base::{SyntaxExtension, SyntaxExtensionKind}; use rustc_expand::proc_macro::{AttrProcMacro, BangProcMacro, DeriveProcMacro}; @@ -1116,7 +1116,6 @@ impl<'a> CrateMetadataRef<'a> { value: self.get_default_field(did.index), }) .collect(), - adt_kind, parent_did, None, data.is_non_exhaustive, diff --git a/compiler/rustc_metadata/src/rmeta/encoder.rs b/compiler/rustc_metadata/src/rmeta/encoder.rs index 88a88847e6b..57c941976e4 100644 --- a/compiler/rustc_metadata/src/rmeta/encoder.rs +++ b/compiler/rustc_metadata/src/rmeta/encoder.rs @@ -1357,8 +1357,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { features: &tcx.features(), }; let attr_iter = tcx - .hir() - .attrs(tcx.local_def_id_to_hir_id(def_id)) + .hir_attrs(tcx.local_def_id_to_hir_id(def_id)) .iter() .filter(|attr| analyze_attr(*attr, &mut state)); @@ -1839,7 +1838,7 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { fn encode_info_for_macro(&mut self, def_id: LocalDefId) { let tcx = self.tcx; - let hir::ItemKind::Macro(macro_def, _) = tcx.hir().expect_item(def_id).kind else { bug!() }; + let hir::ItemKind::Macro(macro_def, _) = tcx.hir_expect_item(def_id).kind else { bug!() }; self.tables.is_macro_rules.set(def_id.local_def_index, macro_def.macro_rules); record!(self.tables.macro_definition[def_id.to_def_id()] <- &*macro_def.body); } @@ -1918,11 +1917,11 @@ impl<'a, 'tcx> EncodeContext<'a, 'tcx> { for &proc_macro in &tcx.resolutions(()).proc_macros { let id = proc_macro; let proc_macro = tcx.local_def_id_to_hir_id(proc_macro); - let mut name = hir.name(proc_macro); + let mut name = tcx.hir_name(proc_macro); let span = hir.span(proc_macro); // Proc-macros may have attributes like `#[allow_internal_unstable]`, // so downstream crates need access to them. 
- let attrs = hir.attrs(proc_macro); + let attrs = tcx.hir_attrs(proc_macro); let macro_kind = if ast::attr::contains_name(attrs, sym::proc_macro) { MacroKind::Bang } else if ast::attr::contains_name(attrs, sym::proc_macro_attribute) { diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml index aebd2181f31..43c1af642dd 100644 --- a/compiler/rustc_middle/Cargo.toml +++ b/compiler/rustc_middle/Cargo.toml @@ -14,7 +14,8 @@ rustc_abi = { path = "../rustc_abi" } rustc_apfloat = "0.2.0" rustc_arena = { path = "../rustc_arena" } rustc_ast = { path = "../rustc_ast" } -rustc_attr_parsing = { path = "../rustc_attr_parsing" } +rustc_ast_ir = { path = "../rustc_ast_ir" } +rustc_attr_data_structures = { path = "../rustc_attr_data_structures" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_error_messages = { path = "../rustc_error_messages" } # Used for intra-doc links rustc_errors = { path = "../rustc_errors" } diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs index be8a3403ba9..bd315577efb 100644 --- a/compiler/rustc_middle/src/error.rs +++ b/compiler/rustc_middle/src/error.rs @@ -47,10 +47,10 @@ pub struct UnsupportedUnion { // FIXME(autodiff): I should get used somewhere #[derive(Diagnostic)] #[diag(middle_autodiff_unsafe_inner_const_ref)] -pub struct AutodiffUnsafeInnerConstRef { +pub struct AutodiffUnsafeInnerConstRef<'tcx> { #[primary_span] pub span: Span, - pub ty: String, + pub ty: Ty<'tcx>, } #[derive(Subdiagnostic)] diff --git a/compiler/rustc_middle/src/hir/map.rs b/compiler/rustc_middle/src/hir/map.rs index fad8c7dcbcb..73c0af84a9f 100644 --- a/compiler/rustc_middle/src/hir/map.rs +++ b/compiler/rustc_middle/src/hir/map.rs @@ -275,7 +275,7 @@ impl<'tcx> TyCtxt<'tcx> { span_bug!( self.hir().span(hir_id), "body_owned_by: {} has no associated body", - self.hir().node_to_string(hir_id) + self.hir_id_to_string(hir_id) ); }) } @@ -374,7 +374,7 @@ impl<'tcx> TyCtxt<'tcx> { /// invoking `krate.attrs` because it registers a tighter /// dep-graph access. pub fn hir_krate_attrs(self) -> &'tcx [Attribute] { - self.hir().attrs(CRATE_HIR_ID) + self.hir_attrs(CRATE_HIR_ID) } pub fn hir_rustc_coherence_is_core(self) -> bool { @@ -674,104 +674,178 @@ impl<'tcx> TyCtxt<'tcx> { } } } -} -impl<'hir> Map<'hir> { - pub fn get_foreign_abi(self, hir_id: HirId) -> ExternAbi { - let parent = self.tcx.hir_get_parent_item(hir_id); + /// Get a representation of this `id` for debugging purposes. + /// NOTE: Do NOT use this in diagnostics! + pub fn hir_id_to_string(self, id: HirId) -> String { + let path_str = |def_id: LocalDefId| self.def_path_str(def_id); + + let span_str = || { + self.sess.source_map().span_to_snippet(Map { tcx: self }.span(id)).unwrap_or_default() + }; + let node_str = |prefix| format!("{id} ({prefix} `{}`)", span_str()); + + match self.hir_node(id) { + Node::Item(item) => { + let item_str = match item.kind { + ItemKind::ExternCrate(..) => "extern crate", + ItemKind::Use(..) => "use", + ItemKind::Static(..) => "static", + ItemKind::Const(..) => "const", + ItemKind::Fn { .. } => "fn", + ItemKind::Macro(..) => "macro", + ItemKind::Mod(..) => "mod", + ItemKind::ForeignMod { .. } => "foreign mod", + ItemKind::GlobalAsm { .. } => "global asm", + ItemKind::TyAlias(..) => "ty", + ItemKind::Enum(..) => "enum", + ItemKind::Struct(..) => "struct", + ItemKind::Union(..) => "union", + ItemKind::Trait(..) => "trait", + ItemKind::TraitAlias(..) => "trait alias", + ItemKind::Impl { .. 
} => "impl", + }; + format!("{id} ({item_str} {})", path_str(item.owner_id.def_id)) + } + Node::ForeignItem(item) => { + format!("{id} (foreign item {})", path_str(item.owner_id.def_id)) + } + Node::ImplItem(ii) => { + let kind = match ii.kind { + ImplItemKind::Const(..) => "associated constant", + ImplItemKind::Fn(fn_sig, _) => match fn_sig.decl.implicit_self { + ImplicitSelfKind::None => "associated function", + _ => "method", + }, + ImplItemKind::Type(_) => "associated type", + }; + format!("{id} ({kind} `{}` in {})", ii.ident, path_str(ii.owner_id.def_id)) + } + Node::TraitItem(ti) => { + let kind = match ti.kind { + TraitItemKind::Const(..) => "associated constant", + TraitItemKind::Fn(fn_sig, _) => match fn_sig.decl.implicit_self { + ImplicitSelfKind::None => "associated function", + _ => "trait method", + }, + TraitItemKind::Type(..) => "associated type", + }; + + format!("{id} ({kind} `{}` in {})", ti.ident, path_str(ti.owner_id.def_id)) + } + Node::Variant(variant) => { + format!("{id} (variant `{}` in {})", variant.ident, path_str(variant.def_id)) + } + Node::Field(field) => { + format!("{id} (field `{}` in {})", field.ident, path_str(field.def_id)) + } + Node::AnonConst(_) => node_str("const"), + Node::ConstBlock(_) => node_str("const"), + Node::ConstArg(_) => node_str("const"), + Node::Expr(_) => node_str("expr"), + Node::ExprField(_) => node_str("expr field"), + Node::Stmt(_) => node_str("stmt"), + Node::PathSegment(_) => node_str("path segment"), + Node::Ty(_) => node_str("type"), + Node::AssocItemConstraint(_) => node_str("assoc item constraint"), + Node::TraitRef(_) => node_str("trait ref"), + Node::OpaqueTy(_) => node_str("opaque type"), + Node::Pat(_) => node_str("pat"), + Node::TyPat(_) => node_str("pat ty"), + Node::PatField(_) => node_str("pattern field"), + Node::PatExpr(_) => node_str("pattern literal"), + Node::Param(_) => node_str("param"), + Node::Arm(_) => node_str("arm"), + Node::Block(_) => node_str("block"), + Node::Infer(_) => node_str("infer"), + Node::LetStmt(_) => node_str("local"), + Node::Ctor(ctor) => format!( + "{id} (ctor {})", + ctor.ctor_def_id().map_or("<missing path>".into(), |def_id| path_str(def_id)), + ), + Node::Lifetime(_) => node_str("lifetime"), + Node::GenericParam(param) => { + format!("{id} (generic_param {})", path_str(param.def_id)) + } + Node::Crate(..) => String::from("(root_crate)"), + Node::WherePredicate(_) => node_str("where predicate"), + Node::Synthetic => unreachable!(), + Node::Err(_) => node_str("error"), + Node::PreciseCapturingNonLifetimeArg(_param) => node_str("parameter"), + } + } + + pub fn hir_get_foreign_abi(self, hir_id: HirId) -> ExternAbi { + let parent = self.hir_get_parent_item(hir_id); if let OwnerNode::Item(Item { kind: ItemKind::ForeignMod { abi, .. }, .. 
}) = - self.tcx.hir_owner_node(parent) + self.hir_owner_node(parent) { return *abi; } bug!( "expected foreign mod or inlined parent, found {}", - self.node_to_string(HirId::make_owner(parent.def_id)) + self.hir_id_to_string(HirId::make_owner(parent.def_id)) ) } - pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> { - match self.tcx.expect_hir_owner_node(id) { + pub fn hir_expect_item(self, id: LocalDefId) -> &'tcx Item<'tcx> { + match self.expect_hir_owner_node(id) { OwnerNode::Item(item) => item, - _ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))), + _ => bug!("expected item, found {}", self.hir_id_to_string(HirId::make_owner(id))), } } - pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> { - match self.tcx.expect_hir_owner_node(id) { + pub fn hir_expect_impl_item(self, id: LocalDefId) -> &'tcx ImplItem<'tcx> { + match self.expect_hir_owner_node(id) { OwnerNode::ImplItem(item) => item, - _ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))), + _ => bug!("expected impl item, found {}", self.hir_id_to_string(HirId::make_owner(id))), } } - pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> { - match self.tcx.expect_hir_owner_node(id) { + pub fn hir_expect_trait_item(self, id: LocalDefId) -> &'tcx TraitItem<'tcx> { + match self.expect_hir_owner_node(id) { OwnerNode::TraitItem(item) => item, - _ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))), - } - } - - pub fn get_fn_output(self, def_id: LocalDefId) -> Option<&'hir FnRetTy<'hir>> { - Some(&self.tcx.opt_hir_owner_node(def_id)?.fn_decl()?.output) - } - - pub fn expect_variant(self, id: HirId) -> &'hir Variant<'hir> { - match self.tcx.hir_node(id) { - Node::Variant(variant) => variant, - _ => bug!("expected variant, found {}", self.node_to_string(id)), - } - } - - pub fn expect_field(self, id: HirId) -> &'hir FieldDef<'hir> { - match self.tcx.hir_node(id) { - Node::Field(field) => field, - _ => bug!("expected field, found {}", self.node_to_string(id)), - } - } - - pub fn expect_foreign_item(self, id: OwnerId) -> &'hir ForeignItem<'hir> { - match self.tcx.hir_owner_node(id) { - OwnerNode::ForeignItem(item) => item, _ => { - bug!( - "expected foreign item, found {}", - self.node_to_string(HirId::make_owner(id.def_id)) - ) + bug!("expected trait item, found {}", self.hir_id_to_string(HirId::make_owner(id))) } } } + pub fn hir_get_fn_output(self, def_id: LocalDefId) -> Option<&'tcx FnRetTy<'tcx>> { + Some(&self.opt_hir_owner_node(def_id)?.fn_decl()?.output) + } + #[track_caller] - pub fn expect_opaque_ty(self, id: LocalDefId) -> &'hir OpaqueTy<'hir> { - match self.tcx.hir_node_by_def_id(id) { + pub fn hir_expect_opaque_ty(self, id: LocalDefId) -> &'tcx OpaqueTy<'tcx> { + match self.hir_node_by_def_id(id) { Node::OpaqueTy(opaq) => opaq, _ => { bug!( "expected opaque type definition, found {}", - self.node_to_string(self.tcx.local_def_id_to_hir_id(id)) + self.hir_id_to_string(self.local_def_id_to_hir_id(id)) ) } } } - pub fn expect_expr(self, id: HirId) -> &'hir Expr<'hir> { - match self.tcx.hir_node(id) { + pub fn hir_expect_expr(self, id: HirId) -> &'tcx Expr<'tcx> { + match self.hir_node(id) { Node::Expr(expr) => expr, - _ => bug!("expected expr, found {}", self.node_to_string(id)), + _ => bug!("expected expr, found {}", self.hir_id_to_string(id)), } } - pub fn opt_delegation_sig_id(self, def_id: LocalDefId) -> Option<DefId> { - self.tcx.opt_hir_owner_node(def_id)?.fn_decl()?.opt_delegation_sig_id() 
+ pub fn hir_opt_delegation_sig_id(self, def_id: LocalDefId) -> Option<DefId> { + self.opt_hir_owner_node(def_id)?.fn_decl()?.opt_delegation_sig_id() } #[inline] - fn opt_ident(self, id: HirId) -> Option<Ident> { - match self.tcx.hir_node(id) { + fn hir_opt_ident(self, id: HirId) -> Option<Ident> { + match self.hir_node(id) { Node::Pat(&Pat { kind: PatKind::Binding(_, _, ident, _), .. }) => Some(ident), // A `Ctor` doesn't have an identifier itself, but its parent // struct/variant does. Compare with `hir::Map::span`. - Node::Ctor(..) => match self.tcx.parent_hir_node(id) { + Node::Ctor(..) => match self.parent_hir_node(id) { Node::Item(item) => Some(item.ident), Node::Variant(variant) => Some(variant.ident), _ => unreachable!(), @@ -781,30 +855,32 @@ impl<'hir> Map<'hir> { } #[inline] - pub(super) fn opt_ident_span(self, id: HirId) -> Option<Span> { - self.opt_ident(id).map(|ident| ident.span) + pub(super) fn hir_opt_ident_span(self, id: HirId) -> Option<Span> { + self.hir_opt_ident(id).map(|ident| ident.span) } #[inline] - pub fn ident(self, id: HirId) -> Ident { - self.opt_ident(id).unwrap() + pub fn hir_ident(self, id: HirId) -> Ident { + self.hir_opt_ident(id).unwrap() } #[inline] - pub fn opt_name(self, id: HirId) -> Option<Symbol> { - self.opt_ident(id).map(|ident| ident.name) + pub fn hir_opt_name(self, id: HirId) -> Option<Symbol> { + self.hir_opt_ident(id).map(|ident| ident.name) } - pub fn name(self, id: HirId) -> Symbol { - self.opt_name(id).unwrap_or_else(|| bug!("no name for {}", self.node_to_string(id))) + pub fn hir_name(self, id: HirId) -> Symbol { + self.hir_opt_name(id).unwrap_or_else(|| bug!("no name for {}", self.hir_id_to_string(id))) } /// Given a node ID, gets a list of attributes associated with the AST /// corresponding to the node-ID. - pub fn attrs(self, id: HirId) -> &'hir [Attribute] { - self.tcx.hir_attrs(id.owner).get(id.local_id) + pub fn hir_attrs(self, id: HirId) -> &'tcx [Attribute] { + self.hir_attr_map(id.owner).get(id.local_id) } +} +impl<'hir> Map<'hir> { /// Gets the span of the definition of the specified HIR node. /// This is used by `tcx.def_span`. pub fn span(self, hir_id: HirId) -> Span { @@ -977,12 +1053,6 @@ impl<'hir> Map<'hir> { } } - /// Get a representation of this `id` for debugging purposes. - /// NOTE: Do NOT use this in diagnostics! - pub fn node_to_string(self, id: HirId) -> String { - hir_id_to_string(self, id) - } - /// Returns the HirId of `N` in `struct Foo<const N: usize = { ... }>` when /// called with the HirId for the `{ ... }` anon const pub fn opt_const_param_default_param_def_id(self, anon_const: HirId) -> Option<LocalDefId> { @@ -1147,102 +1217,6 @@ fn upstream_crates(tcx: TyCtxt<'_>) -> Vec<(StableCrateId, Svh)> { upstream_crates } -fn hir_id_to_string(map: Map<'_>, id: HirId) -> String { - let path_str = |def_id: LocalDefId| map.tcx.def_path_str(def_id); - - let span_str = || map.tcx.sess.source_map().span_to_snippet(map.span(id)).unwrap_or_default(); - let node_str = |prefix| format!("{id} ({prefix} `{}`)", span_str()); - - match map.tcx.hir_node(id) { - Node::Item(item) => { - let item_str = match item.kind { - ItemKind::ExternCrate(..) => "extern crate", - ItemKind::Use(..) => "use", - ItemKind::Static(..) => "static", - ItemKind::Const(..) => "const", - ItemKind::Fn { .. } => "fn", - ItemKind::Macro(..) => "macro", - ItemKind::Mod(..) => "mod", - ItemKind::ForeignMod { .. } => "foreign mod", - ItemKind::GlobalAsm { .. } => "global asm", - ItemKind::TyAlias(..) => "ty", - ItemKind::Enum(..) 
=> "enum", - ItemKind::Struct(..) => "struct", - ItemKind::Union(..) => "union", - ItemKind::Trait(..) => "trait", - ItemKind::TraitAlias(..) => "trait alias", - ItemKind::Impl { .. } => "impl", - }; - format!("{id} ({item_str} {})", path_str(item.owner_id.def_id)) - } - Node::ForeignItem(item) => { - format!("{id} (foreign item {})", path_str(item.owner_id.def_id)) - } - Node::ImplItem(ii) => { - let kind = match ii.kind { - ImplItemKind::Const(..) => "associated constant", - ImplItemKind::Fn(fn_sig, _) => match fn_sig.decl.implicit_self { - ImplicitSelfKind::None => "associated function", - _ => "method", - }, - ImplItemKind::Type(_) => "associated type", - }; - format!("{id} ({kind} `{}` in {})", ii.ident, path_str(ii.owner_id.def_id)) - } - Node::TraitItem(ti) => { - let kind = match ti.kind { - TraitItemKind::Const(..) => "associated constant", - TraitItemKind::Fn(fn_sig, _) => match fn_sig.decl.implicit_self { - ImplicitSelfKind::None => "associated function", - _ => "trait method", - }, - TraitItemKind::Type(..) => "associated type", - }; - - format!("{id} ({kind} `{}` in {})", ti.ident, path_str(ti.owner_id.def_id)) - } - Node::Variant(variant) => { - format!("{id} (variant `{}` in {})", variant.ident, path_str(variant.def_id)) - } - Node::Field(field) => { - format!("{id} (field `{}` in {})", field.ident, path_str(field.def_id)) - } - Node::AnonConst(_) => node_str("const"), - Node::ConstBlock(_) => node_str("const"), - Node::ConstArg(_) => node_str("const"), - Node::Expr(_) => node_str("expr"), - Node::ExprField(_) => node_str("expr field"), - Node::Stmt(_) => node_str("stmt"), - Node::PathSegment(_) => node_str("path segment"), - Node::Ty(_) => node_str("type"), - Node::AssocItemConstraint(_) => node_str("assoc item constraint"), - Node::TraitRef(_) => node_str("trait ref"), - Node::OpaqueTy(_) => node_str("opaque type"), - Node::Pat(_) => node_str("pat"), - Node::TyPat(_) => node_str("pat ty"), - Node::PatField(_) => node_str("pattern field"), - Node::PatExpr(_) => node_str("pattern literal"), - Node::Param(_) => node_str("param"), - Node::Arm(_) => node_str("arm"), - Node::Block(_) => node_str("block"), - Node::Infer(_) => node_str("infer"), - Node::LetStmt(_) => node_str("local"), - Node::Ctor(ctor) => format!( - "{id} (ctor {})", - ctor.ctor_def_id().map_or("<missing path>".into(), |def_id| path_str(def_id)), - ), - Node::Lifetime(_) => node_str("lifetime"), - Node::GenericParam(param) => { - format!("{id} (generic_param {})", path_str(param.def_id)) - } - Node::Crate(..) => String::from("(root_crate)"), - Node::WherePredicate(_) => node_str("where predicate"), - Node::Synthetic => unreachable!(), - Node::Err(_) => node_str("error"), - Node::PreciseCapturingNonLifetimeArg(_param) => node_str("parameter"), - } -} - pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalModDefId) -> ModuleItems { let mut collector = ItemCollector::new(tcx, false); @@ -1356,7 +1330,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { self.submodules.push(item.owner_id); // A module collector does not recurse inside nested modules. 
if self.crate_collector { - intravisit::walk_mod(self, module, item.hir_id()); + intravisit::walk_mod(self, module); } } else { intravisit::walk_item(self, item) diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs index 6071a58367e..68b9a4f56b9 100644 --- a/compiler/rustc_middle/src/hir/mod.rs +++ b/compiler/rustc_middle/src/hir/mod.rs @@ -202,13 +202,13 @@ pub fn provide(providers: &mut Providers) { } }) }; - providers.hir_attrs = |tcx, id| { + providers.hir_attr_map = |tcx, id| { tcx.hir_crate(()).owners[id.def_id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs) }; providers.def_span = |tcx, def_id| tcx.hir().span(tcx.local_def_id_to_hir_id(def_id)); providers.def_ident_span = |tcx, def_id| { let hir_id = tcx.local_def_id_to_hir_id(def_id); - tcx.hir().opt_ident_span(hir_id) + tcx.hir_opt_ident_span(hir_id) }; providers.fn_arg_names = |tcx, def_id| { let hir = tcx.hir(); diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index 48ea7df5c23..1e6178144c9 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -29,6 +29,7 @@ #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::potential_query_instability)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(allocator_api)] @@ -61,7 +62,6 @@ #![feature(try_trait_v2_yeet)] #![feature(type_alias_impl_trait)] #![feature(yeet_expr)] -#![warn(unreachable_pub)] // tidy-alphabetical-end #[cfg(test)] diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs index 311bc60c3cd..a94ead161c3 100644 --- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs +++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs @@ -1,6 +1,6 @@ use rustc_abi::Align; use rustc_ast::expand::autodiff_attrs::AutoDiffAttrs; -use rustc_attr_parsing::{InlineAttr, InstructionSetAttr, OptimizeAttr}; +use rustc_attr_data_structures::{InlineAttr, InstructionSetAttr, OptimizeAttr}; use rustc_macros::{HashStable, TyDecodable, TyEncodable}; use rustc_span::Symbol; use rustc_target::spec::SanitizerSet; diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 2260cad41b9..ec128c8c478 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -4,7 +4,7 @@ use std::num::NonZero; use rustc_ast::NodeId; -use rustc_attr_parsing::{ +use rustc_attr_data_structures::{ self as attr, ConstStability, DefaultBodyStability, DeprecatedSince, Deprecation, Stability, }; use rustc_data_structures::unord::UnordMap; diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs index 171542d1279..d0dbf64dc59 100644 --- a/compiler/rustc_middle/src/mir/basic_blocks.rs +++ b/compiler/rustc_middle/src/mir/basic_blocks.rs @@ -1,8 +1,9 @@ +use std::sync::OnceLock; + use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::graph; use rustc_data_structures::graph::dominators::{Dominators, dominators}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_data_structures::sync::OnceLock; use rustc_index::{IndexSlice, IndexVec}; use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable}; use rustc_serialize::{Decodable, Decoder, 
Encodable, Encoder}; diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index 95bc9b71fe0..b24f6bc7770 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -222,7 +222,7 @@ impl AllocError { } /// The information that makes up a memory access: offset and size. -#[derive(Copy, Clone, PartialEq)] +#[derive(Copy, Clone)] pub struct AllocRange { pub start: Size, pub size: Size, @@ -470,7 +470,7 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> // Find the provenance. let (offset, _prov) = self .provenance - .range_get_ptrs(range, cx) + .range_ptrs_get(range, cx) .first() .copied() .expect("there must be provenance somewhere here"); @@ -679,6 +679,11 @@ impl<Prov: Provenance, Extra, Bytes: AllocBytes> Allocation<Prov, Extra, Bytes> // Set provenance of all bytes to wildcard. self.provenance.write_wildcards(self.len()); + // Also expose the provenance of the interpreter-level allocation, so it can + // be written by FFI. The `black_box` is defensive programming as LLVM likes + // to (incorrectly) optimize away ptr2int casts whose result is unused. + std::hint::black_box(self.get_bytes_unchecked_raw_mut().expose_provenance()); + Ok(()) } diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs index cc6389e2989..fea5038e6dd 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/init_mask.rs @@ -223,8 +223,8 @@ impl<D: TyDecoder> Decodable<D> for InitMaskMaterialized { // large. impl hash::Hash for InitMaskMaterialized { fn hash<H: hash::Hasher>(&self, state: &mut H) { - const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / std::mem::size_of::<Block>(); - const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>(); + const MAX_BLOCKS_TO_HASH: usize = super::MAX_BYTES_TO_HASH / size_of::<Block>(); + const MAX_BLOCKS_LEN: usize = super::MAX_HASHED_BUFFER_LEN / size_of::<Block>(); // Partially hash the `blocks` buffer when it is large. To limit collisions with common // prefixes and suffixes, we hash the length and some slices of the buffer. diff --git a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs index 2f13f963578..c9525df1f79 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation/provenance_map.rs @@ -2,6 +2,7 @@ //! representation for the common case where PTR_SIZE consecutive bytes have the same provenance. use std::cmp; +use std::ops::Range; use rustc_abi::{HasDataLayout, Size}; use rustc_data_structures::sorted_map::SortedMap; @@ -66,24 +67,33 @@ impl ProvenanceMap { } impl<Prov: Provenance> ProvenanceMap<Prov> { + fn adjusted_range_ptrs(range: AllocRange, cx: &impl HasDataLayout) -> Range<Size> { + // We have to go back `pointer_size - 1` bytes, as that one would still overlap with + // the beginning of this range. + let adjusted_start = Size::from_bytes( + range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1), + ); + adjusted_start..range.end() + } + /// Returns all ptr-sized provenance in the given range. /// If the range has length 0, returns provenance that crosses the edge between `start-1` and /// `start`. 
- pub(super) fn range_get_ptrs( + pub(super) fn range_ptrs_get( &self, range: AllocRange, cx: &impl HasDataLayout, ) -> &[(Size, Prov)] { - // We have to go back `pointer_size - 1` bytes, as that one would still overlap with - // the beginning of this range. - let adjusted_start = Size::from_bytes( - range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1), - ); - self.ptrs.range(adjusted_start..range.end()) + self.ptrs.range(Self::adjusted_range_ptrs(range, cx)) + } + + /// `pm.range_ptrs_is_empty(r, cx)` == `pm.range_ptrs_get(r, cx).is_empty()`, but is faster. + pub(super) fn range_ptrs_is_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { + self.ptrs.range_is_empty(Self::adjusted_range_ptrs(range, cx)) } /// Returns all byte-wise provenance in the given range. - fn range_get_bytes(&self, range: AllocRange) -> &[(Size, Prov)] { + fn range_bytes_get(&self, range: AllocRange) -> &[(Size, Prov)] { if let Some(bytes) = self.bytes.as_ref() { bytes.range(range.start..range.end()) } else { @@ -91,9 +101,14 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } } + /// Same as `range_bytes_get(range).is_empty()`, but faster. + fn range_bytes_is_empty(&self, range: AllocRange) -> bool { + self.bytes.as_ref().is_none_or(|bytes| bytes.range_is_empty(range.start..range.end())) + } + /// Get the provenance of a single byte. pub fn get(&self, offset: Size, cx: &impl HasDataLayout) -> Option<Prov> { - let prov = self.range_get_ptrs(alloc_range(offset, Size::from_bytes(1)), cx); + let prov = self.range_ptrs_get(alloc_range(offset, Size::from_bytes(1)), cx); debug_assert!(prov.len() <= 1); if let Some(entry) = prov.first() { // If it overlaps with this byte, it is on this byte. @@ -117,7 +132,7 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { /// limit access to provenance outside of the `Allocation` abstraction. /// pub fn range_empty(&self, range: AllocRange, cx: &impl HasDataLayout) -> bool { - self.range_get_ptrs(range, cx).is_empty() && self.range_get_bytes(range).is_empty() + self.range_ptrs_is_empty(range, cx) && self.range_bytes_is_empty(range) } /// Yields all the provenances stored in this map. @@ -149,12 +164,14 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // provenance that overlaps with the given range. let (first, last) = { // Find all provenance overlapping the given range. - let provenance = self.range_get_ptrs(range, cx); - if provenance.is_empty() { - // No provenance in this range, we are done. + if self.range_ptrs_is_empty(range, cx) { + // No provenance in this range, we are done. This is the common case. return Ok(()); } + // This redoes some of the work of `range_get_ptrs_is_empty`, but this path is much + // colder than the early return above, so it's worth it. + let provenance = self.range_ptrs_get(range, cx); ( provenance.first().unwrap().0, provenance.last().unwrap().0 + cx.data_layout().pointer_size, @@ -267,8 +284,8 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { // This includes the existing bytewise provenance in the range, and ptr provenance // that overlaps with the begin/end of the range. 
let mut dest_bytes_box = None; - let begin_overlap = self.range_get_ptrs(alloc_range(src.start, Size::ZERO), cx).first(); - let end_overlap = self.range_get_ptrs(alloc_range(src.end(), Size::ZERO), cx).first(); + let begin_overlap = self.range_ptrs_get(alloc_range(src.start, Size::ZERO), cx).first(); + let end_overlap = self.range_ptrs_get(alloc_range(src.end(), Size::ZERO), cx).first(); if !Prov::OFFSET_IS_ADDR { // There can't be any bytewise provenance, and we cannot split up the begin/end overlap. if let Some(entry) = begin_overlap { @@ -291,10 +308,10 @@ impl<Prov: Provenance> ProvenanceMap<Prov> { } else { trace!("no start overlapping entry"); } + // Then the main part, bytewise provenance from `self.bytes`. - if let Some(all_bytes) = self.bytes.as_ref() { - bytes.extend(all_bytes.range(src.start..src.end())); - } + bytes.extend(self.range_bytes_get(src)); + // And finally possibly parts of a pointer at the end. if let Some(entry) = end_overlap { trace!("end overlapping entry: {entry:?}"); diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index 743812e3a20..890756a17ca 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -248,7 +248,7 @@ pub enum InvalidMetaKind { } impl IntoDiagArg for InvalidMetaKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(match self { InvalidMetaKind::SliceTooBig => "slice_too_big", InvalidMetaKind::TooBig => "too_big", @@ -282,7 +282,7 @@ pub struct Misalignment { macro_rules! impl_into_diag_arg_through_debug { ($($ty:ty),*$(,)?) => {$( impl IntoDiagArg for $ty { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Owned(format!("{self:?}"))) } } @@ -401,7 +401,7 @@ pub enum PointerKind { } impl IntoDiagArg for PointerKind { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str( match self { Self::Ref(_) => "ref", @@ -666,7 +666,7 @@ macro_rules! err_ub_custom { msg: || $msg, add_args: Box::new(move |mut set_arg| { $($( - set_arg(stringify!($name).into(), rustc_errors::IntoDiagArg::into_diag_arg($name)); + set_arg(stringify!($name).into(), rustc_errors::IntoDiagArg::into_diag_arg($name, &mut None)); )*)? }) } diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index c48cfffa05c..effedf854e4 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -452,12 +452,7 @@ impl<'tcx> TyCtxt<'tcx> { } let id = self.alloc_map.reserve(); debug!("creating alloc {:?} with id {id:?}", alloc_salt.0); - let had_previous = self - .alloc_map - .to_alloc - .lock_shard_by_value(&id) - .insert(id, alloc_salt.0.clone()) - .is_some(); + let had_previous = self.alloc_map.to_alloc.insert(id, alloc_salt.0.clone()).is_some(); // We just reserved, so should always be unique. assert!(!had_previous); dedup.insert(alloc_salt, id); @@ -510,7 +505,7 @@ impl<'tcx> TyCtxt<'tcx> { /// local dangling pointers and allocations in constants/statics. 
#[inline] pub fn try_get_global_alloc(self, id: AllocId) -> Option<GlobalAlloc<'tcx>> { - self.alloc_map.to_alloc.lock_shard_by_value(&id).get(&id).cloned() + self.alloc_map.to_alloc.get(&id) } #[inline] @@ -529,9 +524,7 @@ impl<'tcx> TyCtxt<'tcx> { /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to /// call this function twice, even with the same `Allocation` will ICE the compiler. pub fn set_alloc_id_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) { - if let Some(old) = - self.alloc_map.to_alloc.lock_shard_by_value(&id).insert(id, GlobalAlloc::Memory(mem)) - { + if let Some(old) = self.alloc_map.to_alloc.insert(id, GlobalAlloc::Memory(mem)) { bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}"); } } @@ -539,11 +532,8 @@ impl<'tcx> TyCtxt<'tcx> { /// Freezes an `AllocId` created with `reserve` by pointing it at a static item. Trying to /// call this function twice, even with the same `DefId` will ICE the compiler. pub fn set_nested_alloc_id_static(self, id: AllocId, def_id: LocalDefId) { - if let Some(old) = self - .alloc_map - .to_alloc - .lock_shard_by_value(&id) - .insert(id, GlobalAlloc::Static(def_id.to_def_id())) + if let Some(old) = + self.alloc_map.to_alloc.insert(id, GlobalAlloc::Static(def_id.to_def_id())) { bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}"); } @@ -573,7 +563,7 @@ pub fn write_target_uint( #[inline] pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> { // This u128 holds an "any-size uint" (since smaller uints can fit in it) - let mut buf = [0u8; std::mem::size_of::<u128>()]; + let mut buf = [0u8; size_of::<u128>()]; // So we do not read exactly 16 bytes into the u128, just the "payload". let uint = match endianness { Endian::Little => { diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index ea0bb5feb12..83857ab6c5c 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -332,13 +332,13 @@ pub struct Body<'tcx> { /// /// ```rust /// fn test<T>() { - /// let _ = [0; std::mem::size_of::<*mut T>()]; + /// let _ = [0; size_of::<*mut T>()]; /// } /// ``` /// /// **WARNING**: Do not change this flag after the MIR was originally created, even if an optimization /// removed the last mention of all generic params. We do not want to rely on optimizations and - /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this. + /// potentially allow things like `[u8; size_of::<T>() * 0]` due to this. pub is_polymorphic: bool, /// The phase at which this MIR should be "injected" into the compilation process.
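An aside on the `provenance_map.rs` hunk above: the `pointer_size - 1` back-off in `adjusted_range_ptrs` exists because a pointer-sized provenance entry that starts up to `pointer_size - 1` bytes before the queried range still overlaps its first byte, so the lookup must be widened to the left. The following standalone sketch (not compiler code; plain `u64` offsets and an assumed 8-byte pointer size) shows the same arithmetic:

// Standalone illustration of the left-widened provenance lookup.
const POINTER_SIZE: u64 = 8; // assumed pointer size, for the example only

/// Start offsets of pointer-provenance entries that overlap `start..end`.
fn overlapping_ptr_entries(entry_starts: &[u64], start: u64, end: u64) -> Vec<u64> {
    // An entry beginning up to POINTER_SIZE - 1 bytes before `start` still
    // covers byte `start`, so the query range is widened on the left.
    let adjusted_start = start.saturating_sub(POINTER_SIZE - 1);
    entry_starts.iter().copied().filter(|&s| s >= adjusted_start && s < end).collect()
}

fn main() {
    // A pointer written at offset 6 covers bytes 6..14, so it is reported for a
    // query of bytes 8..16; entries at offsets 0 and 24 are not.
    assert_eq!(overlapping_ptr_entries(&[0, 6, 24], 8, 16), vec![6u64]);
}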
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index 58d5c94d033..897119c0712 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -2,7 +2,7 @@ use std::fmt; use std::hash::Hash; use rustc_ast::expand::autodiff_attrs::AutoDiffItem; -use rustc_attr_parsing::InlineAttr; +use rustc_attr_data_structures::InlineAttr; use rustc_data_structures::base_n::{BaseNString, CASE_INSENSITIVE, ToBaseN}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::FxIndexMap; diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 875f5282bf2..573f7895da2 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -319,6 +319,7 @@ pub fn write_mir_pretty<'tcx>( writeln!(w, "// WARNING: This output format is intended for human consumers only")?; writeln!(w, "// and is subject to change without notice. Knock yourself out.")?; + writeln!(w, "// HINT: See also -Z dump-mir for MIR at specific points during compilation.")?; let mut first = true; for def_id in dump_mir_def_ids(tcx, single) { @@ -652,7 +653,10 @@ fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn io::Write) -> io: write!(w, "static mut ")? } (_, _) if is_function => write!(w, "fn ")?, - (DefKind::AnonConst | DefKind::InlineConst, _) => {} // things like anon const, not an item + // things like anon const, not an item + (DefKind::AnonConst | DefKind::InlineConst, _) => {} + // `global_asm!` items have fake bodies, which we may dump after mir-build + (DefKind::GlobalAsm, _) => {} _ => bug!("Unexpected def kind {:?}", kind), } @@ -1199,7 +1203,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { && let Some(upvars) = tcx.upvars_mentioned(def_id) { for (&var_id, place) in iter::zip(upvars.keys(), places) { - let var_name = tcx.hir().name(var_id); + let var_name = tcx.hir_name(var_id); struct_fmt.field(var_name.as_str(), place); } } else { @@ -1220,7 +1224,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { && let Some(upvars) = tcx.upvars_mentioned(def_id) { for (&var_id, place) in iter::zip(upvars.keys(), places) { - let var_name = tcx.hir().name(var_id); + let var_name = tcx.hir_name(var_id); struct_fmt.field(var_name.as_str(), place); } } else { diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs index b887370fd69..b2c51ad8864 100644 --- a/compiler/rustc_middle/src/mir/terminator.rs +++ b/compiler/rustc_middle/src/mir/terminator.rs @@ -357,7 +357,7 @@ impl<O> AssertKind<O> { macro_rules! add { ($name: expr, $value: expr) => { - adder($name.into(), $value.into_diag_arg()); + adder($name.into(), $value.into_diag_arg(&mut None)); }; } @@ -454,6 +454,7 @@ mod helper { /// Like [`SwitchTargets::target_for_value`], but returning the same type as /// [`Terminator::successors`].
#[inline] + #[cfg_attr(not(bootstrap), define_opaque(Successors))] pub fn successors_for_value(&self, value: u128) -> Successors<'_> { let target = self.target_for_value(value); (&[]).into_iter().copied().chain(Some(target)) @@ -462,6 +463,7 @@ mod helper { impl<'tcx> TerminatorKind<'tcx> { #[inline] + #[cfg_attr(not(bootstrap), define_opaque(Successors))] pub fn successors(&self) -> Successors<'_> { use self::TerminatorKind::*; match *self { @@ -500,6 +502,7 @@ mod helper { } #[inline] + #[cfg_attr(not(bootstrap), define_opaque(SuccessorsMut))] pub fn successors_mut(&mut self) -> SuccessorsMut<'_> { use self::TerminatorKind::*; match *self { diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs index 5950ac295af..9308570d89d 100644 --- a/compiler/rustc_middle/src/mir/traversal.rs +++ b/compiler/rustc_middle/src/mir/traversal.rs @@ -23,19 +23,13 @@ pub struct Preorder<'a, 'tcx> { body: &'a Body<'tcx>, visited: DenseBitSet<BasicBlock>, worklist: Vec<BasicBlock>, - root_is_start_block: bool, } impl<'a, 'tcx> Preorder<'a, 'tcx> { pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> { let worklist = vec![root]; - Preorder { - body, - visited: DenseBitSet::new_empty(body.basic_blocks.len()), - worklist, - root_is_start_block: root == START_BLOCK, - } + Preorder { body, visited: DenseBitSet::new_empty(body.basic_blocks.len()), worklist } } } @@ -71,15 +65,11 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { } fn size_hint(&self) -> (usize, Option<usize>) { - // All the blocks, minus the number of blocks we've visited. - let upper = self.body.basic_blocks.len() - self.visited.count(); + // The worklist might be only things already visited. + let lower = 0; - let lower = if self.root_is_start_block { - // We will visit all remaining blocks exactly once. - upper - } else { - self.worklist.len() - }; + // This is extremely loose, but it's not worth a popcnt loop to do better. + let upper = self.body.basic_blocks.len(); (lower, Some(upper)) } @@ -108,7 +98,6 @@ pub struct Postorder<'a, 'tcx> { basic_blocks: &'a IndexSlice<BasicBlock, BasicBlockData<'tcx>>, visited: DenseBitSet<BasicBlock>, visit_stack: Vec<(BasicBlock, Successors<'a>)>, - root_is_start_block: bool, /// A non-empty `extra` allows for a precise calculation of the successors. extra: Option<(TyCtxt<'tcx>, Instance<'tcx>)>, } @@ -123,7 +112,6 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { basic_blocks, visited: DenseBitSet::new_empty(basic_blocks.len()), visit_stack: Vec::new(), - root_is_start_block: root == START_BLOCK, extra, }; @@ -211,16 +199,13 @@ impl<'tcx> Iterator for Postorder<'_, 'tcx> { } fn size_hint(&self) -> (usize, Option<usize>) { - // All the blocks, minus the number of blocks we've visited. - let upper = self.basic_blocks.len() - self.visited.count(); - - let lower = if self.root_is_start_block { - // We will visit all remaining blocks exactly once. - upper - } else { - self.visit_stack.len() - }; + // These bounds are not at all tight, but that's fine. + // It's not worth a popcnt loop in `DenseBitSet` to improve the upper, + // and in mono-reachable we can't be precise anyway. + // Leaning on amortized growth is fine. 
+ let lower = self.visit_stack.len(); + let upper = self.basic_blocks.len(); (lower, Some(upper)) } } diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index cbd60920bc5..7bbaa0496d5 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -24,10 +24,11 @@ pub trait EraseType: Copy { pub type Erase<T: EraseType> = Erased<impl Copy>; #[inline(always)] +#[cfg_attr(not(bootstrap), define_opaque(Erase))] pub fn erase<T: EraseType>(src: T) -> Erase<T> { // Ensure the sizes match const { - if std::mem::size_of::<T>() != std::mem::size_of::<T::Result>() { + if size_of::<T>() != size_of::<T::Result>() { panic!("size of T must match erased type T::Result") } }; @@ -47,6 +48,7 @@ pub fn erase<T: EraseType>(src: T) -> Erase<T> { /// Restores an erased value. #[inline(always)] +#[cfg_attr(not(bootstrap), define_opaque(Erase))] pub fn restore<T: EraseType>(value: Erase<T>) -> T { let value: Erased<<T as EraseType>::Result> = value; // See comment in `erase` for why we use `transmute_unchecked`. @@ -231,9 +233,9 @@ trivial! { bool, Option<(rustc_span::def_id::DefId, rustc_session::config::EntryFnType)>, Option<rustc_ast::expand::allocator::AllocatorKind>, - Option<rustc_attr_parsing::ConstStability>, - Option<rustc_attr_parsing::DefaultBodyStability>, - Option<rustc_attr_parsing::Stability>, + Option<rustc_attr_data_structures::ConstStability>, + Option<rustc_attr_data_structures::DefaultBodyStability>, + Option<rustc_attr_data_structures::Stability>, Option<rustc_data_structures::svh::Svh>, Option<rustc_hir::def::DefKind>, Option<rustc_hir::CoroutineKind>, @@ -256,10 +258,10 @@ trivial! { Result<rustc_middle::traits::EvaluationResult, rustc_middle::traits::OverflowError>, rustc_abi::ReprOptions, rustc_ast::expand::allocator::AllocatorKind, - rustc_attr_parsing::ConstStability, - rustc_attr_parsing::DefaultBodyStability, - rustc_attr_parsing::Deprecation, - rustc_attr_parsing::Stability, + rustc_attr_data_structures::ConstStability, + rustc_attr_data_structures::DefaultBodyStability, + rustc_attr_data_structures::Deprecation, + rustc_attr_data_structures::Stability, rustc_data_structures::svh::Svh, rustc_errors::ErrorGuaranteed, rustc_hir::Constness, diff --git a/compiler/rustc_middle/src/query/keys.rs b/compiler/rustc_middle/src/query/keys.rs index 1489d57aba6..98314b5abfd 100644 --- a/compiler/rustc_middle/src/query/keys.rs +++ b/compiler/rustc_middle/src/query/keys.rs @@ -508,6 +508,14 @@ impl<'tcx, T: Clone> Key for CanonicalQueryInput<'tcx, T> { } } +impl<'tcx, T: Clone> Key for (CanonicalQueryInput<'tcx, T>, bool) { + type Cache<V> = DefaultCache<Self, V>; + + fn default_span(&self, _tcx: TyCtxt<'_>) -> Span { + DUMMY_SP + } +} + impl Key for (Symbol, u32, u32) { type Cache<V> = DefaultCache<Self, V>; diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 7c4ea06a746..781a898a6e9 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -41,7 +41,7 @@ use rustc_span::def_id::LOCAL_CRATE; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, Span, Symbol}; use rustc_target::spec::PanicStrategy; -use {rustc_abi as abi, rustc_ast as ast, rustc_attr_parsing as attr, rustc_hir as hir}; +use {rustc_abi as abi, rustc_ast as ast, rustc_attr_data_structures as attr, rustc_hir as hir}; use crate::infer::canonical::{self, Canonical}; use crate::lint::LintExpectation; @@ -143,11 +143,11 @@ rustc_queries! 
{ /// Represents crate as a whole (as distinct from the top-level crate module). /// - /// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir().krate()`), + /// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir_crate()`), /// we will have to assume that any change means that you need to be recompiled. /// This is because the `hir_crate` query gives you access to all other items. - /// To avoid this fate, do not call `tcx.hir().krate()`; instead, - /// prefer wrappers like `tcx.visit_all_items_in_krate()`. + /// To avoid this fate, do not call `tcx.hir_crate()`; instead, + /// prefer wrappers like [`TyCtxt::hir_visit_all_item_likes_in_crate`]. query hir_crate(key: ()) -> &'tcx Crate<'tcx> { arena_cache eval_always @@ -198,7 +198,7 @@ rustc_queries! { /// /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. - query hir_attrs(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> { + query hir_attr_map(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> { desc { |tcx| "getting HIR owner attributes in `{}`", tcx.def_path_str(key) } feedable } @@ -748,6 +748,15 @@ rustc_queries! { } } + /// Compute the conditions that need to hold for a conditionally-const item to be const. + /// That is, compute the set of `~const` where clauses for a given item. + /// + /// This can be thought of as the `~const` equivalent of `predicates_of`. These are the + /// predicates that need to be proven at usage sites, and can be assumed at definition. + /// + /// This query also computes the `~const` where clauses for associated types, which are + /// not "const", but which have item bounds which may be `~const`. These must hold for + /// the `~const` item bound to hold. query const_conditions( key: DefId ) -> ty::ConstConditions<'tcx> { @@ -757,6 +766,11 @@ rustc_queries! { separate_provide_extern } + /// Compute the const bounds that are implied for a conditionally-const item. + /// + /// This can be thought of as the `~const` equivalent of `explicit_item_bounds`. These + /// are the predicates that need to be proven at definition sites, and can be assumed at + /// usage sites. query explicit_implied_const_bounds( key: DefId ) -> ty::EarlyBinder<'tcx, &'tcx [(ty::PolyTraitRef<'tcx>, Span)]> { @@ -985,12 +999,6 @@ rustc_queries! { separate_provide_extern } - query self_ty_of_trait_impl_enabling_order_dep_trait_object_hack( - key: DefId - ) -> Option<ty::EarlyBinder<'tcx, ty::Ty<'tcx>>> { - desc { |tcx| "computing self type wrt issue #33140 `{}`", tcx.def_path_str(key) } - } - /// Maps a `DefId` of a type to a list of its inherent impls. /// Contains implementations of methods that are inherent to a type. /// Methods in these implementations don't need to be exported. @@ -1519,6 +1527,11 @@ rustc_queries! { query is_copy_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool { desc { "computing whether `{}` is `Copy`", env.value } } + /// Trait selection queries. These are best used by invoking `ty.is_use_cloned_modulo_regions()`, + /// `ty.is_use_cloned()`, etc, since that will prune the environment where possible. + query is_use_cloned_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool { + desc { "computing whether `{}` is `UseCloned`", env.value } + } /// Query backing `Ty::is_sized`. query is_sized_raw(env: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool { desc { "computing whether `{}` is `Sized`", env.value } @@ -2248,22 +2261,13 @@ rustc_queries!
{ desc { "normalizing `{}`", goal.value } } - query implied_outlives_bounds_compat( - goal: CanonicalImpliedOutlivesBoundsGoal<'tcx> - ) -> Result< - &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>, - NoSolution, - > { - desc { "computing implied outlives bounds for `{}`", goal.canonical.value.value.ty } - } - query implied_outlives_bounds( - goal: CanonicalImpliedOutlivesBoundsGoal<'tcx> + key: (CanonicalImpliedOutlivesBoundsGoal<'tcx>, bool) ) -> Result< &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>, NoSolution, > { - desc { "computing implied outlives bounds v2 for `{}`", goal.canonical.value.value.ty } + desc { "computing implied outlives bounds for `{}` (hack disabled = {:?})", key.0.canonical.value.value.ty, key.1 } } /// Do not call this query directly: diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 690b8128b1a..4834444ed1d 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -165,7 +165,7 @@ impl<'tcx> TyCtxt<'tcx> { } } -#[inline] +#[inline(always)] pub fn query_get_at<'tcx, Cache>( tcx: TyCtxt<'tcx>, execute_query: fn(TyCtxt<'tcx>, Span, Cache::Key, QueryMode) -> Option<Cache::Value>, @@ -258,7 +258,7 @@ macro_rules! query_if_arena { }; } -/// If `separate_provide_if_extern`, then the key can be projected to its +/// If `separate_provide_extern`, then the key can be projected to its /// local key via `<$K as AsLocalKey>::LocalKey`. macro_rules! local_key_if_separate_extern { ([] $($K:tt)*) => { @@ -370,7 +370,7 @@ macro_rules! define_callbacks { // Increase this limit if necessary, but do try to keep the size low if possible #[cfg(target_pointer_width = "64")] const _: () = { - if mem::size_of::<Key<'static>>() > 88 { + if size_of::<Key<'static>>() > 88 { panic!("{}", concat!( "the query `", stringify!($name), @@ -386,7 +386,7 @@ macro_rules! define_callbacks { #[cfg(target_pointer_width = "64")] #[cfg(not(feature = "rustc_randomized_layouts"))] const _: () = { - if mem::size_of::<Value<'static>>() > 64 { + if size_of::<Value<'static>>() > 64 { panic!("{}", concat!( "the query `", stringify!($name), diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index e5592de81cd..f7b98d935d4 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -19,7 +19,7 @@ use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_hir::{BindingMode, ByRef, HirId, MatchSource, RangeEnd}; use rustc_index::{IndexVec, newtype_index}; -use rustc_macros::{HashStable, TypeVisitable}; +use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeVisitable}; use rustc_span::def_id::LocalDefId; use rustc_span::{ErrorGuaranteed, Span, Symbol}; use rustc_target::asm::InlineAsmRegOrRegClass; @@ -49,10 +49,13 @@ macro_rules! thir_with_elements { } )* + // Note: Making `Thir` implement `Clone` is useful for external tools that need access to + // THIR bodies even after the `Steal` query result has been stolen. + // One such tool is https://github.com/rust-corpus/qrates/. /// A container for a THIR body. /// /// This can be indexed directly by any THIR index (e.g. [`ExprId`]). - #[derive(Debug, HashStable)] + #[derive(Debug, HashStable, Clone)] pub struct Thir<'tcx> { pub body_type: BodyTy<'tcx>, $( @@ -90,7 +93,7 @@ thir_with_elements! 
{ params: ParamId => Param<'tcx> => "p{}", } -#[derive(Debug, HashStable)] +#[derive(Debug, HashStable, Clone)] pub enum BodyTy<'tcx> { Const(Ty<'tcx>), Fn(FnSig<'tcx>), @@ -98,7 +101,7 @@ pub enum BodyTy<'tcx> { } /// Description of a type-checked function parameter. -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct Param<'tcx> { /// The pattern that appears in the parameter list, or None for implicit parameters. pub pat: Option<Box<Pat<'tcx>>>, @@ -118,7 +121,7 @@ pub enum LintLevel { Explicit(HirId), } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct Block { /// Whether the block itself has a label. Used by `label: {}` /// and `try` blocks. @@ -138,7 +141,7 @@ pub struct Block { type UserTy<'tcx> = Option<Box<CanonicalUserType<'tcx>>>; -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct AdtExpr<'tcx> { /// The ADT we're constructing. pub adt_def: AdtDef<'tcx>, @@ -155,7 +158,7 @@ pub struct AdtExpr<'tcx> { pub base: AdtExprBase<'tcx>, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub enum AdtExprBase<'tcx> { /// A struct expression where all the fields are explicitly enumerated: `Foo { a, b }`. None, @@ -168,7 +171,7 @@ pub enum AdtExprBase<'tcx> { DefaultFields(Box<[Ty<'tcx>]>), } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct ClosureExpr<'tcx> { pub closure_id: LocalDefId, pub args: UpvarArgs<'tcx>, @@ -177,7 +180,7 @@ pub struct ClosureExpr<'tcx> { pub fake_reads: Vec<(ExprId, FakeReadCause, HirId)>, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct InlineAsmExpr<'tcx> { pub asm_macro: AsmMacro, pub template: &'tcx [InlineAsmTemplatePiece], @@ -195,12 +198,12 @@ pub enum BlockSafety { ExplicitUnsafe(HirId), } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct Stmt<'tcx> { pub kind: StmtKind<'tcx>, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub enum StmtKind<'tcx> { /// An expression with a trailing semicolon. Expr { @@ -240,11 +243,11 @@ pub enum StmtKind<'tcx> { }, } -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, HashStable)] +#[derive(Clone, Debug, Copy, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)] pub struct LocalVarId(pub HirId); /// A THIR expression. -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct Expr<'tcx> { /// kind of expression pub kind: ExprKind<'tcx>, @@ -271,7 +274,7 @@ pub struct TempLifetime { pub backwards_incompatible: Option<region::Scope>, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub enum ExprKind<'tcx> { /// `Scope`s are used to explicitly mark destruction scopes, /// and to track the `HirId` of the expressions within the scope. @@ -312,6 +315,14 @@ pub enum ExprKind<'tcx> { /// (e.g. `foo(a, b)` in `x.foo(a, b)`). fn_span: Span, }, + /// A use expression `x.use`. + ByUse { + /// The expression on which use is applied. + expr: ExprId, + /// The span of use, without the dot and receiver + /// (e.g. `use` in `x.use`). + span: Span, + }, /// A *non-overloaded* dereference. Deref { arg: ExprId, @@ -548,20 +559,20 @@ pub enum ExprKind<'tcx> { /// Represents the association of a field identifier and an expression. /// /// This is used in struct constructors. 
-#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct FieldExpr { pub name: FieldIdx, pub expr: ExprId, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct FruInfo<'tcx> { pub base: ExprId, pub field_types: Box<[Ty<'tcx>]>, } /// A `match` arm. -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub struct Arm<'tcx> { pub pattern: Box<Pat<'tcx>>, pub guard: Option<ExprId>, @@ -579,7 +590,7 @@ pub enum LogicalOp { Or, } -#[derive(Debug, HashStable)] +#[derive(Clone, Debug, HashStable)] pub enum InlineAsmOperand<'tcx> { In { reg: InlineAsmRegOrRegClass, @@ -616,13 +627,13 @@ pub enum InlineAsmOperand<'tcx> { }, } -#[derive(Debug, HashStable, TypeVisitable)] +#[derive(Clone, Debug, HashStable, TypeVisitable)] pub struct FieldPat<'tcx> { pub field: FieldIdx, pub pattern: Pat<'tcx>, } -#[derive(Debug, HashStable, TypeVisitable)] +#[derive(Clone, Debug, HashStable, TypeVisitable)] pub struct Pat<'tcx> { pub ty: Ty<'tcx>, pub span: Span, @@ -729,7 +740,7 @@ impl<'tcx> Pat<'tcx> { } } -#[derive(Debug, HashStable, TypeVisitable)] +#[derive(Clone, Debug, HashStable, TypeVisitable)] pub struct Ascription<'tcx> { pub annotation: CanonicalUserTypeAnnotation<'tcx>, /// Variance to use when relating the `user_ty` to the **type of the value being @@ -753,7 +764,7 @@ pub struct Ascription<'tcx> { pub variance: ty::Variance, } -#[derive(Debug, HashStable, TypeVisitable)] +#[derive(Clone, Debug, HashStable, TypeVisitable)] pub enum PatKind<'tcx> { /// A wildcard pattern: `_`. Wild, diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs index a9df4d1625b..d208692f4e7 100644 --- a/compiler/rustc_middle/src/thir/visit.rs +++ b/compiler/rustc_middle/src/thir/visit.rs @@ -59,6 +59,9 @@ pub fn walk_expr<'thir, 'tcx: 'thir, V: Visitor<'thir, 'tcx>>( visitor.visit_expr(&visitor.thir()[arg]); } } + ByUse { expr, span: _ } => { + visitor.visit_expr(&visitor.thir()[expr]); + } Deref { arg } => visitor.visit_expr(&visitor.thir()[arg]), Binary { lhs, rhs, op: _ } | LogicalOp { lhs, rhs, op: _ } => { visitor.visit_expr(&visitor.thir()[lhs]); diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs index d033ecc75db..7e6151745e2 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -266,7 +266,7 @@ pub enum ObligationCauseCode<'tcx> { }, /// Constant expressions must be sized. - ConstSized, + SizedConstOrStatic, /// `static` items must have `Sync` type. SharedStatic, @@ -397,9 +397,9 @@ pub enum ObligationCauseCode<'tcx> { RustCall, - /// Obligations to prove that a `std::ops::Drop` impl is not stronger than + /// Obligations to prove that a `Drop` or negative auto trait impl is not stronger than /// the ADT it's being implemented for. - DropImpl, + AlwaysApplicableImpl, /// Requirement for a `const N: Ty` to implement `Ty: ConstParamTy` ConstParam(Ty<'tcx>), @@ -980,12 +980,9 @@ pub enum CodegenObligationError { /// overflow bug, since I believe this is the only case /// where ambiguity can result. Ambiguity, - /// This can trigger when we probe for the source of a `'static` lifetime requirement - /// on a trait object: `impl Foo for dyn Trait {}` has an implicit `'static` bound. - /// This can also trigger when we have a global bound that is not actually satisfied, - /// but was included during typeck due to the trivial_bounds feature. 
+ /// This can trigger when we have a global bound that is not actually satisfied + /// due to trivial bounds. Unimplemented, - FulfillmentError, /// The selected impl has unconstrained generic parameters. This will emit an error /// during impl WF checking. UnconstrainedParam(ErrorGuaranteed), diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs index 9ce5373b031..8e54a9d487d 100644 --- a/compiler/rustc_middle/src/traits/specialization_graph.rs +++ b/compiler/rustc_middle/src/traits/specialization_graph.rs @@ -72,7 +72,7 @@ impl OverlapMode { .as_local() .into_iter() .flat_map(|local_def_id| { - tcx.hir().attrs(tcx.local_def_id_to_hir_id(local_def_id)) + tcx.hir_attrs(tcx.local_def_id_to_hir_id(local_def_id)) }) .find(|attr| attr.has_name(sym::rustc_strict_coherence)) .map(|attr| attr.span()); diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs index 3585f28b4a5..c1dc6a894ca 100644 --- a/compiler/rustc_middle/src/ty/adt.rs +++ b/compiler/rustc_middle/src/ty/adt.rs @@ -53,8 +53,6 @@ bitflags::bitflags! { const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8; /// Indicates whether the type is `UnsafeCell`. const IS_UNSAFE_CELL = 1 << 9; - /// Indicates whether the type is anonymous. - const IS_ANONYMOUS = 1 << 10; } } rustc_data_structures::external_bitflags_debug! { AdtFlags } @@ -402,12 +400,6 @@ impl<'tcx> AdtDef<'tcx> { self.flags().contains(AdtFlags::IS_MANUALLY_DROP) } - /// Returns `true` if this is an anonymous adt - #[inline] - pub fn is_anonymous(self) -> bool { - self.flags().contains(AdtFlags::IS_ANONYMOUS) - } - /// Returns `true` if this type has a destructor. pub fn has_dtor(self, tcx: TyCtxt<'tcx>) -> bool { self.destructor(tcx).is_some() diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs index 6309dd2e490..ce4c08aa485 100644 --- a/compiler/rustc_middle/src/ty/assoc.rs +++ b/compiler/rustc_middle/src/ty/assoc.rs @@ -3,7 +3,7 @@ use rustc_hir as hir; use rustc_hir::def::{DefKind, Namespace}; use rustc_hir::def_id::DefId; use rustc_macros::{Decodable, Encodable, HashStable}; -use rustc_span::{Ident, Symbol}; +use rustc_span::{Ident, Symbol, sym}; use super::{TyCtxt, Visibility}; use crate::ty; @@ -108,6 +108,24 @@ impl AssocItem { pub fn is_impl_trait_in_trait(&self) -> bool { self.opt_rpitit_info.is_some() } + + /// Returns true if: + /// - This trait associated item has the `#[type_const]` attribute, + /// - If it is in a trait impl, the item from the original trait has this attribute, or + /// - It is an inherent assoc const. + pub fn is_type_const_capable(&self, tcx: TyCtxt<'_>) -> bool { + if self.kind != ty::AssocKind::Const { + return false; + } + + let def_id = match (self.container, self.trait_item_def_id) { + (AssocItemContainer::Trait, _) => self.def_id, + (AssocItemContainer::Impl, Some(trait_item_did)) => trait_item_did, + // Inherent impl but this attr is only applied to trait assoc items. + (AssocItemContainer::Impl, None) => return true, + }; + tcx.has_attr(def_id, sym::type_const) + } } #[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash, Encodable, Decodable)] diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs index 3605f2402e7..703b6ce9247 100644 --- a/compiler/rustc_middle/src/ty/closure.rs +++ b/compiler/rustc_middle/src/ty/closure.rs @@ -51,6 +51,9 @@ pub enum UpvarCapture { /// depending on inference. ByValue, + /// Upvar is captured by use. 
This is true when the closure is labeled `use`. + ByUse, + /// Upvar is captured by reference. ByRef(BorrowKind), } @@ -178,7 +181,7 @@ impl<'tcx> CapturedPlace<'tcx> { pub fn is_by_ref(&self) -> bool { match self.info.capture_kind { - ty::UpvarCapture::ByValue => false, + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => false, ty::UpvarCapture::ByRef(..) => true, } } @@ -214,7 +217,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn closure_captures(self, def_id: LocalDefId) -> &'tcx [&'tcx ty::CapturedPlace<'tcx>] { if !self.is_closure_like(def_id.to_def_id()) { return &[]; - }; + } self.closure_typeinfo(def_id).captures } } @@ -293,7 +296,7 @@ pub struct CaptureInfo { pub fn place_to_string_for_capture<'tcx>(tcx: TyCtxt<'tcx>, place: &HirPlace<'tcx>) -> String { let mut curr_string: String = match place.base { - HirPlaceBase::Upvar(upvar_id) => tcx.hir().name(upvar_id.var_path.hir_id).to_string(), + HirPlaceBase::Upvar(upvar_id) => tcx.hir_name(upvar_id.var_path.hir_id).to_string(), _ => bug!("Capture_information should only contain upvars"), }; diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index 41958949836..fe42a224d9f 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -224,7 +224,7 @@ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Ty<'tcx> { }) } else { let tcx = decoder.interner(); - tcx.mk_ty_from_kind(rustc_type_ir::TyKind::decode(decoder)) + tcx.mk_ty_from_kind(ty::TyKind::decode(decoder)) } } } diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs index b72edc1c532..9f5e31d894c 100644 --- a/compiler/rustc_middle/src/ty/consts/int.rs +++ b/compiler/rustc_middle/src/ty/consts/int.rs @@ -118,7 +118,7 @@ impl std::fmt::Debug for ConstInt { impl IntoDiagArg for ConstInt { // FIXME this simply uses the Debug impl, but we could probably do better by converting both // to an inherent method that returns `Cow`. - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(format!("{self:?}").into()) } } @@ -408,7 +408,7 @@ macro_rules! from_x_for_scalar_int { fn from(u: $ty) -> Self { Self { data: u128::from(u), - size: NonZero::new(std::mem::size_of::<$ty>() as u8).unwrap(), + size: NonZero::new(size_of::<$ty>() as u8).unwrap(), } } } @@ -424,7 +424,7 @@ macro_rules! from_scalar_int_for_x { fn from(int: ScalarInt) -> Self { // The `unwrap` cannot fail because to_bits (if it succeeds) // is guaranteed to return a value that fits into the size. 
- int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>())) + int.to_bits(Size::from_bytes(size_of::<$ty>())) .try_into().unwrap() } } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 62a384af12f..a2472157d0e 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -324,11 +324,11 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.features() } - fn bound_coroutine_hidden_types( + fn coroutine_hidden_types( self, def_id: DefId, - ) -> impl IntoIterator<Item = ty::EarlyBinder<'tcx, ty::Binder<'tcx, Ty<'tcx>>>> { - self.bound_coroutine_hidden_types(def_id) + ) -> ty::EarlyBinder<'tcx, ty::Binder<'tcx, &'tcx ty::List<Ty<'tcx>>>> { + self.coroutine_hidden_types(def_id) } fn fn_sig(self, def_id: DefId) -> ty::EarlyBinder<'tcx, ty::PolyFnSig<'tcx>> { @@ -594,6 +594,10 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.trait_is_auto(trait_def_id) } + fn trait_is_coinductive(self, trait_def_id: DefId) -> bool { + self.trait_is_coinductive(trait_def_id) + } + fn trait_is_alias(self, trait_def_id: DefId) -> bool { self.trait_is_alias(trait_def_id) } @@ -812,32 +816,38 @@ pub struct CtxtInterners<'tcx> { impl<'tcx> CtxtInterners<'tcx> { fn new(arena: &'tcx WorkerLocal<Arena<'tcx>>) -> CtxtInterners<'tcx> { + // Default interner size - this value has been chosen empirically, and may need to be adjusted + // as the compiler evolves. + const N: usize = 2048; CtxtInterners { arena, - type_: Default::default(), - const_lists: Default::default(), - args: Default::default(), - type_lists: Default::default(), - region: Default::default(), - poly_existential_predicates: Default::default(), - canonical_var_infos: Default::default(), - predicate: Default::default(), - clauses: Default::default(), - projs: Default::default(), - place_elems: Default::default(), - const_: Default::default(), - pat: Default::default(), - const_allocation: Default::default(), - bound_variable_kinds: Default::default(), - layout: Default::default(), - adt_def: Default::default(), - external_constraints: Default::default(), - predefined_opaques_in_body: Default::default(), - fields: Default::default(), - local_def_ids: Default::default(), - captures: Default::default(), - offset_of: Default::default(), - valtree: Default::default(), + // The factors have been chosen by @FractalFir based on observed interner sizes, and local perf runs. + // To get the interner sizes, insert `eprintln` printing the size of the interner in functions like `intern_ty`. + // Bigger benchmarks tend to give more accurate ratios, so use something like `x perf eprintln --includes cargo`. 
+ type_: InternedSet::with_capacity(N * 16), + const_lists: InternedSet::with_capacity(N * 4), + args: InternedSet::with_capacity(N * 4), + type_lists: InternedSet::with_capacity(N * 4), + region: InternedSet::with_capacity(N * 4), + poly_existential_predicates: InternedSet::with_capacity(N / 4), + canonical_var_infos: InternedSet::with_capacity(N / 2), + predicate: InternedSet::with_capacity(N), + clauses: InternedSet::with_capacity(N), + projs: InternedSet::with_capacity(N * 4), + place_elems: InternedSet::with_capacity(N * 2), + const_: InternedSet::with_capacity(N * 2), + pat: InternedSet::with_capacity(N), + const_allocation: InternedSet::with_capacity(N), + bound_variable_kinds: InternedSet::with_capacity(N * 2), + layout: InternedSet::with_capacity(N), + adt_def: InternedSet::with_capacity(N), + external_constraints: InternedSet::with_capacity(N), + predefined_opaques_in_body: InternedSet::with_capacity(N), + fields: InternedSet::with_capacity(N * 4), + local_def_ids: InternedSet::with_capacity(N), + captures: InternedSet::with_capacity(N), + offset_of: InternedSet::with_capacity(N), + valtree: InternedSet::with_capacity(N), } } @@ -1289,7 +1299,7 @@ impl<'tcx> TyCtxtFeed<'tcx, LocalDefId> { ), bodies, }))); - self.feed_owner_id().hir_attrs(attrs); + self.feed_owner_id().hir_attr_map(attrs); } } @@ -1317,6 +1327,11 @@ pub struct TyCtxt<'tcx> { gcx: &'tcx GlobalCtxt<'tcx>, } +// Explicitly implement `DynSync` and `DynSend` for `TyCtxt` to short circuit trait resolution. Its +// fields are asserted to implement these traits below, so this is trivially safe, and it greatly +// speeds up compilation of this crate and its dependents. +unsafe impl DynSend for TyCtxt<'_> {} +unsafe impl DynSync for TyCtxt<'_> {} fn _assert_tcx_fields() { sync::assert_dyn_sync::<&'_ GlobalCtxt<'_>>(); sync::assert_dyn_send::<&'_ GlobalCtxt<'_>>(); @@ -1875,7 +1890,7 @@ impl<'tcx> TyCtxtAt<'tcx> { pub fn create_def( self, parent: LocalDefId, - name: Symbol, + name: Option<Symbol>, def_kind: DefKind, ) -> TyCtxtFeed<'tcx, LocalDefId> { let feed = self.tcx.create_def(parent, name, def_kind); @@ -1890,7 +1905,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn create_def( self, parent: LocalDefId, - name: Symbol, + name: Option<Symbol>, def_kind: DefKind, ) -> TyCtxtFeed<'tcx, LocalDefId> { let data = def_kind.def_path_data(name); @@ -2199,7 +2214,7 @@ impl<'tcx> TyCtxt<'tcx> { /// Returns the origin of the opaque type `def_id`. #[instrument(skip(self), level = "trace", ret)] pub fn local_opaque_ty_origin(self, def_id: LocalDefId) -> hir::OpaqueTyOrigin<LocalDefId> { - self.hir().expect_opaque_ty(def_id).origin + self.hir_expect_opaque_ty(def_id).origin } pub fn finish(self) { @@ -2321,8 +2336,8 @@ macro_rules! sty_debug_print { $(let mut $variant = total;)* for shard in tcx.interners.type_.lock_shards() { - let types = shard.keys(); - for &InternedInSet(t) in types { + let types = shard.iter(); + for &(InternedInSet(t), ()) in types { let variant = match t.internee { ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) | ty::Float(..)
| ty::Str | ty::Never => continue, @@ -3103,9 +3118,11 @@ impl<'tcx> TyCtxt<'tcx> { pub fn late_bound_vars(self, id: HirId) -> &'tcx List<ty::BoundVariableKind> { self.mk_bound_variable_kinds( - &self.late_bound_vars_map(id.owner).get(&id.local_id).cloned().unwrap_or_else(|| { - bug!("No bound vars found for {}", self.hir().node_to_string(id)) - }), + &self + .late_bound_vars_map(id.owner) + .get(&id.local_id) + .cloned() + .unwrap_or_else(|| bug!("No bound vars found for {}", self.hir_id_to_string(id))), ) } diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs index cb218a27e62..881381a5ee6 100644 --- a/compiler/rustc_middle/src/ty/diagnostics.rs +++ b/compiler/rustc_middle/src/ty/diagnostics.rs @@ -19,8 +19,16 @@ use crate::ty::{ TypeSuperVisitable, TypeVisitable, TypeVisitor, }; +impl IntoDiagArg for Ty<'_> { + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + ty::tls::with(|tcx| { + let ty = tcx.short_string(self, path); + rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(ty)) + }) + } +} + into_diag_arg_using_display! { - Ty<'_>, ty::Region<'_>, } diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 8c1991ddb36..a0e67929c52 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -213,10 +213,9 @@ impl<'tcx> Ty<'tcx> { } impl<'tcx> TyCtxt<'tcx> { - pub fn string_with_limit<'a, T>(self, p: T, length_limit: usize) -> String + pub fn string_with_limit<T>(self, p: T, length_limit: usize) -> String where - T: Print<'tcx, FmtPrinter<'a, 'tcx>> + Lift<TyCtxt<'tcx>> + Copy, - <T as Lift<TyCtxt<'tcx>>>::Lifted: Print<'tcx, FmtPrinter<'a, 'tcx>>, + T: Copy + for<'a, 'b> Lift<TyCtxt<'b>, Lifted: Print<'b, FmtPrinter<'a, 'b>>>, { let mut type_limit = 50; let regular = FmtPrinter::print_string(self, hir::def::Namespace::TypeNS, |cx| { @@ -253,10 +252,9 @@ impl<'tcx> TyCtxt<'tcx> { /// `tcx.short_string(ty, diag.long_ty_path())`. The diagnostic itself is the one that keeps /// the existence of a "long type" anywhere in the diagnostic, so the note telling the user /// where we wrote the file to is only printed once. 
- pub fn short_string<'a, T>(self, p: T, path: &mut Option<PathBuf>) -> String + pub fn short_string<T>(self, p: T, path: &mut Option<PathBuf>) -> String where - T: Print<'tcx, FmtPrinter<'a, 'tcx>> + Lift<TyCtxt<'tcx>> + Copy + Hash, - <T as Lift<TyCtxt<'tcx>>>::Lifted: Print<'tcx, FmtPrinter<'a, 'tcx>>, + T: Copy + Hash + for<'a, 'b> Lift<TyCtxt<'b>, Lifted: Print<'b, FmtPrinter<'a, 'b>>>, { let regular = FmtPrinter::print_string(self, hir::def::Namespace::TypeNS, |cx| { self.lift(p).expect("could not lift for printing").print(cx) diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index ec0498b168c..0b8f0e8cd41 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -220,13 +220,9 @@ impl FlagComputation { &ty::Pat(ty, pat) => { self.add_ty(ty); match *pat { - ty::PatternKind::Range { start, end, include_end: _ } => { - if let Some(start) = start { - self.add_const(start) - } - if let Some(end) = end { - self.add_const(end) - } + ty::PatternKind::Range { start, end } => { + self.add_const(start); + self.add_const(end); } } } diff --git a/compiler/rustc_middle/src/ty/generic_args.rs b/compiler/rustc_middle/src/ty/generic_args.rs index ed0b3059d75..f24910477dc 100644 --- a/compiler/rustc_middle/src/ty/generic_args.rs +++ b/compiler/rustc_middle/src/ty/generic_args.rs @@ -2,7 +2,6 @@ use core::intrinsics; use std::marker::PhantomData; -use std::mem; use std::num::NonZero; use std::ptr::NonNull; @@ -159,8 +158,8 @@ unsafe impl<'tcx> Sync for GenericArg<'tcx> where } impl<'tcx> IntoDiagArg for GenericArg<'tcx> { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(&mut None) } } @@ -176,17 +175,17 @@ impl<'tcx> GenericArgKind<'tcx> { let (tag, ptr) = match self { GenericArgKind::Lifetime(lt) => { // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0); + assert_eq!(align_of_val(&*lt.0.0) & TAG_MASK, 0); (REGION_TAG, NonNull::from(lt.0.0).cast()) } GenericArgKind::Type(ty) => { // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0); + assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0); (TYPE_TAG, NonNull::from(ty.0.0).cast()) } GenericArgKind::Const(ct) => { // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0); + assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0); (CONST_TAG, NonNull::from(ct.0.0).cast()) } }; diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs index 505c7278176..953ad62be0a 100644 --- a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs +++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs @@ -236,6 +236,11 @@ impl<'tcx> InhabitedPredicate<'tcx> { self.instantiate_opt(tcx, args).unwrap_or(self) } + /// Same as [`Self::instantiate`], but if there is no generics to + /// instantiate, returns `None`. This is useful because it lets us avoid + /// allocating a recursive copy of everything when the result is unchanged. + /// + /// Only used to implement `instantiate` itself. 
fn instantiate_opt(self, tcx: TyCtxt<'tcx>, args: ty::GenericArgsRef<'tcx>) -> Option<Self> { match self { Self::ConstIsZero(c) => { @@ -260,7 +265,10 @@ impl<'tcx> InhabitedPredicate<'tcx> { Some(InhabitedPredicate::True) => Some(InhabitedPredicate::True), Some(a) => Some(a.or(tcx, b.instantiate_opt(tcx, args).unwrap_or(b))), }, - _ => None, + Self::True | Self::False | Self::NotInModule(_) => None, + Self::OpaqueType(_) => { + bug!("unexpected OpaqueType in InhabitedPredicate"); + } } } } diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index e9c19331e4a..b99148f3368 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -111,8 +111,9 @@ pub enum InstanceKind<'tcx> { /// Dynamic dispatch to `<dyn Trait as Trait>::fn`. /// - /// This `InstanceKind` does not have callable MIR. Calls to `Virtual` instances must be - /// codegen'd as virtual calls through the vtable. + /// This `InstanceKind` may have a callable MIR as the default implementation. + /// Calls to `Virtual` instances must be codegen'd as virtual calls through the vtable. + /// *This means we might not know exactly what is being called.* /// /// If this is reified to a `fn` pointer, a `ReifyShim` is used (see `ReifyShim` above for more /// details on that). @@ -202,7 +203,7 @@ impl<'tcx> Instance<'tcx> { if !tcx.sess.opts.share_generics() // However, if the def_id is marked inline(never), then it's fine to just reuse the // upstream monomorphization. - && tcx.codegen_fn_attrs(self.def_id()).inline != rustc_attr_parsing::InlineAttr::Never + && tcx.codegen_fn_attrs(self.def_id()).inline != rustc_attr_data_structures::InlineAttr::Never { return None; } diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index eb14ed20fba..ebb6a8c08a5 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -1,20 +1,17 @@ -use std::num::NonZero; use std::ops::Bound; use std::{cmp, fmt}; use rustc_abi::{ - AddressSpace, Align, BackendRepr, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData, - PointeeInfo, PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, + AddressSpace, Align, ExternAbi, FieldIdx, FieldsShape, HasDataLayout, LayoutData, PointeeInfo, + PointerKind, Primitive, ReprOptions, Scalar, Size, TagEncoding, TargetDataLayout, TyAbiInterface, VariantIdx, Variants, }; use rustc_error_messages::DiagMessage; use rustc_errors::{ Diag, DiagArgValue, DiagCtxtHandle, Diagnostic, EmissionGuarantee, IntoDiagArg, Level, }; -use rustc_hashes::Hash64; use rustc_hir::LangItem; use rustc_hir::def_id::DefId; -use rustc_index::IndexVec; use rustc_macros::{HashStable, TyDecodable, TyEncodable, extension}; use rustc_session::config::OptLevel; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym}; @@ -185,12 +182,7 @@ pub const WIDE_PTR_ADDR: usize = 0; /// - For a slice, this is the length. pub const WIDE_PTR_EXTRA: usize = 1; -/// The maximum supported number of lanes in a SIMD vector. -/// -/// This value is selected based on backend support: -/// * LLVM does not appear to have a vector width limit. -/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer. 
-pub const MAX_SIMD_LANES: u64 = 1 << 0xF; +pub const MAX_SIMD_LANES: u64 = rustc_abi::MAX_SIMD_LANES; /// Used in `check_validity_requirement` to indicate the kind of initialization /// that is checked to be valid @@ -315,8 +307,8 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } impl<'tcx> IntoDiagArg for LayoutError<'tcx> { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(&mut None) } } @@ -762,11 +754,9 @@ where variant_index: VariantIdx, ) -> TyAndLayout<'tcx> { let layout = match this.variants { - Variants::Single { index } - // If all variants but one are uninhabited, the variant layout is the enum layout. - if index == variant_index => - { - this.layout + // If all variants but one are uninhabited, the variant layout is the enum layout. + Variants::Single { index } if index == variant_index => { + return this; } Variants::Single { .. } | Variants::Empty => { @@ -783,29 +773,18 @@ where } let fields = match this.ty.kind() { - ty::Adt(def, _) if def.variants().is_empty() => - bug!("for_variant called on zero-variant enum {}", this.ty), + ty::Adt(def, _) if def.variants().is_empty() => { + bug!("for_variant called on zero-variant enum {}", this.ty) + } ty::Adt(def, _) => def.variant(variant_index).fields.len(), _ => bug!("`ty_and_layout_for_variant` on unexpected type {}", this.ty), }; - tcx.mk_layout(LayoutData { - variants: Variants::Single { index: variant_index }, - fields: match NonZero::new(fields) { - Some(fields) => FieldsShape::Union(fields), - None => FieldsShape::Arbitrary { offsets: IndexVec::new(), memory_index: IndexVec::new() }, - }, - backend_repr: BackendRepr::Memory { sized: true }, - largest_niche: None, - uninhabited: true, - align: tcx.data_layout.i8_align, - size: Size::ZERO, - max_repr_align: None, - unadjusted_abi_align: tcx.data_layout.i8_align.abi, - randomization_seed: Hash64::ZERO, - }) + tcx.mk_layout(LayoutData::uninhabited_variant(cx, variant_index, fields)) } - Variants::Multiple { ref variants, .. } => cx.tcx().mk_layout(variants[variant_index].clone()), + Variants::Multiple { ref variants, .. } => { + cx.tcx().mk_layout(variants[variant_index].clone()) + } }; assert_eq!(*layout.variants(), Variants::Single { index: variant_index }); diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs index 6718493f6b3..0fd370a5619 100644 --- a/compiler/rustc_middle/src/ty/list.rs +++ b/compiler/rustc_middle/src/ty/list.rs @@ -93,7 +93,7 @@ impl<H, T> RawList<H, T> { T: Copy, { assert!(!mem::needs_drop::<T>()); - assert!(mem::size_of::<T>() != 0); + assert!(size_of::<T>() != 0); assert!(!slice.is_empty()); let (layout, _offset) = @@ -155,7 +155,7 @@ macro_rules! impl_list_empty { static EMPTY: ListSkeleton<$header_ty, MaxAlign> = ListSkeleton { header: $header_init, len: 0, data: [] }; - assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>()); + assert!(align_of::<T>() <= align_of::<MaxAlign>()); // SAFETY: `EMPTY` is sufficiently aligned to be an empty list for all // types with `align_of(T) <= align_of(MaxAlign)`, which we checked above. 
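Editor's note: the `into_diag_arg` hunks above (for `Ty`, `GenericArg`, and `LayoutError`) all move to a signature that threads a shared `&mut Option<PathBuf>` through diagnostic-argument rendering, so that a "long type" is written to a file at most once per diagnostic. Below is a minimal, self-contained sketch of that pattern in plain Rust, not rustc code; the trait shape, the length limit, and the file path are illustrative assumptions only.

```rust
// Sketch: thread one `&mut Option<PathBuf>` slot through all diagnostic arguments,
// so the first over-long argument records a file path and later ones reuse it.
use std::path::PathBuf;

trait IntoDiagArg {
    fn into_diag_arg(self, long_path: &mut Option<PathBuf>) -> String;
}

struct LongType(String);

impl IntoDiagArg for LongType {
    fn into_diag_arg(self, long_path: &mut Option<PathBuf>) -> String {
        const LIMIT: usize = 16;
        if self.0.len() <= LIMIT {
            return self.0;
        }
        // Record the file path only once per diagnostic; the diagnostic itself
        // would then print the "full type written to ..." note at most once.
        let path = long_path.get_or_insert_with(|| PathBuf::from("/tmp/long_type.txt"));
        // (A real implementation would also write `self.0` to `path` here.)
        format!("{}... (see {})", &self.0[..LIMIT], path.display())
    }
}

fn main() {
    let mut long_ty_path: Option<PathBuf> = None;
    let a = LongType("Vec<Result<Box<dyn Iterator<Item = u8>>, std::io::Error>>".to_string())
        .into_diag_arg(&mut long_ty_path);
    let b = LongType("u32".to_string()).into_diag_arg(&mut long_ty_path);
    println!("{a}\n{b}");
}
```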
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 9208c2a65a1..ad9d32fd6c1 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -17,7 +17,7 @@ use std::hash::{Hash, Hasher}; use std::marker::PhantomData; use std::num::NonZero; use std::ptr::NonNull; -use std::{fmt, mem, str}; +use std::{fmt, str}; pub use adt::*; pub use assoc::*; @@ -27,7 +27,8 @@ pub use intrinsic::IntrinsicDef; use rustc_abi::{Align, FieldIdx, Integer, IntegerType, ReprFlags, ReprOptions, VariantIdx}; use rustc_ast::expand::StrippedCfgItem; use rustc_ast::node_id::NodeMap; -use rustc_attr_parsing::AttributeKind; +pub use rustc_ast_ir::{Movability, Mutability, try_visit}; +use rustc_attr_data_structures::AttributeKind; use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::intern::Interned; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; @@ -51,7 +52,7 @@ pub use rustc_type_ir::relate::VarianceDiagInfo; pub use rustc_type_ir::*; use tracing::{debug, instrument}; pub use vtable::*; -use {rustc_ast as ast, rustc_attr_parsing as attr, rustc_hir as hir}; +use {rustc_ast as ast, rustc_attr_data_structures as attr, rustc_hir as hir}; pub use self::closure::{ BorrowKind, CAPTURE_STRUCT_LOCAL, CaptureInfo, CapturedPlace, ClosureTypeInfo, @@ -122,6 +123,7 @@ pub mod normalize_erasing_regions; pub mod pattern; pub mod print; pub mod relate; +pub mod significant_drop_order; pub mod trait_def; pub mod util; pub mod visit; @@ -636,12 +638,12 @@ impl<'tcx> TermKind<'tcx> { let (tag, ptr) = match self { TermKind::Ty(ty) => { // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0); + assert_eq!(align_of_val(&*ty.0.0) & TAG_MASK, 0); (TYPE_TAG, NonNull::from(ty.0.0).cast()) } TermKind::Const(ct) => { // Ensure we can use the tag bits. - assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0); + assert_eq!(align_of_val(&*ct.0.0) & TAG_MASK, 0); (CONST_TAG, NonNull::from(ct.0.0).cast()) } }; @@ -991,12 +993,6 @@ impl<'tcx> ParamEnv<'tcx> { ParamEnv { caller_bounds } } - /// Returns this same environment but with no caller bounds. - #[inline] - pub fn without_caller_bounds(self) -> Self { - Self::new(ListWithCachedTypeInfo::empty()) - } - /// Creates a pair of param-env and value for use in queries. pub fn and<T: TypeVisitable<TyCtxt<'tcx>>>(self, value: T) -> ParamEnvAnd<'tcx, T> { ParamEnvAnd { param_env: self, value } @@ -1155,7 +1151,7 @@ pub struct VariantDef { /// `DefId` that identifies the variant's constructor. /// If this variant is a struct variant, then this is `None`. pub ctor: Option<(CtorKind, DefId)>, - /// Variant or struct name, maybe empty for anonymous adt (struct or union). + /// Variant or struct name. pub name: Symbol, /// Discriminant of this variant. pub discr: VariantDiscr, @@ -1184,23 +1180,17 @@ impl VariantDef { /// /// If someone speeds up attribute loading to not be a performance concern, they can /// remove this hack and use the constructor `DefId` everywhere. 
+ #[instrument(level = "debug")] pub fn new( name: Symbol, variant_did: Option<DefId>, ctor: Option<(CtorKind, DefId)>, discr: VariantDiscr, fields: IndexVec<FieldIdx, FieldDef>, - adt_kind: AdtKind, parent_did: DefId, recover_tainted: Option<ErrorGuaranteed>, is_field_list_non_exhaustive: bool, ) -> Self { - debug!( - "VariantDef::new(name = {:?}, variant_did = {:?}, ctor = {:?}, discr = {:?}, - fields = {:?}, adt_kind = {:?}, parent_did = {:?})", - name, variant_did, ctor, discr, fields, adt_kind, parent_did, - ); - let mut flags = VariantFlags::NO_VARIANT_FLAGS; if is_field_list_non_exhaustive { flags |= VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE; @@ -1425,39 +1415,6 @@ pub enum ImplOverlapKind { /// Whether or not the impl is permitted due to the trait being a `#[marker]` trait marker: bool, }, - /// These impls are allowed to overlap, but that raises an - /// issue #33140 future-compatibility warning (tracked in #56484). - /// - /// Some background: in Rust 1.0, the trait-object types `Send + Sync` (today's - /// `dyn Send + Sync`) and `Sync + Send` (now `dyn Sync + Send`) were different. - /// - /// The widely-used version 0.1.0 of the crate `traitobject` had accidentally relied on - /// that difference, doing what reduces to the following set of impls: - /// - /// ```compile_fail,(E0119) - /// trait Trait {} - /// impl Trait for dyn Send + Sync {} - /// impl Trait for dyn Sync + Send {} - /// ``` - /// - /// Obviously, once we made these types be identical, that code causes a coherence - /// error and a fairly big headache for us. However, luckily for us, the trait - /// `Trait` used in this case is basically a marker trait, and therefore having - /// overlapping impls for it is sound. - /// - /// To handle this, we basically regard the trait as a marker trait, with an additional - /// future-compatibility warning. To avoid accidentally "stabilizing" this feature, - /// it has the following restrictions: - /// - /// 1. The trait must indeed be a marker-like trait (i.e., no items), and must be - /// positive impls. - /// 2. The trait-ref of both impls must be equal. - /// 3. The trait-ref of both impls must be a trait object type consisting only of - /// marker traits. - /// 4. Neither of the impls can have any where-clauses. - /// - /// Once `traitobject` 0.1.0 is no longer an active concern, this hack can be removed. - FutureCompatOrderDepTraitObjects, } /// Useful source information about where a desugared associated type for an @@ -1635,7 +1592,7 @@ impl<'tcx> TyCtxt<'tcx> { }) } - /// Returns `true` if the impls are the same polarity and the trait either + /// Returns `Some` if the impls are the same polarity and the trait either /// has no items or is annotated `#[marker]` and prevents item overrides. #[instrument(level = "debug", skip(self), ret)] pub fn impls_are_allowed_to_overlap( @@ -1676,18 +1633,6 @@ impl<'tcx> TyCtxt<'tcx> { return Some(ImplOverlapKind::Permitted { marker: true }); } - if let Some(self_ty1) = - self.self_ty_of_trait_impl_enabling_order_dep_trait_object_hack(def_id1) - && let Some(self_ty2) = - self.self_ty_of_trait_impl_enabling_order_dep_trait_object_hack(def_id2) - { - if self_ty1 == self_ty2 { - return Some(ImplOverlapKind::FutureCompatOrderDepTraitObjects); - } else { - debug!("found {self_ty1:?} != {self_ty2:?}"); - } - } - None } @@ -1751,7 +1696,7 @@ impl<'tcx> TyCtxt<'tcx> { // FIXME(@lcnr): Remove this function. 
pub fn get_attrs_unchecked(self, did: DefId) -> &'tcx [hir::Attribute] { if let Some(did) = did.as_local() { - self.hir().attrs(self.local_def_id_to_hir_id(did)) + self.hir_attrs(self.local_def_id_to_hir_id(did)) } else { self.attrs_for_def(did) } @@ -1768,14 +1713,14 @@ impl<'tcx> TyCtxt<'tcx> { /// Gets all attributes. /// - /// To see if an item has a specific attribute, you should use [`rustc_attr_parsing::find_attr!`] so you can use matching. + /// To see if an item has a specific attribute, you should use [`rustc_attr_data_structures::find_attr!`] so you can use matching. pub fn get_all_attrs( self, did: impl Into<DefId>, ) -> impl Iterator<Item = &'tcx hir::Attribute> { let did: DefId = did.into(); if let Some(did) = did.as_local() { - self.hir().attrs(self.local_def_id_to_hir_id(did)).iter() + self.hir_attrs(self.local_def_id_to_hir_id(did)).iter() } else { self.attrs_for_def(did).iter() } @@ -1819,7 +1764,7 @@ impl<'tcx> TyCtxt<'tcx> { ) -> impl Iterator<Item = &'tcx hir::Attribute> { let filter_fn = move |a: &&hir::Attribute| a.path_matches(attr); if let Some(did) = did.as_local() { - self.hir().attrs(self.local_def_id_to_hir_id(did)).iter().filter(filter_fn) + self.hir_attrs(self.local_def_id_to_hir_id(did)).iter().filter(filter_fn) } else { self.attrs_for_def(did).iter().filter(filter_fn) } diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs index 8eaf0a58f70..71fc38cb7ed 100644 --- a/compiler/rustc_middle/src/ty/parameterized.rs +++ b/compiler/rustc_middle/src/ty/parameterized.rs @@ -80,10 +80,10 @@ trivially_parameterized_over_tcx! { rustc_ast::Attribute, rustc_ast::DelimArgs, rustc_ast::expand::StrippedCfgItem<rustc_hir::def_id::DefIndex>, - rustc_attr_parsing::ConstStability, - rustc_attr_parsing::DefaultBodyStability, - rustc_attr_parsing::Deprecation, - rustc_attr_parsing::Stability, + rustc_attr_data_structures::ConstStability, + rustc_attr_data_structures::DefaultBodyStability, + rustc_attr_data_structures::Deprecation, + rustc_attr_data_structures::Stability, rustc_hir::Constness, rustc_hir::Defaultness, rustc_hir::Safety, diff --git a/compiler/rustc_middle/src/ty/pattern.rs b/compiler/rustc_middle/src/ty/pattern.rs index e604aedd05e..4cad1ab2099 100644 --- a/compiler/rustc_middle/src/ty/pattern.rs +++ b/compiler/rustc_middle/src/ty/pattern.rs @@ -26,18 +26,30 @@ impl<'tcx> fmt::Debug for Pattern<'tcx> { impl<'tcx> fmt::Debug for PatternKind<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { - PatternKind::Range { start, end, include_end } => { - if let Some(start) = start { - write!(f, "{start}")?; + PatternKind::Range { start, end } => { + write!(f, "{start}")?; + + if let Some(c) = end.try_to_value() { + let end = c.valtree.unwrap_leaf(); + let size = end.size(); + let max = match c.ty.kind() { + ty::Int(_) => { + Some(ty::ScalarInt::truncate_from_int(size.signed_int_max(), size)) + } + ty::Uint(_) => { + Some(ty::ScalarInt::truncate_from_uint(size.unsigned_int_max(), size)) + } + ty::Char => Some(ty::ScalarInt::truncate_from_uint(char::MAX, size)), + _ => None, + }; + if let Some((max, _)) = max + && end == max + { + return write!(f, ".."); + } } - write!(f, "..")?; - if include_end { - write!(f, "=")?; - } - if let Some(end) = end { - write!(f, "{end}")?; - } - Ok(()) + + write!(f, "..={end}") } } } @@ -46,5 +58,5 @@ impl<'tcx> fmt::Debug for PatternKind<'tcx> { #[derive(Clone, PartialEq, Eq, Hash)] #[derive(HashStable, TyEncodable, TyDecodable, TypeVisitable, TypeFoldable)] pub 
enum PatternKind<'tcx> { - Range { start: Option<ty::Const<'tcx>>, end: Option<ty::Const<'tcx>>, include_end: bool }, + Range { start: ty::Const<'tcx>, end: ty::Const<'tcx> }, } diff --git a/compiler/rustc_middle/src/ty/predicate.rs b/compiler/rustc_middle/src/ty/predicate.rs index 553de83dfcb..1674ca4cfc5 100644 --- a/compiler/rustc_middle/src/ty/predicate.rs +++ b/compiler/rustc_middle/src/ty/predicate.rs @@ -4,7 +4,6 @@ use rustc_data_structures::intern::Interned; use rustc_hir::def_id::DefId; use rustc_macros::{HashStable, extension}; use rustc_type_ir as ir; -use tracing::instrument; use crate::ty::{ self, DebruijnIndex, EarlyBinder, PredicatePolarity, Ty, TyCtxt, TypeFlags, Upcast, UpcastFrom, @@ -51,10 +50,6 @@ impl<'tcx> rustc_type_ir::inherent::Predicate<TyCtxt<'tcx>> for Predicate<'tcx> self.as_clause() } - fn is_coinductive(self, interner: TyCtxt<'tcx>) -> bool { - self.is_coinductive(interner) - } - fn allow_normalization(self) -> bool { self.allow_normalization() } @@ -119,17 +114,6 @@ impl<'tcx> Predicate<'tcx> { Some(tcx.mk_predicate(kind)) } - #[instrument(level = "debug", skip(tcx), ret)] - pub fn is_coinductive(self, tcx: TyCtxt<'tcx>) -> bool { - match self.kind().skip_binder() { - ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => { - tcx.trait_is_coinductive(data.def_id()) - } - ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => true, - _ => false, - } - } - /// Whether this projection can be soundly normalized. /// /// Wf predicates must not be normalized, as normalization @@ -158,15 +142,21 @@ impl<'tcx> Predicate<'tcx> { } } -impl rustc_errors::IntoDiagArg for Predicate<'_> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(self.to_string())) +impl<'tcx> rustc_errors::IntoDiagArg for Predicate<'tcx> { + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + ty::tls::with(|tcx| { + let pred = tcx.short_string(self, path); + rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(pred)) + }) } } -impl rustc_errors::IntoDiagArg for Clause<'_> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(self.to_string())) +impl<'tcx> rustc_errors::IntoDiagArg for Clause<'tcx> { + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + ty::tls::with(|tcx| { + let clause = tcx.short_string(self, path); + rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(clause)) + }) } } diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index ed0839f47e6..2a3a7705b7b 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -133,6 +133,20 @@ pub macro with_no_queries($e:expr) {{ )) }} +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum WrapBinderMode { + ForAll, + Unsafe, +} +impl WrapBinderMode { + pub fn start_str(self) -> &'static str { + match self { + WrapBinderMode::ForAll => "for<", + WrapBinderMode::Unsafe => "unsafe<", + } + } +} + /// The "region highlights" are used to control region printing during /// specific error messages. When a "region highlight" is enabled, it /// gives an alternate way to print specific regions. 
For now, we @@ -219,7 +233,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { self.print_def_path(def_id, args) } - fn in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> + fn print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> where T: Print<'tcx, Self> + TypeFoldable<TyCtxt<'tcx>>, { @@ -229,6 +243,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { fn wrap_binder<T, F: FnOnce(&T, &mut Self) -> Result<(), fmt::Error>>( &mut self, value: &ty::Binder<'tcx, T>, + _mode: WrapBinderMode, f: F, ) -> Result<(), PrintError> where @@ -554,7 +569,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { // the children of the visible parent (as was done when computing // `visible_parent_map`), looking for the specific child we currently have and then // have access to the re-exported name. - DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => { + DefPathData::TypeNs(Some(ref mut name)) if Some(visible_parent) != actual_parent => { // Item might be re-exported several times, but filter for the one // that's public and whose identifier isn't `_`. let reexport = self @@ -575,7 +590,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } // Re-exported `extern crate` (#43189). DefPathData::CrateRoot => { - data = DefPathData::TypeNs(self.tcx().crate_name(def_id.krate)); + data = DefPathData::TypeNs(Some(self.tcx().crate_name(def_id.krate))); } _ => {} } @@ -703,8 +718,9 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { } ty::FnPtr(ref sig_tys, hdr) => p!(print(sig_tys.with(hdr))), ty::UnsafeBinder(ref bound_ty) => { - // FIXME(unsafe_binders): Make this print `unsafe<>` rather than `for<>`. - self.wrap_binder(bound_ty, |ty, cx| cx.pretty_print_type(*ty))?; + self.wrap_binder(bound_ty, WrapBinderMode::Unsafe, |ty, cx| { + cx.pretty_print_type(*ty) + })?; } ty::Infer(infer_ty) => { if self.should_print_verbose() { @@ -1067,29 +1083,33 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { }; if let Some(return_ty) = entry.return_ty { - self.wrap_binder(&bound_args_and_self_ty, |(args, _), cx| { - define_scoped_cx!(cx); - p!(write("{}", tcx.item_name(trait_def_id))); - p!("("); - - for (idx, ty) in args.iter().enumerate() { - if idx > 0 { - p!(", "); + self.wrap_binder( + &bound_args_and_self_ty, + WrapBinderMode::ForAll, + |(args, _), cx| { + define_scoped_cx!(cx); + p!(write("{}", tcx.item_name(trait_def_id))); + p!("("); + + for (idx, ty) in args.iter().enumerate() { + if idx > 0 { + p!(", "); + } + p!(print(ty)); } - p!(print(ty)); - } - p!(")"); - if let Some(ty) = return_ty.skip_binder().as_type() { - if !ty.is_unit() { - p!(" -> ", print(return_ty)); + p!(")"); + if let Some(ty) = return_ty.skip_binder().as_type() { + if !ty.is_unit() { + p!(" -> ", print(return_ty)); + } } - } - p!(write("{}", if paren_needed { ")" } else { "" })); + p!(write("{}", if paren_needed { ")" } else { "" })); - first = false; - Ok(()) - })?; + first = false; + Ok(()) + }, + )?; } else { // Otherwise, render this like a regular trait. 
traits.insert( @@ -1110,7 +1130,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { for (trait_pred, assoc_items) in traits { write!(self, "{}", if first { "" } else { " + " })?; - self.wrap_binder(&trait_pred, |trait_pred, cx| { + self.wrap_binder(&trait_pred, WrapBinderMode::ForAll, |trait_pred, cx| { define_scoped_cx!(cx); if trait_pred.polarity == ty::PredicatePolarity::Negative { @@ -1302,7 +1322,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { let mut first = true; if let Some(bound_principal) = predicates.principal() { - self.wrap_binder(&bound_principal, |principal, cx| { + self.wrap_binder(&bound_principal, WrapBinderMode::ForAll, |principal, cx| { define_scoped_cx!(cx); p!(print_def_path(principal.def_id, &[])); @@ -1516,10 +1536,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { ty::ExprKind::Binop(op) => { let (_, _, c1, c2) = expr.binop_args(); - let precedence = |binop: crate::mir::BinOp| { - use rustc_ast::util::parser::AssocOp; - AssocOp::from_ast_binop(binop.to_hir_binop()).precedence() - }; + let precedence = |binop: crate::mir::BinOp| binop.to_hir_binop().precedence(); let op_precedence = precedence(op); let formatted_op = op.to_hir_binop().as_str(); let (lhs_parenthesized, rhs_parenthesized) = match (c1.kind(), c2.kind()) { @@ -1930,7 +1947,7 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { let kind = closure.kind_ty().to_opt_closure_kind().unwrap_or(ty::ClosureKind::Fn); write!(self, "impl ")?; - self.wrap_binder(&sig, |sig, cx| { + self.wrap_binder(&sig, WrapBinderMode::ForAll, |sig, cx| { define_scoped_cx!(cx); p!(write("{kind}(")); @@ -2370,22 +2387,23 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> { Ok(()) } - fn in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> + fn print_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), PrintError> where T: Print<'tcx, Self> + TypeFoldable<TyCtxt<'tcx>>, { - self.pretty_in_binder(value) + self.pretty_print_in_binder(value) } fn wrap_binder<T, C: FnOnce(&T, &mut Self) -> Result<(), PrintError>>( &mut self, value: &ty::Binder<'tcx, T>, + mode: WrapBinderMode, f: C, ) -> Result<(), PrintError> where T: TypeFoldable<TyCtxt<'tcx>>, { - self.pretty_wrap_binder(value, f) + self.pretty_wrap_binder(value, mode, f) } fn typed_value( @@ -2635,6 +2653,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { pub fn name_all_regions<T>( &mut self, value: &ty::Binder<'tcx, T>, + mode: WrapBinderMode, ) -> Result<(T, UnordMap<ty::BoundRegion, ty::Region<'tcx>>), fmt::Error> where T: TypeFoldable<TyCtxt<'tcx>>, @@ -2708,9 +2727,13 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { // anyways. let (new_value, map) = if self.should_print_verbose() { for var in value.bound_vars().iter() { - start_or_continue(self, "for<", ", "); + start_or_continue(self, mode.start_str(), ", "); write!(self, "{var:?}")?; } + // Unconditionally render `unsafe<>`. + if value.bound_vars().is_empty() && mode == WrapBinderMode::Unsafe { + start_or_continue(self, mode.start_str(), ""); + } start_or_continue(self, "", "> "); (value.clone().skip_binder(), UnordMap::default()) } else { @@ -2775,8 +2798,9 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { } }; - if !trim_path { - start_or_continue(self, "for<", ", "); + // Unconditionally render `unsafe<>`. 
+ if !trim_path || mode == WrapBinderMode::Unsafe { + start_or_continue(self, mode.start_str(), ", "); do_continue(self, name); } ty::Region::new_bound(tcx, ty::INNERMOST, ty::BoundRegion { var: br.var, kind }) @@ -2789,9 +2813,12 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { }; let new_value = value.clone().skip_binder().fold_with(&mut folder); let region_map = folder.region_map; - if !trim_path { - start_or_continue(self, "", "> "); + + if mode == WrapBinderMode::Unsafe && region_map.is_empty() { + start_or_continue(self, mode.start_str(), ""); } + start_or_continue(self, "", "> "); + (new_value, region_map) }; @@ -2800,12 +2827,15 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { Ok((new_value, map)) } - pub fn pretty_in_binder<T>(&mut self, value: &ty::Binder<'tcx, T>) -> Result<(), fmt::Error> + pub fn pretty_print_in_binder<T>( + &mut self, + value: &ty::Binder<'tcx, T>, + ) -> Result<(), fmt::Error> where T: Print<'tcx, Self> + TypeFoldable<TyCtxt<'tcx>>, { let old_region_index = self.region_index; - let (new_value, _) = self.name_all_regions(value)?; + let (new_value, _) = self.name_all_regions(value, WrapBinderMode::ForAll)?; new_value.print(self)?; self.region_index = old_region_index; self.binder_depth -= 1; @@ -2815,13 +2845,14 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { pub fn pretty_wrap_binder<T, C: FnOnce(&T, &mut Self) -> Result<(), fmt::Error>>( &mut self, value: &ty::Binder<'tcx, T>, + mode: WrapBinderMode, f: C, ) -> Result<(), fmt::Error> where T: TypeFoldable<TyCtxt<'tcx>>, { let old_region_index = self.region_index; - let (new_value, _) = self.name_all_regions(value)?; + let (new_value, _) = self.name_all_regions(value, mode)?; f(&new_value, self)?; self.region_index = old_region_index; self.binder_depth -= 1; @@ -2880,7 +2911,7 @@ where T: Print<'tcx, P> + TypeFoldable<TyCtxt<'tcx>>, { fn print(&self, cx: &mut P) -> Result<(), PrintError> { - cx.in_binder(self) + cx.print_in_binder(self) } } @@ -2898,12 +2929,15 @@ where /// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only /// the trait path. That is, it will print `Trait<U>` instead of /// `<T as Trait<U>>`. -#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)] +#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift, Hash)] pub struct TraitRefPrintOnlyTraitPath<'tcx>(ty::TraitRef<'tcx>); impl<'tcx> rustc_errors::IntoDiagArg for TraitRefPrintOnlyTraitPath<'tcx> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + ty::tls::with(|tcx| { + let trait_ref = tcx.short_string(self, path); + rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(trait_ref)) + }) } } @@ -2915,12 +2949,15 @@ impl<'tcx> fmt::Debug for TraitRefPrintOnlyTraitPath<'tcx> { /// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only /// the trait path, and additionally tries to "sugar" `Fn(...)` trait bounds. 
-#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)] +#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift, Hash)] pub struct TraitRefPrintSugared<'tcx>(ty::TraitRef<'tcx>); impl<'tcx> rustc_errors::IntoDiagArg for TraitRefPrintSugared<'tcx> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + ty::tls::with(|tcx| { + let trait_ref = tcx.short_string(self, path); + rustc_errors::DiagArgValue::Str(std::borrow::Cow::Owned(trait_ref)) + }) } } diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 839c1c346a4..b1dfcb80bde 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -51,22 +51,12 @@ impl<'tcx> Relate<TyCtxt<'tcx>> for ty::Pattern<'tcx> { ) -> RelateResult<'tcx, Self> { match (&*a, &*b) { ( - &ty::PatternKind::Range { start: start_a, end: end_a, include_end: inc_a }, - &ty::PatternKind::Range { start: start_b, end: end_b, include_end: inc_b }, + &ty::PatternKind::Range { start: start_a, end: end_a }, + &ty::PatternKind::Range { start: start_b, end: end_b }, ) => { - // FIXME(pattern_types): make equal patterns equal (`0..=` is the same as `..=`). - let mut relate_opt_const = |a, b| match (a, b) { - (None, None) => Ok(None), - (Some(a), Some(b)) => relation.relate(a, b).map(Some), - // FIXME(pattern_types): report a better error - _ => Err(TypeError::Mismatch), - }; - let start = relate_opt_const(start_a, start_b)?; - let end = relate_opt_const(end_a, end_b)?; - if inc_a != inc_b { - todo!() - } - Ok(relation.cx().mk_pat(ty::PatternKind::Range { start, end, include_end: inc_a })) + let start = relation.relate(start_a, start_b)?; + let end = relation.relate(end_a, end_b)?; + Ok(relation.cx().mk_pat(ty::PatternKind::Range { start, end })) } } } diff --git a/compiler/rustc_middle/src/ty/significant_drop_order.rs b/compiler/rustc_middle/src/ty/significant_drop_order.rs new file mode 100644 index 00000000000..2d9e0331451 --- /dev/null +++ b/compiler/rustc_middle/src/ty/significant_drop_order.rs @@ -0,0 +1,174 @@ +use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::unord::UnordSet; +use rustc_hir::def_id::DefId; +use rustc_span::Span; +use smallvec::{SmallVec, smallvec}; +use tracing::{debug, instrument}; + +use crate::ty::{self, Ty, TyCtxt}; + +/// An additional filter to exclude well-known types from the ecosystem +/// because their drops are trivial. +/// This returns additional types to check if the drops are delegated to those. +/// A typical example is `hashbrown::HashMap<K, V>`, whose drop is delegated to `K` and `V`. +fn true_significant_drop_ty<'tcx>( + tcx: TyCtxt<'tcx>, + ty: Ty<'tcx>, +) -> Option<SmallVec<[Ty<'tcx>; 2]>> { + if let ty::Adt(def, args) = ty.kind() { + let mut did = def.did(); + let mut name_rev = vec![]; + loop { + let key = tcx.def_key(did); + + match key.disambiguated_data.data { + rustc_hir::definitions::DefPathData::CrateRoot => { + name_rev.push(tcx.crate_name(did.krate)); + } + rustc_hir::definitions::DefPathData::TypeNs(symbol) => { + name_rev.push(symbol.unwrap()); + } + _ => return None, + } + if let Some(parent) = key.parent { + did = DefId { krate: did.krate, index: parent }; + } else { + break; + } + } + let name_str: Vec<_> = name_rev.iter().rev().map(|x| x.as_str()).collect(); + debug!(?name_str); + match name_str[..] { + // These are the types from Rust core ecosystem + ["syn" | "proc_macro2", ..] 
+ | ["core" | "std", "task", "LocalWaker" | "Waker"] + | ["core" | "std", "task", "wake", "LocalWaker" | "Waker"] => Some(smallvec![]), + // These are important types from Rust ecosystem + ["tracing", "instrument", "Instrumented"] | ["bytes", "Bytes"] => Some(smallvec![]), + ["hashbrown", "raw", "RawTable" | "RawIntoIter"] => { + if let [ty, ..] = &***args + && let Some(ty) = ty.as_type() + { + Some(smallvec![ty]) + } else { + None + } + } + ["hashbrown", "raw", "RawDrain"] => { + if let [_, ty, ..] = &***args + && let Some(ty) = ty.as_type() + { + Some(smallvec![ty]) + } else { + None + } + } + _ => None, + } + } else { + None + } +} + +/// Returns the list of types with a "potentially sigificant" that may be dropped +/// by dropping a value of type `ty`. +#[instrument(level = "trace", skip(tcx, typing_env))] +pub fn extract_component_raw<'tcx>( + tcx: TyCtxt<'tcx>, + typing_env: ty::TypingEnv<'tcx>, + ty: Ty<'tcx>, + ty_seen: &mut UnordSet<Ty<'tcx>>, +) -> SmallVec<[Ty<'tcx>; 4]> { + // Droppiness does not depend on regions, so let us erase them. + let ty = tcx.try_normalize_erasing_regions(typing_env, ty).unwrap_or(ty); + + let tys = tcx.list_significant_drop_tys(typing_env.as_query_input(ty)); + debug!(?ty, "components"); + let mut out_tys = smallvec![]; + for ty in tys { + if let Some(tys) = true_significant_drop_ty(tcx, ty) { + // Some types can be further opened up because the drop is simply delegated + for ty in tys { + if ty_seen.insert(ty) { + out_tys.extend(extract_component_raw(tcx, typing_env, ty, ty_seen)); + } + } + } else { + if ty_seen.insert(ty) { + out_tys.push(ty); + } + } + } + out_tys +} + +#[instrument(level = "trace", skip(tcx, typing_env))] +pub fn extract_component_with_significant_dtor<'tcx>( + tcx: TyCtxt<'tcx>, + typing_env: ty::TypingEnv<'tcx>, + ty: Ty<'tcx>, +) -> SmallVec<[Ty<'tcx>; 4]> { + let mut tys = extract_component_raw(tcx, typing_env, ty, &mut Default::default()); + let mut deduplicate = FxHashSet::default(); + tys.retain(|oty| deduplicate.insert(*oty)); + tys.into_iter().collect() +} + +/// Extract the span of the custom destructor of a type +/// especially the span of the `impl Drop` header or its entire block +/// when we are working with current local crate. 
+#[instrument(level = "trace", skip(tcx))] +pub fn ty_dtor_span<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Span> { + match ty.kind() { + ty::Bool + | ty::Char + | ty::Int(_) + | ty::Uint(_) + | ty::Float(_) + | ty::Error(_) + | ty::Str + | ty::Never + | ty::RawPtr(_, _) + | ty::Ref(_, _, _) + | ty::FnPtr(_, _) + | ty::Tuple(_) + | ty::Dynamic(_, _, _) + | ty::Alias(_, _) + | ty::Bound(_, _) + | ty::Pat(_, _) + | ty::Placeholder(_) + | ty::Infer(_) + | ty::Slice(_) + | ty::Array(_, _) + | ty::UnsafeBinder(_) => None, + + ty::Adt(adt_def, _) => { + let did = adt_def.did(); + let try_local_did_span = |did: DefId| { + if let Some(local) = did.as_local() { + tcx.source_span(local) + } else { + tcx.def_span(did) + } + }; + let dtor = if let Some(dtor) = tcx.adt_destructor(did) { + dtor.did + } else if let Some(dtor) = tcx.adt_async_destructor(did) { + dtor.future + } else { + return Some(try_local_did_span(did)); + }; + let def_key = tcx.def_key(dtor); + let Some(parent_index) = def_key.parent else { return Some(try_local_did_span(dtor)) }; + let parent_did = DefId { index: parent_index, krate: dtor.krate }; + Some(try_local_did_span(parent_did)) + } + ty::Coroutine(did, _) + | ty::CoroutineWitness(did, _) + | ty::CoroutineClosure(did, _) + | ty::Closure(did, _) + | ty::FnDef(did, _) + | ty::Foreign(did) => Some(tcx.def_span(did)), + ty::Param(_) => None, + } +} diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index db9e9fbc643..f1d03d0a659 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -50,7 +50,7 @@ impl<'tcx> fmt::Debug for ty::AdtDef<'tcx> { impl fmt::Debug for ty::UpvarId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let name = ty::tls::with(|tcx| tcx.hir().name(self.var_path.hir_id)); + let name = ty::tls::with(|tcx| tcx.hir_name(self.var_path.hir_id)); write!(f, "UpvarId({:?};`{}`;{:?})", self.var_path.hir_id, name, self.closure_expr_id) } } @@ -284,6 +284,7 @@ TrivialTypeTraversalImpls! { rustc_hir::def_id::LocalDefId, rustc_hir::HirId, rustc_hir::MatchSource, + rustc_hir::RangeEnd, rustc_span::Ident, rustc_span::Span, rustc_span::Symbol, diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index d5617adf26b..ce563c59251 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -342,6 +342,7 @@ impl ParamConst { ParamConst::new(def.index, def.name) } + #[instrument(level = "debug")] pub fn find_ty_from_env<'tcx>(self, env: ParamEnv<'tcx>) -> Ty<'tcx> { let mut candidates = env.caller_bounds().iter().filter_map(|clause| { // `ConstArgHasType` are never desugared to be higher ranked. 
@@ -461,7 +462,7 @@ impl<'tcx> Ty<'tcx> { #[inline] pub fn new_param(tcx: TyCtxt<'tcx>, index: u32, name: Symbol) -> Ty<'tcx> { - tcx.mk_ty_from_kind(Param(ParamTy { index, name })) + Ty::new(tcx, Param(ParamTy { index, name })) } #[inline] diff --git a/compiler/rustc_middle/src/ty/typeck_results.rs b/compiler/rustc_middle/src/ty/typeck_results.rs index d4484a16fea..06054e22e76 100644 --- a/compiler/rustc_middle/src/ty/typeck_results.rs +++ b/compiler/rustc_middle/src/ty/typeck_results.rs @@ -310,7 +310,7 @@ impl<'tcx> TypeckResults<'tcx> { pub fn node_type(&self, id: HirId) -> Ty<'tcx> { self.node_type_opt(id).unwrap_or_else(|| { - bug!("node_type: no type for node {}", tls::with(|tcx| tcx.hir().node_to_string(id))) + bug!("node_type: no type for node {}", tls::with(|tcx| tcx.hir_id_to_string(id))) }) } @@ -394,8 +394,10 @@ impl<'tcx> TypeckResults<'tcx> { matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _)))) } - pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> { - self.pat_binding_modes().get(id).copied().or_else(|| { + /// Returns the computed binding mode for a `PatKind::Binding` pattern + /// (after match ergonomics adjustments). + pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> BindingMode { + self.pat_binding_modes().get(id).copied().unwrap_or_else(|| { s.dcx().span_bug(sp, "missing binding mode"); }) } @@ -552,7 +554,7 @@ fn invalid_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: HirId) { ty::tls::with(|tcx| { bug!( "node {} cannot be placed in TypeckResults with hir_owner {:?}", - tcx.hir().node_to_string(hir_id), + tcx.hir_id_to_string(hir_id), hir_owner ) }); diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index c153f6bb7d7..0c68913904f 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -192,6 +192,18 @@ impl<'tcx> TyCtxt<'tcx> { ty.is_trivially_pure_clone_copy() || self.is_copy_raw(typing_env.as_query_input(ty)) } + /// Checks whether `ty: UseCloned` holds while ignoring region constraints. + /// + /// This function should not be used if there is an `InferCtxt` available. + /// Use `InferCtxt::type_is_use_cloned_modulo_regions` instead. + pub fn type_is_use_cloned_modulo_regions( + self, + typing_env: ty::TypingEnv<'tcx>, + ty: Ty<'tcx>, + ) -> bool { + ty.is_trivially_pure_clone_copy() || self.is_use_cloned_raw(typing_env.as_query_input(ty)) + } + /// Returns the deeply last field of nested structures, or the same type if /// not a structure at all. Corresponds to the only possible unsized field, /// and its type can be used to determine unsizing strategy. @@ -208,6 +220,20 @@ impl<'tcx> TyCtxt<'tcx> { tcx.struct_tail_raw(ty, |ty| tcx.normalize_erasing_regions(typing_env, ty), || {}) } + /// Returns `true` if a type has metadata. + pub fn type_has_metadata(self, ty: Ty<'tcx>, typing_env: ty::TypingEnv<'tcx>) -> bool { + if ty.is_sized(self, typing_env) { + return false; + } + + let tail = self.struct_tail_for_codegen(ty, typing_env); + match tail.kind() { + ty::Foreign(..) => false, + ty::Str | ty::Slice(..) | ty::Dynamic(..) => true, + _ => bug!("unexpected unsized tail: {:?}", tail), + } + } + /// Returns the deeply last field of nested structures, or the same type if /// not a structure at all. Corresponds to the only possible unsized field, /// and its type can be used to determine unsizing strategy.
@@ -726,51 +752,37 @@ impl<'tcx> TyCtxt<'tcx> { } /// Return the set of types that should be taken into account when checking - /// trait bounds on a coroutine's internal state. - // FIXME(compiler-errors): We should remove this when the old solver goes away; - // and all other usages of this function should go through `bound_coroutine_hidden_types` - // instead. - pub fn coroutine_hidden_types( - self, - def_id: DefId, - ) -> impl Iterator<Item = ty::EarlyBinder<'tcx, Ty<'tcx>>> { - let coroutine_layout = self.mir_coroutine_witnesses(def_id); - coroutine_layout - .as_ref() - .map_or_else(|| [].iter(), |l| l.field_tys.iter()) - .filter(|decl| !decl.ignore_for_traits) - .map(|decl| ty::EarlyBinder::bind(decl.ty)) - } - - /// Return the set of types that should be taken into account when checking /// trait bounds on a coroutine's internal state. This properly replaces /// `ReErased` with new existential bound lifetimes. - pub fn bound_coroutine_hidden_types( + pub fn coroutine_hidden_types( self, def_id: DefId, - ) -> impl Iterator<Item = ty::EarlyBinder<'tcx, ty::Binder<'tcx, Ty<'tcx>>>> { + ) -> ty::EarlyBinder<'tcx, ty::Binder<'tcx, &'tcx ty::List<Ty<'tcx>>>> { let coroutine_layout = self.mir_coroutine_witnesses(def_id); - coroutine_layout - .as_ref() - .map_or_else(|| [].iter(), |l| l.field_tys.iter()) - .filter(|decl| !decl.ignore_for_traits) - .map(move |decl| { - let mut vars = vec![]; - let ty = fold_regions(self, decl.ty, |re, debruijn| { - assert_eq!(re, self.lifetimes.re_erased); - let var = ty::BoundVar::from_usize(vars.len()); - vars.push(ty::BoundVariableKind::Region(ty::BoundRegionKind::Anon)); - ty::Region::new_bound( - self, - debruijn, - ty::BoundRegion { var, kind: ty::BoundRegionKind::Anon }, - ) - }); - ty::EarlyBinder::bind(ty::Binder::bind_with_vars( - ty, - self.mk_bound_variable_kinds(&vars), - )) - }) + let mut vars = vec![]; + let bound_tys = self.mk_type_list_from_iter( + coroutine_layout + .as_ref() + .map_or_else(|| [].iter(), |l| l.field_tys.iter()) + .filter(|decl| !decl.ignore_for_traits) + .map(|decl| { + let ty = fold_regions(self, decl.ty, |re, debruijn| { + assert_eq!(re, self.lifetimes.re_erased); + let var = ty::BoundVar::from_usize(vars.len()); + vars.push(ty::BoundVariableKind::Region(ty::BoundRegionKind::Anon)); + ty::Region::new_bound( + self, + debruijn, + ty::BoundRegion { var, kind: ty::BoundRegionKind::Anon }, + ) + }); + ty + }), + ); + ty::EarlyBinder::bind(ty::Binder::bind_with_vars( + bound_tys, + self.mk_bound_variable_kinds(&vars), + )) } /// Expands the given impl trait type, stopping if the type is recursive. 
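Editor's note: the new `type_has_metadata` helper in the `ty/util.rs` hunks above classifies a type by whether its unsized tail (`str`, slice, or `dyn Trait`) forces references to carry pointer metadata. A standalone illustration of the same idea follows; it is ordinary Rust rather than compiler code, and the assertions assume the usual layout in which metadata doubles the size of a reference.

```rust
// Sketch: references to types with metadata are "wide" (pointer + length or
// pointer + vtable), while references to sized types stay "thin".
use std::mem::size_of;

fn main() {
    let thin = size_of::<&u64>();                    // thin pointer: no metadata
    let slice = size_of::<&[u8]>();                  // wide: pointer + length
    let string = size_of::<&str>();                  // wide: pointer + length
    let dynamic = size_of::<&dyn std::fmt::Debug>(); // wide: pointer + vtable pointer
    assert_eq!(slice, 2 * thin);
    assert_eq!(string, 2 * thin);
    assert_eq!(dynamic, 2 * thin);
    println!("thin={thin}, slice={slice}, str={string}, dyn={dynamic}");
}
```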
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs index 3e8a3d1a289..a23316ae6fc 100644 --- a/compiler/rustc_middle/src/ty/walk.rs +++ b/compiler/rustc_middle/src/ty/walk.rs @@ -137,9 +137,9 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) ty::Pat(ty, pat) => { match *pat { - ty::PatternKind::Range { start, end, include_end: _ } => { - stack.extend(end.map(Into::into)); - stack.extend(start.map(Into::into)); + ty::PatternKind::Range { start, end } => { + stack.push(end.into()); + stack.push(start.into()); } } stack.push(ty.into()); diff --git a/compiler/rustc_mir_build/src/builder/expr/as_constant.rs b/compiler/rustc_mir_build/src/builder/expr/as_constant.rs index 84c9297e658..f743ea60a45 100644 --- a/compiler/rustc_mir_build/src/builder/expr/as_constant.rs +++ b/compiler/rustc_mir_build/src/builder/expr/as_constant.rs @@ -117,7 +117,12 @@ fn lit_to_mir_constant<'tcx>(tcx: TyCtxt<'tcx>, lit_input: LitToConstInput<'tcx> ConstValue::Scalar(Scalar::from_uint(result, width)) }; - let value = match (lit, ty.kind()) { + let lit_ty = match *ty.kind() { + ty::Pat(base, _) => base, + _ => ty, + }; + + let value = match (lit, lit_ty.kind()) { (ast::LitKind::Str(s, _), ty::Ref(_, inner_ty, _)) if inner_ty.is_str() => { let s = s.as_str(); let allocation = Allocation::from_bytes_byte_aligned_immutable(s.as_bytes()); diff --git a/compiler/rustc_mir_build/src/builder/expr/as_place.rs b/compiler/rustc_mir_build/src/builder/expr/as_place.rs index 581f45db6c4..50ca924baf9 100644 --- a/compiler/rustc_mir_build/src/builder/expr/as_place.rs +++ b/compiler/rustc_mir_build/src/builder/expr/as_place.rs @@ -582,6 +582,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { | ExprKind::Yield { .. } | ExprKind::ThreadLocalRef(_) | ExprKind::Call { .. } + | ExprKind::ByUse { .. } | ExprKind::WrapUnsafeBinder { .. } => { // these are not places, so we need to make a temporary. debug_assert!(!matches!(Category::of(&expr.kind), Some(Category::Place))); diff --git a/compiler/rustc_mir_build/src/builder/expr/as_rvalue.rs b/compiler/rustc_mir_build/src/builder/expr/as_rvalue.rs index 2c9a1de7f99..97d34b85f50 100644 --- a/compiler/rustc_mir_build/src/builder/expr/as_rvalue.rs +++ b/compiler/rustc_mir_build/src/builder/expr/as_rvalue.rs @@ -572,6 +572,14 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ); block.and(Rvalue::Use(operand)) } + + ExprKind::ByUse { expr, span: _ } => { + let operand = unpack!( + block = + this.as_operand(block, scope, expr, LocalInfo::Boring, NeedsTemporary::No) + ); + block.and(Rvalue::Use(operand)) + } } } diff --git a/compiler/rustc_mir_build/src/builder/expr/category.rs b/compiler/rustc_mir_build/src/builder/expr/category.rs index ca55d36bfc6..34524aed406 100644 --- a/compiler/rustc_mir_build/src/builder/expr/category.rs +++ b/compiler/rustc_mir_build/src/builder/expr/category.rs @@ -56,6 +56,7 @@ impl Category { | ExprKind::RawBorrow { .. } | ExprKind::Yield { .. } | ExprKind::Call { .. } + | ExprKind::ByUse { .. } | ExprKind::InlineAsm { .. } => Some(Category::Rvalue(RvalueFunc::Into)), ExprKind::Array { .. 
} diff --git a/compiler/rustc_mir_build/src/builder/expr/into.rs b/compiler/rustc_mir_build/src/builder/expr/into.rs index 72443e2f60d..333e69475c5 100644 --- a/compiler/rustc_mir_build/src/builder/expr/into.rs +++ b/compiler/rustc_mir_build/src/builder/expr/into.rs @@ -4,11 +4,14 @@ use rustc_ast::{AsmMacro, InlineAsmOptions}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_hir as hir; +use rustc_hir::lang_items::LangItem; use rustc_middle::mir::*; use rustc_middle::span_bug; use rustc_middle::thir::*; -use rustc_middle::ty::CanonicalUserTypeAnnotation; +use rustc_middle::ty::{CanonicalUserTypeAnnotation, Ty}; +use rustc_span::DUMMY_SP; use rustc_span::source_map::Spanned; +use rustc_trait_selection::infer::InferCtxtExt; use tracing::{debug, instrument}; use crate::builder::expr::category::{Category, RvalueFunc}; @@ -289,6 +292,57 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { this.diverge_from(block); success.unit() } + ExprKind::ByUse { expr, span } => { + let place = unpack!(block = this.as_place(block, expr)); + let ty = place.ty(&this.local_decls, this.tcx).ty; + + if this.tcx.type_is_copy_modulo_regions(this.infcx.typing_env(this.param_env), ty) { + this.cfg.push_assign( + block, + source_info, + destination, + Rvalue::Use(Operand::Copy(place)), + ); + block.unit() + } else if this.infcx.type_is_use_cloned_modulo_regions(this.param_env, ty) { + // Convert `expr.use` to a call like `Clone::clone(&expr)` + let success = this.cfg.start_new_block(); + let clone_trait = this.tcx.require_lang_item(LangItem::Clone, None); + let clone_fn = this.tcx.associated_item_def_ids(clone_trait)[0]; + let func = Operand::function_handle(this.tcx, clone_fn, [ty.into()], expr_span); + let ref_ty = Ty::new_imm_ref(this.tcx, this.tcx.lifetimes.re_erased, ty); + let ref_place = this.temp(ref_ty, span); + this.cfg.push_assign( + block, + source_info, + ref_place, + Rvalue::Ref(this.tcx.lifetimes.re_erased, BorrowKind::Shared, place), + ); + this.cfg.terminate( + block, + source_info, + TerminatorKind::Call { + func, + args: [Spanned { node: Operand::Move(ref_place), span: DUMMY_SP }] + .into(), + destination, + target: Some(success), + unwind: UnwindAction::Unreachable, + call_source: CallSource::Misc, + fn_span: expr_span, + }, + ); + success.unit() + } else { + this.cfg.push_assign( + block, + source_info, + destination, + Rvalue::Use(Operand::Move(place)), + ); + block.unit() + } + } ExprKind::Use { source } => this.expr_into_dest(destination, block, source), ExprKind::Borrow { arg, borrow_kind } => { // We don't do this in `as_rvalue` because we use `as_place` diff --git a/compiler/rustc_mir_build/src/builder/matches/match_pair.rs b/compiler/rustc_mir_build/src/builder/matches/match_pair.rs index 10b43390eb2..c6f3e22e95f 100644 --- a/compiler/rustc_mir_build/src/builder/matches/match_pair.rs +++ b/compiler/rustc_mir_build/src/builder/matches/match_pair.rs @@ -6,27 +6,25 @@ use rustc_middle::ty::{self, Ty, TypeVisitableExt}; use crate::builder::Builder; use crate::builder::expr::as_place::{PlaceBase, PlaceBuilder}; -use crate::builder::matches::{FlatPat, MatchPairTree, TestCase}; +use crate::builder::matches::{FlatPat, MatchPairTree, PatternExtraData, TestCase}; impl<'a, 'tcx> Builder<'a, 'tcx> { - /// Builds and returns [`MatchPairTree`] subtrees, one for each pattern in + /// Builds and pushes [`MatchPairTree`] subtrees, one for each pattern in /// `subpatterns`, representing the fields of a [`PatKind::Variant`] or /// [`PatKind::Leaf`]. 
/// /// Used internally by [`MatchPairTree::for_pattern`]. fn field_match_pairs( &mut self, + match_pairs: &mut Vec<MatchPairTree<'tcx>>, + extra_data: &mut PatternExtraData<'tcx>, place: PlaceBuilder<'tcx>, subpatterns: &[FieldPat<'tcx>], - ) -> Vec<MatchPairTree<'tcx>> { - subpatterns - .iter() - .map(|fieldpat| { - let place = - place.clone_project(PlaceElem::Field(fieldpat.field, fieldpat.pattern.ty)); - MatchPairTree::for_pattern(place, &fieldpat.pattern, self) - }) - .collect() + ) { + for fieldpat in subpatterns { + let place = place.clone_project(PlaceElem::Field(fieldpat.field, fieldpat.pattern.ty)); + MatchPairTree::for_pattern(place, &fieldpat.pattern, self, match_pairs, extra_data); + } } /// Builds [`MatchPairTree`] subtrees for the prefix/middle/suffix parts of an @@ -36,6 +34,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fn prefix_slice_suffix( &mut self, match_pairs: &mut Vec<MatchPairTree<'tcx>>, + extra_data: &mut PatternExtraData<'tcx>, place: &PlaceBuilder<'tcx>, prefix: &[Pat<'tcx>], opt_slice: &Option<Box<Pat<'tcx>>>, @@ -56,11 +55,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { ((prefix.len() + suffix.len()).try_into().unwrap(), false) }; - match_pairs.extend(prefix.iter().enumerate().map(|(idx, subpattern)| { + for (idx, subpattern) in prefix.iter().enumerate() { let elem = ProjectionElem::ConstantIndex { offset: idx as u64, min_length, from_end: false }; - MatchPairTree::for_pattern(place.clone_project(elem), subpattern, self) - })); + let place = place.clone_project(elem); + MatchPairTree::for_pattern(place, subpattern, self, match_pairs, extra_data) + } if let Some(subslice_pat) = opt_slice { let suffix_len = suffix.len() as u64; @@ -69,10 +69,10 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { to: if exact_size { min_length - suffix_len } else { suffix_len }, from_end: !exact_size, }); - match_pairs.push(MatchPairTree::for_pattern(subslice, subslice_pat, self)); + MatchPairTree::for_pattern(subslice, subslice_pat, self, match_pairs, extra_data); } - match_pairs.extend(suffix.iter().rev().enumerate().map(|(idx, subpattern)| { + for (idx, subpattern) in suffix.iter().rev().enumerate() { let end_offset = (idx + 1) as u64; let elem = ProjectionElem::ConstantIndex { offset: if exact_size { min_length - end_offset } else { end_offset }, @@ -80,19 +80,21 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { from_end: !exact_size, }; let place = place.clone_project(elem); - MatchPairTree::for_pattern(place, subpattern, self) - })); + MatchPairTree::for_pattern(place, subpattern, self, match_pairs, extra_data) + } } } impl<'tcx> MatchPairTree<'tcx> { /// Recursively builds a match pair tree for the given pattern and its /// subpatterns. - pub(in crate::builder) fn for_pattern( + pub(super) fn for_pattern( mut place_builder: PlaceBuilder<'tcx>, pattern: &Pat<'tcx>, cx: &mut Builder<'_, 'tcx>, - ) -> MatchPairTree<'tcx> { + match_pairs: &mut Vec<Self>, // Newly-created nodes are added to this vector + extra_data: &mut PatternExtraData<'tcx>, // Bindings/ascriptions are added here + ) { // Force the place type to the pattern's type. // FIXME(oli-obk): can we use this to simplify slice/array pattern hacks? if let Some(resolved) = place_builder.resolve_upvar(cx) { @@ -113,64 +115,102 @@ impl<'tcx> MatchPairTree<'tcx> { place_builder = place_builder.project(ProjectionElem::OpaqueCast(pattern.ty)); } + // Place can be none if the pattern refers to a non-captured place in a closure. 
let place = place_builder.try_to_place(cx); - let default_irrefutable = || TestCase::Irrefutable { binding: None, ascription: None }; let mut subpairs = Vec::new(); let test_case = match pattern.kind { - PatKind::Wild | PatKind::Error(_) => default_irrefutable(), + PatKind::Wild | PatKind::Error(_) => None, - PatKind::Or { ref pats } => TestCase::Or { + PatKind::Or { ref pats } => Some(TestCase::Or { pats: pats.iter().map(|pat| FlatPat::new(place_builder.clone(), pat, cx)).collect(), - }, + }), PatKind::Range(ref range) => { if range.is_full_range(cx.tcx) == Some(true) { - default_irrefutable() + None } else { - TestCase::Range(Arc::clone(range)) + Some(TestCase::Range(Arc::clone(range))) } } - PatKind::Constant { value } => TestCase::Constant { value }, + PatKind::Constant { value } => Some(TestCase::Constant { value }), PatKind::AscribeUserType { ascription: Ascription { ref annotation, variance }, ref subpattern, .. } => { + MatchPairTree::for_pattern( + place_builder, + subpattern, + cx, + &mut subpairs, + extra_data, + ); + // Apply the type ascription to the value at `match_pair.place` - let ascription = place.map(|source| super::Ascription { - annotation: annotation.clone(), - source, - variance, - }); - - subpairs.push(MatchPairTree::for_pattern(place_builder, subpattern, cx)); - TestCase::Irrefutable { ascription, binding: None } + if let Some(source) = place { + let annotation = annotation.clone(); + extra_data.ascriptions.push(super::Ascription { source, annotation, variance }); + } + + None } PatKind::Binding { mode, var, ref subpattern, .. } => { - let binding = place.map(|source| super::Binding { - span: pattern.span, - source, - var_id: var, - binding_mode: mode, - }); + // In order to please the borrow checker, when lowering a pattern + // like `x @ subpat` we must establish any bindings in `subpat` + // before establishing the binding for `x`. + // + // For example (from #69971): + // + // ```ignore (illustrative) + // struct NonCopyStruct { + // copy_field: u32, + // } + // + // fn foo1(x: NonCopyStruct) { + // let y @ NonCopyStruct { copy_field: z } = x; + // // the above should turn into + // let z = x.copy_field; + // let y = x; + // } + // ``` + // First, recurse into the subpattern, if any. if let Some(subpattern) = subpattern.as_ref() { // this is the `x @ P` case; have to keep matching against `P` now - subpairs.push(MatchPairTree::for_pattern(place_builder, subpattern, cx)); + MatchPairTree::for_pattern( + place_builder, + subpattern, + cx, + &mut subpairs, + extra_data, + ); } - TestCase::Irrefutable { ascription: None, binding } + + // Then push this binding, after any bindings in the subpattern. 
+ if let Some(source) = place { + extra_data.bindings.push(super::Binding { + span: pattern.span, + source, + var_id: var, + binding_mode: mode, + }); + } + + None } PatKind::ExpandedConstant { subpattern: ref pattern, def_id: _, is_inline: false } => { - subpairs.push(MatchPairTree::for_pattern(place_builder, pattern, cx)); - default_irrefutable() + MatchPairTree::for_pattern(place_builder, pattern, cx, &mut subpairs, extra_data); + None } PatKind::ExpandedConstant { subpattern: ref pattern, def_id, is_inline: true } => { + MatchPairTree::for_pattern(place_builder, pattern, cx, &mut subpairs, extra_data); + // Apply a type ascription for the inline constant to the value at `match_pair.place` - let ascription = place.map(|source| { + if let Some(source) = place { let span = pattern.span; let parent_id = cx.tcx.typeck_root_def_id(cx.def_id.to_def_id()); let args = ty::InlineConstArgs::new( @@ -189,33 +229,47 @@ impl<'tcx> MatchPairTree<'tcx> { span, user_ty: Box::new(user_ty), }; - super::Ascription { annotation, source, variance: ty::Contravariant } - }); + let variance = ty::Contravariant; + extra_data.ascriptions.push(super::Ascription { annotation, source, variance }); + } - subpairs.push(MatchPairTree::for_pattern(place_builder, pattern, cx)); - TestCase::Irrefutable { ascription, binding: None } + None } PatKind::Array { ref prefix, ref slice, ref suffix } => { - cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix); - default_irrefutable() + cx.prefix_slice_suffix( + &mut subpairs, + extra_data, + &place_builder, + prefix, + slice, + suffix, + ); + None } PatKind::Slice { ref prefix, ref slice, ref suffix } => { - cx.prefix_slice_suffix(&mut subpairs, &place_builder, prefix, slice, suffix); + cx.prefix_slice_suffix( + &mut subpairs, + extra_data, + &place_builder, + prefix, + slice, + suffix, + ); if prefix.is_empty() && slice.is_some() && suffix.is_empty() { - default_irrefutable() + None } else { - TestCase::Slice { + Some(TestCase::Slice { len: prefix.len() + suffix.len(), variable_length: slice.is_some(), - } + }) } } PatKind::Variant { adt_def, variant_index, args, ref subpatterns } => { let downcast_place = place_builder.downcast(adt_def, variant_index); // `(x as Variant)` - subpairs = cx.field_match_pairs(downcast_place, subpatterns); + cx.field_match_pairs(&mut subpairs, extra_data, downcast_place, subpatterns); let irrefutable = adt_def.variants().iter_enumerated().all(|(i, v)| { i == variant_index @@ -225,21 +279,23 @@ impl<'tcx> MatchPairTree<'tcx> { .apply_ignore_module(cx.tcx, cx.infcx.typing_env(cx.param_env)) }) && (adt_def.did().is_local() || !adt_def.is_variant_list_non_exhaustive()); - if irrefutable { - default_irrefutable() - } else { - TestCase::Variant { adt_def, variant_index } - } + if irrefutable { None } else { Some(TestCase::Variant { adt_def, variant_index }) } } PatKind::Leaf { ref subpatterns } => { - subpairs = cx.field_match_pairs(place_builder, subpatterns); - default_irrefutable() + cx.field_match_pairs(&mut subpairs, extra_data, place_builder, subpatterns); + None } PatKind::Deref { ref subpattern } => { - subpairs.push(MatchPairTree::for_pattern(place_builder.deref(), subpattern, cx)); - default_irrefutable() + MatchPairTree::for_pattern( + place_builder.deref(), + subpattern, + cx, + &mut subpairs, + extra_data, + ); + None } PatKind::DerefPattern { ref subpattern, mutability } => { @@ -249,23 +305,32 @@ impl<'tcx> MatchPairTree<'tcx> { Ty::new_ref(cx.tcx, cx.tcx.lifetimes.re_erased, subpattern.ty, mutability), 
pattern.span, ); - subpairs.push(MatchPairTree::for_pattern( + MatchPairTree::for_pattern( PlaceBuilder::from(temp).deref(), subpattern, cx, - )); - TestCase::Deref { temp, mutability } + &mut subpairs, + extra_data, + ); + Some(TestCase::Deref { temp, mutability }) } - PatKind::Never => TestCase::Never, + PatKind::Never => Some(TestCase::Never), }; - MatchPairTree { - place, - test_case, - subpairs, - pattern_ty: pattern.ty, - pattern_span: pattern.span, + if let Some(test_case) = test_case { + // This pattern is refutable, so push a new match-pair node. + match_pairs.push(MatchPairTree { + place: place.expect("refutable patterns should always have a place to inspect"), + test_case, + subpairs, + pattern_ty: pattern.ty, + pattern_span: pattern.span, + }) + } else { + // This pattern is irrefutable, so it doesn't need its own match-pair node. + // Just push its refutable subpatterns instead, if any. + match_pairs.extend(subpairs); } } } diff --git a/compiler/rustc_mir_build/src/builder/matches/mod.rs b/compiler/rustc_mir_build/src/builder/matches/mod.rs index d05d5b151ff..b05052a3455 100644 --- a/compiler/rustc_mir_build/src/builder/matches/mod.rs +++ b/compiler/rustc_mir_build/src/builder/matches/mod.rs @@ -26,7 +26,6 @@ use crate::builder::{ // helper functions, broken out by category: mod match_pair; -mod simplify; mod test; mod util; @@ -987,18 +986,16 @@ impl<'tcx> PatternExtraData<'tcx> { } /// A pattern in a form suitable for lowering the match tree, with all irrefutable -/// patterns simplified away, and or-patterns sorted to the end. +/// patterns simplified away. /// -/// Here, "flat" indicates that the pattern's match pairs have been recursively -/// simplified by [`Builder::simplify_match_pairs`]. They are not necessarily -/// flat in an absolute sense. +/// Here, "flat" indicates that irrefutable nodes in the pattern tree have been +/// recursively replaced with their refutable subpatterns. They are not +/// necessarily flat in an absolute sense. /// /// Will typically be incorporated into a [`Candidate`]. #[derive(Debug, Clone)] struct FlatPat<'tcx> { /// To match the pattern, all of these must be satisfied... - // Invariant: all the match pairs are recursively simplified. - // Invariant: or-patterns must be sorted to the end. match_pairs: Vec<MatchPairTree<'tcx>>, extra_data: PatternExtraData<'tcx>, @@ -1008,17 +1005,15 @@ impl<'tcx> FlatPat<'tcx> { /// Creates a `FlatPat` containing a simplified [`MatchPairTree`] list/forest /// for the given pattern. fn new(place: PlaceBuilder<'tcx>, pattern: &Pat<'tcx>, cx: &mut Builder<'_, 'tcx>) -> Self { - // First, recursively build a tree of match pairs for the given pattern. - let mut match_pairs = vec![MatchPairTree::for_pattern(place, pattern, cx)]; + // Recursively build a tree of match pairs for the given pattern. + let mut match_pairs = vec![]; let mut extra_data = PatternExtraData { span: pattern.span, bindings: Vec::new(), ascriptions: Vec::new(), is_never: pattern.is_never_pattern(), }; - // Recursively remove irrefutable match pairs, while recording their - // bindings/ascriptions, and sort or-patterns after other match pairs. - cx.simplify_match_pairs(&mut match_pairs, &mut extra_data); + MatchPairTree::for_pattern(place, pattern, cx, &mut match_pairs, &mut extra_data); Self { match_pairs, extra_data } } @@ -1055,7 +1050,6 @@ struct Candidate<'tcx> { /// (see [`Builder::test_remaining_match_pairs_after_or`]). /// /// Invariants: - /// - All [`TestCase::Irrefutable`] patterns have been removed by simplification. 
/// - All or-patterns ([`TestCase::Or`]) have been sorted to the end. match_pairs: Vec<MatchPairTree<'tcx>>, @@ -1126,7 +1120,7 @@ impl<'tcx> Candidate<'tcx> { /// Incorporates an already-simplified [`FlatPat`] into a new candidate. fn from_flat_pat(flat_pat: FlatPat<'tcx>, has_guard: bool) -> Self { - Candidate { + let mut this = Candidate { match_pairs: flat_pat.match_pairs, extra_data: flat_pat.extra_data, has_guard, @@ -1135,7 +1129,14 @@ impl<'tcx> Candidate<'tcx> { otherwise_block: None, pre_binding_block: None, false_edge_start_block: None, - } + }; + this.sort_match_pairs(); + this + } + + /// Restores the invariant that or-patterns must be sorted to the end. + fn sort_match_pairs(&mut self) { + self.match_pairs.sort_by_key(|pair| matches!(pair.test_case, TestCase::Or { .. })); } /// Returns whether the first match pair of this candidate is an or-pattern. @@ -1227,17 +1228,11 @@ struct Ascription<'tcx> { /// - [`Builder::pick_test_for_match_pair`] (to choose a test) /// - [`Builder::sort_candidate`] (to see how the test interacts with a match pair) /// -/// Two variants are unlike the others and deserve special mention: -/// -/// - [`Self::Irrefutable`] is only used temporarily when building a [`MatchPairTree`]. -/// They are then flattened away by [`Builder::simplify_match_pairs`], with any -/// bindings/ascriptions incorporated into the enclosing [`FlatPat`]. -/// - [`Self::Or`] are not tested directly like the other variants. Instead they -/// participate in or-pattern expansion, where they are transformed into subcandidates. -/// - See [`Builder::expand_and_match_or_candidates`]. +/// Note that or-patterns are not tested directly like the other variants. +/// Instead they participate in or-pattern expansion, where they are transformed into +/// subcandidates. See [`Builder::expand_and_match_or_candidates`]. #[derive(Debug, Clone)] enum TestCase<'tcx> { - Irrefutable { binding: Option<Binding<'tcx>>, ascription: Option<Ascription<'tcx>> }, Variant { adt_def: ty::AdtDef<'tcx>, variant_index: VariantIdx }, Constant { value: mir::Const<'tcx> }, Range(Arc<PatRange<'tcx>>), @@ -1261,19 +1256,9 @@ impl<'tcx> TestCase<'tcx> { #[derive(Debug, Clone)] pub(crate) struct MatchPairTree<'tcx> { /// This place... - /// - /// --- - /// This can be `None` if it referred to a non-captured place in a closure. - /// - /// Invariant: Can only be `None` when `test_case` is `Irrefutable`. - /// Therefore this must be `Some(_)` after simplification. - place: Option<Place<'tcx>>, + place: Place<'tcx>, /// ... must pass this test... - /// - /// --- - /// Invariant: after creation and simplification in [`FlatPat::new`], - /// this must not be [`TestCase::Irrefutable`]. test_case: TestCase<'tcx>, /// ... and these subpairs must match. @@ -2091,11 +2076,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Extract the match-pair from the highest priority candidate let match_pair = &candidates[0].match_pairs[0]; let test = self.pick_test_for_match_pair(match_pair); - // Unwrap is ok after simplification. - let match_place = match_pair.place.unwrap(); debug!(?test, ?match_pair); - (match_place, test) + (match_pair.place, test) } /// Given a test, we partition the input candidates into several buckets. diff --git a/compiler/rustc_mir_build/src/builder/matches/simplify.rs b/compiler/rustc_mir_build/src/builder/matches/simplify.rs deleted file mode 100644 index 15c860151dc..00000000000 --- a/compiler/rustc_mir_build/src/builder/matches/simplify.rs +++ /dev/null @@ -1,71 +0,0 @@ -//! Simplifying Candidates -//! -//! 
*Simplifying* a match pair `place @ pattern` means breaking it down -//! into bindings or other, simpler match pairs. For example: -//! -//! - `place @ (P1, P2)` can be simplified to `[place.0 @ P1, place.1 @ P2]` -//! - `place @ x` can be simplified to `[]` by binding `x` to `place` -//! -//! The `simplify_match_pairs` routine just repeatedly applies these -//! sort of simplifications until there is nothing left to -//! simplify. Match pairs cannot be simplified if they require some -//! sort of test: for example, testing which variant an enum is, or -//! testing a value against a constant. - -use std::mem; - -use tracing::{debug, instrument}; - -use crate::builder::Builder; -use crate::builder::matches::{MatchPairTree, PatternExtraData, TestCase}; - -impl<'a, 'tcx> Builder<'a, 'tcx> { - /// Simplify a list of match pairs so they all require a test. Stores relevant bindings and - /// ascriptions in `extra_data`. - #[instrument(skip(self), level = "debug")] - pub(super) fn simplify_match_pairs( - &mut self, - match_pairs: &mut Vec<MatchPairTree<'tcx>>, - extra_data: &mut PatternExtraData<'tcx>, - ) { - // In order to please the borrow checker, in a pattern like `x @ pat` we must lower the - // bindings in `pat` before `x`. E.g. (#69971): - // - // struct NonCopyStruct { - // copy_field: u32, - // } - // - // fn foo1(x: NonCopyStruct) { - // let y @ NonCopyStruct { copy_field: z } = x; - // // the above should turn into - // let z = x.copy_field; - // let y = x; - // } - // - // We therefore lower bindings from left-to-right, except we lower the `x` in `x @ pat` - // after any bindings in `pat`. This doesn't work for or-patterns: the current structure of - // match lowering forces us to lower bindings inside or-patterns last. - for mut match_pair in mem::take(match_pairs) { - self.simplify_match_pairs(&mut match_pair.subpairs, extra_data); - if let TestCase::Irrefutable { binding, ascription } = match_pair.test_case { - if let Some(binding) = binding { - extra_data.bindings.push(binding); - } - if let Some(ascription) = ascription { - extra_data.ascriptions.push(ascription); - } - // Simplifiable pattern; we replace it with its already simplified subpairs. - match_pairs.append(&mut match_pair.subpairs); - } else { - // Unsimplifiable pattern; we keep it. - match_pairs.push(match_pair); - } - } - - // Move or-patterns to the end, because they can result in us - // creating additional candidates, so we want to test them as - // late as possible. - match_pairs.sort_by_key(|pair| matches!(pair.test_case, TestCase::Or { .. })); - debug!(simplified = ?match_pairs, "simplify_match_pairs"); - } -} diff --git a/compiler/rustc_mir_build/src/builder/matches/test.rs b/compiler/rustc_mir_build/src/builder/matches/test.rs index a7a028bff97..e5d61bc9e55 100644 --- a/compiler/rustc_mir_build/src/builder/matches/test.rs +++ b/compiler/rustc_mir_build/src/builder/matches/test.rs @@ -55,12 +55,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Or-patterns are not tested directly; instead they are expanded into subcandidates, // which are then distinguished by testing whatever non-or patterns they contain. TestCase::Or { .. } => bug!("or-patterns should have already been handled"), - - TestCase::Irrefutable { .. 
} => span_bug!( - match_pair.pattern_span, - "simplifiable pattern found: {:?}", - match_pair.pattern_span - ), }; Test { span: match_pair.pattern_span, kind } @@ -546,11 +540,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // than one, but it'd be very unusual to have two sides that // both require tests; you'd expect one side to be simplified // away.) - let (match_pair_index, match_pair) = candidate - .match_pairs - .iter() - .enumerate() - .find(|&(_, mp)| mp.place == Some(test_place))?; + let (match_pair_index, match_pair) = + candidate.match_pairs.iter().enumerate().find(|&(_, mp)| mp.place == test_place)?; // If true, the match pair is completely entailed by its corresponding test // branch, so it can be removed. If false, the match pair is _compatible_ @@ -593,7 +584,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { candidate .match_pairs .iter() - .any(|mp| mp.place == Some(test_place) && is_covering_range(&mp.test_case)) + .any(|mp| mp.place == test_place && is_covering_range(&mp.test_case)) }; if sorted_candidates .get(&TestBranch::Failure) @@ -769,7 +760,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let match_pair = candidate.match_pairs.remove(match_pair_index); candidate.match_pairs.extend(match_pair.subpairs); // Move or-patterns to the end. - candidate.match_pairs.sort_by_key(|pair| matches!(pair.test_case, TestCase::Or { .. })); + candidate.sort_match_pairs(); } ret diff --git a/compiler/rustc_mir_build/src/builder/matches/util.rs b/compiler/rustc_mir_build/src/builder/matches/util.rs index 589e350a03f..ed3b652e4ef 100644 --- a/compiler/rustc_mir_build/src/builder/matches/util.rs +++ b/compiler/rustc_mir_build/src/builder/matches/util.rs @@ -173,14 +173,10 @@ impl<'a, 'b, 'tcx> FakeBorrowCollector<'a, 'b, 'tcx> { // } // ``` // Hence we fake borrow using a deep borrow. - if let Some(place) = match_pair.place { - self.fake_borrow(place, FakeBorrowKind::Deep); - } + self.fake_borrow(match_pair.place, FakeBorrowKind::Deep); } else { // Insert a Shallow borrow of any place that is switched on. - if let Some(place) = match_pair.place { - self.fake_borrow(place, FakeBorrowKind::Shallow); - } + self.fake_borrow(match_pair.place, FakeBorrowKind::Shallow); for subpair in &match_pair.subpairs { self.visit_match_pair(subpair); diff --git a/compiler/rustc_mir_build/src/builder/mod.rs b/compiler/rustc_mir_build/src/builder/mod.rs index 94955954934..2909dea98b7 100644 --- a/compiler/rustc_mir_build/src/builder/mod.rs +++ b/compiler/rustc_mir_build/src/builder/mod.rs @@ -37,7 +37,7 @@ pub(crate) fn closure_saved_names_of_captured_variables<'tcx>( .map(|captured_place| { let name = captured_place.to_symbol(); match captured_place.info.capture_kind { - ty::UpvarCapture::ByValue => name, + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => name, ty::UpvarCapture::ByRef(..) => Symbol::intern(&format!("_ref__{name}")), } }) @@ -485,7 +485,7 @@ fn construct_fn<'tcx>( }; if let Some(custom_mir_attr) = - tcx.hir().attrs(fn_id).iter().find(|attr| attr.name_or_empty() == sym::custom_mir) + tcx.hir_attrs(fn_id).iter().find(|attr| attr.name_or_empty() == sym::custom_mir) { return custom::build_custom_mir( tcx, @@ -612,7 +612,8 @@ fn construct_error(tcx: TyCtxt<'_>, def_id: LocalDefId, guar: ErrorGuaranteed) - | DefKind::AssocConst | DefKind::AnonConst | DefKind::InlineConst - | DefKind::Static { .. } => (vec![], tcx.type_of(def_id).instantiate_identity(), None), + | DefKind::Static { .. } + | DefKind::GlobalAsm => (vec![], tcx.type_of(def_id).instantiate_identity(), None), DefKind::Ctor(..) 
| DefKind::Fn | DefKind::AssocFn => { let sig = tcx.liberate_late_bound_regions( def_id.to_def_id(), @@ -740,7 +741,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { coroutine: Option<Box<CoroutineInfo<'tcx>>>, ) -> Builder<'a, 'tcx> { let tcx = infcx.tcx; - let attrs = tcx.hir().attrs(hir_id); + let attrs = tcx.hir_attrs(hir_id); // Some functions always have overflow checks enabled, // however, they may not get codegen'd, depending on // the settings for the crate they are codegened in. @@ -871,7 +872,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { let mut projs = closure_env_projs.clone(); projs.push(ProjectionElem::Field(FieldIdx::new(i), ty)); match capture { - ty::UpvarCapture::ByValue => {} + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => {} ty::UpvarCapture::ByRef(..) => { projs.push(ProjectionElem::Deref); } diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 81561239491..27ff01b4803 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -942,14 +942,13 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { assert_eq!(orig_id.owner, self.hir_id.owner); let mut id = orig_id; - let hir = self.tcx.hir(); loop { if id == self.hir_id { // This is a moderately common case, mostly hit for previously unseen nodes. break; } - if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) { + if self.tcx.hir_attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) { // This is a rare case. It's for a node path that doesn't reach the root due to an // intervening lint level attribute. This result doesn't get cached. return id; diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs index c2eafd0a74e..d78c874c766 100644 --- a/compiler/rustc_mir_build/src/check_unsafety.rs +++ b/compiler/rustc_mir_build/src/check_unsafety.rs @@ -451,6 +451,7 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> { | ExprKind::Tuple { .. } | ExprKind::Unary { .. } | ExprKind::Call { .. } + | ExprKind::ByUse { .. } | ExprKind::Assign { .. } | ExprKind::AssignOp { .. } | ExprKind::Break { .. 
} diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs index f1753be845d..17b22f25dbb 100644 --- a/compiler/rustc_mir_build/src/errors.rs +++ b/compiler/rustc_mir_build/src/errors.rs @@ -828,7 +828,7 @@ pub(crate) struct IrrefutableLetPatternsWhileLet { #[derive(Diagnostic)] #[diag(mir_build_borrow_of_moved_value)] -pub(crate) struct BorrowOfMovedValue { +pub(crate) struct BorrowOfMovedValue<'tcx> { #[primary_span] #[label] #[label(mir_build_occurs_because_label)] @@ -836,7 +836,7 @@ pub(crate) struct BorrowOfMovedValue { #[label(mir_build_value_borrowed_label)] pub(crate) conflicts_ref: Vec<Span>, pub(crate) name: Ident, - pub(crate) ty: String, + pub(crate) ty: Ty<'tcx>, #[suggestion(code = "ref ", applicability = "machine-applicable")] pub(crate) suggest_borrowing: Option<Span>, } diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs index fa5db32d913..a25697ba086 100644 --- a/compiler/rustc_mir_build/src/lib.rs +++ b/compiler/rustc_mir_build/src/lib.rs @@ -3,12 +3,12 @@ // tidy-alphabetical-start #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(assert_matches)] #![feature(box_patterns)] #![feature(if_let_guard)] #![feature(let_chains)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end // The `builder` module used to be named `build`, but that was causing GitHub's diff --git a/compiler/rustc_mir_build/src/thir/cx/expr.rs b/compiler/rustc_mir_build/src/thir/cx/expr.rs index 88877d05ffa..b8af77245f2 100644 --- a/compiler/rustc_mir_build/src/thir/cx/expr.rs +++ b/compiler/rustc_mir_build/src/thir/cx/expr.rs @@ -464,6 +464,10 @@ impl<'tcx> ThirBuildCx<'tcx> { } } + hir::ExprKind::Use(expr, span) => { + ExprKind::ByUse { expr: self.mirror_expr(expr), span } + } + hir::ExprKind::AddrOf(hir::BorrowKind::Ref, mutbl, arg) => { ExprKind::Borrow { borrow_kind: mutbl.to_borrow_kind(), arg: self.mirror_expr(arg) } } @@ -648,7 +652,7 @@ impl<'tcx> ThirBuildCx<'tcx> { } }, - hir::ExprKind::Closure { .. } => { + hir::ExprKind::Closure(hir::Closure { .. 
}) => { let closure_ty = self.typeck_results.expr_ty(expr); let (def_id, args, movability) = match *closure_ty.kind() { ty::Closure(def_id, args) => (def_id, UpvarArgs::Closure(args), None), @@ -730,12 +734,20 @@ impl<'tcx> ThirBuildCx<'tcx> { } } hir::InlineAsmOperand::Const { ref anon_const } => { - let value = - mir::Const::from_unevaluated(tcx, anon_const.def_id.to_def_id()) - .instantiate_identity(); - let span = tcx.def_span(anon_const.def_id); - - InlineAsmOperand::Const { value, span } + let ty = self.typeck_results.node_type(anon_const.hir_id); + let did = anon_const.def_id.to_def_id(); + let typeck_root_def_id = tcx.typeck_root_def_id(did); + let parent_args = tcx.erase_regions(GenericArgs::identity_for_item( + tcx, + typeck_root_def_id, + )); + let args = + InlineConstArgs::new(tcx, InlineConstArgsParts { parent_args, ty }) + .args; + + let uneval = mir::UnevaluatedConst::new(did, args); + let value = mir::Const::Unevaluated(uneval, ty); + InlineAsmOperand::Const { value, span: tcx.def_span(did) } } hir::InlineAsmOperand::SymFn { expr } => { InlineAsmOperand::SymFn { value: self.mirror_expr(expr) } @@ -1029,7 +1041,7 @@ impl<'tcx> ThirBuildCx<'tcx> { "Should have already errored about late bound consts: {def_id:?}" ); }; - let name = self.tcx.hir().name(hir_id); + let name = self.tcx.hir_name(hir_id); let param = ty::ParamConst::new(index, name); ExprKind::ConstParam { param, def_id } @@ -1240,6 +1252,17 @@ impl<'tcx> ThirBuildCx<'tcx> { match upvar_capture { ty::UpvarCapture::ByValue => captured_place_expr, + ty::UpvarCapture::ByUse => { + let span = captured_place_expr.span; + let expr_id = self.thir.exprs.push(captured_place_expr); + + Expr { + temp_lifetime: TempLifetime { temp_lifetime, backwards_incompatible }, + ty: upvar_ty, + span: closure_expr.span, + kind: ExprKind::ByUse { expr: expr_id, span }, + } + } ty::UpvarCapture::ByRef(upvar_borrow) => { let borrow_kind = match upvar_borrow { ty::BorrowKind::Immutable => BorrowKind::Shared, diff --git a/compiler/rustc_mir_build/src/thir/cx/mod.rs b/compiler/rustc_mir_build/src/thir/cx/mod.rs index 2e069cae426..b3daed8a7e0 100644 --- a/compiler/rustc_mir_build/src/thir/cx/mod.rs +++ b/compiler/rustc_mir_build/src/thir/cx/mod.rs @@ -73,7 +73,6 @@ struct ThirBuildCx<'tcx> { impl<'tcx> ThirBuildCx<'tcx> { fn new(tcx: TyCtxt<'tcx>, def: LocalDefId) -> Self { let typeck_results = tcx.typeck(def); - let hir = tcx.hir(); let hir_id = tcx.local_def_id_to_hir_id(def); let body_type = match tcx.hir_body_owner_kind(def) { @@ -111,8 +110,8 @@ impl<'tcx> ThirBuildCx<'tcx> { typeck_results, rvalue_scopes: &typeck_results.rvalue_scopes, body_owner: def.to_def_id(), - apply_adjustments: hir - .attrs(hir_id) + apply_adjustments: tcx + .hir_attrs(hir_id) .iter() .all(|attr| attr.name_or_empty() != rustc_span::sym::custom_mir), } @@ -207,7 +206,7 @@ impl<'tcx> ThirBuildCx<'tcx> { &self, hir_id: HirId, ) -> Option<ty::CanonicalUserType<'tcx>> { - crate::thir::util::user_args_applied_to_ty_of_hir_id(self.typeck_results, hir_id) + crate::thir::util::user_args_applied_to_ty_of_hir_id(self.tcx, self.typeck_results, hir_id) } } diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs index d60ae6484af..cbd29ba837e 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs @@ -345,6 +345,7 @@ impl<'p, 'tcx> MatchVisitor<'p, 'tcx> { | Borrow { .. } | Box { .. } | Call { .. } + | ByUse { .. } | Closure { .. 
} | ConstBlock { .. } | ConstParam { .. } @@ -786,17 +787,13 @@ fn check_borrow_conflicts_in_at_patterns<'tcx>(cx: &MatchVisitor<'_, 'tcx>, pat: } }); if !conflicts_ref.is_empty() { - let mut path = None; - let ty = cx.tcx.short_string(ty, &mut path); - let mut err = sess.dcx().create_err(BorrowOfMovedValue { + sess.dcx().emit_err(BorrowOfMovedValue { binding_span: pat.span, conflicts_ref, name: Ident::new(name, pat.span), ty, suggest_borrowing: Some(pat.span.shrink_to_lo()), }); - *err.long_ty_path() = path; - err.emit(); } return; } @@ -1046,7 +1043,7 @@ fn find_fallback_pattern_typo<'tcx>( for item in cx.tcx.hir_crate_items(()).free_items() { if let DefKind::Use = cx.tcx.def_kind(item.owner_id) { // Look for consts being re-exported. - let item = cx.tcx.hir().expect_item(item.owner_id.def_id); + let item = cx.tcx.hir_expect_item(item.owner_id.def_id); let use_name = item.ident.name; let hir::ItemKind::Use(path, _) = item.kind else { continue; diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index 8dc3f998e09..4bfeab44bf4 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -539,7 +539,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { &self, hir_id: hir::HirId, ) -> Option<ty::CanonicalUserType<'tcx>> { - crate::thir::util::user_args_applied_to_ty_of_hir_id(self.typeck_results, hir_id) + crate::thir::util::user_args_applied_to_ty_of_hir_id(self.tcx, self.typeck_results, hir_id) } /// Takes a HIR Path. If the path is a constant, evaluates it and feeds diff --git a/compiler/rustc_mir_build/src/thir/print.rs b/compiler/rustc_mir_build/src/thir/print.rs index cd56d93afcf..16cef0ec3ac 100644 --- a/compiler/rustc_mir_build/src/thir/print.rs +++ b/compiler/rustc_mir_build/src/thir/print.rs @@ -246,6 +246,13 @@ impl<'a, 'tcx> ThirPrinter<'a, 'tcx> { print_indented!(self, "}", depth_lvl); } + ByUse { expr, span } => { + print_indented!(self, "ByUse {", depth_lvl); + print_indented!(self, "expr:", depth_lvl + 1); + self.print_expr(*expr, depth_lvl + 2); + print_indented!(self, format!("span: {:?}", span), depth_lvl + 1); + print_indented!(self, "}", depth_lvl); + } Deref { arg } => { print_indented!(self, "Deref {", depth_lvl); self.print_expr(*arg, depth_lvl + 1); diff --git a/compiler/rustc_mir_build/src/thir/util.rs b/compiler/rustc_mir_build/src/thir/util.rs index 60a47a94e3a..457957f5fce 100644 --- a/compiler/rustc_mir_build/src/thir/util.rs +++ b/compiler/rustc_mir_build/src/thir/util.rs @@ -1,12 +1,16 @@ +use std::assert_matches::assert_matches; + use rustc_hir as hir; +use rustc_hir::def::DefKind; use rustc_middle::bug; -use rustc_middle::ty::{self, CanonicalUserType}; +use rustc_middle::ty::{self, CanonicalUserType, TyCtxt}; use tracing::debug; /// Looks up the type associated with this hir-id and applies the /// user-given generic parameters; the hir-id must map to a suitable /// type. pub(crate) fn user_args_applied_to_ty_of_hir_id<'tcx>( + tcx: TyCtxt<'tcx>, typeck_results: &ty::TypeckResults<'tcx>, hir_id: hir::HirId, ) -> Option<CanonicalUserType<'tcx>> { @@ -16,7 +20,23 @@ pub(crate) fn user_args_applied_to_ty_of_hir_id<'tcx>( let ty = typeck_results.node_type(hir_id); match ty.kind() { ty::Adt(adt_def, ..) => { + // This "fixes" user type annotations for tupled ctor patterns for ADTs. + // That's because `type_of(ctor_did)` returns a FnDef, but we actually + // want to be annotating the type of the ADT itself. 
It's a bit goofy, + // but it's easier to adjust this here rather than in the path lowering + // code for patterns in HIR. if let ty::UserTypeKind::TypeOf(did, _) = &mut user_ty.value.kind { + // This is either already set up correctly (struct, union, enum, or variant), + // or needs adjusting (ctor). Make sure we don't start adjusting other + // user annotations like consts or fn calls. + assert_matches!( + tcx.def_kind(*did), + DefKind::Ctor(..) + | DefKind::Struct + | DefKind::Enum + | DefKind::Union + | DefKind::Variant + ); *did = adt_def.did(); } Some(user_ty) diff --git a/compiler/rustc_mir_dataflow/src/lib.rs b/compiler/rustc_mir_dataflow/src/lib.rs index a8a56baa1ff..82c57ef5678 100644 --- a/compiler/rustc_mir_dataflow/src/lib.rs +++ b/compiler/rustc_mir_dataflow/src/lib.rs @@ -1,4 +1,5 @@ // tidy-alphabetical-start +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(assert_matches)] #![feature(associated_type_defaults)] #![feature(box_patterns)] @@ -7,7 +8,6 @@ #![feature(let_chains)] #![feature(never_type)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_middle::ty; diff --git a/compiler/rustc_mir_transform/src/add_call_guards.rs b/compiler/rustc_mir_transform/src/add_call_guards.rs index 55694cacd92..bacff287859 100644 --- a/compiler/rustc_mir_transform/src/add_call_guards.rs +++ b/compiler/rustc_mir_transform/src/add_call_guards.rs @@ -32,14 +32,27 @@ pub(super) use self::AddCallGuards::*; impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let mut pred_count: IndexVec<_, _> = - body.basic_blocks.predecessors().iter().map(|ps| ps.len()).collect(); - pred_count[START_BLOCK] += 1; + let mut pred_count = IndexVec::from_elem(0u8, &body.basic_blocks); + for (_, data) in body.basic_blocks.iter_enumerated() { + for succ in data.terminator().successors() { + pred_count[succ] = pred_count[succ].saturating_add(1); + } + } // We need a place to store the new blocks generated let mut new_blocks = Vec::new(); let cur_len = body.basic_blocks.len(); + let mut new_block = |source_info: SourceInfo, is_cleanup: bool, target: BasicBlock| { + let block = BasicBlockData { + statements: vec![], + is_cleanup, + terminator: Some(Terminator { source_info, kind: TerminatorKind::Goto { target } }), + }; + let idx = cur_len + new_blocks.len(); + new_blocks.push(block); + BasicBlock::new(idx) + }; for block in body.basic_blocks_mut() { match block.terminator { @@ -47,25 +60,34 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { kind: TerminatorKind::Call { target: Some(ref mut destination), unwind, .. }, source_info, }) if pred_count[*destination] > 1 - && (matches!( - unwind, - UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) - ) || self == &AllCallEdges) => + && (generates_invoke(unwind) || self == &AllCallEdges) => { // It's a critical edge, break it - let call_guard = BasicBlockData { - statements: vec![], - is_cleanup: block.is_cleanup, - terminator: Some(Terminator { - source_info, - kind: TerminatorKind::Goto { target: *destination }, - }), - }; - - // Get the index it will be when inserted into the MIR - let idx = cur_len + new_blocks.len(); - new_blocks.push(call_guard); - *destination = BasicBlock::new(idx); + *destination = new_block(source_info, block.is_cleanup, *destination); + } + Some(Terminator { + kind: + TerminatorKind::InlineAsm { + asm_macro: InlineAsmMacro::Asm, + ref mut targets, + ref operands, + unwind, + .. 
+ }, + source_info, + }) if self == &CriticalCallEdges => { + let has_outputs = operands.iter().any(|op| { + matches!(op, InlineAsmOperand::InOut { .. } | InlineAsmOperand::Out { .. }) + }); + let has_labels = + operands.iter().any(|op| matches!(op, InlineAsmOperand::Label { .. })); + if has_outputs && (has_labels || generates_invoke(unwind)) { + for target in targets.iter_mut() { + if pred_count[*target] > 1 { + *target = new_block(source_info, block.is_cleanup, *target); + } + } + } } _ => {} } @@ -80,3 +102,11 @@ impl<'tcx> crate::MirPass<'tcx> for AddCallGuards { true } } + +/// Returns true if this unwind action is code generated as an invoke as opposed to a call. +fn generates_invoke(unwind: UnwindAction) -> bool { + match unwind { + UnwindAction::Continue | UnwindAction::Unreachable => false, + UnwindAction::Cleanup(_) | UnwindAction::Terminate(_) => true, + } +} diff --git a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs index 0f5fcb0d8eb..9cd7045a0a2 100644 --- a/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs +++ b/compiler/rustc_mir_transform/src/coroutine/by_move_body.rs @@ -78,7 +78,6 @@ use rustc_middle::hir::place::{Projection, ProjectionKind}; use rustc_middle::mir::visit::MutVisitor; use rustc_middle::mir::{self, dump_mir}; use rustc_middle::ty::{self, InstanceKind, Ty, TyCtxt, TypeVisitableExt}; -use rustc_span::kw; pub(crate) fn coroutine_by_move_body_def_id<'tcx>( tcx: TyCtxt<'tcx>, @@ -170,7 +169,7 @@ pub(crate) fn coroutine_by_move_body_def_id<'tcx>( // this when building the field projection in the MIR body later on. let mut parent_capture_ty = parent_capture.place.ty(); parent_capture_ty = match parent_capture.info.capture_kind { - ty::UpvarCapture::ByValue => parent_capture_ty, + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => parent_capture_ty, ty::UpvarCapture::ByRef(kind) => Ty::new_ref( tcx, tcx.lifetimes.re_erased, @@ -214,7 +213,7 @@ pub(crate) fn coroutine_by_move_body_def_id<'tcx>( MakeByMoveBody { tcx, field_remapping, by_move_coroutine_ty }.visit_body(&mut by_move_body); // This will always be `{closure#1}`, since the original coroutine is `{closure#0}`. - let body_def = tcx.create_def(parent_def_id, kw::Empty, DefKind::SyntheticCoroutineBody); + let body_def = tcx.create_def(parent_def_id, None, DefKind::SyntheticCoroutineBody); by_move_body.source = mir::MirSource::from_instance(InstanceKind::Item(body_def.def_id().to_def_id())); dump_mir(tcx, false, "built", &"after", &by_move_body, |_, _| Ok(())); diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index 049f13ce96d..a0db8bdb7ed 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -80,35 +80,6 @@ impl<'tcx> Visitor<'tcx> for DeduceReadOnly { // `f` passes. Note that function arguments are the only situation in which this problem can // arise: every other use of `move` in MIR doesn't actually write to the value it moves // from. - // - // Anyway, right now this situation doesn't actually arise in practice. Instead, the MIR for - // that function looks like this: - // - // fn f(_1: BigStruct) -> () { - // let mut _0: (); - // let mut _2: BigStruct; - // bb0: { - // _2 = move _1; - // _0 = g(move _2) -> bb1; - // } - // ... - // } - // - // Because of that extra move that MIR construction inserts, `x` (i.e. `_1`) can *in - // practice* safely be marked `readonly`. 
- // - // To handle the possibility that other optimizations (for example, destination propagation) - // might someday generate MIR like the first example above, we panic upon seeing an argument - // to *our* function that is directly moved into *another* function as an argument. Having - // eliminated that problematic case, we can safely treat moves as copies in this analysis. - // - // In the future, if MIR optimizations cause arguments of a caller to be directly moved into - // the argument of a callee, we can just add that argument to `mutated_args` instead of - // panicking. - // - // Note that, because the problematic MIR is never actually generated, we can't add a test - // case for this. - if let TerminatorKind::Call { ref args, .. } = terminator.kind { for arg in args { if let Operand::Move(place) = arg.node { diff --git a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs index 7b3553e7afd..abbff1c48dd 100644 --- a/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs +++ b/compiler/rustc_mir_transform/src/ffi_unwind_calls.rs @@ -53,11 +53,7 @@ fn has_ffi_unwind_calls(tcx: TyCtxt<'_>, local_def_id: LocalDefId) -> bool { // Rust calls cannot themselves create foreign unwinds. // We assume this is true for intrinsics as well. - if let ExternAbi::RustIntrinsic - | ExternAbi::Rust - | ExternAbi::RustCall - | ExternAbi::RustCold = sig.abi() - { + if sig.abi().is_rustic_abi() { continue; }; diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 29521d42720..981dedd5b5c 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -1567,7 +1567,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { BackendRepr::ScalarPair(a, b) => { !a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx) } - BackendRepr::Vector { .. } | BackendRepr::Memory { .. } => false, + BackendRepr::SimdVector { .. } | BackendRepr::Memory { .. } => false, } } diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 5981b5031c6..0183ba19475 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -606,14 +606,14 @@ fn try_inlining<'tcx, I: Inliner<'tcx>>( ty::EarlyBinder::bind(callee_body.clone()), ) else { debug!("failed to normalize callee body"); - return Err("implementation limitation"); + return Err("implementation limitation -- could not normalize callee body"); }; // Normally, this shouldn't be required, but trait normalization failure can create a // validation ICE. if !validate_types(tcx, inliner.typing_env(), &callee_body, &caller_body).is_empty() { debug!("failed to validate callee body"); - return Err("implementation limitation"); + return Err("implementation limitation -- callee body failed validation"); } // Check call signature compatibility. @@ -622,17 +622,9 @@ fn try_inlining<'tcx, I: Inliner<'tcx>>( let output_type = callee_body.return_ty(); if !util::sub_types(tcx, inliner.typing_env(), output_type, destination_ty) { trace!(?output_type, ?destination_ty); - debug!("failed to normalize return type"); - return Err("implementation limitation"); + return Err("implementation limitation -- return type mismatch"); } if callsite.fn_sig.abi() == ExternAbi::RustCall { - // FIXME: Don't inline user-written `extern "rust-call"` functions, - // since this is generally perf-negative on rustc, and we hope that - // LLVM will inline these functions instead. 
- if callee_body.spread_arg.is_some() { - return Err("user-written rust-call functions"); - } - let (self_arg, arg_tuple) = match &args[..] { [arg_tuple] => (None, arg_tuple), [self_arg, arg_tuple] => (Some(self_arg), arg_tuple), @@ -642,12 +634,17 @@ fn try_inlining<'tcx, I: Inliner<'tcx>>( let self_arg_ty = self_arg.map(|self_arg| self_arg.node.ty(&caller_body.local_decls, tcx)); let arg_tuple_ty = arg_tuple.node.ty(&caller_body.local_decls, tcx); - let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else { - bug!("Closure arguments are not passed as a tuple"); + let arg_tys = if callee_body.spread_arg.is_some() { + std::slice::from_ref(&arg_tuple_ty) + } else { + let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else { + bug!("Closure arguments are not passed as a tuple"); + }; + arg_tuple_tys.as_slice() }; for (arg_ty, input) in - self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter()) + self_arg_ty.into_iter().chain(arg_tys.iter().copied()).zip(callee_body.args_iter()) { let input_type = callee_body.local_decls[input].ty; if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) { @@ -663,7 +660,7 @@ fn try_inlining<'tcx, I: Inliner<'tcx>>( if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) { trace!(?arg_ty, ?input_type); debug!("failed to normalize argument type"); - return Err("implementation limitation"); + return Err("implementation limitation -- arg mismatch"); } } } @@ -693,13 +690,13 @@ fn check_mir_is_available<'tcx, I: Inliner<'tcx>>( // won't cause cycles on this. if !inliner.tcx().is_mir_available(callee_def_id) { debug!("item MIR unavailable"); - return Err("implementation limitation"); + return Err("implementation limitation -- MIR unavailable"); } } // These have no own callable MIR. InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => { debug!("instance without MIR (intrinsic / virtual)"); - return Err("implementation limitation"); + return Err("implementation limitation -- cannot inline intrinsic"); } // FIXME(#127030): `ConstParamHasTy` has bad interactions with @@ -709,7 +706,7 @@ fn check_mir_is_available<'tcx, I: Inliner<'tcx>>( // substituted. InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => { debug!("still needs substitution"); - return Err("implementation limitation"); + return Err("implementation limitation -- HACK for dropping polymorphic type"); } // This cannot result in an immediate cycle since the callee MIR is a shim, which does @@ -1060,8 +1057,7 @@ fn make_call_args<'tcx, I: Inliner<'tcx>>( closure_ref_arg.chain(tuple_tmp_args).collect() } else { - // FIXME(edition_2024): switch back to a normal method call. 
- <_>::into_iter(args) + args.into_iter() .map(|a| create_temp_if_necessary(inliner, a.node, callsite, caller_body, return_block)) .collect() } diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 04c9375b831..205d388f4fb 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -1,4 +1,5 @@ // tidy-alphabetical-start +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(array_windows)] #![feature(assert_matches)] #![feature(box_patterns)] @@ -12,7 +13,6 @@ #![feature(never_type)] #![feature(try_blocks)] #![feature(yeet_expr)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use hir::ConstContext; @@ -316,6 +316,10 @@ fn mir_keys(tcx: TyCtxt<'_>, (): ()) -> FxIndexSet<LocalDefId> { // All body-owners have MIR associated with them. let mut set: FxIndexSet<_> = tcx.hir_body_owners().collect(); + // Remove the fake bodies for `global_asm!`, since they're not useful + // to be emitted (`--emit=mir`) or encoded (in metadata). + set.retain(|&def_id| !matches!(tcx.def_kind(def_id), DefKind::GlobalAsm)); + // Coroutine-closures (e.g. async closures) have an additional by-move MIR // body that isn't in the HIR. for body_owner in tcx.hir_body_owners() { @@ -513,6 +517,24 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & body.tainted_by_errors = Some(error_reported); } + // Also taint the body if it's within a top-level item that is not well formed. + // + // We do this check here and not during `mir_promoted` because that may result + // in borrowck cycles if WF requires looking into an opaque hidden type. + let root = tcx.typeck_root_def_id(def.to_def_id()); + match tcx.def_kind(root) { + DefKind::Fn + | DefKind::AssocFn + | DefKind::Static { .. 
} + | DefKind::Const + | DefKind::AssocConst => { + if let Err(guar) = tcx.check_well_formed(root.expect_local()) { + body.tainted_by_errors = Some(guar); + } + } + _ => {} + } + run_analysis_to_runtime_passes(tcx, &mut body); tcx.alloc_steal_mir(body) diff --git a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs index e3260e45bc5..7d77fffa83f 100644 --- a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs +++ b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs @@ -6,7 +6,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_errors::Subdiagnostic; use rustc_hir::CRATE_HIR_ID; -use rustc_hir::def_id::{DefId, LocalDefId}; +use rustc_hir::def_id::LocalDefId; use rustc_index::bit_set::MixedBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_macros::{LintDiagnostic, Subdiagnostic}; @@ -15,7 +15,10 @@ use rustc_middle::mir::{ self, BasicBlock, Body, ClearCrossCrate, Local, Location, Place, StatementKind, TerminatorKind, dump_mir, }; -use rustc_middle::ty::{self, Ty, TyCtxt}; +use rustc_middle::ty::significant_drop_order::{ + extract_component_with_significant_dtor, ty_dtor_span, +}; +use rustc_middle::ty::{self, TyCtxt}; use rustc_mir_dataflow::impls::MaybeInitializedPlaces; use rustc_mir_dataflow::move_paths::{LookupResult, MoveData, MovePathIndex}; use rustc_mir_dataflow::{Analysis, MaybeReachable, ResultsCursor}; @@ -23,8 +26,7 @@ use rustc_session::lint::builtin::TAIL_EXPR_DROP_ORDER; use rustc_session::lint::{self}; use rustc_span::{DUMMY_SP, Span, Symbol}; use rustc_type_ir::data_structures::IndexMap; -use smallvec::{SmallVec, smallvec}; -use tracing::{debug, instrument}; +use tracing::debug; fn place_has_common_prefix<'tcx>(left: &Place<'tcx>, right: &Place<'tcx>) -> bool { left.local == right.local @@ -155,170 +157,6 @@ impl<'a, 'mir, 'tcx> DropsReachable<'a, 'mir, 'tcx> { } } -/// An additional filter to exclude well-known types from the ecosystem -/// because their drops are trivial. -/// This returns additional types to check if the drops are delegated to those. -/// A typical example is `hashbrown::HashMap<K, V>`, whose drop is delegated to `K` and `V`. -fn true_significant_drop_ty<'tcx>( - tcx: TyCtxt<'tcx>, - ty: Ty<'tcx>, -) -> Option<SmallVec<[Ty<'tcx>; 2]>> { - if let ty::Adt(def, args) = ty.kind() { - let mut did = def.did(); - let mut name_rev = vec![]; - loop { - let key = tcx.def_key(did); - - match key.disambiguated_data.data { - rustc_hir::definitions::DefPathData::CrateRoot => { - name_rev.push(tcx.crate_name(did.krate)) - } - rustc_hir::definitions::DefPathData::TypeNs(symbol) => name_rev.push(symbol), - _ => return None, - } - if let Some(parent) = key.parent { - did = DefId { krate: did.krate, index: parent }; - } else { - break; - } - } - let name_str: Vec<_> = name_rev.iter().rev().map(|x| x.as_str()).collect(); - debug!(?name_str); - match name_str[..] { - // These are the types from Rust core ecosystem - ["syn" | "proc_macro2", ..] - | ["core" | "std", "task", "LocalWaker" | "Waker"] - | ["core" | "std", "task", "wake", "LocalWaker" | "Waker"] => Some(smallvec![]), - // These are important types from Rust ecosystem - ["tracing", "instrument", "Instrumented"] | ["bytes", "Bytes"] => Some(smallvec![]), - ["hashbrown", "raw", "RawTable" | "RawIntoIter"] => { - if let [ty, ..] 
= &***args - && let Some(ty) = ty.as_type() - { - Some(smallvec![ty]) - } else { - None - } - } - ["hashbrown", "raw", "RawDrain"] => { - if let [_, ty, ..] = &***args - && let Some(ty) = ty.as_type() - { - Some(smallvec![ty]) - } else { - None - } - } - _ => None, - } - } else { - None - } -} - -/// Returns the list of types with a "potentially sigificant" that may be dropped -/// by dropping a value of type `ty`. -#[instrument(level = "debug", skip(tcx, typing_env))] -fn extract_component_raw<'tcx>( - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ty: Ty<'tcx>, - ty_seen: &mut UnordSet<Ty<'tcx>>, -) -> SmallVec<[Ty<'tcx>; 4]> { - // Droppiness does not depend on regions, so let us erase them. - let ty = tcx.try_normalize_erasing_regions(typing_env, ty).unwrap_or(ty); - - let tys = tcx.list_significant_drop_tys(typing_env.as_query_input(ty)); - debug!(?ty, "components"); - let mut out_tys = smallvec![]; - for ty in tys { - if let Some(tys) = true_significant_drop_ty(tcx, ty) { - // Some types can be further opened up because the drop is simply delegated - for ty in tys { - if ty_seen.insert(ty) { - out_tys.extend(extract_component_raw(tcx, typing_env, ty, ty_seen)); - } - } - } else { - if ty_seen.insert(ty) { - out_tys.push(ty); - } - } - } - out_tys -} - -#[instrument(level = "debug", skip(tcx, typing_env))] -fn extract_component_with_significant_dtor<'tcx>( - tcx: TyCtxt<'tcx>, - typing_env: ty::TypingEnv<'tcx>, - ty: Ty<'tcx>, -) -> SmallVec<[Ty<'tcx>; 4]> { - let mut tys = extract_component_raw(tcx, typing_env, ty, &mut Default::default()); - let mut deduplicate = FxHashSet::default(); - tys.retain(|oty| deduplicate.insert(*oty)); - tys.into_iter().collect() -} - -/// Extract the span of the custom destructor of a type -/// especially the span of the `impl Drop` header or its entire block -/// when we are working with current local crate. -#[instrument(level = "debug", skip(tcx))] -fn ty_dtor_span<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Span> { - match ty.kind() { - ty::Bool - | ty::Char - | ty::Int(_) - | ty::Uint(_) - | ty::Float(_) - | ty::Error(_) - | ty::Str - | ty::Never - | ty::RawPtr(_, _) - | ty::Ref(_, _, _) - | ty::FnPtr(_, _) - | ty::Tuple(_) - | ty::Dynamic(_, _, _) - | ty::Alias(_, _) - | ty::Bound(_, _) - | ty::Pat(_, _) - | ty::Placeholder(_) - | ty::Infer(_) - | ty::Slice(_) - | ty::Array(_, _) - | ty::UnsafeBinder(_) => None, - - ty::Adt(adt_def, _) => { - let did = adt_def.did(); - let try_local_did_span = |did: DefId| { - if let Some(local) = did.as_local() { - tcx.source_span(local) - } else { - tcx.def_span(did) - } - }; - let dtor = if let Some(dtor) = tcx.adt_destructor(did) { - dtor.did - } else if let Some(dtor) = tcx.adt_async_destructor(did) { - dtor.future - } else { - return Some(try_local_did_span(did)); - }; - let def_key = tcx.def_key(dtor); - let Some(parent_index) = def_key.parent else { return Some(try_local_did_span(dtor)) }; - let parent_did = DefId { index: parent_index, krate: dtor.krate }; - Some(try_local_did_span(parent_did)) - } - ty::Coroutine(did, _) - | ty::CoroutineWitness(did, _) - | ty::CoroutineClosure(did, _) - | ty::Closure(did, _) - | ty::FnDef(did, _) - | ty::Foreign(did) => Some(tcx.def_span(did)), - ty::Param(_) => None, - } -} - /// Check if a moved place at `idx` is a part of a BID. /// The use of this check is that we will consider drops on these /// as a drop of the overall BID and, thus, we can exclude it from the diagnosis. 
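The deleted `extract_component_with_significant_dtor` above (now provided by `rustc_middle::ty::significant_drop_order`) relies on an order-preserving deduplication idiom via `retain` plus a hash set. A minimal standalone illustration of that idiom, using only std types:

    use std::collections::HashSet;

    // Keep the first occurrence of each element while preserving the original order.
    fn dedup_preserving_order<T: std::hash::Hash + Eq + Copy>(items: &mut Vec<T>) {
        let mut seen = HashSet::new();
        items.retain(|item| seen.insert(*item));
    }

    fn main() {
        let mut tys = vec!["Vec<u8>", "String", "Vec<u8>", "MutexGuard"];
        dedup_preserving_order(&mut tys);
        assert_eq!(tys, ["Vec<u8>", "String", "MutexGuard"]);
    }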
diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs
index d7cb8f18f82..67fca1d7c29 100644
--- a/compiler/rustc_monomorphize/src/collector.rs
+++ b/compiler/rustc_monomorphize/src/collector.rs
@@ -209,7 +209,7 @@ use std::path::PathBuf;
 
 use rustc_attr_parsing::InlineAttr;
 use rustc_data_structures::fx::FxIndexMap;
-use rustc_data_structures::sync::{LRef, MTLock, par_for_each_in};
+use rustc_data_structures::sync::{MTLock, par_for_each_in};
 use rustc_data_structures::unord::{UnordMap, UnordSet};
 use rustc_hir as hir;
 use rustc_hir::def::DefKind;
@@ -357,7 +357,7 @@ impl<'tcx> Extend<Spanned<MonoItem<'tcx>>> for MonoItems<'tcx> {
 fn collect_items_rec<'tcx>(
     tcx: TyCtxt<'tcx>,
     starting_item: Spanned<MonoItem<'tcx>>,
-    state: LRef<'_, SharedState<'tcx>>,
+    state: &SharedState<'tcx>,
     recursion_depths: &mut DefIdMap<usize>,
     recursion_limit: Limit,
     mode: CollectionMode,
@@ -751,7 +751,7 @@ impl<'a, 'tcx> MirVisitor<'tcx> for MirUsedCollector<'a, 'tcx> {
     /// This does not walk the MIR of the constant as that is not needed for codegen, all we need is
     /// to ensure that the constant evaluates successfully and walk the result.
     #[instrument(skip(self), level = "debug")]
-    fn visit_const_operand(&mut self, constant: &mir::ConstOperand<'tcx>, location: Location) {
+    fn visit_const_operand(&mut self, constant: &mir::ConstOperand<'tcx>, _location: Location) {
         // No `super_constant` as we don't care about `visit_ty`/`visit_ty_const`.
         let Some(val) = self.eval_constant(constant) else { return };
         collect_const_value(self.tcx, val, self.used_items);
@@ -1043,18 +1043,7 @@ fn find_vtable_types_for_unsizing<'tcx>(
 ) -> (Ty<'tcx>, Ty<'tcx>) {
     let ptr_vtable = |inner_source: Ty<'tcx>, inner_target: Ty<'tcx>| {
         let typing_env = ty::TypingEnv::fully_monomorphized();
-        let type_has_metadata = |ty: Ty<'tcx>| -> bool {
-            if ty.is_sized(tcx.tcx, typing_env) {
-                return false;
-            }
-            let tail = tcx.struct_tail_for_codegen(ty, typing_env);
-            match tail.kind() {
-                ty::Foreign(..) => false,
-                ty::Str | ty::Slice(..) | ty::Dynamic(..) => true,
-                _ => bug!("unexpected unsized tail: {:?}", tail),
-            }
-        };
-        if type_has_metadata(inner_source) {
+        if tcx.type_has_metadata(inner_source, typing_env) {
             (inner_source, inner_target)
         } else {
             tcx.struct_lockstep_tails_for_codegen(inner_source, inner_target, typing_env)
@@ -1682,30 +1671,26 @@ pub(crate) fn collect_crate_mono_items<'tcx>(
 
     debug!("building mono item graph, beginning at roots");
 
-    let mut state = SharedState {
+    let state = SharedState {
         visited: MTLock::new(UnordSet::default()),
         mentioned: MTLock::new(UnordSet::default()),
         usage_map: MTLock::new(UsageMap::new()),
     };
     let recursion_limit = tcx.recursion_limit();
 
-    {
-        let state: LRef<'_, _> = &mut state;
-
-        tcx.sess.time("monomorphization_collector_graph_walk", || {
-            par_for_each_in(roots, |root| {
-                let mut recursion_depths = DefIdMap::default();
-                collect_items_rec(
-                    tcx,
-                    dummy_spanned(root),
-                    state,
-                    &mut recursion_depths,
-                    recursion_limit,
-                    CollectionMode::UsedItems,
-                );
-            });
+    tcx.sess.time("monomorphization_collector_graph_walk", || {
+        par_for_each_in(roots, |root| {
+            let mut recursion_depths = DefIdMap::default();
+            collect_items_rec(
+                tcx,
+                dummy_spanned(root),
+                &state,
+                &mut recursion_depths,
+                recursion_limit,
+                CollectionMode::UsedItems,
+            );
         });
-    }
+    });
 
     // The set of MonoItems was created in an inherently indeterministic order because
     // of parallelism. We sort it here to ensure that the output is deterministic.
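A minimal sketch of the concurrency shape the collector now uses, with std types standing in for `MTLock`, `par_for_each_in`, and the real `SharedState` (the names below are illustrative assumptions, not rustc's API): the state is handed around as a plain `&SharedState`, and each field supplies its own interior mutability, so the removed `LRef` mutable-alias wrapper is no longer needed.

    use std::collections::HashSet;
    use std::sync::Mutex;

    struct SharedState {
        // Each field guards itself, so workers only ever need `&SharedState`.
        visited: Mutex<HashSet<u32>>,
    }

    fn collect_items_rec(state: &SharedState, item: u32) {
        if !state.visited.lock().unwrap().insert(item) {
            return; // already collected by this or another worker
        }
        // ... recurse into neighbors, still only needing `&SharedState` ...
    }

    fn main() {
        let state = SharedState { visited: Mutex::new(HashSet::new()) };
        let state_ref = &state;
        std::thread::scope(|s| {
            for root in [1u32, 2, 3] {
                s.spawn(move || collect_items_rec(state_ref, root));
            }
        });
        assert_eq!(state.visited.lock().unwrap().len(), 3);
    }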
diff --git a/compiler/rustc_monomorphize/src/lib.rs b/compiler/rustc_monomorphize/src/lib.rs index 714b64b3a23..5dbae50c499 100644 --- a/compiler/rustc_monomorphize/src/lib.rs +++ b/compiler/rustc_monomorphize/src/lib.rs @@ -1,10 +1,10 @@ // tidy-alphabetical-start +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(array_windows)] #![feature(file_buffered)] #![feature(if_let_guard)] #![feature(impl_trait_in_assoc_type)] #![feature(let_chains)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_hir::lang_items::LangItem; diff --git a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs index 4c8dd933317..06d6c3ab805 100644 --- a/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs +++ b/compiler/rustc_monomorphize/src/mono_checks/abi_check.rs @@ -1,6 +1,6 @@ //! This module ensures that if a function's ABI requires a particular target feature, //! that target feature is enabled both on the callee and all callers. -use rustc_abi::{BackendRepr, ExternAbi, RegKind}; +use rustc_abi::{BackendRepr, RegKind}; use rustc_hir::CRATE_HIR_ID; use rustc_middle::mir::{self, traversal}; use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TyCtxt}; @@ -18,7 +18,7 @@ fn uses_vector_registers(mode: &PassMode, repr: &BackendRepr) -> bool { cast.prefix.iter().any(|r| r.is_some_and(|x| x.kind == RegKind::Vector)) || cast.rest.unit.kind == RegKind::Vector } - PassMode::Direct(..) | PassMode::Pair(..) => matches!(repr, BackendRepr::Vector { .. }), + PassMode::Direct(..) | PassMode::Pair(..) => matches!(repr, BackendRepr::SimdVector { .. }), } } @@ -115,8 +115,8 @@ fn check_call_site_abi<'tcx>( span: Span, caller: InstanceKind<'tcx>, ) { - if callee.fn_sig(tcx).abi() == ExternAbi::Rust { - // "Rust" ABI never passes arguments in vector registers. + if callee.fn_sig(tcx).abi().is_rustic_abi() { + // we directly handle the soundness of Rust ABIs return; } let typing_env = ty::TypingEnv::fully_monomorphized(); diff --git a/compiler/rustc_monomorphize/src/partitioning/autodiff.rs b/compiler/rustc_monomorphize/src/partitioning/autodiff.rs index 0c855508434..ebe0b258c1b 100644 --- a/compiler/rustc_monomorphize/src/partitioning/autodiff.rs +++ b/compiler/rustc_monomorphize/src/partitioning/autodiff.rs @@ -12,21 +12,15 @@ fn adjust_activity_to_abi<'tcx>(tcx: TyCtxt<'tcx>, fn_ty: Ty<'tcx>, da: &mut Vec if !matches!(fn_ty.kind(), ty::FnDef(..)) { bug!("expected fn def for autodiff, got {:?}", fn_ty); } - let fnc_binder: ty::Binder<'_, ty::FnSig<'_>> = fn_ty.fn_sig(tcx); - // If rustc compiles the unmodified primal, we know that this copy of the function - // also has correct lifetimes. We know that Enzyme won't free the shadow too early - // (or actually at all), so let's strip lifetimes when computing the layout. - let x = tcx.instantiate_bound_regions_with_erased(fnc_binder); + // We don't actually pass the types back into the type system. + // All we do is decide how to handle the arguments. + let sig = fn_ty.fn_sig(tcx).skip_binder(); + let mut new_activities = vec![]; let mut new_positions = vec![]; - for (i, ty) in x.inputs().iter().enumerate() { + for (i, ty) in sig.inputs().iter().enumerate() { if let Some(inner_ty) = ty.builtin_deref(true) { - if ty.is_fn_ptr() { - // FIXME(ZuseZ4): add a nicer error, or just figure out how to support them, - // since Enzyme itself can handle them. 
- tcx.dcx().err("function pointers are currently not supported in autodiff"); - } if inner_ty.is_slice() { // We know that the length will be passed as extra arg. if !da.is_empty() { diff --git a/compiler/rustc_next_trait_solver/src/delegate.rs b/compiler/rustc_next_trait_solver/src/delegate.rs index a64941cce3a..850d86d91e8 100644 --- a/compiler/rustc_next_trait_solver/src/delegate.rs +++ b/compiler/rustc_next_trait_solver/src/delegate.rs @@ -102,7 +102,6 @@ pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized { fn is_transmutable( &self, - param_env: <Self::Interner as Interner>::ParamEnv, dst: <Self::Interner as Interner>::Ty, src: <Self::Interner as Interner>::Ty, assume: <Self::Interner as Interner>::Const, diff --git a/compiler/rustc_next_trait_solver/src/lib.rs b/compiler/rustc_next_trait_solver/src/lib.rs index d67ae2550d9..f6963a79067 100644 --- a/compiler/rustc_next_trait_solver/src/lib.rs +++ b/compiler/rustc_next_trait_solver/src/lib.rs @@ -6,7 +6,6 @@ // tidy-alphabetical-start #![allow(rustc::usage_of_type_ir_inherent)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod canonicalizer; diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs index dc0f4c4483e..93804b14125 100644 --- a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs +++ b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs @@ -18,7 +18,7 @@ use crate::solve::{AdtDestructorKind, EvalCtxt, Goal, NoSolution}; pub(in crate::solve) fn instantiate_constituent_tys_for_auto_trait<D, I>( ecx: &EvalCtxt<'_, D>, ty: I::Ty, -) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution> +) -> Result<ty::Binder<I, Vec<I::Ty>>, NoSolution> where D: SolverDelegate<Interner = I>, I: Interner, @@ -33,10 +33,10 @@ where | ty::FnPtr(..) | ty::Error(_) | ty::Never - | ty::Char => Ok(vec![]), + | ty::Char => Ok(ty::Binder::dummy(vec![])), // Treat `str` like it's defined as `struct str([u8]);` - ty::Str => Ok(vec![ty::Binder::dummy(Ty::new_slice(cx, Ty::new_u8(cx)))]), + ty::Str => Ok(ty::Binder::dummy(vec![Ty::new_slice(cx, Ty::new_u8(cx))])), ty::Dynamic(..) | ty::Param(..) 
@@ -49,53 +49,49 @@ where } ty::RawPtr(element_ty, _) | ty::Ref(_, element_ty, _) => { - Ok(vec![ty::Binder::dummy(element_ty)]) + Ok(ty::Binder::dummy(vec![element_ty])) } ty::Pat(element_ty, _) | ty::Array(element_ty, _) | ty::Slice(element_ty) => { - Ok(vec![ty::Binder::dummy(element_ty)]) + Ok(ty::Binder::dummy(vec![element_ty])) } ty::Tuple(tys) => { // (T1, ..., Tn) -- meets any bound that all of T1...Tn meet - Ok(tys.iter().map(ty::Binder::dummy).collect()) + Ok(ty::Binder::dummy(tys.to_vec())) } - ty::Closure(_, args) => Ok(vec![ty::Binder::dummy(args.as_closure().tupled_upvars_ty())]), + ty::Closure(_, args) => Ok(ty::Binder::dummy(vec![args.as_closure().tupled_upvars_ty()])), ty::CoroutineClosure(_, args) => { - Ok(vec![ty::Binder::dummy(args.as_coroutine_closure().tupled_upvars_ty())]) + Ok(ty::Binder::dummy(vec![args.as_coroutine_closure().tupled_upvars_ty()])) } ty::Coroutine(_, args) => { let coroutine_args = args.as_coroutine(); - Ok(vec![ - ty::Binder::dummy(coroutine_args.tupled_upvars_ty()), - ty::Binder::dummy(coroutine_args.witness()), - ]) + Ok(ty::Binder::dummy(vec![coroutine_args.tupled_upvars_ty(), coroutine_args.witness()])) } ty::CoroutineWitness(def_id, args) => Ok(ecx .cx() - .bound_coroutine_hidden_types(def_id) - .into_iter() - .map(|bty| bty.instantiate(cx, args)) - .collect()), + .coroutine_hidden_types(def_id) + .instantiate(cx, args) + .map_bound(|tys| tys.to_vec())), - ty::UnsafeBinder(bound_ty) => Ok(vec![bound_ty.into()]), + ty::UnsafeBinder(bound_ty) => Ok(bound_ty.map_bound(|ty| vec![ty])), // For `PhantomData<T>`, we pass `T`. - ty::Adt(def, args) if def.is_phantom_data() => Ok(vec![ty::Binder::dummy(args.type_at(0))]), + ty::Adt(def, args) if def.is_phantom_data() => Ok(ty::Binder::dummy(vec![args.type_at(0)])), ty::Adt(def, args) => { - Ok(def.all_field_tys(cx).iter_instantiated(cx, args).map(ty::Binder::dummy).collect()) + Ok(ty::Binder::dummy(def.all_field_tys(cx).iter_instantiated(cx, args).collect())) } ty::Alias(ty::Opaque, ty::AliasTy { def_id, args, .. }) => { // We can resolve the `impl Trait` to its concrete type, // which enforces a DAG between the functions requiring // the auto trait bounds in question. - Ok(vec![ty::Binder::dummy(cx.type_of(def_id).instantiate(cx, args))]) + Ok(ty::Binder::dummy(vec![cx.type_of(def_id).instantiate(cx, args)])) } } } @@ -104,7 +100,7 @@ where pub(in crate::solve) fn instantiate_constituent_tys_for_sized_trait<D, I>( ecx: &EvalCtxt<'_, D>, ty: I::Ty, -) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution> +) -> Result<ty::Binder<I, Vec<I::Ty>>, NoSolution> where D: SolverDelegate<Interner = I>, I: Interner, @@ -130,7 +126,7 @@ where | ty::CoroutineClosure(..) | ty::Never | ty::Dynamic(_, _, ty::DynStar) - | ty::Error(_) => Ok(vec![]), + | ty::Error(_) => Ok(ty::Binder::dummy(vec![])), ty::Str | ty::Slice(_) @@ -145,11 +141,11 @@ where panic!("unexpected type `{ty:?}`") } - ty::UnsafeBinder(bound_ty) => Ok(vec![bound_ty.into()]), + ty::UnsafeBinder(bound_ty) => Ok(bound_ty.map_bound(|ty| vec![ty])), // impl Sized for () // impl Sized for (T1, T2, .., Tn) where Tn: Sized if n >= 1 - ty::Tuple(tys) => Ok(tys.last().map_or_else(Vec::new, |ty| vec![ty::Binder::dummy(ty)])), + ty::Tuple(tys) => Ok(ty::Binder::dummy(tys.last().map_or_else(Vec::new, |ty| vec![ty]))), // impl Sized for Adt<Args...> where sized_constraint(Adt)<Args...>: Sized // `sized_constraint(Adt)` is the deepest struct trail that can be determined @@ -162,9 +158,9 @@ where // if the ADT is sized for all possible args. 
ty::Adt(def, args) => { if let Some(sized_crit) = def.sized_constraint(ecx.cx()) { - Ok(vec![ty::Binder::dummy(sized_crit.instantiate(ecx.cx(), args))]) + Ok(ty::Binder::dummy(vec![sized_crit.instantiate(ecx.cx(), args)])) } else { - Ok(vec![]) + Ok(ty::Binder::dummy(vec![])) } } } @@ -174,14 +170,14 @@ where pub(in crate::solve) fn instantiate_constituent_tys_for_copy_clone_trait<D, I>( ecx: &EvalCtxt<'_, D>, ty: I::Ty, -) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution> +) -> Result<ty::Binder<I, Vec<I::Ty>>, NoSolution> where D: SolverDelegate<Interner = I>, I: Interner, { match ty.kind() { // impl Copy/Clone for FnDef, FnPtr - ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Ok(vec![]), + ty::FnDef(..) | ty::FnPtr(..) | ty::Error(_) => Ok(ty::Binder::dummy(vec![])), // Implementations are provided in core ty::Uint(_) @@ -197,7 +193,7 @@ where // Cannot implement in core, as we can't be generic over patterns yet, // so we'd have to list all patterns and type combinations. - ty::Pat(ty, ..) => Ok(vec![ty::Binder::dummy(ty)]), + ty::Pat(ty, ..) => Ok(ty::Binder::dummy(vec![ty])), ty::Dynamic(..) | ty::Str @@ -215,14 +211,14 @@ where } // impl Copy/Clone for (T1, T2, .., Tn) where T1: Copy/Clone, T2: Copy/Clone, .. Tn: Copy/Clone - ty::Tuple(tys) => Ok(tys.iter().map(ty::Binder::dummy).collect()), + ty::Tuple(tys) => Ok(ty::Binder::dummy(tys.to_vec())), // impl Copy/Clone for Closure where Self::TupledUpvars: Copy/Clone - ty::Closure(_, args) => Ok(vec![ty::Binder::dummy(args.as_closure().tupled_upvars_ty())]), + ty::Closure(_, args) => Ok(ty::Binder::dummy(vec![args.as_closure().tupled_upvars_ty()])), // impl Copy/Clone for CoroutineClosure where Self::TupledUpvars: Copy/Clone ty::CoroutineClosure(_, args) => { - Ok(vec![ty::Binder::dummy(args.as_coroutine_closure().tupled_upvars_ty())]) + Ok(ty::Binder::dummy(vec![args.as_coroutine_closure().tupled_upvars_ty()])) } // only when `coroutine_clone` is enabled and the coroutine is movable @@ -232,10 +228,7 @@ where Movability::Movable => { if ecx.cx().features().coroutine_clone() { let coroutine = args.as_coroutine(); - Ok(vec![ - ty::Binder::dummy(coroutine.tupled_upvars_ty()), - ty::Binder::dummy(coroutine.witness()), - ]) + Ok(ty::Binder::dummy(vec![coroutine.tupled_upvars_ty(), coroutine.witness()])) } else { Err(NoSolution) } @@ -247,10 +240,9 @@ where // impl Copy/Clone for CoroutineWitness where T: Copy/Clone forall T in coroutine_hidden_types ty::CoroutineWitness(def_id, args) => Ok(ecx .cx() - .bound_coroutine_hidden_types(def_id) - .into_iter() - .map(|bty| bty.instantiate(ecx.cx(), args)) - .collect()), + .coroutine_hidden_types(def_id) + .instantiate(ecx.cx(), args) + .map_bound(|tys| tys.to_vec())), } } diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs index 2491f09a0e2..ce53a3968c7 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/canonical.rs @@ -21,7 +21,7 @@ use tracing::{debug, instrument, trace}; use crate::canonicalizer::Canonicalizer; use crate::delegate::SolverDelegate; use crate::resolve::EagerResolver; -use crate::solve::eval_ctxt::NestedGoals; +use crate::solve::eval_ctxt::{CurrentGoalKind, NestedGoals}; use crate::solve::{ CanonicalInput, CanonicalResponse, Certainty, EvalCtxt, ExternalConstraintsData, Goal, MaybeCause, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput, @@ -109,18 +109,22 @@ where // // As we 
return all ambiguous nested goals, we can ignore the certainty returned // by `try_evaluate_added_goals()`. - let (certainty, normalization_nested_goals) = if self.is_normalizes_to_goal { - let NestedGoals { normalizes_to_goals, goals } = std::mem::take(&mut self.nested_goals); - if cfg!(debug_assertions) { - assert!(normalizes_to_goals.is_empty()); - if goals.is_empty() { - assert!(matches!(goals_certainty, Certainty::Yes)); + let (certainty, normalization_nested_goals) = match self.current_goal_kind { + CurrentGoalKind::NormalizesTo => { + let NestedGoals { normalizes_to_goals, goals } = + std::mem::take(&mut self.nested_goals); + if cfg!(debug_assertions) { + assert!(normalizes_to_goals.is_empty()); + if goals.is_empty() { + assert!(matches!(goals_certainty, Certainty::Yes)); + } } + (certainty, NestedNormalizationGoals(goals)) + } + CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => { + let certainty = certainty.unify_with(goals_certainty); + (certainty, NestedNormalizationGoals::empty()) } - (certainty, NestedNormalizationGoals(goals)) - } else { - let certainty = certainty.unify_with(goals_certainty); - (certainty, NestedNormalizationGoals::empty()) }; if let Certainty::Maybe(cause @ MaybeCause::Overflow { .. }) = certainty { @@ -163,19 +167,24 @@ where // ambiguous alias types which get replaced with fresh inference variables // during generalization. This prevents hangs caused by an exponential blowup, // see tests/ui/traits/next-solver/coherence-alias-hang.rs. - // - // We don't do so for `NormalizesTo` goals as we erased the expected term and - // bailing with overflow here would prevent us from detecting a type-mismatch, - // causing a coherence error in diesel, see #131969. We still bail with overflow - // when later returning from the parent AliasRelate goal. - if !self.is_normalizes_to_goal { - let num_non_region_vars = - canonical.variables.iter().filter(|c| !c.is_region() && c.is_existential()).count(); - if num_non_region_vars > self.cx().recursion_limit() { - debug!(?num_non_region_vars, "too many inference variables -> overflow"); - return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow { - suggest_increasing_limit: true, - })); + match self.current_goal_kind { + // We don't do so for `NormalizesTo` goals as we erased the expected term and + // bailing with overflow here would prevent us from detecting a type-mismatch, + // causing a coherence error in diesel, see #131969. We still bail with overflow + // when later returning from the parent AliasRelate goal. 
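Editor's note: the goal-kind distinction used in this hunk (and introduced in the next one) matters because auto-trait proofs legitimately cycle through impl where-clauses; those are the coinductive cycles the solver must accept for recursive types to work. A sketch of such a cycle in ordinary Rust (illustrative only; the helper `assert_send` is made up):

```rust
// Proving `List: Send` needs `Option<Box<List>>: Send`, which needs `Box<List>: Send`,
// which needs `List: Send` again. The cycle only steps through where-clauses of the
// coinductive auto trait `Send`, so it is treated as holding; an inductive reading
// would never terminate.
#[allow(dead_code)]
struct List {
    value: u32,
    next: Option<Box<List>>,
}

fn assert_send<T: Send>() {}

fn main() {
    assert_send::<List>();
    let _ = List { value: 1, next: None };
}
```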
+            CurrentGoalKind::NormalizesTo => {} +            CurrentGoalKind::Misc | CurrentGoalKind::CoinductiveTrait => { +                let num_non_region_vars = canonical +                    .variables +                    .iter() +                    .filter(|c| !c.is_region() && c.is_existential()) +                    .count(); +                if num_non_region_vars > self.cx().recursion_limit() { +                    debug!(?num_non_region_vars, "too many inference variables -> overflow"); +                    return Ok(self.make_ambiguous_response_no_constraints(MaybeCause::Overflow { +                        suggest_increasing_limit: true, +                    })); +                } } } diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs index b0a34b9ce75..e48ee71c858 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs @@ -9,6 +9,7 @@ use rustc_type_ir::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable}; use rustc_type_ir::inherent::*; use rustc_type_ir::relate::Relate; use rustc_type_ir::relate::solver_relating::RelateExt; +use rustc_type_ir::search_graph::PathKind; use rustc_type_ir::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitableExt, TypeVisitor}; use rustc_type_ir::{self as ty, CanonicalVarValues, InferCtxtLike, Interner, TypingMode}; use rustc_type_ir_macros::{Lift_Generic, TypeFoldable_Generic, TypeVisitable_Generic}; @@ -20,12 +21,51 @@ use crate::solve::inspect::{self, ProofTreeBuilder}; use crate::solve::search_graph::SearchGraph; use crate::solve::{ CanonicalInput, Certainty, FIXPOINT_STEP_LIMIT, Goal, GoalEvaluationKind, GoalSource, -    HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryResult, +    HasChanged, NestedNormalizationGoals, NoSolution, PredefinedOpaquesData, QueryInput, +    QueryResult, }; pub(super) mod canonical; mod probe; +/// The kind of goal we're currently proving. +/// +/// This has effects on cycle handling and on how we compute +/// query responses, see the variant descriptions for more info. +#[derive(Debug, Copy, Clone)] +enum CurrentGoalKind { +    Misc, +    /// We're proving a trait goal for a coinductive trait, either an auto trait or `Sized`. +    /// +    /// These are currently the only goals whose impl where-clauses are considered to be +    /// productive steps. +    CoinductiveTrait, +    /// Unlike other goals, `NormalizesTo` goals act like functions with the expected term +    /// always being fully unconstrained. This would weaken inference however, as the nested +    /// goals never get the inference constraints from the actual normalized-to type. +    /// +    /// Because of this we return any ambiguous nested goals from `NormalizesTo` to the +    /// caller which then adds these to its own context. The caller is always an `AliasRelate` +    /// goal so this never leaks out of the solver. +    NormalizesTo, +} + +impl CurrentGoalKind { +    fn from_query_input<I: Interner>(cx: I, input: QueryInput<I, I::Predicate>) -> CurrentGoalKind { +        match input.goal.predicate.kind().skip_binder() { +            ty::PredicateKind::Clause(ty::ClauseKind::Trait(pred)) => { +                if cx.trait_is_coinductive(pred.trait_ref.def_id) { +                    CurrentGoalKind::CoinductiveTrait +                } else { +                    CurrentGoalKind::Misc +                } +            } +            ty::PredicateKind::NormalizesTo(_) => CurrentGoalKind::NormalizesTo, +            _ => CurrentGoalKind::Misc, +        } +    } +} + pub struct EvalCtxt<'a, D, I = <D as SolverDelegate>::Interner> where D: SolverDelegate<Interner = I>, @@ -51,14 +91,10 @@ where /// The variable info for the `var_values`, only used to make an ambiguous response /// with no constraints.
variables: I::CanonicalVars, - /// Whether we're currently computing a `NormalizesTo` goal. Unlike other goals, - /// `NormalizesTo` goals act like functions with the expected term always being - /// fully unconstrained. This would weaken inference however, as the nested goals - /// never get the inference constraints from the actual normalized-to type. Because - /// of this we return any ambiguous nested goals from `NormalizesTo` to the caller - /// when then adds these to its own context. The caller is always an `AliasRelate` - /// goal so this never leaks out of the solver. - is_normalizes_to_goal: bool, + + /// What kind of goal we're currently computing, see the enum definition + /// for more info. + current_goal_kind: CurrentGoalKind, pub(super) var_values: CanonicalVarValues<I>, predefined_opaques_in_body: I::PredefinedOpaques, @@ -226,8 +262,49 @@ where self.delegate.typing_mode() } - pub(super) fn set_is_normalizes_to_goal(&mut self) { - self.is_normalizes_to_goal = true; + /// Computes the `PathKind` for the step from the current goal to the + /// nested goal required due to `source`. + /// + /// See #136824 for a more detailed reasoning for this behavior. We + /// consider cycles to be coinductive if they 'step into' a where-clause + /// of a coinductive trait. We will likely extend this function in the future + /// and will need to clearly document it in the rustc-dev-guide before + /// stabilization. + pub(super) fn step_kind_for_source(&self, source: GoalSource) -> PathKind { + match source { + // We treat these goals as unknown for now. It is likely that most miscellaneous + // nested goals will be converted to an inductive variant in the future. + // + // Having unknown cycles is always the safer option, as changing that to either + // succeed or hard error is backwards compatible. If we incorrectly treat a cycle + // as inductive even though it should not be, it may be unsound during coherence and + // fixing it may cause inference breakage or introduce ambiguity. + GoalSource::Misc => PathKind::Unknown, + GoalSource::NormalizeGoal(path_kind) => path_kind, + GoalSource::ImplWhereBound => { + // We currently only consider a cycle coinductive if it steps + // into a where-clause of a coinductive trait. + // + // We probably want to make all traits coinductive in the future, + // so we treat cycles involving their where-clauses as ambiguous. + if let CurrentGoalKind::CoinductiveTrait = self.current_goal_kind { + PathKind::Coinductive + } else { + PathKind::Unknown + } + } + // Relating types is always unproductive. If we were to map proof trees to + // corecursive functions as explained in #136824, relating types never + // introduces a constructor which could cause the recursion to be guarded. + GoalSource::TypeRelating => PathKind::Inductive, + // Instantiating a higher ranked goal can never cause the recursion to be + // guarded and is therefore unproductive. + GoalSource::InstantiateHigherRanked => PathKind::Inductive, + // These goal sources are likely unproductive and can be changed to + // `PathKind::Inductive`. Keeping them as unknown until we're confident + // about this and have an example where it is necessary. + GoalSource::AliasBoundConstCondition | GoalSource::AliasWellFormed => PathKind::Unknown, + } } /// Creates a root evaluation context and search graph. 
This should only be @@ -256,7 +333,7 @@ where max_input_universe: ty::UniverseIndex::ROOT, variables: Default::default(), var_values: CanonicalVarValues::dummy(), - is_normalizes_to_goal: false, + current_goal_kind: CurrentGoalKind::Misc, origin_span, tainted: Ok(()), }; @@ -294,7 +371,7 @@ where delegate, variables: canonical_input.canonical.variables, var_values, - is_normalizes_to_goal: false, + current_goal_kind: CurrentGoalKind::from_query_input(cx, input), predefined_opaques_in_body: input.predefined_opaques_in_body, max_input_universe: canonical_input.canonical.max_universe, search_graph, @@ -340,6 +417,7 @@ where cx: I, search_graph: &'a mut SearchGraph<D>, canonical_input: CanonicalInput<I>, + step_kind_from_parent: PathKind, goal_evaluation: &mut ProofTreeBuilder<D>, ) -> QueryResult<I> { let mut canonical_goal_evaluation = @@ -352,6 +430,7 @@ where search_graph.with_new_goal( cx, canonical_input, + step_kind_from_parent, &mut canonical_goal_evaluation, |search_graph, canonical_goal_evaluation| { EvalCtxt::enter_canonical( @@ -395,12 +474,10 @@ where /// `NormalizesTo` is only used by `AliasRelate`, all other callsites /// should use [`EvalCtxt::evaluate_goal`] which discards that empty /// storage. - // FIXME(-Znext-solver=coinduction): `_source` is currently unused but will - // be necessary once we implement the new coinduction approach. pub(super) fn evaluate_goal_raw( &mut self, goal_evaluation_kind: GoalEvaluationKind, - _source: GoalSource, + source: GoalSource, goal: Goal<I, I::Predicate>, ) -> Result<(NestedNormalizationGoals<I>, HasChanged, Certainty), NoSolution> { let (orig_values, canonical_goal) = self.canonicalize_goal(goal); @@ -410,6 +487,7 @@ where self.cx(), self.search_graph, canonical_goal, + self.step_kind_for_source(source), &mut goal_evaluation, ); let response = match canonical_response { @@ -555,7 +633,7 @@ where let (NestedNormalizationGoals(nested_goals), _, certainty) = self.evaluate_goal_raw( GoalEvaluationKind::Nested, - GoalSource::Misc, + GoalSource::TypeRelating, unconstrained_goal, )?; // Add the nested goals from normalization to our own nested goals. @@ -630,8 +708,11 @@ where #[instrument(level = "trace", skip(self))] pub(super) fn add_normalizes_to_goal(&mut self, mut goal: Goal<I, ty::NormalizesTo<I>>) { - goal.predicate = - goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env)); + goal.predicate = goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new( + self, + GoalSource::TypeRelating, + goal.param_env, + )); self.inspect.add_normalizes_to_goal(self.delegate, self.max_input_universe, goal); self.nested_goals.normalizes_to_goals.push(goal); } @@ -639,7 +720,7 @@ where #[instrument(level = "debug", skip(self))] pub(super) fn add_goal(&mut self, source: GoalSource, mut goal: Goal<I, I::Predicate>) { goal.predicate = - goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, goal.param_env)); + goal.predicate.fold_with(&mut ReplaceAliasWithInfer::new(self, source, goal.param_env)); self.inspect.add_goal(self.delegate, self.max_input_universe, source, goal); self.nested_goals.goals.push((source, goal)); } @@ -885,7 +966,15 @@ where rhs: T, ) -> Result<(), NoSolution> { let goals = self.delegate.relate(param_env, lhs, variance, rhs, self.origin_span)?; - self.add_goals(GoalSource::Misc, goals); + if cfg!(debug_assertions) { + for g in goals.iter() { + match g.predicate.kind().skip_binder() { + ty::PredicateKind::Subtype { .. } | ty::PredicateKind::AliasRelate(..) 
=> {} + p => unreachable!("unexpected nested goal in `relate`: {p:?}"), + } + } + } + self.add_goals(GoalSource::TypeRelating, goals); Ok(()) } @@ -913,7 +1002,7 @@ where /// `enter_forall`, but takes `&mut self` and passes it back through the /// callback since it can't be aliased during the call. - pub(super) fn enter_forall<T: TypeFoldable<I> + Copy, U>( + pub(super) fn enter_forall<T: TypeFoldable<I>, U>( &mut self, value: ty::Binder<I, T>, f: impl FnOnce(&mut Self, T) -> U, @@ -1037,12 +1126,11 @@ where pub(super) fn is_transmutable( &mut self, - param_env: I::ParamEnv, dst: I::Ty, src: I::Ty, assume: I::Const, ) -> Result<Certainty, NoSolution> { - self.delegate.is_transmutable(param_env, dst, src, assume) + self.delegate.is_transmutable(dst, src, assume) } } @@ -1053,6 +1141,13 @@ where /// /// This is a performance optimization to more eagerly detect cycles during trait /// solving. See tests/ui/traits/next-solver/cycles/cycle-modulo-ambig-aliases.rs. +/// +/// The emitted goals get evaluated in the context of the parent goal; by +/// replacing aliases in nested goals we essentially pull the normalization out of +/// the nested goal. We want to treat the goal as if the normalization still happens +/// inside of the nested goal by inheriting the `step_kind` of the nested goal and +/// storing it in the `GoalSource` of the emitted `AliasRelate` goals. +/// This is necessary for tests/ui/sized/coinductive-1.rs to compile. struct ReplaceAliasWithInfer<'me, 'a, D, I> where D: SolverDelegate<Interner = I>, @@ -1060,6 +1155,7 @@ where { ecx: &'me mut EvalCtxt<'a, D>, param_env: I::ParamEnv, + normalization_goal_source: GoalSource, cache: HashMap<I::Ty, I::Ty>, } @@ -1068,8 +1164,18 @@ where D: SolverDelegate<Interner = I>, I: Interner, { - fn new(ecx: &'me mut EvalCtxt<'a, D>, param_env: I::ParamEnv) -> Self { - ReplaceAliasWithInfer { ecx, param_env, cache: Default::default() } + fn new( + ecx: &'me mut EvalCtxt<'a, D>, + for_goal_source: GoalSource, + param_env: I::ParamEnv, + ) -> Self { + let step_kind = ecx.step_kind_for_source(for_goal_source); + ReplaceAliasWithInfer { + ecx, + param_env, + normalization_goal_source: GoalSource::NormalizeGoal(step_kind), + cache: Default::default(), + } } } @@ -1092,7 +1198,7 @@ where ty::AliasRelationDirection::Equate, ); self.ecx.add_goal( - GoalSource::Misc, + self.normalization_goal_source, Goal::new(self.cx(), self.param_env, normalizes_to), ); infer_ty @@ -1121,7 +1227,7 @@ where ty::AliasRelationDirection::Equate, ); self.ecx.add_goal( - GoalSource::Misc, + self.normalization_goal_source, Goal::new(self.cx(), self.param_env, normalizes_to), ); infer_ct diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/probe.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/probe.rs index add96a1fdf7..0a9e7fafaea 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/probe.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/probe.rs @@ -34,7 +34,7 @@ where delegate, variables: outer_ecx.variables, var_values: outer_ecx.var_values, - is_normalizes_to_goal: outer_ecx.is_normalizes_to_goal, + current_goal_kind: outer_ecx.current_goal_kind, predefined_opaques_in_body: outer_ecx.predefined_opaques_in_body, max_input_universe, search_graph: outer_ecx.search_graph, diff --git a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs index 1607fbb1b6a..6a8e0790f7c 100644 --- a/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs +++ 
b/compiler/rustc_next_trait_solver/src/solve/inspect/build.rs @@ -421,7 +421,7 @@ impl<D: SolverDelegate<Interner = I>, I: Interner> ProofTreeBuilder<D> { self.add_goal( delegate, max_input_universe, - GoalSource::Misc, + GoalSource::TypeRelating, goal.with(delegate.cx(), goal.predicate), ); } diff --git a/compiler/rustc_next_trait_solver/src/solve/mod.rs b/compiler/rustc_next_trait_solver/src/solve/mod.rs index 1fa35b60304..199f0c7512e 100644 --- a/compiler/rustc_next_trait_solver/src/solve/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/mod.rs @@ -313,7 +313,9 @@ where ty::AliasRelationDirection::Equate, ), ); - self.add_goal(GoalSource::Misc, alias_relate_goal); + // We normalize the self type to be able to relate it with + // types from candidates. + self.add_goal(GoalSource::TypeRelating, alias_relate_goal); self.try_evaluate_added_goals()?; Ok(self.resolve_vars_if_possible(normalized_term)) } else { diff --git a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/inherent.rs b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/inherent.rs index 25e8708a332..1d1ff09ee41 100644 --- a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/inherent.rs +++ b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/inherent.rs @@ -39,7 +39,7 @@ where // // FIXME(-Znext-solver=coinductive): I think this should be split // and we tag the impl bounds with `GoalSource::ImplWhereBound`? - // Right not this includes both the impl and the assoc item where bounds, + // Right now this includes both the impl and the assoc item where bounds, // and I don't think the assoc item where-bounds are allowed to be coinductive. self.add_goals( GoalSource::Misc, diff --git a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs index 88002e1a88a..de6d21da0f5 100644 --- a/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/normalizes_to/mod.rs @@ -28,7 +28,6 @@ where &mut self, goal: Goal<I, NormalizesTo<I>>, ) -> QueryResult<I> { - self.set_is_normalizes_to_goal(); debug_assert!(self.term_is_fully_unconstrained(goal)); let cx = self.cx(); match goal.predicate.alias.kind(cx) { diff --git a/compiler/rustc_next_trait_solver/src/solve/project_goals.rs b/compiler/rustc_next_trait_solver/src/solve/project_goals.rs index d9452880071..944d5f0e042 100644 --- a/compiler/rustc_next_trait_solver/src/solve/project_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/project_goals.rs @@ -24,7 +24,8 @@ where ty::AliasRelationDirection::Equate, ), ); - self.add_goal(GoalSource::Misc, goal); + // A projection goal holds if the alias is equal to the expected term. 
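Editor's note: the comment just above states the rule for projection goals; in surface terms it is what checks an associated-type equality bound. A small illustration in ordinary Rust (the helper `takes_vec_iter` is invented for the example):

```rust
// The bound `IntoIter = std::vec::IntoIter<u8>` is a projection goal: it holds because
// the alias `<Vec<u8> as IntoIterator>::IntoIter` is equal to the expected type.
fn takes_vec_iter<T>(iter: T) -> usize
where
    T: IntoIterator<Item = u8, IntoIter = std::vec::IntoIter<u8>>,
{
    iter.into_iter().count()
}

fn main() {
    println!("{}", takes_vec_iter(vec![1u8, 2, 3]));
}
```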
+ self.add_goal(GoalSource::TypeRelating, goal); self.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) } } diff --git a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs index 843200ca184..eba496fa226 100644 --- a/compiler/rustc_next_trait_solver/src/solve/search_graph.rs +++ b/compiler/rustc_next_trait_solver/src/solve/search_graph.rs @@ -1,10 +1,9 @@ use std::convert::Infallible; use std::marker::PhantomData; -use rustc_type_ir::Interner; -use rustc_type_ir::inherent::*; use rustc_type_ir::search_graph::{self, PathKind}; -use rustc_type_ir::solve::{CanonicalInput, Certainty, QueryResult}; +use rustc_type_ir::solve::{CanonicalInput, Certainty, NoSolution, QueryResult}; +use rustc_type_ir::{Interner, TypingMode}; use super::inspect::ProofTreeBuilder; use super::{FIXPOINT_STEP_LIMIT, has_no_inference_or_external_constraints}; @@ -48,7 +47,24 @@ where ) -> QueryResult<I> { match kind { PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes), - PathKind::Inductive => response_no_constraints(cx, input, Certainty::overflow(false)), + PathKind::Unknown => response_no_constraints(cx, input, Certainty::overflow(false)), + // Even though we know these cycles to be unproductive, we still return + // overflow during coherence. This is both as we are not 100% confident in + // the implementation yet and any incorrect errors would be unsound there. + // The affected cases are also fairly artificial and not necessarily desirable + // so keeping this as ambiguity is fine for now. + // + // See `tests/ui/traits/next-solver/cycles/unproductive-in-coherence.rs` for an + // example where this would matter. We likely should change these cycles to `NoSolution` + // even in coherence once this is a bit more settled. + PathKind::Inductive => match input.typing_mode { + TypingMode::Coherence => { + response_no_constraints(cx, input, Certainty::overflow(false)) + } + TypingMode::Analysis { .. } + | TypingMode::PostBorrowckAnalysis { .. 
} + | TypingMode::PostAnalysis => Err(NoSolution), + }, } } @@ -58,12 +74,7 @@ where input: CanonicalInput<I>, result: QueryResult<I>, ) -> bool { - match kind { - PathKind::Coinductive => response_no_constraints(cx, input, Certainty::Yes) == result, - PathKind::Inductive => { - response_no_constraints(cx, input, Certainty::overflow(false)) == result - } - } + Self::initial_provisional_result(cx, kind, input) == result } fn on_stack_overflow( @@ -94,10 +105,6 @@ where let certainty = from_result.unwrap().value.certainty; response_no_constraints(cx, for_input, certainty) } - - fn step_is_coinductive(cx: I, input: CanonicalInput<I>) -> bool { - input.canonical.value.goal.predicate.is_coinductive(cx) - } } fn response_no_constraints<I: Interner>( diff --git a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs index 1665dbb3018..12a55664641 100644 --- a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs @@ -617,7 +617,6 @@ where )?; let certainty = ecx.is_transmutable( - goal.param_env, goal.predicate.trait_ref.args.type_at(0), goal.predicate.trait_ref.args.type_at(1), assume, @@ -787,13 +786,6 @@ where ) } - // `(A, B, T)` -> `(A, B, U)` where `T: Unsize<U>` - (ty::Tuple(a_tys), ty::Tuple(b_tys)) - if a_tys.len() == b_tys.len() && !a_tys.is_empty() => - { - result_to_single(ecx.consider_builtin_tuple_unsize(goal, a_tys, b_tys)) - } - _ => vec![], } }) @@ -1085,48 +1077,6 @@ where .enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)) } - /// We generate the following builtin impl for tuples of all sizes. - /// - /// This impl is still unstable and we emit a feature error when it - /// when it is used by a coercion. - /// ```ignore (builtin impl example) - /// impl<T: ?Sized, U: ?Sized, V: ?Sized> Unsize<(T, V)> for (T, U) - /// where - /// U: Unsize<V>, - /// {} - /// ``` - fn consider_builtin_tuple_unsize( - &mut self, - goal: Goal<I, (I::Ty, I::Ty)>, - a_tys: I::Tys, - b_tys: I::Tys, - ) -> Result<Candidate<I>, NoSolution> { - let cx = self.cx(); - let Goal { predicate: (_a_ty, b_ty), .. } = goal; - - let (&a_last_ty, a_rest_tys) = a_tys.split_last().unwrap(); - let b_last_ty = b_tys.last().unwrap(); - - // Instantiate just the tail field of B., and require that they're equal. - let unsized_a_ty = Ty::new_tup_from_iter(cx, a_rest_tys.iter().copied().chain([b_last_ty])); - self.eq(goal.param_env, unsized_a_ty, b_ty)?; - - // Similar to ADTs, require that we can unsize the tail. - self.add_goal( - GoalSource::ImplWhereBound, - goal.with( - cx, - ty::TraitRef::new( - cx, - cx.require_lang_item(TraitSolverLangItem::Unsize), - [a_last_ty, b_last_ty], - ), - ), - ); - self.probe_builtin_trait_candidate(BuiltinImplSource::TupleUnsizing) - .enter(|ecx| ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes)) - } - // Return `Some` if there is an impl (built-in or user provided) that may // hold for the self type of the goal, which for coherence and soundness // purposes must disqualify the built-in auto impl assembled by considering @@ -1239,17 +1189,15 @@ where constituent_tys: impl Fn( &EvalCtxt<'_, D>, I::Ty, - ) -> Result<Vec<ty::Binder<I, I::Ty>>, NoSolution>, + ) -> Result<ty::Binder<I, Vec<I::Ty>>, NoSolution>, ) -> Result<Candidate<I>, NoSolution> { self.probe_trait_candidate(source).enter(|ecx| { - let goals = constituent_tys(ecx, goal.predicate.self_ty())? 
- .into_iter() - .map(|ty| { - ecx.enter_forall(ty, |ecx, ty| { - goal.with(ecx.cx(), goal.predicate.with_self_ty(ecx.cx(), ty)) - }) - }) - .collect::<Vec<_>>(); + let goals = + ecx.enter_forall(constituent_tys(ecx, goal.predicate.self_ty())?, |ecx, tys| { + tys.into_iter() + .map(|ty| goal.with(ecx.cx(), goal.predicate.with_self_ty(ecx.cx(), ty))) + .collect::<Vec<_>>() + }); ecx.add_goals(GoalSource::ImplWhereBound, goals); ecx.evaluate_added_goals_and_make_canonical_response(Certainty::Yes) }) diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl index 563081c7240..6d4308cda1a 100644 --- a/compiler/rustc_parse/messages.ftl +++ b/compiler/rustc_parse/messages.ftl @@ -26,6 +26,11 @@ parse_async_move_block_in_2015 = `async move` blocks are only allowed in Rust 20 parse_async_move_order_incorrect = the order of `move` and `async` is incorrect .suggestion = try switching the order +parse_async_use_block_in_2015 = `async use` blocks are only allowed in Rust 2018 or later + +parse_async_use_order_incorrect = the order of `use` and `async` is incorrect + .suggestion = try switching the order + parse_at_dot_dot_in_struct_pattern = `@ ..` is not supported in struct patterns .suggestion = bind to each field separately or, if you don't need them, just remove `{$ident} @` @@ -348,6 +353,9 @@ parse_incorrect_use_of_await = incorrect use of `await` parse_incorrect_use_of_await_postfix_suggestion = `await` is a postfix operation +parse_incorrect_use_of_use = incorrect use of `use` + .parentheses_suggestion = `use` is not a method call, try removing the parentheses + parse_incorrect_visibility_restriction = incorrect visibility restriction .help = some possible visibility restrictions are: `pub(crate)`: visible only on the current crate @@ -424,7 +432,7 @@ parse_invalid_logical_operator = `{$incorrect}` is not a logical operator .use_amp_amp_for_conjunction = use `&&` to perform logical conjunction .use_pipe_pipe_for_disjunction = use `||` to perform logical disjunction -parse_invalid_meta_item = expected unsuffixed literal, found `{$token}` +parse_invalid_meta_item = expected unsuffixed literal, found {$descr} .quote_ident_sugg = surround the identifier with quotation marks to make it into a string literal parse_invalid_offset_of = offset_of expects dot-separated field and variant names diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs index dc03d6f9521..e090d9cf760 100644 --- a/compiler/rustc_parse/src/errors.rs +++ b/compiler/rustc_parse/src/errors.rs @@ -106,6 +106,19 @@ pub(crate) struct IncorrectUseOfAwait { pub span: Span, } +#[derive(Diagnostic)] +#[diag(parse_incorrect_use_of_use)] +pub(crate) struct IncorrectUseOfUse { + #[primary_span] + #[suggestion( + parse_parentheses_suggestion, + style = "verbose", + code = "", + applicability = "machine-applicable" + )] + pub span: Span, +} + #[derive(Subdiagnostic)] #[multipart_suggestion( parse_incorrect_use_of_await_postfix_suggestion, @@ -772,7 +785,6 @@ pub(crate) struct LabeledLoopInBreak { } #[derive(Subdiagnostic)] - pub(crate) enum WrapInParentheses { #[multipart_suggestion( parse_sugg_wrap_expression_in_parentheses, @@ -1025,7 +1037,7 @@ pub(crate) struct SuffixedLiteralInAttribute { pub(crate) struct InvalidMetaItem { #[primary_span] pub span: Span, - pub token: Token, + pub descr: String, #[subdiagnostic] pub quote_ident_sugg: Option<InvalidMetaItemQuoteIdentSugg>, } @@ -1501,6 +1513,14 @@ pub(crate) struct AsyncMoveOrderIncorrect { } #[derive(Diagnostic)] 
+#[diag(parse_async_use_order_incorrect)] +pub(crate) struct AsyncUseOrderIncorrect { + #[primary_span] + #[suggestion(style = "verbose", code = "async use", applicability = "maybe-incorrect")] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(parse_double_colon_in_bound)] pub(crate) struct DoubleColonInBound { #[primary_span] @@ -1669,6 +1689,13 @@ pub(crate) struct AsyncMoveBlockIn2015 { } #[derive(Diagnostic)] +#[diag(parse_async_use_block_in_2015)] +pub(crate) struct AsyncUseBlockIn2015 { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(parse_async_bound_modifier_in_2015)] pub(crate) struct AsyncBoundModifierIn2015 { #[primary_span] @@ -2923,8 +2950,9 @@ pub(crate) struct AddBoxNew { #[diag(parse_bad_return_type_notation_output)] pub(crate) struct BadReturnTypeNotationOutput { #[primary_span] - #[suggestion(code = "", applicability = "maybe-incorrect", style = "verbose")] pub span: Span, + #[suggestion(code = "", applicability = "maybe-incorrect", style = "verbose")] + pub suggestion: Span, } #[derive(Diagnostic)] diff --git a/compiler/rustc_parse/src/lexer/mod.rs b/compiler/rustc_parse/src/lexer/mod.rs index 792e2cc26ef..1d17290e1c7 100644 --- a/compiler/rustc_parse/src/lexer/mod.rs +++ b/compiler/rustc_parse/src/lexer/mod.rs @@ -384,17 +384,17 @@ impl<'psess, 'src> Lexer<'psess, 'src> { rustc_lexer::TokenKind::Colon => token::Colon, rustc_lexer::TokenKind::Dollar => token::Dollar, rustc_lexer::TokenKind::Eq => token::Eq, - rustc_lexer::TokenKind::Bang => token::Not, + rustc_lexer::TokenKind::Bang => token::Bang, rustc_lexer::TokenKind::Lt => token::Lt, rustc_lexer::TokenKind::Gt => token::Gt, - rustc_lexer::TokenKind::Minus => token::BinOp(token::Minus), - rustc_lexer::TokenKind::And => token::BinOp(token::And), - rustc_lexer::TokenKind::Or => token::BinOp(token::Or), - rustc_lexer::TokenKind::Plus => token::BinOp(token::Plus), - rustc_lexer::TokenKind::Star => token::BinOp(token::Star), - rustc_lexer::TokenKind::Slash => token::BinOp(token::Slash), - rustc_lexer::TokenKind::Caret => token::BinOp(token::Caret), - rustc_lexer::TokenKind::Percent => token::BinOp(token::Percent), + rustc_lexer::TokenKind::Minus => token::Minus, + rustc_lexer::TokenKind::And => token::And, + rustc_lexer::TokenKind::Or => token::Or, + rustc_lexer::TokenKind::Plus => token::Plus, + rustc_lexer::TokenKind::Star => token::Star, + rustc_lexer::TokenKind::Slash => token::Slash, + rustc_lexer::TokenKind::Caret => token::Caret, + rustc_lexer::TokenKind::Percent => token::Percent, rustc_lexer::TokenKind::Unknown | rustc_lexer::TokenKind::InvalidIdent => { // Don't emit diagnostics for sequences of the same invalid token diff --git a/compiler/rustc_parse/src/lexer/unicode_chars.rs b/compiler/rustc_parse/src/lexer/unicode_chars.rs index 648a352efd9..ff03b42484b 100644 --- a/compiler/rustc_parse/src/lexer/unicode_chars.rs +++ b/compiler/rustc_parse/src/lexer/unicode_chars.rs @@ -308,11 +308,11 @@ pub(super) static UNICODE_ARRAY: &[(char, &str, &str)] = &[ const ASCII_ARRAY: &[(&str, &str, Option<token::TokenKind>)] = &[ (" ", "Space", None), ("_", "Underscore", Some(token::Ident(kw::Underscore, token::IdentIsRaw::No))), - ("-", "Minus/Hyphen", Some(token::BinOp(token::Minus))), + ("-", "Minus/Hyphen", Some(token::Minus)), (",", "Comma", Some(token::Comma)), (";", "Semicolon", Some(token::Semi)), (":", "Colon", Some(token::Colon)), - ("!", "Exclamation Mark", Some(token::Not)), + ("!", "Exclamation Mark", Some(token::Bang)), ("?", "Question Mark", Some(token::Question)), (".", "Period", 
Some(token::Dot)), ("(", "Left Parenthesis", Some(token::OpenDelim(Delimiter::Parenthesis))), @@ -321,11 +321,11 @@ const ASCII_ARRAY: &[(&str, &str, Option<token::TokenKind>)] = &[ ("]", "Right Square Bracket", Some(token::CloseDelim(Delimiter::Bracket))), ("{", "Left Curly Brace", Some(token::OpenDelim(Delimiter::Brace))), ("}", "Right Curly Brace", Some(token::CloseDelim(Delimiter::Brace))), - ("*", "Asterisk", Some(token::BinOp(token::Star))), - ("/", "Slash", Some(token::BinOp(token::Slash))), + ("*", "Asterisk", Some(token::Star)), + ("/", "Slash", Some(token::Slash)), ("\\", "Backslash", None), - ("&", "Ampersand", Some(token::BinOp(token::And))), - ("+", "Plus Sign", Some(token::BinOp(token::Plus))), + ("&", "Ampersand", Some(token::And)), + ("+", "Plus Sign", Some(token::Plus)), ("<", "Less-Than Sign", Some(token::Lt)), ("=", "Equals Sign", Some(token::Eq)), ("==", "Double Equals Sign", Some(token::EqEq)), diff --git a/compiler/rustc_parse/src/lib.rs b/compiler/rustc_parse/src/lib.rs index 1a104ff5e33..79939aab7fc 100644 --- a/compiler/rustc_parse/src/lib.rs +++ b/compiler/rustc_parse/src/lib.rs @@ -4,6 +4,7 @@ #![allow(internal_features)] #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(array_windows)] #![feature(assert_matches)] #![feature(box_patterns)] @@ -12,7 +13,6 @@ #![feature(iter_intersperse)] #![feature(let_chains)] #![feature(string_from_utf8_lossy_owned)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::path::{Path, PathBuf}; diff --git a/compiler/rustc_parse/src/parser/attr.rs b/compiler/rustc_parse/src/parser/attr.rs index 2691e6f56d6..066b570c23f 100644 --- a/compiler/rustc_parse/src/parser/attr.rs +++ b/compiler/rustc_parse/src/parser/attr.rs @@ -1,4 +1,6 @@ -use rustc_ast::{self as ast, Attribute, attr, token}; +use rustc_ast as ast; +use rustc_ast::token::{self, MetaVarKind}; +use rustc_ast::{Attribute, attr}; use rustc_errors::codes::*; use rustc_errors::{Diag, PResult}; use rustc_span::{BytePos, Span}; @@ -9,7 +11,7 @@ use super::{ AttrWrapper, Capturing, FnParseMode, ForceCollect, Parser, ParserRange, PathStyle, Trailing, UsePreAttrPos, }; -use crate::{errors, exp, fluent_generated as fluent, maybe_whole}; +use crate::{errors, exp, fluent_generated as fluent}; // Public for rustfmt usage #[derive(Debug)] @@ -128,7 +130,7 @@ impl<'a> Parser<'a> { assert!(this.eat(exp!(Pound)), "parse_attribute called in non-attribute position"); let style = - if this.eat(exp!(Not)) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer }; + if this.eat(exp!(Bang)) { ast::AttrStyle::Inner } else { ast::AttrStyle::Outer }; this.expect(exp!(OpenBracket))?; let item = this.parse_attr_item(ForceCollect::No)?; @@ -269,7 +271,12 @@ impl<'a> Parser<'a> { /// PATH `=` UNSUFFIXED_LIT /// The delimiters or `=` are still put into the resulting token stream. pub fn parse_attr_item(&mut self, force_collect: ForceCollect) -> PResult<'a, ast::AttrItem> { - maybe_whole!(self, NtMeta, |attr| attr.into_inner()); + if let Some(item) = self.eat_metavar_seq_with_matcher( + |mv_kind| matches!(mv_kind, MetaVarKind::Meta { .. }), + |this| this.parse_attr_item(force_collect), + ) { + return Ok(item); + } // Attr items don't have attributes. 
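Editor's note: the `MetaVarKind::Meta` handling added above comes into play when a macro forwards a `$m:meta` fragment back into attribute position: the attribute parser can then accept the already-parsed fragment instead of reinterpreting raw tokens. The kind of source this serves, as a standalone example (ordinary Rust; the macro name `apply_attr` is invented):

```rust
// `$m:meta` captures `derive(Debug)` as a meta item; expanding `#[$m]` hands that
// fragment straight back to the attribute parser.
macro_rules! apply_attr {
    ($m:meta, $item:item) => {
        #[$m]
        $item
    };
}

apply_attr!(derive(Debug), struct Point { x: i32, y: i32 });

fn main() {
    println!("{:?}", Point { x: 1, y: 2 });
}
```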
self.collect_tokens(None, AttrWrapper::empty(), force_collect, |this, _empty_attrs| { @@ -305,7 +312,7 @@ impl<'a> Parser<'a> { loop { let start_pos = self.num_bump_calls; // Only try to parse if it is an inner attribute (has `!`). - let attr = if self.check(exp!(Pound)) && self.look_ahead(1, |t| t == &token::Not) { + let attr = if self.check(exp!(Pound)) && self.look_ahead(1, |t| t == &token::Bang) { Some(self.parse_attribute(InnerAttrPolicy::Permitted)?) } else if let token::DocComment(comment_kind, attr_style, data) = self.token.kind { if attr_style == ast::AttrStyle::Inner { @@ -396,18 +403,17 @@ impl<'a> Parser<'a> { &mut self, unsafe_allowed: AllowLeadingUnsafe, ) -> PResult<'a, ast::MetaItem> { - // We can't use `maybe_whole` here because it would bump in the `None` - // case, which we don't want. - if let token::Interpolated(nt) = &self.token.kind - && let token::NtMeta(attr_item) = &**nt - { - match attr_item.meta(attr_item.path.span) { - Some(meta) => { - self.bump(); - return Ok(meta); - } - None => self.unexpected()?, - } + if let Some(MetaVarKind::Meta { has_meta_form }) = self.token.is_metavar_seq() { + return if has_meta_form { + let attr_item = self + .eat_metavar_seq(MetaVarKind::Meta { has_meta_form: true }, |this| { + this.parse_attr_item(ForceCollect::No) + }) + .unwrap(); + Ok(attr_item.meta(attr_item.path.span).unwrap()) + } else { + self.unexpected_any() + }; } let lo = self.token.span; @@ -464,7 +470,7 @@ impl<'a> Parser<'a> { let mut err = errors::InvalidMetaItem { span: self.token.span, - token: self.token.clone(), + descr: super::token_descr(&self.token), quote_ident_sugg: None, }; diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index 67abc2d5394..716ababb008 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -1,17 +1,15 @@ use std::mem::take; use std::ops::{Deref, DerefMut}; -use std::sync::Arc; use ast::token::IdentIsRaw; use rustc_ast as ast; use rustc_ast::ptr::P; use rustc_ast::token::{self, Delimiter, Lit, LitKind, Token, TokenKind}; -use rustc_ast::tokenstream::AttrTokenTree; use rustc_ast::util::parser::AssocOp; use rustc_ast::{ AngleBracketedArg, AngleBracketedArgs, AnonConst, AttrVec, BinOpKind, BindingMode, Block, - BlockCheckMode, Expr, ExprKind, GenericArg, Generics, HasTokens, Item, ItemKind, Param, Pat, - PatKind, Path, PathSegment, QSelf, Recovered, Ty, TyKind, + BlockCheckMode, Expr, ExprKind, GenericArg, Generics, Item, ItemKind, Param, Pat, PatKind, + Path, PathSegment, QSelf, Recovered, Ty, TyKind, }; use rustc_ast_pretty::pprust; use rustc_data_structures::fx::FxHashSet; @@ -33,15 +31,15 @@ use super::{ SeqSep, TokenType, }; use crate::errors::{ - AddParen, AmbiguousPlus, AsyncMoveBlockIn2015, AttributeOnParamType, AwaitSuggestion, - BadQPathStage2, BadTypePlus, BadTypePlusSub, ColonAsSemi, ComparisonOperatorsCannotBeChained, - ComparisonOperatorsCannotBeChainedSugg, ConstGenericWithoutBraces, - ConstGenericWithoutBracesSugg, DocCommentDoesNotDocumentAnything, DocCommentOnParamType, - DoubleColonInBound, ExpectedIdentifier, ExpectedSemi, ExpectedSemiSugg, + AddParen, AmbiguousPlus, AsyncMoveBlockIn2015, AsyncUseBlockIn2015, AttributeOnParamType, + AwaitSuggestion, BadQPathStage2, BadTypePlus, BadTypePlusSub, ColonAsSemi, + ComparisonOperatorsCannotBeChained, ComparisonOperatorsCannotBeChainedSugg, + ConstGenericWithoutBraces, ConstGenericWithoutBracesSugg, DocCommentDoesNotDocumentAnything, + DocCommentOnParamType, 
DoubleColonInBound, ExpectedIdentifier, ExpectedSemi, ExpectedSemiSugg, GenericParamsWithoutAngleBrackets, GenericParamsWithoutAngleBracketsSugg, HelpIdentifierStartsWithNumber, HelpUseLatestEdition, InInTypo, IncorrectAwait, - IncorrectSemicolon, IncorrectUseOfAwait, PatternMethodParamWithoutBody, QuestionMarkInType, - QuestionMarkInTypeSugg, SelfParamNotFirst, StructLiteralBodyWithoutPath, + IncorrectSemicolon, IncorrectUseOfAwait, IncorrectUseOfUse, PatternMethodParamWithoutBody, + QuestionMarkInType, QuestionMarkInTypeSugg, SelfParamNotFirst, StructLiteralBodyWithoutPath, StructLiteralBodyWithoutPathSugg, StructLiteralNeedingParens, StructLiteralNeedingParensSugg, SuggAddMissingLetStmt, SuggEscapeIdentifier, SuggRemoveComma, TernaryOperator, UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration, @@ -574,10 +572,17 @@ impl<'a> Parser<'a> { return Err(self.dcx().create_err(UseEqInstead { span: self.token.span })); } - if self.token.is_keyword(kw::Move) && self.prev_token.is_keyword(kw::Async) { - // The 2015 edition is in use because parsing of `async move` has failed. + if (self.token.is_keyword(kw::Move) || self.token.is_keyword(kw::Use)) + && self.prev_token.is_keyword(kw::Async) + { + // The 2015 edition is in use because parsing of `async move` or `async use` has failed. let span = self.prev_token.span.to(self.token.span); - return Err(self.dcx().create_err(AsyncMoveBlockIn2015 { span })); + if self.token.is_keyword(kw::Move) { + return Err(self.dcx().create_err(AsyncMoveBlockIn2015 { span })); + } else { + // kw::Use + return Err(self.dcx().create_err(AsyncUseBlockIn2015 { span })); + } } let expect = tokens_to_string(&expected); @@ -1169,7 +1174,7 @@ impl<'a> Parser<'a> { let mut number_of_gt = 0; while self.look_ahead(position, |t| { trace!("check_trailing_angle_brackets: t={:?}", t); - if *t == token::BinOp(token::BinOpToken::Shr) { + if *t == token::Shr { number_of_shr += 1; true } else if *t == token::Gt { @@ -1224,7 +1229,7 @@ impl<'a> Parser<'a> { let span = lo.to(self.prev_token.span); // Detect trailing `>` like in `x.collect::Vec<_>>()`. let mut trailing_span = self.prev_token.span.shrink_to_hi(); - while self.token == token::BinOp(token::Shr) || self.token == token::Gt { + while self.token == token::Shr || self.token == token::Gt { trailing_span = trailing_span.to(self.token.span); self.bump(); } @@ -1350,13 +1355,13 @@ impl<'a> Parser<'a> { } return match (op.node, &outer_op.node) { // `x == y == z` - (BinOpKind::Eq, AssocOp::Equal) | + (BinOpKind::Eq, AssocOp::Binary(BinOpKind::Eq)) | // `x < y < z` and friends. - (BinOpKind::Lt, AssocOp::Less | AssocOp::LessEqual) | - (BinOpKind::Le, AssocOp::LessEqual | AssocOp::Less) | + (BinOpKind::Lt, AssocOp::Binary(BinOpKind::Lt | BinOpKind::Le)) | + (BinOpKind::Le, AssocOp::Binary(BinOpKind::Lt | BinOpKind::Le)) | // `x > y > z` and friends. - (BinOpKind::Gt, AssocOp::Greater | AssocOp::GreaterEqual) | - (BinOpKind::Ge, AssocOp::GreaterEqual | AssocOp::Greater) => { + (BinOpKind::Gt, AssocOp::Binary(BinOpKind::Gt | BinOpKind::Ge)) | + (BinOpKind::Ge, AssocOp::Binary(BinOpKind::Gt | BinOpKind::Ge)) => { let expr_to_str = |e: &Expr| { self.span_to_snippet(e.span) .unwrap_or_else(|_| pprust::expr_to_string(e)) @@ -1368,7 +1373,10 @@ impl<'a> Parser<'a> { false // Keep the current parse behavior, where the AST is `(x < y) < z`. 
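Editor's note: the recovery above reports that comparison operators cannot be chained while deliberately keeping the `(x < y) < z` parse for diagnostics. What that means for user code (ordinary Rust; illustrative only):

```rust
fn main() {
    let (a, b, c) = (1, 2, 3);
    // let ordered = a < b < c; // error: comparison operators cannot be chained
    //                          // help: split the comparison into two
    let ordered = a < b && b < c; // the shape the parser's suggestion points towards
    println!("{ordered}");
}
```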
} // `x == y < z` - (BinOpKind::Eq, AssocOp::Less | AssocOp::LessEqual | AssocOp::Greater | AssocOp::GreaterEqual) => { + ( + BinOpKind::Eq, + AssocOp::Binary(BinOpKind::Lt | BinOpKind::Le | BinOpKind::Gt | BinOpKind::Ge) + ) => { // Consume `z`/outer-op-rhs. let snapshot = self.create_snapshot_for_diagnostic(); match self.parse_expr() { @@ -1389,7 +1397,10 @@ impl<'a> Parser<'a> { } } // `x > y == z` - (BinOpKind::Lt | BinOpKind::Le | BinOpKind::Gt | BinOpKind::Ge, AssocOp::Equal) => { + ( + BinOpKind::Lt | BinOpKind::Le | BinOpKind::Gt | BinOpKind::Ge, + AssocOp::Binary(BinOpKind::Eq) + ) => { let snapshot = self.create_snapshot_for_diagnostic(); // At this point it is always valid to enclose the lhs in parentheses, no // further checks are necessary. @@ -1457,15 +1468,14 @@ impl<'a> Parser<'a> { // Include `<` to provide this recommendation even in a case like // `Foo<Bar<Baz<Qux, ()>>>` - if op.node == BinOpKind::Lt && outer_op.node == AssocOp::Less - || outer_op.node == AssocOp::Greater + if op.node == BinOpKind::Lt && outer_op.node == AssocOp::Binary(BinOpKind::Lt) + || outer_op.node == AssocOp::Binary(BinOpKind::Gt) { - if outer_op.node == AssocOp::Less { + if outer_op.node == AssocOp::Binary(BinOpKind::Lt) { let snapshot = self.create_snapshot_for_diagnostic(); self.bump(); // So far we have parsed `foo<bar<`, consume the rest of the type args. - let modifiers = - [(token::Lt, 1), (token::Gt, -1), (token::BinOp(token::Shr), -2)]; + let modifiers = [(token::Lt, 1), (token::Gt, -1), (token::Shr, -2)]; self.consume_tts(1, &modifiers); if !&[token::OpenDelim(Delimiter::Parenthesis), token::PathSep] @@ -1958,7 +1968,7 @@ impl<'a> Parser<'a> { &mut self, await_sp: Span, ) -> PResult<'a, P<Expr>> { - let (hi, expr, is_question) = if self.token == token::Not { + let (hi, expr, is_question) = if self.token == token::Bang { // Handle `await!(<expr>)`. self.recover_await_macro()? } else { @@ -1970,7 +1980,7 @@ impl<'a> Parser<'a> { } fn recover_await_macro(&mut self) -> PResult<'a, (Span, P<Expr>, bool)> { - self.expect(exp!(Not))?; + self.expect(exp!(Bang))?; self.expect(exp!(OpenParen))?; let expr = self.parse_expr()?; self.expect(exp!(CloseParen))?; @@ -1988,7 +1998,7 @@ impl<'a> Parser<'a> { self.parse_expr() } .map_err(|mut err| { - err.span_label(await_sp, "while parsing this incorrect await expression"); + err.span_label(await_sp, format!("while parsing this incorrect await expression")); err })?; Ok((expr.span, expr, is_question)) @@ -2027,10 +2037,25 @@ impl<'a> Parser<'a> { self.dcx().emit_err(IncorrectUseOfAwait { span }); } } + /// + /// If encountering `x.use()`, consumes and emits an error. + pub(super) fn recover_from_use(&mut self) { + if self.token == token::OpenDelim(Delimiter::Parenthesis) + && self.look_ahead(1, |t| t == &token::CloseDelim(Delimiter::Parenthesis)) + { + // var.use() + let lo = self.token.span; + self.bump(); // ( + let span = lo.to(self.token.span); + self.bump(); // ) + + self.dcx().emit_err(IncorrectUseOfUse { span }); + } + } pub(super) fn try_macro_suggestion(&mut self) -> PResult<'a, P<Expr>> { let is_try = self.token.is_keyword(kw::Try); - let is_questionmark = self.look_ahead(1, |t| t == &token::Not); //check for ! + let is_questionmark = self.look_ahead(1, |t| t == &token::Bang); //check for ! 
let is_open = self.look_ahead(2, |t| t == &token::OpenDelim(Delimiter::Parenthesis)); //check for ( if is_try && is_questionmark && is_open { @@ -2082,7 +2107,7 @@ impl<'a> Parser<'a> { ast::GenericBound::Trait(poly) => Some(poly), _ => None, }) - .last() + .next_back() { err.span_suggestion_verbose( poly.span.shrink_to_hi(), @@ -2400,52 +2425,6 @@ impl<'a> Parser<'a> { err.subdiagnostic(ExprParenthesesNeeded::surrounding(*sp)); } err.span_label(span, "expected expression"); - - // Walk the chain of macro expansions for the current token to point at how the original - // code was interpreted. This helps the user realize when a macro argument of one type is - // later reinterpreted as a different type, like `$x:expr` being reinterpreted as `$x:pat` - // in a subsequent macro invocation (#71039). - let mut tok = self.token.clone(); - let mut labels = vec![]; - while let TokenKind::Interpolated(nt) = &tok.kind { - let tokens = nt.tokens(); - labels.push(Arc::clone(nt)); - if let Some(tokens) = tokens - && let tokens = tokens.to_attr_token_stream() - && let tokens = tokens.0.deref() - && let [AttrTokenTree::Token(token, _)] = &tokens[..] - { - tok = token.clone(); - } else { - break; - } - } - let mut iter = labels.into_iter().peekable(); - let mut show_link = false; - while let Some(nt) = iter.next() { - let descr = nt.descr(); - if let Some(next) = iter.peek() { - let next_descr = next.descr(); - if next_descr != descr { - err.span_label(next.use_span(), format!("this is expected to be {next_descr}")); - err.span_label( - nt.use_span(), - format!( - "this is interpreted as {}, but it is expected to be {}", - next_descr, descr, - ), - ); - show_link = true; - } - } - } - if show_link { - err.note( - "when forwarding a matched fragment to another macro-by-example, matchers in the \ - second macro will see an opaque AST of the fragment type, not the underlying \ - tokens", - ); - } err } @@ -2635,10 +2614,12 @@ impl<'a> Parser<'a> { ) -> PResult<'a, GenericArg> { let is_op_or_dot = AssocOp::from_token(&self.token) .and_then(|op| { - if let AssocOp::Greater - | AssocOp::Less - | AssocOp::ShiftRight - | AssocOp::GreaterEqual + if let AssocOp::Binary( + BinOpKind::Gt + | BinOpKind::Lt + | BinOpKind::Shr + | BinOpKind::Ge + ) // Don't recover from `foo::<bar = baz>`, because this could be an attempt to // assign a value to a defaulted generic parameter. | AssocOp::Assign @@ -2653,8 +2634,7 @@ impl<'a> Parser<'a> { || self.token == TokenKind::Dot; // This will be true when a trait object type `Foo +` or a path which was a `const fn` with // type params has been parsed. - let was_op = - matches!(self.prev_token.kind, token::BinOp(token::Plus | token::Shr) | token::Gt); + let was_op = matches!(self.prev_token.kind, token::Plus | token::Shr | token::Gt); if !is_op_or_dot && !was_op { // We perform these checks and early return to avoid taking a snapshot unnecessarily. 
return Err(err); @@ -3032,8 +3012,7 @@ impl<'a> Parser<'a> { pub(super) fn recover_vcs_conflict_marker(&mut self) { // <<<<<<< - let Some(start) = self.conflict_marker(&TokenKind::BinOp(token::Shl), &TokenKind::Lt) - else { + let Some(start) = self.conflict_marker(&TokenKind::Shl, &TokenKind::Lt) else { return; }; let mut spans = Vec::with_capacity(3); @@ -3048,15 +3027,13 @@ impl<'a> Parser<'a> { if self.token == TokenKind::Eof { break; } - if let Some(span) = self.conflict_marker(&TokenKind::OrOr, &TokenKind::BinOp(token::Or)) - { + if let Some(span) = self.conflict_marker(&TokenKind::OrOr, &TokenKind::Or) { middlediff3 = Some(span); } if let Some(span) = self.conflict_marker(&TokenKind::EqEq, &TokenKind::Eq) { middle = Some(span); } - if let Some(span) = self.conflict_marker(&TokenKind::BinOp(token::Shr), &TokenKind::Gt) - { + if let Some(span) = self.conflict_marker(&TokenKind::Shr, &TokenKind::Gt) { spans.push(span); end = Some(span); break; diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index b2e58c94280..9b2d562a69e 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -4,7 +4,7 @@ use core::mem; use core::ops::{Bound, ControlFlow}; use ast::mut_visit::{self, MutVisitor}; -use ast::token::IdentIsRaw; +use ast::token::{IdentIsRaw, MetaVarKind}; use ast::{CoroutineKind, ForLoopKind, GenBlockKind, MatchKind, Pat, Path, PathSegment, Recovered}; use rustc_ast::ptr::P; use rustc_ast::token::{self, Delimiter, Token, TokenKind}; @@ -171,7 +171,7 @@ impl<'a> Parser<'a> { break; } // Check for deprecated `...` syntax - if self.token == token::DotDotDot && op.node == AssocOp::DotDotEq { + if self.token == token::DotDotDot && op.node == AssocOp::Range(RangeLimits::Closed) { self.err_dotdotdot_syntax(self.token.span); } @@ -188,17 +188,12 @@ impl<'a> Parser<'a> { } // Look for JS' `===` and `!==` and recover - if (op.node == AssocOp::Equal || op.node == AssocOp::NotEqual) + if let AssocOp::Binary(bop @ BinOpKind::Eq | bop @ BinOpKind::Ne) = op.node && self.token == token::Eq && self.prev_token.span.hi() == self.token.span.lo() { let sp = op.span.to(self.token.span); - let sugg = match op.node { - AssocOp::Equal => "==", - AssocOp::NotEqual => "!=", - _ => unreachable!(), - } - .into(); + let sugg = bop.as_str().into(); let invalid = format!("{sugg}="); self.dcx().emit_err(errors::InvalidComparisonOperator { span: sp, @@ -213,7 +208,7 @@ impl<'a> Parser<'a> { } // Look for PHP's `<>` and recover - if op.node == AssocOp::Less + if op.node == AssocOp::Binary(BinOpKind::Lt) && self.token == token::Gt && self.prev_token.span.hi() == self.token.span.lo() { @@ -231,7 +226,7 @@ impl<'a> Parser<'a> { } // Look for C++'s `<=>` and recover - if op.node == AssocOp::LessEqual + if op.node == AssocOp::Binary(BinOpKind::Le) && self.token == token::Gt && self.prev_token.span.hi() == self.token.span.lo() { @@ -244,8 +239,8 @@ impl<'a> Parser<'a> { self.bump(); } - if self.prev_token == token::BinOp(token::Plus) - && self.token == token::BinOp(token::Plus) + if self.prev_token == token::Plus + && self.token == token::Plus && self.prev_token.span.between(self.token.span).is_empty() { let op_span = self.prev_token.span.to(self.token.span); @@ -255,8 +250,8 @@ impl<'a> Parser<'a> { continue; } - if self.prev_token == token::BinOp(token::Minus) - && self.token == token::BinOp(token::Minus) + if self.prev_token == token::Minus + && self.token == token::Minus && self.prev_token.span.between(self.token.span).is_empty() && 
!self.look_ahead(1, |tok| tok.can_begin_expr()) { @@ -269,13 +264,13 @@ impl<'a> Parser<'a> { let op = op.node; // Special cases: - if op == AssocOp::As { + if op == AssocOp::Cast { lhs = self.parse_assoc_op_cast(lhs, lhs_span, ExprKind::Cast)?; continue; - } else if op == AssocOp::DotDot || op == AssocOp::DotDotEq { + } else if let AssocOp::Range(limits) = op { // If we didn't have to handle `x..`/`x..=`, it would be pretty easy to // generalise it to the Fixity::None code. - lhs = self.parse_expr_range(prec, lhs, op, cur_op_span)?; + lhs = self.parse_expr_range(prec, lhs, limits, cur_op_span)?; break; } @@ -290,46 +285,16 @@ impl<'a> Parser<'a> { let span = self.mk_expr_sp(&lhs, lhs_span, rhs.span); lhs = match op { - AssocOp::Add - | AssocOp::Subtract - | AssocOp::Multiply - | AssocOp::Divide - | AssocOp::Modulus - | AssocOp::LAnd - | AssocOp::LOr - | AssocOp::BitXor - | AssocOp::BitAnd - | AssocOp::BitOr - | AssocOp::ShiftLeft - | AssocOp::ShiftRight - | AssocOp::Equal - | AssocOp::Less - | AssocOp::LessEqual - | AssocOp::NotEqual - | AssocOp::Greater - | AssocOp::GreaterEqual => { - let ast_op = op.to_ast_binop().unwrap(); + AssocOp::Binary(ast_op) => { let binary = self.mk_binary(source_map::respan(cur_op_span, ast_op), lhs, rhs); self.mk_expr(span, binary) } AssocOp::Assign => self.mk_expr(span, ExprKind::Assign(lhs, rhs, cur_op_span)), - AssocOp::AssignOp(k) => { - let aop = match k { - token::Plus => BinOpKind::Add, - token::Minus => BinOpKind::Sub, - token::Star => BinOpKind::Mul, - token::Slash => BinOpKind::Div, - token::Percent => BinOpKind::Rem, - token::Caret => BinOpKind::BitXor, - token::And => BinOpKind::BitAnd, - token::Or => BinOpKind::BitOr, - token::Shl => BinOpKind::Shl, - token::Shr => BinOpKind::Shr, - }; + AssocOp::AssignOp(aop) => { let aopexpr = self.mk_assign_op(source_map::respan(cur_op_span, aop), lhs, rhs); self.mk_expr(span, aopexpr) } - AssocOp::As | AssocOp::DotDot | AssocOp::DotDotEq => { + AssocOp::Cast | AssocOp::Range(_) => { self.dcx().span_bug(span, "AssocOp should have been handled by special case") } }; @@ -347,13 +312,14 @@ impl<'a> Parser<'a> { // An exhaustive check is done in the following block, but these are checked first // because they *are* ambiguous but also reasonable looking incorrect syntax, so we // want to keep their span info to improve diagnostics in these cases in a later stage. - (true, Some(AssocOp::Multiply)) | // `{ 42 } *foo = bar;` or `{ 42 } * 3` - (true, Some(AssocOp::Subtract)) | // `{ 42 } -5` - (true, Some(AssocOp::Add)) | // `{ 42 } + 42` (unary plus) - (true, Some(AssocOp::LAnd)) | // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }` - (true, Some(AssocOp::LOr)) | // `{ 42 } || 42` ("logical or" or closure) - (true, Some(AssocOp::BitOr)) // `{ 42 } | 42` or `{ 42 } |x| 42` - => { + (true, Some(AssocOp::Binary( + BinOpKind::Mul | // `{ 42 } *foo = bar;` or `{ 42 } * 3` + BinOpKind::Sub | // `{ 42 } -5` + BinOpKind::Add | // `{ 42 } + 42` (unary plus) + BinOpKind::And | // `{ 42 } &&x` (#61475) or `{ 42 } && if x { 1 } else { 0 }` + BinOpKind::Or | // `{ 42 } || 42` ("logical or" or closure) + BinOpKind::BitOr // `{ 42 } | 42` or `{ 42 } |x| 42` + ))) => { // These cases are ambiguous and can't be identified in the parser alone. // // Bitwise AND is left out because guessing intent is hard. We can make @@ -392,23 +358,21 @@ impl<'a> Parser<'a> { // When parsing const expressions, stop parsing when encountering `>`. 
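Editor's note: the `CONST_EXPR` restriction checked in the arm that follows stops expression parsing at `>` so that token can close the generic argument list; a `>` that belongs to the expression itself therefore needs braces. Illustration in ordinary Rust (the function `first_n` is invented for the example):

```rust
fn first_n<const N: usize>() -> usize {
    N
}

fn main() {
    // A plain literal is fine: the const-argument parser stops at `>`, which then
    // closes the generic arguments.
    let a = first_n::<4>();
    // If the `>` were part of the expression it would be read as the closing bracket,
    // so such expressions must be braced; `first_n::<4 > 2>()` does not parse.
    let b = first_n::<{ (4 > 2) as usize }>();
    println!("{a} {b}");
}
```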
( Some( - AssocOp::ShiftRight - | AssocOp::Greater - | AssocOp::GreaterEqual - | AssocOp::AssignOp(token::BinOpToken::Shr), + AssocOp::Binary(BinOpKind::Shr | BinOpKind::Gt | BinOpKind::Ge) + | AssocOp::AssignOp(BinOpKind::Shr), ), _, ) if self.restrictions.contains(Restrictions::CONST_EXPR) => { return None; } - // When recovering patterns as expressions, stop parsing when encountering an assignment `=`, an alternative `|`, or a range `..`. + // When recovering patterns as expressions, stop parsing when encountering an + // assignment `=`, an alternative `|`, or a range `..`. ( Some( AssocOp::Assign | AssocOp::AssignOp(_) - | AssocOp::BitOr - | AssocOp::DotDot - | AssocOp::DotDotEq, + | AssocOp::Binary(BinOpKind::BitOr) + | AssocOp::Range(_), ), _, ) if self.restrictions.contains(Restrictions::IS_PAT) => { @@ -423,7 +387,7 @@ impl<'a> Parser<'a> { incorrect: "and".into(), sub: errors::InvalidLogicalOperatorSub::Conjunction(self.token.span), }); - (AssocOp::LAnd, span) + (AssocOp::Binary(BinOpKind::And), span) } (None, Some((Ident { name: sym::or, span }, IdentIsRaw::No))) if self.may_recover() => { self.dcx().emit_err(errors::InvalidLogicalOperator { @@ -431,7 +395,7 @@ impl<'a> Parser<'a> { incorrect: "or".into(), sub: errors::InvalidLogicalOperatorSub::Disjunction(self.token.span), }); - (AssocOp::LOr, span) + (AssocOp::Binary(BinOpKind::Or), span) } _ => return None, }; @@ -449,7 +413,7 @@ impl<'a> Parser<'a> { &mut self, prec: ExprPrecedence, lhs: P<Expr>, - op: AssocOp, + limits: RangeLimits, cur_op_span: Span, ) -> PResult<'a, P<Expr>> { let rhs = if self.is_at_start_of_range_notation_rhs() { @@ -465,8 +429,6 @@ impl<'a> Parser<'a> { }; let rhs_span = rhs.as_ref().map_or(cur_op_span, |x| x.span); let span = self.mk_expr_sp(&lhs, lhs.span, rhs_span); - let limits = - if op == AssocOp::DotDot { RangeLimits::HalfOpen } else { RangeLimits::Closed }; let range = self.mk_range(Some(lhs), rhs, limits); Ok(self.mk_expr(span, range)) } @@ -543,23 +505,23 @@ impl<'a> Parser<'a> { // Note: when adding new unary operators, don't forget to adjust TokenKind::can_begin_expr() match this.token.uninterpolate().kind { // `!expr` - token::Not => make_it!(this, attrs, |this, _| this.parse_expr_unary(lo, UnOp::Not)), + token::Bang => make_it!(this, attrs, |this, _| this.parse_expr_unary(lo, UnOp::Not)), // `~expr` token::Tilde => make_it!(this, attrs, |this, _| this.recover_tilde_expr(lo)), // `-expr` - token::BinOp(token::Minus) => { + token::Minus => { make_it!(this, attrs, |this, _| this.parse_expr_unary(lo, UnOp::Neg)) } // `*expr` - token::BinOp(token::Star) => { + token::Star => { make_it!(this, attrs, |this, _| this.parse_expr_unary(lo, UnOp::Deref)) } // `&expr` and `&&expr` - token::BinOp(token::And) | token::AndAnd => { + token::And | token::AndAnd => { make_it!(this, attrs, |this, _| this.parse_expr_borrow(lo)) } // `+lit` - token::BinOp(token::Plus) if this.look_ahead(1, |tok| tok.is_numeric_lit()) => { + token::Plus if this.look_ahead(1, |tok| tok.is_numeric_lit()) => { let mut err = errors::LeadingPlusNotSupported { span: lo, remove_plus: None, @@ -579,9 +541,7 @@ impl<'a> Parser<'a> { this.parse_expr_prefix(attrs) } // Recover from `++x`: - token::BinOp(token::Plus) - if this.look_ahead(1, |t| *t == token::BinOp(token::Plus)) => - { + token::Plus if this.look_ahead(1, |t| *t == token::Plus) => { let starts_stmt = this.prev_token == token::Semi || this.prev_token == token::CloseDelim(Delimiter::Brace); let pre_span = this.token.span.to(this.look_ahead(1, |t| t.span)); @@ -605,7 +565,11 @@ 
impl<'a> Parser<'a> { fn parse_expr_prefix_common(&mut self, lo: Span) -> PResult<'a, (Span, P<Expr>)> { self.bump(); let attrs = self.parse_outer_attributes()?; - let expr = self.parse_expr_prefix(attrs)?; + let expr = if self.token.is_range_separator() { + self.parse_expr_prefix_range(attrs) + } else { + self.parse_expr_prefix(attrs) + }?; let span = self.interpolated_or_expr_span(&expr); Ok((lo.to(span), expr)) } @@ -761,14 +725,12 @@ impl<'a> Parser<'a> { suggestion, }) } - token::BinOp(token::Shl) => { - self.dcx().emit_err(errors::ShiftInterpretedAsGeneric { - shift: self.token.span, - r#type: path, - args: args_span, - suggestion, - }) - } + token::Shl => self.dcx().emit_err(errors::ShiftInterpretedAsGeneric { + shift: self.token.span, + r#type: path, + args: args_span, + suggestion, + }), _ => { // We can end up here even without `<` being the next token, for // example because `parse_ty_no_plus` returns `Err` on keywords, @@ -816,6 +778,7 @@ impl<'a> Parser<'a> { ExprKind::MethodCall(_) => "a method call", ExprKind::Call(_, _) => "a function call", ExprKind::Await(_, _) => "`.await`", + ExprKind::Use(_, _) => "`.use`", ExprKind::Match(_, _, MatchKind::Postfix) => "a postfix match", ExprKind::Err(_) => return Ok(with_postfix), _ => unreachable!("parse_dot_or_call_expr_with_ shouldn't produce this"), @@ -1334,6 +1297,12 @@ impl<'a> Parser<'a> { return Ok(self.mk_await_expr(self_arg, lo)); } + if self.eat_keyword(exp!(Use)) { + let use_span = self.prev_token.span; + self.psess.gated_spans.gate(sym::ergonomic_clones, use_span); + return Ok(self.mk_use_expr(self_arg, lo)); + } + // Post-fix match if self.eat_keyword(exp!(Match)) { let match_span = self.prev_token.span; @@ -1382,6 +1351,7 @@ impl<'a> Parser<'a> { fn parse_expr_bottom(&mut self) -> PResult<'a, P<Expr>> { maybe_recover_from_interpolated_ty_qpath!(self, true); + let span = self.token.span; if let token::Interpolated(nt) = &self.token.kind { match &**nt { token::NtExpr(e) | token::NtLiteral(e) => { @@ -1389,18 +1359,16 @@ impl<'a> Parser<'a> { self.bump(); return Ok(e); } - token::NtPath(path) => { - let path = (**path).clone(); - self.bump(); - return Ok(self.mk_expr(self.prev_token.span, ExprKind::Path(None, path))); - } token::NtBlock(block) => { let block = block.clone(); self.bump(); return Ok(self.mk_expr(self.prev_token.span, ExprKind::Block(block, None))); } - _ => {} }; + } else if let Some(path) = self.eat_metavar_seq(MetaVarKind::Path, |this| { + this.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type)) + }) { + return Ok(self.mk_expr(span, ExprKind::Path(None, path))); } // Outer attributes are already parsed and will be @@ -1435,6 +1403,7 @@ impl<'a> Parser<'a> { } else if this.check_path() { this.parse_expr_path_start() } else if this.check_keyword(exp!(Move)) + || this.check_keyword(exp!(Use)) || this.check_keyword(exp!(Static)) || this.check_const_closure() { @@ -1612,7 +1581,7 @@ impl<'a> Parser<'a> { }; // `!`, as an operator, is prefix, so we know this isn't that. - let (span, kind) = if self.eat(exp!(Not)) { + let (span, kind) = if self.eat(exp!(Bang)) { // MACRO INVOCATION expression if qself.is_some() { self.dcx().emit_err(errors::MacroInvocationWithQualifiedPath(path.span)); @@ -2426,7 +2395,7 @@ impl<'a> Parser<'a> { Ok(closure) } - /// Parses an optional `move` prefix to a closure-like construct. + /// Parses an optional `move` or `use` prefix to a closure-like construct. 
fn parse_capture_clause(&mut self) -> PResult<'a, CaptureBy> { if self.eat_keyword(exp!(Move)) { let move_kw_span = self.prev_token.span; @@ -2439,6 +2408,16 @@ impl<'a> Parser<'a> { } else { Ok(CaptureBy::Value { move_kw: move_kw_span }) } + } else if self.eat_keyword(exp!(Use)) { + let use_kw_span = self.prev_token.span; + self.psess.gated_spans.gate(sym::ergonomic_clones, use_kw_span); + // Check for `use async` and recover + if self.check_keyword(exp!(Async)) { + let use_async_span = self.token.span.with_lo(self.prev_token.span.data().lo); + Err(self.dcx().create_err(errors::AsyncUseOrderIncorrect { span: use_async_span })) + } else { + Ok(CaptureBy::Use { use_kw: use_kw_span }) + } } else { Ok(CaptureBy::Ref) } @@ -2633,7 +2612,7 @@ impl<'a> Parser<'a> { missing_let: None, comparison: None, }; - if self.prev_token == token::BinOp(token::Or) { + if self.prev_token == token::Or { // This was part of a closure, the that part of the parser recover. return Err(self.dcx().create_err(err)); } else { @@ -3084,7 +3063,7 @@ impl<'a> Parser<'a> { } self.restore_snapshot(pre_pat_snapshot); - match self.parse_stmt_without_recovery(true, ForceCollect::No) { + match self.parse_stmt_without_recovery(true, ForceCollect::No, false) { // Consume statements for as long as possible. Ok(Some(stmt)) => { stmts.push(stmt); @@ -3453,7 +3432,7 @@ impl<'a> Parser<'a> { self.is_keyword_ahead(lookahead, &[kw]) && (( // `async move {` - self.is_keyword_ahead(lookahead + 1, &[kw::Move]) + self.is_keyword_ahead(lookahead + 1, &[kw::Move, kw::Use]) && self.look_ahead(lookahead + 2, |t| { *t == token::OpenDelim(Delimiter::Brace) || t.is_whole_block() }) @@ -3856,6 +3835,13 @@ impl<'a> Parser<'a> { await_expr } + fn mk_use_expr(&mut self, self_arg: P<Expr>, lo: Span) -> P<Expr> { + let span = lo.to(self.prev_token.span); + let use_expr = self.mk_expr(span, ExprKind::Use(self_arg, self.prev_token.span)); + self.recover_from_use(); + use_expr + } + pub(crate) fn mk_expr_with_attrs(&self, span: Span, kind: ExprKind, attrs: AttrVec) -> P<Expr> { P(Expr { kind, span, attrs, id: DUMMY_NODE_ID, tokens: None }) } @@ -4004,6 +3990,7 @@ impl MutVisitor for CondChecker<'_> { } ExprKind::Unary(_, _) | ExprKind::Await(_, _) + | ExprKind::Use(_, _) | ExprKind::AssignOp(_, _, _) | ExprKind::Range(_, _, _) | ExprKind::Try(_) diff --git a/compiler/rustc_parse/src/parser/generics.rs b/compiler/rustc_parse/src/parser/generics.rs index 11f0e579de5..c3f71dd8b30 100644 --- a/compiler/rustc_parse/src/parser/generics.rs +++ b/compiler/rustc_parse/src/parser/generics.rs @@ -367,34 +367,47 @@ impl<'a> Parser<'a> { loop { let where_sp = where_lo.to(self.prev_token.span); + let attrs = self.parse_outer_attributes()?; let pred_lo = self.token.span; - let kind = if self.check_lifetime() && self.look_ahead(1, |t| !t.is_like_plus()) { - let lifetime = self.expect_lifetime(); - // Bounds starting with a colon are mandatory, but possibly empty. - self.expect(exp!(Colon))?; - let bounds = self.parse_lt_param_bounds(); - ast::WherePredicateKind::RegionPredicate(ast::WhereRegionPredicate { - lifetime, - bounds, - }) - } else if self.check_type() { - match self.parse_ty_where_predicate_kind_or_recover_tuple_struct_body( - struct_, pred_lo, where_sp, - )? 
{ - PredicateKindOrStructBody::PredicateKind(kind) => kind, - PredicateKindOrStructBody::StructBody(body) => { - tuple_struct_body = Some(body); - break; - } + let predicate = self.collect_tokens(None, attrs, ForceCollect::No, |this, attrs| { + for attr in &attrs { + self.psess.gated_spans.gate(sym::where_clause_attrs, attr.span); } - } else { - break; - }; - where_clause.predicates.push(ast::WherePredicate { - kind, - id: DUMMY_NODE_ID, - span: pred_lo.to(self.prev_token.span), - }); + let kind = if this.check_lifetime() && this.look_ahead(1, |t| !t.is_like_plus()) { + let lifetime = this.expect_lifetime(); + // Bounds starting with a colon are mandatory, but possibly empty. + this.expect(exp!(Colon))?; + let bounds = this.parse_lt_param_bounds(); + Some(ast::WherePredicateKind::RegionPredicate(ast::WhereRegionPredicate { + lifetime, + bounds, + })) + } else if this.check_type() { + match this.parse_ty_where_predicate_kind_or_recover_tuple_struct_body( + struct_, pred_lo, where_sp, + )? { + PredicateKindOrStructBody::PredicateKind(kind) => Some(kind), + PredicateKindOrStructBody::StructBody(body) => { + tuple_struct_body = Some(body); + None + } + } + } else { + None + }; + let predicate = kind.map(|kind| ast::WherePredicate { + attrs, + kind, + id: DUMMY_NODE_ID, + span: pred_lo.to(this.prev_token.span), + is_placeholder: false, + }); + Ok((predicate, Trailing::No, UsePreAttrPos::No)) + })?; + match predicate { + Some(predicate) => where_clause.predicates.push(predicate), + None => break, + } let prev_token = self.prev_token.span; let ate_comma = self.eat(exp!(Comma)); diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs index c923717ecaf..2388463ecfc 100644 --- a/compiler/rustc_parse/src/parser/item.rs +++ b/compiler/rustc_parse/src/parser/item.rs @@ -4,7 +4,7 @@ use std::mem; use ast::token::IdentIsRaw; use rustc_ast::ast::*; use rustc_ast::ptr::P; -use rustc_ast::token::{self, Delimiter, TokenKind}; +use rustc_ast::token::{self, Delimiter, InvisibleOrigin, MetaVarKind, TokenKind}; use rustc_ast::tokenstream::{DelimSpan, TokenStream, TokenTree}; use rustc_ast::util::case::Case; use rustc_ast::{self as ast}; @@ -21,10 +21,10 @@ use super::diagnostics::{ConsumeClosingDelim, dummy_arg}; use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{ AttrWrapper, ExpKeywordPair, ExpTokenPair, FollowedByType, ForceCollect, Parser, PathStyle, - Trailing, UsePreAttrPos, + Recovered, Trailing, UsePreAttrPos, }; use crate::errors::{self, MacroExpandsToAdtField}; -use crate::{exp, fluent_generated as fluent, maybe_whole}; +use crate::{exp, fluent_generated as fluent}; impl<'a> Parser<'a> { /// Parses a source module as a crate. This is the main entry point for the parser. 
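// Illustrative sketch (not part of the diff): the closure surface syntax that the
// `parse_capture_clause` and postfix `.use` hunks above begin to accept. The feature
// name is taken from the gate `sym::ergonomic_clones` used above; the function name
// and the exact runtime semantics here are assumptions made only for illustration.
#![feature(ergonomic_clones)]
fn demo(s: String) -> usize {
    // A `use` closure is parsed like a `move` closure (`CaptureBy::Use` in the AST);
    // the same hunks also parse postfix `expr.use` (`ExprKind::Use`) and reject the
    // `use async` ordering in favour of `async use`.
    let f = use || s.len();
    f()
}
fn main() {
    println!("{}", demo(String::from("ergonomic")));
}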
@@ -142,10 +142,13 @@ impl<'a> Parser<'a> { fn_parse_mode: FnParseMode, force_collect: ForceCollect, ) -> PResult<'a, Option<Item>> { - maybe_whole!(self, NtItem, |item| { + if let Some(item) = + self.eat_metavar_seq(MetaVarKind::Item, |this| this.parse_item(ForceCollect::Yes)) + { + let mut item = item.expect("an actual item"); attrs.prepend_to_nt_inner(&mut item.attrs); - Some(item.into_inner()) - }); + return Ok(Some(item.into_inner())); + } self.collect_tokens(None, attrs, force_collect, |this, mut attrs| { let lo = this.token.span; @@ -209,7 +212,7 @@ impl<'a> Parser<'a> { let check_pub = def == &Defaultness::Final; let mut def_ = || mem::replace(def, Defaultness::Final); - let info = if self.eat_keyword_case(exp!(Use), case) { + let info = if !self.is_use_closure() && self.eat_keyword_case(exp!(Use), case) { self.parse_use_item()? } else if self.check_fn_front_matter(check_pub, case) { // FUNCTION ITEM @@ -217,7 +220,14 @@ impl<'a> Parser<'a> { self.parse_fn(attrs, fn_parse_mode, lo, vis, case)?; ( ident, - ItemKind::Fn(Box::new(Fn { defaultness: def_(), sig, generics, contract, body })), + ItemKind::Fn(Box::new(Fn { + defaultness: def_(), + sig, + generics, + contract, + body, + define_opaque: None, + })), ) } else if self.eat_keyword(exp!(Extern)) { if self.eat_keyword(exp!(Crate)) { @@ -382,7 +392,7 @@ impl<'a> Parser<'a> { /// Are we sure this could not possibly be a macro invocation? fn isnt_macro_invocation(&mut self) -> bool { - self.check_ident() && self.look_ahead(1, |t| *t != token::Not && *t != token::PathSep) + self.check_ident() && self.look_ahead(1, |t| *t != token::Bang && *t != token::PathSep) } /// Recover on encountering a struct, enum, or method definition where the user @@ -480,7 +490,7 @@ impl<'a> Parser<'a> { /// Parses an item macro, e.g., `item!();`. fn parse_item_macro(&mut self, vis: &Visibility) -> PResult<'a, MacCall> { let path = self.parse_path(PathStyle::Mod)?; // `foo::bar` - self.expect(exp!(Not))?; // `!` + self.expect(exp!(Bang))?; // `!` match self.parse_delim_args() { // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`. Ok(args) => { @@ -540,7 +550,7 @@ impl<'a> Parser<'a> { fn parse_polarity(&mut self) -> ast::ImplPolarity { // Disambiguate `impl !Trait for Type { ... }` and `impl ! { ... }` for the never type. 
- if self.check(exp!(Not)) && self.look_ahead(1, |t| t.can_begin_type()) { + if self.check(exp!(Bang)) && self.look_ahead(1, |t| t.can_begin_type()) { self.bump(); // `!` ast::ImplPolarity::Negative(self.prev_token.span) } else { @@ -639,7 +649,7 @@ impl<'a> Parser<'a> { let impl_items = self.parse_item_list(attrs, |p| p.parse_impl_item(ForceCollect::No))?; - let item_kind = match ty_second { + let (of_trait, self_ty) = match ty_second { Some(ty_second) => { // impl Trait for Type if !has_for { @@ -672,31 +682,20 @@ impl<'a> Parser<'a> { }; let trait_ref = TraitRef { path, ref_id: ty_first.id }; - ItemKind::Impl(Box::new(Impl { - safety, - polarity, - defaultness, - constness, - generics, - of_trait: Some(trait_ref), - self_ty: ty_second, - items: impl_items, - })) - } - None => { - // impl Type - ItemKind::Impl(Box::new(Impl { - safety, - polarity, - defaultness, - constness, - generics, - of_trait: None, - self_ty: ty_first, - items: impl_items, - })) + (Some(trait_ref), ty_second) } + None => (None, ty_first), // impl Type }; + let item_kind = ItemKind::Impl(Box::new(Impl { + safety, + polarity, + defaultness, + constness, + generics, + of_trait, + self_ty, + items: impl_items, + })); Ok((Ident::empty(), item_kind)) } @@ -1277,6 +1276,21 @@ impl<'a> Parser<'a> { None } + fn is_use_closure(&self) -> bool { + if self.token.is_keyword(kw::Use) { + // Check if this could be a closure. + self.look_ahead(1, |token| { + // Move or Async here would be an error but still we're parsing a closure + let dist = + if token.is_keyword(kw::Move) || token.is_keyword(kw::Async) { 2 } else { 1 }; + + self.look_ahead(dist, |token| matches!(token.kind, token::Or | token::OrOr)) + }) + } else { + false + } + } + fn is_unsafe_foreign_mod(&self) -> bool { self.token.is_keyword(kw::Unsafe) && self.is_keyword_ahead(1, &[kw::Extern]) @@ -1290,10 +1304,10 @@ impl<'a> Parser<'a> { if self.check_keyword(exp!(Static)) { // Check if this could be a closure. !self.look_ahead(1, |token| { - if token.is_keyword(kw::Move) { + if token.is_keyword(kw::Move) || token.is_keyword(kw::Use) { return true; } - matches!(token.kind, token::BinOp(token::Or) | token::OrOr) + matches!(token.kind, token::Or | token::OrOr) }) } else { // `$qual static` @@ -1579,7 +1593,7 @@ impl<'a> Parser<'a> { } let ident = this.parse_field_ident("enum", vlo)?; - if this.token == token::Not { + if this.token == token::Bang { if let Err(err) = this.unexpected() { err.with_note(fluent::parse_macro_expands_to_enum_variant).emit(); } @@ -1814,7 +1828,7 @@ impl<'a> Parser<'a> { let attrs = p.parse_outer_attributes()?; p.collect_tokens(None, attrs, ForceCollect::No, |p, attrs| { let mut snapshot = None; - if p.is_vcs_conflict_marker(&TokenKind::BinOp(token::Shl), &TokenKind::Lt) { + if p.is_vcs_conflict_marker(&TokenKind::Shl, &TokenKind::Lt) { // Account for `<<<<<<<` diff markers. We can't proactively error here because // that can be a valid type start, so we snapshot and reparse only we've // encountered another parse error. 
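// Illustrative only, reconstructed from the hunks above rather than from the real
// `rustc_ast::token` definitions: the mechanical rename running through this file and
// the rest of the parser replaces the composite `BinOp(..)`/`BinOpEq(..)` token kinds,
// and `Not`, with flat variants, which is why match arms like
// `token::BinOp(token::Shl)` become `token::Shl` and `token::Not` becomes `token::Bang`.
#[allow(dead_code)]
enum BinOpToken { Plus, Minus, Star, Slash, Percent, Caret, And, Or, Shl, Shr }
#[allow(dead_code)]
enum OldTokenKind { BinOp(BinOpToken), BinOpEq(BinOpToken), Not /* ... */ }
#[allow(dead_code)]
enum NewTokenKind {
    Plus, Minus, Star, Slash, Percent, Caret, And, Or, Shl, Shr,
    PlusEq, MinusEq, StarEq, SlashEq, PercentEq, CaretEq, AndEq, OrEq, ShlEq, ShrEq,
    Bang, /* ... */
}
fn main() {}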
@@ -2034,7 +2048,7 @@ impl<'a> Parser<'a> { attrs: AttrVec, ) -> PResult<'a, FieldDef> { let name = self.parse_field_ident(adt_ty, lo)?; - if self.token == token::Not { + if self.token == token::Bang { if let Err(mut err) = self.unexpected() { // Encounter the macro invocation err.subdiagnostic(MacroExpandsToAdtField { adt_ty }); @@ -2184,7 +2198,7 @@ impl<'a> Parser<'a> { if self.check_keyword(exp!(MacroRules)) { let macro_rules_span = self.token.span; - if self.look_ahead(1, |t| *t == token::Not) && self.look_ahead(2, |t| t.is_ident()) { + if self.look_ahead(1, |t| *t == token::Bang) && self.look_ahead(2, |t| t.is_ident()) { return IsMacroRulesItem::Yes { has_bang: true }; } else if self.look_ahead(1, |t| (t.is_ident())) { // macro_rules foo @@ -2209,11 +2223,11 @@ impl<'a> Parser<'a> { self.expect_keyword(exp!(MacroRules))?; // `macro_rules` if has_bang { - self.expect(exp!(Not))?; // `!` + self.expect(exp!(Bang))?; // `!` } let ident = self.parse_ident()?; - if self.eat(exp!(Not)) { + if self.eat(exp!(Bang)) { // Handle macro_rules! foo! let span = self.prev_token.span; self.dcx().emit_err(errors::MacroNameRemoveBang { span }); @@ -2954,14 +2968,27 @@ impl<'a> Parser<'a> { } _ => unreachable!(), }; + // is lifetime `n` tokens ahead? + let is_lifetime = |this: &Self, n| this.look_ahead(n, |t| t.is_lifetime()); // Is `self` `n` tokens ahead? let is_isolated_self = |this: &Self, n| { this.is_keyword_ahead(n, &[kw::SelfLower]) && this.look_ahead(n + 1, |t| t != &token::PathSep) }; + // Is `pin const self` `n` tokens ahead? + let is_isolated_pin_const_self = |this: &Self, n| { + this.look_ahead(n, |token| token.is_ident_named(sym::pin)) + && this.is_keyword_ahead(n + 1, &[kw::Const]) + && is_isolated_self(this, n + 2) + }; // Is `mut self` `n` tokens ahead? let is_isolated_mut_self = |this: &Self, n| this.is_keyword_ahead(n, &[kw::Mut]) && is_isolated_self(this, n + 1); + // Is `pin mut self` `n` tokens ahead? + let is_isolated_pin_mut_self = |this: &Self, n| { + this.look_ahead(n, |token| token.is_ident_named(sym::pin)) + && is_isolated_mut_self(this, n + 1) + }; // Parse `self` or `self: TYPE`. We already know the current token is `self`. let parse_self_possibly_typed = |this: &mut Self, m| { let eself_ident = expect_self_ident(this); @@ -3011,27 +3038,36 @@ impl<'a> Parser<'a> { // else is parsed as a normal function parameter list, so some lookahead is required. 
let eself_lo = self.token.span; let (eself, eself_ident, eself_hi) = match self.token.uninterpolate().kind { - token::BinOp(token::And) => { - let eself = if is_isolated_self(self, 1) { - // `&self` - self.bump(); - SelfKind::Region(None, Mutability::Not) - } else if is_isolated_mut_self(self, 1) { - // `&mut self` - self.bump(); - self.bump(); - SelfKind::Region(None, Mutability::Mut) - } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_self(self, 2) { - // `&'lt self` - self.bump(); - let lt = self.expect_lifetime(); - SelfKind::Region(Some(lt), Mutability::Not) - } else if self.look_ahead(1, |t| t.is_lifetime()) && is_isolated_mut_self(self, 2) { - // `&'lt mut self` - self.bump(); - let lt = self.expect_lifetime(); - self.bump(); - SelfKind::Region(Some(lt), Mutability::Mut) + token::And => { + let has_lifetime = is_lifetime(self, 1); + let skip_lifetime_count = has_lifetime as usize; + let eself = if is_isolated_self(self, skip_lifetime_count + 1) { + // `&{'lt} self` + self.bump(); // & + let lifetime = has_lifetime.then(|| self.expect_lifetime()); + SelfKind::Region(lifetime, Mutability::Not) + } else if is_isolated_mut_self(self, skip_lifetime_count + 1) { + // `&{'lt} mut self` + self.bump(); // & + let lifetime = has_lifetime.then(|| self.expect_lifetime()); + self.bump(); // mut + SelfKind::Region(lifetime, Mutability::Mut) + } else if is_isolated_pin_const_self(self, skip_lifetime_count + 1) { + // `&{'lt} pin const self` + self.bump(); // & + let lifetime = has_lifetime.then(|| self.expect_lifetime()); + self.psess.gated_spans.gate(sym::pin_ergonomics, self.token.span); + self.bump(); // pin + self.bump(); // const + SelfKind::Pinned(lifetime, Mutability::Not) + } else if is_isolated_pin_mut_self(self, skip_lifetime_count + 1) { + // `&{'lt} pin mut self` + self.bump(); // & + let lifetime = has_lifetime.then(|| self.expect_lifetime()); + self.psess.gated_spans.gate(sym::pin_ergonomics, self.token.span); + self.bump(); // pin + self.bump(); // mut + SelfKind::Pinned(lifetime, Mutability::Mut) } else { // `&not_self` return Ok(None); @@ -3041,12 +3077,12 @@ (eself, self_ident, hi) } // `*self` - token::BinOp(token::Star) if is_isolated_self(self, 1) => { + token::Star if is_isolated_self(self, 1) => { self.bump(); recover_self_ptr(self)? } // `*mut self` and `*const self` - token::BinOp(token::Star) + token::Star if self.look_ahead(1, |t| t.is_mutability()) && is_isolated_self(self, 2) => { self.bump(); @@ -3071,11 +3107,13 @@ fn is_named_param(&self) -> bool { let offset = match &self.token.kind { - token::Interpolated(nt) => match &**nt { - token::NtPat(..)
=> return self.look_ahead(1, |t| t == &token::Colon), + token::OpenDelim(Delimiter::Invisible(origin)) => match origin { + InvisibleOrigin::MetaVar(MetaVarKind::Pat(_)) => { + return self.check_noexpect_past_close_delim(&token::Colon); + } _ => 0, }, - token::BinOp(token::And) | token::AndAnd => 1, + token::And | token::AndAnd => 1, _ if self.token.is_keyword(kw::Mut) => 1, _ => 0, }; diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 80a33a76005..323c0c1d6d0 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -24,7 +24,8 @@ pub use pat::{CommaRecoveryMode, RecoverColon, RecoverComma}; use path::PathStyle; use rustc_ast::ptr::P; use rustc_ast::token::{ - self, Delimiter, IdentIsRaw, InvisibleOrigin, MetaVarKind, Nonterminal, Token, TokenKind, + self, Delimiter, IdentIsRaw, InvisibleOrigin, MetaVarKind, Nonterminal, NtPatKind, Token, + TokenKind, }; use rustc_ast::tokenstream::{AttrsTarget, Spacing, TokenStream, TokenTree}; use rustc_ast::util::case::Case; @@ -812,9 +813,9 @@ impl<'a> Parser<'a> { self.is_keyword_ahead(0, &[kw::Const]) && self.look_ahead(1, |t| match &t.kind { // async closures do not work with const closures, so we do not parse that here. - token::Ident(kw::Move | kw::Static, IdentIsRaw::No) + token::Ident(kw::Move | kw::Use | kw::Static, IdentIsRaw::No) | token::OrOr - | token::BinOp(token::Or) => true, + | token::Or => true, _ => false, }) } @@ -1075,10 +1076,12 @@ impl<'a> Parser<'a> { let initial_semicolon = self.token.span; while self.eat(exp!(Semi)) { - let _ = self.parse_stmt_without_recovery(false, ForceCollect::No).unwrap_or_else(|e| { - e.cancel(); - None - }); + let _ = self + .parse_stmt_without_recovery(false, ForceCollect::No, false) + .unwrap_or_else(|e| { + e.cancel(); + None + }); } expect_err @@ -1650,7 +1653,7 @@ impl<'a> Parser<'a> { /// `::{` or `::*` fn is_import_coupler(&mut self) -> bool { self.check_path_sep_and_look_ahead(|t| { - matches!(t.kind, token::OpenDelim(Delimiter::Brace) | token::BinOp(token::Star)) + matches!(t.kind, token::OpenDelim(Delimiter::Brace) | token::Star) }) } @@ -1745,7 +1748,12 @@ pub enum ParseNtResult { Tt(TokenTree), Ident(Ident, IdentIsRaw), Lifetime(Ident, IdentIsRaw), + Item(P<ast::Item>), + Stmt(P<ast::Stmt>), + Pat(P<ast::Pat>, NtPatKind), Ty(P<ast::Ty>), + Meta(P<ast::AttrItem>), + Path(P<ast::Path>), Vis(P<ast::Visibility>), /// This variant will eventually be removed, along with `Token::Interpolate`. diff --git a/compiler/rustc_parse/src/parser/nonterminal.rs b/compiler/rustc_parse/src/parser/nonterminal.rs index f202f85752e..1123755ce00 100644 --- a/compiler/rustc_parse/src/parser/nonterminal.rs +++ b/compiler/rustc_parse/src/parser/nonterminal.rs @@ -32,7 +32,7 @@ impl<'a> Parser<'a> { | MetaVarKind::Expr { .. } | MetaVarKind::Ty { .. } | MetaVarKind::Literal // `true`, `false` - | MetaVarKind::Meta + | MetaVarKind::Meta { .. } | MetaVarKind::Path => true, MetaVarKind::Item @@ -48,14 +48,11 @@ impl<'a> Parser<'a> { /// Old variant of `may_be_ident`. Being phased out. fn nt_may_be_ident(nt: &Nonterminal) -> bool { match nt { - NtStmt(_) - | NtPat(_) - | NtExpr(_) + NtExpr(_) | NtLiteral(_) // `true`, `false` - | NtMeta(_) - | NtPath(_) => true, + => true, - NtItem(_) | NtBlock(_) => false, + NtBlock(_) => false, } } @@ -98,8 +95,7 @@ impl<'a> Parser<'a> { token::OpenDelim(Delimiter::Brace) => true, token::NtLifetime(..) 
=> true, token::Interpolated(nt) => match &**nt { - NtBlock(_) | NtStmt(_) | NtExpr(_) | NtLiteral(_) => true, - NtItem(_) | NtPat(_) | NtMeta(_) | NtPath(_) => false, + NtBlock(_) | NtExpr(_) | NtLiteral(_) => true, }, token::OpenDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar(k))) => match k { MetaVarKind::Block @@ -109,7 +105,7 @@ impl<'a> Parser<'a> { MetaVarKind::Item | MetaVarKind::Pat(_) | MetaVarKind::Ty { .. } - | MetaVarKind::Meta + | MetaVarKind::Meta { .. } | MetaVarKind::Path | MetaVarKind::Vis => false, MetaVarKind::Lifetime | MetaVarKind::Ident | MetaVarKind::TT => { @@ -149,7 +145,7 @@ impl<'a> Parser<'a> { // Note that TT is treated differently to all the others. NonterminalKind::TT => return Ok(ParseNtResult::Tt(self.parse_token_tree())), NonterminalKind::Item => match self.parse_item(ForceCollect::Yes)? { - Some(item) => NtItem(item), + Some(item) => return Ok(ParseNtResult::Item(item)), None => { return Err(self .dcx() @@ -162,7 +158,7 @@ impl<'a> Parser<'a> { NtBlock(self.collect_tokens_no_attrs(|this| this.parse_block())?) } NonterminalKind::Stmt => match self.parse_stmt(ForceCollect::Yes)? { - Some(s) => NtStmt(P(s)), + Some(stmt) => return Ok(ParseNtResult::Stmt(P(stmt))), None => { return Err(self .dcx() @@ -170,15 +166,18 @@ impl<'a> Parser<'a> { } }, NonterminalKind::Pat(pat_kind) => { - NtPat(self.collect_tokens_no_attrs(|this| match pat_kind { - PatParam { .. } => this.parse_pat_no_top_alt(None, None), - PatWithOr => this.parse_pat_no_top_guard( - None, - RecoverComma::No, - RecoverColon::No, - CommaRecoveryMode::EitherTupleOrPipe, - ), - })?) + return Ok(ParseNtResult::Pat( + self.collect_tokens_no_attrs(|this| match pat_kind { + PatParam { .. } => this.parse_pat_no_top_alt(None, None), + PatWithOr => this.parse_pat_no_top_guard( + None, + RecoverComma::No, + RecoverColon::No, + CommaRecoveryMode::EitherTupleOrPipe, + ), + })?, + pat_kind, + )); } NonterminalKind::Expr(_) => NtExpr(self.parse_expr_force_collect()?), NonterminalKind::Literal => { @@ -203,9 +202,13 @@ impl<'a> Parser<'a> { }; } NonterminalKind::Path => { - NtPath(P(self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))?)) + return Ok(ParseNtResult::Path(P( + self.collect_tokens_no_attrs(|this| this.parse_path(PathStyle::Type))? 
+ ))); + } + NonterminalKind::Meta => { + return Ok(ParseNtResult::Meta(P(self.parse_attr_item(ForceCollect::Yes)?))); } - NonterminalKind::Meta => NtMeta(P(self.parse_attr_item(ForceCollect::Yes)?)), NonterminalKind::Vis => { return Ok(ParseNtResult::Vis(P(self.collect_tokens_no_attrs(|this| { this.parse_visibility(FollowedByType::Yes) diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs index 8ce749ec814..ec14c5718da 100644 --- a/compiler/rustc_parse/src/parser/pat.rs +++ b/compiler/rustc_parse/src/parser/pat.rs @@ -2,7 +2,8 @@ use std::ops::Bound; use rustc_ast::mut_visit::{self, MutVisitor}; use rustc_ast::ptr::P; -use rustc_ast::token::{self, BinOpToken, Delimiter, IdentIsRaw, Token}; +use rustc_ast::token::NtPatKind::*; +use rustc_ast::token::{self, Delimiter, IdentIsRaw, MetaVarKind, Token}; use rustc_ast::util::parser::ExprPrecedence; use rustc_ast::visit::{self, Visitor}; use rustc_ast::{ @@ -30,7 +31,7 @@ use crate::errors::{ UnexpectedVertVertInPattern, WrapInParens, }; use crate::parser::expr::{DestructuredFloat, could_be_unclosed_char_literal}; -use crate::{exp, maybe_recover_from_interpolated_ty_qpath, maybe_whole}; +use crate::{exp, maybe_recover_from_interpolated_ty_qpath}; #[derive(PartialEq, Copy, Clone)] pub enum Expected { @@ -357,7 +358,7 @@ impl<'a> Parser<'a> { ) }); match (is_end_ahead, &self.token.kind) { - (true, token::BinOp(token::Or) | token::OrOr) => { + (true, token::Or | token::OrOr) => { // A `|` or possibly `||` token shouldn't be here. Ban it. self.dcx().emit_err(TrailingVertNotAllowed { span: self.token.span, @@ -431,7 +432,11 @@ impl<'a> Parser<'a> { // `[` is included for indexing operations, // `[]` is excluded as `a[]` isn't an expression and should be recovered as `a, []` (cf. `tests/ui/parser/pat-lt-bracket-7.rs`), // `as` is included for type casts - let has_trailing_operator = matches!(self.token.kind, token::BinOp(op) if op != BinOpToken::Or) + let has_trailing_operator = matches!( + self.token.kind, + token::Plus | token::Minus | token::Star | token::Slash | token::Percent + | token::Caret | token::And | token::Shl | token::Shr // excludes `Or` + ) || self.token == token::Question || (self.token == token::OpenDelim(Delimiter::Bracket) && self.look_ahead(1, |t| *t != token::CloseDelim(Delimiter::Bracket))) // excludes `[]` @@ -689,6 +694,27 @@ impl<'a> Parser<'a> { PatVisitor { parser: self, stmt, arm: None, field: None }.visit_stmt(stmt); } + fn eat_metavar_pat(&mut self) -> Option<P<Pat>> { + // Must try both kinds of pattern nonterminals. + if let Some(pat) = self.eat_metavar_seq_with_matcher( + |mv_kind| matches!(mv_kind, MetaVarKind::Pat(PatParam { .. })), + |this| this.parse_pat_no_top_alt(None, None), + ) { + Some(pat) + } else if let Some(pat) = self.eat_metavar_seq(MetaVarKind::Pat(PatWithOr), |this| { + this.parse_pat_no_top_guard( + None, + RecoverComma::No, + RecoverColon::No, + CommaRecoveryMode::EitherTupleOrPipe, + ) + }) { + Some(pat) + } else { + None + } + } + /// Parses a pattern, with a setting whether modern range patterns (e.g., `a..=b`, `a..b` are /// allowed). 
fn parse_pat_with_range_pat( @@ -698,7 +724,10 @@ impl<'a> Parser<'a> { syntax_loc: Option<PatternLocation>, ) -> PResult<'a, P<Pat>> { maybe_recover_from_interpolated_ty_qpath!(self, true); - maybe_whole!(self, NtPat, |pat| pat); + + if let Some(pat) = self.eat_metavar_pat() { + return Ok(pat); + } let mut lo = self.token.span; @@ -738,7 +767,7 @@ impl<'a> Parser<'a> { self.recover_dotdotdot_rest_pat(lo) } else if let Some(form) = self.parse_range_end() { self.parse_pat_range_to(form)? // `..=X`, `...X`, or `..X`. - } else if self.eat(exp!(Not)) { + } else if self.eat(exp!(Bang)) { // Parse `!` self.psess.gated_spans.gate(sym::never_patterns, self.prev_token.span); PatKind::Never @@ -794,7 +823,7 @@ impl<'a> Parser<'a> { }; let span = lo.to(self.prev_token.span); - if qself.is_none() && self.check(exp!(Not)) { + if qself.is_none() && self.check(exp!(Bang)) { self.parse_pat_mac_invoc(path)? } else if let Some(form) = self.parse_range_end() { let begin = self.mk_expr(span, ExprKind::Path(qself, path)); @@ -1043,10 +1072,8 @@ impl<'a> Parser<'a> { self.recover_additional_muts(); // Make sure we don't allow e.g. `let mut $p;` where `$p:pat`. - if let token::Interpolated(nt) = &self.token.kind { - if let token::NtPat(..) = &**nt { - self.expected_ident_found_err().emit(); - } + if let Some(MetaVarKind::Pat(_)) = self.token.is_metavar_seq() { + self.expected_ident_found_err().emit(); } // Parse the pattern we hope to be an identifier. @@ -1232,7 +1259,7 @@ impl<'a> Parser<'a> { || self.look_ahead(dist, |t| { t.is_path_start() // e.g. `MY_CONST`; || *t == token::Dot // e.g. `.5` for recovery; - || matches!(t.kind, token::Literal(..) | token::BinOp(token::Minus)) + || matches!(t.kind, token::Literal(..) | token::Minus) || t.is_bool_lit() || t.is_whole_expr() || t.is_lifetime() // recover `'a` instead of `'a'` @@ -1308,7 +1335,7 @@ impl<'a> Parser<'a> { | token::OpenDelim(Delimiter::Brace) // A struct pattern. | token::DotDotDot | token::DotDotEq | token::DotDot // A range pattern. | token::PathSep // A tuple / struct variant pattern. - | token::Not)) // A macro expanding to a pattern. + | token::Bang)) // A macro expanding to a pattern. } /// Parses `ident` or `ident @ pat`. diff --git a/compiler/rustc_parse/src/parser/path.rs b/compiler/rustc_parse/src/parser/path.rs index c24305ea9a8..9c6830c3672 100644 --- a/compiler/rustc_parse/src/parser/path.rs +++ b/compiler/rustc_parse/src/parser/path.rs @@ -15,9 +15,9 @@ use tracing::debug; use super::ty::{AllowPlus, RecoverQPath, RecoverReturnSign}; use super::{Parser, Restrictions, TokenType}; -use crate::errors::{PathSingleColon, PathTripleColon}; +use crate::errors::{self, PathSingleColon, PathTripleColon}; +use crate::exp; use crate::parser::{CommaRecoveryMode, RecoverColon, RecoverComma}; -use crate::{errors, exp, maybe_whole}; /// Specifies how to parse a path. #[derive(Copy, Clone, PartialEq)] @@ -194,7 +194,11 @@ impl<'a> Parser<'a> { } }; - maybe_whole!(self, NtPath, |path| reject_generics_if_mod_style(self, path.into_inner())); + if let Some(path) = + self.eat_metavar_seq(MetaVarKind::Path, |this| this.parse_path(PathStyle::Type)) + { + return Ok(reject_generics_if_mod_style(self, path)); + } // If we have a `ty` metavar in the form of a path, reparse it directly as a path, instead // of reparsing it as a `ty` and then extracting the path. 
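// Illustrative example (not part of the diff): ordinary macro input of the kind
// affected by the pat.rs and path.rs hunks above. Pasted `$p:pat` and `$q:path`
// fragments used to reach the parser as `NtPat`/`NtPath` interpolated tokens; with
// these changes they arrive as invisible-delimited metavariable sequences and are
// re-parsed through `eat_metavar_seq` (see `eat_metavar_pat` above). The macros below
// are generic illustrations, not taken from the compiler's test suite.
macro_rules! bind {
    ($p:pat = $e:expr) => { let $p = $e; };
}
macro_rules! call {
    ($q:path) => { $q(7) };
}
fn id(x: u32) -> u32 { x }
fn main() {
    bind!((a, b) = (1u32, 2u32)); // `$p` round-trips through the pat metavar path
    let c = call!(id);            // `$q` round-trips through the path metavar path
    println!("{}", a + b + c);
}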
@@ -301,10 +305,7 @@ impl<'a> Parser<'a> { let is_args_start = |token: &Token| { matches!( token.kind, - token::Lt - | token::BinOp(token::Shl) - | token::OpenDelim(Delimiter::Parenthesis) - | token::LArrow + token::Lt | token::Shl | token::OpenDelim(Delimiter::Parenthesis) | token::LArrow ) }; let check_args_start = |this: &mut Self| { @@ -378,11 +379,14 @@ impl<'a> Parser<'a> { self.psess.gated_spans.gate(sym::return_type_notation, span); + let prev_lo = self.prev_token.span.shrink_to_hi(); if self.eat_noexpect(&token::RArrow) { let lo = self.prev_token.span; let ty = self.parse_ty()?; + let span = lo.to(ty.span); + let suggestion = prev_lo.to(ty.span); self.dcx() - .emit_err(errors::BadReturnTypeNotationOutput { span: lo.to(ty.span) }); + .emit_err(errors::BadReturnTypeNotationOutput { span, suggestion }); } P(ast::GenericArgs::ParenthesizedElided(span)) diff --git a/compiler/rustc_parse/src/parser/stmt.rs b/compiler/rustc_parse/src/parser/stmt.rs index a2699b077fc..0896bd88b4c 100644 --- a/compiler/rustc_parse/src/parser/stmt.rs +++ b/compiler/rustc_parse/src/parser/stmt.rs @@ -5,7 +5,7 @@ use std::ops::Bound; use ast::Label; use rustc_ast as ast; use rustc_ast::ptr::P; -use rustc_ast::token::{self, Delimiter, TokenKind}; +use rustc_ast::token::{self, Delimiter, InvisibleOrigin, MetaVarKind, TokenKind}; use rustc_ast::util::classify::{self, TrailingBrace}; use rustc_ast::{ AttrStyle, AttrVec, Block, BlockCheckMode, DUMMY_NODE_ID, Expr, ExprKind, HasAttrs, Local, @@ -33,8 +33,8 @@ impl<'a> Parser<'a> { /// If `force_collect` is [`ForceCollect::Yes`], forces collection of tokens regardless of /// whether or not we have attributes. // Public for rustfmt usage. - pub(super) fn parse_stmt(&mut self, force_collect: ForceCollect) -> PResult<'a, Option<Stmt>> { - Ok(self.parse_stmt_without_recovery(false, force_collect).unwrap_or_else(|e| { + pub fn parse_stmt(&mut self, force_collect: ForceCollect) -> PResult<'a, Option<Stmt>> { + Ok(self.parse_stmt_without_recovery(false, force_collect, false).unwrap_or_else(|e| { e.emit(); self.recover_stmt_(SemiColonMode::Break, BlockMode::Ignore); None @@ -42,23 +42,27 @@ impl<'a> Parser<'a> { } /// If `force_collect` is [`ForceCollect::Yes`], forces collection of tokens regardless of - /// whether or not we have attributes. - // Public for `cfg_eval` macro expansion. + /// whether or not we have attributes. If `force_full_expr` is true, parses the stmt without + /// using `Restriction::STMT_EXPR`. Public for `cfg_eval` macro expansion. pub fn parse_stmt_without_recovery( &mut self, capture_semi: bool, force_collect: ForceCollect, + force_full_expr: bool, ) -> PResult<'a, Option<Stmt>> { let pre_attr_pos = self.collect_pos(); let attrs = self.parse_outer_attributes()?; let lo = self.token.span; - maybe_whole!(self, NtStmt, |stmt| { + if let Some(stmt) = self.eat_metavar_seq(MetaVarKind::Stmt, |this| { + this.parse_stmt_without_recovery(false, ForceCollect::Yes, false) + }) { + let mut stmt = stmt.expect("an actual statement"); stmt.visit_attrs(|stmt_attrs| { attrs.prepend_to_nt_inner(stmt_attrs); }); - Some(stmt.into_inner()) - }); + return Ok(Some(stmt)); + } if self.token.is_keyword(kw::Mut) && self.is_keyword_ahead(1, &[kw::Let]) { self.bump(); @@ -147,12 +151,14 @@ impl<'a> Parser<'a> { } else if self.token != token::CloseDelim(Delimiter::Brace) { // Remainder are line-expr stmts. This is similar to the `parse_stmt_path_start` case // above. 
+ let restrictions = + if force_full_expr { Restrictions::empty() } else { Restrictions::STMT_EXPR }; let e = self.collect_tokens( Some(pre_attr_pos), AttrWrapper::empty(), force_collect, |this, _empty_attrs| { - let (expr, _) = this.parse_expr_res(Restrictions::STMT_EXPR, attrs)?; + let (expr, _) = this.parse_expr_res(restrictions, attrs)?; Ok((expr, Trailing::No, UsePreAttrPos::Yes)) }, )?; @@ -176,7 +182,7 @@ let stmt = self.collect_tokens(None, attrs, ForceCollect::No, |this, attrs| { let path = this.parse_path(PathStyle::Expr)?; - if this.eat(exp!(Not)) { + if this.eat(exp!(Bang)) { let stmt_mac = this.parse_stmt_mac(lo, attrs, path)?; return Ok(( stmt_mac, @@ -229,11 +235,15 @@ let mac = P(MacCall { path, args }); let kind = if (style == MacStmtStyle::Braces - && self.token != token::Dot - && self.token != token::Question) - || self.token == token::Semi - || self.token == token::Eof - { + && !matches!(self.token.kind, token::Dot | token::Question)) + || matches!( + self.token.kind, + token::Semi + | token::Eof + | token::CloseDelim(Delimiter::Invisible(InvisibleOrigin::MetaVar( + MetaVarKind::Stmt + ))) + ) { StmtKind::MacCall(P(MacCallStmt { mac, style, attrs, tokens: None })) } else { // Since none of the above applied, this is an expression statement macro. @@ -442,7 +452,16 @@ /// Parses the RHS of a local variable declaration (e.g., `= 14;`). fn parse_initializer(&mut self, eq_optional: bool) -> PResult<'a, Option<P<Expr>>> { let eq_consumed = match self.token.kind { - token::BinOpEq(..) => { + token::PlusEq + | token::MinusEq + | token::StarEq + | token::SlashEq + | token::PercentEq + | token::CaretEq + | token::AndEq + | token::OrEq + | token::ShlEq + | token::ShrEq => { // Recover `let x <op>= 1` as `let x = 1` We must not use `+ BytePos(1)` here // because `<op>` can be a multi-byte lookalike that was recovered, e.g. `➖=` (the // `➖` is a U+2796 Heavy Minus Sign Unicode Character) that was recovered as a @@ -492,7 +511,7 @@ // bar; // // which is valid in other languages, but not Rust. - match self.parse_stmt_without_recovery(false, ForceCollect::No) { + match self.parse_stmt_without_recovery(false, ForceCollect::No, false) { // If the next token is an open brace, e.g., we have: // // if expr other_expr { @@ -688,7 +707,7 @@ if self.token == token::Eof { break; } - if self.is_vcs_conflict_marker(&TokenKind::BinOp(token::Shl), &TokenKind::Lt) { + if self.is_vcs_conflict_marker(&TokenKind::Shl, &TokenKind::Lt) { // Account for `<<<<<<<` diff markers. We can't proactively error here because // that can be a valid path start, so we snapshot and reparse only we've // encountered another parse error. @@ -801,10 +820,24 @@ &mut self, recover: AttemptLocalParseRecovery, ) -> PResult<'a, Option<Stmt>> { - // Skip looking for a trailing semicolon when we have an interpolated statement. - maybe_whole!(self, NtStmt, |stmt| Some(stmt.into_inner())); + // Skip looking for a trailing semicolon when we have a metavar seq. + if let Some(stmt) = self.eat_metavar_seq(MetaVarKind::Stmt, |this| { + // Why pass `true` for `force_full_expr`? Statement expressions are less expressive + // than "full" expressions, due to the `STMT_EXPR` restriction, and sometimes need + // parentheses. E.g. the "full" expression `match paren_around_match {} | true` when + // used in statement context must be written `(match paren_around_match {} | true)`.
+ // However, if the expression we are parsing in this statement context was pasted by a + // declarative macro, it may have come from a "full" expression context, and lack + // these parentheses. So we lift the `STMT_EXPR` restriction to ensure the statement + // will reparse successfully. + this.parse_stmt_without_recovery(false, ForceCollect::No, true) + }) { + let stmt = stmt.expect("an actual statement"); + return Ok(Some(stmt)); + } - let Some(mut stmt) = self.parse_stmt_without_recovery(true, ForceCollect::No)? else { + let Some(mut stmt) = self.parse_stmt_without_recovery(true, ForceCollect::No, false)? + else { return Ok(None); }; diff --git a/compiler/rustc_parse/src/parser/tests.rs b/compiler/rustc_parse/src/parser/tests.rs index 8b8c81a77a0..471966d086d 100644 --- a/compiler/rustc_parse/src/parser/tests.rs +++ b/compiler/rustc_parse/src/parser/tests.rs @@ -2291,7 +2291,7 @@ fn string_to_tts_macro() { Token { kind: token::Ident(name_macro_rules, IdentIsRaw::No), .. }, _, ), - TokenTree::Token(Token { kind: token::Not, .. }, _), + TokenTree::Token(Token { kind: token::Bang, .. }, _), TokenTree::Token(Token { kind: token::Ident(name_zip, IdentIsRaw::No), .. }, _), TokenTree::Delimited(.., macro_delim, macro_tts), ] if name_macro_rules == &kw::MacroRules && name_zip.as_str() == "zip" => { diff --git a/compiler/rustc_parse/src/parser/token_type.rs b/compiler/rustc_parse/src/parser/token_type.rs index 40631d9154d..886438fd583 100644 --- a/compiler/rustc_parse/src/parser/token_type.rs +++ b/compiler/rustc_parse/src/parser/token_type.rs @@ -25,7 +25,7 @@ pub enum TokenType { Gt, AndAnd, OrOr, - Not, + Bang, Tilde, // BinOps @@ -172,7 +172,7 @@ impl TokenType { Gt, AndAnd, OrOr, - Not, + Bang, Tilde, Plus, @@ -366,7 +366,7 @@ impl TokenType { TokenType::Gt => "`>`", TokenType::AndAnd => "`&&`", TokenType::OrOr => "`||`", - TokenType::Not => "`!`", + TokenType::Bang => "`!`", TokenType::Tilde => "`~`", TokenType::Plus => "`+`", @@ -445,12 +445,6 @@ macro_rules! exp { token_type: $crate::parser::token_type::TokenType::$tok } }; - (@binop, $op:ident) => { - $crate::parser::token_type::ExpTokenPair { - tok: &rustc_ast::token::BinOp(rustc_ast::token::BinOpToken::$op), - token_type: $crate::parser::token_type::TokenType::$op, - } - }; (@open, $delim:ident, $token_type:ident) => { $crate::parser::token_type::ExpTokenPair { tok: &rustc_ast::token::OpenDelim(rustc_ast::token::Delimiter::$delim), @@ -485,8 +479,13 @@ macro_rules! exp { (Gt) => { exp!(@tok, Gt) }; (AndAnd) => { exp!(@tok, AndAnd) }; (OrOr) => { exp!(@tok, OrOr) }; - (Not) => { exp!(@tok, Not) }; + (Bang) => { exp!(@tok, Bang) }; (Tilde) => { exp!(@tok, Tilde) }; + (Plus) => { exp!(@tok, Plus) }; + (Minus) => { exp!(@tok, Minus) }; + (Star) => { exp!(@tok, Star) }; + (And) => { exp!(@tok, And) }; + (Or) => { exp!(@tok, Or) }; (At) => { exp!(@tok, At) }; (Dot) => { exp!(@tok, Dot) }; (DotDot) => { exp!(@tok, DotDot) }; @@ -502,12 +501,6 @@ macro_rules! 
exp { (Question) => { exp!(@tok, Question) }; (Eof) => { exp!(@tok, Eof) }; - (Plus) => { exp!(@binop, Plus) }; - (Minus) => { exp!(@binop, Minus) }; - (Star) => { exp!(@binop, Star) }; - (And) => { exp!(@binop, And) }; - (Or) => { exp!(@binop, Or) }; - (OpenParen) => { exp!(@open, Parenthesis, OpenParen) }; (OpenBrace) => { exp!(@open, Brace, OpenBrace) }; (OpenBracket) => { exp!(@open, Bracket, OpenBracket) }; @@ -626,7 +619,7 @@ impl Iterator for TokenTypeSetIter { type Item = TokenType; fn next(&mut self) -> Option<TokenType> { - let num_bits: u32 = (std::mem::size_of_val(&self.0.0) * 8) as u32; + let num_bits: u32 = (size_of_val(&self.0.0) * 8) as u32; assert_eq!(num_bits, 128); let z = self.0.0.trailing_zeros(); if z == num_bits { diff --git a/compiler/rustc_parse/src/parser/ty.rs b/compiler/rustc_parse/src/parser/ty.rs index 18af0a1c79a..b45ebae079c 100644 --- a/compiler/rustc_parse/src/parser/ty.rs +++ b/compiler/rustc_parse/src/parser/ty.rs @@ -1,5 +1,5 @@ use rustc_ast::ptr::P; -use rustc_ast::token::{self, BinOpToken, Delimiter, IdentIsRaw, MetaVarKind, Token, TokenKind}; +use rustc_ast::token::{self, Delimiter, IdentIsRaw, MetaVarKind, Token, TokenKind}; use rustc_ast::util::case::Case; use rustc_ast::{ self as ast, BareFnTy, BoundAsyncness, BoundConstness, BoundPolarity, DUMMY_NODE_ID, FnRetTy, @@ -86,7 +86,7 @@ enum AllowCVariadic { /// Types can also be of the form `IDENT(u8, u8) -> u8`, however this assumes /// that `IDENT` is not the ident of a fn trait. fn can_continue_type_after_non_fn_ident(t: &Token) -> bool { - t == &token::PathSep || t == &token::Lt || t == &token::BinOp(token::Shl) + t == &token::PathSep || t == &token::Lt || t == &token::Shl } fn can_begin_dyn_bound_in_edition_2015(t: &Token) -> bool { @@ -260,7 +260,7 @@ impl<'a> Parser<'a> { let mut impl_dyn_multi = false; let kind = if self.check(exp!(OpenParen)) { self.parse_ty_tuple_or_parens(lo, allow_plus)? - } else if self.eat(exp!(Not)) { + } else if self.eat(exp!(Bang)) { // Never type `!` TyKind::Never } else if self.eat(exp!(Star)) { @@ -399,7 +399,7 @@ impl<'a> Parser<'a> { let mut trailing_plus = false; let (ts, trailing) = self.parse_paren_comma_seq(|p| { let ty = p.parse_ty()?; - trailing_plus = p.prev_token == TokenKind::BinOp(token::Plus); + trailing_plus = p.prev_token == TokenKind::Plus; Ok(ty) })?; @@ -735,7 +735,7 @@ impl<'a> Parser<'a> { // Always parse bounds greedily for better error recovery. let bounds = self.parse_generic_bounds()?; - *impl_dyn_multi = bounds.len() > 1 || self.prev_token == TokenKind::BinOp(token::Plus); + *impl_dyn_multi = bounds.len() > 1 || self.prev_token == TokenKind::Plus; Ok(TyKind::ImplTrait(ast::DUMMY_NODE_ID, bounds)) } @@ -747,11 +747,7 @@ impl<'a> Parser<'a> { self.expect_lt()?; let (args, _, _) = self.parse_seq_to_before_tokens( &[exp!(Gt)], - &[ - &TokenKind::Ge, - &TokenKind::BinOp(BinOpToken::Shr), - &TokenKind::BinOpEq(BinOpToken::Shr), - ], + &[&TokenKind::Ge, &TokenKind::Shr, &TokenKind::Shr], SeqSep::trailing_allowed(exp!(Comma)), |self_| { if self_.check_keyword(exp!(SelfUpper)) { @@ -781,7 +777,7 @@ impl<'a> Parser<'a> { self.check_keyword(exp!(Dyn)) && (self.token.uninterpolated_span().at_least_rust_2018() || self.look_ahead(1, |t| { - (can_begin_dyn_bound_in_edition_2015(t) || *t == TokenKind::BinOp(token::Star)) + (can_begin_dyn_bound_in_edition_2015(t) || *t == TokenKind::Star) && !can_continue_type_after_non_fn_ident(t) })) } @@ -803,7 +799,7 @@ impl<'a> Parser<'a> { // Always parse bounds greedily for better error recovery. 
let bounds = self.parse_generic_bounds()?; - *impl_dyn_multi = bounds.len() > 1 || self.prev_token == TokenKind::BinOp(token::Plus); + *impl_dyn_multi = bounds.len() > 1 || self.prev_token == TokenKind::Plus; Ok(TyKind::TraitObject(bounds, syntax)) } @@ -821,7 +817,7 @@ impl<'a> Parser<'a> { ) -> PResult<'a, TyKind> { // Simple path let path = self.parse_path_inner(PathStyle::Type, ty_generics)?; - if self.eat(exp!(Not)) { + if self.eat(exp!(Bang)) { // Macro invocation in type position Ok(TyKind::MacCall(P(MacCall { path, args: self.parse_delim_args()? }))) } else if allow_plus == AllowPlus::Yes && self.check_plus() { @@ -874,7 +870,7 @@ impl<'a> Parser<'a> { fn can_begin_bound(&mut self) -> bool { self.check_path() || self.check_lifetime() - || self.check(exp!(Not)) + || self.check(exp!(Bang)) || self.check(exp!(Question)) || self.check(exp!(Tilde)) || self.check_keyword(exp!(For)) @@ -1025,7 +1021,7 @@ impl<'a> Parser<'a> { let polarity = if self.eat(exp!(Question)) { BoundPolarity::Maybe(self.prev_token.span) - } else if self.eat(exp!(Not)) { + } else if self.eat(exp!(Bang)) { self.psess.gated_spans.gate(sym::negative_bounds, self.prev_token.span); BoundPolarity::Negative(self.prev_token.span) } else { diff --git a/compiler/rustc_parse_format/src/lib.rs b/compiler/rustc_parse_format/src/lib.rs index 3b985621b57..5b8a2fe52d3 100644 --- a/compiler/rustc_parse_format/src/lib.rs +++ b/compiler/rustc_parse_format/src/lib.rs @@ -13,7 +13,6 @@ html_playground_url = "https://play.rust-lang.org/", test(attr(deny(warnings))) )] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub use Alignment::*; @@ -190,7 +189,7 @@ pub enum DebugHex { #[derive(Copy, Clone, Debug, PartialEq)] pub enum Count<'a> { /// The count is specified explicitly. - CountIs(usize), + CountIs(u16), /// The count is specified by the argument with the given name. CountIsName(&'a str, InnerSpan), /// The count is specified by the argument at the given index. @@ -565,7 +564,7 @@ impl<'a> Parser<'a> { /// consuming a macro argument, `None` if it's the case. fn position(&mut self) -> Option<Position<'a>> { if let Some(i) = self.integer() { - Some(ArgumentIs(i)) + Some(ArgumentIs(i.into())) } else { match self.cur.peek() { Some(&(lo, c)) if rustc_lexer::is_id_start(c) => { @@ -771,7 +770,7 @@ impl<'a> Parser<'a> { /// width. 
fn count(&mut self, start: usize) -> Count<'a> { if let Some(i) = self.integer() { - if self.consume('$') { CountIsParam(i) } else { CountIs(i) } + if self.consume('$') { CountIsParam(i.into()) } else { CountIs(i) } } else { let tmp = self.cur.clone(); let word = self.word(); @@ -822,15 +821,15 @@ impl<'a> Parser<'a> { word } - fn integer(&mut self) -> Option<usize> { - let mut cur: usize = 0; + fn integer(&mut self) -> Option<u16> { + let mut cur: u16 = 0; let mut found = false; let mut overflow = false; let start = self.current_pos(); while let Some(&(_, c)) = self.cur.peek() { if let Some(i) = c.to_digit(10) { let (tmp, mul_overflow) = cur.overflowing_mul(10); - let (tmp, add_overflow) = tmp.overflowing_add(i as usize); + let (tmp, add_overflow) = tmp.overflowing_add(i as u16); if mul_overflow || add_overflow { overflow = true; } @@ -847,11 +846,11 @@ impl<'a> Parser<'a> { let overflowed_int = &self.input[start..end]; self.err( format!( - "integer `{}` does not fit into the type `usize` whose range is `0..={}`", + "integer `{}` does not fit into the type `u16` whose range is `0..={}`", overflowed_int, - usize::MAX + u16::MAX ), - "integer out of range for `usize`", + "integer out of range for `u16`", self.span(start, end), ); } diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl index bc43580a7f0..ed498d9d344 100644 --- a/compiler/rustc_passes/messages.ftl +++ b/compiler/rustc_passes/messages.ftl @@ -725,6 +725,12 @@ passes_target_feature_on_statement = .warn = {-passes_previously_accepted} .label = {passes_should_be_applied_to_fn.label} +passes_trait_impl_const_stability_mismatch = const stability on the impl does not match the const stability on the trait +passes_trait_impl_const_stability_mismatch_impl_stable = this impl is (implicitly) stable... +passes_trait_impl_const_stability_mismatch_impl_unstable = this impl is unstable... +passes_trait_impl_const_stability_mismatch_trait_stable = ...but the trait is stable +passes_trait_impl_const_stability_mismatch_trait_unstable = ...but the trait is unstable + passes_trait_impl_const_stable = trait implementations cannot be const stable yet .note = see issue #67792 <https://github.com/rust-lang/rust/issues/67792> for more information @@ -773,6 +779,10 @@ passes_unstable_attr_for_already_stable_feature = .item = the stability attribute annotates this item .help = consider removing the attribute +passes_unsupported_attributes_in_where = + most attributes are not supported in `where` clauses + .help = only `#[cfg]` and `#[cfg_attr]` are supported + passes_unused = unused attribute .suggestion = remove this attribute diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 9be86812287..ece5a53aaa9 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -14,6 +14,7 @@ use rustc_attr_parsing::{AttributeKind, ReprAttr, find_attr}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{Applicability, DiagCtxtHandle, IntoDiagArg, MultiSpan, StashKey}; use rustc_feature::{AttributeDuplicates, AttributeType, BUILTIN_ATTRIBUTE_MAP, BuiltinAttribute}; +use rustc_hir::def::DefKind; use rustc_hir::def_id::LocalModDefId; use rustc_hir::intravisit::{self, Visitor}; use rustc_hir::{ @@ -51,7 +52,7 @@ fn target_from_impl_item<'tcx>(tcx: TyCtxt<'tcx>, impl_item: &hir::ImplItem<'_>) hir::ImplItemKind::Const(..) => Target::AssocConst, hir::ImplItemKind::Fn(..) 
=> { let parent_def_id = tcx.hir_get_parent_item(impl_item.hir_id()).def_id; - let containing_item = tcx.hir().expect_item(parent_def_id); + let containing_item = tcx.hir_expect_item(parent_def_id); let containing_impl_is_for_trait = match &containing_item.kind { hir::ItemKind::Impl(impl_) => impl_.of_trait.is_some(), _ => bug!("parent of an ImplItem must be an Impl"), @@ -80,13 +81,13 @@ pub(crate) enum ProcMacroKind { } impl IntoDiagArg for ProcMacroKind { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { match self { ProcMacroKind::Attribute => "attribute proc macro", ProcMacroKind::Derive => "derive proc macro", ProcMacroKind::FunctionLike => "function-like proc macro", } - .into_diag_arg() + .into_diag_arg(&mut None) } } @@ -113,7 +114,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { let mut doc_aliases = FxHashMap::default(); let mut specified_inline = None; let mut seen = FxHashMap::default(); - let attrs = self.tcx.hir().attrs(hir_id); + let attrs = self.tcx.hir_attrs(hir_id); for attr in attrs { match attr { Attribute::Parsed(AttributeKind::Confusables { first_span, .. }) => { @@ -257,6 +258,9 @@ impl<'tcx> CheckAttrVisitor<'tcx> { [sym::coroutine, ..] => { self.check_coroutine(attr, target); } + [sym::type_const, ..] => { + self.check_type_const(hir_id,attr, target); + } [sym::linkage, ..] => self.check_linkage(attr, span, target), [sym::rustc_pub_transparent, ..] => self.check_rustc_pub_transparent(attr.span(), span, attrs), [ @@ -895,7 +899,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { if let Some(location) = match target { Target::AssocTy => { let parent_def_id = self.tcx.hir_get_parent_item(hir_id).def_id; - let containing_item = self.tcx.hir().expect_item(parent_def_id); + let containing_item = self.tcx.hir_expect_item(parent_def_id); if Target::from_item(containing_item) == Target::Impl { Some("type alias in implementation block") } else { @@ -904,7 +908,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { } Target::AssocConst => { let parent_def_id = self.tcx.hir_get_parent_item(hir_id).def_id; - let containing_item = self.tcx.hir().expect_item(parent_def_id); + let containing_item = self.tcx.hir_expect_item(parent_def_id); // We can't link to trait impl's consts. let err = "associated constant in trait implementation block"; match containing_item.kind { @@ -919,7 +923,8 @@ impl<'tcx> CheckAttrVisitor<'tcx> { | Target::Arm | Target::ForeignMod | Target::Closure - | Target::Impl => Some(target.name()), + | Target::Impl + | Target::WherePredicate => Some(target.name()), Target::ExternCrate | Target::Use | Target::Static @@ -947,7 +952,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { tcx.dcx().emit_err(errors::DocAliasBadLocation { span, attr_str, location }); return; } - let item_name = self.tcx.hir().name(hir_id); + let item_name = self.tcx.hir_name(hir_id); if item_name == doc_alias { tcx.dcx().emit_err(errors::DocAliasNotAnAlias { span, attr_str }); return; @@ -1476,7 +1481,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { // `#[must_use]` can be applied to a trait method definition with a default body if let Target::Method(MethodKind::Trait { body: true }) = target && let parent_def_id = self.tcx.hir_get_parent_item(hir_id).def_id - && let containing_item = self.tcx.hir().expect_item(parent_def_id) + && let containing_item = self.tcx.hir_expect_item(parent_def_id) && let hir::ItemKind::Trait(..) 
= containing_item.kind { return; @@ -1993,7 +1998,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { // catch `repr()` with no arguments, applied to an item (i.e. not `#![repr()]`) if item.is_some() { match target { - Target::Struct | Target::Union | Target::Enum => {} + Target::Struct | Target::Union | Target::Enum => continue, Target::Fn | Target::Method(_) => { feature_err( &self.tcx.sess, @@ -2518,6 +2523,23 @@ impl<'tcx> CheckAttrVisitor<'tcx> { } } + fn check_type_const(&self, hir_id: HirId, attr: &Attribute, target: Target) { + let tcx = self.tcx; + if target == Target::AssocConst + && let parent = tcx.parent(hir_id.expect_owner().to_def_id()) + && self.tcx.def_kind(parent) == DefKind::Trait + { + return; + } else { + self.dcx() + .struct_span_err( + attr.span(), + "`#[type_const]` must only be applied to trait associated constants", + ) + .emit(); + } + } + fn check_linkage(&self, attr: &Attribute, span: Span, target: Target) { match target { Target::Fn @@ -2550,7 +2572,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { match (target, force_inline_attr) { (Target::Closure, None) => { let is_coro = matches!( - self.tcx.hir().expect_expr(hir_id).kind, + self.tcx.hir_expect_expr(hir_id).kind, hir::ExprKind::Closure(hir::Closure { kind: hir::ClosureKind::Coroutine(..) | hir::ClosureKind::CoroutineClosure(..), @@ -2614,6 +2636,31 @@ impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> { intravisit::walk_item(self, item) } + fn visit_where_predicate(&mut self, where_predicate: &'tcx hir::WherePredicate<'tcx>) { + // FIXME(where_clause_attrs): Currently, as the following check shows, + // only `#[cfg]` and `#[cfg_attr]` are allowed, but it should be removed + // if we allow more attributes (e.g., tool attributes and `allow/deny/warn`) + // in where clauses. After that, only `self.check_attributes` should be enough. 
+ const ATTRS_ALLOWED: &[Symbol] = &[sym::cfg, sym::cfg_attr]; + let spans = self + .tcx + .hir_attrs(where_predicate.hir_id) + .iter() + .filter(|attr| !ATTRS_ALLOWED.iter().any(|&sym| attr.has_name(sym))) + .map(|attr| attr.span()) + .collect::<Vec<_>>(); + if !spans.is_empty() { + self.tcx.dcx().emit_err(errors::UnsupportedAttributesInWhere { span: spans.into() }); + } + self.check_attributes( + where_predicate.hir_id, + where_predicate.span, + Target::WherePredicate, + None, + ); + intravisit::walk_where_predicate(self, where_predicate) + } + fn visit_generic_param(&mut self, generic_param: &'tcx hir::GenericParam<'tcx>) { let target = Target::from_generic_param(generic_param); self.check_attributes(generic_param.hir_id, generic_param.span, target, None); @@ -2766,7 +2813,7 @@ fn check_invalid_crate_level_attr(tcx: TyCtxt<'_>, attrs: &[Attribute]) { } fn check_non_exported_macro_for_invalid_attrs(tcx: TyCtxt<'_>, item: &Item<'_>) { - let attrs = tcx.hir().attrs(item.hir_id()); + let attrs = tcx.hir_attrs(item.hir_id()); for attr in attrs { if attr.has_name(sym::inline) { diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs index 7a14a7a5db2..7029c60c343 100644 --- a/compiler/rustc_passes/src/dead.rs +++ b/compiler/rustc_passes/src/dead.rs @@ -361,7 +361,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { self.tcx.hir_fn_sig_by_hir_id(self.tcx.local_def_id_to_hir_id(local_def_id)) && matches!(fn_sig.decl.implicit_self, hir::ImplicitSelfKind::None) && let TyKind::Path(hir::QPath::Resolved(_, path)) = - self.tcx.hir().expect_item(local_impl_of).expect_impl().self_ty.kind + self.tcx.hir_expect_item(local_impl_of).expect_impl().self_ty.kind && let Res::Def(def_kind, did) = path.res { match def_kind { @@ -424,7 +424,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { for impl_def_id in self.tcx.all_impls(item.owner_id.to_def_id()) { if let Some(local_def_id) = impl_def_id.as_local() && let ItemKind::Impl(impl_ref) = - self.tcx.hir().expect_item(local_def_id).kind + self.tcx.hir_expect_item(local_def_id).kind { // skip items // mark dependent traits live @@ -448,7 +448,7 @@ impl<'tcx> MarkSymbolVisitor<'tcx> { for impl_id in self.tcx.all_impls(trait_id) { if let Some(local_impl_id) = impl_id.as_local() && let ItemKind::Impl(impl_ref) = - self.tcx.hir().expect_item(local_impl_id).kind + self.tcx.hir_expect_item(local_impl_id).kind { if !matches!(trait_item.kind, hir::TraitItemKind::Type(..)) && !ty_ref_to_pub_struct(self.tcx, impl_ref.self_ty) diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs index 323b414cca0..e13d94c1031 100644 --- a/compiler/rustc_passes/src/diagnostic_items.rs +++ b/compiler/rustc_passes/src/diagnostic_items.rs @@ -19,7 +19,7 @@ use rustc_span::{Symbol, sym}; use crate::errors::DuplicateDiagnosticItemInCrate; fn observe_item<'tcx>(tcx: TyCtxt<'tcx>, diagnostic_items: &mut DiagnosticItems, owner: OwnerId) { - let attrs = tcx.hir().attrs(owner.into()); + let attrs = tcx.hir_attrs(owner.into()); if let Some(name) = extract(attrs) { // insert into our table collect_item(tcx, diagnostic_items, name, owner.to_def_id()); diff --git a/compiler/rustc_passes/src/entry.rs b/compiler/rustc_passes/src/entry.rs index 25e679a8460..d2729876ebb 100644 --- a/compiler/rustc_passes/src/entry.rs +++ b/compiler/rustc_passes/src/entry.rs @@ -31,7 +31,7 @@ fn entry_fn(tcx: TyCtxt<'_>, (): ()) -> Option<(DefId, EntryFnType)> { } // If the user wants no main function at all, then stop here. 
- if attr::contains_name(tcx.hir().attrs(CRATE_HIR_ID), sym::no_main) { + if attr::contains_name(tcx.hir_attrs(CRATE_HIR_ID), sym::no_main) { return None; } @@ -45,7 +45,7 @@ fn entry_fn(tcx: TyCtxt<'_>, (): ()) -> Option<(DefId, EntryFnType)> { } fn attr_span_by_symbol(ctxt: &EntryContext<'_>, id: ItemId, sym: Symbol) -> Option<Span> { - let attrs = ctxt.tcx.hir().attrs(id.hir_id()); + let attrs = ctxt.tcx.hir_attrs(id.hir_id()); attr::find_by_name(attrs, sym).map(|attr| attr.span()) } @@ -61,7 +61,7 @@ fn check_and_search_item(id: ItemId, ctxt: &mut EntryContext<'_>) { let at_root = ctxt.tcx.opt_local_parent(id.owner_id.def_id) == Some(CRATE_DEF_ID); - let attrs = ctxt.tcx.hir().attrs(id.hir_id()); + let attrs = ctxt.tcx.hir_attrs(id.hir_id()); let entry_point_type = rustc_ast::entry::entry_point_type( attrs, at_root, diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs index 5f686f38bab..b8359c27e53 100644 --- a/compiler/rustc_passes/src/errors.rs +++ b/compiler/rustc_passes/src/errors.rs @@ -1005,10 +1005,10 @@ pub(crate) struct LayoutHomogeneousAggregate { #[derive(Diagnostic)] #[diag(passes_layout_of)] -pub(crate) struct LayoutOf { +pub(crate) struct LayoutOf<'tcx> { #[primary_span] pub span: Span, - pub normalized_ty: String, + pub normalized_ty: Ty<'tcx>, pub ty_layout: String, } @@ -1553,6 +1553,45 @@ pub(crate) struct TraitImplConstStable { } #[derive(Diagnostic)] +#[diag(passes_trait_impl_const_stability_mismatch)] +pub(crate) struct TraitImplConstStabilityMismatch { + #[primary_span] + pub span: Span, + #[subdiagnostic] + pub impl_stability: ImplConstStability, + #[subdiagnostic] + pub trait_stability: TraitConstStability, +} + +#[derive(Subdiagnostic)] +pub(crate) enum TraitConstStability { + #[note(passes_trait_impl_const_stability_mismatch_trait_stable)] + Stable { + #[primary_span] + span: Span, + }, + #[note(passes_trait_impl_const_stability_mismatch_trait_unstable)] + Unstable { + #[primary_span] + span: Span, + }, +} + +#[derive(Subdiagnostic)] +pub(crate) enum ImplConstStability { + #[note(passes_trait_impl_const_stability_mismatch_impl_stable)] + Stable { + #[primary_span] + span: Span, + }, + #[note(passes_trait_impl_const_stability_mismatch_impl_unstable)] + Unstable { + #[primary_span] + span: Span, + }, +} + +#[derive(Diagnostic)] #[diag(passes_unknown_feature, code = E0635)] pub(crate) struct UnknownFeature { #[primary_span] @@ -1870,3 +1909,11 @@ pub(crate) struct RustcConstStableIndirectPairing { #[primary_span] pub span: Span, } + +#[derive(Diagnostic)] +#[diag(passes_unsupported_attributes_in_where)] +#[help] +pub(crate) struct UnsupportedAttributesInWhere { + #[primary_span] + pub span: MultiSpan, +} diff --git a/compiler/rustc_passes/src/hir_id_validator.rs b/compiler/rustc_passes/src/hir_id_validator.rs index 509c2f54775..84b92d49f24 100644 --- a/compiler/rustc_passes/src/hir_id_validator.rs +++ b/compiler/rustc_passes/src/hir_id_validator.rs @@ -60,19 +60,18 @@ impl<'a, 'hir> HirIdValidator<'a, 'hir> { .expect("owning item has no entry"); if max != self.hir_ids_seen.len() - 1 { - let hir = self.tcx.hir(); let pretty_owner = self.tcx.hir_def_path(owner.def_id).to_string_no_crate_verbose(); let missing_items: Vec<_> = (0..=max as u32) .map(|i| ItemLocalId::from_u32(i)) .filter(|&local_id| !self.hir_ids_seen.contains(local_id)) - .map(|local_id| hir.node_to_string(HirId { owner, local_id })) + .map(|local_id| self.tcx.hir_id_to_string(HirId { owner, local_id })) .collect(); let seen_items: Vec<_> = self .hir_ids_seen .iter() 
- .map(|local_id| hir.node_to_string(HirId { owner, local_id })) + .map(|local_id| self.tcx.hir_id_to_string(HirId { owner, local_id })) .collect(); self.error(|| { @@ -137,7 +136,7 @@ impl<'a, 'hir> intravisit::Visitor<'hir> for HirIdValidator<'a, 'hir> { self.error(|| { format!( "HirIdValidator: The recorded owner of {} is {} instead of {}", - self.tcx.hir().node_to_string(hir_id), + self.tcx.hir_id_to_string(hir_id), self.tcx.hir_def_path(hir_id.owner.def_id).to_string_no_crate_verbose(), self.tcx.hir_def_path(owner.def_id).to_string_no_crate_verbose() ) diff --git a/compiler/rustc_passes/src/input_stats.rs b/compiler/rustc_passes/src/input_stats.rs index 92ea49f18e5..1278e98afcf 100644 --- a/compiler/rustc_passes/src/input_stats.rs +++ b/compiler/rustc_passes/src/input_stats.rs @@ -107,12 +107,12 @@ impl<'k> StatCollector<'k> { let node = self.nodes.entry(label1).or_insert(Node::new()); node.stats.count += 1; - node.stats.size = std::mem::size_of_val(val); + node.stats.size = size_of_val(val); if let Some(label2) = label2 { let subnode = node.subnodes.entry(label2).or_insert(NodeStats::new()); subnode.count += 1; - subnode.size = std::mem::size_of_val(val); + subnode.size = size_of_val(val); } } @@ -255,9 +255,9 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { hir_visit::walk_body(self, b); } - fn visit_mod(&mut self, m: &'v hir::Mod<'v>, _s: Span, n: HirId) { + fn visit_mod(&mut self, m: &'v hir::Mod<'v>, _s: Span, _n: HirId) { self.record("Mod", None, m); - hir_visit::walk_mod(self, m, n) + hir_visit::walk_mod(self, m) } fn visit_foreign_item(&mut self, i: &'v hir::ForeignItem<'v>) { @@ -328,6 +328,7 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { Array, Call, MethodCall, + Use, Tup, Binary, Unary, @@ -626,7 +627,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> { (self, e, e.kind, None, ast, Expr, ExprKind), [ Array, ConstBlock, Call, MethodCall, Tup, Binary, Unary, Lit, Cast, Type, Let, - If, While, ForLoop, Loop, Match, Closure, Block, Await, TryBlock, Assign, + If, While, ForLoop, Loop, Match, Closure, Block, Await, Use, TryBlock, Assign, AssignOp, Field, Index, Range, Underscore, Path, AddrOf, Break, Continue, Ret, InlineAsm, FormatArgs, OffsetOf, MacCall, Struct, Repeat, Paren, Try, Yield, Yeet, Become, IncludedBytes, Gen, UnsafeBinderCast, Err, Dummy diff --git a/compiler/rustc_passes/src/layout_test.rs b/compiler/rustc_passes/src/layout_test.rs index 1133cf93304..d4512c9417e 100644 --- a/compiler/rustc_passes/src/layout_test.rs +++ b/compiler/rustc_passes/src/layout_test.rs @@ -112,8 +112,7 @@ fn dump_layout_of(tcx: TyCtxt<'_>, item_def_id: LocalDefId, attr: &Attribute) { } sym::debug => { - let normalized_ty = - format!("{}", tcx.normalize_erasing_regions(typing_env, ty)); + let normalized_ty = tcx.normalize_erasing_regions(typing_env, ty); // FIXME: using the `Debug` impl here isn't ideal. 
let ty_layout = format!("{:#?}", *ty_layout); tcx.dcx().emit_err(LayoutOf { span, normalized_ty, ty_layout }); diff --git a/compiler/rustc_passes/src/lib.rs b/compiler/rustc_passes/src/lib.rs index 1aa077ad2bb..6f6115af96c 100644 --- a/compiler/rustc_passes/src/lib.rs +++ b/compiler/rustc_passes/src/lib.rs @@ -6,13 +6,13 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(let_chains)] #![feature(map_try_insert)] #![feature(rustdoc_internals)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_middle::query::Providers; diff --git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs index 24dc018c661..ed70d9ee91f 100644 --- a/compiler/rustc_passes/src/liveness.rs +++ b/compiler/rustc_passes/src/liveness.rs @@ -157,7 +157,7 @@ fn check_liveness(tcx: TyCtxt<'_>, def_id: LocalDefId) { if let Some(upvars) = tcx.upvars_mentioned(def_id) { for &var_hir_id in upvars.keys() { - let var_name = tcx.hir().name(var_hir_id); + let var_name = tcx.hir_name(var_hir_id); maps.add_variable(Upvar(var_hir_id, var_name)); } } @@ -426,6 +426,7 @@ impl<'tcx> Visitor<'tcx> for IrMaps<'tcx> { | hir::ExprKind::Array(..) | hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) + | hir::ExprKind::Use(..) | hir::ExprKind::Tup(..) | hir::ExprKind::Binary(..) | hir::ExprKind::AddrOf(..) @@ -705,7 +706,7 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { ); self.acc(self.exit_ln, var, ACC_READ | ACC_USE); } - ty::UpvarCapture::ByValue => {} + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => {} } } } @@ -1031,6 +1032,11 @@ impl<'a, 'tcx> Liveness<'a, 'tcx> { self.propagate_through_expr(receiver, succ) } + hir::ExprKind::Use(expr, _) => { + let succ = self.check_is_ty_uninhabited(expr, succ); + self.propagate_through_expr(expr, succ) + } + hir::ExprKind::Tup(exprs) => self.propagate_through_exprs(exprs, succ), hir::ExprKind::Binary(op, ref l, ref r) if op.node.is_lazy() => { @@ -1418,6 +1424,7 @@ fn check_expr<'tcx>(this: &mut Liveness<'_, 'tcx>, expr: &'tcx Expr<'tcx>) { // no correctness conditions related to liveness hir::ExprKind::Call(..) | hir::ExprKind::MethodCall(..) + | hir::ExprKind::Use(..) | hir::ExprKind::Match(..) | hir::ExprKind::Loop(..) | hir::ExprKind::Index(..) @@ -1493,7 +1500,7 @@ impl<'tcx> Liveness<'_, 'tcx> { for (&var_hir_id, min_capture_list) in closure_min_captures { for captured_place in min_capture_list { match captured_place.info.capture_kind { - ty::UpvarCapture::ByValue => {} + ty::UpvarCapture::ByValue | ty::UpvarCapture::ByUse => {} ty::UpvarCapture::ByRef(..) => continue, }; let span = captured_place.get_capture_kind_span(self.ir.tcx); diff --git a/compiler/rustc_passes/src/liveness/rwu_table.rs b/compiler/rustc_passes/src/liveness/rwu_table.rs index 6e2f976e5b0..4c1f6ea141e 100644 --- a/compiler/rustc_passes/src/liveness/rwu_table.rs +++ b/compiler/rustc_passes/src/liveness/rwu_table.rs @@ -39,7 +39,7 @@ impl RWUTable { /// Size of packed RWU in bits. const RWU_BITS: usize = 4; /// Size of a word in bits. - const WORD_BITS: usize = std::mem::size_of::<u8>() * 8; + const WORD_BITS: usize = size_of::<u8>() * 8; /// Number of packed RWUs that fit into a single word. 
const WORD_RWU_COUNT: usize = Self::WORD_BITS / Self::RWU_BITS; diff --git a/compiler/rustc_passes/src/loops.rs b/compiler/rustc_passes/src/loops.rs index 8e59c0b3251..b06f16cc7bd 100644 --- a/compiler/rustc_passes/src/loops.rs +++ b/compiler/rustc_passes/src/loops.rs @@ -222,7 +222,7 @@ impl<'hir> Visitor<'hir> for CheckLoopVisitor<'hir> { if let Some(break_expr) = opt_expr { let (head, loop_label, loop_kind) = if let Some(loop_id) = loop_id { - match self.tcx.hir().expect_expr(loop_id).kind { + match self.tcx.hir_expect_expr(loop_id).kind { hir::ExprKind::Loop(_, label, source, sp) => { (Some(sp), label, Some(source)) } diff --git a/compiler/rustc_passes/src/naked_functions.rs b/compiler/rustc_passes/src/naked_functions.rs index cb17b0f6cf5..d35aedf9a56 100644 --- a/compiler/rustc_passes/src/naked_functions.rs +++ b/compiler/rustc_passes/src/naked_functions.rs @@ -182,6 +182,7 @@ impl CheckInlineAssembly { | ExprKind::Array(..) | ExprKind::Call(..) | ExprKind::MethodCall(..) + | ExprKind::Use(..) | ExprKind::Tup(..) | ExprKind::Binary(..) | ExprKind::Unary(..) diff --git a/compiler/rustc_passes/src/reachable.rs b/compiler/rustc_passes/src/reachable.rs index fd465717bf7..599a08bac20 100644 --- a/compiler/rustc_passes/src/reachable.rs +++ b/compiler/rustc_passes/src/reachable.rs @@ -291,7 +291,7 @@ impl<'tcx> ReachableContext<'tcx> { _ => { bug!( "found unexpected node kind in worklist: {} ({:?})", - self.tcx.hir().node_to_string(self.tcx.local_def_id_to_hir_id(search_item)), + self.tcx.hir_id_to_string(self.tcx.local_def_id_to_hir_id(search_item)), node, ); } diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs index 8a4bdf3875c..aea4386295f 100644 --- a/compiler/rustc_passes/src/stability.rs +++ b/compiler/rustc_passes/src/stability.rs @@ -118,7 +118,7 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> { ) where F: FnOnce(&mut Self), { - let attrs = self.tcx.hir().attrs(self.tcx.local_def_id_to_hir_id(def_id)); + let attrs = self.tcx.hir_attrs(self.tcx.local_def_id_to_hir_id(def_id)); debug!("annotate(id = {:?}, attrs = {:?})", def_id, attrs); let depr = attr::find_attr!(attrs, AttributeKind::Deprecation{deprecation, span} => (*deprecation, *span)); @@ -313,7 +313,7 @@ impl<'a, 'tcx> Annotator<'a, 'tcx> { .map(|(stab, _span)| ConstStability::from_partial(stab, const_stability_indirect)); // If this is a const fn but not annotated with stability markers, see if we can inherit regular stability. - if fn_sig.is_some_and(|s| s.header.is_const()) && const_stab.is_none() && + if fn_sig.is_some_and(|s| s.header.is_const()) && const_stab.is_none() && // We only ever inherit unstable features. let Some(inherit_regular_stab) = final_stab.filter(|s| s.is_unstable()) @@ -795,7 +795,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { }) => { let features = self.tcx.features(); if features.staged_api() { - let attrs = self.tcx.hir().attrs(item.hir_id()); + let attrs = self.tcx.hir_attrs(item.hir_id()); let stab = attr::find_attr!(attrs, AttributeKind::Stability{stability, span} => (*stability, *span)); // FIXME(jdonszelmann): make it impossible to miss the or_else in the typesystem @@ -826,24 +826,56 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { } } - // `#![feature(const_trait_impl)]` is unstable, so any impl declared stable - // needs to have an error emitted. 
if features.const_trait_impl() - && self.tcx.is_const_trait_impl(item.owner_id.to_def_id()) - && const_stab.is_some_and(|stab| stab.is_const_stable()) + && let hir::Constness::Const = constness { - self.tcx.dcx().emit_err(errors::TraitImplConstStable { span: item.span }); + let stable_or_implied_stable = match const_stab { + None => true, + Some(stab) if stab.is_const_stable() => { + // `#![feature(const_trait_impl)]` is unstable, so any impl declared stable + // needs to have an error emitted. + // Note: Remove this error once `const_trait_impl` is stabilized + self.tcx + .dcx() + .emit_err(errors::TraitImplConstStable { span: item.span }); + true + } + Some(_) => false, + }; + + if let Some(trait_id) = t.trait_def_id() + && let Some(const_stab) = self.tcx.lookup_const_stability(trait_id) + { + // the const stability of a trait impl must match the const stability on the trait. + if const_stab.is_const_stable() != stable_or_implied_stable { + let trait_span = self.tcx.def_ident_span(trait_id).unwrap(); + + let impl_stability = if stable_or_implied_stable { + errors::ImplConstStability::Stable { span: item.span } + } else { + errors::ImplConstStability::Unstable { span: item.span } + }; + let trait_stability = if const_stab.is_const_stable() { + errors::TraitConstStability::Stable { span: trait_span } + } else { + errors::TraitConstStability::Unstable { span: trait_span } + }; + + self.tcx.dcx().emit_err(errors::TraitImplConstStabilityMismatch { + span: item.span, + impl_stability, + trait_stability, + }); + } + } } } - match constness { - rustc_hir::Constness::Const => { - if let Some(def_id) = t.trait_def_id() { - // FIXME(const_trait_impl): Improve the span here. - self.tcx.check_const_stability(def_id, t.path.span, t.path.span); - } - } - rustc_hir::Constness::NotConst => {} + if let hir::Constness::Const = constness + && let Some(def_id) = t.trait_def_id() + { + // FIXME(const_trait_impl): Improve the span here. + self.tcx.check_const_stability(def_id, t.path.span, t.path.span); } for impl_item_ref in *items { @@ -1002,7 +1034,7 @@ fn is_unstable_reexport(tcx: TyCtxt<'_>, id: hir::HirId) -> bool { } // If this is a path that isn't a use, we don't need to do anything special - if !matches!(tcx.hir().expect_item(def_id).kind, ItemKind::Use(..)) { + if !matches!(tcx.hir_expect_item(def_id).kind, ItemKind::Use(..)) { return false; } diff --git a/compiler/rustc_pattern_analysis/src/lib.rs b/compiler/rustc_pattern_analysis/src/lib.rs index a3400ebb799..eeea724a29b 100644 --- a/compiler/rustc_pattern_analysis/src/lib.rs +++ b/compiler/rustc_pattern_analysis/src/lib.rs @@ -6,7 +6,6 @@ #![allow(rustc::diagnostic_outside_of_impl)] #![allow(rustc::untranslatable_diagnostic)] #![cfg_attr(feature = "rustc", feature(let_chains))] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod constructor; diff --git a/compiler/rustc_pattern_analysis/tests/common/mod.rs b/compiler/rustc_pattern_analysis/tests/common/mod.rs index 365bc2d863f..8980b644f59 100644 --- a/compiler/rustc_pattern_analysis/tests/common/mod.rs +++ b/compiler/rustc_pattern_analysis/tests/common/mod.rs @@ -5,7 +5,7 @@ use rustc_pattern_analysis::usefulness::{PlaceValidity, UsefulnessReport}; use rustc_pattern_analysis::{MatchArm, PatCx, PrivateUninhabitedField}; /// Sets up `tracing` for easier debugging. Tries to look like the `rustc` setup. 
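To make the const-stability check added in rustc_passes/src/stability.rs above concrete, here is a rough sketch of the case `TraitImplConstStabilityMismatch` reports. All names are invented, and this situation only arises in nightly, staged-api crates that use `const_trait_impl`:

    // Sketch only: assumes #![feature(staged_api, const_trait_impl)] and
    // crate-level stability attributes.
    #[stable(feature = "example_api", since = "1.0.0")]
    #[rustc_const_unstable(feature = "const_example_api", issue = "none")]
    #[const_trait]
    pub trait ExampleTrait {
        fn run(&self);
    }

    pub struct ExampleType;

    // No const-stability attribute on the impl, so it counts as
    // "(implicitly) stable", which no longer matches the const-unstable
    // trait above; rustc now reports:
    //   const stability on the impl does not match the const stability on the trait
    impl const ExampleTrait for ExampleType {
        fn run(&self) {}
    }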
-pub fn init_tracing() { +fn init_tracing() { use tracing_subscriber::Layer; use tracing_subscriber::layer::SubscriberExt; use tracing_subscriber::util::SubscriberInitExt; @@ -24,7 +24,7 @@ pub fn init_tracing() { /// A simple set of types. #[allow(dead_code)] #[derive(Debug, Copy, Clone, PartialEq, Eq)] -pub enum Ty { +pub(super) enum Ty { /// Booleans Bool, /// 8-bit unsigned integers @@ -41,7 +41,7 @@ pub enum Ty { /// The important logic. impl Ty { - pub fn sub_tys(&self, ctor: &Constructor<Cx>) -> Vec<Self> { + pub(super) fn sub_tys(&self, ctor: &Constructor<Cx>) -> Vec<Self> { use Constructor::*; match (ctor, *self) { (Struct, Ty::Tuple(tys)) => tys.iter().copied().collect(), @@ -63,7 +63,7 @@ impl Ty { } } - pub fn ctor_set(&self) -> ConstructorSet<Cx> { + fn ctor_set(&self) -> ConstructorSet<Cx> { match *self { Ty::Bool => ConstructorSet::Bool, Ty::U8 => ConstructorSet::Integers { @@ -104,7 +104,7 @@ impl Ty { } } - pub fn write_variant_name( + fn write_variant_name( &self, f: &mut std::fmt::Formatter<'_>, ctor: &Constructor<Cx>, @@ -120,7 +120,7 @@ impl Ty { } /// Compute usefulness in our simple context (and set up tracing for easier debugging). -pub fn compute_match_usefulness<'p>( +pub(super) fn compute_match_usefulness<'p>( arms: &[MatchArm<'p, Cx>], ty: Ty, scrut_validity: PlaceValidity, @@ -137,7 +137,7 @@ pub fn compute_match_usefulness<'p>( } #[derive(Debug)] -pub struct Cx; +pub(super) struct Cx; /// The context for pattern analysis. Forwards anything interesting to `Ty` methods. impl PatCx for Cx { diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index 5271d03a6f6..cf32f237b86 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -1,12 +1,12 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(associated_type_defaults)] #![feature(let_chains)] #![feature(rustdoc_internals)] #![feature(try_blocks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod errors; @@ -40,7 +40,7 @@ use rustc_middle::ty::{ use rustc_middle::{bug, span_bug}; use rustc_session::lint; use rustc_span::hygiene::Transparency; -use rustc_span::{Ident, Span, Symbol, kw, sym}; +use rustc_span::{Ident, Span, Symbol, sym}; use tracing::debug; use {rustc_attr_parsing as attr, rustc_hir as hir}; @@ -493,7 +493,7 @@ impl<'tcx> EmbargoVisitor<'tcx> { ) { // Non-opaque macros cannot make other items more accessible than they already are. let hir_id = self.tcx.local_def_id_to_hir_id(local_def_id); - let attrs = self.tcx.hir().attrs(hir_id); + let attrs = self.tcx.hir_attrs(hir_id); if attr::find_attr!(attrs, AttributeKind::MacroTransparency(x) => *x) .unwrap_or(Transparency::fallback(md.macro_rules)) @@ -573,7 +573,7 @@ impl<'tcx> EmbargoVisitor<'tcx> { // have normal hygiene, so we can treat them like other items without type // privacy and mark them reachable. DefKind::Macro(_) => { - let item = self.tcx.hir().expect_item(def_id); + let item = self.tcx.hir_expect_item(def_id); if let hir::ItemKind::Macro(MacroDef { macro_rules: false, .. }, _) = item.kind { if vis.is_accessible_from(module, self.tcx) { self.update(def_id, macro_ev, Level::Reachable); @@ -597,7 +597,7 @@ impl<'tcx> EmbargoVisitor<'tcx> { DefKind::Struct | DefKind::Union => { // While structs and unions have type privacy, their fields do not. 
- let item = self.tcx.hir().expect_item(def_id); + let item = self.tcx.hir_expect_item(def_id); if let hir::ItemKind::Struct(ref struct_def, _) | hir::ItemKind::Union(ref struct_def, _) = item.kind { @@ -935,8 +935,8 @@ impl<'tcx> NamePrivacyVisitor<'tcx> { } // definition of the field - let ident = Ident::new(kw::Empty, use_ctxt); - let def_id = self.tcx.adjust_ident_and_get_scope(ident, def.did(), hir_id).1; + let ident = Ident::new(sym::dummy, use_ctxt); + let (_, def_id) = self.tcx.adjust_ident_and_get_scope(ident, def.did(), hir_id); !field.vis.is_accessible_from(def_id, self.tcx) } diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 73c205fdb17..a83c388c747 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs @@ -3,12 +3,12 @@ // tidy-alphabetical-start #![allow(internal_features)] #![allow(unused_parens)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(min_specialization)] #![feature(rustc_attrs)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_data_structures::stable_hasher::HashStable; diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 5e6bee1dbd5..3109d53cd2c 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -1,5 +1,4 @@ use std::assert_matches::assert_matches; -use std::collections::hash_map::Entry; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; @@ -9,7 +8,7 @@ use std::sync::atomic::{AtomicU32, Ordering}; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_data_structures::profiling::{QueryInvocationId, SelfProfilerRef}; -use rustc_data_structures::sharded::{self, Sharded}; +use rustc_data_structures::sharded::{self, ShardedHashMap}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::sync::{AtomicU64, Lock}; use rustc_data_structures::unord::UnordMap; @@ -619,7 +618,7 @@ impl<D: Deps> DepGraphData<D> { if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) { self.current.prev_index_to_index.lock()[prev_index] } else { - self.current.new_node_to_index.lock_shard_by_value(dep_node).get(dep_node).copied() + self.current.new_node_to_index.get(dep_node) } } @@ -1048,7 +1047,7 @@ rustc_index::newtype_index! { /// first, and `data` second. 
pub(super) struct CurrentDepGraph<D: Deps> { encoder: GraphEncoder<D>, - new_node_to_index: Sharded<FxHashMap<DepNode, DepNodeIndex>>, + new_node_to_index: ShardedHashMap<DepNode, DepNodeIndex>, prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>, /// This is used to verify that fingerprints do not change between the creation of a node @@ -1117,12 +1116,9 @@ impl<D: Deps> CurrentDepGraph<D> { profiler, previous, ), - new_node_to_index: Sharded::new(|| { - FxHashMap::with_capacity_and_hasher( - new_node_count_estimate / sharded::shards(), - Default::default(), - ) - }), + new_node_to_index: ShardedHashMap::with_capacity( + new_node_count_estimate / sharded::shards(), + ), prev_index_to_index: Lock::new(IndexVec::from_elem_n(None, prev_graph_node_count)), anon_id_seed, #[cfg(debug_assertions)] @@ -1152,14 +1148,9 @@ impl<D: Deps> CurrentDepGraph<D> { edges: EdgesVec, current_fingerprint: Fingerprint, ) -> DepNodeIndex { - let dep_node_index = match self.new_node_to_index.lock_shard_by_value(&key).entry(key) { - Entry::Occupied(entry) => *entry.get(), - Entry::Vacant(entry) => { - let dep_node_index = self.encoder.send(key, current_fingerprint, edges); - entry.insert(dep_node_index); - dep_node_index - } - }; + let dep_node_index = self + .new_node_to_index + .get_or_insert_with(key, || self.encoder.send(key, current_fingerprint, edges)); #[cfg(debug_assertions)] self.record_edge(dep_node_index, key, current_fingerprint); @@ -1257,7 +1248,7 @@ impl<D: Deps> CurrentDepGraph<D> { ) { let node = &prev_graph.index_to_node(prev_index); debug_assert!( - !self.new_node_to_index.lock_shard_by_value(node).contains_key(node), + !self.new_node_to_index.get(node).is_some(), "node from previous graph present in new node collection" ); } @@ -1299,12 +1290,11 @@ impl Default for TaskDeps { #[cfg(debug_assertions)] node: None, reads: EdgesVec::new(), - read_set: FxHashSet::default(), + read_set: FxHashSet::with_capacity_and_hasher(128, Default::default()), phantom_data: PhantomData, } } } - // A data structure that stores Option<DepNodeColor> values as a contiguous // array, using one u32 per entry. struct DepNodeColorMap { @@ -1383,7 +1373,7 @@ fn panic_on_forbidden_read<D: Deps>(data: &DepGraphData<D>, dep_node_index: DepN if dep_node.is_none() { // Try to find it among the new nodes for shard in data.current.new_node_to_index.lock_shards() { - if let Some((node, _)) = shard.iter().find(|(_, index)| **index == dep_node_index) { + if let Some((node, _)) = shard.iter().find(|(_, index)| *index == dep_node_index) { dep_node = Some(*node); break; } diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index bc78878a84a..2c6fd7d494f 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -63,7 +63,7 @@ rustc_index::newtype_index! { pub struct SerializedDepNodeIndex {} } -const DEP_NODE_SIZE: usize = std::mem::size_of::<SerializedDepNodeIndex>(); +const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>(); /// Amount of padding we need to add to the edge list data so that we can retrieve every /// SerializedDepNodeIndex with a fixed-size read then mask. 
const DEP_NODE_PAD: usize = DEP_NODE_SIZE - 1; @@ -175,7 +175,7 @@ impl EdgeHeader { #[inline] fn mask(bits: usize) -> usize { - usize::MAX >> ((std::mem::size_of::<usize>() * 8) - bits) + usize::MAX >> ((size_of::<usize>() * 8) - bits) } impl SerializedDepGraph { @@ -208,9 +208,8 @@ impl SerializedDepGraph { // for a node with length 64, which means the spilled 1-byte leb128 length is 1 byte of at // least (34 byte header + 1 byte len + 64 bytes edge data), which is ~1%. A 2-byte leb128 // length is about the same fractional overhead and it amortizes for yet greater lengths. - let mut edge_list_data = Vec::with_capacity( - graph_bytes - node_count * std::mem::size_of::<SerializedNodeHeader<D>>(), - ); + let mut edge_list_data = + Vec::with_capacity(graph_bytes - node_count * size_of::<SerializedNodeHeader<D>>()); for _index in 0..node_count { // Decode the header for this edge; the header packs together as many of the fixed-size @@ -300,7 +299,7 @@ struct Unpacked { // M..M+N bytes per index // M+N..16 kind impl<D: Deps> SerializedNodeHeader<D> { - const TOTAL_BITS: usize = std::mem::size_of::<DepKind>() * 8; + const TOTAL_BITS: usize = size_of::<DepKind>() * 8; const LEN_BITS: usize = Self::TOTAL_BITS - Self::KIND_BITS - Self::WIDTH_BITS; const WIDTH_BITS: usize = DEP_NODE_WIDTH_BITS; const KIND_BITS: usize = Self::TOTAL_BITS - D::DEP_KIND_MAX.leading_zeros() as usize; diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs index ee984095ad8..2aedd365adc 100644 --- a/compiler/rustc_query_system/src/lib.rs +++ b/compiler/rustc_query_system/src/lib.rs @@ -3,10 +3,8 @@ #![feature(assert_matches)] #![feature(core_intrinsics)] #![feature(dropck_eyepatch)] -#![feature(hash_raw_entry)] #![feature(let_chains)] #![feature(min_specialization)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod cache; diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index e11123dff26..30b5d7e5954 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -1,9 +1,8 @@ use std::fmt::Debug; use std::hash::Hash; +use std::sync::OnceLock; -use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::sharded::{self, Sharded}; -use rustc_data_structures::sync::OnceLock; +use rustc_data_structures::sharded::ShardedHashMap; pub use rustc_data_structures::vec_cache::VecCache; use rustc_hir::def_id::LOCAL_CRATE; use rustc_index::Idx; @@ -36,7 +35,7 @@ pub trait QueryCache: Sized { /// In-memory cache for queries whose keys aren't suitable for any of the /// more specialized kinds of cache. Backed by a sharded hashmap. pub struct DefaultCache<K, V> { - cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>, + cache: ShardedHashMap<K, (V, DepNodeIndex)>, } impl<K, V> Default for DefaultCache<K, V> { @@ -55,19 +54,14 @@ where #[inline(always)] fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> { - let key_hash = sharded::make_hash(key); - let lock = self.cache.lock_shard_by_hash(key_hash); - let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key); - - if let Some((_, value)) = result { Some(*value) } else { None } + self.cache.get(key) } #[inline] fn complete(&self, key: K, value: V, index: DepNodeIndex) { - let mut lock = self.cache.lock_shard_by_value(&key); // We may be overwriting another value. This is all right, since the dep-graph // will check that the fingerprint matches. 
- lock.insert(key, (value, index)); + self.cache.insert(key, (value, index)); } fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index a8c2aa98cd0..37b305d0a8b 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -477,8 +477,8 @@ fn remove_cycle( /// Detects query cycles by using depth first search over all active query jobs. /// If a query cycle is found it will break the cycle by finding an edge which /// uses a query latch and then resuming that waiter. -/// There may be multiple cycles involved in a deadlock, so this searches -/// all active queries for cycles before finally resuming all the waiters at once. +/// There may be multiple cycles involved in a deadlock, but we only search +/// one cycle per call and resume one waiter at a time. See the `FIXME` below. pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) { let mut wakelist = Vec::new(); let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect(); @@ -488,6 +488,19 @@ pub fn break_query_cycles(query_map: QueryMap, registry: &rayon_core::Registry) while jobs.len() > 0 { if remove_cycle(&query_map, &mut jobs, &mut wakelist) { found_cycle = true; + + // FIXME(#137731): Resuming all the waiters at once may cause deadlocks, + // so we resume one waiter per call for now. It's still unclear whether + // it's due to possible issues in rustc-rayon or instead in the handling + // of query cycles. + // This seems to only appear when multiple query cycle errors + // are involved, so this reduction in parallelism, while suboptimal, is not + // universal and only the deadlock handler will encounter these cases. + // The workaround gives up some potential gains, but there are still big + // improvements in the common case, and no regressions compared to the + // single-threaded case. More investigation is still needed, and once fixed, + // we can wake all the waiters up at once. + break; } } diff --git a/compiler/rustc_resolve/messages.ftl b/compiler/rustc_resolve/messages.ftl index b13de2875bc..817aa5b4385 100644 --- a/compiler/rustc_resolve/messages.ftl +++ b/compiler/rustc_resolve/messages.ftl @@ -125,9 +125,6 @@ resolve_const_param_in_enum_discriminant = resolve_const_param_in_non_trivial_anon_const = const parameters may only be used as standalone arguments, i.e.
`{$name}` -resolve_const_param_in_ty_of_const_param = - const parameters may not be used in the type of const parameters - resolve_constructor_private_if_any_field_private = a constructor is private if any of the fields is private @@ -156,9 +153,13 @@ resolve_extern_crate_self_requires_renaming = `extern crate self;` requires renaming .suggestion = rename the `self` crate to be able to import it +resolve_forward_declared_generic_in_const_param_ty = + const parameter types cannot reference parameters before they are declared + .label = const parameter type cannot reference `{$param}` before it is declared + resolve_forward_declared_generic_param = - generic parameters with a default cannot use forward declared identifiers - .label = defaulted generic parameters cannot be forward declared + generic parameter defaults cannot reference parameters before they are declared + .label = cannot reference `{$param}` before it is declared resolve_found_an_item_configured_out = found an item that was configured out @@ -250,9 +251,6 @@ resolve_lifetime_param_in_enum_discriminant = resolve_lifetime_param_in_non_trivial_anon_const = lifetime parameters may not be used in const expressions -resolve_lifetime_param_in_ty_of_const_param = - lifetime parameters may not be used in the type of const parameters - resolve_lowercase_self = attempt to use a non-constant value in a constant .suggestion = try using `Self` @@ -383,9 +381,11 @@ resolve_self_imports_only_allowed_within_multipart_suggestion = resolve_self_imports_only_allowed_within_suggestion = consider importing the module directly +resolve_self_in_const_generic_ty = + cannot use `Self` in const parameter type + resolve_self_in_generic_param_default = generic parameters cannot use `Self` in their defaults - .label = `Self` in generic parameter default resolve_similarly_named_defined_here = similarly named {$candidate_descr} `{$candidate}` defined here @@ -437,9 +437,6 @@ resolve_type_param_in_enum_discriminant = resolve_type_param_in_non_trivial_anon_const = type parameters may not be used in const expressions -resolve_type_param_in_ty_of_const_param = - type parameters may not be used in the type of const parameters - resolve_undeclared_label = use of undeclared label `{$name}` .label = undeclared label `{$name}` diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 87f7eda391d..42fe01b1c84 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -1529,6 +1529,14 @@ impl<'a, 'ra, 'tcx> Visitor<'a> for BuildReducedGraphVisitor<'a, 'ra, 'tcx> { visit::walk_variant(self, variant); } + fn visit_where_predicate(&mut self, p: &'a ast::WherePredicate) { + if p.is_placeholder { + self.visit_invoc(p.id); + } else { + visit::walk_where_predicate(self, p); + } + } + fn visit_crate(&mut self, krate: &'a ast::Crate) { if krate.is_placeholder { self.visit_invoc_in_module(krate.id); diff --git a/compiler/rustc_resolve/src/def_collector.rs b/compiler/rustc_resolve/src/def_collector.rs index 75972a71c8e..33f529851ae 100644 --- a/compiler/rustc_resolve/src/def_collector.rs +++ b/compiler/rustc_resolve/src/def_collector.rs @@ -9,7 +9,7 @@ use rustc_hir as hir; use rustc_hir::def::{CtorKind, CtorOf, DefKind}; use rustc_hir::def_id::LocalDefId; use rustc_span::hygiene::LocalExpnId; -use rustc_span::{Span, Symbol, kw, sym}; +use rustc_span::{Span, Symbol, sym}; use tracing::debug; use crate::{ImplTraitContext, InvocationParent, Resolver}; @@ 
-38,7 +38,7 @@ impl<'a, 'ra, 'tcx> DefCollector<'a, 'ra, 'tcx> { fn create_def( &mut self, node_id: NodeId, - name: Symbol, + name: Option<Symbol>, def_kind: DefKind, span: Span, ) -> LocalDefId { @@ -89,7 +89,7 @@ impl<'a, 'ra, 'tcx> DefCollector<'a, 'ra, 'tcx> { self.visit_macro_invoc(field.id); } else { let name = field.ident.map_or_else(|| sym::integer(index(self)), |ident| ident.name); - let def = self.create_def(field.id, name, DefKind::Field, field.span); + let def = self.create_def(field.id, Some(name), DefKind::Field, field.span); self.with_parent(def, |this| visit::walk_field_def(this, field)); } } @@ -161,7 +161,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { return self.visit_macro_invoc(i.id); } }; - let def_id = self.create_def(i.id, i.ident.name, def_kind, i.span); + let def_id = self.create_def(i.id, Some(i.ident.name), def_kind, i.span); if let Some(macro_data) = opt_macro_data { self.resolver.macro_map.insert(def_id.to_def_id(), macro_data); @@ -175,7 +175,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { if let Some((ctor_kind, ctor_node_id)) = CtorKind::from_ast(struct_def) { this.create_def( ctor_node_id, - kw::Empty, + None, DefKind::Ctor(CtorOf::Struct, ctor_kind), i.span, ); @@ -211,20 +211,15 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { } let (return_id, return_span) = coroutine_kind.return_id(); - let return_def = - self.create_def(return_id, kw::Empty, DefKind::OpaqueTy, return_span); + let return_def = self.create_def(return_id, None, DefKind::OpaqueTy, return_span); self.with_parent(return_def, |this| this.visit_fn_ret_ty(output)); // If this async fn has no body (i.e. it's an async fn signature in a trait) // then the closure_def will never be used, and we should avoid generating a // def-id for it. if let Some(body) = body { - let closure_def = self.create_def( - coroutine_kind.closure_id(), - kw::Empty, - DefKind::Closure, - span, - ); + let closure_def = + self.create_def(coroutine_kind.closure_id(), None, DefKind::Closure, span); self.with_parent(closure_def, |this| this.visit_block(body)); } } @@ -235,7 +230,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { // Async closures desugar to closures inside of closures, so // we must create two defs. 
let coroutine_def = - self.create_def(coroutine_kind.closure_id(), kw::Empty, DefKind::Closure, span); + self.create_def(coroutine_kind.closure_id(), None, DefKind::Closure, span); self.with_parent(coroutine_def, |this| this.visit_expr(body)); } _ => visit::walk_fn(self, fn_kind), @@ -243,7 +238,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { } fn visit_use_tree(&mut self, use_tree: &'a UseTree, id: NodeId, _nested: bool) { - self.create_def(id, kw::Empty, DefKind::Use, use_tree.span); + self.create_def(id, None, DefKind::Use, use_tree.span); visit::walk_use_tree(self, use_tree, id); } @@ -262,7 +257,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { ForeignItemKind::MacCall(_) => return self.visit_macro_invoc(fi.id), }; - let def = self.create_def(fi.id, fi.ident.name, def_kind, fi.span); + let def = self.create_def(fi.id, Some(fi.ident.name), def_kind, fi.span); self.with_parent(def, |this| visit::walk_item(this, fi)); } @@ -271,12 +266,12 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { if v.is_placeholder { return self.visit_macro_invoc(v.id); } - let def = self.create_def(v.id, v.ident.name, DefKind::Variant, v.span); + let def = self.create_def(v.id, Some(v.ident.name), DefKind::Variant, v.span); self.with_parent(def, |this| { if let Some((ctor_kind, ctor_node_id)) = CtorKind::from_ast(&v.data) { this.create_def( ctor_node_id, - kw::Empty, + None, DefKind::Ctor(CtorOf::Variant, ctor_kind), v.span, ); @@ -285,6 +280,14 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { }); } + fn visit_where_predicate(&mut self, pred: &'a WherePredicate) { + if pred.is_placeholder { + self.visit_macro_invoc(pred.id) + } else { + visit::walk_where_predicate(self, pred) + } + } + fn visit_variant_data(&mut self, data: &'a VariantData) { // The assumption here is that non-`cfg` macro expansion cannot change field indices. // It currently holds because only inert attributes are accepted on fields, @@ -304,7 +307,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { GenericParamKind::Type { .. } => DefKind::TyParam, GenericParamKind::Const { .. } => DefKind::ConstParam, }; - self.create_def(param.id, param.ident.name, def_kind, param.ident.span); + self.create_def(param.id, Some(param.ident.name), def_kind, param.ident.span); // impl-Trait can happen inside generic parameters, like // ``` @@ -327,7 +330,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { } }; - let def = self.create_def(i.id, i.ident.name, def_kind, i.span); + let def = self.create_def(i.id, Some(i.ident.name), def_kind, i.span); self.with_parent(def, |this| visit::walk_assoc_item(this, i, ctxt)); } @@ -339,8 +342,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { } fn visit_anon_const(&mut self, constant: &'a AnonConst) { - let parent = - self.create_def(constant.id, kw::Empty, DefKind::AnonConst, constant.value.span); + let parent = self.create_def(constant.id, None, DefKind::AnonConst, constant.value.span); self.with_parent(parent, |this| visit::walk_anon_const(this, constant)); } @@ -348,18 +350,14 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { let parent_def = match expr.kind { ExprKind::MacCall(..) => return self.visit_macro_invoc(expr.id), ExprKind::Closure(..) | ExprKind::Gen(..) 
=> { - self.create_def(expr.id, kw::Empty, DefKind::Closure, expr.span) + self.create_def(expr.id, None, DefKind::Closure, expr.span) } ExprKind::ConstBlock(ref constant) => { for attr in &expr.attrs { visit::walk_attribute(self, attr); } - let def = self.create_def( - constant.id, - kw::Empty, - DefKind::InlineConst, - constant.value.span, - ); + let def = + self.create_def(constant.id, None, DefKind::InlineConst, constant.value.span); self.with_parent(def, |this| visit::walk_anon_const(this, constant)); return; } @@ -383,7 +381,7 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { ImplTraitContext::Existential => DefKind::OpaqueTy, ImplTraitContext::InBinding => return visit::walk_ty(self, ty), }; - let id = self.create_def(*id, name, kind, ty.span); + let id = self.create_def(*id, Some(name), kind, ty.span); match self.impl_trait_context { // Do not nest APIT, as we desugar them as `impl_trait: bounds`, // so the `impl_trait` node is not a parent to `bounds`. @@ -459,4 +457,43 @@ impl<'a, 'ra, 'tcx> visit::Visitor<'a> for DefCollector<'a, 'ra, 'tcx> { visit::walk_attribute(self, attr); self.in_attr = orig_in_attr; } + + fn visit_inline_asm(&mut self, asm: &'a InlineAsm) { + let InlineAsm { + asm_macro: _, + template: _, + template_strs: _, + operands, + clobber_abis: _, + options: _, + line_spans: _, + } = asm; + for (op, _span) in operands { + match op { + InlineAsmOperand::In { expr, reg: _ } + | InlineAsmOperand::Out { expr: Some(expr), reg: _, late: _ } + | InlineAsmOperand::InOut { expr, reg: _, late: _ } => { + self.visit_expr(expr); + } + InlineAsmOperand::Out { expr: None, reg: _, late: _ } => {} + InlineAsmOperand::SplitInOut { in_expr, out_expr, reg: _, late: _ } => { + self.visit_expr(in_expr); + if let Some(expr) = out_expr { + self.visit_expr(expr); + } + } + InlineAsmOperand::Const { anon_const } => { + let def = self.create_def( + anon_const.id, + None, + DefKind::InlineConst, + anon_const.value.span, + ); + self.with_parent(def, |this| visit::walk_anon_const(this, anon_const)); + } + InlineAsmOperand::Sym { sym } => self.visit_inline_asm_sym(sym), + InlineAsmOperand::Label { block } => self.visit_block(block), + } + } + } } diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 55db336d85f..41f4254eede 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -42,10 +42,10 @@ use crate::imports::{Import, ImportKind}; use crate::late::{PatternSource, Rib}; use crate::{ AmbiguityError, AmbiguityErrorMisc, AmbiguityKind, BindingError, BindingKey, Finalize, - HasGenericParams, LexicalScopeBinding, MacroRulesScope, Module, ModuleKind, - ModuleOrUniformRoot, NameBinding, NameBindingKind, ParentScope, PathResult, PrivacyError, - ResolutionError, Resolver, Scope, ScopeSet, Segment, UseError, Used, VisResolutionError, - errors as errs, path_names_to_string, + ForwardGenericParamBanReason, HasGenericParams, LexicalScopeBinding, MacroRulesScope, Module, + ModuleKind, ModuleOrUniformRoot, NameBinding, NameBindingKind, ParentScope, PathResult, + PrivacyError, ResolutionError, Resolver, Scope, ScopeSet, Segment, UseError, Used, + VisResolutionError, errors as errs, path_names_to_string, }; type Res = def::Res<ast::NodeId>; @@ -887,12 +887,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { participle, name, }), - ResolutionError::ForwardDeclaredGenericParam => { - self.dcx().create_err(errs::ForwardDeclaredGenericParam { span }) + 
ResolutionError::ForwardDeclaredGenericParam(param, reason) => match reason { + ForwardGenericParamBanReason::Default => { + self.dcx().create_err(errs::ForwardDeclaredGenericParam { param, span }) + } + ForwardGenericParamBanReason::ConstParamTy => self + .dcx() + .create_err(errs::ForwardDeclaredGenericInConstParamTy { param, span }), + }, + ResolutionError::ParamInTyOfConstParam { name } => { + self.dcx().create_err(errs::ParamInTyOfConstParam { span, name }) } - ResolutionError::ParamInTyOfConstParam { name, param_kind: is_type } => self - .dcx() - .create_err(errs::ParamInTyOfConstParam { span, name, param_kind: is_type }), ResolutionError::ParamInNonTrivialAnonConst { name, param_kind: is_type } => { self.dcx().create_err(errs::ParamInNonTrivialAnonConst { span, @@ -908,9 +913,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { ResolutionError::ParamInEnumDiscriminant { name, param_kind: is_type } => self .dcx() .create_err(errs::ParamInEnumDiscriminant { span, name, param_kind: is_type }), - ResolutionError::SelfInGenericParamDefault => { - self.dcx().create_err(errs::SelfInGenericParamDefault { span }) - } + ResolutionError::ForwardDeclaredSelf(reason) => match reason { + ForwardGenericParamBanReason::Default => { + self.dcx().create_err(errs::SelfInGenericParamDefault { span }) + } + ForwardGenericParamBanReason::ConstParamTy => { + self.dcx().create_err(errs::SelfInConstGenericTy { span }) + } + }, ResolutionError::UnreachableLabel { name, definition_span, suggestion } => { let ((sub_suggestion_label, sub_suggestion), sub_unreachable_label) = match suggestion { @@ -2252,7 +2262,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { #[instrument(level = "debug", skip(self, parent_scope))] pub(crate) fn make_path_suggestion( &mut self, - span: Span, mut path: Vec<Segment>, parent_scope: &ParentScope<'ra>, ) -> Option<(Vec<Segment>, Option<String>)> { @@ -2267,7 +2276,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { && !first.ident.is_path_segment_keyword() => { // Insert a placeholder that's later replaced by `self`/`super`/etc. - path.insert(0, Segment::from_ident(Ident::empty())); + path.insert(0, Segment::from_ident(Ident::dummy())); } _ => return None, } @@ -2480,7 +2489,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // or `use a::{b, c, d}};` // ^^^^^^^^^^^ let (has_nested, after_crate_name) = - find_span_immediately_after_crate_name(self.tcx.sess, module_name, import.use_span); + find_span_immediately_after_crate_name(self.tcx.sess, import.use_span); debug!(has_nested, ?after_crate_name); let source_map = self.tcx.sess.source_map(); @@ -2687,11 +2696,7 @@ fn extend_span_to_previous_binding(sess: &Session, binding_span: Span) -> Option /// // ^^^^^^^^^^^^^^^ -- true /// ``` #[instrument(level = "debug", skip(sess))] -fn find_span_immediately_after_crate_name( - sess: &Session, - module_name: Symbol, - use_span: Span, -) -> (bool, Span) { +fn find_span_immediately_after_crate_name(sess: &Session, use_span: Span) -> (bool, Span) { let source_map = sess.source_map(); // Using `use issue_59764::foo::{baz, makro};` as an example throughout.. 
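The reworded forward-declaration diagnostic wired up in diagnostics.rs above now names the offending parameter. A small, stable-Rust illustration of the default-parameter case:

    // E0128, with the new wording:
    //   generic parameter defaults cannot reference parameters before they are declared
    //   label: cannot reference `U` before it is declared
    struct Defaulted<T = U, U = ()> {
        first: T,
        second: U,
    }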
diff --git a/compiler/rustc_resolve/src/errors.rs b/compiler/rustc_resolve/src/errors.rs index 7eb795034b0..2ae6892bc93 100644 --- a/compiler/rustc_resolve/src/errors.rs +++ b/compiler/rustc_resolve/src/errors.rs @@ -338,6 +338,16 @@ pub(crate) struct ForwardDeclaredGenericParam { #[primary_span] #[label] pub(crate) span: Span, + pub(crate) param: Symbol, +} + +#[derive(Diagnostic)] +#[diag(resolve_forward_declared_generic_in_const_param_ty)] +pub(crate) struct ForwardDeclaredGenericInConstParamTy { + #[primary_span] + #[label] + pub(crate) span: Span, + pub(crate) param: Symbol, } #[derive(Diagnostic)] @@ -347,26 +357,19 @@ pub(crate) struct ParamInTyOfConstParam { #[label] pub(crate) span: Span, pub(crate) name: Symbol, - #[subdiagnostic] - pub(crate) param_kind: Option<ParamKindInTyOfConstParam>, -} - -#[derive(Debug)] -#[derive(Subdiagnostic)] -pub(crate) enum ParamKindInTyOfConstParam { - #[note(resolve_type_param_in_ty_of_const_param)] - Type, - #[note(resolve_const_param_in_ty_of_const_param)] - Const, - #[note(resolve_lifetime_param_in_ty_of_const_param)] - Lifetime, } #[derive(Diagnostic)] #[diag(resolve_self_in_generic_param_default, code = E0735)] pub(crate) struct SelfInGenericParamDefault { #[primary_span] - #[label] + pub(crate) span: Span, +} + +#[derive(Diagnostic)] +#[diag(resolve_self_in_const_generic_ty)] +pub(crate) struct SelfInConstGenericTy { + #[primary_span] pub(crate) span: Span, } diff --git a/compiler/rustc_resolve/src/ident.rs b/compiler/rustc_resolve/src/ident.rs index 499b9bca4d2..5f0a2a597e9 100644 --- a/compiler/rustc_resolve/src/ident.rs +++ b/compiler/rustc_resolve/src/ident.rs @@ -1117,13 +1117,14 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { debug!("validate_res_from_ribs({:?})", res); let ribs = &all_ribs[rib_index + 1..]; - // An invalid forward use of a generic parameter from a previous default. - if let RibKind::ForwardGenericParamBan = all_ribs[rib_index].kind { + // An invalid forward use of a generic parameter from a previous default + // or in a const param ty. + if let RibKind::ForwardGenericParamBan(reason) = all_ribs[rib_index].kind { if let Some(span) = finalize { let res_error = if rib_ident.name == kw::SelfUpper { - ResolutionError::SelfInGenericParamDefault + ResolutionError::ForwardDeclaredSelf(reason) } else { - ResolutionError::ForwardDeclaredGenericParam + ResolutionError::ForwardDeclaredGenericParam(rib_ident.name, reason) }; self.report_error(span, res_error); } @@ -1142,7 +1143,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { | RibKind::FnOrCoroutine | RibKind::Module(..) | RibKind::MacroDefinition(..) - | RibKind::ForwardGenericParamBan => { + | RibKind::ForwardGenericParamBan(_) => { // Nothing to do. Continue. } RibKind::Item(..) | RibKind::AssocItem => { @@ -1209,10 +1210,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { if let Some(span) = finalize { self.report_error( span, - ParamInTyOfConstParam { - name: rib_ident.name, - param_kind: None, - }, + ParamInTyOfConstParam { name: rib_ident.name }, ); } return Res::Err; @@ -1239,11 +1237,27 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { | RibKind::MacroDefinition(..) | RibKind::InlineAsmSym | RibKind::AssocItem - | RibKind::ForwardGenericParamBan => { + | RibKind::ForwardGenericParamBan(_) => { // Nothing to do. Continue. 
continue; } + RibKind::ConstParamTy => { + if !self.tcx.features().generic_const_parameter_types() { + if let Some(span) = finalize { + self.report_error( + span, + ResolutionError::ParamInTyOfConstParam { + name: rib_ident.name, + }, + ); + } + return Res::Err; + } else { + continue; + } + } + RibKind::ConstantItem(trivial, _) => { if let ConstantHasGenerics::No(cause) = trivial { // HACK(min_const_generics): If we encounter `Self` in an anonymous @@ -1292,18 +1306,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { RibKind::Item(has_generic_params, def_kind) => { (has_generic_params, def_kind) } - RibKind::ConstParamTy => { - if let Some(span) = finalize { - self.report_error( - span, - ResolutionError::ParamInTyOfConstParam { - name: rib_ident.name, - param_kind: Some(errors::ParamKindInTyOfConstParam::Type), - }, - ); - } - return Res::Err; - } }; if let Some(span) = finalize { @@ -1328,7 +1330,23 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { | RibKind::MacroDefinition(..) | RibKind::InlineAsmSym | RibKind::AssocItem - | RibKind::ForwardGenericParamBan => continue, + | RibKind::ForwardGenericParamBan(_) => continue, + + RibKind::ConstParamTy => { + if !self.tcx.features().generic_const_parameter_types() { + if let Some(span) = finalize { + self.report_error( + span, + ResolutionError::ParamInTyOfConstParam { + name: rib_ident.name, + }, + ); + } + return Res::Err; + } else { + continue; + } + } RibKind::ConstantItem(trivial, _) => { if let ConstantHasGenerics::No(cause) = trivial { @@ -1361,18 +1379,6 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { RibKind::Item(has_generic_params, def_kind) => { (has_generic_params, def_kind) } - RibKind::ConstParamTy => { - if let Some(span) = finalize { - self.report_error( - span, - ResolutionError::ParamInTyOfConstParam { - name: rib_ident.name, - param_kind: Some(errors::ParamKindInTyOfConstParam::Const), - }, - ); - } - return Res::Err; - } }; // This was an attempt to use a const parameter outside its scope. @@ -1391,6 +1397,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } _ => {} } + res } diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs index 6b8a7493cd4..89b9a074351 100644 --- a/compiler/rustc_resolve/src/imports.rs +++ b/compiler/rustc_resolve/src/imports.rs @@ -955,11 +955,9 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } else { None }; - let err = match self.make_path_suggestion( - span, - import.module_path.clone(), - &import.parent_scope, - ) { + let err = match self + .make_path_suggestion(import.module_path.clone(), &import.parent_scope) + { Some((suggestion, note)) => UnresolvedImportError { span, label: None, diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index ad9c3465f0c..d28988cd74f 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -78,6 +78,7 @@ struct IsNeverPattern; #[derive(Copy, Clone, Debug, PartialEq, Eq)] enum AnonConstKind { EnumDiscriminant, + FieldDefaultValue, InlineConst, ConstArg(IsRepeatExpr), } @@ -94,7 +95,7 @@ impl PatternSource { } impl IntoDiagArg for PatternSource { - fn into_diag_arg(self) -> DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { DiagArgValue::Str(Cow::Borrowed(self.descr())) } } @@ -206,7 +207,7 @@ pub(crate) enum RibKind<'ra> { /// All bindings in this rib are generic parameters that can't be used /// from the default of a generic parameter because they're not declared /// before said generic parameter. Also see the `visit_generics` override. 
- ForwardGenericParamBan, + ForwardGenericParamBan(ForwardGenericParamBanReason), /// We are inside of the type of a const parameter. Can't refer to any /// parameters. @@ -217,6 +218,12 @@ pub(crate) enum RibKind<'ra> { InlineAsmSym, } +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +pub(crate) enum ForwardGenericParamBanReason { + Default, + ConstParamTy, +} + impl RibKind<'_> { /// Whether this rib kind contains generic parameters, as opposed to local /// variables. @@ -227,9 +234,11 @@ impl RibKind<'_> { | RibKind::ConstantItem(..) | RibKind::Module(_) | RibKind::MacroDefinition(_) - | RibKind::ConstParamTy | RibKind::InlineAsmSym => false, - RibKind::AssocItem | RibKind::Item(..) | RibKind::ForwardGenericParamBan => true, + RibKind::ConstParamTy + | RibKind::AssocItem + | RibKind::Item(..) + | RibKind::ForwardGenericParamBan(_) => true, } } @@ -243,7 +252,7 @@ impl RibKind<'_> { | RibKind::Item(..) | RibKind::ConstantItem(..) | RibKind::Module(..) - | RibKind::ForwardGenericParamBan + | RibKind::ForwardGenericParamBan(_) | RibKind::ConstParamTy | RibKind::InlineAsmSym => true, } @@ -394,32 +403,37 @@ pub(crate) enum AliasPossibility { #[derive(Copy, Clone, Debug)] pub(crate) enum PathSource<'a> { - // Type paths `Path`. + /// Type paths `Path`. Type, - // Trait paths in bounds or impls. + /// Trait paths in bounds or impls. Trait(AliasPossibility), - // Expression paths `path`, with optional parent context. + /// Expression paths `path`, with optional parent context. Expr(Option<&'a Expr>), - // Paths in path patterns `Path`. + /// Paths in path patterns `Path`. Pat, - // Paths in struct expressions and patterns `Path { .. }`. + /// Paths in struct expressions and patterns `Path { .. }`. Struct, - // Paths in tuple struct patterns `Path(..)`. + /// Paths in tuple struct patterns `Path(..)`. TupleStruct(Span, &'a [Span]), - // `m::A::B` in `<T as m::A>::B::C`. + /// `m::A::B` in `<T as m::A>::B::C`. TraitItem(Namespace), - // Paths in delegation item + /// Paths in delegation item Delegation, /// An arg in a `use<'a, N>` precise-capturing bound. PreciseCapturingArg(Namespace), - // Paths that end with `(..)`, for return type notation. + /// Paths that end with `(..)`, for return type notation. ReturnTypeNotation, + /// Paths from `#[define_opaque]` attributes + DefineOpaques, } impl<'a> PathSource<'a> { fn namespace(self) -> Namespace { match self { - PathSource::Type | PathSource::Trait(_) | PathSource::Struct => TypeNS, + PathSource::Type + | PathSource::Trait(_) + | PathSource::Struct + | PathSource::DefineOpaques => TypeNS, PathSource::Expr(..) | PathSource::Pat | PathSource::TupleStruct(..) @@ -440,6 +454,7 @@ impl<'a> PathSource<'a> { | PathSource::ReturnTypeNotation => true, PathSource::Trait(_) | PathSource::TraitItem(..) + | PathSource::DefineOpaques | PathSource::Delegation | PathSource::PreciseCapturingArg(..) => false, } @@ -447,6 +462,7 @@ impl<'a> PathSource<'a> { fn descr_expected(self) -> &'static str { match &self { + PathSource::DefineOpaques => "type alias or associated type with opaqaue types", PathSource::Type => "type", PathSource::Trait(_) => "trait", PathSource::Pat => "unit struct, unit variant or constant", @@ -490,6 +506,19 @@ impl<'a> PathSource<'a> { pub(crate) fn is_expected(self, res: Res) -> bool { match self { + PathSource::DefineOpaques => { + matches!( + res, + Res::Def( + DefKind::Struct + | DefKind::Union + | DefKind::Enum + | DefKind::TyAlias + | DefKind::AssocTy, + _ + ) | Res::SelfTyAlias { .. 
} + ) + } PathSource::Type => matches!( res, Res::Def( @@ -569,16 +598,16 @@ impl<'a> PathSource<'a> { match (self, has_unexpected_resolution) { (PathSource::Trait(_), true) => E0404, (PathSource::Trait(_), false) => E0405, - (PathSource::Type, true) => E0573, - (PathSource::Type, false) => E0412, + (PathSource::Type | PathSource::DefineOpaques, true) => E0573, + (PathSource::Type | PathSource::DefineOpaques, false) => E0412, (PathSource::Struct, true) => E0574, (PathSource::Struct, false) => E0422, (PathSource::Expr(..), true) | (PathSource::Delegation, true) => E0423, (PathSource::Expr(..), false) | (PathSource::Delegation, false) => E0425, (PathSource::Pat | PathSource::TupleStruct(..), true) => E0532, (PathSource::Pat | PathSource::TupleStruct(..), false) => E0531, - (PathSource::TraitItem(..), true) | (PathSource::ReturnTypeNotation, true) => E0575, - (PathSource::TraitItem(..), false) | (PathSource::ReturnTypeNotation, false) => E0576, + (PathSource::TraitItem(..) | PathSource::ReturnTypeNotation, true) => E0575, + (PathSource::TraitItem(..) | PathSource::ReturnTypeNotation, false) => E0576, (PathSource::PreciseCapturingArg(..), true) => E0799, (PathSource::PreciseCapturingArg(..), false) => E0800, } @@ -621,7 +650,7 @@ pub(crate) struct UnnecessaryQualification<'ra> { pub removal_span: Span, } -#[derive(Default)] +#[derive(Default, Debug)] struct DiagMetadata<'ast> { /// The current trait's associated items' ident, used for diagnostic suggestions. current_trait_assoc_items: Option<&'ast [P<AssocItem>]>, @@ -1200,7 +1229,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r if let TyKind::Path(None, ref path) = ty.kind // We cannot disambiguate multi-segment paths right now as that requires type // checking. - && path.is_potential_trivial_const_arg() + && path.is_potential_trivial_const_arg(false) { let mut check_ns = |ns| { self.maybe_resolve_ident_in_lexical_scope(path.segments[0].ident, ns) @@ -1404,7 +1433,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r visit_opt!(self, visit_ident, ident); try_visit!(self.visit_ty(ty)); if let Some(v) = &default { - self.resolve_anon_const(v, AnonConstKind::ConstArg(IsRepeatExpr::No)); + self.resolve_anon_const(v, AnonConstKind::FieldDefaultValue); } } } @@ -1538,8 +1567,10 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { // provide previous type parameters as they're built. We // put all the parameters on the ban list and then remove // them one by one as they are processed and become available. - let mut forward_ty_ban_rib = Rib::new(RibKind::ForwardGenericParamBan); - let mut forward_const_ban_rib = Rib::new(RibKind::ForwardGenericParamBan); + let mut forward_ty_ban_rib = + Rib::new(RibKind::ForwardGenericParamBan(ForwardGenericParamBanReason::Default)); + let mut forward_const_ban_rib = + Rib::new(RibKind::ForwardGenericParamBan(ForwardGenericParamBanReason::Default)); for param in params.iter() { match param.kind { GenericParamKind::Type { .. } => { @@ -1570,6 +1601,25 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { forward_ty_ban_rib.bindings.insert(Ident::with_dummy_span(kw::SelfUpper), Res::Err); } + // NOTE: We use different ribs here not for a technical reason, but just + // for better diagnostics. 
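To make the const-param-ty case concrete: without the `generic_const_parameter_types` gate checked here, the type of a const parameter may not mention other generic parameters at all (E0770). A minimal illustration, not taken from the patch:

```rust
// Accepted today: the const parameter's type is a plain primitive.
struct Fixed<const N: usize>([u8; N]);

// Rejected with E0770 ("the type of const parameters must not depend on other
// generic parameters") unless the feature gate above is enabled:
// struct Generic<T, const N: T>(T);
```

The cloned "const param ty" ribs exist only so the eventual error can be worded that way instead of falling back to the generic forward-declaration message, as the NOTE above says.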
+ let mut forward_ty_ban_rib_const_param_ty = Rib { + bindings: forward_ty_ban_rib.bindings.clone(), + patterns_with_skipped_bindings: FxHashMap::default(), + kind: RibKind::ForwardGenericParamBan(ForwardGenericParamBanReason::ConstParamTy), + }; + let mut forward_const_ban_rib_const_param_ty = Rib { + bindings: forward_const_ban_rib.bindings.clone(), + patterns_with_skipped_bindings: FxHashMap::default(), + kind: RibKind::ForwardGenericParamBan(ForwardGenericParamBanReason::ConstParamTy), + }; + // We'll ban these with a `ConstParamTy` rib, so just clear these ribs for better + // diagnostics, so we don't mention anything about const param tys having generics at all. + if !self.r.tcx.features().generic_const_parameter_types() { + forward_ty_ban_rib_const_param_ty.bindings.clear(); + forward_const_ban_rib_const_param_ty.bindings.clear(); + } + self.with_lifetime_rib(LifetimeRibKind::AnonymousReportError, |this| { for param in params { match param.kind { @@ -1592,21 +1642,29 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { } // Allow all following defaults to refer to this type parameter. - forward_ty_ban_rib - .bindings - .remove(&Ident::with_dummy_span(param.ident.name)); + let i = &Ident::with_dummy_span(param.ident.name); + forward_ty_ban_rib.bindings.remove(i); + forward_ty_ban_rib_const_param_ty.bindings.remove(i); } GenericParamKind::Const { ref ty, kw_span: _, ref default } => { // Const parameters can't have param bounds. assert!(param.bounds.is_empty()); - this.ribs[TypeNS].push(Rib::new(RibKind::ConstParamTy)); - this.ribs[ValueNS].push(Rib::new(RibKind::ConstParamTy)); - this.with_lifetime_rib(LifetimeRibKind::ConstParamTy, |this| { + this.ribs[TypeNS].push(forward_ty_ban_rib_const_param_ty); + this.ribs[ValueNS].push(forward_const_ban_rib_const_param_ty); + if this.r.tcx.features().generic_const_parameter_types() { this.visit_ty(ty) - }); - this.ribs[TypeNS].pop().unwrap(); - this.ribs[ValueNS].pop().unwrap(); + } else { + this.ribs[TypeNS].push(Rib::new(RibKind::ConstParamTy)); + this.ribs[ValueNS].push(Rib::new(RibKind::ConstParamTy)); + this.with_lifetime_rib(LifetimeRibKind::ConstParamTy, |this| { + this.visit_ty(ty) + }); + this.ribs[TypeNS].pop().unwrap(); + this.ribs[ValueNS].pop().unwrap(); + } + forward_const_ban_rib_const_param_ty = this.ribs[ValueNS].pop().unwrap(); + forward_ty_ban_rib_const_param_ty = this.ribs[TypeNS].pop().unwrap(); if let Some(expr) = default { this.ribs[TypeNS].push(forward_ty_ban_rib); @@ -1620,9 +1678,9 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { } // Allow all following defaults to refer to this const parameter. - forward_const_ban_rib - .bindings - .remove(&Ident::with_dummy_span(param.ident.name)); + let i = &Ident::with_dummy_span(param.ident.name); + forward_const_ban_rib.bindings.remove(i); + forward_const_ban_rib_const_param_ty.bindings.remove(i); } } } @@ -1984,6 +2042,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { | PathSource::Pat | PathSource::Struct | PathSource::TupleStruct(..) + | PathSource::DefineOpaques | PathSource::Delegation => true, }; if inferred { @@ -2597,7 +2656,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { ); } - ItemKind::Fn(box Fn { ref generics, .. }) => { + ItemKind::Fn(box Fn { ref generics, ref define_opaque, .. 
}) => { self.with_generic_param_rib( &generics.params, RibKind::Item(HasGenericParams::Yes(generics.span), def_kind), @@ -2608,6 +2667,10 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { }, |this| visit::walk_item(this, item), ); + + for (id, path) in define_opaque.iter().flatten() { + self.smart_resolve_path(*id, &None, path, PathSource::DefineOpaques); + } } ItemKind::Enum(_, ref generics) @@ -2973,7 +3036,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { } // HACK(min_const_generics, generic_const_exprs): We - // want to keep allowing `[0; std::mem::size_of::<*mut T>()]` + // want to keep allowing `[0; size_of::<*mut T>()]` // with a future compat lint for now. We do this by adding an // additional special case for repeat expressions. // @@ -3078,8 +3141,12 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { }, ); } - AssocItemKind::Fn(box Fn { generics, .. }) => { + AssocItemKind::Fn(box Fn { generics, define_opaque, .. }) => { walk_assoc_item(self, generics, LifetimeBinderKind::Function, item); + + for (id, path) in define_opaque.iter().flatten() { + self.smart_resolve_path(*id, &None, path, PathSource::DefineOpaques); + } } AssocItemKind::Delegation(delegation) => { self.with_generic_param_rib( @@ -3125,6 +3192,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { PathSource::Trait(AliasPossibility::No), Finalize::new(trait_ref.ref_id, trait_ref.path.span), RecordPartialRes::Yes, + None, ); self.diag_metadata.currently_processing_impl_trait = None; if let Some(def_id) = res.expect_full_res().opt_def_id() { @@ -3288,7 +3356,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { }, ); } - AssocItemKind::Fn(box Fn { generics, .. }) => { + AssocItemKind::Fn(box Fn { generics, define_opaque, .. }) => { debug!("resolve_implementation AssocItemKind::Fn"); // We also need a new scope for the impl item type parameters. self.with_generic_param_rib( @@ -3315,6 +3383,10 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { visit::walk_assoc_item(this, item, AssocCtxt::Impl) }, ); + + for (id, path) in define_opaque.iter().flatten() { + self.smart_resolve_path(*id, &None, path, PathSource::DefineOpaques); + } } AssocItemKind::Type(box TyAlias { generics, .. }) => { self.diag_metadata.in_non_gat_assoc_type = Some(generics.params.is_empty()); @@ -4051,6 +4123,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { source, Finalize::new(id, path.span), RecordPartialRes::Yes, + None, ); } @@ -4062,14 +4135,21 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { source: PathSource<'ast>, finalize: Finalize, record_partial_res: RecordPartialRes, + parent_qself: Option<&QSelf>, ) -> PartialRes { let ns = source.namespace(); let Finalize { node_id, path_span, .. 
} = finalize; let report_errors = |this: &mut Self, res: Option<Res>| { if this.should_report_errs() { - let (err, candidates) = - this.smart_resolve_report_errors(path, None, path_span, source, res); + let (err, candidates) = this.smart_resolve_report_errors( + path, + None, + path_span, + source, + res, + parent_qself, + ); let def_id = this.parent_scope.module.nearest_parent_mod(); let instead = res.is_some(); @@ -4138,6 +4218,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { path_span, PathSource::Type, None, + parent_qself, ); // There are two different error messages user might receive at @@ -4415,6 +4496,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { PathSource::Trait(AliasPossibility::No), Finalize::new(finalize.node_id, qself.path_span), RecordPartialRes::No, + Some(&qself), ); if trait_res.expect_full_res() == Res::Err { @@ -4439,6 +4521,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { PathSource::TraitItem(ns), Finalize::with_root_span(finalize.node_id, finalize.path_span, qself.path_span), RecordPartialRes::No, + Some(&qself), ); // The remaining segments (the `C` in our example) will @@ -4609,11 +4692,12 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { constant, anon_const_kind ); - self.resolve_anon_const_manual( - constant.value.is_potential_trivial_const_arg(), - anon_const_kind, - |this| this.resolve_expr(&constant.value, None), - ) + let is_trivial_const_arg = constant + .value + .is_potential_trivial_const_arg(self.r.tcx.features().min_generic_const_args()); + self.resolve_anon_const_manual(is_trivial_const_arg, anon_const_kind, |this| { + this.resolve_expr(&constant.value, None) + }) } /// There are a few places that we need to resolve an anon const but we did not parse an @@ -4637,6 +4721,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { AnonConstKind::EnumDiscriminant => { ConstantHasGenerics::No(NoConstantGenericsReason::IsEnumDiscriminant) } + AnonConstKind::FieldDefaultValue => ConstantHasGenerics::Yes, AnonConstKind::InlineConst => ConstantHasGenerics::Yes, AnonConstKind::ConstArg(_) => { if self.r.tcx.features().generic_const_exprs() || is_trivial_const_arg { @@ -4773,8 +4858,11 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { // Constant arguments need to be treated as AnonConst since // that is how they will be later lowered to HIR. 
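Background for the `is_potential_trivial_const_arg(...)` calls that now take the `min_generic_const_args` flag: a const argument that is a bare parameter path is "trivial" and can be resolved as a real const argument, while anything more complex falls under the anonymous-const rules. An illustrative sketch (the `Buf`/`zeroed` names are invented):

```rust
struct Buf<const N: usize>([u8; N]);

// `N` by itself is a trivial const argument, so this resolves on stable.
fn zeroed<const N: usize>() -> Buf<N> {
    Buf([0; N])
}

// A compound expression is not trivial and needs `feature(generic_const_exprs)`:
// fn grown<const N: usize>() -> Buf<{ N + 1 }> { Buf([0; N + 1]) }
```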
if const_args.contains(&idx) { + let is_trivial_const_arg = argument.is_potential_trivial_const_arg( + self.r.tcx.features().min_generic_const_args(), + ); self.resolve_anon_const_manual( - argument.is_potential_trivial_const_arg(), + is_trivial_const_arg, AnonConstKind::ConstArg(IsRepeatExpr::No), |this| this.resolve_expr(argument, None), ); diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index fa9c42e3593..84858cfc1b1 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -24,7 +24,7 @@ use rustc_hir::def_id::{CRATE_DEF_ID, DefId}; use rustc_hir::{MissingLifetimeKind, PrimTy}; use rustc_middle::ty; use rustc_session::{Session, lint}; -use rustc_span::edit_distance::find_best_match_for_name; +use rustc_span::edit_distance::{edit_distance, find_best_match_for_name}; use rustc_span::edition::Edition; use rustc_span::hygiene::MacroKind; use rustc_span::{DUMMY_SP, Ident, Span, Symbol, kw, sym}; @@ -35,7 +35,7 @@ use super::NoConstantGenericsReason; use crate::diagnostics::{ImportSuggestion, LabelSuggestion, TypoSuggestion}; use crate::late::{ AliasPossibility, LateResolutionVisitor, LifetimeBinderKind, LifetimeRes, LifetimeRibKind, - LifetimeUseSet, RibKind, + LifetimeUseSet, QSelf, RibKind, }; use crate::ty::fast_reject::SimplifiedType; use crate::{ @@ -421,6 +421,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { span: Span, source: PathSource<'_>, res: Option<Res>, + qself: Option<&QSelf>, ) -> (Diag<'tcx>, Vec<ImportSuggestion>) { debug!(?res, ?source); let base_error = self.make_base_error(path, span, source, res); @@ -429,6 +430,14 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { let mut err = self.r.dcx().struct_span_err(base_error.span, base_error.msg.clone()); err.code(code); + // Try to get the span of the identifier within the path's syntax context + // (if that's different). 
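The new "within the syntax context" label is easiest to see with a macro that pastes a caller-supplied name into a position that fails to resolve. A hypothetical reproduction (the macro and `NotAType` are invented; the label text comes from the hunk below):

```rust
macro_rules! make_binding {
    ($ty:ident) => {
        let _x: $ty = Default::default();
    };
}

fn main() {
    make_binding!(NotAType);
    // error[E0412]: cannot find type `NotAType` in this scope
    // ...now with an extra label on `$ty` reading "due to this macro variable"
}
```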
+ if let Some(within_macro_span) = + base_error.span.within_macro(span, self.r.tcx.sess.source_map()) + { + err.span_label(within_macro_span, "due to this macro variable"); + } + self.detect_missing_binding_available_from_pattern(&mut err, path, following_seg); self.suggest_at_operator_in_slice_pat_with_range(&mut err, path); self.suggest_swapping_misplaced_self_ty_and_trait(&mut err, source, res, base_error.span); @@ -453,6 +462,15 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { self.suggest_self_or_self_ref(&mut err, path, span); self.detect_assoc_type_constraint_meant_as_path(&mut err, &base_error); + self.detect_rtn_with_fully_qualified_path( + &mut err, + path, + following_seg, + span, + source, + res, + qself, + ); if self.suggest_self_ty(&mut err, source, path, span) || self.suggest_self_value(&mut err, source, path, span) { @@ -501,6 +519,33 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { (err, candidates) } + fn detect_rtn_with_fully_qualified_path( + &self, + err: &mut Diag<'_>, + path: &[Segment], + following_seg: Option<&Segment>, + span: Span, + source: PathSource<'_>, + res: Option<Res>, + qself: Option<&QSelf>, + ) { + if let Some(Res::Def(DefKind::AssocFn, _)) = res + && let PathSource::TraitItem(TypeNS) = source + && let None = following_seg + && let Some(qself) = qself + && let TyKind::Path(None, ty_path) = &qself.ty.kind + && ty_path.segments.len() == 1 + && self.diag_metadata.current_where_predicate.is_some() + { + err.span_suggestion_verbose( + span, + "you might have meant to use the return type notation syntax", + format!("{}::{}(..)", ty_path.segments[0].ident, path[path.len() - 1].ident), + Applicability::MaybeIncorrect, + ); + } + } + fn detect_assoc_type_constraint_meant_as_path( &self, err: &mut Diag<'_>, @@ -2874,23 +2919,35 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { ) .with_span_label(lifetime_ref.ident.span, "undeclared lifetime") }; - self.suggest_introducing_lifetime( - &mut err, - Some(lifetime_ref.ident.name.as_str()), - |err, _, span, message, suggestion, span_suggs| { - err.multipart_suggestion_with_style( - message, - std::iter::once((span, suggestion)).chain(span_suggs.clone()).collect(), - Applicability::MaybeIncorrect, - if span_suggs.is_empty() { - SuggestionStyle::ShowCode - } else { - SuggestionStyle::ShowAlways - }, - ); - true - }, - ); + + // Check if this is a typo of `'static`. 
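A sketch of the case the `edit_distance` check below targets: an undeclared lifetime within distance 2 of `'static` now gets a machine-applicable rename suggestion instead of the "introduce a lifetime" flow. The `leak` function is illustrative only:

```rust
// As originally written, with a typo:
// fn leak(s: String) -> &'statc str { Box::leak(s.into_boxed_str()) }
// error[E0261]: use of undeclared lifetime name `'statc`
// help: you may have misspelled the `'static` lifetime

// After applying the suggested replacement:
fn leak(s: String) -> &'static str {
    Box::leak(s.into_boxed_str())
}
```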
+ if edit_distance(lifetime_ref.ident.name.as_str(), "'static", 2).is_some() { + err.span_suggestion_verbose( + lifetime_ref.ident.span, + "you may have misspelled the `'static` lifetime", + "'static", + Applicability::MachineApplicable, + ); + } else { + self.suggest_introducing_lifetime( + &mut err, + Some(lifetime_ref.ident.name.as_str()), + |err, _, span, message, suggestion, span_suggs| { + err.multipart_suggestion_with_style( + message, + std::iter::once((span, suggestion)).chain(span_suggs.clone()).collect(), + Applicability::MaybeIncorrect, + if span_suggs.is_empty() { + SuggestionStyle::ShowCode + } else { + SuggestionStyle::ShowAlways + }, + ); + true + }, + ); + } + err.emit(); } @@ -3052,7 +3109,6 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { .create_err(errors::ParamInTyOfConstParam { span: lifetime_ref.ident.span, name: lifetime_ref.ident.name, - param_kind: Some(errors::ParamKindInTyOfConstParam::Lifetime), }) .emit(); } @@ -3516,12 +3572,6 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { } } } - - // Record as using the suggested resolution. - let (_, (_, res)) = in_scope_lifetimes[0]; - for lt in &lifetime_refs { - self.r.lifetimes_res_map.insert(lt.id, res); - } } _ => { let lifetime_spans: Vec<_> = diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index ea48d0cad74..495ce843fcd 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -20,7 +20,6 @@ #![feature(let_chains)] #![feature(rustc_attrs)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::cell::{Cell, RefCell}; @@ -30,11 +29,12 @@ use std::sync::Arc; use diagnostics::{ImportSuggestion, LabelSuggestion, Suggestion}; use effective_visibilities::EffectiveVisibilitiesVisitor; -use errors::{ - ParamKindInEnumDiscriminant, ParamKindInNonTrivialAnonConst, ParamKindInTyOfConstParam, -}; +use errors::{ParamKindInEnumDiscriminant, ParamKindInNonTrivialAnonConst}; use imports::{Import, ImportData, ImportKind, NameResolution}; -use late::{HasGenericParams, PathSource, PatternSource, UnnecessaryQualification}; +use late::{ + ForwardGenericParamBanReason, HasGenericParams, PathSource, PatternSource, + UnnecessaryQualification, +}; use macros::{MacroRulesBinding, MacroRulesScope, MacroRulesScopeRef}; use rustc_arena::{DroplessArena, TypedArena}; use rustc_ast::expand::StrippedCfgItem; @@ -275,9 +275,11 @@ enum ResolutionError<'ra> { shadowed_binding_span: Span, }, /// Error E0128: generic parameters with a default cannot use forward-declared identifiers. - ForwardDeclaredGenericParam, + ForwardDeclaredGenericParam(Symbol, ForwardGenericParamBanReason), + // FIXME(generic_const_parameter_types): This should give custom output specifying it's only + // problematic to use *forward declared* parameters when the feature is enabled. /// ERROR E0770: the type of const parameters must not depend on other generic parameters. - ParamInTyOfConstParam { name: Symbol, param_kind: Option<ParamKindInTyOfConstParam> }, + ParamInTyOfConstParam { name: Symbol }, /// generic parameters must not be used inside const evaluations. /// /// This error is only emitted when using `min_const_generics`. @@ -287,7 +289,7 @@ enum ResolutionError<'ra> { /// This error is emitted even with `generic_const_exprs`.
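For the renamed `ForwardDeclaredSelf` variant (E0735), the classic reproduction still applies; `Node` here is an invented example, not from the patch:

```rust
// Rejected (E0735): `Self` used in a generic parameter default.
// struct Node<T = Self> { value: T }

// Accepted: name a concrete default instead.
struct Node<T = ()> {
    value: T,
}
```

Carrying a `ForwardGenericParamBanReason` lets the resolver distinguish `Self` in a default from `Self` in a const parameter's type when wording the diagnostic.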
ParamInEnumDiscriminant { name: Symbol, param_kind: ParamKindInEnumDiscriminant }, /// Error E0735: generic parameters with a default cannot use `Self` - SelfInGenericParamDefault, + ForwardDeclaredSelf(ForwardGenericParamBanReason), /// Error E0767: use of unreachable label UnreachableLabel { name: Symbol, definition_span: Span, suggestion: Option<LabelSuggestion> }, /// Error E0323, E0324, E0325: mismatch between trait item and impl item. @@ -1340,7 +1342,7 @@ impl<'tcx> Resolver<'_, 'tcx> { &mut self, parent: LocalDefId, node_id: ast::NodeId, - name: Symbol, + name: Option<Symbol>, def_kind: DefKind, expn_id: ExpnId, span: Span, diff --git a/compiler/rustc_sanitizers/src/lib.rs b/compiler/rustc_sanitizers/src/lib.rs index 55be931bcd6..c4a9cab24d0 100644 --- a/compiler/rustc_sanitizers/src/lib.rs +++ b/compiler/rustc_sanitizers/src/lib.rs @@ -4,8 +4,8 @@ //! compiler. // tidy-alphabetical-start +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(let_chains)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod cfi; diff --git a/compiler/rustc_serialize/src/leb128.rs b/compiler/rustc_serialize/src/leb128.rs index aa7c2858466..4a475805697 100644 --- a/compiler/rustc_serialize/src/leb128.rs +++ b/compiler/rustc_serialize/src/leb128.rs @@ -7,7 +7,7 @@ use crate::serialize::Decoder; /// Returns the length of the longest LEB128 encoding for `T`, assuming `T` is an integer type pub const fn max_leb128_len<T>() -> usize { // The longest LEB128 encoding for an integer uses 7 bits per byte. - (std::mem::size_of::<T>() * 8 + 6) / 7 + (size_of::<T>() * 8 + 6) / 7 } /// Returns the length of the longest LEB128 encoding of all supported integer types. diff --git a/compiler/rustc_serialize/src/lib.rs b/compiler/rustc_serialize/src/lib.rs index 9e9b78cfdd5..13c1a273eb8 100644 --- a/compiler/rustc_serialize/src/lib.rs +++ b/compiler/rustc_serialize/src/lib.rs @@ -14,7 +14,6 @@ #![feature(min_specialization)] #![feature(never_type)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub use self::serialize::{Decodable, Decoder, Encodable, Encoder}; diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 8f0b17b5e88..7af221c9607 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -42,6 +42,32 @@ mod cfg; mod native_libs; pub mod sigpipe; +pub const PRINT_KINDS: &[(&str, PrintKind)] = &[ + // tidy-alphabetical-start + ("all-target-specs-json", PrintKind::AllTargetSpecs), + ("calling-conventions", PrintKind::CallingConventions), + ("cfg", PrintKind::Cfg), + ("check-cfg", PrintKind::CheckCfg), + ("code-models", PrintKind::CodeModels), + ("crate-name", PrintKind::CrateName), + ("deployment-target", PrintKind::DeploymentTarget), + ("file-names", PrintKind::FileNames), + ("host-tuple", PrintKind::HostTuple), + ("link-args", PrintKind::LinkArgs), + ("native-static-libs", PrintKind::NativeStaticLibs), + ("relocation-models", PrintKind::RelocationModels), + ("split-debuginfo", PrintKind::SplitDebuginfo), + ("stack-protector-strategies", PrintKind::StackProtectorStrategies), + ("sysroot", PrintKind::Sysroot), + ("target-cpus", PrintKind::TargetCPUs), + ("target-features", PrintKind::TargetFeatures), + ("target-libdir", PrintKind::TargetLibdir), + ("target-list", PrintKind::TargetList), + ("target-spec-json", PrintKind::TargetSpec), + ("tls-models", PrintKind::TlsModels), + // tidy-alphabetical-end +]; + /// The different settings that the `-C 
strip` flag can have. #[derive(Clone, Copy, PartialEq, Hash, Debug)] pub enum Strip { @@ -539,7 +565,10 @@ impl FromStr for SplitDwarfKind { #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord, HashStable_Generic)] #[derive(Encodable, Decodable)] pub enum OutputType { + /// This is the optimized bitcode, which could be either pre-LTO or non-LTO bitcode, + /// depending on the specific request type. Bitcode, + /// This is the summary or index data part of the ThinLTO bitcode. ThinLinkBitcode, Assembly, LlvmAssembly, @@ -652,10 +681,14 @@ impl OutputType { } /// The type of diagnostics output to generate. -#[derive(Clone, Copy, Debug, PartialEq, Eq)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, Default)] pub enum ErrorOutputType { /// Output meant for the consumption of humans. - HumanReadable(HumanReadableErrorType, ColorConfig), + #[default] + HumanReadable { + kind: HumanReadableErrorType = HumanReadableErrorType::Default, + color_config: ColorConfig = ColorConfig::Auto, + }, /// Output that's consumed by other tools such as `rustfix` or the `RLS`. Json { /// Render the JSON in a human readable way (with indents and newlines). @@ -667,12 +700,6 @@ pub enum ErrorOutputType { }, } -impl Default for ErrorOutputType { - fn default() -> Self { - Self::HumanReadable(HumanReadableErrorType::Default, ColorConfig::Auto) - } -} - #[derive(Clone, Hash, Debug)] pub enum ResolveDocLinks { /// Do not resolve doc links. @@ -869,18 +896,13 @@ pub enum PrintKind { DeploymentTarget, } -#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)] +#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Default)] pub struct NextSolverConfig { /// Whether the new trait solver should be enabled in coherence. - pub coherence: bool, + pub coherence: bool = true, /// Whether the new trait solver should be enabled everywhere. /// This is only `true` if `coherence` is also enabled. - pub globally: bool, -} -impl Default for NextSolverConfig { - fn default() -> Self { - NextSolverConfig { coherence: true, globally: false } - } + pub globally: bool = false, } #[derive(Clone)] @@ -1505,6 +1527,13 @@ The default is {DEFAULT_EDITION} and the latest stable edition is {LATEST_STABLE ) }); +static PRINT_KINDS_STRING: LazyLock<String> = LazyLock::new(|| { + format!( + "[{}]", + PRINT_KINDS.iter().map(|(name, _)| format!("{name}")).collect::<Vec<_>>().join("|") + ) +}); + /// Returns all rustc command line options, including metadata for /// each option, such as whether the option is stable. 
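The `ErrorOutputType` and `NextSolverConfig` rewrites above lean on the unstable `default_field_values` feature (enabled in `rustc_session`'s `lib.rs` later in this diff). A self-contained sketch of the pattern, assuming the nightly feature behaves as these hunks rely on:

```rust
#![feature(default_field_values)] // nightly-only

#[derive(Debug, Default)]
struct NextSolverConfig {
    coherence: bool = true,
    globally: bool = false,
}

fn main() {
    // `derive(Default)` picks up the per-field defaults, replacing the manual impl.
    assert!(NextSolverConfig::default().coherence);

    // `..` with no base expression fills the remaining fields from their defaults,
    // the same shape as `ErrorOutputType::HumanReadable { color_config, .. }`.
    let config = NextSolverConfig { globally: true, .. };
    assert!(config.coherence && config.globally);
}
```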
pub fn rustc_optgroups() -> Vec<RustcOptGroup> { @@ -1565,10 +1594,7 @@ pub fn rustc_optgroups() -> Vec<RustcOptGroup> { "", "print", "Compiler information to print on stdout", - "[crate-name|file-names|sysroot|target-libdir|cfg|check-cfg|calling-conventions|\ - target-list|target-cpus|target-features|relocation-models|code-models|\ - tls-models|target-spec-json|all-target-specs-json|native-static-libs|\ - stack-protector-strategies|link-args|deployment-target]", + &PRINT_KINDS_STRING, ), opt(Stable, FlagMulti, "g", "", "Equivalent to -C debuginfo=2", ""), opt(Stable, FlagMulti, "O", "", "Equivalent to -C opt-level=3", ""), @@ -1792,7 +1818,7 @@ pub fn parse_json(early_dcx: &EarlyDiagCtxt, matches: &getopts::Matches) -> Json pub fn parse_error_format( early_dcx: &mut EarlyDiagCtxt, matches: &getopts::Matches, - color: ColorConfig, + color_config: ColorConfig, json_color: ColorConfig, json_rendered: HumanReadableErrorType, ) -> ErrorOutputType { @@ -1802,27 +1828,26 @@ pub fn parse_error_format( // `opt_present` because the latter will panic. let error_format = if matches.opts_present(&["error-format".to_owned()]) { match matches.opt_str("error-format").as_deref() { - None | Some("human") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color) - } - Some("human-annotate-rs") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::AnnotateSnippet, color) - } + None | Some("human") => ErrorOutputType::HumanReadable { color_config, .. }, + Some("human-annotate-rs") => ErrorOutputType::HumanReadable { + kind: HumanReadableErrorType::AnnotateSnippet, + color_config, + }, Some("json") => { ErrorOutputType::Json { pretty: false, json_rendered, color_config: json_color } } Some("pretty-json") => { ErrorOutputType::Json { pretty: true, json_rendered, color_config: json_color } } - Some("short") => ErrorOutputType::HumanReadable(HumanReadableErrorType::Short, color), - Some("human-unicode") => { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Unicode, color) + Some("short") => { + ErrorOutputType::HumanReadable { kind: HumanReadableErrorType::Short, color_config } } + Some("human-unicode") => ErrorOutputType::HumanReadable { + kind: HumanReadableErrorType::Unicode, + color_config, + }, Some(arg) => { - early_dcx.set_error_format(ErrorOutputType::HumanReadable( - HumanReadableErrorType::Default, - color, - )); + early_dcx.set_error_format(ErrorOutputType::HumanReadable { color_config, .. }); early_dcx.early_fatal(format!( "argument for `--error-format` must be `human`, `human-annotate-rs`, \ `human-unicode`, `json`, `pretty-json` or `short` (instead was `{arg}`)" @@ -1830,7 +1855,7 @@ pub fn parse_error_format( } } } else { - ErrorOutputType::HumanReadable(HumanReadableErrorType::Default, color) + ErrorOutputType::HumanReadable { color_config, .. } }; match error_format { @@ -1885,7 +1910,7 @@ fn check_error_format_stability( } let format = match format { ErrorOutputType::Json { pretty: true, .. } => "pretty-json", - ErrorOutputType::HumanReadable(format, _) => match format { + ErrorOutputType::HumanReadable { kind, .. 
} => match kind { HumanReadableErrorType::AnnotateSnippet => "human-annotate-rs", HumanReadableErrorType::Unicode => "human-unicode", _ => return, @@ -1996,32 +2021,6 @@ fn collect_print_requests( cg.target_feature = String::new(); } - const PRINT_KINDS: &[(&str, PrintKind)] = &[ - // tidy-alphabetical-start - ("all-target-specs-json", PrintKind::AllTargetSpecs), - ("calling-conventions", PrintKind::CallingConventions), - ("cfg", PrintKind::Cfg), - ("check-cfg", PrintKind::CheckCfg), - ("code-models", PrintKind::CodeModels), - ("crate-name", PrintKind::CrateName), - ("deployment-target", PrintKind::DeploymentTarget), - ("file-names", PrintKind::FileNames), - ("host-tuple", PrintKind::HostTuple), - ("link-args", PrintKind::LinkArgs), - ("native-static-libs", PrintKind::NativeStaticLibs), - ("relocation-models", PrintKind::RelocationModels), - ("split-debuginfo", PrintKind::SplitDebuginfo), - ("stack-protector-strategies", PrintKind::StackProtectorStrategies), - ("sysroot", PrintKind::Sysroot), - ("target-cpus", PrintKind::TargetCPUs), - ("target-features", PrintKind::TargetFeatures), - ("target-libdir", PrintKind::TargetLibdir), - ("target-list", PrintKind::TargetList), - ("target-spec-json", PrintKind::TargetSpec), - ("tls-models", PrintKind::TlsModels), - // tidy-alphabetical-end - ]; - // We disallow reusing the same path in multiple prints, such as `--print // cfg=output.txt --print link-args=output.txt`, because outputs are printed // by disparate pieces of the compiler, and keeping track of which files @@ -2807,8 +2806,8 @@ impl fmt::Display for CrateType { } impl IntoDiagArg for CrateType { - fn into_diag_arg(self) -> DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> DiagArgValue { + self.to_string().into_diag_arg(&mut None) } } @@ -2880,14 +2879,6 @@ impl PpMode { | StableMir => true, } } - pub fn needs_hir(&self) -> bool { - use PpMode::*; - match *self { - Source(_) | AstTree | AstTreeExpanded => false, - - Hir(_) | HirTree | ThirTree | ThirFlat | Mir | MirCFG | StableMir => true, - } - } pub fn needs_analysis(&self) -> bool { use PpMode::*; diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs index ec83761da4a..cc2decc2fe4 100644 --- a/compiler/rustc_session/src/filesearch.rs +++ b/compiler/rustc_session/src/filesearch.rs @@ -92,7 +92,7 @@ fn current_dll_path() -> Result<PathBuf, String> { if libc::loadquery( libc::L_GETINFO, buffer.as_mut_ptr() as *mut u8, - (std::mem::size_of::<libc::ld_info>() * buffer.len()) as u32, + (size_of::<libc::ld_info>() * buffer.len()) as u32, ) >= 0 { break; diff --git a/compiler/rustc_session/src/lib.rs b/compiler/rustc_session/src/lib.rs index 112adde3740..0e19b982a13 100644 --- a/compiler/rustc_session/src/lib.rs +++ b/compiler/rustc_session/src/lib.rs @@ -1,12 +1,12 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![feature(default_field_values)] #![feature(iter_intersperse)] #![feature(let_chains)] #![feature(rustc_attrs)] // To generate CodegenOptionsTargetModifiers and UnstableOptionsTargetModifiers enums // with macro_rules, it is necessary to use recursive mechanic ("Incremental TT Munchers"). 
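Several `IntoDiagArg` impls in this diff (`PatternSource` earlier, `CrateType` above, `Limit` just below) gain an extra `&mut Option<PathBuf>` parameter. The sketch below mocks the trait shape only; `DiagArgValue`, the trait, and `CrateTypeMock` are stand-ins rather than the real `rustc_errors` items, and the impls shown in the patch simply ignore the new slot:

```rust
use std::borrow::Cow;
use std::path::PathBuf;

// Mock of the diagnostic-argument value type.
pub enum DiagArgValue {
    Str(Cow<'static, str>),
}

// Mock of the updated trait: callers now thread a `&mut Option<PathBuf>` through.
pub trait IntoDiagArg {
    fn into_diag_arg(self, path: &mut Option<PathBuf>) -> DiagArgValue;
}

pub struct CrateTypeMock(pub &'static str);

impl IntoDiagArg for CrateTypeMock {
    fn into_diag_arg(self, _: &mut Option<PathBuf>) -> DiagArgValue {
        DiagArgValue::Str(Cow::Borrowed(self.0))
    }
}
```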
#![recursion_limit = "256"] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod errors; diff --git a/compiler/rustc_session/src/session.rs b/compiler/rustc_session/src/session.rs index 8e5ff1d3bc4..bcd9a73d9d3 100644 --- a/compiler/rustc_session/src/session.rs +++ b/compiler/rustc_session/src/session.rs @@ -109,8 +109,8 @@ impl Mul<usize> for Limit { } impl rustc_errors::IntoDiagArg for Limit { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { - self.to_string().into_diag_arg() + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { + self.to_string().into_diag_arg(&mut None) } } @@ -913,7 +913,7 @@ fn default_emitter( let source_map = if sopts.unstable_opts.link_only { None } else { Some(source_map) }; match sopts.error_format { - config::ErrorOutputType::HumanReadable(kind, color_config) => { + config::ErrorOutputType::HumanReadable { kind, color_config } => { let short = kind.short(); if let HumanReadableErrorType::AnnotateSnippet = kind { @@ -930,7 +930,6 @@ fn default_emitter( .fluent_bundle(bundle) .sm(source_map) .short_message(short) - .teach(sopts.unstable_opts.teach) .diagnostic_width(sopts.diagnostic_width) .macro_backtrace(macro_backtrace) .track_diagnostics(track_diagnostics) @@ -1430,7 +1429,7 @@ fn mk_emitter(output: ErrorOutputType) -> Box<DynEmitter> { let fallback_bundle = fallback_fluent_bundle(vec![rustc_errors::DEFAULT_LOCALE_RESOURCE], false); let emitter: Box<DynEmitter> = match output { - config::ErrorOutputType::HumanReadable(kind, color_config) => { + config::ErrorOutputType::HumanReadable { kind, color_config } => { let short = kind.short(); Box::new( HumanEmitter::new(stderr_destination(color_config), fallback_bundle) diff --git a/compiler/rustc_session/src/utils.rs b/compiler/rustc_session/src/utils.rs index 9182789cf02..fcede379b89 100644 --- a/compiler/rustc_session/src/utils.rs +++ b/compiler/rustc_session/src/utils.rs @@ -34,6 +34,7 @@ pub enum NativeLibKind { as_needed: Option<bool>, }, /// Dynamic library (e.g. `foo.dll` on Windows) without a corresponding import library. + /// On Linux, it refers to a generated shared library stub. RawDylib, /// A macOS-specific kind of dynamic libraries. 
Framework { diff --git a/compiler/rustc_smir/src/lib.rs b/compiler/rustc_smir/src/lib.rs index 2215e2f01ad..eaba14bbf30 100644 --- a/compiler/rustc_smir/src/lib.rs +++ b/compiler/rustc_smir/src/lib.rs @@ -9,13 +9,13 @@ // tidy-alphabetical-start #![allow(internal_features)] #![allow(rustc::usage_of_ty_tykind)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc( html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/", test(attr(allow(unused_variables), deny(warnings))) )] #![doc(rust_logo)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub mod rustc_internal; diff --git a/compiler/rustc_smir/src/rustc_internal/internal.rs b/compiler/rustc_smir/src/rustc_internal/internal.rs index 50cf605ba2a..bb2b2dea2f3 100644 --- a/compiler/rustc_smir/src/rustc_internal/internal.rs +++ b/compiler/rustc_smir/src/rustc_internal/internal.rs @@ -88,10 +88,9 @@ impl RustcInternal for Pattern { type T<'tcx> = rustc_ty::Pattern<'tcx>; fn internal<'tcx>(&self, tables: &mut Tables<'_>, tcx: TyCtxt<'tcx>) -> Self::T<'tcx> { tcx.mk_pat(match self { - Pattern::Range { start, end, include_end } => rustc_ty::PatternKind::Range { - start: start.as_ref().map(|c| c.internal(tables, tcx)), - end: end.as_ref().map(|c| c.internal(tables, tcx)), - include_end: *include_end, + Pattern::Range { start, end, include_end: _ } => rustc_ty::PatternKind::Range { + start: start.as_ref().unwrap().internal(tables, tcx), + end: end.as_ref().unwrap().internal(tables, tcx), }, }) } diff --git a/compiler/rustc_smir/src/rustc_smir/context.rs b/compiler/rustc_smir/src/rustc_smir/context.rs index acd3b758351..aa1921fc8e7 100644 --- a/compiler/rustc_smir/src/rustc_smir/context.rs +++ b/compiler/rustc_smir/src/rustc_smir/context.rs @@ -272,7 +272,7 @@ impl<'tcx> Context for TablesWrapper<'tcx> { let tcx = tables.tcx; let did = tables[def_id]; let attrs_iter = if let Some(did) = did.as_local() { - tcx.hir().attrs(tcx.local_def_id_to_hir_id(did)).iter() + tcx.hir_attrs(tcx.local_def_id_to_hir_id(did)).iter() } else { tcx.attrs_for_def(did).iter() }; diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs index a627e0e69b6..62cbab9b723 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs @@ -206,7 +206,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::BackendRepr { rustc_abi::BackendRepr::ScalarPair(first, second) => { ValueAbi::ScalarPair(first.stable(tables), second.stable(tables)) } - rustc_abi::BackendRepr::Vector { element, count } => { + rustc_abi::BackendRepr::SimdVector { element, count } => { ValueAbi::Vector { element: element.stable(tables), count } } rustc_abi::BackendRepr::Memory { sized } => ValueAbi::Aggregate { sized }, diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs index a0faf20c79a..aa0eac628dd 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs @@ -405,10 +405,11 @@ impl<'tcx> Stable<'tcx> for ty::Pattern<'tcx> { fn stable(&self, tables: &mut Tables<'_>) -> Self::T { match **self { - ty::PatternKind::Range { start, end, include_end } => stable_mir::ty::Pattern::Range { - start: start.stable(tables), - end: end.stable(tables), - include_end, + ty::PatternKind::Range { start, end } => stable_mir::ty::Pattern::Range { + // FIXME(SMIR): update data structures to not have an Option here 
anymore + start: Some(start.stable(tables)), + end: Some(end.stable(tables)), + include_end: true, }, } } diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs index 141d261b5f0..6384fa06c21 100644 --- a/compiler/rustc_span/src/analyze_source_file.rs +++ b/compiler/rustc_span/src/analyze_source_file.rs @@ -83,17 +83,17 @@ cfg_match! { // For character in the chunk, see if its byte value is < 0, which // indicates that it's part of a UTF-8 char. - let multibyte_test = unsafe { _mm_cmplt_epi8(chunk, _mm_set1_epi8(0)) }; + let multibyte_test = _mm_cmplt_epi8(chunk, _mm_set1_epi8(0)); // Create a bit mask from the comparison results. - let multibyte_mask = unsafe { _mm_movemask_epi8(multibyte_test) }; + let multibyte_mask = _mm_movemask_epi8(multibyte_test); // If the bit mask is all zero, we only have ASCII chars here: if multibyte_mask == 0 { assert!(intra_chunk_offset == 0); // Check for newlines in the chunk - let newlines_test = unsafe { _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8)) }; - let mut newlines_mask = unsafe { _mm_movemask_epi8(newlines_test) }; + let newlines_test = _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8)); + let mut newlines_mask = _mm_movemask_epi8(newlines_test); let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1); diff --git a/compiler/rustc_span/src/edition.rs b/compiler/rustc_span/src/edition.rs index 36f9b4cff60..da298080ed2 100644 --- a/compiler/rustc_span/src/edition.rs +++ b/compiler/rustc_span/src/edition.rs @@ -23,11 +23,27 @@ pub enum Edition { Edition2021, /// The 2024 edition Edition2024, + /// The future edition - this variant will always exist and features associated with this + /// edition can be moved to the next 20XX edition when it is established and it is confirmed + /// that those features will be part of that edition. + /// + /// This variant allows edition changes to be implemented before being assigned to a concrete + /// edition - primarily when there are two different unstable behaviours that need tested across + /// an edition boundary. + /// + /// This edition will be permanently unstable and any features associated with this edition + /// must also be behind a feature gate. + EditionFuture, } // Must be in order from oldest to newest. -pub const ALL_EDITIONS: &[Edition] = - &[Edition::Edition2015, Edition::Edition2018, Edition::Edition2021, Edition::Edition2024]; +pub const ALL_EDITIONS: &[Edition] = &[ + Edition::Edition2015, + Edition::Edition2018, + Edition::Edition2021, + Edition::Edition2024, + Edition::EditionFuture, +]; pub const EDITION_NAME_LIST: &str = "2015|2018|2021|2024"; @@ -42,6 +58,7 @@ impl fmt::Display for Edition { Edition::Edition2018 => "2018", Edition::Edition2021 => "2021", Edition::Edition2024 => "2024", + Edition::EditionFuture => "future", }; write!(f, "{s}") } @@ -54,6 +71,7 @@ impl Edition { Edition::Edition2018 => "rust_2018_compatibility", Edition::Edition2021 => "rust_2021_compatibility", Edition::Edition2024 => "rust_2024_compatibility", + Edition::EditionFuture => "edition_future_compatibility", } } @@ -63,6 +81,7 @@ impl Edition { Edition::Edition2018 => true, Edition::Edition2021 => true, Edition::Edition2024 => true, + Edition::EditionFuture => false, } } @@ -85,6 +104,11 @@ impl Edition { pub fn at_least_rust_2024(self) -> bool { self >= Edition::Edition2024 } + + /// Are we allowed to use features from the future edition? 
+ pub fn at_least_edition_future(self) -> bool { + self >= Edition::EditionFuture + } } impl FromStr for Edition { @@ -95,6 +119,7 @@ impl FromStr for Edition { "2018" => Ok(Edition::Edition2018), "2021" => Ok(Edition::Edition2021), "2024" => Ok(Edition::Edition2024), + "future" => Ok(Edition::EditionFuture), _ => Err(()), } } diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs index 84e89ff4b7d..64982b858c6 100644 --- a/compiler/rustc_span/src/hygiene.rs +++ b/compiler/rustc_span/src/hygiene.rs @@ -1173,6 +1173,8 @@ pub enum DesugaringKind { BoundModifier, /// Calls to contract checks (`#[requires]` to precond, `#[ensures]` to postcond) Contract, + /// A pattern type range start/end + PatTyRange, } impl DesugaringKind { @@ -1190,6 +1192,7 @@ impl DesugaringKind { DesugaringKind::WhileLoop => "`while` loop", DesugaringKind::BoundModifier => "trait bound modifier", DesugaringKind::Contract => "contract check", + DesugaringKind::PatTyRange => "pattern type", } } } diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index bca9323a50d..f19d4d9f362 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -32,7 +32,6 @@ #![feature(rustc_attrs)] #![feature(rustdoc_internals)] #![feature(slice_as_chunks)] -#![warn(unreachable_pub)] // tidy-alphabetical-end // The code produced by the `Encodable`/`Decodable` derive macros refer to @@ -1057,6 +1056,37 @@ impl Span { } } + /// Returns the `Span` within the syntax context of "within". This is useful when + /// "self" is an expansion from a macro variable, since this can be used for + /// providing extra macro expansion context for certain errors. + /// + /// ```text + /// macro_rules! m { + /// ($ident:ident) => { ($ident,) } + /// } + /// + /// m!(outer_ident); + /// ``` + /// + /// If "self" is the span of the outer_ident, and "within" is the span of the `($ident,)` + /// expr, then this will return the span of the `$ident` macro variable. + pub fn within_macro(self, within: Span, sm: &SourceMap) -> Option<Span> { + match Span::prepare_to_combine(self, within) { + // Only return something if it doesn't overlap with the original span, + // and the span isn't "imported" (i.e. from unavailable sources). + // FIXME: This does limit the usefulness of the error when the macro is + // from a foreign crate; we could also take into account `-Zmacro-backtrace`, + // which doesn't redact this span (but that would mean passing in even more + // args to this function, lol). + Ok((self_, _, parent)) + if self_.hi < self.lo() || self.hi() < self_.lo && !sm.is_imported(within) => + { + Some(Span::new(self_.lo, self_.hi, self_.ctxt, parent)) + } + _ => None, + } + } + pub fn from_inner(self, inner: InnerSpan) -> Span { let span = self.data(); Span::new( diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 172c2faca96..8a8bec35d81 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -33,6 +33,15 @@ symbols! { // Special reserved identifiers used internally for elided lifetimes, // unnamed method parameters, crate root module, error recovery etc. // Matching predicates: `is_any_keyword`, `is_special`/`is_reserved` + // + // Notes about `kw::Empty`: + // - Its use can blur the lines between "empty symbol" and "no symbol". + // Using `Option<Symbol>` is preferable, where possible, because that + // is unambiguous. 
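A hedged sketch of how the new future edition is exercised, using only methods added in the hunk above; the `rustc_span::edition::Edition` import reflects the file being patched, so this only builds inside the compiler tree:

```rust
use std::str::FromStr;

use rustc_span::edition::Edition;

fn check_edition_future() {
    // "future" now parses, alongside the numbered editions.
    let future = Edition::from_str("future").unwrap();

    // Ordering-based checks treat it as newer than every numbered edition...
    assert!(future.at_least_rust_2024());
    // ...and the new predicate gates features tied specifically to it.
    assert!(future.at_least_edition_future());
}
```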
+ // - For dummy symbols that are never used and absolutely must be + // present, it's better to use `sym::dummy` than `kw::Empty`, because + // it's clearer that it's intended as a dummy value, and more likely + // to be detected if it accidentally does get used. Empty: "", PathRoot: "{{root}}", DollarCrate: "$crate", @@ -308,6 +317,9 @@ symbols! { RangeFull, RangeInclusive, RangeInclusiveCopy, + RangeMax, + RangeMin, + RangeSub, RangeTo, RangeToInclusive, Rc, @@ -776,6 +788,7 @@ symbols! { default_method_body_is_const, default_type_parameter_fallback, default_type_params, + define_opaque, delayed_bug_from_inside_query, deny, deprecated, @@ -830,6 +843,7 @@ symbols! { drop_types_in_const, dropck_eyepatch, dropck_parametricity, + dummy: "<!dummy!>", // use this instead of `kw::Empty` for symbols that won't be used dummy_cgu_name, dylib, dyn_compatible_for_dispatch, @@ -859,6 +873,7 @@ symbols! { eprint_macro, eprintln_macro, eq, + ergonomic_clones, ermsb_target_feature, exact_div, except, @@ -1012,6 +1027,7 @@ symbols! { from_residual, from_size_align_unchecked, from_str_method, + from_u16, from_usize, from_yeet, fs_create_dir, @@ -1037,6 +1053,7 @@ symbols! { generic_associated_types_extended, generic_const_exprs, generic_const_items, + generic_const_parameter_types, generic_param_attrs, generic_pattern_types, get_context, @@ -1521,6 +1538,7 @@ symbols! { pattern_complexity_limit, pattern_parentheses, pattern_type, + pattern_type_range_trait, pattern_types, permissions_from_mode, phantom_data, @@ -1624,6 +1642,7 @@ symbols! { quote, range_inclusive_new, raw_dylib, + raw_dylib_elf, raw_eq, raw_identifiers, raw_ref_op, @@ -1711,6 +1730,7 @@ symbols! { rust_cold_cc, rust_eh_catch_typeinfo, rust_eh_personality, + rust_future, rust_logo, rust_out, rustc, @@ -1877,8 +1897,6 @@ symbols! { simd_fma, simd_fmax, simd_fmin, - simd_fpow, - simd_fpowi, simd_fsin, simd_fsqrt, simd_gather, @@ -2084,6 +2102,7 @@ symbols! { type_ascribe, type_ascription, type_changing_struct_update, + type_const, type_id, type_ir_inherent, type_length_limit, @@ -2180,6 +2199,7 @@ symbols! { unwrap, unwrap_binder, unwrap_or, + use_cloned, use_extern_macros, use_nested_groups, used, @@ -2235,6 +2255,7 @@ symbols! { wasm_abi, wasm_import_module, wasm_target_feature, + where_clause_attrs, while_let, windows, windows_subsystem, @@ -2294,11 +2315,23 @@ impl Ident { Ident::new(name, DUMMY_SP) } + /// This is best avoided, because it blurs the lines between "empty + /// identifier" and "no identifier". Using `Option<Ident>` is preferable, + /// where possible, because that is unambiguous. #[inline] pub fn empty() -> Ident { Ident::with_dummy_span(kw::Empty) } + // For dummy identifiers that are never used and absolutely must be + // present, it's better to use `Ident::dummy` than `Ident::Empty`, because + // it's clearer that it's intended as a dummy value, and more likely to be + // detected if it accidentally does get used. + #[inline] + pub fn dummy() -> Ident { + Ident::with_dummy_span(sym::dummy) + } + /// Maps a string to an identifier with a dummy span. 
pub fn from_str(string: &str) -> Ident { Ident::with_dummy_span(Symbol::intern(string)) diff --git a/compiler/rustc_symbol_mangling/src/lib.rs b/compiler/rustc_symbol_mangling/src/lib.rs index 4312c82815c..c4558b5ce8b 100644 --- a/compiler/rustc_symbol_mangling/src/lib.rs +++ b/compiler/rustc_symbol_mangling/src/lib.rs @@ -89,11 +89,11 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(let_chains)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_hir::def::DefKind; diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 4fafd1ac350..bc3923e4b4d 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -169,7 +169,7 @@ impl<'tcx> SymbolMangler<'tcx> { Ok(()) } - fn in_binder<T>( + fn wrap_binder<T>( &mut self, value: &ty::Binder<'tcx, T>, print_value: impl FnOnce(&mut Self, &T) -> Result<(), PrintError>, @@ -413,12 +413,8 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> { } ty::Pat(ty, pat) => match *pat { - ty::PatternKind::Range { start, end, include_end } => { - let consts = [ - start.unwrap_or(self.tcx.consts.unit), - end.unwrap_or(self.tcx.consts.unit), - ty::Const::from_bool(self.tcx, include_end), - ]; + ty::PatternKind::Range { start, end } => { + let consts = [start, end]; // HACK: Represent as tuple until we have something better. // HACK: constants are used in arrays, even if the types don't match. self.push("T"); @@ -471,7 +467,7 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> { ty::FnPtr(sig_tys, hdr) => { let sig = sig_tys.with(hdr); self.push("F"); - self.in_binder(&sig, |cx, sig| { + self.wrap_binder(&sig, |cx, sig| { if sig.safety.is_unsafe() { cx.push("U"); } @@ -554,7 +550,7 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> { // [<Trait> [{<Projection>}]] [{<Auto>}] // Since any predicates after the first one shouldn't change the binders, // just put them all in the binders of the first. - self.in_binder(&predicates[0], |cx, _| { + self.wrap_binder(&predicates[0], |cx, _| { for predicate in predicates.iter() { // It would be nice to be able to validate bound vars here, but // projections can actually include bound vars from super traits diff --git a/compiler/rustc_target/src/callconv/loongarch.rs b/compiler/rustc_target/src/callconv/loongarch.rs index 3fa67c624a7..209d7483e61 100644 --- a/compiler/rustc_target/src/callconv/loongarch.rs +++ b/compiler/rustc_target/src/callconv/loongarch.rs @@ -25,7 +25,7 @@ struct CannotUseFpConv; fn is_loongarch_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool { match arg.layout.backend_repr { - BackendRepr::Vector { .. } => true, + BackendRepr::SimdVector { .. } => true, _ => arg.layout.is_aggregate(), } } @@ -80,7 +80,7 @@ where } } }, - BackendRepr::Vector { .. } => return Err(CannotUseFpConv), + BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv), BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. 
} => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 1c044fe98b3..6d0ee3c7ee5 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -31,6 +31,7 @@ mod sparc64; mod wasm; mod x86; mod x86_64; +mod x86_win32; mod x86_win64; mod xtensa; @@ -357,7 +358,7 @@ impl<'a, Ty> ArgAbi<'a, Ty> { scalar_attrs(&layout, a, Size::ZERO), scalar_attrs(&layout, b, a.size(cx).align_to(b.align(cx).abi)), ), - BackendRepr::Vector { .. } => PassMode::Direct(ArgAttributes::new()), + BackendRepr::SimdVector { .. } => PassMode::Direct(ArgAttributes::new()), BackendRepr::Memory { .. } => Self::indirect_pass_mode(&layout), }; ArgAbi { layout, mode } @@ -649,7 +650,11 @@ impl<'a, Ty> FnAbi<'a, Ty> { }; let reg_struct_return = cx.x86_abi_opt().reg_struct_return; let opts = x86::X86Options { flavor, regparm, reg_struct_return }; - x86::compute_abi_info(cx, self, opts); + if spec.is_like_msvc { + x86_win32::compute_abi_info(cx, self, opts); + } else { + x86::compute_abi_info(cx, self, opts); + } } "x86_64" => match abi { ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self), @@ -759,7 +764,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { if arg_idx.is_none() && arg.layout.size > Primitive::Pointer(AddressSpace::DATA).size(cx) * 2 - && !matches!(arg.layout.backend_repr, BackendRepr::Vector { .. }) + && !matches!(arg.layout.backend_repr, BackendRepr::SimdVector { .. }) { // Return values larger than 2 registers using a return area // pointer. LLVM and Cranelift disagree about how to return @@ -826,7 +831,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { } } - BackendRepr::Vector { .. } => { + BackendRepr::SimdVector { .. } => { // This is a fun case! The gist of what this is doing is // that we want callers and callees to always agree on the // ABI of how they pass SIMD arguments. If we were to *not* diff --git a/compiler/rustc_target/src/callconv/riscv.rs b/compiler/rustc_target/src/callconv/riscv.rs index 785175229b0..7368e225efa 100644 --- a/compiler/rustc_target/src/callconv/riscv.rs +++ b/compiler/rustc_target/src/callconv/riscv.rs @@ -31,7 +31,7 @@ struct CannotUseFpConv; fn is_riscv_aggregate<Ty>(arg: &ArgAbi<'_, Ty>) -> bool { match arg.layout.backend_repr { - BackendRepr::Vector { .. } => true, + BackendRepr::SimdVector { .. } => true, _ => arg.layout.is_aggregate(), } } @@ -86,7 +86,7 @@ where } } }, - BackendRepr::Vector { .. } => return Err(CannotUseFpConv), + BackendRepr::SimdVector { .. } => return Err(CannotUseFpConv), BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. } => match arg_layout.fields { FieldsShape::Primitive => { unreachable!("aggregates can't have `FieldsShape::Primitive`") diff --git a/compiler/rustc_target/src/callconv/s390x.rs b/compiler/rustc_target/src/callconv/s390x.rs index 4f69e32b2e3..1ba792c5acc 100644 --- a/compiler/rustc_target/src/callconv/s390x.rs +++ b/compiler/rustc_target/src/callconv/s390x.rs @@ -8,7 +8,7 @@ use crate::spec::HasTargetSpec; fn classify_ret<Ty>(ret: &mut ArgAbi<'_, Ty>) { let size = ret.layout.size; - if size.bits() <= 128 && matches!(ret.layout.backend_repr, BackendRepr::Vector { .. }) { + if size.bits() <= 128 && matches!(ret.layout.backend_repr, BackendRepr::SimdVector { .. }) { return; } if !ret.layout.is_aggregate() && size.bits() <= 64 { @@ -40,7 +40,7 @@ where let size = arg.layout.size; if size.bits() <= 128 { - if let BackendRepr::Vector { .. 
} = arg.layout.backend_repr { + if let BackendRepr::SimdVector { .. } = arg.layout.backend_repr { // pass non-wrapped vector types using `PassMode::Direct` return; } diff --git a/compiler/rustc_target/src/callconv/x86.rs b/compiler/rustc_target/src/callconv/x86.rs index 7e5aab0201b..6f112b49400 100644 --- a/compiler/rustc_target/src/callconv/x86.rs +++ b/compiler/rustc_target/src/callconv/x86.rs @@ -36,7 +36,7 @@ where if t.abi_return_struct_as_int || opts.reg_struct_return { // According to Clang, everyone but MSVC returns single-element // float aggregates directly in a floating-point register. - if !t.is_like_msvc && fn_abi.ret.layout.is_single_fp_element(cx) { + if fn_abi.ret.layout.is_single_fp_element(cx) { match fn_abi.ret.layout.size.bytes() { 4 => fn_abi.ret.cast_to(Reg::f32()), 8 => fn_abi.ret.cast_to(Reg::f64()), @@ -64,31 +64,11 @@ where continue; } - // FIXME: MSVC 2015+ will pass the first 3 vector arguments in [XYZ]MM0-2 - // See https://reviews.llvm.org/D72114 for Clang behavior - let t = cx.target_spec(); let align_4 = Align::from_bytes(4).unwrap(); let align_16 = Align::from_bytes(16).unwrap(); - if t.is_like_msvc - && arg.layout.is_adt() - && let Some(max_repr_align) = arg.layout.max_repr_align - && max_repr_align > align_4 - { - // MSVC has special rules for overaligned arguments: https://reviews.llvm.org/D72114. - // Summarized here: - // - Arguments with _requested_ alignment > 4 are passed indirectly. - // - For backwards compatibility, arguments with natural alignment > 4 are still passed - // on stack (via `byval`). For example, this includes `double`, `int64_t`, - // and structs containing them, provided they lack an explicit alignment attribute. - assert!( - arg.layout.align.abi >= max_repr_align, - "abi alignment {:?} less than requested alignment {max_repr_align:?}", - arg.layout.align.abi, - ); - arg.make_indirect(); - } else if arg.layout.is_aggregate() { + if arg.layout.is_aggregate() { // We need to compute the alignment of the `byval` argument. The rules can be found in // `X86_32ABIInfo::getTypeStackAlignInBytes` in Clang's `TargetInfo.cpp`. Summarized // here, they are: @@ -109,7 +89,7 @@ where { match layout.backend_repr { BackendRepr::Scalar(_) | BackendRepr::ScalarPair(..) => false, - BackendRepr::Vector { .. } => true, + BackendRepr::SimdVector { .. } => true, BackendRepr::Memory { .. } => { for i in 0..layout.fields.count() { if contains_vector(cx, layout.field(cx, i)) { diff --git a/compiler/rustc_target/src/callconv/x86_64.rs b/compiler/rustc_target/src/callconv/x86_64.rs index ab306e20239..300b19f62e7 100644 --- a/compiler/rustc_target/src/callconv/x86_64.rs +++ b/compiler/rustc_target/src/callconv/x86_64.rs @@ -56,7 +56,7 @@ where Primitive::Float(_) => Class::Sse, }, - BackendRepr::Vector { .. } => Class::Sse, + BackendRepr::SimdVector { .. } => Class::Sse, BackendRepr::ScalarPair(..) | BackendRepr::Memory { .. 
} => { for i in 0..layout.fields.count() { diff --git a/compiler/rustc_target/src/callconv/x86_win32.rs b/compiler/rustc_target/src/callconv/x86_win32.rs new file mode 100644 index 00000000000..554a7368848 --- /dev/null +++ b/compiler/rustc_target/src/callconv/x86_win32.rs @@ -0,0 +1,81 @@ +use rustc_abi::{Align, HasDataLayout, Reg, TyAbiInterface}; + +use crate::callconv::FnAbi; +use crate::spec::HasTargetSpec; + +pub(crate) fn compute_abi_info<'a, Ty, C>( + cx: &C, + fn_abi: &mut FnAbi<'a, Ty>, + opts: super::x86::X86Options, +) where + Ty: TyAbiInterface<'a, C> + Copy, + C: HasDataLayout + HasTargetSpec, +{ + if !fn_abi.ret.is_ignore() { + if fn_abi.ret.layout.is_aggregate() && fn_abi.ret.layout.is_sized() { + // Returning a structure. Most often, this will use + // a hidden first argument. On some platforms, though, + // small structs are returned as integers. + // + // Some links: + // https://www.angelcode.com/dev/callconv/callconv.html + // Clang's ABI handling is in lib/CodeGen/TargetInfo.cpp + let t = cx.target_spec(); + // MSVC does not special-case 1-element float aggregates, unlike others. + // GCC used to apply the SysV rule here, breaking windows-gnu's ABI, but was fixed: + // - reported in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=82028 + // - fixed in https://gcc.gnu.org/bugzilla/show_bug.cgi?id=85667 + if t.abi_return_struct_as_int || opts.reg_struct_return { + match fn_abi.ret.layout.size.bytes() { + 1 => fn_abi.ret.cast_to(Reg::i8()), + 2 => fn_abi.ret.cast_to(Reg::i16()), + 4 => fn_abi.ret.cast_to(Reg::i32()), + 8 => fn_abi.ret.cast_to(Reg::i64()), + _ => fn_abi.ret.make_indirect(), + } + } else { + fn_abi.ret.make_indirect(); + } + } else { + fn_abi.ret.extend_integer_width_to(32); + } + } + + for arg in fn_abi.args.iter_mut() { + if arg.is_ignore() || !arg.layout.is_sized() { + continue; + } + + // FIXME: MSVC 2015+ will pass the first 3 vector arguments in [XYZ]MM0-2 + // See https://reviews.llvm.org/D72114 for Clang behavior + + let align_4 = Align::from_bytes(4).unwrap(); + + if arg.layout.is_adt() + && let Some(max_repr_align) = arg.layout.max_repr_align + && max_repr_align > align_4 + { + // MSVC has special rules for overaligned arguments: https://reviews.llvm.org/D72114. + // Summarized here: + // - Arguments with _requested_ alignment > 4 are passed indirectly. + // - For backwards compatibility, arguments with natural alignment > 4 are still passed + // on stack (via `byval`). For example, this includes `double`, `int64_t`, + // and structs containing them, provided they lack an explicit alignment attribute. + assert!( + arg.layout.align.abi >= max_repr_align, + "abi alignment {:?} less than requested alignment {max_repr_align:?}", + arg.layout.align.abi, + ); + arg.make_indirect(); + } else if arg.layout.is_aggregate() { + // Alignment of the `byval` argument. + // The rules can be found in `X86_32ABIInfo::getTypeStackAlignInBytes` in Clang's `TargetInfo.cpp`. + let byval_align = align_4; + arg.pass_by_stack_offset(Some(byval_align)); + } else { + arg.extend_integer_width_to(32); + } + } + + super::x86::fill_inregs(cx, fn_abi, opts, false); +} diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs index 4d99a9f9ba0..8f8597ea662 100644 --- a/compiler/rustc_target/src/callconv/x86_win64.rs +++ b/compiler/rustc_target/src/callconv/x86_win64.rs @@ -18,7 +18,7 @@ pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<' _ => a.make_indirect(), } } - BackendRepr::Vector { .. 
} => { + BackendRepr::SimdVector { .. } => { // FIXME(eddyb) there should be a size cap here // (probably what clang calls "illegal vectors"). } diff --git a/compiler/rustc_target/src/callconv/xtensa.rs b/compiler/rustc_target/src/callconv/xtensa.rs index 6c030cb3bf7..b687f0e20c6 100644 --- a/compiler/rustc_target/src/callconv/xtensa.rs +++ b/compiler/rustc_target/src/callconv/xtensa.rs @@ -116,7 +116,7 @@ where fn is_xtensa_aggregate<'a, Ty>(arg: &ArgAbi<'a, Ty>) -> bool { match arg.layout.backend_repr { - BackendRepr::Vector { .. } => true, + BackendRepr::SimdVector { .. } => true, _ => arg.layout.is_aggregate(), } } diff --git a/compiler/rustc_target/src/lib.rs b/compiler/rustc_target/src/lib.rs index 7ebe96960ed..a8d7da5692d 100644 --- a/compiler/rustc_target/src/lib.rs +++ b/compiler/rustc_target/src/lib.rs @@ -16,7 +16,6 @@ #![feature(let_chains)] #![feature(rustc_attrs)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use std::path::{Path, PathBuf}; diff --git a/compiler/rustc_target/src/spec/base/linux_wasm.rs b/compiler/rustc_target/src/spec/base/linux_wasm.rs new file mode 100644 index 00000000000..a8c137c22a9 --- /dev/null +++ b/compiler/rustc_target/src/spec/base/linux_wasm.rs @@ -0,0 +1,159 @@ +//! This target is a confluence of Linux and Wasm models, inheriting most +//! aspects from their respective base targets + +use crate::spec::{ + Cc, LinkSelfContainedDefault, LinkerFlavor, PanicStrategy, RelocModel, TargetOptions, TlsModel, + add_link_args, crt_objects, cvs, +}; + +pub(crate) fn opts() -> TargetOptions { + macro_rules! args { + ($prefix:literal) => { + &[ + // By default LLD only gives us one page of stack (64k) which is a + // little small. Default to a larger stack closer to other PC platforms + // (1MB) and users can always inject their own link-args to override this. + concat!($prefix, "-z"), + concat!($prefix, "stack-size=1048576"), + // By default LLD's memory layout is: + // + // 1. First, a blank page + // 2. Next, all static data + // 3. Finally, the main stack (which grows down) + // + // This has the unfortunate consequence that on stack overflows you + // corrupt static data and can cause some exceedingly weird bugs. To + // help detect this a little sooner we instead request that the stack is + // placed before static data. + // + // This means that we'll generate slightly larger binaries as references + // to static data will take more bytes in the ULEB128 encoding, but + // stack overflow will be guaranteed to trap as it underflows instead of + // corrupting static data. + concat!($prefix, "--stack-first"), + // FIXME we probably shouldn't pass this but instead pass an explicit list + // of symbols we'll allow to be undefined. We don't currently have a + // mechanism of knowing, however, which symbols are intended to be imported + // from the environment and which are intended to be imported from other + // objects linked elsewhere. This is a coarse approximation but is sure to + // hide some bugs and frustrate someone at some point, so we should ideally + // work towards a world where we can explicitly list symbols that are + // supposed to be imported and have all other symbols generate errors if + // they remain undefined. + concat!($prefix, "--allow-undefined"), + // LLD only implements C++-like demangling, which doesn't match our own + // mangling scheme. Tell LLD to not demangle anything and leave it up to + // us to demangle these symbols later. 
Currently rustc does not perform + // further demangling, but tools like twiggy and wasm-bindgen are intended + // to do so. + concat!($prefix, "--no-demangle"), + ] + }; + } + + let mut pre_link_args = TargetOptions::link_args(LinkerFlavor::WasmLld(Cc::No), args!("")); + add_link_args(&mut pre_link_args, LinkerFlavor::WasmLld(Cc::Yes), args!("-Wl,")); + + TargetOptions { + is_like_wasm: true, + families: cvs!["wasm", "unix"], + os: "linux".into(), + env: "musl".into(), + + // we allow dynamic linking, but only cdylibs. Basically we allow a + // final library artifact that exports some symbols (a wasm module) but + // we don't allow intermediate `dylib` crate types + dynamic_linking: true, + only_cdylib: true, + + // relatively self-explanatory! + exe_suffix: ".wasm".into(), + dll_prefix: "".into(), + dll_suffix: ".wasm".into(), + eh_frame_header: false, + + max_atomic_width: Some(64), + + // Unwinding doesn't work right now, so the whole target unconditionally + // defaults to panic=abort. Note that this is guaranteed to change in + // the future once unwinding is implemented. Don't rely on this as we're + // basically guaranteed to change it once WebAssembly supports + // exceptions. + panic_strategy: PanicStrategy::Abort, + + // Symbol visibility takes care of this for the WebAssembly. + // Additionally the only known linker, LLD, doesn't support the script + // arguments just yet + limit_rdylib_exports: false, + + // we use the LLD shipped with the Rust toolchain by default + linker: Some("rust-lld".into()), + linker_flavor: LinkerFlavor::WasmLld(Cc::No), + + pre_link_args, + + // FIXME: Figure out cases in which WASM needs to link with a native toolchain. + // + // rust-lang/rust#104137: cannot blindly remove this without putting in + // some other way to compensate for lack of `-nostartfiles` in linker + // invocation. + link_self_contained: LinkSelfContainedDefault::True, + pre_link_objects_self_contained: crt_objects::pre_wasi_self_contained(), + post_link_objects_self_contained: crt_objects::post_wasi_self_contained(), + + // This has no effect in LLVM 8 or prior, but in LLVM 9 and later when + // PIC code is implemented this has quite a drastic effect if it stays + // at the default, `pic`. In an effort to keep wasm binaries as minimal + // as possible we're defaulting to `static` for now, but the hope is + // that eventually we can ship a `pic`-compatible standard library which + // works with `static` as well (or works with some method of generating + // non-relative calls and such later on). + relocation_model: RelocModel::Static, + + // When the atomics feature is activated then these two keys matter, + // otherwise they're basically ignored by the standard library. In this + // mode, however, the `#[thread_local]` attribute works (i.e. + // `has_thread_local`) and we need to get it to work by specifying + // `local-exec` as that's all that's implemented in LLVM today for wasm. + has_thread_local: true, + tls_model: TlsModel::LocalExec, + + // Supporting Linux requires multithreading supported by Wasm's thread + // proposal + singlethread: false, + + // gdb scripts don't work on wasm blobs + emit_debug_gdb_scripts: false, + + // There's more discussion of this at + // https://bugs.llvm.org/show_bug.cgi?id=52442 but the general result is + // that this isn't useful for wasm and has tricky issues with + // representation, so this is disabled. 
+ generate_arange_section: false, + + // Right now this is a bit of a workaround but we're currently saying that + // the target by default has a static crt which we're taking as a signal + // for "use the bundled crt". If that's turned off then the system's crt + // will be used, but this means that default usage of this target doesn't + // need an external compiler but it's still interoperable with an external + // compiler if configured correctly. + crt_static_default: true, + crt_static_respected: true, + + // Allow `+crt-static` to create a "cdylib" output which is just a wasm file + // without a main function. + crt_static_allows_dylibs: true, + + // Wasm start ignores arguments -- relies on API call from interface. + main_needs_argc_argv: false, + + // Wasm toolchains mangle the name of "main" to distinguish between different + // signatures. + entry_name: "__main_void".into(), + + // Wasm Feature flags for supporting Linux + features: "+atomics,+bulk-memory,+mutable-globals,+sign-ext".into(), + + ..Default::default() + } +} diff --git a/compiler/rustc_target/src/spec/base/mod.rs b/compiler/rustc_target/src/spec/base/mod.rs index 6f88be5d37f..e8fdc871785 100644 --- a/compiler/rustc_target/src/spec/base/mod.rs +++ b/compiler/rustc_target/src/spec/base/mod.rs @@ -18,6 +18,7 @@ pub(crate) mod linux_gnu; pub(crate) mod linux_musl; pub(crate) mod linux_ohos; pub(crate) mod linux_uclibc; +pub(crate) mod linux_wasm; pub(crate) mod msvc; pub(crate) mod netbsd; pub(crate) mod nto_qnx; diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 761d2848a05..1887134c575 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -43,7 +43,7 @@ use std::str::FromStr; use std::{fmt, io}; use rustc_abi::{Endian, ExternAbi, Integer, Size, TargetDataLayout, TargetDataLayoutErrors}; -use rustc_data_structures::fx::FxHashSet; +use rustc_data_structures::fx::{FxHashSet, FxIndexSet}; use rustc_fs_util::try_canonicalize; use rustc_macros::{Decodable, Encodable, HashStable_Generic}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; @@ -1915,7 +1915,6 @@ supported_targets! { ("i686-pc-windows-msvc", i686_pc_windows_msvc), ("i686-uwp-windows-msvc", i686_uwp_windows_msvc), ("i686-win7-windows-msvc", i686_win7_windows_msvc), - ("i586-pc-windows-msvc", i586_pc_windows_msvc), ("thumbv7a-pc-windows-msvc", thumbv7a_pc_windows_msvc), ("thumbv7a-uwp-windows-msvc", thumbv7a_uwp_windows_msvc), @@ -1925,6 +1924,7 @@ supported_targets! { ("wasm32-wasip1", wasm32_wasip1), ("wasm32-wasip2", wasm32_wasip2), ("wasm32-wasip1-threads", wasm32_wasip1_threads), + ("wasm32-wali-linux-musl", wasm32_wali_linux_musl), ("wasm64-unknown-unknown", wasm64_unknown_unknown), ("thumbv6m-none-eabi", thumbv6m_none_eabi), @@ -2914,20 +2914,35 @@ impl Target { // On Windows, `extern "system"` behaves like msvc's `__stdcall`. 
// `__stdcall` only applies on x86 and on non-variadic functions: // https://learn.microsoft.com/en-us/cpp/cpp/stdcall?view=msvc-170 - System { unwind } if self.is_like_windows && self.arch == "x86" && !c_variadic => { - Stdcall { unwind } + System { unwind } => { + if self.is_like_windows && self.arch == "x86" && !c_variadic { + Stdcall { unwind } + } else { + C { unwind } + } + } + + EfiApi => { + if self.arch == "arm" { + Aapcs { unwind: false } + } else if self.arch == "x86_64" { + Win64 { unwind: false } + } else { + C { unwind: false } + } } - System { unwind } => C { unwind }, - EfiApi if self.arch == "arm" => Aapcs { unwind: false }, - EfiApi if self.arch == "x86_64" => Win64 { unwind: false }, - EfiApi => C { unwind: false }, // See commentary in `is_abi_supported`. - Stdcall { .. } | Thiscall { .. } if self.arch == "x86" => abi, - Stdcall { unwind } | Thiscall { unwind } => C { unwind }, - Fastcall { .. } if self.arch == "x86" => abi, - Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, - Fastcall { unwind } | Vectorcall { unwind } => C { unwind }, + Stdcall { unwind } | Thiscall { unwind } | Fastcall { unwind } => { + if self.arch == "x86" { abi } else { C { unwind } } + } + Vectorcall { unwind } => { + if ["x86", "x86_64"].contains(&&*self.arch) { + abi + } else { + C { unwind } + } + } // The Windows x64 calling convention we use for `extern "Rust"` // <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation> @@ -3488,7 +3503,15 @@ impl Target { return load_file(&p); } - Err(format!("Could not find specification for target {target_tuple:?}")) + // Leave in a specialized error message for the removed target. + // FIXME: If you see this and it's been a few months after this has been released, + // you can probably remove it. + if target_tuple == "i586-pc-windows-msvc" { + Err("the `i586-pc-windows-msvc` target has been removed. Use the `i686-pc-windows-msvc` target instead.\n\ + Windows 10 (the minimum required OS version) requires a CPU baseline of at least i686 so you can safely switch".into()) + } else { + Err(format!("Could not find specification for target {target_tuple:?}")) + } } TargetTuple::TargetJson { ref contents, .. 
} => { let obj = serde_json::from_str(contents).map_err(|e| e.to_string())?; @@ -3520,6 +3543,59 @@ impl Target { s => s.clone(), } } + + pub fn object_architecture( + &self, + unstable_target_features: &FxIndexSet<Symbol>, + ) -> Option<(object::Architecture, Option<object::SubArchitecture>)> { + use object::Architecture; + Some(match self.arch.as_ref() { + "arm" => (Architecture::Arm, None), + "aarch64" => ( + if self.pointer_width == 32 { + Architecture::Aarch64_Ilp32 + } else { + Architecture::Aarch64 + }, + None, + ), + "x86" => (Architecture::I386, None), + "s390x" => (Architecture::S390x, None), + "mips" | "mips32r6" => (Architecture::Mips, None), + "mips64" | "mips64r6" => (Architecture::Mips64, None), + "x86_64" => ( + if self.pointer_width == 32 { + Architecture::X86_64_X32 + } else { + Architecture::X86_64 + }, + None, + ), + "powerpc" => (Architecture::PowerPc, None), + "powerpc64" => (Architecture::PowerPc64, None), + "riscv32" => (Architecture::Riscv32, None), + "riscv64" => (Architecture::Riscv64, None), + "sparc" => { + if unstable_target_features.contains(&sym::v8plus) { + // Target uses V8+, aka EM_SPARC32PLUS, aka 64-bit V9 but in 32-bit mode + (Architecture::Sparc32Plus, None) + } else { + // Target uses V7 or V8, aka EM_SPARC + (Architecture::Sparc, None) + } + } + "sparc64" => (Architecture::Sparc64, None), + "avr" => (Architecture::Avr, None), + "msp430" => (Architecture::Msp430, None), + "hexagon" => (Architecture::Hexagon, None), + "bpf" => (Architecture::Bpf, None), + "loongarch64" => (Architecture::LoongArch64, None), + "csky" => (Architecture::Csky, None), + "arm64ec" => (Architecture::Aarch64, Some(object::SubArchitecture::Arm64EC)), + // Unsupported architecture. + _ => return None, + }) + } } /// Either a target tuple string or a path to a JSON file. 
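Aside: to make the reworked fallback rules in the adjust-ABI hunk above easier to follow, here is a minimal standalone sketch. The enum and function below are simplified stand-ins, not the compiler's real ExternAbi or Target types, and thiscall is folded into the same rule as stdcall for brevity: platform-specific conventions are honoured only on the architectures that define them and quietly degrade to the plain C convention everywhere else.

#[derive(Clone, Copy, Debug, PartialEq)]
enum Abi {
    C { unwind: bool },
    Stdcall { unwind: bool },
    Fastcall { unwind: bool },
    Vectorcall { unwind: bool },
}

fn adjust_abi(arch: &str, abi: Abi) -> Abi {
    match abi {
        // `stdcall`/`fastcall` (and `thiscall`, omitted here) only exist on x86;
        // elsewhere they fall back to the plain C convention.
        Abi::Stdcall { unwind } | Abi::Fastcall { unwind } => {
            if arch == "x86" { abi } else { Abi::C { unwind } }
        }
        // `vectorcall` is honoured on x86 and x86_64 only.
        Abi::Vectorcall { unwind } => {
            if arch == "x86" || arch == "x86_64" { abi } else { Abi::C { unwind } }
        }
        other => other,
    }
}

fn main() {
    assert_eq!(adjust_abi("x86", Abi::Stdcall { unwind: false }), Abi::Stdcall { unwind: false });
    assert_eq!(adjust_abi("aarch64", Abi::Stdcall { unwind: false }), Abi::C { unwind: false });
    assert_eq!(adjust_abi("x86_64", Abi::Vectorcall { unwind: true }), Abi::Vectorcall { unwind: true });
    println!("abi fallback rules hold");
}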
diff --git a/compiler/rustc_target/src/spec/targets/i586_pc_windows_msvc.rs b/compiler/rustc_target/src/spec/targets/i586_pc_windows_msvc.rs deleted file mode 100644 index 394e6f9e6bf..00000000000 --- a/compiler/rustc_target/src/spec/targets/i586_pc_windows_msvc.rs +++ /dev/null @@ -1,9 +0,0 @@ -use crate::spec::Target; - -pub(crate) fn target() -> Target { - let mut base = super::i686_pc_windows_msvc::target(); - base.rustc_abi = None; // overwrite the SSE2 ABI set by the base target - base.cpu = "pentium".into(); - base.llvm_target = "i586-pc-windows-msvc".into(); - base -} diff --git a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs index 03bae9b5977..3c1d18e0777 100644 --- a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs +++ b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_gnuspe.rs @@ -24,7 +24,7 @@ pub(crate) fn target() -> Target { options: TargetOptions { abi: "spe".into(), endian: Endian::Big, - features: "+secure-plt".into(), + features: "+secure-plt,+msync".into(), mcount: "_mcount".into(), ..base }, diff --git a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs index df4fd75b0bd..30d0d9cb60a 100644 --- a/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs +++ b/compiler/rustc_target/src/spec/targets/powerpc_unknown_linux_muslspe.rs @@ -26,6 +26,7 @@ pub(crate) fn target() -> Target { options: TargetOptions { abi: "spe".into(), endian: Endian::Big, + features: "+msync".into(), mcount: "_mcount".into(), ..base }, diff --git a/compiler/rustc_target/src/spec/targets/wasm32_wali_linux_musl.rs b/compiler/rustc_target/src/spec/targets/wasm32_wali_linux_musl.rs new file mode 100644 index 00000000000..a0eb4a254fc --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/wasm32_wali_linux_musl.rs @@ -0,0 +1,29 @@ +//! The `wasm32-wali-linux-musl` target is a wasm32 target compliant with the +//! [WebAssembly Linux Interface](https://github.com/arjunr2/WALI). 
+ +use crate::spec::{Cc, LinkerFlavor, Target, TargetMetadata, base}; + +pub(crate) fn target() -> Target { + let mut options = base::linux_wasm::opts(); + + options + .add_pre_link_args(LinkerFlavor::WasmLld(Cc::No), &["--export-memory", "--shared-memory"]); + options.add_pre_link_args( + LinkerFlavor::WasmLld(Cc::Yes), + &["--target=wasm32-wasi-threads", "-Wl,--export-memory,", "-Wl,--shared-memory"], + ); + + Target { + llvm_target: "wasm32-wasi".into(), + metadata: TargetMetadata { + description: Some("WebAssembly Linux Interface with musl-libc".into()), + tier: Some(3), + host_tools: Some(false), + std: None, + }, + pointer_width: 32, + data_layout: "e-m:e-p:32:32-p10:8:8-p20:8:8-i64:64-i128:128-n32:64-S128-ni:1:10:20".into(), + arch: "wasm32".into(), + options, + } +} diff --git a/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_espidf.rs b/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_espidf.rs index dd98f34d323..cffdaa90727 100644 --- a/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_espidf.rs +++ b/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_espidf.rs @@ -20,7 +20,7 @@ pub(crate) fn target() -> Target { vendor: "espressif".into(), executables: true, - cpu: "esp32-s2".into(), + cpu: "esp32s2".into(), linker: Some("xtensa-esp32s2-elf-gcc".into()), // See https://github.com/espressif/rust-esp32-example/issues/3#issuecomment-861054477 diff --git a/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_none_elf.rs b/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_none_elf.rs index 29bcf12cbaf..7bf5834ab0a 100644 --- a/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_none_elf.rs +++ b/compiler/rustc_target/src/spec/targets/xtensa_esp32s2_none_elf.rs @@ -16,7 +16,7 @@ pub(crate) fn target() -> Target { options: TargetOptions { vendor: "espressif".into(), - cpu: "esp32-s2".into(), + cpu: "esp32s2".into(), linker: Some("xtensa-esp32s2-elf-gcc".into()), max_atomic_width: Some(32), features: "+forced-atomics".into(), diff --git a/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_espidf.rs b/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_espidf.rs index dd6e7b6c3e8..2e4afc00541 100644 --- a/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_espidf.rs +++ b/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_espidf.rs @@ -20,7 +20,7 @@ pub(crate) fn target() -> Target { vendor: "espressif".into(), executables: true, - cpu: "esp32-s3".into(), + cpu: "esp32s3".into(), linker: Some("xtensa-esp32s3-elf-gcc".into()), // The esp32s3 only supports native 32bit atomics. 
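Aside: the new wasm32-wali-linux-musl spec above registers every linker flag twice because the same wasm-ld linker can be reached either directly or through a C compiler driver, and the driver form has to wrap each flag in -Wl,. The sketch below models that bookkeeping with made-up type names; it is not rustc's actual LinkerFlavor or add_pre_link_args machinery, just an illustration of the accumulation pattern.

use std::collections::BTreeMap;

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum LinkerFlavor {
    WasmLldDirect, // rust-lld invoked as wasm-ld: flags are passed verbatim
    WasmLldViaCc,  // wasm-ld reached through a cc driver: flags need a `-Wl,` prefix
}

type LinkArgs = BTreeMap<LinkerFlavor, Vec<String>>;

fn add_pre_link_args(args: &mut LinkArgs, flavor: LinkerFlavor, new: &[&str]) {
    // Accumulate flags per flavor; later additions append to earlier ones.
    args.entry(flavor).or_default().extend(new.iter().map(|s| s.to_string()));
}

fn main() {
    let mut pre_link_args = LinkArgs::new();
    add_pre_link_args(
        &mut pre_link_args,
        LinkerFlavor::WasmLldDirect,
        &["--export-memory", "--shared-memory"],
    );
    add_pre_link_args(
        &mut pre_link_args,
        LinkerFlavor::WasmLldViaCc,
        &["-Wl,--export-memory", "-Wl,--shared-memory"],
    );
    for (flavor, args) in &pre_link_args {
        println!("{flavor:?}: {}", args.join(" "));
    }
}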
diff --git a/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_none_elf.rs b/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_none_elf.rs index ddc909f387e..d506888d8ee 100644 --- a/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_none_elf.rs +++ b/compiler/rustc_target/src/spec/targets/xtensa_esp32s3_none_elf.rs @@ -16,7 +16,7 @@ pub(crate) fn target() -> Target { options: TargetOptions { vendor: "espressif".into(), - cpu: "esp32-s3".into(), + cpu: "esp32s3".into(), linker: Some("xtensa-esp32s3-elf-gcc".into()), max_atomic_width: Some(32), atomic_cas: true, diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs index 65a85151bef..6d3b6608ea2 100644 --- a/compiler/rustc_target/src/target_features.rs +++ b/compiler/rustc_target/src/target_features.rs @@ -461,6 +461,7 @@ const HEXAGON_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[ static POWERPC_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[ // tidy-alphabetical-start ("altivec", Unstable(sym::powerpc_target_feature), &[]), + ("msync", Unstable(sym::powerpc_target_feature), &[]), ("partword-atomics", Unstable(sym::powerpc_target_feature), &[]), ("power10-vector", Unstable(sym::powerpc_target_feature), &["power9-vector"]), ("power8-altivec", Unstable(sym::powerpc_target_feature), &["altivec"]), @@ -767,17 +768,15 @@ impl Target { } } - pub fn implied_target_features<'a>( - &self, - base_features: impl Iterator<Item = &'a str>, - ) -> FxHashSet<&'a str> { + // Note: the returned set includes `base_feature`. + pub fn implied_target_features<'a>(&self, base_feature: &'a str) -> FxHashSet<&'a str> { let implied_features = self.rust_target_features().iter().map(|(f, _, i)| (f, i)).collect::<FxHashMap<_, _>>(); - // implied target features have their own implied target features, so we traverse the - // map until there are no more features to add + // Implied target features have their own implied target features, so we traverse the + // map until there are no more features to add. let mut features = FxHashSet::default(); - let mut new_features = base_features.collect::<Vec<&str>>(); + let mut new_features = vec![base_feature]; while let Some(new_feature) = new_features.pop() { if features.insert(new_feature) { if let Some(implied_features) = implied_features.get(&new_feature) { diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs index a618bae269f..baf2489b2b8 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs @@ -65,7 +65,9 @@ use rustc_middle::bug; use rustc_middle::dep_graph::DepContext; use rustc_middle::traits::PatternOriginExpr; use rustc_middle::ty::error::{ExpectedFound, TypeError, TypeErrorToStringExt}; -use rustc_middle::ty::print::{PrintError, PrintTraitRefExt as _, with_forced_trimmed_paths}; +use rustc_middle::ty::print::{ + PrintError, PrintTraitRefExt as _, WrapBinderMode, with_forced_trimmed_paths, +}; use rustc_middle::ty::{ self, List, ParamEnv, Region, Ty, TyCtxt, TypeFoldable, TypeSuperVisitable, TypeVisitable, TypeVisitableExt, @@ -507,7 +509,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { hir::MatchSource::TryDesugar(scrut_hir_id), ) => { if let Some(ty::error::ExpectedFound { expected, .. 
}) = exp_found { - let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id); + let scrut_expr = self.tcx.hir_expect_expr(scrut_hir_id); let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind { let arg_expr = args.first().expect("try desugaring call w/out arg"); self.typeck_results @@ -546,7 +548,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { }) => match source { hir::MatchSource::TryDesugar(scrut_hir_id) => { if let Some(ty::error::ExpectedFound { expected, .. }) = exp_found { - let scrut_expr = self.tcx.hir().expect_expr(scrut_hir_id); + let scrut_expr = self.tcx.hir_expect_expr(scrut_hir_id); let scrut_ty = if let hir::ExprKind::Call(_, args) = &scrut_expr.kind { let arg_expr = args.first().expect("try desugaring call w/out arg"); self.typeck_results @@ -835,7 +837,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let get_lifetimes = |sig| { use rustc_hir::def::Namespace; let (sig, reg) = ty::print::FmtPrinter::new(self.tcx, Namespace::TypeNS) - .name_all_regions(sig) + .name_all_regions(sig, WrapBinderMode::ForAll) .unwrap(); let lts: Vec<String> = reg.into_items().map(|(_, kind)| kind.to_string()).into_sorted_stable_ord(); @@ -2418,7 +2420,7 @@ impl<'tcx> ObligationCause<'tcx> { pub struct ObligationCauseAsDiagArg<'tcx>(pub ObligationCause<'tcx>); impl IntoDiagArg for ObligationCauseAsDiagArg<'_> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { let kind = match self.0.code() { ObligationCauseCode::CompareImplItem { kind: ty::AssocKind::Fn, .. } => "method_compat", ObligationCauseCode::CompareImplItem { kind: ty::AssocKind::Type, .. } => "type_compat", diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs index bed9734f389..40958ec1088 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/need_type_info.rs @@ -137,7 +137,7 @@ impl InferenceDiagnosticsParentData { } impl IntoDiagArg for UnderspecifiedArgKind { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { let kind = match self { Self::Type { .. } => "type", Self::Const { is_parameter: true } => "const_with_param", @@ -1351,7 +1351,7 @@ impl<'a, 'tcx> Visitor<'tcx> for FindInferSourceVisitor<'a, 'tcx> { && !has_impl_trait(def_id) // FIXME(fn_delegation): In delegation item argument spans are equal to last path // segment. This leads to ICE's when emitting `multipart_suggestion`. 
- && tcx.hir().opt_delegation_sig_id(expr.hir_id.owner.def_id).is_none() + && tcx.hir_opt_delegation_sig_id(expr.hir_id.owner.def_id).is_none() { let successor = method_args.get(0).map_or_else(|| (")", span.hi()), |arg| (", ", arg.span.lo())); diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/placeholder_error.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/placeholder_error.rs index aaaefd81d19..5056161e117 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/placeholder_error.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/nice_region_error/placeholder_error.rs @@ -31,7 +31,7 @@ impl<'tcx, T> IntoDiagArg for Highlighted<'tcx, T> where T: for<'a> Print<'tcx, FmtPrinter<'a, 'tcx>>, { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { rustc_errors::DiagArgValue::Str(self.to_string().into()) } } diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs index 514615735a5..5583deda99a 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs @@ -395,11 +395,28 @@ impl<T> Trait<T> for X { let sp = tcx .def_ident_span(body_owner_def_id) .unwrap_or_else(|| tcx.def_span(body_owner_def_id)); - diag.span_note( - sp, - "this item must have the opaque type in its signature in order to \ - be able to register hidden types", - ); + let mut alias_def_id = opaque_ty.def_id; + while let DefKind::OpaqueTy = tcx.def_kind(alias_def_id) { + alias_def_id = tcx.parent(alias_def_id); + } + let opaque_path = tcx.def_path_str(alias_def_id); + // FIXME(type_alias_impl_trait): make this a structured suggestion + match tcx.opaque_ty_origin(opaque_ty.def_id) { + rustc_hir::OpaqueTyOrigin::FnReturn { .. } => {} + rustc_hir::OpaqueTyOrigin::AsyncFn { .. } => {} + rustc_hir::OpaqueTyOrigin::TyAlias { + in_assoc_ty: false, .. + } => { + diag.span_note( + sp, + format!("this item must have a `#[define_opaque({opaque_path})]` \ + attribute to be able to define hidden types"), + ); + } + rustc_hir::OpaqueTyOrigin::TyAlias { + in_assoc_ty: true, .. + } => {} + } } // If two if arms can be coerced to a trait object, provide a structured // suggestion. @@ -726,7 +743,7 @@ fn foo(&self) -> Self::T { String::new() } if let ty::Alias(ty::Opaque, ty::AliasTy { def_id, .. 
}) = *proj_ty.self_ty().kind() { let opaque_local_def_id = def_id.as_local(); let opaque_hir_ty = if let Some(opaque_local_def_id) = opaque_local_def_id { - tcx.hir().expect_opaque_ty(opaque_local_def_id) + tcx.hir_expect_opaque_ty(opaque_local_def_id) } else { return false; }; diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs index fecb38ab597..c7f0a88f951 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs @@ -1023,7 +1023,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { format!(" for lifetime parameter `{name}`") } infer::UpvarRegion(ref upvar_id, _) => { - let var_name = self.tcx.hir().name(upvar_id.var_path.hir_id); + let var_name = self.tcx.hir_name(upvar_id.var_path.hir_id); format!(" for capture of `{var_name}` by closure") } infer::Nll(..) => bug!("NLL variable found in lexical phase"), diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/suggest.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/suggest.rs index a7e68e6419d..cdbb92f4c7b 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/suggest.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/suggest.rs @@ -778,8 +778,8 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { let exp_local_id = exp_def_id.as_local()?; match ( - &self.tcx.hir().expect_opaque_ty(last_local_id), - &self.tcx.hir().expect_opaque_ty(exp_local_id), + &self.tcx.hir_expect_opaque_ty(last_local_id), + &self.tcx.hir_expect_opaque_ty(exp_local_id), ) { ( hir::OpaqueTy { bounds: last_bounds, .. }, diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs index f15f1b78b52..d673e5672a0 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs @@ -1,8 +1,6 @@ use std::ops::ControlFlow; -use rustc_errors::{ - Applicability, Diag, E0283, E0284, E0790, MultiSpan, StashKey, struct_span_code_err, -}; +use rustc_errors::{Applicability, Diag, E0283, E0284, E0790, MultiSpan, struct_span_code_err}; use rustc_hir as hir; use rustc_hir::LangItem; use rustc_hir::def::{DefKind, Res}; @@ -197,7 +195,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // be ignoring the fact that we don't KNOW the type works // out. Though even that would probably be harmless, given that // we're only talking about builtin traits, which are known to be - // inhabited. We used to check for `self.tcx.sess.has_errors()` to + // inhabited. We used to check for `self.tainted_by_errors()` to // avoid inundating the user with unnecessary errors, but we now // check upstream for type errors and don't add the obligations to // begin with in those cases. 
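Aside: the note_and_explain hunk above replaces the vague "must have the opaque type in its signature" note with one that points at the #[define_opaque(...)] attribute. The snippet below sketches the usage that note refers to, assuming a recent nightly with the type_alias_impl_trait feature and the define_opaque attribute; it is illustrative only and not part of this diff.

#![feature(type_alias_impl_trait)]

use std::fmt::Debug;

// An opaque type alias; its concrete ("hidden") type is not written here.
type Alias = impl Debug;

// Without this attribute the new note fires: an item must name the opaque
// alias in `#[define_opaque(...)]` to be allowed to register its hidden type.
#[define_opaque(Alias)]
fn defining_use() -> Alias {
    42u32 // registers `u32` as the hidden type of `Alias`
}

fn main() {
    println!("{:?}", defining_use());
}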
@@ -211,7 +209,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { TypeAnnotationNeeded::E0282, false, ); - return err.stash(span, StashKey::MaybeForgetReturn).unwrap(); + return err.emit(); } Some(e) => return e, } diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs index 596f794568c..e2bdd52ba7c 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs @@ -829,7 +829,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { && let ty::Closure(closure_def_id, _) | ty::CoroutineClosure(closure_def_id, _) = *typeck_results.node_type(arg_hir_id).kind() { - // Otherwise, extract the closure kind from the obligation. + // Otherwise, extract the closure kind from the obligation, + // but only if we actually have an argument to deduce the + // closure type from... let mut err = self.report_closure_error( &obligation, closure_def_id, @@ -844,63 +846,72 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let self_ty = trait_pred.self_ty().skip_binder(); - if let Some(expected_kind) = self.tcx.fn_trait_kind_from_def_id(trait_pred.def_id()) { - let (closure_def_id, found_args, by_ref_captures) = match *self_ty.kind() { - ty::Closure(def_id, args) => { - (def_id, args.as_closure().sig().map_bound(|sig| sig.inputs()[0]), None) - } - ty::CoroutineClosure(def_id, args) => ( - def_id, - args.as_coroutine_closure() - .coroutine_closure_sig() - .map_bound(|sig| sig.tupled_inputs_ty), - Some(args.as_coroutine_closure().coroutine_captures_by_ref_ty()), - ), - _ => return None, + let (expected_kind, trait_prefix) = + if let Some(expected_kind) = self.tcx.fn_trait_kind_from_def_id(trait_pred.def_id()) { + (expected_kind, "") + } else if let Some(expected_kind) = + self.tcx.async_fn_trait_kind_from_def_id(trait_pred.def_id()) + { + (expected_kind, "Async") + } else { + return None; }; - let expected_args = - trait_pred.map_bound(|trait_pred| trait_pred.trait_ref.args.type_at(1)); - - // Verify that the arguments are compatible. If the signature is - // mismatched, then we have a totally different error to report. - if self.enter_forall(found_args, |found_args| { - self.enter_forall(expected_args, |expected_args| { - !self.can_eq(obligation.param_env, expected_args, found_args) - }) - }) { - return None; + let (closure_def_id, found_args, by_ref_captures) = match *self_ty.kind() { + ty::Closure(def_id, args) => { + (def_id, args.as_closure().sig().map_bound(|sig| sig.inputs()[0]), None) } + ty::CoroutineClosure(def_id, args) => ( + def_id, + args.as_coroutine_closure() + .coroutine_closure_sig() + .map_bound(|sig| sig.tupled_inputs_ty), + Some(args.as_coroutine_closure().coroutine_captures_by_ref_ty()), + ), + _ => return None, + }; - if let Some(found_kind) = self.closure_kind(self_ty) - && !found_kind.extends(expected_kind) - { - let mut err = self.report_closure_error( - &obligation, - closure_def_id, - found_kind, - expected_kind, - "", - ); - self.note_obligation_cause(&mut err, &obligation); - return Some(err.emit()); - } + let expected_args = trait_pred.map_bound(|trait_pred| trait_pred.trait_ref.args.type_at(1)); - // If the closure has captures, then perhaps the reason that the trait - // is unimplemented is because async closures don't implement `Fn`/`FnMut` - // if they have captures. 
- if let Some(by_ref_captures) = by_ref_captures - && let ty::FnPtr(sig_tys, _) = by_ref_captures.kind() - && !sig_tys.skip_binder().output().is_unit() - { - let mut err = self.dcx().create_err(AsyncClosureNotFn { - span: self.tcx.def_span(closure_def_id), - kind: expected_kind.as_str(), - }); - self.note_obligation_cause(&mut err, &obligation); - return Some(err.emit()); - } + // Verify that the arguments are compatible. If the signature is + // mismatched, then we have a totally different error to report. + if self.enter_forall(found_args, |found_args| { + self.enter_forall(expected_args, |expected_args| { + !self.can_eq(obligation.param_env, expected_args, found_args) + }) + }) { + return None; + } + + if let Some(found_kind) = self.closure_kind(self_ty) + && !found_kind.extends(expected_kind) + { + let mut err = self.report_closure_error( + &obligation, + closure_def_id, + found_kind, + expected_kind, + trait_prefix, + ); + self.note_obligation_cause(&mut err, &obligation); + return Some(err.emit()); + } + + // If the closure has captures, then perhaps the reason that the trait + // is unimplemented is because async closures don't implement `Fn`/`FnMut` + // if they have captures. + if let Some(by_ref_captures) = by_ref_captures + && let ty::FnPtr(sig_tys, _) = by_ref_captures.kind() + && !sig_tys.skip_binder().output().is_unit() + { + let mut err = self.dcx().create_err(AsyncClosureNotFn { + span: self.tcx.def_span(closure_def_id), + kind: expected_kind.as_str(), + }); + self.note_obligation_cause(&mut err, &obligation); + return Some(err.emit()); } + None } @@ -1189,9 +1200,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let span = obligation.cause.span; let mut diag = match ty.kind() { - _ if ty.has_param() => { - span_bug!(span, "const param tys cannot mention other generic parameters"); - } ty::Float(_) => { struct_span_code_err!( self.dcx(), @@ -2419,6 +2427,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { &mut vec![], &mut Default::default(), ); + self.suggest_swapping_lhs_and_rhs( + err, + obligation.predicate, + obligation.param_env, + obligation.cause.code(), + ); self.suggest_unsized_bound_if_applicable(err, obligation); if let Some(span) = err.span.primary_span() && let Some(mut diag) = @@ -2530,9 +2544,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { return GetSafeTransmuteErrorAndReason::Silent; }; - let Some(assume) = - rustc_transmute::Assume::from_const(self.infcx.tcx, obligation.param_env, assume) - else { + let Some(assume) = rustc_transmute::Assume::from_const(self.infcx.tcx, assume) else { self.dcx().span_delayed_bug( span, "Unable to construct rustc_transmute::Assume where it was previously possible", @@ -2544,11 +2556,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let src = trait_pred.trait_ref.args.type_at(1); let err_msg = format!("`{src}` cannot be safely transmuted into `{dst}`"); - match rustc_transmute::TransmuteTypeEnv::new(self.infcx).is_transmutable( - obligation.cause, - src_and_dst, - assume, - ) { + match rustc_transmute::TransmuteTypeEnv::new(self.infcx.tcx) + .is_transmutable(src_and_dst, assume) + { Answer::No(reason) => { let safe_transmute_explanation = match reason { rustc_transmute::Reason::SrcIsNotYetSupported => { @@ -3251,7 +3261,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { obligation: &PredicateObligation<'tcx>, span: Span, ) -> Result<Diag<'a>, ErrorGuaranteed> { - if !self.tcx.features().generic_const_exprs() { + if !self.tcx.features().generic_const_exprs() + && !self.tcx.features().min_generic_const_args() + { let guar = self .dcx() 
.struct_span_err(span, "constant expression depends on a generic parameter") diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs index c7e71a626dc..4f89f3c2b49 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs @@ -22,7 +22,6 @@ use rustc_hir::{ expr_needs_parens, is_range_literal, }; use rustc_infer::infer::{BoundRegionConversionTime, DefineOpaqueTypes, InferCtxt, InferOk}; -use rustc_middle::hir::map; use rustc_middle::traits::IsConstable; use rustc_middle::ty::error::TypeError; use rustc_middle::ty::print::{ @@ -93,7 +92,7 @@ impl<'a, 'tcx> CoroutineData<'a, 'tcx> { fn get_from_await_ty<F>( &self, visitor: AwaitsVisitor, - hir: map::Map<'tcx>, + tcx: TyCtxt<'tcx>, ty_matches: F, ) -> Option<Span> where @@ -102,7 +101,7 @@ impl<'a, 'tcx> CoroutineData<'a, 'tcx> { visitor .awaits .into_iter() - .map(|id| hir.expect_expr(id)) + .map(|id| tcx.hir_expect_expr(id)) .find(|await_expr| ty_matches(ty::Binder::dummy(self.0.expr_ty_adjusted(await_expr)))) .map(|expr| expr.span) } @@ -136,7 +135,10 @@ pub fn suggest_restriction<'tcx, G: EmissionGuarantee>( ) { if hir_generics.where_clause_span.from_expansion() || hir_generics.where_clause_span.desugaring_kind().is_some() - || projection.is_some_and(|projection| tcx.is_impl_trait_in_trait(projection.def_id)) + || projection.is_some_and(|projection| { + tcx.is_impl_trait_in_trait(projection.def_id) + || tcx.lookup_stability(projection.def_id).is_some_and(|stab| stab.is_unstable()) + }) { return; } @@ -2177,8 +2179,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { err: &mut Diag<'_, G>, obligation: &PredicateObligation<'tcx>, ) -> bool { - let hir = self.tcx.hir(); - // Attempt to detect an async-await error by looking at the obligation causes, looking // for a coroutine to be present. // @@ -2347,7 +2347,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let mut interior_or_upvar_span = None; - let from_awaited_ty = coroutine_data.get_from_await_ty(visitor, hir, ty_matches); + let from_awaited_ty = coroutine_data.get_from_await_ty(visitor, self.tcx, ty_matches); debug!(?from_awaited_ty); // Avoid disclosing internal information to downstream crates. @@ -2425,7 +2425,6 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // Special case the primary error message when send or sync is the trait that was // not implemented. 
- let hir = self.tcx.hir(); let trait_explanation = if let Some(name @ (sym::Send | sym::Sync)) = self.tcx.get_diagnostic_name(trait_pred.def_id()) { @@ -2452,7 +2451,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { .parent(coroutine_did) .as_local() .map(|parent_did| self.tcx.local_def_id_to_hir_id(parent_did)) - .and_then(|parent_hir_id| hir.opt_name(parent_hir_id)) + .and_then(|parent_hir_id| self.tcx.hir_opt_name(parent_hir_id)) .map(|name| { format!("future returned by `{name}` is not {trait_name}") })?, @@ -2476,7 +2475,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { .parent(coroutine_did) .as_local() .map(|parent_did| self.tcx.local_def_id_to_hir_id(parent_did)) - .and_then(|parent_hir_id| hir.opt_name(parent_hir_id)) + .and_then(|parent_hir_id| self.tcx.hir_opt_name(parent_hir_id)) .map(|name| { format!("async iterator returned by `{name}` is not {trait_name}") })?, @@ -2499,7 +2498,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { .parent(coroutine_did) .as_local() .map(|parent_did| self.tcx.local_def_id_to_hir_id(parent_did)) - .and_then(|parent_hir_id| hir.opt_name(parent_hir_id)) + .and_then(|parent_hir_id| self.tcx.hir_opt_name(parent_hir_id)) .map(|name| { format!("iterator returned by `{name}` is not {trait_name}") })? @@ -2692,7 +2691,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { | ObligationCauseCode::LetElse | ObligationCauseCode::BinOp { .. } | ObligationCauseCode::AscribeUserTypeProvePredicate(..) - | ObligationCauseCode::DropImpl + | ObligationCauseCode::AlwaysApplicableImpl | ObligationCauseCode::ConstParam(_) | ObligationCauseCode::ReferenceOutlivesReferent(..) | ObligationCauseCode::ObjectTypeBound(..) => {} @@ -3123,8 +3122,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { Applicability::MachineApplicable, ); } - ObligationCauseCode::ConstSized => { - err.note("constant expressions must have a statically known size"); + ObligationCauseCode::SizedConstOrStatic => { + err.note("statics and constants must have a statically known size"); } ObligationCauseCode::InlineAsmSized => { err.note("all inline asm arguments must have a statically known size"); @@ -3188,7 +3187,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { false }; - if !is_upvar_tys_infer_tuple { + let is_builtin_async_fn_trait = + tcx.async_fn_trait_kind_from_def_id(data.parent_trait_pred.def_id()).is_some(); + + if !is_upvar_tys_infer_tuple && !is_builtin_async_fn_trait { let ty_str = tcx.short_string(ty, err.long_ty_path()); let msg = format!("required because it appears within the type `{ty_str}`"); match ty.kind() { @@ -3563,7 +3565,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { ObligationCauseCode::OpaqueReturnType(expr_info) => { let (expr_ty, expr) = if let Some((expr_ty, hir_id)) = expr_info { let expr_ty = tcx.short_string(expr_ty, err.long_ty_path()); - let expr = tcx.hir().expect_expr(hir_id); + let expr = tcx.hir_expect_expr(hir_id); (expr_ty, expr) } else if let Some(body_id) = tcx.hir_node_by_def_id(body_id).body_id() && let body = tcx.hir_body(body_id) @@ -4911,6 +4913,53 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { ); true } + pub(crate) fn suggest_swapping_lhs_and_rhs<T>( + &self, + err: &mut Diag<'_>, + predicate: T, + param_env: ty::ParamEnv<'tcx>, + cause_code: &ObligationCauseCode<'tcx>, + ) where + T: Upcast<TyCtxt<'tcx>, ty::Predicate<'tcx>>, + { + let tcx = self.tcx; + let predicate = predicate.upcast(tcx); + match *cause_code { + ObligationCauseCode::BinOp { + lhs_hir_id, + rhs_hir_id: Some(rhs_hir_id), + rhs_span: Some(rhs_span), + .. 
+ } if let Some(typeck_results) = &self.typeck_results + && let hir::Node::Expr(lhs) = tcx.hir_node(lhs_hir_id) + && let hir::Node::Expr(rhs) = tcx.hir_node(rhs_hir_id) + && let Some(lhs_ty) = typeck_results.expr_ty_opt(lhs) + && let Some(rhs_ty) = typeck_results.expr_ty_opt(rhs) => + { + if let Some(pred) = predicate.as_trait_clause() + && tcx.is_lang_item(pred.def_id(), LangItem::PartialEq) + && self + .infcx + .type_implements_trait(pred.def_id(), [rhs_ty, lhs_ty], param_env) + .must_apply_modulo_regions() + { + let lhs_span = tcx.hir().span(lhs_hir_id); + let sm = tcx.sess.source_map(); + if let Ok(rhs_snippet) = sm.span_to_snippet(rhs_span) + && let Ok(lhs_snippet) = sm.span_to_snippet(lhs_span) + { + err.note(format!("`{rhs_ty}` implements `PartialEq<{lhs_ty}>`")); + err.multipart_suggestion( + "consider swapping the equality", + vec![(lhs_span, rhs_snippet), (rhs_span, lhs_snippet)], + Applicability::MaybeIncorrect, + ); + } + } + } + _ => {} + } + } } /// Add a hint to add a missing borrow or remove an unnecessary one. diff --git a/compiler/rustc_trait_selection/src/errors.rs b/compiler/rustc_trait_selection/src/errors.rs index 23aa3800660..fe859eb53cd 100644 --- a/compiler/rustc_trait_selection/src/errors.rs +++ b/compiler/rustc_trait_selection/src/errors.rs @@ -784,10 +784,10 @@ pub enum TyOrSig<'tcx> { } impl IntoDiagArg for TyOrSig<'_> { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, path: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { match self { - TyOrSig::Ty(ty) => ty.into_diag_arg(), - TyOrSig::ClosureSig(sig) => sig.into_diag_arg(), + TyOrSig::Ty(ty) => ty.into_diag_arg(path), + TyOrSig::ClosureSig(sig) => sig.into_diag_arg(path), } } } diff --git a/compiler/rustc_trait_selection/src/errors/note_and_explain.rs b/compiler/rustc_trait_selection/src/errors/note_and_explain.rs index 4601ddf678a..46622246a17 100644 --- a/compiler/rustc_trait_selection/src/errors/note_and_explain.rs +++ b/compiler/rustc_trait_selection/src/errors/note_and_explain.rs @@ -105,7 +105,7 @@ pub enum SuffixKind { } impl IntoDiagArg for PrefixKind { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { let kind = match self { Self::Empty => "empty", Self::RefValidFor => "ref_valid_for", @@ -127,7 +127,7 @@ impl IntoDiagArg for PrefixKind { } impl IntoDiagArg for SuffixKind { - fn into_diag_arg(self) -> rustc_errors::DiagArgValue { + fn into_diag_arg(self, _: &mut Option<std::path::PathBuf>) -> rustc_errors::DiagArgValue { let kind = match self { Self::Empty => "empty", Self::Continues => "continues", diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs index f373706b296..5cf0600ade8 100644 --- a/compiler/rustc_trait_selection/src/infer.rs +++ b/compiler/rustc_trait_selection/src/infer.rs @@ -53,6 +53,16 @@ impl<'tcx> InferCtxt<'tcx> { traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, clone_def_id) } + fn type_is_use_cloned_modulo_regions( + &self, + param_env: ty::ParamEnv<'tcx>, + ty: Ty<'tcx>, + ) -> bool { + let ty = self.resolve_vars_if_possible(ty); + let use_cloned_def_id = self.tcx.require_lang_item(LangItem::UseCloned, None); + traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, use_cloned_def_id) + } + fn type_is_sized_modulo_regions(&self, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool { let lang_item = self.tcx.require_lang_item(LangItem::Sized, None); 
traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, lang_item) diff --git a/compiler/rustc_trait_selection/src/lib.rs b/compiler/rustc_trait_selection/src/lib.rs index b235d0da83c..b18fb0fb8fd 100644 --- a/compiler/rustc_trait_selection/src/lib.rs +++ b/compiler/rustc_trait_selection/src/lib.rs @@ -31,7 +31,6 @@ #![feature(unwrap_infallible)] #![feature(yeet_expr)] #![recursion_limit = "512"] // For rustdoc -#![warn(unreachable_pub)] // For rustdoc // tidy-alphabetical-end pub mod error_reporting; diff --git a/compiler/rustc_trait_selection/src/regions.rs b/compiler/rustc_trait_selection/src/regions.rs index 55171754618..068e90b00b8 100644 --- a/compiler/rustc_trait_selection/src/regions.rs +++ b/compiler/rustc_trait_selection/src/regions.rs @@ -17,13 +17,7 @@ impl<'tcx> OutlivesEnvironment<'tcx> { param_env: ty::ParamEnv<'tcx>, assumed_wf_tys: impl IntoIterator<Item = Ty<'tcx>>, ) -> Self { - Self::new_with_implied_bounds_compat( - infcx, - body_id, - param_env, - assumed_wf_tys, - !infcx.tcx.sess.opts.unstable_opts.no_implied_bounds_compat, - ) + Self::new_with_implied_bounds_compat(infcx, body_id, param_env, assumed_wf_tys, false) } fn new_with_implied_bounds_compat( @@ -31,7 +25,7 @@ impl<'tcx> OutlivesEnvironment<'tcx> { body_id: LocalDefId, param_env: ty::ParamEnv<'tcx>, assumed_wf_tys: impl IntoIterator<Item = Ty<'tcx>>, - implied_bounds_compat: bool, + disable_implied_bounds_hack: bool, ) -> Self { let mut bounds = vec![]; @@ -59,11 +53,11 @@ impl<'tcx> OutlivesEnvironment<'tcx> { OutlivesEnvironment::from_normalized_bounds( param_env, bounds, - infcx.implied_bounds_tys_with_compat( + infcx.implied_bounds_tys( body_id, param_env, assumed_wf_tys, - implied_bounds_compat, + disable_implied_bounds_hack, ), ) } diff --git a/compiler/rustc_trait_selection/src/solve/delegate.rs b/compiler/rustc_trait_selection/src/solve/delegate.rs index 58d8a3a6254..e69bad095e3 100644 --- a/compiler/rustc_trait_selection/src/solve/delegate.rs +++ b/compiler/rustc_trait_selection/src/solve/delegate.rs @@ -7,7 +7,6 @@ use rustc_infer::infer::canonical::{ Canonical, CanonicalExt as _, CanonicalQueryInput, CanonicalVarInfo, CanonicalVarValues, }; use rustc_infer::infer::{InferCtxt, RegionVariableOrigin, TyCtxtInferExt}; -use rustc_infer::traits::ObligationCause; use rustc_infer::traits::solve::Goal; use rustc_middle::ty::fold::TypeFoldable; use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt as _}; @@ -222,7 +221,6 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< // register candidates. We probably need to register >1 since we may have an OR of ANDs. fn is_transmutable( &self, - param_env: ty::ParamEnv<'tcx>, dst: Ty<'tcx>, src: Ty<'tcx>, assume: ty::Const<'tcx>, @@ -231,16 +229,14 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< // which will ICE for region vars. 
let (dst, src) = self.tcx.erase_regions((dst, src)); - let Some(assume) = rustc_transmute::Assume::from_const(self.tcx, param_env, assume) else { + let Some(assume) = rustc_transmute::Assume::from_const(self.tcx, assume) else { return Err(NoSolution); }; // FIXME(transmutability): This really should be returning nested goals for `Answer::If*` - match rustc_transmute::TransmuteTypeEnv::new(&self.0).is_transmutable( - ObligationCause::dummy(), - rustc_transmute::Types { src, dst }, - assume, - ) { + match rustc_transmute::TransmuteTypeEnv::new(self.0.tcx) + .is_transmutable(rustc_transmute::Types { src, dst }, assume) + { rustc_transmute::Answer::Yes => Ok(Certainty::Yes), rustc_transmute::Answer::No(_) | rustc_transmute::Answer::If(_) => Err(NoSolution), } } diff --git a/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs b/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs index 982782bc57c..352ac7c1a4e 100644 --- a/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs +++ b/compiler/rustc_trait_selection/src/solve/fulfill/derive_errors.rs @@ -438,7 +438,10 @@ impl<'tcx> ProofTreeVisitor<'tcx> for BestObligation<'tcx> { let obligation; match (child_mode, nested_goal.source()) { - (ChildMode::Trait(_) | ChildMode::Host(_), GoalSource::Misc) => { + ( + ChildMode::Trait(_) | ChildMode::Host(_), + GoalSource::Misc | GoalSource::TypeRelating | GoalSource::NormalizeGoal(_), + ) => { continue; } (ChildMode::Trait(parent_trait_pred), GoalSource::ImplWhereBound) => { diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs index bdee6ca2afe..39333082acd 100644 --- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs +++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs @@ -2,7 +2,7 @@ //! //! For concrete constants, this is fairly simple as we can just try and evaluate it. //! -//! When dealing with polymorphic constants, for example `std::mem::size_of::<T>() - 1`, +//! When dealing with polymorphic constants, for example `size_of::<T>() - 1`, //! this is not as easy. //! //! In this case we try to build an abstract representation of this constant using @@ -85,6 +85,12 @@ pub fn is_const_evaluatable<'tcx>( } _ => bug!("unexpected constkind in `is_const_evalautable: {unexpanded_ct:?}`"), } + } else if tcx.features().min_generic_const_args() { + // This is a sanity check to make sure that non-generic consts are checked to + // be evaluatable in case they aren't checked elsewhere. This will NOT error + // if the const uses generics, as desired.
+ crate::traits::evaluate_const(infcx, unexpanded_ct, param_env); + Ok(()) } else { let uv = match unexpanded_ct.kind() { ty::ConstKind::Unevaluated(uv) => uv, diff --git a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs index d4502be6ccf..3fceada2510 100644 --- a/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs +++ b/compiler/rustc_trait_selection/src/traits/dyn_compatibility.rs @@ -24,7 +24,9 @@ use super::elaborate; use crate::infer::TyCtxtInferExt; pub use crate::traits::DynCompatibilityViolation; use crate::traits::query::evaluate_obligation::InferCtxtExt; -use crate::traits::{MethodViolationCode, Obligation, ObligationCause, util}; +use crate::traits::{ + MethodViolationCode, Obligation, ObligationCause, normalize_param_env_or_error, util, +}; /// Returns the dyn-compatibility violations that affect HIR ty lowering. /// @@ -579,8 +581,8 @@ fn receiver_is_dispatchable<'tcx>( let unsized_receiver_ty = receiver_for_self_ty(tcx, receiver_ty, unsized_self_ty, method.def_id); - // create a modified param env, with `Self: Unsize<U>` and `U: Trait` added to caller bounds - // `U: ?Sized` is already implied here + // create a modified param env, with `Self: Unsize<U>` and `U: Trait` (and all of + // its supertraits) added to caller bounds. `U: ?Sized` is already implied here. let param_env = { let param_env = tcx.param_env(method.def_id); @@ -598,10 +600,13 @@ fn receiver_is_dispatchable<'tcx>( ty::TraitRef::new_from_args(tcx, trait_def_id, args).upcast(tcx) }; - let caller_bounds = - param_env.caller_bounds().iter().chain([unsize_predicate, trait_predicate]); - - ty::ParamEnv::new(tcx.mk_clauses_from_iter(caller_bounds)) + normalize_param_env_or_error( + tcx, + ty::ParamEnv::new(tcx.mk_clauses_from_iter( + param_env.caller_bounds().iter().chain([unsize_predicate, trait_predicate]), + )), + ObligationCause::dummy_with_span(tcx.def_span(method.def_id)), + ) }; // Receiver: DispatchFromDyn<Receiver[Self => U]> diff --git a/compiler/rustc_trait_selection/src/traits/effects.rs b/compiler/rustc_trait_selection/src/traits/effects.rs index b32909efe0b..3c127416cbf 100644 --- a/compiler/rustc_trait_selection/src/traits/effects.rs +++ b/compiler/rustc_trait_selection/src/traits/effects.rs @@ -31,6 +31,8 @@ pub fn evaluate_host_effect_obligation<'tcx>( ); } + let ref obligation = selcx.infcx.resolve_vars_if_possible(obligation.clone()); + // Force ambiguity for infer self ty. 
if obligation.predicate.self_ty().is_ty_var() { return Err(EvaluationFailure::Ambiguous); diff --git a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs index 18932695807..68983ef80fa 100644 --- a/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/outlives_bounds.rs @@ -36,7 +36,7 @@ fn implied_outlives_bounds<'a, 'tcx>( param_env: ty::ParamEnv<'tcx>, body_id: LocalDefId, ty: Ty<'tcx>, - compat: bool, + disable_implied_bounds_hack: bool, ) -> Vec<OutlivesBound<'tcx>> { let ty = infcx.resolve_vars_if_possible(ty); let ty = OpportunisticRegionResolver::new(infcx).fold_ty(ty); @@ -52,11 +52,8 @@ fn implied_outlives_bounds<'a, 'tcx>( let mut canonical_var_values = OriginalQueryValues::default(); let input = ImpliedOutlivesBounds { ty }; let canonical = infcx.canonicalize_query(param_env.and(input), &mut canonical_var_values); - let implied_bounds_result = if compat { - infcx.tcx.implied_outlives_bounds_compat(canonical) - } else { - infcx.tcx.implied_outlives_bounds(canonical) - }; + let implied_bounds_result = + infcx.tcx.implied_outlives_bounds((canonical, disable_implied_bounds_hack)); let Ok(canonical_result) = implied_bounds_result else { return vec![]; }; @@ -110,14 +107,15 @@ fn implied_outlives_bounds<'a, 'tcx>( impl<'tcx> InferCtxt<'tcx> { /// Do *NOT* call this directly. You probably want to construct a `OutlivesEnvironment` /// instead if you're interested in the implied bounds for a given signature. - fn implied_bounds_tys_with_compat<Tys: IntoIterator<Item = Ty<'tcx>>>( + fn implied_bounds_tys<Tys: IntoIterator<Item = Ty<'tcx>>>( &self, body_id: LocalDefId, param_env: ParamEnv<'tcx>, tys: Tys, - compat: bool, + disable_implied_bounds_hack: bool, ) -> impl Iterator<Item = OutlivesBound<'tcx>> { - tys.into_iter() - .flat_map(move |ty| implied_outlives_bounds(self, param_env, body_id, ty, compat)) + tys.into_iter().flat_map(move |ty| { + implied_outlives_bounds(self, param_env, body_id, ty, disable_implied_bounds_hack) + }) } } diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index de5aaa5bd97..4d9cf6620d6 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -1232,8 +1232,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( // why we special case object types. false } - ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _) - | ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => { + ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _) => { // These traits have no associated types. selcx.tcx().dcx().span_delayed_bug( obligation.cause.span, @@ -1325,8 +1324,7 @@ fn confirm_select_candidate<'cx, 'tcx>( } ImplSource::Builtin(BuiltinImplSource::Object { .. }, _) | ImplSource::Param(..) - | ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _) - | ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => { + | ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. 
}, _) => { // we don't create Select candidates with this kind of resolution span_bug!( obligation.cause.span, diff --git a/compiler/rustc_trait_selection/src/traits/query/normalize.rs b/compiler/rustc_trait_selection/src/traits/query/normalize.rs index 2ef9d5421ba..6bf76eaee5c 100644 --- a/compiler/rustc_trait_selection/src/traits/query/normalize.rs +++ b/compiler/rustc_trait_selection/src/traits/query/normalize.rs @@ -31,20 +31,19 @@ impl<'a, 'tcx> At<'a, 'tcx> { /// normalized. If you don't care about regions, you should prefer /// `normalize_erasing_regions`, which is more efficient. /// - /// If the normalization succeeds and is unambiguous, returns back - /// the normalized value along with various outlives relations (in - /// the form of obligations that must be discharged). + /// If the normalization succeeds, returns back the normalized + /// value along with various outlives relations (in the form of + /// obligations that must be discharged). /// - /// N.B., this will *eventually* be the main means of - /// normalizing, but for now should be used only when we actually - /// know that normalization will succeed, since error reporting - /// and other details are still "under development". - /// - /// This normalization should *only* be used when the projection does not - /// have possible ambiguity or may not be well-formed. + /// This normalization should *only* be used when the projection is well-formed and + /// does not have possible ambiguity (contains inference variables). /// /// After codegen, when lifetimes do not matter, it is preferable to instead /// use [`TyCtxt::normalize_erasing_regions`], which wraps this procedure. + /// + /// N.B. Once the new solver is stabilized this method of normalization will + /// likely be removed as trait solver operations are already cached by the query + /// system making this redundant. fn query_normalize<T>(self, value: T) -> Result<Normalized<'tcx, T>, NoSolution> where T: TypeFoldable<TyCtxt<'tcx>>, @@ -210,8 +209,6 @@ impl<'a, 'tcx> FallibleTypeFolder<TyCtxt<'tcx>> for QueryNormalizer<'a, 'tcx> { // See note in `rustc_trait_selection::traits::project` about why we // wait to fold the args. - - // Wrap this in a closure so we don't accidentally return from the outer function let res = match kind { ty::Opaque => { // Only normalize `impl Trait` outside of type inference, usually in codegen. 
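The rewritten documentation above leaves the caller of `query_normalize` responsible for proving the obligations it returns. A minimal sketch of that contract, for illustration only (the helper name `normalize_and_prove`, the `value`/`obligations` fields of `Normalized`, and the `ObligationCtxt::register_obligations` call are assumptions, not taken from this diff; imports are elided):

    fn normalize_and_prove<'tcx, T: TypeFoldable<TyCtxt<'tcx>>>(
        ocx: &ObligationCtxt<'_, 'tcx>,
        cause: &ObligationCause<'tcx>,
        param_env: ty::ParamEnv<'tcx>,
        value: T,
    ) -> Result<T, NoSolution> {
        // Per the doc comment above: only sound for well-formed projections without
        // inference-variable ambiguity; the returned obligations (mostly outlives
        // relations) still have to be registered and proven by the caller.
        let Normalized { value, obligations } =
            ocx.infcx.at(cause, param_env).query_normalize(value)?;
        ocx.register_obligations(obligations);
        Ok(value)
    }

After codegen, when regions no longer matter, the doc comment above instead points to `TyCtxt::normalize_erasing_regions` as the preferred entry point.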
diff --git a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs index ec0b7903396..f98529860ff 100644 --- a/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs +++ b/compiler/rustc_trait_selection/src/traits/query/type_op/implied_outlives_bounds.rs @@ -1,15 +1,16 @@ +use std::ops::ControlFlow; + +use rustc_infer::infer::RegionObligation; use rustc_infer::infer::canonical::CanonicalQueryInput; -use rustc_infer::infer::resolve::OpportunisticRegionResolver; use rustc_infer::traits::query::OutlivesBound; use rustc_infer::traits::query::type_op::ImpliedOutlivesBounds; use rustc_middle::infer::canonical::CanonicalQueryResponse; use rustc_middle::traits::ObligationCause; -use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, TypeFolder, TypeVisitableExt}; +use rustc_middle::ty::{self, ParamEnvAnd, Ty, TyCtxt, TypeVisitable, TypeVisitor}; use rustc_span::def_id::CRATE_DEF_ID; -use rustc_span::{DUMMY_SP, Span}; +use rustc_span::{DUMMY_SP, Span, sym}; use rustc_type_ir::outlives::{Component, push_outlives_components}; use smallvec::{SmallVec, smallvec}; -use tracing::debug; use crate::traits::query::NoSolution; use crate::traits::{ObligationCtxt, wf}; @@ -35,11 +36,7 @@ impl<'tcx> super::QueryTypeOp<'tcx> for ImpliedOutlivesBounds<'tcx> { tcx: TyCtxt<'tcx>, canonicalized: CanonicalQueryInput<'tcx, ParamEnvAnd<'tcx, Self>>, ) -> Result<CanonicalQueryResponse<'tcx, Self::QueryResponse>, NoSolution> { - if tcx.sess.opts.unstable_opts.no_implied_bounds_compat { - tcx.implied_outlives_bounds(canonicalized) - } else { - tcx.implied_outlives_bounds_compat(canonicalized) - } + tcx.implied_outlives_bounds((canonicalized, false)) } fn perform_locally_with_next_solver( @@ -47,11 +44,7 @@ impl<'tcx> super::QueryTypeOp<'tcx> for ImpliedOutlivesBounds<'tcx> { key: ParamEnvAnd<'tcx, Self>, span: Span, ) -> Result<Self::QueryResponse, NoSolution> { - if ocx.infcx.tcx.sess.opts.unstable_opts.no_implied_bounds_compat { - compute_implied_outlives_bounds_inner(ocx, key.param_env, key.value.ty, span) - } else { - compute_implied_outlives_bounds_compat_inner(ocx, key.param_env, key.value.ty, span) - } + compute_implied_outlives_bounds_inner(ocx, key.param_env, key.value.ty, span, false) } } @@ -60,18 +53,15 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>, span: Span, + disable_implied_bounds_hack: bool, ) -> Result<Vec<OutlivesBound<'tcx>>, NoSolution> { - let normalize_op = |ty| -> Result<_, NoSolution> { + let normalize_ty = |ty| -> Result<_, NoSolution> { // We must normalize the type so we can compute the right outlives components. // for example, if we have some constrained param type like `T: Trait<Out = U>`, // and we know that `&'a T::Out` is WF, then we want to imply `U: 'a`. let ty = ocx .deeply_normalize(&ObligationCause::dummy_with_span(span), param_env, ty) .map_err(|_| NoSolution)?; - if !ocx.select_all_or_error().is_empty() { - return Err(NoSolution); - } - let ty = OpportunisticRegionResolver::new(&ocx.infcx).fold_ty(ty); Ok(ty) }; @@ -81,7 +71,7 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( // guaranteed to be a subset of the original type, so we need to store the // WF args we've computed in a set. 
let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default(); - let mut wf_args = vec![ty.into(), normalize_op(ty)?.into()]; + let mut wf_args = vec![ty.into(), normalize_ty(ty)?.into()]; let mut outlives_bounds: Vec<OutlivesBound<'tcx>> = vec![]; @@ -96,8 +86,14 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( .into_iter() .flatten() { - assert!(!obligation.has_escaping_bound_vars()); - let Some(pred) = obligation.predicate.kind().no_bound_vars() else { + let pred = ocx + .deeply_normalize( + &ObligationCause::dummy_with_span(span), + param_env, + obligation.predicate, + ) + .map_err(|_| NoSolution)?; + let Some(pred) = pred.kind().no_bound_vars() else { continue; }; match pred { @@ -130,7 +126,6 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( ty_a, r_b, ))) => { - let ty_a = normalize_op(ty_a)?; let mut components = smallvec![]; push_outlives_components(ocx.infcx.tcx, ty_a, &mut components); outlives_bounds.extend(implied_bounds_from_components(r_b, components)) @@ -139,141 +134,48 @@ pub fn compute_implied_outlives_bounds_inner<'tcx>( } } - Ok(outlives_bounds) -} - -pub fn compute_implied_outlives_bounds_compat_inner<'tcx>( - ocx: &ObligationCtxt<'_, 'tcx>, - param_env: ty::ParamEnv<'tcx>, - ty: Ty<'tcx>, - span: Span, -) -> Result<Vec<OutlivesBound<'tcx>>, NoSolution> { - let tcx = ocx.infcx.tcx; - - // Sometimes when we ask what it takes for T: WF, we get back that - // U: WF is required; in that case, we push U onto this stack and - // process it next. Because the resulting predicates aren't always - // guaranteed to be a subset of the original type, so we need to store the - // WF args we've computed in a set. - let mut checked_wf_args = rustc_data_structures::fx::FxHashSet::default(); - let mut wf_args = vec![ty.into()]; - - let mut outlives_bounds: Vec<ty::OutlivesPredicate<'tcx, ty::GenericArg<'tcx>>> = vec![]; - - while let Some(arg) = wf_args.pop() { - if !checked_wf_args.insert(arg) { - continue; + // If we detect `bevy_ecs::*::ParamSet` in the WF args list (and `disable_implied_bounds_hack` + // or `-Zno-implied-bounds-compat` are not set), then use the registered outlives obligations + // as implied bounds. + if !disable_implied_bounds_hack + && !ocx.infcx.tcx.sess.opts.unstable_opts.no_implied_bounds_compat + && ty.visit_with(&mut ContainsBevyParamSet { tcx: ocx.infcx.tcx }).is_break() + { + for RegionObligation { sup_type, sub_region, .. } in + ocx.infcx.take_registered_region_obligations() + { + let mut components = smallvec![]; + push_outlives_components(ocx.infcx.tcx, sup_type, &mut components); + outlives_bounds.extend(implied_bounds_from_components(sub_region, components)); } + } - // Compute the obligations for `arg` to be well-formed. If `arg` is - // an unresolved inference variable, just instantiated an empty set - // -- because the return type here is going to be things we *add* - // to the environment, it's always ok for this set to be smaller - // than the ultimate set. (Note: normally there won't be - // unresolved inference variables here anyway, but there might be - // during typeck under some circumstances.) - // - // FIXME(@lcnr): It's not really "always fine", having fewer implied - // bounds can be backward incompatible, e.g. #101951 was caused by - // us not dealing with inference vars in `TypeOutlives` predicates. 
- let obligations = - wf::obligations(ocx.infcx, param_env, CRATE_DEF_ID, 0, arg, span).unwrap_or_default(); + Ok(outlives_bounds) +} - for obligation in obligations { - debug!(?obligation); - assert!(!obligation.has_escaping_bound_vars()); +struct ContainsBevyParamSet<'tcx> { + tcx: TyCtxt<'tcx>, +} - // While these predicates should all be implied by other parts of - // the program, they are still relevant as they may constrain - // inference variables, which is necessary to add the correct - // implied bounds in some cases, mostly when dealing with projections. - // - // Another important point here: we only register `Projection` - // predicates, since otherwise we might register outlives - // predicates containing inference variables, and we don't - // learn anything new from those. - if obligation.predicate.has_non_region_infer() { - match obligation.predicate.kind().skip_binder() { - ty::PredicateKind::Clause(ty::ClauseKind::Projection(..)) - | ty::PredicateKind::AliasRelate(..) => { - ocx.register_obligation(obligation.clone()); - } - _ => {} - } - } +impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for ContainsBevyParamSet<'tcx> { + type Result = ControlFlow<()>; - let pred = match obligation.predicate.kind().no_bound_vars() { - None => continue, - Some(pred) => pred, - }; - match pred { - // FIXME(const_generics): Make sure that `<'a, 'b, const N: &'a &'b u32>` is sound - // if we ever support that - ty::PredicateKind::Clause(ty::ClauseKind::Trait(..)) - | ty::PredicateKind::Clause(ty::ClauseKind::HostEffect(..)) - | ty::PredicateKind::Clause(ty::ClauseKind::ConstArgHasType(..)) - | ty::PredicateKind::Subtype(..) - | ty::PredicateKind::Coerce(..) - | ty::PredicateKind::Clause(ty::ClauseKind::Projection(..)) - | ty::PredicateKind::DynCompatible(..) - | ty::PredicateKind::Clause(ty::ClauseKind::ConstEvaluatable(..)) - | ty::PredicateKind::ConstEquate(..) - | ty::PredicateKind::Ambiguous - | ty::PredicateKind::NormalizesTo(..) - | ty::PredicateKind::AliasRelate(..) => {} - - // We need to search through *all* WellFormed predicates - ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(arg)) => { - wf_args.push(arg); + fn visit_ty(&mut self, t: Ty<'tcx>) -> Self::Result { + // We only care to match `ParamSet<T>` or `&ParamSet<T>`. + match t.kind() { + ty::Adt(def, _) => { + if self.tcx.item_name(def.did()) == sym::ParamSet + && self.tcx.crate_name(def.did().krate) == sym::bevy_ecs + { + return ControlFlow::Break(()); } - - // We need to register region relationships - ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives( - ty::OutlivesPredicate(r_a, r_b), - )) => outlives_bounds.push(ty::OutlivesPredicate(r_a.into(), r_b)), - - ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(ty::OutlivesPredicate( - ty_a, - r_b, - ))) => outlives_bounds.push(ty::OutlivesPredicate(ty_a.into(), r_b)), } + ty::Ref(_, ty, _) => ty.visit_with(self)?, + _ => {} } - } - // This call to `select_all_or_error` is necessary to constrain inference variables, which we - // use further down when computing the implied bounds. - match ocx.select_all_or_error().as_slice() { - [] => (), - _ => return Err(NoSolution), + ControlFlow::Continue(()) } - - // We lazily compute the outlives components as - // `select_all_or_error` constrains inference variables. 
- let mut implied_bounds = Vec::new(); - for ty::OutlivesPredicate(a, r_b) in outlives_bounds { - match a.unpack() { - ty::GenericArgKind::Lifetime(r_a) => { - implied_bounds.push(OutlivesBound::RegionSubRegion(r_b, r_a)) - } - ty::GenericArgKind::Type(ty_a) => { - let mut ty_a = ocx.infcx.resolve_vars_if_possible(ty_a); - // Need to manually normalize in the new solver as `wf::obligations` does not. - if ocx.infcx.next_trait_solver() { - ty_a = ocx - .deeply_normalize(&ObligationCause::dummy_with_span(span), param_env, ty_a) - .map_err(|_| NoSolution)?; - } - let mut components = smallvec![]; - push_outlives_components(tcx, ty_a, &mut components); - implied_bounds.extend(implied_bounds_from_components(r_b, components)) - } - ty::GenericArgKind::Const(_) => { - unreachable!("consts do not participate in outlives bounds") - } - } - } - - Ok(implied_bounds) } /// When we have an implied bound that `T: 'a`, we can further break diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 11b6b826efe..a8d8003ead6 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -1017,13 +1017,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } } - // `(.., T)` -> `(.., U)` - (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { - if tys_a.len() == tys_b.len() { - candidates.vec.push(BuiltinUnsizeCandidate); - } - } - _ => {} }; } diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index 32cbb97e314..6db97fc321a 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -7,7 +7,6 @@ //! [rustc dev guide]: //! https://rustc-dev-guide.rust-lang.org/traits/resolution.html#confirmation -use std::iter; use std::ops::ControlFlow; use rustc_ast::Mutability; @@ -410,13 +409,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let predicate = obligation.predicate.skip_binder(); let mut assume = predicate.trait_ref.args.const_at(2); - // FIXME(min_generic_const_exprs): We should shallowly normalize this. + // FIXME(mgca): We should shallowly normalize this. 
if self.tcx().features().generic_const_exprs() { assume = crate::traits::evaluate_const(self.infcx, assume, obligation.param_env) } - let Some(assume) = - rustc_transmute::Assume::from_const(self.infcx.tcx, obligation.param_env, assume) - else { + let Some(assume) = rustc_transmute::Assume::from_const(self.infcx.tcx, assume) else { return Err(Unimplemented); }; @@ -424,12 +421,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { let src = predicate.trait_ref.args.type_at(1); debug!(?src, ?dst); - let mut transmute_env = rustc_transmute::TransmuteTypeEnv::new(self.infcx); - let maybe_transmutable = transmute_env.is_transmutable( - obligation.cause.clone(), - rustc_transmute::Types { dst, src }, - assume, - ); + let mut transmute_env = rustc_transmute::TransmuteTypeEnv::new(self.infcx.tcx); + let maybe_transmutable = + transmute_env.is_transmutable(rustc_transmute::Types { dst, src }, assume); let fully_flattened = match maybe_transmutable { Answer::No(_) => Err(Unimplemented)?, @@ -989,8 +983,10 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { return Err(SelectionError::Unimplemented); } } else { - nested.push(obligation.with( + nested.push(Obligation::new( self.tcx(), + obligation.derived_cause(ObligationCauseCode::BuiltinDerived), + obligation.param_env, ty::TraitRef::new( self.tcx(), self.tcx().require_lang_item( @@ -1320,34 +1316,6 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ImplSource::Builtin(BuiltinImplSource::Misc, nested) } - // `(.., T)` -> `(.., U)` - (&ty::Tuple(tys_a), &ty::Tuple(tys_b)) => { - assert_eq!(tys_a.len(), tys_b.len()); - - // The last field of the tuple has to exist. - let (&a_last, a_mid) = tys_a.split_last().ok_or(Unimplemented)?; - let &b_last = tys_b.last().unwrap(); - - // Check that the source tuple with the target's - // last element is equal to the target. - let new_tuple = - Ty::new_tup_from_iter(tcx, a_mid.iter().copied().chain(iter::once(b_last))); - let InferOk { mut obligations, .. } = self - .infcx - .at(&obligation.cause, obligation.param_env) - .eq(DefineOpaqueTypes::Yes, target, new_tuple) - .map_err(|_| Unimplemented)?; - - // Add a nested `T: Unsize<U>` predicate. 
- let last_unsize_obligation = obligation.with( - tcx, - ty::TraitRef::new(tcx, obligation.predicate.def_id(), [a_last, b_last]), - ); - obligations.push(last_unsize_obligation); - - ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, obligations) - } - _ => bug!("source: {source}, target: {target}"), }) } diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 436ce3dddd9..e1adabbeaa6 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -9,7 +9,7 @@ use std::ops::ControlFlow; use std::{cmp, iter}; use hir::def::DefKind; -use rustc_data_structures::fx::{FxHashSet, FxIndexMap, FxIndexSet}; +use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_errors::{Diag, EmissionGuarantee}; use rustc_hir as hir; @@ -25,7 +25,6 @@ use rustc_middle::dep_graph::{DepNodeIndex, dep_kinds}; pub use rustc_middle::traits::select::*; use rustc_middle::ty::abstract_const::NotConstEvaluatable; use rustc_middle::ty::error::TypeErrorToStringExt; -use rustc_middle::ty::fold::fold_regions; use rustc_middle::ty::print::{PrintTraitRefExt as _, with_no_trimmed_paths}; use rustc_middle::ty::{ self, GenericArgsRef, PolyProjectionPredicate, Ty, TyCtxt, TypeFoldable, TypeVisitableExt, @@ -1008,7 +1007,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { // depend on its particular value in order to work, so we can clear // out the param env and get better caching. debug!("in global"); - obligation.param_env = obligation.param_env.without_caller_bounds(); + obligation.param_env = ty::ParamEnv::empty(); } let stack = self.push_stack(previous_stack, &obligation); @@ -1225,15 +1224,21 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { /// that recursion is ok. This routine returns `true` if the top of the /// stack (`cycle[0]`): /// - /// - is a defaulted trait, + /// - is a coinductive trait: an auto-trait or `Sized`, /// - it also appears in the backtrace at some position `X`, /// - all the predicates at positions `X..` between `X` and the top are - /// also defaulted traits. + /// also coinductive traits. 
pub(crate) fn coinductive_match<I>(&mut self, mut cycle: I) -> bool where I: Iterator<Item = ty::Predicate<'tcx>>, { - cycle.all(|predicate| predicate.is_coinductive(self.tcx())) + cycle.all(|p| match p.kind().skip_binder() { + ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => { + self.infcx.tcx.trait_is_coinductive(data.def_id()) + } + ty::PredicateKind::Clause(ty::ClauseKind::WellFormed(_)) => true, + _ => false, + }) } /// Further evaluates `candidate` to decide whether all type parameters match and whether nested @@ -1920,9 +1925,9 @@ impl<'tcx> SelectionContext<'_, 'tcx> { let mut impl_candidate = None; for c in impls { if let Some(prev) = impl_candidate.replace(c) { - if self.prefer_lhs_over_victim(has_non_region_infer, c, prev) { + if self.prefer_lhs_over_victim(has_non_region_infer, c, prev.0) { // Ok, prefer `c` over the previous entry - } else if self.prefer_lhs_over_victim(has_non_region_infer, prev, c) { + } else if self.prefer_lhs_over_victim(has_non_region_infer, prev, c.0) { // Ok, keep `prev` instead of the new entry impl_candidate = Some(prev); } else { @@ -1981,7 +1986,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { &self, has_non_region_infer: bool, (lhs, lhs_evaluation): (DefId, EvaluationResult), - (victim, victim_evaluation): (DefId, EvaluationResult), + victim: DefId, ) -> bool { let tcx = self.tcx(); // See if we can toss out `victim` based on specialization. @@ -1997,14 +2002,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> { } match tcx.impls_are_allowed_to_overlap(lhs, victim) { - // For #33140 the impl headers must be exactly equal, the trait must not have - // any associated items and there are no where-clauses. - // - // We can just arbitrarily drop one of the impls. - Some(ty::ImplOverlapKind::FutureCompatOrderDepTraitObjects) => { - assert_eq!(lhs_evaluation, victim_evaluation); - true - } // For candidates which already reference errors it doesn't really // matter what we do 🤷 Some(ty::ImplOverlapKind::Permitted { marker: false }) => { @@ -2199,8 +2196,8 @@ impl<'tcx> SelectionContext<'_, 'tcx> { } ty::CoroutineWitness(def_id, args) => { - let hidden_types = bind_coroutine_hidden_types_above( - self.infcx, + let hidden_types = rebind_coroutine_witness_types( + self.infcx.tcx, def_id, args, obligation.predicate.bound_vars(), @@ -2234,15 +2231,6 @@ impl<'tcx> SelectionContext<'_, 'tcx> { } } - // `Copy` and `Clone` are automatically implemented for an anonymous adt - // if all of its fields are `Copy` and `Clone` - ty::Adt(adt, args) if adt.is_anonymous() => { - // (*) binder moved here - Where(obligation.predicate.rebind( - adt.non_enum_variant().fields.iter().map(|f| f.ty(self.tcx(), args)).collect(), - )) - } - ty::Adt(..) | ty::Alias(..) | ty::Param(..) | ty::Placeholder(..) => { // Fallback to whatever user-defined impls exist in this case. None @@ -2348,7 +2336,7 @@ impl<'tcx> SelectionContext<'_, 'tcx> { } ty::CoroutineWitness(def_id, args) => { - bind_coroutine_hidden_types_above(self.infcx, def_id, args, t.bound_vars()) + rebind_coroutine_witness_types(self.infcx.tcx, def_id, args, t.bound_vars()) } // For `PhantomData<T>`, we pass `T`. 
@@ -2843,6 +2831,23 @@ impl<'tcx> SelectionContext<'_, 'tcx> { } } +fn rebind_coroutine_witness_types<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, + args: ty::GenericArgsRef<'tcx>, + bound_vars: &'tcx ty::List<ty::BoundVariableKind>, +) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> { + let bound_coroutine_types = tcx.coroutine_hidden_types(def_id).skip_binder(); + let shifted_coroutine_types = + tcx.shift_bound_var_indices(bound_vars.len(), bound_coroutine_types.skip_binder()); + ty::Binder::bind_with_vars( + ty::EarlyBinder::bind(shifted_coroutine_types.to_vec()).instantiate(tcx, args), + tcx.mk_bound_variable_kinds_from_iter( + bound_vars.iter().chain(bound_coroutine_types.bound_vars()), + ), + ) +} + impl<'o, 'tcx> TraitObligationStack<'o, 'tcx> { fn list(&'o self) -> TraitObligationStackList<'o, 'tcx> { TraitObligationStackList::with(self) @@ -3151,56 +3156,3 @@ pub(crate) enum ProjectionMatchesProjection { Ambiguous, No, } - -/// Replace all regions inside the coroutine interior with late bound regions. -/// Note that each region slot in the types gets a new fresh late bound region, which means that -/// none of the regions inside relate to any other, even if typeck had previously found constraints -/// that would cause them to be related. -#[instrument(level = "trace", skip(infcx), ret)] -fn bind_coroutine_hidden_types_above<'tcx>( - infcx: &InferCtxt<'tcx>, - def_id: DefId, - args: ty::GenericArgsRef<'tcx>, - bound_vars: &ty::List<ty::BoundVariableKind>, -) -> ty::Binder<'tcx, Vec<Ty<'tcx>>> { - let tcx = infcx.tcx; - let mut seen_tys = FxHashSet::default(); - - let considering_regions = infcx.considering_regions; - - let num_bound_variables = bound_vars.len() as u32; - let mut counter = num_bound_variables; - - let hidden_types: Vec<_> = tcx - .coroutine_hidden_types(def_id) - // Deduplicate tys to avoid repeated work. - .filter(|bty| seen_tys.insert(*bty)) - .map(|mut bty| { - // Only remap erased regions if we use them. 
- if considering_regions { - bty = bty.map_bound(|ty| { - fold_regions(tcx, ty, |r, current_depth| match r.kind() { - ty::ReErased => { - let br = ty::BoundRegion { - var: ty::BoundVar::from_u32(counter), - kind: ty::BoundRegionKind::Anon, - }; - counter += 1; - ty::Region::new_bound(tcx, current_depth, br) - } - r => bug!("unexpected region: {r:?}"), - }) - }) - } - - bty.instantiate(tcx, args) - }) - .collect(); - let bound_vars = tcx.mk_bound_variable_kinds_from_iter( - bound_vars.iter().chain( - (num_bound_variables..counter) - .map(|_| ty::BoundVariableKind::Region(ty::BoundRegionKind::Anon)), - ), - ); - ty::Binder::bind_with_vars(hidden_types, bound_vars) -} diff --git a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs index cb3e81f5477..448ac558cad 100644 --- a/compiler/rustc_trait_selection/src/traits/specialize/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/specialize/mod.rs @@ -20,7 +20,7 @@ use rustc_middle::bug; use rustc_middle::query::LocalCrate; use rustc_middle::ty::print::PrintTraitRefExt as _; use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, TypeVisitableExt, TypingMode}; -use rustc_session::lint::builtin::{COHERENCE_LEAK_CHECK, ORDER_DEPENDENT_TRAIT_OBJECTS}; +use rustc_session::lint::builtin::COHERENCE_LEAK_CHECK; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, sym}; use rustc_type_ir::solve::NoSolution; use specialization_graph::GraphExt; @@ -557,13 +557,9 @@ fn report_conflicting_impls<'tcx>( let msg = || { format!( - "conflicting implementations of trait `{}`{}{}", + "conflicting implementations of trait `{}`{}", overlap.trait_ref.print_trait_sugared(), overlap.self_ty.map_or_else(String::new, |ty| format!(" for type `{ty}`")), - match used_to_be_allowed { - Some(FutureCompatOverlapErrorKind::OrderDepTraitObjects) => ": (E0119)", - _ => "", - } ) }; @@ -588,7 +584,6 @@ fn report_conflicting_impls<'tcx>( } Some(kind) => { let lint = match kind { - FutureCompatOverlapErrorKind::OrderDepTraitObjects => ORDER_DEPENDENT_TRAIT_OBJECTS, FutureCompatOverlapErrorKind::LeakCheck => COHERENCE_LEAK_CHECK, }; tcx.node_span_lint(lint, tcx.local_def_id_to_hir_id(impl_def_id), impl_span, |err| { diff --git a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs index 19d3561dd59..9452dca9a4f 100644 --- a/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs +++ b/compiler/rustc_trait_selection/src/traits/specialize/specialization_graph.rs @@ -12,7 +12,6 @@ use crate::traits; #[derive(Copy, Clone, Debug)] pub enum FutureCompatOverlapErrorKind { - OrderDepTraitObjects, LeakCheck, } @@ -151,12 +150,6 @@ impl<'tcx> Children { { match overlap_kind { ty::ImplOverlapKind::Permitted { marker: _ } => {} - ty::ImplOverlapKind::FutureCompatOrderDepTraitObjects => { - *last_lint_mut = Some(FutureCompatOverlapError { - error: create_overlap_error(overlap), - kind: FutureCompatOverlapErrorKind::OrderDepTraitObjects, - }); - } } return Ok((false, false)); diff --git a/compiler/rustc_trait_selection/src/traits/wf.rs b/compiler/rustc_trait_selection/src/traits/wf.rs index 18906a6a8ce..54b6c22b2d8 100644 --- a/compiler/rustc_trait_selection/src/traits/wf.rs +++ b/compiler/rustc_trait_selection/src/traits/wf.rs @@ -708,7 +708,7 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for WfPredicates<'a, 'tcx> { ty::Pat(subty, pat) => { self.require_sized(subty, ObligationCauseCode::Misc); match *pat 
{ - ty::PatternKind::Range { start, end, include_end: _ } => { + ty::PatternKind::Range { start, end } => { let mut check = |c| { let cause = self.cause(ObligationCauseCode::Misc); self.out.push(traits::Obligation::with_depth( @@ -738,12 +738,8 @@ impl<'a, 'tcx> TypeVisitor<TyCtxt<'tcx>> for WfPredicates<'a, 'tcx> { } } }; - if let Some(start) = start { - check(start) - } - if let Some(end) = end { - check(end) - } + check(start); + check(end); } } } diff --git a/compiler/rustc_traits/src/codegen.rs b/compiler/rustc_traits/src/codegen.rs index cc329ca3328..4a889abfc28 100644 --- a/compiler/rustc_traits/src/codegen.rs +++ b/compiler/rustc_traits/src/codegen.rs @@ -70,7 +70,7 @@ pub(crate) fn codegen_select_candidate<'tcx>( infcx.err_ctxt().report_overflow_obligation_cycle(&cycle); } } - return Err(CodegenObligationError::FulfillmentError); + return Err(CodegenObligationError::Unimplemented); } let impl_source = infcx.resolve_vars_if_possible(impl_source); diff --git a/compiler/rustc_traits/src/implied_outlives_bounds.rs b/compiler/rustc_traits/src/implied_outlives_bounds.rs index 5f75e242a50..6fb483e6dac 100644 --- a/compiler/rustc_traits/src/implied_outlives_bounds.rs +++ b/compiler/rustc_traits/src/implied_outlives_bounds.rs @@ -10,38 +10,28 @@ use rustc_middle::query::Providers; use rustc_middle::ty::TyCtxt; use rustc_span::DUMMY_SP; use rustc_trait_selection::infer::InferCtxtBuilderExt; -use rustc_trait_selection::traits::query::type_op::implied_outlives_bounds::{ - compute_implied_outlives_bounds_compat_inner, compute_implied_outlives_bounds_inner, -}; +use rustc_trait_selection::traits::query::type_op::implied_outlives_bounds::compute_implied_outlives_bounds_inner; use rustc_trait_selection::traits::query::{CanonicalImpliedOutlivesBoundsGoal, NoSolution}; pub(crate) fn provide(p: &mut Providers) { - *p = Providers { implied_outlives_bounds_compat, ..*p }; *p = Providers { implied_outlives_bounds, ..*p }; } -fn implied_outlives_bounds_compat<'tcx>( - tcx: TyCtxt<'tcx>, - goal: CanonicalImpliedOutlivesBoundsGoal<'tcx>, -) -> Result< - &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>, - NoSolution, -> { - tcx.infer_ctxt().enter_canonical_trait_query(&goal, |ocx, key| { - let (param_env, ImpliedOutlivesBounds { ty }) = key.into_parts(); - compute_implied_outlives_bounds_compat_inner(ocx, param_env, ty, DUMMY_SP) - }) -} - fn implied_outlives_bounds<'tcx>( tcx: TyCtxt<'tcx>, - goal: CanonicalImpliedOutlivesBoundsGoal<'tcx>, + (goal, disable_implied_bounds_hack): (CanonicalImpliedOutlivesBoundsGoal<'tcx>, bool), ) -> Result< &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>, NoSolution, > { tcx.infer_ctxt().enter_canonical_trait_query(&goal, |ocx, key| { let (param_env, ImpliedOutlivesBounds { ty }) = key.into_parts(); - compute_implied_outlives_bounds_inner(ocx, param_env, ty, DUMMY_SP) + compute_implied_outlives_bounds_inner( + ocx, + param_env, + ty, + DUMMY_SP, + disable_implied_bounds_hack, + ) }) } diff --git a/compiler/rustc_traits/src/lib.rs b/compiler/rustc_traits/src/lib.rs index d2f979bd6d9..697c8391803 100644 --- a/compiler/rustc_traits/src/lib.rs +++ b/compiler/rustc_traits/src/lib.rs @@ -2,7 +2,6 @@ // tidy-alphabetical-start #![recursion_limit = "256"] -#![warn(unreachable_pub)] // tidy-alphabetical-end mod codegen; diff --git a/compiler/rustc_transmute/Cargo.toml b/compiler/rustc_transmute/Cargo.toml index e9daf6b0c38..f0c783b3002 100644 --- a/compiler/rustc_transmute/Cargo.toml +++ 
b/compiler/rustc_transmute/Cargo.toml @@ -8,8 +8,6 @@ edition = "2024" rustc_abi = { path = "../rustc_abi", optional = true } rustc_data_structures = { path = "../rustc_data_structures" } rustc_hir = { path = "../rustc_hir", optional = true } -rustc_infer = { path = "../rustc_infer", optional = true } -rustc_macros = { path = "../rustc_macros", optional = true } rustc_middle = { path = "../rustc_middle", optional = true } rustc_span = { path = "../rustc_span", optional = true } tracing = "0.1" @@ -19,8 +17,6 @@ tracing = "0.1" rustc = [ "dep:rustc_abi", "dep:rustc_hir", - "dep:rustc_infer", - "dep:rustc_macros", "dep:rustc_middle", "dep:rustc_span", ] diff --git a/compiler/rustc_transmute/src/layout/dfa.rs b/compiler/rustc_transmute/src/layout/dfa.rs index a70dc034e63..a29baade42f 100644 --- a/compiler/rustc_transmute/src/layout/dfa.rs +++ b/compiler/rustc_transmute/src/layout/dfa.rs @@ -38,7 +38,7 @@ impl<R> Transitions<R> where R: Ref, { - #[allow(dead_code)] + #[cfg(test)] fn insert(&mut self, transition: Transition<R>, state: State) { match transition { Transition::Byte(b) => { @@ -86,15 +86,6 @@ impl<R> Dfa<R> where R: Ref, { - #[allow(dead_code)] - pub(crate) fn unit() -> Self { - let transitions: Map<State, Transitions<R>> = Map::default(); - let start = State::new(); - let accepting = start; - - Self { transitions, start, accepting } - } - #[cfg(test)] pub(crate) fn bool() -> Self { let mut transitions: Map<State, Transitions<R>> = Map::default(); diff --git a/compiler/rustc_transmute/src/layout/nfa.rs b/compiler/rustc_transmute/src/layout/nfa.rs index 3b49c59fa68..9c21fd94f03 100644 --- a/compiler/rustc_transmute/src/layout/nfa.rs +++ b/compiler/rustc_transmute/src/layout/nfa.rs @@ -159,11 +159,6 @@ where } Self { transitions, start, accepting } } - - #[allow(dead_code)] - pub(crate) fn edges_from(&self, start: State) -> Option<&Map<Transition<R>, Set<State>>> { - self.transitions.get(&start) - } } impl State { diff --git a/compiler/rustc_transmute/src/layout/tree.rs b/compiler/rustc_transmute/src/layout/tree.rs index b00585d2922..a21be5dda4e 100644 --- a/compiler/rustc_transmute/src/layout/tree.rs +++ b/compiler/rustc_transmute/src/layout/tree.rs @@ -237,7 +237,7 @@ pub(crate) mod rustc { ty::Tuple(members) => Self::from_tuple((ty, layout), members, cx), - ty::Array(inner_ty, len) => { + ty::Array(inner_ty, _len) => { let FieldsShape::Array { stride, count } = &layout.fields else { return Err(Err::NotYetSupported); }; @@ -282,7 +282,6 @@ pub(crate) mod rustc { FieldsShape::Primitive => { assert_eq!(members.len(), 1); let inner_ty = members[0]; - let inner_layout = layout_of(cx, inner_ty)?; Self::from_ty(inner_ty, cx) } FieldsShape::Arbitrary { offsets, .. } => { @@ -345,7 +344,7 @@ pub(crate) mod rustc { // the enum delegates its layout to the variant at `index`. layout_of_variant(*index, None) } - Variants::Multiple { tag, tag_encoding, tag_field, .. } => { + Variants::Multiple { tag: _, tag_encoding, tag_field, .. } => { // `Variants::Multiple` denotes an enum with multiple // variants. The layout of such an enum is the disjunction // of the layouts of its tagged variants. @@ -356,7 +355,7 @@ pub(crate) mod rustc { let variants = def.discriminants(cx.tcx()).try_fold( Self::uninhabited(), - |variants, (idx, ref discriminant)| { + |variants, (idx, _discriminant)| { let variant = layout_of_variant(idx, Some(tag_encoding.clone()))?; Result::<Self, Err>::Ok(variants.or(variant)) }, @@ -414,7 +413,7 @@ pub(crate) mod rustc { // Append the fields, in memory order, to the layout. 
let inverse_memory_index = memory_index.invert_bijective_mapping(); - for (memory_idx, &field_idx) in inverse_memory_index.iter_enumerated() { + for &field_idx in inverse_memory_index.iter() { // Add interfield padding. let padding_needed = offsets[field_idx] - size; let padding = Self::padding(padding_needed.bytes_usize()); @@ -468,15 +467,14 @@ pub(crate) mod rustc { // This constructor does not support non-`FieldsShape::Union` // layouts. Fields of this shape are all placed at offset 0. - let FieldsShape::Union(fields) = layout.fields() else { + let FieldsShape::Union(_fields) = layout.fields() else { return Err(Err::NotYetSupported); }; let fields = &def.non_enum_variant().fields; let fields = fields.iter_enumerated().try_fold( Self::uninhabited(), - |fields, (idx, field_def)| { - let field_def = Def::Field(field_def); + |fields, (idx, _field_def)| { let field_ty = ty_field(cx, (ty, layout), idx); let field_layout = layout_of(cx, field_ty)?; let field = Self::from_ty(field_ty, cx)?; diff --git a/compiler/rustc_transmute/src/lib.rs b/compiler/rustc_transmute/src/lib.rs index f9537f708ef..7d800e49ff4 100644 --- a/compiler/rustc_transmute/src/lib.rs +++ b/compiler/rustc_transmute/src/lib.rs @@ -1,8 +1,6 @@ // tidy-alphabetical-start -#![allow(unused_variables)] -#![feature(alloc_layout_extra)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![feature(never_type)] -#![warn(unreachable_pub)] // tidy-alphabetical-end pub(crate) use rustc_data_structures::fx::{FxIndexMap as Map, FxIndexSet as Set}; @@ -81,15 +79,12 @@ pub enum Reason<T> { #[cfg(feature = "rustc")] mod rustc { use rustc_hir::lang_items::LangItem; - use rustc_infer::infer::InferCtxt; - use rustc_macros::TypeVisitable; - use rustc_middle::traits::ObligationCause; - use rustc_middle::ty::{Const, ParamEnv, Ty, TyCtxt}; + use rustc_middle::ty::{Const, Ty, TyCtxt}; use super::*; /// The source and destination types of a transmutation. - #[derive(TypeVisitable, Debug, Clone, Copy)] + #[derive(Debug, Clone, Copy)] pub struct Types<'tcx> { /// The source type. pub src: Ty<'tcx>, @@ -97,27 +92,22 @@ mod rustc { pub dst: Ty<'tcx>, } - pub struct TransmuteTypeEnv<'cx, 'tcx> { - infcx: &'cx InferCtxt<'tcx>, + pub struct TransmuteTypeEnv<'tcx> { + tcx: TyCtxt<'tcx>, } - impl<'cx, 'tcx> TransmuteTypeEnv<'cx, 'tcx> { - pub fn new(infcx: &'cx InferCtxt<'tcx>) -> Self { - Self { infcx } + impl<'tcx> TransmuteTypeEnv<'tcx> { + pub fn new(tcx: TyCtxt<'tcx>) -> Self { + Self { tcx } } - #[allow(unused)] pub fn is_transmutable( &mut self, - cause: ObligationCause<'tcx>, types: Types<'tcx>, assume: crate::Assume, ) -> crate::Answer<crate::layout::rustc::Ref<'tcx>> { crate::maybe_transmutable::MaybeTransmutableQuery::new( - types.src, - types.dst, - assume, - self.infcx.tcx, + types.src, types.dst, assume, self.tcx, ) .answer() } @@ -125,11 +115,7 @@ mod rustc { impl Assume { /// Constructs an `Assume` from a given const-`Assume`. 
- pub fn from_const<'tcx>( - tcx: TyCtxt<'tcx>, - param_env: ParamEnv<'tcx>, - ct: Const<'tcx>, - ) -> Option<Self> { + pub fn from_const<'tcx>(tcx: TyCtxt<'tcx>, ct: Const<'tcx>) -> Option<Self> { use rustc_middle::ty::ScalarInt; use rustc_span::sym; diff --git a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs index 023c8fad781..63fabc9c83d 100644 --- a/compiler/rustc_transmute/src/maybe_transmutable/mod.rs +++ b/compiler/rustc_transmute/src/maybe_transmutable/mod.rs @@ -79,12 +79,12 @@ where pub(crate) fn answer(self) -> Answer<<C as QueryContext>::Ref> { let Self { src, dst, assume, context } = self; - // Unconditionally all `Def` nodes from `src`, without pruning away the + // Unconditionally remove all `Def` nodes from `src`, without pruning away the // branches they appear in. This is valid to do for value-to-value // transmutations, but not for `&mut T` to `&mut U`; we will need to be // more sophisticated to handle transmutations between mutable // references. - let src = src.prune(&|def| false); + let src = src.prune(&|_def| false); if src.is_inhabited() && !dst.is_inhabited() { return Answer::No(Reason::DstUninhabited); @@ -96,7 +96,7 @@ where let dst = if assume.safety { // ...if safety is assumed, don't check if they carry safety // invariants; retain all paths. - dst.prune(&|def| false) + dst.prune(&|_def| false) } else { // ...otherwise, prune away all paths with safety invariants from // the `Dst` layout. diff --git a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs index 84af20c773e..4d81382eba0 100644 --- a/compiler/rustc_transmute/src/maybe_transmutable/tests.rs +++ b/compiler/rustc_transmute/src/maybe_transmutable/tests.rs @@ -92,7 +92,6 @@ mod bool { #[test] fn should_permit_validity_expansion_and_reject_contraction() { - let un = layout::Tree::<Def, !>::uninhabited(); let b0 = layout::Tree::<Def, !>::from_bits(0); let b1 = layout::Tree::<Def, !>::from_bits(1); let b2 = layout::Tree::<Def, !>::from_bits(2); diff --git a/compiler/rustc_ty_utils/Cargo.toml b/compiler/rustc_ty_utils/Cargo.toml index 4c7a57f2931..7e96b64408c 100644 --- a/compiler/rustc_ty_utils/Cargo.toml +++ b/compiler/rustc_ty_utils/Cargo.toml @@ -7,6 +7,7 @@ edition = "2024" # tidy-alphabetical-start itertools = "0.12" rustc_abi = { path = "../rustc_abi" } +rustc_attr_parsing = { path = "../rustc_attr_parsing" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_errors = { path = "../rustc_errors" } rustc_fluent_macro = { path = "../rustc_fluent_macro" } diff --git a/compiler/rustc_ty_utils/messages.ftl b/compiler/rustc_ty_utils/messages.ftl index ae795ef2214..8bc7bf10865 100644 --- a/compiler/rustc_ty_utils/messages.ftl +++ b/compiler/rustc_ty_utils/messages.ftl @@ -14,6 +14,8 @@ ty_utils_borrow_not_supported = borrowing is not supported in generic constants ty_utils_box_not_supported = allocations are not allowed in generic constants +ty_utils_by_use_not_supported = .use is not allowed in generic constants + ty_utils_closure_and_return_not_supported = closures and function keywords are not supported in generic constants ty_utils_const_block_not_supported = const blocks are not supported in generic constants @@ -42,8 +44,6 @@ ty_utils_logical_op_not_supported = unsupported operation in generic constants, ty_utils_loop_not_supported = loops and loop control flow are not supported in generic constants -ty_utils_multiple_array_fields_simd_type = monomorphising 
SIMD type `{$ty}` with more than one array field - ty_utils_needs_drop_overflow = overflow while checking whether `{$query_ty}` requires drop ty_utils_never_to_any_not_supported = coercing the `never` type is not supported in generic constants diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index 0ff82f0c256..a726ebae6fe 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -309,15 +309,11 @@ fn fn_abi_of_fn_ptr<'tcx>( query: ty::PseudoCanonicalInput<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> { let ty::PseudoCanonicalInput { typing_env, value: (sig, extra_args) } = query; - - let cx = LayoutCx::new(tcx, typing_env); fn_abi_new_uncached( - &cx, + &LayoutCx::new(tcx, typing_env), tcx.instantiate_bound_regions_with_erased(sig), extra_args, None, - None, - false, ) } @@ -326,19 +322,11 @@ fn fn_abi_of_instance<'tcx>( query: ty::PseudoCanonicalInput<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> { let ty::PseudoCanonicalInput { typing_env, value: (instance, extra_args) } = query; - - let sig = fn_sig_for_fn_abi(tcx, instance, typing_env); - - let caller_location = - instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()); - fn_abi_new_uncached( &LayoutCx::new(tcx, typing_env), - sig, + fn_sig_for_fn_abi(tcx, instance, typing_env), extra_args, - caller_location, - Some(instance.def_id()), - matches!(instance.def, ty::InstanceKind::Virtual(..)), + Some(instance), ) } @@ -448,10 +436,7 @@ fn fn_abi_sanity_check<'tcx>( ) { let tcx = cx.tcx(); - if spec_abi == ExternAbi::Rust - || spec_abi == ExternAbi::RustCall - || spec_abi == ExternAbi::RustCold - { + if spec_abi.is_rustic_abi() { if arg.layout.is_zst() { // Casting closures to function pointers depends on ZST closure types being // omitted entirely in the calling convention. @@ -472,7 +457,7 @@ fn fn_abi_sanity_check<'tcx>( // `layout.backend_repr` and ignore everything else. We should just reject //`Aggregate` entirely here, but some targets need to be fixed first. match arg.layout.backend_repr { - BackendRepr::Scalar(_) | BackendRepr::Vector { .. } => {} + BackendRepr::Scalar(_) | BackendRepr::SimdVector { .. } => {} BackendRepr::ScalarPair(..) => { panic!("`PassMode::Direct` used for ScalarPair type {}", arg.layout.ty) } @@ -547,19 +532,25 @@ fn fn_abi_sanity_check<'tcx>( fn_arg_sanity_check(cx, fn_abi, spec_abi, &fn_abi.ret); } -// FIXME(eddyb) perhaps group the signature/type-containing (or all of them?) -// arguments of this method, into a separate `struct`. -#[tracing::instrument(level = "debug", skip(cx, caller_location, fn_def_id, force_thin_self_ptr))] +#[tracing::instrument(level = "debug", skip(cx, instance))] fn fn_abi_new_uncached<'tcx>( cx: &LayoutCx<'tcx>, sig: ty::FnSig<'tcx>, extra_args: &[Ty<'tcx>], - caller_location: Option<Ty<'tcx>>, - fn_def_id: Option<DefId>, - // FIXME(eddyb) replace this with something typed, like an `enum`. 
- force_thin_self_ptr: bool, + instance: Option<ty::Instance<'tcx>>, ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, &'tcx FnAbiError<'tcx>> { let tcx = cx.tcx(); + let (caller_location, determined_fn_def_id, is_virtual_call) = if let Some(instance) = instance + { + let is_virtual_call = matches!(instance.def, ty::InstanceKind::Virtual(..)); + ( + instance.def.requires_caller_location(tcx).then(|| tcx.caller_location_ty()), + if is_virtual_call { None } else { Some(instance.def_id()) }, + is_virtual_call, + ) + } else { + (None, None, false) + }; let sig = tcx.normalize_erasing_regions(cx.typing_env, sig); let conv = conv_from_spec_abi(cx.tcx(), sig.abi, sig.c_variadic); @@ -568,16 +559,11 @@ fn fn_abi_new_uncached<'tcx>( let extra_args = if sig.abi == ExternAbi::RustCall { assert!(!sig.c_variadic && extra_args.is_empty()); - if let Some(input) = sig.inputs().last() { - if let ty::Tuple(tupled_arguments) = input.kind() { - inputs = &sig.inputs()[0..sig.inputs().len() - 1]; - tupled_arguments - } else { - bug!( - "argument to function with \"rust-call\" ABI \ - is not a tuple" - ); - } + if let Some(input) = sig.inputs().last() + && let ty::Tuple(tupled_arguments) = input.kind() + { + inputs = &sig.inputs()[0..sig.inputs().len() - 1]; + tupled_arguments } else { bug!( "argument to function with \"rust-call\" ABI \ @@ -590,7 +576,7 @@ }; let is_drop_in_place = - fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace)); + determined_fn_def_id.is_some_and(|def_id| tcx.is_lang_item(def_id, LangItem::DropInPlace)); let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, &'tcx FnAbiError<'tcx>> { let span = tracing::debug_span!("arg_of"); @@ -603,7 +589,7 @@ }); let layout = cx.layout_of(ty).map_err(|err| &*tcx.arena.alloc(FnAbiError::Layout(*err)))?; - let layout = if force_thin_self_ptr && arg_idx == Some(0) { + let layout = if is_virtual_call && arg_idx == Some(0) { // Don't pass the vtable, it's not an argument of the virtual fn. // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait` // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen @@ -646,9 +632,22 @@ c_variadic: sig.c_variadic, fixed_count: inputs.len() as u32, conv, - can_unwind: fn_can_unwind(cx.tcx(), fn_def_id, sig.abi), + can_unwind: fn_can_unwind( + tcx, + // Since `#[rustc_nounwind]` can change unwinding, we cannot infer unwinding by `fn_def_id` for a virtual call. + determined_fn_def_id, + sig.abi, + ), }; - fn_abi_adjust_for_abi(cx, &mut fn_abi, sig.abi, fn_def_id); + fn_abi_adjust_for_abi( + cx, + &mut fn_abi, + sig.abi, + // If this is a virtual call, we cannot pass the `fn_def_id`, as it might call other + // functions from the vtable. Internally, `deduced_param_attrs` attempts to infer attributes by // visiting the function body.
+ determined_fn_def_id, + ); debug!("fn_abi_new_uncached = {:?}", fn_abi); fn_abi_sanity_check(cx, &fn_abi, sig.abi); Ok(tcx.arena.alloc(fn_abi)) @@ -685,7 +684,7 @@ fn fn_abi_adjust_for_abi<'tcx>( let tcx = cx.tcx(); - if abi == ExternAbi::Rust || abi == ExternAbi::RustCall || abi == ExternAbi::RustIntrinsic { + if abi.is_rustic_abi() { fn_abi.adjust_for_rust_abi(cx, abi); // Look up the deduced parameter attributes for this function, if we have its def ID and diff --git a/compiler/rustc_ty_utils/src/assoc.rs b/compiler/rustc_ty_utils/src/assoc.rs index c8034f4e7b9..b7684e85d41 100644 --- a/compiler/rustc_ty_utils/src/assoc.rs +++ b/compiler/rustc_ty_utils/src/assoc.rs @@ -21,7 +21,7 @@ pub(crate) fn provide(providers: &mut Providers) { } fn associated_item_def_ids(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &[DefId] { - let item = tcx.hir().expect_item(def_id); + let item = tcx.hir_expect_item(def_id); match item.kind { hir::ItemKind::Trait(.., trait_item_refs) => { // We collect RPITITs for each trait method's return type and create a @@ -96,7 +96,7 @@ fn impl_item_implementor_ids(tcx: TyCtxt<'_>, impl_id: DefId) -> DefIdMap<DefId> fn associated_item(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::AssocItem { let id = tcx.local_def_id_to_hir_id(def_id); let parent_def_id = tcx.hir_get_parent_item(id); - let parent_item = tcx.hir().expect_item(parent_def_id.def_id); + let parent_item = tcx.hir_expect_item(parent_def_id.def_id); match parent_item.kind { hir::ItemKind::Impl(impl_) => { if let Some(impl_item_ref) = impl_.items.iter().find(|i| i.id.owner_id.def_id == def_id) @@ -201,7 +201,7 @@ fn associated_types_for_impl_traits_in_associated_fn( let mut visitor = RPITVisitor { rpits: FxIndexSet::default() }; - if let Some(output) = tcx.hir().get_fn_output(fn_def_id) { + if let Some(output) = tcx.hir_get_fn_output(fn_def_id) { visitor.visit_fn_ret_ty(output); tcx.arena.alloc_from_iter(visitor.rpits.iter().map(|opaque_ty_def_id| { @@ -252,7 +252,8 @@ fn associated_type_for_impl_trait_in_trait( assert_eq!(tcx.def_kind(trait_def_id), DefKind::Trait); let span = tcx.def_span(opaque_ty_def_id); - let trait_assoc_ty = tcx.at(span).create_def(trait_def_id, kw::Empty, DefKind::AssocTy); + // No name because this is a synthetic associated type. + let trait_assoc_ty = tcx.at(span).create_def(trait_def_id, None, DefKind::AssocTy); let local_def_id = trait_assoc_ty.def_id(); let def_id = local_def_id.to_def_id(); @@ -304,7 +305,8 @@ fn associated_type_for_impl_trait_in_impl( hir::FnRetTy::DefaultReturn(_) => tcx.def_span(impl_fn_def_id), hir::FnRetTy::Return(ty) => ty.span, }; - let impl_assoc_ty = tcx.at(span).create_def(impl_local_def_id, kw::Empty, DefKind::AssocTy); + // No name because this is a synthetic associated type. 
+ let impl_assoc_ty = tcx.at(span).create_def(impl_local_def_id, None, DefKind::AssocTy); let local_def_id = impl_assoc_ty.def_id(); let def_id = local_def_id.to_def_id(); diff --git a/compiler/rustc_ty_utils/src/common_traits.rs b/compiler/rustc_ty_utils/src/common_traits.rs index 2157ab3c402..20646cf9a82 100644 --- a/compiler/rustc_ty_utils/src/common_traits.rs +++ b/compiler/rustc_ty_utils/src/common_traits.rs @@ -10,6 +10,13 @@ fn is_copy_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::PseudoCanonicalInput<'tcx, Ty is_item_raw(tcx, query, LangItem::Copy) } +fn is_use_cloned_raw<'tcx>( + tcx: TyCtxt<'tcx>, + query: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>, +) -> bool { + is_item_raw(tcx, query, LangItem::UseCloned) +} + fn is_sized_raw<'tcx>(tcx: TyCtxt<'tcx>, query: ty::PseudoCanonicalInput<'tcx, Ty<'tcx>>) -> bool { is_item_raw(tcx, query, LangItem::Sized) } @@ -33,5 +40,12 @@ fn is_item_raw<'tcx>( } pub(crate) fn provide(providers: &mut Providers) { - *providers = Providers { is_copy_raw, is_sized_raw, is_freeze_raw, is_unpin_raw, ..*providers }; + *providers = Providers { + is_copy_raw, + is_use_cloned_raw, + is_sized_raw, + is_freeze_raw, + is_unpin_raw, + ..*providers + }; } diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs index ece796b3c71..b275cd382ab 100644 --- a/compiler/rustc_ty_utils/src/consts.rs +++ b/compiler/rustc_ty_utils/src/consts.rs @@ -230,7 +230,9 @@ fn recurse_build<'tcx>( error(GenericConstantTooComplexSub::LoopNotSupported(node.span))? } ExprKind::Box { .. } => error(GenericConstantTooComplexSub::BoxNotSupported(node.span))?, - + ExprKind::ByUse { .. } => { + error(GenericConstantTooComplexSub::ByUseNotSupported(node.span))? + } ExprKind::Unary { .. } => unreachable!(), // we handle valid unary/binary ops above ExprKind::Binary { .. } => { @@ -317,6 +319,7 @@ impl<'a, 'tcx> IsThirPolymorphic<'a, 'tcx> { | thir::ExprKind::Box { .. } | thir::ExprKind::If { .. } | thir::ExprKind::Call { .. } + | thir::ExprKind::ByUse { .. } | thir::ExprKind::Deref { .. } | thir::ExprKind::Binary { .. } | thir::ExprKind::LogicalOp { .. 
} diff --git a/compiler/rustc_ty_utils/src/errors.rs b/compiler/rustc_ty_utils/src/errors.rs index 5497d7d0bd2..0298e7e0e95 100644 --- a/compiler/rustc_ty_utils/src/errors.rs +++ b/compiler/rustc_ty_utils/src/errors.rs @@ -55,6 +55,8 @@ pub(crate) enum GenericConstantTooComplexSub { BoxNotSupported(#[primary_span] Span), #[label(ty_utils_binary_not_supported)] BinaryNotSupported(#[primary_span] Span), + #[label(ty_utils_by_use_not_supported)] + ByUseNotSupported(#[primary_span] Span), #[label(ty_utils_logical_op_not_supported)] LogicalOpNotSupported(#[primary_span] Span), #[label(ty_utils_assign_not_supported)] @@ -83,12 +85,6 @@ pub(crate) struct ZeroLengthSimdType<'tcx> { } #[derive(Diagnostic)] -#[diag(ty_utils_multiple_array_fields_simd_type)] -pub(crate) struct MultipleArrayFieldsSimdType<'tcx> { - pub ty: Ty<'tcx>, -} - -#[derive(Diagnostic)] #[diag(ty_utils_oversized_simd_type)] pub(crate) struct OversizedSimdType<'tcx> { pub ty: Ty<'tcx>, diff --git a/compiler/rustc_ty_utils/src/implied_bounds.rs b/compiler/rustc_ty_utils/src/implied_bounds.rs index 6205578bf74..aeaa83c77b5 100644 --- a/compiler/rustc_ty_utils/src/implied_bounds.rs +++ b/compiler/rustc_ty_utils/src/implied_bounds.rs @@ -161,7 +161,7 @@ fn fn_sig_spans(tcx: TyCtxt<'_>, def_id: LocalDefId) -> impl Iterator<Item = Spa } fn impl_spans(tcx: TyCtxt<'_>, def_id: LocalDefId) -> impl Iterator<Item = Span> { - let item = tcx.hir().expect_item(def_id); + let item = tcx.hir_expect_item(def_id); if let hir::ItemKind::Impl(impl_) = item.kind { let trait_args = impl_ .of_trait diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index d059d6dcd13..90b3517eded 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -107,11 +107,9 @@ fn resolve_associated_item<'tcx>( let input = typing_env.as_query_input(trait_ref); let vtbl = match tcx.codegen_select_candidate(input) { Ok(vtbl) => vtbl, - Err( - CodegenObligationError::Ambiguity - | CodegenObligationError::Unimplemented - | CodegenObligationError::FulfillmentError, - ) => return Ok(None), + Err(CodegenObligationError::Ambiguity | CodegenObligationError::Unimplemented) => { + return Ok(None); + } Err(CodegenObligationError::UnconstrainedParam(guar)) => return Err(guar), }; @@ -381,8 +379,7 @@ fn resolve_associated_item<'tcx>( } } traits::ImplSource::Param(..) - | traits::ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. }, _) - | traits::ImplSource::Builtin(BuiltinImplSource::TupleUnsizing, _) => None, + | traits::ImplSource::Builtin(BuiltinImplSource::TraitUpcasting { .. 
}, _) => None, }) } diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 2ab6b8e17c4..7334beb52c9 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -1,36 +1,28 @@ -use std::fmt::Debug; -use std::iter; - use hir::def_id::DefId; use rustc_abi::Integer::{I8, I32}; use rustc_abi::Primitive::{self, Float, Int, Pointer}; use rustc_abi::{ - AbiAndPrefAlign, AddressSpace, Align, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, - HasDataLayout, Layout, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, - StructKind, TagEncoding, VariantIdx, Variants, WrappingRange, + AddressSpace, BackendRepr, FIRST_VARIANT, FieldIdx, FieldsShape, HasDataLayout, Layout, + LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, + VariantIdx, Variants, WrappingRange, }; use rustc_hashes::Hash64; -use rustc_index::bit_set::DenseBitSet; -use rustc_index::{IndexSlice, IndexVec}; +use rustc_index::IndexVec; use rustc_middle::bug; -use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal}; use rustc_middle::query::Providers; use rustc_middle::ty::layout::{ - FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, MAX_SIMD_LANES, TyAndLayout, + FloatExt, HasTyCtxt, IntegerExt, LayoutCx, LayoutError, LayoutOf, TyAndLayout, }; use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_middle::ty::{ - self, AdtDef, CoroutineArgsExt, EarlyBinder, GenericArgsRef, PseudoCanonicalInput, Ty, TyCtxt, - TypeVisitableExt, + self, AdtDef, CoroutineArgsExt, EarlyBinder, PseudoCanonicalInput, Ty, TyCtxt, TypeVisitableExt, }; use rustc_session::{DataTypeKind, FieldInfo, FieldKind, SizeKind, VariantInfo}; use rustc_span::{Symbol, sym}; -use tracing::{debug, instrument, trace}; +use tracing::{debug, instrument}; use {rustc_abi as abi, rustc_hir as hir}; -use crate::errors::{ - MultipleArrayFieldsSimdType, NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType, -}; +use crate::errors::{NonPrimitiveSimdType, OversizedSimdType, ZeroLengthSimdType}; mod invariant; @@ -126,20 +118,23 @@ fn map_error<'tcx>( .delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}")); LayoutError::ReferencesError(guar) } + LayoutCalculatorError::ZeroLengthSimdType => { + // Can't be caught in typeck if the array length is generic. + cx.tcx().dcx().emit_fatal(ZeroLengthSimdType { ty }) + } + LayoutCalculatorError::OversizedSimdType { max_lanes } => { + // Can't be caught in typeck if the array length is generic. + cx.tcx().dcx().emit_fatal(OversizedSimdType { ty, max_lanes }) + } + LayoutCalculatorError::NonPrimitiveSimdType(field) => { + // This error isn't caught in typeck, e.g., if + // the element type of the vector is generic. 
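In the `layout_of_uncached` changes that follow in this hunk, field layouts are computed fallibly and collected up front before being handed to the layout calculator (the new `map_layout` and `univariant` helpers). A simplified sketch of that collect-then-build pattern; `FieldLayout`, `LayoutErr` and the size arithmetic are made-up stand-ins, and stable `collect::<Result<_, _>>()` stands in for the nightly `try_collect` into an `IndexVec` used in the diff:

#[derive(Debug)]
struct FieldLayout { size: u64 }

#[derive(Debug)]
struct LayoutErr(String);

fn layout_of(ty: &str) -> Result<FieldLayout, LayoutErr> {
    match ty {
        "u8" => Ok(FieldLayout { size: 1 }),
        "u32" => Ok(FieldLayout { size: 4 }),
        other => Err(LayoutErr(format!("cannot lay out `{other}`"))),
    }
}

fn univariant(tys: &[&str]) -> Result<FieldLayout, LayoutErr> {
    // Compute every field layout first; the first error aborts the whole computation.
    let fields: Vec<FieldLayout> = tys.iter().map(|&ty| layout_of(ty)).collect::<Result<_, _>>()?;
    // Stand-in for the real layout calculator: just sum the field sizes.
    Ok(FieldLayout { size: fields.iter().map(|f| f.size).sum() })
}

fn main() {
    assert_eq!(univariant(&["u8", "u32"]).unwrap().size, 5);
    assert!(univariant(&["u8", "not-a-type"]).is_err());
}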
+ cx.tcx().dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty: field.ty }) + } }; error(cx, err) } -fn univariant_uninterned<'tcx>( - cx: &LayoutCx<'tcx>, - ty: Ty<'tcx>, - fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, - kind: StructKind, -) -> Result<LayoutData<FieldIdx, VariantIdx>, &'tcx LayoutError<'tcx>> { - let repr = ReprOptions::default(); - cx.calc.univariant(fields, &repr, kind).map_err(|err| map_error(cx, ty, err)) -} - fn extract_const_value<'tcx>( cx: &LayoutCx<'tcx>, ty: Ty<'tcx>, @@ -190,6 +185,10 @@ fn layout_of_uncached<'tcx>( let tcx = cx.tcx(); let dl = cx.data_layout(); + let map_layout = |result: Result<_, _>| match result { + Ok(layout) => Ok(tcx.mk_layout(layout)), + Err(err) => Err(map_error(cx, ty, err)), + }; let scalar_unit = |value: Primitive| { let size = value.size(dl); assert!(size.bits() <= 128); @@ -197,8 +196,10 @@ fn layout_of_uncached<'tcx>( }; let scalar = |value: Primitive| tcx.mk_layout(LayoutData::scalar(cx, scalar_unit(value))); - let univariant = |fields: &IndexSlice<FieldIdx, TyAndLayout<'tcx>>, kind| { - Ok(tcx.mk_layout(univariant_uninterned(cx, ty, fields, kind)?)) + let univariant = |tys: &[Ty<'tcx>], kind| { + let fields = tys.iter().map(|ty| cx.layout_of(*ty)).try_collect::<IndexVec<_, _>>()?; + let repr = ReprOptions::default(); + map_layout(cx.calc.univariant(&fields, &repr, kind)) }; debug_assert!(!ty.has_non_region_infer()); @@ -207,24 +208,45 @@ fn layout_of_uncached<'tcx>( let layout = cx.layout_of(ty)?.layout; let mut layout = LayoutData::clone(&layout.0); match *pat { - ty::PatternKind::Range { start, end, include_end } => { + ty::PatternKind::Range { start, end } => { if let BackendRepr::Scalar(scalar) | BackendRepr::ScalarPair(scalar, _) = &mut layout.backend_repr { - if let Some(start) = start { - scalar.valid_range_mut().start = extract_const_value(cx, ty, start)? - .try_to_bits(tcx, cx.typing_env) - .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; - } - if let Some(end) = end { - let mut end = extract_const_value(cx, ty, end)? - .try_to_bits(tcx, cx.typing_env) - .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; - if !include_end { - end = end.wrapping_sub(1); + scalar.valid_range_mut().start = extract_const_value(cx, ty, start)? + .try_to_bits(tcx, cx.typing_env) + .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; + + scalar.valid_range_mut().end = extract_const_value(cx, ty, end)? + .try_to_bits(tcx, cx.typing_env) + .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; + + // FIXME(pattern_types): create implied bounds from pattern types in signatures + // that require that the range end is >= the range start so that we can't hit + // this error anymore without first having hit a trait solver error. 
+ // Very fuzzy on the details here, but pattern types are an internal impl detail, + // so we can just go with this for now + if scalar.is_signed() { + let range = scalar.valid_range_mut(); + let start = layout.size.sign_extend(range.start); + let end = layout.size.sign_extend(range.end); + if end < start { + let guar = tcx.dcx().err(format!( + "pattern type ranges cannot wrap: {start}..={end}" + )); + + return Err(error(cx, LayoutError::ReferencesError(guar))); } - scalar.valid_range_mut().end = end; - } + } else { + let range = scalar.valid_range_mut(); + if range.end < range.start { + let guar = tcx.dcx().err(format!( + "pattern type ranges cannot wrap: {}..={}", + range.start, range.end + )); + + return Err(error(cx, LayoutError::ReferencesError(guar))); + } + }; let niche = Niche { offset: Size::ZERO, @@ -267,7 +289,7 @@ fn layout_of_uncached<'tcx>( } // The never type. - ty::Never => tcx.mk_layout(cx.calc.layout_of_never_type()), + ty::Never => tcx.mk_layout(LayoutData::never_type(cx)), // Potentially-wide pointers. ty::Ref(_, pointee, _) | ty::RawPtr(pointee, _) => { @@ -338,7 +360,7 @@ fn layout_of_uncached<'tcx>( }; // Effectively a (ptr, meta) tuple. - tcx.mk_layout(cx.calc.scalar_pair(data_ptr, metadata)) + tcx.mk_layout(LayoutData::scalar_pair(cx, data_ptr, metadata)) } ty::Dynamic(_, _, ty::DynStar) => { @@ -346,7 +368,7 @@ fn layout_of_uncached<'tcx>( data.valid_range_mut().start = 0; let mut vtable = scalar_unit(Pointer(AddressSpace::DATA)); vtable.valid_range_mut().start = 1; - tcx.mk_layout(cx.calc.scalar_pair(data, vtable)) + tcx.mk_layout(LayoutData::scalar_pair(cx, data, vtable)) } // Arrays and slices. @@ -356,228 +378,114 @@ fn layout_of_uncached<'tcx>( .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; let element = cx.layout_of(element)?; - let size = element - .size - .checked_mul(count, dl) - .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?; - - let abi = BackendRepr::Memory { sized: true }; - - let largest_niche = if count != 0 { element.largest_niche } else { None }; - let uninhabited = if count != 0 { element.uninhabited } else { false }; - - tcx.mk_layout(LayoutData { - variants: Variants::Single { index: FIRST_VARIANT }, - fields: FieldsShape::Array { stride: element.size, count }, - backend_repr: abi, - largest_niche, - uninhabited, - align: element.align, - size, - max_repr_align: None, - unadjusted_abi_align: element.align.abi, - randomization_seed: element.randomization_seed.wrapping_add(Hash64::new(count)), - }) + map_layout(cx.calc.array_like(&element, Some(count)))? } ty::Slice(element) => { let element = cx.layout_of(element)?; - tcx.mk_layout(LayoutData { - variants: Variants::Single { index: FIRST_VARIANT }, - fields: FieldsShape::Array { stride: element.size, count: 0 }, - backend_repr: BackendRepr::Memory { sized: false }, - largest_niche: None, - uninhabited: false, - align: element.align, - size: Size::ZERO, - max_repr_align: None, - unadjusted_abi_align: element.align.abi, - // adding a randomly chosen value to distinguish slices - randomization_seed: element - .randomization_seed - .wrapping_add(Hash64::new(0x2dcba99c39784102)), - }) + map_layout(cx.calc.array_like(&element, None).map(|mut layout| { + // a randomly chosen value to distinguish slices + layout.randomization_seed = Hash64::new(0x2dcba99c39784102); + layout + }))? 
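The pattern-type range check earlier in this hunk sign-extends the raw start/end bits before comparing them, so that a signed range like `-1..=0` is not reported as wrapping. A self-contained sketch of that check; the `sign_extend` helper below mirrors what a width-aware sign extension does for a scalar of `size_bits` width, and is an illustrative stand-in rather than rustc's implementation:

/// Interpret the low `size_bits` bits of `value` as a signed integer.
fn sign_extend(value: u128, size_bits: u32) -> i128 {
    let shift = 128 - size_bits;
    ((value << shift) as i128) >> shift
}

/// A pattern range wraps if its end is below its start once both bounds are
/// interpreted with the correct signedness.
fn range_wraps(start: u128, end: u128, size_bits: u32, signed: bool) -> bool {
    if signed {
        sign_extend(end, size_bits) < sign_extend(start, size_bits)
    } else {
        end < start
    }
}

fn main() {
    // `-1..=0` over 8 bits: the start is stored as 0xff but the range does not wrap.
    assert!(!range_wraps(0xff, 0x00, 8, true));
    // The same bits read as unsigned *would* wrap (255..=0).
    assert!(range_wraps(0xff, 0x00, 8, false));
}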
+ } + ty::Str => { + let element = scalar(Int(I8, false)); + map_layout(cx.calc.array_like(&element, None).map(|mut layout| { + // another random value + layout.randomization_seed = Hash64::new(0xc1325f37d127be22); + layout + }))? } - ty::Str => tcx.mk_layout(LayoutData { - variants: Variants::Single { index: FIRST_VARIANT }, - fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 }, - backend_repr: BackendRepr::Memory { sized: false }, - largest_niche: None, - uninhabited: false, - align: dl.i8_align, - size: Size::ZERO, - max_repr_align: None, - unadjusted_abi_align: dl.i8_align.abi, - // another random value - randomization_seed: Hash64::new(0xc1325f37d127be22), - }), // Odd unit types. - ty::FnDef(..) => univariant(IndexSlice::empty(), StructKind::AlwaysSized)?, - ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => { - let mut unit = - univariant_uninterned(cx, ty, IndexSlice::empty(), StructKind::AlwaysSized)?; - match unit.backend_repr { - BackendRepr::Memory { ref mut sized } => *sized = false, - _ => bug!(), - } - tcx.mk_layout(unit) + ty::FnDef(..) | ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => { + let sized = matches!(ty.kind(), ty::FnDef(..)); + tcx.mk_layout(LayoutData::unit(cx, sized)) } - ty::Coroutine(def_id, args) => coroutine_layout(cx, ty, def_id, args)?, + ty::Coroutine(def_id, args) => { + use rustc_middle::ty::layout::PrimitiveExt as _; + + let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else { + return Err(error(cx, LayoutError::Unknown(ty))); + }; + + let local_layouts = info + .field_tys + .iter() + .map(|local| { + let field_ty = EarlyBinder::bind(local.ty); + let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty.instantiate(tcx, args)); + cx.spanned_layout_of(uninit_ty, local.source_info.span) + }) + .try_collect::<IndexVec<_, _>>()?; + + let prefix_layouts = args + .as_coroutine() + .prefix_tys() + .iter() + .map(|ty| cx.layout_of(ty)) + .try_collect::<IndexVec<_, _>>()?; - ty::Closure(_, args) => { - let tys = args.as_closure().upvar_tys(); - univariant( - &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?, - StructKind::AlwaysSized, - )? + let layout = cx + .calc + .coroutine( + &local_layouts, + prefix_layouts, + &info.variant_fields, + &info.storage_conflicts, + |tag| TyAndLayout { + ty: tag.primitive().to_ty(tcx), + layout: tcx.mk_layout(LayoutData::scalar(cx, tag)), + }, + ) + .map(|mut layout| { + // this is similar to how ReprOptions populates its field_shuffle_seed + layout.randomization_seed = tcx.def_path_hash(def_id).0.to_smaller_hash(); + debug!("coroutine layout ({:?}): {:#?}", ty, layout); + layout + }); + map_layout(layout)? } + ty::Closure(_, args) => univariant(args.as_closure().upvar_tys(), StructKind::AlwaysSized)?, + ty::CoroutineClosure(_, args) => { - let tys = args.as_coroutine_closure().upvar_tys(); - univariant( - &tys.iter().map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?, - StructKind::AlwaysSized, - )? + univariant(args.as_coroutine_closure().upvar_tys(), StructKind::AlwaysSized)? } ty::Tuple(tys) => { let kind = if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized }; - univariant(&tys.iter().map(|k| cx.layout_of(k)).try_collect::<IndexVec<_, _>>()?, kind)? + univariant(tys, kind)? } // SIMD vector types. ty::Adt(def, args) if def.repr().simd() => { - if !def.is_struct() { - // Should have yielded E0517 by now. 
- let guar = tcx - .dcx() - .delayed_bug("#[repr(simd)] was applied to an ADT that is not a struct"); - return Err(error(cx, LayoutError::ReferencesError(guar))); - } - - let fields = &def.non_enum_variant().fields; - - // Supported SIMD vectors are homogeneous ADTs with at least one field: + // Supported SIMD vectors are ADTs with a single array field: // - // * #[repr(simd)] struct S(T, T, T, T); - // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T } // * #[repr(simd)] struct S([T; 4]) // // where T is a primitive scalar (integer/float/pointer). - - // SIMD vectors with zero fields are not supported. - // (should be caught by typeck) - if fields.is_empty() { - tcx.dcx().emit_fatal(ZeroLengthSimdType { ty }) - } - - // Type of the first ADT field: - let f0_ty = fields[FieldIdx::ZERO].ty(tcx, args); - - // Heterogeneous SIMD vectors are not supported: - // (should be caught by typeck) - for fi in fields { - if fi.ty(tcx, args) != f0_ty { - let guar = tcx.dcx().delayed_bug( - "#[repr(simd)] was applied to an ADT with heterogeneous field type", - ); - return Err(error(cx, LayoutError::ReferencesError(guar))); - } - } - - // The element type and number of elements of the SIMD vector - // are obtained from: - // - // * the element type and length of the single array field, if - // the first field is of array type, or - // - // * the homogeneous field type and the number of fields. - let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() { - // First ADT field is an array: - - // SIMD vectors with multiple array fields are not supported: - // Can't be caught by typeck with a generic simd type. - if def.non_enum_variant().fields.len() != 1 { - tcx.dcx().emit_fatal(MultipleArrayFieldsSimdType { ty }); - } - - // Extract the number of elements from the layout of the array field: - let FieldsShape::Array { count, .. } = cx.layout_of(f0_ty)?.layout.fields() else { - return Err(error(cx, LayoutError::Unknown(ty))); - }; - - (*e_ty, *count, true) - } else { - // First ADT field is not an array: - (f0_ty, def.non_enum_variant().fields.len() as _, false) + let Some(ty::Array(e_ty, e_len)) = def + .is_struct() + .then(|| &def.variant(FIRST_VARIANT).fields) + .filter(|fields| fields.len() == 1) + .map(|fields| *fields[FieldIdx::ZERO].ty(tcx, args).kind()) + else { + // Invalid SIMD types should have been caught by typeck by now. + let guar = tcx.dcx().delayed_bug("#[repr(simd)] was applied to an invalid ADT"); + return Err(error(cx, LayoutError::ReferencesError(guar))); }; - // SIMD vectors of zero length are not supported. - // Additionally, lengths are capped at 2^16 as a fixed maximum backends must - // support. - // - // Can't be caught in typeck if the array length is generic. - if e_len == 0 { - tcx.dcx().emit_fatal(ZeroLengthSimdType { ty }); - } else if e_len > MAX_SIMD_LANES { - tcx.dcx().emit_fatal(OversizedSimdType { ty, max_lanes: MAX_SIMD_LANES }); - } + let e_len = extract_const_value(cx, ty, e_len)? + .try_to_target_usize(tcx) + .ok_or_else(|| error(cx, LayoutError::Unknown(ty)))?; - // Compute the ABI of the element type: let e_ly = cx.layout_of(e_ty)?; - let BackendRepr::Scalar(e_abi) = e_ly.backend_repr else { - // This error isn't caught in typeck, e.g., if - // the element type of the vector is generic. 
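The rewritten `#[repr(simd)]` arm above folds the old field-by-field checks into one chained extraction: the ADT must be a struct with exactly one field, and that field must be an array. A rough standalone sketch of the same `bool::then`/`filter`/let-else shape over hypothetical miniature types (`Adt`, `FieldTy`):

#[derive(Clone, Debug, PartialEq)]
enum FieldTy {
    Array { element: &'static str, len: u64 },
    Scalar(&'static str),
}

struct Adt {
    is_struct: bool,
    fields: Vec<FieldTy>,
}

/// Returns the element type and lane count of a `#[repr(simd)]`-like ADT,
/// or `None` if it is not a struct with a single array field.
fn simd_element(def: &Adt) -> Option<(&'static str, u64)> {
    let Some(FieldTy::Array { element, len }) = def
        .is_struct
        .then(|| &def.fields)
        .filter(|fields| fields.len() == 1)
        .map(|fields| fields[0].clone())
    else {
        return None;
    };
    Some((element, len))
}

fn main() {
    let ok = Adt { is_struct: true, fields: vec![FieldTy::Array { element: "f32", len: 4 }] };
    let bad = Adt { is_struct: true, fields: vec![FieldTy::Scalar("f32"), FieldTy::Scalar("f32")] };
    assert_eq!(simd_element(&ok), Some(("f32", 4)));
    assert_eq!(simd_element(&bad), None);
}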
- tcx.dcx().emit_fatal(NonPrimitiveSimdType { ty, e_ty }); - }; - - // Compute the size and alignment of the vector: - let size = e_ly - .size - .checked_mul(e_len, dl) - .ok_or_else(|| error(cx, LayoutError::SizeOverflow(ty)))?; - - let (abi, align) = if def.repr().packed() && !e_len.is_power_of_two() { - // Non-power-of-two vectors have padding up to the next power-of-two. - // If we're a packed repr, remove the padding while keeping the alignment as close - // to a vector as possible. - ( - BackendRepr::Memory { sized: true }, - AbiAndPrefAlign { - abi: Align::max_aligned_factor(size), - pref: dl.llvmlike_vector_align(size).pref, - }, - ) - } else { - ( - BackendRepr::Vector { element: e_abi, count: e_len }, - dl.llvmlike_vector_align(size), - ) - }; - let size = size.align_to(align.abi); - - // Compute the placement of the vector fields: - let fields = if is_array { - FieldsShape::Arbitrary { offsets: [Size::ZERO].into(), memory_index: [0].into() } - } else { - FieldsShape::Array { stride: e_ly.size, count: e_len } - }; - tcx.mk_layout(LayoutData { - variants: Variants::Single { index: FIRST_VARIANT }, - fields, - backend_repr: abi, - largest_niche: e_ly.largest_niche, - uninhabited: false, - size, - align, - max_repr_align: None, - unadjusted_abi_align: align.abi, - randomization_seed: e_ly.randomization_seed.wrapping_add(Hash64::new(e_len)), - }) + map_layout(cx.calc.simd_type(e_ly, e_len, def.repr().packed()))? } // ADTs. @@ -603,11 +511,7 @@ fn layout_of_uncached<'tcx>( return Err(error(cx, LayoutError::ReferencesError(guar))); } - return Ok(tcx.mk_layout( - cx.calc - .layout_of_union(&def.repr(), &variants) - .map_err(|err| map_error(cx, ty, err))?, - )); + return map_layout(cx.calc.layout_of_union(&def.repr(), &variants)); } let get_discriminant_type = @@ -735,335 +639,6 @@ fn layout_of_uncached<'tcx>( }) } -/// Overlap eligibility and variant assignment for each CoroutineSavedLocal. -#[derive(Clone, Debug, PartialEq)] -enum SavedLocalEligibility { - Unassigned, - Assigned(VariantIdx), - Ineligible(Option<FieldIdx>), -} - -// When laying out coroutines, we divide our saved local fields into two -// categories: overlap-eligible and overlap-ineligible. -// -// Those fields which are ineligible for overlap go in a "prefix" at the -// beginning of the layout, and always have space reserved for them. -// -// Overlap-eligible fields are only assigned to one variant, so we lay -// those fields out for each variant and put them right after the -// prefix. -// -// Finally, in the layout details, we point to the fields from the -// variants they are assigned to. It is possible for some fields to be -// included in multiple variants. No field ever "moves around" in the -// layout; its offset is always the same. -// -// Also included in the layout are the upvars and the discriminant. -// These are included as fields on the "outer" layout; they are not part -// of any variant. - -/// Compute the eligibility and assignment of each local. -fn coroutine_saved_local_eligibility( - info: &CoroutineLayout<'_>, -) -> (DenseBitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) { - use SavedLocalEligibility::*; - - let mut assignments: IndexVec<CoroutineSavedLocal, SavedLocalEligibility> = - IndexVec::from_elem(Unassigned, &info.field_tys); - - // The saved locals not eligible for overlap. These will get - // "promoted" to the prefix of our coroutine. 
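The overlap machinery being removed from this file (the new coroutine arm above calls `cx.calc.coroutine(...)` instead, so the equivalent logic now lives behind the layout calculator) starts by assigning each saved local either to the one variant that uses it or to the overlap-ineligible prefix. A compact sketch of that first assignment pass, with plain `usize` indices standing in for `CoroutineSavedLocal` and `VariantIdx`:

#[derive(Clone, Copy, Debug, PartialEq)]
enum Eligibility {
    Unassigned,
    Assigned(usize), // the single variant this local is used in
    Ineligible,      // used from several variants: promote it to the prefix
}

// A local that is saved across more than one suspension point cannot share
// storage with anything, so it is pushed into the common prefix.
fn assign_locals(num_locals: usize, variant_fields: &[Vec<usize>]) -> Vec<Eligibility> {
    let mut assignments = vec![Eligibility::Unassigned; num_locals];
    for (variant, locals) in variant_fields.iter().enumerate() {
        for &local in locals {
            assignments[local] = match assignments[local] {
                Eligibility::Unassigned => Eligibility::Assigned(variant),
                Eligibility::Assigned(v) if v == variant => Eligibility::Assigned(v),
                _ => Eligibility::Ineligible,
            };
        }
    }
    assignments
}

fn main() {
    // Local 0 is live across both suspension points, locals 1 and 2 across one each.
    let variants = vec![vec![0, 1], vec![0, 2]];
    let expected = [Eligibility::Ineligible, Eligibility::Assigned(0), Eligibility::Assigned(1)];
    assert_eq!(assign_locals(3, &variants), expected);
}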
- let mut ineligible_locals = DenseBitSet::new_empty(info.field_tys.len()); - - // Figure out which of our saved locals are fields in only - // one variant. The rest are deemed ineligible for overlap. - for (variant_index, fields) in info.variant_fields.iter_enumerated() { - for local in fields { - match assignments[*local] { - Unassigned => { - assignments[*local] = Assigned(variant_index); - } - Assigned(idx) => { - // We've already seen this local at another suspension - // point, so it is no longer a candidate. - trace!( - "removing local {:?} in >1 variant ({:?}, {:?})", - local, variant_index, idx - ); - ineligible_locals.insert(*local); - assignments[*local] = Ineligible(None); - } - Ineligible(_) => {} - } - } - } - - // Next, check every pair of eligible locals to see if they - // conflict. - for local_a in info.storage_conflicts.rows() { - let conflicts_a = info.storage_conflicts.count(local_a); - if ineligible_locals.contains(local_a) { - continue; - } - - for local_b in info.storage_conflicts.iter(local_a) { - // local_a and local_b are storage live at the same time, therefore they - // cannot overlap in the coroutine layout. The only way to guarantee - // this is if they are in the same variant, or one is ineligible - // (which means it is stored in every variant). - if ineligible_locals.contains(local_b) || assignments[local_a] == assignments[local_b] { - continue; - } - - // If they conflict, we will choose one to make ineligible. - // This is not always optimal; it's just a greedy heuristic that - // seems to produce good results most of the time. - let conflicts_b = info.storage_conflicts.count(local_b); - let (remove, other) = - if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) }; - ineligible_locals.insert(remove); - assignments[remove] = Ineligible(None); - trace!("removing local {:?} due to conflict with {:?}", remove, other); - } - } - - // Count the number of variants in use. If only one of them, then it is - // impossible to overlap any locals in our layout. In this case it's - // always better to make the remaining locals ineligible, so we can - // lay them out with the other locals in the prefix and eliminate - // unnecessary padding bytes. - { - let mut used_variants = DenseBitSet::new_empty(info.variant_fields.len()); - for assignment in &assignments { - if let Assigned(idx) = assignment { - used_variants.insert(*idx); - } - } - if used_variants.count() < 2 { - for assignment in assignments.iter_mut() { - *assignment = Ineligible(None); - } - ineligible_locals.insert_all(); - } - } - - // Write down the order of our locals that will be promoted to the prefix. - { - for (idx, local) in ineligible_locals.iter().enumerate() { - assignments[local] = Ineligible(Some(FieldIdx::from_usize(idx))); - } - } - debug!("coroutine saved local assignments: {:?}", assignments); - - (ineligible_locals, assignments) -} - -/// Compute the full coroutine layout. 
-fn coroutine_layout<'tcx>( - cx: &LayoutCx<'tcx>, - ty: Ty<'tcx>, - def_id: hir::def_id::DefId, - args: GenericArgsRef<'tcx>, -) -> Result<Layout<'tcx>, &'tcx LayoutError<'tcx>> { - use SavedLocalEligibility::*; - let tcx = cx.tcx(); - let instantiate_field = |ty: Ty<'tcx>| EarlyBinder::bind(ty).instantiate(tcx, args); - - let Some(info) = tcx.coroutine_layout(def_id, args.as_coroutine().kind_ty()) else { - return Err(error(cx, LayoutError::Unknown(ty))); - }; - let (ineligible_locals, assignments) = coroutine_saved_local_eligibility(info); - - // Build a prefix layout, including "promoting" all ineligible - // locals as part of the prefix. We compute the layout of all of - // these fields at once to get optimal packing. - let tag_index = args.as_coroutine().prefix_tys().len(); - - // `info.variant_fields` already accounts for the reserved variants, so no need to add them. - let max_discr = (info.variant_fields.len() - 1) as u128; - let discr_int = abi::Integer::fit_unsigned(max_discr); - let tag = Scalar::Initialized { - value: Primitive::Int(discr_int, /* signed = */ false), - valid_range: WrappingRange { start: 0, end: max_discr }, - }; - let tag_layout = TyAndLayout { - ty: discr_int.to_ty(tcx, /* signed = */ false), - layout: tcx.mk_layout(LayoutData::scalar(cx, tag)), - }; - - let promoted_layouts = ineligible_locals.iter().map(|local| { - let field_ty = instantiate_field(info.field_tys[local].ty); - let uninit_ty = Ty::new_maybe_uninit(tcx, field_ty); - cx.spanned_layout_of(uninit_ty, info.field_tys[local].source_info.span) - }); - let prefix_layouts = args - .as_coroutine() - .prefix_tys() - .iter() - .map(|ty| cx.layout_of(ty)) - .chain(iter::once(Ok(tag_layout))) - .chain(promoted_layouts) - .try_collect::<IndexVec<_, _>>()?; - let prefix = univariant_uninterned(cx, ty, &prefix_layouts, StructKind::AlwaysSized)?; - - let (prefix_size, prefix_align) = (prefix.size, prefix.align); - - // Split the prefix layout into the "outer" fields (upvars and - // discriminant) and the "promoted" fields. Promoted fields will - // get included in each variant that requested them in - // CoroutineLayout. - debug!("prefix = {:#?}", prefix); - let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { - FieldsShape::Arbitrary { mut offsets, memory_index } => { - let mut inverse_memory_index = memory_index.invert_bijective_mapping(); - - // "a" (`0..b_start`) and "b" (`b_start..`) correspond to - // "outer" and "promoted" fields respectively. - let b_start = FieldIdx::from_usize(tag_index + 1); - let offsets_b = IndexVec::from_raw(offsets.raw.split_off(b_start.as_usize())); - let offsets_a = offsets; - - // Disentangle the "a" and "b" components of `inverse_memory_index` - // by preserving the order but keeping only one disjoint "half" each. - // FIXME(eddyb) build a better abstraction for permutations, if possible. 
- let inverse_memory_index_b: IndexVec<u32, FieldIdx> = inverse_memory_index - .iter() - .filter_map(|&i| i.as_u32().checked_sub(b_start.as_u32()).map(FieldIdx::from_u32)) - .collect(); - inverse_memory_index.raw.retain(|&i| i < b_start); - let inverse_memory_index_a = inverse_memory_index; - - // Since `inverse_memory_index_{a,b}` each only refer to their - // respective fields, they can be safely inverted - let memory_index_a = inverse_memory_index_a.invert_bijective_mapping(); - let memory_index_b = inverse_memory_index_b.invert_bijective_mapping(); - - let outer_fields = - FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a }; - (outer_fields, offsets_b, memory_index_b) - } - _ => bug!(), - }; - - let mut size = prefix.size; - let mut align = prefix.align; - let variants = info - .variant_fields - .iter_enumerated() - .map(|(index, variant_fields)| { - // Only include overlap-eligible fields when we compute our variant layout. - let variant_only_tys = variant_fields - .iter() - .filter(|local| match assignments[**local] { - Unassigned => bug!(), - Assigned(v) if v == index => true, - Assigned(_) => bug!("assignment does not match variant"), - Ineligible(_) => false, - }) - .map(|local| { - let field_ty = instantiate_field(info.field_tys[*local].ty); - Ty::new_maybe_uninit(tcx, field_ty) - }); - - let mut variant = univariant_uninterned( - cx, - ty, - &variant_only_tys.map(|ty| cx.layout_of(ty)).try_collect::<IndexVec<_, _>>()?, - StructKind::Prefixed(prefix_size, prefix_align.abi), - )?; - variant.variants = Variants::Single { index }; - - let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else { - bug!(); - }; - - // Now, stitch the promoted and variant-only fields back together in - // the order they are mentioned by our CoroutineLayout. - // Because we only use some subset (that can differ between variants) - // of the promoted fields, we can't just pick those elements of the - // `promoted_memory_index` (as we'd end up with gaps). - // So instead, we build an "inverse memory_index", as if all of the - // promoted fields were being used, but leave the elements not in the - // subset as `INVALID_FIELD_IDX`, which we can filter out later to - // obtain a valid (bijective) mapping. - const INVALID_FIELD_IDX: FieldIdx = FieldIdx::MAX; - debug_assert!(variant_fields.next_index() <= INVALID_FIELD_IDX); - - let mut combined_inverse_memory_index = IndexVec::from_elem_n( - INVALID_FIELD_IDX, - promoted_memory_index.len() + memory_index.len(), - ); - let mut offsets_and_memory_index = iter::zip(offsets, memory_index); - let combined_offsets = variant_fields - .iter_enumerated() - .map(|(i, local)| { - let (offset, memory_index) = match assignments[*local] { - Unassigned => bug!(), - Assigned(_) => { - let (offset, memory_index) = offsets_and_memory_index.next().unwrap(); - (offset, promoted_memory_index.len() as u32 + memory_index) - } - Ineligible(field_idx) => { - let field_idx = field_idx.unwrap(); - (promoted_offsets[field_idx], promoted_memory_index[field_idx]) - } - }; - combined_inverse_memory_index[memory_index] = i; - offset - }) - .collect(); - - // Remove the unused slots and invert the mapping to obtain the - // combined `memory_index` (also see previous comment). 
- combined_inverse_memory_index.raw.retain(|&i| i != INVALID_FIELD_IDX); - let combined_memory_index = combined_inverse_memory_index.invert_bijective_mapping(); - - variant.fields = FieldsShape::Arbitrary { - offsets: combined_offsets, - memory_index: combined_memory_index, - }; - - size = size.max(variant.size); - align = align.max(variant.align); - Ok(variant) - }) - .try_collect::<IndexVec<VariantIdx, _>>()?; - - size = size.align_to(align.abi); - - let uninhabited = prefix.uninhabited || variants.iter().all(|v| v.is_uninhabited()); - let abi = BackendRepr::Memory { sized: true }; - - // this is similar to how ReprOptions populates its field_shuffle_seed - let def_hash = tcx.def_path_hash(def_id).0.to_smaller_hash(); - - let layout = tcx.mk_layout(LayoutData { - variants: Variants::Multiple { - tag, - tag_encoding: TagEncoding::Direct, - tag_field: tag_index, - variants, - }, - fields: outer_fields, - backend_repr: abi, - // Suppress niches inside coroutines. If the niche is inside a field that is aliased (due to - // self-referentiality), getting the discriminant can cause aliasing violations. - // `UnsafeCell` blocks niches for the same reason, but we don't yet have `UnsafePinned` that - // would do the same for us here. - // See <https://github.com/rust-lang/rust/issues/63818>, <https://github.com/rust-lang/miri/issues/3780>. - // FIXME: Remove when <https://github.com/rust-lang/rust/issues/125735> is implemented and aliased coroutine fields are wrapped in `UnsafePinned`. - largest_niche: None, - uninhabited, - size, - align, - max_repr_align: None, - unadjusted_abi_align: align.abi, - randomization_seed: def_hash, - }); - debug!("coroutine layout ({:?}): {:#?}", ty, layout); - Ok(layout) -} - fn record_layout_for_printing<'tcx>(cx: &LayoutCx<'tcx>, layout: TyAndLayout<'tcx>) { // Ignore layouts that are done with non-empty environments or // non-monomorphic layouts, as the user only wants to see the stuff diff --git a/compiler/rustc_ty_utils/src/layout/invariant.rs b/compiler/rustc_ty_utils/src/layout/invariant.rs index c695e2887fd..7423a156a21 100644 --- a/compiler/rustc_ty_utils/src/layout/invariant.rs +++ b/compiler/rustc_ty_utils/src/layout/invariant.rs @@ -234,7 +234,7 @@ pub(super) fn layout_sanity_check<'tcx>(cx: &LayoutCx<'tcx>, layout: &TyAndLayou "`ScalarPair` second field with bad ABI in {inner:#?}", ); } - BackendRepr::Vector { element, count } => { + BackendRepr::SimdVector { element, count } => { let align = layout.align.abi; let size = layout.size; let element_align = element.align(cx).abi; diff --git a/compiler/rustc_ty_utils/src/lib.rs b/compiler/rustc_ty_utils/src/lib.rs index 8be1611bb9a..143b7d53880 100644 --- a/compiler/rustc_ty_utils/src/lib.rs +++ b/compiler/rustc_ty_utils/src/lib.rs @@ -6,6 +6,7 @@ // tidy-alphabetical-start #![allow(internal_features)] +#![cfg_attr(doc, recursion_limit = "256")] // FIXME(nnethercote): will be removed by #124141 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")] #![doc(rust_logo)] #![feature(assert_matches)] @@ -16,7 +17,6 @@ #![feature(let_chains)] #![feature(never_type)] #![feature(rustdoc_internals)] -#![warn(unreachable_pub)] // tidy-alphabetical-end use rustc_middle::query::Providers; diff --git a/compiler/rustc_ty_utils/src/opaque_types.rs b/compiler/rustc_ty_utils/src/opaque_types.rs index 98881905bcf..3aad97d86cc 100644 --- a/compiler/rustc_ty_utils/src/opaque_types.rs +++ b/compiler/rustc_ty_utils/src/opaque_types.rs @@ -1,8 +1,8 @@ use rustc_data_structures::fx::FxHashSet; use 
rustc_hir::def::DefKind; use rustc_hir::def_id::LocalDefId; +use rustc_hir::intravisit; use rustc_hir::intravisit::Visitor; -use rustc_hir::{CRATE_HIR_ID, intravisit}; use rustc_middle::bug; use rustc_middle::query::Providers; use rustc_middle::ty::util::{CheckRegions, NotUniqueParam}; @@ -30,14 +30,21 @@ enum CollectionMode { /// For impl trait in assoc types we only permit collecting them from /// associated types of the same impl block. ImplTraitInAssocTypes, - TypeAliasImplTraitTransition, + /// When collecting for an explicit `#[define_opaque]` attribute, find all TAITs + Taits, + /// The default case, only collect RPITs and AsyncFn return types, as these are + /// always defined by the current item. + RpitAndAsyncFnOnly, } impl<'tcx> OpaqueTypeCollector<'tcx> { fn new(tcx: TyCtxt<'tcx>, item: LocalDefId) -> Self { - let mode = match tcx.def_kind(tcx.local_parent(item)) { - DefKind::Impl { of_trait: true } => CollectionMode::ImplTraitInAssocTypes, - _ => CollectionMode::TypeAliasImplTraitTransition, + let mode = match tcx.def_kind(item) { + DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy => { + CollectionMode::ImplTraitInAssocTypes + } + DefKind::TyAlias => CollectionMode::Taits, + _ => CollectionMode::RpitAndAsyncFnOnly, }; Self { tcx, opaques: Vec::new(), item, seen: Default::default(), span: None, mode } } @@ -73,40 +80,6 @@ impl<'tcx> OpaqueTypeCollector<'tcx> { } } - /// Returns `true` if `opaque_hir_id` is a sibling or a child of a sibling of `self.item`. - /// - /// Example: - /// ```ignore UNSOLVED (is this a bug?) - /// # #![feature(type_alias_impl_trait)] - /// pub mod foo { - /// pub mod bar { - /// pub trait Bar { /* ... */ } - /// pub type Baz = impl Bar; - /// - /// # impl Bar for () {} - /// fn f1() -> Baz { /* ... */ } - /// } - /// fn f2() -> bar::Baz { /* ... */ } - /// } - /// ``` - /// - /// and `opaque_def_id` is the `DefId` of the definition of the opaque type `Baz`. - /// For the above example, this function returns `true` for `f1` and `false` for `f2`. - #[instrument(level = "trace", skip(self), ret)] - fn check_tait_defining_scope(&self, opaque_def_id: LocalDefId) -> bool { - let mut hir_id = self.tcx.local_def_id_to_hir_id(self.item); - let opaque_hir_id = self.tcx.local_def_id_to_hir_id(opaque_def_id); - - // Named opaque types can be defined by any siblings or children of siblings. - let scope = self.tcx.hir_get_defining_scope(opaque_hir_id); - // We walk up the node tree until we hit the root or the scope of the opaque type. - while hir_id != scope && hir_id != CRATE_HIR_ID { - hir_id = self.tcx.hir_get_parent_item(hir_id).into(); - } - // Syntactically, we are allowed to define the concrete type if: - hir_id == scope - } - #[instrument(level = "trace", skip(self))] fn collect_taits_declared_in_body(&mut self) { let body = self.tcx.hir_body_owned_by(self.item).value; @@ -139,18 +112,31 @@ impl<'tcx> OpaqueTypeCollector<'tcx> { } // TAITs outside their defining scopes are ignored. - let origin = self.tcx.local_opaque_ty_origin(alias_ty.def_id.expect_local()); - trace!(?origin); - match origin { + match self.tcx.local_opaque_ty_origin(alias_ty.def_id.expect_local()) { rustc_hir::OpaqueTyOrigin::FnReturn { .. } | rustc_hir::OpaqueTyOrigin::AsyncFn { .. } => {} - rustc_hir::OpaqueTyOrigin::TyAlias { in_assoc_ty, .. } => { - if !in_assoc_ty && !self.check_tait_defining_scope(alias_ty.def_id.expect_local()) { - return; + rustc_hir::OpaqueTyOrigin::TyAlias { in_assoc_ty, .. 
} => match self.mode { + // If we are collecting opaques in an assoc method, we are only looking at assoc types + // mentioned in the assoc method and only at opaques defined in there. We do not + // want to collect TAITs. + CollectionMode::ImplTraitInAssocTypes => { + if !in_assoc_ty { + return; + } } - } + // If we are collecting opaques referenced from a `define_opaque` attribute, we + // do not want to look at opaques defined in associated types. Those can only be + // defined by methods on the same impl. + CollectionMode::Taits => { + if in_assoc_ty { + return; + } + } + CollectionMode::RpitAndAsyncFnOnly => return, + }, } + trace!(?alias_ty, "adding"); self.opaques.push(alias_ty.def_id.expect_local()); let parent_count = self.tcx.generics_of(alias_ty.def_id).parent_count; @@ -192,6 +178,32 @@ impl<'tcx> OpaqueTypeCollector<'tcx> { } } } + + /// Checks the `#[define_opaque]` attributes on items and collects opaques to define + /// from the referenced types. + #[instrument(level = "trace", skip(self))] + fn collect_taits_from_defines_attr(&mut self) { + let hir_id = self.tcx.local_def_id_to_hir_id(self.item); + if !hir_id.is_owner() { + return; + } + let Some(defines) = self.tcx.hir_attr_map(hir_id.owner).define_opaque else { + return; + }; + for &(span, define) in defines { + trace!(?define); + let mode = std::mem::replace(&mut self.mode, CollectionMode::Taits); + let n = self.opaques.len(); + super::sig_types::walk_types(self.tcx, define, self); + if n == self.opaques.len() { + self.tcx.dcx().span_err(span, "item does not contain any opaque types"); + } + self.mode = mode; + } + // Allow using `#[define_opaque]` on assoc methods and type aliases to override the default collection mode in + // case it was capturing too much. + self.mode = CollectionMode::RpitAndAsyncFnOnly; + } } impl<'tcx> super::sig_types::SpannedTypeVisitor<'tcx> for OpaqueTypeCollector<'tcx> { @@ -210,6 +222,7 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> { self.visit_opaque_ty(alias_ty); } // Skips type aliases, as they are meant to be transparent. + // FIXME(type_alias_impl_trait): can we require mentioning nested type aliases explicitly? ty::Alias(ty::Weak, alias_ty) if alias_ty.def_id.is_local() => { self.tcx .type_of(alias_ty.def_id) @@ -283,28 +296,6 @@ impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for OpaqueTypeCollector<'tcx> { self.visit_opaque_ty(alias_ty); } } - ty::Adt(def, _) if def.did().is_local() => { - if let CollectionMode::ImplTraitInAssocTypes = self.mode { - return; - } - if !self.seen.insert(def.did().expect_local()) { - return; - } - for variant in def.variants().iter() { - for field in variant.fields.iter() { - // Don't use the `ty::Adt` args, we either - // * found the opaque in the args - // * will find the opaque in the uninstantiated fields - // The only other situation that can occur is that after instantiating, - // some projection resolves to an opaque that we would have otherwise - // not found. While we could instantiate and walk those, that would mean we - // would have to walk all generic parameters of an Adt, which can quickly - // degenerate into looking at an exponential number of types.
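For context on the new `Taits` mode and `collect_taits_from_defines_attr` above: a type-alias-impl-trait is only treated as defined by items that name it in the attribute this collector walks. A rough usage sketch; this is an unstable nightly feature, and the exact attribute spelling below is an assumption based on the `#[define_opaque]` attribute referenced in this diff:

// Requires a nightly toolchain; the attribute syntax here is assumed.
#![feature(type_alias_impl_trait)]

type Counter = impl Iterator<Item = u32>;

// The attribute lists the opaque types this function may define; collecting
// opaques from the types it mentions is what the code above implements.
#[define_opaque(Counter)]
fn make_counter() -> Counter {
    (0u32..3).map(|i| i * 2)
}

fn main() {
    assert_eq!(make_counter().collect::<Vec<_>>(), vec![0, 2, 4]);
}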
- let ty = self.tcx.type_of(field.did).instantiate_identity(); - self.visit_spanned(self.tcx.def_span(field.did), ty); - } - } - } _ => trace!(kind=?t.kind()), } } @@ -317,7 +308,9 @@ fn opaque_types_defined_by<'tcx>( let kind = tcx.def_kind(item); trace!(?kind); let mut collector = OpaqueTypeCollector::new(tcx, item); + collector.collect_taits_from_defines_attr(); super::sig_types::walk_types(tcx, item, &mut collector); + match kind { DefKind::AssocFn | DefKind::Fn @@ -350,8 +343,7 @@ fn opaque_types_defined_by<'tcx>( | DefKind::GlobalAsm | DefKind::Impl { .. } | DefKind::SyntheticCoroutineBody => {} - // Closures and coroutines are type checked with their parent, so we need to allow all - // opaques from the closure signature *and* from the parent body. + // Closures and coroutines are type checked with their parent DefKind::Closure | DefKind::InlineConst => { collector.opaques.extend(tcx.opaque_types_defined_by(tcx.local_parent(item))); } diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index 8ed45b4e541..bb61f4bee66 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -6,13 +6,11 @@ use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::query::Providers; use rustc_middle::ty::fold::fold_regions; -use rustc_middle::ty::{ - self, EarlyBinder, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, Upcast, -}; +use rustc_middle::ty::{self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitable, TypeVisitor, Upcast}; use rustc_span::DUMMY_SP; use rustc_span::def_id::{CRATE_DEF_ID, DefId, LocalDefId}; use rustc_trait_selection::traits; -use tracing::{debug, instrument}; +use tracing::instrument; #[instrument(level = "debug", skip(tcx), ret)] fn sized_constraint_for_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<'tcx>> { @@ -37,8 +35,6 @@ fn sized_constraint_for_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<' | Never | Dynamic(_, _, ty::DynStar) => None, - UnsafeBinder(_) => todo!(), - // these are never sized Str | Slice(..) | Dynamic(_, _, ty::Dyn) | Foreign(..) => Some(ty), @@ -52,9 +48,14 @@ fn sized_constraint_for_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<Ty<' sized_constraint_for_ty(tcx, ty) }), - // these can be sized or unsized + // these can be sized or unsized. Param(..) | Alias(..) | Error(_) => Some(ty), + // We cannot instantiate the binder, so just return the *original* type back, + // but only if the inner type has a sized constraint. Thus we skip the binder, + // but don't actually use the result from `sized_constraint_for_ty`. + UnsafeBinder(inner_ty) => sized_constraint_for_ty(tcx, inner_ty.skip_binder()).map(|_| ty), + Placeholder(..) | Bound(..) | Infer(..) => { bug!("unexpected type `{ty:?}` in sized_constraint_for_ty") } @@ -257,57 +258,6 @@ fn param_env_normalized_for_post_analysis(tcx: TyCtxt<'_>, def_id: DefId) -> ty: typing_env.with_post_analysis_normalized(tcx).param_env } -/// If the given trait impl enables exploiting the former order dependence of trait objects, -/// returns its self type; otherwise, returns `None`. -/// -/// See [`ty::ImplOverlapKind::FutureCompatOrderDepTraitObjects`] for more details. 
-#[instrument(level = "debug", skip(tcx))] -fn self_ty_of_trait_impl_enabling_order_dep_trait_object_hack( - tcx: TyCtxt<'_>, - def_id: DefId, -) -> Option<EarlyBinder<'_, Ty<'_>>> { - let impl_ = - tcx.impl_trait_header(def_id).unwrap_or_else(|| bug!("called on inherent impl {def_id:?}")); - - let trait_ref = impl_.trait_ref.skip_binder(); - debug!(?trait_ref); - - let is_marker_like = impl_.polarity == ty::ImplPolarity::Positive - && tcx.associated_item_def_ids(trait_ref.def_id).is_empty(); - - // Check whether these impls would be ok for a marker trait. - if !is_marker_like { - debug!("not marker-like!"); - return None; - } - - // impl must be `impl Trait for dyn Marker1 + Marker2 + ...` - if trait_ref.args.len() != 1 { - debug!("impl has args!"); - return None; - } - - let predicates = tcx.predicates_of(def_id); - if predicates.parent.is_some() || !predicates.predicates.is_empty() { - debug!(?predicates, "impl has predicates!"); - return None; - } - - let self_ty = trait_ref.self_ty(); - let self_ty_matches = match self_ty.kind() { - ty::Dynamic(data, re, _) if re.is_static() => data.principal().is_none(), - _ => false, - }; - - if self_ty_matches { - debug!("MATCHES!"); - Some(EarlyBinder::bind(self_ty)) - } else { - debug!("non-matching self type"); - None - } -} - /// Check if a function is async. fn asyncness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Asyncness { let node = tcx.hir_node_by_def_id(def_id); @@ -367,7 +317,6 @@ pub(crate) fn provide(providers: &mut Providers) { adt_sized_constraint, param_env, param_env_normalized_for_post_analysis, - self_ty_of_trait_impl_enabling_order_dep_trait_object_hack, defaultness, unsizing_params_for_adt, ..*providers diff --git a/compiler/rustc_type_ir/Cargo.toml b/compiler/rustc_type_ir/Cargo.toml index d8184da927c..4adf7157926 100644 --- a/compiler/rustc_type_ir/Cargo.toml +++ b/compiler/rustc_type_ir/Cargo.toml @@ -16,7 +16,7 @@ rustc_macros = { path = "../rustc_macros", optional = true } rustc_serialize = { path = "../rustc_serialize", optional = true } rustc_span = { path = "../rustc_span", optional = true } rustc_type_ir_macros = { path = "../rustc_type_ir_macros" } -smallvec = { version = "1.8.1", default-features = false } +smallvec = { version = "1.8.1", default-features = false, features = ["const_generics"] } thin-vec = "0.2.12" tracing = "0.1" # tidy-alphabetical-end @@ -33,6 +33,3 @@ nightly = [ "rustc_index/nightly", "rustc_ast_ir/nightly", ] - -[lints.rust] -unexpected_cfgs = { level = "warn", check-cfg = ['cfg(bootstrap)'] } diff --git a/compiler/rustc_type_ir/src/infer_ctxt.rs b/compiler/rustc_type_ir/src/infer_ctxt.rs index c9b636132f8..d5edc9610a4 100644 --- a/compiler/rustc_type_ir/src/infer_ctxt.rs +++ b/compiler/rustc_type_ir/src/infer_ctxt.rs @@ -151,7 +151,7 @@ pub trait InferCtxtLike: Sized { value: ty::Binder<Self::Interner, T>, ) -> T; - fn enter_forall<T: TypeFoldable<Self::Interner> + Copy, U>( + fn enter_forall<T: TypeFoldable<Self::Interner>, U>( &self, value: ty::Binder<Self::Interner, T>, f: impl FnOnce(T) -> U, diff --git a/compiler/rustc_type_ir/src/inherent.rs b/compiler/rustc_type_ir/src/inherent.rs index 9277226b718..d4134bdf3a7 100644 --- a/compiler/rustc_type_ir/src/inherent.rs +++ b/compiler/rustc_type_ir/src/inherent.rs @@ -462,8 +462,6 @@ pub trait Predicate<I: Interner<Predicate = Self>>: { fn as_clause(self) -> Option<I::Clause>; - fn is_coinductive(self, interner: I) -> bool; - // FIXME: Eventually uplift the impl out of rustc and make this defaulted. 
fn allow_normalization(self) -> bool; } diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index aae2d2e96b9..e765cb66d00 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -189,10 +189,10 @@ pub trait Interner: type Features: Features<Self>; fn features(self) -> Self::Features; - fn bound_coroutine_hidden_types( + fn coroutine_hidden_types( self, def_id: Self::DefId, - ) -> impl IntoIterator<Item = ty::EarlyBinder<Self, ty::Binder<Self, Self::Ty>>>; + ) -> ty::EarlyBinder<Self, ty::Binder<Self, Self::Tys>>; fn fn_sig( self, @@ -279,6 +279,8 @@ pub trait Interner: fn trait_is_auto(self, trait_def_id: Self::DefId) -> bool; + fn trait_is_coinductive(self, trait_def_id: Self::DefId) -> bool; + fn trait_is_alias(self, trait_def_id: Self::DefId) -> bool; fn trait_is_dyn_compatible(self, trait_def_id: Self::DefId) -> bool; diff --git a/compiler/rustc_type_ir/src/lib.rs b/compiler/rustc_type_ir/src/lib.rs index 15ef4e7d6c1..e2dfd9173fa 100644 --- a/compiler/rustc_type_ir/src/lib.rs +++ b/compiler/rustc_type_ir/src/lib.rs @@ -6,7 +6,6 @@ feature(associated_type_defaults, never_type, rustc_attrs, negative_impls) )] #![cfg_attr(feature = "nightly", allow(internal_features))] -#![warn(unreachable_pub)] // tidy-alphabetical-end extern crate self as rustc_type_ir; diff --git a/compiler/rustc_type_ir/src/predicate.rs b/compiler/rustc_type_ir/src/predicate.rs index 51790b13ec7..46385ca3a6f 100644 --- a/compiler/rustc_type_ir/src/predicate.rs +++ b/compiler/rustc_type_ir/src/predicate.rs @@ -131,8 +131,6 @@ pub struct TraitPredicate<I: Interner> { /// If polarity is Negative: we are proving that a negative impl of this trait /// exists. (Note that coherence also checks whether negative impls of supertraits /// exist via a series of predicates.) - /// - /// If polarity is Reserved: that's a bug. pub polarity: PredicatePolarity, } diff --git a/compiler/rustc_type_ir/src/search_graph/mod.rs b/compiler/rustc_type_ir/src/search_graph/mod.rs index 082cfff72e2..9ae83928057 100644 --- a/compiler/rustc_type_ir/src/search_graph/mod.rs +++ b/compiler/rustc_type_ir/src/search_graph/mod.rs @@ -12,13 +12,16 @@ /// The global cache has to be completely unobservable, while the per-cycle cache may impact /// behavior as long as the resulting behavior is still correct. use std::cmp::Ordering; -use std::collections::BTreeSet; +use std::collections::BTreeMap; +use std::collections::hash_map::Entry; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; use derive_where::derive_where; use rustc_index::{Idx, IndexVec}; +#[cfg(feature = "nightly")] +use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable}; use tracing::debug; use crate::data_structures::HashMap; @@ -104,27 +107,63 @@ pub trait Delegate { for_input: <Self::Cx as Cx>::Input, from_result: <Self::Cx as Cx>::Result, ) -> <Self::Cx as Cx>::Result; - - fn step_is_coinductive(cx: Self::Cx, input: <Self::Cx as Cx>::Input) -> bool; } /// In the initial iteration of a cycle, we do not yet have a provisional /// result. In the case we return an initial provisional result depending /// on the kind of cycle. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[cfg_attr(feature = "nightly", derive(TyDecodable, TyEncodable, HashStable_NoContext))] pub enum PathKind { - Coinductive, + /// A path consisting of only inductive/unproductive steps. Their initial + /// provisional result is `Err(NoSolution)`. 
We currently treat them as + /// `PathKind::Unknown` during coherence until we're fully confident in + /// our approach. Inductive, + /// A path which is not coinductive right now but which we may want + /// to change to be coinductive in the future. We return an ambiguous + /// result in this case to prevent people from relying on this. + Unknown, + /// A path with at least one coinductive step. Such cycles hold. + Coinductive, } +impl PathKind { + /// Returns the path kind when merging `self` with `rest`. + /// + /// Given an inductive path `self` and a coinductive path `rest`, + /// the path `self -> rest` would be coinductive. + /// + /// This operation represents an ordering and would be equivalent + /// to `max(self, rest)`. + fn extend(self, rest: PathKind) -> PathKind { + match (self, rest) { + (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive, + (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown, + (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive, + } + } +} + +/// The kinds of cycles a cycle head was involved in. +/// +/// This is used to avoid rerunning a cycle if there's +/// just a single usage kind and the final result matches +/// its provisional result. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UsageKind { Single(PathKind), Mixed, } +impl From<PathKind> for UsageKind { + fn from(path: PathKind) -> UsageKind { + UsageKind::Single(path) + } +} impl UsageKind { - fn merge(self, other: Self) -> Self { - match (self, other) { + #[must_use] + fn merge(self, other: impl Into<Self>) -> Self { + match (self, other.into()) { (UsageKind::Mixed, _) | (_, UsageKind::Mixed) => UsageKind::Mixed, (UsageKind::Single(lhs), UsageKind::Single(rhs)) => { if lhs == rhs { @@ -135,7 +174,39 @@ impl UsageKind { } } } - fn and_merge(&mut self, other: Self) { +} + +/// For each goal we track whether the paths from this goal +/// to its cycle heads are coinductive. +/// +/// This is a necessary condition to rebase provisional cache +/// entries. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum AllPathsToHeadCoinductive { + Yes, + No, +} +impl From<PathKind> for AllPathsToHeadCoinductive { + fn from(path: PathKind) -> AllPathsToHeadCoinductive { + match path { + PathKind::Coinductive => AllPathsToHeadCoinductive::Yes, + _ => AllPathsToHeadCoinductive::No, + } + } +} +impl AllPathsToHeadCoinductive { + #[must_use] + fn merge(self, other: impl Into<Self>) -> Self { + match (self, other.into()) { + (AllPathsToHeadCoinductive::Yes, AllPathsToHeadCoinductive::Yes) => { + AllPathsToHeadCoinductive::Yes + } + (AllPathsToHeadCoinductive::No, _) | (_, AllPathsToHeadCoinductive::No) => { + AllPathsToHeadCoinductive::No + } + } + } + fn and_merge(&mut self, other: impl Into<Self>) { *self = self.merge(other); } } @@ -177,10 +248,11 @@ impl AvailableDepth { /// All cycle heads a given goal depends on, ordered by their stack depth. /// -/// We therefore pop the cycle heads from highest to lowest. +/// We also track all paths from this goal to that head. This is necessary +/// when rebasing provisional cache results.
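The doc comment on `extend` above claims the merge is equivalent to `max(self, rest)`. A tiny standalone check of that claim, using a simplified copy of the enum that derives `Ord` in the same declaration order (weakest to strongest):

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind {
    Inductive,
    Unknown,
    Coinductive,
}

fn extend(a: PathKind, b: PathKind) -> PathKind {
    match (a, b) {
        (PathKind::Coinductive, _) | (_, PathKind::Coinductive) => PathKind::Coinductive,
        (PathKind::Unknown, _) | (_, PathKind::Unknown) => PathKind::Unknown,
        (PathKind::Inductive, PathKind::Inductive) => PathKind::Inductive,
    }
}

fn main() {
    let all = [PathKind::Inductive, PathKind::Unknown, PathKind::Coinductive];
    for a in all {
        for b in all {
            // The hand-written merge agrees with the derived ordering.
            assert_eq!(extend(a, b), a.max(b));
        }
    }
}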
#[derive(Clone, Debug, PartialEq, Eq, Default)] struct CycleHeads { - heads: BTreeSet<StackDepth>, + heads: BTreeMap<StackDepth, AllPathsToHeadCoinductive>, } impl CycleHeads { @@ -189,15 +261,15 @@ impl CycleHeads { } fn highest_cycle_head(&self) -> StackDepth { - *self.heads.last().unwrap() + self.opt_highest_cycle_head().unwrap() } fn opt_highest_cycle_head(&self) -> Option<StackDepth> { - self.heads.last().copied() + self.heads.last_key_value().map(|(k, _)| *k) } fn opt_lowest_cycle_head(&self) -> Option<StackDepth> { - self.heads.first().copied() + self.heads.first_key_value().map(|(k, _)| *k) } fn remove_highest_cycle_head(&mut self) { @@ -205,29 +277,100 @@ impl CycleHeads { debug_assert_ne!(last, None); } - fn insert(&mut self, head: StackDepth) { - self.heads.insert(head); + fn insert( + &mut self, + head: StackDepth, + path_from_entry: impl Into<AllPathsToHeadCoinductive> + Copy, + ) { + self.heads.entry(head).or_insert(path_from_entry.into()).and_merge(path_from_entry); } fn merge(&mut self, heads: &CycleHeads) { - for &head in heads.heads.iter() { - self.insert(head); + for (&head, &path_from_entry) in heads.heads.iter() { + self.insert(head, path_from_entry); + debug_assert!(matches!(self.heads[&head], AllPathsToHeadCoinductive::Yes)); } } + fn iter(&self) -> impl Iterator<Item = (StackDepth, AllPathsToHeadCoinductive)> + '_ { + self.heads.iter().map(|(k, v)| (*k, *v)) + } + /// Update the cycle heads of a goal at depth `this` given the cycle heads /// of a nested goal. This merges the heads after filtering the parent goal /// itself. - fn extend_from_child(&mut self, this: StackDepth, child: &CycleHeads) { - for &head in child.heads.iter() { + fn extend_from_child(&mut self, this: StackDepth, step_kind: PathKind, child: &CycleHeads) { + for (&head, &path_from_entry) in child.heads.iter() { match head.cmp(&this) { Ordering::Less => {} Ordering::Equal => continue, Ordering::Greater => unreachable!(), } - self.insert(head); + let path_from_entry = match step_kind { + PathKind::Coinductive => AllPathsToHeadCoinductive::Yes, + PathKind::Unknown | PathKind::Inductive => path_from_entry, + }; + + self.insert(head, path_from_entry); + } + } +} + +bitflags::bitflags! { + /// Tracks how nested goals have been accessed. This is necessary to disable + /// global cache entries if computing them would otherwise result in a cycle or + /// access a provisional cache entry. + #[derive(Debug, Clone, Copy)] + pub struct PathsToNested: u8 { + /// The initial value when adding a goal to its own nested goals. + const EMPTY = 1 << 0; + const INDUCTIVE = 1 << 1; + const UNKNOWN = 1 << 2; + const COINDUCTIVE = 1 << 3; + } +} +impl From<PathKind> for PathsToNested { + fn from(path: PathKind) -> PathsToNested { + match path { + PathKind::Inductive => PathsToNested::INDUCTIVE, + PathKind::Unknown => PathsToNested::UNKNOWN, + PathKind::Coinductive => PathsToNested::COINDUCTIVE, + } + } +} +impl PathsToNested { + /// The implementation of this function is kind of ugly. We check whether + /// there currently exist 'weaker' paths in the set, if so we upgrade these + /// paths to at least `path`. 
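`extend_with` (whose body follows below) upgrades every strictly weaker path bit to the kind of the step being appended, while keeping stronger bits untouched. A plain-`u8` stand-in for the `bitflags!` set above, kept dependency-free so it runs as-is:

// Each bit records one kind of path from the current goal to a nested goal.
const EMPTY: u8 = 1 << 0;
const INDUCTIVE: u8 = 1 << 1;
const UNKNOWN: u8 = 1 << 2;
const COINDUCTIVE: u8 = 1 << 3;

#[derive(Clone, Copy)]
enum Step {
    Inductive,
    Unknown,
    Coinductive,
}

/// Appending one more step never removes information: bits weaker than the
/// step are upgraded to the step's kind, stronger bits are left as they are.
fn extend_with(mut paths: u8, step: Step) -> u8 {
    let (weaker, upgraded) = match step {
        Step::Inductive => (EMPTY, INDUCTIVE),
        Step::Unknown => (EMPTY | INDUCTIVE, UNKNOWN),
        Step::Coinductive => (EMPTY | INDUCTIVE | UNKNOWN, COINDUCTIVE),
    };
    if paths & weaker != 0 {
        paths &= !weaker;
        paths |= upgraded;
    }
    paths
}

fn main() {
    let start = EMPTY | COINDUCTIVE;
    // EMPTY is upgraded to UNKNOWN; the existing COINDUCTIVE path is kept.
    assert_eq!(extend_with(start, Step::Unknown), UNKNOWN | COINDUCTIVE);
}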
+ #[must_use] + fn extend_with(mut self, path: PathKind) -> Self { + match path { + PathKind::Inductive => { + if self.intersects(PathsToNested::EMPTY) { + self.remove(PathsToNested::EMPTY); + self.insert(PathsToNested::INDUCTIVE); + } + } + PathKind::Unknown => { + if self.intersects(PathsToNested::EMPTY | PathsToNested::INDUCTIVE) { + self.remove(PathsToNested::EMPTY | PathsToNested::INDUCTIVE); + self.insert(PathsToNested::UNKNOWN); + } + } + PathKind::Coinductive => { + if self.intersects( + PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN, + ) { + self.remove( + PathsToNested::EMPTY | PathsToNested::INDUCTIVE | PathsToNested::UNKNOWN, + ); + self.insert(PathsToNested::COINDUCTIVE); + } + } } + + self } } @@ -246,23 +389,19 @@ impl CycleHeads { /// We need to disable the global cache if using it would hide a cycle, as /// cycles can impact behavior. The cycle ABA may have different final /// results from the cycle BAB depending on the cycle root. -#[derive_where(Debug, Default; X: Cx)] +#[derive_where(Debug, Default, Clone; X: Cx)] struct NestedGoals<X: Cx> { - nested_goals: HashMap<X::Input, UsageKind>, + nested_goals: HashMap<X::Input, PathsToNested>, } impl<X: Cx> NestedGoals<X> { fn is_empty(&self) -> bool { self.nested_goals.is_empty() } - fn insert(&mut self, input: X::Input, path_from_entry: UsageKind) { - self.nested_goals.entry(input).or_insert(path_from_entry).and_merge(path_from_entry); - } - - fn merge(&mut self, nested_goals: &NestedGoals<X>) { - #[allow(rustc::potential_query_instability)] - for (input, path_from_entry) in nested_goals.iter() { - self.insert(input, path_from_entry); + fn insert(&mut self, input: X::Input, paths_to_nested: PathsToNested) { + match self.nested_goals.entry(input) { + Entry::Occupied(mut entry) => *entry.get_mut() |= paths_to_nested, + Entry::Vacant(entry) => drop(entry.insert(paths_to_nested)), + } } } @@ -274,25 +413,18 @@ impl<X: Cx> NestedGoals<X> { /// the same as for the child. fn extend_from_child(&mut self, step_kind: PathKind, nested_goals: &NestedGoals<X>) { #[allow(rustc::potential_query_instability)] - for (input, path_from_entry) in nested_goals.iter() { - let path_from_entry = match step_kind { - PathKind::Coinductive => path_from_entry, - PathKind::Inductive => UsageKind::Single(PathKind::Inductive), - }; - self.insert(input, path_from_entry); + for (input, paths_to_nested) in nested_goals.iter() { + let paths_to_nested = paths_to_nested.extend_with(step_kind); + self.insert(input, paths_to_nested); } } #[cfg_attr(feature = "nightly", rustc_lint_query_instability)] #[allow(rustc::potential_query_instability)] - fn iter(&self) -> impl Iterator<Item = (X::Input, UsageKind)> { + fn iter(&self) -> impl Iterator<Item = (X::Input, PathsToNested)> + '_ { self.nested_goals.iter().map(|(i, p)| (*i, *p)) } - fn get(&self, input: X::Input) -> Option<UsageKind> { - self.nested_goals.get(&input).copied() - } - fn contains(&self, input: X::Input) -> bool { self.nested_goals.contains_key(&input) } @@ -310,6 +442,12 @@ rustc_index::newtype_index! { struct StackEntry<X: Cx> { input: X::Input, + /// Whether proving this goal is a coinductive step. + /// + /// This is used when encountering a trait solver cycle to + /// decide the initial provisional result of the cycle. + step_kind_from_parent: PathKind, + /// The available depth of a given goal, immutable.
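A dependency-free sketch of the upgrade rule `extend_with` implements, with plain `u8` constants standing in for the `bitflags!`-generated `PathsToNested` type: every tracked path that is at most as strong as the new step collapses into the step's kind, while stronger paths are kept untouched.

const EMPTY: u8 = 1 << 0;
const INDUCTIVE: u8 = 1 << 1;
const UNKNOWN: u8 = 1 << 2;
const COINDUCTIVE: u8 = 1 << 3;

fn extend_with(mut paths: u8, step: u8) -> u8 {
    // All bits below `step` are the strictly weaker path kinds (including EMPTY).
    // This relies on the one-hot encoding of the constants above.
    let weaker = step - 1;
    if paths & weaker != 0 {
        paths &= !weaker;
        paths |= step;
    }
    paths
}

fn main() {
    assert_eq!(extend_with(EMPTY | INDUCTIVE, UNKNOWN), UNKNOWN);
    assert_eq!(extend_with(COINDUCTIVE | INDUCTIVE, UNKNOWN), COINDUCTIVE | UNKNOWN);
    assert_eq!(extend_with(EMPTY, INDUCTIVE), INDUCTIVE);
}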
available_depth: AvailableDepth, @@ -346,9 +484,9 @@ struct ProvisionalCacheEntry<X: Cx> { encountered_overflow: bool, /// All cycle heads this cache entry depends on. heads: CycleHeads, - /// The path from the highest cycle head to this goal. + /// The path from the highest cycle head to this goal. This differs from + /// `heads` which tracks the path to the cycle head *from* this goal. path_from_head: PathKind, - nested_goals: NestedGoals<X>, result: X::Result, } @@ -367,6 +505,20 @@ pub struct SearchGraph<D: Delegate<Cx = X>, X: Cx = <D as Delegate>::Cx> { _marker: PhantomData<D>, } +/// While [`SearchGraph::update_parent_goal`] can be mostly shared between +/// ordinary nested goals/global cache hits and provisional cache hits, +/// using the provisional cache should not add any nested goals. +/// +/// `nested_goals` are only used when checking whether global cache entries +/// are applicable. This only cares about whether a goal is actually accessed. +/// Given that the usage of the provisional cache is fully deterministic, we +/// don't need to track the nested goals used while computing a provisional +/// cache entry. +enum UpdateParentGoalCtxt<'a, X: Cx> { + Ordinary(&'a NestedGoals<X>), + ProvisionalCacheHit, +} + impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { pub fn new(root_depth: usize) -> SearchGraph<D> { Self { @@ -382,27 +534,32 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { /// and using existing global cache entries to make sure they /// have the same impact on the remaining evaluation. fn update_parent_goal( - cx: X, stack: &mut IndexVec<StackDepth, StackEntry<X>>, + step_kind_from_parent: PathKind, reached_depth: StackDepth, heads: &CycleHeads, encountered_overflow: bool, - nested_goals: &NestedGoals<X>, + context: UpdateParentGoalCtxt<'_, X>, ) { if let Some(parent_index) = stack.last_index() { let parent = &mut stack[parent_index]; parent.reached_depth = parent.reached_depth.max(reached_depth); parent.encountered_overflow |= encountered_overflow; - parent.heads.extend_from_child(parent_index, heads); - let step_kind = Self::step_kind(cx, parent.input); - parent.nested_goals.extend_from_child(step_kind, nested_goals); + parent.heads.extend_from_child(parent_index, step_kind_from_parent, heads); + let parent_depends_on_cycle = match context { + UpdateParentGoalCtxt::Ordinary(nested_goals) => { + parent.nested_goals.extend_from_child(step_kind_from_parent, nested_goals); + !nested_goals.is_empty() + } + UpdateParentGoalCtxt::ProvisionalCacheHit => true, + }; // Once we've got goals which encountered overflow or a cycle, // we track all goals whose behavior may depend on these // goals as this change may cause them to now depend on additional // goals, resulting in new cycles. See the dev-guide for examples. - if !nested_goals.is_empty() { - parent.nested_goals.insert(parent.input, UsageKind::Single(PathKind::Coinductive)) + if parent_depends_on_cycle { + parent.nested_goals.insert(parent.input, PathsToNested::EMPTY); + } } } @@ -422,21 +579,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { self.stack.len() } - fn step_kind(cx: X, input: X::Input) -> PathKind { - if D::step_is_coinductive(cx, input) { PathKind::Coinductive } else { PathKind::Inductive } - } - /// Whether the path from `head` to the current stack entry is inductive or coinductive. - fn stack_path_kind( - cx: X, + /// + /// The `step_kind_to_head` is used to add a single additional path segment to the path on + /// the stack which completes the cycle.
Given an inductive step AB which then cycles + /// coinductively with A, we need to treat this cycle as coinductive. + fn cycle_path_kind( stack: &IndexVec<StackDepth, StackEntry<X>>, + step_kind_to_head: PathKind, head: StackDepth, ) -> PathKind { - if stack.raw[head.index()..].iter().all(|entry| D::step_is_coinductive(cx, entry.input)) { - PathKind::Coinductive - } else { - PathKind::Inductive - } + stack.raw[head.index() + 1..] + .iter() + .fold(step_kind_to_head, |curr, entry| curr.extend(entry.step_kind_from_parent)) } /// Probably the most involved method of the whole solver. @@ -447,6 +602,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { &mut self, cx: X, input: X::Input, + step_kind_from_parent: PathKind, inspect: &mut D::ProofTreeBuilder, mut evaluate_goal: impl FnMut(&mut Self, &mut D::ProofTreeBuilder) -> X::Result, ) -> X::Result { @@ -464,7 +620,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // - A // - BA cycle // - CB :x: - if let Some(result) = self.lookup_provisional_cache(cx, input) { + if let Some(result) = self.lookup_provisional_cache(input, step_kind_from_parent) { return result; } @@ -477,10 +633,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // global cache has been disabled as it may otherwise change the result for // cyclic goals. We don't care about goals which are not on the current stack // so it's fine to drop their scope eagerly. - self.lookup_global_cache_untracked(cx, input, available_depth) + self.lookup_global_cache_untracked(cx, input, step_kind_from_parent, available_depth) .inspect(|expected| debug!(?expected, "validate cache entry")) .map(|r| (scope, r)) - } else if let Some(result) = self.lookup_global_cache(cx, input, available_depth) { + } else if let Some(result) = + self.lookup_global_cache(cx, input, step_kind_from_parent, available_depth) + { return result; } else { None @@ -490,8 +648,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // avoid iterating over the stack in case a goal has already been computed. // This may not have an actual performance impact and we could reorder them // as it may reduce the number of `nested_goals` we need to track. - if let Some(result) = self.check_cycle_on_stack(cx, input) { - debug_assert!(validate_cache.is_none(), "global cache and cycle on stack"); + if let Some(result) = self.check_cycle_on_stack(cx, input, step_kind_from_parent) { + debug_assert!(validate_cache.is_none(), "global cache and cycle on stack: {input:?}"); return result; } @@ -499,6 +657,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { let depth = self.stack.next_index(); let entry = StackEntry { input, + step_kind_from_parent, available_depth, reached_depth: depth, heads: Default::default(), @@ -522,12 +681,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // We've finished computing the goal and have popped it from the stack, // lazily update its parent goal. Self::update_parent_goal( - cx, &mut self.stack, + final_entry.step_kind_from_parent, final_entry.reached_depth, &final_entry.heads, final_entry.encountered_overflow, - &final_entry.nested_goals, + UpdateParentGoalCtxt::Ordinary(&final_entry.nested_goals), ); // We're now done with this goal. We only add the root of cycles to the global cache.
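A toy walkthrough of what the fold in `cycle_path_kind` computes, using minimal stand-in types rather than the solver's: the step closing the cycle is extended with the `step_kind_from_parent` of every stack entry above the head, so a single coinductive step anywhere on the cycle makes the whole cycle coinductive.

#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum PathKind { Inductive, Unknown, Coinductive }

impl PathKind {
    fn extend(self, rest: PathKind) -> PathKind { self.max(rest) }
}

// `steps_above_head`: the `step_kind_from_parent` of every entry above the cycle head.
// `step_kind_to_head`: the step from the current goal back to the head, closing the cycle.
fn cycle_path_kind(steps_above_head: &[PathKind], step_kind_to_head: PathKind) -> PathKind {
    steps_above_head.iter().fold(step_kind_to_head, |curr, &step| curr.extend(step))
}

fn main() {
    // An inductive step A -> B which then cycles coinductively with A is a coinductive cycle.
    assert_eq!(
        cycle_path_kind(&[PathKind::Inductive], PathKind::Coinductive),
        PathKind::Coinductive,
    );
    // A chain of purely inductive steps stays inductive.
    assert_eq!(
        cycle_path_kind(&[PathKind::Inductive, PathKind::Inductive], PathKind::Inductive),
        PathKind::Inductive,
    );
}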
@@ -541,19 +700,20 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { self.insert_global_cache(cx, input, final_entry, result, dep_node) } } else if D::ENABLE_PROVISIONAL_CACHE { - debug_assert!(validate_cache.is_none()); + debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}"); let entry = self.provisional_cache.entry(input).or_default(); - let StackEntry { heads, nested_goals, encountered_overflow, .. } = final_entry; - let path_from_head = Self::stack_path_kind(cx, &self.stack, heads.highest_cycle_head()); - entry.push(ProvisionalCacheEntry { - encountered_overflow, - heads, - path_from_head, - nested_goals, - result, - }); + let StackEntry { heads, encountered_overflow, .. } = final_entry; + let path_from_head = Self::cycle_path_kind( + &self.stack, + step_kind_from_parent, + heads.highest_cycle_head(), + ); + let provisional_cache_entry = + ProvisionalCacheEntry { encountered_overflow, heads, path_from_head, result }; + debug!(?provisional_cache_entry); + entry.push(provisional_cache_entry); } else { - debug_assert!(validate_cache.is_none()); + debug_assert!(validate_cache.is_none(), "unexpected non-root: {input:?}"); } result @@ -575,7 +735,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // // We must therefore not use the global cache entry for `B` in that case. // See tests/ui/traits/next-solver/cycles/hidden-by-overflow.rs - last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive)); + last.nested_goals.insert(last.input, PathsToNested::EMPTY); } debug!("encountered stack overflow"); @@ -607,7 +767,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { /// This can be thought of rotating the sub-tree of this provisional result and changing /// its entry point while making sure that all paths through this sub-tree stay the same. /// - /// /// In case the popped cycle head failed to reach a fixpoint anything which depends on /// its provisional result is invalid. Actually discarding provisional cache entries in /// this case would cause hangs, so we instead change the result of dependant provisional @@ -616,7 +775,6 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { /// to me. fn rebase_provisional_cache_entries( &mut self, - cx: X, stack_entry: &StackEntry<X>, mut mutate_result: impl FnMut(X::Input, X::Result) -> X::Result, ) { @@ -628,25 +786,24 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { encountered_overflow: _, heads, path_from_head, - nested_goals, result, } = entry; - if heads.highest_cycle_head() != head { + if heads.highest_cycle_head() == head { + heads.remove_highest_cycle_head() + } else { return true; } - // We don't try rebasing if the path from the current head - // to the cache entry is not coinductive or if the path from - // the cache entry to the current head is not coinductive. - // - // Both of these constraints could be weakened, but by only - // accepting coinductive paths we don't have to worry about - // changing the cycle kind of the remaining cycles. We can - // extend this in the future once there's a known issue - // caused by it. - if *path_from_head != PathKind::Coinductive - || nested_goals.get(stack_entry.input).unwrap() - != UsageKind::Single(PathKind::Coinductive) + // We only try to rebase if all paths from the cache entry + // to its heads are coinductive. In this case these cycle + // kinds won't change, no matter the goals between these + // heads and the provisional cache entry. 
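The precondition checked in the `if` right below can be restated as: rebasing is only attempted when none of the per-head flags is `No`. A reduced sketch (a slice of pairs instead of the real `CycleHeads`):

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum AllPathsToHeadCoinductive { Yes, No }

fn may_rebase(heads: &[(usize, AllPathsToHeadCoinductive)]) -> bool {
    // Mirrors `!heads.iter().any(|(_, p)| matches!(p, No))`.
    !heads.iter().any(|&(_, p)| matches!(p, AllPathsToHeadCoinductive::No))
}

fn main() {
    use AllPathsToHeadCoinductive::*;
    assert!(may_rebase(&[(1, Yes), (3, Yes)]));
    assert!(!may_rebase(&[(1, Yes), (3, No)]));
}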
+ if heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) { + return false; + } + + // The same for nested goals of the cycle head. + if stack_entry.heads.iter().any(|(_, p)| matches!(p, AllPathsToHeadCoinductive::No)) { return false; } @@ -654,20 +811,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // Merge the cycle heads of the provisional cache entry and the // popped head. If the popped cycle head was a root, discard all // provisional cache entries which depend on it. - heads.remove_highest_cycle_head(); heads.merge(&stack_entry.heads); let Some(head) = heads.opt_highest_cycle_head() else { return false; }; - // As we've made sure that the path from the new highest cycle - // head to the uses of the popped cycle head are fully coinductive, - // we can be sure that the paths to all nested goals of the popped - // cycle head remain the same. We can simply merge them. - nested_goals.merge(&stack_entry.nested_goals); // We now care about the path from the next highest cycle head to the // provisional cache entry. - *path_from_head = Self::stack_path_kind(cx, &self.stack, head); + *path_from_head = path_from_head.extend(Self::cycle_path_kind( + &self.stack, + stack_entry.step_kind_from_parent, + head, + )); // Mutate the result of the provisional cache entry in case we did // not reach a fixpoint. *result = mutate_result(input, *result); @@ -677,19 +832,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { }); } - fn lookup_provisional_cache(&mut self, cx: X, input: X::Input) -> Option<X::Result> { + fn lookup_provisional_cache( + &mut self, + input: X::Input, + step_kind_from_parent: PathKind, + ) -> Option<X::Result> { if !D::ENABLE_PROVISIONAL_CACHE { return None; } let entries = self.provisional_cache.get(&input)?; - for &ProvisionalCacheEntry { - encountered_overflow, - ref heads, - path_from_head, - ref nested_goals, - result, - } in entries + for &ProvisionalCacheEntry { encountered_overflow, ref heads, path_from_head, result } in + entries { let head = heads.highest_cycle_head(); if encountered_overflow { @@ -710,22 +864,18 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // A provisional cache entry is only valid if the current path from its // highest cycle head to the goal is the same. - if path_from_head == Self::stack_path_kind(cx, &self.stack, head) { + if path_from_head == Self::cycle_path_kind(&self.stack, step_kind_from_parent, head) { // While we don't have to track the full depth of the provisional cache entry, // we do have to increment the required depth by one as we'd have already failed // with overflow otherwise let next_index = self.stack.next_index(); - let last = &mut self.stack.raw.last_mut().unwrap(); - let path_from_entry = Self::step_kind(cx, last.input); - last.nested_goals.insert(input, UsageKind::Single(path_from_entry)); - Self::update_parent_goal( - cx, &mut self.stack, + step_kind_from_parent, next_index, heads, - false, - nested_goals, + encountered_overflow, + UpdateParentGoalCtxt::ProvisionalCacheHit, ); debug_assert!(self.stack[head].has_been_used.is_some()); debug!(?head, ?path_from_head, "provisional cache hit"); @@ -740,8 +890,8 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { /// evaluating this entry would not have ended up depending on either a goal /// already on the stack or a provisional cache entry. 
fn candidate_is_applicable( - cx: X, stack: &IndexVec<StackDepth, StackEntry<X>>, + step_kind_from_parent: PathKind, provisional_cache: &HashMap<X::Input, Vec<ProvisionalCacheEntry<X>>>, nested_goals: &NestedGoals<X>, ) -> bool { @@ -772,8 +922,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { for &ProvisionalCacheEntry { encountered_overflow, ref heads, - path_from_head, - nested_goals: _, + path_from_head: head_to_provisional, result: _, } in entries.iter() { @@ -785,24 +934,19 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // A provisional cache entry only applies if the path from its highest head // matches the path when encountering the goal. + // + // We check if any of the paths taken while computing the global goal + // would end up with an applicable provisional cache entry. let head = heads.highest_cycle_head(); - let full_path = match Self::stack_path_kind(cx, stack, head) { - PathKind::Coinductive => path_from_global_entry, - PathKind::Inductive => UsageKind::Single(PathKind::Inductive), - }; - - match (full_path, path_from_head) { - (UsageKind::Mixed, _) - | (UsageKind::Single(PathKind::Coinductive), PathKind::Coinductive) - | (UsageKind::Single(PathKind::Inductive), PathKind::Inductive) => { - debug!( - ?full_path, - ?path_from_head, - "cache entry not applicable due to matching paths" - ); - return false; - } - _ => debug!(?full_path, ?path_from_head, "paths don't match"), + let head_to_curr = Self::cycle_path_kind(stack, step_kind_from_parent, head); + let full_paths = path_from_global_entry.extend_with(head_to_curr); + if full_paths.contains(head_to_provisional.into()) { + debug!( + ?full_paths, + ?head_to_provisional, + "cache entry not applicable due to matching paths" + ); + return false; } } } @@ -816,14 +960,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { &self, cx: X, input: X::Input, + step_kind_from_parent: PathKind, available_depth: AvailableDepth, ) -> Option<X::Result> { cx.with_global_cache(|cache| { cache .get(cx, input, available_depth, |nested_goals| { Self::candidate_is_applicable( - cx, &self.stack, + step_kind_from_parent, &self.provisional_cache, nested_goals, ) @@ -839,14 +984,15 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { &mut self, cx: X, input: X::Input, + step_kind_from_parent: PathKind, available_depth: AvailableDepth, ) -> Option<X::Result> { cx.with_global_cache(|cache| { let CacheData { result, additional_depth, encountered_overflow, nested_goals } = cache .get(cx, input, available_depth, |nested_goals| { Self::candidate_is_applicable( - cx, &self.stack, + step_kind_from_parent, &self.provisional_cache, nested_goals, ) @@ -860,12 +1006,12 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // cycle heads are always empty. let heads = Default::default(); Self::update_parent_goal( - cx, &mut self.stack, + step_kind_from_parent, reached_depth, &heads, encountered_overflow, - nested_goals, + UpdateParentGoalCtxt::Ordinary(nested_goals), ); debug!(?additional_depth, "global cache hit"); @@ -873,16 +1019,21 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { }) } - fn check_cycle_on_stack(&mut self, cx: X, input: X::Input) -> Option<X::Result> { + fn check_cycle_on_stack( + &mut self, + cx: X, + input: X::Input, + step_kind_from_parent: PathKind, + ) -> Option<X::Result> { let (head, _stack_entry) = self.stack.iter_enumerated().find(|(_, e)| e.input == input)?; - debug!("encountered cycle with depth {head:?}"); // We have a nested goal which directly relies on a goal deeper in the stack. 
// // We start by tagging all cycle participants, as that's necessary for caching. // // Finally we can return either the provisional response or the initial response // in case we're in the first fixpoint iteration for this goal. - let path_kind = Self::stack_path_kind(cx, &self.stack, head); + let path_kind = Self::cycle_path_kind(&self.stack, step_kind_from_parent, head); + debug!(?path_kind, "encountered cycle with depth {head:?}"); let usage_kind = UsageKind::Single(path_kind); self.stack[head].has_been_used = Some(self.stack[head].has_been_used.map_or(usage_kind, |prev| prev.merge(usage_kind))); @@ -894,11 +1045,10 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { let last = &mut self.stack[last_index]; last.reached_depth = last.reached_depth.max(next_index); - let path_from_entry = Self::step_kind(cx, last.input); - last.nested_goals.insert(input, UsageKind::Single(path_from_entry)); - last.nested_goals.insert(last.input, UsageKind::Single(PathKind::Coinductive)); + last.nested_goals.insert(input, step_kind_from_parent.into()); + last.nested_goals.insert(last.input, PathsToNested::EMPTY); if last_index != head { - last.heads.insert(head); + last.heads.insert(head, step_kind_from_parent); } // Return the provisional result or, if we're in the first iteration, @@ -964,7 +1114,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // this was only the root of either coinductive or inductive cycles, and the // final result is equal to the initial response for that case. if self.reached_fixpoint(cx, &stack_entry, usage_kind, result) { - self.rebase_provisional_cache_entries(cx, &stack_entry, |_, result| result); + self.rebase_provisional_cache_entries(&stack_entry, |_, result| result); return (stack_entry, result); } @@ -981,7 +1131,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { // we also taint all provisional cache entries which depend on the // current goal. if D::is_ambiguous_result(result) { - self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| { + self.rebase_provisional_cache_entries(&stack_entry, |input, _| { D::propagate_ambiguity(cx, input, result) }); return (stack_entry, result); @@ -993,7 +1143,7 @@ impl<D: Delegate<Cx = X>, X: Cx> SearchGraph<D> { if i >= D::FIXPOINT_STEP_LIMIT { debug!("canonical cycle overflow"); let result = D::on_fixpoint_overflow(cx, input); - self.rebase_provisional_cache_entries(cx, &stack_entry, |input, _| { + self.rebase_provisional_cache_entries(&stack_entry, |input, _| { D::on_fixpoint_overflow(cx, input) }); return (stack_entry, result); diff --git a/compiler/rustc_type_ir/src/solve/mod.rs b/compiler/rustc_type_ir/src/solve/mod.rs index a562b751d8a..de2e0f2c0ec 100644 --- a/compiler/rustc_type_ir/src/solve/mod.rs +++ b/compiler/rustc_type_ir/src/solve/mod.rs @@ -8,6 +8,7 @@ use derive_where::derive_where; use rustc_macros::{HashStable_NoContext, TyDecodable, TyEncodable}; use rustc_type_ir_macros::{Lift_Generic, TypeFoldable_Generic, TypeVisitable_Generic}; +use crate::search_graph::PathKind; use crate::{self as ty, Canonical, CanonicalVarValues, Interner, Upcast}; pub type CanonicalInput<I, T = <I as Interner>::Predicate> = @@ -57,20 +58,24 @@ impl<I: Interner, P> Goal<I, P> { /// Why a specific goal has to be proven. /// /// This is necessary as we treat nested goals different depending on -/// their source. This is currently mostly used by proof tree visitors -/// but will be used by cycle handling in the future. +/// their source. This is used to decide whether a cycle is coinductive. 
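For readers unfamiliar with the surrounding code, the fixpoint hunks above all feed into the same loop shape. A generic, closure-based sketch of that shape (assumed names, not the solver's actual API): re-evaluate the cycle root against its previous provisional result until it stops changing or a step limit is hit, falling back to an overflow result.

fn fixpoint<R: PartialEq + Copy>(
    initial: R,
    limit: usize,
    mut step: impl FnMut(R) -> R,
    on_overflow: impl FnOnce() -> R,
) -> R {
    let mut provisional = initial;
    for _ in 0..limit {
        let next = step(provisional);
        if next == provisional {
            return next; // reached a fixpoint
        }
        provisional = next;
    }
    on_overflow()
}

fn main() {
    // Converges after a few iterations: 10 -> 5 -> 2 -> 1 -> 0 -> 0.
    assert_eq!(fixpoint(10u32, 16, |x| x / 2, || u32::MAX), 0);
    // Never converges within the limit, so the overflow result is used.
    assert_eq!(fixpoint(0u32, 4, |x| x + 1, || u32::MAX), u32::MAX);
}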
+/// See the documentation of `EvalCtxt::step_kind_for_source` for more details +/// about this. +/// +/// It is also used by proof tree visitors, e.g. for diagnostic purposes. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] #[cfg_attr(feature = "nightly", derive(HashStable_NoContext))] pub enum GoalSource { Misc, - /// We're proving a where-bound of an impl. + /// A nested goal required to prove that types are equal/subtypes. + /// This is always an unproductive step. /// - /// FIXME(-Znext-solver=coinductive): Explain how and why this - /// changes whether cycles are coinductive. + /// This is also used for all `NormalizesTo` goals as they are used + /// to relate types in `AliasRelate`. + TypeRelating, + /// We're proving a where-bound of an impl. ImplWhereBound, /// Const conditions that need to hold for `~const` alias bounds to hold. - /// - /// FIXME(-Znext-solver=coinductive): Are these even coinductive? AliasBoundConstCondition, /// Instantiating a higher-ranked goal and re-proving it. InstantiateHigherRanked, @@ -78,6 +83,12 @@ pub enum GoalSource { /// This is used in two places: projecting to an opaque whose hidden type /// is already registered in the opaque type storage, and for rigid projections. AliasWellFormed, + /// In case normalizing aliases in nested goals cycles, eagerly normalizing these + /// aliases in the context of the parent may incorrectly change the cycle kind. + /// Normalizing aliases in goals therefore tracks the original path kind for this + /// nested goal. See the comment of the `ReplaceAliasWithInfer` visitor for more + /// details. + NormalizeGoal(PathKind), } #[derive_where(Clone; I: Interner, Goal<I, P>: Clone)] @@ -181,11 +192,6 @@ pub enum BuiltinImplSource { /// /// The index is only used for winnowing. TraitUpcasting(usize), - /// Unsizing a tuple like `(A, B, ..., X)` to `(A, B, ..., Y)` if `X` unsizes to `Y`. - /// - /// This can be removed when `feature(tuple_unsizing)` is stabilized, since we only - /// use it to detect when unsizing tuples in hir typeck. - TupleUnsizing, } #[derive_where(Clone, Copy, Hash, PartialEq, Eq, Debug; I: Interner)] diff --git a/compiler/stable_mir/src/crate_def.rs b/compiler/stable_mir/src/crate_def.rs index 8c6fd99f98a..2577c281ca4 100644 --- a/compiler/stable_mir/src/crate_def.rs +++ b/compiler/stable_mir/src/crate_def.rs @@ -10,6 +10,27 @@ use crate::{Crate, Symbol, with}; #[derive(Clone, Copy, PartialEq, Eq, Hash, Serialize)] pub struct DefId(pub(crate) usize); +impl DefId { + /// Return the fully qualified name of this definition. + pub fn name(&self) -> Symbol { + with(|cx| cx.def_name(*self, false)) + } + + /// Return a trimmed name of this definition. + /// + /// This can be used to print more user friendly diagnostic messages. + /// + /// If a symbol name can only be imported from one place for a type, and as + /// long as it was not glob-imported anywhere in the current crate, we trim its + /// path and print only the name. + /// + /// For example, this function may shorten `std::vec::Vec` to just `Vec`, + /// as long as there is no other `Vec` importable anywhere. + pub fn trimmed_name(&self) -> Symbol { + with(|cx| cx.def_name(*self, true)) + } +} + /// A trait for retrieving information about a particular definition. /// /// Implementors must provide the implementation of `def_id` which will be used to retrieve @@ -19,24 +40,17 @@ pub trait CrateDef { fn def_id(&self) -> DefId; /// Return the fully qualified name of the current definition.
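The stable_mir hunks here and below follow a single refactoring pattern: keep the `CrateDef` trait's default methods but have them delegate to new inherent methods on `DefId`, so code holding only a `DefId` gets the same API. A self-contained illustration with stand-in types (the real methods go through the `with(|cx| ...)` context, omitted here):

#[derive(Clone, Copy)]
struct DefId(usize);

impl DefId {
    // Stand-in for `with(|cx| cx.def_name(*self, false))`.
    fn name(&self) -> String {
        format!("def#{}", self.0)
    }
}

trait CrateDef {
    fn def_id(&self) -> DefId;

    // The trait's default method now just forwards to the inherent method.
    fn name(&self) -> String {
        self.def_id().name()
    }
}

struct CrateItem(DefId);

impl CrateDef for CrateItem {
    fn def_id(&self) -> DefId {
        self.0
    }
}

fn main() {
    let item = CrateItem(DefId(7));
    assert_eq!(item.name(), "def#7");
    assert_eq!(item.def_id().name(), "def#7");
}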
+ /// + /// See [`DefId::name`] for more details. fn name(&self) -> Symbol { - let def_id = self.def_id(); - with(|cx| cx.def_name(def_id, false)) + self.def_id().name() } /// Return a trimmed name of this definition. /// - /// This can be used to print more user friendly diagnostic messages. - /// - /// If a symbol name can only be imported from one place for a type, and as - /// long as it was not glob-imported anywhere in the current crate, we trim its - /// path and print only the name. - /// - /// For example, this function may shorten `std::vec::Vec` to just `Vec`, - /// as long as there is no other `Vec` importable anywhere. + /// See [`DefId::trimmed_name`] for more details. fn trimmed_name(&self) -> Symbol { - let def_id = self.def_id(); - with(|cx| cx.def_name(def_id, true)) + self.def_id().trimmed_name() } /// Return information about the crate where this definition is declared. diff --git a/compiler/stable_mir/src/lib.rs b/compiler/stable_mir/src/lib.rs index 0b4cebadad1..70d42dfbfcb 100644 --- a/compiler/stable_mir/src/lib.rs +++ b/compiler/stable_mir/src/lib.rs @@ -48,10 +48,7 @@ pub type CrateNum = usize; impl Debug for DefId { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - f.debug_struct("DefId") - .field("id", &self.0) - .field("name", &with(|cx| cx.def_name(*self, false))) - .finish() + f.debug_struct("DefId").field("id", &self.0).field("name", &self.name()).finish() } } @@ -132,13 +129,21 @@ crate_def_with_ty! { } impl CrateItem { - /// This will return the body of an item. - /// - /// This will panic if no body is available. - pub fn body(&self) -> mir::Body { + /// This will return the body of an item or panic if it's not available. + pub fn expect_body(&self) -> mir::Body { with(|cx| cx.mir_body(self.0)) } + /// Return the body of an item if available. + pub fn body(&self) -> Option<mir::Body> { + with(|cx| cx.has_body(self.0).then(|| cx.mir_body(self.0))) + } + + /// Check if a body is available for this item. + pub fn has_body(&self) -> bool { + with(|cx| cx.has_body(self.0)) + } + pub fn span(&self) -> Span { with(|cx| cx.span_of_an_item(self.0)) } @@ -159,8 +164,11 @@ impl CrateItem { with(|cx| cx.is_foreign_item(self.0)) } + /// Emit MIR for this item body. pub fn emit_mir<W: io::Write>(&self, w: &mut W) -> io::Result<()> { - self.body().dump(w, &self.name()) + self.body() + .ok_or_else(|| io::Error::other(format!("No body found for `{}`", self.name())))? + .dump(w, &self.name()) } } diff --git a/compiler/stable_mir/src/mir/alloc.rs b/compiler/stable_mir/src/mir/alloc.rs index 7e0c4a479b8..023807b76ae 100644 --- a/compiler/stable_mir/src/mir/alloc.rs +++ b/compiler/stable_mir/src/mir/alloc.rs @@ -58,7 +58,7 @@ impl IndexedVal for AllocId { /// Utility function used to read allocation data into an unsigned integer. pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> { - let mut buf = [0u8; std::mem::size_of::<u128>()]; + let mut buf = [0u8; size_of::<u128>()]; match MachineInfo::target_endianness() { Endian::Little => { bytes.read_exact(&mut buf[..bytes.len()])?; @@ -73,7 +73,7 @@ pub(crate) fn read_target_uint(mut bytes: &[u8]) -> Result<u128, Error> { /// Utility function used to read allocation data into a signed integer. pub(crate) fn read_target_int(mut bytes: &[u8]) -> Result<i128, Error> { - let mut buf = [0u8; std::mem::size_of::<i128>()]; + let mut buf = [0u8; size_of::<i128>()]; match MachineInfo::target_endianness() { Endian::Little => { bytes.read_exact(&mut buf[..bytes.len()])?; |
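For the `read_target_uint`/`read_target_int` hunks above, a self-contained sketch of the same endian-aware decoding approach (a plain `little_endian` flag stands in for `MachineInfo::target_endianness()`, and `std::io::Error` for the crate's `Error` type): the bytes are copied to the leading or trailing end of a fixed 16-byte buffer and decoded with the matching `from_*_bytes` constructor.

use std::io::Read;
use std::mem::size_of;

fn read_uint(mut bytes: &[u8], little_endian: bool) -> std::io::Result<u128> {
    let mut buf = [0u8; size_of::<u128>()];
    if little_endian {
        // Partial values occupy the least significant (leading) bytes.
        bytes.read_exact(&mut buf[..bytes.len()])?;
        Ok(u128::from_le_bytes(buf))
    } else {
        // Partial values occupy the least significant (trailing) bytes.
        bytes.read_exact(&mut buf[size_of::<u128>() - bytes.len()..])?;
        Ok(u128::from_be_bytes(buf))
    }
}

fn main() -> std::io::Result<()> {
    assert_eq!(read_uint(&[0x2a, 0x00], true)?, 42);
    assert_eq!(read_uint(&[0x00, 0x2a], false)?, 42);
    Ok(())
}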
