| author | Laurențiu Nicola <lnicola@users.noreply.github.com> | 2025-01-20 09:29:00 +0000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-01-20 09:29:00 +0000 |
| commit | 141e53b7154083afcfb2e0fb0334e554d0c146a4 | |
| tree | 1707172316fef25a86219934940bf7d3196d263f /compiler | |
| parent | 61af2cc09a25a51015ed1c0e3ff9ce3dc8323987 | |
| parent | 3af5c080e61a00986852fc12ff2681238eaaa2dc | |
Merge pull request #18980 from lnicola/sync-from-rust
minor: Sync from downstream
Diffstat (limited to 'compiler')
442 files changed, 9314 insertions, 7457 deletions
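Among the layout changes in this diff, every `LayoutData` gains a `randomization_seed`, computed by folding the fields' (or variants') own seeds into the repr's `field_shuffle_seed` with wrapping addition, so that probabilistically distinct types feed distinct seeds into `-Z randomize-layout`. The sketch below only illustrates that combining step; the function and values are hypothetical stand-ins, not rustc APIs.

```rust
/// Illustrative only: fold per-field seeds into a repr-level seed the way the
/// layout calculator in the diff does. Wrapping addition keeps the result
/// deterministic without overflow panics.
fn combined_seed(field_seeds: &[u64], repr_field_shuffle_seed: u64) -> u64 {
    field_seeds
        .iter()
        .fold(repr_field_shuffle_seed, |acc, seed| acc.wrapping_add(*seed))
}

fn main() {
    // Types whose fields carry different seeds end up with (probabilistically)
    // distinct combined seeds, so their fields can be shuffled differently.
    let a = combined_seed(&[1, 2, 3], 0xDEAD_BEEF);
    let b = combined_seed(&[1, 2, 4], 0xDEAD_BEEF);
    assert_ne!(a, b);
    println!("{a:#x} vs {b:#x}");
}
```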
diff --git a/compiler/rustc_abi/src/extern_abi/mod.rs b/compiler/rustc_abi/src/extern_abi/mod.rs index 390f2dbc10f..130834d560f 100644 --- a/compiler/rustc_abi/src/extern_abi/mod.rs +++ b/compiler/rustc_abi/src/extern_abi/mod.rs @@ -45,6 +45,9 @@ pub enum ExternAbi { PtxKernel, Msp430Interrupt, X86Interrupt, + /// An entry-point function called by the GPU's host + // FIXME: should not be callable from Rust on GPU targets, is for host's use only + GpuKernel, EfiApi, AvrInterrupt, AvrNonBlockingInterrupt, @@ -122,6 +125,7 @@ const AbiDatas: &[AbiData] = &[ AbiData { abi: Abi::PtxKernel, name: "ptx-kernel" }, AbiData { abi: Abi::Msp430Interrupt, name: "msp430-interrupt" }, AbiData { abi: Abi::X86Interrupt, name: "x86-interrupt" }, + AbiData { abi: Abi::GpuKernel, name: "gpu-kernel" }, AbiData { abi: Abi::EfiApi, name: "efiapi" }, AbiData { abi: Abi::AvrInterrupt, name: "avr-interrupt" }, AbiData { abi: Abi::AvrNonBlockingInterrupt, name: "avr-non-blocking-interrupt" }, @@ -192,6 +196,10 @@ pub fn is_enabled( s } +/// Returns whether the ABI is stable to use. +/// +/// Note that there is a separate check determining whether the ABI is even supported +/// on the current target; see `fn is_abi_supported` in `rustc_target::spec`. pub fn is_stable(name: &str) -> Result<(), AbiDisabled> { match name { // Stable @@ -235,6 +243,10 @@ pub fn is_stable(name: &str) -> Result<(), AbiDisabled> { feature: sym::abi_x86_interrupt, explain: "x86-interrupt ABI is experimental and subject to change", }), + "gpu-kernel" => Err(AbiDisabled::Unstable { + feature: sym::abi_gpu_kernel, + explain: "gpu-kernel ABI is experimental and subject to change", + }), "avr-interrupt" | "avr-non-blocking-interrupt" => Err(AbiDisabled::Unstable { feature: sym::abi_avr_interrupt, explain: "avr-interrupt and avr-non-blocking-interrupt ABIs are experimental and subject to change", @@ -289,20 +301,21 @@ impl Abi { PtxKernel => 19, Msp430Interrupt => 20, X86Interrupt => 21, - EfiApi => 22, - AvrInterrupt => 23, - AvrNonBlockingInterrupt => 24, - CCmseNonSecureCall => 25, - CCmseNonSecureEntry => 26, + GpuKernel => 22, + EfiApi => 23, + AvrInterrupt => 24, + AvrNonBlockingInterrupt => 25, + CCmseNonSecureCall => 26, + CCmseNonSecureEntry => 27, // Cross-platform ABIs - System { unwind: false } => 27, - System { unwind: true } => 28, - RustIntrinsic => 29, - RustCall => 30, - Unadjusted => 31, - RustCold => 32, - RiscvInterruptM => 33, - RiscvInterruptS => 34, + System { unwind: false } => 28, + System { unwind: true } => 29, + RustIntrinsic => 30, + RustCall => 31, + Unadjusted => 32, + RustCold => 33, + RiscvInterruptM => 34, + RiscvInterruptS => 35, }; debug_assert!( AbiDatas diff --git a/compiler/rustc_abi/src/layout.rs b/compiler/rustc_abi/src/layout.rs index 226a46f605c..b8773f9ff38 100644 --- a/compiler/rustc_abi/src/layout.rs +++ b/compiler/rustc_abi/src/layout.rs @@ -119,6 +119,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { .chain(Niche::from_scalar(dl, Size::ZERO, a)) .max_by_key(|niche| niche.available(dl)); + let combined_seed = a.size(&self.cx).bytes().wrapping_add(b.size(&self.cx).bytes()); + LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Arbitrary { @@ -131,6 +133,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { size, max_repr_align: None, unadjusted_abi_align: align.abi, + randomization_seed: combined_seed, } } @@ -223,6 +226,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { size: Size::ZERO, max_repr_align: None, unadjusted_abi_align: dl.i8_align.abi, + 
randomization_seed: 0, } } @@ -385,6 +389,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { return Err(LayoutCalculatorError::EmptyUnion); }; + let combined_seed = only_variant + .iter() + .map(|v| v.randomization_seed) + .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed)); + Ok(LayoutData { variants: Variants::Single { index: only_variant_idx }, fields: FieldsShape::Union(union_field_count), @@ -394,6 +403,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { size: size.align_to(align.abi), max_repr_align, unadjusted_abi_align, + randomization_seed: combined_seed, }) } @@ -650,6 +660,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { BackendRepr::Memory { sized: true } }; + let combined_seed = variant_layouts + .iter() + .map(|v| v.randomization_seed) + .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed)); + let layout = LayoutData { variants: Variants::Multiple { tag: niche_scalar, @@ -671,6 +686,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { align, max_repr_align, unadjusted_abi_align, + randomization_seed: combined_seed, }; Some(TmpLayout { layout, variants: variant_layouts }) @@ -961,6 +977,11 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); + let combined_seed = layout_variants + .iter() + .map(|v| v.randomization_seed) + .fold(repr.field_shuffle_seed, |acc, seed| acc.wrapping_add(seed)); + let tagged_layout = LayoutData { variants: Variants::Multiple { tag, @@ -978,6 +999,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { size, max_repr_align, unadjusted_abi_align, + randomization_seed: combined_seed, }; let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants }; @@ -1030,12 +1052,15 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { let mut max_repr_align = repr.align; let mut inverse_memory_index: IndexVec<u32, FieldIdx> = fields.indices().collect(); let optimize_field_order = !repr.inhibit_struct_field_reordering(); - if optimize_field_order && fields.len() > 1 { - let end = - if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() }; - let optimizing = &mut inverse_memory_index.raw[..end]; - let fields_excluding_tail = &fields.raw[..end]; + let end = if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() }; + let optimizing = &mut inverse_memory_index.raw[..end]; + let fields_excluding_tail = &fields.raw[..end]; + // unsizable tail fields are excluded so that we use the same seed for the sized and unsized layouts. + let field_seed = fields_excluding_tail + .iter() + .fold(0u64, |acc, f| acc.wrapping_add(f.randomization_seed)); + if optimize_field_order && fields.len() > 1 { // If `-Z randomize-layout` was enabled for the type definition we can shuffle // the field ordering to try and catch some code making assumptions about layouts // we don't guarantee. @@ -1046,8 +1071,9 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { use rand::seq::SliceRandom; // `ReprOptions.field_shuffle_seed` is a deterministic seed we can use to randomize field // ordering. - let mut rng = - rand_xoshiro::Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed); + let mut rng = rand_xoshiro::Xoshiro128StarStar::seed_from_u64( + field_seed.wrapping_add(repr.field_shuffle_seed), + ); // Shuffle the ordering of the fields. 
optimizing.shuffle(&mut rng); @@ -1344,6 +1370,8 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { unadjusted_abi_align }; + let seed = field_seed.wrapping_add(repr.field_shuffle_seed); + Ok(LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Arbitrary { offsets, memory_index }, @@ -1353,6 +1381,7 @@ impl<Cx: HasDataLayout> LayoutCalculator<Cx> { size, max_repr_align, unadjusted_abi_align, + randomization_seed: seed, }) } diff --git a/compiler/rustc_abi/src/lib.rs b/compiler/rustc_abi/src/lib.rs index 7fa869a509c..8ad33749f34 100644 --- a/compiler/rustc_abi/src/lib.rs +++ b/compiler/rustc_abi/src/lib.rs @@ -1719,6 +1719,18 @@ pub struct LayoutData<FieldIdx: Idx, VariantIdx: Idx> { /// Only used on aarch64-linux, where the argument passing ABI ignores the requested alignment /// in some cases. pub unadjusted_abi_align: Align, + + /// The randomization seed based on this type's own repr and its fields. + /// + /// Since randomization is toggled on a per-crate basis even crates that do not have randomization + /// enabled should still calculate a seed so that downstream uses can use it to distinguish different + /// types. + /// + /// For every T and U for which we do not guarantee that a repr(Rust) `Foo<T>` can be coerced or + /// transmuted to `Foo<U>` we aim to create probalistically distinct seeds so that Foo can choose + /// to reorder its fields based on that information. The current implementation is a conservative + /// approximation of this goal. + pub randomization_seed: u64, } impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { @@ -1739,6 +1751,30 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { let largest_niche = Niche::from_scalar(cx, Size::ZERO, scalar); let size = scalar.size(cx); let align = scalar.align(cx); + + let range = scalar.valid_range(cx); + + // All primitive types for which we don't have subtype coercions should get a distinct seed, + // so that types wrapping them can use randomization to arrive at distinct layouts. + // + // Some type information is already lost at this point, so as an approximation we derive + // the seed from what remains. For example on 64-bit targets usize and u64 can no longer + // be distinguished. 
+ let randomization_seed = size + .bytes() + .wrapping_add( + match scalar.primitive() { + Primitive::Int(_, true) => 1, + Primitive::Int(_, false) => 2, + Primitive::Float(_) => 3, + Primitive::Pointer(_) => 4, + } << 32, + ) + // distinguishes references from pointers + .wrapping_add((range.start as u64).rotate_right(16)) + // distinguishes char from u32 and bool from u8 + .wrapping_add((range.end as u64).rotate_right(16)); + LayoutData { variants: Variants::Single { index: VariantIdx::new(0) }, fields: FieldsShape::Primitive, @@ -1748,6 +1784,7 @@ impl<FieldIdx: Idx, VariantIdx: Idx> LayoutData<FieldIdx, VariantIdx> { align, max_repr_align: None, unadjusted_abi_align: align.abi, + randomization_seed, } } } @@ -1770,6 +1807,7 @@ where variants, max_repr_align, unadjusted_abi_align, + ref randomization_seed, } = self; f.debug_struct("Layout") .field("size", size) @@ -1780,6 +1818,7 @@ where .field("variants", variants) .field("max_repr_align", max_repr_align) .field("unadjusted_abi_align", unadjusted_abi_align) + .field("randomization_seed", randomization_seed) .finish() } } diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs index 4d8525989cc..b21ccba93bb 100644 --- a/compiler/rustc_arena/src/lib.rs +++ b/compiler/rustc_arena/src/lib.rs @@ -78,7 +78,7 @@ impl<T> ArenaChunk<T> { // been initialized. unsafe { let slice = self.storage.as_mut(); - ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len])); + slice[..len].assume_init_drop(); } } } diff --git a/compiler/rustc_ast/src/ast.rs b/compiler/rustc_ast/src/ast.rs index 3a81b93d157..8e73df63ef5 100644 --- a/compiler/rustc_ast/src/ast.rs +++ b/compiler/rustc_ast/src/ast.rs @@ -623,7 +623,7 @@ impl Pat { PatKind::Wild | PatKind::Rest | PatKind::Never - | PatKind::Lit(_) + | PatKind::Expr(_) | PatKind::Range(..) | PatKind::Ident(..) | PatKind::Path(..) @@ -801,8 +801,8 @@ pub enum PatKind { /// A reference pattern (e.g., `&mut (a, b)`). Ref(P<Pat>, Mutability), - /// A literal. - Lit(P<Expr>), + /// A literal, const block or path. + Expr(P<Expr>), /// A range pattern (e.g., `1...2`, `1..2`, `1..`, `..2`, `1..=2`, `..=2`). Range(Option<P<Expr>>, Option<P<Expr>>, Spanned<RangeEnd>), diff --git a/compiler/rustc_ast/src/attr/mod.rs b/compiler/rustc_ast/src/attr/mod.rs index 97385b2eaab..51f18580013 100644 --- a/compiler/rustc_ast/src/attr/mod.rs +++ b/compiler/rustc_ast/src/attr/mod.rs @@ -723,6 +723,8 @@ impl MetaItemLit { pub trait AttributeExt: Debug { fn id(&self) -> AttrId; + /// For a single-segment attribute (i.e., `#[attr]` and not `#[path::atrr]`), + /// return the name of the attribute, else return the empty identifier. 
fn name_or_empty(&self) -> Symbol { self.ident().unwrap_or_else(Ident::empty).name } diff --git a/compiler/rustc_ast/src/mut_visit.rs b/compiler/rustc_ast/src/mut_visit.rs index 04cdfc93dcb..aa88e8369d5 100644 --- a/compiler/rustc_ast/src/mut_visit.rs +++ b/compiler/rustc_ast/src/mut_visit.rs @@ -1512,7 +1512,7 @@ pub fn walk_pat<T: MutVisitor>(vis: &mut T, pat: &mut P<Pat>) { vis.visit_ident(ident); visit_opt(sub, |sub| vis.visit_pat(sub)); } - PatKind::Lit(e) => vis.visit_expr(e), + PatKind::Expr(e) => vis.visit_expr(e), PatKind::TupleStruct(qself, path, elems) => { vis.visit_qself(qself); vis.visit_path(path); diff --git a/compiler/rustc_ast/src/token.rs b/compiler/rustc_ast/src/token.rs index f639e785bc4..3b7367d1ee2 100644 --- a/compiler/rustc_ast/src/token.rs +++ b/compiler/rustc_ast/src/token.rs @@ -909,7 +909,8 @@ impl Token { self.is_keyword(kw) || (case == Case::Insensitive && self.is_non_raw_ident_where(|id| { - id.name.as_str().to_lowercase() == kw.as_str().to_lowercase() + // Do an ASCII case-insensitive match, because all keywords are ASCII. + id.name.as_str().eq_ignore_ascii_case(kw.as_str()) })) } diff --git a/compiler/rustc_ast/src/util/comments/tests.rs b/compiler/rustc_ast/src/util/comments/tests.rs index f88b534a0c1..7a87ccea62a 100644 --- a/compiler/rustc_ast/src/util/comments/tests.rs +++ b/compiler/rustc_ast/src/util/comments/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use rustc_span::create_default_session_globals_then; diff --git a/compiler/rustc_ast/src/visit.rs b/compiler/rustc_ast/src/visit.rs index e99fc7b604e..1d6d7330757 100644 --- a/compiler/rustc_ast/src/visit.rs +++ b/compiler/rustc_ast/src/visit.rs @@ -680,7 +680,7 @@ pub fn walk_pat<'a, V: Visitor<'a>>(visitor: &mut V, pattern: &'a Pat) -> V::Res try_visit!(visitor.visit_ident(ident)); visit_opt!(visitor, visit_pat, optional_subpattern); } - PatKind::Lit(expression) => try_visit!(visitor.visit_expr(expression)), + PatKind::Expr(expression) => try_visit!(visitor.visit_expr(expression)), PatKind::Range(lower_bound, upper_bound, _end) => { visit_opt!(visitor, visit_expr, lower_bound); visit_opt!(visitor, visit_expr, upper_bound); diff --git a/compiler/rustc_ast_lowering/src/delegation.rs b/compiler/rustc_ast_lowering/src/delegation.rs index 758f1dc1c35..266e77c0e02 100644 --- a/compiler/rustc_ast_lowering/src/delegation.rs +++ b/compiler/rustc_ast_lowering/src/delegation.rs @@ -188,7 +188,14 @@ impl<'hir> LoweringContext<'_, 'hir> { ) -> hir::FnSig<'hir> { let header = if let Some(local_sig_id) = sig_id.as_local() { match self.resolver.delegation_fn_sigs.get(&local_sig_id) { - Some(sig) => self.lower_fn_header(sig.header, hir::Safety::Safe), + Some(sig) => self.lower_fn_header( + sig.header, + // HACK: we override the default safety instead of generating attributes from the ether. + // We are not forwarding the attributes, as the delegation fn sigs are collected on the ast, + // and here we need the hir attributes. 
+ if sig.target_feature { hir::Safety::Unsafe } else { hir::Safety::Safe }, + &[], + ), None => self.generate_header_error(), } } else { @@ -198,7 +205,11 @@ impl<'hir> LoweringContext<'_, 'hir> { Asyncness::No => hir::IsAsync::NotAsync, }; hir::FnHeader { - safety: sig.safety, + safety: if self.tcx.codegen_fn_attrs(sig_id).safe_target_features { + hir::HeaderSafety::SafeTargetFeatures + } else { + hir::HeaderSafety::Normal(sig.safety) + }, constness: self.tcx.constness(sig_id), asyncness, abi: sig.abi, @@ -384,7 +395,7 @@ impl<'hir> LoweringContext<'_, 'hir> { fn generate_header_error(&self) -> hir::FnHeader { hir::FnHeader { - safety: hir::Safety::Safe, + safety: hir::Safety::Safe.into(), constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, abi: abi::Abi::Rust, diff --git a/compiler/rustc_ast_lowering/src/expr.rs b/compiler/rustc_ast_lowering/src/expr.rs index d16a3ce390d..a76ca6772e5 100644 --- a/compiler/rustc_ast_lowering/src/expr.rs +++ b/compiler/rustc_ast_lowering/src/expr.rs @@ -102,17 +102,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let kind = match &e.kind { ExprKind::Array(exprs) => hir::ExprKind::Array(self.lower_exprs(exprs)), - ExprKind::ConstBlock(c) => { - let c = self.with_new_scopes(c.value.span, |this| { - let def_id = this.local_def_id(c.id); - hir::ConstBlock { - def_id, - hir_id: this.lower_node_id(c.id), - body: this.lower_const_body(c.value.span, Some(&c.value)), - } - }); - hir::ExprKind::ConstBlock(c) - } + ExprKind::ConstBlock(c) => hir::ExprKind::ConstBlock(self.lower_const_block(c)), ExprKind::Repeat(expr, count) => { let expr = self.lower_expr(expr); let count = self.lower_array_length_to_const_arg(count); @@ -153,18 +143,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let ohs = self.lower_expr(ohs); hir::ExprKind::Unary(op, ohs) } - ExprKind::Lit(token_lit) => { - let lit_kind = match LitKind::from_token_lit(*token_lit) { - Ok(lit_kind) => lit_kind, - Err(err) => { - let guar = - report_lit_error(&self.tcx.sess.psess, err, *token_lit, e.span); - LitKind::Err(guar) - } - }; - let lit = self.arena.alloc(respan(self.lower_span(e.span), lit_kind)); - hir::ExprKind::Lit(lit) - } + ExprKind::Lit(token_lit) => hir::ExprKind::Lit(self.lower_lit(token_lit, e.span)), ExprKind::IncludedBytes(bytes) => { let lit = self.arena.alloc(respan( self.lower_span(e.span), @@ -403,6 +382,32 @@ impl<'hir> LoweringContext<'_, 'hir> { }) } + pub(crate) fn lower_const_block(&mut self, c: &AnonConst) -> hir::ConstBlock { + self.with_new_scopes(c.value.span, |this| { + let def_id = this.local_def_id(c.id); + hir::ConstBlock { + def_id, + hir_id: this.lower_node_id(c.id), + body: this.lower_const_body(c.value.span, Some(&c.value)), + } + }) + } + + pub(crate) fn lower_lit( + &mut self, + token_lit: &token::Lit, + span: Span, + ) -> &'hir Spanned<LitKind> { + let lit_kind = match LitKind::from_token_lit(*token_lit) { + Ok(lit_kind) => lit_kind, + Err(err) => { + let guar = report_lit_error(&self.tcx.sess.psess, err, *token_lit, span); + LitKind::Err(guar) + } + }; + self.arena.alloc(respan(self.lower_span(span), lit_kind)) + } + fn lower_unop(&mut self, u: UnOp) -> hir::UnOp { match u { UnOp::Deref => hir::UnOp::Deref, diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs index c3ff7b4b897..29d4fb9ef25 100644 --- a/compiler/rustc_ast_lowering/src/index.rs +++ b/compiler/rustc_ast_lowering/src/index.rs @@ -209,6 +209,14 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { }); } + fn visit_pat_expr(&mut self, expr: &'hir 
PatExpr<'hir>) { + self.insert(expr.span, expr.hir_id, Node::PatExpr(expr)); + + self.with_parent(expr.hir_id, |this| { + intravisit::walk_pat_expr(this, expr); + }); + } + fn visit_pat_field(&mut self, field: &'hir PatField<'hir>) { self.insert(field.span, field.hir_id, Node::PatField(field)); self.with_parent(field.hir_id, |this| { diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 6fce9116938..61d7da429f8 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -231,7 +231,7 @@ impl<'hir> LoweringContext<'_, 'hir> { }); let sig = hir::FnSig { decl, - header: this.lower_fn_header(*header, hir::Safety::Safe), + header: this.lower_fn_header(*header, hir::Safety::Safe, attrs), span: this.lower_span(*fn_sig_span), }; hir::ItemKind::Fn { sig, generics, body: body_id, has_body: body.is_some() } @@ -610,7 +610,7 @@ impl<'hir> LoweringContext<'_, 'hir> { fn lower_foreign_item(&mut self, i: &ForeignItem) -> &'hir hir::ForeignItem<'hir> { let hir_id = hir::HirId::make_owner(self.current_hir_id_owner.def_id); let owner_id = hir_id.expect_owner(); - self.lower_attrs(hir_id, &i.attrs); + let attrs = self.lower_attrs(hir_id, &i.attrs); let item = hir::ForeignItem { owner_id, ident: self.lower_ident(i.ident), @@ -634,7 +634,7 @@ impl<'hir> LoweringContext<'_, 'hir> { }); // Unmarked safety in unsafe block defaults to unsafe. - let header = self.lower_fn_header(sig.header, hir::Safety::Unsafe); + let header = self.lower_fn_header(sig.header, hir::Safety::Unsafe, attrs); hir::ForeignItemKind::Fn( hir::FnSig { header, decl, span: self.lower_span(sig.span) }, @@ -776,6 +776,7 @@ impl<'hir> LoweringContext<'_, 'hir> { i.id, FnDeclKind::Trait, sig.header.coroutine_kind, + attrs, ); (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Required(names)), false) } @@ -795,6 +796,7 @@ impl<'hir> LoweringContext<'_, 'hir> { i.id, FnDeclKind::Trait, sig.header.coroutine_kind, + attrs, ); (generics, hir::TraitItemKind::Fn(sig, hir::TraitFn::Provided(body_id)), true) } @@ -911,6 +913,7 @@ impl<'hir> LoweringContext<'_, 'hir> { i.id, if self.is_in_trait_impl { FnDeclKind::Impl } else { FnDeclKind::Inherent }, sig.header.coroutine_kind, + attrs, ); (generics, hir::ImplItemKind::Fn(sig, body_id)) @@ -1339,8 +1342,9 @@ impl<'hir> LoweringContext<'_, 'hir> { id: NodeId, kind: FnDeclKind, coroutine_kind: Option<CoroutineKind>, + attrs: &[hir::Attribute], ) -> (&'hir hir::Generics<'hir>, hir::FnSig<'hir>) { - let header = self.lower_fn_header(sig.header, hir::Safety::Safe); + let header = self.lower_fn_header(sig.header, hir::Safety::Safe, attrs); let itctx = ImplTraitContext::Universal; let (generics, decl) = self.lower_generics(generics, id, itctx, |this| { this.lower_fn_decl(&sig.decl, id, sig.span, kind, coroutine_kind) @@ -1352,14 +1356,28 @@ impl<'hir> LoweringContext<'_, 'hir> { &mut self, h: FnHeader, default_safety: hir::Safety, + attrs: &[hir::Attribute], ) -> hir::FnHeader { let asyncness = if let Some(CoroutineKind::Async { span, .. }) = h.coroutine_kind { hir::IsAsync::Async(span) } else { hir::IsAsync::NotAsync }; + + let safety = self.lower_safety(h.safety, default_safety); + + // Treat safe `#[target_feature]` functions as unsafe, but also remember that we did so. 
+ let safety = if attrs.iter().any(|attr| attr.has_name(sym::target_feature)) + && safety.is_safe() + && !self.tcx.sess.target.is_like_wasm + { + hir::HeaderSafety::SafeTargetFeatures + } else { + safety.into() + }; + hir::FnHeader { - safety: self.lower_safety(h.safety, default_safety), + safety, asyncness, constness: self.lower_constness(h.constness), abi: self.lower_extern(h.ext), diff --git a/compiler/rustc_ast_lowering/src/lib.rs b/compiler/rustc_ast_lowering/src/lib.rs index fe2d5a594f3..0e28590bd66 100644 --- a/compiler/rustc_ast_lowering/src/lib.rs +++ b/compiler/rustc_ast_lowering/src/lib.rs @@ -35,6 +35,7 @@ #![doc(rust_logo)] #![feature(assert_matches)] #![feature(box_patterns)] +#![feature(if_let_guard)] #![feature(let_chains)] #![feature(rustdoc_internals)] #![warn(unreachable_pub)] diff --git a/compiler/rustc_ast_lowering/src/pat.rs b/compiler/rustc_ast_lowering/src/pat.rs index a4ab2561b72..abd314ae74c 100644 --- a/compiler/rustc_ast_lowering/src/pat.rs +++ b/compiler/rustc_ast_lowering/src/pat.rs @@ -1,9 +1,12 @@ +use std::sync::Arc; + use rustc_ast::ptr::P; use rustc_ast::*; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_hir as hir; use rustc_hir::def::Res; -use rustc_span::source_map::Spanned; +use rustc_middle::span_bug; +use rustc_span::source_map::{Spanned, respan}; use rustc_span::{Ident, Span}; use super::errors::{ @@ -35,8 +38,8 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { lower_sub, ); } - PatKind::Lit(e) => { - break hir::PatKind::Lit(self.lower_expr_within_pat(e, false)); + PatKind::Expr(e) => { + break hir::PatKind::Expr(self.lower_expr_within_pat(e, false)); } PatKind::TupleStruct(qself, path, pats) => { let qpath = self.lower_qpath( @@ -120,8 +123,9 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { self.lower_range_end(end, e2.is_some()), ); } - // FIXME(guard_patterns): lower pattern guards to HIR - PatKind::Guard(inner, _) => pattern = inner, + PatKind::Guard(inner, cond) => { + break hir::PatKind::Guard(self.lower_pat(inner), self.lower_expr(cond)); + } PatKind::Slice(pats) => break self.lower_pat_slice(pats), PatKind::Rest => { // If we reach here the `..` pattern is not semantically allowed. @@ -366,24 +370,54 @@ impl<'a, 'hir> LoweringContext<'a, 'hir> { // } // m!(S); // ``` - fn lower_expr_within_pat(&mut self, expr: &Expr, allow_paths: bool) -> &'hir hir::Expr<'hir> { - match &expr.kind { - ExprKind::Lit(..) - | ExprKind::ConstBlock(..) - | ExprKind::IncludedBytes(..) - | ExprKind::Err(_) - | ExprKind::Dummy => {} - ExprKind::Path(..) 
if allow_paths => {} - ExprKind::Unary(UnOp::Neg, inner) if matches!(inner.kind, ExprKind::Lit(_)) => {} + fn lower_expr_within_pat( + &mut self, + expr: &Expr, + allow_paths: bool, + ) -> &'hir hir::PatExpr<'hir> { + let err = |guar| hir::PatExprKind::Lit { + lit: self.arena.alloc(respan(self.lower_span(expr.span), LitKind::Err(guar))), + negated: false, + }; + let kind = match &expr.kind { + ExprKind::Lit(lit) => { + hir::PatExprKind::Lit { lit: self.lower_lit(lit, expr.span), negated: false } + } + ExprKind::ConstBlock(c) => hir::PatExprKind::ConstBlock(self.lower_const_block(c)), + ExprKind::IncludedBytes(bytes) => hir::PatExprKind::Lit { + lit: self.arena.alloc(respan( + self.lower_span(expr.span), + LitKind::ByteStr(Arc::clone(bytes), StrStyle::Cooked), + )), + negated: false, + }, + ExprKind::Err(guar) => err(*guar), + ExprKind::Dummy => span_bug!(expr.span, "lowered ExprKind::Dummy"), + ExprKind::Path(qself, path) if allow_paths => hir::PatExprKind::Path(self.lower_qpath( + expr.id, + qself, + path, + ParamMode::Optional, + AllowReturnTypeNotation::No, + ImplTraitContext::Disallowed(ImplTraitPosition::Path), + None, + )), + ExprKind::Unary(UnOp::Neg, inner) if let ExprKind::Lit(lit) = &inner.kind => { + hir::PatExprKind::Lit { lit: self.lower_lit(lit, expr.span), negated: true } + } _ => { let pattern_from_macro = expr.is_approximately_pattern(); let guar = self.dcx().emit_err(ArbitraryExpressionInPattern { span: expr.span, pattern_from_macro_note: pattern_from_macro, }); - return self.arena.alloc(self.expr_err(expr.span, guar)); + err(guar) } - } - self.lower_expr(expr) + }; + self.arena.alloc(hir::PatExpr { + hir_id: self.lower_node_id(expr.id), + span: expr.span, + kind, + }) } } diff --git a/compiler/rustc_ast_passes/src/feature_gate.rs b/compiler/rustc_ast_passes/src/feature_gate.rs index 3fbf1210186..94746212138 100644 --- a/compiler/rustc_ast_passes/src/feature_gate.rs +++ b/compiler/rustc_ast_passes/src/feature_gate.rs @@ -242,7 +242,7 @@ impl<'a> Visitor<'a> for PostExpansionVisitor<'a> { } } - ast::ItemKind::Struct(..) => { + ast::ItemKind::Struct(..) | ast::ItemKind::Enum(..) | ast::ItemKind::Union(..) => { for attr in attr::filter_by_name(&i.attrs, sym::repr) { for item in attr.meta_item_list().unwrap_or_else(ThinVec::new) { if item.has_name(sym::simd) { @@ -692,7 +692,7 @@ fn check_new_solver_banned_features(sess: &Session, features: &Features) { .find(|feat| feat.gate_name == sym::generic_const_exprs) .map(|feat| feat.attr_sp) { - #[cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] + #[allow(rustc::symbol_intern_string_literal)] sess.dcx().emit_err(errors::IncompatibleFeatures { spans: vec![gce_span], f1: Symbol::intern("-Znext-solver=globally"), diff --git a/compiler/rustc_ast_pretty/src/pprust/state.rs b/compiler/rustc_ast_pretty/src/pprust/state.rs index 172df102929..9b958ed6b0d 100644 --- a/compiler/rustc_ast_pretty/src/pprust/state.rs +++ b/compiler/rustc_ast_pretty/src/pprust/state.rs @@ -1701,7 +1701,7 @@ impl<'a> State<'a> { self.print_pat(inner); } } - PatKind::Lit(e) => self.print_expr(e, FixupContext::default()), + PatKind::Expr(e) => self.print_expr(e, FixupContext::default()), PatKind::Range(begin, end, Spanned { node: end_kind, .. 
}) => { if let Some(e) = begin { self.print_expr(e, FixupContext::default()); diff --git a/compiler/rustc_attr_data_structures/src/attributes.rs b/compiler/rustc_attr_data_structures/src/attributes.rs index 8986bec57de..def6b16ee8a 100644 --- a/compiler/rustc_attr_data_structures/src/attributes.rs +++ b/compiler/rustc_attr_data_structures/src/attributes.rs @@ -11,6 +11,22 @@ pub enum InlineAttr { Hint, Always, Never, + /// `#[rustc_force_inline]` forces inlining to happen in the MIR inliner - it reports an error + /// if the inlining cannot happen. It is limited to only free functions so that the calls + /// can always be resolved. + Force { + attr_span: Span, + reason: Option<Symbol>, + }, +} + +impl InlineAttr { + pub fn always(&self) -> bool { + match self { + InlineAttr::Always | InlineAttr::Force { .. } => true, + InlineAttr::None | InlineAttr::Hint | InlineAttr::Never => false, + } + } } #[derive(Clone, Encodable, Decodable, Debug, PartialEq, Eq, HashStable_Generic)] diff --git a/compiler/rustc_attr_data_structures/src/stability.rs b/compiler/rustc_attr_data_structures/src/stability.rs index 3c77d4c766c..dfda04387ec 100644 --- a/compiler/rustc_attr_data_structures/src/stability.rs +++ b/compiler/rustc_attr_data_structures/src/stability.rs @@ -101,6 +101,16 @@ impl PartialConstStability { } } +#[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)] +#[derive(HashStable_Generic)] +pub enum AllowedThroughUnstableModules { + /// This does not get a deprecation warning. We still generally would prefer people to use the + /// fully stable path, and a warning will likely be emitted in the future. + WithoutDeprecation, + /// Emit the given deprecation warning. + WithDeprecation(Symbol), +} + /// The available stability levels. #[derive(Encodable, Decodable, PartialEq, Copy, Clone, Debug, Eq, Hash)] #[derive(HashStable_Generic)] @@ -137,9 +147,8 @@ pub enum StabilityLevel { Stable { /// Rust release which stabilized this feature. since: StableSince, - /// Is this item allowed to be referred to on stable, despite being contained in unstable - /// modules? - allowed_through_unstable_modules: bool, + /// This is `Some` if this item allowed to be referred to on stable via unstable modules. 
+ allowed_through_unstable_modules: Option<AllowedThroughUnstableModules>, }, } diff --git a/compiler/rustc_attr_parsing/src/attributes/stability.rs b/compiler/rustc_attr_parsing/src/attributes/stability.rs index 89937e1c593..bfbe51b27d8 100644 --- a/compiler/rustc_attr_parsing/src/attributes/stability.rs +++ b/compiler/rustc_attr_parsing/src/attributes/stability.rs @@ -6,8 +6,8 @@ use rustc_ast::MetaItem; use rustc_ast::attr::AttributeExt; use rustc_ast_pretty::pprust; use rustc_attr_data_structures::{ - ConstStability, DefaultBodyStability, Stability, StabilityLevel, StableSince, UnstableReason, - VERSION_PLACEHOLDER, + AllowedThroughUnstableModules, ConstStability, DefaultBodyStability, Stability, StabilityLevel, + StableSince, UnstableReason, VERSION_PLACEHOLDER, }; use rustc_errors::ErrorGuaranteed; use rustc_session::Session; @@ -24,11 +24,16 @@ pub fn find_stability( item_sp: Span, ) -> Option<(Stability, Span)> { let mut stab: Option<(Stability, Span)> = None; - let mut allowed_through_unstable_modules = false; + let mut allowed_through_unstable_modules = None; for attr in attrs { match attr.name_or_empty() { - sym::rustc_allowed_through_unstable_modules => allowed_through_unstable_modules = true, + sym::rustc_allowed_through_unstable_modules => { + allowed_through_unstable_modules = Some(match attr.value_str() { + Some(msg) => AllowedThroughUnstableModules::WithDeprecation(msg), + None => AllowedThroughUnstableModules::WithoutDeprecation, + }) + } sym::unstable => { if stab.is_some() { sess.dcx().emit_err(session_diagnostics::MultipleStabilityLevels { @@ -56,15 +61,15 @@ pub fn find_stability( } } - if allowed_through_unstable_modules { + if let Some(allowed_through_unstable_modules) = allowed_through_unstable_modules { match &mut stab { Some(( Stability { - level: StabilityLevel::Stable { allowed_through_unstable_modules, .. }, + level: StabilityLevel::Stable { allowed_through_unstable_modules: in_stab, .. }, .. }, _, - )) => *allowed_through_unstable_modules = true, + )) => *in_stab = Some(allowed_through_unstable_modules), _ => { sess.dcx() .emit_err(session_diagnostics::RustcAllowedUnstablePairing { span: item_sp }); @@ -283,7 +288,7 @@ fn parse_stability(sess: &Session, attr: &impl AttributeExt) -> Option<(Symbol, match feature { Ok(feature) => { - let level = StabilityLevel::Stable { since, allowed_through_unstable_modules: false }; + let level = StabilityLevel::Stable { since, allowed_through_unstable_modules: None }; Some((feature, level)) } Err(ErrorGuaranteed { .. 
}) => None, diff --git a/compiler/rustc_borrowck/messages.ftl b/compiler/rustc_borrowck/messages.ftl index ee4b2f95cb1..ada20e5c614 100644 --- a/compiler/rustc_borrowck/messages.ftl +++ b/compiler/rustc_borrowck/messages.ftl @@ -213,6 +213,10 @@ borrowck_suggest_create_fresh_reborrow = borrowck_suggest_iterate_over_slice = consider iterating over a slice of the `{$ty}`'s content to avoid moving into the `for` loop +borrowck_tail_expr_drop_order = relative drop order changing in Rust 2024 + .label = this temporary value will be dropped at the end of the block + .note = consider using a `let` binding to ensure the value will live long enough + borrowck_ty_no_impl_copy = {$is_partial_move -> [true] partial move diff --git a/compiler/rustc_borrowck/src/borrow_set.rs b/compiler/rustc_borrowck/src/borrow_set.rs index ff838fbbb88..303fa469332 100644 --- a/compiler/rustc_borrowck/src/borrow_set.rs +++ b/compiler/rustc_borrowck/src/borrow_set.rs @@ -2,7 +2,7 @@ use std::fmt; use std::ops::Index; use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::{MutatingUseContext, NonUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{self, Body, Local, Location, traversal}; use rustc_middle::span_bug; @@ -11,7 +11,6 @@ use rustc_mir_dataflow::move_paths::MoveData; use tracing::debug; use crate::BorrowIndex; -use crate::path_utils::allow_two_phase_borrow; use crate::place_ext::PlaceExt; pub struct BorrowSet<'tcx> { @@ -132,7 +131,7 @@ impl<'tcx> fmt::Display for BorrowData<'tcx> { pub enum LocalsStateAtExit { AllAreInvalidated, - SomeAreInvalidated { has_storage_dead_or_moved: BitSet<Local> }, + SomeAreInvalidated { has_storage_dead_or_moved: DenseBitSet<Local> }, } impl LocalsStateAtExit { @@ -141,7 +140,7 @@ impl LocalsStateAtExit { body: &Body<'tcx>, move_data: &MoveData<'tcx>, ) -> Self { - struct HasStorageDead(BitSet<Local>); + struct HasStorageDead(DenseBitSet<Local>); impl<'tcx> Visitor<'tcx> for HasStorageDead { fn visit_local(&mut self, local: Local, ctx: PlaceContext, _: Location) { @@ -154,7 +153,8 @@ impl LocalsStateAtExit { if locals_are_invalidated_at_exit { LocalsStateAtExit::AllAreInvalidated } else { - let mut has_storage_dead = HasStorageDead(BitSet::new_empty(body.local_decls.len())); + let mut has_storage_dead = + HasStorageDead(DenseBitSet::new_empty(body.local_decls.len())); has_storage_dead.visit_body(body); let mut has_storage_dead_or_moved = has_storage_dead.0; for move_out in &move_data.moves { @@ -350,7 +350,7 @@ impl<'a, 'tcx> GatherBorrows<'a, 'tcx> { start_location, assigned_place, borrow_index, ); - if !allow_two_phase_borrow(kind) { + if !kind.allows_two_phase_borrow() { debug!(" -> {:?}", start_location); return; } diff --git a/compiler/rustc_borrowck/src/consumers.rs b/compiler/rustc_borrowck/src/consumers.rs index 7e8c48a1551..5a89f7c351c 100644 --- a/compiler/rustc_borrowck/src/consumers.rs +++ b/compiler/rustc_borrowck/src/consumers.rs @@ -11,8 +11,8 @@ pub use super::dataflow::{BorrowIndex, Borrows, calculate_borrows_out_of_scope_a pub use super::place_ext::PlaceExt; pub use super::places_conflict::{PlaceConflictBias, places_conflict}; pub use super::polonius::legacy::{ - AllFacts as PoloniusInput, LocationTable, PoloniusOutput, PoloniusRegionVid, RichLocation, - RustcFacts, + PoloniusFacts as PoloniusInput, PoloniusLocationTable, PoloniusOutput, PoloniusRegionVid, + RichLocation, RustcFacts, }; pub use super::region_infer::RegionInferenceContext; @@ -33,7 
+33,7 @@ pub enum ConsumerOptions { /// without significant slowdowns. /// /// Implies [`RegionInferenceContext`](ConsumerOptions::RegionInferenceContext), - /// and additionally retrieve the [`LocationTable`] and [`PoloniusInput`] that + /// and additionally retrieve the [`PoloniusLocationTable`] and [`PoloniusInput`] that /// would be given to Polonius. Critically, this does not run Polonius, which /// one may want to avoid due to performance issues on large bodies. PoloniusInputFacts, @@ -71,7 +71,7 @@ pub struct BodyWithBorrowckFacts<'tcx> { /// The table that maps Polonius points to locations in the table. /// Populated when using [`ConsumerOptions::PoloniusInputFacts`] /// or [`ConsumerOptions::PoloniusOutputFacts`]. - pub location_table: Option<LocationTable>, + pub location_table: Option<PoloniusLocationTable>, /// Polonius input facts. /// Populated when using [`ConsumerOptions::PoloniusInputFacts`] /// or [`ConsumerOptions::PoloniusOutputFacts`]. diff --git a/compiler/rustc_borrowck/src/dataflow.rs b/compiler/rustc_borrowck/src/dataflow.rs index abe4d4f20ec..7511a55b03a 100644 --- a/compiler/rustc_borrowck/src/dataflow.rs +++ b/compiler/rustc_borrowck/src/dataflow.rs @@ -2,7 +2,7 @@ use std::fmt; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::graph; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::{ self, BasicBlock, Body, CallReturnPlaces, Location, Place, TerminatorEdges, }; @@ -180,26 +180,35 @@ pub struct Borrows<'a, 'tcx> { } struct OutOfScopePrecomputer<'a, 'tcx> { - visited: BitSet<mir::BasicBlock>, + visited: DenseBitSet<mir::BasicBlock>, visit_stack: Vec<mir::BasicBlock>, body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>, borrows_out_of_scope_at_location: FxIndexMap<Location, Vec<BorrowIndex>>, } -impl<'a, 'tcx> OutOfScopePrecomputer<'a, 'tcx> { - fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self { - OutOfScopePrecomputer { - visited: BitSet::new_empty(body.basic_blocks.len()), +impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> { + fn compute( + body: &Body<'tcx>, + regioncx: &RegionInferenceContext<'tcx>, + borrow_set: &BorrowSet<'tcx>, + ) -> FxIndexMap<Location, Vec<BorrowIndex>> { + let mut prec = OutOfScopePrecomputer { + visited: DenseBitSet::new_empty(body.basic_blocks.len()), visit_stack: vec![], body, regioncx, borrows_out_of_scope_at_location: FxIndexMap::default(), + }; + for (borrow_index, borrow_data) in borrow_set.iter_enumerated() { + let borrow_region = borrow_data.region; + let location = borrow_data.reserve_location; + prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location); } + + prec.borrows_out_of_scope_at_location } -} -impl<'tcx> OutOfScopePrecomputer<'_, 'tcx> { fn precompute_borrows_out_of_scope( &mut self, borrow_index: BorrowIndex, @@ -280,19 +289,11 @@ pub fn calculate_borrows_out_of_scope_at_location<'tcx>( regioncx: &RegionInferenceContext<'tcx>, borrow_set: &BorrowSet<'tcx>, ) -> FxIndexMap<Location, Vec<BorrowIndex>> { - let mut prec = OutOfScopePrecomputer::new(body, regioncx); - for (borrow_index, borrow_data) in borrow_set.iter_enumerated() { - let borrow_region = borrow_data.region; - let location = borrow_data.reserve_location; - - prec.precompute_borrows_out_of_scope(borrow_index, borrow_region, location); - } - - prec.borrows_out_of_scope_at_location + OutOfScopePrecomputer::compute(body, regioncx, borrow_set) } struct PoloniusOutOfScopePrecomputer<'a, 'tcx> { - visited: BitSet<mir::BasicBlock>, + 
visited: DenseBitSet<mir::BasicBlock>, visit_stack: Vec<mir::BasicBlock>, body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>, @@ -300,19 +301,30 @@ struct PoloniusOutOfScopePrecomputer<'a, 'tcx> { loans_out_of_scope_at_location: FxIndexMap<Location, Vec<BorrowIndex>>, } -impl<'a, 'tcx> PoloniusOutOfScopePrecomputer<'a, 'tcx> { - fn new(body: &'a Body<'tcx>, regioncx: &'a RegionInferenceContext<'tcx>) -> Self { - Self { - visited: BitSet::new_empty(body.basic_blocks.len()), +impl<'tcx> PoloniusOutOfScopePrecomputer<'_, 'tcx> { + fn compute( + body: &Body<'tcx>, + regioncx: &RegionInferenceContext<'tcx>, + borrow_set: &BorrowSet<'tcx>, + ) -> FxIndexMap<Location, Vec<BorrowIndex>> { + // The in-tree polonius analysis computes loans going out of scope using the + // set-of-loans model. + let mut prec = PoloniusOutOfScopePrecomputer { + visited: DenseBitSet::new_empty(body.basic_blocks.len()), visit_stack: vec![], body, regioncx, loans_out_of_scope_at_location: FxIndexMap::default(), + }; + for (loan_idx, loan_data) in borrow_set.iter_enumerated() { + let issuing_region = loan_data.region; + let loan_issued_at = loan_data.reserve_location; + prec.precompute_loans_out_of_scope(loan_idx, issuing_region, loan_issued_at); } + + prec.loans_out_of_scope_at_location } -} -impl<'tcx> PoloniusOutOfScopePrecomputer<'_, 'tcx> { /// Loans are in scope while they are live: whether they are contained within any live region. /// In the location-insensitive analysis, a loan will be contained in a region if the issuing /// region can reach it in the subset graph. So this is a reachability problem. @@ -325,10 +337,17 @@ impl<'tcx> PoloniusOutOfScopePrecomputer<'_, 'tcx> { let sccs = self.regioncx.constraint_sccs(); let universal_regions = self.regioncx.universal_regions(); + // The loop below was useful for the location-insensitive analysis but shouldn't be + // impactful in the location-sensitive case. It seems that it does, however, as without it a + // handful of tests fail. That likely means some liveness or outlives data related to choice + // regions is missing + // FIXME: investigate the impact of loans traversing applied member constraints and why some + // tests fail otherwise. + // // We first handle the cases where the loan doesn't go out of scope, depending on the // issuing region's successors. for successor in graph::depth_first_search(&self.regioncx.region_graph(), issuing_region) { - // 1. Via applied member constraints + // Via applied member constraints // // The issuing region can flow into the choice regions, and they are either: // - placeholders or free regions themselves, @@ -346,14 +365,6 @@ impl<'tcx> PoloniusOutOfScopePrecomputer<'_, 'tcx> { return; } } - - // 2. Via regions that are live at all points: placeholders and free regions. - // - // If the issuing region outlives such a region, its loan escapes the function and - // cannot go out of scope. We can early return. - if self.regioncx.is_region_live_at_all_points(successor) { - return; - } } let first_block = loan_issued_at.block; @@ -461,34 +472,12 @@ impl<'a, 'tcx> Borrows<'a, 'tcx> { regioncx: &RegionInferenceContext<'tcx>, borrow_set: &'a BorrowSet<'tcx>, ) -> Self { - let mut borrows_out_of_scope_at_location = - calculate_borrows_out_of_scope_at_location(body, regioncx, borrow_set); - - // The in-tree polonius analysis computes loans going out of scope using the set-of-loans - // model, and makes sure they're identical to the existing computation of the set-of-points - // model. 
- if tcx.sess.opts.unstable_opts.polonius.is_next_enabled() { - let mut polonius_prec = PoloniusOutOfScopePrecomputer::new(body, regioncx); - for (loan_idx, loan_data) in borrow_set.iter_enumerated() { - let issuing_region = loan_data.region; - let loan_issued_at = loan_data.reserve_location; - - polonius_prec.precompute_loans_out_of_scope( - loan_idx, - issuing_region, - loan_issued_at, - ); - } - - assert_eq!( - borrows_out_of_scope_at_location, polonius_prec.loans_out_of_scope_at_location, - "polonius loan scopes differ from NLL borrow scopes, for body {:?}", - body.span, - ); - - borrows_out_of_scope_at_location = polonius_prec.loans_out_of_scope_at_location; - } - + let borrows_out_of_scope_at_location = + if !tcx.sess.opts.unstable_opts.polonius.is_next_enabled() { + calculate_borrows_out_of_scope_at_location(body, regioncx, borrow_set) + } else { + PoloniusOutOfScopePrecomputer::compute(body, regioncx, borrow_set) + }; Borrows { tcx, body, borrow_set, borrows_out_of_scope_at_location } } @@ -559,7 +548,7 @@ impl<'a, 'tcx> Borrows<'a, 'tcx> { } } -type BorrowsDomain = BitSet<BorrowIndex>; +type BorrowsDomain = DenseBitSet<BorrowIndex>; /// Forward dataflow computation of the set of borrows that are in scope at a particular location. /// - we gen the introduced loans @@ -575,7 +564,7 @@ impl<'tcx> rustc_mir_dataflow::Analysis<'tcx> for Borrows<'_, 'tcx> { fn bottom_value(&self, _: &mir::Body<'tcx>) -> Self::Domain { // bottom = nothing is reserved or activated yet; - BitSet::new_empty(self.borrow_set.len()) + DenseBitSet::new_empty(self.borrow_set.len()) } fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) { diff --git a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs index 8e5944d6cf4..da59f9f9ebd 100644 --- a/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/conflict_errors.rs @@ -20,7 +20,7 @@ use rustc_middle::bug; use rustc_middle::hir::nested_filter::OnlyBodies; use rustc_middle::mir::tcx::PlaceTy; use rustc_middle::mir::{ - self, AggregateKind, BindingForm, BorrowKind, CallSource, ClearCrossCrate, ConstraintCategory, + self, AggregateKind, BindingForm, BorrowKind, ClearCrossCrate, ConstraintCategory, FakeBorrowKind, FakeReadCause, LocalDecl, LocalInfo, LocalKind, Location, MutBorrowKind, Operand, Place, PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, VarBindingForm, VarDebugInfoContents, @@ -30,13 +30,13 @@ use rustc_middle::ty::{ self, PredicateKind, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor, Upcast, suggest_constraining_type_params, }; -use rustc_middle::util::CallKind; use rustc_mir_dataflow::move_paths::{InitKind, MoveOutIndex, MovePathIndex}; use rustc_span::def_id::{DefId, LocalDefId}; use rustc_span::hygiene::DesugaringKind; use rustc_span::{BytePos, Ident, Span, Symbol, kw, sym}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; use rustc_trait_selection::error_reporting::traits::FindExprBySpan; +use rustc_trait_selection::error_reporting::traits::call_kind::CallKind; use rustc_trait_selection::infer::InferCtxtExt; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; use rustc_trait_selection::traits::{Obligation, ObligationCause, ObligationCtxt}; @@ -46,7 +46,7 @@ use super::explain_borrow::{BorrowExplanation, LaterUseKind}; use super::{DescribePlaceOpt, RegionName, RegionNameSource, UseSpans}; use crate::borrow_set::{BorrowData, 
TwoPhaseActivation}; use crate::diagnostics::conflict_errors::StorageDeadOrDrop::LocalStorageDead; -use crate::diagnostics::{CapturedMessageOpt, Instance, find_all_local_uses}; +use crate::diagnostics::{CapturedMessageOpt, call_kind, find_all_local_uses}; use crate::prefixes::IsPrefixOf; use crate::{InitializationRequiringAction, MirBorrowckCtxt, WriteKind, borrowck_errors}; @@ -305,7 +305,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } if let UseSpans::FnSelfUse { - kind: CallKind::DerefCoercion { deref_target, deref_target_ty, .. }, + kind: CallKind::DerefCoercion { deref_target_span, deref_target_ty, .. }, .. } = use_spans { @@ -315,8 +315,10 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { )); // Check first whether the source is accessible (issue #87060) - if self.infcx.tcx.sess.source_map().is_span_accessible(deref_target) { - err.span_note(deref_target, "deref defined here"); + if let Some(deref_target_span) = deref_target_span + && self.infcx.tcx.sess.source_map().is_span_accessible(deref_target_span) + { + err.span_note(deref_target_span, "deref defined here"); } } @@ -1516,15 +1518,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { }); self.explain_why_borrow_contains_point(location, borrow, None) - .add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - Some(borrow_span), - None, - ); + .add_explanation_to_diagnostic(&self, &mut err, "", Some(borrow_span), None); self.suggest_copy_for_type_in_cloned_ref(&mut err, place); let typeck_results = self.infcx.tcx.typeck(self.mir_def_id()); if let Some(expr) = self.find_expr(borrow_span) { @@ -1591,15 +1585,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { }); self.explain_why_borrow_contains_point(location, borrow, None) - .add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - None, - None, - ); + .add_explanation_to_diagnostic(&self, &mut err, "", None, None); err } @@ -1886,9 +1872,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } explanation.add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, + &self, &mut err, first_borrow_desc, None, @@ -2698,22 +2682,19 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { return; } - let mut sugg = vec![]; let sm = self.infcx.tcx.sess.source_map(); - - if let Some(span) = finder.closure_arg_span { - sugg.push((sm.next_point(span.shrink_to_lo()).shrink_to_hi(), finder.suggest_arg)); - } - for span in finder.closure_change_spans { - sugg.push((span, "this".to_string())); - } - - for (span, suggest) in finder.closure_call_changes { - sugg.push((span, suggest)); - } + let sugg = finder + .closure_arg_span + .map(|span| (sm.next_point(span.shrink_to_lo()).shrink_to_hi(), finder.suggest_arg)) + .into_iter() + .chain( + finder.closure_change_spans.into_iter().map(|span| (span, "this".to_string())), + ) + .chain(finder.closure_call_changes) + .collect(); err.multipart_suggestion_verbose( - "try explicitly pass `&Self` into the Closure as an argument", + "try explicitly passing `&Self` into the closure as an argument", sugg, Applicability::MachineApplicable, ); @@ -3046,15 +3027,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { if let BorrowExplanation::MustBeValidFor { .. 
} = explanation { } else { - explanation.add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - None, - None, - ); + explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None); } } else { err.span_label(borrow_span, "borrowed value does not live long enough"); @@ -3067,15 +3040,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } }); - explanation.add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - Some(borrow_span), - None, - ); + explanation.add_explanation_to_diagnostic(&self, &mut err, "", Some(borrow_span), None); } err @@ -3128,15 +3093,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { _ => {} } - explanation.add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - None, - None, - ); + explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None); self.buffer_error(err); } @@ -3309,15 +3266,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } _ => {} } - explanation.add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - None, - None, - ); + explanation.add_explanation_to_diagnostic(&self, &mut err, "", None, None); borrow_spans.args_subdiag(&mut err, |args_span| { crate::session_diagnostics::CaptureArgLabel::Capture { @@ -3808,15 +3757,8 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } }); - self.explain_why_borrow_contains_point(location, loan, None).add_explanation_to_diagnostic( - self.infcx.tcx, - self.body, - &self.local_names, - &mut err, - "", - None, - None, - ); + self.explain_why_borrow_contains_point(location, loan, None) + .add_explanation_to_diagnostic(&self, &mut err, "", None, None); self.explain_deref_coercion(loan, &mut err); @@ -3825,38 +3767,27 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { fn explain_deref_coercion(&mut self, loan: &BorrowData<'tcx>, err: &mut Diag<'_>) { let tcx = self.infcx.tcx; - if let ( - Some(Terminator { - kind: TerminatorKind::Call { call_source: CallSource::OverloadedOperator, .. }, - .. - }), - Some((method_did, method_args)), - ) = ( - &self.body[loan.reserve_location.block].terminator, - rustc_middle::util::find_self_call( + if let Some(Terminator { kind: TerminatorKind::Call { call_source, fn_span, .. }, .. }) = + &self.body[loan.reserve_location.block].terminator + && let Some((method_did, method_args)) = rustc_middle::util::find_self_call( tcx, self.body, loan.assigned_place.local, loan.reserve_location.block, - ), - ) { - if tcx.is_diagnostic_item(sym::deref_method, method_did) { - let deref_target = - tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| { - Instance::try_resolve( - tcx, - self.infcx.typing_env(self.infcx.param_env), - deref_target, - method_args, - ) - .transpose() - }); - if let Some(Ok(instance)) = deref_target { - let deref_target_ty = - instance.ty(tcx, self.infcx.typing_env(self.infcx.param_env)); - err.note(format!("borrow occurs due to deref coercion to `{deref_target_ty}`")); - err.span_note(tcx.def_span(instance.def_id()), "deref defined here"); - } + ) + && let CallKind::DerefCoercion { deref_target_span, deref_target_ty, .. 
} = call_kind( + self.infcx.tcx, + self.infcx.typing_env(self.infcx.param_env), + method_did, + method_args, + *fn_span, + call_source.from_hir_call(), + Some(self.infcx.tcx.fn_arg_names(method_did)[0]), + ) + { + err.note(format!("borrow occurs due to deref coercion to `{deref_target_ty}`")); + if let Some(deref_target_span) = deref_target_span { + err.span_note(deref_target_span, "deref defined here"); } } } diff --git a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs index 87017460e8e..5c0c1d0eb86 100644 --- a/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs +++ b/compiler/rustc_borrowck/src/diagnostics/explain_borrow.rs @@ -5,10 +5,9 @@ use std::assert_matches::assert_matches; -use rustc_errors::{Applicability, Diag}; +use rustc_errors::{Applicability, Diag, EmissionGuarantee}; use rustc_hir as hir; use rustc_hir::intravisit::Visitor; -use rustc_index::IndexSlice; use rustc_infer::infer::NllRegionVariableOrigin; use rustc_middle::middle::resolve_bound_vars::ObjectLifetimeDefault; use rustc_middle::mir::{ @@ -17,15 +16,16 @@ use rustc_middle::mir::{ }; use rustc_middle::ty::adjustment::PointerCoercion; use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt}; -use rustc_middle::util::CallKind; -use rustc_span::{DesugaringKind, Span, Symbol, kw, sym}; +use rustc_span::{DesugaringKind, Span, kw, sym}; use rustc_trait_selection::error_reporting::traits::FindExprBySpan; +use rustc_trait_selection::error_reporting::traits::call_kind::CallKind; use tracing::{debug, instrument}; use super::{RegionName, UseSpans, find_use}; use crate::borrow_set::BorrowData; +use crate::constraints::OutlivesConstraint; use crate::nll::ConstraintDescription; -use crate::region_infer::{BlameConstraint, Cause, ExtraConstraintInfo}; +use crate::region_infer::{BlameConstraint, Cause}; use crate::{MirBorrowckCtxt, WriteKind}; #[derive(Debug)] @@ -43,7 +43,7 @@ pub(crate) enum BorrowExplanation<'tcx> { span: Span, region_name: RegionName, opt_place_desc: Option<String>, - extra_info: Vec<ExtraConstraintInfo>, + path: Vec<OutlivesConstraint<'tcx>>, }, Unexplained, } @@ -61,16 +61,18 @@ impl<'tcx> BorrowExplanation<'tcx> { pub(crate) fn is_explained(&self) -> bool { !matches!(self, BorrowExplanation::Unexplained) } - pub(crate) fn add_explanation_to_diagnostic( + pub(crate) fn add_explanation_to_diagnostic<G: EmissionGuarantee>( &self, - tcx: TyCtxt<'tcx>, - body: &Body<'tcx>, - local_names: &IndexSlice<Local, Option<Symbol>>, - err: &mut Diag<'_>, + cx: &MirBorrowckCtxt<'_, '_, 'tcx>, + err: &mut Diag<'_, G>, borrow_desc: &str, borrow_span: Option<Span>, multiple_borrow_span: Option<(Span, Span)>, ) { + let tcx = cx.infcx.tcx; + let body = cx.body; + let local_names = &cx.local_names; + if let Some(span) = borrow_span { let def_id = body.source.def_id(); if let Some(node) = tcx.hir().get_if_local(def_id) @@ -306,7 +308,7 @@ impl<'tcx> BorrowExplanation<'tcx> { ref region_name, ref opt_place_desc, from_closure: _, - ref extra_info, + ref path, } => { region_name.highlight_region_name(err); @@ -328,13 +330,8 @@ impl<'tcx> BorrowExplanation<'tcx> { ); }; - for extra in extra_info { - match extra { - ExtraConstraintInfo::PlaceholderFromPredicate(span) => { - err.span_note(*span, "due to current limitations in the borrow checker, this implies a `'static` lifetime"); - } - } - } + cx.add_placeholder_from_predicate_note(err, &path); + cx.add_sized_or_copy_bound_info(err, category, &path); if let ConstraintCategory::Cast { is_implicit_coercion: true, @@ 
-349,10 +346,10 @@ impl<'tcx> BorrowExplanation<'tcx> { } } - fn add_object_lifetime_default_note( + fn add_object_lifetime_default_note<G: EmissionGuarantee>( &self, tcx: TyCtxt<'tcx>, - err: &mut Diag<'_>, + err: &mut Diag<'_, G>, unsize_ty: Ty<'tcx>, ) { if let ty::Adt(def, args) = unsize_ty.kind() { @@ -406,9 +403,9 @@ impl<'tcx> BorrowExplanation<'tcx> { } } - fn add_lifetime_bound_suggestion_to_diagnostic( + fn add_lifetime_bound_suggestion_to_diagnostic<G: EmissionGuarantee>( &self, - err: &mut Diag<'_>, + err: &mut Diag<'_, G>, category: &ConstraintCategory<'tcx>, span: Span, region_name: &RegionName, @@ -435,14 +432,14 @@ impl<'tcx> BorrowExplanation<'tcx> { } } -fn suggest_rewrite_if_let( +fn suggest_rewrite_if_let<G: EmissionGuarantee>( tcx: TyCtxt<'_>, expr: &hir::Expr<'_>, pat: &str, init: &hir::Expr<'_>, conseq: &hir::Expr<'_>, alt: Option<&hir::Expr<'_>>, - err: &mut Diag<'_>, + err: &mut Diag<'_, G>, ) { let source_map = tcx.sess.source_map(); err.span_note( @@ -487,8 +484,9 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> { &self, borrow_region: RegionVid, outlived_region: RegionVid, - ) -> (ConstraintCategory<'tcx>, bool, Span, Option<RegionName>, Vec<ExtraConstraintInfo>) { - let (blame_constraint, extra_info) = self.regioncx.best_blame_constraint( + ) -> (ConstraintCategory<'tcx>, bool, Span, Option<RegionName>, Vec<OutlivesConstraint<'tcx>>) + { + let (blame_constraint, path) = self.regioncx.best_blame_constraint( borrow_region, NllRegionVariableOrigin::FreeRegion, |r| self.regioncx.provides_universal_region(r, borrow_region, outlived_region), @@ -497,7 +495,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> { let outlived_fr_name = self.give_region_a_name(outlived_region); - (category, from_closure, cause.span, outlived_fr_name, extra_info) + (category, from_closure, cause.span, outlived_fr_name, path) } /// Returns structured explanation for *why* the borrow contains the @@ -596,7 +594,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> { None => { if let Some(region) = self.to_error_region_vid(borrow_region_vid) { - let (category, from_closure, span, region_name, extra_info) = + let (category, from_closure, span, region_name, path) = self.free_region_constraint_info(borrow_region_vid, region); if let Some(region_name) = region_name { let opt_place_desc = self.describe_place(borrow.borrowed_place.as_ref()); @@ -606,7 +604,7 @@ impl<'tcx> MirBorrowckCtxt<'_, '_, 'tcx> { span, region_name, opt_place_desc, - extra_info, + path, } } else { debug!("Could not generate a region name"); diff --git a/compiler/rustc_borrowck/src/diagnostics/mod.rs b/compiler/rustc_borrowck/src/diagnostics/mod.rs index ebbdfea302c..bd6f77156ca 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mod.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mod.rs @@ -4,27 +4,29 @@ use std::collections::BTreeMap; use rustc_abi::{FieldIdx, VariantIdx}; use rustc_data_structures::fx::FxIndexMap; -use rustc_errors::{Applicability, Diag, MultiSpan}; +use rustc_errors::{Applicability, Diag, EmissionGuarantee, MultiSpan}; use rustc_hir::def::{CtorKind, Namespace}; use rustc_hir::{self as hir, CoroutineKind, LangItem}; use rustc_index::IndexSlice; -use rustc_infer::infer::BoundRegionConversionTime; +use rustc_infer::infer::{ + BoundRegionConversionTime, NllRegionVariableOrigin, RegionVariableOrigin, +}; use rustc_infer::traits::SelectionError; use rustc_middle::bug; use rustc_middle::mir::tcx::PlaceTy; use rustc_middle::mir::{ - AggregateKind, CallSource, ConstOperand, FakeReadCause, Local, LocalInfo, LocalKind, Location, - 
Operand, Place, PlaceRef, ProjectionElem, Rvalue, Statement, StatementKind, Terminator, - TerminatorKind, + AggregateKind, CallSource, ConstOperand, ConstraintCategory, FakeReadCause, Local, LocalInfo, + LocalKind, Location, Operand, Place, PlaceRef, ProjectionElem, Rvalue, Statement, + StatementKind, Terminator, TerminatorKind, }; use rustc_middle::ty::print::Print; -use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; -use rustc_middle::util::{CallDesugaringKind, call_kind}; +use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_mir_dataflow::move_paths::{InitLocation, LookupResult, MoveOutIndex}; use rustc_span::def_id::LocalDefId; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span, Symbol, sym}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; +use rustc_trait_selection::error_reporting::traits::call_kind::{CallDesugaringKind, call_kind}; use rustc_trait_selection::infer::InferCtxtExt; use rustc_trait_selection::traits::{ FulfillmentErrorCode, type_known_to_meet_bound_modulo_regions, @@ -33,7 +35,9 @@ use tracing::debug; use super::MirBorrowckCtxt; use super::borrow_set::BorrowData; +use crate::constraints::OutlivesConstraint; use crate::fluent_generated as fluent; +use crate::nll::ConstraintDescription; use crate::session_diagnostics::{ CaptureArgLabel, CaptureReasonLabel, CaptureReasonNote, CaptureReasonSuggest, CaptureVarCause, CaptureVarKind, CaptureVarPathUseCause, OnClosureNote, @@ -59,7 +63,7 @@ pub(crate) use mutability_errors::AccessKind; pub(crate) use outlives_suggestion::OutlivesSuggestionBuilder; pub(crate) use region_errors::{ErrorConstraintInfo, RegionErrorKind, RegionErrors}; pub(crate) use region_name::{RegionName, RegionNameSource}; -pub(crate) use rustc_middle::util::CallKind; +pub(crate) use rustc_trait_selection::error_reporting::traits::call_kind::CallKind; pub(super) struct DescribePlaceOpt { including_downcast: bool, @@ -619,6 +623,52 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { region.print(&mut printer).unwrap(); printer.into_buffer() } + + /// Add a note to region errors and borrow explanations when higher-ranked regions in predicates + /// implicitly introduce an "outlives `'static`" constraint. + fn add_placeholder_from_predicate_note<G: EmissionGuarantee>( + &self, + err: &mut Diag<'_, G>, + path: &[OutlivesConstraint<'tcx>], + ) { + let predicate_span = path.iter().find_map(|constraint| { + let outlived = constraint.sub; + if let Some(origin) = self.regioncx.var_infos.get(outlived) + && let RegionVariableOrigin::Nll(NllRegionVariableOrigin::Placeholder(_)) = + origin.origin + && let ConstraintCategory::Predicate(span) = constraint.category + { + Some(span) + } else { + None + } + }); + + if let Some(span) = predicate_span { + err.span_note(span, "due to current limitations in the borrow checker, this implies a `'static` lifetime"); + } + } + + /// Add a label to region errors and borrow explanations when outlives constraints arise from + /// proving a type implements `Sized` or `Copy`. 
+ fn add_sized_or_copy_bound_info<G: EmissionGuarantee>( + &self, + err: &mut Diag<'_, G>, + blamed_category: ConstraintCategory<'tcx>, + path: &[OutlivesConstraint<'tcx>], + ) { + for sought_category in [ConstraintCategory::SizedBound, ConstraintCategory::CopyBound] { + if sought_category != blamed_category + && let Some(sought_constraint) = path.iter().find(|c| c.category == sought_category) + { + let label = format!( + "requirement occurs due to {}", + sought_category.description().trim_end() + ); + err.span_label(sought_constraint.span, label); + } + } + } } /// The span(s) associated to a use of a place. @@ -992,6 +1042,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { kind, }; } + normal_ret } diff --git a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs index 16de160cae5..a6ca038282d 100644 --- a/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/mutability_errors.rs @@ -10,6 +10,7 @@ use rustc_hir::intravisit::Visitor; use rustc_hir::{self as hir, BindingMode, ByRef, Node}; use rustc_middle::bug; use rustc_middle::hir::place::PlaceBase; +use rustc_middle::mir::visit::PlaceContext; use rustc_middle::mir::{ self, BindingForm, Local, LocalDecl, LocalInfo, LocalKind, Location, Mutability, Place, PlaceRef, ProjectionElem, @@ -22,7 +23,6 @@ use rustc_trait_selection::traits; use tracing::debug; use crate::diagnostics::BorrowedContentSource; -use crate::util::FindAssignments; use crate::{MirBorrowckCtxt, session_diagnostics}; #[derive(Copy, Clone, Debug, Eq, PartialEq)] @@ -1088,6 +1088,38 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } } + /// Finds all statements that assign directly to local (i.e., X = ...) and returns their + /// locations. 
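The two diagnostic helpers above share one pattern: instead of threading precomputed extra info around, they scan the blame `path` for a constraint of interest and report against its span. A minimal stand-alone sketch of that scan, using simplified stand-in `Constraint` and `Category` types rather than the compiler's own:

#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum Category {
    SizedBound,
    CopyBound,
    Other,
}

#[derive(Debug)]
struct Constraint {
    category: Category,
    span: (usize, usize), // stand-in for a real source span
}

/// Walk the blame path and return the first constraint of the sought category,
/// unless that category is already the one being blamed.
fn find_secondary_constraint<'a>(
    path: &'a [Constraint],
    blamed: Category,
    sought: Category,
) -> Option<&'a Constraint> {
    if sought == blamed {
        return None;
    }
    path.iter().find(|c| c.category == sought)
}

fn main() {
    let path = vec![
        Constraint { category: Category::Other, span: (0, 4) },
        Constraint { category: Category::SizedBound, span: (10, 14) },
    ];
    for sought in [Category::SizedBound, Category::CopyBound] {
        if let Some(c) = find_secondary_constraint(&path, Category::Other, sought) {
            println!("requirement occurs due to {:?} at {:?}", c.category, c.span);
        }
    }
}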
+ fn find_assignments(&self, local: Local) -> Vec<Location> { + use rustc_middle::mir::visit::Visitor; + + struct FindLocalAssignmentVisitor { + needle: Local, + locations: Vec<Location>, + } + + impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor { + fn visit_local( + &mut self, + local: Local, + place_context: PlaceContext, + location: Location, + ) { + if self.needle != local { + return; + } + + if place_context.is_place_assignment() { + self.locations.push(location); + } + } + } + + let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] }; + visitor.visit_body(self.body); + visitor.locations + } + fn suggest_make_local_mut(&self, err: &mut Diag<'_>, local: Local, name: Symbol) { let local_decl = &self.body.local_decls[local]; @@ -1121,7 +1153,7 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { })) => { // check if the RHS is from desugaring let opt_assignment_rhs_span = - self.body.find_assignments(local).first().map(|&location| { + self.find_assignments(local).first().map(|&location| { if let Some(mir::Statement { source_info: _, kind: diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs index 3555009c63f..f0baa20648c 100644 --- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs @@ -13,7 +13,7 @@ use rustc_hir::{PolyTraitRef, TyKind, WhereBoundPredicate}; use rustc_infer::infer::{NllRegionVariableOrigin, RelateParamBound}; use rustc_middle::bug; use rustc_middle::hir::place::PlaceBase; -use rustc_middle::mir::{ConstraintCategory, ReturnConstraint}; +use rustc_middle::mir::{AnnotationSource, ConstraintCategory, ReturnConstraint}; use rustc_middle::ty::{self, GenericArgs, Region, RegionVid, Ty, TyCtxt, TypeVisitor}; use rustc_span::{Ident, Span, kw}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; @@ -29,7 +29,7 @@ use tracing::{debug, instrument, trace}; use super::{OutlivesSuggestionBuilder, RegionName, RegionNameSource}; use crate::nll::ConstraintDescription; use crate::region_infer::values::RegionElement; -use crate::region_infer::{BlameConstraint, ExtraConstraintInfo, TypeTest}; +use crate::region_infer::{BlameConstraint, TypeTest}; use crate::session_diagnostics::{ FnMutError, FnMutReturnTypeErr, GenericDoesNotLiveLongEnough, LifetimeOutliveErr, LifetimeReturnCategoryErr, RequireStaticErr, VarHereDenote, @@ -49,8 +49,8 @@ impl<'tcx> ConstraintDescription for ConstraintCategory<'tcx> { ConstraintCategory::Cast { is_implicit_coercion: false, .. } => "cast ", ConstraintCategory::Cast { is_implicit_coercion: true, .. 
} => "coercion ", ConstraintCategory::CallArgument(_) => "argument ", - ConstraintCategory::TypeAnnotation => "type annotation ", - ConstraintCategory::ClosureBounds => "closure body ", + ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg) => "generic argument ", + ConstraintCategory::TypeAnnotation(_) => "type annotation ", ConstraintCategory::SizedBound => "proving this value is `Sized` ", ConstraintCategory::CopyBound => "copying this value ", ConstraintCategory::OpaqueType => "opaque type ", @@ -440,10 +440,9 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { ) { debug!("report_region_error(fr={:?}, outlived_fr={:?})", fr, outlived_fr); - let (blame_constraint, extra_info) = - self.regioncx.best_blame_constraint(fr, fr_origin, |r| { - self.regioncx.provides_universal_region(r, fr, outlived_fr) - }); + let (blame_constraint, path) = self.regioncx.best_blame_constraint(fr, fr_origin, |r| { + self.regioncx.provides_universal_region(r, fr, outlived_fr) + }); let BlameConstraint { category, cause, variance_info, .. } = blame_constraint; debug!("report_region_error: category={:?} {:?} {:?}", category, cause, variance_info); @@ -554,13 +553,8 @@ impl<'infcx, 'tcx> MirBorrowckCtxt<'_, 'infcx, 'tcx> { } } - for extra in extra_info { - match extra { - ExtraConstraintInfo::PlaceholderFromPredicate(span) => { - diag.span_note(span, "due to current limitations in the borrow checker, this implies a `'static` lifetime"); - } - } - } + self.add_placeholder_from_predicate_note(&mut diag, &path); + self.add_sized_or_copy_bound_info(&mut diag, category, &path); self.buffer_error(diag); } diff --git a/compiler/rustc_borrowck/src/diagnostics/region_name.rs b/compiler/rustc_borrowck/src/diagnostics/region_name.rs index bdb880b2bce..9349b46ec5b 100644 --- a/compiler/rustc_borrowck/src/diagnostics/region_name.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_name.rs @@ -5,7 +5,7 @@ use std::fmt::{self, Display}; use std::iter; use rustc_data_structures::fx::IndexEntry; -use rustc_errors::Diag; +use rustc_errors::{Diag, EmissionGuarantee}; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_middle::ty::print::RegionHighlightMode; @@ -108,7 +108,7 @@ impl RegionName { } } - pub(crate) fn highlight_region_name(&self, diag: &mut Diag<'_>) { + pub(crate) fn highlight_region_name<G: EmissionGuarantee>(&self, diag: &mut Diag<'_, G>) { match &self.source { RegionNameSource::NamedLateParamRegion(span) | RegionNameSource::NamedEarlyParamRegion(span) => { diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs index b061a450c83..91dc76f597a 100644 --- a/compiler/rustc_borrowck/src/lib.rs +++ b/compiler/rustc_borrowck/src/lib.rs @@ -6,6 +6,7 @@ #![feature(assert_matches)] #![feature(box_patterns)] #![feature(file_buffered)] +#![feature(if_let_guard)] #![feature(let_chains)] #![feature(never_type)] #![feature(rustc_attrs)] @@ -15,16 +16,19 @@ #![warn(unreachable_pub)] // tidy-alphabetical-end +use std::borrow::Cow; use std::cell::RefCell; use std::marker::PhantomData; -use std::ops::Deref; +use std::ops::{ControlFlow, Deref}; use rustc_abi::FieldIdx; use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; use rustc_data_structures::graph::dominators::Dominators; +use rustc_errors::LintDiagnostic; use rustc_hir as hir; +use rustc_hir::CRATE_HIR_ID; use rustc_hir::def_id::LocalDefId; -use rustc_index::bit_set::{BitSet, MixedBitSet}; +use rustc_index::bit_set::{DenseBitSet, MixedBitSet}; use rustc_index::{IndexSlice, IndexVec}; use rustc_infer::infer::{ 
InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin, TyCtxtInferExt, @@ -42,7 +46,7 @@ use rustc_mir_dataflow::move_paths::{ InitIndex, InitLocation, LookupResult, MoveData, MovePathIndex, }; use rustc_mir_dataflow::{Analysis, EntryStates, Results, ResultsVisitor, visit_results}; -use rustc_session::lint::builtin::UNUSED_MUT; +use rustc_session::lint::builtin::{TAIL_EXPR_DROP_ORDER, UNUSED_MUT}; use rustc_span::{Span, Symbol}; use smallvec::SmallVec; use tracing::{debug, instrument}; @@ -56,7 +60,7 @@ use crate::diagnostics::{ use crate::path_utils::*; use crate::place_ext::PlaceExt; use crate::places_conflict::{PlaceConflictBias, places_conflict}; -use crate::polonius::legacy::{LocationTable, PoloniusOutput}; +use crate::polonius::legacy::{PoloniusLocationTable, PoloniusOutput}; use crate::prefixes::PrefixSet; use crate::region_infer::RegionInferenceContext; use crate::renumber::RegionCtxt; @@ -81,7 +85,6 @@ mod session_diagnostics; mod type_check; mod universal_regions; mod used_muts; -mod util; /// A public API provided for the Rust compiler consumers. pub mod consumers; @@ -176,12 +179,9 @@ fn do_mir_borrowck<'tcx>( infcx.register_predefined_opaques_for_next_solver(def); } - let location_table = LocationTable::new(body); + let location_table = PoloniusLocationTable::new(body); let move_data = MoveData::gather_moves(body, tcx, |_| true); - let promoted_move_data = promoted - .iter_enumerated() - .map(|(idx, body)| (idx, MoveData::gather_moves(body, tcx, |_| true))); let flow_inits = MaybeInitializedPlaces::new(tcx, body, &move_data) .iterate_to_fixpoint(tcx, body, Some("borrowck")) @@ -239,15 +239,20 @@ fn do_mir_borrowck<'tcx>( false }; - for (idx, move_data) in promoted_move_data { + // While promoteds should mostly be correct by construction, we need to check them for + // invalid moves to detect moving out of arrays:`struct S; fn main() { &([S][0]); }`. + for promoted_body in &promoted { use rustc_middle::mir::visit::Visitor; - - let promoted_body = &promoted[idx]; + // This assumes that we won't use some of the fields of the `promoted_mbcx` + // when detecting and reporting move errors. While it would be nice to move + // this check out of `MirBorrowckCtxt`, actually doing so is far from trivial. + let move_data = MoveData::gather_moves(promoted_body, tcx, |_| true); let mut promoted_mbcx = MirBorrowckCtxt { infcx: &infcx, body: promoted_body, move_data: &move_data, - location_table: &location_table, // no need to create a real one for the promoted, it is not used + // no need to create a real location table for the promoted, it is not used + location_table: &location_table, movable_coroutine, fn_self_span_reported: Default::default(), locals_are_invalidated_at_exit, @@ -266,9 +271,6 @@ fn do_mir_borrowck<'tcx>( move_errors: Vec::new(), diags_buffer, }; - MoveVisitor { ctxt: &mut promoted_mbcx }.visit_body(promoted_body); - promoted_mbcx.report_move_errors(); - struct MoveVisitor<'a, 'b, 'infcx, 'tcx> { ctxt: &'a mut MirBorrowckCtxt<'b, 'infcx, 'tcx>, } @@ -280,6 +282,8 @@ fn do_mir_borrowck<'tcx>( } } } + MoveVisitor { ctxt: &mut promoted_mbcx }.visit_body(promoted_body); + promoted_mbcx.report_move_errors(); } let mut mbcx = MirBorrowckCtxt { @@ -513,7 +517,7 @@ struct MirBorrowckCtxt<'a, 'infcx, 'tcx> { /// Map from MIR `Location` to `LocationIndex`; created /// when MIR borrowck begins. 
- location_table: &'a LocationTable, + location_table: &'a PoloniusLocationTable, movable_coroutine: bool, /// This keeps track of whether local variables are free-ed when the function @@ -636,9 +640,11 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, Borrowck<'a, 'tcx>> for MirBorrowckCtxt< | StatementKind::Coverage(..) // These do not actually affect borrowck | StatementKind::ConstEvalCounter - // This do not affect borrowck - | StatementKind::BackwardIncompatibleDropHint { .. } | StatementKind::StorageLive(..) => {} + // This does not affect borrowck + StatementKind::BackwardIncompatibleDropHint { place, reason: BackwardIncompatibleDropReason::Edition2024 } => { + self.check_backward_incompatible_drop(location, (**place, span), state); + } StatementKind::StorageDead(local) => { self.access_place( location, @@ -823,6 +829,7 @@ use self::ReadOrWrite::{Activation, Read, Reservation, Write}; #[derive(Copy, Clone, PartialEq, Eq, Debug)] enum ArtificialField { + ArrayLength, FakeBorrow, } @@ -1007,6 +1014,24 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { } } + fn borrows_in_scope<'s>( + &self, + location: Location, + state: &'s BorrowckDomain, + ) -> Cow<'s, DenseBitSet<BorrowIndex>> { + if let Some(polonius) = &self.polonius_output { + // Use polonius output if it has been enabled. + let location = self.location_table.start_index(location); + let mut polonius_output = DenseBitSet::new_empty(self.borrow_set.len()); + for &idx in polonius.errors_at(location) { + polonius_output.insert(idx); + } + Cow::Owned(polonius_output) + } else { + Cow::Borrowed(&state.borrows) + } + } + #[instrument(level = "debug", skip(self, state))] fn check_access_for_conflict( &mut self, @@ -1018,18 +1043,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { ) -> bool { let mut error_reported = false; - // Use polonius output if it has been enabled. - let mut polonius_output; - let borrows_in_scope = if let Some(polonius) = &self.polonius_output { - let location = self.location_table.start_index(location); - polonius_output = BitSet::new_empty(self.borrow_set.len()); - for &idx in polonius.errors_at(location) { - polonius_output.insert(idx); - } - &polonius_output - } else { - &state.borrows - }; + let borrows_in_scope = self.borrows_in_scope(location, state); each_borrow_involving_path( self, @@ -1054,31 +1068,31 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { rw, (borrow_index, borrow), ); - Control::Continue + ControlFlow::Continue(()) } (Read(_), BorrowKind::Shared | BorrowKind::Fake(_)) | ( Read(ReadKind::Borrow(BorrowKind::Fake(FakeBorrowKind::Shallow))), BorrowKind::Mut { .. }, - ) => Control::Continue, + ) => ControlFlow::Continue(()), (Reservation(_), BorrowKind::Fake(_) | BorrowKind::Shared) => { // This used to be a future compatibility warning (to be // disallowed on NLL). See rust-lang/rust#56254 - Control::Continue + ControlFlow::Continue(()) } (Write(WriteKind::Move), BorrowKind::Fake(FakeBorrowKind::Shallow)) => { // Handled by initialization checks. - Control::Continue + ControlFlow::Continue(()) } (Read(kind), BorrowKind::Mut { .. }) => { // Reading from mere reservations of mutable-borrows is OK. 
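The `Cow` refactor of `borrows_in_scope` avoids cloning the dataflow state in the common case: the function either borrows the existing set or materializes an owned one from the Polonius output. A small sketch of the same borrow-or-build shape using only std types (a `HashSet` standing in for the dense bit set, and an `Option` standing in for the Polonius results):

use std::borrow::Cow;
use std::collections::HashSet;

struct State {
    // Borrows known to be in scope according to the dataflow analysis.
    borrows: HashSet<u32>,
}

/// Either borrow the dataflow set, or build an owned set from an alternative
/// source, without cloning in the common case.
fn borrows_in_scope<'s>(state: &'s State, polonius: Option<&[u32]>) -> Cow<'s, HashSet<u32>> {
    match polonius {
        Some(errors) => Cow::Owned(errors.iter().copied().collect()),
        None => Cow::Borrowed(&state.borrows),
    }
}

fn main() {
    let state = State { borrows: [1, 2, 3].into_iter().collect() };
    // Common case: no allocation, the dataflow set is borrowed.
    assert!(matches!(borrows_in_scope(&state, None), Cow::Borrowed(_)));
    // Alternative case: an owned set is built from the other source.
    assert!(borrows_in_scope(&state, Some(&[2])).contains(&2));
}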
if !is_active(this.dominators(), borrow, location) { - assert!(allow_two_phase_borrow(borrow.kind)); - return Control::Continue; + assert!(borrow.kind.allows_two_phase_borrow()); + return ControlFlow::Continue(()); } error_reported = true; @@ -1094,7 +1108,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { this.buffer_error(err); } } - Control::Break + ControlFlow::Break(()) } (Reservation(kind) | Activation(kind, _) | Write(kind), _) => { @@ -1141,7 +1155,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { this.report_illegal_mutation_of_borrowed(location, place_span, borrow) } } - Control::Break + ControlFlow::Break(()) } }, ); @@ -1149,6 +1163,61 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { error_reported } + /// Through #123739, backward incompatible drops (BIDs) are introduced. + /// We would like to emit lints whether borrow checking fails at these future drop locations. + #[instrument(level = "debug", skip(self, state))] + fn check_backward_incompatible_drop( + &mut self, + location: Location, + (place, place_span): (Place<'tcx>, Span), + state: &BorrowckDomain, + ) { + let tcx = self.infcx.tcx; + // If this type does not need `Drop`, then treat it like a `StorageDead`. + // This is needed because we track the borrows of refs to thread locals, + // and we'll ICE because we don't track borrows behind shared references. + let sd = if place.ty(self.body, tcx).ty.needs_drop(tcx, self.body.typing_env(tcx)) { + AccessDepth::Drop + } else { + AccessDepth::Shallow(None) + }; + + let borrows_in_scope = self.borrows_in_scope(location, state); + + // This is a very simplified version of `Self::check_access_for_conflict`. + // We are here checking on BIDs and specifically still-live borrows of data involving the BIDs. + each_borrow_involving_path( + self, + self.infcx.tcx, + self.body, + (sd, place), + self.borrow_set, + |borrow_index| borrows_in_scope.contains(borrow_index), + |this, _borrow_index, borrow| { + if matches!(borrow.kind, BorrowKind::Fake(_)) { + return ControlFlow::Continue(()); + } + let borrowed = this.retrieve_borrow_spans(borrow).var_or_use_path_span(); + let explain = this.explain_why_borrow_contains_point( + location, + borrow, + Some((WriteKind::StorageDeadOrDrop, place)), + ); + this.infcx.tcx.node_span_lint( + TAIL_EXPR_DROP_ORDER, + CRATE_HIR_ID, + borrowed, + |diag| { + session_diagnostics::TailExprDropOrder { borrowed }.decorate_lint(diag); + explain.add_explanation_to_diagnostic(&this, diag, "", None, None); + }, + ); + // We may stop at the first case + ControlFlow::Break(()) + }, + ); + } + fn mutate_place( &mut self, location: Location, @@ -1185,7 +1254,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { } BorrowKind::Mut { .. } => { let wk = WriteKind::MutableBorrow(bk); - if allow_two_phase_borrow(bk) { + if bk.allows_two_phase_borrow() { (Deep, Reservation(wk)) } else { (Deep, Write(wk)) @@ -1270,11 +1339,16 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, '_, 'tcx> { ); } - &Rvalue::Discriminant(place) => { + &(Rvalue::Len(place) | Rvalue::Discriminant(place)) => { + let af = match *rvalue { + Rvalue::Len(..) => Some(ArtificialField::ArrayLength), + Rvalue::Discriminant(..) 
=> None, + _ => unreachable!(), + }; self.access_place( location, (place, span), - (Shallow(None), Read(ReadKind::Copy)), + (Shallow(af), Read(ReadKind::Copy)), LocalMutationIsAllowed::No, state, ); diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs index 968b6d383c1..35264bd1a70 100644 --- a/compiler/rustc_borrowck/src/nll.rs +++ b/compiler/rustc_borrowck/src/nll.rs @@ -28,7 +28,9 @@ use crate::borrow_set::BorrowSet; use crate::consumers::ConsumerOptions; use crate::diagnostics::{BorrowckDiagnosticsBuffer, RegionErrors}; use crate::polonius::LocalizedOutlivesConstraintSet; -use crate::polonius::legacy::{AllFacts, AllFactsExt, LocationTable, PoloniusOutput}; +use crate::polonius::legacy::{ + PoloniusFacts, PoloniusFactsExt, PoloniusLocationTable, PoloniusOutput, +}; use crate::region_infer::RegionInferenceContext; use crate::type_check::{self, MirTypeckResults}; use crate::universal_regions::UniversalRegions; @@ -39,7 +41,7 @@ use crate::{BorrowckInferCtxt, polonius, renumber}; pub(crate) struct NllOutput<'tcx> { pub regioncx: RegionInferenceContext<'tcx>, pub opaque_type_values: FxIndexMap<LocalDefId, OpaqueHiddenType<'tcx>>, - pub polonius_input: Option<Box<AllFacts>>, + pub polonius_input: Option<Box<PoloniusFacts>>, pub polonius_output: Option<Box<PoloniusOutput>>, pub opt_closure_req: Option<ClosureRegionRequirements<'tcx>>, pub nll_errors: RegionErrors<'tcx>, @@ -80,7 +82,7 @@ pub(crate) fn compute_regions<'a, 'tcx>( universal_regions: UniversalRegions<'tcx>, body: &Body<'tcx>, promoted: &IndexSlice<Promoted, Body<'tcx>>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, borrow_set: &BorrowSet<'tcx>, @@ -91,17 +93,17 @@ pub(crate) fn compute_regions<'a, 'tcx>( || is_polonius_legacy_enabled; let polonius_output = consumer_options.map(|c| c.polonius_output()).unwrap_or_default() || is_polonius_legacy_enabled; - let mut all_facts = - (polonius_input || AllFacts::enabled(infcx.tcx)).then_some(AllFacts::default()); + let mut polonius_facts = + (polonius_input || PoloniusFacts::enabled(infcx.tcx)).then_some(PoloniusFacts::default()); - let elements = Rc::new(DenseLocationMap::new(body)); + let location_map = Rc::new(DenseLocationMap::new(body)); // Run the MIR type-checker. let MirTypeckResults { constraints, universal_region_relations, opaque_type_values, - mut polonius_context, + polonius_context, } = type_check::type_check( infcx, body, @@ -109,10 +111,10 @@ pub(crate) fn compute_regions<'a, 'tcx>( universal_regions, location_table, borrow_set, - &mut all_facts, + &mut polonius_facts, flow_inits, move_data, - Rc::clone(&elements), + Rc::clone(&location_map), ); // Create the region inference context, taking ownership of the @@ -122,7 +124,7 @@ pub(crate) fn compute_regions<'a, 'tcx>( // If requested, emit legacy polonius facts. polonius::legacy::emit_facts( - &mut all_facts, + &mut polonius_facts, infcx.tcx, location_table, body, @@ -137,23 +139,23 @@ pub(crate) fn compute_regions<'a, 'tcx>( var_infos, constraints, universal_region_relations, - elements, + location_map, ); - // If requested for `-Zpolonius=next`, convert NLL constraints to localized outlives - // constraints. 
- let localized_outlives_constraints = polonius_context - .as_mut() - .map(|polonius_context| polonius_context.create_localized_constraints(&mut regioncx, body)); + // If requested for `-Zpolonius=next`, convert NLL constraints to localized outlives constraints + // and use them to compute loan liveness. + let localized_outlives_constraints = polonius_context.as_ref().map(|polonius_context| { + polonius_context.compute_loan_liveness(infcx.tcx, &mut regioncx, body, borrow_set) + }); // If requested: dump NLL facts, and run legacy polonius analysis. - let polonius_output = all_facts.as_ref().and_then(|all_facts| { + let polonius_output = polonius_facts.as_ref().and_then(|polonius_facts| { if infcx.tcx.sess.opts.unstable_opts.nll_facts { let def_id = body.source.def_id(); let def_path = infcx.tcx.def_path(def_id); let dir_path = PathBuf::from(&infcx.tcx.sess.opts.unstable_opts.nll_facts_dir) .join(def_path.to_filename_friendly_no_crate()); - all_facts.write_to_dir(dir_path, location_table).unwrap(); + polonius_facts.write_to_dir(dir_path, location_table).unwrap(); } if polonius_output { @@ -162,7 +164,7 @@ pub(crate) fn compute_regions<'a, 'tcx>( let algorithm = Algorithm::from_str(&algorithm).unwrap(); debug!("compute_regions: using polonius algorithm {:?}", algorithm); let _prof_timer = infcx.tcx.prof.generic_activity("polonius_analysis"); - Some(Box::new(Output::compute(all_facts, algorithm, false))) + Some(Box::new(Output::compute(polonius_facts, algorithm, false))) } else { None } @@ -182,7 +184,7 @@ pub(crate) fn compute_regions<'a, 'tcx>( NllOutput { regioncx, opaque_type_values: remapped_opaque_tys, - polonius_input: all_facts.map(Box::new), + polonius_input: polonius_facts.map(Box::new), polonius_output, opt_closure_req: closure_region_requirements, nll_errors, diff --git a/compiler/rustc_borrowck/src/path_utils.rs b/compiler/rustc_borrowck/src/path_utils.rs index 12a37f56fcf..2c94a32d369 100644 --- a/compiler/rustc_borrowck/src/path_utils.rs +++ b/compiler/rustc_borrowck/src/path_utils.rs @@ -1,26 +1,14 @@ +use std::ops::ControlFlow; + use rustc_abi::FieldIdx; use rustc_data_structures::graph::dominators::Dominators; -use rustc_middle::mir::{BasicBlock, Body, BorrowKind, Location, Place, PlaceRef, ProjectionElem}; +use rustc_middle::mir::{BasicBlock, Body, Location, Place, PlaceRef, ProjectionElem}; use rustc_middle::ty::TyCtxt; use tracing::debug; use crate::borrow_set::{BorrowData, BorrowSet, TwoPhaseActivation}; use crate::{AccessDepth, BorrowIndex, places_conflict}; -/// Returns `true` if the borrow represented by `kind` is -/// allowed to be split into separate Reservation and -/// Activation phases. 
-pub(super) fn allow_two_phase_borrow(kind: BorrowKind) -> bool { - kind.allows_two_phase_borrow() -} - -/// Control for the path borrow checking code -#[derive(Copy, Clone, PartialEq, Eq, Debug)] -pub(super) enum Control { - Continue, - Break, -} - /// Encapsulates the idea of iterating over every borrow that involves a particular path pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( s: &mut S, @@ -31,7 +19,7 @@ pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( is_candidate: I, mut op: F, ) where - F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> Control, + F: FnMut(&mut S, BorrowIndex, &BorrowData<'tcx>) -> ControlFlow<()>, I: Fn(BorrowIndex) -> bool, { let (access, place) = access_place; @@ -62,7 +50,7 @@ pub(super) fn each_borrow_involving_path<'tcx, F, I, S>( i, borrowed, place, access ); let ctrl = op(s, i, borrowed); - if ctrl == Control::Break { + if matches!(ctrl, ControlFlow::Break(_)) { return; } } diff --git a/compiler/rustc_borrowck/src/places_conflict.rs b/compiler/rustc_borrowck/src/places_conflict.rs index 560b8c0349a..679e111caa9 100644 --- a/compiler/rustc_borrowck/src/places_conflict.rs +++ b/compiler/rustc_borrowck/src/places_conflict.rs @@ -203,7 +203,8 @@ fn place_components_conflict<'tcx>( let base_ty = base.ty(body, tcx).ty; match (elem, base_ty.kind(), access) { - (_, _, Shallow(Some(ArtificialField::FakeBorrow))) => { + (_, _, Shallow(Some(ArtificialField::ArrayLength))) + | (_, _, Shallow(Some(ArtificialField::FakeBorrow))) => { // The array length is like additional fields on the // type; it does not overlap any existing data there. // Furthermore, if cannot actually be a prefix of any diff --git a/compiler/rustc_borrowck/src/polonius/legacy/accesses.rs b/compiler/rustc_borrowck/src/polonius/legacy/accesses.rs index 4a0c8d9b4b4..edd7ca578b7 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/accesses.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/accesses.rs @@ -4,16 +4,16 @@ use rustc_middle::ty::TyCtxt; use rustc_mir_dataflow::move_paths::{LookupResult, MoveData}; use tracing::debug; -use super::{AllFacts, LocationIndex, LocationTable}; +use super::{LocationIndex, PoloniusFacts, PoloniusLocationTable}; use crate::def_use::{self, DefUse}; use crate::universal_regions::UniversalRegions; /// Emit polonius facts for variable defs, uses, drops, and path accesses. pub(crate) fn emit_access_facts<'tcx>( tcx: TyCtxt<'tcx>, - facts: &mut AllFacts, + facts: &mut PoloniusFacts, body: &Body<'tcx>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, move_data: &MoveData<'tcx>, universal_regions: &UniversalRegions<'tcx>, ) { @@ -31,9 +31,9 @@ pub(crate) fn emit_access_facts<'tcx>( /// MIR visitor extracting point-wise facts about accesses. 
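The removal above replaces the crate-local `Control` enum with `std::ops::ControlFlow<()>`, which expresses the same continue-or-break protocol without a bespoke type. A stand-alone sketch of the callback shape, with a simplified borrow list standing in for the real borrow set:

use std::ops::ControlFlow;

#[derive(Debug)]
struct Borrow {
    place: &'static str,
}

/// Visit every borrow involving `place`, stopping early when the callback breaks.
fn each_borrow_involving_path<F>(borrows: &[Borrow], place: &str, mut op: F)
where
    F: FnMut(usize, &Borrow) -> ControlFlow<()>,
{
    for (index, borrow) in borrows.iter().enumerate() {
        if borrow.place == place && op(index, borrow).is_break() {
            return;
        }
    }
}

fn main() {
    let borrows = [Borrow { place: "x" }, Borrow { place: "y" }, Borrow { place: "x" }];
    each_borrow_involving_path(&borrows, "x", |index, borrow| {
        println!("conflict with borrow {index} of {}", borrow.place);
        // Report only the first conflict, then stop, like the error paths do.
        ControlFlow::Break(())
    });
}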
struct AccessFactsExtractor<'a, 'tcx> { - facts: &'a mut AllFacts, + facts: &'a mut PoloniusFacts, move_data: &'a MoveData<'tcx>, - location_table: &'a LocationTable, + location_table: &'a PoloniusLocationTable, } impl<'tcx> AccessFactsExtractor<'_, 'tcx> { diff --git a/compiler/rustc_borrowck/src/polonius/legacy/facts.rs b/compiler/rustc_borrowck/src/polonius/legacy/facts.rs index 42c4e733218..64389b11a65 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/facts.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/facts.rs @@ -4,13 +4,13 @@ use std::fs::{self, File}; use std::io::Write; use std::path::Path; -use polonius_engine::{AllFacts as PoloniusFacts, Atom, Output}; +use polonius_engine::{AllFacts, Atom, Output}; use rustc_macros::extension; use rustc_middle::mir::Local; use rustc_middle::ty::{RegionVid, TyCtxt}; use rustc_mir_dataflow::move_paths::MovePathIndex; -use super::{LocationIndex, LocationTable}; +use super::{LocationIndex, PoloniusLocationTable}; use crate::BorrowIndex; #[derive(Copy, Clone, Debug)] @@ -49,11 +49,11 @@ impl polonius_engine::FactTypes for RustcFacts { type Path = MovePathIndex; } -pub type AllFacts = PoloniusFacts<RustcFacts>; +pub type PoloniusFacts = AllFacts<RustcFacts>; -#[extension(pub(crate) trait AllFactsExt)] -impl AllFacts { - /// Returns `true` if there is a need to gather `AllFacts` given the +#[extension(pub(crate) trait PoloniusFactsExt)] +impl PoloniusFacts { + /// Returns `true` if there is a need to gather `PoloniusFacts` given the /// current `-Z` flags. fn enabled(tcx: TyCtxt<'_>) -> bool { tcx.sess.opts.unstable_opts.nll_facts @@ -63,7 +63,7 @@ impl AllFacts { fn write_to_dir( &self, dir: impl AsRef<Path>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>> { let dir: &Path = dir.as_ref(); fs::create_dir_all(dir)?; @@ -119,7 +119,7 @@ impl Atom for LocationIndex { } struct FactWriter<'w> { - location_table: &'w LocationTable, + location_table: &'w PoloniusLocationTable, dir: &'w Path, } @@ -141,7 +141,7 @@ trait FactRow { fn write( &self, out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>>; } @@ -149,7 +149,7 @@ impl FactRow for PoloniusRegionVid { fn write( &self, out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>> { write_row(out, location_table, &[self]) } @@ -163,7 +163,7 @@ where fn write( &self, out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>> { write_row(out, location_table, &[&self.0, &self.1]) } @@ -178,7 +178,7 @@ where fn write( &self, out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>> { write_row(out, location_table, &[&self.0, &self.1, &self.2]) } @@ -194,7 +194,7 @@ where fn write( &self, out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, ) -> Result<(), Box<dyn Error>> { write_row(out, location_table, &[&self.0, &self.1, &self.2, &self.3]) } @@ -202,7 +202,7 @@ where fn write_row( out: &mut dyn Write, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, columns: &[&dyn FactCell], ) -> Result<(), Box<dyn Error>> { for (index, c) in columns.iter().enumerate() { @@ -213,41 +213,41 @@ fn write_row( } trait FactCell { - fn to_string(&self, location_table: &LocationTable) -> String; + fn 
to_string(&self, location_table: &PoloniusLocationTable) -> String; } impl FactCell for BorrowIndex { - fn to_string(&self, _location_table: &LocationTable) -> String { + fn to_string(&self, _location_table: &PoloniusLocationTable) -> String { format!("{self:?}") } } impl FactCell for Local { - fn to_string(&self, _location_table: &LocationTable) -> String { + fn to_string(&self, _location_table: &PoloniusLocationTable) -> String { format!("{self:?}") } } impl FactCell for MovePathIndex { - fn to_string(&self, _location_table: &LocationTable) -> String { + fn to_string(&self, _location_table: &PoloniusLocationTable) -> String { format!("{self:?}") } } impl FactCell for PoloniusRegionVid { - fn to_string(&self, _location_table: &LocationTable) -> String { + fn to_string(&self, _location_table: &PoloniusLocationTable) -> String { format!("{self:?}") } } impl FactCell for RegionVid { - fn to_string(&self, _location_table: &LocationTable) -> String { + fn to_string(&self, _location_table: &PoloniusLocationTable) -> String { format!("{self:?}") } } impl FactCell for LocationIndex { - fn to_string(&self, location_table: &LocationTable) -> String { + fn to_string(&self, location_table: &PoloniusLocationTable) -> String { format!("{:?}", location_table.to_rich_location(*self)) } } diff --git a/compiler/rustc_borrowck/src/polonius/legacy/loan_invalidations.rs b/compiler/rustc_borrowck/src/polonius/legacy/loan_invalidations.rs index bb6d593d0d8..cbcfab1dc3e 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/loan_invalidations.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/loan_invalidations.rs @@ -1,3 +1,5 @@ +use std::ops::ControlFlow; + use rustc_data_structures::graph::dominators::Dominators; use rustc_middle::bug; use rustc_middle::mir::visit::Visitor; @@ -9,7 +11,7 @@ use rustc_middle::mir::{ use rustc_middle::ty::TyCtxt; use tracing::debug; -use super::{AllFacts, LocationTable}; +use super::{PoloniusFacts, PoloniusLocationTable}; use crate::borrow_set::BorrowSet; use crate::path_utils::*; use crate::{ @@ -20,9 +22,9 @@ use crate::{ /// Emit `loan_invalidated_at` facts. pub(super) fn emit_loan_invalidations<'tcx>( tcx: TyCtxt<'tcx>, - facts: &mut AllFacts, + facts: &mut PoloniusFacts, body: &Body<'tcx>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, borrow_set: &BorrowSet<'tcx>, ) { let dominators = body.basic_blocks.dominators(); @@ -33,9 +35,9 @@ pub(super) fn emit_loan_invalidations<'tcx>( struct LoanInvalidationsGenerator<'a, 'tcx> { tcx: TyCtxt<'tcx>, - facts: &'a mut AllFacts, + facts: &'a mut PoloniusFacts, body: &'a Body<'tcx>, - location_table: &'a LocationTable, + location_table: &'a PoloniusLocationTable, dominators: &'a Dominators<BasicBlock>, borrow_set: &'a BorrowSet<'tcx>, } @@ -260,7 +262,7 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> { } BorrowKind::Mut { .. } => { let wk = WriteKind::MutableBorrow(bk); - if allow_two_phase_borrow(bk) { + if bk.allows_two_phase_borrow() { (Deep, Reservation(wk)) } else { (Deep, Write(wk)) @@ -298,11 +300,16 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> { self.consume_operand(location, op); } - &Rvalue::Discriminant(place) => { + &(Rvalue::Len(place) | Rvalue::Discriminant(place)) => { + let af = match rvalue { + Rvalue::Len(..) => Some(ArtificialField::ArrayLength), + Rvalue::Discriminant(..) 
=> None, + _ => unreachable!(), + }; self.access_place( location, place, - (Shallow(None), Read(ReadKind::Copy)), + (Shallow(af), Read(ReadKind::Copy)), LocalMutationIsAllowed::No, ); } @@ -378,8 +385,8 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> { // Reading from mere reservations of mutable-borrows is OK. if !is_active(this.dominators, borrow, location) { // If the borrow isn't active yet, reads don't invalidate it - assert!(allow_two_phase_borrow(borrow.kind)); - return Control::Continue; + assert!(borrow.kind.allows_two_phase_borrow()); + return ControlFlow::Continue(()); } // Unique and mutable borrows are invalidated by reads from any @@ -395,7 +402,7 @@ impl<'a, 'tcx> LoanInvalidationsGenerator<'a, 'tcx> { this.emit_loan_invalidated_at(borrow_index, location); } } - Control::Continue + ControlFlow::Continue(()) }, ); } diff --git a/compiler/rustc_borrowck/src/polonius/legacy/loan_kills.rs b/compiler/rustc_borrowck/src/polonius/legacy/loan_kills.rs index 0148e0b2869..098c922bf7b 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/loan_kills.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/loan_kills.rs @@ -6,16 +6,16 @@ use rustc_middle::mir::{ use rustc_middle::ty::TyCtxt; use tracing::debug; -use super::{AllFacts, LocationTable}; +use super::{PoloniusFacts, PoloniusLocationTable}; use crate::borrow_set::BorrowSet; use crate::places_conflict; /// Emit `loan_killed_at` and `cfg_edge` facts at the same time. pub(super) fn emit_loan_kills<'tcx>( tcx: TyCtxt<'tcx>, - facts: &mut AllFacts, + facts: &mut PoloniusFacts, body: &Body<'tcx>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, borrow_set: &BorrowSet<'tcx>, ) { let mut visitor = LoanKillsGenerator { borrow_set, tcx, location_table, facts, body }; @@ -26,8 +26,8 @@ pub(super) fn emit_loan_kills<'tcx>( struct LoanKillsGenerator<'a, 'tcx> { tcx: TyCtxt<'tcx>, - facts: &'a mut AllFacts, - location_table: &'a LocationTable, + facts: &'a mut PoloniusFacts, + location_table: &'a PoloniusLocationTable, borrow_set: &'a BorrowSet<'tcx>, body: &'a Body<'tcx>, } diff --git a/compiler/rustc_borrowck/src/polonius/legacy/location.rs b/compiler/rustc_borrowck/src/polonius/legacy/location.rs index 4cb1202033c..5f816bb9bbd 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/location.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/location.rs @@ -13,7 +13,7 @@ use tracing::debug; /// granularity through outlives relations; however, the rich location /// table serves another purpose: it compresses locations from /// multiple words into a single u32. 
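The doc comment on the renamed table describes compressing rich locations into a single index. The indexing scheme sketched below is an assumption for illustration only, not the table's actual implementation: it gives each statement a Start and a Mid point and lays blocks out back to back using precomputed offsets, which matches the shape of the interface used elsewhere in this diff.

/// A toy location table: two points per statement (Start and Mid),
/// with blocks laid out contiguously via precomputed offsets.
struct LocationTable {
    // points_before_block[b] = index of the first point belonging to block b.
    points_before_block: Vec<usize>,
}

#[derive(Debug, PartialEq)]
enum RichLocation {
    Start { block: usize, statement: usize },
    Mid { block: usize, statement: usize },
}

impl LocationTable {
    fn new(statements_per_block: &[usize]) -> Self {
        let mut offset = 0;
        let points_before_block = statements_per_block
            .iter()
            .map(|&n| {
                let this = offset;
                // +1 for the terminator, 2 points per statement/terminator.
                offset += (n + 1) * 2;
                this
            })
            .collect();
        Self { points_before_block }
    }

    fn start_index(&self, block: usize, statement: usize) -> usize {
        self.points_before_block[block] + statement * 2
    }

    fn mid_index(&self, block: usize, statement: usize) -> usize {
        self.start_index(block, statement) + 1
    }

    fn to_rich_location(&self, point: usize) -> RichLocation {
        // Find the last block whose starting offset is <= point.
        let block = self.points_before_block.iter().rposition(|&offset| offset <= point).unwrap();
        let within = point - self.points_before_block[block];
        let statement = within / 2;
        if within % 2 == 0 {
            RichLocation::Start { block, statement }
        } else {
            RichLocation::Mid { block, statement }
        }
    }
}

fn main() {
    let table = LocationTable::new(&[2, 3]);
    let p = table.mid_index(1, 0);
    assert_eq!(table.to_rich_location(p), RichLocation::Mid { block: 1, statement: 0 });
    println!("point {p} -> {:?}", table.to_rich_location(p));
}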
-pub struct LocationTable { +pub struct PoloniusLocationTable { num_points: usize, statements_before_block: IndexVec<BasicBlock, usize>, } @@ -30,7 +30,7 @@ pub enum RichLocation { Mid(Location), } -impl LocationTable { +impl PoloniusLocationTable { pub(crate) fn new(body: &Body<'_>) -> Self { let mut num_points = 0; let statements_before_block = body @@ -43,8 +43,8 @@ impl LocationTable { }) .collect(); - debug!("LocationTable(statements_before_block={:#?})", statements_before_block); - debug!("LocationTable: num_points={:#?}", num_points); + debug!("PoloniusLocationTable(statements_before_block={:#?})", statements_before_block); + debug!("PoloniusLocationTable: num_points={:#?}", num_points); Self { num_points, statements_before_block } } diff --git a/compiler/rustc_borrowck/src/polonius/legacy/mod.rs b/compiler/rustc_borrowck/src/polonius/legacy/mod.rs index 45bdbd1e999..95820c07a02 100644 --- a/compiler/rustc_borrowck/src/polonius/legacy/mod.rs +++ b/compiler/rustc_borrowck/src/polonius/legacy/mod.rs @@ -36,16 +36,16 @@ pub use self::facts::*; /// /// The rest of the facts are emitted during typeck and liveness. pub(crate) fn emit_facts<'tcx>( - all_facts: &mut Option<AllFacts>, + facts: &mut Option<PoloniusFacts>, tcx: TyCtxt<'tcx>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, body: &Body<'tcx>, borrow_set: &BorrowSet<'tcx>, move_data: &MoveData<'tcx>, universal_region_relations: &UniversalRegionRelations<'tcx>, constraints: &MirTypeckRegionConstraints<'tcx>, ) { - let Some(facts) = all_facts else { + let Some(facts) = facts else { // We don't do anything if there are no facts to fill. return; }; @@ -67,9 +67,9 @@ pub(crate) fn emit_facts<'tcx>( /// Emit facts needed for move/init analysis: moves and assignments. fn emit_move_facts( - facts: &mut AllFacts, + facts: &mut PoloniusFacts, body: &Body<'_>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, move_data: &MoveData<'_>, ) { facts.path_is_var.extend(move_data.rev_lookup.iter_locals_enumerated().map(|(l, r)| (r, l))); @@ -139,7 +139,7 @@ fn emit_move_facts( /// Emit universal regions facts, and their relations. fn emit_universal_region_facts( - facts: &mut AllFacts, + facts: &mut PoloniusFacts, borrow_set: &BorrowSet<'_>, universal_region_relations: &UniversalRegionRelations<'_>, ) { @@ -187,10 +187,10 @@ pub(crate) fn emit_drop_facts<'tcx>( local: Local, kind: &GenericArg<'tcx>, universal_regions: &UniversalRegions<'tcx>, - all_facts: &mut Option<AllFacts>, + facts: &mut Option<PoloniusFacts>, ) { debug!("emit_drop_facts(local={:?}, kind={:?}", local, kind); - let Some(facts) = all_facts.as_mut() else { return }; + let Some(facts) = facts.as_mut() else { return }; let _prof_timer = tcx.prof.generic_activity("polonius_fact_generation"); tcx.for_each_free_region(kind, |drop_live_region| { let region_vid = universal_regions.to_region_vid(drop_live_region); @@ -201,8 +201,8 @@ pub(crate) fn emit_drop_facts<'tcx>( /// Emit facts about the outlives constraints: the `subset` base relation, i.e. not a transitive /// closure. 
fn emit_outlives_facts<'tcx>( - facts: &mut AllFacts, - location_table: &LocationTable, + facts: &mut PoloniusFacts, + location_table: &PoloniusLocationTable, constraints: &MirTypeckRegionConstraints<'tcx>, ) { facts.subset_base.extend(constraints.outlives_constraints.outlives().iter().flat_map( diff --git a/compiler/rustc_borrowck/src/polonius/loan_liveness.rs b/compiler/rustc_borrowck/src/polonius/loan_liveness.rs new file mode 100644 index 00000000000..768c12a97a6 --- /dev/null +++ b/compiler/rustc_borrowck/src/polonius/loan_liveness.rs @@ -0,0 +1,307 @@ +use std::collections::{BTreeMap, BTreeSet}; + +use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexSet}; +use rustc_middle::mir::visit::Visitor; +use rustc_middle::mir::{ + Body, Local, Location, Place, Rvalue, Statement, StatementKind, Terminator, TerminatorKind, +}; +use rustc_middle::ty::{RegionVid, TyCtxt}; +use rustc_mir_dataflow::points::PointIndex; + +use super::{LiveLoans, LocalizedOutlivesConstraintSet}; +use crate::constraints::OutlivesConstraint; +use crate::dataflow::BorrowIndex; +use crate::region_infer::values::LivenessValues; +use crate::type_check::Locations; +use crate::{BorrowSet, PlaceConflictBias, places_conflict}; + +/// Compute loan reachability, stop at kills, and trace loan liveness throughout the CFG, by +/// traversing the full graph of constraints that combines: +/// - the localized constraints (the physical edges), +/// - with the constraints that hold at all points (the logical edges). +pub(super) fn compute_loan_liveness<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + liveness: &LivenessValues, + outlives_constraints: impl Iterator<Item = OutlivesConstraint<'tcx>>, + borrow_set: &BorrowSet<'tcx>, + localized_outlives_constraints: &LocalizedOutlivesConstraintSet, +) -> LiveLoans { + let mut live_loans = LiveLoans::new(borrow_set.len()); + + // FIXME: it may be preferable for kills to be encoded in the edges themselves, to simplify and + // likely make traversal (and constraint generation) more efficient. We also display kills on + // edges when visualizing the constraint graph anyways. + let kills = collect_kills(body, tcx, borrow_set); + + // Create the full graph with the physical edges we've localized earlier, and the logical edges + // of constraints that hold at all points. + let logical_constraints = + outlives_constraints.filter(|c| matches!(c.locations, Locations::All(_))); + let graph = LocalizedConstraintGraph::new(&localized_outlives_constraints, logical_constraints); + let mut visited = FxHashSet::default(); + let mut stack = Vec::new(); + + // Compute reachability per loan by traversing each loan's subgraph starting from where it is + // introduced. + for (loan_idx, loan) in borrow_set.iter_enumerated() { + visited.clear(); + stack.clear(); + + let start_node = LocalizedNode { + region: loan.region, + point: liveness.point_from_location(loan.reserve_location), + }; + stack.push(start_node); + + while let Some(node) = stack.pop() { + if !visited.insert(node) { + continue; + } + + // Record the loan as being live on entry to this point. + live_loans.insert(node.point, loan_idx); + + // Here, we have a conundrum. There's currently a weakness in our theory, in that + // we're using a single notion of reachability to represent what used to be _two_ + // different transitive closures. 
It didn't seem impactful when coming up with the + // single-graph and reachability through space (regions) + time (CFG) concepts, but in + // practice the combination of time-traveling with kills is more impactful than + // initially anticipated. + // + // Kills should prevent a loan from reaching its successor points in the CFG, but not + // while time-traveling: we're not actually at that CFG point, but looking for + // predecessor regions that contain the loan. One of the two TCs we had pushed the + // transitive subset edges to each point instead of having backward edges, and the + // problem didn't exist before. In the abstract, naive reachability is not enough to + // model this, we'd need a slightly different solution. For example, maybe with a + // two-step traversal: + // - at each point we first traverse the subgraph (and possibly time-travel) looking for + // exit nodes while ignoring kills, + // - and then when we're back at the current point, we continue normally. + // + // Another (less annoying) subtlety is that kills and the loan use-map are + // flow-insensitive. Kills can actually appear in places before a loan is introduced, or + // at a location that is actually unreachable in the CFG from the introduction point, + // and these can also be encountered during time-traveling. + // + // The simplest change that made sense to "fix" the issues above is taking into + // account kills that are: + // - reachable from the introduction point + // - encountered during forward traversal. Note that this is not transitive like the + // two-step traversal described above: only kills encountered on exit via a backward + // edge are ignored. + // + // In our test suite, there are a couple of cases where kills are encountered while + // time-traveling, however as far as we can tell, always in cases where they would be + // unreachable. We have reason to believe that this is a property of the single-graph + // approach (but haven't proved it yet): + // - reachable kills while time-traveling would also be encountered via regular + // traversal + // - it makes _some_ sense to ignore unreachable kills, but subtleties around dead code + // in general need to be better thought through (like they were for NLLs). + // - ignoring kills is a conservative approximation: the loan is still live and could + // cause false positive errors at another place access. Soundness issues in this + // domain should look more like the absence of reachability instead. + // + // This is enough in practice to pass tests, and therefore is what we have implemented + // for now. + // + // FIXME: all of the above. Analyze potential unsoundness, possibly in concert with a + // borrowck implementation in a-mir-formality, fuzzing, or manually crafting + // counter-examples. + + // Continuing traversal will depend on whether the loan is killed at this point, and + // whether we're time-traveling. + let current_location = liveness.location_from_point(node.point); + let is_loan_killed = + kills.get(¤t_location).is_some_and(|kills| kills.contains(&loan_idx)); + + for succ in graph.outgoing_edges(node) { + // If the loan is killed at this point, it is killed _on exit_. But only during + // forward traversal. 
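A compact stand-alone sketch of the traversal shape described in the comment above: depth-first over (region, point) nodes with a visited set, where a kill at the current point prunes only forward CFG successors, while "time-traveling" edges are still followed. The graph, kill set, and successor relation here are simplified stand-ins invented for the example, not the compiler's own types:

use std::collections::{HashMap, HashSet};

// A node is a (region, point) pair; regions and points are plain integers here.
type Node = (u32, u32);

fn trace_loan_liveness(
    start: Node,
    edges: &HashMap<Node, Vec<Node>>,
    kills: &HashSet<u32>, // points at which the loan is killed (on exit)
    is_forward: impl Fn(u32, u32) -> bool, // does `to` come after `from` in the CFG?
) -> HashSet<u32> {
    let mut live_at = HashSet::new();
    let mut visited = HashSet::new();
    let mut stack = vec![start];

    while let Some(node) = stack.pop() {
        if !visited.insert(node) {
            continue;
        }
        let (_region, point) = node;
        // The loan is live on entry to this point.
        live_at.insert(point);

        let killed_here = kills.contains(&point);
        for &succ in edges.get(&node).into_iter().flatten() {
            let (_, succ_point) = succ;
            // A kill stops forward propagation only; edges back to the same or
            // an earlier point are still traversed.
            if killed_here && is_forward(point, succ_point) {
                continue;
            }
            stack.push(succ);
        }
    }
    live_at
}

fn main() {
    let mut edges: HashMap<Node, Vec<Node>> = HashMap::new();
    edges.insert((0, 0), vec![(0, 1)]);
    edges.insert((0, 1), vec![(0, 2), (1, 1)]); // a forward edge and a same-point region edge
    let kills: HashSet<u32> = [1].into_iter().collect();
    let live = trace_loan_liveness((0, 0), &edges, &kills, |from, to| to > from);
    assert!(live.contains(&1) && !live.contains(&2));
    println!("live at points: {live:?}");
}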
+ if is_loan_killed { + let destination = liveness.location_from_point(succ.point); + if current_location.is_predecessor_of(destination, body) { + continue; + } + } + stack.push(succ); + } + } + } + + live_loans +} + +/// The localized constraint graph indexes the physical and logical edges to compute a given node's +/// successors during traversal. +struct LocalizedConstraintGraph { + /// The actual, physical, edges we have recorded for a given node. + edges: FxHashMap<LocalizedNode, FxIndexSet<LocalizedNode>>, + + /// The logical edges representing the outlives constraints that hold at all points in the CFG, + /// which we don't localize to avoid creating a lot of unnecessary edges in the graph. Some CFGs + /// can be big, and we don't need to create such a physical edge for every point in the CFG. + logical_edges: FxHashMap<RegionVid, FxIndexSet<RegionVid>>, +} + +/// A node in the graph to be traversed, one of the two vertices of a localized outlives constraint. +#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)] +struct LocalizedNode { + region: RegionVid, + point: PointIndex, +} + +impl LocalizedConstraintGraph { + /// Traverses the constraints and returns the indexed graph of edges per node. + fn new<'tcx>( + constraints: &LocalizedOutlivesConstraintSet, + logical_constraints: impl Iterator<Item = OutlivesConstraint<'tcx>>, + ) -> Self { + let mut edges: FxHashMap<_, FxIndexSet<_>> = FxHashMap::default(); + for constraint in &constraints.outlives { + let source = LocalizedNode { region: constraint.source, point: constraint.from }; + let target = LocalizedNode { region: constraint.target, point: constraint.to }; + edges.entry(source).or_default().insert(target); + } + + let mut logical_edges: FxHashMap<_, FxIndexSet<_>> = FxHashMap::default(); + for constraint in logical_constraints { + logical_edges.entry(constraint.sup).or_default().insert(constraint.sub); + } + + LocalizedConstraintGraph { edges, logical_edges } + } + + /// Returns the outgoing edges of a given node, not its transitive closure. + fn outgoing_edges(&self, node: LocalizedNode) -> impl Iterator<Item = LocalizedNode> + use<'_> { + // The outgoing edges are: + // - the physical edges present at this node, + // - the materialized logical edges that exist virtually at all points for this node's + // region, localized at this point. + let physical_edges = + self.edges.get(&node).into_iter().flat_map(|targets| targets.iter().copied()); + let materialized_edges = + self.logical_edges.get(&node.region).into_iter().flat_map(move |targets| { + targets + .iter() + .copied() + .map(move |target| LocalizedNode { point: node.point, region: target }) + }); + physical_edges.chain(materialized_edges) + } +} + +/// Traverses the MIR and collects kills. +fn collect_kills<'tcx>( + body: &Body<'tcx>, + tcx: TyCtxt<'tcx>, + borrow_set: &BorrowSet<'tcx>, +) -> BTreeMap<Location, BTreeSet<BorrowIndex>> { + let mut collector = KillsCollector { borrow_set, tcx, body, kills: BTreeMap::default() }; + for (block, data) in body.basic_blocks.iter_enumerated() { + collector.visit_basic_block_data(block, data); + } + collector.kills +} + +struct KillsCollector<'a, 'tcx> { + body: &'a Body<'tcx>, + tcx: TyCtxt<'tcx>, + borrow_set: &'a BorrowSet<'tcx>, + + /// The set of loans killed at each location. + kills: BTreeMap<Location, BTreeSet<BorrowIndex>>, +} + +// This visitor has a similar structure to the `Borrows` dataflow computation with respect to kills, +// and the datalog polonius fact generation for the `loan_killed_at` relation. 
+impl<'tcx> KillsCollector<'_, 'tcx> { + /// Records the borrows on the specified place as `killed`. For example, when assigning to a + /// local, or on a call's return destination. + fn record_killed_borrows_for_place(&mut self, place: Place<'tcx>, location: Location) { + // For the reasons described in graph traversal, we also filter out kills + // unreachable from the loan's introduction point, as they would stop traversal when + // e.g. checking for reachability in the subset graph through invariance constraints + // higher up. + let filter_unreachable_kills = |loan| { + let introduction = self.borrow_set[loan].reserve_location; + let reachable = introduction.is_predecessor_of(location, self.body); + reachable + }; + + let other_borrows_of_local = self + .borrow_set + .local_map + .get(&place.local) + .into_iter() + .flat_map(|bs| bs.iter()) + .copied(); + + // If the borrowed place is a local with no projections, all other borrows of this + // local must conflict. This is purely an optimization so we don't have to call + // `places_conflict` for every borrow. + if place.projection.is_empty() { + if !self.body.local_decls[place.local].is_ref_to_static() { + self.kills + .entry(location) + .or_default() + .extend(other_borrows_of_local.filter(|&loan| filter_unreachable_kills(loan))); + } + return; + } + + // By passing `PlaceConflictBias::NoOverlap`, we conservatively assume that any given + // pair of array indices are not equal, so that when `places_conflict` returns true, we + // will be assured that two places being compared definitely denotes the same sets of + // locations. + let definitely_conflicting_borrows = other_borrows_of_local + .filter(|&i| { + places_conflict( + self.tcx, + self.body, + self.borrow_set[i].borrowed_place, + place, + PlaceConflictBias::NoOverlap, + ) + }) + .filter(|&loan| filter_unreachable_kills(loan)); + + self.kills.entry(location).or_default().extend(definitely_conflicting_borrows); + } + + /// Records the borrows on the specified local as `killed`. + fn record_killed_borrows_for_local(&mut self, local: Local, location: Location) { + if let Some(borrow_indices) = self.borrow_set.local_map.get(&local) { + self.kills.entry(location).or_default().extend(borrow_indices.iter()); + } + } +} + +impl<'tcx> Visitor<'tcx> for KillsCollector<'_, 'tcx> { + fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) { + // Make sure there are no remaining borrows for locals that have gone out of scope. + if let StatementKind::StorageDead(local) = statement.kind { + self.record_killed_borrows_for_local(local, location); + } + + self.super_statement(statement, location); + } + + fn visit_assign(&mut self, place: &Place<'tcx>, rvalue: &Rvalue<'tcx>, location: Location) { + // When we see `X = ...`, then kill borrows of `(*X).foo` and so forth. + self.record_killed_borrows_for_place(*place, location); + self.super_assign(place, rvalue, location); + } + + fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) { + // A `Call` terminator's return value can be a local which has borrows, so we need to record + // those as killed as well. + if let TerminatorKind::Call { destination, .. 
} = terminator.kind { + self.record_killed_borrows_for_place(destination, location); + } + + self.super_terminator(terminator, location); + } +} diff --git a/compiler/rustc_borrowck/src/polonius/mod.rs b/compiler/rustc_borrowck/src/polonius/mod.rs index a853ff266a1..502c868194a 100644 --- a/compiler/rustc_borrowck/src/polonius/mod.rs +++ b/compiler/rustc_borrowck/src/polonius/mod.rs @@ -37,21 +37,25 @@ mod constraints; mod dump; pub(crate) mod legacy; mod liveness_constraints; +mod loan_liveness; +mod typeck_constraints; use std::collections::BTreeMap; use rustc_index::bit_set::SparseBitMatrix; -use rustc_middle::mir::{Body, Location}; -use rustc_middle::ty::RegionVid; +use rustc_middle::mir::Body; +use rustc_middle::ty::{RegionVid, TyCtxt}; use rustc_mir_dataflow::points::PointIndex; pub(crate) use self::constraints::*; pub(crate) use self::dump::dump_polonius_mir; use self::liveness_constraints::create_liveness_constraints; -use crate::RegionInferenceContext; -use crate::constraints::OutlivesConstraint; -use crate::region_infer::values::LivenessValues; -use crate::type_check::Locations; +use self::loan_liveness::compute_loan_liveness; +use self::typeck_constraints::convert_typeck_constraints; +use crate::dataflow::BorrowIndex; +use crate::{BorrowSet, RegionInferenceContext}; + +pub(crate) type LiveLoans = SparseBitMatrix<PointIndex, BorrowIndex>; /// This struct holds the data needed to create the Polonius localized constraints. pub(crate) struct PoloniusContext { @@ -83,19 +87,28 @@ impl PoloniusContext { Self { live_region_variances: BTreeMap::new(), live_regions: None } } - /// Creates a constraint set for `-Zpolonius=next` by: + /// Computes live loans using the set of loans model for `-Zpolonius=next`. + /// + /// First, creates a constraint graph combining regions and CFG points, by: /// - converting NLL typeck constraints to be localized /// - encoding liveness constraints - pub(crate) fn create_localized_constraints<'tcx>( + /// + /// Then, this graph is traversed, and combined with kills, reachability is recorded as loan + /// liveness, to be used by the loan scope and active loans computations. + pub(crate) fn compute_loan_liveness<'tcx>( &self, - regioncx: &RegionInferenceContext<'tcx>, + tcx: TyCtxt<'tcx>, + regioncx: &mut RegionInferenceContext<'tcx>, body: &Body<'tcx>, + borrow_set: &BorrowSet<'tcx>, ) -> LocalizedOutlivesConstraintSet { let mut localized_outlives_constraints = LocalizedOutlivesConstraintSet::default(); convert_typeck_constraints( + tcx, body, regioncx.liveness_constraints(), regioncx.outlives_constraints(), + regioncx.universal_regions(), &mut localized_outlives_constraints, ); @@ -111,44 +124,18 @@ impl PoloniusContext { &mut localized_outlives_constraints, ); - // FIXME: here, we can trace loan reachability in the constraint graph and record this as loan - // liveness for the next step in the chain, the NLL loan scope and active loans computations. + // Now that we have a complete graph, we can compute reachability to trace the liveness of + // loans for the next step in the chain, the NLL loan scope and active loans computations. + let live_loans = compute_loan_liveness( + tcx, + body, + regioncx.liveness_constraints(), + regioncx.outlives_constraints(), + borrow_set, + &localized_outlives_constraints, + ); + regioncx.record_live_loans(live_loans); localized_outlives_constraints } } - -/// Propagate loans throughout the subset graph at a given point (with some subtleties around the -/// location where effects start to be visible). 
-fn convert_typeck_constraints<'tcx>( - body: &Body<'tcx>, - liveness: &LivenessValues, - outlives_constraints: impl Iterator<Item = OutlivesConstraint<'tcx>>, - localized_outlives_constraints: &mut LocalizedOutlivesConstraintSet, -) { - for outlives_constraint in outlives_constraints { - match outlives_constraint.locations { - Locations::All(_) => { - // For now, turn logical constraints holding at all points into physical edges at - // every point in the graph. - // FIXME: encode this into *traversal* instead. - for (block, bb) in body.basic_blocks.iter_enumerated() { - let statement_count = bb.statements.len(); - for statement_index in 0..=statement_count { - let current_location = Location { block, statement_index }; - let current_point = liveness.point_from_location(current_location); - - localized_outlives_constraints.push(LocalizedOutlivesConstraint { - source: outlives_constraint.sup, - from: current_point, - target: outlives_constraint.sub, - to: current_point, - }); - } - } - } - - _ => {} - } - } -} diff --git a/compiler/rustc_borrowck/src/polonius/typeck_constraints.rs b/compiler/rustc_borrowck/src/polonius/typeck_constraints.rs new file mode 100644 index 00000000000..1289b1899eb --- /dev/null +++ b/compiler/rustc_borrowck/src/polonius/typeck_constraints.rs @@ -0,0 +1,229 @@ +use rustc_data_structures::fx::FxHashSet; +use rustc_middle::mir::{Body, Location, Statement, StatementKind, Terminator, TerminatorKind}; +use rustc_middle::ty::{TyCtxt, TypeVisitable}; +use rustc_mir_dataflow::points::PointIndex; + +use super::{LocalizedOutlivesConstraint, LocalizedOutlivesConstraintSet}; +use crate::constraints::OutlivesConstraint; +use crate::region_infer::values::LivenessValues; +use crate::type_check::Locations; +use crate::universal_regions::UniversalRegions; + +/// Propagate loans throughout the subset graph at a given point (with some subtleties around the +/// location where effects start to be visible). +pub(super) fn convert_typeck_constraints<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + liveness: &LivenessValues, + outlives_constraints: impl Iterator<Item = OutlivesConstraint<'tcx>>, + universal_regions: &UniversalRegions<'tcx>, + localized_outlives_constraints: &mut LocalizedOutlivesConstraintSet, +) { + for outlives_constraint in outlives_constraints { + match outlives_constraint.locations { + Locations::All(_) => { + // We don't turn constraints holding at all points into physical edges at every + // point in the graph. They are encoded into *traversal* instead: a given node's + // successors will combine these logical edges with the regular, physical, localized + // edges. + continue; + } + + Locations::Single(location) => { + // This constraint is marked as holding at one location, we localize it to that + // location or its successor, depending on the corresponding MIR + // statement/terminator. Unfortunately, they all show up from typeck as coming "on + // entry", so for now we modify them to take effects that should apply "on exit" + // into account. + // + // FIXME: this approach is subtle, complicated, and hard to test, so we should track + // this information better in MIR typeck instead, for example with a new `Locations` + // variant that contains which node is crossing over between entry and exit. 
+ let point = liveness.point_from_location(location); + let localized_constraint = if let Some(stmt) = + body[location.block].statements.get(location.statement_index) + { + localize_statement_constraint( + tcx, + body, + stmt, + liveness, + &outlives_constraint, + location, + point, + universal_regions, + ) + } else { + assert_eq!(location.statement_index, body[location.block].statements.len()); + let terminator = body[location.block].terminator(); + localize_terminator_constraint( + tcx, + body, + terminator, + liveness, + &outlives_constraint, + point, + universal_regions, + ) + }; + localized_outlives_constraints.push(localized_constraint); + } + } + } +} + +/// For a given outlives constraint arising from a MIR statement, localize the constraint with the +/// needed CFG `from`-`to` intra-block nodes. +fn localize_statement_constraint<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + stmt: &Statement<'tcx>, + liveness: &LivenessValues, + outlives_constraint: &OutlivesConstraint<'tcx>, + current_location: Location, + current_point: PointIndex, + universal_regions: &UniversalRegions<'tcx>, +) -> LocalizedOutlivesConstraint { + match &stmt.kind { + StatementKind::Assign(box (lhs, rhs)) => { + // To create localized outlives constraints without midpoints, we rely on the property + // that no input regions from the RHS of the assignment will flow into themselves: they + // should not appear in the output regions in the LHS. We believe this to be true by + // construction of the MIR, via temporaries, and assert it here. + // + // We think we don't need midpoints because: + // - every LHS Place has a unique set of regions that don't appear elsewhere + // - this implies that for them to be part of the RHS, the same Place must be read and + // written + // - and that should be impossible in MIR + // + // When we have a more complete implementation in the future, tested with crater, etc, + // we can relax this to a debug assert instead, or remove it. + assert!( + { + let mut lhs_regions = FxHashSet::default(); + tcx.for_each_free_region(lhs, |region| { + let region = universal_regions.to_region_vid(region); + lhs_regions.insert(region); + }); + + let mut rhs_regions = FxHashSet::default(); + tcx.for_each_free_region(rhs, |region| { + let region = universal_regions.to_region_vid(region); + rhs_regions.insert(region); + }); + + // The intersection between LHS and RHS regions should be empty. + lhs_regions.is_disjoint(&rhs_regions) + }, + "there should be no common regions between the LHS and RHS of an assignment" + ); + + // As mentioned earlier, we should be tracking these better upstream but: we want to + // relate the types on entry to the type of the place on exit. That is, outlives + // constraints on the RHS are on entry, and outlives constraints to/from the LHS are on + // exit (i.e. on entry to the successor location). + let lhs_ty = body.local_decls[lhs.local].ty; + let successor_location = Location { + block: current_location.block, + statement_index: current_location.statement_index + 1, + }; + let successor_point = liveness.point_from_location(successor_location); + compute_constraint_direction( + tcx, + outlives_constraint, + &lhs_ty, + current_point, + successor_point, + universal_regions, + ) + } + _ => { + // For the other cases, we localize an outlives constraint to where it arises. 
+ LocalizedOutlivesConstraint { + source: outlives_constraint.sup, + from: current_point, + target: outlives_constraint.sub, + to: current_point, + } + } + } +} + +/// For a given outlives constraint arising from a MIR terminator, localize the constraint with the +/// needed CFG `from`-`to` inter-block nodes. +fn localize_terminator_constraint<'tcx>( + tcx: TyCtxt<'tcx>, + body: &Body<'tcx>, + terminator: &Terminator<'tcx>, + liveness: &LivenessValues, + outlives_constraint: &OutlivesConstraint<'tcx>, + current_point: PointIndex, + universal_regions: &UniversalRegions<'tcx>, +) -> LocalizedOutlivesConstraint { + // FIXME: check if other terminators need the same handling as `Call`s, in particular + // Assert/Yield/Drop. A handful of tests are failing with Drop related issues, as well as some + // coroutine tests, and that may be why. + match &terminator.kind { + // FIXME: also handle diverging calls. + TerminatorKind::Call { destination, target: Some(target), .. } => { + // Calls are similar to assignments, and thus follow the same pattern. If there is a + // target for the call we also relate what flows into the destination here to entry to + // that successor. + let destination_ty = destination.ty(&body.local_decls, tcx); + let successor_location = Location { block: *target, statement_index: 0 }; + let successor_point = liveness.point_from_location(successor_location); + compute_constraint_direction( + tcx, + outlives_constraint, + &destination_ty, + current_point, + successor_point, + universal_regions, + ) + } + _ => { + // Typeck constraints guide loans between regions at the current point, so we do that in + // the general case, and liveness will take care of making them flow to the terminator's + // successors. + LocalizedOutlivesConstraint { + source: outlives_constraint.sup, + from: current_point, + target: outlives_constraint.sub, + to: current_point, + } + } + } +} +/// For a given outlives constraint and CFG edge, returns the localized constraint with the +/// appropriate `from`-`to` direction. This is computed according to whether the constraint flows to +/// or from a free region in the given `value`, some kind of result for an effectful operation, like +/// the LHS of an assignment. +fn compute_constraint_direction<'tcx>( + tcx: TyCtxt<'tcx>, + outlives_constraint: &OutlivesConstraint<'tcx>, + value: &impl TypeVisitable<TyCtxt<'tcx>>, + current_point: PointIndex, + successor_point: PointIndex, + universal_regions: &UniversalRegions<'tcx>, +) -> LocalizedOutlivesConstraint { + let mut to = current_point; + let mut from = current_point; + tcx.for_each_free_region(value, |region| { + let region = universal_regions.to_region_vid(region); + if region == outlives_constraint.sub { + // This constraint flows into the result, its effects start becoming visible on exit. + to = successor_point; + } else if region == outlives_constraint.sup { + // This constraint flows from the result, its effects start becoming visible on exit. 
+ from = successor_point; + } + }); + + LocalizedOutlivesConstraint { + source: outlives_constraint.sup, + from, + target: outlives_constraint.sub, + to, + } +} diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index 2150759d329..d2268c4779d 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -13,15 +13,16 @@ use rustc_infer::infer::region_constraints::{GenericKind, VarInfos, VerifyBound, use rustc_infer::infer::{InferCtxt, NllRegionVariableOrigin, RegionVariableOrigin}; use rustc_middle::bug; use rustc_middle::mir::{ - BasicBlock, Body, ClosureOutlivesRequirement, ClosureOutlivesSubject, ClosureOutlivesSubjectTy, - ClosureRegionRequirements, ConstraintCategory, Local, Location, ReturnConstraint, - TerminatorKind, + AnnotationSource, BasicBlock, Body, ClosureOutlivesRequirement, ClosureOutlivesSubject, + ClosureOutlivesSubjectTy, ClosureRegionRequirements, ConstraintCategory, Local, Location, + ReturnConstraint, TerminatorKind, }; use rustc_middle::traits::{ObligationCause, ObligationCauseCode}; use rustc_middle::ty::fold::fold_regions; use rustc_middle::ty::{self, RegionVid, Ty, TyCtxt, TypeFoldable, UniverseIndex}; use rustc_mir_dataflow::points::DenseLocationMap; use rustc_span::Span; +use rustc_span::hygiene::DesugaringKind; use tracing::{debug, instrument, trace}; use crate::BorrowckInferCtxt; @@ -30,6 +31,7 @@ use crate::constraints::{ConstraintSccIndex, OutlivesConstraint, OutlivesConstra use crate::dataflow::BorrowIndex; use crate::diagnostics::{RegionErrorKind, RegionErrors, UniverseInfo}; use crate::member_constraints::{MemberConstraintSet, NllMemberConstraintIndex}; +use crate::polonius::LiveLoans; use crate::polonius::legacy::PoloniusOutput; use crate::region_infer::reverse_sccs::ReverseSccGraph; use crate::region_infer::values::{LivenessValues, RegionElement, RegionValues, ToElementIndex}; @@ -315,11 +317,6 @@ enum Trace<'tcx> { NotVisited, } -#[derive(Clone, PartialEq, Eq, Debug)] -pub(crate) enum ExtraConstraintInfo { - PlaceholderFromPredicate(Span), -} - #[instrument(skip(infcx, sccs), level = "debug")] fn sccs_info<'tcx>(infcx: &BorrowckInferCtxt<'tcx>, sccs: &ConstraintSccs) { use crate::renumber::RegionCtxt; @@ -396,7 +393,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { var_infos: VarInfos, constraints: MirTypeckRegionConstraints<'tcx>, universal_region_relations: Frozen<UniversalRegionRelations<'tcx>>, - elements: Rc<DenseLocationMap>, + location_map: Rc<DenseLocationMap>, ) -> Self { let universal_regions = &universal_region_relations.universal_regions; let MirTypeckRegionConstraints { @@ -440,7 +437,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { } let mut scc_values = - RegionValues::new(elements, universal_regions.len(), placeholder_indices); + RegionValues::new(location_map, universal_regions.len(), placeholder_indices); for region in liveness_constraints.regions() { let scc = constraint_sccs.scc(region); @@ -978,7 +975,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { propagated_outlives_requirements: &mut Vec<ClosureOutlivesRequirement<'tcx>>, ) -> bool { let tcx = infcx.tcx; - let TypeTest { generic_kind, lower_bound, span: blame_span, ref verify_bound } = *type_test; + let TypeTest { generic_kind, lower_bound, span: blame_span, verify_bound: _ } = *type_test; let generic_ty = generic_kind.to_ty(tcx); let Some(subject) = self.try_promote_type_test_subject(infcx, generic_ty) else { @@ -1016,25 +1013,10 @@ impl<'tcx> 
RegionInferenceContext<'tcx> { // For each region outlived by lower_bound find a non-local, // universal region (it may be the same region) and add it to // `ClosureOutlivesRequirement`. + let mut found_outlived_universal_region = false; for ur in self.scc_values.universal_regions_outlived_by(r_scc) { + found_outlived_universal_region = true; debug!("universal_region_outlived_by ur={:?}", ur); - // Check whether we can already prove that the "subject" outlives `ur`. - // If so, we don't have to propagate this requirement to our caller. - // - // To continue the example from the function, if we are trying to promote - // a requirement that `T: 'X`, and we know that `'X = '1 + '2` (i.e., the union - // `'1` and `'2`), then in this loop `ur` will be `'1` (and `'2`). So here - // we check whether `T: '1` is something we *can* prove. If so, no need - // to propagate that requirement. - // - // This is needed because -- particularly in the case - // where `ur` is a local bound -- we are sometimes in a - // position to prove things that our caller cannot. See - // #53570 for an example. - if self.eval_verify_bound(infcx, generic_ty, ur, &verify_bound) { - continue; - } - let non_local_ub = self.universal_region_relations.non_local_upper_bounds(ur); debug!(?non_local_ub); @@ -1056,6 +1038,11 @@ impl<'tcx> RegionInferenceContext<'tcx> { propagated_outlives_requirements.push(requirement); } } + // If we succeed to promote the subject, i.e. it only contains non-local regions, + // and fail to prove the type test inside of the closure, the `lower_bound` has to + // also be at least as large as some universal region, as the type test is otherwise + // trivial. + assert!(found_outlived_universal_region); true } @@ -1948,7 +1935,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { from_region: RegionVid, from_region_origin: NllRegionVariableOrigin, target_test: impl Fn(RegionVid) -> bool, - ) -> (BlameConstraint<'tcx>, Vec<ExtraConstraintInfo>) { + ) -> (BlameConstraint<'tcx>, Vec<OutlivesConstraint<'tcx>>) { // Find all paths let (path, target_region) = self .find_constraint_paths_between_regions(from_region, target_test) @@ -1970,25 +1957,6 @@ impl<'tcx> RegionInferenceContext<'tcx> { .collect::<Vec<_>>() ); - let mut extra_info = vec![]; - for constraint in path.iter() { - let outlived = constraint.sub; - let Some(origin) = self.var_infos.get(outlived) else { - continue; - }; - let RegionVariableOrigin::Nll(NllRegionVariableOrigin::Placeholder(p)) = origin.origin - else { - continue; - }; - debug!(?constraint, ?p); - let ConstraintCategory::Predicate(span) = constraint.category else { - continue; - }; - extra_info.push(ExtraConstraintInfo::PlaceholderFromPredicate(span)); - // We only want to point to one - break; - } - // We try to avoid reporting a `ConstraintCategory::Predicate` as our best constraint. // Instead, we use it to produce an improved `ObligationCauseCode`. // FIXME - determine what we should do if we encounter multiple @@ -2007,42 +1975,8 @@ impl<'tcx> RegionInferenceContext<'tcx> { }) .unwrap_or_else(|| ObligationCauseCode::Misc); - // Classify each of the constraints along the path. 
- let mut categorized_path: Vec<BlameConstraint<'tcx>> = path - .iter() - .map(|constraint| BlameConstraint { - category: constraint.category, - from_closure: constraint.from_closure, - cause: ObligationCause::new(constraint.span, CRATE_DEF_ID, cause_code.clone()), - variance_info: constraint.variance_info, - }) - .collect(); - debug!("categorized_path={:#?}", categorized_path); - - // To find the best span to cite, we first try to look for the - // final constraint that is interesting and where the `sup` is - // not unified with the ultimate target region. The reason - // for this is that we have a chain of constraints that lead - // from the source to the target region, something like: - // - // '0: '1 ('0 is the source) - // '1: '2 - // '2: '3 - // '3: '4 - // '4: '5 - // '5: '6 ('6 is the target) - // - // Some of those regions are unified with `'6` (in the same - // SCC). We want to screen those out. After that point, the - // "closest" constraint we have to the end is going to be the - // most likely to be the point where the value escapes -- but - // we still want to screen for an "interesting" point to - // highlight (e.g., a call site or something). - let target_scc = self.constraint_sccs.scc(target_region); - let mut range = 0..path.len(); - - // As noted above, when reporting an error, there is typically a chain of constraints - // leading from some "source" region which must outlive some "target" region. + // When reporting an error, there is typically a chain of constraints leading from some + // "source" region which must outlive some "target" region. // In most cases, we prefer to "blame" the constraints closer to the target -- // but there is one exception. When constraints arise from higher-ranked subtyping, // we generally prefer to blame the source value, @@ -2083,78 +2017,114 @@ impl<'tcx> RegionInferenceContext<'tcx> { | NllRegionVariableOrigin::Existential { from_forall: true } => false, }; - let find_region = |i: &usize| { - let constraint = &path[*i]; - - let constraint_sup_scc = self.constraint_sccs.scc(constraint.sup); - - if blame_source { - match categorized_path[*i].category { - ConstraintCategory::OpaqueType - | ConstraintCategory::Boring - | ConstraintCategory::BoringNoLocation - | ConstraintCategory::Internal - | ConstraintCategory::Predicate(_) => false, - ConstraintCategory::TypeAnnotation - | ConstraintCategory::Return(_) - | ConstraintCategory::Yield => true, - _ => constraint_sup_scc != target_scc, - } + // To pick a constraint to blame, we organize constraints by how interesting we expect them + // to be in diagnostics, then pick the most interesting one closest to either the source or + // the target on our constraint path. + let constraint_interest = |constraint: &OutlivesConstraint<'tcx>| { + // Try to avoid blaming constraints from desugarings, since they may not clearly match + // what users have written. As an exception, allow blaming returns generated by + // `?` desugaring, since the correspondence is fairly clear.
+ let category = if let Some(kind) = constraint.span.desugaring_kind() + && (kind != DesugaringKind::QuestionMark + || !matches!(constraint.category, ConstraintCategory::Return(_))) + { + ConstraintCategory::Boring } else { - !matches!( - categorized_path[*i].category, - ConstraintCategory::OpaqueType - | ConstraintCategory::Boring - | ConstraintCategory::BoringNoLocation - | ConstraintCategory::Internal - | ConstraintCategory::Predicate(_) - ) - } - }; - - let best_choice = - if blame_source { range.rev().find(find_region) } else { range.find(find_region) }; - - debug!(?best_choice, ?blame_source, ?extra_info); + constraint.category + }; - if let Some(i) = best_choice { - if let Some(next) = categorized_path.get(i + 1) { - if matches!(categorized_path[i].category, ConstraintCategory::Return(_)) - && next.category == ConstraintCategory::OpaqueType + match category { + // Returns usually provide a type to blame and have specially written diagnostics, + // so prioritize them. + ConstraintCategory::Return(_) => 0, + // Unsizing coercions are interesting, since we have a note for that: + // `BorrowExplanation::add_object_lifetime_default_note`. + // FIXME(dianne): That note shouldn't depend on a coercion being blamed; see issue + // #131008 for an example of where we currently don't emit it but should. + // Once the note is handled properly, this case should be removed. Until then, it + // should be as limited as possible; the note is prone to false positives and this + // constraint usually isn't best to blame. + ConstraintCategory::Cast { + unsize_to: Some(unsize_ty), + is_implicit_coercion: true, + } if target_region == self.universal_regions().fr_static + // Mirror the note's condition, to minimize how often this diverts blame. + && let ty::Adt(_, args) = unsize_ty.kind() + && args.iter().any(|arg| arg.as_type().is_some_and(|ty| ty.is_trait())) + // Mimic old logic for this, to minimize false positives in tests. + && !path + .iter() + .any(|c| matches!(c.category, ConstraintCategory::TypeAnnotation(_))) => { - // The return expression is being influenced by the return type being - // impl Trait, point at the return type and not the return expr. - return (next.clone(), extra_info); + 1 } + // Between other interesting constraints, order by their position on the `path`. + ConstraintCategory::Yield + | ConstraintCategory::UseAsConst + | ConstraintCategory::UseAsStatic + | ConstraintCategory::TypeAnnotation( + AnnotationSource::Ascription + | AnnotationSource::Declaration + | AnnotationSource::OpaqueCast, + ) + | ConstraintCategory::Cast { .. } + | ConstraintCategory::CallArgument(_) + | ConstraintCategory::CopyBound + | ConstraintCategory::SizedBound + | ConstraintCategory::Assignment + | ConstraintCategory::Usage + | ConstraintCategory::ClosureUpvar(_) => 2, + // Generic arguments are unlikely to be what relates regions together + ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg) => 3, + // We handle predicates and opaque types specially; don't prioritize them here. + ConstraintCategory::Predicate(_) | ConstraintCategory::OpaqueType => 4, + // `Boring` constraints can correspond to user-written code and have useful spans, + // but don't provide any other useful information for diagnostics. + ConstraintCategory::Boring => 5, + // `BoringNoLocation` constraints can point to user-written code, but are less + // specific, and are not used for relations that would make sense to blame. + ConstraintCategory::BoringNoLocation => 6, + // Do not blame internal constraints. 
+ ConstraintCategory::Internal => 7, + ConstraintCategory::IllegalUniverse => 8, } + }; - if categorized_path[i].category == ConstraintCategory::Return(ReturnConstraint::Normal) - { - let field = categorized_path.iter().find_map(|p| { - if let ConstraintCategory::ClosureUpvar(f) = p.category { - Some(f) - } else { - None - } - }); - - if let Some(field) = field { - categorized_path[i].category = - ConstraintCategory::Return(ReturnConstraint::ClosureUpvar(field)); - } - } + let best_choice = if blame_source { + path.iter().enumerate().rev().min_by_key(|(_, c)| constraint_interest(c)).unwrap().0 + } else { + path.iter().enumerate().min_by_key(|(_, c)| constraint_interest(c)).unwrap().0 + }; - return (categorized_path[i].clone(), extra_info); - } + debug!(?best_choice, ?blame_source); - // If that search fails, that is.. unusual. Maybe everything - // is in the same SCC or something. In that case, find what - // appears to be the most interesting point to report to the - // user via an even more ad-hoc guess. - categorized_path.sort_by_key(|p| p.category); - debug!("sorted_path={:#?}", categorized_path); + let best_constraint = if let Some(next) = path.get(best_choice + 1) + && matches!(path[best_choice].category, ConstraintCategory::Return(_)) + && next.category == ConstraintCategory::OpaqueType + { + // The return expression is being influenced by the return type being + // impl Trait, point at the return type and not the return expr. + *next + } else if path[best_choice].category == ConstraintCategory::Return(ReturnConstraint::Normal) + && let Some(field) = path.iter().find_map(|p| { + if let ConstraintCategory::ClosureUpvar(f) = p.category { Some(f) } else { None } + }) + { + OutlivesConstraint { + category: ConstraintCategory::Return(ReturnConstraint::ClosureUpvar(field)), + ..path[best_choice] + } + } else { + path[best_choice] + }; - (categorized_path.remove(0), extra_info) + let blame_constraint = BlameConstraint { + category: best_constraint.category, + from_closure: best_constraint.from_closure, + cause: ObligationCause::new(best_constraint.span, CRATE_DEF_ID, cause_code.clone()), + variance_info: best_constraint.variance_info, + }; + (blame_constraint, path) } pub(crate) fn universe_info(&self, universe: ty::UniverseIndex) -> UniverseInfo<'tcx> { @@ -2202,28 +2172,6 @@ impl<'tcx> RegionInferenceContext<'tcx> { self.constraint_graph.region_graph(&self.constraints, self.universal_regions().fr_static) } - /// Returns whether the given region is considered live at all points: whether it is a - /// placeholder or a free region. - pub(crate) fn is_region_live_at_all_points(&self, region: RegionVid) -> bool { - // FIXME: there must be a cleaner way to find this information. At least, when - // higher-ranked subtyping is abstracted away from the borrowck main path, we'll only - // need to check whether this is a universal region. - let origin = self.region_definition(region).origin; - let live_at_all_points = matches!( - origin, - NllRegionVariableOrigin::Placeholder(_) | NllRegionVariableOrigin::FreeRegion - ); - live_at_all_points - } - - /// Returns whether the `loan_idx` is live at the given `location`: whether its issuing - /// region is contained within the type of a variable that is live at this point. - /// Note: for now, the sets of live loans is only available when using `-Zpolonius=next`. 
- pub(crate) fn is_loan_live_at(&self, loan_idx: BorrowIndex, location: Location) -> bool { - let point = self.liveness_constraints.point_from_location(location); - self.liveness_constraints.is_loan_live_at(loan_idx, point) - } - /// Returns the representative `RegionVid` for a given SCC. /// See `RegionTracker` for how a region variable ID is chosen. /// @@ -2239,6 +2187,20 @@ impl<'tcx> RegionInferenceContext<'tcx> { pub(crate) fn liveness_constraints(&self) -> &LivenessValues { &self.liveness_constraints } + + /// When using `-Zpolonius=next`, records the given live loans for the loan scopes and active + /// loans dataflow computations. + pub(crate) fn record_live_loans(&mut self, live_loans: LiveLoans) { + self.liveness_constraints.record_live_loans(live_loans); + } + + /// Returns whether the `loan_idx` is live at the given `location`: whether its issuing + /// region is contained within the type of a variable that is live at this point. + /// Note: for now, the sets of live loans is only available when using `-Zpolonius=next`. + pub(crate) fn is_loan_live_at(&self, loan_idx: BorrowIndex, location: Location) -> bool { + let point = self.liveness_constraints.point_from_location(location); + self.liveness_constraints.is_loan_live_at(loan_idx, point) + } } impl<'tcx> RegionDefinition<'tcx> { diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs index 3e16a3ca157..7c484327e31 100644 --- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs +++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs @@ -152,7 +152,6 @@ impl<'tcx> RegionInferenceContext<'tcx> { let (Ok(e) | Err(e)) = prev .build_mismatch_error( &OpaqueHiddenType { ty, span: concrete_type.span }, - opaque_type_key.def_id, infcx.tcx, ) .map(|d| d.emit()); diff --git a/compiler/rustc_borrowck/src/region_infer/values.rs b/compiler/rustc_borrowck/src/region_infer/values.rs index e567f3a8b0d..f1bcb353dc6 100644 --- a/compiler/rustc_borrowck/src/region_infer/values.rs +++ b/compiler/rustc_borrowck/src/region_infer/values.rs @@ -11,6 +11,7 @@ use rustc_mir_dataflow::points::{DenseLocationMap, PointIndex}; use tracing::debug; use crate::BorrowIndex; +use crate::polonius::LiveLoans; rustc_index::newtype_index! { /// A single integer representing a `ty::Placeholder`. @@ -38,7 +39,7 @@ pub(crate) enum RegionElement { /// an interval matrix storing liveness ranges for each region-vid. pub(crate) struct LivenessValues { /// The map from locations to points. - elements: Rc<DenseLocationMap>, + location_map: Rc<DenseLocationMap>, /// Which regions are live. This is exclusive with the fine-grained tracking in `points`, and /// currently only used for validating promoteds (which don't care about more precise tracking). @@ -50,39 +51,18 @@ pub(crate) struct LivenessValues { /// region is live, only that it is. points: Option<SparseIntervalMatrix<RegionVid, PointIndex>>, - /// When using `-Zpolonius=next`, for each point: the loans flowing into the live regions at - /// that point. - pub(crate) loans: Option<LiveLoans>, -} - -/// Data used to compute the loans that are live at a given point in the CFG, when using -/// `-Zpolonius=next`. -pub(crate) struct LiveLoans { - /// The set of loans that flow into a given region. When individual regions are marked as live - /// in the CFG, these inflowing loans are recorded as live. - pub(crate) inflowing_loans: SparseBitMatrix<RegionVid, BorrowIndex>, - - /// The set of loans that are live at a given point in the CFG. 
- pub(crate) live_loans: SparseBitMatrix<PointIndex, BorrowIndex>, -} - -impl LiveLoans { - pub(crate) fn new(num_loans: usize) -> Self { - LiveLoans { - live_loans: SparseBitMatrix::new(num_loans), - inflowing_loans: SparseBitMatrix::new(num_loans), - } - } + /// When using `-Zpolonius=next`, the set of loans that are live at a given point in the CFG. + live_loans: Option<LiveLoans>, } impl LivenessValues { /// Create an empty map of regions to locations where they're live. - pub(crate) fn with_specific_points(elements: Rc<DenseLocationMap>) -> Self { + pub(crate) fn with_specific_points(location_map: Rc<DenseLocationMap>) -> Self { LivenessValues { live_regions: None, - points: Some(SparseIntervalMatrix::new(elements.num_points())), - elements, - loans: None, + points: Some(SparseIntervalMatrix::new(location_map.num_points())), + location_map, + live_loans: None, } } @@ -90,12 +70,12 @@ impl LivenessValues { /// /// Unlike `with_specific_points`, does not track exact locations where something is live, only /// which regions are live. - pub(crate) fn without_specific_points(elements: Rc<DenseLocationMap>) -> Self { + pub(crate) fn without_specific_points(location_map: Rc<DenseLocationMap>) -> Self { LivenessValues { live_regions: Some(Default::default()), points: None, - elements, - loans: None, + location_map, + live_loans: None, } } @@ -122,20 +102,13 @@ impl LivenessValues { /// Records `region` as being live at the given `location`. pub(crate) fn add_location(&mut self, region: RegionVid, location: Location) { - let point = self.elements.point_from_location(location); + let point = self.location_map.point_from_location(location); debug!("LivenessValues::add_location(region={:?}, location={:?})", region, location); if let Some(points) = &mut self.points { points.insert(region, point); - } else if self.elements.point_in_range(point) { + } else if self.location_map.point_in_range(point) { self.live_regions.as_mut().unwrap().insert(region); } - - // When available, record the loans flowing into this region as live at the given point. - if let Some(loans) = self.loans.as_mut() { - if let Some(inflowing) = loans.inflowing_loans.row(region) { - loans.live_loans.union_row(point, inflowing); - } - } } /// Records `region` as being live at all the given `points`. @@ -143,20 +116,9 @@ impl LivenessValues { debug!("LivenessValues::add_points(region={:?}, points={:?})", region, points); if let Some(this) = &mut self.points { this.union_row(region, points); - } else if points.iter().any(|point| self.elements.point_in_range(point)) { + } else if points.iter().any(|point| self.location_map.point_in_range(point)) { self.live_regions.as_mut().unwrap().insert(region); } - - // When available, record the loans flowing into this region as live at the given points. - if let Some(loans) = self.loans.as_mut() { - if let Some(inflowing) = loans.inflowing_loans.row(region) { - if !inflowing.is_empty() { - for point in points.iter() { - loans.live_loans.union_row(point, inflowing); - } - } - } - } } /// Records `region` as being live at all the control-flow points. @@ -170,7 +132,7 @@ impl LivenessValues { /// Returns whether `region` is marked live at the given `location`. 
pub(crate) fn is_live_at(&self, region: RegionVid, location: Location) -> bool { - let point = self.elements.point_from_location(location); + let point = self.location_map.point_from_location(location); if let Some(points) = &self.points { points.row(region).is_some_and(|r| r.contains(point)) } else { @@ -191,33 +153,39 @@ impl LivenessValues { .row(region) .into_iter() .flat_map(|set| set.iter()) - .take_while(|&p| self.elements.point_in_range(p)) + .take_while(|&p| self.location_map.point_in_range(p)) } /// For debugging purposes, returns a pretty-printed string of the points where the `region` is /// live. pub(crate) fn pretty_print_live_points(&self, region: RegionVid) -> String { pretty_print_region_elements( - self.live_points(region).map(|p| RegionElement::Location(self.elements.to_location(p))), + self.live_points(region) + .map(|p| RegionElement::Location(self.location_map.to_location(p))), ) } #[inline] pub(crate) fn point_from_location(&self, location: Location) -> PointIndex { - self.elements.point_from_location(location) + self.location_map.point_from_location(location) } #[inline] pub(crate) fn location_from_point(&self, point: PointIndex) -> Location { - self.elements.to_location(point) + self.location_map.to_location(point) + } + + /// When using `-Zpolonius=next`, records the given live loans for the loan scopes and active + /// loans dataflow computations. + pub(crate) fn record_live_loans(&mut self, live_loans: LiveLoans) { + self.live_loans = Some(live_loans); } /// When using `-Zpolonius=next`, returns whether the `loan_idx` is live at the given `point`. pub(crate) fn is_loan_live_at(&self, loan_idx: BorrowIndex, point: PointIndex) -> bool { - self.loans + self.live_loans .as_ref() .expect("Accessing live loans requires `-Zpolonius=next`") - .live_loans .contains(point, loan_idx) } } @@ -272,7 +240,7 @@ impl PlaceholderIndices { /// because (since it is returned) it must live for at least `'a`. But /// it would also contain various points from within the function. pub(crate) struct RegionValues<N: Idx> { - elements: Rc<DenseLocationMap>, + location_map: Rc<DenseLocationMap>, placeholder_indices: PlaceholderIndices, points: SparseIntervalMatrix<N, PointIndex>, free_regions: SparseBitMatrix<N, RegionVid>, @@ -287,14 +255,14 @@ impl<N: Idx> RegionValues<N> { /// Each of the regions in num_region_variables will be initialized with an /// empty set of points and no causal information. 
pub(crate) fn new( - elements: Rc<DenseLocationMap>, + location_map: Rc<DenseLocationMap>, num_universal_regions: usize, placeholder_indices: PlaceholderIndices, ) -> Self { - let num_points = elements.num_points(); + let num_points = location_map.num_points(); let num_placeholders = placeholder_indices.len(); Self { - elements, + location_map, points: SparseIntervalMatrix::new(num_points), placeholder_indices, free_regions: SparseBitMatrix::new(num_universal_regions), @@ -336,7 +304,7 @@ impl<N: Idx> RegionValues<N> { end: usize, ) -> Option<usize> { let row = self.points.row(r)?; - let block = self.elements.entry_point(block); + let block = self.location_map.entry_point(block); let start = block.plus(start); let end = block.plus(end); let first_unset = row.first_unset_in(start..=end)?; @@ -375,8 +343,8 @@ impl<N: Idx> RegionValues<N> { pub(crate) fn locations_outlived_by<'a>(&'a self, r: N) -> impl Iterator<Item = Location> + 'a { self.points.row(r).into_iter().flat_map(move |set| { set.iter() - .take_while(move |&p| self.elements.point_in_range(p)) - .map(move |p| self.elements.to_location(p)) + .take_while(move |&p| self.location_map.point_in_range(p)) + .map(move |p| self.location_map.to_location(p)) }) } @@ -430,12 +398,12 @@ pub(crate) trait ToElementIndex: Debug + Copy { impl ToElementIndex for Location { fn add_to_row<N: Idx>(self, values: &mut RegionValues<N>, row: N) -> bool { - let index = values.elements.point_from_location(self); + let index = values.location_map.point_from_location(self); values.points.insert(row, index) } fn contained_in_row<N: Idx>(self, values: &RegionValues<N>, row: N) -> bool { - let index = values.elements.point_from_location(self); + let index = values.location_map.point_from_location(self); values.points.contains(row, index) } } @@ -464,14 +432,14 @@ impl ToElementIndex for ty::PlaceholderRegion { /// For debugging purposes, returns a pretty-printed string of the given points. 
pub(crate) fn pretty_print_points( - elements: &DenseLocationMap, + location_map: &DenseLocationMap, points: impl IntoIterator<Item = PointIndex>, ) -> String { pretty_print_region_elements( points .into_iter() - .take_while(|&p| elements.point_in_range(p)) - .map(|p| elements.to_location(p)) + .take_while(|&p| location_map.point_in_range(p)) + .map(|p| location_map.to_location(p)) .map(RegionElement::Location), ) } @@ -544,12 +512,12 @@ fn pretty_print_region_elements(elements: impl IntoIterator<Item = RegionElement return result; - fn push_location_range(str: &mut String, location1: Location, location2: Location) { + fn push_location_range(s: &mut String, location1: Location, location2: Location) { if location1 == location2 { - str.push_str(&format!("{location1:?}")); + s.push_str(&format!("{location1:?}")); } else { assert_eq!(location1.block, location2.block); - str.push_str(&format!( + s.push_str(&format!( "{:?}[{}..={}]", location1.block, location1.statement_index, location2.statement_index )); diff --git a/compiler/rustc_borrowck/src/renumber.rs b/compiler/rustc_borrowck/src/renumber.rs index d83d6ade203..e355d2b415b 100644 --- a/compiler/rustc_borrowck/src/renumber.rs +++ b/compiler/rustc_borrowck/src/renumber.rs @@ -34,7 +34,6 @@ pub(crate) enum RegionCtxt { Location(Location), TyContext(TyContext), Free(Symbol), - Bound(Symbol), LateBound(Symbol), Existential(Option<Symbol>), Placeholder(Symbol), diff --git a/compiler/rustc_borrowck/src/session_diagnostics.rs b/compiler/rustc_borrowck/src/session_diagnostics.rs index 627444a4ce5..4be5d0dbf42 100644 --- a/compiler/rustc_borrowck/src/session_diagnostics.rs +++ b/compiler/rustc_borrowck/src/session_diagnostics.rs @@ -480,3 +480,10 @@ pub(crate) struct SimdIntrinsicArgConst { pub arg: usize, pub intrinsic: String, } + +#[derive(LintDiagnostic)] +#[diag(borrowck_tail_expr_drop_order)] +pub(crate) struct TailExprDropOrder { + #[label] + pub borrowed: Span, +} diff --git a/compiler/rustc_borrowck/src/type_check/free_region_relations.rs b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs index ea965eb6545..edf612f4e97 100644 --- a/compiler/rustc_borrowck/src/type_check/free_region_relations.rs +++ b/compiler/rustc_borrowck/src/type_check/free_region_relations.rs @@ -5,13 +5,14 @@ use rustc_infer::infer::canonical::QueryRegionConstraints; use rustc_infer::infer::outlives::env::RegionBoundPairs; use rustc_infer::infer::region_constraints::GenericKind; use rustc_infer::infer::{InferCtxt, outlives}; +use rustc_infer::traits::ScrubbedTraitError; use rustc_middle::mir::ConstraintCategory; use rustc_middle::traits::ObligationCause; use rustc_middle::traits::query::OutlivesBound; use rustc_middle::ty::{self, RegionVid, Ty, TypeVisitableExt}; use rustc_span::{ErrorGuaranteed, Span}; -use rustc_trait_selection::error_reporting::InferCtxtErrorExt; -use rustc_trait_selection::solve::deeply_normalize; +use rustc_trait_selection::solve::NoSolution; +use rustc_trait_selection::traits::query::type_op::custom::CustomTypeOp; use rustc_trait_selection::traits::query::type_op::{self, TypeOp}; use tracing::{debug, instrument}; use type_op::TypeOpOutput; @@ -229,24 +230,14 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> { let mut constraints = vec![]; let mut known_type_outlives_obligations = vec![]; for bound in param_env.caller_bounds() { - let Some(mut outlives) = bound.as_type_outlives_clause() else { continue }; - - // In the new solver, normalize the type-outlives obligation assumptions. 
- if self.infcx.next_trait_solver() { - match deeply_normalize( - self.infcx.at(&ObligationCause::misc(span, defining_ty_def_id), param_env), + if let Some(outlives) = bound.as_type_outlives_clause() { + self.normalize_and_push_type_outlives_obligation( outlives, - ) { - Ok(normalized_outlives) => { - outlives = normalized_outlives; - } - Err(e) => { - self.infcx.err_ctxt().report_fulfillment_errors(e); - } - } - } - - known_type_outlives_obligations.push(outlives); + span, + &mut known_type_outlives_obligations, + &mut constraints, + ); + }; } let unnormalized_input_output_tys = self @@ -356,6 +347,44 @@ impl<'tcx> UniversalRegionRelationsBuilder<'_, 'tcx> { } } + fn normalize_and_push_type_outlives_obligation( + &self, + mut outlives: ty::PolyTypeOutlivesPredicate<'tcx>, + span: Span, + known_type_outlives_obligations: &mut Vec<ty::PolyTypeOutlivesPredicate<'tcx>>, + constraints: &mut Vec<&QueryRegionConstraints<'tcx>>, + ) { + // In the new solver, normalize the type-outlives obligation assumptions. + if self.infcx.next_trait_solver() { + let Ok(TypeOpOutput { + output: normalized_outlives, + constraints: constraints_normalize, + error_info: _, + }) = CustomTypeOp::new( + |ocx| { + ocx.deeply_normalize( + &ObligationCause::dummy_with_span(span), + self.param_env, + outlives, + ) + .map_err(|_: Vec<ScrubbedTraitError<'tcx>>| NoSolution) + }, + "normalize type outlives obligation", + ) + .fully_perform(self.infcx, span) + else { + self.infcx.dcx().delayed_bug(format!("could not normalize {outlives:?}")); + return; + }; + outlives = normalized_outlives; + if let Some(c) = constraints_normalize { + constraints.push(c); + } + } + + known_type_outlives_obligations.push(outlives); + } + /// Update the type of a single local, which should represent /// either the return type of the MIR or one of its arguments. 
At /// the same time, compute and add any implied bounds that come diff --git a/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs b/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs index 695a1cdac0d..6182b68f6f4 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/local_use_map.rs @@ -82,7 +82,7 @@ impl<'a> Iterator for AppearancesIter<'a> { impl LocalUseMap { pub(crate) fn build( live_locals: &[Local], - elements: &DenseLocationMap, + location_map: &DenseLocationMap, body: &Body<'_>, ) -> Self { let nones = IndexVec::from_elem(None, &body.local_decls); @@ -101,7 +101,7 @@ impl LocalUseMap { IndexVec::from_elem(false, &body.local_decls); live_locals.iter().for_each(|&local| locals_with_use_data[local] = true); - LocalUseMapBuild { local_use_map: &mut local_use_map, elements, locals_with_use_data } + LocalUseMapBuild { local_use_map: &mut local_use_map, location_map, locals_with_use_data } .visit_body(body); local_use_map @@ -125,7 +125,7 @@ impl LocalUseMap { struct LocalUseMapBuild<'me> { local_use_map: &'me mut LocalUseMap, - elements: &'me DenseLocationMap, + location_map: &'me DenseLocationMap, // Vector used in `visit_local` to signal which `Local`s do we need // def/use/drop information on, constructed from `live_locals` (that @@ -147,7 +147,7 @@ impl Visitor<'_> for LocalUseMapBuild<'_> { DefUse::Use => &mut self.local_use_map.first_use_at[local], DefUse::Drop => &mut self.local_use_map.first_drop_at[local], }; - let point_index = self.elements.point_from_location(location); + let point_index = self.location_map.point_from_location(location); let appearance_index = self .local_use_map .appearances diff --git a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs index 3e9900cce5f..4e0b2a4e296 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs @@ -32,24 +32,32 @@ mod trace; pub(super) fn generate<'a, 'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, body: &Body<'tcx>, - elements: &DenseLocationMap, + location_map: &DenseLocationMap, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, ) { debug!("liveness::generate"); - let free_regions = regions_that_outlive_free_regions( - typeck.infcx.num_region_vars(), - &typeck.universal_regions, - &typeck.constraints.outlives_constraints, - ); + // NLLs can avoid computing some liveness data here because its constraints are + // location-insensitive, but that doesn't work in polonius: locals whose type contains a region + // that outlives a free region are not necessarily live everywhere in a flow-sensitive setting, + // unlike NLLs. 
+ let free_regions = if !typeck.tcx().sess.opts.unstable_opts.polonius.is_next_enabled() { + regions_that_outlive_free_regions( + typeck.infcx.num_region_vars(), + &typeck.universal_regions, + &typeck.constraints.outlives_constraints, + ) + } else { + typeck.universal_regions.universal_regions_iter().collect() + }; let (relevant_live_locals, boring_locals) = compute_relevant_live_locals(typeck.tcx(), &free_regions, body); trace::trace( typeck, body, - elements, + location_map, flow_inits, move_data, relevant_live_locals, diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs index c7a2d32b31d..c564d85616e 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs @@ -1,5 +1,5 @@ use rustc_data_structures::fx::{FxIndexMap, FxIndexSet}; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::interval::IntervalSet; use rustc_infer::infer::canonical::QueryRegionConstraints; use rustc_infer::infer::outlives::for_liveness; @@ -16,7 +16,7 @@ use rustc_trait_selection::traits::query::type_op::{DropckOutlives, TypeOp, Type use tracing::debug; use crate::polonius; -use crate::region_infer::values::{self, LiveLoans}; +use crate::region_infer::values; use crate::type_check::liveness::local_use_map::LocalUseMap; use crate::type_check::{NormalizeLocation, TypeChecker}; @@ -37,49 +37,18 @@ use crate::type_check::{NormalizeLocation, TypeChecker}; pub(super) fn trace<'a, 'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, body: &Body<'tcx>, - elements: &DenseLocationMap, + location_map: &DenseLocationMap, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, relevant_live_locals: Vec<Local>, boring_locals: Vec<Local>, ) { - let local_use_map = &LocalUseMap::build(&relevant_live_locals, elements, body); - - // When using `-Zpolonius=next`, compute the set of loans that can reach a given region. - if typeck.tcx().sess.opts.unstable_opts.polonius.is_next_enabled() { - let borrow_set = &typeck.borrow_set; - let mut live_loans = LiveLoans::new(borrow_set.len()); - let outlives_constraints = &typeck.constraints.outlives_constraints; - let graph = outlives_constraints.graph(typeck.infcx.num_region_vars()); - let region_graph = - graph.region_graph(outlives_constraints, typeck.universal_regions.fr_static); - - // Traverse each issuing region's constraints, and record the loan as flowing into the - // outlived region. - for (loan, issuing_region_data) in borrow_set.iter_enumerated() { - for succ in rustc_data_structures::graph::depth_first_search( - ®ion_graph, - issuing_region_data.region, - ) { - // We don't need to mention that a loan flows into its issuing region. - if succ == issuing_region_data.region { - continue; - } - - live_loans.inflowing_loans.insert(succ, loan); - } - } - - // Store the inflowing loans in the liveness constraints: they will be used to compute live - // loans when liveness data is recorded there. 
- typeck.constraints.liveness_constraints.loans = Some(live_loans); - }; - + let local_use_map = &LocalUseMap::build(&relevant_live_locals, location_map, body); let cx = LivenessContext { typeck, body, flow_inits, - elements, + location_map, local_use_map, move_data, drop_data: FxIndexMap::default(), @@ -100,7 +69,7 @@ struct LivenessContext<'a, 'typeck, 'b, 'tcx> { typeck: &'a mut TypeChecker<'typeck, 'tcx>, /// Defines the `PointIndex` mapping - elements: &'a DenseLocationMap, + location_map: &'a DenseLocationMap, /// MIR we are analyzing. body: &'a Body<'tcx>, @@ -129,7 +98,7 @@ struct LivenessResults<'a, 'typeck, 'b, 'tcx> { cx: LivenessContext<'a, 'typeck, 'b, 'tcx>, /// Set of points that define the current local. - defs: BitSet<PointIndex>, + defs: DenseBitSet<PointIndex>, /// Points where the current variable is "use live" -- meaning /// that there is a future "full use" that may use its value. @@ -149,10 +118,10 @@ struct LivenessResults<'a, 'typeck, 'b, 'tcx> { impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { fn new(cx: LivenessContext<'a, 'typeck, 'b, 'tcx>) -> Self { - let num_points = cx.elements.num_points(); + let num_points = cx.location_map.num_points(); LivenessResults { cx, - defs: BitSet::new_empty(num_points), + defs: DenseBitSet::new_empty(num_points), use_live_at: IntervalSet::new(num_points), drop_live_at: IntervalSet::new(num_points), drop_locations: vec![], @@ -213,14 +182,14 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { fn add_extra_drop_facts(&mut self, relevant_live_locals: &[Local]) { // This collect is more necessary than immediately apparent // because these facts go into `add_drop_live_facts_for()`, - // which also writes to `all_facts`, and so this is genuinely + // which also writes to `polonius_facts`, and so this is genuinely // a simultaneous overlapping mutable borrow. // FIXME for future hackers: investigate whether this is // actually necessary; these facts come from Polonius // and probably maybe plausibly does not need to go back in. // It may be necessary to just pick out the parts of // `add_drop_live_facts_for()` that make sense. - let Some(facts) = self.cx.typeck.all_facts.as_ref() else { return }; + let Some(facts) = self.cx.typeck.polonius_facts.as_ref() else { return }; let facts_to_add: Vec<_> = { let relevant_live_locals: FxIndexSet<_> = relevant_live_locals.iter().copied().collect(); @@ -240,7 +209,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { .collect() }; - let live_at = IntervalSet::new(self.cx.elements.num_points()); + let live_at = IntervalSet::new(self.cx.location_map.num_points()); for (local, local_ty, location) in facts_to_add { self.cx.add_drop_live_facts_for(local, local_ty, &[location], &live_at); } @@ -279,7 +248,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // * Inclusively, the block start // * Exclusively, the previous definition (if it's in this block) // * Exclusively, the previous live_at setting (an optimization) - let block_start = self.cx.elements.to_block_start(p); + let block_start = self.cx.location_map.to_block_start(p); let previous_defs = self.defs.last_set_in(block_start..=p); let previous_live_at = self.use_live_at.last_set_in(block_start..=p); @@ -303,12 +272,12 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // terminators of predecessor basic blocks. Push those onto the // stack so that the next iteration(s) will process them. 
- let block = self.cx.elements.to_location(block_start).block; + let block = self.cx.location_map.to_location(block_start).block; self.stack.extend( self.cx.body.basic_blocks.predecessors()[block] .iter() .map(|&pred_bb| self.cx.body.terminator_loc(pred_bb)) - .map(|pred_loc| self.cx.elements.point_from_location(pred_loc)), + .map(|pred_loc| self.cx.location_map.point_from_location(pred_loc)), ); } } @@ -331,7 +300,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // Find the drops where `local` is initialized. for drop_point in self.cx.local_use_map.drops(local) { - let location = self.cx.elements.to_location(drop_point); + let location = self.cx.location_map.to_location(drop_point); debug_assert_eq!(self.cx.body.terminator_loc(location.block), location,); if self.cx.initialized_at_terminator(location.block, mpi) @@ -367,7 +336,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { debug!( "compute_drop_live_points_for_block(mpi={:?}, term_point={:?})", self.cx.move_data.move_paths[mpi].place, - self.cx.elements.to_location(term_point), + self.cx.location_map.to_location(term_point), ); // We are only invoked with terminators where `mpi` is @@ -377,12 +346,15 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { // Otherwise, scan backwards through the statements in the // block. One of them may be either a definition or use // live point. - let term_location = self.cx.elements.to_location(term_point); + let term_location = self.cx.location_map.to_location(term_point); debug_assert_eq!(self.cx.body.terminator_loc(term_location.block), term_location,); let block = term_location.block; - let entry_point = self.cx.elements.entry_point(term_location.block); + let entry_point = self.cx.location_map.entry_point(term_location.block); for p in (entry_point..term_point).rev() { - debug!("compute_drop_live_points_for_block: p = {:?}", self.cx.elements.to_location(p)); + debug!( + "compute_drop_live_points_for_block: p = {:?}", + self.cx.location_map.to_location(p) + ); if self.defs.contains(p) { debug!("compute_drop_live_points_for_block: def site"); @@ -428,7 +400,7 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { } let pred_term_loc = self.cx.body.terminator_loc(pred_block); - let pred_term_point = self.cx.elements.point_from_location(pred_term_loc); + let pred_term_point = self.cx.location_map.point_from_location(pred_term_loc); // If the terminator of this predecessor either *assigns* // our value or is a "normal use", then stop. @@ -523,7 +495,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { /// points `live_at`. fn add_use_live_facts_for(&mut self, value: Ty<'tcx>, live_at: &IntervalSet<PointIndex>) { debug!("add_use_live_facts_for(value={:?})", value); - Self::make_all_regions_live(self.elements, self.typeck, value, live_at); + Self::make_all_regions_live(self.location_map, self.typeck, value, live_at); } /// Some variable with type `live_ty` is "drop live" at `location` @@ -547,7 +519,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { dropped_local, dropped_ty, drop_locations, - values::pretty_print_points(self.elements, live_at.iter()), + values::pretty_print_points(self.location_map, live_at.iter()), ); let drop_data = self.drop_data.entry(dropped_ty).or_insert_with({ @@ -574,19 +546,19 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { // All things in the `outlives` array may be touched by // the destructor and must be live at this point. 
for &kind in &drop_data.dropck_result.kinds { - Self::make_all_regions_live(self.elements, self.typeck, kind, live_at); + Self::make_all_regions_live(self.location_map, self.typeck, kind, live_at); polonius::legacy::emit_drop_facts( self.typeck.tcx(), dropped_local, &kind, self.typeck.universal_regions, - self.typeck.all_facts, + self.typeck.polonius_facts, ); } } fn make_all_regions_live( - elements: &DenseLocationMap, + location_map: &DenseLocationMap, typeck: &mut TypeChecker<'_, 'tcx>, value: impl TypeVisitable<TyCtxt<'tcx>> + Relate<TyCtxt<'tcx>>, live_at: &IntervalSet<PointIndex>, @@ -594,7 +566,7 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { debug!("make_all_regions_live(value={:?})", value); debug!( "make_all_regions_live: live_at={}", - values::pretty_print_points(elements, live_at.iter()), + values::pretty_print_points(location_map, live_at.iter()), ); value.visit_with(&mut for_liveness::FreeRegionsVisitor { diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 10fb8a399a2..e0196d55f20 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -49,7 +49,7 @@ use crate::constraints::{OutlivesConstraint, OutlivesConstraintSet}; use crate::diagnostics::UniverseInfo; use crate::member_constraints::MemberConstraintSet; use crate::polonius::PoloniusContext; -use crate::polonius::legacy::{AllFacts, LocationTable}; +use crate::polonius::legacy::{PoloniusFacts, PoloniusLocationTable}; use crate::region_infer::TypeTest; use crate::region_infer::values::{LivenessValues, PlaceholderIndex, PlaceholderIndices}; use crate::renumber::RegionCtxt; @@ -98,29 +98,29 @@ mod relate_tys; /// - `body` -- MIR body to type-check /// - `promoted` -- map of promoted constants within `body` /// - `universal_regions` -- the universal regions from `body`s function signature -/// - `location_table` -- MIR location map of `body` +/// - `location_table` -- for datalog polonius, the map between `Location`s and `RichLocation`s /// - `borrow_set` -- information about borrows occurring in `body` -/// - `all_facts` -- when using Polonius, this is the generated set of Polonius facts +/// - `polonius_facts` -- when using Polonius, this is the generated set of Polonius facts /// - `flow_inits` -- results of a maybe-init dataflow analysis /// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis -/// - `elements` -- MIR region map +/// - `location_map` -- map between MIR `Location` and `PointIndex` pub(crate) fn type_check<'a, 'tcx>( infcx: &BorrowckInferCtxt<'tcx>, body: &Body<'tcx>, promoted: &IndexSlice<Promoted, Body<'tcx>>, universal_regions: UniversalRegions<'tcx>, - location_table: &LocationTable, + location_table: &PoloniusLocationTable, borrow_set: &BorrowSet<'tcx>, - all_facts: &mut Option<AllFacts>, + polonius_facts: &mut Option<PoloniusFacts>, flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, - elements: Rc<DenseLocationMap>, + location_map: Rc<DenseLocationMap>, ) -> MirTypeckResults<'tcx> { let implicit_region_bound = ty::Region::new_var(infcx.tcx, universal_regions.fr_fn_body); let mut constraints = MirTypeckRegionConstraints { placeholder_indices: PlaceholderIndices::default(), placeholder_index_to_region: IndexVec::default(), - liveness_constraints: LivenessValues::with_specific_points(Rc::clone(&elements)), + liveness_constraints: LivenessValues::with_specific_points(Rc::clone(&location_map)), 
outlives_constraints: OutlivesConstraintSet::default(), member_constraints: MemberConstraintSet::default(), type_tests: Vec::default(), @@ -165,7 +165,7 @@ pub(crate) fn type_check<'a, 'tcx>( reported_errors: Default::default(), universal_regions: &universal_region_relations.universal_regions, location_table, - all_facts, + polonius_facts, borrow_set, constraints: &mut constraints, polonius_context: &mut polonius_context, @@ -180,7 +180,7 @@ pub(crate) fn type_check<'a, 'tcx>( typeck.equate_inputs_and_outputs(body, &normalized_inputs_and_output); typeck.check_signature_annotation(body); - liveness::generate(&mut typeck, body, &elements, flow_inits, move_data); + liveness::generate(&mut typeck, body, &location_map, flow_inits, move_data); let opaque_type_values = opaque_types::take_opaques_and_register_member_constraints(&mut typeck); @@ -298,7 +298,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> { context.ambient_variance(), base_ty.ty, location.to_locations(), - ConstraintCategory::TypeAnnotation, + ConstraintCategory::TypeAnnotation(AnnotationSource::OpaqueCast), ) .unwrap(); } @@ -333,7 +333,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> { ty::Invariant, &UserTypeProjection { base: annotation_index, projs: vec![] }, locations, - ConstraintCategory::Boring, + ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg), ) { let annotation = &self.typeck.user_type_annotations[annotation_index]; span_mirbug!( @@ -455,7 +455,7 @@ impl<'a, 'b, 'tcx> Visitor<'tcx> for TypeVerifier<'a, 'b, 'tcx> { ty::Invariant, user_ty, Locations::All(*span), - ConstraintCategory::TypeAnnotation, + ConstraintCategory::TypeAnnotation(AnnotationSource::Declaration), ) { span_mirbug!( self, @@ -495,14 +495,14 @@ impl<'a, 'b, 'tcx> TypeVerifier<'a, 'b, 'tcx> { // Use new sets of constraints and closure bounds so that we can // modify their locations. - let all_facts = &mut None; + let polonius_facts = &mut None; let mut constraints = Default::default(); let mut liveness_constraints = LivenessValues::without_specific_points(Rc::new(DenseLocationMap::new(promoted_body))); // Don't try to add borrow_region facts for the promoted MIR let mut swap_constraints = |this: &mut Self| { - mem::swap(this.typeck.all_facts, all_facts); + mem::swap(this.typeck.polonius_facts, polonius_facts); mem::swap(&mut this.typeck.constraints.outlives_constraints, &mut constraints); mem::swap(&mut this.typeck.constraints.liveness_constraints, &mut liveness_constraints); }; @@ -560,8 +560,8 @@ struct TypeChecker<'a, 'tcx> { implicit_region_bound: ty::Region<'tcx>, reported_errors: FxIndexSet<(Ty<'tcx>, Span)>, universal_regions: &'a UniversalRegions<'tcx>, - location_table: &'a LocationTable, - all_facts: &'a mut Option<AllFacts>, + location_table: &'a PoloniusLocationTable, + polonius_facts: &'a mut Option<PoloniusFacts>, borrow_set: &'a BorrowSet<'tcx>, constraints: &'a mut MirTypeckRegionConstraints<'tcx>, /// When using `-Zpolonius=next`, the helper data used to create polonius constraints. 
@@ -927,7 +927,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ty::Invariant, &UserTypeProjection { base: annotation_index, projs: vec![] }, location.to_locations(), - ConstraintCategory::Boring, + ConstraintCategory::TypeAnnotation(AnnotationSource::GenericArg), ) { let annotation = &self.user_type_annotations[annotation_index]; span_mirbug!( @@ -962,7 +962,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { *variance, projection, Locations::All(stmt.source_info.span), - ConstraintCategory::TypeAnnotation, + ConstraintCategory::TypeAnnotation(AnnotationSource::Ascription), ) { let annotation = &self.user_type_annotations[projection.base]; span_mirbug!( @@ -1226,6 +1226,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { Some(l) if !body.local_decls[l].is_user_variable() => { ConstraintCategory::Boring } + // The return type of a call is interesting for diagnostics. _ => ConstraintCategory::Assignment, }; @@ -1653,7 +1654,20 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { match *cast_kind { CastKind::PointerCoercion(PointerCoercion::ReifyFnPointer, coercion_source) => { let is_implicit_coercion = coercion_source == CoercionSource::Implicit; - let src_sig = op.ty(body, tcx).fn_sig(tcx); + let src_ty = op.ty(body, tcx); + let mut src_sig = src_ty.fn_sig(tcx); + if let ty::FnDef(def_id, _) = src_ty.kind() + && let ty::FnPtr(_, target_hdr) = *ty.kind() + && tcx.codegen_fn_attrs(def_id).safe_target_features + && target_hdr.safety.is_safe() + && let Some(safe_sig) = tcx.adjust_target_feature_sig( + *def_id, + src_sig, + body.source.def_id(), + ) + { + src_sig = safe_sig; + } // HACK: This shouldn't be necessary... We can remove this when we actually // get binders with where clauses, then elaborate implied bounds into that @@ -2169,7 +2183,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ty_left, common_ty, location.to_locations(), - ConstraintCategory::Boring, + ConstraintCategory::CallArgument(None), ) .unwrap_or_else(|err| { bug!("Could not equate type variable with {:?}: {:?}", ty_left, err) @@ -2178,7 +2192,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { ty_right, common_ty, location.to_locations(), - ConstraintCategory::Boring, + ConstraintCategory::CallArgument(None), ) { span_mirbug!( self, @@ -2221,6 +2235,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { Rvalue::RawPtr(..) | Rvalue::ThreadLocalRef(..) + | Rvalue::Len(..) | Rvalue::Discriminant(..) | Rvalue::NullaryOp(NullOp::OffsetOf(..), _) => {} } @@ -2236,6 +2251,7 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { | Rvalue::Repeat(..) | Rvalue::Ref(..) | Rvalue::RawPtr(..) + | Rvalue::Len(..) | Rvalue::Cast(..) | Rvalue::ShallowInitBox(..) | Rvalue::BinaryOp(..) @@ -2327,18 +2343,18 @@ impl<'a, 'tcx> TypeChecker<'a, 'tcx> { borrowed_place: &Place<'tcx>, ) { // These constraints are only meaningful during borrowck: - let Self { borrow_set, location_table, all_facts, constraints, .. } = self; + let Self { borrow_set, location_table, polonius_facts, constraints, .. } = self; // In Polonius mode, we also push a `loan_issued_at` fact // linking the loan to the region (in some cases, though, // there is no loan associated with this borrow expression -- // that occurs when we are borrowing an unsafe place, for // example). 
- if let Some(all_facts) = all_facts { + if let Some(polonius_facts) = polonius_facts { let _prof_timer = self.infcx.tcx.prof.generic_activity("polonius_fact_generation"); if let Some(borrow_index) = borrow_set.get_index_of(&location) { let region_vid = borrow_region.as_var(); - all_facts.loan_issued_at.push(( + polonius_facts.loan_issued_at.push(( region_vid.into(), borrow_index, location_table.mid_index(location), diff --git a/compiler/rustc_borrowck/src/type_check/opaque_types.rs b/compiler/rustc_borrowck/src/type_check/opaque_types.rs index edf3b1ae092..ad4e006c21a 100644 --- a/compiler/rustc_borrowck/src/type_check/opaque_types.rs +++ b/compiler/rustc_borrowck/src/type_check/opaque_types.rs @@ -25,8 +25,8 @@ pub(super) fn take_opaques_and_register_member_constraints<'tcx>( let opaque_types = infcx .take_opaque_types() .into_iter() - .map(|(opaque_type_key, decl)| { - let hidden_type = infcx.resolve_vars_if_possible(decl.hidden_type); + .map(|(opaque_type_key, hidden_type)| { + let hidden_type = infcx.resolve_vars_if_possible(hidden_type); register_member_constraints( typeck, &mut member_constraints, diff --git a/compiler/rustc_borrowck/src/universal_regions.rs b/compiler/rustc_borrowck/src/universal_regions.rs index 3dc4569c57b..26af86c0cdd 100644 --- a/compiler/rustc_borrowck/src/universal_regions.rs +++ b/compiler/rustc_borrowck/src/universal_regions.rs @@ -467,15 +467,13 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { self.infcx.tcx.local_parent(self.mir_def), |r| { debug!(?r); - if !indices.indices.contains_key(&r) { - let region_vid = { - let name = r.get_name_or_anon(); - self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name)) - }; - - debug!(?region_vid); - indices.insert_late_bound_region(r, region_vid.as_var()); - } + let region_vid = { + let name = r.get_name_or_anon(); + self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name)) + }; + + debug!(?region_vid); + indices.insert_late_bound_region(r, region_vid.as_var()); }, ); @@ -484,21 +482,17 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { self.infcx.num_region_vars() }; - // "Liberate" the late-bound regions. These correspond to - // "local" free regions. + // Converse of above, if this is a function/closure then the late-bound regions declared + // on its signature are local. + // + // We manually loop over `bound_inputs_and_output` instead of using + // `for_each_late_bound_region_in_item` as we may need to add the otherwise + // implicit `ClosureEnv` region. let bound_inputs_and_output = self.compute_inputs_and_output(&indices, defining_ty); - - let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars( - FR, - self.mir_def, - bound_inputs_and_output, - &mut indices, - ); - // Converse of above, if this is a function/closure then the late-bound regions declared on its - // signature are local. 
- for_each_late_bound_region_in_item(self.infcx.tcx, self.mir_def, |r| { - debug!(?r); - if !indices.indices.contains_key(&r) { + for (idx, bound_var) in bound_inputs_and_output.bound_vars().iter().enumerate() { + if let ty::BoundVariableKind::Region(kind) = bound_var { + let kind = ty::LateParamRegionKind::from_bound(ty::BoundVar::from_usize(idx), kind); + let r = ty::Region::new_late_param(self.infcx.tcx, self.mir_def.to_def_id(), kind); let region_vid = { let name = r.get_name_or_anon(); self.infcx.next_nll_region_var(FR, || RegionCtxt::LateBound(name)) @@ -507,7 +501,12 @@ impl<'cx, 'tcx> UniversalRegionsBuilder<'cx, 'tcx> { debug!(?region_vid); indices.insert_late_bound_region(r, region_vid.as_var()); } - }); + } + let inputs_and_output = self.infcx.replace_bound_regions_with_nll_infer_vars( + self.mir_def, + bound_inputs_and_output, + &indices, + ); let (unnormalized_output_ty, mut unnormalized_input_tys) = inputs_and_output.split_last().unwrap(); @@ -832,10 +831,9 @@ impl<'tcx> BorrowckInferCtxt<'tcx> { #[instrument(level = "debug", skip(self, indices))] fn replace_bound_regions_with_nll_infer_vars<T>( &self, - origin: NllRegionVariableOrigin, all_outlive_scope: LocalDefId, value: ty::Binder<'tcx, T>, - indices: &mut UniversalRegionIndices<'tcx>, + indices: &UniversalRegionIndices<'tcx>, ) -> T where T: TypeFoldable<TyCtxt<'tcx>>, @@ -845,18 +843,7 @@ impl<'tcx> BorrowckInferCtxt<'tcx> { let kind = ty::LateParamRegionKind::from_bound(br.var, br.kind); let liberated_region = ty::Region::new_late_param(self.tcx, all_outlive_scope.to_def_id(), kind); - let region_vid = { - let name = match br.kind.get_name() { - Some(name) => name, - _ => sym::anon, - }; - - self.next_nll_region_var(origin, || RegionCtxt::Bound(name)) - }; - - indices.insert_late_bound_region(liberated_region, region_vid.as_var()); - debug!(?liberated_region, ?region_vid); - region_vid + ty::Region::new_var(self.tcx, indices.to_region_vid(liberated_region)) }); value } @@ -870,7 +857,7 @@ impl<'tcx> UniversalRegionIndices<'tcx> { /// well. These are used for error reporting. fn insert_late_bound_region(&mut self, r: ty::Region<'tcx>, vid: ty::RegionVid) { debug!("insert_late_bound_region({:?}, {:?})", r, vid); - self.indices.insert(r, vid); + assert_eq!(self.indices.insert(r, vid), None); } /// Converts `r` into a local inference variable: `r` can either diff --git a/compiler/rustc_borrowck/src/util/collect_writes.rs b/compiler/rustc_borrowck/src/util/collect_writes.rs deleted file mode 100644 index 55f1073176a..00000000000 --- a/compiler/rustc_borrowck/src/util/collect_writes.rs +++ /dev/null @@ -1,35 +0,0 @@ -use rustc_middle::mir::visit::{PlaceContext, Visitor}; -use rustc_middle::mir::{Body, Local, Location}; - -pub(crate) trait FindAssignments { - // Finds all statements that assign directly to local (i.e., X = ...) - // and returns their locations. - fn find_assignments(&self, local: Local) -> Vec<Location>; -} - -impl<'tcx> FindAssignments for Body<'tcx> { - fn find_assignments(&self, local: Local) -> Vec<Location> { - let mut visitor = FindLocalAssignmentVisitor { needle: local, locations: vec![] }; - visitor.visit_body(self); - visitor.locations - } -} - -// The Visitor walks the MIR to return the assignment statements corresponding -// to a Local. 
-struct FindLocalAssignmentVisitor { - needle: Local, - locations: Vec<Location>, -} - -impl<'tcx> Visitor<'tcx> for FindLocalAssignmentVisitor { - fn visit_local(&mut self, local: Local, place_context: PlaceContext, location: Location) { - if self.needle != local { - return; - } - - if place_context.is_place_assignment() { - self.locations.push(location); - } - } -} diff --git a/compiler/rustc_borrowck/src/util/mod.rs b/compiler/rustc_borrowck/src/util/mod.rs deleted file mode 100644 index 5f2960b768b..00000000000 --- a/compiler/rustc_borrowck/src/util/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod collect_writes; - -pub(crate) use collect_writes::FindAssignments; diff --git a/compiler/rustc_codegen_cranelift/Cargo.lock b/compiler/rustc_codegen_cranelift/Cargo.lock index ec71370ef9e..b5aba86079f 100644 --- a/compiler/rustc_codegen_cranelift/Cargo.lock +++ b/compiler/rustc_codegen_cranelift/Cargo.lock @@ -10,15 +10,15 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "anyhow" -version = "1.0.94" +version = "1.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" +checksum = "34ac096ce696dc2fcabef30516bb13c0a68a11d30131d3df6f04711467681b04" [[package]] name = "arbitrary" -version = "1.3.2" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d5a26814d8dcb93b0e5a0ff3c6d80a8843bafb21b39e8e18a6f05471870e110" +checksum = "dde20b3d026af13f561bdd0f15edf01fc734f0dafcedbaf42bba506a9517f223" [[package]] name = "bitflags" @@ -211,9 +211,9 @@ checksum = "2acce4a10f12dc2fb14a218589d4f1f62ef011b2d0cc4b3cb1bba8e94da14649" [[package]] name = "foldhash" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f81ec6369c545a7d40e4589b5597581fa1c441fe1cce96dd1de43159910a36a2" +checksum = "a0d2fde1f7b3d48b8395d5f2de76c18a528bd6a9cdde438df747bfcba3e05d6f" [[package]] name = "gimli" @@ -253,15 +253,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.169" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "b5aba8db14291edd000dfcc4d620c7ebfb122c613afb886ca8803fa4e128a20a" [[package]] name = "libloading" -version = "0.8.4" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e310b3a6b5907f99202fcdb4960ff45b93735d7c7d96b760fcff8db2dc0e103d" +checksum = "fc2f4eb4bc735547cfed7c0a4922cbd04a4655978c09b54f1f7b228750664c34" dependencies = [ "cfg-if", "windows-targets", @@ -290,9 +290,9 @@ checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "object" -version = "0.36.5" +version = "0.36.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" +checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" dependencies = [ "crc32fast", "hashbrown 0.15.2", @@ -311,9 +311,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.38" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "0e4dccaaaf89514f546c693ddc140f729f958c247918a13380cccc6078391acc" dependencies = [ "proc-macro2", ] @@ -346,9 +346,9 @@ dependencies = [ [[package]] 
name = "rustc-hash" -version = "2.0.0" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" +checksum = "c7fb8039b3032c191086b10f11f319a6e99e1e82889c5cc6046f515c9db1d497" [[package]] name = "rustc_codegen_cranelift" @@ -370,18 +370,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6513c1ad0b11a9376da888e3e0baa0077f1aed55c17f50e7b2397136129fb88f" +checksum = "02fc4265df13d6fa1d00ecff087228cc0a2b5f3c0e87e258d8b94a156e984c70" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.215" +version = "1.0.217" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" +checksum = "5a9bf7cf98d04a2b28aead066b7496853d4779c9cc183c440dbac457641e19a0" dependencies = [ "proc-macro2", "quote", @@ -402,9 +402,9 @@ checksum = "a8f112729512f8e442d81f95a8a7ddf2b7c6b8a1a6f509a95864142b30cab2d3" [[package]] name = "syn" -version = "2.0.90" +version = "2.0.95" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" +checksum = "46f71c0377baf4ef1cc3e3402ded576dccc315800fbc62dfc7fe04b009773b4a" dependencies = [ "proc-macro2", "quote", @@ -419,9 +419,9 @@ checksum = "61c41af27dd6d1e27b1b16b489db798443478cef1f06a660c96db617ba5de3b1" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "adb9e6ca4f869e1180728b7950e35922a7fc6397f7b641499e8f3ef06e50dc83" [[package]] name = "wasmtime-jit-icache-coherence" diff --git a/compiler/rustc_codegen_cranelift/Cargo.toml b/compiler/rustc_codegen_cranelift/Cargo.toml index 82d2b6cb2c4..bfdbc3e768a 100644 --- a/compiler/rustc_codegen_cranelift/Cargo.toml +++ b/compiler/rustc_codegen_cranelift/Cargo.toml @@ -23,6 +23,14 @@ libloading = { version = "0.8.0", optional = true } smallvec = "1.8.1" [patch.crates-io] +# Uncomment to use an unreleased version of cranelift +#cranelift-codegen = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } +#cranelift-frontend = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } +#cranelift-module = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } +#cranelift-native = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } +#cranelift-jit = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } +#cranelift-object = { git = "https://github.com/bytecodealliance/wasmtime.git", branch = "release-28.0.0", version = "0.115.0" } + # Uncomment to use local checkout of cranelift #cranelift-codegen = { path = "../wasmtime/cranelift/codegen" } #cranelift-frontend = { path = "../wasmtime/cranelift/frontend" } diff --git a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs index e47e9829916..a73e3c87d43 100644 --- a/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs +++ 
b/compiler/rustc_codegen_cranelift/build_system/build_sysroot.rs @@ -33,14 +33,7 @@ pub(crate) fn build_sysroot( let cg_clif_dylib_path = match cg_clif_dylib_src { CodegenBackend::Local(src_path) => { // Copy the backend - let cg_clif_dylib_path = if cfg!(windows) { - // Windows doesn't have rpath support, so the cg_clif dylib needs to be next to the - // binaries. - dist_dir.join("bin") - } else { - dist_dir.join("lib") - } - .join(src_path.file_name().unwrap()); + let cg_clif_dylib_path = dist_dir.join("lib").join(src_path.file_name().unwrap()); try_hard_link(src_path, &cg_clif_dylib_path); CodegenBackend::Local(cg_clif_dylib_path) } @@ -102,19 +95,14 @@ pub(crate) fn build_sysroot( .install_into_sysroot(dist_dir); } - let mut target_compiler = { - let rustc_clif = dist_dir.join(wrapper_base_name.replace("____", "rustc-clif")); - let rustdoc_clif = dist_dir.join(wrapper_base_name.replace("____", "rustdoc-clif")); - - Compiler { - cargo: bootstrap_host_compiler.cargo.clone(), - rustc: rustc_clif.clone(), - rustdoc: rustdoc_clif.clone(), - rustflags: vec![], - rustdocflags: vec![], - triple: target_triple, - runner: vec![], - } + let mut target_compiler = Compiler { + cargo: bootstrap_host_compiler.cargo.clone(), + rustc: dist_dir.join(wrapper_base_name.replace("____", "rustc-clif")), + rustdoc: dist_dir.join(wrapper_base_name.replace("____", "rustdoc-clif")), + rustflags: vec![], + rustdocflags: vec![], + triple: target_triple, + runner: vec![], }; if !is_native { target_compiler.set_cross_linker_and_runner(); diff --git a/compiler/rustc_codegen_cranelift/build_system/tests.rs b/compiler/rustc_codegen_cranelift/build_system/tests.rs index 08736db8ba0..8de419a0c4e 100644 --- a/compiler/rustc_codegen_cranelift/build_system/tests.rs +++ b/compiler/rustc_codegen_cranelift/build_system/tests.rs @@ -73,8 +73,6 @@ const BASE_SYSROOT_SUITE: &[TestCase] = &[ "example/arbitrary_self_types_pointers_and_wrappers.rs", &[], ), - TestCase::build_lib("build.alloc_system", "example/alloc_system.rs", "lib"), - TestCase::build_bin_and_run("aot.alloc_example", "example/alloc_example.rs", &[]), TestCase::jit_bin("jit.std_example", "example/std_example.rs", "arg"), TestCase::build_bin_and_run("aot.std_example", "example/std_example.rs", &["arg"]), TestCase::build_bin_and_run("aot.dst_field_align", "example/dst-field-align.rs", &[]), @@ -89,7 +87,6 @@ const BASE_SYSROOT_SUITE: &[TestCase] = &[ &[], ), TestCase::build_bin_and_run("aot.float-minmax-pass", "example/float-minmax-pass.rs", &[]), - TestCase::build_bin_and_run("aot.mod_bench", "example/mod_bench.rs", &[]), TestCase::build_bin_and_run("aot.issue-72793", "example/issue-72793.rs", &[]), TestCase::build_bin("aot.issue-59326", "example/issue-59326.rs"), TestCase::build_bin_and_run("aot.neon", "example/neon.rs", &[]), diff --git a/compiler/rustc_codegen_cranelift/config.txt b/compiler/rustc_codegen_cranelift/config.txt index 9808ad624e1..f578cbef35e 100644 --- a/compiler/rustc_codegen_cranelift/config.txt +++ b/compiler/rustc_codegen_cranelift/config.txt @@ -21,15 +21,12 @@ aot.mini_core_hello_world testsuite.base_sysroot aot.arbitrary_self_types_pointers_and_wrappers aot.issue_91827_extern_types -build.alloc_system -aot.alloc_example jit.std_example aot.std_example aot.dst_field_align aot.subslice-patterns-const-eval aot.track-caller-attribute aot.float-minmax-pass -aot.mod_bench aot.issue-72793 aot.issue-59326 aot.neon diff --git a/compiler/rustc_codegen_cranelift/example/alloc_example.rs b/compiler/rustc_codegen_cranelift/example/alloc_example.rs deleted 
file mode 100644 index da70ca79439..00000000000 --- a/compiler/rustc_codegen_cranelift/example/alloc_example.rs +++ /dev/null @@ -1,44 +0,0 @@ -#![feature(start, core_intrinsics, alloc_error_handler, lang_items)] -#![allow(internal_features)] -#![no_std] - -extern crate alloc; -extern crate alloc_system; - -use alloc::boxed::Box; - -use alloc_system::System; - -#[global_allocator] -static ALLOC: System = System; - -#[cfg_attr(unix, link(name = "c"))] -#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))] -extern "C" { - fn puts(s: *const u8) -> i32; -} - -#[panic_handler] -fn panic_handler(_: &core::panic::PanicInfo<'_>) -> ! { - core::intrinsics::abort(); -} - -#[alloc_error_handler] -fn alloc_error_handler(_: alloc::alloc::Layout) -> ! { - core::intrinsics::abort(); -} - -#[lang = "eh_personality"] -fn eh_personality() -> ! { - loop {} -} - -#[start] -fn main(_argc: isize, _argv: *const *const u8) -> isize { - let world: Box<&str> = Box::new("Hello World!\0"); - unsafe { - puts(*world as *const str as *const u8); - } - - 0 -} diff --git a/compiler/rustc_codegen_cranelift/example/alloc_system.rs b/compiler/rustc_codegen_cranelift/example/alloc_system.rs deleted file mode 100644 index 2884c9c32ae..00000000000 --- a/compiler/rustc_codegen_cranelift/example/alloc_system.rs +++ /dev/null @@ -1,124 +0,0 @@ -// SPDX-License-Identifier: MIT OR Apache-2.0 -// SPDX-FileCopyrightText: The Rust Project Developers (see https://thanks.rust-lang.org) - -#![no_std] - -pub struct System; - -#[cfg(any(windows, unix, target_os = "redox"))] -mod realloc_fallback { - use core::alloc::{GlobalAlloc, Layout}; - use core::{cmp, ptr}; - impl super::System { - pub(crate) unsafe fn realloc_fallback( - &self, - ptr: *mut u8, - old_layout: Layout, - new_size: usize, - ) -> *mut u8 { - // Docs for GlobalAlloc::realloc require this to be valid: - let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align()); - let new_ptr = GlobalAlloc::alloc(self, new_layout); - if !new_ptr.is_null() { - let size = cmp::min(old_layout.size(), new_size); - ptr::copy_nonoverlapping(ptr, new_ptr, size); - GlobalAlloc::dealloc(self, ptr, old_layout); - } - new_ptr - } - } -} -#[cfg(any(unix, target_os = "redox"))] -mod platform { - use core::alloc::{GlobalAlloc, Layout}; - use core::ffi::c_void; - use core::ptr; - - use System; - extern "C" { - fn posix_memalign(memptr: *mut *mut c_void, align: usize, size: usize) -> i32; - fn free(p: *mut c_void); - } - unsafe impl GlobalAlloc for System { - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - aligned_malloc(&layout) - } - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - let ptr = self.alloc(layout.clone()); - if !ptr.is_null() { - ptr::write_bytes(ptr, 0, layout.size()); - } - ptr - } - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { - free(ptr as *mut c_void) - } - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - self.realloc_fallback(ptr, layout, new_size) - } - } - unsafe fn aligned_malloc(layout: &Layout) -> *mut u8 { - let mut out = ptr::null_mut(); - let ret = posix_memalign(&mut out, layout.align(), layout.size()); - if ret != 0 { ptr::null_mut() } else { out as *mut u8 } - } -} -#[cfg(windows)] -#[allow(nonstandard_style)] -mod platform { - use core::alloc::{GlobalAlloc, Layout}; - - use System; - type LPVOID = *mut u8; - type HANDLE = LPVOID; - type SIZE_T = usize; - type DWORD = u32; - type BOOL = i32; - extern "system" { - fn GetProcessHeap() -> 
HANDLE; - fn HeapAlloc(hHeap: HANDLE, dwFlags: DWORD, dwBytes: SIZE_T) -> LPVOID; - fn HeapFree(hHeap: HANDLE, dwFlags: DWORD, lpMem: LPVOID) -> BOOL; - fn GetLastError() -> DWORD; - } - #[repr(C)] - struct Header(*mut u8); - const HEAP_ZERO_MEMORY: DWORD = 0x00000008; - unsafe fn get_header<'a>(ptr: *mut u8) -> &'a mut Header { - &mut *(ptr as *mut Header).sub(1) - } - unsafe fn align_ptr(ptr: *mut u8, align: usize) -> *mut u8 { - let aligned = ptr.add(align - (ptr as usize & (align - 1))); - *get_header(aligned) = Header(ptr); - aligned - } - #[inline] - unsafe fn allocate_with_flags(layout: Layout, flags: DWORD) -> *mut u8 { - let size = layout.size() + layout.align(); - let ptr = HeapAlloc(GetProcessHeap(), flags, size); - (if ptr.is_null() { ptr } else { align_ptr(ptr, layout.align()) }) as *mut u8 - } - unsafe impl GlobalAlloc for System { - #[inline] - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - allocate_with_flags(layout, 0) - } - #[inline] - unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { - allocate_with_flags(layout, HEAP_ZERO_MEMORY) - } - #[inline] - unsafe fn dealloc(&self, ptr: *mut u8, _layout: Layout) { - let header = get_header(ptr); - let err = HeapFree(GetProcessHeap(), 0, header.0 as LPVOID); - debug_assert!(err != 0, "Failed to free heap memory: {}", GetLastError()); - } - #[inline] - unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 { - self.realloc_fallback(ptr, layout, new_size) - } - } -} diff --git a/compiler/rustc_codegen_cranelift/example/mod_bench.rs b/compiler/rustc_codegen_cranelift/example/mod_bench.rs deleted file mode 100644 index 11a3e8fc72d..00000000000 --- a/compiler/rustc_codegen_cranelift/example/mod_bench.rs +++ /dev/null @@ -1,37 +0,0 @@ -#![feature(start, core_intrinsics, lang_items)] -#![allow(internal_features)] -#![no_std] - -#[cfg_attr(unix, link(name = "c"))] -#[cfg_attr(target_env = "msvc", link(name = "msvcrt"))] -extern "C" {} - -#[panic_handler] -fn panic_handler(_: &core::panic::PanicInfo<'_>) -> ! 
{ - core::intrinsics::abort(); -} - -#[lang = "eh_personality"] -fn eh_personality() {} - -// Required for rustc_codegen_llvm -#[no_mangle] -unsafe extern "C" fn _Unwind_Resume() { - core::intrinsics::unreachable(); -} - -#[start] -fn main(_argc: isize, _argv: *const *const u8) -> isize { - for i in 2..10_000_000 { - black_box((i + 1) % i); - } - - 0 -} - -#[inline(never)] -fn black_box(i: u32) { - if i != 1 { - core::intrinsics::abort(); - } -} diff --git a/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch b/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch index bf07e455a75..bf58e485158 100644 --- a/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch +++ b/compiler/rustc_codegen_cranelift/patches/0029-stdlib-Disable-f16-and-f128-in-compiler-builtins.patch @@ -16,8 +16,8 @@ index 7165c3e48af..968552ad435 100644 [dependencies] core = { path = "../core" } --compiler_builtins = { version = "=0.1.140", features = ['rustc-dep-of-std'] } -+compiler_builtins = { version = "=0.1.140", features = ['rustc-dep-of-std', 'no-f16-f128'] } +-compiler_builtins = { version = "=0.1.143", features = ['rustc-dep-of-std'] } ++compiler_builtins = { version = "=0.1.143", features = ['rustc-dep-of-std', 'no-f16-f128'] } [dev-dependencies] rand = { version = "0.8.5", default-features = false, features = ["alloc"] } diff --git a/compiler/rustc_codegen_cranelift/rust-toolchain b/compiler/rustc_codegen_cranelift/rust-toolchain index 4b97f210579..e4c3dd708fd 100644 --- a/compiler/rustc_codegen_cranelift/rust-toolchain +++ b/compiler/rustc_codegen_cranelift/rust-toolchain @@ -1,4 +1,4 @@ [toolchain] -channel = "nightly-2025-01-05" +channel = "nightly-2025-01-10" components = ["rust-src", "rustc-dev", "llvm-tools"] profile = "minimal" diff --git a/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs index 1e14f41d4a2..ebbb6879610 100644 --- a/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs +++ b/compiler/rustc_codegen_cranelift/scripts/cargo-clif.rs @@ -16,7 +16,7 @@ fn main() { if let Some(name) = option_env!("BUILTIN_BACKEND") { rustflags.push(format!("-Zcodegen-backend={name}")); } else { - let dylib = sysroot.join(if cfg!(windows) { "bin" } else { "lib" }).join( + let dylib = sysroot.join("lib").join( env::consts::DLL_PREFIX.to_string() + "rustc_codegen_cranelift" + env::consts::DLL_SUFFIX, diff --git a/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs b/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs index a27b9983bf1..528031af82a 100644 --- a/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs +++ b/compiler/rustc_codegen_cranelift/scripts/rustc-clif.rs @@ -11,7 +11,7 @@ fn main() { sysroot = sysroot.parent().unwrap(); } - let cg_clif_dylib_path = sysroot.join(if cfg!(windows) { "bin" } else { "lib" }).join( + let cg_clif_dylib_path = sysroot.join("lib").join( env::consts::DLL_PREFIX.to_string() + "rustc_codegen_cranelift" + env::consts::DLL_SUFFIX, ); diff --git a/compiler/rustc_codegen_cranelift/scripts/rustdoc-clif.rs b/compiler/rustc_codegen_cranelift/scripts/rustdoc-clif.rs index 1cad312bb79..6ebe060d8bb 100644 --- a/compiler/rustc_codegen_cranelift/scripts/rustdoc-clif.rs +++ b/compiler/rustc_codegen_cranelift/scripts/rustdoc-clif.rs @@ -11,7 +11,7 @@ fn main() { sysroot = sysroot.parent().unwrap(); } - let cg_clif_dylib_path = sysroot.join(if cfg!(windows) { "bin" } else { "lib" 
}).join( + let cg_clif_dylib_path = sysroot.join("lib").join( env::consts::DLL_PREFIX.to_string() + "rustc_codegen_cranelift" + env::consts::DLL_SUFFIX, ); diff --git a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh index 442d61c6ade..e569da90cf7 100755 --- a/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh +++ b/compiler/rustc_codegen_cranelift/scripts/test_rustc_tests.sh @@ -123,7 +123,6 @@ rm tests/ui/mir/mir_raw_fat_ptr.rs # same rm tests/ui/consts/issue-33537.rs # same rm tests/ui/consts/const-mut-refs-crate.rs # same rm tests/ui/abi/large-byval-align.rs # exceeds implementation limit of Cranelift -rm tests/ui/invalid-compile-flags/crate-type-flag.rs # warning about proc-macros and panic=abort # doesn't work due to the way the rustc test suite is invoked. # should work when using ./x.py test the way it is intended diff --git a/compiler/rustc_codegen_cranelift/src/abi/mod.rs b/compiler/rustc_codegen_cranelift/src/abi/mod.rs index 2466bfe60c7..2c99597922e 100644 --- a/compiler/rustc_codegen_cranelift/src/abi/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/abi/mod.rs @@ -65,7 +65,11 @@ pub(crate) fn conv_to_call_conv(sess: &Session, c: Conv, default_call_conv: Call sess.dcx().fatal("C-cmse-nonsecure-entry call conv is not yet implemented"); } - Conv::Msp430Intr | Conv::PtxKernel | Conv::AvrInterrupt | Conv::AvrNonBlockingInterrupt => { + Conv::Msp430Intr + | Conv::PtxKernel + | Conv::GpuKernel + | Conv::AvrInterrupt + | Conv::AvrNonBlockingInterrupt => { unreachable!("tried to use {c:?} call conv which only exists on an unsupported target"); } } diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index 956a024fa4d..34066eb83fc 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -828,6 +828,12 @@ fn codegen_stmt<'tcx>( fx.bcx.ins().nop(); } } + Rvalue::Len(place) => { + let place = codegen_place(fx, place); + let usize_layout = fx.layout_of(fx.tcx.types.usize); + let len = codegen_array_len(fx, place); + lval.write_cvalue(fx, CValue::by_val(len, usize_layout)); + } Rvalue::ShallowInitBox(ref operand, content_ty) => { let content_ty = fx.monomorphize(content_ty); let box_layout = fx.layout_of(Ty::new_box(fx.tcx, content_ty)); diff --git a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs index 734574338d0..dcfd7ddabbc 100644 --- a/compiler/rustc_codegen_cranelift/src/codegen_i128.rs +++ b/compiler/rustc_codegen_cranelift/src/codegen_i128.rs @@ -62,9 +62,8 @@ pub(crate) fn maybe_codegen<'tcx>( } } -pub(crate) fn maybe_codegen_checked<'tcx>( +pub(crate) fn maybe_codegen_mul_checked<'tcx>( fx: &mut FunctionCx<'_, '_, 'tcx>, - bin_op: BinOp, lhs: CValue<'tcx>, rhs: CValue<'tcx>, ) -> Option<CValue<'tcx>> { @@ -77,33 +76,22 @@ pub(crate) fn maybe_codegen_checked<'tcx>( } let is_signed = type_sign(lhs.layout().ty); - - match bin_op { - BinOp::BitAnd | BinOp::BitOr | BinOp::BitXor => unreachable!(), - BinOp::Add | BinOp::Sub => None, - BinOp::Mul => { - let out_ty = Ty::new_tup(fx.tcx, &[lhs.layout().ty, fx.tcx.types.bool]); - let out_place = CPlace::new_stack_slot(fx, fx.layout_of(out_ty)); - let param_types = vec![ - AbiParam::special(fx.pointer_type, ArgumentPurpose::StructReturn), - AbiParam::new(types::I128), - AbiParam::new(types::I128), - ]; - let args = [out_place.to_ptr().get_addr(fx), lhs.load_scalar(fx), rhs.load_scalar(fx)]; - 
fx.lib_call( - if is_signed { "__rust_i128_mulo" } else { "__rust_u128_mulo" }, - param_types, - vec![], - &args, - ); - Some(out_place.to_cvalue(fx)) - } - BinOp::AddUnchecked | BinOp::SubUnchecked | BinOp::MulUnchecked => unreachable!(), - BinOp::AddWithOverflow | BinOp::SubWithOverflow | BinOp::MulWithOverflow => unreachable!(), - BinOp::Offset => unreachable!("offset should only be used on pointers, not 128bit ints"), - BinOp::Div | BinOp::Rem => unreachable!(), - BinOp::Cmp => unreachable!(), - BinOp::Lt | BinOp::Le | BinOp::Eq | BinOp::Ge | BinOp::Gt | BinOp::Ne => unreachable!(), - BinOp::Shl | BinOp::ShlUnchecked | BinOp::Shr | BinOp::ShrUnchecked => unreachable!(), - } + let oflow_out_place = CPlace::new_stack_slot(fx, fx.layout_of(fx.tcx.types.i32)); + let param_types = vec![ + AbiParam::new(types::I128), + AbiParam::new(types::I128), + AbiParam::special(fx.pointer_type, ArgumentPurpose::Normal), + ]; + let args = [lhs.load_scalar(fx), rhs.load_scalar(fx), oflow_out_place.to_ptr().get_addr(fx)]; + let ret = fx.lib_call( + if is_signed { "__rust_i128_mulo" } else { "__rust_u128_mulo" }, + param_types, + vec![AbiParam::new(types::I128)], + &args, + ); + let mul = ret[0]; + let oflow = oflow_out_place.to_cvalue(fx).load_scalar(fx); + let oflow = clif_intcast(fx, oflow, types::I8, false); + let layout = fx.layout_of(Ty::new_tup(fx.tcx, &[lhs.layout().ty, fx.tcx.types.bool])); + Some(CValue::by_val_pair(mul, oflow, layout)) } diff --git a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs index f8e3a034421..2484c10848e 100644 --- a/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs +++ b/compiler/rustc_codegen_cranelift/src/compiler_builtins.rs @@ -43,7 +43,7 @@ builtin_functions! 
{ fn __divti3(n: i128, d: i128) -> i128; fn __umodti3(n: u128, d: u128) -> u128; fn __modti3(n: i128, d: i128) -> i128; - fn __rust_u128_mulo(a: u128, b: u128) -> (u128, bool); + fn __rust_u128_mulo(a: u128, b: u128, oflow: &mut i32) -> u128; // floats fn __floattisf(i: i128) -> f32; diff --git a/compiler/rustc_codegen_cranelift/src/driver/mod.rs b/compiler/rustc_codegen_cranelift/src/driver/mod.rs index fb0eed07c19..ffd47cace38 100644 --- a/compiler/rustc_codegen_cranelift/src/driver/mod.rs +++ b/compiler/rustc_codegen_cranelift/src/driver/mod.rs @@ -73,12 +73,14 @@ impl Drop for TimingGuard { impl cranelift_codegen::timing::Profiler for MeasuremeProfiler { fn start_pass(&self, pass: cranelift_codegen::timing::Pass) -> Box<dyn std::any::Any> { - let mut timing_guard = - TimingGuard { profiler: std::mem::ManuallyDrop::new(self.0.clone()), inner: None }; + let mut timing_guard = Box::new(TimingGuard { + profiler: std::mem::ManuallyDrop::new(self.0.clone()), + inner: None, + }); timing_guard.inner = Some( unsafe { &*(&*timing_guard.profiler as &SelfProfilerRef as *const SelfProfilerRef) } .generic_activity(pass.description()), ); - Box::new(timing_guard) + timing_guard } } diff --git a/compiler/rustc_codegen_cranelift/src/num.rs b/compiler/rustc_codegen_cranelift/src/num.rs index fb18f45d7dc..f44e2459a78 100644 --- a/compiler/rustc_codegen_cranelift/src/num.rs +++ b/compiler/rustc_codegen_cranelift/src/num.rs @@ -2,10 +2,10 @@ use crate::prelude::*; -pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> { +pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> IntCC { use BinOp::*; use IntCC::*; - Some(match bin_op { + match bin_op { Eq => Equal, Lt => { if signed { @@ -36,8 +36,8 @@ pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> { UnsignedGreaterThan } } - _ => return None, - }) + _ => unreachable!(), + } } fn codegen_three_way_compare<'tcx>( @@ -48,8 +48,8 @@ fn codegen_three_way_compare<'tcx>( ) -> CValue<'tcx> { // This emits `(lhs > rhs) - (lhs < rhs)`, which is cranelift's preferred form per // <https://github.com/bytecodealliance/wasmtime/blob/8052bb9e3b792503b225f2a5b2ba3bc023bff462/cranelift/codegen/src/prelude_opt.isle#L41-L47> - let gt_cc = crate::num::bin_op_to_intcc(BinOp::Gt, signed).unwrap(); - let lt_cc = crate::num::bin_op_to_intcc(BinOp::Lt, signed).unwrap(); + let gt_cc = crate::num::bin_op_to_intcc(BinOp::Gt, signed); + let lt_cc = crate::num::bin_op_to_intcc(BinOp::Lt, signed); let gt = fx.bcx.ins().icmp(gt_cc, lhs, rhs); let lt = fx.bcx.ins().icmp(lt_cc, lhs, rhs); let val = fx.bcx.ins().isub(gt, lt); @@ -63,11 +63,7 @@ fn codegen_compare_bin_op<'tcx>( lhs: Value, rhs: Value, ) -> CValue<'tcx> { - if bin_op == BinOp::Cmp { - return codegen_three_way_compare(fx, signed, lhs, rhs); - } - - let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap(); + let intcc = crate::num::bin_op_to_intcc(bin_op, signed); let val = fx.bcx.ins().icmp(intcc, lhs, rhs); CValue::by_val(val, fx.layout_of(fx.tcx.types.bool)) } @@ -79,7 +75,7 @@ pub(crate) fn codegen_binop<'tcx>( in_rhs: CValue<'tcx>, ) -> CValue<'tcx> { match bin_op { - BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt | BinOp::Cmp => { + BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => { match in_lhs.layout().ty.kind() { ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => { let signed = type_sign(in_lhs.layout().ty); @@ -91,6 +87,16 @@ pub(crate) fn codegen_binop<'tcx>( _ => {} } } + BinOp::Cmp => match 
in_lhs.layout().ty.kind() { + ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => { + let signed = type_sign(in_lhs.layout().ty); + let lhs = in_lhs.load_scalar(fx); + let rhs = in_rhs.load_scalar(fx); + + return codegen_three_way_compare(fx, signed, lhs, rhs); + } + _ => {} + }, _ => {} } @@ -200,10 +206,6 @@ pub(crate) fn codegen_checked_int_binop<'tcx>( let lhs = in_lhs.load_scalar(fx); let rhs = in_rhs.load_scalar(fx); - if let Some(res) = crate::codegen_i128::maybe_codegen_checked(fx, bin_op, in_lhs, in_rhs) { - return res; - } - let signed = type_sign(in_lhs.layout().ty); let (res, has_overflow) = match bin_op { @@ -236,6 +238,10 @@ pub(crate) fn codegen_checked_int_binop<'tcx>( (val, has_overflow) } BinOp::Mul => { + if let Some(res) = crate::codegen_i128::maybe_codegen_mul_checked(fx, in_lhs, in_rhs) { + return res; + } + let ty = fx.bcx.func.dfg.value_type(lhs); match ty { types::I8 | types::I16 | types::I32 if !signed => { @@ -357,14 +363,12 @@ pub(crate) fn codegen_float_binop<'tcx>( _ => bug!(), }; - let ret_val = fx.lib_call( + fx.lib_call( name, vec![AbiParam::new(ty), AbiParam::new(ty)], vec![AbiParam::new(ty)], &[lhs, rhs], - )[0]; - - return CValue::by_val(ret_val, in_lhs.layout()); + )[0] } BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => { let fltcc = match bin_op { @@ -431,13 +435,9 @@ pub(crate) fn codegen_ptr_binop<'tcx>( BinOp::Lt | BinOp::Le | BinOp::Ge | BinOp::Gt => { let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr); - let ptr_cmp = - fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr); - let extra_cmp = fx.bcx.ins().icmp( - bin_op_to_intcc(bin_op, false).unwrap(), - lhs_extra, - rhs_extra, - ); + let ptr_cmp = fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false), lhs_ptr, rhs_ptr); + let extra_cmp = + fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false), lhs_extra, rhs_extra); fx.bcx.ins().select(ptr_eq, extra_cmp, ptr_cmp) } diff --git a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml index 704d7b9c2fd..73ec6b84a15 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/ci.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/ci.yml @@ -22,7 +22,6 @@ jobs: - { gcc: "gcc-13.deb" } - { gcc: "gcc-13-without-int128.deb" } commands: [ - "--mini-tests", "--std-tests", # FIXME: re-enable asm tests when GCC can emit in the right syntax. 
# "--asm-tests", @@ -79,6 +78,7 @@ jobs: run: | ./y.sh prepare --only-libcore ./y.sh build --sysroot + ./y.sh test --mini-tests cargo test - name: Run y.sh cargo build @@ -87,7 +87,7 @@ jobs: - name: Clean run: | - ./y.sh clean all + ./y.sh clean all - name: Prepare dependencies run: | @@ -95,9 +95,6 @@ jobs: git config --global user.name "User" ./y.sh prepare - - name: Add more failing tests because the sysroot is not compiled with LTO - run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt - - name: Run tests run: | ./y.sh test --release --clean --build-sysroot ${{ matrix.commands }} diff --git a/compiler/rustc_codegen_gcc/.github/workflows/failures.yml b/compiler/rustc_codegen_gcc/.github/workflows/failures.yml index 2c1ed9ad429..f33d9fcc582 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/failures.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/failures.yml @@ -90,15 +90,12 @@ jobs: if: matrix.libgccjit_version.gcc != 'libgccjit12.so' run: ./y.sh prepare - - name: Add more failing tests because the sysroot is not compiled with LTO - run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt - - name: Run tests # TODO: re-enable those tests for libgccjit 12. if: matrix.libgccjit_version.gcc != 'libgccjit12.so' id: tests run: | - ${{ matrix.libgccjit_version.env_extra }} ./y.sh test --release --clean --build-sysroot --test-failing-rustc ${{ matrix.libgccjit_version.extra }} | tee output_log + ${{ matrix.libgccjit_version.env_extra }} ./y.sh test --release --clean --build-sysroot --test-failing-rustc ${{ matrix.libgccjit_version.extra }} 2>&1 | tee output_log rg --text "test result" output_log >> $GITHUB_STEP_SUMMARY - name: Run failing ui pattern tests for ICE @@ -106,7 +103,7 @@ jobs: if: matrix.libgccjit_version.gcc != 'libgccjit12.so' id: ui-tests run: | - ${{ matrix.libgccjit_version.env_extra }} ./y.sh test --release --test-failing-ui-pattern-tests ${{ matrix.libgccjit_version.extra }} | tee output_log_ui + ${{ matrix.libgccjit_version.env_extra }} ./y.sh test --release --test-failing-ui-pattern-tests ${{ matrix.libgccjit_version.extra }} 2>&1 | tee output_log_ui if grep -q "the compiler unexpectedly panicked" output_log_ui; then echo "Error: 'the compiler unexpectedly panicked' found in output logs. CI Error!!" exit 1 diff --git a/compiler/rustc_codegen_gcc/.github/workflows/gcc12.yml b/compiler/rustc_codegen_gcc/.github/workflows/gcc12.yml index 7dcad21a02e..4c2ce91e86e 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/gcc12.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/gcc12.yml @@ -82,9 +82,6 @@ jobs: #- name: Add more failing tests for GCC 12 #run: cat tests/failing-ui-tests12.txt >> tests/failing-ui-tests.txt - #- name: Add more failing tests because the sysroot is not compiled with LTO - #run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt - #- name: Run tests #run: | #./y.sh test --release --clean --build-sysroot ${{ matrix.commands }} --no-default-features diff --git a/compiler/rustc_codegen_gcc/.github/workflows/m68k.yml b/compiler/rustc_codegen_gcc/.github/workflows/m68k.yml index 1c864e04413..07bb372b360 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/m68k.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/m68k.yml @@ -23,7 +23,6 @@ jobs: fail-fast: false matrix: commands: [ - "--mini-tests", "--std-tests", # TODO(antoyo): fix those on m68k. 
#"--test-libcore", @@ -93,6 +92,7 @@ jobs: run: | ./y.sh prepare --only-libcore --cross ./y.sh build --sysroot --features compiler_builtins/no-f16-f128 --target-triple m68k-unknown-linux-gnu + ./y.sh test --mini-tests CG_GCC_TEST_TARGET=m68k-unknown-linux-gnu cargo test ./y.sh clean all @@ -102,9 +102,6 @@ jobs: git config --global user.name "User" ./y.sh prepare --cross - - name: Add more failing tests because the sysroot is not compiled with LTO - run: cat tests/failing-non-lto-tests.txt >> tests/failing-ui-tests.txt - - name: Run tests run: | ./y.sh test --release --clean --build-sysroot --sysroot-features compiler_builtins/no-f16-f128 ${{ matrix.commands }} diff --git a/compiler/rustc_codegen_gcc/.github/workflows/release.yml b/compiler/rustc_codegen_gcc/.github/workflows/release.yml index d5c06a836db..60e0943c87d 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/release.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/release.yml @@ -13,7 +13,7 @@ env: jobs: build: - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 strategy: fail-fast: false @@ -54,6 +54,7 @@ jobs: run: | ./y.sh prepare --only-libcore EMBED_LTO_BITCODE=1 ./y.sh build --sysroot --release --release-sysroot + ./y.sh test --mini-tests cargo test ./y.sh clean all @@ -70,4 +71,9 @@ jobs: run: | # FIXME(antoyo): we cannot enable LTO for stdarch tests currently because of some failing LTO tests using proc-macros. echo -n 'lto = "fat"' >> build_system/build_sysroot/Cargo.toml - EMBED_LTO_BITCODE=1 ./y.sh test --release --clean --release-sysroot --build-sysroot ${{ matrix.commands }} + EMBED_LTO_BITCODE=1 ./y.sh test --release --clean --release-sysroot --build-sysroot --keep-lto-tests ${{ matrix.commands }} + + - name: Run y.sh cargo build + run: | + EMBED_LTO_BITCODE=1 CHANNEL="release" ./y.sh cargo build --release --manifest-path tests/hello-world/Cargo.toml + # TODO: grep the asm output for "call my_func" and fail if it is found. diff --git a/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml b/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml index d8818eefa96..d5ae6144496 100644 --- a/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml +++ b/compiler/rustc_codegen_gcc/.github/workflows/stdarch.yml @@ -73,10 +73,6 @@ jobs: echo "LD_LIBRARY_PATH="$(./y.sh info | grep -v Using) >> $GITHUB_ENV echo "LIBRARY_PATH="$(./y.sh info | grep -v Using) >> $GITHUB_ENV - - name: Build (part 2) - run: | - cargo test - - name: Clean if: ${{ !matrix.cargo_runner }} run: | @@ -92,6 +88,7 @@ jobs: if: ${{ !matrix.cargo_runner }} run: | ./y.sh test --release --clean --release-sysroot --build-sysroot --mini-tests --std-tests --test-libcore + cargo test - name: Run stdarch tests if: ${{ !matrix.cargo_runner }} diff --git a/compiler/rustc_codegen_gcc/.rustfmt.toml b/compiler/rustc_codegen_gcc/.rustfmt.toml index 725aec25a07..a11bc41680d 100644 --- a/compiler/rustc_codegen_gcc/.rustfmt.toml +++ b/compiler/rustc_codegen_gcc/.rustfmt.toml @@ -1,3 +1,5 @@ -version = "Two" +style_edition = "2024" use_small_heuristics = "Max" merge_derives = false +group_imports = "StdExternalCrate" +imports_granularity = "Module" diff --git a/compiler/rustc_codegen_gcc/Cargo.lock b/compiler/rustc_codegen_gcc/Cargo.lock index 6b06e7d7f27..636e75b94a3 100644 --- a/compiler/rustc_codegen_gcc/Cargo.lock +++ b/compiler/rustc_codegen_gcc/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3 +version = 4 [[package]] name = "aho-corasick" @@ -12,12 +12,40 @@ dependencies = [ ] [[package]] +name = "bitflags" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b048fb63fd8b5923fc5aa7b340d8e156aec7ec02f0c78fa8a6ddc2613f6f71de" + +[[package]] name = "boml" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85fdb93f04c73bff54305fa437ffea5449c41edcaadfe882f35836206b166ac5" [[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "errno" +version = "0.3.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "33d852cb9b869c2a9b3df2f71a3074817f01e1844f839a144f5fcef059a4eb5d" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] name = "fm" version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -28,18 +56,18 @@ dependencies = [ [[package]] name = "gccjit" -version = "2.2.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4bb376e98c82d9284c3a17fc1d6bf9bc921055418950238d7a553c27a7e1f6ab" +checksum = "72fd91f4adbf02b53cfc73c97bc33c5f253009043f30c56a5ec08dd5c8094dc8" dependencies = [ "gccjit_sys", ] [[package]] name = "gccjit_sys" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93b4b1be553b5df790bf25ca2a1d6add81727dc29f8d5c8742468ed306d621d1" +checksum = "0fb7b8f48a75e2cfe78c3d9a980b32771c34ffd12d196021ab3f98c49fbd2f0d" dependencies = [ "libc", ] @@ -77,9 +105,15 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.150" +version = "0.2.168" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" +checksum = "5aaeb2981e0606ca11d79718f8bb01164f1d6ed75080182d3abf017e6d244b6d" + +[[package]] +name = "linux-raw-sys" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" [[package]] name = "memchr" @@ -98,6 +132,12 @@ dependencies = [ ] [[package]] +name = "once_cell" +version = "1.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" + +[[package]] name = "regex" version = "1.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -121,6 +161,20 @@ dependencies = [ "boml", "gccjit", "lang_tester", + "tempfile", +] + +[[package]] +name = "rustix" +version = "0.38.42" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f93dc38ecbab2eb790ff964bb77fa94faf256fd3e73285fd7ba0903b76bedb85" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", ] [[package]] @@ -133,6 +187,19 @@ dependencies = [ ] [[package]] +name = "tempfile" +version = "3.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "28cce251fcbc87fac86a866eeb0d6c2d536fc16d06f184bb61aeae11aa4cee0c" +dependencies = [ + "cfg-if", + "fastrand", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] name = 
"termcolor" version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" @@ -205,3 +272,76 @@ name = "winapi-x86_64-pc-windows-gnu" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_gnullvm", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" + +[[package]] +name = "windows_i686_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" + +[[package]] +name = "windows_i686_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.52.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" diff --git a/compiler/rustc_codegen_gcc/Cargo.toml b/compiler/rustc_codegen_gcc/Cargo.toml index 4828b7ddb16..63d37358561 100644 --- a/compiler/rustc_codegen_gcc/Cargo.toml +++ b/compiler/rustc_codegen_gcc/Cargo.toml @@ -22,15 +22,16 @@ master = ["gccjit/master"] default = ["master"] [dependencies] -gccjit = "2.2" +gccjit = "2.4" #gccjit = { git = "https://github.com/rust-lang/gccjit.rs" } # Local copy. #gccjit = { path = "../gccjit.rs" } [dev-dependencies] -lang_tester = "0.8.0" boml = "0.3.1" +lang_tester = "0.8.0" +tempfile = "3.7.1" [profile.dev] # By compiling dependencies with optimizations, performing tests gets much faster. 
diff --git a/compiler/rustc_codegen_gcc/build_system/src/test.rs b/compiler/rustc_codegen_gcc/build_system/src/test.rs index c69e240c01d..7cc7336612c 100644 --- a/compiler/rustc_codegen_gcc/build_system/src/test.rs +++ b/compiler/rustc_codegen_gcc/build_system/src/test.rs @@ -93,6 +93,7 @@ struct TestArg { sysroot_panic_abort: bool, config_info: ConfigInfo, sysroot_features: Vec<String>, + keep_lto_tests: bool, } impl TestArg { @@ -128,6 +129,9 @@ impl TestArg { "--sysroot-panic-abort" => { test_arg.sysroot_panic_abort = true; } + "--keep-lto-tests" => { + test_arg.keep_lto_tests = true; + } "--sysroot-features" => match args.next() { Some(feature) if !feature.is_empty() => { test_arg.sysroot_features.push(feature); @@ -194,7 +198,7 @@ fn build_if_no_backend(env: &Env, args: &TestArg) -> Result<(), String> { } fn clean(_env: &Env, args: &TestArg) -> Result<(), String> { - let _ = std::fs::remove_dir_all(&args.config_info.cargo_target_dir); + let _ = remove_dir_all(&args.config_info.cargo_target_dir); let path = Path::new(&args.config_info.cargo_target_dir).join("gccjit"); create_dir(&path) } @@ -641,7 +645,7 @@ fn test_projects(env: &Env, args: &TestArg) -> Result<(), String> { //failing test is fixed upstream. //"https://github.com/marshallpierce/rust-base64", // FIXME: one test is OOM-killed. // TODO: ignore the base64 test that is OOM-killed. - "https://github.com/time-rs/time", + //"https://github.com/time-rs/time", // FIXME: one test fails (https://github.com/time-rs/time/issues/719). "https://github.com/rust-lang/log", "https://github.com/bitflags/bitflags", //"https://github.com/serde-rs/serde", // FIXME: one test fails. @@ -835,8 +839,7 @@ fn valid_ui_error_pattern_test(file: &str) -> bool { .any(|to_ignore| file.ends_with(to_ignore)) } -#[rustfmt::skip] -fn contains_ui_error_patterns(file_path: &Path) -> Result<bool, String> { +fn contains_ui_error_patterns(file_path: &Path, keep_lto_tests: bool) -> Result<bool, String> { // Tests generating errors. let file = File::open(file_path) .map_err(|error| format!("Failed to read `{}`: {:?}", file_path.display(), error))?; @@ -849,22 +852,38 @@ fn contains_ui_error_patterns(file_path: &Path) -> Result<bool, String> { "//@ error-pattern:", "//@ build-fail", "//@ run-fail", + "//@ known-bug", "-Cllvm-args", "//~", "thread", ] - .iter() - .any(|check| line.contains(check)) + .iter() + .any(|check| line.contains(check)) + { + return Ok(true); + } + + if !keep_lto_tests + && (line.contains("-Clto") + || line.contains("-C lto") + || line.contains("compile-flags: -Clinker-plugin-lto")) + && !line.contains("-Clto=thin") { return Ok(true); } + if line.contains("//[") && line.contains("]~") { return Ok(true); } } - if file_path.display().to_string().contains("ambiguous-4-extern.rs") { + let file_path = file_path.display().to_string(); + if file_path.contains("ambiguous-4-extern.rs") { eprintln!("nothing found for {file_path:?}"); } + // The files in this directory contain errors. + if file_path.contains("/error-emitter/") { + return Ok(true); + } Ok(false) } @@ -903,7 +922,7 @@ where rust_path.join("tests/ui"), &mut |_dir| Ok(()), &mut |file_path| { - if contains_ui_error_patterns(file_path)? { + if contains_ui_error_patterns(file_path, args.keep_lto_tests)? 
{ Ok(()) } else { remove_file(file_path).map_err(|e| e.to_string()) @@ -928,7 +947,7 @@ where .iter() .any(|name| *name == dir_name) { - std::fs::remove_dir_all(dir).map_err(|error| { + remove_dir_all(dir).map_err(|error| { format!("Failed to remove folder `{}`: {:?}", dir.display(), error) })?; } @@ -940,27 +959,42 @@ where // These two functions are used to remove files that are known to not be working currently // with the GCC backend to reduce noise. - fn dir_handling(dir: &Path) -> Result<(), String> { - if dir.file_name().map(|name| name == "auxiliary").unwrap_or(true) { - return Ok(()); - } + fn dir_handling(keep_lto_tests: bool) -> impl Fn(&Path) -> Result<(), String> { + move |dir| { + if dir.file_name().map(|name| name == "auxiliary").unwrap_or(true) { + return Ok(()); + } - walk_dir(dir, &mut dir_handling, &mut file_handling, false) - } - fn file_handling(file_path: &Path) -> Result<(), String> { - if !file_path.extension().map(|extension| extension == "rs").unwrap_or(false) { - return Ok(()); + walk_dir( + dir, + &mut dir_handling(keep_lto_tests), + &mut file_handling(keep_lto_tests), + false, + ) } - let path_str = file_path.display().to_string().replace("\\", "/"); - if valid_ui_error_pattern_test(&path_str) { - return Ok(()); - } else if contains_ui_error_patterns(file_path)? { - return remove_file(&file_path); + } + + fn file_handling(keep_lto_tests: bool) -> impl Fn(&Path) -> Result<(), String> { + move |file_path| { + if !file_path.extension().map(|extension| extension == "rs").unwrap_or(false) { + return Ok(()); + } + let path_str = file_path.display().to_string().replace("\\", "/"); + if valid_ui_error_pattern_test(&path_str) { + return Ok(()); + } else if contains_ui_error_patterns(file_path, keep_lto_tests)? { + return remove_file(&file_path); + } + Ok(()) } - Ok(()) } - walk_dir(rust_path.join("tests/ui"), &mut dir_handling, &mut file_handling, false)?; + walk_dir( + rust_path.join("tests/ui"), + &mut dir_handling(args.keep_lto_tests), + &mut file_handling(args.keep_lto_tests), + false, + )?; } let nb_parts = args.nb_parts.unwrap_or(0); if nb_parts > 0 { @@ -1173,7 +1207,7 @@ fn remove_files_callback<'a>( files.split('\n').map(|line| line.trim()).filter(|line| !line.is_empty()) { let path = rust_path.join(file); - if let Err(e) = std::fs::remove_dir_all(&path) { + if let Err(e) = remove_dir_all(&path) { println!("Failed to remove directory `{}`: {}", path.display(), e); } } diff --git a/compiler/rustc_codegen_gcc/example/mini_core.rs b/compiler/rustc_codegen_gcc/example/mini_core.rs index cdd151613df..5a4ee0a198c 100644 --- a/compiler/rustc_codegen_gcc/example/mini_core.rs +++ b/compiler/rustc_codegen_gcc/example/mini_core.rs @@ -170,6 +170,14 @@ impl Add for usize { } } +impl Add for isize { + type Output = Self; + + fn add(self, rhs: Self) -> Self { + self + rhs + } +} + #[lang = "sub"] pub trait Sub<RHS = Self> { type Output; @@ -681,7 +689,7 @@ impl<T> Index<usize> for [T] { } } -extern { +extern "C" { type VaListImpl; } diff --git a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs index dcfa34cb729..1d51e0a1856 100644 --- a/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs +++ b/compiler/rustc_codegen_gcc/example/mini_core_hello_world.rs @@ -258,13 +258,13 @@ fn main() { assert_eq!(((|()| 42u8) as fn(()) -> u8)(()), 42); - extern { + extern "C" { #[linkage = "weak"] static ABC: *const u8; } { - extern { + extern "C" { #[linkage = "weak"] static ABC: *const u8; } diff --git 
a/compiler/rustc_codegen_gcc/example/mod_bench.rs b/compiler/rustc_codegen_gcc/example/mod_bench.rs index cae911c1073..e8a9cade747 100644 --- a/compiler/rustc_codegen_gcc/example/mod_bench.rs +++ b/compiler/rustc_codegen_gcc/example/mod_bench.rs @@ -3,7 +3,7 @@ #![allow(internal_features)] #[link(name = "c")] -extern {} +extern "C" {} #[panic_handler] fn panic_handler(_: &core::panic::PanicInfo<'_>) -> ! { diff --git a/compiler/rustc_codegen_gcc/example/std_example.rs b/compiler/rustc_codegen_gcc/example/std_example.rs index 9e43b4635f0..5fa1e0afb06 100644 --- a/compiler/rustc_codegen_gcc/example/std_example.rs +++ b/compiler/rustc_codegen_gcc/example/std_example.rs @@ -7,7 +7,7 @@ use std::arch::x86_64::*; use std::io::Write; use std::ops::Coroutine; -extern { +extern "C" { pub fn printf(format: *const i8, ...) -> i32; } diff --git a/compiler/rustc_codegen_gcc/libgccjit.version b/compiler/rustc_codegen_gcc/libgccjit.version index b9bbbd324c3..ff58accec1d 100644 --- a/compiler/rustc_codegen_gcc/libgccjit.version +++ b/compiler/rustc_codegen_gcc/libgccjit.version @@ -1 +1 @@ -e744a9459d33864067214741daf5c5bc2a7b88c6 +45648c2edd4ecd862d9f08196d3d6c6ccba79f07 diff --git a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch index b2ab05691ec..70e3e2ba7fe 100644 --- a/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch +++ b/compiler/rustc_codegen_gcc/patches/0022-core-Disable-not-compiling-tests.patch @@ -1,7 +1,7 @@ -From 18793c6109890493ceb3ff36549849a36e3d8022 Mon Sep 17 00:00:00 2001 +From af0e237f056fa838c77463381a19b0dc993c0a35 Mon Sep 17 00:00:00 2001 From: None <none@example.com> Date: Sun, 1 Sep 2024 11:42:17 -0400 -Subject: [PATCH] [core] Disable not compiling tests +Subject: [PATCH] Disable not compiling tests --- library/core/tests/Cargo.toml | 14 ++++++++++++++ @@ -30,14 +30,15 @@ index 0000000..ca326ac +rand = { version = "0.8.5", default-features = false } +rand_xorshift = { version = "0.3.0", default-features = false } diff --git a/library/core/tests/lib.rs b/library/core/tests/lib.rs -index 1e336bf..5800ebb 100644 +index a4a7946..ecfe43f 100644 --- a/library/core/tests/lib.rs +++ b/library/core/tests/lib.rs @@ -1,4 +1,5 @@ // tidy-alphabetical-start +#![cfg(test)] - #![cfg_attr(bootstrap, feature(offset_of_nested))] #![cfg_attr(target_has_atomic = "128", feature(integer_atomics))] #![cfg_attr(test, feature(cfg_match))] --- -2.46.0 + #![feature(alloc_layout_extra)] +-- +2.47.1 + diff --git a/compiler/rustc_codegen_gcc/patches/libgccjit12/0001-core-Disable-portable-simd-test.patch b/compiler/rustc_codegen_gcc/patches/libgccjit12/0001-core-Disable-portable-simd-test.patch index 01461987ffb..9ef5e0e4f46 100644 --- a/compiler/rustc_codegen_gcc/patches/libgccjit12/0001-core-Disable-portable-simd-test.patch +++ b/compiler/rustc_codegen_gcc/patches/libgccjit12/0001-core-Disable-portable-simd-test.patch @@ -27,5 +27,4 @@ index b71786c..cf484d5 100644 mod slice; mod str; mod str_lossy; --- -2.45.2 +-- 2.45.2 diff --git a/compiler/rustc_codegen_gcc/rust-toolchain b/compiler/rustc_codegen_gcc/rust-toolchain index dca3b0c22e4..940b3de9f74 100644 --- a/compiler/rustc_codegen_gcc/rust-toolchain +++ b/compiler/rustc_codegen_gcc/rust-toolchain @@ -1,3 +1,3 @@ [toolchain] -channel = "nightly-2024-08-11" +channel = "nightly-2025-01-12" components = ["rust-src", "rustc-dev", "llvm-tools-preview"] diff --git a/compiler/rustc_codegen_gcc/src/allocator.rs 
b/compiler/rustc_codegen_gcc/src/allocator.rs index f13a75648ae..416f3231a13 100644 --- a/compiler/rustc_codegen_gcc/src/allocator.rs +++ b/compiler/rustc_codegen_gcc/src/allocator.rs @@ -1,6 +1,6 @@ -#[cfg(feature = "master")] -use gccjit::FnAttribute; use gccjit::{Context, FunctionType, GlobalKind, ToRValue, Type}; +#[cfg(feature = "master")] +use gccjit::{FnAttribute, VarAttribute}; use rustc_ast::expand::allocator::{ ALLOCATOR_METHODS, AllocatorKind, AllocatorTy, NO_ALLOC_SHIM_IS_UNSTABLE, alloc_error_handler_name, default_fn_name, global_fn_name, @@ -10,6 +10,8 @@ use rustc_middle::ty::TyCtxt; use rustc_session::config::OomStrategy; use crate::GccContext; +#[cfg(feature = "master")] +use crate::base::symbol_visibility_to_gcc; pub(crate) unsafe fn codegen( tcx: TyCtxt<'_>, @@ -70,12 +72,20 @@ pub(crate) unsafe fn codegen( let name = OomStrategy::SYMBOL.to_string(); let global = context.new_global(None, GlobalKind::Exported, i8, name); + #[cfg(feature = "master")] + global.add_attribute(VarAttribute::Visibility(symbol_visibility_to_gcc( + tcx.sess.default_visibility(), + ))); let value = tcx.sess.opts.unstable_opts.oom.should_panic(); let value = context.new_rvalue_from_int(i8, value as i32); global.global_set_initializer_rvalue(value); let name = NO_ALLOC_SHIM_IS_UNSTABLE.to_string(); let global = context.new_global(None, GlobalKind::Exported, i8, name); + #[cfg(feature = "master")] + global.add_attribute(VarAttribute::Visibility(symbol_visibility_to_gcc( + tcx.sess.default_visibility(), + ))); let value = context.new_rvalue_from_int(i8, 0); global.global_set_initializer_rvalue(value); } @@ -105,15 +115,9 @@ fn create_wrapper_function( ); #[cfg(feature = "master")] - match tcx.sess.default_visibility() { - rustc_target::spec::SymbolVisibility::Hidden => { - func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Hidden)) - } - rustc_target::spec::SymbolVisibility::Protected => { - func.add_attribute(FnAttribute::Visibility(gccjit::Visibility::Protected)) - } - rustc_target::spec::SymbolVisibility::Interposable => {} - } + func.add_attribute(FnAttribute::Visibility(symbol_visibility_to_gcc( + tcx.sess.default_visibility(), + ))); if tcx.sess.must_emit_unwind_tables() { // TODO(antoyo): emit unwind tables. diff --git a/compiler/rustc_codegen_gcc/src/attributes.rs b/compiler/rustc_codegen_gcc/src/attributes.rs index 028a5ab5f71..69b04dd5796 100644 --- a/compiler/rustc_codegen_gcc/src/attributes.rs +++ b/compiler/rustc_codegen_gcc/src/attributes.rs @@ -20,7 +20,7 @@ fn inline_attr<'gcc, 'tcx>( ) -> Option<FnAttribute<'gcc>> { match inline { InlineAttr::Hint => Some(FnAttribute::Inline), - InlineAttr::Always => Some(FnAttribute::AlwaysInline), + InlineAttr::Always | InlineAttr::Force { .. 
} => Some(FnAttribute::AlwaysInline), InlineAttr::Never => { if cx.sess().target.arch != "amdgpu" { Some(FnAttribute::NoInline) diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index ed92f9c5241..e419bd18099 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -35,16 +35,13 @@ use rustc_middle::bug; use rustc_middle::dep_graph::WorkProduct; use rustc_middle::middle::exported_symbols::{SymbolExportInfo, SymbolExportLevel}; use rustc_session::config::{CrateType, Lto}; +use rustc_target::spec::RelocModel; use tempfile::{TempDir, tempdir}; use crate::back::write::save_temp_bitcode; use crate::errors::{DynamicLinkingWithLTO, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib}; use crate::{GccCodegenBackend, GccContext, SyncContext, to_gcc_opt_level}; -/// We keep track of the computed LTO cache keys from the previous -/// session to determine which CGUs we can reuse. -//pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; - pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { match crate_type { CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true, @@ -54,7 +51,7 @@ pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { struct LtoData { // TODO(antoyo): use symbols_below_threshold. - //symbols_below_threshold: Vec<CString>, + //symbols_below_threshold: Vec<String>, upstream_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir, } @@ -83,7 +80,7 @@ fn prepare_lto( let symbol_filter = &|&(ref name, info): &(String, SymbolExportInfo)| { if info.level.is_below_threshold(export_threshold) || info.used { - Some(CString::new(name.as_str()).unwrap()) + Some(name.clone()) } else { None } @@ -91,7 +88,7 @@ fn prepare_lto( let exported_symbols = cgcx.exported_symbols.as_ref().expect("needs exported symbols for LTO"); let mut symbols_below_threshold = { let _timer = cgcx.prof.generic_activity("GCC_lto_generate_symbols_below_threshold"); - exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<CString>>() + exported_symbols[&LOCAL_CRATE].iter().filter_map(symbol_filter).collect::<Vec<String>>() }; info!("{} symbols to preserve in this crate", symbols_below_threshold.len()); @@ -159,11 +156,7 @@ fn prepare_lto( } } - Ok(LtoData { - //symbols_below_threshold, - upstream_modules, - tmp_path, - }) + Ok(LtoData { upstream_modules, tmp_path }) } fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> { @@ -191,7 +184,7 @@ pub(crate) fn run_fat( cached_modules, lto_data.upstream_modules, lto_data.tmp_path, - //&symbols_below_threshold, + //&lto_data.symbols_below_threshold, ) } @@ -202,7 +195,7 @@ fn fat_lto( cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, mut serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir, - //symbols_below_threshold: &[*const libc::c_char], + //symbols_below_threshold: &[String], ) -> Result<LtoModuleCodegen<GccCodegenBackend>, FatalError> { let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module"); info!("going for a fat lto"); @@ -327,6 +320,7 @@ fn fat_lto( ptr as *const *const libc::c_char, symbols_below_threshold.len() as libc::size_t, );*/ + save_temp_bitcode(cgcx, &module, "lto.after-restriction"); //} } @@ -363,8 +357,6 @@ pub(crate) fn run_thin( let dcx = cgcx.create_dcx(); let dcx = dcx.handle(); let lto_data = prepare_lto(cgcx, dcx)?; - /*let symbols_below_threshold =
symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::<Vec<_>>();*/ if cgcx.opts.cg.linker_plugin_lto.enabled() { unreachable!( "We should never reach this case if the LTO step \ @@ -377,7 +369,8 @@ pub(crate) fn run_thin( modules, lto_data.upstream_modules, lto_data.tmp_path, - cached_modules, /*, &symbols_below_threshold*/ + cached_modules, + //&lto_data.symbols_below_threshold, ) } @@ -428,7 +421,7 @@ fn thin_lto( serialized_modules: Vec<(SerializedModule<ModuleBuffer>, CString)>, tmp_path: TempDir, cached_modules: Vec<(SerializedModule<ModuleBuffer>, WorkProduct)>, - //symbols_below_threshold: &[*const libc::c_char], + //_symbols_below_threshold: &[String], ) -> Result<(Vec<LtoModuleCodegen<GccCodegenBackend>>, Vec<WorkProduct>), FatalError> { let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); info!("going for that thin, thin LTO"); @@ -640,7 +633,13 @@ pub unsafe fn optimize_thin_module( } }; let module = ModuleCodegen { - module_llvm: GccContext { context, should_combine_object_files, temp_dir: None }, + module_llvm: GccContext { + context, + should_combine_object_files, + // TODO(antoyo): use the correct relocation model here. + relocation_model: RelocModel::Pic, + temp_dir: None, + }, name: thin_module.name().to_string(), kind: ModuleKind::Regular, }; @@ -660,9 +659,7 @@ pub unsafe fn optimize_thin_module( { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); - if !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) { - return Err(write::llvm_err(&dcx, LlvmError::PrepareThinLtoModule)); - } + unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) }; save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); } diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs index 802968979c7..51c5ba73e32 100644 --- a/compiler/rustc_codegen_gcc/src/back/write.rs +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -1,6 +1,6 @@ use std::{env, fs}; -use gccjit::OutputKind; +use gccjit::{Context, OutputKind}; use rustc_codegen_ssa::back::link::ensure_removed; use rustc_codegen_ssa::back::write::{BitcodeSection, CodegenContext, EmitObj, ModuleConfig}; use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; @@ -10,6 +10,7 @@ use rustc_session::config::OutputType; use rustc_span::fatal_error::FatalError; use rustc_target::spec::SplitDebuginfo; +use crate::base::add_pic_option; use crate::errors::CopyBitcode; use crate::{GccCodegenBackend, GccContext}; @@ -31,51 +32,87 @@ pub(crate) unsafe fn codegen( // NOTE: Only generate object files with GIMPLE when this environment variable is set for // now because this requires a particular setup (same gcc/lto1/lto-wrapper commit as libgccjit). - // TODO: remove this environment variable. + // TODO(antoyo): remove this environment variable.
let fat_lto = env::var("EMBED_LTO_BITCODE").as_deref() == Ok("1"); let bc_out = cgcx.output_filenames.temp_path(OutputType::Bitcode, module_name); let obj_out = cgcx.output_filenames.temp_path(OutputType::Object, module_name); - if config.bitcode_needed() && fat_lto { - let _timer = cgcx - .prof - .generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name); - - // TODO(antoyo) - /*if let Some(bitcode_filename) = bc_out.file_name() { - cgcx.prof.artifact_size( - "llvm_bitcode", - bitcode_filename.to_string_lossy(), - data.len() as u64, - ); - }*/ - - if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + if config.bitcode_needed() { + if fat_lto { let _timer = cgcx .prof - .generic_activity_with_arg("GCC_module_codegen_emit_bitcode", &*module.name); - context.add_command_line_option("-flto=auto"); - context.add_command_line_option("-flto-partition=one"); - // TODO: remove since we don't want fat objects when it is for Bitcode only. - context.add_command_line_option("-ffat-lto-objects"); - context - .compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str")); - } + .generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name); - if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { - let _timer = cgcx - .prof - .generic_activity_with_arg("GCC_module_codegen_embed_bitcode", &*module.name); - // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes? - //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data); - - context.add_command_line_option("-flto=auto"); - context.add_command_line_option("-flto-partition=one"); - context.add_command_line_option("-ffat-lto-objects"); - // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument). - context - .compile_to_file(OutputKind::ObjectFile, bc_out.to_str().expect("path to str")); + // TODO(antoyo) + /*if let Some(bitcode_filename) = bc_out.file_name() { + cgcx.prof.artifact_size( + "llvm_bitcode", + bitcode_filename.to_string_lossy(), + data.len() as u64, + ); + }*/ + + if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + let _timer = cgcx.prof.generic_activity_with_arg( + "GCC_module_codegen_emit_bitcode", + &*module.name, + ); + context.add_command_line_option("-flto=auto"); + context.add_command_line_option("-flto-partition=one"); + // TODO(antoyo): remove since we don't want fat objects when it is for Bitcode only. + context.add_command_line_option("-ffat-lto-objects"); + context.compile_to_file( + OutputKind::ObjectFile, + bc_out.to_str().expect("path to str"), + ); + } + + if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { + let _timer = cgcx.prof.generic_activity_with_arg( + "GCC_module_codegen_embed_bitcode", + &*module.name, + ); + // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes? + //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data); + + context.add_command_line_option("-flto=auto"); + context.add_command_line_option("-flto-partition=one"); + context.add_command_line_option("-ffat-lto-objects"); + // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument). 
+ context.compile_to_file( + OutputKind::ObjectFile, + bc_out.to_str().expect("path to str"), + ); + } + } else { + if config.emit_bc || config.emit_obj == EmitObj::Bitcode { + let _timer = cgcx.prof.generic_activity_with_arg( + "GCC_module_codegen_emit_bitcode", + &*module.name, + ); + context.compile_to_file( + OutputKind::ObjectFile, + bc_out.to_str().expect("path to str"), + ); + } + + if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { + // TODO(antoyo): we might want to emit to emit an error here, saying to set the + // environment variable EMBED_LTO_BITCODE. + let _timer = cgcx.prof.generic_activity_with_arg( + "GCC_module_codegen_embed_bitcode", + &*module.name, + ); + // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes? + //embed_bitcode(cgcx, llcx, llmod, &config.bc_cmdline, data); + + // TODO(antoyo): Send -plugin/usr/lib/gcc/x86_64-pc-linux-gnu/11.1.0/liblto_plugin.so to linker (this should be done when specifying the appropriate rustc cli argument). + context.compile_to_file( + OutputKind::ObjectFile, + bc_out.to_str().expect("path to str"), + ); + } + } } @@ -123,6 +160,8 @@ pub(crate) unsafe fn codegen( // NOTE: without -fuse-linker-plugin, we get the following error: // lto1: internal compiler error: decompressed stream: Destination buffer is too small + // TODO(antoyo): since we do not do LTO when the linker is invoked anymore, perhaps + // the following flag is not necessary anymore. context.add_driver_option("-fuse-linker-plugin"); } @@ -131,11 +170,43 @@ pub(crate) unsafe fn codegen( // /usr/bin/ld: cannot find -lgcc_s: No such file or directory context.add_driver_option("-nostdlib"); - // NOTE: this doesn't actually generate an executable. With the above flags, it combines the .o files together in another .o. - context.compile_to_file( - OutputKind::Executable, - obj_out.to_str().expect("path to str"), - ); + let path = obj_out.to_str().expect("path to str"); + + if fat_lto { + let lto_path = format!("{}.lto", path); + // FIXME(antoyo): The LTO frontend generates the following warning: + // ../build_sysroot/sysroot_src/library/core/src/num/dec2flt/lemire.rs:150:15: warning: type of ‘_ZN4core3num7dec2flt5table17POWER_OF_FIVE_12817ha449a68fb31379e4E’ does not match original declaration [-Wlto-type-mismatch] + // 150 | let (lo5, hi5) = POWER_OF_FIVE_128[index]; + // | ^ + // lto1: note: ‘_ZN4core3num7dec2flt5table17POWER_OF_FIVE_12817ha449a68fb31379e4E’ was previously declared here + // + // This option is to mute it to make the UI tests pass with LTO enabled. + context.add_driver_option("-Wno-lto-type-mismatch"); + // NOTE: this doesn't actually generate an executable. With the above + // flags, it combines the .o files together in another .o. + context.compile_to_file(OutputKind::Executable, &lto_path); + + let context = Context::default(); + if cgcx.target_arch == "x86" || cgcx.target_arch == "x86_64" { + // NOTE: it seems we need to use add_driver_option instead of + // add_command_line_option here because we use the LTO frontend via gcc. + context.add_driver_option("-masm=intel"); + } + + // NOTE: these two options are needed to invoke LTO to produce an object file. + // We need to initiate a second compilation because the arguments "-x lto" + // needs to be at the very beginning.
+ context.add_driver_option("-x"); + context.add_driver_option("lto"); + add_pic_option(&context, module.module_llvm.relocation_model); + context.add_driver_option(lto_path); + + context.compile_to_file(OutputKind::ObjectFile, path); + } else { + // NOTE: this doesn't actually generate an executable. With the above + // flags, it combines the .o files together in another .o. + context.compile_to_file(OutputKind::Executable, path); + } } else { context.compile_to_file( OutputKind::ObjectFile, diff --git a/compiler/rustc_codegen_gcc/src/base.rs b/compiler/rustc_codegen_gcc/src/base.rs index 18aa32754e1..c9701fb9885 100644 --- a/compiler/rustc_codegen_gcc/src/base.rs +++ b/compiler/rustc_codegen_gcc/src/base.rs @@ -3,7 +3,7 @@ use std::env; use std::sync::Arc; use std::time::Instant; -use gccjit::{CType, FunctionType, GlobalKind}; +use gccjit::{CType, Context, FunctionType, GlobalKind}; use rustc_codegen_ssa::base::maybe_create_entry_wrapper; use rustc_codegen_ssa::mono_item::MonoItemExt; use rustc_codegen_ssa::traits::DebugInfoCodegenMethods; @@ -15,21 +15,32 @@ use rustc_middle::mir::mono::Visibility; use rustc_middle::ty::TyCtxt; use rustc_session::config::DebugInfo; use rustc_span::Symbol; -use rustc_target::spec::PanicStrategy; +#[cfg(feature = "master")] +use rustc_target::spec::SymbolVisibility; +use rustc_target::spec::{PanicStrategy, RelocModel}; use crate::builder::Builder; use crate::context::CodegenCx; use crate::{GccContext, LockedTargetInfo, SyncContext, gcc_util, new_context}; #[cfg(feature = "master")] -pub fn visibility_to_gcc(linkage: Visibility) -> gccjit::Visibility { - match linkage { +pub fn visibility_to_gcc(visibility: Visibility) -> gccjit::Visibility { + match visibility { Visibility::Default => gccjit::Visibility::Default, Visibility::Hidden => gccjit::Visibility::Hidden, Visibility::Protected => gccjit::Visibility::Protected, } } +#[cfg(feature = "master")] +pub fn symbol_visibility_to_gcc(visibility: SymbolVisibility) -> gccjit::Visibility { + match visibility { + SymbolVisibility::Hidden => gccjit::Visibility::Hidden, + SymbolVisibility::Protected => gccjit::Visibility::Protected, + SymbolVisibility::Interposable => gccjit::Visibility::Default, + } +} + pub fn global_linkage_to_gcc(linkage: Linkage) -> GlobalKind { match linkage { Linkage::External => GlobalKind::Imported, @@ -140,9 +151,7 @@ pub fn compile_codegen_unit( }); } - if tcx.sess.relocation_model() == rustc_target::spec::RelocModel::Static { - context.add_command_line_option("-fno-pie"); - } + add_pic_option(&context, tcx.sess.relocation_model()); let target_cpu = gcc_util::target_cpu(tcx.sess); if target_cpu != "generic" { @@ -199,12 +208,13 @@ pub fn compile_codegen_unit( let f32_type_supported = target_info.supports_target_dependent_type(CType::Float32); let f64_type_supported = target_info.supports_target_dependent_type(CType::Float64); let f128_type_supported = target_info.supports_target_dependent_type(CType::Float128); + let u128_type_supported = target_info.supports_target_dependent_type(CType::UInt128t); // TODO: improve this to avoid passing that many arguments. 
let cx = CodegenCx::new( &context, cgu, tcx, - target_info.supports_128bit_int(), + u128_type_supported, f16_type_supported, f32_type_supported, f64_type_supported, @@ -235,6 +245,7 @@ pub fn compile_codegen_unit( name: cgu_name.to_string(), module_llvm: GccContext { context: Arc::new(SyncContext::new(context)), + relocation_model: tcx.sess.relocation_model(), should_combine_object_files: false, temp_dir: None, }, @@ -244,3 +255,24 @@ pub fn compile_codegen_unit( (module, cost) } + +pub fn add_pic_option<'gcc>(context: &Context<'gcc>, relocation_model: RelocModel) { + match relocation_model { + rustc_target::spec::RelocModel::Static => { + context.add_command_line_option("-fno-pie"); + context.add_driver_option("-fno-pie"); + } + rustc_target::spec::RelocModel::Pic => { + context.add_command_line_option("-fPIC"); + // NOTE: we use both add_command_line_option and add_driver_option because the usage in + // this module (compile_codegen_unit) requires add_command_line_option while the usage + // in the back::write module (codegen) requires add_driver_option. + context.add_driver_option("-fPIC"); + } + rustc_target::spec::RelocModel::Pie => { + context.add_command_line_option("-fPIE"); + context.add_driver_option("-fPIE"); + } + model => eprintln!("Unsupported relocation model: {:?}", model), + } +} diff --git a/compiler/rustc_codegen_gcc/src/builder.rs b/compiler/rustc_codegen_gcc/src/builder.rs index 9a142326ad1..89e5cf1b8c6 100644 --- a/compiler/rustc_codegen_gcc/src/builder.rs +++ b/compiler/rustc_codegen_gcc/src/builder.rs @@ -1102,18 +1102,24 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { val: RValue<'gcc>, ptr: RValue<'gcc>, align: Align, - _flags: MemFlags, + flags: MemFlags, ) -> RValue<'gcc> { let ptr = self.check_store(val, ptr); let destination = ptr.dereference(self.location); // NOTE: libgccjit does not support specifying the alignment on the assignment, so we cast // to type so it gets the proper alignment. let destination_type = destination.to_rvalue().get_type().unqualified(); - let aligned_type = destination_type.get_aligned(align.bytes()).make_pointer(); - let aligned_destination = self.cx.context.new_bitcast(self.location, ptr, aligned_type); - let aligned_destination = aligned_destination.dereference(self.location); - self.llbb().add_assignment(self.location, aligned_destination, val); - // TODO(antoyo): handle align and flags. + let align = if flags.contains(MemFlags::UNALIGNED) { 1 } else { align.bytes() }; + let mut modified_destination_type = destination_type.get_aligned(align); + if flags.contains(MemFlags::VOLATILE) { + modified_destination_type = modified_destination_type.make_volatile(); + } + + let modified_ptr = + self.cx.context.new_cast(self.location, ptr, modified_destination_type.make_pointer()); + let modified_destination = modified_ptr.dereference(self.location); + self.llbb().add_assignment(self.location, modified_destination, val); + // TODO(antoyo): handle `MemFlags::NONTEMPORAL`. // NOTE: dummy value here since it's never used. FIXME(antoyo): API should not return a value here? // When adding support for NONTEMPORAL, make sure to not just emit MOVNT on x86; see the // LLVM backend for details. 
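The aligned-store change just above works around libgccjit having no way to attach an alignment to an individual assignment: the memory flags are folded into the type of the destination pointer instead, so an `UNALIGNED` store degrades the alignment to one byte and a `VOLATILE` store marks the pointee type volatile before the cast and assignment. A minimal sketch of that decision with stand-in types (the real `MemFlags` and gccjit types are not used here):

```rust
// Stand-in sketch with hypothetical types; it only mirrors how the hunk above
// folds store flags into qualifiers on the destination type.
#[derive(Clone, Copy, PartialEq, Eq)]
enum MemFlag {
    Unaligned,
    Volatile,
}

/// Returns (effective alignment in bytes, whether the pointee is made volatile).
fn store_qualifiers(natural_align: u64, flags: &[MemFlag]) -> (u64, bool) {
    let align = if flags.contains(&MemFlag::Unaligned) { 1 } else { natural_align };
    let volatile = flags.contains(&MemFlag::Volatile);
    (align, volatile)
}

fn main() {
    // An unaligned store loses its natural alignment...
    assert_eq!(store_qualifiers(8, &[MemFlag::Unaligned]), (1, false));
    // ...while a volatile store keeps it but marks the access volatile.
    assert_eq!(store_qualifiers(4, &[MemFlag::Volatile]), (4, true));
}
```

In the patch itself, the qualified type is turned into a pointer type and the value is assigned through a cast of the original pointer, which is how the alignment and volatility actually reach GCC, since `add_assignment` takes no alignment argument.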
@@ -1236,13 +1242,13 @@ impl<'a, 'gcc, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'gcc, 'tcx> { } fn ptrtoint(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { - let usize_value = self.cx.const_bitcast(value, self.cx.type_isize()); + let usize_value = self.cx.context.new_cast(None, value, self.cx.type_isize()); self.intcast(usize_value, dest_ty, false) } fn inttoptr(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { let usize_value = self.intcast(value, self.cx.type_isize(), false); - self.cx.const_bitcast(usize_value, dest_ty) + self.cx.context.new_cast(None, usize_value, dest_ty) } fn bitcast(&mut self, value: RValue<'gcc>, dest_ty: Type<'gcc>) -> RValue<'gcc> { @@ -1901,6 +1907,15 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { v2: RValue<'gcc>, mask: RValue<'gcc>, ) -> RValue<'gcc> { + // NOTE: if the `mask` is a constant value, the following code will copy it in many places, + // which will make GCC create a lot (+4000) local variables in some cases. + // So we assign it to an explicit local variable once to avoid this. + let func = self.current_func(); + let mask_var = func.new_local(self.location, mask.get_type(), "mask"); + let block = self.block; + block.add_assignment(self.location, mask_var, mask); + let mask = mask_var.to_rvalue(); + // TODO(antoyo): use a recursive unqualified() here. let vector_type = v1.get_type().unqualified().dyncast_vector().expect("vector type"); let element_type = vector_type.get_element_type(); @@ -1917,18 +1932,35 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { self.int_type }; - let vector_type = - mask.get_type().dyncast_vector().expect("simd_shuffle mask should be of vector type"); - let mask_num_units = vector_type.get_num_units(); - let mut mask_elements = vec![]; - for i in 0..mask_num_units { - let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _); - mask_elements.push(self.context.new_cast( - self.location, - self.extract_element(mask, index).to_rvalue(), - mask_element_type, - )); - } + // NOTE: this condition is needed because we call shuffle_vector in the implementation of + // simd_gather. + let mut mask_elements = if let Some(vector_type) = mask.get_type().dyncast_vector() { + let mask_num_units = vector_type.get_num_units(); + let mut mask_elements = vec![]; + for i in 0..mask_num_units { + let index = self.context.new_rvalue_from_long(self.cx.type_u32(), i as _); + mask_elements.push(self.context.new_cast( + self.location, + self.extract_element(mask, index).to_rvalue(), + mask_element_type, + )); + } + mask_elements + } else { + let struct_type = mask.get_type().is_struct().expect("mask should be of struct type"); + let mask_num_units = struct_type.get_field_count(); + let mut mask_elements = vec![]; + for i in 0..mask_num_units { + let field = struct_type.get_field(i as i32); + mask_elements.push(self.context.new_cast( + self.location, + mask.access_field(self.location, field).to_rvalue(), + mask_element_type, + )); + } + mask_elements + }; + let mask_num_units = mask_elements.len(); // NOTE: the mask needs to be the same length as the input vectors, so add the missing // elements in the mask if needed. 
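The `shuffle_vector` hunk just above does two things: it binds the (possibly constant) mask to a single named local so the constant is not copied into every use site (which, per the new comment, can make GCC create thousands of locals), and it accepts the mask either as a real vector value or as a struct of lane fields, the latter being the shape in which `simd_gather` calls back into `shuffle_vector`. A rough sketch of that second point with made-up types (this is not the gccjit API):

```rust
// Hypothetical stand-in types; the sketch only shows extracting lane indices
// from a mask that may arrive either as a vector or as a struct of fields.
enum Mask {
    Vector(Vec<i64>),           // lanes stored as one vector value
    Struct(Vec<(String, i64)>), // lanes stored as named struct fields
}

fn mask_lanes(mask: &Mask) -> Vec<i64> {
    match mask {
        Mask::Vector(lanes) => lanes.clone(),
        Mask::Struct(fields) => fields.iter().map(|(_, lane)| *lane).collect(),
    }
}

fn main() {
    let as_vector = Mask::Vector(vec![0, 2, 1, 3]);
    let as_struct = Mask::Struct(vec![("f0".into(), 3), ("f1".into(), 1)]);
    assert_eq!(mask_lanes(&as_vector), vec![0, 2, 1, 3]);
    assert_eq!(mask_lanes(&as_struct), vec![3, 1]);
}
```

In the real hunk the vector case reads lanes with `extract_element` while the struct case walks the fields via `get_field`/`access_field`, and both are cast to the mask element type before use.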
diff --git a/compiler/rustc_codegen_gcc/src/callee.rs b/compiler/rustc_codegen_gcc/src/callee.rs index 65972a03e83..c133ae4fcdd 100644 --- a/compiler/rustc_codegen_gcc/src/callee.rs +++ b/compiler/rustc_codegen_gcc/src/callee.rs @@ -72,95 +72,74 @@ pub fn get_fn<'gcc, 'tcx>(cx: &CodegenCx<'gcc, 'tcx>, instance: Instance<'tcx>) attributes::from_fn_attrs(cx, func, instance); - let instance_def_id = instance.def_id(); - - // TODO(antoyo): set linkage and attributes. - - // Apply an appropriate linkage/visibility value to our item that we - // just declared. - // - // This is sort of subtle. Inside our codegen unit we started off - // compilation by predefining all our own `MonoItem` instances. That - // is, everything we're codegenning ourselves is already defined. That - // means that anything we're actually codegenning in this codegen unit - // will have hit the above branch in `get_declared_value`. As a result, - // we're guaranteed here that we're declaring a symbol that won't get - // defined, or in other words we're referencing a value from another - // codegen unit or even another crate. - // - // So because this is a foreign value we blanket apply an external - // linkage directive because it's coming from a different object file. - // The visibility here is where it gets tricky. This symbol could be - // referencing some foreign crate or foreign library (an `extern` - // block) in which case we want to leave the default visibility. We may - // also, though, have multiple codegen units. It could be a - // monomorphization, in which case its expected visibility depends on - // whether we are sharing generics or not. The important thing here is - // that the visibility we apply to the declaration is the same one that - // has been applied to the definition (wherever that definition may be). - let is_generic = instance.args.non_erasable_generics().next().is_some(); - - if is_generic { - // This is a monomorphization. Its expected visibility depends - // on whether we are in share-generics mode. - - if cx.tcx.sess.opts.share_generics() { - // We are in share_generics mode. - - if let Some(instance_def_id) = instance_def_id.as_local() { - // This is a definition from the current crate. If the - // definition is unreachable for downstream crates or - // the current crate does not re-export generics, the - // definition of the instance will have been declared - // as `hidden`. - if cx.tcx.is_unreachable_local_definition(instance_def_id) + #[cfg(feature = "master")] + { + let instance_def_id = instance.def_id(); + + // TODO(antoyo): set linkage and attributes. + + // Apply an appropriate linkage/visibility value to our item that we + // just declared. + // + // This is sort of subtle. Inside our codegen unit we started off + // compilation by predefining all our own `MonoItem` instances. That + // is, everything we're codegenning ourselves is already defined. That + // means that anything we're actually codegenning in this codegen unit + // will have hit the above branch in `get_declared_value`. As a result, + // we're guaranteed here that we're declaring a symbol that won't get + // defined, or in other words we're referencing a value from another + // codegen unit or even another crate. + // + // So because this is a foreign value we blanket apply an external + // linkage directive because it's coming from a different object file. + // The visibility here is where it gets tricky. 
This symbol could be + // referencing some foreign crate or foreign library (an `extern` + // block) in which case we want to leave the default visibility. We may + // also, though, have multiple codegen units. It could be a + // monomorphization, in which case its expected visibility depends on + // whether we are sharing generics or not. The important thing here is + // that the visibility we apply to the declaration is the same one that + // has been applied to the definition (wherever that definition may be). + let is_generic = instance.args.non_erasable_generics().next().is_some(); + + let is_hidden = if is_generic { + // This is a monomorphization of a generic function. + if !(cx.tcx.sess.opts.share_generics() + || tcx.codegen_fn_attrs(instance_def_id).inline + == rustc_attr_parsing::InlineAttr::Never) + { + // When not sharing generics, all instances are in the same + // crate and have hidden visibility. + true + } else if let Some(instance_def_id) = instance_def_id.as_local() { + // This is a monomorphization of a generic function + // defined in the current crate. It is hidden if: + // - the definition is unreachable for downstream + // crates, or + // - the current crate does not re-export generics + // (because the crate is a C library or executable) + cx.tcx.is_unreachable_local_definition(instance_def_id) || !cx.tcx.local_crate_exports_generics() - { - #[cfg(feature = "master")] - func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); - } } else { // This is a monomorphization of a generic function - // defined in an upstream crate. - if instance.upstream_monomorphization(tcx).is_some() { - // This is instantiated in another crate. It cannot - // be `hidden`. - } else { - // This is a local instantiation of an upstream definition. - // If the current crate does not re-export it - // (because it is a C library or an executable), it - // will have been declared `hidden`. - if !cx.tcx.local_crate_exports_generics() { - #[cfg(feature = "master")] - func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); - } - } + // defined in an upstream crate. It is hidden if: + // - it is instantiated in this crate, and + // - the current crate does not re-export generics + instance.upstream_monomorphization(tcx).is_none() + && !cx.tcx.local_crate_exports_generics() } } else { - // When not sharing generics, all instances are in the same - // crate and have hidden visibility - #[cfg(feature = "master")] + // This is a non-generic function. It is hidden if: + // - it is instantiated in the local crate, and + // - it is defined an upstream crate (non-local), or + // - it is not reachable + cx.tcx.is_codegened_item(instance_def_id) + && (!instance_def_id.is_local() + || !cx.tcx.is_reachable_non_generic(instance_def_id)) + }; + if is_hidden { func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); } - } else { - // This is a non-generic function - if cx.tcx.is_codegened_item(instance_def_id) { - // This is a function that is instantiated in the local crate - - if instance_def_id.is_local() { - // This is function that is defined in the local crate. - // If it is not reachable, it is hidden. - if !cx.tcx.is_reachable_non_generic(instance_def_id) { - #[cfg(feature = "master")] - func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); - } - } else { - // This is a function from an upstream crate that has - // been instantiated here. These are always hidden. 
- #[cfg(feature = "master")] - func.add_attribute(FnAttribute::Visibility(Visibility::Hidden)); - } - } } func diff --git a/compiler/rustc_codegen_gcc/src/common.rs b/compiler/rustc_codegen_gcc/src/common.rs index 0d3e7083d56..f43743fc2a4 100644 --- a/compiler/rustc_codegen_gcc/src/common.rs +++ b/compiler/rustc_codegen_gcc/src/common.rs @@ -240,14 +240,14 @@ impl<'gcc, 'tcx> ConstCodegenMethods<'tcx> for CodegenCx<'gcc, 'tcx> { } }; let ptr_type = base_addr.get_type(); - let base_addr = self.const_bitcast(base_addr, self.usize_type); + let base_addr = self.context.new_cast(None, base_addr, self.usize_type); let offset = self.context.new_rvalue_from_long(self.usize_type, offset.bytes() as i64); - let ptr = self.const_bitcast(base_addr + offset, ptr_type); + let ptr = self.context.new_cast(None, base_addr + offset, ptr_type); if !matches!(layout.primitive(), Pointer(_)) { self.const_bitcast(ptr.dereference(None).to_rvalue(), ty) } else { - self.const_bitcast(ptr, ty) + self.context.new_cast(None, ptr, ty) } } } diff --git a/compiler/rustc_codegen_gcc/src/consts.rs b/compiler/rustc_codegen_gcc/src/consts.rs index 6dc2f4ed668..1631ecfeecf 100644 --- a/compiler/rustc_codegen_gcc/src/consts.rs +++ b/compiler/rustc_codegen_gcc/src/consts.rs @@ -252,7 +252,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { let global = self.declare_global( sym, gcc_type, - GlobalKind::Exported, + GlobalKind::Imported, is_tls, fn_attrs.link_section, ); @@ -404,7 +404,6 @@ fn check_and_apply_linkage<'gcc, 'tcx>( // TODO(antoyo): set linkage. let value = cx.const_ptrcast(global1.get_address(None), gcc_type); global2.global_set_initializer_rvalue(value); - // TODO(antoyo): use global_set_initializer() when it will work. global2 } else { // Generate an external declaration. diff --git a/compiler/rustc_codegen_gcc/src/context.rs b/compiler/rustc_codegen_gcc/src/context.rs index f67dcf0cb11..c81c53359fd 100644 --- a/compiler/rustc_codegen_gcc/src/context.rs +++ b/compiler/rustc_codegen_gcc/src/context.rs @@ -386,6 +386,7 @@ impl<'gcc, 'tcx> CodegenCx<'gcc, 'tcx> { impl<'gcc, 'tcx> BackendTypes for CodegenCx<'gcc, 'tcx> { type Value = RValue<'gcc>; type Metadata = RValue<'gcc>; + // TODO(antoyo): change to Function<'gcc>. type Function = RValue<'gcc>; type BasicBlock = Block<'gcc>; diff --git a/compiler/rustc_codegen_gcc/src/debuginfo.rs b/compiler/rustc_codegen_gcc/src/debuginfo.rs index 6aeb656c1ab..d3aeb7f3bde 100644 --- a/compiler/rustc_codegen_gcc/src/debuginfo.rs +++ b/compiler/rustc_codegen_gcc/src/debuginfo.rs @@ -4,7 +4,7 @@ use gccjit::{Location, RValue}; use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext, VariableKind}; use rustc_codegen_ssa::traits::{DebugInfoBuilderMethods, DebugInfoCodegenMethods}; use rustc_data_structures::sync::Lrc; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::{self, Body, SourceScope}; use rustc_middle::ty::{Instance, PolyExistentialTraitRef, Ty}; @@ -69,7 +69,7 @@ fn compute_mir_scopes<'gcc, 'tcx>( ) { // Find all scopes with variables defined in them. let variables = if cx.sess().opts.debuginfo == DebugInfo::Full { - let mut vars = BitSet::new_empty(mir.source_scopes.len()); + let mut vars = DenseBitSet::new_empty(mir.source_scopes.len()); // FIXME(eddyb) take into account that arguments always have debuginfo, // irrespective of their name (assuming full debuginfo is enabled). 
// NOTE(eddyb) actually, on second thought, those are always in the @@ -82,7 +82,7 @@ fn compute_mir_scopes<'gcc, 'tcx>( // Nothing to emit, of course. None }; - let mut instantiated = BitSet::new_empty(mir.source_scopes.len()); + let mut instantiated = DenseBitSet::new_empty(mir.source_scopes.len()); // Instantiate all scopes. for idx in 0..mir.source_scopes.len() { let scope = SourceScope::new(idx); @@ -101,9 +101,9 @@ fn make_mir_scope<'gcc, 'tcx>( cx: &CodegenCx<'gcc, 'tcx>, _instance: Instance<'tcx>, mir: &Body<'tcx>, - variables: &Option<BitSet<SourceScope>>, + variables: &Option<DenseBitSet<SourceScope>>, debug_context: &mut FunctionDebugContext<'tcx, (), Location<'gcc>>, - instantiated: &mut BitSet<SourceScope>, + instantiated: &mut DenseBitSet<SourceScope>, scope: SourceScope, ) { if instantiated.contains(scope) { diff --git a/compiler/rustc_codegen_gcc/src/errors.rs b/compiler/rustc_codegen_gcc/src/errors.rs index c896246866b..1b59b9ac169 100644 --- a/compiler/rustc_codegen_gcc/src/errors.rs +++ b/compiler/rustc_codegen_gcc/src/errors.rs @@ -41,10 +41,6 @@ pub(crate) enum PossibleFeature<'a> { } #[derive(Diagnostic)] -#[diag(codegen_gcc_lto_not_supported)] -pub(crate) struct LTONotSupported; - -#[derive(Diagnostic)] #[diag(codegen_gcc_unwinding_inline_asm)] pub(crate) struct UnwindingInlineAsm { #[primary_span] diff --git a/compiler/rustc_codegen_gcc/src/gcc_util.rs b/compiler/rustc_codegen_gcc/src/gcc_util.rs index 1994a2a3c53..560aff43d65 100644 --- a/compiler/rustc_codegen_gcc/src/gcc_util.rs +++ b/compiler/rustc_codegen_gcc/src/gcc_util.rs @@ -66,16 +66,14 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri // We do the equivalent above in `target_features_cfg`. // See <https://github.com/rust-lang/rust/issues/134792>. all_rust_features.push((false, feature)); - } else if !feature.is_empty() { - if diagnostics { - sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature }); - } + } else if !feature.is_empty() && diagnostics { + sess.dcx().emit_warn(UnknownCTargetFeaturePrefix { feature }); } } // Remove features that are meant for rustc, not codegen. - all_rust_features.retain(|(_, feature)| { + all_rust_features.retain(|&(_, feature)| { // Retain if it is not a rustc feature - !RUSTC_SPECIFIC_FEATURES.contains(feature) + !RUSTC_SPECIFIC_FEATURES.contains(&feature) }); // Check feature validity. @@ -103,7 +101,7 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri }; sess.dcx().emit_warn(unknown_feature); } - Some((_, stability, _)) => { + Some(&(_, stability, _)) => { if let Err(reason) = stability.toggle_allowed() { sess.dcx().emit_warn(ForbiddenCTargetFeature { feature, @@ -165,29 +163,25 @@ pub(crate) fn global_gcc_features(sess: &Session, diagnostics: bool) -> Vec<Stri ); // Translate this into GCC features. - let feats = all_rust_features - .iter() - .filter_map(|&(enable, feature)| { + let feats = + all_rust_features.iter().flat_map(|&(enable, feature)| { let enable_disable = if enable { '+' } else { '-' }; // We run through `to_gcc_features` when // passing requests down to GCC. This means that all in-language // features also work on the command line instead of having two // different names when the GCC name and the Rust name differ. 
- Some( - to_gcc_features(sess, feature) - .iter() - .flat_map(|feat| to_gcc_features(sess, feat).into_iter()) - .map(|feature| { - if enable_disable == '-' { - format!("-{}", feature) - } else { - feature.to_string() - } - }) - .collect::<Vec<_>>(), - ) - }) - .flatten(); + to_gcc_features(sess, feature) + .iter() + .flat_map(|feat| to_gcc_features(sess, feat).into_iter()) + .map(|feature| { + if enable_disable == '-' { + format!("-{}", feature) + } else { + feature.to_string() + } + }) + .collect::<Vec<_>>() + }); features.extend(feats); if diagnostics { diff --git a/compiler/rustc_codegen_gcc/src/int.rs b/compiler/rustc_codegen_gcc/src/int.rs index 02b760dc733..fe6a65bed03 100644 --- a/compiler/rustc_codegen_gcc/src/int.rs +++ b/compiler/rustc_codegen_gcc/src/int.rs @@ -90,7 +90,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } } - } else if a_type.is_vector() && a_type.is_vector() { + } else if a_type.is_vector() && b_type.is_vector() { a >> b } else if a_native && !b_native { self.gcc_lshr(a, self.gcc_int_cast(b, a_type)) @@ -322,36 +322,26 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { }, } } else { - match new_kind { - Int(I128) | Uint(U128) => { - let func_name = match oop { - OverflowOp::Add => match new_kind { - Int(I128) => "__rust_i128_addo", - Uint(U128) => "__rust_u128_addo", - _ => unreachable!(), - }, - OverflowOp::Sub => match new_kind { - Int(I128) => "__rust_i128_subo", - Uint(U128) => "__rust_u128_subo", - _ => unreachable!(), - }, - OverflowOp::Mul => match new_kind { - Int(I128) => "__rust_i128_mulo", // TODO(antoyo): use __muloti4d instead? - Uint(U128) => "__rust_u128_mulo", - _ => unreachable!(), - }, - }; - return self.operation_with_overflow(func_name, lhs, rhs); - } - _ => match oop { - OverflowOp::Mul => match new_kind { - Int(I32) => "__mulosi4", - Int(I64) => "__mulodi4", - _ => unreachable!(), - }, - _ => unimplemented!("overflow operation for {:?}", new_kind), + let (func_name, width) = match oop { + OverflowOp::Add => match new_kind { + Int(I128) => ("__rust_i128_addo", 128), + Uint(U128) => ("__rust_u128_addo", 128), + _ => unreachable!(), }, - } + OverflowOp::Sub => match new_kind { + Int(I128) => ("__rust_i128_subo", 128), + Uint(U128) => ("__rust_u128_subo", 128), + _ => unreachable!(), + }, + OverflowOp::Mul => match new_kind { + Int(I32) => ("__mulosi4", 32), + Int(I64) => ("__mulodi4", 64), + Int(I128) => ("__rust_i128_mulo", 128), // TODO(antoyo): use __muloti4d instead? + Uint(U128) => ("__rust_u128_mulo", 128), + _ => unreachable!(), + }, + }; + return self.operation_with_overflow(func_name, lhs, rhs, width); }; let intrinsic = self.context.get_builtin_function(name); @@ -364,80 +354,87 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { (res.dereference(self.location).to_rvalue(), overflow) } + /// Non-`__builtin_*` overflow operations with a `fn(T, T, &mut i32) -> T` signature. 
pub fn operation_with_overflow( &self, func_name: &str, lhs: RValue<'gcc>, rhs: RValue<'gcc>, + width: u64, ) -> (RValue<'gcc>, RValue<'gcc>) { let a_type = lhs.get_type(); let b_type = rhs.get_type(); debug_assert!(a_type.dyncast_array().is_some()); debug_assert!(b_type.dyncast_array().is_some()); + let overflow_type = self.i32_type; + let overflow_param_type = overflow_type.make_pointer(); + let res_type = a_type; + + let overflow_value = + self.current_func().new_local(self.location, overflow_type, "overflow"); + let overflow_addr = overflow_value.get_address(self.location); + let param_a = self.context.new_parameter(self.location, a_type, "a"); let param_b = self.context.new_parameter(self.location, b_type, "b"); - let result_field = self.context.new_field(self.location, a_type, "result"); - let overflow_field = self.context.new_field(self.location, self.bool_type, "overflow"); - - let ret_ty = Ty::new_tup(self.tcx, &[self.tcx.types.i128, self.tcx.types.bool]); + let param_overflow = + self.context.new_parameter(self.location, overflow_param_type, "overflow"); + + let a_elem_type = a_type.dyncast_array().expect("non-array a value"); + debug_assert!(a_elem_type.is_integral()); + let res_ty = match width { + 32 => self.tcx.types.i32, + 64 => self.tcx.types.i64, + 128 => self.tcx.types.i128, + _ => unreachable!("unexpected integer size"), + }; let layout = self .tcx - .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ret_ty)) + .layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(res_ty)) .unwrap(); let arg_abi = ArgAbi { layout, mode: PassMode::Direct(ArgAttributes::new()) }; let mut fn_abi = FnAbi { - args: vec![arg_abi.clone(), arg_abi.clone()].into_boxed_slice(), + args: vec![arg_abi.clone(), arg_abi.clone(), arg_abi.clone()].into_boxed_slice(), ret: arg_abi, c_variadic: false, - fixed_count: 2, + fixed_count: 3, conv: Conv::C, can_unwind: false, }; fn_abi.adjust_for_foreign_abi(self.cx, spec::abi::Abi::C { unwind: false }).unwrap(); - let indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. }); - - let return_type = self - .context - .new_struct_type(self.location, "result_overflow", &[result_field, overflow_field]); - let result = if indirect { - let return_value = - self.current_func().new_local(self.location, return_type.as_type(), "return_value"); - let return_param_type = return_type.as_type().make_pointer(); - let return_param = - self.context.new_parameter(self.location, return_param_type, "return_value"); + let ret_indirect = matches!(fn_abi.ret.mode, PassMode::Indirect { .. 
}); + + let result = if ret_indirect { + let res_value = self.current_func().new_local(self.location, res_type, "result_value"); + let res_addr = res_value.get_address(self.location); + let res_param_type = res_type.make_pointer(); + let param_res = self.context.new_parameter(self.location, res_param_type, "result"); + let func = self.context.new_function( self.location, FunctionType::Extern, self.type_void(), - &[return_param, param_a, param_b], + &[param_res, param_a, param_b, param_overflow], func_name, false, ); - self.llbb().add_eval( - self.location, - self.context.new_call(self.location, func, &[ - return_value.get_address(self.location), - lhs, - rhs, - ]), - ); - return_value.to_rvalue() + let _void = + self.context.new_call(self.location, func, &[res_addr, lhs, rhs, overflow_addr]); + res_value.to_rvalue() } else { let func = self.context.new_function( self.location, FunctionType::Extern, - return_type.as_type(), - &[param_a, param_b], + res_type, + &[param_a, param_b, param_overflow], func_name, false, ); - self.context.new_call(self.location, func, &[lhs, rhs]) + self.context.new_call(self.location, func, &[lhs, rhs, overflow_addr]) }; - let overflow = result.access_field(self.location, overflow_field); - let int_result = result.access_field(self.location, result_field); - (int_result, overflow) + + (result, self.context.new_cast(self.location, overflow_value, self.bool_type).to_rvalue()) } pub fn gcc_icmp( @@ -660,7 +657,7 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { } } } - } else if a_type.is_vector() && a_type.is_vector() { + } else if a_type.is_vector() && b_type.is_vector() { a << b } else if a_native && !b_native { self.gcc_shl(a, self.gcc_int_cast(b, a_type)) diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs index 0a448ded6b1..231307def29 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/llvm.rs @@ -421,7 +421,7 @@ pub fn adjust_intrinsic_arguments<'a, 'b, 'gcc, 'tcx>( | "__builtin_ia32_xsaveopt64" => { let new_args = args.to_vec(); let thirty_two = builder.context.new_rvalue_from_int(new_args[1].get_type(), 32); - let arg2 = new_args[1] << thirty_two | new_args[2]; + let arg2 = (new_args[1] << thirty_two) | new_args[2]; let arg2_type = gcc_func.get_param_type(1); let arg2 = builder.context.new_cast(None, arg2, arg2_type); args = vec![new_args[0], arg2].into(); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index 78ec9741f57..48606f5f91c 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -13,15 +13,16 @@ use rustc_codegen_ssa::common::IntPredicate; use rustc_codegen_ssa::errors::InvalidMonomorphization; use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue}; use rustc_codegen_ssa::mir::place::{PlaceRef, PlaceValue}; +#[cfg(feature = "master")] +use rustc_codegen_ssa::traits::MiscCodegenMethods; use rustc_codegen_ssa::traits::{ - ArgAbiBuilderMethods, BuilderMethods, ConstCodegenMethods, IntrinsicCallBuilderMethods, + ArgAbiBuilderMethods, BaseTypeCodegenMethods, BuilderMethods, ConstCodegenMethods, + IntrinsicCallBuilderMethods, }; -#[cfg(feature = "master")] -use rustc_codegen_ssa::traits::{BaseTypeCodegenMethods, MiscCodegenMethods}; use rustc_middle::bug; -use rustc_middle::ty::layout::LayoutOf; #[cfg(feature = "master")] -use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv}; +use 
rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt}; +use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; use rustc_middle::ty::{self, Instance, Ty}; use rustc_span::{Span, Symbol, sym}; use rustc_target::abi::HasDataLayout; @@ -139,6 +140,18 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc &args.iter().map(|arg| arg.immediate()).collect::<Vec<_>>(), ) } + sym::fmaf16 => { + // TODO(antoyo): use the correct builtin for f16. + let func = self.cx.context.get_builtin_function("fmaf"); + let args: Vec<_> = args + .iter() + .map(|arg| { + self.cx.context.new_cast(self.location, arg.immediate(), self.cx.type_f32()) + }) + .collect(); + let result = self.cx.context.new_call(self.location, func, &args); + self.cx.context.new_cast(self.location, result, self.cx.type_f16()) + } sym::is_val_statically_known => { let a = args[0].immediate(); let builtin = self.context.get_builtin_function("__builtin_constant_p"); @@ -988,7 +1001,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { 128 => "__rust_i128_addo", _ => unreachable!(), }; - let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs); + let (int_result, overflow) = + self.operation_with_overflow(func_name, lhs, rhs, width); self.llbb().add_assignment(self.location, res, int_result); overflow }; @@ -1058,7 +1072,8 @@ impl<'a, 'gcc, 'tcx> Builder<'a, 'gcc, 'tcx> { 128 => "__rust_i128_subo", _ => unreachable!(), }; - let (int_result, overflow) = self.operation_with_overflow(func_name, lhs, rhs); + let (int_result, overflow) = + self.operation_with_overflow(func_name, lhs, rhs, width); self.llbb().add_assignment(self.location, res, int_result); overflow }; diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index 79d1a06dd46..1be452e5d05 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -379,7 +379,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // Make sure this is actually a SIMD vector. let idx_ty = args[2].layout.ty; let n: u64 = if idx_ty.is_simd() - && matches!(idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32)) + && matches!(*idx_ty.simd_size_and_type(bx.cx.tcx).1.kind(), ty::Uint(ty::UintTy::U32)) { idx_ty.simd_size_and_type(bx.cx.tcx).0 } else { @@ -829,6 +829,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( | sym::simd_flog | sym::simd_floor | sym::simd_fma + | sym::simd_relaxed_fma | sym::simd_fpow | sym::simd_fpowi | sym::simd_fsin diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index 7329080ce1f..f6ad0c79de5 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -27,6 +27,8 @@ // Some "regular" crates we want to share with rustc extern crate object; extern crate smallvec; +// FIXME(antoyo): clippy bug: remove the #[allow] when it's fixed. 
+#[allow(unused_extern_crates)] extern crate tempfile; #[macro_use] extern crate tracing; @@ -88,7 +90,6 @@ use std::sync::atomic::Ordering; use std::sync::{Arc, Mutex}; use back::lto::{ThinBuffer, ThinData}; -use errors::LTONotSupported; use gccjit::{CType, Context, OptimizationLevel}; #[cfg(feature = "master")] use gccjit::{TargetInfo, Version}; @@ -109,9 +110,10 @@ use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; use rustc_middle::ty::TyCtxt; use rustc_middle::util::Providers; use rustc_session::Session; -use rustc_session::config::{Lto, OptLevel, OutputFilenames}; +use rustc_session::config::{OptLevel, OutputFilenames}; use rustc_span::Symbol; use rustc_span::fatal_error::FatalError; +use rustc_target::spec::RelocModel; use tempfile::TempDir; use crate::back::lto::ModuleBuffer; @@ -141,11 +143,15 @@ impl TargetInfo { false } - fn supports_128bit_int(&self) -> bool { - self.supports_128bit_integers.load(Ordering::SeqCst) - } - - fn supports_target_dependent_type(&self, _typ: CType) -> bool { + fn supports_target_dependent_type(&self, typ: CType) -> bool { + match typ { + CType::UInt128t | CType::Int128t => { + if self.supports_128bit_integers.load(Ordering::SeqCst) { + return true; + } + } + _ => (), + } false } } @@ -166,10 +172,6 @@ impl LockedTargetInfo { self.info.lock().expect("lock").cpu_supports(feature) } - fn supports_128bit_int(&self) -> bool { - self.info.lock().expect("lock").supports_128bit_int() - } - fn supports_target_dependent_type(&self, typ: CType) -> bool { self.info.lock().expect("lock").supports_target_dependent_type(typ) } @@ -202,10 +204,6 @@ impl CodegenBackend for GccCodegenBackend { #[cfg(feature = "master")] gccjit::set_global_personality_function_name(b"rust_eh_personality\0"); - if sess.lto() == Lto::Thin { - sess.dcx().emit_warn(LTONotSupported {}); - } - #[cfg(not(feature = "master"))] { let temp_dir = TempDir::new().expect("cannot create temporary directory"); @@ -297,6 +295,7 @@ impl ExtraBackendMethods for GccCodegenBackend { ) -> Self::Module { let mut mods = GccContext { context: Arc::new(SyncContext::new(new_context(tcx))), + relocation_model: tcx.sess.relocation_model(), should_combine_object_files: false, temp_dir: None, }; @@ -328,6 +327,9 @@ impl ExtraBackendMethods for GccCodegenBackend { pub struct GccContext { context: Arc<SyncContext>, + /// This field is needed in order to be able to set the flag -fPIC when necessary when doing + /// LTO. + relocation_model: RelocModel, should_combine_object_files: bool, // Temporary directory used by LTO. We keep it here so that it's not removed before linking. 
temp_dir: Option<TempDir>, @@ -492,10 +494,10 @@ fn target_features_cfg( sess.target .rust_target_features() .iter() - .filter(|(_, gate, _)| gate.in_cfg()) - .filter_map(|(feature, gate, _)| { + .filter(|&&(_, gate, _)| gate.in_cfg()) + .filter_map(|&(feature, gate, _)| { if sess.is_nightly_build() || allow_unstable || gate.requires_nightly().is_none() { - Some(*feature) + Some(feature) } else { None } diff --git a/compiler/rustc_codegen_gcc/tests/failing-non-lto-tests.txt b/compiler/rustc_codegen_gcc/tests/failing-non-lto-tests.txt deleted file mode 100644 index 384dfdc26fb..00000000000 --- a/compiler/rustc_codegen_gcc/tests/failing-non-lto-tests.txt +++ /dev/null @@ -1,11 +0,0 @@ -tests/ui/issues/issue-44056.rs -tests/ui/lto/fat-lto.rs -tests/ui/lto/debuginfo-lto.rs -tests/ui/lto/lto-many-codegen-units.rs -tests/ui/lto/issue-100772.rs -tests/ui/lto/lto-rustc-loads-linker-plugin.rs -tests/ui/panic-runtime/lto-unwind.rs -tests/ui/sanitizer/issue-111184-cfi-coroutine-witness.rs -tests/ui/sepcomp/sepcomp-lib-lto.rs -tests/ui/lto/lto-opt-level-s.rs -tests/ui/lto/lto-opt-level-z.rs diff --git a/compiler/rustc_codegen_gcc/tests/failing-ui-tests.txt b/compiler/rustc_codegen_gcc/tests/failing-ui-tests.txt index 457072b1a5b..082958bfe1f 100644 --- a/compiler/rustc_codegen_gcc/tests/failing-ui-tests.txt +++ b/compiler/rustc_codegen_gcc/tests/failing-ui-tests.txt @@ -69,20 +69,22 @@ tests/ui/mir/mir_heavy_promoted.rs tests/ui/consts/const_cmp_type_id.rs tests/ui/consts/issue-73976-monomorphic.rs tests/ui/consts/issue-94675.rs -tests/ui/rfcs/rfc-2632-const-trait-impl/const-drop-fail.rs -tests/ui/rfcs/rfc-2632-const-trait-impl/const-drop.rs +tests/ui/traits/const-traits/const-drop-fail.rs +tests/ui/traits/const-traits/const-drop.rs tests/ui/runtime/on-broken-pipe/child-processes.rs -tests/ui/sanitizer/cfi-assoc-ty-lifetime-issue-123053.rs -tests/ui/sanitizer/cfi-async-closures.rs -tests/ui/sanitizer/cfi-closures.rs -tests/ui/sanitizer/cfi-complex-receiver.rs -tests/ui/sanitizer/cfi-coroutine.rs -tests/ui/sanitizer/cfi-drop-in-place.rs -tests/ui/sanitizer/cfi-drop-no-principal.rs -tests/ui/sanitizer/cfi-fn-ptr.rs -tests/ui/sanitizer/cfi-self-ref.rs -tests/ui/sanitizer/cfi-supertraits.rs -tests/ui/sanitizer/cfi-virtual-auto.rs +tests/ui/sanitizer/cfi/assoc-ty-lifetime-issue-123053.rs +tests/ui/sanitizer/cfi/async-closures.rs +tests/ui/sanitizer/cfi/closures.rs +tests/ui/sanitizer/cfi/complex-receiver.rs +tests/ui/sanitizer/cfi/coroutine.rs +tests/ui/sanitizer/cfi/drop-in-place.rs +tests/ui/sanitizer/cfi/drop-no-principal.rs +tests/ui/sanitizer/cfi/fn-ptr.rs +tests/ui/sanitizer/cfi/self-ref.rs +tests/ui/sanitizer/cfi/supertraits.rs +tests/ui/sanitizer/cfi/virtual-auto.rs +tests/ui/sanitizer/cfi/sized-associated-ty.rs +tests/ui/sanitizer/cfi/can-reveal-opaques.rs tests/ui/sanitizer/kcfi-mangling.rs tests/ui/statics/const_generics.rs tests/ui/backtrace/dylib-dep.rs @@ -91,6 +93,7 @@ tests/ui/delegation/fn-header.rs tests/ui/consts/zst_no_llvm_alloc.rs tests/ui/consts/const-eval/parse_ints.rs tests/ui/simd/intrinsic/generic-arithmetic-pass.rs +tests/ui/simd/intrinsic/generic-as.rs tests/ui/backtrace/backtrace.rs tests/ui/lifetimes/tail-expr-lock-poisoning.rs tests/ui/runtime/rt-explody-panic-payloads.rs @@ -118,5 +121,4 @@ tests/ui/codegen/equal-pointers-unequal/strict-provenance/print3.rs tests/ui/codegen/equal-pointers-unequal/strict-provenance/inline2.rs tests/ui/codegen/equal-pointers-unequal/strict-provenance/segfault.rs tests/ui/codegen/equal-pointers-unequal/strict-provenance/zero.rs 
-tests/ui/sanitizer/cfi-sized-associated-ty.rs -tests/ui/sanitizer/cfi-can-reveal-opaques.rs +tests/ui/simd/simd-bitmask-notpow2.rs diff --git a/compiler/rustc_codegen_gcc/tests/failing-ui-tests12.txt b/compiler/rustc_codegen_gcc/tests/failing-ui-tests12.txt index 1d9bdaa552c..b10d4bc82aa 100644 --- a/compiler/rustc_codegen_gcc/tests/failing-ui-tests12.txt +++ b/compiler/rustc_codegen_gcc/tests/failing-ui-tests12.txt @@ -11,7 +11,6 @@ tests/ui/simd/array-type.rs tests/ui/simd/intrinsic/float-minmax-pass.rs tests/ui/simd/intrinsic/generic-arithmetic-pass.rs tests/ui/simd/intrinsic/generic-arithmetic-saturating-pass.rs -tests/ui/simd/intrinsic/generic-as.rs tests/ui/simd/intrinsic/generic-cast-pass.rs tests/ui/simd/intrinsic/generic-cast-pointer-width.rs tests/ui/simd/intrinsic/generic-comparison-pass.rs diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.lock b/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.lock new file mode 100644 index 00000000000..fe252db4425 --- /dev/null +++ b/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.lock @@ -0,0 +1,14 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. +version = 4 + +[[package]] +name = "hello_world" +version = "0.0.0" +dependencies = [ + "mylib", +] + +[[package]] +name = "mylib" +version = "0.1.0" diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.toml b/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.toml index 0b8cdc63fbe..c6e22f642f6 100644 --- a/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.toml +++ b/compiler/rustc_codegen_gcc/tests/hello-world/Cargo.toml @@ -1,4 +1,12 @@ [package] name = "hello_world" +edition = "2024" [dependencies] +mylib = { path = "mylib" } + +[profile.dev] +lto = "thin" + +[profile.release] +lto = "fat" diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.lock b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.lock new file mode 100644 index 00000000000..c8a0bfc6354 --- /dev/null +++ b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.lock @@ -0,0 +1,5 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+[[package]] +name = "mylib" +version = "0.1.0" diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.toml b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.toml new file mode 100644 index 00000000000..d15f62bfb6d --- /dev/null +++ b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "mylib" +version = "0.1.0" +authors = ["Antoni Boucher <bouanto@zoho.com>"] +edition = "2018" + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[dependencies] diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/mylib/src/lib.rs b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/src/lib.rs new file mode 100644 index 00000000000..8d3d111bd19 --- /dev/null +++ b/compiler/rustc_codegen_gcc/tests/hello-world/mylib/src/lib.rs @@ -0,0 +1,7 @@ +pub fn my_func(a: i32, b: i32) -> i32 { + let mut res = a; + for i in a..b { + res += i; + } + res +} diff --git a/compiler/rustc_codegen_gcc/tests/hello-world/src/main.rs b/compiler/rustc_codegen_gcc/tests/hello-world/src/main.rs index e7a11a969c0..71c78d364ac 100644 --- a/compiler/rustc_codegen_gcc/tests/hello-world/src/main.rs +++ b/compiler/rustc_codegen_gcc/tests/hello-world/src/main.rs @@ -1,3 +1,5 @@ +use mylib::my_func; + fn main() { - println!("Hello, world!"); + println!("{}", my_func(5, 10)); } diff --git a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs index aecea37ab5a..64c932a2658 100644 --- a/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs +++ b/compiler/rustc_codegen_gcc/tests/lang_tests_common.rs @@ -1,10 +1,8 @@ //! The common code for `tests/lang_tests_*.rs` -use std::{ - env::{self, current_dir}, - path::{Path, PathBuf}, - process::Command, -}; +use std::env::{self, current_dir}; +use std::path::{Path, PathBuf}; +use std::process::Command; use boml::Toml; use lang_tester::LangTester; @@ -22,14 +20,20 @@ pub fn main_inner(profile: Profile) { let tempdir = TempDir::new().expect("temp dir"); let current_dir = current_dir().expect("current dir"); let current_dir = current_dir.to_str().expect("current dir").to_string(); - let toml = Toml::parse(include_str!("../config.toml")).expect("Failed to parse `config.toml`"); - let gcc_path = if let Ok(gcc_path) = toml.get_string("gcc-path") { - PathBuf::from(gcc_path.to_string()) - } else { - // then we try to retrieve it from the `target` folder. - let commit = include_str!("../libgccjit.version").trim(); - Path::new("build/libgccjit").join(commit) - }; + + let manifest_dir = Path::new(env!("CARGO_MANIFEST_DIR")); + + let gcc_path = std::fs::read_to_string(manifest_dir.join("config.toml")) + .ok() + .and_then(|v| { + let toml = Toml::parse(&v).expect("Failed to parse `config.toml`"); + toml.get_string("gcc-path").map(PathBuf::from).ok() + }) + .unwrap_or_else(|| { + // then we try to retrieve it from the `target` folder. 
+ let commit = include_str!("../libgccjit.version").trim(); + Path::new("build/libgccjit").join(commit) + }); let gcc_path = Path::new(&gcc_path) .canonicalize() @@ -83,6 +87,8 @@ pub fn main_inner(profile: Profile) { &format!("{}/build/build_sysroot/sysroot/", current_dir), "-C", "link-arg=-lc", + "--extern", + "mini_core=target/out/libmini_core.rlib", "-o", exe.to_str().expect("to_str"), path.to_str().expect("to_str"), diff --git a/compiler/rustc_codegen_gcc/tests/run/array.rs b/compiler/rustc_codegen_gcc/tests/run/array.rs index d8de9f28d4c..c3c08c29c6d 100644 --- a/compiler/rustc_codegen_gcc/tests/run/array.rs +++ b/compiler/rustc_codegen_gcc/tests/run/array.rs @@ -7,38 +7,12 @@ // 5 // 10 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, rustc_attrs)] -#![allow(internal_features)] +#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. -#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for i32 {} -impl Copy for u8 {} -impl Copy for i8 {} -impl Copy for i16 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] @@ -48,182 +22,6 @@ mod libc { } } -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. - drop_in_place(to_drop); -} - -#[lang = "panic"] -#[track_caller] -#[no_mangle] -pub fn panic(_msg: &'static str) -> ! { - unsafe { - libc::puts("Panicking\0" as *const str as *const u8); - intrinsics::abort(); - } -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! 
{ - loop {} - } -} - -#[lang = "add"] -trait Add<RHS = Self> { - type Output; - - fn add(self, rhs: RHS) -> Self::Output; -} - -impl Add for u8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i32 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for usize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for isize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -#[lang = "sub"] -pub trait Sub<RHS = Self> { - type Output; - - fn sub(self, rhs: RHS) -> Self::Output; -} - -impl Sub for usize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for isize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for u8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for i8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for i16 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -#[track_caller] -#[lang = "panic_const_add_overflow"] -pub fn panic_const_add_overflow() -> ! { - panic("attempt to add with overflow"); -} - -#[track_caller] -#[lang = "panic_const_sub_overflow"] -pub fn panic_const_sub_overflow() -> ! { - panic("attempt to subtract with overflow"); -} - -/* - * Code - */ - static mut ONE: usize = 1; fn make_array() -> [u8; 3] { diff --git a/compiler/rustc_codegen_gcc/tests/run/closure.rs b/compiler/rustc_codegen_gcc/tests/run/closure.rs index b0d0ca4ee8d..46c47bc54ed 100644 --- a/compiler/rustc_codegen_gcc/tests/run/closure.rs +++ b/compiler/rustc_codegen_gcc/tests/run/closure.rs @@ -8,200 +8,20 @@ // Int argument: 2 // Both args: 11 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, - unboxed_closures, rustc_attrs)] -#![allow(internal_features)] +#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. -#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for i32 {} -impl Copy for u32 {} -impl Copy for u8 {} -impl Copy for i8 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] extern "C" { - pub fn puts(s: *const u8) -> i32; pub fn printf(format: *const i8, ...) -> i32; } } -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. 
- drop_in_place(to_drop); -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! { - loop {} - } -} - -#[lang = "tuple_trait"] -pub trait Tuple {} - -#[lang = "unsize"] -pub trait Unsize<T: ?Sized> {} - -#[lang = "coerce_unsized"] -pub trait CoerceUnsized<T> {} - -impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} -impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} -impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} -impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} - -#[lang = "fn_once"] -#[rustc_paren_sugar] -pub trait FnOnce<Args: Tuple> { - #[lang = "fn_once_output"] - type Output; - - extern "rust-call" fn call_once(self, args: Args) -> Self::Output; -} - -#[lang = "fn_mut"] -#[rustc_paren_sugar] -pub trait FnMut<Args: Tuple>: FnOnce<Args> { - extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output; -} - -#[lang = "add"] -trait Add<RHS = Self> { - type Output; - - fn add(self, rhs: RHS) -> Self::Output; -} - -impl Add for u8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i32 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for usize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for isize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -#[lang = "panic"] -#[track_caller] -#[no_mangle] -pub fn panic(_msg: &'static str) -> ! { - unsafe { - libc::puts("Panicking\0" as *const str as *const u8); - intrinsics::abort(); - } -} - -#[track_caller] -#[lang = "panic_const_add_overflow"] -pub fn panic_const_add_overflow() -> ! { - panic("attempt to add with overflow"); -} - -/* - * Code - */ - #[start] fn main(mut argc: isize, _argv: *const *const u8) -> isize { let string = "Arg: %d\n\0"; diff --git a/compiler/rustc_codegen_gcc/tests/run/condition.rs b/compiler/rustc_codegen_gcc/tests/run/condition.rs index 770b18a89e3..039ef94eaa7 100644 --- a/compiler/rustc_codegen_gcc/tests/run/condition.rs +++ b/compiler/rustc_codegen_gcc/tests/run/condition.rs @@ -5,304 +5,20 @@ // stdout: true // 1 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, rustc_attrs)] -#![allow(internal_features)] +#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. 
-#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for u64 {} -impl Copy for i32 {} -impl Copy for u32 {} -impl Copy for bool {} -impl Copy for u16 {} -impl Copy for i16 {} -impl Copy for char {} -impl Copy for i8 {} -impl Copy for u8 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] extern "C" { pub fn printf(format: *const i8, ...) -> i32; - pub fn puts(s: *const u8) -> i32; - } -} - -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. - drop_in_place(to_drop); -} - -#[lang = "panic"] -#[track_caller] -#[no_mangle] -pub fn panic(_msg: &'static str) -> ! { - unsafe { - libc::puts("Panicking\0" as *const str as *const u8); - intrinsics::abort(); - } -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! 
{ - loop {} - } -} - -#[lang = "add"] -trait Add<RHS = Self> { - type Output; - - fn add(self, rhs: RHS) -> Self::Output; -} - -impl Add for u8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i32 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for usize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for isize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -#[lang = "sub"] -pub trait Sub<RHS = Self> { - type Output; - - fn sub(self, rhs: RHS) -> Self::Output; -} - -impl Sub for usize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for isize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for u8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for i8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs } } -impl Sub for i16 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -#[lang = "eq"] -pub trait PartialEq<Rhs: ?Sized = Self> { - fn eq(&self, other: &Rhs) -> bool; - fn ne(&self, other: &Rhs) -> bool; -} - -impl PartialEq for u8 { - fn eq(&self, other: &u8) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &u8) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for u16 { - fn eq(&self, other: &u16) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &u16) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for u32 { - fn eq(&self, other: &u32) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &u32) -> bool { - (*self) != (*other) - } -} - - -impl PartialEq for u64 { - fn eq(&self, other: &u64) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &u64) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for usize { - fn eq(&self, other: &usize) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &usize) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for i8 { - fn eq(&self, other: &i8) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &i8) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for i32 { - fn eq(&self, other: &i32) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &i32) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for isize { - fn eq(&self, other: &isize) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &isize) -> bool { - (*self) != (*other) - } -} - -impl PartialEq for char { - fn eq(&self, other: &char) -> bool { - (*self) == (*other) - } - fn ne(&self, other: &char) -> bool { - (*self) != (*other) - } -} - -/* - * Code - */ - #[start] fn main(argc: isize, _argv: *const *const u8) -> isize { unsafe { diff --git a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs index 523544ee6bb..ed1bf72bb27 100644 --- a/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs +++ b/compiler/rustc_codegen_gcc/tests/run/fun_ptr.rs @@ -4,212 +4,20 @@ // status: 0 // stdout: 1 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, rustc_attrs)] -#![allow(internal_features)] +#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. 
-#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for i32 {} -impl Copy for u8 {} -impl Copy for i8 {} -impl Copy for i16 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] extern "C" { pub fn printf(format: *const i8, ...) -> i32; - pub fn puts(s: *const u8) -> i32; - } -} - -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. - drop_in_place(to_drop); -} - -#[lang = "panic"] -#[track_caller] -#[no_mangle] -pub fn panic(_msg: &'static str) -> ! { - unsafe { - libc::puts("Panicking\0" as *const str as *const u8); - intrinsics::abort(); - } -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! 
{ - loop {} - } -} - -#[lang = "add"] -trait Add<RHS = Self> { - type Output; - - fn add(self, rhs: RHS) -> Self::Output; -} - -impl Add for u8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i32 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for usize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for isize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -#[lang = "sub"] -pub trait Sub<RHS = Self> { - type Output; - - fn sub(self, rhs: RHS) -> Self::Output; -} - -impl Sub for usize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for isize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for u8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs } } -impl Sub for i8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for i16 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - - -/* - * Code - */ - fn i16_as_i8(a: i16) -> i8 { a as i8 } diff --git a/compiler/rustc_codegen_gcc/tests/run/operations.rs b/compiler/rustc_codegen_gcc/tests/run/operations.rs index 2e3c021d5f7..0e44fc580b8 100644 --- a/compiler/rustc_codegen_gcc/tests/run/operations.rs +++ b/compiler/rustc_codegen_gcc/tests/run/operations.rs @@ -38,8 +38,8 @@ pub trait Deref { fn deref(&self) -> &Self::Target; } -#[lang = "receiver"] -trait Receiver { +#[lang = "legacy_receiver"] +trait LegacyReceiver { } #[lang = "freeze"] diff --git a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs index c7510d16449..2b8812ad51c 100644 --- a/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs +++ b/compiler/rustc_codegen_gcc/tests/run/ptr_cast.rs @@ -4,212 +4,20 @@ // status: 0 // stdout: 1 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, rustc_attrs)] -#![allow(internal_features)] +#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. -#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for i32 {} -impl Copy for u8 {} -impl Copy for i8 {} -impl Copy for i16 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] extern "C" { pub fn printf(format: *const i8, ...) -> i32; - pub fn puts(s: *const u8) -> i32; - } -} - -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. - drop_in_place(to_drop); -} - -#[lang = "panic"] -#[track_caller] -#[no_mangle] -pub fn panic(_msg: &'static str) -> ! 
{ - unsafe { - libc::puts("Panicking\0" as *const str as *const u8); - intrinsics::abort(); - } -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! { - loop {} - } -} - -#[lang = "add"] -trait Add<RHS = Self> { - type Output; - - fn add(self, rhs: RHS) -> Self::Output; -} - -impl Add for u8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i8 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for i32 { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for usize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -impl Add for isize { - type Output = Self; - - fn add(self, rhs: Self) -> Self { - self + rhs - } -} - -#[lang = "sub"] -pub trait Sub<RHS = Self> { - type Output; - - fn sub(self, rhs: RHS) -> Self::Output; -} - -impl Sub for usize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for isize { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for u8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs } } -impl Sub for i8 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - -impl Sub for i16 { - type Output = Self; - - fn sub(self, rhs: Self) -> Self { - self - rhs - } -} - - -/* - * Code - */ - static mut ONE: usize = 1; fn make_array() -> [u8; 3] { diff --git a/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs index 8d40deb8c85..f2a5a2e4384 100644 --- a/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs +++ b/compiler/rustc_codegen_gcc/tests/run/return-tuple.rs @@ -15,18 +15,18 @@ #[lang = "copy"] pub unsafe trait Copy {} -unsafe impl Copy for bool {} -unsafe impl Copy for u8 {} -unsafe impl Copy for u16 {} -unsafe impl Copy for u32 {} -unsafe impl Copy for u64 {} -unsafe impl Copy for usize {} -unsafe impl Copy for i8 {} -unsafe impl Copy for i16 {} -unsafe impl Copy for i32 {} -unsafe impl Copy for isize {} -unsafe impl Copy for f32 {} -unsafe impl Copy for char {} +impl Copy for bool {} +impl Copy for u8 {} +impl Copy for u16 {} +impl Copy for u32 {} +impl Copy for u64 {} +impl Copy for usize {} +impl Copy for i8 {} +impl Copy for i16 {} +impl Copy for i32 {} +impl Copy for isize {} +impl Copy for f32 {} +impl Copy for char {} mod libc { #[link(name = "c")] @@ -43,8 +43,8 @@ mod libc { #[lang = "sized"] pub trait Sized {} -#[lang = "receiver"] -trait Receiver { +#[lang = "legacy_receiver"] +trait LegacyReceiver { } #[lang = "freeze"] diff --git a/compiler/rustc_codegen_gcc/tests/run/slice.rs b/compiler/rustc_codegen_gcc/tests/run/slice.rs index 35ad594ecde..fba93fc1554 100644 --- a/compiler/rustc_codegen_gcc/tests/run/slice.rs +++ b/compiler/rustc_codegen_gcc/tests/run/slice.rs @@ -4,36 +4,12 @@ // status: 0 // stdout: 5 -#![feature(arbitrary_self_types, auto_traits, lang_items, no_core, start, intrinsics, rustc_attrs)] -#![allow(internal_features)] 
+#![feature(no_core, start)] #![no_std] #![no_core] -/* - * Core - */ - -// Because we don't have core yet. -#[lang = "sized"] -pub trait Sized {} - -#[lang = "copy"] -trait Copy { -} - -impl Copy for isize {} -impl Copy for usize {} -impl Copy for i32 {} -impl Copy for u32 {} -impl<T: ?Sized> Copy for *mut T {} - -#[lang = "receiver"] -trait Receiver { -} - -#[lang = "freeze"] -pub(crate) unsafe auto trait Freeze {} +extern crate mini_core; mod libc { #[link(name = "c")] @@ -42,79 +18,6 @@ mod libc { } } -#[lang = "index"] -pub trait Index<Idx: ?Sized> { - type Output: ?Sized; - fn index(&self, index: Idx) -> &Self::Output; -} - -impl<T> Index<usize> for [T; 3] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -impl<T> Index<usize> for [T] { - type Output = T; - - fn index(&self, index: usize) -> &Self::Output { - &self[index] - } -} - -#[lang = "unsize"] -pub trait Unsize<T: ?Sized> {} - -#[lang = "coerce_unsized"] -pub trait CoerceUnsized<T> {} - -impl<'a, 'b: 'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a U> for &'b T {} -impl<'a, T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<&'a mut U> for &'a mut T {} -impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*const U> for *const T {} -impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<*mut U> for *mut T {} - -#[lang = "drop_in_place"] -#[allow(unconditional_recursion)] -pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) { - // Code here does not matter - this is replaced by the - // real drop glue by the compiler. - drop_in_place(to_drop); -} - -#[lang = "panic_location"] -struct PanicLocation { - file: &'static str, - line: u32, - column: u32, -} - -#[lang = "panic_bounds_check"] -#[track_caller] -#[no_mangle] -fn panic_bounds_check(index: usize, len: usize) -> ! { - unsafe { - libc::printf("index out of bounds: the len is %d but the index is %d\n\0" as *const str as *const i8, len, index); - intrinsics::abort(); - } -} - -mod intrinsics { - use super::Sized; - - #[rustc_nounwind] - #[rustc_intrinsic] - #[rustc_intrinsic_must_be_overridden] - pub fn abort() -> ! 
{ - loop {} - } -} - -/* - * Code - */ - static mut TWO: usize = 2; fn index_slice(s: &[u32]) -> u32 { diff --git a/compiler/rustc_codegen_gcc/tests/run/volatile2.rs b/compiler/rustc_codegen_gcc/tests/run/volatile2.rs new file mode 100644 index 00000000000..a177b817ab3 --- /dev/null +++ b/compiler/rustc_codegen_gcc/tests/run/volatile2.rs @@ -0,0 +1,113 @@ +// Compiler: +// +// Run-time: +// status: 0 + +mod libc { + #[link(name = "c")] + extern "C" { + pub fn puts(s: *const u8) -> i32; + + pub fn sigaction(signum: i32, act: *const sigaction, oldact: *mut sigaction) -> i32; + pub fn mmap(addr: *mut (), len: usize, prot: i32, flags: i32, fd: i32, offset: i64) -> *mut (); + pub fn mprotect(addr: *mut (), len: usize, prot: i32) -> i32; + } + + pub const PROT_READ: i32 = 1; + pub const PROT_WRITE: i32 = 2; + pub const MAP_PRIVATE: i32 = 0x0002; + pub const MAP_ANONYMOUS: i32 = 0x0020; + pub const MAP_FAILED: *mut u8 = !0 as *mut u8; + + /// glibc sigaction + #[repr(C)] + pub struct sigaction { + pub sa_sigaction: Option<unsafe extern "C" fn(i32, *mut (), *mut ())>, + pub sa_mask: [u32; 32], + pub sa_flags: i32, + pub sa_restorer: Option<unsafe extern "C" fn()>, + } + + pub const SA_SIGINFO: i32 = 0x00000004; + pub const SIGSEGV: i32 = 11; +} + +static mut COUNT: u32 = 0; +static mut STORAGE: *mut u8 = core::ptr::null_mut(); +const PAGE_SIZE: usize = 1 << 15; + +fn main() { + unsafe { + // Register a segfault handler + libc::sigaction( + libc::SIGSEGV, + &libc::sigaction { + sa_sigaction: Some(segv_handler), + sa_flags: libc::SA_SIGINFO, + ..core::mem::zeroed() + }, + core::ptr::null_mut(), + ); + + STORAGE = libc::mmap( + core::ptr::null_mut(), + PAGE_SIZE * 2, + 0, + libc::MAP_PRIVATE | libc::MAP_ANONYMOUS, + -1, + 0, + ).cast(); + if STORAGE == libc::MAP_FAILED { + panic!("error: mmap failed"); + } + + let p_count = (&mut COUNT) as *mut u32; + p_count.write_volatile(0); + + // Trigger segfaults + STORAGE.add(0).write_volatile(1); + STORAGE.add(PAGE_SIZE).write_volatile(1); + STORAGE.add(0).write_volatile(1); + STORAGE.add(PAGE_SIZE).write_volatile(1); + STORAGE.add(0).write_volatile(1); + STORAGE.add(PAGE_SIZE).write_volatile(1); + STORAGE.add(0).read_volatile(); + STORAGE.add(PAGE_SIZE).read_volatile(); + STORAGE.add(0).read_volatile(); + STORAGE.add(PAGE_SIZE).read_volatile(); + STORAGE.add(0).read_volatile(); + STORAGE.add(PAGE_SIZE).read_volatile(); + STORAGE.add(0).write_volatile(1); + STORAGE.add(PAGE_SIZE).write_volatile(1); + + // The segfault handler should have been called for every `write_volatile` and + // `read_volatile` in `STORAGE`. If the compiler ignores volatility, some of these writes + // will be combined, causing a different number of segfaults. + // + // This `p_count` read is done by a volatile read. If the compiler + // ignores volatility, the compiler will speculate that `*p_count` is + // unchanged and remove this check, failing the test. 
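+        //
+        // The expected count is 14: the 6 volatile writes, 6 volatile reads, and 2 final
+        // volatile writes above each fault exactly once, because the handler flips which of
+        // the two pages is accessible after every access.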
+ if p_count.read_volatile() != 14 { + panic!("error: segfault count mismatch: {}", p_count.read_volatile()); + } + } +} + +unsafe extern "C" fn segv_handler(_: i32, _: *mut (), _: *mut ()) { + let p_count = (&mut COUNT) as *mut u32; + p_count.write_volatile(p_count.read_volatile() + 1); + let count = p_count.read_volatile(); + + // Toggle the protected page so that the handler will be called for + // each `write_volatile` + libc::mprotect( + STORAGE.cast(), + PAGE_SIZE, + if count % 2 == 1 { libc::PROT_READ | libc::PROT_WRITE } else { 0 }, + ); + libc::mprotect( + STORAGE.add(PAGE_SIZE).cast(), + PAGE_SIZE, + if count % 2 == 0 { libc::PROT_READ | libc::PROT_WRITE } else { 0 }, + ); +} diff --git a/compiler/rustc_codegen_llvm/src/abi.rs b/compiler/rustc_codegen_llvm/src/abi.rs index 1d35138b013..31ee0eeca11 100644 --- a/compiler/rustc_codegen_llvm/src/abi.rs +++ b/compiler/rustc_codegen_llvm/src/abi.rs @@ -1,3 +1,4 @@ +use std::borrow::Borrow; use std::cmp; use libc::c_uint; @@ -312,7 +313,7 @@ impl<'ll, 'tcx> ArgAbiBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { pub(crate) trait FnAbiLlvmExt<'ll, 'tcx> { fn llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; fn ptr_to_llvm_type(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Type; - fn llvm_cconv(&self) -> llvm::CallConv; + fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv; /// Apply attributes to a function declaration/definition. fn apply_attrs_llfn( @@ -404,8 +405,8 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { cx.type_ptr_ext(cx.data_layout().instruction_address_space) } - fn llvm_cconv(&self) -> llvm::CallConv { - self.conv.into() + fn llvm_cconv(&self, cx: &CodegenCx<'ll, 'tcx>) -> llvm::CallConv { + llvm::CallConv::from_conv(self.conv, cx.tcx.sess.target.arch.borrow()) } fn apply_attrs_llfn( @@ -617,7 +618,7 @@ impl<'ll, 'tcx> FnAbiLlvmExt<'ll, 'tcx> for FnAbi<'tcx, Ty<'tcx>> { } } - let cconv = self.llvm_cconv(); + let cconv = self.llvm_cconv(&bx.cx); if cconv != llvm::CCallConv { llvm::SetInstructionCallConv(callsite, cconv); } @@ -655,8 +656,8 @@ impl<'tcx> AbiBuilderMethods<'tcx> for Builder<'_, '_, 'tcx> { } } -impl From<Conv> for llvm::CallConv { - fn from(conv: Conv) -> Self { +impl llvm::CallConv { + pub fn from_conv(conv: Conv, arch: &str) -> Self { match conv { Conv::C | Conv::Rust @@ -666,6 +667,15 @@ impl From<Conv> for llvm::CallConv { Conv::Cold => llvm::ColdCallConv, Conv::PreserveMost => llvm::PreserveMost, Conv::PreserveAll => llvm::PreserveAll, + Conv::GpuKernel => { + if arch == "amdgpu" { + llvm::AmdgpuKernel + } else if arch == "nvptx64" { + llvm::PtxKernel + } else { + panic!("Architecture {arch} does not support GpuKernel calling convention"); + } + } Conv::AvrInterrupt => llvm::AvrInterrupt, Conv::AvrNonBlockingInterrupt => llvm::AvrNonBlockingInterrupt, Conv::ArmAapcs => llvm::ArmAapcsCallConv, diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs b/compiler/rustc_codegen_llvm/src/attributes.rs index f8454fd9960..95e0481b035 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -37,7 +37,9 @@ fn inline_attr<'ll>(cx: &CodegenCx<'ll, '_>, inline: InlineAttr) -> Option<&'ll } match inline { InlineAttr::Hint => Some(AttributeKind::InlineHint.create_attr(cx.llcx)), - InlineAttr::Always => Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)), + InlineAttr::Always | InlineAttr::Force { .. 
} => { + Some(AttributeKind::AlwaysInline.create_attr(cx.llcx)) + } InlineAttr::Never => { if cx.sess().target.arch != "amdgpu" { Some(AttributeKind::NoInline.create_attr(cx.llcx)) @@ -472,7 +474,11 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( let allocated_pointer = AttributeKind::AllocatedPointer.create_attr(cx.llcx); attributes::apply_to_llfn(llfn, AttributePlace::Argument(0), &[allocated_pointer]); } - if let Some(align) = codegen_fn_attrs.alignment { + // function alignment can be set globally with the `-Zmin-function-alignment=<n>` flag; + // the alignment from a `#[repr(align(<n>))]` is used if it specifies a higher alignment. + if let Some(align) = + Ord::max(cx.tcx.sess.opts.unstable_opts.min_function_alignment, codegen_fn_attrs.alignment) + { llvm::set_alignment(llfn, align); } if let Some(backchain) = backchain_attr(cx) { diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 4adf99e91d0..78c759bbe8c 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -1,7 +1,6 @@ use std::collections::BTreeMap; use std::ffi::{CStr, CString}; use std::fs::File; -use std::mem::ManuallyDrop; use std::path::Path; use std::sync::Arc; use std::{io, iter, slice}; @@ -9,7 +8,7 @@ use std::{io, iter, slice}; use object::read::archive::ArchiveFile; use rustc_codegen_ssa::back::lto::{LtoModuleCodegen, SerializedModule, ThinModule, ThinShared}; use rustc_codegen_ssa::back::symbol_export; -use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, TargetMachineFactoryConfig}; +use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput}; use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::fx::FxHashMap; @@ -706,18 +705,15 @@ pub(crate) unsafe fn optimize_thin_module( let dcx = dcx.handle(); let module_name = &thin_module.shared.module_names[thin_module.idx]; - let tm_factory_config = TargetMachineFactoryConfig::new(cgcx, module_name.to_str().unwrap()); - let tm = (cgcx.tm_factory)(tm_factory_config).map_err(|e| write::llvm_err(dcx, e))?; // Right now the implementation we've got only works over serialized // modules, so we create a fresh new LLVM context and parse the module // into that context. One day, however, we may do this for upstream // crates but for locally codegened modules we may be able to reuse // that LLVM Context and Module. - let llcx = unsafe { llvm::LLVMRustContextCreate(cgcx.fewer_names) }; - let llmod_raw = parse_module(llcx, module_name, thin_module.data(), dcx)? 
as *const _; + let module_llvm = ModuleLlvm::parse(cgcx, module_name, thin_module.data(), dcx)?; let mut module = ModuleCodegen { - module_llvm: ModuleLlvm { llmod_raw, llcx, tm: ManuallyDrop::new(tm) }, + module_llvm, name: thin_module.name().to_string(), kind: ModuleKind::Regular, }; @@ -737,11 +733,7 @@ pub(crate) unsafe fn optimize_thin_module( { let _timer = cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); - if unsafe { - !llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) - } { - return Err(write::llvm_err(dcx, LlvmError::PrepareThinLtoModule)); - } + unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target) }; save_temp_bitcode(cgcx, &module, "thin-lto-after-rename"); } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index d8fbe51b975..65345751842 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -741,7 +741,10 @@ impl<'ll, 'tcx> MiscCodegenMethods<'tcx> for CodegenCx<'ll, 'tcx> { if self.get_declared_value(entry_name).is_none() { Some(self.declare_entry_fn( entry_name, - self.sess().target.entry_abi.into(), + llvm::CallConv::from_conv( + self.sess().target.entry_abi, + self.sess().target.arch.borrow(), + ), llvm::UnnamedAddr::Global, fn_type, )) diff --git a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs index 07bd0f4d1c1..e545ce386ed 100644 --- a/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs +++ b/compiler/rustc_codegen_llvm/src/debuginfo/create_scope_map.rs @@ -4,7 +4,7 @@ use rustc_codegen_ssa::mir::debuginfo::{DebugScope, FunctionDebugContext}; use rustc_codegen_ssa::traits::*; use rustc_data_structures::fx::FxHashMap; use rustc_index::Idx; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::{Body, SourceScope}; use rustc_middle::ty::layout::{FnAbiOf, HasTypingEnv}; use rustc_middle::ty::{self, Instance}; @@ -27,7 +27,7 @@ pub(crate) fn compute_mir_scopes<'ll, 'tcx>( ) { // Find all scopes with variables defined in them. let variables = if cx.sess().opts.debuginfo == DebugInfo::Full { - let mut vars = BitSet::new_empty(mir.source_scopes.len()); + let mut vars = DenseBitSet::new_empty(mir.source_scopes.len()); // FIXME(eddyb) take into account that arguments always have debuginfo, // irrespective of their name (assuming full debuginfo is enabled). // NOTE(eddyb) actually, on second thought, those are always in the @@ -40,7 +40,7 @@ pub(crate) fn compute_mir_scopes<'ll, 'tcx>( // Nothing to emit, of course. None }; - let mut instantiated = BitSet::new_empty(mir.source_scopes.len()); + let mut instantiated = DenseBitSet::new_empty(mir.source_scopes.len()); let mut discriminators = FxHashMap::default(); // Instantiate all scopes. 
for idx in 0..mir.source_scopes.len() { @@ -63,9 +63,9 @@ fn make_mir_scope<'ll, 'tcx>( cx: &CodegenCx<'ll, 'tcx>, instance: Instance<'tcx>, mir: &Body<'tcx>, - variables: &Option<BitSet<SourceScope>>, + variables: &Option<DenseBitSet<SourceScope>>, debug_context: &mut FunctionDebugContext<'tcx, &'ll DIScope, &'ll DILocation>, - instantiated: &mut BitSet<SourceScope>, + instantiated: &mut DenseBitSet<SourceScope>, discriminators: &mut FxHashMap<BytePos, u32>, scope: SourceScope, ) { diff --git a/compiler/rustc_codegen_llvm/src/declare.rs b/compiler/rustc_codegen_llvm/src/declare.rs index 3ec386f6b07..c72b5b5611f 100644 --- a/compiler/rustc_codegen_llvm/src/declare.rs +++ b/compiler/rustc_codegen_llvm/src/declare.rs @@ -125,7 +125,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> { let llfn = declare_raw_fn( self, name, - fn_abi.llvm_cconv(), + fn_abi.llvm_cconv(self), llvm::UnnamedAddr::Global, llvm::Visibility::Default, fn_abi.llvm_type(self), diff --git a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs index bb324ee682c..ec6c84f6f25 100644 --- a/compiler/rustc_codegen_llvm/src/llvm/ffi.rs +++ b/compiler/rustc_codegen_llvm/src/llvm/ffi.rs @@ -120,6 +120,7 @@ pub enum CallConv { X86_Intr = 83, AvrNonBlockingInterrupt = 84, AvrInterrupt = 85, + AmdgpuKernel = 91, } /// Must match the layout of `LLVMLinkage`. @@ -2374,7 +2375,7 @@ unsafe extern "C" { Data: &ThinLTOData, Module: &Module, Target: &TargetMachine, - ) -> bool; + ); pub fn LLVMRustPrepareThinLTOResolveWeak(Data: &ThinLTOData, Module: &Module) -> bool; pub fn LLVMRustPrepareThinLTOInternalize(Data: &ThinLTOData, Module: &Module) -> bool; pub fn LLVMRustPrepareThinLTOImport( diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index e4b3ad19801..df35b5e8426 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -2451,10 +2451,10 @@ fn add_order_independent_options( } if sess.target.os == "emscripten" { - cmd.cc_arg(if sess.panic_strategy() == PanicStrategy::Abort { - "-sDISABLE_EXCEPTION_CATCHING=1" - } else if sess.opts.unstable_opts.emscripten_wasm_eh { + cmd.cc_arg(if sess.opts.unstable_opts.emscripten_wasm_eh { "-fwasm-exceptions" + } else if sess.panic_strategy() == PanicStrategy::Abort { + "-sDISABLE_EXCEPTION_CATCHING=1" } else { "-sDISABLE_EXCEPTION_CATCHING=0" }); diff --git a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs index cdb72aba36f..1daa17fbaf3 100644 --- a/compiler/rustc_codegen_ssa/src/codegen_attrs.rs +++ b/compiler/rustc_codegen_ssa/src/codegen_attrs.rs @@ -18,6 +18,7 @@ use rustc_session::parse::feature_err; use rustc_session::{Session, lint}; use rustc_span::{Ident, Span, sym}; use rustc_target::spec::{SanitizerSet, abi}; +use tracing::debug; use crate::errors; use crate::target_features::{check_target_feature_trait_unsafe, from_target_feature_attr}; @@ -249,10 +250,14 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { } } sym::target_feature => { - if !tcx.is_closure_like(did.to_def_id()) - && let Some(fn_sig) = fn_sig() - && fn_sig.skip_binder().safety().is_safe() - { + let Some(sig) = tcx.hir_node_by_def_id(did).fn_sig() else { + tcx.dcx().span_delayed_bug(attr.span, "target_feature applied to non-fn"); + continue; + }; + let safe_target_features = + matches!(sig.header.safety, hir::HeaderSafety::SafeTargetFeatures); + codegen_fn_attrs.safe_target_features = safe_target_features; + if 
safe_target_features { if tcx.sess.target.is_like_wasm || tcx.sess.opts.actually_rustdoc { // The `#[target_feature]` attribute is allowed on // WebAssembly targets on all functions, including safe @@ -525,6 +530,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { if !attr.has_name(sym::inline) { return ia; } + if attr.is_word() { InlineAttr::Hint } else if let Some(ref items) = attr.meta_item_list() { @@ -547,6 +553,20 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { ia } }); + codegen_fn_attrs.inline = attrs.iter().fold(codegen_fn_attrs.inline, |ia, attr| { + if !attr.has_name(sym::rustc_force_inline) || !tcx.features().rustc_attrs() { + return ia; + } + + if attr.is_word() { + InlineAttr::Force { attr_span: attr.span, reason: None } + } else if let Some(val) = attr.value_str() { + InlineAttr::Force { attr_span: attr.span, reason: Some(val) } + } else { + debug!("`rustc_force_inline` not checked by attribute validation"); + ia + } + }); // naked function MUST NOT be inlined! This attribute is required for the rust compiler itself, // but not for the code generation backend because at that point the naked function will just be @@ -596,7 +616,7 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { // is probably a poor usage of `#[inline(always)]` and easily avoided by not using the attribute. if tcx.features().target_feature_11() && tcx.is_closure_like(did.to_def_id()) - && codegen_fn_attrs.inline != InlineAttr::Always + && !codegen_fn_attrs.inline.always() { let owner_id = tcx.parent(did.to_def_id()); if tcx.def_kind(owner_id).has_codegen_attrs() { @@ -606,22 +626,28 @@ fn codegen_fn_attrs(tcx: TyCtxt<'_>, did: LocalDefId) -> CodegenFnAttrs { } } - // If a function uses #[target_feature] it can't be inlined into general + // If a function uses `#[target_feature]` it can't be inlined into general // purpose functions as they wouldn't have the right target features - // enabled. For that reason we also forbid #[inline(always)] as it can't be + // enabled. For that reason we also forbid `#[inline(always)]` as it can't be // respected. - if !codegen_fn_attrs.target_features.is_empty() && codegen_fn_attrs.inline == InlineAttr::Always + // + // `#[rustc_force_inline]` doesn't need to be prohibited here, only + // `#[inline(always)]`, as forced inlining is implemented entirely within + // rustc (and so the MIR inliner can do any necessary checks for compatible target + // features). + // + // This sidesteps the LLVM blockers in enabling `target_features` + + // `inline(always)` to be used together (see rust-lang/rust#116573 and + // llvm/llvm-project#70563). 
+ if !codegen_fn_attrs.target_features.is_empty() + && matches!(codegen_fn_attrs.inline, InlineAttr::Always) { if let Some(span) = inline_span { - tcx.dcx().span_err( - span, - "cannot use `#[inline(always)]` with \ - `#[target_feature]`", - ); + tcx.dcx().span_err(span, "cannot use `#[inline(always)]` with `#[target_feature]`"); } } - if !codegen_fn_attrs.no_sanitize.is_empty() && codegen_fn_attrs.inline == InlineAttr::Always { + if !codegen_fn_attrs.no_sanitize.is_empty() && codegen_fn_attrs.inline.always() { if let (Some(no_sanitize_span), Some(inline_span)) = (no_sanitize_span, inline_span) { let hir_id = tcx.local_def_id_to_hir_id(did); tcx.node_span_lint( diff --git a/compiler/rustc_codegen_ssa/src/mir/analyze.rs b/compiler/rustc_codegen_ssa/src/mir/analyze.rs index 43b8230c679..23baab3124e 100644 --- a/compiler/rustc_codegen_ssa/src/mir/analyze.rs +++ b/compiler/rustc_codegen_ssa/src/mir/analyze.rs @@ -2,7 +2,7 @@ //! which do not. use rustc_data_structures::graph::dominators::Dominators; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{self, DefLocation, Location, TerminatorKind, traversal}; @@ -16,7 +16,7 @@ use crate::traits::*; pub(crate) fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( fx: &FunctionCx<'a, 'tcx, Bx>, traversal_order: &[mir::BasicBlock], -) -> BitSet<mir::Local> { +) -> DenseBitSet<mir::Local> { let mir = fx.mir; let dominators = mir.basic_blocks.dominators(); let locals = mir @@ -44,7 +44,7 @@ pub(crate) fn non_ssa_locals<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( analyzer.visit_basic_block_data(bb, data); } - let mut non_ssa_locals = BitSet::new_empty(analyzer.locals.len()); + let mut non_ssa_locals = DenseBitSet::new_empty(analyzer.locals.len()); for (local, kind) in analyzer.locals.iter_enumerated() { if matches!(kind, LocalKind::Memory) { non_ssa_locals.insert(local); diff --git a/compiler/rustc_codegen_ssa/src/mir/mod.rs b/compiler/rustc_codegen_ssa/src/mir/mod.rs index 62f69af3f2f..3a896071bc6 100644 --- a/compiler/rustc_codegen_ssa/src/mir/mod.rs +++ b/compiler/rustc_codegen_ssa/src/mir/mod.rs @@ -1,7 +1,7 @@ use std::iter; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::{UnwindTerminateReason, traversal}; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, HasTypingEnv, TyAndLayout}; @@ -293,7 +293,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( // So drop the builder of `start_llbb` to avoid having two at the same time. drop(start_bx); - let mut unreached_blocks = BitSet::new_filled(mir.basic_blocks.len()); + let mut unreached_blocks = DenseBitSet::new_filled(mir.basic_blocks.len()); // Codegen the body of each reachable block using our reverse postorder list. 
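// User-level illustration of the rule enforced above: combining `#[target_feature]`
// with `#[inline(always)]` is rejected ("cannot use `#[inline(always)]` with
// `#[target_feature]`"), while forced inlining via the internal, feature-gated
// `#[rustc_force_inline]` attribute is deliberately exempt, since the MIR inliner
// can check target-feature compatibility itself. This snippet is expected to be
// rejected on x86-64:
#[target_feature(enable = "avx2")]
#[inline(always)] // error: cannot use `#[inline(always)]` with `#[target_feature]`
unsafe fn uses_avx2() {}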
for bb in traversal_order { fx.codegen_block(bb); @@ -316,7 +316,7 @@ pub fn codegen_mir<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( fn arg_local_refs<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( bx: &mut Bx, fx: &mut FunctionCx<'a, 'tcx, Bx>, - memory_locals: &BitSet<mir::Local>, + memory_locals: &DenseBitSet<mir::Local>, ) -> Vec<LocalRef<'tcx, Bx::Value>> { let mir = fx.mir; let mut idx = 0; diff --git a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs index cac3cc587cb..8df270abc81 100644 --- a/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs +++ b/compiler/rustc_codegen_ssa/src/mir/naked_asm.rs @@ -132,9 +132,13 @@ fn prefix_and_suffix<'tcx>( let attrs = tcx.codegen_fn_attrs(instance.def_id()); let link_section = attrs.link_section.map(|symbol| symbol.as_str().to_string()); - let align = attrs.alignment.map(|a| a.bytes()).unwrap_or(4); - // See https://sourceware.org/binutils/docs/as/ARM-Directives.html for info on these directives. + // function alignment can be set globally with the `-Zmin-function-alignment=<n>` flag; + // the alignment from a `#[repr(align(<n>))]` is used if it specifies a higher alignment. + // if no alignment is specified, an alignment of 4 bytes is used. + let min_function_alignment = tcx.sess.opts.unstable_opts.min_function_alignment; + let align = Ord::max(min_function_alignment, attrs.alignment).map(|a| a.bytes()).unwrap_or(4); + // In particular, `.arm` can also be written `.code 32` and `.thumb` as `.code 16`. let (arch_prefix, arch_suffix) = if is_arm { ( diff --git a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs index 3b62148abb7..eb4ef599b82 100644 --- a/compiler/rustc_codegen_ssa/src/mir/rvalue.rs +++ b/compiler/rustc_codegen_ssa/src/mir/rvalue.rs @@ -10,9 +10,9 @@ use rustc_session::config::OptLevel; use rustc_span::{DUMMY_SP, Span}; use tracing::{debug, instrument}; -use super::FunctionCx; use super::operand::{OperandRef, OperandValue}; use super::place::PlaceRef; +use super::{FunctionCx, LocalRef}; use crate::common::IntPredicate; use crate::traits::*; use crate::{MemFlags, base}; @@ -93,23 +93,37 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { return; } - if let OperandValue::Immediate(v) = cg_elem.val { + let try_init_all_same = |bx: &mut Bx, v| { let start = dest.val.llval; let size = bx.const_usize(dest.layout.size.bytes()); - // Use llvm.memset.p0i8.* to initialize all zero arrays - if bx.cx().const_to_opt_u128(v, false) == Some(0) { - let fill = bx.cx().const_u8(0); - bx.memset(start, fill, size, dest.val.align, MemFlags::empty()); - return; + // Use llvm.memset.p0i8.* to initialize all same byte arrays + if let Some(int) = bx.cx().const_to_opt_u128(v, false) { + let bytes = &int.to_le_bytes()[..cg_elem.layout.size.bytes_usize()]; + let first = bytes[0]; + if bytes[1..].iter().all(|&b| b == first) { + let fill = bx.cx().const_u8(first); + bx.memset(start, fill, size, dest.val.align, MemFlags::empty()); + return true; + } } // Use llvm.memset.p0i8.* to initialize byte arrays let v = bx.from_immediate(v); if bx.cx().val_ty(v) == bx.cx().type_i8() { bx.memset(start, v, size, dest.val.align, MemFlags::empty()); - return; + return true; } + false + }; + + match cg_elem.val { + OperandValue::Immediate(v) => { + if try_init_all_same(bx, v) { + return; + } + } + _ => (), } let count = self @@ -593,6 +607,14 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { self.codegen_place_to_pointer(bx, place, mk_ptr) } + 
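// A self-contained version of the "all bytes equal" check used above to turn
// `[x; N]` initialization into a single memset: look at the element's bytes
// (little-endian, truncated to the element size) and, if they are all the same,
// return that byte as the memset fill value.
fn memset_fill_byte(value: u128, elem_size_bytes: usize) -> Option<u8> {
    let bytes = &value.to_le_bytes()[..elem_size_bytes];
    let first = *bytes.first()?;
    bytes[1..].iter().all(|&b| b == first).then_some(first)
}

fn main() {
    assert_eq!(memset_fill_byte(0, 4), Some(0)); // [0u32; N] -> memset with 0
    assert_eq!(memset_fill_byte(0x4242_4242, 4), Some(0x42)); // repeated byte pattern
    assert_eq!(memset_fill_byte(0x0000_0001, 4), None); // bytes differ, no memset
}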
mir::Rvalue::Len(place) => { + let size = self.evaluate_array_len(bx, place); + OperandRef { + val: OperandValue::Immediate(size), + layout: bx.cx().layout_of(bx.tcx().types.usize), + } + } + mir::Rvalue::BinaryOp(op_with_overflow, box (ref lhs, ref rhs)) if let Some(op) = op_with_overflow.overflowing_to_wrapping() => { @@ -792,6 +814,24 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } + fn evaluate_array_len(&mut self, bx: &mut Bx, place: mir::Place<'tcx>) -> Bx::Value { + // ZST are passed as operands and require special handling + // because codegen_place() panics if Local is operand. + if let Some(index) = place.as_local() { + if let LocalRef::Operand(op) = self.locals[index] { + if let ty::Array(_, n) = op.layout.ty.kind() { + let n = n + .try_to_target_usize(bx.tcx()) + .expect("expected monomorphic const in codegen"); + return bx.cx().const_usize(n); + } + } + } + // use common size calculation for non zero-sized types + let cg_value = self.codegen_place(bx, place.as_ref()); + cg_value.len(bx.cx()) + } + /// Codegen an `Rvalue::RawPtr` or `Rvalue::Ref` fn codegen_place_to_pointer( &mut self, @@ -1063,6 +1103,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::Rvalue::Ref(..) | mir::Rvalue::CopyForDeref(..) | mir::Rvalue::RawPtr(..) | + mir::Rvalue::Len(..) | mir::Rvalue::Cast(..) | // (*) mir::Rvalue::ShallowInitBox(..) | // (*) mir::Rvalue::BinaryOp(..) | diff --git a/compiler/rustc_const_eval/messages.ftl b/compiler/rustc_const_eval/messages.ftl index 0c2242b810b..d4bfb781320 100644 --- a/compiler/rustc_const_eval/messages.ftl +++ b/compiler/rustc_const_eval/messages.ftl @@ -12,8 +12,6 @@ const_eval_already_reported = const_eval_assume_false = `assume` called with `false` -const_eval_await_non_const = - cannot convert `{$ty}` into a future in {const_eval_const_context}s const_eval_bounds_check_failed = indexing out of bounds: the len is {$len} but the index is {$index} const_eval_call_nonzero_intrinsic = @@ -23,11 +21,6 @@ const_eval_closure_call = closures need an RFC before allowed to be called in {const_eval_const_context}s const_eval_closure_fndef_not_const = function defined here, but it is not `const` -const_eval_closure_non_const = - cannot call non-const closure in {const_eval_const_context}s - -const_eval_conditionally_const_call = - cannot call conditionally-const {$def_descr} `{$def_path_str}` in {const_eval_const_context}s const_eval_consider_dereferencing = consider dereferencing here @@ -62,10 +55,6 @@ const_eval_dealloc_incorrect_layout = const_eval_dealloc_kind_mismatch = deallocating {$alloc}, which is {$alloc_kind} memory, using {$kind} deallocation operation -const_eval_deref_coercion_non_const = - cannot perform deref coercion on `{$ty}` in {const_eval_const_context}s - .note = attempting to deref into `{$target_ty}` - .target_note = deref defined here const_eval_deref_function_pointer = accessing {$allocation} which contains a function const_eval_deref_vtable_pointer = @@ -109,9 +98,6 @@ const_eval_extern_type_field = `extern type` field does not have a known offset const_eval_fn_ptr_call = function pointers need an RFC before allowed to be called in {const_eval_const_context}s -const_eval_for_loop_into_iter_non_const = - cannot use `for` loop on `{$ty}` in {const_eval_const_context}s - const_eval_frame_note = {$times -> [0] {const_eval_frame_note_inner} *[other] [... {$times} additional calls {const_eval_frame_note_inner} ...] 
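// The `Rvalue::Len` handling above distinguishes two cases: for a fixed-size array
// the length is a monomorphic constant taken from the type, while for a slice it
// must be read from the place's length metadata. The same distinction is visible
// in ordinary Rust:
fn main() {
    let arr = [7u8; 4];
    let slice: &[u8] = &arr[..3];
    assert_eq!(arr.len(), 4); // known statically from the type `[u8; 4]`
    assert_eq!(slice.len(), 3); // carried at runtime in the wide pointer's metadata
}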
@@ -216,9 +202,6 @@ const_eval_long_running = .label = the const evaluator is currently interpreting this expression .help = the constant being evaluated -const_eval_match_eq_non_const = cannot match on `{$ty}` in {const_eval_const_context}s - .note = `{$ty}` cannot be compared in compile-time, and therefore cannot be used in `match`es - const_eval_max_num_nodes_in_const = maximum number of nodes exceeded in constant {$global_const_id} const_eval_memory_access_test = memory access failed @@ -249,11 +232,26 @@ const_eval_mutable_ref_escaping = If you really want global mutable state, try using an interior mutable `static` or a `static mut`. const_eval_nested_static_in_thread_local = #[thread_local] does not support implicit nested statics, please create explicit static items and refer to them instead + +const_eval_non_const_await = + cannot convert `{$ty}` into a future in {const_eval_const_context}s + +const_eval_non_const_closure = + cannot call {$non_or_conditionally}-const closure in {const_eval_const_context}s + +const_eval_non_const_deref_coercion = + cannot perform {$non_or_conditionally}-const deref coercion on `{$ty}` in {const_eval_const_context}s + .note = attempting to deref into `{$target_ty}` + .target_note = deref defined here + const_eval_non_const_fmt_macro_call = - cannot call non-const formatting macro in {const_eval_const_context}s + cannot call {$non_or_conditionally}-const formatting macro in {const_eval_const_context}s const_eval_non_const_fn_call = - cannot call non-const {$def_descr} `{$def_path_str}` in {const_eval_const_context}s + cannot call {$non_or_conditionally}-const {$def_descr} `{$def_path_str}` in {const_eval_const_context}s + +const_eval_non_const_for_loop_into_iter = + cannot use `for` loop on `{$ty}` in {const_eval_const_context}s const_eval_non_const_impl = impl defined here, but it is not `const` @@ -261,6 +259,20 @@ const_eval_non_const_impl = const_eval_non_const_intrinsic = cannot call non-const intrinsic `{$name}` in {const_eval_const_context}s +const_eval_non_const_match_eq = cannot match on `{$ty}` in {const_eval_const_context}s + .note = `{$ty}` cannot be compared in compile-time, and therefore cannot be used in `match`es + +const_eval_non_const_operator = + cannot call {$non_or_conditionally}-const operator in {const_eval_const_context}s + +const_eval_non_const_question_branch = + `?` is not allowed on `{$ty}` in {const_eval_const_context}s +const_eval_non_const_question_from_residual = + `?` is not allowed on `{$ty}` in {const_eval_const_context}s + +const_eval_non_const_try_block_from_output = + `try` block cannot convert `{$ty}` to the result in {const_eval_const_context}s + const_eval_not_enough_caller_args = calling a function with fewer arguments than it requires @@ -281,8 +293,6 @@ const_eval_offset_from_unsigned_overflow = *[false] offset } than second: {$a_offset} < {$b_offset} -const_eval_operator_non_const = - cannot call non-const operator in {const_eval_const_context}s const_eval_overflow_arith = arithmetic overflow in `{$intrinsic}` const_eval_overflow_shift = @@ -325,11 +335,6 @@ const_eval_ptr_as_bytes_1 = const_eval_ptr_as_bytes_2 = the absolute address of a pointer is not known at compile-time, so such operations are not supported -const_eval_question_branch_non_const = - `?` is not allowed on `{$ty}` in {const_eval_const_context}s -const_eval_question_from_residual_non_const = - `?` is not allowed on `{$ty}` in {const_eval_const_context}s - const_eval_range = in the range {$lo}..={$hi} const_eval_range_lower = greater or 
equal to {$lo} const_eval_range_singular = equal to {$lo} @@ -379,8 +384,6 @@ const_eval_too_generic = const_eval_too_many_caller_args = calling a function with more arguments than it expected -const_eval_try_block_from_output_non_const = - `try` block cannot convert `{$ty}` to the result in {const_eval_const_context}s const_eval_unallowed_fn_pointer_call = function pointer calls are not allowed in {const_eval_const_context}s const_eval_unallowed_heap_allocations = @@ -421,7 +424,7 @@ const_eval_unstable_in_stable_exposed = .bypass_sugg = otherwise, as a last resort `#[rustc_allow_const_fn_unstable]` can be used to bypass stability checks (this requires team approval) const_eval_unstable_intrinsic = `{$name}` is not yet stable as a const intrinsic - .help = add `#![feature({$feature})]` to the crate attributes to enable +const_eval_unstable_intrinsic_suggestion = add `#![feature({$feature})]` to the crate attributes to enable const_eval_unterminated_c_string = reading a null-terminated string starting at {$pointer} with no null found before end of allocation diff --git a/compiler/rustc_const_eval/src/check_consts/check.rs b/compiler/rustc_const_eval/src/check_consts/check.rs index e895c44199b..ed34996a7a7 100644 --- a/compiler/rustc_const_eval/src/check_consts/check.rs +++ b/compiler/rustc_const_eval/src/check_consts/check.rs @@ -10,7 +10,7 @@ use rustc_attr_parsing::{ConstStability, StabilityLevel}; use rustc_errors::{Diag, ErrorGuaranteed}; use rustc_hir::def_id::DefId; use rustc_hir::{self as hir, LangItem}; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_infer::infer::TyCtxtInferExt; use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::*; @@ -35,6 +35,12 @@ use crate::errors; type QualifResults<'mir, 'tcx, Q> = rustc_mir_dataflow::ResultsCursor<'mir, 'tcx, FlowSensitiveAnalysis<'mir, 'mir, 'tcx, Q>>; +#[derive(Copy, Clone, PartialEq, Eq, Debug)] +enum ConstConditionsHold { + Yes, + No, +} + #[derive(Default)] pub(crate) struct Qualifs<'mir, 'tcx> { has_mut_interior: Option<QualifResults<'mir, 'tcx, HasMutInterior>>, @@ -172,7 +178,7 @@ pub struct Checker<'mir, 'tcx> { /// A set that stores for each local whether it is "transient", i.e. guaranteed to be dead /// when this MIR body returns. - transient_locals: Option<BitSet<Local>>, + transient_locals: Option<DenseBitSet<Local>>, error_emitted: Option<ErrorGuaranteed>, secondary_errors: Vec<Diag<'tcx>>, @@ -242,7 +248,7 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { // And then check all `Return` in the MIR, and if a local is "maybe live" at a // `Return` then it is definitely not transient. - let mut transient = BitSet::new_filled(ccx.body.local_decls.len()); + let mut transient = DenseBitSet::new_filled(ccx.body.local_decls.len()); // Make sure to only visit reachable blocks, the dataflow engine can ICE otherwise. 
for (bb, data) in traversal::reachable(&ccx.body) { if matches!(data.terminator().kind, TerminatorKind::Return) { @@ -376,15 +382,15 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { callee: DefId, callee_args: ty::GenericArgsRef<'tcx>, call_span: Span, - ) -> bool { + ) -> Option<ConstConditionsHold> { let tcx = self.tcx; if !tcx.is_conditionally_const(callee) { - return false; + return None; } let const_conditions = tcx.const_conditions(callee).instantiate(tcx, callee_args); if const_conditions.is_empty() { - return false; + return None; } let (infcx, param_env) = tcx.infer_ctxt().build_with_typing_env(self.body.typing_env(tcx)); @@ -413,12 +419,13 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { })); let errors = ocx.select_all_or_error(); - if !errors.is_empty() { + if errors.is_empty() { + Some(ConstConditionsHold::Yes) + } else { tcx.dcx() .span_delayed_bug(call_span, "this should have reported a ~const error in HIR"); + Some(ConstConditionsHold::No) } - - true } pub fn check_drop_terminator( @@ -457,6 +464,12 @@ impl<'mir, 'tcx> Checker<'mir, 'tcx> { err_span, ); } + + fn crate_inject_span(&self) -> Option<Span> { + self.tcx.hir_crate_items(()).definitions().next().and_then(|id| { + self.tcx.crate_level_attribute_injection_span(self.tcx.local_def_id_to_hir_id(id)) + }) + } } impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { @@ -488,7 +501,8 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { Rvalue::Use(_) | Rvalue::CopyForDeref(..) | Rvalue::Repeat(..) - | Rvalue::Discriminant(..) => {} + | Rvalue::Discriminant(..) + | Rvalue::Len(_) => {} Rvalue::Aggregate(kind, ..) => { if let AggregateKind::Coroutine(def_id, ..) = kind.as_ref() @@ -572,27 +586,12 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { ) => {} Rvalue::ShallowInitBox(_, _) => {} - Rvalue::UnaryOp(op, operand) => { + Rvalue::UnaryOp(_, operand) => { let ty = operand.ty(self.body, self.tcx); - match op { - UnOp::Not | UnOp::Neg => { - if is_int_bool_float_or_char(ty) { - // Int, bool, float, and char operations are fine. - } else { - span_bug!( - self.span, - "non-primitive type in `Rvalue::UnaryOp{op:?}`: {ty:?}", - ); - } - } - UnOp::PtrMetadata => { - if !ty.is_ref() && !ty.is_unsafe_ptr() { - span_bug!( - self.span, - "non-pointer type in `Rvalue::UnaryOp({op:?})`: {ty:?}", - ); - } - } + if is_int_bool_float_or_char(ty) { + // Int, bool, float, and char operations are fine. + } else { + span_bug!(self.span, "non-primitive type in `Rvalue::UnaryOp`: {:?}", ty); } } @@ -706,9 +705,17 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { trace!("attempting to call a trait method"); let trait_is_const = tcx.is_const_trait(trait_did); - if trait_is_const { + // Only consider a trait to be const if the const conditions hold. + // Otherwise, it's really misleading to call something "conditionally" + // const when it's very obviously not conditionally const. + if trait_is_const && has_const_conditions == Some(ConstConditionsHold::Yes) { // Trait calls are always conditionally-const. - self.check_op(ops::ConditionallyConstCall { callee, args: fn_args }); + self.check_op(ops::ConditionallyConstCall { + callee, + args: fn_args, + span: *fn_span, + call_source, + }); // FIXME(const_trait_impl): do a more fine-grained check whether this // particular trait can be const-stably called. } else { @@ -725,8 +732,13 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { } // Even if we know the callee, ensure we can use conditionally-const calls. 
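// The conditional-constness check above now reports three outcomes instead of a
// bool. A standalone sketch of that shape (the enum name mirrors the one added in
// the hunk; the actual trait-solving logic is elided):
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
enum ConstConditionsHold {
    Yes,
    No,
}

// `None`      => the callee is not conditionally const (nothing to check)
// `Some(Yes)` => it is conditionally const and its `~const` conditions are satisfied
// `Some(No)`  => it is conditionally const but the conditions failed
fn check_outcome(is_conditionally_const: bool, conditions_ok: bool) -> Option<ConstConditionsHold> {
    if !is_conditionally_const {
        return None;
    }
    Some(if conditions_ok { ConstConditionsHold::Yes } else { ConstConditionsHold::No })
}

fn main() {
    assert_eq!(check_outcome(false, true), None);
    assert_eq!(check_outcome(true, true), Some(ConstConditionsHold::Yes));
    assert_eq!(check_outcome(true, false), Some(ConstConditionsHold::No));
}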
- if has_const_conditions { - self.check_op(ops::ConditionallyConstCall { callee, args: fn_args }); + if has_const_conditions.is_some() { + self.check_op(ops::ConditionallyConstCall { + callee, + args: fn_args, + span: *fn_span, + call_source, + }); } // At this point, we are calling a function, `callee`, whose `DefId` is known... @@ -803,6 +815,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { name: intrinsic.name, feature, const_stable_indirect: is_const_stable, + suggestion: self.crate_inject_span(), }); } Some(ConstStability { level: StabilityLevel::Stable { .. }, .. }) => { @@ -891,7 +904,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { // regular stability. feature == sym::rustc_private && issue == NonZero::new(27812) - && self.tcx.sess.opts.unstable_opts.force_unstable_if_unmarked + && tcx.sess.opts.unstable_opts.force_unstable_if_unmarked }; // Even if the feature is enabled, we still need check_op to double-check // this if the callee is not safe to expose on stable. @@ -901,6 +914,7 @@ impl<'tcx> Visitor<'tcx> for Checker<'_, 'tcx> { feature, feature_enabled, safe_to_expose_on_stable: callee_safe_to_expose_on_stable, + suggestion_span: self.crate_inject_span(), }); } } diff --git a/compiler/rustc_const_eval/src/check_consts/ops.rs b/compiler/rustc_const_eval/src/check_consts/ops.rs index ebd680ac28a..3c83a7b92cd 100644 --- a/compiler/rustc_const_eval/src/check_consts/ops.rs +++ b/compiler/rustc_const_eval/src/check_consts/ops.rs @@ -1,8 +1,8 @@ //! Concrete error types for all operations which may be invalid in a certain const context. use hir::{ConstContext, LangItem}; -use rustc_errors::Diag; use rustc_errors::codes::*; +use rustc_errors::{Applicability, Diag}; use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_infer::infer::TyCtxtInferExt; @@ -14,8 +14,11 @@ use rustc_middle::ty::{ self, Closure, FnDef, FnPtr, GenericArgKind, GenericArgsRef, Param, TraitRef, Ty, suggest_constraining_type_param, }; -use rustc_middle::util::{CallDesugaringKind, CallKind, call_kind}; +use rustc_session::parse::add_feature_diagnostics; use rustc_span::{BytePos, Pos, Span, Symbol, sym}; +use rustc_trait_selection::error_reporting::traits::call_kind::{ + CallDesugaringKind, CallKind, call_kind, +}; use rustc_trait_selection::traits::SelectionContext; use tracing::debug; @@ -77,6 +80,8 @@ impl<'tcx> NonConstOp<'tcx> for FnCallIndirect { pub(crate) struct ConditionallyConstCall<'tcx> { pub callee: DefId, pub args: GenericArgsRef<'tcx>, + pub span: Span, + pub call_source: CallSource, } impl<'tcx> NonConstOp<'tcx> for ConditionallyConstCall<'tcx> { @@ -91,16 +96,22 @@ impl<'tcx> NonConstOp<'tcx> for ConditionallyConstCall<'tcx> { } } - fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, span: Span) -> Diag<'tcx> { - ccx.tcx.sess.create_feature_err( - errors::ConditionallyConstCall { - span, - def_path_str: ccx.tcx.def_path_str_with_args(self.callee, self.args), - def_descr: ccx.tcx.def_descr(self.callee), - kind: ccx.const_kind(), - }, - sym::const_trait_impl, - ) + fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> { + let mut diag = build_error_for_const_call( + ccx, + self.callee, + self.args, + self.span, + self.call_source, + "conditionally", + |_, _, _| {}, + ); + + // Override code and mention feature. 
+ diag.code(E0658); + add_feature_diagnostics(&mut diag, ccx.tcx.sess, sym::const_trait_impl); + + diag } } @@ -118,210 +129,252 @@ impl<'tcx> NonConstOp<'tcx> for FnCallNonConst<'tcx> { #[allow(rustc::diagnostic_outside_of_impl)] #[allow(rustc::untranslatable_diagnostic)] fn build_error(&self, ccx: &ConstCx<'_, 'tcx>, _: Span) -> Diag<'tcx> { - let FnCallNonConst { callee, args, span, call_source } = *self; - let ConstCx { tcx, typing_env, .. } = *ccx; + let tcx = ccx.tcx; let caller = ccx.def_id(); - let diag_trait = |err, self_ty: Ty<'_>, trait_id| { - let trait_ref = TraitRef::from_method(tcx, trait_id, args); - - match self_ty.kind() { - Param(param_ty) => { - debug!(?param_ty); - if let Some(generics) = tcx.hir_node_by_def_id(caller).generics() { - let constraint = with_no_trimmed_paths!(format!( - "~const {}", - trait_ref.print_trait_sugared(), - )); - suggest_constraining_type_param( - tcx, - generics, - err, - param_ty.name.as_str(), - &constraint, - Some(trait_ref.def_id), - None, - ); + let mut err = build_error_for_const_call( + ccx, + self.callee, + self.args, + self.span, + self.call_source, + "non", + |err, self_ty, trait_id| { + // FIXME(const_trait_impl): Do we need any of this on the non-const codepath? + + let trait_ref = TraitRef::from_method(tcx, trait_id, self.args); + + match self_ty.kind() { + Param(param_ty) => { + debug!(?param_ty); + if let Some(generics) = tcx.hir_node_by_def_id(caller).generics() { + let constraint = with_no_trimmed_paths!(format!( + "~const {}", + trait_ref.print_trait_sugared(), + )); + suggest_constraining_type_param( + tcx, + generics, + err, + param_ty.name.as_str(), + &constraint, + Some(trait_ref.def_id), + None, + ); + } } - } - ty::Adt(..) => { - let (infcx, param_env) = tcx.infer_ctxt().build_with_typing_env(typing_env); - let obligation = - Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref); - let mut selcx = SelectionContext::new(&infcx); - let implsrc = selcx.select(&obligation); - if let Ok(Some(ImplSource::UserDefined(data))) = implsrc { - // FIXME(const_trait_impl) revisit this - if !tcx.is_const_trait_impl(data.impl_def_id) { - let span = tcx.def_span(data.impl_def_id); - err.subdiagnostic(errors::NonConstImplNote { span }); + ty::Adt(..) => { + let (infcx, param_env) = + tcx.infer_ctxt().build_with_typing_env(ccx.typing_env); + let obligation = + Obligation::new(tcx, ObligationCause::dummy(), param_env, trait_ref); + let mut selcx = SelectionContext::new(&infcx); + let implsrc = selcx.select(&obligation); + if let Ok(Some(ImplSource::UserDefined(data))) = implsrc { + // FIXME(const_trait_impl) revisit this + if !tcx.is_const_trait_impl(data.impl_def_id) { + let span = tcx.def_span(data.impl_def_id); + err.subdiagnostic(errors::NonConstImplNote { span }); + } } } + _ => {} } - _ => {} + }, + ); + + if let ConstContext::Static(_) = ccx.const_kind() { + err.note(fluent_generated::const_eval_lazy_lock); + } + + err + } +} + +/// Build an error message reporting that a function call is not const (or only +/// conditionally const). In case that this call is desugared (like an operator +/// or sugar from something like a `for` loop), try to build a better error message +/// that doesn't call it a method. 
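// Illustration of the desugared calls that `build_error_for_const_call` reports
// with a tailored message instead of a generic "cannot call non-const method"
// error. Both functions below are expected to be rejected in a const context:
const fn loops() -> u32 {
    let mut sum = 0;
    // `for` desugars to `IntoIterator::into_iter` / `Iterator::next`, which are
    // not const-callable, so this reports the dedicated "for loop" message.
    for i in 0..3u32 {
        sum += i;
    }
    sum
}

const fn try_op(x: Option<u32>) -> Option<u32> {
    // `?` desugars to `Try::branch`, so this reports the dedicated `?` message.
    let v = x?;
    Some(v + 1)
}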
+fn build_error_for_const_call<'tcx>( + ccx: &ConstCx<'_, 'tcx>, + callee: DefId, + args: ty::GenericArgsRef<'tcx>, + span: Span, + call_source: CallSource, + non_or_conditionally: &'static str, + note_trait_if_possible: impl FnOnce(&mut Diag<'tcx>, Ty<'tcx>, DefId), +) -> Diag<'tcx> { + let tcx = ccx.tcx; + + let call_kind = + call_kind(tcx, ccx.typing_env, callee, args, span, call_source.from_hir_call(), None); + + debug!(?call_kind); + + let mut err = match call_kind { + CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => { + macro_rules! error { + ($err:ident) => { + tcx.dcx().create_err(errors::$err { + span, + ty: self_ty, + kind: ccx.const_kind(), + non_or_conditionally, + }) + }; } - }; - - let call_kind = - call_kind(tcx, ccx.typing_env, callee, args, span, call_source.from_hir_call(), None); - - debug!(?call_kind); - - let mut err = match call_kind { - CallKind::Normal { desugaring: Some((kind, self_ty)), .. } => { - macro_rules! error { - ($err:ident) => { - tcx.dcx().create_err(errors::$err { - span, - ty: self_ty, - kind: ccx.const_kind(), - }) - }; - } - // Don't point at the trait if this is a desugaring... - // FIXME(const_trait_impl): we could perhaps do this for `Iterator`. - match kind { - CallDesugaringKind::ForLoopIntoIter | CallDesugaringKind::ForLoopNext => { - error!(NonConstForLoopIntoIter) - } - CallDesugaringKind::QuestionBranch => { - error!(NonConstQuestionBranch) - } - CallDesugaringKind::QuestionFromResidual => { - error!(NonConstQuestionFromResidual) - } - CallDesugaringKind::TryBlockFromOutput => { - error!(NonConstTryBlockFromOutput) - } - CallDesugaringKind::Await => { - error!(NonConstAwait) - } + // Don't point at the trait if this is a desugaring... + // FIXME(const_trait_impl): we could perhaps do this for `Iterator`. + match kind { + CallDesugaringKind::ForLoopIntoIter | CallDesugaringKind::ForLoopNext => { + error!(NonConstForLoopIntoIter) + } + CallDesugaringKind::QuestionBranch => { + error!(NonConstQuestionBranch) + } + CallDesugaringKind::QuestionFromResidual => { + error!(NonConstQuestionFromResidual) + } + CallDesugaringKind::TryBlockFromOutput => { + error!(NonConstTryBlockFromOutput) + } + CallDesugaringKind::Await => { + error!(NonConstAwait) } } - CallKind::FnCall { fn_trait_id, self_ty } => { - let note = match self_ty.kind() { - FnDef(def_id, ..) => { - let span = tcx.def_span(*def_id); - if ccx.tcx.is_const_fn(*def_id) { - span_bug!(span, "calling const FnDef errored when it shouldn't"); - } - - Some(errors::NonConstClosureNote::FnDef { span }) + } + CallKind::FnCall { fn_trait_id, self_ty } => { + let note = match self_ty.kind() { + FnDef(def_id, ..) => { + let span = tcx.def_span(*def_id); + if ccx.tcx.is_const_fn(*def_id) { + span_bug!(span, "calling const FnDef errored when it shouldn't"); } - FnPtr(..) => Some(errors::NonConstClosureNote::FnPtr), - Closure(..) => Some(errors::NonConstClosureNote::Closure), - _ => None, - }; - let mut err = tcx.dcx().create_err(errors::NonConstClosure { + Some(errors::NonConstClosureNote::FnDef { span }) + } + FnPtr(..) => Some(errors::NonConstClosureNote::FnPtr), + Closure(..) => Some(errors::NonConstClosureNote::Closure), + _ => None, + }; + + let mut err = tcx.dcx().create_err(errors::NonConstClosure { + span, + kind: ccx.const_kind(), + note, + non_or_conditionally, + }); + + note_trait_if_possible(&mut err, self_ty, fn_trait_id); + err + } + CallKind::Operator { trait_id, self_ty, .. 
} => { + let mut err = if let CallSource::MatchCmp = call_source { + tcx.dcx().create_err(errors::NonConstMatchEq { span, kind: ccx.const_kind(), - note, - }); - - diag_trait(&mut err, self_ty, fn_trait_id); - err - } - CallKind::Operator { trait_id, self_ty, .. } => { - let mut err = if let CallSource::MatchCmp = call_source { - tcx.dcx().create_err(errors::NonConstMatchEq { - span, - kind: ccx.const_kind(), - ty: self_ty, - }) - } else { - let mut sugg = None; - - if ccx.tcx.is_lang_item(trait_id, LangItem::PartialEq) { - match (args[0].unpack(), args[1].unpack()) { - (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty)) - if self_ty == rhs_ty - && self_ty.is_ref() - && self_ty.peel_refs().is_primitive() => - { - let mut num_refs = 0; - let mut tmp_ty = self_ty; - while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() { - num_refs += 1; - tmp_ty = *inner_ty; - } - let deref = "*".repeat(num_refs); - - if let Ok(call_str) = - ccx.tcx.sess.source_map().span_to_snippet(span) - { - if let Some(eq_idx) = call_str.find("==") { - if let Some(rhs_idx) = call_str[(eq_idx + 2)..] - .find(|c: char| !c.is_whitespace()) - { - let rhs_pos = span.lo() - + BytePos::from_usize(eq_idx + 2 + rhs_idx); - let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos); - sugg = Some(errors::ConsiderDereferencing { - deref, - span: span.shrink_to_lo(), - rhs_span, - }); - } + ty: self_ty, + non_or_conditionally, + }) + } else { + let mut sugg = None; + + if ccx.tcx.is_lang_item(trait_id, LangItem::PartialEq) { + match (args[0].unpack(), args[1].unpack()) { + (GenericArgKind::Type(self_ty), GenericArgKind::Type(rhs_ty)) + if self_ty == rhs_ty + && self_ty.is_ref() + && self_ty.peel_refs().is_primitive() => + { + let mut num_refs = 0; + let mut tmp_ty = self_ty; + while let rustc_middle::ty::Ref(_, inner_ty, _) = tmp_ty.kind() { + num_refs += 1; + tmp_ty = *inner_ty; + } + let deref = "*".repeat(num_refs); + + if let Ok(call_str) = ccx.tcx.sess.source_map().span_to_snippet(span) { + if let Some(eq_idx) = call_str.find("==") { + if let Some(rhs_idx) = + call_str[(eq_idx + 2)..].find(|c: char| !c.is_whitespace()) + { + let rhs_pos = + span.lo() + BytePos::from_usize(eq_idx + 2 + rhs_idx); + let rhs_span = span.with_lo(rhs_pos).with_hi(rhs_pos); + sugg = Some(errors::ConsiderDereferencing { + deref, + span: span.shrink_to_lo(), + rhs_span, + }); } } } - _ => {} } + _ => {} } - tcx.dcx().create_err(errors::NonConstOperator { - span, - kind: ccx.const_kind(), - sugg, - }) - }; - - diag_trait(&mut err, self_ty, trait_id); - err - } - CallKind::DerefCoercion { deref_target, deref_target_ty, self_ty } => { - // Check first whether the source is accessible (issue #87060) - let target = if tcx.sess.source_map().is_span_accessible(deref_target) { - Some(deref_target) - } else { - None - }; - - let mut err = tcx.dcx().create_err(errors::NonConstDerefCoercion { + } + tcx.dcx().create_err(errors::NonConstOperator { span, - ty: self_ty, kind: ccx.const_kind(), - target_ty: deref_target_ty, - deref_target: target, - }); + sugg, + non_or_conditionally, + }) + }; - diag_trait(&mut err, self_ty, tcx.require_lang_item(LangItem::Deref, Some(span))); - err - } - _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentMethods) => { - ccx.dcx().create_err(errors::NonConstFmtMacroCall { span, kind: ccx.const_kind() }) - } - _ => ccx.dcx().create_err(errors::NonConstFnCall { + note_trait_if_possible(&mut err, self_ty, trait_id); + err + } + CallKind::DerefCoercion { deref_target_span, deref_target_ty, self_ty } => { + 
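// The suggestion built above targets comparisons of references to primitives in
// const contexts: `a == b` on `&u32` goes through `PartialEq::eq`, which is not
// (yet) callable in const fn, while dereferencing first uses the built-in
// primitive comparison. A minimal example of the suggested rewrite:
const fn eq_by_value(a: &u32, b: &u32) -> bool {
    // `a == b` would be rejected here; the diagnostic suggests inserting `*`:
    *a == *b
}

fn main() {
    assert!(eq_by_value(&1, &1));
}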
// Check first whether the source is accessible (issue #87060) + let target = if let Some(deref_target_span) = deref_target_span + && tcx.sess.source_map().is_span_accessible(deref_target_span) + { + Some(deref_target_span) + } else { + None + }; + + let mut err = tcx.dcx().create_err(errors::NonConstDerefCoercion { span, - def_descr: ccx.tcx.def_descr(callee), - def_path_str: ccx.tcx.def_path_str_with_args(callee, args), + ty: self_ty, kind: ccx.const_kind(), - }), - }; + target_ty: deref_target_ty, + deref_target: target, + non_or_conditionally, + }); + + note_trait_if_possible( + &mut err, + self_ty, + tcx.require_lang_item(LangItem::Deref, Some(span)), + ); + err + } + _ if tcx.opt_parent(callee) == tcx.get_diagnostic_item(sym::ArgumentMethods) => { + ccx.dcx().create_err(errors::NonConstFmtMacroCall { + span, + kind: ccx.const_kind(), + non_or_conditionally, + }) + } + _ => ccx.dcx().create_err(errors::NonConstFnCall { + span, + def_descr: ccx.tcx.def_descr(callee), + def_path_str: ccx.tcx.def_path_str_with_args(callee, args), + kind: ccx.const_kind(), + non_or_conditionally, + }), + }; - err.note(format!( - "calls in {}s are limited to constant functions, \ + err.note(format!( + "calls in {}s are limited to constant functions, \ tuple structs and tuple variants", - ccx.const_kind(), - )); - - if let ConstContext::Static(_) = ccx.const_kind() { - err.note(fluent_generated::const_eval_lazy_lock); - } + ccx.const_kind(), + )); - err - } + err } /// A call to an `#[unstable]` const fn or `#[rustc_const_unstable]` function. @@ -335,6 +388,7 @@ pub(crate) struct FnCallUnstable { /// expose on stable. pub feature_enabled: bool, pub safe_to_expose_on_stable: bool, + pub suggestion_span: Option<Span>, } impl<'tcx> NonConstOp<'tcx> for FnCallUnstable { @@ -354,8 +408,18 @@ impl<'tcx> NonConstOp<'tcx> for FnCallUnstable { def_path: ccx.tcx.def_path_str(self.def_id), }); // FIXME: make this translatable + let msg = format!("add `#![feature({})]` to the crate attributes to enable", self.feature); #[allow(rustc::untranslatable_diagnostic)] - err.help(format!("add `#![feature({})]` to the crate attributes to enable", self.feature)); + if let Some(span) = self.suggestion_span { + err.span_suggestion_verbose( + span, + msg, + format!("#![feature({})]\n", self.feature), + Applicability::MachineApplicable, + ); + } else { + err.help(msg); + } err } @@ -383,6 +447,7 @@ pub(crate) struct IntrinsicUnstable { pub name: Symbol, pub feature: Symbol, pub const_stable_indirect: bool, + pub suggestion: Option<Span>, } impl<'tcx> NonConstOp<'tcx> for IntrinsicUnstable { @@ -402,6 +467,8 @@ impl<'tcx> NonConstOp<'tcx> for IntrinsicUnstable { span, name: self.name, feature: self.feature, + suggestion: self.suggestion, + help: self.suggestion.is_none(), }) } } diff --git a/compiler/rustc_const_eval/src/check_consts/qualifs.rs b/compiler/rustc_const_eval/src/check_consts/qualifs.rs index b1b7fb406b1..e244b50a4b5 100644 --- a/compiler/rustc_const_eval/src/check_consts/qualifs.rs +++ b/compiler/rustc_const_eval/src/check_consts/qualifs.rs @@ -230,7 +230,9 @@ where Q::in_any_value_of_ty(cx, rvalue.ty(cx.body, cx.tcx)) } - Rvalue::Discriminant(place) => in_place::<Q, _>(cx, in_local, place.as_ref()), + Rvalue::Discriminant(place) | Rvalue::Len(place) => { + in_place::<Q, _>(cx, in_local, place.as_ref()) + } Rvalue::CopyForDeref(place) => in_place::<Q, _>(cx, in_local, place.as_ref()), diff --git a/compiler/rustc_const_eval/src/check_consts/resolver.rs b/compiler/rustc_const_eval/src/check_consts/resolver.rs index 
5a6e7ab2bee..79df63a9e84 100644 --- a/compiler/rustc_const_eval/src/check_consts/resolver.rs +++ b/compiler/rustc_const_eval/src/check_consts/resolver.rs @@ -197,6 +197,7 @@ where | mir::Rvalue::CopyForDeref(..) | mir::Rvalue::ThreadLocalRef(..) | mir::Rvalue::Repeat(..) + | mir::Rvalue::Len(..) | mir::Rvalue::BinaryOp(..) | mir::Rvalue::NullaryOp(..) | mir::Rvalue::UnaryOp(..) diff --git a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs index 8af17d01b0a..35c3e3ed315 100644 --- a/compiler/rustc_const_eval/src/const_eval/fn_queries.rs +++ b/compiler/rustc_const_eval/src/const_eval/fn_queries.rs @@ -4,14 +4,18 @@ use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_middle::query::Providers; use rustc_middle::ty::TyCtxt; -fn parent_impl_constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness { +fn parent_impl_or_trait_constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness { let parent_id = tcx.local_parent(def_id); - if matches!(tcx.def_kind(parent_id), DefKind::Impl { .. }) - && let Some(header) = tcx.impl_trait_header(parent_id) - { - header.constness - } else { - hir::Constness::NotConst + match tcx.def_kind(parent_id) { + DefKind::Impl { of_trait: true } => tcx.impl_trait_header(parent_id).unwrap().constness, + DefKind::Trait => { + if tcx.is_const_trait(parent_id.into()) { + hir::Constness::Const + } else { + hir::Constness::NotConst + } + } + _ => hir::Constness::NotConst, } } @@ -34,7 +38,7 @@ fn constness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> hir::Constness { // If the function itself is not annotated with `const`, it may still be a `const fn` // if it resides in a const trait impl. - parent_impl_constness(tcx, def_id) + parent_impl_or_trait_constness(tcx, def_id) } else { tcx.dcx().span_bug( tcx.def_span(def_id), diff --git a/compiler/rustc_const_eval/src/const_eval/machine.rs b/compiler/rustc_const_eval/src/const_eval/machine.rs index ba7fbb254c6..cfdfbdb7880 100644 --- a/compiler/rustc_const_eval/src/const_eval/machine.rs +++ b/compiler/rustc_const_eval/src/const_eval/machine.rs @@ -360,10 +360,7 @@ impl<'tcx> interpret::Machine<'tcx> for CompileTimeMachine<'tcx> { // sensitive check here. But we can at least rule out functions that are not const at // all. That said, we have to allow calling functions inside a trait marked with // #[const_trait]. These *are* const-checked! - // FIXME(const_trait_impl): why does `is_const_fn` not classify them as const? - if (!ecx.tcx.is_const_fn(def) && !ecx.tcx.is_const_default_method(def)) - || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) - { + if !ecx.tcx.is_const_fn(def) || ecx.tcx.has_attr(def, sym::rustc_do_not_const_check) { // We certainly do *not* want to actually call the fn // though, so be sure we return here. 
throw_unsup_format!("calling non-const function `{}`", instance) diff --git a/compiler/rustc_const_eval/src/errors.rs b/compiler/rustc_const_eval/src/errors.rs index 57534540019..1ee9214c4b2 100644 --- a/compiler/rustc_const_eval/src/errors.rs +++ b/compiler/rustc_const_eval/src/errors.rs @@ -123,12 +123,19 @@ pub(crate) struct UnstableConstFn { #[derive(Diagnostic)] #[diag(const_eval_unstable_intrinsic)] -#[help] pub(crate) struct UnstableIntrinsic { #[primary_span] pub span: Span, pub name: Symbol, pub feature: Symbol, + #[suggestion( + const_eval_unstable_intrinsic_suggestion, + code = "#![feature({feature})]\n", + applicability = "machine-applicable" + )] + pub suggestion: Option<Span>, + #[help(const_eval_unstable_intrinsic_suggestion)] + pub help: bool, } #[derive(Diagnostic)] @@ -174,16 +181,7 @@ pub(crate) struct NonConstFmtMacroCall { #[primary_span] pub span: Span, pub kind: ConstContext, -} - -#[derive(Diagnostic)] -#[diag(const_eval_conditionally_const_call)] -pub(crate) struct ConditionallyConstCall { - #[primary_span] - pub span: Span, - pub def_path_str: String, - pub def_descr: &'static str, - pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] @@ -194,6 +192,7 @@ pub(crate) struct NonConstFnCall { pub def_path_str: String, pub def_descr: &'static str, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] @@ -293,68 +292,75 @@ pub struct RawBytesNote { // FIXME(fee1-dead) do not use stringly typed `ConstContext` #[derive(Diagnostic)] -#[diag(const_eval_match_eq_non_const, code = E0015)] +#[diag(const_eval_non_const_match_eq, code = E0015)] #[note] pub struct NonConstMatchEq<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_for_loop_into_iter_non_const, code = E0015)] +#[diag(const_eval_non_const_for_loop_into_iter, code = E0015)] pub struct NonConstForLoopIntoIter<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_question_branch_non_const, code = E0015)] +#[diag(const_eval_non_const_question_branch, code = E0015)] pub struct NonConstQuestionBranch<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_question_from_residual_non_const, code = E0015)] +#[diag(const_eval_non_const_question_from_residual, code = E0015)] pub struct NonConstQuestionFromResidual<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_try_block_from_output_non_const, code = E0015)] +#[diag(const_eval_non_const_try_block_from_output, code = E0015)] pub struct NonConstTryBlockFromOutput<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_await_non_const, code = E0015)] +#[diag(const_eval_non_const_await, code = E0015)] pub struct NonConstAwait<'tcx> { #[primary_span] pub span: Span, pub ty: Ty<'tcx>, pub kind: ConstContext, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_closure_non_const, code = E0015)] +#[diag(const_eval_non_const_closure, code = E0015)] pub struct NonConstClosure { #[primary_span] pub span: Span, pub kind: 
ConstContext, #[subdiagnostic] pub note: Option<NonConstClosureNote>, + pub non_or_conditionally: &'static str, } #[derive(Subdiagnostic)] @@ -381,17 +387,18 @@ pub struct ConsiderDereferencing { } #[derive(Diagnostic)] -#[diag(const_eval_operator_non_const, code = E0015)] +#[diag(const_eval_non_const_operator, code = E0015)] pub struct NonConstOperator { #[primary_span] pub span: Span, pub kind: ConstContext, #[subdiagnostic] pub sugg: Option<ConsiderDereferencing>, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] -#[diag(const_eval_deref_coercion_non_const, code = E0015)] +#[diag(const_eval_non_const_deref_coercion, code = E0015)] #[note] pub struct NonConstDerefCoercion<'tcx> { #[primary_span] @@ -401,6 +408,7 @@ pub struct NonConstDerefCoercion<'tcx> { pub target_ty: Ty<'tcx>, #[note(const_eval_target_note)] pub deref_target: Option<Span>, + pub non_or_conditionally: &'static str, } #[derive(Diagnostic)] diff --git a/compiler/rustc_const_eval/src/interpret/memory.rs b/compiler/rustc_const_eval/src/interpret/memory.rs index 0790db984e3..2772c94d52b 100644 --- a/compiler/rustc_const_eval/src/interpret/memory.rs +++ b/compiler/rustc_const_eval/src/interpret/memory.rs @@ -1481,22 +1481,31 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { /// Test if this value might be null. /// If the machine does not support ptr-to-int casts, this is conservative. pub fn scalar_may_be_null(&self, scalar: Scalar<M::Provenance>) -> InterpResult<'tcx, bool> { - interp_ok(match scalar.try_to_scalar_int() { - Ok(int) => int.is_null(), + match scalar.try_to_scalar_int() { + Ok(int) => interp_ok(int.is_null()), Err(_) => { - // Can only happen during CTFE. + // We can't cast this pointer to an integer. Can only happen during CTFE. let ptr = scalar.to_pointer(self)?; match self.ptr_try_get_alloc_id(ptr, 0) { Ok((alloc_id, offset, _)) => { - let size = self.get_alloc_info(alloc_id).size; - // If the pointer is out-of-bounds, it may be null. - // Note that one-past-the-end (offset == size) is still inbounds, and never null. - offset > size + let info = self.get_alloc_info(alloc_id); + // If the pointer is in-bounds (including "at the end"), it is definitely not null. + if offset <= info.size { + return interp_ok(false); + } + // If the allocation is N-aligned, and the offset is not divisible by N, + // then `base + offset` has a non-zero remainder after division by `N`, + // which means `base + offset` cannot be null. + if offset.bytes() % info.align.bytes() != 0 { + return interp_ok(false); + } + // We don't know enough, this might be null. + interp_ok(true) } Err(_offset) => bug!("a non-int scalar is always a pointer"), } } - }) + } } /// Turning a "maybe pointer" into a proper pointer (and some information diff --git a/compiler/rustc_const_eval/src/interpret/operand.rs b/compiler/rustc_const_eval/src/interpret/operand.rs index b861ffb6110..5d905cff1f2 100644 --- a/compiler/rustc_const_eval/src/interpret/operand.rs +++ b/compiler/rustc_const_eval/src/interpret/operand.rs @@ -704,8 +704,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { pub fn read_str(&self, mplace: &MPlaceTy<'tcx, M::Provenance>) -> InterpResult<'tcx, &str> { let len = mplace.len(self)?; let bytes = self.read_bytes_ptr_strip_provenance(mplace.ptr(), Size::from_bytes(len))?; - let str = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?; - interp_ok(str) + let s = std::str::from_utf8(bytes).map_err(|err| err_ub!(InvalidStr(err)))?; + interp_ok(s) } /// Read from a local of the current frame. 
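// A standalone version of the "might this pointer be null?" reasoning added above,
// in plain numbers: a pointer `base + offset` into an allocation of the given size
// and alignment (with `base != 0` and `base % align == 0`) cannot be null if it is
// still in bounds (one-past-the-end counts as in bounds), and also cannot be null
// if `offset % align != 0`, because then `base + offset` is not a multiple of
// `align` and in particular is not zero.
fn may_be_null(size: u64, align: u64, offset: u64) -> bool {
    if offset <= size {
        return false; // in bounds (incl. one-past-the-end): definitely not null
    }
    if offset % align != 0 {
        return false; // misaligned relative to the allocation: cannot land on 0
    }
    true // out of bounds with an align-multiple offset: conservatively "maybe null"
}

fn main() {
    assert!(!may_be_null(8, 4, 8)); // one past the end is still in bounds
    assert!(!may_be_null(8, 4, 10)); // 10 % 4 != 0, so base + 10 is never 0
    assert!(may_be_null(8, 4, 12)); // unknown: conservatively report "maybe null"
}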
Convenience method for [`InterpCx::local_at_frame_to_op`]. diff --git a/compiler/rustc_const_eval/src/interpret/place.rs b/compiler/rustc_const_eval/src/interpret/place.rs index 0d974071619..c97922ac132 100644 --- a/compiler/rustc_const_eval/src/interpret/place.rs +++ b/compiler/rustc_const_eval/src/interpret/place.rs @@ -1017,9 +1017,9 @@ where /// This is allocated in immutable global memory and deduplicated. pub fn allocate_str_dedup( &mut self, - str: &str, + s: &str, ) -> InterpResult<'tcx, MPlaceTy<'tcx, M::Provenance>> { - let bytes = str.as_bytes(); + let bytes = s.as_bytes(); let ptr = self.allocate_bytes_dedup(bytes)?; // Create length metadata for the string. diff --git a/compiler/rustc_const_eval/src/interpret/step.rs b/compiler/rustc_const_eval/src/interpret/step.rs index 32e77fe1024..b61865be667 100644 --- a/compiler/rustc_const_eval/src/interpret/step.rs +++ b/compiler/rustc_const_eval/src/interpret/step.rs @@ -9,13 +9,12 @@ use rustc_middle::ty::layout::FnAbiOf; use rustc_middle::ty::{self, Instance, Ty}; use rustc_middle::{bug, mir, span_bug}; use rustc_span::source_map::Spanned; -use rustc_span::{DesugaringKind, Span}; use rustc_target::callconv::FnAbi; use tracing::{info, instrument, trace}; use super::{ FnArg, FnVal, ImmTy, Immediate, InterpCx, InterpResult, Machine, MemPlaceMeta, PlaceTy, - Projectable, interp_ok, throw_ub, + Projectable, Scalar, interp_ok, throw_ub, }; use crate::util; @@ -81,9 +80,7 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { use rustc_middle::mir::StatementKind::*; match &stmt.kind { - Assign(box (place, rvalue)) => { - self.eval_rvalue_into_place(rvalue, *place, stmt.source_info.span)? - } + Assign(box (place, rvalue)) => self.eval_rvalue_into_place(rvalue, *place)?, SetDiscriminant { place, variant_index } => { let dest = self.eval_place(**place)?; @@ -162,7 +159,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { &mut self, rvalue: &mir::Rvalue<'tcx>, place: mir::Place<'tcx>, - span: Span, ) -> InterpResult<'tcx> { let dest = self.eval_place(place)?; // FIXME: ensure some kind of non-aliasing between LHS and RHS? @@ -218,6 +214,12 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { self.write_repeat(operand, &dest)?; } + Len(place) => { + let src = self.eval_place(place)?; + let len = src.len(self)?; + self.write_scalar(Scalar::from_target_usize(len, self), &dest)?; + } + Ref(_, borrow_kind, place) => { let src = self.eval_place(place)?; let place = self.force_allocation(&src)?; @@ -248,13 +250,8 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { let src = self.eval_place(place)?; let place = self.force_allocation(&src)?; let mut val = ImmTy::from_immediate(place.to_ref(self), dest.layout); - if !place_base_raw - && span.desugaring_kind() != Some(DesugaringKind::IndexBoundsCheckReborrow) - { + if !place_base_raw { // If this was not already raw, it needs retagging. - // As a special hack, we exclude the desugared `PtrMetadata(&raw const *_n)` - // from indexing. (Really we should not do any retag on `&raw` but that does not - // currently work with Stacked Borrows.) val = M::retag_ptr_value(self, mir::RetagKind::Raw, &val)?; } self.write_immediate(*val, &dest)?; diff --git a/compiler/rustc_data_structures/src/flock.rs b/compiler/rustc_data_structures/src/flock.rs index e03962a54ec..292a33d5646 100644 --- a/compiler/rustc_data_structures/src/flock.rs +++ b/compiler/rustc_data_structures/src/flock.rs @@ -4,6 +4,7 @@ //! green/native threading. This is just a bare-bones enough solution for //! 
librustdoc, it is not production quality at all. +#[cfg(bootstrap)] cfg_match! { cfg(target_os = "linux") => { mod linux; @@ -27,4 +28,28 @@ cfg_match! { } } +#[cfg(not(bootstrap))] +cfg_match! { + target_os = "linux" => { + mod linux; + use linux as imp; + } + target_os = "redox" => { + mod linux; + use linux as imp; + } + unix => { + mod unix; + use unix as imp; + } + windows => { + mod windows; + use self::windows as imp; + } + _ => { + mod unsupported; + use unsupported as imp; + } +} + pub use imp::Lock; diff --git a/compiler/rustc_data_structures/src/graph/implementation/mod.rs b/compiler/rustc_data_structures/src/graph/implementation/mod.rs index 43fdfe6ee0d..7724e9347d8 100644 --- a/compiler/rustc_data_structures/src/graph/implementation/mod.rs +++ b/compiler/rustc_data_structures/src/graph/implementation/mod.rs @@ -22,7 +22,7 @@ use std::fmt::Debug; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use tracing::debug; #[cfg(test)] @@ -214,7 +214,7 @@ impl<N: Debug, E: Debug> Graph<N, E> { direction: Direction, entry_node: NodeIndex, ) -> Vec<NodeIndex> { - let mut visited = BitSet::new_empty(self.len_nodes()); + let mut visited = DenseBitSet::new_empty(self.len_nodes()); let mut stack = vec![]; let mut result = Vec::with_capacity(self.len_nodes()); let mut push_node = |stack: &mut Vec<_>, node: NodeIndex| { @@ -287,7 +287,7 @@ impl<'g, N: Debug, E: Debug> Iterator for AdjacentEdges<'g, N, E> { pub struct DepthFirstTraversal<'g, N, E> { graph: &'g Graph<N, E>, stack: Vec<NodeIndex>, - visited: BitSet<usize>, + visited: DenseBitSet<usize>, direction: Direction, } @@ -297,7 +297,7 @@ impl<'g, N: Debug, E: Debug> DepthFirstTraversal<'g, N, E> { start_node: NodeIndex, direction: Direction, ) -> Self { - let mut visited = BitSet::new_empty(graph.len_nodes()); + let mut visited = DenseBitSet::new_empty(graph.len_nodes()); visited.insert(start_node.node_id()); DepthFirstTraversal { graph, stack: vec![start_node], visited, direction } } diff --git a/compiler/rustc_data_structures/src/graph/iterate/mod.rs b/compiler/rustc_data_structures/src/graph/iterate/mod.rs index cbc6664d853..7b4573d7a84 100644 --- a/compiler/rustc_data_structures/src/graph/iterate/mod.rs +++ b/compiler/rustc_data_structures/src/graph/iterate/mod.rs @@ -1,6 +1,6 @@ use std::ops::ControlFlow; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use super::{DirectedGraph, StartNode, Successors}; @@ -78,7 +78,7 @@ where { graph: G, stack: Vec<G::Node>, - visited: BitSet<G::Node>, + visited: DenseBitSet<G::Node>, } impl<G> DepthFirstSearch<G> @@ -86,7 +86,7 @@ where G: DirectedGraph + Successors, { pub fn new(graph: G) -> Self { - Self { stack: vec![], visited: BitSet::new_empty(graph.num_nodes()), graph } + Self { stack: vec![], visited: DenseBitSet::new_empty(graph.num_nodes()), graph } } /// Version of `push_start_node` that is convenient for chained @@ -125,6 +125,16 @@ where pub fn visited(&self, node: G::Node) -> bool { self.visited.contains(node) } + + /// Returns a reference to the set of nodes that have been visited, with + /// the same caveats as [`Self::visited`]. + /// + /// When incorporating the visited nodes into another bitset, using bulk + /// operations like `union` or `intersect` can be more efficient than + /// processing each node individually. 
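// Why the bulk operations mentioned in the new `visited_set` docs pay off: a dense
// bit set stores one bit per node in 64-bit words, so a union is a word-wise OR
// over `ceil(n / 64)` words instead of one insert per visited node. A tiny sketch
// over raw words (stand-ins for the words inside `DenseBitSet`):
fn union_into(dest: &mut [u64], src: &[u64]) -> bool {
    // returns true if `dest` gained any new bits
    let mut changed = false;
    for (d, s) in dest.iter_mut().zip(src) {
        let old = *d;
        *d |= s;
        changed |= *d != old;
    }
    changed
}

fn main() {
    let visited = [0b1010u64, 0b0001];
    let mut reachable = [0b0100u64, 0b0001];
    assert!(union_into(&mut reachable, &visited));
    assert_eq!(reachable, [0b1110, 0b0001]);
}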
+ pub fn visited_set(&self) -> &DenseBitSet<G::Node> { + &self.visited + } } impl<G> std::fmt::Debug for DepthFirstSearch<G> @@ -207,8 +217,8 @@ where { graph: &'graph G, stack: Vec<Event<G::Node>>, - visited: BitSet<G::Node>, - settled: BitSet<G::Node>, + visited: DenseBitSet<G::Node>, + settled: DenseBitSet<G::Node>, } impl<'graph, G> TriColorDepthFirstSearch<'graph, G> @@ -219,8 +229,8 @@ where TriColorDepthFirstSearch { graph, stack: vec![], - visited: BitSet::new_empty(graph.num_nodes()), - settled: BitSet::new_empty(graph.num_nodes()), + visited: DenseBitSet::new_empty(graph.num_nodes()), + settled: DenseBitSet::new_empty(graph.num_nodes()), } } diff --git a/compiler/rustc_data_structures/src/graph/mod.rs b/compiler/rustc_data_structures/src/graph/mod.rs index 103ddd917bf..92035e8bc48 100644 --- a/compiler/rustc_data_structures/src/graph/mod.rs +++ b/compiler/rustc_data_structures/src/graph/mod.rs @@ -4,6 +4,7 @@ pub mod dominators; pub mod implementation; pub mod iterate; mod reference; +pub mod reversed; pub mod scc; pub mod vec_graph; diff --git a/compiler/rustc_data_structures/src/graph/reversed.rs b/compiler/rustc_data_structures/src/graph/reversed.rs new file mode 100644 index 00000000000..9b726deaa15 --- /dev/null +++ b/compiler/rustc_data_structures/src/graph/reversed.rs @@ -0,0 +1,42 @@ +use crate::graph::{DirectedGraph, Predecessors, Successors}; + +/// View that reverses the direction of edges in its underlying graph, so that +/// successors become predecessors and vice-versa. +/// +/// Because of `impl<G: Graph> Graph for &G`, the underlying graph can be +/// wrapped by-reference instead of by-value if desired. +#[derive(Clone, Copy, Debug)] +pub struct ReversedGraph<G> { + pub inner: G, +} + +impl<G> ReversedGraph<G> { + pub fn new(inner: G) -> Self { + Self { inner } + } +} + +impl<G: DirectedGraph> DirectedGraph for ReversedGraph<G> { + type Node = G::Node; + + fn num_nodes(&self) -> usize { + self.inner.num_nodes() + } +} + +// Implementing `StartNode` is not possible in general, because the start node +// of an underlying graph is instead an _end_ node in the reversed graph. +// But would be possible to define another wrapper type that adds an explicit +// start node to its underlying graph, if desired. 
+ +impl<G: Predecessors> Successors for ReversedGraph<G> { + fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> { + self.inner.predecessors(node) + } +} + +impl<G: Successors> Predecessors for ReversedGraph<G> { + fn predecessors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> { + self.inner.successors(node) + } +} diff --git a/compiler/rustc_data_structures/src/marker.rs b/compiler/rustc_data_structures/src/marker.rs index 2b629024bfe..6ae97222f77 100644 --- a/compiler/rustc_data_structures/src/marker.rs +++ b/compiler/rustc_data_structures/src/marker.rs @@ -72,7 +72,7 @@ impl_dyn_send!( [Vec<T, A> where T: DynSend, A: std::alloc::Allocator + DynSend] [Box<T, A> where T: ?Sized + DynSend, A: std::alloc::Allocator + DynSend] [crate::sync::RwLock<T> where T: DynSend] - [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Send + crate::tagged_ptr::Pointer, T: Send + crate::tagged_ptr::Tag, const CP: bool] + [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Send + crate::tagged_ptr::Tag] [rustc_arena::TypedArena<T> where T: DynSend] [indexmap::IndexSet<V, S> where V: DynSend, S: DynSend] [indexmap::IndexMap<K, V, S> where K: DynSend, V: DynSend, S: DynSend] @@ -148,7 +148,7 @@ impl_dyn_sync!( [crate::sync::RwLock<T> where T: DynSend + DynSync] [crate::sync::WorkerLocal<T> where T: DynSend] [crate::intern::Interned<'a, T> where 'a, T: DynSync] - [crate::tagged_ptr::CopyTaggedPtr<P, T, CP> where P: Sync + crate::tagged_ptr::Pointer, T: Sync + crate::tagged_ptr::Tag, const CP: bool] + [crate::tagged_ptr::TaggedRef<'a, P, T> where 'a, P: Sync, T: Sync + crate::tagged_ptr::Tag] [parking_lot::lock_api::Mutex<R, T> where R: DynSync, T: ?Sized + DynSend] [parking_lot::lock_api::RwLock<R, T> where R: DynSync, T: ?Sized + DynSend + DynSync] [indexmap::IndexSet<V, S> where V: DynSync, S: DynSync] diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs index 19050746c2f..18e98e6c39f 100644 --- a/compiler/rustc_data_structures/src/profiling.rs +++ b/compiler/rustc_data_structures/src/profiling.rs @@ -860,6 +860,7 @@ fn get_thread_id() -> u32 { } // Memory reporting +#[cfg(bootstrap)] cfg_match! { cfg(windows) => { pub fn get_resident_set_size() -> Option<usize> { @@ -921,5 +922,67 @@ cfg_match! { } } +#[cfg(not(bootstrap))] +cfg_match! 
{ + windows => { + pub fn get_resident_set_size() -> Option<usize> { + use std::mem; + + use windows::{ + Win32::System::ProcessStatus::{K32GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS}, + Win32::System::Threading::GetCurrentProcess, + }; + + let mut pmc = PROCESS_MEMORY_COUNTERS::default(); + let pmc_size = mem::size_of_val(&pmc); + unsafe { + K32GetProcessMemoryInfo( + GetCurrentProcess(), + &mut pmc, + pmc_size as u32, + ) + } + .ok() + .ok()?; + + Some(pmc.WorkingSetSize) + } + } + target_os = "macos" => { + pub fn get_resident_set_size() -> Option<usize> { + use libc::{c_int, c_void, getpid, proc_pidinfo, proc_taskinfo, PROC_PIDTASKINFO}; + use std::mem; + const PROC_TASKINFO_SIZE: c_int = mem::size_of::<proc_taskinfo>() as c_int; + + unsafe { + let mut info: proc_taskinfo = mem::zeroed(); + let info_ptr = &mut info as *mut proc_taskinfo as *mut c_void; + let pid = getpid() as c_int; + let ret = proc_pidinfo(pid, PROC_PIDTASKINFO, 0, info_ptr, PROC_TASKINFO_SIZE); + if ret == PROC_TASKINFO_SIZE { + Some(info.pti_resident_size as usize) + } else { + None + } + } + } + } + unix => { + pub fn get_resident_set_size() -> Option<usize> { + let field = 1; + let contents = fs::read("/proc/self/statm").ok()?; + let contents = String::from_utf8(contents).ok()?; + let s = contents.split_whitespace().nth(field)?; + let npages = s.parse::<usize>().ok()?; + Some(npages * 4096) + } + } + _ => { + pub fn get_resident_set_size() -> Option<usize> { + None + } + } +} + #[cfg(test)] mod tests; diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs index 0872bd2c9ac..9cd0cc499ca 100644 --- a/compiler/rustc_data_structures/src/stable_hasher.rs +++ b/compiler/rustc_data_structures/src/stable_hasher.rs @@ -3,7 +3,7 @@ use std::marker::PhantomData; use std::mem; use std::num::NonZero; -use rustc_index::bit_set::{self, BitSet}; +use rustc_index::bit_set::{self, DenseBitSet}; use rustc_index::{Idx, IndexSlice, IndexVec}; use smallvec::SmallVec; @@ -544,7 +544,7 @@ where } } -impl<I: Idx, CTX> HashStable<CTX> for BitSet<I> { +impl<I: Idx, CTX> HashStable<CTX> for DenseBitSet<I> { fn hash_stable(&self, _ctx: &mut CTX, hasher: &mut StableHasher) { ::std::hash::Hash::hash(self, hasher); } diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs index aab50a13af0..635f241847c 100644 --- a/compiler/rustc_data_structures/src/stable_hasher/tests.rs +++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs @@ -17,9 +17,9 @@ fn hash<T: HashStable<()>>(t: &T) -> Hash128 { // Check that bit set hash includes the domain size. #[test] fn test_hash_bit_set() { - use rustc_index::bit_set::BitSet; - let a: BitSet<usize> = BitSet::new_empty(1); - let b: BitSet<usize> = BitSet::new_empty(2); + use rustc_index::bit_set::DenseBitSet; + let a: DenseBitSet<usize> = DenseBitSet::new_empty(1); + let b: DenseBitSet<usize> = DenseBitSet::new_empty(2); assert_ne!(a, b); assert_ne!(hash(&a), hash(&b)); } diff --git a/compiler/rustc_data_structures/src/tagged_ptr.rs b/compiler/rustc_data_structures/src/tagged_ptr.rs index 2914eece679..94db421f77e 100644 --- a/compiler/rustc_data_structures/src/tagged_ptr.rs +++ b/compiler/rustc_data_structures/src/tagged_ptr.rs @@ -1,116 +1,26 @@ -//! This module implements tagged pointers. +//! This module implements tagged pointers. In order to utilize the pointer +//! packing, you must have a tag type implementing the [`Tag`] trait. //! -//! 
In order to utilize the pointer packing, you must have two types: a pointer, -//! and a tag. -//! -//! The pointer must implement the [`Pointer`] trait, with the primary -//! requirement being convertible to and from a raw pointer. Note that the -//! pointer must be dereferenceable, so raw pointers generally cannot implement -//! the [`Pointer`] trait. This implies that the pointer must also be non-null. -//! -//! Many common pointer types already implement the [`Pointer`] trait. -//! -//! The tag must implement the [`Tag`] trait. -//! -//! We assert that the tag and the [`Pointer`] types are compatible at compile +//! We assert that the tag and the reference type is compatible at compile //! time. +use std::fmt; +use std::hash::{Hash, Hasher}; +use std::marker::PhantomData; +use std::num::NonZero; use std::ops::Deref; use std::ptr::NonNull; -use std::rc::Rc; -use std::sync::Arc; use crate::aligned::Aligned; +use crate::stable_hasher::{HashStable, StableHasher}; -mod copy; -mod drop; -mod impl_tag; - -pub use copy::CopyTaggedPtr; -pub use drop::TaggedPtr; - -/// This describes the pointer type encapsulated by [`TaggedPtr`] and -/// [`CopyTaggedPtr`]. -/// -/// # Safety -/// -/// The pointer returned from [`into_ptr`] must be a [valid], pointer to -/// [`<Self as Deref>::Target`]. -/// -/// Note that if `Self` implements [`DerefMut`] the pointer returned from -/// [`into_ptr`] must be valid for writes (and thus calling [`NonNull::as_mut`] -/// on it must be safe). -/// -/// The [`BITS`] constant must be correct. [`BITS`] least-significant bits, -/// must be zero on all pointers returned from [`into_ptr`]. -/// -/// For example, if the alignment of [`Self::Target`] is 2, then `BITS` should be 1. -/// -/// [`BITS`]: Pointer::BITS -/// [`into_ptr`]: Pointer::into_ptr -/// [valid]: std::ptr#safety -/// [`<Self as Deref>::Target`]: Deref::Target -/// [`Self::Target`]: Deref::Target -/// [`DerefMut`]: std::ops::DerefMut -pub unsafe trait Pointer: Deref { - /// Number of unused (always zero) **least-significant bits** in this - /// pointer, usually related to the pointees alignment. - /// - /// For example if [`BITS`] = `2`, then given `ptr = Self::into_ptr(..)`, - /// `ptr.addr() & 0b11 == 0` must be true. - /// - /// Most likely the value you want to use here is the following, unless - /// your [`Self::Target`] type is unsized (e.g., `ty::List<T>` in rustc) - /// or your pointer is over/under aligned, in which case you'll need to - /// manually figure out what the right type to pass to [`bits_for`] is, or - /// what the value to set here. - /// - /// ```rust - /// # use std::ops::Deref; - /// # use rustc_data_structures::tagged_ptr::bits_for; - /// # struct T; - /// # impl Deref for T { type Target = u8; fn deref(&self) -> &u8 { &0 } } - /// # impl T { - /// const BITS: u32 = bits_for::<<Self as Deref>::Target>(); - /// # } - /// ``` - /// - /// [`BITS`]: Pointer::BITS - /// [`Self::Target`]: Deref::Target - const BITS: u32; - - /// Turns this pointer into a raw, non-null pointer. - /// - /// The inverse of this function is [`from_ptr`]. - /// - /// This function guarantees that the least-significant [`Self::BITS`] bits - /// are zero. - /// - /// [`from_ptr`]: Pointer::from_ptr - /// [`Self::BITS`]: Pointer::BITS - fn into_ptr(self) -> NonNull<Self::Target>; - - /// Re-creates the original pointer, from a raw pointer returned by [`into_ptr`]. - /// - /// # Safety - /// - /// The passed `ptr` must be returned from [`into_ptr`]. 
- /// - /// This acts as [`ptr::read::<Self>()`] semantically, it should not be called more than - /// once on non-[`Copy`] `Pointer`s. - /// - /// [`into_ptr`]: Pointer::into_ptr - /// [`ptr::read::<Self>()`]: std::ptr::read - unsafe fn from_ptr(ptr: NonNull<Self::Target>) -> Self; -} - -/// This describes tags that the [`TaggedPtr`] struct can hold. +/// This describes tags that the [`TaggedRef`] struct can hold. /// /// # Safety /// -/// The [`BITS`] constant must be correct. -/// -/// No more than [`BITS`] least-significant bits may be set in the returned usize. +/// - The [`BITS`] constant must be correct. +/// - No more than [`BITS`] least-significant bits may be set in the returned usize. +/// - [`Eq`] and [`Hash`] must be implementable with the returned `usize` from `into_usize`. /// /// [`BITS`]: Tag::BITS pub unsafe trait Tag: Copy { @@ -166,118 +76,217 @@ pub const fn bits_for_tags(mut tags: &[usize]) -> u32 { bits } -unsafe impl<T: ?Sized + Aligned> Pointer for Box<T> { - const BITS: u32 = bits_for::<Self::Target>(); +/// A covariant [`Copy`] tagged borrow. This is essentially `{ pointer: &'a P, tag: T }` packed +/// in a single reference. +pub struct TaggedRef<'a, Pointee: Aligned + ?Sized, T: Tag> { + /// This is semantically a pair of `pointer: &'a P` and `tag: T` fields, + /// however we pack them in a single pointer, to save space. + /// + /// We pack the tag into the **most**-significant bits of the pointer to + /// ease retrieval of the value. A left shift is a multiplication and + /// those are embeddable in instruction encoding, for example: + /// + /// ```asm + /// // (<https://godbolt.org/z/jqcYPWEr3>) + /// example::shift_read3: + /// mov eax, dword ptr [8*rdi] + /// ret + /// + /// example::mask_read3: + /// and rdi, -8 + /// mov eax, dword ptr [rdi] + /// ret + /// ``` + /// + /// This is ASM outputted by rustc for reads of values behind tagged + /// pointers for different approaches of tagging: + /// - `shift_read3` uses `<< 3` (the tag is in the most-significant bits) + /// - `mask_read3` uses `& !0b111` (the tag is in the least-significant bits) + /// + /// The shift approach thus produces less instructions and is likely faster + /// (see <https://godbolt.org/z/Y913sMdWb>). + /// + /// Encoding diagram: + /// ```text + /// [ packed.addr ] + /// [ tag ] [ pointer.addr >> T::BITS ] <-- usize::BITS - T::BITS bits + /// ^ + /// | + /// T::BITS bits + /// ``` + /// + /// The tag can be retrieved by `packed.addr() >> T::BITS` and the pointer + /// can be retrieved by `packed.map_addr(|addr| addr << T::BITS)`. + packed: NonNull<Pointee>, + tag_pointer_ghost: PhantomData<(&'a Pointee, T)>, +} +impl<'a, P, T> TaggedRef<'a, P, T> +where + P: Aligned + ?Sized, + T: Tag, +{ + /// Tags `pointer` with `tag`. + /// + /// [`TaggedRef`]: crate::tagged_ptr::TaggedRef #[inline] - fn into_ptr(self) -> NonNull<T> { - // Safety: pointers from `Box::into_raw` are valid & non-null - unsafe { NonNull::new_unchecked(Box::into_raw(self)) } + pub fn new(pointer: &'a P, tag: T) -> Self { + Self { packed: Self::pack(NonNull::from(pointer), tag), tag_pointer_ghost: PhantomData } } + /// Retrieves the pointer. 
#[inline] - unsafe fn from_ptr(ptr: NonNull<T>) -> Self { - // Safety: `ptr` comes from `into_ptr` which calls `Box::into_raw` - unsafe { Box::from_raw(ptr.as_ptr()) } + pub fn pointer(self) -> &'a P { + // SAFETY: pointer_raw returns the original pointer + unsafe { self.pointer_raw().as_ref() } } -} - -unsafe impl<T: ?Sized + Aligned> Pointer for Rc<T> { - const BITS: u32 = bits_for::<Self::Target>(); + /// Retrieves the tag. #[inline] - fn into_ptr(self) -> NonNull<T> { - // Safety: pointers from `Rc::into_raw` are valid & non-null - unsafe { NonNull::new_unchecked(Rc::into_raw(self).cast_mut()) } + pub fn tag(&self) -> T { + // Unpack the tag, according to the `self.packed` encoding scheme + let tag = self.packed.addr().get() >> Self::TAG_BIT_SHIFT; + + // Safety: + // The shift retrieves the original value from `T::into_usize`, + // satisfying `T::from_usize`'s preconditions. + unsafe { T::from_usize(tag) } } + /// Sets the tag to a new value. #[inline] - unsafe fn from_ptr(ptr: NonNull<T>) -> Self { - // Safety: `ptr` comes from `into_ptr` which calls `Rc::into_raw` - unsafe { Rc::from_raw(ptr.as_ptr()) } + pub fn set_tag(&mut self, tag: T) { + self.packed = Self::pack(self.pointer_raw(), tag); } -} -unsafe impl<T: ?Sized + Aligned> Pointer for Arc<T> { - const BITS: u32 = bits_for::<Self::Target>(); + const TAG_BIT_SHIFT: u32 = usize::BITS - T::BITS; + const ASSERTION: () = { assert!(T::BITS <= bits_for::<P>()) }; + /// Pack pointer `ptr` with a `tag`, according to `self.packed` encoding scheme. #[inline] - fn into_ptr(self) -> NonNull<T> { - // Safety: pointers from `Arc::into_raw` are valid & non-null - unsafe { NonNull::new_unchecked(Arc::into_raw(self).cast_mut()) } + fn pack(ptr: NonNull<P>, tag: T) -> NonNull<P> { + // Trigger assert! + let () = Self::ASSERTION; + + let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT; + + ptr.map_addr(|addr| { + // Safety: + // - The pointer is `NonNull` => it's address is `NonZero<usize>` + // - `P::BITS` least significant bits are always zero (`Pointer` contract) + // - `T::BITS <= P::BITS` (from `Self::ASSERTION`) + // + // Thus `addr >> T::BITS` is guaranteed to be non-zero. + // + // `{non_zero} | packed_tag` can't make the value zero. + + let packed = (addr.get() >> T::BITS) | packed_tag; + unsafe { NonZero::new_unchecked(packed) } + }) } + /// Retrieves the original raw pointer from `self.packed`. 
#[inline] - unsafe fn from_ptr(ptr: NonNull<T>) -> Self { - // Safety: `ptr` comes from `into_ptr` which calls `Arc::into_raw` - unsafe { Arc::from_raw(ptr.as_ptr()) } + pub(super) fn pointer_raw(&self) -> NonNull<P> { + self.packed.map_addr(|addr| unsafe { NonZero::new_unchecked(addr.get() << T::BITS) }) } } -unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a T { - const BITS: u32 = bits_for::<Self::Target>(); +impl<P, T> Copy for TaggedRef<'_, P, T> +where + P: Aligned + ?Sized, + T: Tag, +{ +} +impl<P, T> Clone for TaggedRef<'_, P, T> +where + P: Aligned + ?Sized, + T: Tag, +{ #[inline] - fn into_ptr(self) -> NonNull<T> { - NonNull::from(self) + fn clone(&self) -> Self { + *self } +} + +impl<P, T> Deref for TaggedRef<'_, P, T> +where + P: Aligned + ?Sized, + T: Tag, +{ + type Target = P; #[inline] - unsafe fn from_ptr(ptr: NonNull<T>) -> Self { - // Safety: - // `ptr` comes from `into_ptr` which gets the pointer from a reference - unsafe { ptr.as_ref() } + fn deref(&self) -> &Self::Target { + self.pointer() } } -unsafe impl<'a, T: 'a + ?Sized + Aligned> Pointer for &'a mut T { - const BITS: u32 = bits_for::<Self::Target>(); +impl<P, T> fmt::Debug for TaggedRef<'_, P, T> +where + P: Aligned + fmt::Debug + ?Sized, + T: Tag + fmt::Debug, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("TaggedRef") + .field("pointer", &self.pointer()) + .field("tag", &self.tag()) + .finish() + } +} +impl<P, T> PartialEq for TaggedRef<'_, P, T> +where + P: Aligned + ?Sized, + T: Tag, +{ #[inline] - fn into_ptr(self) -> NonNull<T> { - NonNull::from(self) + #[allow(ambiguous_wide_pointer_comparisons)] + fn eq(&self, other: &Self) -> bool { + self.packed == other.packed } +} +impl<P, T: Tag> Eq for TaggedRef<'_, P, T> {} + +impl<P, T: Tag> Hash for TaggedRef<'_, P, T> { #[inline] - unsafe fn from_ptr(mut ptr: NonNull<T>) -> Self { - // Safety: - // `ptr` comes from `into_ptr` which gets the pointer from a reference - unsafe { ptr.as_mut() } + fn hash<H: Hasher>(&self, state: &mut H) { + self.packed.hash(state); } } -/// A tag type used in [`CopyTaggedPtr`] and [`TaggedPtr`] tests. 
-#[derive(Copy, Clone, Debug, PartialEq, Eq)] -#[cfg(test)] -enum Tag2 { - B00 = 0b00, - B01 = 0b01, - B10 = 0b10, - B11 = 0b11, +impl<'a, P, T, HCX> HashStable<HCX> for TaggedRef<'a, P, T> +where + P: HashStable<HCX> + Aligned + ?Sized, + T: Tag + HashStable<HCX>, +{ + fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) { + self.pointer().hash_stable(hcx, hasher); + self.tag().hash_stable(hcx, hasher); + } } -#[cfg(test)] -unsafe impl Tag for Tag2 { - const BITS: u32 = 2; - - fn into_usize(self) -> usize { - self as _ - } +// Safety: +// `TaggedRef<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such +// it's ok to implement `Sync` as long as `P: Sync, T: Sync` +unsafe impl<P, T> Sync for TaggedRef<'_, P, T> +where + P: Sync + Aligned + ?Sized, + T: Sync + Tag, +{ +} - unsafe fn from_usize(tag: usize) -> Self { - match tag { - 0b00 => Tag2::B00, - 0b01 => Tag2::B01, - 0b10 => Tag2::B10, - 0b11 => Tag2::B11, - _ => unreachable!(), - } - } +// Safety: +// `TaggedRef<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such +// it's ok to implement `Send` as long as `P: Send, T: Send` +unsafe impl<P, T> Send for TaggedRef<'_, P, T> +where + P: Sync + Aligned + ?Sized, + T: Send + Tag, +{ } #[cfg(test)] -impl<HCX> crate::stable_hasher::HashStable<HCX> for Tag2 { - fn hash_stable(&self, hcx: &mut HCX, hasher: &mut crate::stable_hasher::StableHasher) { - (*self as u8).hash_stable(hcx, hasher); - } -} +mod tests; diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy.rs deleted file mode 100644 index 25e107b0f41..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/copy.rs +++ /dev/null @@ -1,330 +0,0 @@ -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::marker::PhantomData; -use std::mem::ManuallyDrop; -use std::num::NonZero; -use std::ops::{Deref, DerefMut}; -use std::ptr::NonNull; - -use super::{Pointer, Tag}; -use crate::stable_hasher::{HashStable, StableHasher}; - -/// A [`Copy`] tagged pointer. -/// -/// This is essentially `{ pointer: P, tag: T }` packed in a single pointer. -/// -/// You should use this instead of the [`TaggedPtr`] type in all cases where -/// `P` implements [`Copy`]. -/// -/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without -/// unpacking. Otherwise we don't implement [`PartialEq`], [`Eq`] and [`Hash`]; -/// if you want that, wrap the [`CopyTaggedPtr`]. -/// -/// [`TaggedPtr`]: crate::tagged_ptr::TaggedPtr -pub struct CopyTaggedPtr<P, T, const COMPARE_PACKED: bool> -where - P: Pointer, - T: Tag, -{ - /// This is semantically a pair of `pointer: P` and `tag: T` fields, - /// however we pack them in a single pointer, to save space. - /// - /// We pack the tag into the **most**-significant bits of the pointer to - /// ease retrieval of the value. 
A left shift is a multiplication and - /// those are embeddable in instruction encoding, for example: - /// - /// ```asm - /// // (<https://godbolt.org/z/jqcYPWEr3>) - /// example::shift_read3: - /// mov eax, dword ptr [8*rdi] - /// ret - /// - /// example::mask_read3: - /// and rdi, -8 - /// mov eax, dword ptr [rdi] - /// ret - /// ``` - /// - /// This is ASM outputted by rustc for reads of values behind tagged - /// pointers for different approaches of tagging: - /// - `shift_read3` uses `<< 3` (the tag is in the most-significant bits) - /// - `mask_read3` uses `& !0b111` (the tag is in the least-significant bits) - /// - /// The shift approach thus produces less instructions and is likely faster - /// (see <https://godbolt.org/z/Y913sMdWb>). - /// - /// Encoding diagram: - /// ```text - /// [ packed.addr ] - /// [ tag ] [ pointer.addr >> T::BITS ] <-- usize::BITS - T::BITS bits - /// ^ - /// | - /// T::BITS bits - /// ``` - /// - /// The tag can be retrieved by `packed.addr() >> T::BITS` and the pointer - /// can be retrieved by `packed.map_addr(|addr| addr << T::BITS)`. - packed: NonNull<P::Target>, - tag_ghost: PhantomData<T>, -} - -// Note that even though `CopyTaggedPtr` is only really expected to work with -// `P: Copy`, can't add `P: Copy` bound, because `CopyTaggedPtr` is used in the -// `TaggedPtr`'s implementation. -impl<P, T, const CP: bool> CopyTaggedPtr<P, T, CP> -where - P: Pointer, - T: Tag, -{ - /// Tags `pointer` with `tag`. - /// - /// Note that this leaks `pointer`: it won't be dropped when - /// `CopyTaggedPtr` is dropped. If you have a pointer with a significant - /// drop, use [`TaggedPtr`] instead. - /// - /// [`TaggedPtr`]: crate::tagged_ptr::TaggedPtr - #[inline] - pub fn new(pointer: P, tag: T) -> Self { - Self { packed: Self::pack(P::into_ptr(pointer), tag), tag_ghost: PhantomData } - } - - /// Retrieves the pointer. - #[inline] - pub fn pointer(self) -> P - where - P: Copy, - { - // SAFETY: pointer_raw returns the original pointer - // - // Note that this isn't going to double-drop or anything because we have - // P: Copy - unsafe { P::from_ptr(self.pointer_raw()) } - } - - /// Retrieves the tag. - #[inline] - pub fn tag(&self) -> T { - // Unpack the tag, according to the `self.packed` encoding scheme - let tag = self.packed.addr().get() >> Self::TAG_BIT_SHIFT; - - // Safety: - // The shift retrieves the original value from `T::into_usize`, - // satisfying `T::from_usize`'s preconditions. - unsafe { T::from_usize(tag) } - } - - /// Sets the tag to a new value. - #[inline] - pub fn set_tag(&mut self, tag: T) { - self.packed = Self::pack(self.pointer_raw(), tag); - } - - const TAG_BIT_SHIFT: u32 = usize::BITS - T::BITS; - const ASSERTION: () = { assert!(T::BITS <= P::BITS) }; - - /// Pack pointer `ptr` that comes from [`P::into_ptr`] with a `tag`, - /// according to `self.packed` encoding scheme. - /// - /// [`P::into_ptr`]: Pointer::into_ptr - #[inline] - fn pack(ptr: NonNull<P::Target>, tag: T) -> NonNull<P::Target> { - // Trigger assert! - let () = Self::ASSERTION; - - let packed_tag = tag.into_usize() << Self::TAG_BIT_SHIFT; - - ptr.map_addr(|addr| { - // Safety: - // - The pointer is `NonNull` => it's address is `NonZero<usize>` - // - `P::BITS` least significant bits are always zero (`Pointer` contract) - // - `T::BITS <= P::BITS` (from `Self::ASSERTION`) - // - // Thus `addr >> T::BITS` is guaranteed to be non-zero. - // - // `{non_zero} | packed_tag` can't make the value zero. 
- - let packed = (addr.get() >> T::BITS) | packed_tag; - unsafe { NonZero::new_unchecked(packed) } - }) - } - - /// Retrieves the original raw pointer from `self.packed`. - #[inline] - pub(super) fn pointer_raw(&self) -> NonNull<P::Target> { - self.packed.map_addr(|addr| unsafe { NonZero::new_unchecked(addr.get() << T::BITS) }) - } - - /// This provides a reference to the `P` pointer itself, rather than the - /// `Deref::Target`. It is used for cases where we want to call methods - /// that may be implement differently for the Pointer than the Pointee - /// (e.g., `Rc::clone` vs cloning the inner value). - pub(super) fn with_pointer_ref<R>(&self, f: impl FnOnce(&P) -> R) -> R { - // Safety: - // - `self.raw.pointer_raw()` is originally returned from `P::into_ptr` - // and as such is valid for `P::from_ptr`. - // - This also allows us to not care whatever `f` panics or not. - // - Even though we create a copy of the pointer, we store it inside - // `ManuallyDrop` and only access it by-ref, so we don't double-drop. - // - // Semantically this is just `f(&self.pointer)` (where `self.pointer` - // is non-packed original pointer). - // - // Note that even though `CopyTaggedPtr` is only really expected to - // work with `P: Copy`, we have to assume `P: ?Copy`, because - // `CopyTaggedPtr` is used in the `TaggedPtr`'s implementation. - let ptr = unsafe { ManuallyDrop::new(P::from_ptr(self.pointer_raw())) }; - f(&ptr) - } -} - -impl<P, T, const CP: bool> Copy for CopyTaggedPtr<P, T, CP> -where - P: Pointer + Copy, - T: Tag, -{ -} - -impl<P, T, const CP: bool> Clone for CopyTaggedPtr<P, T, CP> -where - P: Pointer + Copy, - T: Tag, -{ - #[inline] - fn clone(&self) -> Self { - *self - } -} - -impl<P, T, const CP: bool> Deref for CopyTaggedPtr<P, T, CP> -where - P: Pointer, - T: Tag, -{ - type Target = P::Target; - - #[inline] - fn deref(&self) -> &Self::Target { - // Safety: - // `pointer_raw` returns the original pointer from `P::into_ptr` which, - // by the `Pointer`'s contract, must be valid. - unsafe { self.pointer_raw().as_ref() } - } -} - -impl<P, T, const CP: bool> DerefMut for CopyTaggedPtr<P, T, CP> -where - P: Pointer + DerefMut, - T: Tag, -{ - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - // Safety: - // `pointer_raw` returns the original pointer from `P::into_ptr` which, - // by the `Pointer`'s contract, must be valid for writes if - // `P: DerefMut`. 
- unsafe { self.pointer_raw().as_mut() } - } -} - -impl<P, T, const CP: bool> fmt::Debug for CopyTaggedPtr<P, T, CP> -where - P: Pointer + fmt::Debug, - T: Tag + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.with_pointer_ref(|ptr| { - f.debug_struct("CopyTaggedPtr").field("pointer", ptr).field("tag", &self.tag()).finish() - }) - } -} - -impl<P, T> PartialEq for CopyTaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ - #[inline] - #[allow(ambiguous_wide_pointer_comparisons)] - fn eq(&self, other: &Self) -> bool { - self.packed == other.packed - } -} - -impl<P, T> Eq for CopyTaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ -} - -impl<P, T> Hash for CopyTaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ - #[inline] - fn hash<H: Hasher>(&self, state: &mut H) { - self.packed.hash(state); - } -} - -impl<P, T, HCX, const CP: bool> HashStable<HCX> for CopyTaggedPtr<P, T, CP> -where - P: Pointer + HashStable<HCX>, - T: Tag + HashStable<HCX>, -{ - fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) { - self.with_pointer_ref(|ptr| ptr.hash_stable(hcx, hasher)); - self.tag().hash_stable(hcx, hasher); - } -} - -// Safety: -// `CopyTaggedPtr<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such -// it's ok to implement `Sync` as long as `P: Sync, T: Sync` -unsafe impl<P, T, const CP: bool> Sync for CopyTaggedPtr<P, T, CP> -where - P: Sync + Pointer, - T: Sync + Tag, -{ -} - -// Safety: -// `CopyTaggedPtr<P, T, ..>` is semantically just `{ ptr: P, tag: T }`, as such -// it's ok to implement `Send` as long as `P: Send, T: Send` -unsafe impl<P, T, const CP: bool> Send for CopyTaggedPtr<P, T, CP> -where - P: Send + Pointer, - T: Send + Tag, -{ -} - -/// Test that `new` does not compile if there is not enough alignment for the -/// tag in the pointer. -/// -/// ```compile_fail,E0080 -/// use rustc_data_structures::tagged_ptr::{CopyTaggedPtr, Tag}; -/// -/// #[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, B11 = 0b11 }; -/// -/// unsafe impl Tag for Tag2 { -/// const BITS: u32 = 2; -/// -/// fn into_usize(self) -> usize { todo!() } -/// unsafe fn from_usize(tag: usize) -> Self { todo!() } -/// } -/// -/// let value = 12u16; -/// let reference = &value; -/// let tag = Tag2::B01; -/// -/// let _ptr = CopyTaggedPtr::<_, _, true>::new(reference, tag); -/// ``` -// For some reason miri does not get the compile error -// probably it `check`s instead of `build`ing? 
-#[cfg(not(miri))] -const _: () = (); - -#[cfg(test)] -mod tests; diff --git a/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs deleted file mode 100644 index 160af8a65d9..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/copy/tests.rs +++ /dev/null @@ -1,50 +0,0 @@ -use std::ptr; - -use crate::hashes::Hash128; -use crate::stable_hasher::{HashStable, StableHasher}; -use crate::tagged_ptr::{CopyTaggedPtr, Pointer, Tag, Tag2}; - -#[test] -fn smoke() { - let value = 12u32; - let reference = &value; - let tag = Tag2::B01; - - let ptr = tag_ptr(reference, tag); - - assert_eq!(ptr.tag(), tag); - assert_eq!(*ptr, 12); - assert!(ptr::eq(ptr.pointer(), reference)); - - let copy = ptr; - - let mut ptr = ptr; - ptr.set_tag(Tag2::B00); - assert_eq!(ptr.tag(), Tag2::B00); - - assert_eq!(copy.tag(), tag); - assert_eq!(*copy, 12); - assert!(ptr::eq(copy.pointer(), reference)); -} - -#[test] -fn stable_hash_hashes_as_tuple() { - let hash_packed = { - let mut hasher = StableHasher::new(); - tag_ptr(&12, Tag2::B11).hash_stable(&mut (), &mut hasher); - hasher.finish::<Hash128>() - }; - - let hash_tupled = { - let mut hasher = StableHasher::new(); - (&12, Tag2::B11).hash_stable(&mut (), &mut hasher); - hasher.finish::<Hash128>() - }; - - assert_eq!(hash_packed, hash_tupled); -} - -/// Helper to create tagged pointers without specifying `COMPARE_PACKED` if it does not matter. -fn tag_ptr<P: Pointer, T: Tag>(ptr: P, tag: T) -> CopyTaggedPtr<P, T, true> { - CopyTaggedPtr::new(ptr, tag) -} diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop.rs deleted file mode 100644 index 319a8cdd399..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/drop.rs +++ /dev/null @@ -1,178 +0,0 @@ -use std::fmt; -use std::hash::{Hash, Hasher}; -use std::ops::{Deref, DerefMut}; - -use super::{CopyTaggedPtr, Pointer, Tag}; -use crate::stable_hasher::{HashStable, StableHasher}; - -/// A tagged pointer that supports pointers that implement [`Drop`]. -/// -/// This is essentially `{ pointer: P, tag: T }` packed in a single pointer. -/// -/// You should use [`CopyTaggedPtr`] instead of the this type in all cases -/// where `P` implements [`Copy`]. -/// -/// If `COMPARE_PACKED` is true, then the pointers will be compared and hashed without -/// unpacking. Otherwise we don't implement [`PartialEq`], [`Eq`] and [`Hash`]; -/// if you want that, wrap the [`TaggedPtr`]. -pub struct TaggedPtr<P, T, const COMPARE_PACKED: bool> -where - P: Pointer, - T: Tag, -{ - raw: CopyTaggedPtr<P, T, COMPARE_PACKED>, -} - -impl<P, T, const CP: bool> TaggedPtr<P, T, CP> -where - P: Pointer, - T: Tag, -{ - /// Tags `pointer` with `tag`. - #[inline] - pub fn new(pointer: P, tag: T) -> Self { - TaggedPtr { raw: CopyTaggedPtr::new(pointer, tag) } - } - - /// Retrieves the tag. - #[inline] - pub fn tag(&self) -> T { - self.raw.tag() - } - - /// Sets the tag to a new value. 
- #[inline] - pub fn set_tag(&mut self, tag: T) { - self.raw.set_tag(tag) - } -} - -impl<P, T, const CP: bool> Clone for TaggedPtr<P, T, CP> -where - P: Pointer + Clone, - T: Tag, -{ - fn clone(&self) -> Self { - let ptr = self.raw.with_pointer_ref(P::clone); - - Self::new(ptr, self.tag()) - } -} - -impl<P, T, const CP: bool> Deref for TaggedPtr<P, T, CP> -where - P: Pointer, - T: Tag, -{ - type Target = P::Target; - - #[inline] - fn deref(&self) -> &Self::Target { - self.raw.deref() - } -} - -impl<P, T, const CP: bool> DerefMut for TaggedPtr<P, T, CP> -where - P: Pointer + DerefMut, - T: Tag, -{ - #[inline] - fn deref_mut(&mut self) -> &mut Self::Target { - self.raw.deref_mut() - } -} - -impl<P, T, const CP: bool> Drop for TaggedPtr<P, T, CP> -where - P: Pointer, - T: Tag, -{ - fn drop(&mut self) { - // No need to drop the tag, as it's Copy - unsafe { - drop(P::from_ptr(self.raw.pointer_raw())); - } - } -} - -impl<P, T, const CP: bool> fmt::Debug for TaggedPtr<P, T, CP> -where - P: Pointer + fmt::Debug, - T: Tag + fmt::Debug, -{ - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - self.raw.with_pointer_ref(|ptr| { - f.debug_struct("TaggedPtr").field("pointer", ptr).field("tag", &self.tag()).finish() - }) - } -} - -impl<P, T> PartialEq for TaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ - #[inline] - fn eq(&self, other: &Self) -> bool { - self.raw.eq(&other.raw) - } -} - -impl<P, T> Eq for TaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ -} - -impl<P, T> Hash for TaggedPtr<P, T, true> -where - P: Pointer, - T: Tag, -{ - #[inline] - fn hash<H: Hasher>(&self, state: &mut H) { - self.raw.hash(state); - } -} - -impl<P, T, HCX, const CP: bool> HashStable<HCX> for TaggedPtr<P, T, CP> -where - P: Pointer + HashStable<HCX>, - T: Tag + HashStable<HCX>, -{ - fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) { - self.raw.hash_stable(hcx, hasher); - } -} - -/// Test that `new` does not compile if there is not enough alignment for the -/// tag in the pointer. -/// -/// ```compile_fail,E0080 -/// use rustc_data_structures::tagged_ptr::{TaggedPtr, Tag}; -/// -/// #[derive(Copy, Clone, Debug, PartialEq, Eq)] -/// enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, B11 = 0b11 }; -/// -/// unsafe impl Tag for Tag2 { -/// const BITS: u32 = 2; -/// -/// fn into_usize(self) -> usize { todo!() } -/// unsafe fn from_usize(tag: usize) -> Self { todo!() } -/// } -/// -/// let value = 12u16; -/// let reference = &value; -/// let tag = Tag2::B01; -/// -/// let _ptr = TaggedPtr::<_, _, true>::new(reference, tag); -/// ``` -// For some reason miri does not get the compile error -// probably it `check`s instead of `build`ing? 
-#[cfg(not(miri))] -const _: () = (); - -#[cfg(test)] -mod tests; diff --git a/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs deleted file mode 100644 index 4d342c72cc5..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/drop/tests.rs +++ /dev/null @@ -1,72 +0,0 @@ -use std::ptr; -use std::sync::Arc; - -use crate::tagged_ptr::{Pointer, Tag, Tag2, TaggedPtr}; - -#[test] -fn smoke() { - let value = 12u32; - let reference = &value; - let tag = Tag2::B01; - - let ptr = tag_ptr(reference, tag); - - assert_eq!(ptr.tag(), tag); - assert_eq!(*ptr, 12); - - let clone = ptr.clone(); - assert_eq!(clone.tag(), tag); - assert_eq!(*clone, 12); - - let mut ptr = ptr; - ptr.set_tag(Tag2::B00); - assert_eq!(ptr.tag(), Tag2::B00); - - assert_eq!(clone.tag(), tag); - assert_eq!(*clone, 12); - assert!(ptr::eq(&*ptr, &*clone)) -} - -#[test] -fn boxed() { - let value = 12u32; - let boxed = Box::new(value); - let tag = Tag2::B01; - - let ptr = tag_ptr(boxed, tag); - - assert_eq!(ptr.tag(), tag); - assert_eq!(*ptr, 12); - - let clone = ptr.clone(); - assert_eq!(clone.tag(), tag); - assert_eq!(*clone, 12); - - let mut ptr = ptr; - ptr.set_tag(Tag2::B00); - assert_eq!(ptr.tag(), Tag2::B00); - - assert_eq!(clone.tag(), tag); - assert_eq!(*clone, 12); - assert!(!ptr::eq(&*ptr, &*clone)) -} - -#[test] -fn arclones() { - let value = 12u32; - let arc = Arc::new(value); - let tag = Tag2::B01; - - let ptr = tag_ptr(arc, tag); - - assert_eq!(ptr.tag(), tag); - assert_eq!(*ptr, 12); - - let clone = ptr.clone(); - assert!(ptr::eq(&*ptr, &*clone)) -} - -/// Helper to create tagged pointers without specifying `COMPARE_PACKED` if it does not matter. -fn tag_ptr<P: Pointer, T: Tag>(ptr: P, tag: T) -> TaggedPtr<P, T, true> { - TaggedPtr::new(ptr, tag) -} diff --git a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs deleted file mode 100644 index f17a0bf26d7..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag.rs +++ /dev/null @@ -1,144 +0,0 @@ -/// Implements [`Tag`] for a given type. -/// -/// You can use `impl_tag` on structs and enums. -/// You need to specify the type and all its possible values, -/// which can only be paths with optional fields. -/// -/// [`Tag`]: crate::tagged_ptr::Tag -/// -/// # Examples -/// -/// Basic usage: -/// -/// ``` -/// #![feature(macro_metavar_expr)] -/// use rustc_data_structures::{impl_tag, tagged_ptr::Tag}; -/// -/// #[derive(Copy, Clone, PartialEq, Debug)] -/// enum SomeTag { -/// A, -/// B, -/// X { v: bool }, -/// Y(bool, bool), -/// } -/// -/// impl_tag! 
{ -/// // The type for which the `Tag` will be implemented -/// impl Tag for SomeTag; -/// // You need to specify all possible tag values: -/// SomeTag::A, // 0 -/// SomeTag::B, // 1 -/// // For variants with fields, you need to specify the fields: -/// SomeTag::X { v: true }, // 2 -/// SomeTag::X { v: false }, // 3 -/// // For tuple variants use named syntax: -/// SomeTag::Y { 0: true, 1: true }, // 4 -/// SomeTag::Y { 0: false, 1: true }, // 5 -/// SomeTag::Y { 0: true, 1: false }, // 6 -/// SomeTag::Y { 0: false, 1: false }, // 7 -/// } -/// -/// // Tag values are assigned in order: -/// assert_eq!(SomeTag::A.into_usize(), 0); -/// assert_eq!(SomeTag::X { v: false }.into_usize(), 3); -/// assert_eq!(SomeTag::Y(false, true).into_usize(), 5); -/// -/// assert_eq!(unsafe { SomeTag::from_usize(1) }, SomeTag::B); -/// assert_eq!(unsafe { SomeTag::from_usize(2) }, SomeTag::X { v: true }); -/// assert_eq!(unsafe { SomeTag::from_usize(7) }, SomeTag::Y(false, false)); -/// ``` -/// -/// Structs are supported: -/// -/// ``` -/// #![feature(macro_metavar_expr)] -/// # use rustc_data_structures::impl_tag; -/// #[derive(Copy, Clone)] -/// struct Flags { a: bool, b: bool } -/// -/// impl_tag! { -/// impl Tag for Flags; -/// Flags { a: true, b: true }, -/// Flags { a: false, b: true }, -/// Flags { a: true, b: false }, -/// Flags { a: false, b: false }, -/// } -/// ``` -/// -/// Not specifying all values results in a compile error: -/// -/// ```compile_fail,E0004 -/// #![feature(macro_metavar_expr)] -/// # use rustc_data_structures::impl_tag; -/// #[derive(Copy, Clone)] -/// enum E { -/// A, -/// B, -/// } -/// -/// impl_tag! { -/// impl Tag for E; -/// E::A, -/// } -/// ``` -#[macro_export] -macro_rules! impl_tag { - ( - impl Tag for $Self:ty; - $( - $($path:ident)::* $( { $( $fields:tt )* })?, - )* - ) => { - // Safety: - // `bits_for_tags` is called on the same `${index()}`-es as - // `into_usize` returns, thus `BITS` constant is correct. - unsafe impl $crate::tagged_ptr::Tag for $Self { - const BITS: u32 = $crate::tagged_ptr::bits_for_tags(&[ - $( - ${index()}, - $( ${ignore($path)} )* - )* - ]); - - #[inline] - fn into_usize(self) -> usize { - // This forbids use of repeating patterns (`Enum::V`&`Enum::V`, etc) - // (or at least it should, see <https://github.com/rust-lang/rust/issues/110613>) - #[forbid(unreachable_patterns)] - match self { - // `match` is doing heavy lifting here, by requiring exhaustiveness - $( - $($path)::* $( { $( $fields )* } )? => ${index()}, - )* - } - } - - #[inline] - unsafe fn from_usize(tag: usize) -> Self { - match tag { - $( - ${index()} => $($path)::* $( { $( $fields )* } )?, - )* - - // Safety: - // `into_usize` only returns `${index()}` of the same - // repetition as we are filtering above, thus if this is - // reached, the safety contract of this function was - // already breached. - _ => unsafe { - debug_assert!( - false, - "invalid tag: {tag}\ - (this is a bug in the caller of `from_usize`)" - ); - std::hint::unreachable_unchecked() - }, - } - } - - } - }; -} - -#[cfg(test)] -mod tests; diff --git a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs deleted file mode 100644 index 62c926153e1..00000000000 --- a/compiler/rustc_data_structures/src/tagged_ptr/impl_tag/tests.rs +++ /dev/null @@ -1,34 +0,0 @@ -#[test] -fn bits_constant() { - use crate::tagged_ptr::Tag; - - #[derive(Copy, Clone)] - struct Unit; - impl_tag! 
{ impl Tag for Unit; Unit, } - assert_eq!(Unit::BITS, 0); - - #[derive(Copy, Clone)] - enum Enum3 { - A, - B, - C, - } - impl_tag! { impl Tag for Enum3; Enum3::A, Enum3::B, Enum3::C, } - assert_eq!(Enum3::BITS, 2); - - #[derive(Copy, Clone)] - struct Eight(bool, bool, bool); - impl_tag! { - impl Tag for Eight; - Eight { 0: true, 1: true, 2: true }, - Eight { 0: true, 1: true, 2: false }, - Eight { 0: true, 1: false, 2: true }, - Eight { 0: true, 1: false, 2: false }, - Eight { 0: false, 1: true, 2: true }, - Eight { 0: false, 1: true, 2: false }, - Eight { 0: false, 1: false, 2: true }, - Eight { 0: false, 1: false, 2: false }, - } - - assert_eq!(Eight::BITS, 3); -} diff --git a/compiler/rustc_data_structures/src/tagged_ptr/tests.rs b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs new file mode 100644 index 00000000000..b1bdee18d6d --- /dev/null +++ b/compiler/rustc_data_structures/src/tagged_ptr/tests.rs @@ -0,0 +1,105 @@ +use std::ptr; + +use super::*; +use crate::hashes::Hash128; +use crate::stable_hasher::{HashStable, StableHasher}; + +/// A tag type used in [`TaggedRef`] tests. +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum Tag2 { + B00 = 0b00, + B01 = 0b01, + B10 = 0b10, + B11 = 0b11, +} + +unsafe impl Tag for Tag2 { + const BITS: u32 = 2; + + fn into_usize(self) -> usize { + self as _ + } + + unsafe fn from_usize(tag: usize) -> Self { + match tag { + 0b00 => Tag2::B00, + 0b01 => Tag2::B01, + 0b10 => Tag2::B10, + 0b11 => Tag2::B11, + _ => unreachable!(), + } + } +} + +impl<HCX> crate::stable_hasher::HashStable<HCX> for Tag2 { + fn hash_stable(&self, hcx: &mut HCX, hasher: &mut crate::stable_hasher::StableHasher) { + (*self as u8).hash_stable(hcx, hasher); + } +} + +#[test] +fn smoke() { + let value = 12u32; + let reference = &value; + let tag = Tag2::B01; + + let ptr = TaggedRef::new(reference, tag); + + assert_eq!(ptr.tag(), tag); + assert_eq!(*ptr, 12); + assert!(ptr::eq(ptr.pointer(), reference)); + + let copy = ptr; + + let mut ptr = ptr; + ptr.set_tag(Tag2::B00); + assert_eq!(ptr.tag(), Tag2::B00); + + assert_eq!(copy.tag(), tag); + assert_eq!(*copy, 12); + assert!(ptr::eq(copy.pointer(), reference)); +} + +#[test] +fn stable_hash_hashes_as_tuple() { + let hash_packed = { + let mut hasher = StableHasher::new(); + TaggedRef::new(&12, Tag2::B11).hash_stable(&mut (), &mut hasher); + hasher.finish::<Hash128>() + }; + + let hash_tupled = { + let mut hasher = StableHasher::new(); + (&12, Tag2::B11).hash_stable(&mut (), &mut hasher); + hasher.finish::<Hash128>() + }; + + assert_eq!(hash_packed, hash_tupled); +} + +/// Test that `new` does not compile if there is not enough alignment for the +/// tag in the pointer. +/// +/// ```compile_fail,E0080 +/// use rustc_data_structures::tagged_ptr::{TaggedRef, Tag}; +/// +/// #[derive(Copy, Clone, Debug, PartialEq, Eq)] +/// enum Tag2 { B00 = 0b00, B01 = 0b01, B10 = 0b10, B11 = 0b11 }; +/// +/// unsafe impl Tag for Tag2 { +/// const BITS: u32 = 2; +/// +/// fn into_usize(self) -> usize { todo!() } +/// unsafe fn from_usize(tag: usize) -> Self { todo!() } +/// } +/// +/// let value = 12u16; +/// let reference = &value; +/// let tag = Tag2::B01; +/// +/// let _ptr = TaggedRef::<_, _, true>::new(reference, tag); +/// ``` +// For some reason miri does not get the compile error +// probably it `check`s instead of `build`ing? 
+#[cfg(not(miri))] +const _: () = (); diff --git a/compiler/rustc_data_structures/src/work_queue.rs b/compiler/rustc_data_structures/src/work_queue.rs index ca052e2eac6..815756edfeb 100644 --- a/compiler/rustc_data_structures/src/work_queue.rs +++ b/compiler/rustc_data_structures/src/work_queue.rs @@ -1,7 +1,7 @@ use std::collections::VecDeque; use rustc_index::Idx; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; /// A work queue is a handy data structure for tracking work left to /// do. (For example, basic blocks left to process.) It is basically a @@ -11,14 +11,14 @@ use rustc_index::bit_set::BitSet; /// and also use a bit set to track occupancy. pub struct WorkQueue<T: Idx> { deque: VecDeque<T>, - set: BitSet<T>, + set: DenseBitSet<T>, } impl<T: Idx> WorkQueue<T> { /// Creates a new work queue that starts empty, where elements range from (0..len). #[inline] pub fn with_none(len: usize) -> Self { - WorkQueue { deque: VecDeque::with_capacity(len), set: BitSet::new_empty(len) } + WorkQueue { deque: VecDeque::with_capacity(len), set: DenseBitSet::new_empty(len) } } /// Attempt to enqueue `element` in the work queue. Returns false if it was already present. diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 0413e5e8634..f7e7aa64614 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -1191,15 +1191,6 @@ fn print_flag_list<T>(cmdline_opt: &str, flag_list: &[OptionDesc<T>]) { /// be public when using rustc as a library, see /// <https://github.com/rust-lang/rust/commit/2b4c33817a5aaecabf4c6598d41e190080ec119e> pub fn handle_options(early_dcx: &EarlyDiagCtxt, args: &[String]) -> Option<getopts::Matches> { - if args.is_empty() { - // user did not write `-v` nor `-Z unstable-options`, so do not - // include that extra information. - let nightly_build = - rustc_feature::UnstableFeatures::from_environment(None).is_nightly_build(); - usage(false, false, nightly_build); - return None; - } - // Parse with *all* options defined in the compiler, we don't worry about // option stability here we just want to parse as much as possible. let mut options = getopts::Options::new(); @@ -1245,7 +1236,7 @@ pub fn handle_options(early_dcx: &EarlyDiagCtxt, args: &[String]) -> Option<geto // (unstable option being used on stable) nightly_options::check_nightly_options(early_dcx, &matches, &config::rustc_optgroups()); - if matches.opt_present("h") || matches.opt_present("help") { + if args.is_empty() || matches.opt_present("h") || matches.opt_present("help") { // Only show unstable options in --help if we accept unstable options. let unstable_enabled = nightly_options::is_unstable_enabled(&matches); let nightly_build = nightly_options::match_is_nightly_build(&matches); diff --git a/compiler/rustc_error_codes/src/error_codes/E0207.md b/compiler/rustc_error_codes/src/error_codes/E0207.md index 95e7c9fc76c..5b35748f472 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0207.md +++ b/compiler/rustc_error_codes/src/error_codes/E0207.md @@ -195,6 +195,30 @@ impl<'a> Contains for Foo { Please note that unconstrained lifetime parameters are not supported if they are being used by an associated type. 
+In cases where the associated type's lifetime is meant to be tied to the the +self type, and none of the methods on the trait need ownership or different +mutability, then an option is to implement the trait on a borrowed type: + +```rust +struct Foo(i32); + +trait Contents { + type Item; + + fn get(&self) -> Self::Item; +} + +// Note the lifetime `'a` is used both for the self type... +impl<'a> Contents for &'a Foo { + // ...and the associated type. + type Item = &'a i32; + + fn get(&self) -> Self::Item { + &self.0 + } +} +``` + ### Additional information For more information, please see [RFC 447]. diff --git a/compiler/rustc_error_codes/src/error_codes/E0253.md b/compiler/rustc_error_codes/src/error_codes/E0253.md index aea51d40238..705d1bfc53e 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0253.md +++ b/compiler/rustc_error_codes/src/error_codes/E0253.md @@ -1,19 +1,19 @@ -Attempt was made to import an unimportable value. This can happen when trying -to import a method from a trait. +Attempt was made to import an unimportable type. This can happen when trying +to import a type from a trait. Erroneous code example: ```compile_fail,E0253 mod foo { pub trait MyTrait { - fn do_something(); + type SomeType; } } -use foo::MyTrait::do_something; -// error: `do_something` is not directly importable +use foo::MyTrait::SomeType; +// error: `SomeType` is not directly importable fn main() {} ``` -It's invalid to directly import methods belonging to a trait or concrete type. +It's invalid to directly import types belonging to a trait. diff --git a/compiler/rustc_errors/src/diagnostic.rs b/compiler/rustc_errors/src/diagnostic.rs index afce877547f..797dcd7b4d1 100644 --- a/compiler/rustc_errors/src/diagnostic.rs +++ b/compiler/rustc_errors/src/diagnostic.rs @@ -880,7 +880,7 @@ impl<'a, G: EmissionGuarantee> Diag<'a, G> { ) } } - /// Show a suggestion that has multiple parts to it, always as it's own subdiagnostic. + /// Show a suggestion that has multiple parts to it, always as its own subdiagnostic. /// In other words, multiple changes need to be applied as part of this suggestion. #[rustc_lint_diagnostics] pub fn multipart_suggestion_verbose( diff --git a/compiler/rustc_errors/src/lib.rs b/compiler/rustc_errors/src/lib.rs index 58fe3ec4b85..cc5eb9c335e 100644 --- a/compiler/rustc_errors/src/lib.rs +++ b/compiler/rustc_errors/src/lib.rs @@ -566,9 +566,7 @@ pub enum StashKey { /// FRU syntax MaybeFruTypo, CallAssocMethod, - TraitMissingMethod, AssociatedTypeSuggestion, - OpaqueHiddenTypeMismatch, MaybeForgetReturn, /// Query cycle detected, stashing in favor of a better error. 
Cycle, diff --git a/compiler/rustc_expand/src/base.rs b/compiler/rustc_expand/src/base.rs index e6adbc0f0ac..43feab94c01 100644 --- a/compiler/rustc_expand/src/base.rs +++ b/compiler/rustc_expand/src/base.rs @@ -522,7 +522,7 @@ impl MacResult for MacEager { return Some(P(ast::Pat { id: ast::DUMMY_NODE_ID, span: e.span, - kind: PatKind::Lit(e), + kind: PatKind::Expr(e), tokens: None, })); } diff --git a/compiler/rustc_expand/src/build.rs b/compiler/rustc_expand/src/build.rs index 22bfda34cc0..8bf09cf96b3 100644 --- a/compiler/rustc_expand/src/build.rs +++ b/compiler/rustc_expand/src/build.rs @@ -486,7 +486,7 @@ impl<'a> ExtCtxt<'a> { self.pat(span, PatKind::Wild) } pub fn pat_lit(&self, span: Span, expr: P<ast::Expr>) -> P<ast::Pat> { - self.pat(span, PatKind::Lit(expr)) + self.pat(span, PatKind::Expr(expr)) } pub fn pat_ident(&self, span: Span, ident: Ident) -> P<ast::Pat> { self.pat_ident_binding_mode(span, ident, ast::BindingMode::NONE) diff --git a/compiler/rustc_expand/src/config.rs b/compiler/rustc_expand/src/config.rs index 91624c7554c..3e3f35332e0 100644 --- a/compiler/rustc_expand/src/config.rs +++ b/compiler/rustc_expand/src/config.rs @@ -18,7 +18,7 @@ use rustc_lint_defs::BuiltinLintDiag; use rustc_parse::validate_attr; use rustc_session::Session; use rustc_session::parse::feature_err; -use rustc_span::{Span, Symbol, sym}; +use rustc_span::{STDLIB_STABLE_CRATES, Span, Symbol, sym}; use thin_vec::ThinVec; use tracing::instrument; @@ -107,14 +107,11 @@ pub fn features(sess: &Session, krate_attrs: &[Attribute], crate_name: Symbol) - // If the enabled feature is unstable, record it. if UNSTABLE_LANG_FEATURES.iter().find(|f| name == f.name).is_some() { - // When the ICE comes from core, alloc or std (approximation of the standard - // library), there's a chance that the person hitting the ICE may be using - // -Zbuild-std or similar with an untested target. The bug is probably in the - // standard library and not the compiler in that case, but that doesn't really - // matter - we want a bug report. - if features.internal(name) - && ![sym::core, sym::alloc, sym::std].contains(&crate_name) - { + // When the ICE comes a standard library crate, there's a chance that the person + // hitting the ICE may be using -Zbuild-std or similar with an untested target. + // The bug is probably in the standard library and not the compiler in that case, + // but that doesn't really matter - we want a bug report. + if features.internal(name) && !STDLIB_STABLE_CRATES.contains(&crate_name) { sess.using_internal_features.store(true, std::sync::atomic::Ordering::Relaxed); } @@ -133,7 +130,7 @@ pub fn features(sess: &Session, krate_attrs: &[Attribute], crate_name: Symbol) - // Similar to above, detect internal lib features to suppress // the ICE message that asks for a report. - if features.internal(name) && ![sym::core, sym::alloc, sym::std].contains(&crate_name) { + if features.internal(name) && !STDLIB_STABLE_CRATES.contains(&crate_name) { sess.using_internal_features.store(true, std::sync::atomic::Ordering::Relaxed); } } diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs index 776de1988cc..217a7aeb2d7 100644 --- a/compiler/rustc_feature/src/accepted.rs +++ b/compiler/rustc_feature/src/accepted.rs @@ -73,7 +73,7 @@ declare_features! ( /// Allows free and inherent `async fn`s, `async` blocks, and `<expr>.await` expressions. (accepted, async_await, "1.39.0", Some(50547)), /// Allows `async || body` closures. 
- (accepted, async_closure, "CURRENT_RUSTC_VERSION", Some(62290)), + (accepted, async_closure, "1.85.0", Some(62290)), /// Allows async functions to be declared, implemented, and used in traits. (accepted, async_fn_in_trait, "1.75.0", Some(91611)), /// Allows all literals in attribute lists and values of key-value pairs. @@ -176,7 +176,7 @@ declare_features! ( /// Allows using the `#[diagnostic]` attribute tool namespace (accepted, diagnostic_namespace, "1.78.0", Some(111996)), /// Controls errors in trait implementations. - (accepted, do_not_recommend, "CURRENT_RUSTC_VERSION", Some(51992)), + (accepted, do_not_recommend, "1.85.0", Some(51992)), /// Allows `#[doc(alias = "...")]`. (accepted, doc_alias, "1.48.0", Some(50146)), /// Allows `..` in tuple (struct) patterns. @@ -199,7 +199,7 @@ declare_features! ( (accepted, extended_key_value_attributes, "1.54.0", Some(78835)), /// Allows using `efiapi`, `aapcs`, `sysv64` and `win64` as calling /// convention for functions with varargs. - (accepted, extended_varargs_abi_support, "CURRENT_RUSTC_VERSION", Some(100189)), + (accepted, extended_varargs_abi_support, "1.85.0", Some(100189)), /// Allows resolving absolute paths as paths from other crates. (accepted, extern_absolute_paths, "1.30.0", Some(44660)), /// Allows `extern crate foo as bar;`. This puts `bar` into extern prelude. diff --git a/compiler/rustc_feature/src/builtin_attrs.rs b/compiler/rustc_feature/src/builtin_attrs.rs index 5421517046d..5510e7e09e5 100644 --- a/compiler/rustc_feature/src/builtin_attrs.rs +++ b/compiler/rustc_feature/src/builtin_attrs.rs @@ -572,7 +572,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ // `#[coroutine]` attribute to be applied to closures to make them coroutines instead gated!( coroutine, Normal, template!(Word), ErrorFollowing, - EncodeCrossCrate::No, coroutines, experimental!(coroutines) + EncodeCrossCrate::No, coroutines, experimental!(coroutine) ), // RFC 3543 @@ -623,7 +623,7 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ EncodeCrossCrate::No, "allow_internal_unsafe side-steps the unsafe_code lint", ), rustc_attr!( - rustc_allowed_through_unstable_modules, Normal, template!(Word), + rustc_allowed_through_unstable_modules, Normal, template!(Word, NameValueStr: "deprecation message"), WarnFollowing, EncodeCrossCrate::No, "rustc_allowed_through_unstable_modules special cases accidental stabilizations of stable items \ through unstable paths" @@ -1019,6 +1019,10 @@ pub const BUILTIN_ATTRIBUTES: &[BuiltinAttribute] = &[ rustc_no_mir_inline, Normal, template!(Word), WarnFollowing, EncodeCrossCrate::Yes, "#[rustc_no_mir_inline] prevents the MIR inliner from inlining a function while not affecting codegen" ), + rustc_attr!( + rustc_force_inline, Normal, template!(Word, NameValueStr: "reason"), WarnFollowing, EncodeCrossCrate::Yes, + "#![rustc_force_inline] forces a free function to be inlined" + ), // ========================================================================== // Internal attributes, Testing: diff --git a/compiler/rustc_feature/src/lib.rs b/compiler/rustc_feature/src/lib.rs index 6db512ace1b..0b034a2ae10 100644 --- a/compiler/rustc_feature/src/lib.rs +++ b/compiler/rustc_feature/src/lib.rs @@ -68,6 +68,16 @@ impl UnstableFeatures { /// If `krate` is [`Some`], then setting `RUSTC_BOOTSTRAP=krate` will enable the nightly /// features. Otherwise, only `RUSTC_BOOTSTRAP=1` will work. 
pub fn from_environment(krate: Option<&str>) -> Self { + Self::from_environment_value(krate, std::env::var("RUSTC_BOOTSTRAP")) + } + + /// Avoid unsafe `std::env::set_var()` by allowing tests to inject + /// `std::env::var("RUSTC_BOOTSTRAP")` with the `env_var_rustc_bootstrap` + /// arg. + fn from_environment_value( + krate: Option<&str>, + env_var_rustc_bootstrap: Result<String, std::env::VarError>, + ) -> Self { // `true` if this is a feature-staged build, i.e., on the beta or stable channel. let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some_and(|s| s != "0"); @@ -75,7 +85,7 @@ impl UnstableFeatures { let is_unstable_crate = |var: &str| krate.is_some_and(|name| var.split(',').any(|new_krate| new_krate == name)); - let bootstrap = std::env::var("RUSTC_BOOTSTRAP").ok(); + let bootstrap = env_var_rustc_bootstrap.ok(); if let Some(val) = bootstrap.as_deref() { match val { val if val == "1" || is_unstable_crate(val) => return UnstableFeatures::Cheat, diff --git a/compiler/rustc_feature/src/removed.rs b/compiler/rustc_feature/src/removed.rs index 388ed9d08fa..9aa59375706 100644 --- a/compiler/rustc_feature/src/removed.rs +++ b/compiler/rustc_feature/src/removed.rs @@ -120,7 +120,7 @@ declare_features! ( /// Allows defining generators. (removed, generators, "1.21.0", Some(43122), Some("renamed to `coroutines`")), /// An extension to the `generic_associated_types` feature, allowing incomplete features. - (removed, generic_associated_types_extended, "CURRENT_RUSTC_VERSION", Some(95451), + (removed, generic_associated_types_extended, "1.85.0", Some(95451), Some( "feature needs overhaul and reimplementation pending \ better implied higher-ranked implied bounds support" diff --git a/compiler/rustc_feature/src/tests.rs b/compiler/rustc_feature/src/tests.rs index cc0e1f31209..a5d589171d1 100644 --- a/compiler/rustc_feature/src/tests.rs +++ b/compiler/rustc_feature/src/tests.rs @@ -2,9 +2,11 @@ use super::UnstableFeatures; #[test] fn rustc_bootstrap_parsing() { - let is_bootstrap = |env, krate| { - std::env::set_var("RUSTC_BOOTSTRAP", env); - matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Cheat) + let is_bootstrap = |env: &str, krate: Option<&str>| { + matches!( + UnstableFeatures::from_environment_value(krate, Ok(env.to_string())), + UnstableFeatures::Cheat + ) }; assert!(is_bootstrap("1", None)); assert!(is_bootstrap("1", Some("x"))); @@ -22,9 +24,11 @@ fn rustc_bootstrap_parsing() { assert!(!is_bootstrap("0", None)); // `RUSTC_BOOTSTRAP=-1` is force-stable, no unstable features allowed. - let is_force_stable = |krate| { - std::env::set_var("RUSTC_BOOTSTRAP", "-1"); - matches!(UnstableFeatures::from_environment(krate), UnstableFeatures::Disallow) + let is_force_stable = |krate: Option<&str>| { + matches!( + UnstableFeatures::from_environment_value(krate, Ok("-1".to_string())), + UnstableFeatures::Disallow + ) }; assert!(is_force_stable(None)); // Does not support specifying any crate. diff --git a/compiler/rustc_feature/src/unstable.rs b/compiler/rustc_feature/src/unstable.rs index 8cc4c18c02a..1dcde453331 100644 --- a/compiler/rustc_feature/src/unstable.rs +++ b/compiler/rustc_feature/src/unstable.rs @@ -333,7 +333,7 @@ declare_features! 
( (unstable, hexagon_target_feature, "1.27.0", Some(44839)), (unstable, lahfsahf_target_feature, "1.78.0", Some(44839)), (unstable, loongarch_target_feature, "1.73.0", Some(44839)), - (unstable, m68k_target_feature, "CURRENT_RUSTC_VERSION", Some(134328)), + (unstable, m68k_target_feature, "1.85.0", Some(134328)), (unstable, mips_target_feature, "1.27.0", Some(44839)), (unstable, powerpc_target_feature, "1.27.0", Some(44839)), (unstable, prfchw_target_feature, "1.78.0", Some(44839)), @@ -344,7 +344,7 @@ declare_features! ( (unstable, sse4a_target_feature, "1.27.0", Some(44839)), (unstable, tbm_target_feature, "1.27.0", Some(44839)), (unstable, wasm_target_feature, "1.30.0", Some(44839)), - (unstable, x87_target_feature, "CURRENT_RUSTC_VERSION", Some(44839)), + (unstable, x87_target_feature, "1.85.0", Some(44839)), // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! // Features are listed in alphabetical order. Tidy will fail if you don't keep it this way. // !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! !!!! @@ -361,6 +361,8 @@ declare_features! ( (unstable, abi_avr_interrupt, "1.45.0", Some(69664)), /// Allows `extern "C-cmse-nonsecure-call" fn()`. (unstable, abi_c_cmse_nonsecure_call, "1.51.0", Some(81391)), + /// Allows `extern "gpu-kernel" fn()`. + (unstable, abi_gpu_kernel, "CURRENT_RUSTC_VERSION", Some(135467)), /// Allows `extern "msp430-interrupt" fn()`. (unstable, abi_msp430_interrupt, "1.16.0", Some(38487)), /// Allows `extern "ptx-*" fn()`. @@ -380,11 +382,11 @@ declare_features! ( /// Enables experimental inline assembly support for additional architectures. (unstable, asm_experimental_arch, "1.58.0", Some(93335)), /// Enables experimental register support in inline assembly. - (unstable, asm_experimental_reg, "CURRENT_RUSTC_VERSION", Some(133416)), + (unstable, asm_experimental_reg, "1.85.0", Some(133416)), /// Allows using `label` operands in inline assembly. (unstable, asm_goto, "1.78.0", Some(119364)), /// Allows using `label` operands in inline assembly together with output operands. - (unstable, asm_goto_with_outputs, "CURRENT_RUSTC_VERSION", Some(119364)), + (unstable, asm_goto_with_outputs, "1.85.0", Some(119364)), /// Allows the `may_unwind` option in inline assembly. (unstable, asm_unwind, "1.58.0", Some(93334)), /// Allows users to enforce equality of associated constants `TraitImpl<AssocConst=3>`. @@ -392,13 +394,13 @@ declare_features! ( /// Allows associated type defaults. (unstable, associated_type_defaults, "1.2.0", Some(29661)), /// Allows async functions to be called from `dyn Trait`. - (incomplete, async_fn_in_dyn_trait, "CURRENT_RUSTC_VERSION", Some(133119)), + (incomplete, async_fn_in_dyn_trait, "1.85.0", Some(133119)), /// Allows `#[track_caller]` on async functions. (unstable, async_fn_track_caller, "1.73.0", Some(110011)), /// Allows `for await` loops. (unstable, async_for_loop, "1.77.0", Some(118898)), /// Allows `async` trait bound modifier. - (unstable, async_trait_bounds, "CURRENT_RUSTC_VERSION", Some(62290)), + (unstable, async_trait_bounds, "1.85.0", Some(62290)), /// Allows using C-variadics. (unstable, c_variadic, "1.34.0", Some(44930)), /// Allows the use of `#[cfg(<true/false>)]`. @@ -436,7 +438,7 @@ declare_features! ( /// Allows `const || {}` closures in const contexts. (incomplete, const_closures, "1.68.0", Some(106003)), /// Allows using `~const Destruct` bounds and calling drop impls in const contexts. 
- (unstable, const_destruct, "CURRENT_RUSTC_VERSION", Some(133214)), + (unstable, const_destruct, "1.85.0", Some(133214)), /// Allows `for _ in _` loops in const contexts. (unstable, const_for, "1.56.0", Some(87575)), /// Be more precise when looking for live drops in a const context. @@ -460,7 +462,7 @@ declare_features! ( (unstable, decl_macro, "1.17.0", Some(39412)), /// Allows the use of default values on struct definitions and the construction of struct /// literals with the functional update syntax without a base. - (unstable, default_field_values, "CURRENT_RUSTC_VERSION", Some(132162)), + (unstable, default_field_values, "1.85.0", Some(132162)), /// Allows using `#[deprecated_safe]` to deprecate the safeness of a function or trait (unstable, deprecated_safe, "1.61.0", Some(94978)), /// Allows having using `suggestion` in the `#[deprecated]` attribute. @@ -510,7 +512,7 @@ declare_features! ( /// Allows registering static items globally, possibly across crates, to iterate over at runtime. (unstable, global_registration, "1.80.0", Some(125119)), /// Allows using guards in patterns. - (incomplete, guard_patterns, "CURRENT_RUSTC_VERSION", Some(129967)), + (incomplete, guard_patterns, "1.85.0", Some(129967)), /// Allows using `..=X` as a patterns in slices. (unstable, half_open_range_patterns_in_slices, "1.66.0", Some(67264)), /// Allows `if let` guard in match arms. @@ -521,6 +523,8 @@ declare_features! ( (unstable, impl_trait_in_bindings, "1.64.0", Some(63065)), /// Allows `impl Trait` as output type in `Fn` traits in return position of functions. (unstable, impl_trait_in_fn_trait_return, "1.64.0", Some(99697)), + /// Allows `use` associated functions from traits. + (unstable, import_trait_associated_functions, "CURRENT_RUSTC_VERSION", Some(134691)), /// Allows associated types in inherent impls. (incomplete, inherent_associated_types, "1.52.0", Some(8995)), /// Allow anonymous constants from an inline `const` block in pattern position @@ -639,9 +643,9 @@ declare_features! ( /// not changed from prior instances of the same struct (RFC #2528) (unstable, type_changing_struct_update, "1.58.0", Some(86555)), /// Allows using `unsafe<'a> &'a T` unsafe binder types. - (incomplete, unsafe_binders, "CURRENT_RUSTC_VERSION", Some(130516)), + (incomplete, unsafe_binders, "1.85.0", Some(130516)), /// Allows declaring fields `unsafe`. - (incomplete, unsafe_fields, "CURRENT_RUSTC_VERSION", Some(132922)), + (incomplete, unsafe_fields, "1.85.0", Some(132922)), /// Allows const generic parameters to be defined with types that /// are not `Sized`, e.g. `fn foo<const N: [u8]>() {`. 
(incomplete, unsized_const_params, "1.82.0", Some(95174)), diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index 6b4b716d9a8..5339feb5d27 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -7,7 +7,7 @@ use rustc_ast::token::CommentKind; use rustc_ast::util::parser::{AssocOp, ExprPrecedence}; use rustc_ast::{ self as ast, AttrId, AttrStyle, DelimArgs, FloatTy, InlineAsmOptions, InlineAsmTemplatePiece, - IntTy, Label, LitKind, MetaItemInner, MetaItemLit, TraitObjectSyntax, UintTy, + IntTy, Label, LitIntType, LitKind, MetaItemInner, MetaItemLit, TraitObjectSyntax, UintTy, }; pub use rustc_ast::{ BinOp, BinOpKind, BindingMode, BorrowKind, BoundConstness, BoundPolarity, ByRef, CaptureBy, @@ -34,6 +34,7 @@ use crate::intravisit::FnKind; #[derive(Debug, Copy, Clone, HashStable_Generic)] pub struct Lifetime { + #[stable_hasher(ignore)] pub hir_id: HirId, /// Either "`'a`", referring to a named lifetime definition, @@ -214,6 +215,7 @@ impl Path<'_> { pub struct PathSegment<'hir> { /// The identifier portion of this path segment. pub ident: Ident, + #[stable_hasher(ignore)] pub hir_id: HirId, pub res: Res, @@ -304,6 +306,7 @@ pub enum ConstArgKind<'hir> { #[derive(Clone, Copy, Debug, HashStable_Generic)] pub struct InferArg { + #[stable_hasher(ignore)] pub hir_id: HirId, pub span: Span, } @@ -592,6 +595,7 @@ pub enum GenericParamKind<'hir> { #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct GenericParam<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, pub name: ParamName, @@ -850,6 +854,7 @@ impl<'hir> Generics<'hir> { /// A single predicate in a where-clause. #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct WherePredicate<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub span: Span, pub kind: &'hir WherePredicateKind<'hir>, @@ -1386,8 +1391,8 @@ impl<'hir> Pat<'hir> { use PatKind::*; match self.kind { - Wild | Never | Lit(_) | Range(..) | Binding(.., None) | Path(_) | Err(_) => true, - Box(s) | Deref(s) | Ref(s, _) | Binding(.., Some(s)) => s.walk_short_(it), + Wild | Never | Expr(_) | Range(..) | Binding(.., None) | Path(_) | Err(_) => true, + Box(s) | Deref(s) | Ref(s, _) | Binding(.., Some(s)) | Guard(s, _) => s.walk_short_(it), Struct(_, fields, _) => fields.iter().all(|field| field.pat.walk_short_(it)), TupleStruct(_, s, _) | Tuple(s, _) | Or(s) => s.iter().all(|p| p.walk_short_(it)), Slice(before, slice, after) => { @@ -1413,8 +1418,8 @@ impl<'hir> Pat<'hir> { use PatKind::*; match self.kind { - Wild | Never | Lit(_) | Range(..) | Binding(.., None) | Path(_) | Err(_) => {} - Box(s) | Deref(s) | Ref(s, _) | Binding(.., Some(s)) => s.walk_(it), + Wild | Never | Expr(_) | Range(..) | Binding(.., None) | Path(_) | Err(_) => {} + Box(s) | Deref(s) | Ref(s, _) | Binding(.., Some(s)) | Guard(s, _) => s.walk_(it), Struct(_, fields, _) => fields.iter().for_each(|field| field.pat.walk_(it)), TupleStruct(_, s, _) | Tuple(s, _) | Or(s) => s.iter().for_each(|p| p.walk_(it)), Slice(before, slice, after) => { @@ -1520,6 +1525,27 @@ impl fmt::Debug for DotDotPos { } #[derive(Debug, Clone, Copy, HashStable_Generic)] +pub struct PatExpr<'hir> { + #[stable_hasher(ignore)] + pub hir_id: HirId, + pub span: Span, + pub kind: PatExprKind<'hir>, +} + +#[derive(Debug, Clone, Copy, HashStable_Generic)] +pub enum PatExprKind<'hir> { + Lit { + lit: &'hir Lit, + // FIXME: move this into `Lit` and handle negated literal expressions + // once instead of matching on unop neg expressions everywhere. 
+ negated: bool, + }, + ConstBlock(ConstBlock), + /// A path pattern for a unit struct/variant or a (maybe-associated) constant. + Path(QPath<'hir>), +} + +#[derive(Debug, Clone, Copy, HashStable_Generic)] pub enum PatKind<'hir> { /// Represents a wildcard pattern (i.e., `_`). Wild, @@ -1563,11 +1589,14 @@ pub enum PatKind<'hir> { /// A reference pattern (e.g., `&mut (a, b)`). Ref(&'hir Pat<'hir>, Mutability), - /// A literal. - Lit(&'hir Expr<'hir>), + /// A literal, const block or path. + Expr(&'hir PatExpr<'hir>), + + /// A guard pattern (e.g., `x if guard(x)`). + Guard(&'hir Pat<'hir>, &'hir Expr<'hir>), /// A range pattern (e.g., `1..=2` or `1..2`). - Range(Option<&'hir Expr<'hir>>, Option<&'hir Expr<'hir>>, RangeEnd), + Range(Option<&'hir PatExpr<'hir>>, Option<&'hir PatExpr<'hir>>, RangeEnd), /// A slice pattern, `[before_0, ..., before_n, (slice, after_0, ..., after_n)?]`. /// @@ -1587,6 +1616,7 @@ pub enum PatKind<'hir> { /// A statement. #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct Stmt<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub kind: StmtKind<'hir>, pub span: Span, @@ -1618,6 +1648,7 @@ pub struct LetStmt<'hir> { pub init: Option<&'hir Expr<'hir>>, /// Else block for a `let...else` binding. pub els: Option<&'hir Block<'hir>>, + #[stable_hasher(ignore)] pub hir_id: HirId, pub span: Span, /// Can be `ForLoopDesugar` if the `let` statement is part of a `for` loop @@ -1914,6 +1945,7 @@ pub type Lit = Spanned<LitKind>; /// `const N: usize = { ... }` with `tcx.hir().opt_const_param_default_param_def_id(..)` #[derive(Copy, Clone, Debug, HashStable_Generic)] pub struct AnonConst { + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, pub body: BodyId, @@ -1923,6 +1955,7 @@ pub struct AnonConst { /// An inline constant expression `const { something }`. #[derive(Copy, Clone, Debug, HashStable_Generic)] pub struct ConstBlock { + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, pub body: BodyId, @@ -1938,6 +1971,7 @@ pub struct ConstBlock { /// [rust lang reference]: https://doc.rust-lang.org/reference/expressions.html #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct Expr<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub kind: ExprKind<'hir>, pub span: Span, @@ -2071,6 +2105,18 @@ impl Expr<'_> { } } + /// Check if expression is an integer literal that can be used + /// where `usize` is expected. + pub fn is_size_lit(&self) -> bool { + matches!( + self.kind, + ExprKind::Lit(Lit { + node: LitKind::Int(_, LitIntType::Unsuffixed | LitIntType::Unsigned(UintTy::Usize)), + .. + }) + ) + } + /// If `Self.kind` is `ExprKind::DropTemps(expr)`, drill down until we get a non-`DropTemps` /// `Expr`. This is used in suggestions to ignore this `ExprKind` as it is semantically /// silent, only signaling the ownership system. 
By doing this, suggestions that check the @@ -2804,6 +2850,7 @@ pub enum ImplItemKind<'hir> { /// * the `f(..): Bound` in `Trait<f(..): Bound>` (feature `return_type_notation`) #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct AssocItemConstraint<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub ident: Ident, pub gen_args: &'hir GenericArgs<'hir>, @@ -2872,6 +2919,7 @@ impl<'hir> AssocItemConstraintKind<'hir> { #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct Ty<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub kind: TyKind<'hir>, pub span: Span, @@ -3067,6 +3115,7 @@ pub struct UnsafeBinderTy<'hir> { #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct OpaqueTy<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, pub bounds: GenericBounds<'hir>, @@ -3103,6 +3152,7 @@ impl PreciseCapturingArg<'_> { /// since resolve_bound_vars operates on `Lifetime`s. #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct PreciseCapturingNonLifetimeArg { + #[stable_hasher(ignore)] pub hir_id: HirId, pub ident: Ident, pub res: Res, @@ -3276,6 +3326,7 @@ impl InlineAsm<'_> { /// Represents a parameter in a function header. #[derive(Debug, Clone, Copy, HashStable_Generic)] pub struct Param<'hir> { + #[stable_hasher(ignore)] pub hir_id: HirId, pub pat: &'hir Pat<'hir>, pub ty_span: Span, @@ -3433,6 +3484,7 @@ pub struct Variant<'hir> { /// Name of the variant. pub ident: Ident, /// Id of the variant (not the constructor, see `VariantData::ctor_hir_id()`). + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, /// Fields and constructor id of the variant. @@ -3505,6 +3557,7 @@ pub struct FieldDef<'hir> { pub span: Span, pub vis_span: Span, pub ident: Ident, + #[stable_hasher(ignore)] pub hir_id: HirId, pub def_id: LocalDefId, pub ty: &'hir Ty<'hir>, @@ -3529,11 +3582,11 @@ pub enum VariantData<'hir> { /// A tuple variant. /// /// E.g., `Bar(..)` as in `enum Foo { Bar(..) }`. - Tuple(&'hir [FieldDef<'hir>], HirId, LocalDefId), + Tuple(&'hir [FieldDef<'hir>], #[stable_hasher(ignore)] HirId, LocalDefId), /// A unit variant. /// /// E.g., `Bar = ..` as in `enum Foo { Bar = .. }`. - Unit(HirId, LocalDefId), + Unit(#[stable_hasher(ignore)] HirId, LocalDefId), } impl<'hir> VariantData<'hir> { @@ -3727,9 +3780,30 @@ impl fmt::Display for Constness { } } +/// The actualy safety specified in syntax. We may treat +/// its safety different within the type system to create a +/// "sound by default" system that needs checking this enum +/// explicitly to allow unsafe operations. +#[derive(Copy, Clone, Debug, HashStable_Generic, PartialEq, Eq)] +pub enum HeaderSafety { + /// A safe function annotated with `#[target_features]`. + /// The type system treats this function as an unsafe function, + /// but safety checking will check this enum to treat it as safe + /// and allowing calling other safe target feature functions with + /// the same features without requiring an additional unsafe block. 
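As an illustration of what the `HeaderSafety::SafeTargetFeatures` doc comment above describes (a hedged sketch, assuming the unstable safe-`#[target_feature]` functions feature `target_feature_11` and an x86-64 target; this is not code from the patch): a safe target-feature function can call another safe function with the same features without an extra `unsafe` block, while callers without those features must still treat it as unsafe.

```rust
#![feature(target_feature_11)] // assumed nightly feature for safe #[target_feature] fns

/// Safe to declare, but the type system treats it as unsafe for ordinary callers.
#[target_feature(enable = "avx2")]
fn dot(a: &[f32], b: &[f32]) -> f32 {
    a.iter().zip(b).map(|(x, y)| x * y).sum()
}

#[target_feature(enable = "avx2")]
fn norm_squared(v: &[f32]) -> f32 {
    // Same target features as `dot`, so no additional `unsafe` block is needed here.
    dot(v, v)
}

fn main() {
    let v = [1.0_f32, 2.0, 3.0];
    // From a function without the `avx2` feature, the same call needs `unsafe`
    // (and, in real code, a runtime CPU-feature check first).
    let n = unsafe { norm_squared(&v) };
    println!("{n}");
}
```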
+ SafeTargetFeatures, + Normal(Safety), +} + +impl From<Safety> for HeaderSafety { + fn from(v: Safety) -> Self { + Self::Normal(v) + } +} + #[derive(Copy, Clone, Debug, HashStable_Generic)] pub struct FnHeader { - pub safety: Safety, + pub safety: HeaderSafety, pub constness: Constness, pub asyncness: IsAsync, pub abi: ExternAbi, @@ -3745,7 +3819,18 @@ impl FnHeader { } pub fn is_unsafe(&self) -> bool { - self.safety.is_unsafe() + self.safety().is_unsafe() + } + + pub fn is_safe(&self) -> bool { + self.safety().is_safe() + } + + pub fn safety(&self) -> Safety { + match self.safety { + HeaderSafety::SafeTargetFeatures => Safety::Unsafe, + HeaderSafety::Normal(safety) => safety, + } } } @@ -4141,6 +4226,10 @@ pub enum Node<'hir> { OpaqueTy(&'hir OpaqueTy<'hir>), Pat(&'hir Pat<'hir>), PatField(&'hir PatField<'hir>), + /// Needed as its own node with its own HirId for tracking + /// the unadjusted type of literals within patterns + /// (e.g. byte str literals not being of slice type). + PatExpr(&'hir PatExpr<'hir>), Arm(&'hir Arm<'hir>), Block(&'hir Block<'hir>), LetStmt(&'hir LetStmt<'hir>), @@ -4197,6 +4286,7 @@ impl<'hir> Node<'hir> { | Node::Block(..) | Node::Ctor(..) | Node::Pat(..) + | Node::PatExpr(..) | Node::Arm(..) | Node::LetStmt(..) | Node::Crate(..) diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs index 85e555d903b..ef863aca090 100644 --- a/compiler/rustc_hir/src/intravisit.rs +++ b/compiler/rustc_hir/src/intravisit.rs @@ -342,6 +342,9 @@ pub trait Visitor<'v>: Sized { fn visit_pat_field(&mut self, f: &'v PatField<'v>) -> Self::Result { walk_pat_field(self, f) } + fn visit_pat_expr(&mut self, expr: &'v PatExpr<'v>) -> Self::Result { + walk_pat_expr(self, expr) + } fn visit_anon_const(&mut self, c: &'v AnonConst) -> Self::Result { walk_anon_const(self, c) } @@ -685,10 +688,10 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) -> V: try_visit!(visitor.visit_ident(ident)); visit_opt!(visitor, visit_pat, optional_subpattern); } - PatKind::Lit(ref expression) => try_visit!(visitor.visit_expr(expression)), + PatKind::Expr(ref expression) => try_visit!(visitor.visit_pat_expr(expression)), PatKind::Range(ref lower_bound, ref upper_bound, _) => { - visit_opt!(visitor, visit_expr, lower_bound); - visit_opt!(visitor, visit_expr, upper_bound); + visit_opt!(visitor, visit_pat_expr, lower_bound); + visit_opt!(visitor, visit_pat_expr, upper_bound); } PatKind::Never | PatKind::Wild | PatKind::Err(_) => (), PatKind::Slice(prepatterns, ref slice_pattern, postpatterns) => { @@ -696,6 +699,10 @@ pub fn walk_pat<'v, V: Visitor<'v>>(visitor: &mut V, pattern: &'v Pat<'v>) -> V: visit_opt!(visitor, visit_pat, slice_pattern); walk_list!(visitor, visit_pat, postpatterns); } + PatKind::Guard(subpat, condition) => { + try_visit!(visitor.visit_pat(subpat)); + try_visit!(visitor.visit_expr(condition)); + } } V::Result::output() } @@ -706,6 +713,15 @@ pub fn walk_pat_field<'v, V: Visitor<'v>>(visitor: &mut V, field: &'v PatField<' visitor.visit_pat(field.pat) } +pub fn walk_pat_expr<'v, V: Visitor<'v>>(visitor: &mut V, expr: &'v PatExpr<'v>) -> V::Result { + try_visit!(visitor.visit_id(expr.hir_id)); + match &expr.kind { + PatExprKind::Lit { .. 
} => V::Result::output(), + PatExprKind::ConstBlock(c) => visitor.visit_inline_const(c), + PatExprKind::Path(qpath) => visitor.visit_qpath(qpath, expr.hir_id, expr.span), + } +} + pub fn walk_anon_const<'v, V: Visitor<'v>>(visitor: &mut V, constant: &'v AnonConst) -> V::Result { try_visit!(visitor.visit_id(constant.hir_id)); visitor.visit_nested_body(constant.body) diff --git a/compiler/rustc_hir/src/tests.rs b/compiler/rustc_hir/src/tests.rs index 953e48a6d33..e0e63d183c6 100644 --- a/compiler/rustc_hir/src/tests.rs +++ b/compiler/rustc_hir/src/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use rustc_data_structures::stable_hasher::Hash64; use rustc_span::def_id::{DefPathHash, StableCrateId}; diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl index 0c3ed9b5c60..d7ab6eca84b 100644 --- a/compiler/rustc_hir_analysis/messages.ftl +++ b/compiler/rustc_hir_analysis/messages.ftl @@ -135,7 +135,7 @@ hir_analysis_dispatch_from_dyn_multi = implementing the `DispatchFromDyn` trait hir_analysis_dispatch_from_dyn_repr = structs implementing `DispatchFromDyn` may not have `#[repr(packed)]` or `#[repr(C)]` -hir_analysis_dispatch_from_dyn_zst = the trait `DispatchFromDyn` may only be implemented for structs containing the field being coerced, ZST fields with 1 byte alignment, and nothing else +hir_analysis_dispatch_from_dyn_zst = the trait `DispatchFromDyn` may only be implemented for structs containing the field being coerced, ZST fields with 1 byte alignment that don't mention type/const generics, and nothing else .note = extra field `{$name}` of type `{$ty}` is not allowed hir_analysis_drop_impl_negative = negative `Drop` impls are not supported diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index 8c6059d49a8..b0a6922ff72 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -436,9 +436,9 @@ fn check_opaque_meets_bounds<'tcx>( } else { // Check that any hidden types found during wf checking match the hidden types that `type_of` sees. for (mut key, mut ty) in infcx.take_opaque_types() { - ty.hidden_type.ty = infcx.resolve_vars_if_possible(ty.hidden_type.ty); + ty.ty = infcx.resolve_vars_if_possible(ty.ty); key = infcx.resolve_vars_if_possible(key); - sanity_check_found_hidden_type(tcx, key, ty.hidden_type)?; + sanity_check_found_hidden_type(tcx, key, ty)?; } Ok(()) } @@ -575,7 +575,7 @@ fn sanity_check_found_hidden_type<'tcx>( } else { let span = tcx.def_span(key.def_id); let other = ty::OpaqueHiddenType { ty: hidden_ty, span }; - Err(ty.build_mismatch_error(&other, key.def_id, tcx)?.emit()) + Err(ty.build_mismatch_error(&other, tcx)?.emit()) } } @@ -1637,7 +1637,6 @@ fn check_type_alias_type_params_are_used<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalD let ty = tcx.type_of(def_id).instantiate_identity(); if ty.references_error() { // If there is already another error, do not emit an error for not using a type parameter. 
- assert!(tcx.dcx().has_errors().is_some()); return; } @@ -1666,7 +1665,7 @@ fn check_type_alias_type_params_are_used<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalD .collect::<FxIndexMap<_, _>>() }); - let mut params_used = BitSet::new_empty(generics.own_params.len()); + let mut params_used = DenseBitSet::new_empty(generics.own_params.len()); for leaf in ty.walk() { if let GenericArgKind::Type(leaf_ty) = leaf.unpack() && let ty::Param(param) = leaf_ty.kind() @@ -1873,7 +1872,7 @@ pub(super) fn check_coroutine_obligations( // Check that any hidden types found when checking these stalled coroutine obligations // are valid. for (key, ty) in infcx.take_opaque_types() { - let hidden_type = infcx.resolve_vars_if_possible(ty.hidden_type); + let hidden_type = infcx.resolve_vars_if_possible(ty); let key = infcx.resolve_vars_if_possible(key); sanity_check_found_hidden_type(tcx, key, hidden_type)?; } diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs index 0b0c92a726d..92b18c80fd8 100644 --- a/compiler/rustc_hir_analysis/src/check/mod.rs +++ b/compiler/rustc_hir_analysis/src/check/mod.rs @@ -79,7 +79,7 @@ use rustc_data_structures::fx::{FxHashSet, FxIndexMap}; use rustc_errors::{Diag, ErrorGuaranteed, pluralize, struct_span_code_err}; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::intravisit::Visitor; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_infer::infer::outlives::env::OutlivesEnvironment; use rustc_infer::infer::{self, TyCtxtInferExt as _}; use rustc_infer::traits::ObligationCause; diff --git a/compiler/rustc_hir_analysis/src/check/region.rs b/compiler/rustc_hir_analysis/src/check/region.rs index ca6729a5bbd..83c69dc2ef4 100644 --- a/compiler/rustc_hir_analysis/src/check/region.rs +++ b/compiler/rustc_hir_analysis/src/check/region.rs @@ -31,7 +31,7 @@ struct Context { parent: Option<(Scope, ScopeDepth)>, } -struct RegionResolutionVisitor<'tcx> { +struct ScopeResolutionVisitor<'tcx> { tcx: TyCtxt<'tcx>, // The number of expressions and patterns visited in the current body. 
@@ -71,7 +71,7 @@ struct RegionResolutionVisitor<'tcx> { } /// Records the lifetime of a local variable as `cx.var_parent` -fn record_var_lifetime(visitor: &mut RegionResolutionVisitor<'_>, var_id: hir::ItemLocalId) { +fn record_var_lifetime(visitor: &mut ScopeResolutionVisitor<'_>, var_id: hir::ItemLocalId) { match visitor.cx.var_parent { None => { // this can happen in extern fn declarations like @@ -82,7 +82,7 @@ fn record_var_lifetime(visitor: &mut RegionResolutionVisitor<'_>, var_id: hir::I } } -fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) { +fn resolve_block<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, blk: &'tcx hir::Block<'tcx>) { debug!("resolve_block(blk.hir_id={:?})", blk.hir_id); let prev_cx = visitor.cx; @@ -193,7 +193,7 @@ fn resolve_block<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, blk: &'tcx h visitor.cx = prev_cx; } -fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) { +fn resolve_arm<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, arm: &'tcx hir::Arm<'tcx>) { fn has_let_expr(expr: &Expr<'_>) -> bool { match &expr.kind { hir::ExprKind::Binary(_, lhs, rhs) => has_let_expr(lhs) || has_let_expr(rhs), @@ -220,7 +220,7 @@ fn resolve_arm<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, arm: &'tcx hir visitor.cx = prev_cx; } -fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) { +fn resolve_pat<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, pat: &'tcx hir::Pat<'tcx>) { visitor.record_child_scope(Scope { local_id: pat.hir_id.local_id, data: ScopeData::Node }); // If this is a binding then record the lifetime of that binding. @@ -237,7 +237,7 @@ fn resolve_pat<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, pat: &'tcx hir debug!("resolve_pat - post-increment {} pat = {:?}", visitor.expr_and_pat_count, pat); } -fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) { +fn resolve_stmt<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, stmt: &'tcx hir::Stmt<'tcx>) { let stmt_id = stmt.hir_id.local_id; debug!("resolve_stmt(stmt.id={:?})", stmt_id); @@ -256,7 +256,7 @@ fn resolve_stmt<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, stmt: &'tcx h visitor.cx.parent = prev_parent; } -fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) { +fn resolve_expr<'tcx>(visitor: &mut ScopeResolutionVisitor<'tcx>, expr: &'tcx hir::Expr<'tcx>) { debug!("resolve_expr - pre-increment {} expr = {:?}", visitor.expr_and_pat_count, expr); let prev_cx = visitor.cx; @@ -420,10 +420,10 @@ fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx h // properly, we can't miss any types. match expr.kind { - // Manually recurse over closures and inline consts, because they are the only - // case of nested bodies that share the parent environment. - hir::ExprKind::Closure(&hir::Closure { body, .. }) - | hir::ExprKind::ConstBlock(hir::ConstBlock { body, .. }) => { + // Manually recurse over closures, because they are nested bodies + // that share the parent environment. We handle const blocks in + // `visit_inline_const`. + hir::ExprKind::Closure(&hir::Closure { body, .. 
}) => { let body = visitor.tcx.hir().body(body); visitor.visit_body(body); } @@ -554,7 +554,7 @@ fn resolve_expr<'tcx>(visitor: &mut RegionResolutionVisitor<'tcx>, expr: &'tcx h } fn resolve_local<'tcx>( - visitor: &mut RegionResolutionVisitor<'tcx>, + visitor: &mut ScopeResolutionVisitor<'tcx>, pat: Option<&'tcx hir::Pat<'tcx>>, init: Option<&'tcx hir::Expr<'tcx>>, ) { @@ -654,6 +654,7 @@ fn resolve_local<'tcx>( /// | ( ..., P&, ... ) /// | ... "|" P& "|" ... /// | box P& + /// | P& if ... /// ``` fn is_binding_pat(pat: &hir::Pat<'_>) -> bool { // Note that the code below looks for *explicit* refs only, that is, it won't @@ -694,14 +695,16 @@ fn resolve_local<'tcx>( | PatKind::TupleStruct(_, subpats, _) | PatKind::Tuple(subpats, _) => subpats.iter().any(|p| is_binding_pat(p)), - PatKind::Box(subpat) | PatKind::Deref(subpat) => is_binding_pat(subpat), + PatKind::Box(subpat) | PatKind::Deref(subpat) | PatKind::Guard(subpat, _) => { + is_binding_pat(subpat) + } PatKind::Ref(_, _) | PatKind::Binding(hir::BindingMode(hir::ByRef::No, _), ..) | PatKind::Wild | PatKind::Never | PatKind::Path(_) - | PatKind::Lit(_) + | PatKind::Expr(_) | PatKind::Range(_, _, _) | PatKind::Err(_) => false, } @@ -722,7 +725,7 @@ fn resolve_local<'tcx>( /// | ( E& ) /// ``` fn record_rvalue_scope_if_borrow_expr<'tcx>( - visitor: &mut RegionResolutionVisitor<'tcx>, + visitor: &mut ScopeResolutionVisitor<'tcx>, expr: &hir::Expr<'_>, blk_id: Option<Scope>, ) { @@ -779,7 +782,7 @@ fn resolve_local<'tcx>( } } -impl<'tcx> RegionResolutionVisitor<'tcx> { +impl<'tcx> ScopeResolutionVisitor<'tcx> { /// Records the current parent (if any) as the parent of `child_scope`. /// Returns the depth of `child_scope`. fn record_child_scope(&mut self, child_scope: Scope) -> ScopeDepth { @@ -835,7 +838,7 @@ impl<'tcx> RegionResolutionVisitor<'tcx> { } } -impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> { +impl<'tcx> Visitor<'tcx> for ScopeResolutionVisitor<'tcx> { fn visit_block(&mut self, b: &'tcx Block<'tcx>) { resolve_block(self, b); } @@ -903,6 +906,10 @@ impl<'tcx> Visitor<'tcx> for RegionResolutionVisitor<'tcx> { fn visit_local(&mut self, l: &'tcx LetStmt<'tcx>) { resolve_local(self, Some(l.pat), l.init) } + fn visit_inline_const(&mut self, c: &'tcx hir::ConstBlock) { + let body = self.tcx.hir().body(c.body); + self.visit_body(body); + } } /// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body; @@ -919,7 +926,7 @@ pub(crate) fn region_scope_tree(tcx: TyCtxt<'_>, def_id: DefId) -> &ScopeTree { } let scope_tree = if let Some(body) = tcx.hir().maybe_body_owned_by(def_id.expect_local()) { - let mut visitor = RegionResolutionVisitor { + let mut visitor = ScopeResolutionVisitor { tcx, scope_tree: ScopeTree::default(), expr_and_pat_count: 0, diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index 81a5e9ee90d..dd6adb17c5e 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -1120,7 +1120,7 @@ fn check_type_defn<'tcx>( } else { // Evaluate the constant proactively, to emit an error if the constant has // an unconditional error. We only do so if the const has no type params. 
- let _ = tcx.const_eval_poly(def_id.into()); + let _ = tcx.const_eval_poly(def_id); } } let field_id = field.did.expect_local(); diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs index 3b98f358b1e..b43a808ccdc 100644 --- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs +++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs @@ -259,19 +259,37 @@ fn visit_implementation_of_dispatch_from_dyn(checker: &Checker<'_>) -> Result<() let coerced_fields = fields .iter() .filter(|field| { + // Ignore PhantomData fields + let unnormalized_ty = tcx.type_of(field.did).instantiate_identity(); + if tcx + .try_normalize_erasing_regions( + ty::TypingEnv::non_body_analysis(tcx, def_a.did()), + unnormalized_ty, + ) + .unwrap_or(unnormalized_ty) + .is_phantom_data() + { + return false; + } + let ty_a = field.ty(tcx, args_a); let ty_b = field.ty(tcx, args_b); - if let Ok(layout) = - tcx.layout_of(infcx.typing_env(param_env).as_query_input(ty_a)) - { - if layout.is_1zst() { + // FIXME: We could do normalization here, but is it really worth it? + if ty_a == ty_b { + // Allow 1-ZSTs that don't mention type params. + // + // Allowing type params here would allow us to possibly transmute + // between ZSTs, which may be used to create library unsoundness. + if let Ok(layout) = + tcx.layout_of(infcx.typing_env(param_env).as_query_input(ty_a)) + && layout.is_1zst() + && !ty_a.has_non_region_param() + { // ignore 1-ZST fields return false; } - } - if ty_a == ty_b { res = Err(tcx.dcx().emit_err(errors::DispatchFromDynZST { span, name: field.name, @@ -460,8 +478,16 @@ pub(crate) fn coerce_unsized_info<'tcx>( .filter_map(|(i, f)| { let (a, b) = (f.ty(tcx, args_a), f.ty(tcx, args_b)); - if tcx.type_of(f.did).instantiate_identity().is_phantom_data() { - // Ignore PhantomData fields + // Ignore PhantomData fields + let unnormalized_ty = tcx.type_of(f.did).instantiate_identity(); + if tcx + .try_normalize_erasing_regions( + ty::TypingEnv::non_body_analysis(tcx, def_a.did()), + unnormalized_ty, + ) + .unwrap_or(unnormalized_ty) + .is_phantom_data() + { return None; } diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index d41b03640b6..86c6532c97d 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -1336,7 +1336,7 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, ty::PolyFn { icx.lowerer().lower_fn_ty( hir_id, - sig.header.safety, + sig.header.safety(), sig.header.abi, sig.decl, Some(generics), @@ -1351,13 +1351,18 @@ fn fn_sig(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_, ty::PolyFn kind: TraitItemKind::Fn(FnSig { header, decl, span: _ }, _), generics, .. - }) => { - icx.lowerer().lower_fn_ty(hir_id, header.safety, header.abi, decl, Some(generics), None) - } + }) => icx.lowerer().lower_fn_ty( + hir_id, + header.safety(), + header.abi, + decl, + Some(generics), + None, + ), ForeignItem(&hir::ForeignItem { kind: ForeignItemKind::Fn(sig, _, _), .. }) => { let abi = tcx.hir().get_foreign_abi(hir_id); - compute_sig_of_foreign_fn_decl(tcx, def_id, sig.decl, abi, sig.header.safety) + compute_sig_of_foreign_fn_decl(tcx, def_id, sig.decl, abi, sig.header.safety()) } Ctor(data) | Variant(hir::Variant { data, .. 
}) if data.ctor().is_some() => { @@ -1405,7 +1410,7 @@ fn lower_fn_sig_recovering_infer_ret_ty<'tcx>( icx.lowerer().lower_fn_ty( icx.tcx().local_def_id_to_hir_id(def_id), - sig.header.safety, + sig.header.safety(), sig.header.abi, sig.decl, Some(generics), diff --git a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs index 0c19e2e4c51..8a975786a92 100644 --- a/compiler/rustc_hir_analysis/src/collect/predicates_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/predicates_of.rs @@ -653,7 +653,7 @@ pub(super) fn implied_predicates_with_filter<'tcx>( } } } - PredicateFilter::SelfAndAssociatedTypeBounds => { + PredicateFilter::All | PredicateFilter::SelfAndAssociatedTypeBounds => { for &(pred, span) in implied_bounds { debug!("superbound: {:?}", pred); if let ty::ClauseKind::Trait(bound) = pred.kind().skip_binder() @@ -1036,7 +1036,7 @@ pub(super) fn const_conditions<'tcx>( icx.lowerer().lower_bounds( tcx.types.self_param, - supertraits.into_iter(), + supertraits, &mut bounds, ty::List::empty(), PredicateFilter::ConstIfConst, diff --git a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs index c933095fd3d..d1a1e36c1d5 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of/opaque.rs @@ -1,4 +1,3 @@ -use rustc_errors::StashKey; use rustc_hir::def::DefKind; use rustc_hir::def_id::LocalDefId; use rustc_hir::intravisit::{self, Visitor}; @@ -45,7 +44,7 @@ pub(super) fn find_opaque_ty_constraints_for_impl_trait_in_assoc_type( if !hidden.ty.references_error() { for concrete_type in locator.typeck_types { if concrete_type.ty != tcx.erase_regions(hidden.ty) { - if let Ok(d) = hidden.build_mismatch_error(&concrete_type, def_id, tcx) { + if let Ok(d) = hidden.build_mismatch_error(&concrete_type, tcx) { d.emit(); } } @@ -121,7 +120,7 @@ pub(super) fn find_opaque_ty_constraints_for_tait(tcx: TyCtxt<'_>, def_id: Local if !hidden.ty.references_error() { for concrete_type in locator.typeck_types { if concrete_type.ty != tcx.erase_regions(hidden.ty) { - if let Ok(d) = hidden.build_mismatch_error(&concrete_type, def_id, tcx) { + if let Ok(d) = hidden.build_mismatch_error(&concrete_type, tcx) { d.emit(); } } @@ -285,9 +284,8 @@ impl TaitConstraintLocator<'_> { debug!(?concrete_type, "found constraint"); if let Some(prev) = &mut self.found { if concrete_type.ty != prev.ty { - let (Ok(guar) | Err(guar)) = prev - .build_mismatch_error(&concrete_type, self.def_id, self.tcx) - .map(|d| d.emit()); + let (Ok(guar) | Err(guar)) = + prev.build_mismatch_error(&concrete_type, self.tcx).map(|d| d.emit()); prev.ty = Ty::new_error(self.tcx, guar); } } else { @@ -361,11 +359,8 @@ pub(super) fn find_opaque_ty_constraints_for_rpit<'tcx>( ); if let Some(prev) = &mut hir_opaque_ty { if concrete_type.ty != prev.ty { - if let Ok(d) = prev.build_mismatch_error(&concrete_type, def_id, tcx) { - d.stash( - tcx.def_span(opaque_type_key.def_id), - StashKey::OpaqueHiddenTypeMismatch, - ); + if let Ok(d) = prev.build_mismatch_error(&concrete_type, tcx) { + d.emit(); } } } else { @@ -435,9 +430,7 @@ impl RpitConstraintChecker<'_> { debug!(?concrete_type, "found constraint"); if concrete_type.ty != self.found.ty { - if let Ok(d) = - self.found.build_mismatch_error(&concrete_type, self.def_id, self.tcx) - { + if let Ok(d) = self.found.build_mismatch_error(&concrete_type, self.tcx) { d.emit(); } } diff --git 
a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs index 0623d35853e..7a3d921f00e 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/errors.rs @@ -179,7 +179,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // all visible traits. If there's one clear winner, just suggest that. let visible_traits: Vec<_> = tcx - .all_traits() + .visible_traits() .filter(|trait_def_id| { let viz = tcx.visibility(*trait_def_id); let def_id = self.item_def_id(); diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs index b7d3617fbe7..a1f2b8c7594 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs @@ -2,10 +2,12 @@ use rustc_ast::TraitObjectSyntax; use rustc_errors::codes::*; use rustc_errors::{Diag, EmissionGuarantee, ErrorGuaranteed, StashKey, Suggestions}; use rustc_hir as hir; -use rustc_hir::def::{DefKind, Res}; +use rustc_hir::def::{DefKind, Namespace, Res}; +use rustc_hir::def_id::DefId; use rustc_lint_defs::Applicability; use rustc_lint_defs::builtin::BARE_TRAIT_OBJECTS; use rustc_span::Span; +use rustc_span::edit_distance::find_best_match_for_name; use rustc_trait_selection::error_reporting::traits::suggestions::NextTypeParamName; use super::HirTyLowerer; @@ -86,7 +88,12 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { // Check if the impl trait that we are considering is an impl of a local trait. self.maybe_suggest_blanket_trait_impl(self_ty, &mut diag); self.maybe_suggest_assoc_ty_bound(self_ty, &mut diag); - // In case there is an associate type with the same name + self.maybe_suggest_typoed_method( + self_ty, + poly_trait_ref.trait_ref.trait_def_id(), + &mut diag, + ); + // In case there is an associated type with the same name // Add the suggestion to this error if let Some(mut sugg) = tcx.dcx().steal_non_err(self_ty.span, StashKey::AssociatedTypeSuggestion) @@ -96,7 +103,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { s1.append(s2); sugg.cancel(); } - diag.stash(self_ty.span, StashKey::TraitMissingMethod) + Some(diag.emit()) } else { tcx.node_span_lint(BARE_TRAIT_OBJECTS, self_ty.hir_id, self_ty.span, |lint| { lint.primary_message("trait objects without an explicit `dyn` are deprecated"); @@ -343,4 +350,44 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { ); } } + + fn maybe_suggest_typoed_method( + &self, + self_ty: &hir::Ty<'_>, + trait_def_id: Option<DefId>, + diag: &mut Diag<'_>, + ) { + let tcx = self.tcx(); + let Some(trait_def_id) = trait_def_id else { + return; + }; + let hir::Node::Expr(hir::Expr { + kind: hir::ExprKind::Path(hir::QPath::TypeRelative(path_ty, segment)), + .. 
+ }) = tcx.parent_hir_node(self_ty.hir_id) + else { + return; + }; + if path_ty.hir_id != self_ty.hir_id { + return; + } + let names: Vec<_> = tcx + .associated_items(trait_def_id) + .in_definition_order() + .filter(|assoc| assoc.kind.namespace() == Namespace::ValueNS) + .map(|cand| cand.name) + .collect(); + if let Some(typo) = find_best_match_for_name(&names, segment.ident.name, None) { + diag.span_suggestion_verbose( + segment.ident.span, + format!( + "you may have misspelled this associated item, causing `{}` \ + to be interpreted as a type rather than a trait", + tcx.item_name(trait_def_id), + ), + typo, + Applicability::MaybeIncorrect, + ); + } + } } diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs index 2154568c512..cb90fff782f 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/mod.rs @@ -35,7 +35,7 @@ use rustc_hir::{self as hir, AnonConst, GenericArg, GenericArgs, HirId}; use rustc_infer::infer::{InferCtxt, TyCtxtInferExt}; use rustc_infer::traits::ObligationCause; use rustc_middle::middle::stability::AllowUnstable; -use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput}; +use rustc_middle::mir::interpret::LitToConstInput; use rustc_middle::ty::fold::fold_regions; use rustc_middle::ty::print::PrintPolyTraitRefExt as _; use rustc_middle::ty::{ @@ -2262,25 +2262,11 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { _ => None, }; - if let Some(lit_input) = lit_input { - // If an error occurred, ignore that it's a literal and leave reporting the error up to - // mir. - match tcx.at(expr.span).lit_to_const(lit_input) { - Ok(c) => return Some(c), - Err(_) if lit_input.ty.has_aliases() => { - // allow the `ty` to be an alias type, though we cannot handle it here - return None; - } - Err(e) => { - tcx.dcx().span_delayed_bug( - expr.span, - format!("try_lower_anon_const_lit: couldn't lit_to_const {e:?}"), - ); - } - } - } - - None + lit_input + // Allow the `ty` to be an alias type, though we cannot handle it here, we just go through + // the more expensive anon const code path. + .filter(|l| !l.ty.has_aliases()) + .map(|l| tcx.at(expr.span).lit_to_const(l)) } fn lower_delegation_ty(&self, idx: hir::InferDelegationKind) -> Ty<'tcx> { @@ -2449,44 +2435,39 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { Ty::new_error(tcx, err) } hir::PatKind::Range(start, end, include_end) => { - let expr_to_const = |expr: &'tcx hir::Expr<'tcx>| -> ty::Const<'tcx> { - let (expr, neg) = match expr.kind { - hir::ExprKind::Unary(hir::UnOp::Neg, negated) => { - (negated, Some((expr.hir_id, expr.span))) - } - _ => (expr, None), - }; - let (c, c_ty) = match &expr.kind { - hir::ExprKind::Lit(lit) => { + let expr_to_const = |expr: &'tcx hir::PatExpr<'tcx>| -> ty::Const<'tcx> { + let (c, c_ty) = match expr.kind { + hir::PatExprKind::Lit { lit, negated } => { let lit_input = - LitToConstInput { lit: &lit.node, ty, neg: neg.is_some() }; - let ct = match tcx.lit_to_const(lit_input) { - Ok(c) => c, - Err(LitToConstError::Reported(err)) => { - ty::Const::new_error(tcx, err) - } - Err(LitToConstError::TypeError) => todo!(), - }; + LitToConstInput { lit: &lit.node, ty, neg: negated }; + let ct = tcx.lit_to_const(lit_input); (ct, ty) } - hir::ExprKind::Path(hir::QPath::Resolved( + hir::PatExprKind::Path(hir::QPath::Resolved( _, path @ &hir::Path { res: Res::Def(DefKind::ConstParam, def_id), .. 
}, )) => { - let _ = self.prohibit_generic_args( + match self.prohibit_generic_args( path.segments.iter(), GenericsArgsErrExtend::Param(def_id), - ); - let ty = tcx - .type_of(def_id) - .no_bound_vars() - .expect("const parameter types cannot be generic"); - let ct = self.lower_const_param(def_id, expr.hir_id); - (ct, ty) + ) { + Ok(()) => { + let ty = tcx + .type_of(def_id) + .no_bound_vars() + .expect("const parameter types cannot be generic"); + let ct = self.lower_const_param(def_id, expr.hir_id); + (ct, ty) + } + Err(guar) => ( + ty::Const::new_error(tcx, guar), + Ty::new_error(tcx, guar), + ), + } } _ => { @@ -2497,9 +2478,6 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } }; self.record_ty(expr.hir_id, c_ty, expr.span); - if let Some((id, span)) = neg { - self.record_ty(id, c_ty, span); - } c }; diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs index 42c98ab434e..3ff6acd79fc 100644 --- a/compiler/rustc_hir_pretty/src/lib.rs +++ b/compiler/rustc_hir_pretty/src/lib.rs @@ -199,6 +199,7 @@ impl<'a> State<'a> { Node::OpaqueTy(o) => self.print_opaque_ty(o), Node::Pat(a) => self.print_pat(a), Node::PatField(a) => self.print_patfield(a), + Node::PatExpr(a) => self.print_pat_expr(a), Node::Arm(a) => self.print_arm(a), Node::Infer(_) => self.word("_"), Node::PreciseCapturingNonLifetimeArg(param) => self.print_ident(param.ident), @@ -1849,6 +1850,19 @@ impl<'a> State<'a> { } } + fn print_pat_expr(&mut self, expr: &hir::PatExpr<'_>) { + match &expr.kind { + hir::PatExprKind::Lit { lit, negated } => { + if *negated { + self.word("-"); + } + self.print_literal(lit); + } + hir::PatExprKind::ConstBlock(c) => self.print_inline_const(c), + hir::PatExprKind::Path(qpath) => self.print_qpath(qpath, true), + } + } + fn print_pat(&mut self, pat: &hir::Pat<'_>) { self.maybe_print_comment(pat.span.lo()); self.ann.pre(self, AnnNode::Pat(pat)); @@ -1966,17 +1980,17 @@ impl<'a> State<'a> { self.pclose(); } } - PatKind::Lit(e) => self.print_expr(e), + PatKind::Expr(e) => self.print_pat_expr(e), PatKind::Range(begin, end, end_kind) => { if let Some(expr) = begin { - self.print_expr(expr); + self.print_pat_expr(expr); } match end_kind { RangeEnd::Included => self.word("..."), RangeEnd::Excluded => self.word(".."), } if let Some(expr) = end { - self.print_expr(expr); + self.print_pat_expr(expr); } } PatKind::Slice(before, slice, after) => { @@ -1999,6 +2013,12 @@ impl<'a> State<'a> { self.commasep(Inconsistent, after, |s, p| s.print_pat(p)); self.word("]"); } + PatKind::Guard(inner, cond) => { + self.print_pat(inner); + self.space(); + self.word_space("if"); + self.print_expr(cond); + } PatKind::Err(_) => { self.popen(); self.word("/*ERROR*/"); @@ -2387,7 +2407,7 @@ impl<'a> State<'a> { self.print_fn( decl, hir::FnHeader { - safety, + safety: safety.into(), abi, constness: hir::Constness::NotConst, asyncness: hir::IsAsync::NotAsync, @@ -2403,12 +2423,20 @@ impl<'a> State<'a> { fn print_fn_header_info(&mut self, header: hir::FnHeader) { self.print_constness(header.constness); + let safety = match header.safety { + hir::HeaderSafety::SafeTargetFeatures => { + self.word_nbsp("#[target_feature]"); + hir::Safety::Safe + } + hir::HeaderSafety::Normal(safety) => safety, + }; + match header.asyncness { hir::IsAsync::NotAsync => {} hir::IsAsync::Async(_) => self.word_nbsp("async"), } - self.print_safety(header.safety); + self.print_safety(safety); if header.abi != ExternAbi::Rust { self.word_nbsp("extern"); diff --git a/compiler/rustc_hir_typeck/messages.ftl 
b/compiler/rustc_hir_typeck/messages.ftl index a93da52b270..0f424a39840 100644 --- a/compiler/rustc_hir_typeck/messages.ftl +++ b/compiler/rustc_hir_typeck/messages.ftl @@ -165,6 +165,8 @@ hir_typeck_remove_semi_for_coerce_ret = the `match` arms can conform to this ret hir_typeck_remove_semi_for_coerce_semi = the `match` is a statement because of this semicolon, consider removing it hir_typeck_remove_semi_for_coerce_suggestion = remove this semicolon +hir_typeck_replace_comma_with_semicolon = replace the comma with a semicolon to create {$descr} + hir_typeck_return_stmt_outside_of_fn_body = {$statement_kind} statement outside of function body .encl_body_label = the {$statement_kind} is part of this body... diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs index bd26be11279..6945dbc3216 100644 --- a/compiler/rustc_hir_typeck/src/coercion.rs +++ b/compiler/rustc_hir_typeck/src/coercion.rs @@ -38,6 +38,7 @@ use std::ops::Deref; use rustc_abi::ExternAbi; +use rustc_attr_parsing::InlineAttr; use rustc_errors::codes::*; use rustc_errors::{Applicability, Diag, struct_span_code_err}; use rustc_hir as hir; @@ -919,19 +920,32 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { match b.kind() { ty::FnPtr(_, b_hdr) => { - let a_sig = a.fn_sig(self.tcx); + let mut a_sig = a.fn_sig(self.tcx); if let ty::FnDef(def_id, _) = *a.kind() { // Intrinsics are not coercible to function pointers if self.tcx.intrinsic(def_id).is_some() { return Err(TypeError::IntrinsicCast); } - // Safe `#[target_feature]` functions are not assignable to safe fn pointers (RFC 2396). + let fn_attrs = self.tcx.codegen_fn_attrs(def_id); + if matches!(fn_attrs.inline, InlineAttr::Force { .. }) { + return Err(TypeError::ForceInlineCast); + } if b_hdr.safety.is_safe() - && !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty() + && self.tcx.codegen_fn_attrs(def_id).safe_target_features { - return Err(TypeError::TargetFeatureCast(def_id)); + // Allow the coercion if the current function has all the features that would be + // needed to call the coercee safely. + if let Some(safe_sig) = self.tcx.adjust_target_feature_sig( + def_id, + a_sig, + self.fcx.body_id.into(), + ) { + a_sig = safe_sig; + } else { + return Err(TypeError::TargetFeatureCast(def_id)); + } } } @@ -1197,6 +1211,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { return Ok(prev_ty); } + let is_force_inline = |ty: Ty<'tcx>| { + if let ty::FnDef(did, _) = ty.kind() { + matches!(self.tcx.codegen_fn_attrs(did).inline, InlineAttr::Force { .. }) + } else { + false + } + }; + if is_force_inline(prev_ty) || is_force_inline(new_ty) { + return Err(TypeError::ForceInlineCast); + } + // Special-case that coercion alone cannot handle: // Function items or non-capturing closures of differing IDs or GenericArgs. 
let (a_sig, b_sig) = { diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs index e51323fc5c8..367e7c6de95 100644 --- a/compiler/rustc_hir_typeck/src/demand.rs +++ b/compiler/rustc_hir_typeck/src/demand.rs @@ -30,7 +30,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if expr_ty == expected { return; } - self.annotate_alternative_method_deref(err, expr, error); self.explain_self_literal(err, expr, expected, expr_ty); @@ -39,6 +38,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { || self.suggest_missing_unwrap_expect(err, expr, expected, expr_ty) || self.suggest_remove_last_method_call(err, expr, expected) || self.suggest_associated_const(err, expr, expected) + || self.suggest_semicolon_in_repeat_expr(err, expr, expr_ty) || self.suggest_deref_ref_or_into(err, expr, expected, expr_ty, expected_ty_expr) || self.suggest_option_to_bool(err, expr, expr_ty, expected) || self.suggest_compatible_variants(err, expr, expected, expr_ty) @@ -85,6 +85,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.annotate_expected_due_to_let_ty(err, expr, error); self.annotate_loop_expected_due_to_inference(err, expr, error); + if self.annotate_mut_binding_to_immutable_binding(err, expr, error) { + return; + } // FIXME(#73154): For now, we do leak check when coercing function // pointers in typeck, instead of only during borrowck. This can lead @@ -795,6 +798,98 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + /// Detect the following case + /// + /// ```text + /// fn change_object(mut a: &Ty) { + /// let a = Ty::new(); + /// b = a; + /// } + /// ``` + /// + /// where the user likely meant to modify the value behind there reference, use `a` as an out + /// parameter, instead of mutating the local binding. When encountering this we suggest: + /// + /// ```text + /// fn change_object(a: &'_ mut Ty) { + /// let a = Ty::new(); + /// *b = a; + /// } + /// ``` + fn annotate_mut_binding_to_immutable_binding( + &self, + err: &mut Diag<'_>, + expr: &hir::Expr<'_>, + error: Option<TypeError<'tcx>>, + ) -> bool { + if let Some(TypeError::Sorts(ExpectedFound { expected, found })) = error + && let ty::Ref(_, inner, hir::Mutability::Not) = expected.kind() + + // The difference between the expected and found values is one level of borrowing. + && self.can_eq(self.param_env, *inner, found) + + // We have an `ident = expr;` assignment. + && let hir::Node::Expr(hir::Expr { kind: hir::ExprKind::Assign(lhs, rhs, _), .. }) = + self.tcx.parent_hir_node(expr.hir_id) + && rhs.hir_id == expr.hir_id + + // We are assigning to some binding. + && let hir::ExprKind::Path(hir::QPath::Resolved( + None, + hir::Path { res: hir::def::Res::Local(hir_id), .. }, + )) = lhs.kind + && let hir::Node::Pat(pat) = self.tcx.hir_node(*hir_id) + + // The pattern we have is an fn argument. + && let hir::Node::Param(hir::Param { ty_span, .. }) = + self.tcx.parent_hir_node(pat.hir_id) + && let item = self.tcx.hir().get_parent_item(pat.hir_id) + && let item = self.tcx.hir_owner_node(item) + && let Some(fn_decl) = item.fn_decl() + + // We have a mutable binding in the argument. + && let hir::PatKind::Binding(hir::BindingMode::MUT, _hir_id, ident, _) = pat.kind + + // Look for the type corresponding to the argument pattern we have in the argument list. 
+ && let Some(ty_sugg) = fn_decl + .inputs + .iter() + .filter_map(|ty| { + if ty.span == *ty_span + && let hir::TyKind::Ref(lt, x) = ty.kind + { + // `&'name Ty` -> `&'name mut Ty` or `&Ty` -> `&mut Ty` + Some(( + x.ty.span.shrink_to_lo(), + format!( + "{}mut ", + if lt.ident.span.lo() == lt.ident.span.hi() { "" } else { " " } + ), + )) + } else { + None + } + }) + .next() + { + let sugg = vec![ + ty_sugg, + (pat.span.until(ident.span), String::new()), + (lhs.span.shrink_to_lo(), "*".to_string()), + ]; + // We suggest changing the argument from `mut ident: &Ty` to `ident: &'_ mut Ty` and the + // assignment from `ident = val;` to `*ident = val;`. + err.multipart_suggestion_verbose( + "you might have meant to mutate the pointed at value being passed in, instead of \ + changing the reference in the local binding", + sugg, + Applicability::MaybeIncorrect, + ); + return true; + } + false + } + fn annotate_alternative_method_deref( &self, err: &mut Diag<'_>, diff --git a/compiler/rustc_hir_typeck/src/errors.rs b/compiler/rustc_hir_typeck/src/errors.rs index ff09583cc65..052adaa69b2 100644 --- a/compiler/rustc_hir_typeck/src/errors.rs +++ b/compiler/rustc_hir_typeck/src/errors.rs @@ -19,8 +19,15 @@ use crate::fluent_generated as fluent; pub(crate) struct BaseExpressionDoubleDot { #[primary_span] pub span: Span, + #[suggestion( + hir_typeck_base_expression_double_dot_enable_default_field_values, + code = "#![feature(default_field_values)]\n", + applicability = "machine-applicable", + style = "verbose" + )] + pub default_field_values_suggestion: Option<Span>, #[subdiagnostic] - pub default_field_values: Option<BaseExpressionDoubleDotEnableDefaultFieldValues>, + pub default_field_values_help: Option<BaseExpressionDoubleDotEnableDefaultFieldValues>, #[subdiagnostic] pub add_expr: Option<BaseExpressionDoubleDotAddExpr>, #[subdiagnostic] @@ -846,3 +853,16 @@ pub(crate) struct PassFnItemToVariadicFunction { pub sugg_span: Span, pub replace: String, } + +#[derive(Subdiagnostic)] +#[suggestion( + hir_typeck_replace_comma_with_semicolon, + applicability = "machine-applicable", + style = "verbose", + code = "; " +)] +pub(crate) struct ReplaceCommaWithSemicolon { + #[primary_span] + pub comma_span: Span, + pub descr: &'static str, +} diff --git a/compiler/rustc_hir_typeck/src/expr.rs b/compiler/rustc_hir_typeck/src/expr.rs index fb73985e306..01fed72d5a2 100644 --- a/compiler/rustc_hir_typeck/src/expr.rs +++ b/compiler/rustc_hir_typeck/src/expr.rs @@ -430,6 +430,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { | hir::Node::AssocItemConstraint(_) | hir::Node::TraitRef(_) | hir::Node::PatField(_) + | hir::Node::PatExpr(_) | hir::Node::LetStmt(_) | hir::Node::Synthetic | hir::Node::Err(_) @@ -456,6 +457,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Does not constitute a read. hir::PatKind::Wild => false, + // Might not constitute a read, since the condition might be false. + hir::PatKind::Guard(_, _) => true, + // This is unnecessarily restrictive when the pattern that doesn't // constitute a read is unreachable. // @@ -481,7 +485,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { | hir::PatKind::Box(_) | hir::PatKind::Ref(_, _) | hir::PatKind::Deref(_) - | hir::PatKind::Lit(_) + | hir::PatKind::Expr(_) | hir::PatKind::Range(_, _, _) | hir::PatKind::Slice(_, _, _) | hir::PatKind::Err(_) => true, @@ -834,7 +838,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // We always require that the type provided as the value for // a type parameter outlives the moment of instantiation. 
let args = self.typeck_results.borrow().node_args(expr.hir_id); - self.add_wf_bounds(args, expr); + self.add_wf_bounds(args, expr.span); ty } @@ -1793,7 +1797,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } - fn check_expr_const_block( + pub(super) fn check_expr_const_block( &self, block: &'tcx hir::ConstBlock, expected: Expectation<'tcx>, @@ -1987,18 +1991,18 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { adt_ty: Ty<'tcx>, expected: Expectation<'tcx>, expr: &hir::Expr<'_>, - span: Span, + path_span: Span, variant: &'tcx ty::VariantDef, hir_fields: &'tcx [hir::ExprField<'tcx>], base_expr: &'tcx hir::StructTailExpr<'tcx>, ) { let tcx = self.tcx; - let adt_ty = self.try_structurally_resolve_type(span, adt_ty); + let adt_ty = self.try_structurally_resolve_type(path_span, adt_ty); let adt_ty_hint = expected.only_has_type(self).and_then(|expected| { self.fudge_inference_if_ok(|| { let ocx = ObligationCtxt::new(self); - ocx.sup(&self.misc(span), self.param_env, expected, adt_ty)?; + ocx.sup(&self.misc(path_span), self.param_env, expected, adt_ty)?; if !ocx.select_where_possible().is_empty() { return Err(TypeError::Mismatch); } @@ -2008,11 +2012,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }); if let Some(adt_ty_hint) = adt_ty_hint { // re-link the variables that the fudging above can create. - self.demand_eqtype(span, adt_ty_hint, adt_ty); + self.demand_eqtype(path_span, adt_ty_hint, adt_ty); } let ty::Adt(adt, args) = adt_ty.kind() else { - span_bug!(span, "non-ADT passed to check_expr_struct_fields"); + span_bug!(path_span, "non-ADT passed to check_expr_struct_fields"); }; let adt_kind = adt.adt_kind(); @@ -2103,7 +2107,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if adt_kind == AdtKind::Union && hir_fields.len() != 1 { struct_span_code_err!( self.dcx(), - span, + path_span, E0784, "union expressions should have exactly one field", ) @@ -2134,13 +2138,24 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } if !self.tcx.features().default_field_values() { + let sugg = self.tcx.crate_level_attribute_injection_span(expr.hir_id); self.dcx().emit_err(BaseExpressionDoubleDot { span: span.shrink_to_hi(), // We only mention enabling the feature if this is a nightly rustc *and* the // expression would make sense with the feature enabled. 
- default_field_values: if self.tcx.sess.is_nightly_build() + default_field_values_suggestion: if self.tcx.sess.is_nightly_build() + && missing_mandatory_fields.is_empty() + && !missing_optional_fields.is_empty() + && sugg.is_some() + { + sugg + } else { + None + }, + default_field_values_help: if self.tcx.sess.is_nightly_build() && missing_mandatory_fields.is_empty() && !missing_optional_fields.is_empty() + && sugg.is_none() { Some(BaseExpressionDoubleDotEnableDefaultFieldValues) } else { @@ -2163,6 +2178,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }); return; } + if variant.fields.is_empty() { + let mut err = self.dcx().struct_span_err( + span, + format!( + "`{adt_ty}` has no fields, `..` needs at least one default field in the \ + struct definition", + ), + ); + err.span_label(path_span, "this type has no fields"); + err.emit(); + } if !missing_mandatory_fields.is_empty() { let s = pluralize!(missing_mandatory_fields.len()); let fields: Vec<_> = @@ -2312,11 +2338,17 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { .collect(); if !private_fields.is_empty() { - self.report_private_fields(adt_ty, span, expr.span, private_fields, hir_fields); + self.report_private_fields( + adt_ty, + path_span, + expr.span, + private_fields, + hir_fields, + ); } else { self.report_missing_fields( adt_ty, - span, + path_span, remaining_fields, variant, hir_fields, diff --git a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs index ecbae6ac72f..1f48b703e4a 100644 --- a/compiler/rustc_hir_typeck/src/expr_use_visitor.rs +++ b/compiler/rustc_hir_typeck/src/expr_use_visitor.rs @@ -595,7 +595,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx let place_ty = place.place.ty(); needs_to_be_read |= self.is_multivariant_adt(place_ty, pat.span); } - PatKind::Lit(_) | PatKind::Range(..) => { + PatKind::Expr(_) | PatKind::Range(..) => { // If the PatKind is a Lit or a Range then we want // to borrow discr. needs_to_be_read = true; @@ -615,6 +615,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx | PatKind::Box(_) | PatKind::Deref(_) | PatKind::Ref(..) + | PatKind::Guard(..) | PatKind::Wild | PatKind::Err(_) => { // If the PatKind is Or, Box, or Ref, the decision is made later @@ -1737,7 +1738,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx } } - PatKind::Binding(.., Some(subpat)) => { + PatKind::Binding(.., Some(subpat)) | PatKind::Guard(subpat, _) => { self.cat_pattern(place_with_id, subpat, op)?; } @@ -1802,7 +1803,7 @@ impl<'tcx, Cx: TypeInformationCtxt<'tcx>, D: Delegate<'tcx>> ExprUseVisitor<'tcx PatKind::Path(_) | PatKind::Binding(.., None) - | PatKind::Lit(..) + | PatKind::Expr(..) | PatKind::Range(..) | PatKind::Never | PatKind::Wild diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs index 2f6e50c8014..be6d9570e35 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/_impl.rs @@ -3,7 +3,7 @@ use std::slice; use rustc_abi::FieldIdx; use rustc_data_structures::fx::FxHashSet; -use rustc_errors::{Applicability, Diag, ErrorGuaranteed, MultiSpan, StashKey}; +use rustc_errors::{Applicability, Diag, ErrorGuaranteed, MultiSpan}; use rustc_hir::def::{CtorOf, DefKind, Res}; use rustc_hir::def_id::DefId; use rustc_hir::intravisit::Visitor; @@ -577,11 +577,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } /// Registers obligations that all `args` are well-formed. 
- pub(crate) fn add_wf_bounds(&self, args: GenericArgsRef<'tcx>, expr: &hir::Expr<'_>) { + pub(crate) fn add_wf_bounds(&self, args: GenericArgsRef<'tcx>, span: Span) { for arg in args.iter().filter(|arg| { matches!(arg.unpack(), GenericArgKind::Type(..) | GenericArgKind::Const(..)) }) { - self.register_wf_obligation(arg, expr.span, ObligationCauseCode::WellFormed(None)); + self.register_wf_obligation(arg, span, ObligationCauseCode::WellFormed(None)); } } @@ -806,17 +806,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let item_name = item_segment.ident; let result = self .resolve_fully_qualified_call(span, item_name, ty.normalized, qself.span, hir_id) - .map(|r| { - // lint bare trait if the method is found in the trait - if span.edition().at_least_rust_2021() { - self.dcx().try_steal_modify_and_emit_err( - qself.span, - StashKey::TraitMissingMethod, - |_err| {}, - ); - } - r - }) .or_else(|error| { let guar = self .dcx() @@ -840,17 +829,6 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ); } - // Emit the diagnostic for bare traits. (We used to cancel for slightly better - // error messages, but cancelling stashed diagnostics is no longer allowed because - // it causes problems when tracking whether errors have actually occurred.) - if span.edition().at_least_rust_2021() { - self.dcx().try_steal_modify_and_emit_err( - qself.span, - StashKey::TraitMissingMethod, - |_err| {}, - ); - } - if item_name.name != kw::Empty { self.report_method_error( hir_id, @@ -1039,6 +1017,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { def_id, span, ), + Res::Err => { + return ( + Ty::new_error( + tcx, + tcx.dcx().span_delayed_bug(span, "could not resolve path {:?}"), + ), + res, + ); + } _ => bug!("instantiate_value_path on {:?}", res), }; diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs index 2f4b42587fb..46eed2db236 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/checks.rs @@ -2460,16 +2460,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { spans.push_span_label( param.span, format!( - "{} {} to match the {} type of this parameter", + "{} need{} to match the {} type of this parameter", display_list_with_comma_and(&other_param_matched_names), - format!( - "need{}", - pluralize!(if other_param_matched_names.len() == 1 { - 0 - } else { - 1 - }) - ), + pluralize!(if other_param_matched_names.len() == 1 { + 0 + } else { + 1 + }), matched_ty, ), ); diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs index 3756f6339a4..53e055fdeef 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs @@ -1320,14 +1320,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { let span = expr.span.shrink_to_hi(); let subdiag = if self.type_is_copy_modulo_regions(self.param_env, ty) { errors::OptionResultRefMismatch::Copied { span, def_path } - } else if let Some(clone_did) = self.tcx.lang_items().clone_trait() - && rustc_trait_selection::traits::type_known_to_meet_bound_modulo_regions( - self, - self.param_env, - ty, - clone_did, - ) - { + } else if self.type_is_clone_modulo_regions(self.param_env, ty) { errors::OptionResultRefMismatch::Cloned { span, def_path } } else { return false; @@ -2182,6 +2175,87 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } + /// Suggest replacing comma with semicolon in incorrect repeat expressions + /// like `["_", 10]` or `vec![String::new(), 10]`. 
+ pub(crate) fn suggest_semicolon_in_repeat_expr( + &self, + err: &mut Diag<'_>, + expr: &hir::Expr<'_>, + expr_ty: Ty<'tcx>, + ) -> bool { + // Check if `expr` is contained in array of two elements + if let hir::Node::Expr(array_expr) = self.tcx.parent_hir_node(expr.hir_id) + && let hir::ExprKind::Array(elements) = array_expr.kind + && let [first, second] = &elements[..] + && second.hir_id == expr.hir_id + { + // Span between the two elements of the array + let comma_span = first.span.between(second.span); + + // Check if `expr` is a constant value of type `usize`. + // This can only detect const variable declarations and + // calls to const functions. + + // Checking this here instead of rustc_hir::hir because + // this check needs access to `self.tcx` but rustc_hir + // has no access to `TyCtxt`. + let expr_is_const_usize = expr_ty.is_usize() + && match expr.kind { + ExprKind::Path(QPath::Resolved( + None, + Path { res: Res::Def(DefKind::Const, _), .. }, + )) => true, + ExprKind::Call( + Expr { + kind: + ExprKind::Path(QPath::Resolved( + None, + Path { res: Res::Def(DefKind::Fn, fn_def_id), .. }, + )), + .. + }, + _, + ) => self.tcx.is_const_fn(*fn_def_id), + _ => false, + }; + + // Type of the first element is guaranteed to be checked + // when execution reaches here because `mismatched types` + // error occurs only when type of second element of array + // is not the same as type of first element. + let first_ty = self.typeck_results.borrow().expr_ty(first); + + // `array_expr` is from a macro `vec!["a", 10]` if + // 1. array expression's span is imported from a macro + // 2. first element of array implements `Clone` trait + // 3. second element is an integer literal or is an expression of `usize` like type + if self.tcx.sess.source_map().is_imported(array_expr.span) + && self.type_is_clone_modulo_regions(self.param_env, first_ty) + && (expr.is_size_lit() || expr_ty.is_usize_like()) + { + err.subdiagnostic(errors::ReplaceCommaWithSemicolon { + comma_span, + descr: "a vector", + }); + return true; + } + + // `array_expr` is from an array `["a", 10]` if + // 1. first element of array implements `Copy` trait + // 2. second element is an integer literal or is a const value of type `usize` + if self.type_is_copy_modulo_regions(self.param_env, first_ty) + && (expr.is_size_lit() || expr_is_const_usize) + { + err.subdiagnostic(errors::ReplaceCommaWithSemicolon { + comma_span, + descr: "an array", + }); + return true; + } + } + false + } + /// If the expected type is an enum (Issue #55250) with any variants whose /// sole field is of the found type, suggest such variants. (Issue #42764) pub(crate) fn suggest_compatible_variants( @@ -2608,6 +2682,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if let hir::ExprKind::Unary(hir::UnOp::Deref, inner) = expr.kind && let Some(1) = self.deref_steps_for_suggestion(expected, checked_ty) + && self.typeck_results.borrow().expr_ty(inner).is_ref() { // We have `*&T`, check if what was expected was `&T`. // If so, we may want to suggest removing a `*`. 
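The `suggest_semicolon_in_repeat_expr` helper added to suggestions.rs above fires when the second element of a two-element array (or `vec!`) literal looks like a repeat count. A minimal sketch of the repeat forms the machine-applicable suggestion steers users towards; this is illustrative user code, not part of the diff:

    fn main() {
        // `[expr; N]` needs the element to be `Copy` (or a constant), which is
        // the property the helper checks for the array case.
        let underscores = ["_"; 10];
        // `vec![expr; N]` only needs the element to be `Clone`, matching the
        // `vec!` case in the helper.
        let strings = vec![String::new(); 10];
        assert_eq!(underscores.len(), 10);
        assert_eq!(strings.len(), 10);
    }

Writing `["_", 10]` or `vec![String::new(), 10]` instead still produces the usual mismatched-types error, but now carries the `ReplaceCommaWithSemicolon` subdiagnostic defined in errors.rs above.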
diff --git a/compiler/rustc_hir_typeck/src/lib.rs b/compiler/rustc_hir_typeck/src/lib.rs index 5a0a855147d..9cd9ca040ce 100644 --- a/compiler/rustc_hir_typeck/src/lib.rs +++ b/compiler/rustc_hir_typeck/src/lib.rs @@ -87,7 +87,7 @@ fn used_trait_imports(tcx: TyCtxt<'_>, def_id: LocalDefId) -> &UnordSet<LocalDef } fn typeck<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> { - typeck_with_fallback(tcx, def_id, None) + typeck_with_inspect(tcx, def_id, None) } /// Same as `typeck` but `inspect` is invoked on evaluation of each root obligation. @@ -99,11 +99,11 @@ pub fn inspect_typeck<'tcx>( def_id: LocalDefId, inspect: ObligationInspector<'tcx>, ) -> &'tcx ty::TypeckResults<'tcx> { - typeck_with_fallback(tcx, def_id, Some(inspect)) + typeck_with_inspect(tcx, def_id, Some(inspect)) } #[instrument(level = "debug", skip(tcx, inspector), ret)] -fn typeck_with_fallback<'tcx>( +fn typeck_with_inspect<'tcx>( tcx: TyCtxt<'tcx>, def_id: LocalDefId, inspector: Option<ObligationInspector<'tcx>>, @@ -139,7 +139,7 @@ fn typeck_with_fallback<'tcx>( // type that has an infer in it, lower the type directly so that it'll // be correctly filled with infer. We'll use this inference to provide // a suggestion later on. - fcx.lowerer().lower_fn_ty(id, header.safety, header.abi, decl, None, None) + fcx.lowerer().lower_fn_ty(id, header.safety(), header.abi, decl, None, None) } else { tcx.fn_sig(def_id).instantiate_identity() }; @@ -147,8 +147,23 @@ fn typeck_with_fallback<'tcx>( check_abi(tcx, span, fn_sig.abi()); // Compute the function signature from point of view of inside the fn. - let fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig); - let fn_sig = fcx.normalize(body.value.span, fn_sig); + let mut fn_sig = tcx.liberate_late_bound_regions(def_id.to_def_id(), fn_sig); + + // Normalize the input and output types one at a time, using a different + // `WellFormedLoc` for each. We cannot call `normalize_associated_types` + // on the entire `FnSig`, since this would use the same `WellFormedLoc` + // for each type, preventing the HIR wf check from generating + // a nice error message. + let arg_span = + |idx| decl.inputs.get(idx).map_or(decl.output.span(), |arg: &hir::Ty<'_>| arg.span); + + fn_sig.inputs_and_output = tcx.mk_type_list_from_iter( + fn_sig + .inputs_and_output + .iter() + .enumerate() + .map(|(idx, ty)| fcx.normalize(arg_span(idx), ty)), + ); check_fn(&mut fcx, fn_sig, None, decl, def_id, body, tcx.features().unsized_fn_params()); } else { diff --git a/compiler/rustc_hir_typeck/src/method/confirm.rs b/compiler/rustc_hir_typeck/src/method/confirm.rs index 0c93c9817b4..f549ced9dc3 100644 --- a/compiler/rustc_hir_typeck/src/method/confirm.rs +++ b/compiler/rustc_hir_typeck/src/method/confirm.rs @@ -611,7 +611,7 @@ impl<'a, 'tcx> ConfirmContext<'a, 'tcx> { // this is a projection from a trait reference, so we have to // make sure that the trait reference inputs are well-formed. 
- self.add_wf_bounds(all_args, self.call_expr); + self.add_wf_bounds(all_args, self.call_expr.span); // the function type must also be well-formed (this is not // implied by the args being well-formed because of inherent diff --git a/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs b/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs index 5ccfcf93f69..69d7a6c97cb 100644 --- a/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs +++ b/compiler/rustc_hir_typeck/src/method/prelude_edition_lints.rs @@ -8,7 +8,7 @@ use rustc_lint::{ARRAY_INTO_ITER, BOXED_SLICE_INTO_ITER}; use rustc_middle::span_bug; use rustc_middle::ty::{self, Ty}; use rustc_session::lint::builtin::{RUST_2021_PRELUDE_COLLISIONS, RUST_2024_PRELUDE_COLLISIONS}; -use rustc_span::{Ident, Span, kw, sym}; +use rustc_span::{Ident, STDLIB_STABLE_CRATES, Span, kw, sym}; use rustc_trait_selection::infer::InferCtxtExt; use tracing::debug; @@ -76,7 +76,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { }; // No need to lint if method came from std/core, as that will now be in the prelude - if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) { + if STDLIB_STABLE_CRATES.contains(&self.tcx.crate_name(pick.item.def_id.krate)) { return; } @@ -252,7 +252,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } // No need to lint if method came from std/core, as that will now be in the prelude - if matches!(self.tcx.crate_name(pick.item.def_id.krate), sym::std | sym::core) { + if STDLIB_STABLE_CRATES.contains(&self.tcx.crate_name(pick.item.def_id.krate)) { return; } diff --git a/compiler/rustc_hir_typeck/src/method/suggest.rs b/compiler/rustc_hir_typeck/src/method/suggest.rs index b4f1dcfb9cc..b3d87ef4ad2 100644 --- a/compiler/rustc_hir_typeck/src/method/suggest.rs +++ b/compiler/rustc_hir_typeck/src/method/suggest.rs @@ -170,6 +170,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { span, .. }) + | hir::Node::PatExpr(&hir::PatExpr { + kind: hir::PatExprKind::Path(QPath::TypeRelative(rcvr, segment)), + span, + .. + }) | hir::Node::Pat(&hir::Pat { kind: hir::PatKind::Path(QPath::TypeRelative(rcvr, segment)) @@ -1091,7 +1096,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { ) ) { continue; - }; + } match self.tcx.hir().get_if_local(item_def_id) { // Unmet obligation comes from a `derive` macro, point at it once to @@ -1205,8 +1210,12 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { entry.1.insert((cause_span, "unsatisfied trait bound introduced here")); entry.2.push(p); } - Some(node) => unreachable!("encountered `{node:?}` due to `{cause:#?}`"), - None => (), + _ => { + // It's possible to use well-formedness clauses to get obligations + // which point arbitrary items like ADTs, so there's no use in ICEing + // here if we find that the obligation originates from some other + // node that we don't handle. + } } } let mut spanned_predicates: Vec<_> = spanned_predicates.into_iter().collect(); diff --git a/compiler/rustc_hir_typeck/src/pat.rs b/compiler/rustc_hir_typeck/src/pat.rs index 98b28240f4c..36094657eaf 100644 --- a/compiler/rustc_hir_typeck/src/pat.rs +++ b/compiler/rustc_hir_typeck/src/pat.rs @@ -30,6 +30,7 @@ use tracing::{debug, instrument, trace}; use ty::VariantDef; use super::report_unexpected_variant_res; +use crate::expectation::Expectation; use crate::gather_locals::DeclOrigin; use crate::{FnCtxt, LoweredTy, errors}; @@ -270,7 +271,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { PatKind::Wild | PatKind::Err(_) => expected, // We allow any type here; we ensure that the type is uninhabited during match checking. 
PatKind::Never => expected, - PatKind::Lit(lt) => self.check_pat_lit(pat.span, lt, expected, ti), + PatKind::Expr(lt) => self.check_pat_lit(pat.span, lt, expected, ti), PatKind::Range(lhs, rhs, _) => self.check_pat_range(pat.span, lhs, rhs, expected, ti), PatKind::Binding(ba, var_id, ident, sub) => { self.check_pat_ident(pat, ba, var_id, ident, sub, expected, pat_info) @@ -279,11 +280,16 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { self.check_pat_tuple_struct(pat, qpath, subpats, ddpos, expected, pat_info) } PatKind::Path(ref qpath) => { - self.check_pat_path(pat, qpath, path_res.unwrap(), expected, ti) + self.check_pat_path(pat.hir_id, pat.span, qpath, path_res.unwrap(), expected, ti) } PatKind::Struct(ref qpath, fields, has_rest_pat) => { self.check_pat_struct(pat, qpath, fields, has_rest_pat, expected, pat_info) } + PatKind::Guard(pat, cond) => { + self.check_pat(pat, expected, pat_info); + self.check_expr_has_type_or_error(cond, self.tcx.types.bool, |_| {}); + expected + } PatKind::Or(pats) => { for pat in pats { self.check_pat(pat, expected, pat_info); @@ -393,7 +399,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // As a result, we allow `if let 0 = &&0 {}` but not `if let "foo" = &&"foo" {}`. // // Call `resolve_vars_if_possible` here for inline const blocks. - PatKind::Lit(lt) => match self.resolve_vars_if_possible(self.check_expr(lt)).kind() { + PatKind::Expr(lt) => match self.resolve_vars_if_possible(self.check_pat_expr_unadjusted(lt)).kind() { ty::Ref(..) => AdjustMode::Pass, _ => AdjustMode::Peel, }, @@ -422,7 +428,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // An OR-pattern just propagates to each individual alternative. // This is maximally flexible, allowing e.g., `Some(mut x) | &Some(mut x)`. // In that example, `Some(mut x)` results in `Peel` whereas `&Some(mut x)` in `Reset`. - | PatKind::Or(_) => AdjustMode::Pass, + | PatKind::Or(_) + // Like or-patterns, guard patterns just propogate to their subpatterns. + | PatKind::Guard(..) => AdjustMode::Pass, } } @@ -486,10 +494,28 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { (expected, def_br, max_ref_mutbl) } + fn check_pat_expr_unadjusted(&self, lt: &'tcx hir::PatExpr<'tcx>) -> Ty<'tcx> { + let ty = match <.kind { + rustc_hir::PatExprKind::Lit { lit, .. } => { + self.check_expr_lit(lit, Expectation::NoExpectation) + } + rustc_hir::PatExprKind::ConstBlock(c) => { + self.check_expr_const_block(c, Expectation::NoExpectation) + } + rustc_hir::PatExprKind::Path(qpath) => { + let (res, opt_ty, segments) = + self.resolve_ty_and_res_fully_qualified_call(qpath, lt.hir_id, lt.span); + self.instantiate_value_path(segments, opt_ty, res, lt.span, lt.span, lt.hir_id).0 + } + }; + self.write_ty(lt.hir_id, ty); + ty + } + fn check_pat_lit( &self, span: Span, - lt: &hir::Expr<'tcx>, + lt: &hir::PatExpr<'tcx>, expected: Ty<'tcx>, ti: &TopInfo<'tcx>, ) -> Ty<'tcx> { @@ -500,7 +526,10 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Byte string patterns behave the same way as array patterns // They can denote both statically and dynamically-sized byte arrays. let mut pat_ty = ty; - if let hir::ExprKind::Lit(Spanned { node: ast::LitKind::ByteStr(..), .. }) = lt.kind { + if let hir::PatExprKind::Lit { + lit: Spanned { node: ast::LitKind::ByteStr(..), .. }, .. 
+ } = lt.kind + { let expected = self.structurally_resolve_type(span, expected); if let ty::Ref(_, inner_ty, _) = *expected.kind() && self.try_structurally_resolve_type(span, inner_ty).is_slice() @@ -517,7 +546,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } if self.tcx.features().string_deref_patterns() - && let hir::ExprKind::Lit(Spanned { node: ast::LitKind::Str(..), .. }) = lt.kind + && let hir::PatExprKind::Lit { + lit: Spanned { node: ast::LitKind::Str(..), .. }, .. + } = lt.kind { let tcx = self.tcx; let expected = self.resolve_vars_if_possible(expected); @@ -558,15 +589,15 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fn check_pat_range( &self, span: Span, - lhs: Option<&'tcx hir::Expr<'tcx>>, - rhs: Option<&'tcx hir::Expr<'tcx>>, + lhs: Option<&'tcx hir::PatExpr<'tcx>>, + rhs: Option<&'tcx hir::PatExpr<'tcx>>, expected: Ty<'tcx>, ti: &TopInfo<'tcx>, ) -> Ty<'tcx> { - let calc_side = |opt_expr: Option<&'tcx hir::Expr<'tcx>>| match opt_expr { + let calc_side = |opt_expr: Option<&'tcx hir::PatExpr<'tcx>>| match opt_expr { None => None, Some(expr) => { - let ty = self.check_expr(expr); + let ty = self.check_pat_expr_unadjusted(expr); // Check that the end-point is possibly of numeric or char type. // The early check here is not for correctness, but rather better // diagnostics (e.g. when `&str` is being matched, `expected` will @@ -901,6 +932,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { PatKind::Struct(..) | PatKind::TupleStruct(..) | PatKind::Or(..) + | PatKind::Guard(..) | PatKind::Tuple(..) | PatKind::Slice(..) => "binding", @@ -911,7 +943,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { | PatKind::Box(..) | PatKind::Deref(_) | PatKind::Ref(..) - | PatKind::Lit(..) + | PatKind::Expr(..) | PatKind::Range(..) | PatKind::Err(_) => break 'block None, }, @@ -1045,7 +1077,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fn check_pat_path( &self, - pat: &Pat<'tcx>, + hir_id: HirId, + span: Span, qpath: &hir::QPath<'_>, path_resolution: (Res, Option<LoweredTy<'tcx>>, &'tcx [hir::PathSegment<'tcx>]), expected: Ty<'tcx>, @@ -1064,8 +1097,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } Res::Def(DefKind::AssocFn | DefKind::Ctor(_, CtorKind::Fn) | DefKind::Variant, _) => { let expected = "unit struct, unit variant or constant"; - let e = - report_unexpected_variant_res(tcx, res, None, qpath, pat.span, E0533, expected); + let e = report_unexpected_variant_res(tcx, res, None, qpath, span, E0533, expected); return Ty::new_error(tcx, e); } Res::SelfCtor(def_id) => { @@ -1080,7 +1112,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { res, None, qpath, - pat.span, + span, E0533, "unit struct", ); @@ -1099,11 +1131,11 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { // Type-check the path. 
let (pat_ty, pat_res) = - self.instantiate_value_path(segments, opt_ty, res, pat.span, pat.span, pat.hir_id); + self.instantiate_value_path(segments, opt_ty, res, span, span, hir_id); if let Err(err) = - self.demand_suptype_with_origin(&self.pattern_cause(ti, pat.span), expected, pat_ty) + self.demand_suptype_with_origin(&self.pattern_cause(ti, span), expected, pat_ty) { - self.emit_bad_pat_path(err, pat, res, pat_res, pat_ty, segments); + self.emit_bad_pat_path(err, hir_id, span, res, pat_res, pat_ty, segments); } pat_ty } @@ -1146,13 +1178,13 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { fn emit_bad_pat_path( &self, mut e: Diag<'_>, - pat: &hir::Pat<'tcx>, + hir_id: HirId, + pat_span: Span, res: Res, pat_res: Res, pat_ty: Ty<'tcx>, segments: &'tcx [hir::PathSegment<'tcx>], ) { - let pat_span = pat.span; if let Some(span) = self.tcx.hir().res_span(pat_res) { e.span_label(span, format!("{} defined here", res.descr())); if let [hir::PathSegment { ident, .. }] = &*segments { @@ -1165,7 +1197,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { res.descr(), ), ); - match self.tcx.parent_hir_node(pat.hir_id) { + match self.tcx.parent_hir_node(hir_id) { hir::Node::PatField(..) => { e.span_suggestion_verbose( ident.span.shrink_to_hi(), @@ -1805,9 +1837,9 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { } } else if inexistent_fields.len() == 1 { match pat_field.pat.kind { - PatKind::Lit(expr) + PatKind::Expr(_) if !self.may_coerce( - self.typeck_results.borrow().expr_ty(expr), + self.typeck_results.borrow().node_type(pat_field.pat.hir_id), self.field_ty(field.span, field_def, args), ) => {} _ => { diff --git a/compiler/rustc_hir_typeck/src/upvar.rs b/compiler/rustc_hir_typeck/src/upvar.rs index 53993ba8d6c..cac891c4e4c 100644 --- a/compiler/rustc_hir_typeck/src/upvar.rs +++ b/compiler/rustc_hir_typeck/src/upvar.rs @@ -147,15 +147,16 @@ impl<'a, 'tcx> Visitor<'tcx> for InferBorrowKindVisitor<'a, 'tcx> { self.visit_body(body); self.fcx.analyze_closure(expr.hir_id, expr.span, body_id, body, capture_clause); } - hir::ExprKind::ConstBlock(anon_const) => { - let body = self.fcx.tcx.hir().body(anon_const.body); - self.visit_body(body); - } _ => {} } intravisit::walk_expr(self, expr); } + + fn visit_inline_const(&mut self, c: &'tcx hir::ConstBlock) { + let body = self.fcx.tcx.hir().body(c.body); + self.visit_body(body); + } } impl<'a, 'tcx> FnCtxt<'a, 'tcx> { diff --git a/compiler/rustc_hir_typeck/src/writeback.rs b/compiler/rustc_hir_typeck/src/writeback.rs index 5612aa75aae..683cacdff7d 100644 --- a/compiler/rustc_hir_typeck/src/writeback.rs +++ b/compiler/rustc_hir_typeck/src/writeback.rs @@ -5,7 +5,7 @@ use std::mem; use rustc_data_structures::unord::ExtendUnord; -use rustc_errors::{ErrorGuaranteed, StashKey}; +use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; use rustc_hir::HirId; use rustc_hir::intravisit::{self, Visitor}; @@ -246,6 +246,13 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { } } } + + fn visit_const_block(&mut self, span: Span, anon_const: &hir::ConstBlock) { + self.visit_node_id(span, anon_const.hir_id); + + let body = self.tcx().hir().body(anon_const.body); + self.visit_body(body); + } } /////////////////////////////////////////////////////////////////////////// @@ -275,11 +282,8 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { hir::ExprKind::Field(..) | hir::ExprKind::OffsetOf(..) 
=> { self.visit_field_id(e.hir_id); } - hir::ExprKind::ConstBlock(anon_const) => { - self.visit_node_id(e.span, anon_const.hir_id); - - let body = self.tcx().hir().body(anon_const.body); - self.visit_body(body); + hir::ExprKind::ConstBlock(ref anon_const) => { + self.visit_const_block(e.span, anon_const); } _ => {} } @@ -335,6 +339,14 @@ impl<'cx, 'tcx> Visitor<'tcx> for WritebackCx<'cx, 'tcx> { intravisit::walk_pat(self, p); } + fn visit_pat_expr(&mut self, expr: &'tcx hir::PatExpr<'tcx>) { + self.visit_node_id(expr.span, expr.hir_id); + if let hir::PatExprKind::ConstBlock(c) = &expr.kind { + self.visit_const_block(expr.span, c); + } + intravisit::walk_pat_expr(self, expr); + } + fn visit_local(&mut self, l: &'tcx hir::LetStmt<'tcx>) { intravisit::walk_local(self, l); let var_ty = self.fcx.local_ty(l.span, l.hir_id); @@ -550,9 +562,9 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { // types or by using this function at the end of writeback and running it as a // fixpoint. let opaque_types = self.fcx.infcx.clone_opaque_types(); - for (opaque_type_key, decl) in opaque_types { - let hidden_type = self.resolve(decl.hidden_type, &decl.hidden_type.span); - let opaque_type_key = self.resolve(opaque_type_key, &decl.hidden_type.span); + for (opaque_type_key, hidden_type) in opaque_types { + let hidden_type = self.resolve(hidden_type, &hidden_type.span); + let opaque_type_key = self.resolve(opaque_type_key, &hidden_type.span); if !self.fcx.next_trait_solver() { if let ty::Alias(ty::Opaque, alias_ty) = hidden_type.ty.kind() @@ -570,15 +582,8 @@ impl<'cx, 'tcx> WritebackCx<'cx, 'tcx> { && last_opaque_ty.ty != hidden_type.ty { assert!(!self.fcx.next_trait_solver()); - if let Ok(d) = hidden_type.build_mismatch_error( - &last_opaque_ty, - opaque_type_key.def_id, - self.tcx(), - ) { - d.stash( - self.tcx().def_span(opaque_type_key.def_id), - StashKey::OpaqueHiddenTypeMismatch, - ); + if let Ok(d) = hidden_type.build_mismatch_error(&last_opaque_ty, self.tcx()) { + d.emit(); } } } diff --git a/compiler/rustc_index/src/bit_set.rs b/compiler/rustc_index/src/bit_set.rs index 38e2dbbde7d..f12df831cb5 100644 --- a/compiler/rustc_index/src/bit_set.rs +++ b/compiler/rustc_index/src/bit_set.rs @@ -97,7 +97,13 @@ macro_rules! bit_relations_inherent_impls { /// A fixed-size bitset type with a dense representation. /// -/// NOTE: Use [`GrowableBitSet`] if you need support for resizing after creation. +/// Note 1: Since this bitset is dense, if your domain is big, and/or relatively +/// homogeneous (for example, with long runs of bits set or unset), then it may +/// be preferable to instead use a [MixedBitSet], or an +/// [IntervalSet](crate::interval::IntervalSet). They should be more suited to +/// sparse, or highly-compressible, domains. +/// +/// Note 2: Use [`GrowableBitSet`] if you need support for resizing after creation. /// /// `T` is an index type, typically a newtyped `usize` wrapper, but it can also /// just be `usize`. @@ -108,33 +114,33 @@ macro_rules! bit_relations_inherent_impls { /// #[cfg_attr(feature = "nightly", derive(Decodable_Generic, Encodable_Generic))] #[derive(Eq, PartialEq, Hash)] -pub struct BitSet<T> { +pub struct DenseBitSet<T> { domain_size: usize, words: SmallVec<[Word; 2]>, marker: PhantomData<T>, } -impl<T> BitSet<T> { +impl<T> DenseBitSet<T> { /// Gets the domain size. pub fn domain_size(&self) -> usize { self.domain_size } } -impl<T: Idx> BitSet<T> { +impl<T: Idx> DenseBitSet<T> { /// Creates a new, empty bitset with a given `domain_size`. 
#[inline] - pub fn new_empty(domain_size: usize) -> BitSet<T> { + pub fn new_empty(domain_size: usize) -> DenseBitSet<T> { let num_words = num_words(domain_size); - BitSet { domain_size, words: smallvec![0; num_words], marker: PhantomData } + DenseBitSet { domain_size, words: smallvec![0; num_words], marker: PhantomData } } /// Creates a new, filled bitset with a given `domain_size`. #[inline] - pub fn new_filled(domain_size: usize) -> BitSet<T> { + pub fn new_filled(domain_size: usize) -> DenseBitSet<T> { let num_words = num_words(domain_size); let mut result = - BitSet { domain_size, words: smallvec![!0; num_words], marker: PhantomData }; + DenseBitSet { domain_size, words: smallvec![!0; num_words], marker: PhantomData }; result.clear_excess_bits(); result } @@ -165,7 +171,7 @@ impl<T: Idx> BitSet<T> { /// Is `self` is a (non-strict) superset of `other`? #[inline] - pub fn superset(&self, other: &BitSet<T>) -> bool { + pub fn superset(&self, other: &DenseBitSet<T>) -> bool { assert_eq!(self.domain_size, other.domain_size); self.words.iter().zip(&other.words).all(|(a, b)| (a & b) == *b) } @@ -275,35 +281,57 @@ impl<T: Idx> BitSet<T> { } bit_relations_inherent_impls! {} + + /// Sets `self = self | !other`. + /// + /// FIXME: Incorporate this into [`BitRelations`] and fill out + /// implementations for other bitset types, if needed. + pub fn union_not(&mut self, other: &DenseBitSet<T>) { + assert_eq!(self.domain_size, other.domain_size); + + // FIXME(Zalathar): If we were to forcibly _set_ all excess bits before + // the bitwise update, and then clear them again afterwards, we could + // quickly and accurately detect whether the update changed anything. + // But that's only worth doing if there's an actual use-case. + + bitwise(&mut self.words, &other.words, |a, b| a | !b); + // The bitwise update `a | !b` can result in the last word containing + // out-of-domain bits, so we need to clear them. 
+ self.clear_excess_bits(); + } } // dense REL dense -impl<T: Idx> BitRelations<BitSet<T>> for BitSet<T> { - fn union(&mut self, other: &BitSet<T>) -> bool { +impl<T: Idx> BitRelations<DenseBitSet<T>> for DenseBitSet<T> { + fn union(&mut self, other: &DenseBitSet<T>) -> bool { assert_eq!(self.domain_size, other.domain_size); bitwise(&mut self.words, &other.words, |a, b| a | b) } - fn subtract(&mut self, other: &BitSet<T>) -> bool { + fn subtract(&mut self, other: &DenseBitSet<T>) -> bool { assert_eq!(self.domain_size, other.domain_size); bitwise(&mut self.words, &other.words, |a, b| a & !b) } - fn intersect(&mut self, other: &BitSet<T>) -> bool { + fn intersect(&mut self, other: &DenseBitSet<T>) -> bool { assert_eq!(self.domain_size, other.domain_size); bitwise(&mut self.words, &other.words, |a, b| a & b) } } -impl<T: Idx> From<GrowableBitSet<T>> for BitSet<T> { +impl<T: Idx> From<GrowableBitSet<T>> for DenseBitSet<T> { fn from(bit_set: GrowableBitSet<T>) -> Self { bit_set.bit_set } } -impl<T> Clone for BitSet<T> { +impl<T> Clone for DenseBitSet<T> { fn clone(&self) -> Self { - BitSet { domain_size: self.domain_size, words: self.words.clone(), marker: PhantomData } + DenseBitSet { + domain_size: self.domain_size, + words: self.words.clone(), + marker: PhantomData, + } } fn clone_from(&mut self, from: &Self) { @@ -312,13 +340,13 @@ impl<T> Clone for BitSet<T> { } } -impl<T: Idx> fmt::Debug for BitSet<T> { +impl<T: Idx> fmt::Debug for DenseBitSet<T> { fn fmt(&self, w: &mut fmt::Formatter<'_>) -> fmt::Result { w.debug_list().entries(self.iter()).finish() } } -impl<T: Idx> ToString for BitSet<T> { +impl<T: Idx> ToString for DenseBitSet<T> { fn to_string(&self) -> String { let mut result = String::new(); let mut sep = '['; @@ -902,7 +930,7 @@ impl<T: Idx> BitRelations<ChunkedBitSet<T>> for ChunkedBitSet<T> { } } -impl<T: Idx> BitRelations<ChunkedBitSet<T>> for BitSet<T> { +impl<T: Idx> BitRelations<ChunkedBitSet<T>> for DenseBitSet<T> { fn union(&mut self, other: &ChunkedBitSet<T>) -> bool { sequential_update(|elem| self.insert(elem), other.iter()) } @@ -1077,6 +1105,18 @@ impl<T: Idx> fmt::Debug for ChunkedBitSet<T> { } } +/// Sets `out_vec[i] = op(out_vec[i], in_vec[i])` for each index `i` in both +/// slices. The slices must have the same length. +/// +/// Returns true if at least one bit in `out_vec` was changed. +/// +/// ## Warning +/// Some bitwise operations (e.g. union-not, xor) can set output bits that were +/// unset in in both inputs. If this happens in the last word/chunk of a bitset, +/// it can cause the bitset to contain out-of-domain values, which need to +/// be cleared with `clear_excess_bits_in_final_word`. This also makes the +/// "changed" return value unreliable, because the change might have only +/// affected excess bits. #[inline] fn bitwise<Op>(out_vec: &mut [Word], in_vec: &[Word], op: Op) -> bool where @@ -1114,10 +1154,10 @@ where false } -/// A bitset with a mixed representation, using `BitSet` for small and medium -/// bitsets, and `ChunkedBitSet` for large bitsets, i.e. those with enough bits -/// for at least two chunks. This is a good choice for many bitsets that can -/// have large domain sizes (e.g. 5000+). +/// A bitset with a mixed representation, using `DenseBitSet` for small and +/// medium bitsets, and `ChunkedBitSet` for large bitsets, i.e. those with +/// enough bits for at least two chunks. This is a good choice for many bitsets +/// that can have large domain sizes (e.g. 5000+). 
/// /// `T` is an index type, typically a newtyped `usize` wrapper, but it can also /// just be `usize`. @@ -1127,7 +1167,7 @@ where /// will panic if the bitsets have differing domain sizes. #[derive(PartialEq, Eq)] pub enum MixedBitSet<T> { - Small(BitSet<T>), + Small(DenseBitSet<T>), Large(ChunkedBitSet<T>), } @@ -1144,7 +1184,7 @@ impl<T: Idx> MixedBitSet<T> { #[inline] pub fn new_empty(domain_size: usize) -> MixedBitSet<T> { if domain_size <= CHUNK_BITS { - MixedBitSet::Small(BitSet::new_empty(domain_size)) + MixedBitSet::Small(DenseBitSet::new_empty(domain_size)) } else { MixedBitSet::Large(ChunkedBitSet::new_empty(domain_size)) } @@ -1283,7 +1323,7 @@ impl<'a, T: Idx> Iterator for MixedBitIter<'a, T> { /// to or greater than the domain size. #[derive(Clone, Debug, PartialEq)] pub struct GrowableBitSet<T: Idx> { - bit_set: BitSet<T>, + bit_set: DenseBitSet<T>, } impl<T: Idx> Default for GrowableBitSet<T> { @@ -1306,11 +1346,11 @@ impl<T: Idx> GrowableBitSet<T> { } pub fn new_empty() -> GrowableBitSet<T> { - GrowableBitSet { bit_set: BitSet::new_empty(0) } + GrowableBitSet { bit_set: DenseBitSet::new_empty(0) } } pub fn with_capacity(capacity: usize) -> GrowableBitSet<T> { - GrowableBitSet { bit_set: BitSet::new_empty(capacity) } + GrowableBitSet { bit_set: DenseBitSet::new_empty(capacity) } } /// Returns `true` if the set has changed. @@ -1349,8 +1389,8 @@ impl<T: Idx> GrowableBitSet<T> { } } -impl<T: Idx> From<BitSet<T>> for GrowableBitSet<T> { - fn from(bit_set: BitSet<T>) -> Self { +impl<T: Idx> From<DenseBitSet<T>> for GrowableBitSet<T> { + fn from(bit_set: DenseBitSet<T>) -> Self { Self { bit_set } } } @@ -1386,7 +1426,7 @@ impl<R: Idx, C: Idx> BitMatrix<R, C> { } /// Creates a new matrix, with `row` used as the value for every row. - pub fn from_row_n(row: &BitSet<C>, num_rows: usize) -> BitMatrix<R, C> { + pub fn from_row_n(row: &DenseBitSet<C>, num_rows: usize) -> BitMatrix<R, C> { let num_columns = row.domain_size(); let words_per_row = num_words(num_columns); assert_eq!(words_per_row, row.words.len()); @@ -1484,7 +1524,7 @@ impl<R: Idx, C: Idx> BitMatrix<R, C> { /// Adds the bits from `with` to the bits from row `write`, and /// returns `true` if anything changed. - pub fn union_row_with(&mut self, with: &BitSet<C>, write: R) -> bool { + pub fn union_row_with(&mut self, with: &DenseBitSet<C>, write: R) -> bool { assert!(write.index() < self.num_rows); assert_eq!(with.domain_size(), self.num_columns); let (write_start, write_end) = self.range(write); @@ -1541,8 +1581,8 @@ impl<R: Idx, C: Idx> fmt::Debug for BitMatrix<R, C> { /// A fixed-column-size, variable-row-size 2D bit matrix with a moderately /// sparse representation. /// -/// Initially, every row has no explicit representation. If any bit within a -/// row is set, the entire row is instantiated as `Some(<BitSet>)`. +/// Initially, every row has no explicit representation. If any bit within a row +/// is set, the entire row is instantiated as `Some(<DenseBitSet>)`. /// Furthermore, any previously uninstantiated rows prior to it will be /// instantiated as `None`. Those prior rows may themselves become fully /// instantiated later on if any of their bits are set. 
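The hunks above rename `BitSet` to `DenseBitSet` and add `union_not`, which computes `self |= !other` restricted to the shared domain and then clears the excess bits in the final word. A minimal usage sketch, assuming access to the in-tree `rustc_index` crate (an internal API, shown only to illustrate the semantics):

    use rustc_index::bit_set::DenseBitSet;

    fn union_not_demo() {
        let mut a: DenseBitSet<usize> = DenseBitSet::new_empty(8);
        a.insert(1);

        let mut b: DenseBitSet<usize> = DenseBitSet::new_empty(8);
        b.insert(1);
        b.insert(3);

        // `a | !b` over the 8-element domain: every index except those set in
        // `b` that were not already set in `a` (here, only index 3).
        a.union_not(&b);
        assert_eq!(a.iter().collect::<Vec<_>>(), vec![0usize, 1, 2, 4, 5, 6, 7]);
    }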
@@ -1556,7 +1596,7 @@ where C: Idx, { num_columns: usize, - rows: IndexVec<R, Option<BitSet<C>>>, + rows: IndexVec<R, Option<DenseBitSet<C>>>, } impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { @@ -1565,10 +1605,10 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { Self { num_columns, rows: IndexVec::new() } } - fn ensure_row(&mut self, row: R) -> &mut BitSet<C> { - // Instantiate any missing rows up to and including row `row` with an empty `BitSet`. - // Then replace row `row` with a full `BitSet` if necessary. - self.rows.get_or_insert_with(row, || BitSet::new_empty(self.num_columns)) + fn ensure_row(&mut self, row: R) -> &mut DenseBitSet<C> { + // Instantiate any missing rows up to and including row `row` with an empty `DenseBitSet`. + // Then replace row `row` with a full `DenseBitSet` if necessary. + self.rows.get_or_insert_with(row, || DenseBitSet::new_empty(self.num_columns)) } /// Sets the cell at `(row, column)` to true. Put another way, insert @@ -1642,17 +1682,17 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { self.row(row).into_iter().flat_map(|r| r.iter()) } - pub fn row(&self, row: R) -> Option<&BitSet<C>> { + pub fn row(&self, row: R) -> Option<&DenseBitSet<C>> { self.rows.get(row)?.as_ref() } - /// Intersects `row` with `set`. `set` can be either `BitSet` or + /// Intersects `row` with `set`. `set` can be either `DenseBitSet` or /// `ChunkedBitSet`. Has no effect if `row` does not exist. /// /// Returns true if the row was changed. pub fn intersect_row<Set>(&mut self, row: R, set: &Set) -> bool where - BitSet<C>: BitRelations<Set>, + DenseBitSet<C>: BitRelations<Set>, { match self.rows.get_mut(row) { Some(Some(row)) => row.intersect(set), @@ -1660,13 +1700,13 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { } } - /// Subtracts `set` from `row`. `set` can be either `BitSet` or + /// Subtracts `set` from `row`. `set` can be either `DenseBitSet` or /// `ChunkedBitSet`. Has no effect if `row` does not exist. /// /// Returns true if the row was changed. pub fn subtract_row<Set>(&mut self, row: R, set: &Set) -> bool where - BitSet<C>: BitRelations<Set>, + DenseBitSet<C>: BitRelations<Set>, { match self.rows.get_mut(row) { Some(Some(row)) => row.subtract(set), @@ -1674,13 +1714,13 @@ impl<R: Idx, C: Idx> SparseBitMatrix<R, C> { } } - /// Unions `row` with `set`. `set` can be either `BitSet` or + /// Unions `row` with `set`. `set` can be either `DenseBitSet` or /// `ChunkedBitSet`. /// /// Returns true if the row was changed. 
pub fn union_row<Set>(&mut self, row: R, set: &Set) -> bool where - BitSet<C>: BitRelations<Set>, + DenseBitSet<C>: BitRelations<Set>, { self.ensure_row(row).union(set) } diff --git a/compiler/rustc_index/src/bit_set/tests.rs b/compiler/rustc_index/src/bit_set/tests.rs index f6142323979..eaa4aafe721 100644 --- a/compiler/rustc_index/src/bit_set/tests.rs +++ b/compiler/rustc_index/src/bit_set/tests.rs @@ -8,7 +8,7 @@ use test::Bencher; #[test] fn test_new_filled() { for i in 0..128 { - let idx_buf = BitSet::new_filled(i); + let idx_buf = DenseBitSet::new_filled(i); let elems: Vec<usize> = idx_buf.iter().collect(); let expected: Vec<usize> = (0..i).collect(); assert_eq!(elems, expected); @@ -17,7 +17,7 @@ fn test_new_filled() { #[test] fn bitset_iter_works() { - let mut bitset: BitSet<usize> = BitSet::new_empty(100); + let mut bitset: DenseBitSet<usize> = DenseBitSet::new_empty(100); bitset.insert(1); bitset.insert(10); bitset.insert(19); @@ -32,7 +32,7 @@ fn bitset_iter_works() { #[test] fn bitset_iter_works_2() { - let mut bitset: BitSet<usize> = BitSet::new_empty(320); + let mut bitset: DenseBitSet<usize> = DenseBitSet::new_empty(320); bitset.insert(0); bitset.insert(127); bitset.insert(191); @@ -43,25 +43,25 @@ fn bitset_iter_works_2() { #[test] fn bitset_clone_from() { - let mut a: BitSet<usize> = BitSet::new_empty(10); + let mut a: DenseBitSet<usize> = DenseBitSet::new_empty(10); a.insert(4); a.insert(7); a.insert(9); - let mut b = BitSet::new_empty(2); + let mut b = DenseBitSet::new_empty(2); b.clone_from(&a); assert_eq!(b.domain_size(), 10); assert_eq!(b.iter().collect::<Vec<_>>(), [4, 7, 9]); - b.clone_from(&BitSet::new_empty(40)); + b.clone_from(&DenseBitSet::new_empty(40)); assert_eq!(b.domain_size(), 40); assert_eq!(b.iter().collect::<Vec<_>>(), []); } #[test] fn union_two_sets() { - let mut set1: BitSet<usize> = BitSet::new_empty(65); - let mut set2: BitSet<usize> = BitSet::new_empty(65); + let mut set1: DenseBitSet<usize> = DenseBitSet::new_empty(65); + let mut set2: DenseBitSet<usize> = DenseBitSet::new_empty(65); assert!(set1.insert(3)); assert!(!set1.insert(3)); assert!(set2.insert(5)); @@ -76,6 +76,32 @@ fn union_two_sets() { } #[test] +fn union_not() { + let mut a = DenseBitSet::<usize>::new_empty(100); + let mut b = DenseBitSet::<usize>::new_empty(100); + + a.insert(3); + a.insert(5); + a.insert(80); + a.insert(81); + + b.insert(5); // Already in `a`. + b.insert(7); + b.insert(63); + b.insert(81); // Already in `a`. + b.insert(90); + + a.union_not(&b); + + // After union-not, `a` should contain all values in the domain, except for + // the ones that are in `b` and were _not_ already in `a`. 
+ assert_eq!( + a.iter().collect::<Vec<_>>(), + (0usize..100).filter(|&x| !matches!(x, 7 | 63 | 90)).collect::<Vec<_>>(), + ); +} + +#[test] fn chunked_bitset() { let mut b0 = ChunkedBitSet::<usize>::new_empty(0); let b0b = b0.clone(); @@ -268,8 +294,8 @@ fn with_elements_chunked(elements: &[usize], domain_size: usize) -> ChunkedBitSe s } -fn with_elements_standard(elements: &[usize], domain_size: usize) -> BitSet<usize> { - let mut s = BitSet::new_empty(domain_size); +fn with_elements_standard(elements: &[usize], domain_size: usize) -> DenseBitSet<usize> { + let mut s = DenseBitSet::new_empty(domain_size); for &e in elements { assert!(s.insert(e)); } @@ -503,15 +529,15 @@ fn sparse_matrix_operations() { matrix.insert(2, 99); matrix.insert(4, 0); - let mut disjoint: BitSet<usize> = BitSet::new_empty(100); + let mut disjoint: DenseBitSet<usize> = DenseBitSet::new_empty(100); disjoint.insert(33); - let mut superset = BitSet::new_empty(100); + let mut superset = DenseBitSet::new_empty(100); superset.insert(22); superset.insert(75); superset.insert(33); - let mut subset = BitSet::new_empty(100); + let mut subset = DenseBitSet::new_empty(100); subset.insert(22); // SparseBitMatrix::remove @@ -568,7 +594,7 @@ fn dense_insert_range() { where R: RangeBounds<usize> + Clone + IntoIterator<Item = usize> + std::fmt::Debug, { - let mut set = BitSet::new_empty(domain); + let mut set = DenseBitSet::new_empty(domain); set.insert_range(range.clone()); for i in set.iter() { assert!(range.contains(&i)); @@ -609,7 +635,7 @@ fn dense_insert_range() { #[test] fn dense_last_set_before() { - fn easy(set: &BitSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> { + fn easy(set: &DenseBitSet<usize>, needle: impl RangeBounds<usize>) -> Option<usize> { let mut last_leq = None; for e in set.iter() { if needle.contains(&e) { @@ -620,7 +646,7 @@ fn dense_last_set_before() { } #[track_caller] - fn cmp(set: &BitSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) { + fn cmp(set: &DenseBitSet<usize>, needle: impl RangeBounds<usize> + Clone + std::fmt::Debug) { assert_eq!( set.last_set_in(needle.clone()), easy(set, needle.clone()), @@ -629,7 +655,7 @@ fn dense_last_set_before() { set ); } - let mut set = BitSet::new_empty(300); + let mut set = DenseBitSet::new_empty(300); cmp(&set, 50..=50); set.insert(WORD_BITS); cmp(&set, WORD_BITS..=WORD_BITS); @@ -645,7 +671,7 @@ fn dense_last_set_before() { for i in 0..=WORD_BITS * 2 { for j in i..=WORD_BITS * 2 { for k in 0..WORD_BITS * 2 { - let mut set = BitSet::new_empty(300); + let mut set = DenseBitSet::new_empty(300); cmp(&set, i..j); cmp(&set, i..=j); set.insert(k); @@ -658,7 +684,7 @@ fn dense_last_set_before() { #[bench] fn bench_insert(b: &mut Bencher) { - let mut bs = BitSet::new_filled(99999usize); + let mut bs = DenseBitSet::new_filled(99999usize); b.iter(|| { black_box(bs.insert(black_box(100u32))); }); @@ -666,7 +692,7 @@ fn bench_insert(b: &mut Bencher) { #[bench] fn bench_remove(b: &mut Bencher) { - let mut bs = BitSet::new_filled(99999usize); + let mut bs = DenseBitSet::new_filled(99999usize); b.iter(|| { black_box(bs.remove(black_box(100u32))); }); @@ -674,7 +700,7 @@ fn bench_remove(b: &mut Bencher) { #[bench] fn bench_iter(b: &mut Bencher) { - let bs = BitSet::new_filled(99999usize); + let bs = DenseBitSet::new_filled(99999usize); b.iter(|| { bs.iter().map(|b: usize| black_box(b)).for_each(drop); }); @@ -682,8 +708,8 @@ fn bench_iter(b: &mut Bencher) { #[bench] fn bench_intersect(b: &mut Bencher) { - let mut ba: BitSet<u32> = 
BitSet::new_filled(99999usize); - let bb = BitSet::new_filled(99999usize); + let mut ba: DenseBitSet<u32> = DenseBitSet::new_filled(99999usize); + let bb = DenseBitSet::new_filled(99999usize); b.iter(|| { ba.intersect(black_box(&bb)); }); diff --git a/compiler/rustc_infer/src/infer/canonical/query_response.rs b/compiler/rustc_infer/src/infer/canonical/query_response.rs index d5aab4781de..23f63af778d 100644 --- a/compiler/rustc_infer/src/infer/canonical/query_response.rs +++ b/compiler/rustc_infer/src/infer/canonical/query_response.rs @@ -155,12 +155,12 @@ impl<'tcx> InferCtxt<'tcx> { .opaque_type_storage .opaque_types .iter() - .map(|(k, v)| (*k, v.hidden_type.ty)) + .map(|(k, v)| (*k, v.ty)) .collect() } fn take_opaque_types_for_query_response(&self) -> Vec<(ty::OpaqueTypeKey<'tcx>, Ty<'tcx>)> { - self.take_opaque_types().into_iter().map(|(k, v)| (k, v.hidden_type.ty)).collect() + self.take_opaque_types().into_iter().map(|(k, v)| (k, v.ty)).collect() } /// Given the (canonicalized) result to a canonical query, diff --git a/compiler/rustc_infer/src/infer/mod.rs b/compiler/rustc_infer/src/infer/mod.rs index 1f7180fb80a..283ebdfa236 100644 --- a/compiler/rustc_infer/src/infer/mod.rs +++ b/compiler/rustc_infer/src/infer/mod.rs @@ -234,7 +234,7 @@ impl<'tcx> InferCtxtInner<'tcx> { pub fn iter_opaque_types( &self, ) -> impl Iterator<Item = (ty::OpaqueTypeKey<'tcx>, ty::OpaqueHiddenType<'tcx>)> + '_ { - self.opaque_type_storage.opaque_types.iter().map(|(&k, v)| (k, v.hidden_type)) + self.opaque_type_storage.opaque_types.iter().map(|(&k, &v)| (k, v)) } } diff --git a/compiler/rustc_infer/src/infer/opaque_types/mod.rs b/compiler/rustc_infer/src/infer/opaque_types/mod.rs index 137d438a479..f6ef3f40e62 100644 --- a/compiler/rustc_infer/src/infer/opaque_types/mod.rs +++ b/compiler/rustc_infer/src/infer/opaque_types/mod.rs @@ -19,20 +19,9 @@ use crate::traits::{self, Obligation, PredicateObligations}; mod table; -pub(crate) type OpaqueTypeMap<'tcx> = FxIndexMap<OpaqueTypeKey<'tcx>, OpaqueTypeDecl<'tcx>>; +pub(crate) type OpaqueTypeMap<'tcx> = FxIndexMap<OpaqueTypeKey<'tcx>, OpaqueHiddenType<'tcx>>; pub(crate) use table::{OpaqueTypeStorage, OpaqueTypeTable}; -/// Information about the opaque types whose values we -/// are inferring in this function (these are the `impl Trait` that -/// appear in the return type). -#[derive(Clone, Debug)] -pub struct OpaqueTypeDecl<'tcx> { - /// The hidden types that have been inferred for this opaque type. - /// There can be multiple, but they are all `lub`ed together at the end - /// to obtain the canonical hidden type. - pub hidden_type: OpaqueHiddenType<'tcx>, -} - impl<'tcx> InferCtxt<'tcx> { /// This is a backwards compatibility hack to prevent breaking changes from /// lazy TAIT around RPIT handling. diff --git a/compiler/rustc_infer/src/infer/opaque_types/table.rs b/compiler/rustc_infer/src/infer/opaque_types/table.rs index 047d8edad3d..ba6cc0d783d 100644 --- a/compiler/rustc_infer/src/infer/opaque_types/table.rs +++ b/compiler/rustc_infer/src/infer/opaque_types/table.rs @@ -3,7 +3,7 @@ use rustc_middle::bug; use rustc_middle::ty::{self, OpaqueHiddenType, OpaqueTypeKey, Ty}; use tracing::instrument; -use super::{OpaqueTypeDecl, OpaqueTypeMap}; +use super::OpaqueTypeMap; use crate::infer::snapshot::undo_log::{InferCtxtUndoLogs, UndoLog}; #[derive(Default, Debug, Clone)] @@ -11,15 +11,19 @@ pub(crate) struct OpaqueTypeStorage<'tcx> { /// Opaque types found in explicit return types and their /// associated fresh inference variable. 
Writeback resolves these /// variables to get the concrete type, which can be used to - /// 'de-opaque' OpaqueTypeDecl, after typeck is done with all functions. + /// 'de-opaque' OpaqueHiddenType, after typeck is done with all functions. pub opaque_types: OpaqueTypeMap<'tcx>, } impl<'tcx> OpaqueTypeStorage<'tcx> { #[instrument(level = "debug")] - pub(crate) fn remove(&mut self, key: OpaqueTypeKey<'tcx>, idx: Option<OpaqueHiddenType<'tcx>>) { - if let Some(idx) = idx { - self.opaque_types.get_mut(&key).unwrap().hidden_type = idx; + pub(crate) fn remove( + &mut self, + key: OpaqueTypeKey<'tcx>, + prev: Option<OpaqueHiddenType<'tcx>>, + ) { + if let Some(prev) = prev { + *self.opaque_types.get_mut(&key).unwrap() = prev; } else { // FIXME(#120456) - is `swap_remove` correct? match self.opaque_types.swap_remove(&key) { @@ -59,13 +63,12 @@ impl<'a, 'tcx> OpaqueTypeTable<'a, 'tcx> { key: OpaqueTypeKey<'tcx>, hidden_type: OpaqueHiddenType<'tcx>, ) -> Option<Ty<'tcx>> { - if let Some(decl) = self.storage.opaque_types.get_mut(&key) { - let prev = std::mem::replace(&mut decl.hidden_type, hidden_type); + if let Some(entry) = self.storage.opaque_types.get_mut(&key) { + let prev = std::mem::replace(entry, hidden_type); self.undo_log.push(UndoLog::OpaqueTypes(key, Some(prev))); return Some(prev.ty); } - let decl = OpaqueTypeDecl { hidden_type }; - self.storage.opaque_types.insert(key, decl); + self.storage.opaque_types.insert(key, hidden_type); self.undo_log.push(UndoLog::OpaqueTypes(key, None)); None } diff --git a/compiler/rustc_infer/src/traits/util.rs b/compiler/rustc_infer/src/traits/util.rs index ab8ada1596c..66ed49fe326 100644 --- a/compiler/rustc_infer/src/traits/util.rs +++ b/compiler/rustc_infer/src/traits/util.rs @@ -1,5 +1,5 @@ use rustc_data_structures::fx::FxHashSet; -use rustc_middle::ty::{self, ToPolyTraitRef, TyCtxt}; +use rustc_middle::ty::{self, TyCtxt}; use rustc_span::{Ident, Span}; pub use rustc_type_ir::elaborate::*; @@ -125,8 +125,8 @@ pub fn transitive_bounds_that_define_assoc_item<'tcx>( .iter_identity_copied() .map(|(clause, _)| clause.instantiate_supertrait(tcx, trait_ref)) .filter_map(|clause| clause.as_trait_clause()) - // FIXME: Negative supertraits are elaborated here lol - .map(|trait_pred| trait_pred.to_poly_trait_ref()), + .filter(|clause| clause.polarity() == ty::PredicatePolarity::Positive) + .map(|clause| clause.map_bound(|clause| clause.trait_ref)), ); return Some(trait_ref); diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index aff66e48fbb..241bc35857a 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -875,6 +875,8 @@ fn run_required_analyses(tcx: TyCtxt<'_>) { }); // Freeze definitions as we don't add new ones at this point. // We need to wait until now since we synthesize a by-move body + // for all coroutine-closures. + // // This improves performance by allowing lock-free access to them. 
tcx.untracked().definitions.freeze(); @@ -887,7 +889,7 @@ fn run_required_analyses(tcx: TyCtxt<'_>) { }); }); sess.time("MIR_effect_checking", || { - for def_id in tcx.hir().body_owners() { + tcx.hir().par_body_owners(|def_id| { tcx.ensure().has_ffi_unwind_calls(def_id); // If we need to codegen, ensure that we emit all errors from @@ -898,15 +900,17 @@ fn run_required_analyses(tcx: TyCtxt<'_>) { { tcx.ensure().mir_drops_elaborated_and_const_checked(def_id); } - } + }); }); - tcx.hir().par_body_owners(|def_id| { - if tcx.is_coroutine(def_id.to_def_id()) { - tcx.ensure().mir_coroutine_witnesses(def_id); - tcx.ensure().check_coroutine_obligations( - tcx.typeck_root_def_id(def_id.to_def_id()).expect_local(), - ); - } + sess.time("coroutine_obligations", || { + tcx.hir().par_body_owners(|def_id| { + if tcx.is_coroutine(def_id.to_def_id()) { + tcx.ensure().mir_coroutine_witnesses(def_id); + tcx.ensure().check_coroutine_obligations( + tcx.typeck_root_def_id(def_id.to_def_id()).expect_local(), + ); + } + }); }); sess.time("layout_testing", || layout_test::test_layout(tcx)); diff --git a/compiler/rustc_interface/src/tests.rs b/compiler/rustc_interface/src/tests.rs index 53d7c84ac3f..07c4b898721 100644 --- a/compiler/rustc_interface/src/tests.rs +++ b/compiler/rustc_interface/src/tests.rs @@ -24,6 +24,7 @@ use rustc_session::{CompilerIO, EarlyDiagCtxt, Session, build_session, filesearc use rustc_span::edition::{DEFAULT_EDITION, Edition}; use rustc_span::source_map::{RealFileLoader, SourceMapInputs}; use rustc_span::{FileName, SourceFileHashAlgorithm, sym}; +use rustc_target::abi::Align; use rustc_target::spec::{ CodeModel, FramePointer, LinkerFlavorCli, MergeFunctions, OnBrokenPipe, PanicStrategy, RelocModel, RelroLevel, SanitizerSet, SplitDebuginfo, StackProtector, TlsModel, WasmCAbi, @@ -807,6 +808,7 @@ fn test_unstable_options_tracking_hash() { tracked!(location_detail, LocationDetail { file: true, line: false, column: false }); tracked!(maximal_hir_to_mir_coverage, true); tracked!(merge_functions, Some(MergeFunctions::Disabled)); + tracked!(min_function_alignment, Some(Align::EIGHT)); tracked!(mir_emit_retag, true); tracked!(mir_enable_passes, vec![("DestProp".to_string(), false)]); tracked!(mir_keep_place_mention, true); diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index e74fb9d92e9..fb3cf5afad0 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -1271,9 +1271,8 @@ declare_lint! { /// `pub(crate)` visibility is recommended to be used instead. This more clearly expresses the /// intent that the item is only visible within its own crate. /// - /// This lint is "allow" by default because it will trigger for a large - /// amount of existing Rust code, and has some false-positives. Eventually it - /// is desired for this to become warn-by-default. + /// This lint is "allow" by default because it will trigger for a large amount of existing Rust code. + /// Eventually it is desired for this to become warn-by-default. 
/// /// [`unnameable_types`]: #unnameable-types pub UNREACHABLE_PUB, @@ -1304,9 +1303,9 @@ impl UnreachablePub { cx.effective_visibilities.effective_vis(def_id).map(|effective_vis| { effective_vis.at_level(rustc_middle::middle::privacy::Level::Reachable) }) - && let parent_parent = cx.tcx.parent_module_from_def_id( - cx.tcx.parent_module_from_def_id(def_id.into()).into(), - ) + && let parent_parent = cx + .tcx + .parent_module_from_def_id(cx.tcx.parent_module_from_def_id(def_id).into()) && *restricted_did == parent_parent.to_local_def_id() && !restricted_did.to_def_id().is_crate_root() { diff --git a/compiler/rustc_lint/src/impl_trait_overcaptures.rs b/compiler/rustc_lint/src/impl_trait_overcaptures.rs index 7f603f6a655..44f86535527 100644 --- a/compiler/rustc_lint/src/impl_trait_overcaptures.rs +++ b/compiler/rustc_lint/src/impl_trait_overcaptures.rs @@ -99,7 +99,7 @@ declare_lint! { /// To fix this, remove the `use<'a>`, since the lifetime is already captured /// since it is in scope. pub IMPL_TRAIT_REDUNDANT_CAPTURES, - Warn, + Allow, "redundant precise-capturing `use<...>` syntax on an `impl Trait`", } diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index 70dce78b572..e09049f322f 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -234,10 +234,10 @@ declare_lint! { declare_lint_pass!(NonSnakeCase => [NON_SNAKE_CASE]); impl NonSnakeCase { - fn to_snake_case(mut str: &str) -> String { + fn to_snake_case(mut name: &str) -> String { let mut words = vec![]; // Preserve leading underscores - str = str.trim_start_matches(|c: char| { + name = name.trim_start_matches(|c: char| { if c == '_' { words.push(String::new()); true @@ -245,7 +245,7 @@ impl NonSnakeCase { false } }); - for s in str.split('_') { + for s in name.split('_') { let mut last_upper = false; let mut buf = String::new(); if s.is_empty() { diff --git a/compiler/rustc_lint/src/tests.rs b/compiler/rustc_lint/src/tests.rs index 186dec5904b..f49301b0215 100644 --- a/compiler/rustc_lint/src/tests.rs +++ b/compiler/rustc_lint/src/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use rustc_span::{Symbol, create_default_session_globals_then}; diff --git a/compiler/rustc_lint/src/types/literal.rs b/compiler/rustc_lint/src/types/literal.rs index 83942918e3b..4b5163522f8 100644 --- a/compiler/rustc_lint/src/types/literal.rs +++ b/compiler/rustc_lint/src/types/literal.rs @@ -204,20 +204,35 @@ fn get_type_suggestion(t: Ty<'_>, val: u128, negative: bool) -> Option<&'static match t.kind() { ty::Uint(ty::UintTy::Usize) | ty::Int(ty::IntTy::Isize) => None, ty::Uint(_) => Some(Integer::fit_unsigned(val).uint_ty_str()), - ty::Int(_) if negative => Some(Integer::fit_signed(-(val as i128)).int_ty_str()), - ty::Int(int) => { - let signed = Integer::fit_signed(val as i128); - let unsigned = Integer::fit_unsigned(val); - Some(if Some(unsigned.size().bits()) == int.bit_width() { - unsigned.uint_ty_str() + ty::Int(_) => { + let signed = literal_to_i128(val, negative).map(Integer::fit_signed); + if negative { + signed.map(Integer::int_ty_str) } else { - signed.int_ty_str() - }) + let unsigned = Integer::fit_unsigned(val); + Some(if let Some(signed) = signed { + if unsigned.size() < signed.size() { + unsigned.uint_ty_str() + } else { + signed.int_ty_str() + } + } else { + unsigned.uint_ty_str() + }) + } } _ => None, } } +fn literal_to_i128(val: 
u128, negative: bool) -> Option<i128> { + if negative { + (val <= i128::MAX as u128 + 1).then(|| val.wrapping_neg() as i128) + } else { + val.try_into().ok() + } +} + fn lint_int_literal<'tcx>( cx: &LateContext<'tcx>, type_limits: &TypeLimits, diff --git a/compiler/rustc_lint/src/unqualified_local_imports.rs b/compiler/rustc_lint/src/unqualified_local_imports.rs index c9dd6b32d88..b27398a950c 100644 --- a/compiler/rustc_lint/src/unqualified_local_imports.rs +++ b/compiler/rustc_lint/src/unqualified_local_imports.rs @@ -12,6 +12,7 @@ declare_lint! { /// ### Example /// /// ```rust,edition2018 + /// #![feature(unqualified_local_imports)] /// #![warn(unqualified_local_imports)] /// /// mod localmod { diff --git a/compiler/rustc_lint/src/unused.rs b/compiler/rustc_lint/src/unused.rs index 8b1526bc747..3059adb3fda 100644 --- a/compiler/rustc_lint/src/unused.rs +++ b/compiler/rustc_lint/src/unused.rs @@ -1220,7 +1220,7 @@ impl EarlyLintPass for UnusedParens { // Do not lint on `(..)` as that will result in the other arms being useless. Paren(_) // The other cases do not contain sub-patterns. - | Wild | Never | Rest | Lit(..) | MacCall(..) | Range(..) | Ident(.., None) | Path(..) | Err(_) => {}, + | Wild | Never | Rest | Expr(..) | MacCall(..) | Range(..) | Ident(.., None) | Path(..) | Err(_) => {}, // These are list-like patterns; parens can always be removed. TupleStruct(_, _, ps) | Tuple(ps) | Slice(ps) | Or(ps) => for p in ps { self.check_unused_parens_pat(cx, p, false, false, keep_space); diff --git a/compiler/rustc_lint_defs/src/builtin.rs b/compiler/rustc_lint_defs/src/builtin.rs index 8399f4c12f4..9fc527a6a3a 100644 --- a/compiler/rustc_lint_defs/src/builtin.rs +++ b/compiler/rustc_lint_defs/src/builtin.rs @@ -2671,6 +2671,7 @@ declare_lint! { /// ### Example /// /// ```rust + /// #![feature(strict_provenance_lints)] /// #![warn(fuzzy_provenance_casts)] /// /// fn main() { @@ -2714,6 +2715,7 @@ declare_lint! { /// ### Example /// /// ```rust + /// #![feature(strict_provenance_lints)] /// #![warn(lossy_provenance_casts)] /// /// fn main() { @@ -3595,7 +3597,7 @@ declare_lint! { /// /// [Other ABIs]: https://doc.rust-lang.org/reference/items/external-blocks.html#abi pub MISSING_ABI, - Allow, + Warn, "No declared ABI for extern declaration" } @@ -4033,6 +4035,8 @@ declare_lint! { /// ### Example /// /// ```rust + /// // This lint is intentionally used to test the compiler's behavior + /// // when an unstable lint is enabled without the corresponding feature gate. 
/// #![allow(test_unstable_lint)] /// ``` /// diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index de14c6d1883..6447a9362b3 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -1389,20 +1389,14 @@ static bool clearDSOLocalOnDeclarations(Module &Mod, TargetMachine &TM) { return ClearDSOLocalOnDeclarations; } -extern "C" bool LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, +extern "C" void LLVMRustPrepareThinLTORename(const LLVMRustThinLTOData *Data, LLVMModuleRef M, LLVMTargetMachineRef TM) { Module &Mod = *unwrap(M); TargetMachine &Target = *unwrap(TM); bool ClearDSOLocal = clearDSOLocalOnDeclarations(Mod, Target); - bool error = renameModuleForThinLTO(Mod, Data->Index, ClearDSOLocal); - - if (error) { - LLVMRustSetLastError("renameModuleForThinLTO failed"); - return false; - } - return true; + renameModuleForThinLTO(Mod, Data->Index, ClearDSOLocal); } extern "C" bool diff --git a/compiler/rustc_log/src/lib.rs b/compiler/rustc_log/src/lib.rs index a3890fc937e..d0ef82f4a6c 100644 --- a/compiler/rustc_log/src/lib.rs +++ b/compiler/rustc_log/src/lib.rs @@ -130,11 +130,11 @@ pub fn init_logger(cfg: LoggerConfig) -> Result<(), Error> { let subscriber = tracing_subscriber::Registry::default().with(filter).with(layer); match cfg.backtrace { - Ok(str) => { + Ok(backtrace_target) => { let fmt_layer = tracing_subscriber::fmt::layer() .with_writer(io::stderr) .without_time() - .event_format(BacktraceFormatter { backtrace_target: str }); + .event_format(BacktraceFormatter { backtrace_target }); let subscriber = subscriber.with(fmt_layer); tracing::subscriber::set_global_default(subscriber).unwrap(); } diff --git a/compiler/rustc_macros/src/symbols.rs b/compiler/rustc_macros/src/symbols.rs index 2552c0a0cfc..37200f62eb5 100644 --- a/compiler/rustc_macros/src/symbols.rs +++ b/compiler/rustc_macros/src/symbols.rs @@ -156,14 +156,14 @@ impl Entries { Entries { map: HashMap::with_capacity(capacity) } } - fn insert(&mut self, span: Span, str: &str, errors: &mut Errors) -> u32 { - if let Some(prev) = self.map.get(str) { - errors.error(span, format!("Symbol `{str}` is duplicated")); + fn insert(&mut self, span: Span, s: &str, errors: &mut Errors) -> u32 { + if let Some(prev) = self.map.get(s) { + errors.error(span, format!("Symbol `{s}` is duplicated")); errors.error(prev.span_of_name, "location of previous definition".to_string()); prev.idx } else { let idx = self.len(); - self.map.insert(str.to_string(), Preinterned { idx, span_of_name: span }); + self.map.insert(s.to_string(), Preinterned { idx, span_of_name: span }); idx } } @@ -192,14 +192,14 @@ fn symbols_with_errors(input: TokenStream) -> (TokenStream, Vec<syn::Error>) { let mut entries = Entries::with_capacity(input.keywords.len() + input.symbols.len() + 10); let mut prev_key: Option<(Span, String)> = None; - let mut check_order = |span: Span, str: &str, errors: &mut Errors| { + let mut check_order = |span: Span, s: &str, errors: &mut Errors| { if let Some((prev_span, ref prev_str)) = prev_key { - if str < prev_str { - errors.error(span, format!("Symbol `{str}` must precede `{prev_str}`")); + if s < prev_str { + errors.error(span, format!("Symbol `{s}` must precede `{prev_str}`")); errors.error(prev_span, format!("location of previous symbol `{prev_str}`")); } } - prev_key = Some((span, str.to_string())); + prev_key = Some((span, s.to_string())); }; // Generate the listed keywords. 
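(Aside on the `rustc_macros/src/symbols.rs` hunk above: it only renames the local binding `str` to `s`, avoiding a name that shadows the primitive type; the duplicate- and order-checking logic is unchanged. As a rough, self-contained sketch of what `Entries::insert` and `check_order` enforce — each pre-interned symbol gets a stable index, duplicates are reported, and the list must stay sorted — the following toy program uses invented stand-ins (an `Entries` over plain `String`s and a `Vec<String>` error list), not the compiler's actual types:)

use std::collections::HashMap;

// Toy stand-in for the proc macro's symbol table: each distinct name gets a
// stable index; re-inserting a name reports a duplicate instead of silently
// allocating a new index.
struct Entries {
    map: HashMap<String, u32>,
}

impl Entries {
    fn insert(&mut self, s: &str, errors: &mut Vec<String>) -> u32 {
        if let Some(&prev_idx) = self.map.get(s) {
            errors.push(format!("Symbol `{s}` is duplicated"));
            prev_idx
        } else {
            let idx = self.map.len() as u32;
            self.map.insert(s.to_string(), idx);
            idx
        }
    }
}

fn main() {
    let mut entries = Entries { map: HashMap::new() };
    let mut errors = Vec::new();

    // The macro also checks that the listed symbols are sorted, comparing each
    // name against the previously seen one.
    let mut prev: Option<String> = None;
    for s in ["abort", "align", "abi", "abi"] {
        if let Some(p) = &prev {
            if s < p.as_str() {
                errors.push(format!("Symbol `{s}` must precede `{p}`"));
            }
        }
        prev = Some(s.to_string());
        entries.insert(s, &mut errors);
    }

    // Expect an ordering error for `abi` (it sorts before `align`) and a
    // duplicate error for the second `abi`.
    for e in &errors {
        eprintln!("{e}");
    }
}
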
diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs index c8715f94d5d..6512176cc4a 100644 --- a/compiler/rustc_metadata/src/creader.rs +++ b/compiler/rustc_metadata/src/creader.rs @@ -29,7 +29,7 @@ use rustc_session::lint::{self, BuiltinLintDiag}; use rustc_session::output::validate_crate_name; use rustc_session::search_paths::PathKind; use rustc_span::edition::Edition; -use rustc_span::{DUMMY_SP, Ident, Span, Symbol, sym}; +use rustc_span::{DUMMY_SP, Ident, STDLIB_STABLE_CRATES, Span, Symbol, sym}; use rustc_target::spec::{PanicStrategy, Target, TargetTuple}; use tracing::{debug, info, trace}; @@ -390,19 +390,51 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { None } - // The `dependency` type is determined by the command line arguments(`--extern`) and - // `private_dep`. However, sometimes the directly dependent crate is not specified by - // `--extern`, in this case, `private-dep` is none during loading. This is equivalent to the - // scenario where the command parameter is set to `public-dependency` - fn is_private_dep(&self, name: &str, private_dep: Option<bool>) -> bool { - self.sess.opts.externs.get(name).map_or(private_dep.unwrap_or(false), |e| e.is_private_dep) - && private_dep.unwrap_or(true) + /// Determine whether a dependency should be considered private. + /// + /// Dependencies are private if they get extern option specified, e.g. `--extern priv:mycrate`. + /// This is stored in metadata, so `private_dep` can be correctly set during load. A `Some` + /// value for `private_dep` indicates that the crate is known to be private or public (note + /// that any `None` or `Some(false)` use of the same crate will make it public). + /// + /// Sometimes the directly dependent crate is not specified by `--extern`, in this case, + /// `private-dep` is none during loading. This is equivalent to the scenario where the + /// command parameter is set to `public-dependency` + fn is_private_dep( + &self, + name: Symbol, + private_dep: Option<bool>, + dep_root: Option<&CratePaths>, + ) -> bool { + // Standard library crates are never private. + if STDLIB_STABLE_CRATES.contains(&name) { + tracing::info!("returning false for {name} is private"); + return false; + } + + let extern_private = self.sess.opts.externs.get(name.as_str()).map(|e| e.is_private_dep); + + // Any descendants of `std` should be private. These crates are usually not marked + // private in metadata, so we ignore that field. + if extern_private.is_none() + && dep_root.map_or(false, |d| STDLIB_STABLE_CRATES.contains(&d.name)) + { + return true; + } + + match (extern_private, private_dep) { + // Explicit non-private via `--extern`, explicit non-private from metadata, or + // unspecified with default to public. + (Some(false), _) | (_, Some(false)) | (None, None) => false, + // Marked private via `--extern priv:mycrate` or in metadata. 
+ (Some(true) | None, Some(true) | None) => true, + } } fn register_crate( &mut self, host_lib: Option<Library>, - root: Option<&CratePaths>, + dep_root: Option<&CratePaths>, lib: Library, dep_kind: CrateDepKind, name: Symbol, @@ -414,7 +446,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { let Library { source, metadata } = lib; let crate_root = metadata.get_root(); let host_hash = host_lib.as_ref().map(|lib| lib.metadata.get_root().hash()); - let private_dep = self.is_private_dep(name.as_str(), private_dep); + let private_dep = self.is_private_dep(name, private_dep, dep_root); // Claim this crate number and cache it let feed = self.cstore.intern_stable_crate_id(&crate_root, self.tcx)?; @@ -430,14 +462,14 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { // Maintain a reference to the top most crate. // Stash paths for top-most crate locally if necessary. let crate_paths; - let root = if let Some(root) = root { - root + let dep_root = if let Some(dep_root) = dep_root { + dep_root } else { crate_paths = CratePaths::new(crate_root.name(), source.clone()); &crate_paths }; - let cnum_map = self.resolve_crate_deps(root, &crate_root, &metadata, cnum, dep_kind)?; + let cnum_map = self.resolve_crate_deps(dep_root, &crate_root, &metadata, cnum, dep_kind)?; let raw_proc_macros = if crate_root.is_proc_macro_crate() { let temp_root; @@ -559,23 +591,21 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { &'b mut self, name: Symbol, mut dep_kind: CrateDepKind, - dep: Option<(&'b CratePaths, &'b CrateDep)>, + dep_of: Option<(&'b CratePaths, &'b CrateDep)>, ) -> Result<CrateNum, CrateError> { info!("resolving crate `{}`", name); if !name.as_str().is_ascii() { return Err(CrateError::NonAsciiName(name)); } - let (root, hash, host_hash, extra_filename, path_kind, private_dep) = match dep { - Some((root, dep)) => ( - Some(root), - Some(dep.hash), - dep.host_hash, - Some(&dep.extra_filename[..]), - PathKind::Dependency, - Some(dep.is_private), - ), - None => (None, None, None, None, PathKind::Crate, None), - }; + + let dep_root = dep_of.map(|d| d.0); + let dep = dep_of.map(|d| d.1); + let hash = dep.map(|d| d.hash); + let host_hash = dep.map(|d| d.host_hash).flatten(); + let extra_filename = dep.map(|d| &d.extra_filename[..]); + let path_kind = if dep.is_some() { PathKind::Dependency } else { PathKind::Crate }; + let private_dep = dep.map(|d| d.is_private); + let result = if let Some(cnum) = self.existing_match(name, hash, path_kind) { (LoadResult::Previous(cnum), None) } else { @@ -599,7 +629,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { dep_kind = CrateDepKind::MacrosOnly; match self.load_proc_macro(&mut locator, path_kind, host_hash)? { Some(res) => res, - None => return Err(locator.into_error(root.cloned())), + None => return Err(locator.into_error(dep_root.cloned())), } } } @@ -612,7 +642,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { // not specified by `--extern` on command line parameters, it may be // `private-dependency` when `register_crate` is called for the first time. Then it must be updated to // `public-dependency` here. 
- let private_dep = self.is_private_dep(name.as_str(), private_dep); + let private_dep = self.is_private_dep(name, private_dep, dep_root); let data = self.cstore.get_crate_data_mut(cnum); if data.is_proc_macro_crate() { dep_kind = CrateDepKind::MacrosOnly; @@ -623,7 +653,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { } (LoadResult::Loaded(library), host_library) => { info!("register newly loaded library for `{}`", name); - self.register_crate(host_library, root, library, dep_kind, name, private_dep) + self.register_crate(host_library, dep_root, library, dep_kind, name, private_dep) } _ => panic!(), } @@ -663,16 +693,20 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { })) } - // Go through the crate metadata and load any crates that it references + /// Go through the crate metadata and load any crates that it references. fn resolve_crate_deps( &mut self, - root: &CratePaths, + dep_root: &CratePaths, crate_root: &CrateRoot, metadata: &MetadataBlob, krate: CrateNum, dep_kind: CrateDepKind, ) -> Result<CrateNumMap, CrateError> { - debug!("resolving deps of external crate"); + debug!( + "resolving deps of external crate `{}` with dep root `{}`", + crate_root.name(), + dep_root.name + ); if crate_root.is_proc_macro_crate() { return Ok(CrateNumMap::new()); } @@ -685,14 +719,17 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { crate_num_map.push(krate); for dep in deps { info!( - "resolving dep crate {} hash: `{}` extra filename: `{}`", - dep.name, dep.hash, dep.extra_filename + "resolving dep `{}`->`{}` hash: `{}` extra filename: `{}`", + crate_root.name(), + dep.name, + dep.hash, + dep.extra_filename ); let dep_kind = match dep_kind { CrateDepKind::MacrosOnly => CrateDepKind::MacrosOnly, _ => dep.kind, }; - let cnum = self.maybe_resolve_crate(dep.name, dep_kind, Some((root, &dep)))?; + let cnum = self.maybe_resolve_crate(dep.name, dep_kind, Some((dep_root, &dep)))?; crate_num_map.push(cnum); } @@ -867,7 +904,7 @@ impl<'a, 'tcx> CrateLoader<'a, 'tcx> { // First up we check for global allocators. Look at the crate graph here // and see what's a global allocator, including if we ourselves are a // global allocator. - #[cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] + #[allow(rustc::symbol_intern_string_literal)] let this_crate = Symbol::intern("this crate"); let mut global_allocator = self.cstore.has_global_allocator.then_some(this_crate); diff --git a/compiler/rustc_metadata/src/locator.rs b/compiler/rustc_metadata/src/locator.rs index b9ebf17af24..2ddabeb49f7 100644 --- a/compiler/rustc_metadata/src/locator.rs +++ b/compiler/rustc_metadata/src/locator.rs @@ -262,7 +262,7 @@ pub(crate) struct CrateLocator<'a> { #[derive(Clone)] pub(crate) struct CratePaths { - name: Symbol, + pub(crate) name: Symbol, source: CrateSource, } @@ -765,10 +765,10 @@ impl<'a> CrateLocator<'a> { self.extract_lib(rlibs, rmetas, dylibs).map(|opt| opt.map(|(_, lib)| lib)) } - pub(crate) fn into_error(self, root: Option<CratePaths>) -> CrateError { + pub(crate) fn into_error(self, dep_root: Option<CratePaths>) -> CrateError { CrateError::LocatorCombined(Box::new(CombinedLocatorError { crate_name: self.crate_name, - root, + dep_root, triple: self.tuple, dll_prefix: self.target.dll_prefix.to_string(), dll_suffix: self.target.dll_suffix.to_string(), @@ -914,7 +914,7 @@ struct CrateRejections { /// otherwise they are ignored. 
pub(crate) struct CombinedLocatorError { crate_name: Symbol, - root: Option<CratePaths>, + dep_root: Option<CratePaths>, triple: TargetTuple, dll_prefix: String, dll_suffix: String, @@ -987,7 +987,7 @@ impl CrateError { } CrateError::LocatorCombined(locator) => { let crate_name = locator.crate_name; - let add_info = match &locator.root { + let add_info = match &locator.dep_root { None => String::new(), Some(r) => format!(" which `{}` depends on", r.name), }; @@ -1012,7 +1012,7 @@ impl CrateError { path.display() )); } - if let Some(r) = locator.root { + if let Some(r) = locator.dep_root { for path in r.source.paths() { found_crates.push_str(&format!( "\ncrate `{}`: {}", diff --git a/compiler/rustc_metadata/src/rmeta/decoder.rs b/compiler/rustc_metadata/src/rmeta/decoder.rs index c2b5e318bda..e02c4871f35 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder.rs @@ -871,7 +871,7 @@ impl MetadataBlob { let def_kind = root.tables.def_kind.get(blob, item).unwrap(); let def_key = root.tables.def_keys.get(blob, item).unwrap().decode(blob); - #[cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] + #[allow(rustc::symbol_intern_string_literal)] let def_name = if item == CRATE_DEF_INDEX { kw::Crate } else { diff --git a/compiler/rustc_metadata/src/rmeta/mod.rs b/compiler/rustc_metadata/src/rmeta/mod.rs index 4fe8f73efd6..4f9cdc9a474 100644 --- a/compiler/rustc_metadata/src/rmeta/mod.rs +++ b/compiler/rustc_metadata/src/rmeta/mod.rs @@ -15,7 +15,7 @@ use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIndex, DefPathHash, Stable use rustc_hir::definitions::DefKey; use rustc_hir::lang_items::LangItem; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_macros::{ Decodable, Encodable, MetadataDecodable, MetadataEncodable, TyDecodable, TyEncodable, }; @@ -450,7 +450,7 @@ define_tables! { trait_item_def_id: Table<DefIndex, RawDefId>, expn_that_defined: Table<DefIndex, LazyValue<ExpnId>>, default_fields: Table<DefIndex, LazyValue<DefId>>, - params_in_repr: Table<DefIndex, LazyValue<BitSet<u32>>>, + params_in_repr: Table<DefIndex, LazyValue<DenseBitSet<u32>>>, repr_options: Table<DefIndex, LazyValue<ReprOptions>>, // `def_keys` and `def_path_hashes` represent a lazy version of a // `DefPathTable`. This allows us to avoid deserializing an entire diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml index e64500f812a..2c34df6ea61 100644 --- a/compiler/rustc_middle/Cargo.toml +++ b/compiler/rustc_middle/Cargo.toml @@ -6,7 +6,6 @@ edition = "2021" [dependencies] # tidy-alphabetical-start bitflags = "2.4.1" -derive-where = "1.2.7" either = "1.5.0" field-offset = "0.3.5" gsgdt = "0.1.2" diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs index dee4c424387..5d78bed5cf8 100644 --- a/compiler/rustc_middle/src/hir/map/mod.rs +++ b/compiler/rustc_middle/src/hir/map/mod.rs @@ -938,6 +938,7 @@ impl<'hir> Map<'hir> { Node::OpaqueTy(op) => op.span, Node::Pat(pat) => pat.span, Node::PatField(field) => field.span, + Node::PatExpr(lit) => lit.span, Node::Arm(arm) => arm.span, Node::Block(block) => block.span, Node::Ctor(..) 
=> self.span_with_body(self.tcx.parent_hir_id(hir_id)), @@ -1209,6 +1210,7 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String { Node::OpaqueTy(_) => node_str("opaque type"), Node::Pat(_) => node_str("pat"), Node::PatField(_) => node_str("pattern field"), + Node::PatExpr(_) => node_str("pattern literal"), Node::Param(_) => node_str("param"), Node::Arm(_) => node_str("arm"), Node::Block(_) => node_str("block"), @@ -1244,6 +1246,7 @@ pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalModDefId) -> Mod foreign_items, body_owners, opaques, + nested_bodies, .. } = collector; ModuleItems { @@ -1254,6 +1257,7 @@ pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalModDefId) -> Mod foreign_items: foreign_items.into_boxed_slice(), body_owners: body_owners.into_boxed_slice(), opaques: opaques.into_boxed_slice(), + nested_bodies: nested_bodies.into_boxed_slice(), } } @@ -1274,6 +1278,7 @@ pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems { foreign_items, body_owners, opaques, + nested_bodies, .. } = collector; @@ -1285,6 +1290,7 @@ pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems { foreign_items: foreign_items.into_boxed_slice(), body_owners: body_owners.into_boxed_slice(), opaques: opaques.into_boxed_slice(), + nested_bodies: nested_bodies.into_boxed_slice(), } } @@ -1300,6 +1306,7 @@ struct ItemCollector<'tcx> { foreign_items: Vec<ForeignItemId>, body_owners: Vec<LocalDefId>, opaques: Vec<LocalDefId>, + nested_bodies: Vec<LocalDefId>, } impl<'tcx> ItemCollector<'tcx> { @@ -1314,6 +1321,7 @@ impl<'tcx> ItemCollector<'tcx> { foreign_items: Vec::default(), body_owners: Vec::default(), opaques: Vec::default(), + nested_bodies: Vec::default(), } } } @@ -1356,6 +1364,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { fn visit_inline_const(&mut self, c: &'hir ConstBlock) { self.body_owners.push(c.def_id); + self.nested_bodies.push(c.def_id); intravisit::walk_inline_const(self, c) } @@ -1367,6 +1376,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { fn visit_expr(&mut self, ex: &'hir Expr<'hir>) { if let ExprKind::Closure(closure) = ex.kind { self.body_owners.push(closure.def_id); + self.nested_bodies.push(closure.def_id); } intravisit::walk_expr(self, ex) } diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs index ffefd81cd08..0d2acf96d08 100644 --- a/compiler/rustc_middle/src/hir/mod.rs +++ b/compiler/rustc_middle/src/hir/mod.rs @@ -30,6 +30,7 @@ pub struct ModuleItems { foreign_items: Box<[ForeignItemId]>, opaques: Box<[LocalDefId]>, body_owners: Box<[LocalDefId]>, + nested_bodies: Box<[LocalDefId]>, } impl ModuleItems { @@ -70,6 +71,10 @@ impl ModuleItems { self.opaques.iter().copied() } + pub fn nested_bodies(&self) -> impl Iterator<Item = LocalDefId> + '_ { + self.nested_bodies.iter().copied() + } + pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ { self.owners().map(|id| id.def_id) } diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs index 16d868300db..e05f42af6fd 100644 --- a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs +++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs @@ -30,6 +30,8 @@ pub struct CodegenFnAttrs { /// features (only enabled features are supported right now). /// Implied target features have already been applied. 
pub target_features: Vec<TargetFeature>, + /// Whether the function was declared safe, but has target features + pub safe_target_features: bool, /// The `#[linkage = "..."]` attribute on Rust-defined items and the value we found. pub linkage: Option<Linkage>, /// The `#[linkage = "..."]` attribute on foreign items and the value we found. @@ -150,6 +152,7 @@ impl CodegenFnAttrs { link_name: None, link_ordinal: None, target_features: vec![], + safe_target_features: false, linkage: None, import_linkage: None, link_section: None, diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 84c3c2eb49e..77a7da2c74b 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -30,6 +30,14 @@ pub enum StabilityLevel { Stable, } +#[derive(Copy, Clone)] +pub enum UnstableKind { + /// Enforcing regular stability of an item + Regular, + /// Enforcing const stability of an item + Const(Span), +} + /// An entry in the `depr_map`. #[derive(Copy, Clone, HashStable, Debug, Encodable, Decodable)] pub struct DeprecationEntry { @@ -108,10 +116,16 @@ pub fn report_unstable( is_soft: bool, span: Span, soft_handler: impl FnOnce(&'static Lint, Span, String), + kind: UnstableKind, ) { + let qual = match kind { + UnstableKind::Regular => "", + UnstableKind::Const(_) => " const", + }; + let msg = match reason { - Some(r) => format!("use of unstable library feature `{feature}`: {r}"), - None => format!("use of unstable library feature `{feature}`"), + Some(r) => format!("use of unstable{qual} library feature `{feature}`: {r}"), + None => format!("use of unstable{qual} library feature `{feature}`"), }; if is_soft { @@ -121,6 +135,9 @@ pub fn report_unstable( if let Some((inner_types, msg, sugg, applicability)) = suggestion { err.span_suggestion(inner_types, msg, sugg, applicability); } + if let UnstableKind::Const(kw) = kind { + err.span_label(kw, "trait is not stable as const yet"); + } err.emit(); } } @@ -232,9 +249,18 @@ fn late_report_deprecation( return; } + let is_in_effect = depr.is_in_effect(); + let lint = deprecation_lint(is_in_effect); + + // Calculating message for lint involves calling `self.def_path_str`, + // which will by default invoke the expensive `visible_parent_map` query. + // Skip all that work if the lint is allowed anyway. + if tcx.lint_level_at_node(lint, hir_id).0 == Level::Allow { + return; + } + let def_path = with_no_trimmed_paths!(tcx.def_path_str(def_id)); let def_kind = tcx.def_descr(def_id); - let is_in_effect = depr.is_in_effect(); let method_span = method_span.unwrap_or(span); let suggestion = @@ -250,7 +276,7 @@ fn late_report_deprecation( note: depr.note, since_kind: deprecated_since_kind(is_in_effect, depr.since), }; - tcx.emit_node_span_lint(deprecation_lint(is_in_effect), hir_id, method_span, diag); + tcx.emit_node_span_lint(lint, hir_id, method_span, diag); } /// Result of `TyCtxt::eval_stability`. @@ -360,13 +386,7 @@ impl<'tcx> TyCtxt<'tcx> { // hierarchy. let depr_attr = &depr_entry.attr; if !skip || depr_attr.is_since_rustc_version() { - // Calculating message for lint involves calling `self.def_path_str`. - // Which by default to calculate visible path will invoke expensive `visible_parent_map` query. - // So we skip message calculation altogether, if lint is allowed. 
- let lint = deprecation_lint(depr_attr.is_in_effect()); - if self.lint_level_at_node(lint, id).0 != Level::Allow { - late_report_deprecation(self, depr_attr, span, method_span, id, def_id); - } + late_report_deprecation(self, depr_attr, span, method_span, id, def_id); } }; } @@ -587,6 +607,7 @@ impl<'tcx> TyCtxt<'tcx> { is_soft, span, soft_handler, + UnstableKind::Regular, ), EvalResult::Unmarked => unmarked(span, def_id), } @@ -594,6 +615,73 @@ impl<'tcx> TyCtxt<'tcx> { is_allowed } + /// This function is analogous to `check_optional_stability` but with the logic in + /// `eval_stability_allow_unstable` inlined, and which operating on const stability + /// instead of regular stability. + /// + /// This enforces *syntactical* const stability of const traits. In other words, + /// it enforces the ability to name `~const`/`const` traits in trait bounds in various + /// syntax positions in HIR (including in the trait of an impl header). + pub fn check_const_stability(self, def_id: DefId, span: Span, const_kw_span: Span) { + let is_staged_api = self.lookup_stability(def_id.krate.as_def_id()).is_some(); + if !is_staged_api { + return; + } + + // Only the cross-crate scenario matters when checking unstable APIs + let cross_crate = !def_id.is_local(); + if !cross_crate { + return; + } + + let stability = self.lookup_const_stability(def_id); + debug!( + "stability: \ + inspecting def_id={:?} span={:?} of stability={:?}", + def_id, span, stability + ); + + match stability { + Some(ConstStability { + level: attr::StabilityLevel::Unstable { reason, issue, is_soft, implied_by, .. }, + feature, + .. + }) => { + assert!(!is_soft); + + if span.allows_unstable(feature) { + debug!("body stability: skipping span={:?} since it is internal", span); + return; + } + if self.features().enabled(feature) { + return; + } + + // If this item was previously part of a now-stabilized feature which is still + // enabled (i.e. the user hasn't removed the attribute for the stabilized feature + // yet) then allow use of this item. 
+ if let Some(implied_by) = implied_by + && self.features().enabled(implied_by) + { + return; + } + + report_unstable( + self.sess, + feature, + reason.to_opt_reason(), + issue, + None, + false, + span, + |_, _, _| {}, + UnstableKind::Const(const_kw_span), + ); + } + Some(_) | None => {} + } + } + pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> { self.lookup_deprecation_entry(id).map(|depr| depr.attr) } diff --git a/compiler/rustc_middle/src/mir/consts.rs b/compiler/rustc_middle/src/mir/consts.rs index 1231ea88569..60e1ff1d049 100644 --- a/compiler/rustc_middle/src/mir/consts.rs +++ b/compiler/rustc_middle/src/mir/consts.rs @@ -467,9 +467,6 @@ impl<'tcx> Const<'tcx> { let const_val = tcx.valtree_to_const_val((ty, valtree)); Self::Val(const_val, ty) } - ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, args }) => { - Self::Unevaluated(UnevaluatedConst { def, args, promoted: None }, ty) - } _ => Self::Ty(ty, c), } } diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs index 29f26180c97..65f51ae9d39 100644 --- a/compiler/rustc_middle/src/mir/coverage.rs +++ b/compiler/rustc_middle/src/mir/coverage.rs @@ -3,7 +3,7 @@ use std::fmt::{self, Debug, Formatter}; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable}; use rustc_span::Span; @@ -71,11 +71,7 @@ impl ConditionId { /// Enum that can hold a constant zero value, the ID of an physical coverage /// counter, or the ID of a coverage-counter expression. -/// -/// This was originally only used for expression operands (and named `Operand`), -/// but the zero/counter/expression distinction is also useful for representing -/// the value of code/gap mappings, and the true/false arms of branch mappings. -#[derive(Copy, Clone, PartialEq, Eq)] +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord)] #[derive(TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)] pub enum CovTerm { Zero, @@ -171,7 +167,7 @@ impl Op { } } -#[derive(Clone, Debug)] +#[derive(Clone, Debug, PartialEq, Eq)] #[derive(TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)] pub struct Expression { pub lhs: CovTerm, @@ -303,8 +299,8 @@ pub struct MCDCDecisionSpan { /// Used by the `coverage_ids_info` query. #[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)] pub struct CoverageIdsInfo { - pub counters_seen: BitSet<CounterId>, - pub zero_expressions: BitSet<ExpressionId>, + pub counters_seen: DenseBitSet<CounterId>, + pub zero_expressions: DenseBitSet<ExpressionId>, } impl CoverageIdsInfo { diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index 8d73c9e76de..b88137544bc 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -16,7 +16,6 @@ use rustc_abi::{AddressSpace, Align, Endian, HasDataLayout, Size}; use rustc_ast::{LitKind, Mutability}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lock; -use rustc_errors::ErrorGuaranteed; use rustc_hir::def::DefKind; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable}; @@ -84,16 +83,6 @@ pub struct LitToConstInput<'tcx> { pub neg: bool, } -/// Error type for `tcx.lit_to_const`. 
-#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)] -pub enum LitToConstError { - /// The literal's inferred type did not match the expected `ty` in the input. - /// This is used for graceful error handling (`span_delayed_bug`) in - /// type checking (`Const::from_anon_const`). - TypeError, - Reported(ErrorGuaranteed), -} - #[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)] pub struct AllocId(pub NonZero<u64>); diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index 98ef7d58a50..bbb8bdce4a0 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -21,7 +21,7 @@ use rustc_hir::def_id::{CRATE_DEF_ID, DefId}; use rustc_hir::{ self as hir, BindingMode, ByRef, CoroutineDesugaring, CoroutineKind, HirId, ImplicitSelfKind, }; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{Idx, IndexSlice, IndexVec}; use rustc_macros::{HashStable, TyDecodable, TyEncodable, TypeFoldable, TypeVisitable}; use rustc_serialize::{Decodable, Encodable}; diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index 27168b2a9f2..111c3b6956a 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -132,9 +132,10 @@ impl<'tcx> MonoItem<'tcx> { // creating one copy of this `#[inline]` function which may // conflict with upstream crates as it could be an exported // symbol. - match tcx.codegen_fn_attrs(instance.def_id()).inline { - InlineAttr::Always => InstantiationMode::LocalCopy, - _ => InstantiationMode::GloballyShared { may_conflict: true }, + if tcx.codegen_fn_attrs(instance.def_id()).inline.always() { + InstantiationMode::LocalCopy + } else { + InstantiationMode::GloballyShared { may_conflict: true } } } MonoItem::Static(..) | MonoItem::GlobalAsm(..) => { diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 47522f00bb1..ea35323ccc7 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -1068,6 +1068,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { pretty_print_const(b, fmt, false)?; write!(fmt, "]") } + Len(ref a) => write!(fmt, "Len({a:?})"), Cast(ref kind, ref place, ref ty) => { with_no_trimmed_paths!(write!(fmt, "{place:?} as {ty} ({kind:?})")) } @@ -1555,16 +1556,22 @@ pub fn write_allocations<'tcx>( write!(w, " (vtable: impl {dyn_ty} for {ty})")? } Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => { - match tcx.eval_static_initializer(did) { - Ok(alloc) => { - write!(w, " (static: {}, ", tcx.def_path_str(did))?; - write_allocation_track_relocs(w, alloc)?; + write!(w, " (static: {}", tcx.def_path_str(did))?; + if body.phase <= MirPhase::Runtime(RuntimePhase::PostCleanup) + && tcx.hir().body_const_context(body.source.def_id()).is_some() + { + // Statics may be cyclic and evaluating them too early + // in the MIR pipeline may cause cycle errors even though + // normal compilation is fine. 
+ write!(w, ")")?; + } else { + match tcx.eval_static_initializer(did) { + Ok(alloc) => { + write!(w, ", ")?; + write_allocation_track_relocs(w, alloc)?; + } + Err(_) => write!(w, ", error during initializer evaluation)")?, } - Err(_) => write!( - w, - " (static: {}, error during initializer evaluation)", - tcx.def_path_str(did) - )?, } } Some(GlobalAlloc::Static(did)) => { diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs index 429be9bc725..db5da941f1e 100644 --- a/compiler/rustc_middle/src/mir/query.rs +++ b/compiler/rustc_middle/src/mir/query.rs @@ -3,7 +3,6 @@ use std::cell::Cell; use std::fmt::{self, Debug}; -use derive_where::derive_where; use rustc_abi::{FieldIdx, VariantIdx}; use rustc_data_structures::fx::FxIndexMap; use rustc_errors::ErrorGuaranteed; @@ -225,29 +224,22 @@ rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16); /// See also `rustc_const_eval::borrow_check::constraints`. #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] #[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)] -#[derive_where(PartialOrd, Ord)] pub enum ConstraintCategory<'tcx> { Return(ReturnConstraint), Yield, UseAsConst, UseAsStatic, - TypeAnnotation, + TypeAnnotation(AnnotationSource), Cast { /// Whether this cast is a coercion that was automatically inserted by the compiler. is_implicit_coercion: bool, /// Whether this is an unsizing coercion and if yes, this contains the target type. /// Region variables are erased to ReErased. - #[derive_where(skip)] unsize_to: Option<Ty<'tcx>>, }, - /// A constraint that came from checking the body of a closure. - /// - /// We try to get the category that the closure used when reporting this. - ClosureBounds, - /// Contains the function type if available. - CallArgument(#[derive_where(skip)] Option<Ty<'tcx>>), + CallArgument(Option<Ty<'tcx>>), CopyBound, SizedBound, Assignment, @@ -276,13 +268,22 @@ pub enum ConstraintCategory<'tcx> { IllegalUniverse, } -#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] #[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)] pub enum ReturnConstraint { Normal, ClosureUpvar(FieldIdx), } +#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash)] +#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)] +pub enum AnnotationSource { + Ascription, + Declaration, + OpaqueCast, + GenericArg, +} + /// The subject of a `ClosureOutlivesRequirement` -- that is, the thing /// that must outlive some region. #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] diff --git a/compiler/rustc_middle/src/mir/statement.rs b/compiler/rustc_middle/src/mir/statement.rs index da3fa9e324a..609d5647d04 100644 --- a/compiler/rustc_middle/src/mir/statement.rs +++ b/compiler/rustc_middle/src/mir/statement.rs @@ -424,6 +424,7 @@ impl<'tcx> Rvalue<'tcx> { | Rvalue::Ref(_, _, _) | Rvalue::ThreadLocalRef(_) | Rvalue::RawPtr(_, _) + | Rvalue::Len(_) | Rvalue::Cast( CastKind::IntToInt | CastKind::FloatToInt @@ -455,6 +456,8 @@ impl BorrowKind { } } + /// Returns whether borrows represented by this kind are allowed to be split into separate + /// Reservation and Activation phases. 
pub fn allows_two_phase_borrow(&self) -> bool { match *self { BorrowKind::Shared diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index bbbaffc5a35..0c17a2e0fe5 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -1351,6 +1351,16 @@ pub enum Rvalue<'tcx> { /// model. RawPtr(Mutability, Place<'tcx>), + /// Yields the length of the place, as a `usize`. + /// + /// If the type of the place is an array, this is the array length. For slices (`[T]`, not + /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is + /// ill-formed for places of other types. + /// + /// This cannot be a `UnOp(PtrMetadata, _)` because that expects a value, and we only + /// have a place, and `UnOp(PtrMetadata, RawPtr(place))` is not a thing. + Len(Place<'tcx>), + /// Performs essentially all of the casts that can be performed via `as`. /// /// This allows for casts from/to a variety of types. diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs index cbb26b83c79..db77017310a 100644 --- a/compiler/rustc_middle/src/mir/tcx.rs +++ b/compiler/rustc_middle/src/mir/tcx.rs @@ -210,6 +210,7 @@ impl<'tcx> Rvalue<'tcx> { let place_ty = place.ty(local_decls, tcx).ty; Ty::new_ptr(tcx, place_ty, mutability) } + Rvalue::Len(..) => tcx.types.usize, Rvalue::Cast(.., ty) => ty, Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => { let lhs_ty = lhs.ty(local_decls, tcx); diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs index b8b74da401c..0e7dcc24daf 100644 --- a/compiler/rustc_middle/src/mir/traversal.rs +++ b/compiler/rustc_middle/src/mir/traversal.rs @@ -21,7 +21,7 @@ use super::*; #[derive(Clone)] pub struct Preorder<'a, 'tcx> { body: &'a Body<'tcx>, - visited: BitSet<BasicBlock>, + visited: DenseBitSet<BasicBlock>, worklist: Vec<BasicBlock>, root_is_start_block: bool, } @@ -32,7 +32,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> { Preorder { body, - visited: BitSet::new_empty(body.basic_blocks.len()), + visited: DenseBitSet::new_empty(body.basic_blocks.len()), worklist, root_is_start_block: root == START_BLOCK, } @@ -106,7 +106,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { /// A Postorder traversal of this graph is `D B C A` or `D C B A` pub struct Postorder<'a, 'tcx, C> { basic_blocks: &'a IndexSlice<BasicBlock, BasicBlockData<'tcx>>, - visited: BitSet<BasicBlock>, + visited: DenseBitSet<BasicBlock>, visit_stack: Vec<(BasicBlock, Successors<'a>)>, root_is_start_block: bool, extra: C, @@ -123,7 +123,7 @@ where ) -> Postorder<'a, 'tcx, C> { let mut po = Postorder { basic_blocks, - visited: BitSet::new_empty(basic_blocks.len()), + visited: DenseBitSet::new_empty(basic_blocks.len()), visit_stack: Vec::new(), root_is_start_block: root == START_BLOCK, extra, @@ -285,8 +285,8 @@ pub fn reachable<'a, 'tcx>( preorder(body) } -/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`. -pub fn reachable_as_bitset(body: &Body<'_>) -> BitSet<BasicBlock> { +/// Returns a `DenseBitSet` containing all basic blocks reachable from the `START_BLOCK`. +pub fn reachable_as_bitset(body: &Body<'_>) -> DenseBitSet<BasicBlock> { let mut iter = preorder(body); while let Some(_) = iter.next() {} iter.visited @@ -340,13 +340,13 @@ pub fn mono_reachable<'a, 'tcx>( MonoReachable::new(body, tcx, instance) } -/// [`MonoReachable`] internally accumulates a [`BitSet`] of visited blocks. 
This is just a +/// [`MonoReachable`] internally accumulates a [`DenseBitSet`] of visited blocks. This is just a /// convenience function to run that traversal then extract its set of reached blocks. pub fn mono_reachable_as_bitset<'a, 'tcx>( body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, -) -> BitSet<BasicBlock> { +) -> DenseBitSet<BasicBlock> { let mut iter = mono_reachable(body, tcx, instance); while let Some(_) = iter.next() {} iter.visited @@ -356,11 +356,11 @@ pub struct MonoReachable<'a, 'tcx> { body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, - visited: BitSet<BasicBlock>, + visited: DenseBitSet<BasicBlock>, // Other traversers track their worklist in a Vec. But we don't care about order, so we can - // store ours in a BitSet and thus save allocations because BitSet has a small size + // store ours in a DenseBitSet and thus save allocations because DenseBitSet has a small size // optimization. - worklist: BitSet<BasicBlock>, + worklist: DenseBitSet<BasicBlock>, } impl<'a, 'tcx> MonoReachable<'a, 'tcx> { @@ -369,13 +369,13 @@ impl<'a, 'tcx> MonoReachable<'a, 'tcx> { tcx: TyCtxt<'tcx>, instance: Instance<'tcx>, ) -> MonoReachable<'a, 'tcx> { - let mut worklist = BitSet::new_empty(body.basic_blocks.len()); + let mut worklist = DenseBitSet::new_empty(body.basic_blocks.len()); worklist.insert(START_BLOCK); MonoReachable { body, tcx, instance, - visited: BitSet::new_empty(body.basic_blocks.len()), + visited: DenseBitSet::new_empty(body.basic_blocks.len()), worklist, } } diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 12a024a219e..058acbd4024 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -695,6 +695,14 @@ macro_rules! make_mir_visitor { self.visit_place(path, ctx, location); } + Rvalue::Len(path) => { + self.visit_place( + path, + PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect), + location + ); + } + Rvalue::Cast(_cast_kind, operand, ty) => { self.visit_operand(operand, location); self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)); diff --git a/compiler/rustc_middle/src/query/erase.rs b/compiler/rustc_middle/src/query/erase.rs index b72c0e776fe..1676afb4b6e 100644 --- a/compiler/rustc_middle/src/query/erase.rs +++ b/compiler/rustc_middle/src/query/erase.rs @@ -141,14 +141,6 @@ impl EraseType for Result<rustc_abi::TyAndLayout<'_, Ty<'_>>, &ty::layout::Layou >()]; } -impl EraseType for Result<ty::Const<'_>, mir::interpret::LitToConstError> { - type Result = [u8; size_of::<Result<ty::Const<'static>, mir::interpret::LitToConstError>>()]; -} - -impl EraseType for Result<mir::Const<'_>, mir::interpret::LitToConstError> { - type Result = [u8; size_of::<Result<mir::Const<'static>, mir::interpret::LitToConstError>>()]; -} - impl EraseType for Result<mir::ConstAlloc<'_>, mir::interpret::ErrorHandled> { type Result = [u8; size_of::<Result<mir::ConstAlloc<'static>, mir::interpret::ErrorHandled>>()]; } @@ -296,7 +288,6 @@ trivial! 
{ rustc_middle::mir::interpret::AllocId, rustc_middle::mir::interpret::CtfeProvenance, rustc_middle::mir::interpret::ErrorHandled, - rustc_middle::mir::interpret::LitToConstError, rustc_middle::thir::ExprId, rustc_middle::traits::CodegenObligationError, rustc_middle::traits::EvaluationResult, diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 95995b956cd..65e93c3a1cc 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -57,7 +57,7 @@ use crate::middle::resolve_bound_vars::{ObjectLifetimeDefault, ResolveBoundVars, use crate::middle::stability::{self, DeprecationEntry}; use crate::mir::interpret::{ EvalStaticInitializerRawResult, EvalToAllocationRawResult, EvalToConstValueResult, - EvalToValTreeResult, GlobalId, LitToConstError, LitToConstInput, + EvalToValTreeResult, GlobalId, LitToConstInput, }; use crate::mir::mono::{CodegenUnit, CollectionMode, MonoItem}; use crate::query::erase::{Erase, erase, restore}; @@ -297,7 +297,7 @@ rustc_queries! { separate_provide_extern } - query unsizing_params_for_adt(key: DefId) -> &'tcx rustc_index::bit_set::BitSet<u32> + query unsizing_params_for_adt(key: DefId) -> &'tcx rustc_index::bit_set::DenseBitSet<u32> { arena_cache desc { |tcx| @@ -494,7 +494,7 @@ rustc_queries! { } /// Set of param indexes for type params that are in the type's representation - query params_in_repr(key: DefId) -> &'tcx rustc_index::bit_set::BitSet<u32> { + query params_in_repr(key: DefId) -> &'tcx rustc_index::bit_set::DenseBitSet<u32> { desc { "finding type parameters in the representation" } arena_cache no_hash @@ -1164,8 +1164,7 @@ rustc_queries! { } /// Check whether the function has any recursion that could cause the inliner to trigger - /// a cycle. Returns the call stack causing the cycle. The call stack does not contain the - /// current function, just all intermediate functions. + /// a cycle. query mir_callgraph_reachable(key: (ty::Instance<'tcx>, LocalDefId)) -> bool { fatal_cycle desc { |tcx| @@ -1243,6 +1242,7 @@ rustc_queries! { "simplifying constant for the type system `{}`", key.value.display(tcx) } + depth_limit cache_on_disk_if { true } } @@ -1268,7 +1268,7 @@ rustc_queries! { // FIXME get rid of this with valtrees query lit_to_const( key: LitToConstInput<'tcx> - ) -> Result<ty::Const<'tcx>, LitToConstError> { + ) -> ty::Const<'tcx> { desc { "converting literal to const" } } @@ -2128,6 +2128,8 @@ rustc_queries! { eval_always desc { "calculating the stability index for the local crate" } } + /// All available crates in the graph, including those that should not be user-facing + /// (such as private crates). query crates(_: ()) -> &'tcx [CrateNum] { eval_always desc { "fetching all foreign CrateNum instances" } diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 3337f7ceee7..2cb6f6d8c6e 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -548,7 +548,6 @@ macro_rules! 
define_feedable { let dep_node_index = tcx.dep_graph.with_feed_task( dep_node, tcx, - key, &value, hash_result!([$($modifiers)*]), ); diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index d26c007d227..7035e641f39 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -60,7 +60,7 @@ use crate::dep_graph::{DepGraph, DepKindStruct}; use crate::infer::canonical::{CanonicalParamEnvCache, CanonicalVarInfo, CanonicalVarInfos}; use crate::lint::lint_level; use crate::metadata::ModChild; -use crate::middle::codegen_fn_attrs::CodegenFnAttrs; +use crate::middle::codegen_fn_attrs::{CodegenFnAttrs, TargetFeature}; use crate::middle::{resolve_bound_vars, stability}; use crate::mir::interpret::{self, Allocation, ConstAllocation}; use crate::mir::{Body, Local, Place, PlaceElem, ProjectionKind, Promoted}; @@ -613,7 +613,7 @@ impl<'tcx> Interner for TyCtxt<'tcx> { self.coroutine_is_async_gen(coroutine_def_id) } - type UnsizingParams = &'tcx rustc_index::bit_set::BitSet<u32>; + type UnsizingParams = &'tcx rustc_index::bit_set::DenseBitSet<u32>; fn unsizing_params_for_adt(self, adt_def_id: DefId) -> Self::UnsizingParams { self.unsizing_params_for_adt(adt_def_id) } @@ -1776,6 +1776,37 @@ impl<'tcx> TyCtxt<'tcx> { pub fn dcx(self) -> DiagCtxtHandle<'tcx> { self.sess.dcx() } + + pub fn is_target_feature_call_safe( + self, + callee_features: &[TargetFeature], + body_features: &[TargetFeature], + ) -> bool { + // If the called function has target features the calling function hasn't, + // the call requires `unsafe`. Don't check this on wasm + // targets, though. For more information on wasm see the + // is_like_wasm check in hir_analysis/src/collect.rs + self.sess.target.options.is_like_wasm + || callee_features + .iter() + .all(|feature| body_features.iter().any(|f| f.name == feature.name)) + } + + /// Returns the safe version of the signature of the given function, if calling it + /// would be safe in the context of the given caller. + pub fn adjust_target_feature_sig( + self, + fun_def: DefId, + fun_sig: ty::Binder<'tcx, ty::FnSig<'tcx>>, + caller: DefId, + ) -> Option<ty::Binder<'tcx, ty::FnSig<'tcx>>> { + let fun_features = &self.codegen_fn_attrs(fun_def).target_features; + let callee_features = &self.codegen_fn_attrs(caller).target_features; + if self.is_target_feature_call_safe(&fun_features, &callee_features) { + return Some(fun_sig.map_bound(|sig| ty::FnSig { safety: hir::Safety::Safe, ..sig })); + } + None + } } impl<'tcx> TyCtxtAt<'tcx> { @@ -2078,12 +2109,23 @@ impl<'tcx> TyCtxt<'tcx> { self.limits(()).move_size_limit } + /// All traits in the crate graph, including those not visible to the user. pub fn all_traits(self) -> impl Iterator<Item = DefId> + 'tcx { iter::once(LOCAL_CRATE) .chain(self.crates(()).iter().copied()) .flat_map(move |cnum| self.traits(cnum).iter().copied()) } + /// All traits that are visible within the crate graph (i.e. excluding private dependencies). 
+ pub fn visible_traits(self) -> impl Iterator<Item = DefId> + 'tcx { + let visible_crates = + self.crates(()).iter().copied().filter(move |cnum| self.is_user_visible_dep(*cnum)); + + iter::once(LOCAL_CRATE) + .chain(visible_crates) + .flat_map(move |cnum| self.traits(cnum).iter().copied()) + } + #[inline] pub fn local_visibility(self, def_id: LocalDefId) -> Visibility { self.visibility(def_id).expect_local() diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 714094db053..35fbaa99569 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -109,6 +109,9 @@ impl<'tcx> TypeError<'tcx> { TypeError::ConstMismatch(ref values) => { format!("expected `{}`, found `{}`", values.expected, values.found).into() } + TypeError::ForceInlineCast => { + "cannot coerce functions which must be inlined to function pointers".into() + } TypeError::IntrinsicCast => "cannot coerce intrinsics to function pointers".into(), TypeError::TargetFeatureCast(_) => { "cannot coerce functions with `#[target_feature]` to safe function pointers".into() diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index 49b5588e261..e4ded2c30f5 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -5,7 +5,7 @@ use std::path::PathBuf; use rustc_data_structures::fx::FxHashMap; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; -use rustc_hir::def::Namespace; +use rustc_hir::def::{CtorKind, DefKind, Namespace}; use rustc_hir::def_id::{CrateNum, DefId}; use rustc_hir::lang_items::LangItem; use rustc_index::bit_set::FiniteBitSet; @@ -498,7 +498,8 @@ impl<'tcx> Instance<'tcx> { /// Resolves a `(def_id, args)` pair to an (optional) instance -- most commonly, /// this is used to find the precise code that will run for a trait method invocation, - /// if known. + /// if known. This should only be used for functions and consts. If you want to + /// resolve an associated type, use [`TyCtxt::try_normalize_erasing_regions`]. /// /// Returns `Ok(None)` if we cannot resolve `Instance` to a specific instance. /// For example, in a context like this, @@ -527,6 +528,23 @@ impl<'tcx> Instance<'tcx> { def_id: DefId, args: GenericArgsRef<'tcx>, ) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> { + assert_matches!( + tcx.def_kind(def_id), + DefKind::Fn + | DefKind::AssocFn + | DefKind::Const + | DefKind::AssocConst + | DefKind::AnonConst + | DefKind::InlineConst + | DefKind::Static { .. } + | DefKind::Ctor(_, CtorKind::Fn) + | DefKind::Closure + | DefKind::SyntheticCoroutineBody, + "`Instance::try_resolve` should only be used to resolve instances of \ + functions, statics, and consts; to resolve associated types, use \ + `try_normalize_erasing_regions`." + ); + // Rust code can easily create exponentially-long types using only a // polynomial recursion depth. 
Even with the default recursion // depth, you can easily get cases that take >2^60 steps to run, diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index 6e6da6de749..1e67cdfc32a 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -770,6 +770,7 @@ where size: Size::ZERO, max_repr_align: None, unadjusted_abi_align: tcx.data_layout.i8_align.abi, + randomization_seed: 0, }) } @@ -1240,6 +1241,7 @@ pub fn fn_can_unwind(tcx: TyCtxt<'_>, fn_def_id: Option<DefId>, abi: ExternAbi) PtxKernel | Msp430Interrupt | X86Interrupt + | GpuKernel | EfiApi | AvrInterrupt | AvrNonBlockingInterrupt diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs index 30a5586f59c..6718493f6b3 100644 --- a/compiler/rustc_middle/src/ty/list.rs +++ b/compiler/rustc_middle/src/ty/list.rs @@ -21,7 +21,7 @@ use crate::arena::Arena; /// pointer. /// - Because of this, you cannot get a `List<T>` that is a sub-list of another /// `List<T>`. You can get a sub-slice `&[T]`, however. -/// - `List<T>` can be used with `CopyTaggedPtr`, which is useful within +/// - `List<T>` can be used with `TaggedRef`, which is useful within /// structs whose size must be minimized. /// - Because of the uniqueness assumption, we can use the address of a /// `List<T>` for faster equality comparisons and hashing. diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 5e929fbec0b..ca70ae794c5 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -32,7 +32,7 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::intern::Interned; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; -use rustc_errors::{Diag, ErrorGuaranteed, StashKey}; +use rustc_errors::{Diag, ErrorGuaranteed}; use rustc_hir::LangItem; use rustc_hir::def::{CtorKind, CtorOf, DefKind, DocLinkResMap, LifetimeRes, Res}; use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LocalDefIdMap}; @@ -79,8 +79,7 @@ pub use self::predicate::{ PolyExistentialPredicate, PolyExistentialProjection, PolyExistentialTraitRef, PolyProjectionPredicate, PolyRegionOutlivesPredicate, PolySubtypePredicate, PolyTraitPredicate, PolyTraitRef, PolyTypeOutlivesPredicate, Predicate, PredicateKind, ProjectionPredicate, - RegionOutlivesPredicate, SubtypePredicate, ToPolyTraitRef, TraitPredicate, TraitRef, - TypeOutlivesPredicate, + RegionOutlivesPredicate, SubtypePredicate, TraitPredicate, TraitRef, TypeOutlivesPredicate, }; pub use self::region::{ BoundRegion, BoundRegionKind, EarlyParamRegion, LateParamRegion, LateParamRegionKind, Region, @@ -222,6 +221,7 @@ pub struct DelegationFnSig { pub param_count: usize, pub has_self: bool, pub c_variadic: bool, + pub target_feature: bool, } #[derive(Clone, Copy, Debug)] @@ -782,18 +782,8 @@ impl<'tcx> OpaqueHiddenType<'tcx> { pub fn build_mismatch_error( &self, other: &Self, - opaque_def_id: LocalDefId, tcx: TyCtxt<'tcx>, ) -> Result<Diag<'tcx>, ErrorGuaranteed> { - // We used to cancel here for slightly better error messages, but - // cancelling stashed diagnostics is no longer allowed because it - // causes problems when tracking whether errors have actually - // occurred. 
- tcx.sess.dcx().try_steal_modify_and_emit_err( - tcx.def_span(opaque_def_id), - StashKey::OpaqueHiddenTypeMismatch, - |_err| {}, - ); (self.ty, other.ty).error_reported()?; // Found different concrete types for the opaque type. let sub_diag = if self.span == other.span { @@ -1417,8 +1407,8 @@ impl Hash for FieldDef { impl<'tcx> FieldDef { /// Returns the type of this field. The resulting type is not normalized. The `arg` is /// typically obtained via the second field of [`TyKind::Adt`]. - pub fn ty(&self, tcx: TyCtxt<'tcx>, arg: GenericArgsRef<'tcx>) -> Ty<'tcx> { - tcx.type_of(self.did).instantiate(tcx, arg) + pub fn ty(&self, tcx: TyCtxt<'tcx>, args: GenericArgsRef<'tcx>) -> Ty<'tcx> { + tcx.type_of(self.did).instantiate(tcx, args) } /// Computes the `Ident` of this variant by looking up the `Span` diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs index f611b69905c..e86e01451fe 100644 --- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs +++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs @@ -165,10 +165,14 @@ impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> { arg: ty::GenericArg<'tcx>, ) -> ty::GenericArg<'tcx> { let arg = self.typing_env.as_query_input(arg); - self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| bug!( - "Failed to normalize {:?}, maybe try to call `try_normalize_erasing_regions` instead", - arg.value - )) + self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| { + bug!( + "Failed to normalize {:?} in typing_env={:?}, \ + maybe try to call `try_normalize_erasing_regions` instead", + arg.value, + self.typing_env, + ) + }) } } diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs index 86a95827e84..6b6c6f3c72f 100644 --- a/compiler/rustc_middle/src/ty/parameterized.rs +++ b/compiler/rustc_middle/src/ty/parameterized.rs @@ -96,7 +96,7 @@ trivially_parameterized_over_tcx! 
{ rustc_hir::def_id::DefIndex, rustc_hir::definitions::DefKey, rustc_hir::OpaqueTyOrigin<rustc_hir::def_id::DefId>, - rustc_index::bit_set::BitSet<u32>, + rustc_index::bit_set::DenseBitSet<u32>, rustc_index::bit_set::FiniteBitSet<u32>, rustc_session::cstore::ForeignModule, rustc_session::cstore::LinkagePreference, diff --git a/compiler/rustc_middle/src/ty/predicate.rs b/compiler/rustc_middle/src/ty/predicate.rs index 32d6455e825..584cac22ae8 100644 --- a/compiler/rustc_middle/src/ty/predicate.rs +++ b/compiler/rustc_middle/src/ty/predicate.rs @@ -476,16 +476,6 @@ impl<'tcx> Clause<'tcx> { } } -pub trait ToPolyTraitRef<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>; -} - -impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> { - fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> { - self.map_bound_ref(|trait_pred| trait_pred.trait_ref) - } -} - impl<'tcx> UpcastFrom<TyCtxt<'tcx>, PredicateKind<'tcx>> for Predicate<'tcx> { fn upcast_from(from: PredicateKind<'tcx>, tcx: TyCtxt<'tcx>) -> Self { ty::Binder::dummy(from).upcast(tcx) diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index b0150bc1192..72f353f06ff 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -45,10 +45,25 @@ pub trait Printer<'tcx>: Sized { &mut self, impl_def_id: DefId, args: &'tcx [GenericArg<'tcx>], - self_ty: Ty<'tcx>, - trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - self.default_print_impl_path(impl_def_id, args, self_ty, trait_ref) + let tcx = self.tcx(); + let self_ty = tcx.type_of(impl_def_id); + let impl_trait_ref = tcx.impl_trait_ref(impl_def_id); + let (self_ty, impl_trait_ref) = if tcx.generics_of(impl_def_id).count() <= args.len() { + ( + self_ty.instantiate(tcx, args), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate(tcx, args)), + ) + } else { + // We are probably printing a nested item inside of an impl. + // Use the identity substitutions for the impl. 
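The `ToPolyTraitRef` trait removed above only wrapped a single `map_bound_ref` call, so callers can project the trait ref out of the binder directly. A minimal sketch of that shape with simplified stand-in types (not the real `ty::Binder`/`ty::TraitPredicate`):

// Simplified stand-ins for illustration only.
#[derive(Clone, Copy, Debug)]
struct Binder<T>(T);

impl<T> Binder<T> {
    // Same shape as `Binder::map_bound`: apply a projection underneath the binder.
    fn map_bound<U>(self, f: impl FnOnce(T) -> U) -> Binder<U> {
        Binder(f(self.0))
    }
}

#[derive(Clone, Copy, Debug)]
struct TraitRef(u32);

#[derive(Clone, Copy, Debug)]
struct TraitPredicate {
    trait_ref: TraitRef,
}

fn main() {
    let pred = Binder(TraitPredicate { trait_ref: TraitRef(7) });
    // Instead of a one-method `ToPolyTraitRef` helper trait, project directly:
    let poly_trait_ref = pred.map_bound(|p| p.trait_ref);
    println!("{poly_trait_ref:?}");
}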
+ ( + self_ty.instantiate_identity(), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate_identity()), + ) + }; + + self.default_print_impl_path(impl_def_id, self_ty, impl_trait_ref) } fn print_region(&mut self, region: ty::Region<'tcx>) -> Result<(), PrintError>; @@ -107,23 +122,7 @@ pub trait Printer<'tcx>: Sized { self.path_crate(def_id.krate) } - DefPathData::Impl => { - let generics = self.tcx().generics_of(def_id); - let self_ty = self.tcx().type_of(def_id); - let impl_trait_ref = self.tcx().impl_trait_ref(def_id); - let (self_ty, impl_trait_ref) = if args.len() >= generics.count() { - ( - self_ty.instantiate(self.tcx(), args), - impl_trait_ref.map(|i| i.instantiate(self.tcx(), args)), - ) - } else { - ( - self_ty.instantiate_identity(), - impl_trait_ref.map(|i| i.instantiate_identity()), - ) - }; - self.print_impl_path(def_id, args, self_ty, impl_trait_ref) - } + DefPathData::Impl => self.print_impl_path(def_id, args), _ => { let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id }; @@ -201,7 +200,6 @@ pub trait Printer<'tcx>: Sized { fn default_print_impl_path( &mut self, impl_def_id: DefId, - _args: &'tcx [GenericArg<'tcx>], self_ty: Ty<'tcx>, impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 9fe1caa4b58..ac900edefe1 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -690,7 +690,14 @@ pub trait PrettyPrinter<'tcx>: Printer<'tcx> + fmt::Write { if with_reduced_queries() { p!(print_def_path(def_id, args)); } else { - let sig = self.tcx().fn_sig(def_id).instantiate(self.tcx(), args); + let mut sig = self.tcx().fn_sig(def_id).instantiate(self.tcx(), args); + if self.tcx().codegen_fn_attrs(def_id).safe_target_features { + p!("#[target_features] "); + sig = sig.map_bound(|mut sig| { + sig.safety = hir::Safety::Safe; + sig + }); + } p!(print(sig), " {{", print_value_path(def_id, args), "}}"); } } diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 92b3632c8ac..bf37ae05c82 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -27,7 +27,7 @@ use crate::infer::canonical::Canonical; use crate::ty::InferTy::*; use crate::ty::{ self, AdtDef, BoundRegionKind, Discr, GenericArg, GenericArgs, GenericArgsRef, List, ParamEnv, - Region, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable, TypeVisitor, + Region, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable, TypeVisitor, UintTy, }; // Re-export and re-parameterize some `I = TyCtxt<'tcx>` types here @@ -401,7 +401,7 @@ impl<'tcx> Ty<'tcx> { /// The more specific methods will often optimize their creation. 
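The pretty-printing change above tags fn items whose `#[target_feature]` functions are considered safe and prints their signature as safe. For context, a sketch of such a function from the user's side (a hypothetical example, assuming an x86_64 target and the target_feature_11 calling rules; not code from this commit):

// A #[target_feature] function declared without `unsafe`. Its body is type-checked
// as safe, but a caller that does not itself enable `avx2` must still use an unsafe
// block and vouch that the feature is available, e.g. via runtime detection.
#[target_feature(enable = "avx2")]
fn sum(xs: &[f32]) -> f32 {
    xs.iter().sum()
}

fn main() {
    if std::arch::is_x86_feature_detected!("avx2") {
        let total = unsafe { sum(&[1.0, 2.0, 3.0]) };
        println!("{total}");
    } else {
        println!("avx2 not available");
    }
}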
#[allow(rustc::usage_of_ty_tykind)] #[inline] - pub fn new(tcx: TyCtxt<'tcx>, st: TyKind<'tcx>) -> Ty<'tcx> { + fn new(tcx: TyCtxt<'tcx>, st: TyKind<'tcx>) -> Ty<'tcx> { tcx.mk_ty_from_kind(st) } @@ -613,6 +613,41 @@ impl<'tcx> Ty<'tcx> { #[inline] pub fn new_adt(tcx: TyCtxt<'tcx>, def: AdtDef<'tcx>, args: GenericArgsRef<'tcx>) -> Ty<'tcx> { tcx.debug_assert_args_compatible(def.did(), args); + if cfg!(debug_assertions) { + match tcx.def_kind(def.did()) { + DefKind::Struct | DefKind::Union | DefKind::Enum => {} + DefKind::Mod + | DefKind::Variant + | DefKind::Trait + | DefKind::TyAlias + | DefKind::ForeignTy + | DefKind::TraitAlias + | DefKind::AssocTy + | DefKind::TyParam + | DefKind::Fn + | DefKind::Const + | DefKind::ConstParam + | DefKind::Static { .. } + | DefKind::Ctor(..) + | DefKind::AssocFn + | DefKind::AssocConst + | DefKind::Macro(..) + | DefKind::ExternCrate + | DefKind::Use + | DefKind::ForeignMod + | DefKind::AnonConst + | DefKind::InlineConst + | DefKind::OpaqueTy + | DefKind::Field + | DefKind::LifetimeParam + | DefKind::GlobalAsm + | DefKind::Impl { .. } + | DefKind::Closure + | DefKind::SyntheticCoroutineBody => { + bug!("not an adt: {def:?} ({:?})", tcx.def_kind(def.did())) + } + } + } Ty::new(tcx, Adt(def, args)) } @@ -772,7 +807,7 @@ impl<'tcx> Ty<'tcx> { } } }); - Ty::new(tcx, Adt(adt_def, args)) + Ty::new_adt(tcx, adt_def, args) } #[inline] @@ -1017,6 +1052,18 @@ impl<'tcx> Ty<'tcx> { } } + /// Check if type is an `usize`. + #[inline] + pub fn is_usize(self) -> bool { + matches!(self.kind(), Uint(UintTy::Usize)) + } + + /// Check if type is an `usize` or an integral type variable. + #[inline] + pub fn is_usize_like(self) -> bool { + matches!(self.kind(), Uint(UintTy::Usize) | Infer(IntVar(_))) + } + #[inline] pub fn is_never(self) -> bool { matches!(self.kind(), Never) diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 4272893df30..75893da0e58 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -876,6 +876,11 @@ impl<'tcx> TyCtxt<'tcx> { /// [public]: TyCtxt::is_private_dep /// [direct]: rustc_session::cstore::ExternCrate::is_direct pub fn is_user_visible_dep(self, key: CrateNum) -> bool { + // `#![rustc_private]` overrides defaults to make private dependencies usable. 
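The new `Ty::is_usize` and `Ty::is_usize_like` helpers above are single `matches!` checks on the type's kind. A self-contained sketch of the same shape, using simplified stand-ins rather than the real `TyKind`/`UintTy`/`InferTy` definitions:

// Simplified stand-ins for illustration only -- not the rustc_middle types.
#[derive(Debug)]
enum UintTy {
    Usize,
    U8,
}

#[derive(Debug)]
enum TyKind {
    Uint(UintTy),
    /// An unresolved integral inference variable, e.g. the type of `0` before inference.
    IntVar,
    Bool,
}

impl TyKind {
    /// Check if the type is `usize`.
    fn is_usize(&self) -> bool {
        matches!(self, TyKind::Uint(UintTy::Usize))
    }

    /// Check if the type is `usize` or an integral type variable.
    fn is_usize_like(&self) -> bool {
        matches!(self, TyKind::Uint(UintTy::Usize) | TyKind::IntVar)
    }
}

fn main() {
    assert!(TyKind::Uint(UintTy::Usize).is_usize());
    assert!(!TyKind::Uint(UintTy::U8).is_usize());
    assert!(TyKind::IntVar.is_usize_like());
    assert!(!TyKind::Bool.is_usize_like());
    println!("ok");
}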
+ if self.features().enabled(sym::rustc_private) { + return true; + } + // | Private | Direct | Visible | | // |---------|--------|---------|--------------------| // | Yes | Yes | Yes | !true || true | diff --git a/compiler/rustc_middle/src/util/mod.rs b/compiler/rustc_middle/src/util/mod.rs index 8dafc422644..097a868191c 100644 --- a/compiler/rustc_middle/src/util/mod.rs +++ b/compiler/rustc_middle/src/util/mod.rs @@ -1,9 +1,7 @@ pub mod bug; -pub mod call_kind; pub mod common; pub mod find_self_call; -pub use call_kind::{CallDesugaringKind, CallKind, call_kind}; pub use find_self_call::find_self_call; #[derive(Default, Copy, Clone)] diff --git a/compiler/rustc_mir_build/Cargo.toml b/compiler/rustc_mir_build/Cargo.toml index 11904722743..1f3689926bc 100644 --- a/compiler/rustc_mir_build/Cargo.toml +++ b/compiler/rustc_mir_build/Cargo.toml @@ -12,6 +12,7 @@ rustc_abi = { path = "../rustc_abi" } rustc_apfloat = "0.2.0" rustc_arena = { path = "../rustc_arena" } rustc_ast = { path = "../rustc_ast" } +rustc_attr_parsing = { path = "../rustc_attr_parsing" } rustc_data_structures = { path = "../rustc_data_structures" } rustc_errors = { path = "../rustc_errors" } rustc_fluent_macro = { path = "../rustc_fluent_macro" } diff --git a/compiler/rustc_mir_build/messages.ftl b/compiler/rustc_mir_build/messages.ftl index 5d61a9d1e75..ffdb721fb18 100644 --- a/compiler/rustc_mir_build/messages.ftl +++ b/compiler/rustc_mir_build/messages.ftl @@ -324,12 +324,6 @@ mir_build_type_not_structural_more_info = see https://doc.rust-lang.org/stable/s mir_build_type_not_structural_tip = the `PartialEq` trait must be derived, manual `impl`s are not sufficient; see https://doc.rust-lang.org/stable/std/marker/trait.StructuralPartialEq.html for details -mir_build_unconditional_recursion = function cannot return without recursing - .label = cannot return without recursing - .help = a `loop` may express intention better if this is on purpose - -mir_build_unconditional_recursion_call_site_label = recursive call site - mir_build_union_field_requires_unsafe = access to union field is unsafe and requires unsafe block .note = the field may not be properly initialized: using uninitialized data will cause undefined behavior diff --git a/compiler/rustc_mir_build/src/builder/custom/parse/instruction.rs b/compiler/rustc_mir_build/src/builder/custom/parse/instruction.rs index 3dd5de02230..59f440432eb 100644 --- a/compiler/rustc_mir_build/src/builder/custom/parse/instruction.rs +++ b/compiler/rustc_mir_build/src/builder/custom/parse/instruction.rs @@ -246,6 +246,7 @@ impl<'a, 'tcx> ParseCtxt<'a, 'tcx> { let offset = self.parse_operand(args[1])?; Ok(Rvalue::BinaryOp(BinOp::Offset, Box::new((ptr, offset)))) }, + @call(mir_len, args) => Ok(Rvalue::Len(self.parse_place(args[0])?)), @call(mir_ptr_metadata, args) => Ok(Rvalue::UnaryOp(UnOp::PtrMetadata, self.parse_operand(args[0])?)), @call(mir_copy_for_deref, args) => Ok(Rvalue::CopyForDeref(self.parse_place(args[0])?)), ExprKind::Borrow { borrow_kind, arg } => Ok( diff --git a/compiler/rustc_mir_build/src/builder/expr/as_constant.rs b/compiler/rustc_mir_build/src/builder/expr/as_constant.rs index 177c1e33a83..e4e452aff75 100644 --- a/compiler/rustc_mir_build/src/builder/expr/as_constant.rs +++ b/compiler/rustc_mir_build/src/builder/expr/as_constant.rs @@ -3,13 +3,12 @@ use rustc_abi::Size; use rustc_ast as ast; use rustc_hir::LangItem; -use rustc_middle::mir::interpret::{ - Allocation, CTFE_ALLOC_SALT, LitToConstError, LitToConstInput, Scalar, -}; +use 
rustc_middle::mir::interpret::{Allocation, CTFE_ALLOC_SALT, LitToConstInput, Scalar}; use rustc_middle::mir::*; use rustc_middle::thir::*; use rustc_middle::ty::{ - self, CanonicalUserType, CanonicalUserTypeAnnotation, Ty, TyCtxt, UserTypeAnnotationIndex, + self, CanonicalUserType, CanonicalUserTypeAnnotation, Ty, TyCtxt, TypeVisitableExt as _, + UserTypeAnnotationIndex, }; use rustc_middle::{bug, mir, span_bug}; use tracing::{instrument, trace}; @@ -50,16 +49,7 @@ pub(crate) fn as_constant_inner<'tcx>( let Expr { ty, temp_lifetime: _, span, ref kind } = *expr; match *kind { ExprKind::Literal { lit, neg } => { - let const_ = match lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }) - { - Ok(c) => c, - Err(LitToConstError::Reported(guar)) => { - Const::Ty(Ty::new_error(tcx, guar), ty::Const::new_error(tcx, guar)) - } - Err(LitToConstError::TypeError) => { - bug!("encountered type error in `lit_to_mir_constant`") - } - }; + let const_ = lit_to_mir_constant(tcx, LitToConstInput { lit: &lit.node, ty, neg }); ConstOperand { span, user_ty: None, const_ } } @@ -108,11 +98,13 @@ pub(crate) fn as_constant_inner<'tcx>( } #[instrument(skip(tcx, lit_input))] -fn lit_to_mir_constant<'tcx>( - tcx: TyCtxt<'tcx>, - lit_input: LitToConstInput<'tcx>, -) -> Result<Const<'tcx>, LitToConstError> { +fn lit_to_mir_constant<'tcx>(tcx: TyCtxt<'tcx>, lit_input: LitToConstInput<'tcx>) -> Const<'tcx> { let LitToConstInput { lit, ty, neg } = lit_input; + + if let Err(guar) = ty.error_reported() { + return Const::Ty(Ty::new_error(tcx, guar), ty::Const::new_error(tcx, guar)); + } + let trunc = |n| { let width = match tcx.layout_of(ty::TypingEnv::fully_monomorphized().as_query_input(ty)) { Ok(layout) => layout.size, @@ -123,7 +115,7 @@ fn lit_to_mir_constant<'tcx>( trace!("trunc {} with size {} and shift {}", n, width.bits(), 128 - width.bits()); let result = width.truncate(n); trace!("trunc result: {}", result); - Ok(ConstValue::Scalar(Scalar::from_uint(result, width))) + ConstValue::Scalar(Scalar::from_uint(result, width)) }; let value = match (lit, ty.kind()) { @@ -154,20 +146,18 @@ fn lit_to_mir_constant<'tcx>( ConstValue::Scalar(Scalar::from_uint(*n, Size::from_bytes(1))) } (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => { - trunc(if neg { (n.get() as i128).overflowing_neg().0 as u128 } else { n.get() })? 
- } - (ast::LitKind::Float(n, _), ty::Float(fty)) => parse_float_into_constval(*n, *fty, neg) - .ok_or_else(|| { - LitToConstError::Reported( - tcx.dcx() - .delayed_bug(format!("couldn't parse float literal: {:?}", lit_input.lit)), - ) - })?, + trunc(if neg { (n.get() as i128).overflowing_neg().0 as u128 } else { n.get() }) + } + (ast::LitKind::Float(n, _), ty::Float(fty)) => { + parse_float_into_constval(*n, *fty, neg).unwrap() + } (ast::LitKind::Bool(b), ty::Bool) => ConstValue::Scalar(Scalar::from_bool(*b)), (ast::LitKind::Char(c), ty::Char) => ConstValue::Scalar(Scalar::from_char(*c)), - (ast::LitKind::Err(guar), _) => return Err(LitToConstError::Reported(*guar)), - _ => return Err(LitToConstError::TypeError), + (ast::LitKind::Err(guar), _) => { + return Const::Ty(Ty::new_error(tcx, *guar), ty::Const::new_error(tcx, *guar)); + } + _ => bug!("invalid lit/ty combination in `lit_to_mir_constant`: {lit:?}: {ty:?}"), }; - Ok(Const::Val(value, ty)) + Const::Val(value, ty) } diff --git a/compiler/rustc_mir_build/src/builder/expr/as_place.rs b/compiler/rustc_mir_build/src/builder/expr/as_place.rs index 89c7bb357ef..b1851e79d5c 100644 --- a/compiler/rustc_mir_build/src/builder/expr/as_place.rs +++ b/compiler/rustc_mir_build/src/builder/expr/as_place.rs @@ -11,7 +11,7 @@ use rustc_middle::mir::*; use rustc_middle::thir::*; use rustc_middle::ty::{self, AdtDef, CanonicalUserTypeAnnotation, Ty, Variance}; use rustc_middle::{bug, span_bug}; -use rustc_span::{DesugaringKind, Span}; +use rustc_span::Span; use tracing::{debug, instrument, trace}; use crate::builder::ForGuard::{OutsideGuard, RefWithinGuard}; @@ -630,98 +630,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { block.and(base_place.index(idx)) } - /// Given a place that's either an array or a slice, returns an operand - /// with the length of the array/slice. - /// - /// For arrays it'll be `Operand::Constant` with the actual length; - /// For slices it'll be `Operand::Move` of a local using `PtrMetadata`. - pub(in crate::builder) fn len_of_slice_or_array( - &mut self, - block: BasicBlock, - place: Place<'tcx>, - span: Span, - source_info: SourceInfo, - ) -> Operand<'tcx> { - let place_ty = place.ty(&self.local_decls, self.tcx).ty; - let usize_ty = self.tcx.types.usize; - - match place_ty.kind() { - ty::Array(_elem_ty, len_const) => { - let ty_const = if let Some((_, len_ty)) = len_const.try_to_valtree() - && len_ty != self.tcx.types.usize - { - // Bad const generics can give us a constant from the type that's - // not actually a `usize`, so in that case give an error instead. - // FIXME: It'd be nice if the type checker made sure this wasn't - // possible, instead. - let err = self.tcx.dcx().span_delayed_bug( - span, - format!( - "Array length should have already been a type error, as it's {len_ty:?}" - ), - ); - ty::Const::new_error(self.tcx, err) - } else { - // We know how long an array is, so just use that as a constant - // directly -- no locals needed. We do need one statement so - // that borrow- and initialization-checking consider it used, - // though. FIXME: Do we really *need* to count this as a use? - // Could partial array tracking work off something else instead? - self.cfg.push_fake_read(block, source_info, FakeReadCause::ForIndex, place); - *len_const - }; - - let const_ = Const::from_ty_const(ty_const, usize_ty, self.tcx); - Operand::Constant(Box::new(ConstOperand { span, user_ty: None, const_ })) - } - ty::Slice(_elem_ty) => { - let ptr_or_ref = if let [PlaceElem::Deref] = place.projection[..] 
- && let local_ty = self.local_decls[place.local].ty - && local_ty.is_trivially_pure_clone_copy() - { - // It's extremely common that we have something that can be - // directly passed to `PtrMetadata`, so avoid an unnecessary - // temporary and statement in those cases. Note that we can - // only do that for `Copy` types -- not `&mut [_]` -- because - // the MIR we're building here needs to pass NLL later. - Operand::Copy(Place::from(place.local)) - } else { - let len_span = self.tcx.with_stable_hashing_context(|hcx| { - let span = source_info.span; - span.mark_with_reason( - None, - DesugaringKind::IndexBoundsCheckReborrow, - span.edition(), - hcx, - ) - }); - let ptr_ty = Ty::new_imm_ptr(self.tcx, place_ty); - let slice_ptr = self.temp(ptr_ty, span); - self.cfg.push_assign( - block, - SourceInfo { span: len_span, ..source_info }, - slice_ptr, - Rvalue::RawPtr(Mutability::Not, place), - ); - Operand::Move(slice_ptr) - }; - - let len = self.temp(usize_ty, span); - self.cfg.push_assign( - block, - source_info, - len, - Rvalue::UnaryOp(UnOp::PtrMetadata, ptr_or_ref), - ); - - Operand::Move(len) - } - _ => { - span_bug!(span, "len called on place of type {place_ty:?}") - } - } - } - fn bounds_check( &mut self, block: BasicBlock, @@ -730,25 +638,25 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr_span: Span, source_info: SourceInfo, ) -> BasicBlock { - let slice = slice.to_place(self); + let usize_ty = self.tcx.types.usize; + let bool_ty = self.tcx.types.bool; + // bounds check: + let len = self.temp(usize_ty, expr_span); + let lt = self.temp(bool_ty, expr_span); // len = len(slice) - let len = self.len_of_slice_or_array(block, slice, expr_span, source_info); - + self.cfg.push_assign(block, source_info, len, Rvalue::Len(slice.to_place(self))); // lt = idx < len - let bool_ty = self.tcx.types.bool; - let lt = self.temp(bool_ty, expr_span); self.cfg.push_assign( block, source_info, lt, Rvalue::BinaryOp( BinOp::Lt, - Box::new((Operand::Copy(Place::from(index)), len.to_copy())), + Box::new((Operand::Copy(Place::from(index)), Operand::Copy(len))), ), ); - let msg = BoundsCheck { len, index: Operand::Copy(Place::from(index)) }; - + let msg = BoundsCheck { len: Operand::Move(len), index: Operand::Copy(Place::from(index)) }; // assert!(lt, "...") self.assert(block, Operand::Move(lt), true, msg, expr_span) } diff --git a/compiler/rustc_mir_build/src/builder/matches/test.rs b/compiler/rustc_mir_build/src/builder/matches/test.rs index 0d36b7bb3ee..8cca84d7fcc 100644 --- a/compiler/rustc_mir_build/src/builder/matches/test.rs +++ b/compiler/rustc_mir_build/src/builder/matches/test.rs @@ -243,8 +243,11 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { } TestKind::Len { len, op } => { + let usize_ty = self.tcx.types.usize; + let actual = self.temp(usize_ty, test.span); + // actual = len(place) - let actual = self.len_of_slice_or_array(block, place, test.span, source_info); + self.cfg.push_assign(block, source_info, actual, Rvalue::Len(place)); // expected = <N> let expected = self.push_usize(block, source_info, len); @@ -259,7 +262,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { fail_block, source_info, op, - actual, + Operand::Move(actual), Operand::Move(expected), ); } diff --git a/compiler/rustc_mir_build/src/builder/mod.rs b/compiler/rustc_mir_build/src/builder/mod.rs index 932b6fbe026..9fa431f7d5f 100644 --- a/compiler/rustc_mir_build/src/builder/mod.rs +++ b/compiler/rustc_mir_build/src/builder/mod.rs @@ -26,7 +26,6 @@ use rustc_middle::ty::{self, ScalarInt, Ty, TyCtxt, TypeVisitableExt, TypingMode use rustc_middle::{bug, 
span_bug}; use rustc_span::{Span, Symbol, sym}; -use super::lints; use crate::builder::expr::as_place::PlaceBuilder; use crate::builder::scope::DropKind; @@ -47,7 +46,7 @@ pub(crate) fn closure_saved_names_of_captured_variables<'tcx>( } /// Construct the MIR for a given `DefId`. -pub(crate) fn mir_build<'tcx>(tcx: TyCtxtAt<'tcx>, def: LocalDefId) -> Body<'tcx> { +pub(crate) fn build_mir<'tcx>(tcx: TyCtxtAt<'tcx>, def: LocalDefId) -> Body<'tcx> { let tcx = tcx.tcx; tcx.ensure_with_value().thir_abstract_const(def); if let Err(e) = tcx.check_match(def) { @@ -79,8 +78,6 @@ pub(crate) fn mir_build<'tcx>(tcx: TyCtxtAt<'tcx>, def: LocalDefId) -> Body<'tcx } }; - lints::check(tcx, &body); - // The borrow checker will replace all the regions here with its own // inference variables. There's no point having non-erased regions here. // The exception is `body.user_type_annotations`, which is used unmodified diff --git a/compiler/rustc_mir_build/src/builder/scope.rs b/compiler/rustc_mir_build/src/builder/scope.rs index 35c98037827..20441530a47 100644 --- a/compiler/rustc_mir_build/src/builder/scope.rs +++ b/compiler/rustc_mir_build/src/builder/scope.rs @@ -1131,15 +1131,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { /// Schedule emission of a backwards incompatible drop lint hint. /// Applicable only to temporary values for now. + #[instrument(level = "debug", skip(self))] pub(crate) fn schedule_backwards_incompatible_drop( &mut self, span: Span, region_scope: region::Scope, local: Local, ) { - if !self.local_decls[local].ty.has_significant_drop(self.tcx, self.typing_env()) { - return; - } + // Note that we are *not* gating BIDs here on whether they have significant destructor. + // We need to know all of them so that we can capture potential borrow-checking errors. for scope in self.scopes.scopes.iter_mut().rev() { // Since we are inserting linting MIR statement, we have to invalidate the caches scope.invalidate_cache(); diff --git a/compiler/rustc_mir_build/src/check_unsafety.rs b/compiler/rustc_mir_build/src/check_unsafety.rs index f7071eb139f..5eed9ef798d 100644 --- a/compiler/rustc_mir_build/src/check_unsafety.rs +++ b/compiler/rustc_mir_build/src/check_unsafety.rs @@ -478,23 +478,26 @@ impl<'a, 'tcx> Visitor<'a, 'tcx> for UnsafetyVisitor<'a, 'tcx> { return; // don't visit the whole expression } ExprKind::Call { fun, ty: _, args: _, from_hir_call: _, fn_span: _ } => { - if self.thir[fun].ty.fn_sig(self.tcx).safety().is_unsafe() { - let func_id = if let ty::FnDef(func_id, _) = self.thir[fun].ty.kind() { + let fn_ty = self.thir[fun].ty; + let sig = fn_ty.fn_sig(self.tcx); + let (callee_features, safe_target_features): (&[_], _) = match fn_ty.kind() { + ty::FnDef(func_id, ..) => { + let cg_attrs = self.tcx.codegen_fn_attrs(func_id); + (&cg_attrs.target_features, cg_attrs.safe_target_features) + } + _ => (&[], false), + }; + if sig.safety().is_unsafe() && !safe_target_features { + let func_id = if let ty::FnDef(func_id, _) = fn_ty.kind() { Some(*func_id) } else { None }; self.requires_unsafe(expr.span, CallToUnsafeFunction(func_id)); - } else if let &ty::FnDef(func_did, _) = self.thir[fun].ty.kind() { - // If the called function has target features the calling function hasn't, - // the call requires `unsafe`. Don't check this on wasm - // targets, though. 
For more information on wasm see the - // is_like_wasm check in hir_analysis/src/collect.rs - let callee_features = &self.tcx.codegen_fn_attrs(func_did).target_features; - if !self.tcx.sess.target.options.is_like_wasm - && !callee_features.iter().all(|feature| { - self.body_target_features.iter().any(|f| f.name == feature.name) - }) + } else if let &ty::FnDef(func_did, _) = fn_ty.kind() { + if !self + .tcx + .is_target_feature_call_safe(callee_features, self.body_target_features) { let missing: Vec<_> = callee_features .iter() @@ -739,7 +742,10 @@ impl UnsafeOpKind { ) { let parent_id = tcx.hir().get_parent_item(hir_id); let parent_owner = tcx.hir_owner_node(parent_id); - let should_suggest = parent_owner.fn_sig().is_some_and(|sig| sig.header.is_unsafe()); + let should_suggest = parent_owner.fn_sig().is_some_and(|sig| { + // Do not suggest for safe target_feature functions + matches!(sig.header.safety, hir::HeaderSafety::Normal(hir::Safety::Unsafe)) + }); let unsafe_not_inherited_note = if should_suggest { suggest_unsafe_block.then(|| { let body_span = tcx.hir().body(parent_owner.body_id().unwrap()).value.span; @@ -902,7 +908,7 @@ impl UnsafeOpKind { { true } else if let Some(sig) = tcx.hir().fn_sig_by_hir_id(*id) - && sig.header.is_unsafe() + && matches!(sig.header.safety, hir::HeaderSafety::Normal(hir::Safety::Unsafe)) { true } else { @@ -1111,7 +1117,16 @@ pub(crate) fn check_unsafety(tcx: TyCtxt<'_>, def: LocalDefId) { let hir_id = tcx.local_def_id_to_hir_id(def); let safety_context = tcx.hir().fn_sig_by_hir_id(hir_id).map_or(SafetyContext::Safe, |fn_sig| { - if fn_sig.header.safety.is_unsafe() { SafetyContext::UnsafeFn } else { SafetyContext::Safe } + match fn_sig.header.safety { + // We typeck the body as safe, but otherwise treat it as unsafe everywhere else. + // Call sites to other SafeTargetFeatures functions are checked explicitly and don't need + // to care about safety of the body. + hir::HeaderSafety::SafeTargetFeatures => SafetyContext::Safe, + hir::HeaderSafety::Normal(safety) => match safety { + hir::Safety::Unsafe => SafetyContext::UnsafeFn, + hir::Safety::Safe => SafetyContext::Safe, + }, + } }); let body_target_features = &tcx.body_codegen_attrs(def.to_def_id()).target_features; let mut warnings = Vec::new(); diff --git a/compiler/rustc_mir_build/src/errors.rs b/compiler/rustc_mir_build/src/errors.rs index 790d56860d2..83aec9ccdef 100644 --- a/compiler/rustc_mir_build/src/errors.rs +++ b/compiler/rustc_mir_build/src/errors.rs @@ -12,16 +12,6 @@ use rustc_span::{Span, Symbol}; use crate::fluent_generated as fluent; #[derive(LintDiagnostic)] -#[diag(mir_build_unconditional_recursion)] -#[help] -pub(crate) struct UnconditionalRecursion { - #[label] - pub(crate) span: Span, - #[label(mir_build_unconditional_recursion_call_site_label)] - pub(crate) call_sites: Vec<Span>, -} - -#[derive(LintDiagnostic)] #[diag(mir_build_call_to_deprecated_safe_fn_requires_unsafe)] pub(crate) struct CallToDeprecatedSafeFnRequiresUnsafe { #[label] diff --git a/compiler/rustc_mir_build/src/lib.rs b/compiler/rustc_mir_build/src/lib.rs index 467725841dc..8e786733ee0 100644 --- a/compiler/rustc_mir_build/src/lib.rs +++ b/compiler/rustc_mir_build/src/lib.rs @@ -18,7 +18,6 @@ mod builder; mod check_tail_calls; mod check_unsafety; mod errors; -pub mod lints; mod thir; use rustc_middle::util::Providers; @@ -28,7 +27,7 @@ rustc_fluent_macro::fluent_messages! 
{ "../messages.ftl" } pub fn provide(providers: &mut Providers) { providers.check_match = thir::pattern::check_match; providers.lit_to_const = thir::constant::lit_to_const; - providers.hooks.build_mir = builder::mir_build; + providers.hooks.build_mir = builder::build_mir; providers.closure_saved_names_of_captured_variables = builder::closure_saved_names_of_captured_variables; providers.check_unsafety = check_unsafety::check_unsafety; diff --git a/compiler/rustc_mir_build/src/thir/constant.rs b/compiler/rustc_mir_build/src/thir/constant.rs index ce1c635d1b9..49db522cf0e 100644 --- a/compiler/rustc_mir_build/src/thir/constant.rs +++ b/compiler/rustc_mir_build/src/thir/constant.rs @@ -1,7 +1,7 @@ use rustc_ast as ast; use rustc_hir::LangItem; use rustc_middle::bug; -use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput}; +use rustc_middle::mir::interpret::LitToConstInput; use rustc_middle::ty::{self, ScalarInt, TyCtxt, TypeVisitableExt as _}; use tracing::trace; @@ -10,11 +10,11 @@ use crate::builder::parse_float_into_scalar; pub(crate) fn lit_to_const<'tcx>( tcx: TyCtxt<'tcx>, lit_input: LitToConstInput<'tcx>, -) -> Result<ty::Const<'tcx>, LitToConstError> { +) -> ty::Const<'tcx> { let LitToConstInput { lit, ty, neg } = lit_input; if let Err(guar) = ty.error_reported() { - return Ok(ty::Const::new_error(tcx, guar)); + return ty::Const::new_error(tcx, guar); } let trunc = |n| { @@ -28,8 +28,8 @@ pub(crate) fn lit_to_const<'tcx>( let result = width.truncate(n); trace!("trunc result: {}", result); - Ok(ScalarInt::try_from_uint(result, width) - .unwrap_or_else(|| bug!("expected to create ScalarInt from uint {:?}", result))) + ScalarInt::try_from_uint(result, width) + .unwrap_or_else(|| bug!("expected to create ScalarInt from uint {:?}", result)) }; let valtree = match (lit, ty.kind()) { @@ -57,20 +57,20 @@ pub(crate) fn lit_to_const<'tcx>( } (ast::LitKind::Int(n, _), ty::Uint(_)) | (ast::LitKind::Int(n, _), ty::Int(_)) => { let scalar_int = - trunc(if neg { (n.get() as i128).overflowing_neg().0 as u128 } else { n.get() })?; + trunc(if neg { (n.get() as i128).overflowing_neg().0 as u128 } else { n.get() }); ty::ValTree::from_scalar_int(scalar_int) } (ast::LitKind::Bool(b), ty::Bool) => ty::ValTree::from_scalar_int((*b).into()), (ast::LitKind::Float(n, _), ty::Float(fty)) => { - let bits = parse_float_into_scalar(*n, *fty, neg).ok_or_else(|| { + let bits = parse_float_into_scalar(*n, *fty, neg).unwrap_or_else(|| { tcx.dcx().bug(format!("couldn't parse float literal: {:?}", lit_input.lit)) - })?; + }); ty::ValTree::from_scalar_int(bits) } (ast::LitKind::Char(c), ty::Char) => ty::ValTree::from_scalar_int((*c).into()), - (ast::LitKind::Err(guar), _) => return Err(LitToConstError::Reported(*guar)), - _ => return Err(LitToConstError::TypeError), + (ast::LitKind::Err(guar), _) => return ty::Const::new_error(tcx, *guar), + _ => return ty::Const::new_misc_error(tcx), }; - Ok(ty::Const::new_value(tcx, valtree, ty)) + ty::Const::new_value(tcx, valtree, ty) } diff --git a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs index 2b3c98db966..3853b95f78b 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/const_to_pat.rs @@ -1,3 +1,5 @@ +use core::ops::ControlFlow; + use rustc_abi::{FieldIdx, VariantIdx}; use rustc_apfloat::Float; use rustc_data_structures::fx::FxHashSet; @@ -8,7 +10,9 @@ use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::traits::Obligation; 
use rustc_middle::mir::interpret::ErrorHandled; use rustc_middle::thir::{FieldPat, Pat, PatKind}; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeVisitableExt, TypeVisitor, ValTree}; +use rustc_middle::ty::{ + self, Ty, TyCtxt, TypeSuperVisitable, TypeVisitableExt, TypeVisitor, ValTree, +}; use rustc_middle::{mir, span_bug}; use rustc_span::def_id::DefId; use rustc_span::{Span, sym}; @@ -185,7 +189,7 @@ impl<'tcx> ConstToPat<'tcx> { if !inlined_const_as_pat.references_error() { // Always check for `PartialEq` if we had no other errors yet. - if !type_has_partial_eq_impl(self.tcx, typing_env, ty).0 { + if !type_has_partial_eq_impl(self.tcx, typing_env, ty).has_impl { let mut err = self.tcx.dcx().create_err(TypeNotPartialEq { span: self.span, ty }); extend_type_not_partial_eq(self.tcx, typing_env, ty, &mut err); return self.mk_err(err, ty); @@ -219,12 +223,13 @@ impl<'tcx> ConstToPat<'tcx> { // Extremely important check for all ADTs! Make sure they opted-in to be used in // patterns. debug!("adt_def {:?} has !type_marked_structural for cv.ty: {:?}", adt_def, ty); - let (_impls_partial_eq, derived, structural, impl_def_id) = - type_has_partial_eq_impl(self.tcx, self.typing_env, ty); + let PartialEqImplStatus { + is_derived, structural_partial_eq, non_blanket_impl, .. + } = type_has_partial_eq_impl(self.tcx, self.typing_env, ty); let (manual_partialeq_impl_span, manual_partialeq_impl_note) = - match (structural, impl_def_id) { + match (structural_partial_eq, non_blanket_impl) { (true, _) => (None, false), - (_, Some(def_id)) if def_id.is_local() && !derived => { + (_, Some(def_id)) if def_id.is_local() && !is_derived => { (Some(tcx.def_span(def_id)), false) } _ => (None, true), @@ -379,41 +384,50 @@ fn extend_type_not_partial_eq<'tcx>( adts_without_partialeq: FxHashSet<Span>, /// The user has written `impl PartialEq for Ty` which means it's non-structual, /// but we don't have a span to point at, so we'll just add them as a `note`. - manual: Vec<Ty<'tcx>>, + manual: FxHashSet<Ty<'tcx>>, /// The type has no `PartialEq` implementation, neither manual or derived, but /// we don't have a span to point at, so we'll just add them as a `note`. - without: Vec<Ty<'tcx>>, + without: FxHashSet<Ty<'tcx>>, } impl<'tcx> TypeVisitor<TyCtxt<'tcx>> for UsedParamsNeedInstantiationVisitor<'tcx> { + type Result = ControlFlow<()>; fn visit_ty(&mut self, ty: Ty<'tcx>) -> Self::Result { - if let ty::Adt(def, _args) = ty.kind() { - let ty_def_id = def.did(); - let ty_def_span = self.tcx.def_span(ty_def_id); - let (impls_partial_eq, derived, structural, impl_def_id) = - type_has_partial_eq_impl(self.tcx, self.typing_env, ty); - match (impls_partial_eq, derived, structural, impl_def_id) { - (_, _, true, _) => {} - (true, false, _, Some(def_id)) if def_id.is_local() => { - self.adts_with_manual_partialeq.insert(self.tcx.def_span(def_id)); - } - (true, false, _, _) if ty_def_id.is_local() => { - self.adts_with_manual_partialeq.insert(ty_def_span); - } - (false, _, _, _) if ty_def_id.is_local() => { - self.adts_without_partialeq.insert(ty_def_span); - } - (true, false, _, _) => { - self.manual.push(ty); - } - (false, _, _, _) => { - self.without.push(ty); - } - _ => {} - }; + match ty.kind() { + ty::Dynamic(..) => return ControlFlow::Break(()), + ty::FnPtr(..) 
=> return ControlFlow::Continue(()), + ty::Adt(def, _args) => { + let ty_def_id = def.did(); + let ty_def_span = self.tcx.def_span(ty_def_id); + let PartialEqImplStatus { + has_impl, + is_derived, + structural_partial_eq, + non_blanket_impl, + } = type_has_partial_eq_impl(self.tcx, self.typing_env, ty); + match (has_impl, is_derived, structural_partial_eq, non_blanket_impl) { + (_, _, true, _) => {} + (true, false, _, Some(def_id)) if def_id.is_local() => { + self.adts_with_manual_partialeq.insert(self.tcx.def_span(def_id)); + } + (true, false, _, _) if ty_def_id.is_local() => { + self.adts_with_manual_partialeq.insert(ty_def_span); + } + (false, _, _, _) if ty_def_id.is_local() => { + self.adts_without_partialeq.insert(ty_def_span); + } + (true, false, _, _) => { + self.manual.insert(ty); + } + (false, _, _, _) => { + self.without.insert(ty); + } + _ => {} + }; + ty.super_visit_with(self) + } + _ => ty.super_visit_with(self), } - use rustc_middle::ty::TypeSuperVisitable; - ty.super_visit_with(self) } } let mut v = UsedParamsNeedInstantiationVisitor { @@ -421,10 +435,12 @@ fn extend_type_not_partial_eq<'tcx>( typing_env, adts_with_manual_partialeq: FxHashSet::default(), adts_without_partialeq: FxHashSet::default(), - manual: vec![], - without: vec![], + manual: FxHashSet::default(), + without: FxHashSet::default(), }; - v.visit_ty(ty); + if v.visit_ty(ty).is_break() { + return; + } #[allow(rustc::potential_query_instability)] // Span labels will be sorted by the rendering for span in v.adts_with_manual_partialeq { err.span_note(span, "the `PartialEq` trait must be derived, manual `impl`s are not sufficient; see https://doc.rust-lang.org/stable/std/marker/trait.StructuralPartialEq.html for details"); @@ -436,29 +452,38 @@ fn extend_type_not_partial_eq<'tcx>( "must be annotated with `#[derive(PartialEq)]` to be usable in patterns", ); } - for ty in v.manual { + #[allow(rustc::potential_query_instability)] + let mut manual: Vec<_> = v.manual.into_iter().map(|t| t.to_string()).collect(); + manual.sort(); + for ty in manual { err.note(format!( "`{ty}` must be annotated with `#[derive(PartialEq)]` to be usable in patterns, manual `impl`s are not sufficient; see https://doc.rust-lang.org/stable/std/marker/trait.StructuralPartialEq.html for details" )); } - for ty in v.without { + #[allow(rustc::potential_query_instability)] + let mut without: Vec<_> = v.without.into_iter().map(|t| t.to_string()).collect(); + without.sort(); + for ty in without { err.note(format!( "`{ty}` must be annotated with `#[derive(PartialEq)]` to be usable in patterns" )); } } +#[derive(Debug)] +struct PartialEqImplStatus { + has_impl: bool, + is_derived: bool, + structural_partial_eq: bool, + non_blanket_impl: Option<DefId>, +} + #[instrument(level = "trace", skip(tcx), ret)] fn type_has_partial_eq_impl<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, ty: Ty<'tcx>, -) -> ( - /* has impl */ bool, - /* is derived */ bool, - /* structural partial eq */ bool, - /* non-blanket impl */ Option<DefId>, -) { +) -> PartialEqImplStatus { let (infcx, param_env) = tcx.infer_ctxt().build_with_typing_env(typing_env); // double-check there even *is* a semantic `PartialEq` to dispatch to. // @@ -495,10 +520,10 @@ fn type_has_partial_eq_impl<'tcx>( // that patterns can only do things that the code could also do without patterns, but it is // needed for backwards compatibility. The actual pattern matching compares primitive values, // `PartialEq::eq` never gets invoked, so there's no risk of us running non-const code. 
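The switch above from `Vec` to a hash set, plus the explicit sort before emitting, de-duplicates the "must be annotated with `#[derive(PartialEq)]`" notes while keeping their order deterministic. A plain-std sketch of that pattern (using `HashSet`/`&str` in place of `FxHashSet`/`Ty`):

use std::collections::HashSet;

fn main() {
    let types = ["Foo", "Bar", "Foo", "Baz", "Bar"];

    // Collect into a set so each type is reported only once...
    let unique: HashSet<&str> = types.into_iter().collect();

    // ...then sort, because set iteration order is not stable and diagnostics
    // should be emitted deterministically.
    let mut notes: Vec<String> = unique.into_iter().map(|t| t.to_string()).collect();
    notes.sort();

    for ty in notes {
        println!("`{ty}` must be annotated with `#[derive(PartialEq)]` to be usable in patterns");
    }
}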
- ( - infcx.predicate_must_hold_modulo_regions(&partial_eq_obligation), - automatically_derived, - structural_peq, - impl_def_id, - ) + PartialEqImplStatus { + has_impl: infcx.predicate_must_hold_modulo_regions(&partial_eq_obligation), + is_derived: automatically_derived, + structural_partial_eq: structural_peq, + non_blanket_impl: impl_def_id, + } } diff --git a/compiler/rustc_mir_build/src/thir/pattern/mod.rs b/compiler/rustc_mir_build/src/thir/pattern/mod.rs index bdf243c87b6..44b038bb5fa 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/mod.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/mod.rs @@ -13,7 +13,7 @@ use rustc_hir::pat_util::EnumerateAndAdjustIterator; use rustc_hir::{self as hir, ByRef, Mutability, RangeEnd}; use rustc_index::Idx; use rustc_lint as lint; -use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput}; +use rustc_middle::mir::interpret::LitToConstInput; use rustc_middle::thir::{ Ascription, FieldPat, LocalVarId, Pat, PatKind, PatRange, PatRangeBoundary, }; @@ -154,7 +154,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { fn lower_pattern_range_endpoint( &mut self, - expr: Option<&'tcx hir::Expr<'tcx>>, + expr: Option<&'tcx hir::PatExpr<'tcx>>, ) -> Result< (Option<PatRangeBoundary<'tcx>>, Option<Ascription<'tcx>>, Option<LocalDefId>), ErrorGuaranteed, @@ -200,13 +200,12 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { /// This is only called when the range is already known to be malformed. fn error_on_literal_overflow( &self, - expr: Option<&'tcx hir::Expr<'tcx>>, + expr: Option<&'tcx hir::PatExpr<'tcx>>, ty: Ty<'tcx>, ) -> Result<(), ErrorGuaranteed> { - use hir::{ExprKind, UnOp}; use rustc_ast::ast::LitKind; - let Some(mut expr) = expr else { + let Some(expr) = expr else { return Ok(()); }; let span = expr.span; @@ -214,12 +213,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { // We need to inspect the original expression, because if we only inspect the output of // `eval_bits`, an overflowed value has already been wrapped around. // We mostly copy the logic from the `rustc_lint::OVERFLOWING_LITERALS` lint. - let mut negated = false; - if let ExprKind::Unary(UnOp::Neg, sub_expr) = expr.kind { - negated = true; - expr = sub_expr; - } - let ExprKind::Lit(lit) = expr.kind else { + let hir::PatExprKind::Lit { lit, negated } = expr.kind else { return Ok(()); }; let LitKind::Int(lit_val, _) = lit.node else { @@ -248,8 +242,8 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { fn lower_pattern_range( &mut self, - lo_expr: Option<&'tcx hir::Expr<'tcx>>, - hi_expr: Option<&'tcx hir::Expr<'tcx>>, + lo_expr: Option<&'tcx hir::PatExpr<'tcx>>, + hi_expr: Option<&'tcx hir::PatExpr<'tcx>>, end: RangeEnd, ty: Ty<'tcx>, span: Span, @@ -330,7 +324,7 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { hir::PatKind::Never => PatKind::Never, - hir::PatKind::Lit(value) => self.lower_lit(value), + hir::PatKind::Expr(value) => self.lower_lit(value), hir::PatKind::Range(ref lo_expr, ref hi_expr, end) => { let (lo_expr, hi_expr) = (lo_expr.as_deref(), hi_expr.as_deref()); @@ -435,6 +429,9 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { hir::PatKind::Or(pats) => PatKind::Or { pats: self.lower_patterns(pats) }, + // FIXME(guard_patterns): implement guard pattern lowering + hir::PatKind::Guard(pat, _) => self.lower_pattern(pat).kind, + hir::PatKind::Err(guar) => PatKind::Error(guar), }; @@ -659,31 +656,21 @@ impl<'a, 'tcx> PatCtxt<'a, 'tcx> { /// The special case for negation exists to allow things like `-128_i8` /// which would overflow if we tried to evaluate `128_i8` and then negate /// afterwards. 
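Returning the new `PartialEqImplStatus` struct instead of a four-element tuple lets callers name the flags they care about. A minimal illustration of the refactor with placeholder values (not the real query; `u32` stands in for `DefId`):

#[derive(Debug)]
struct PartialEqImplStatus {
    has_impl: bool,
    is_derived: bool,
    structural_partial_eq: bool,
    non_blanket_impl: Option<u32>, // stand-in for Option<DefId>
}

// Before the refactor this returned `(bool, bool, bool, Option<u32>)`, and every
// caller had to remember which position meant what.
fn type_has_partial_eq_impl(is_derived: bool) -> PartialEqImplStatus {
    PartialEqImplStatus {
        has_impl: true,
        is_derived,
        structural_partial_eq: is_derived,
        non_blanket_impl: Some(42),
    }
}

fn main() {
    // Callers can now destructure only the fields they need.
    let PartialEqImplStatus { has_impl, is_derived, .. } = type_has_partial_eq_impl(false);
    println!("has_impl={has_impl}, is_derived={is_derived}");
    println!("{:?}", type_has_partial_eq_impl(true));
}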
- fn lower_lit(&mut self, expr: &'tcx hir::Expr<'tcx>) -> PatKind<'tcx> { - let (lit, neg) = match expr.kind { - hir::ExprKind::Path(ref qpath) => { + fn lower_lit(&mut self, expr: &'tcx hir::PatExpr<'tcx>) -> PatKind<'tcx> { + let (lit, neg) = match &expr.kind { + hir::PatExprKind::Path(qpath) => { return self.lower_path(qpath, expr.hir_id, expr.span).kind; } - hir::ExprKind::ConstBlock(ref anon_const) => { + hir::PatExprKind::ConstBlock(anon_const) => { return self.lower_inline_const(anon_const, expr.hir_id, expr.span); } - hir::ExprKind::Lit(ref lit) => (lit, false), - hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => { - let hir::ExprKind::Lit(ref lit) = expr.kind else { - span_bug!(expr.span, "not a literal: {:?}", expr); - }; - (lit, true) - } - _ => span_bug!(expr.span, "not a literal: {:?}", expr), + hir::PatExprKind::Lit { lit, negated } => (lit, *negated), }; - let ct_ty = self.typeck_results.expr_ty(expr); + let ct_ty = self.typeck_results.node_type(expr.hir_id); let lit_input = LitToConstInput { lit: &lit.node, ty: ct_ty, neg }; - match self.tcx.at(expr.span).lit_to_const(lit_input) { - Ok(constant) => self.const_to_pat(constant, ct_ty, expr.hir_id, lit.span).kind, - Err(LitToConstError::Reported(e)) => PatKind::Error(e), - Err(LitToConstError::TypeError) => bug!("lower_lit: had type error"), - } + let constant = self.tcx.at(expr.span).lit_to_const(lit_input); + self.const_to_pat(constant, ct_ty, expr.hir_id, lit.span).kind } } diff --git a/compiler/rustc_mir_dataflow/src/debuginfo.rs b/compiler/rustc_mir_dataflow/src/debuginfo.rs index fd5e8cf2955..0d25ce91c9a 100644 --- a/compiler/rustc_mir_dataflow/src/debuginfo.rs +++ b/compiler/rustc_mir_dataflow/src/debuginfo.rs @@ -1,17 +1,17 @@ -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; /// Return the set of locals that appear in debuginfo. 
-pub fn debuginfo_locals(body: &Body<'_>) -> BitSet<Local> { - let mut visitor = DebuginfoLocals(BitSet::new_empty(body.local_decls.len())); +pub fn debuginfo_locals(body: &Body<'_>) -> DenseBitSet<Local> { + let mut visitor = DebuginfoLocals(DenseBitSet::new_empty(body.local_decls.len())); for debuginfo in body.var_debug_info.iter() { visitor.visit_var_debug_info(debuginfo); } visitor.0 } -struct DebuginfoLocals(BitSet<Local>); +struct DebuginfoLocals(DenseBitSet<Local>); impl Visitor<'_> for DebuginfoLocals { fn visit_local(&mut self, local: Local, _: PlaceContext, _: Location) { diff --git a/compiler/rustc_mir_dataflow/src/framework/cursor.rs b/compiler/rustc_mir_dataflow/src/framework/cursor.rs index 89ff93d9943..c46ae9775cf 100644 --- a/compiler/rustc_mir_dataflow/src/framework/cursor.rs +++ b/compiler/rustc_mir_dataflow/src/framework/cursor.rs @@ -4,7 +4,7 @@ use std::cmp::Ordering; use std::ops::{Deref, DerefMut}; #[cfg(debug_assertions)] -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::{self, BasicBlock, Location}; use super::{Analysis, Direction, Effect, EffectIndex, Results}; @@ -71,7 +71,7 @@ where state_needs_reset: bool, #[cfg(debug_assertions)] - reachable_blocks: BitSet<BasicBlock>, + reachable_blocks: DenseBitSet<BasicBlock>, } impl<'mir, 'tcx, A> ResultsCursor<'mir, 'tcx, A> diff --git a/compiler/rustc_mir_dataflow/src/framework/fmt.rs b/compiler/rustc_mir_dataflow/src/framework/fmt.rs index faf2c411dde..38599cd0949 100644 --- a/compiler/rustc_mir_dataflow/src/framework/fmt.rs +++ b/compiler/rustc_mir_dataflow/src/framework/fmt.rs @@ -4,7 +4,7 @@ use std::fmt; use rustc_index::Idx; -use rustc_index::bit_set::{BitSet, ChunkedBitSet, MixedBitSet}; +use rustc_index::bit_set::{ChunkedBitSet, DenseBitSet, MixedBitSet}; use super::lattice::MaybeReachable; @@ -73,7 +73,7 @@ where // Impls -impl<T, C> DebugWithContext<C> for BitSet<T> +impl<T, C> DebugWithContext<C> for DenseBitSet<T> where T: Idx + DebugWithContext<C>, { diff --git a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs index 60c97710c7f..e457b514936 100644 --- a/compiler/rustc_mir_dataflow/src/framework/graphviz.rs +++ b/compiler/rustc_mir_dataflow/src/framework/graphviz.rs @@ -9,7 +9,7 @@ use std::{io, ops, str}; use regex::Regex; use rustc_hir::def_id::DefId; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::{ self, BasicBlock, Body, Location, create_dump_file, dump_enabled, graphviz_safe_def_name, traversal, @@ -205,7 +205,7 @@ where // the operations that involve the mutation, i.e. within the `borrow_mut`. cursor: RefCell<ResultsCursor<'mir, 'tcx, A>>, style: OutputStyle, - reachable: BitSet<BasicBlock>, + reachable: DenseBitSet<BasicBlock>, } impl<'mir, 'tcx, A> Formatter<'mir, 'tcx, A> diff --git a/compiler/rustc_mir_dataflow/src/framework/lattice.rs b/compiler/rustc_mir_dataflow/src/framework/lattice.rs index cb8159ce37b..919e346bda7 100644 --- a/compiler/rustc_mir_dataflow/src/framework/lattice.rs +++ b/compiler/rustc_mir_dataflow/src/framework/lattice.rs @@ -39,7 +39,7 @@ //! 
[poset]: https://en.wikipedia.org/wiki/Partially_ordered_set use rustc_index::Idx; -use rustc_index::bit_set::{BitSet, MixedBitSet}; +use rustc_index::bit_set::{DenseBitSet, MixedBitSet}; use crate::framework::BitSetExt; @@ -68,10 +68,10 @@ pub trait HasTop { const TOP: Self; } -/// A `BitSet` represents the lattice formed by the powerset of all possible values of -/// the index type `T` ordered by inclusion. Equivalently, it is a tuple of "two-point" lattices, -/// one for each possible value of `T`. -impl<T: Idx> JoinSemiLattice for BitSet<T> { +/// A `DenseBitSet` represents the lattice formed by the powerset of all possible values of the +/// index type `T` ordered by inclusion. Equivalently, it is a tuple of "two-point" lattices, one +/// for each possible value of `T`. +impl<T: Idx> JoinSemiLattice for DenseBitSet<T> { fn join(&mut self, other: &Self) -> bool { self.union(other) } diff --git a/compiler/rustc_mir_dataflow/src/framework/mod.rs b/compiler/rustc_mir_dataflow/src/framework/mod.rs index 3de2c6e3f47..60c5cb0cae8 100644 --- a/compiler/rustc_mir_dataflow/src/framework/mod.rs +++ b/compiler/rustc_mir_dataflow/src/framework/mod.rs @@ -35,7 +35,7 @@ use std::cmp::Ordering; use rustc_data_structures::work_queue::WorkQueue; -use rustc_index::bit_set::{BitSet, MixedBitSet}; +use rustc_index::bit_set::{DenseBitSet, MixedBitSet}; use rustc_index::{Idx, IndexVec}; use rustc_middle::bug; use rustc_middle::mir::{self, BasicBlock, CallReturnPlaces, Location, TerminatorEdges, traversal}; @@ -65,7 +65,7 @@ pub trait BitSetExt<T> { fn contains(&self, elem: T) -> bool; } -impl<T: Idx> BitSetExt<T> for BitSet<T> { +impl<T: Idx> BitSetExt<T> for DenseBitSet<T> { fn contains(&self, elem: T) -> bool { self.contains(elem) } @@ -334,7 +334,7 @@ pub trait GenKill<T> { } } -impl<T: Idx> GenKill<T> for BitSet<T> { +impl<T: Idx> GenKill<T> for DenseBitSet<T> { fn gen_(&mut self, elem: T) { self.insert(elem); } diff --git a/compiler/rustc_mir_dataflow/src/framework/tests.rs b/compiler/rustc_mir_dataflow/src/framework/tests.rs index 8e7d4ab0fa3..5b3a9ccba69 100644 --- a/compiler/rustc_mir_dataflow/src/framework/tests.rs +++ b/compiler/rustc_mir_dataflow/src/framework/tests.rs @@ -84,13 +84,13 @@ impl<D: Direction> MockAnalysis<'_, D> { /// The entry set for each `BasicBlock` is the ID of that block offset by a fixed amount to /// avoid colliding with the statement/terminator effects. - fn mock_entry_set(&self, bb: BasicBlock) -> BitSet<usize> { + fn mock_entry_set(&self, bb: BasicBlock) -> DenseBitSet<usize> { let mut ret = self.bottom_value(self.body); ret.insert(Self::BASIC_BLOCK_OFFSET + bb.index()); ret } - fn mock_entry_states(&self) -> IndexVec<BasicBlock, BitSet<usize>> { + fn mock_entry_states(&self) -> IndexVec<BasicBlock, DenseBitSet<usize>> { let empty = self.bottom_value(self.body); let mut ret = IndexVec::from_elem(empty, &self.body.basic_blocks); @@ -121,7 +121,7 @@ impl<D: Direction> MockAnalysis<'_, D> { /// For example, the expected state when calling /// `seek_before_primary_effect(Location { block: 2, statement_index: 2 })` /// would be `[102, 0, 1, 2, 3, 4]`. 
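The `BitSet` to `DenseBitSet` rename above runs through the whole dataflow framework; the domain is still a dense bit vector whose lattice join is set union. A toy, self-contained version of that idea (one bit per index, `union` reports whether anything changed), purely illustrative and far simpler than the `rustc_index` implementation:

// A toy dense bit set over indices 0..domain_size -- not the rustc_index type.
#[derive(Clone, Debug)]
struct ToyDenseBitSet {
    words: Vec<u64>,
}

impl ToyDenseBitSet {
    fn new_empty(domain_size: usize) -> Self {
        ToyDenseBitSet { words: vec![0; domain_size.div_ceil(64)] }
    }

    fn insert(&mut self, idx: usize) {
        self.words[idx / 64] |= 1u64 << (idx % 64);
    }

    fn contains(&self, idx: usize) -> bool {
        (self.words[idx / 64] >> (idx % 64)) & 1 == 1
    }

    // The shape of `JoinSemiLattice::join` for a powerset lattice: union the two
    // sets and report whether `self` grew, so the fixpoint loop knows to requeue.
    fn union(&mut self, other: &Self) -> bool {
        let mut changed = false;
        for (w, o) in self.words.iter_mut().zip(&other.words) {
            let new = *w | *o;
            changed |= new != *w;
            *w = new;
        }
        changed
    }
}

fn main() {
    let mut live = ToyDenseBitSet::new_empty(128);
    live.insert(3);
    let mut incoming = ToyDenseBitSet::new_empty(128);
    incoming.insert(100);
    assert!(live.union(&incoming)); // state changed
    assert!(live.contains(3) && live.contains(100));
    assert!(!live.union(&incoming)); // already a superset, no change
    println!("ok");
}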
- fn expected_state_at_target(&self, target: SeekTarget) -> BitSet<usize> { + fn expected_state_at_target(&self, target: SeekTarget) -> DenseBitSet<usize> { let block = target.block(); let mut ret = self.bottom_value(self.body); ret.insert(Self::BASIC_BLOCK_OFFSET + block.index()); @@ -155,13 +155,13 @@ impl<D: Direction> MockAnalysis<'_, D> { } impl<'tcx, D: Direction> Analysis<'tcx> for MockAnalysis<'tcx, D> { - type Domain = BitSet<usize>; + type Domain = DenseBitSet<usize>; type Direction = D; const NAME: &'static str = "mock"; fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain { - BitSet::new_empty(Self::BASIC_BLOCK_OFFSET + body.basic_blocks.len()) + DenseBitSet::new_empty(Self::BASIC_BLOCK_OFFSET + body.basic_blocks.len()) } fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) { diff --git a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs index 217594b3238..df4b1a53417 100644 --- a/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs +++ b/compiler/rustc_mir_dataflow/src/impls/borrowed_locals.rs @@ -1,4 +1,4 @@ -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::Visitor; use rustc_middle::mir::*; @@ -21,12 +21,12 @@ impl MaybeBorrowedLocals { } impl<'tcx> Analysis<'tcx> for MaybeBorrowedLocals { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; const NAME: &'static str = "maybe_borrowed_locals"; fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain { // bottom = unborrowed - BitSet::new_empty(body.local_decls().len()) + DenseBitSet::new_empty(body.local_decls().len()) } fn initialize_start_block(&self, _: &Body<'tcx>, _: &mut Self::Domain) { @@ -91,6 +91,7 @@ where | Rvalue::Use(..) | Rvalue::ThreadLocalRef(..) | Rvalue::Repeat(..) + | Rvalue::Len(..) | Rvalue::BinaryOp(..) | Rvalue::NullaryOp(..) | Rvalue::UnaryOp(..) @@ -137,8 +138,8 @@ where } /// The set of locals that are borrowed at some point in the MIR body. 
-pub fn borrowed_locals(body: &Body<'_>) -> BitSet<Local> { - struct Borrowed(BitSet<Local>); +pub fn borrowed_locals(body: &Body<'_>) -> DenseBitSet<Local> { + struct Borrowed(DenseBitSet<Local>); impl GenKill<Local> for Borrowed { #[inline] @@ -151,7 +152,7 @@ pub fn borrowed_locals(body: &Body<'_>) -> BitSet<Local> { } } - let mut borrowed = Borrowed(BitSet::new_empty(body.local_decls.len())); + let mut borrowed = Borrowed(DenseBitSet::new_empty(body.local_decls.len())); TransferFunction { trans: &mut borrowed }.visit_body(body); borrowed.0 } diff --git a/compiler/rustc_mir_dataflow/src/impls/initialized.rs b/compiler/rustc_mir_dataflow/src/impls/initialized.rs index 769f9c7cfc3..760f94af52d 100644 --- a/compiler/rustc_mir_dataflow/src/impls/initialized.rs +++ b/compiler/rustc_mir_dataflow/src/impls/initialized.rs @@ -2,7 +2,7 @@ use std::assert_matches::assert_matches; use rustc_abi::VariantIdx; use rustc_index::Idx; -use rustc_index::bit_set::{BitSet, MixedBitSet}; +use rustc_index::bit_set::{DenseBitSet, MixedBitSet}; use rustc_middle::bug; use rustc_middle::mir::{self, Body, CallReturnPlaces, Location, TerminatorEdges}; use rustc_middle::ty::util::Discr; @@ -207,7 +207,7 @@ pub struct MaybeUninitializedPlaces<'a, 'tcx> { move_data: &'a MoveData<'tcx>, mark_inactive_variants_as_uninit: bool, - skip_unreachable_unwind: BitSet<mir::BasicBlock>, + skip_unreachable_unwind: DenseBitSet<mir::BasicBlock>, } impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> { @@ -217,7 +217,7 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> { body, move_data, mark_inactive_variants_as_uninit: false, - skip_unreachable_unwind: BitSet::new_empty(body.basic_blocks.len()), + skip_unreachable_unwind: DenseBitSet::new_empty(body.basic_blocks.len()), } } @@ -233,7 +233,7 @@ impl<'a, 'tcx> MaybeUninitializedPlaces<'a, 'tcx> { pub fn skipping_unreachable_unwind( mut self, - unreachable_unwind: BitSet<mir::BasicBlock>, + unreachable_unwind: DenseBitSet<mir::BasicBlock>, ) -> Self { self.skip_unreachable_unwind = unreachable_unwind; self diff --git a/compiler/rustc_mir_dataflow/src/impls/liveness.rs b/compiler/rustc_mir_dataflow/src/impls/liveness.rs index b2050a6adf9..6ec1b03a34e 100644 --- a/compiler/rustc_mir_dataflow/src/impls/liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/liveness.rs @@ -1,4 +1,4 @@ -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{ self, CallReturnPlaces, Local, Location, Place, StatementKind, TerminatorEdges, @@ -26,14 +26,14 @@ use crate::{Analysis, Backward, GenKill}; pub struct MaybeLiveLocals; impl<'tcx> Analysis<'tcx> for MaybeLiveLocals { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; type Direction = Backward; const NAME: &'static str = "liveness"; fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain { // bottom = not live - BitSet::new_empty(body.local_decls.len()) + DenseBitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) { @@ -81,7 +81,7 @@ impl<'tcx> Analysis<'tcx> for MaybeLiveLocals { } } -pub struct TransferFunction<'a>(pub &'a mut BitSet<Local>); +pub struct TransferFunction<'a>(pub &'a mut DenseBitSet<Local>); impl<'tcx> Visitor<'tcx> for TransferFunction<'_> { fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) { @@ -117,7 +117,7 @@ impl<'tcx> Visitor<'tcx> for TransferFunction<'_> { 
} } -struct YieldResumeEffect<'a>(&'a mut BitSet<Local>); +struct YieldResumeEffect<'a>(&'a mut DenseBitSet<Local>); impl<'tcx> Visitor<'tcx> for YieldResumeEffect<'_> { fn visit_place(&mut self, place: &mir::Place<'tcx>, context: PlaceContext, location: Location) { @@ -137,7 +137,7 @@ enum DefUse { } impl DefUse { - fn apply(state: &mut BitSet<Local>, place: Place<'_>, context: PlaceContext) { + fn apply(state: &mut DenseBitSet<Local>, place: Place<'_>, context: PlaceContext) { match DefUse::for_place(place, context) { Some(DefUse::Def) => state.kill(place.local), Some(DefUse::Use) => state.gen_(place.local), @@ -204,7 +204,7 @@ impl DefUse { /// /// All of the caveats of `MaybeLiveLocals` apply. pub struct MaybeTransitiveLiveLocals<'a> { - always_live: &'a BitSet<Local>, + always_live: &'a DenseBitSet<Local>, } impl<'a> MaybeTransitiveLiveLocals<'a> { @@ -212,20 +212,20 @@ impl<'a> MaybeTransitiveLiveLocals<'a> { /// considered live. /// /// This should include at least all locals that are ever borrowed. - pub fn new(always_live: &'a BitSet<Local>) -> Self { + pub fn new(always_live: &'a DenseBitSet<Local>) -> Self { MaybeTransitiveLiveLocals { always_live } } } impl<'a, 'tcx> Analysis<'tcx> for MaybeTransitiveLiveLocals<'a> { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; type Direction = Backward; const NAME: &'static str = "transitive liveness"; fn bottom_value(&self, body: &mir::Body<'tcx>) -> Self::Domain { // bottom = not live - BitSet::new_empty(body.local_decls.len()) + DenseBitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, _: &mir::Body<'tcx>, _: &mut Self::Domain) { diff --git a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs index 65b480d3a5e..e3aa8f5a620 100644 --- a/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs +++ b/compiler/rustc_mir_dataflow/src/impls/storage_liveness.rs @@ -1,6 +1,6 @@ use std::borrow::Cow; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -10,8 +10,8 @@ use crate::{Analysis, GenKill, ResultsCursor}; /// The set of locals in a MIR body that do not have `StorageLive`/`StorageDead` annotations. /// /// These locals have fixed storage for the duration of the body. 
-pub fn always_storage_live_locals(body: &Body<'_>) -> BitSet<Local> { - let mut always_live_locals = BitSet::new_filled(body.local_decls.len()); +pub fn always_storage_live_locals(body: &Body<'_>) -> DenseBitSet<Local> { + let mut always_live_locals = DenseBitSet::new_filled(body.local_decls.len()); for block in &*body.basic_blocks { for statement in &block.statements { @@ -25,23 +25,23 @@ pub fn always_storage_live_locals(body: &Body<'_>) -> BitSet<Local> { } pub struct MaybeStorageLive<'a> { - always_live_locals: Cow<'a, BitSet<Local>>, + always_live_locals: Cow<'a, DenseBitSet<Local>>, } impl<'a> MaybeStorageLive<'a> { - pub fn new(always_live_locals: Cow<'a, BitSet<Local>>) -> Self { + pub fn new(always_live_locals: Cow<'a, DenseBitSet<Local>>) -> Self { MaybeStorageLive { always_live_locals } } } impl<'a, 'tcx> Analysis<'tcx> for MaybeStorageLive<'a> { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; const NAME: &'static str = "maybe_storage_live"; fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain { // bottom = dead - BitSet::new_empty(body.local_decls.len()) + DenseBitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, body: &Body<'tcx>, state: &mut Self::Domain) { @@ -67,23 +67,23 @@ impl<'a, 'tcx> Analysis<'tcx> for MaybeStorageLive<'a> { } pub struct MaybeStorageDead<'a> { - always_live_locals: Cow<'a, BitSet<Local>>, + always_live_locals: Cow<'a, DenseBitSet<Local>>, } impl<'a> MaybeStorageDead<'a> { - pub fn new(always_live_locals: Cow<'a, BitSet<Local>>) -> Self { + pub fn new(always_live_locals: Cow<'a, DenseBitSet<Local>>) -> Self { MaybeStorageDead { always_live_locals } } } impl<'a, 'tcx> Analysis<'tcx> for MaybeStorageDead<'a> { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; const NAME: &'static str = "maybe_storage_dead"; fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain { // bottom = live - BitSet::new_empty(body.local_decls.len()) + DenseBitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, body: &Body<'tcx>, state: &mut Self::Domain) { @@ -125,13 +125,13 @@ impl<'mir, 'tcx> MaybeRequiresStorage<'mir, 'tcx> { } impl<'tcx> Analysis<'tcx> for MaybeRequiresStorage<'_, 'tcx> { - type Domain = BitSet<Local>; + type Domain = DenseBitSet<Local>; const NAME: &'static str = "requires_storage"; fn bottom_value(&self, body: &Body<'tcx>) -> Self::Domain { // bottom = dead - BitSet::new_empty(body.local_decls.len()) + DenseBitSet::new_empty(body.local_decls.len()) } fn initialize_start_block(&self, body: &Body<'tcx>, state: &mut Self::Domain) { @@ -304,7 +304,7 @@ impl<'tcx> MaybeRequiresStorage<'_, 'tcx> { struct MoveVisitor<'a, 'mir, 'tcx> { borrowed_locals: &'a mut BorrowedLocalsResults<'mir, 'tcx>, - state: &'a mut BitSet<Local>, + state: &'a mut DenseBitSet<Local>, } impl<'tcx> Visitor<'tcx> for MoveVisitor<'_, '_, 'tcx> { diff --git a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs index 80875f32e4f..d1b3a389e9e 100644 --- a/compiler/rustc_mir_dataflow/src/move_paths/builder.rs +++ b/compiler/rustc_mir_dataflow/src/move_paths/builder.rs @@ -413,6 +413,7 @@ impl<'a, 'tcx, F: Fn(Ty<'tcx>) -> bool> MoveDataBuilder<'a, 'tcx, F> { Rvalue::Ref(..) | Rvalue::RawPtr(..) | Rvalue::Discriminant(..) + | Rvalue::Len(..) | Rvalue::NullaryOp( NullOp::SizeOf | NullOp::AlignOf | NullOp::OffsetOf(..) 
| NullOp::UbChecks, _, diff --git a/compiler/rustc_mir_dataflow/src/points.rs b/compiler/rustc_mir_dataflow/src/points.rs index 74209da876a..5d2a78acbf5 100644 --- a/compiler/rustc_mir_dataflow/src/points.rs +++ b/compiler/rustc_mir_dataflow/src/points.rs @@ -1,4 +1,4 @@ -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::interval::SparseIntervalMatrix; use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::{self, BasicBlock, Body, Location}; @@ -102,7 +102,7 @@ pub fn save_as_intervals<'tcx, N, A>( ) -> SparseIntervalMatrix<N, PointIndex> where N: Idx, - A: Analysis<'tcx, Domain = BitSet<N>>, + A: Analysis<'tcx, Domain = DenseBitSet<N>>, { let values = SparseIntervalMatrix::new(elements.num_points()); let mut visitor = Visitor { elements, values }; @@ -122,7 +122,7 @@ struct Visitor<'a, N: Idx> { impl<'mir, 'tcx, A, N> ResultsVisitor<'mir, 'tcx, A> for Visitor<'_, N> where - A: Analysis<'tcx, Domain = BitSet<N>>, + A: Analysis<'tcx, Domain = DenseBitSet<N>>, N: Idx, { fn visit_after_primary_statement_effect( diff --git a/compiler/rustc_mir_dataflow/src/value_analysis.rs b/compiler/rustc_mir_dataflow/src/value_analysis.rs index 9328870c7ae..a51af8c40fd 100644 --- a/compiler/rustc_mir_dataflow/src/value_analysis.rs +++ b/compiler/rustc_mir_dataflow/src/value_analysis.rs @@ -6,7 +6,7 @@ use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::{FxHashMap, FxIndexSet, StdEntry}; use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::tcx::PlaceTy; use rustc_middle::mir::visit::{MutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -399,7 +399,7 @@ impl<'tcx> Map<'tcx> { &mut self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>, - exclude: BitSet<Local>, + exclude: DenseBitSet<Local>, value_limit: Option<usize>, ) { // Start by constructing the places for each bare local. @@ -912,9 +912,9 @@ pub fn iter_fields<'tcx>( } /// Returns all locals with projections that have their reference or address taken. -pub fn excluded_locals(body: &Body<'_>) -> BitSet<Local> { +pub fn excluded_locals(body: &Body<'_>) -> DenseBitSet<Local> { struct Collector { - result: BitSet<Local>, + result: DenseBitSet<Local>, } impl<'tcx> Visitor<'tcx> for Collector { @@ -932,7 +932,7 @@ pub fn excluded_locals(body: &Body<'_>) -> BitSet<Local> { } } - let mut collector = Collector { result: BitSet::new_empty(body.local_decls.len()) }; + let mut collector = Collector { result: DenseBitSet::new_empty(body.local_decls.len()) }; collector.visit_body(body); collector.result } diff --git a/compiler/rustc_mir_transform/messages.ftl b/compiler/rustc_mir_transform/messages.ftl index d00bfc66a6a..5628f4c9381 100644 --- a/compiler/rustc_mir_transform/messages.ftl +++ b/compiler/rustc_mir_transform/messages.ftl @@ -19,6 +19,23 @@ mir_transform_ffi_unwind_call = call to {$foreign -> mir_transform_fn_item_ref = taking a reference to a function item does not give a function pointer .suggestion = cast `{$ident}` to obtain a function pointer +mir_transform_force_inline = + `{$callee}` could not be inlined into `{$caller}` but is required to be inlined + .call = ...`{$callee}` called here + .attr = inlining due to this annotation + .caller = within `{$caller}`... 
+ .callee = `{$callee}` defined here + .note = could not be inlined due to: {$reason} + +mir_transform_force_inline_attr = + `{$callee}` is incompatible with `#[rustc_force_inline]` + .attr = annotation here + .callee = `{$callee}` defined here + .note = incompatible due to: {$reason} + +mir_transform_force_inline_justification = + `{$callee}` is required to be inlined to: {$sym} + mir_transform_must_not_suspend = {$pre}`{$def_path}`{$post} held across a suspend point, but should not be .label = the value is held across this suspend point .note = {$reason} @@ -55,6 +72,12 @@ mir_transform_unaligned_packed_ref = reference to packed field is unaligned .note_ub = creating a misaligned reference is undefined behavior (even if that reference is never dereferenced) .help = copy the field contents to a local variable, or replace the reference with a raw pointer and use `read_unaligned`/`write_unaligned` (loads and stores via `*p` must be properly aligned even when using raw pointers) +mir_transform_unconditional_recursion = function cannot return without recursing + .label = cannot return without recursing + .help = a `loop` may express intention better if this is on purpose + +mir_transform_unconditional_recursion_call_site_label = recursive call site + mir_transform_undefined_transmute = pointers cannot be transmuted to integers during const eval .note = at compile-time, pointers do not have an integer value .note2 = avoiding this restriction via `union` or raw pointers leads to compile-time undefined behavior diff --git a/compiler/rustc_mir_build/src/lints.rs b/compiler/rustc_mir_transform/src/check_call_recursion.rs index 5cf33868ade..51fd3c6512e 100644 --- a/compiler/rustc_mir_build/src/lints.rs +++ b/compiler/rustc_mir_transform/src/check_call_recursion.rs @@ -10,25 +10,54 @@ use rustc_session::lint::builtin::UNCONDITIONAL_RECURSION; use rustc_span::Span; use crate::errors::UnconditionalRecursion; +use crate::pass_manager::MirLint; -pub(crate) fn check<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { - check_call_recursion(tcx, body); +pub(super) struct CheckCallRecursion; + +impl<'tcx> MirLint<'tcx> for CheckCallRecursion { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + let def_id = body.source.def_id().expect_local(); + + if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) { + // If this is trait/impl method, extract the trait's args. + let trait_args = match tcx.trait_of_item(def_id.to_def_id()) { + Some(trait_def_id) => { + let trait_args_count = tcx.generics_of(trait_def_id).count(); + &GenericArgs::identity_for_item(tcx, def_id)[..trait_args_count] + } + _ => &[], + }; + + check_recursion(tcx, body, CallRecursion { trait_args }) + } + } } -fn check_call_recursion<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { - let def_id = body.source.def_id().expect_local(); +/// Requires drop elaboration to have been performed. +pub(super) struct CheckDropRecursion; - if let DefKind::Fn | DefKind::AssocFn = tcx.def_kind(def_id) { - // If this is trait/impl method, extract the trait's args. 
- let trait_args = match tcx.trait_of_item(def_id.to_def_id()) { - Some(trait_def_id) => { - let trait_args_count = tcx.generics_of(trait_def_id).count(); - &GenericArgs::identity_for_item(tcx, def_id)[..trait_args_count] - } - _ => &[], - }; +impl<'tcx> MirLint<'tcx> for CheckDropRecursion { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + let def_id = body.source.def_id().expect_local(); - check_recursion(tcx, body, CallRecursion { trait_args }) + // First check if `body` is an `fn drop()` of `Drop` + if let DefKind::AssocFn = tcx.def_kind(def_id) + && let Some(trait_ref) = + tcx.impl_of_method(def_id.to_def_id()).and_then(|def_id| tcx.impl_trait_ref(def_id)) + && let Some(drop_trait) = tcx.lang_items().drop_trait() + && drop_trait == trait_ref.instantiate_identity().def_id + // avoid erroneous `Drop` impls from causing ICEs below + && let sig = tcx.fn_sig(def_id).instantiate_identity() + && sig.inputs().skip_binder().len() == 1 + { + // It was. Now figure out for what type `Drop` is implemented and then + // check for recursion. + if let ty::Ref(_, dropped_ty, _) = + tcx.liberate_late_bound_regions(def_id.to_def_id(), sig.input(0)).kind() + { + check_recursion(tcx, body, RecursiveDrop { drop_for: *dropped_ty }); + } + } } } @@ -61,30 +90,6 @@ fn check_recursion<'tcx>( } } -/// Requires drop elaboration to have been performed first. -pub fn check_drop_recursion<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { - let def_id = body.source.def_id().expect_local(); - - // First check if `body` is an `fn drop()` of `Drop` - if let DefKind::AssocFn = tcx.def_kind(def_id) - && let Some(trait_ref) = - tcx.impl_of_method(def_id.to_def_id()).and_then(|def_id| tcx.impl_trait_ref(def_id)) - && let Some(drop_trait) = tcx.lang_items().drop_trait() - && drop_trait == trait_ref.instantiate_identity().def_id - // avoid erroneous `Drop` impls from causing ICEs below - && let sig = tcx.fn_sig(def_id).instantiate_identity() - && sig.inputs().skip_binder().len() == 1 - { - // It was. Now figure out for what type `Drop` is implemented and then - // check for recursion. - if let ty::Ref(_, dropped_ty, _) = - tcx.liberate_late_bound_regions(def_id.to_def_id(), sig.input(0)).kind() - { - check_recursion(tcx, body, RecursiveDrop { drop_for: *dropped_ty }); - } - } -} - trait TerminatorClassifier<'tcx> { fn is_recursive_terminator( &self, diff --git a/compiler/rustc_mir_transform/src/check_inline.rs b/compiler/rustc_mir_transform/src/check_inline.rs new file mode 100644 index 00000000000..497f4a660ea --- /dev/null +++ b/compiler/rustc_mir_transform/src/check_inline.rs @@ -0,0 +1,91 @@ +//! Check that a body annotated with `#[rustc_force_inline]` will not fail to inline based on its +//! definition alone (irrespective of any specific caller). + +use rustc_attr_parsing::InlineAttr; +use rustc_hir::def_id::DefId; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; +use rustc_middle::mir::{Body, TerminatorKind}; +use rustc_middle::ty; +use rustc_middle::ty::TyCtxt; +use rustc_span::sym; + +use crate::pass_manager::MirLint; + +pub(super) struct CheckForceInline; + +impl<'tcx> MirLint<'tcx> for CheckForceInline { + fn run_lint(&self, tcx: TyCtxt<'tcx>, body: &Body<'tcx>) { + let def_id = body.source.def_id(); + if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() || !def_id.is_local() { + return; + } + let InlineAttr::Force { attr_span, .. 
} = tcx.codegen_fn_attrs(def_id).inline else { + return; + }; + + if let Err(reason) = + is_inline_valid_on_fn(tcx, def_id).and_then(|_| is_inline_valid_on_body(tcx, body)) + { + tcx.dcx().emit_err(crate::errors::InvalidForceInline { + attr_span, + callee_span: tcx.def_span(def_id), + callee: tcx.def_path_str(def_id), + reason, + }); + } + } +} + +pub(super) fn is_inline_valid_on_fn<'tcx>( + tcx: TyCtxt<'tcx>, + def_id: DefId, +) -> Result<(), &'static str> { + let codegen_attrs = tcx.codegen_fn_attrs(def_id); + if tcx.has_attr(def_id, sym::rustc_no_mir_inline) { + return Err("#[rustc_no_mir_inline]"); + } + + // FIXME(#127234): Coverage instrumentation currently doesn't handle inlined + // MIR correctly when Modified Condition/Decision Coverage is enabled. + if tcx.sess.instrument_coverage_mcdc() { + return Err("incompatible with MC/DC coverage"); + } + + let ty = tcx.type_of(def_id); + if match ty.instantiate_identity().kind() { + ty::FnDef(..) => tcx.fn_sig(def_id).instantiate_identity().c_variadic(), + ty::Closure(_, args) => args.as_closure().sig().c_variadic(), + _ => false, + } { + return Err("C variadic"); + } + + if codegen_attrs.flags.contains(CodegenFnAttrFlags::COLD) { + return Err("cold"); + } + + // Intrinsic fallback bodies are automatically made cross-crate inlineable, + // but at this stage we don't know whether codegen knows the intrinsic, + // so just conservatively don't inline it. This also ensures that we do not + // accidentally inline the body of an intrinsic that *must* be overridden. + if tcx.has_attr(def_id, sym::rustc_intrinsic) { + return Err("callee is an intrinsic"); + } + + Ok(()) +} + +pub(super) fn is_inline_valid_on_body<'tcx>( + _: TyCtxt<'tcx>, + body: &Body<'tcx>, +) -> Result<(), &'static str> { + if body + .basic_blocks + .iter() + .any(|bb| matches!(bb.terminator().kind, TerminatorKind::TailCall { .. })) + { + return Err("can't inline functions with tail calls"); + } + + Ok(()) +} diff --git a/compiler/rustc_mir_transform/src/copy_prop.rs b/compiler/rustc_mir_transform/src/copy_prop.rs index 9b3443d3209..f149bf97cde 100644 --- a/compiler/rustc_mir_transform/src/copy_prop.rs +++ b/compiler/rustc_mir_transform/src/copy_prop.rs @@ -1,5 +1,5 @@ use rustc_index::IndexSlice; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; @@ -34,7 +34,7 @@ impl<'tcx> crate::MirPass<'tcx> for CopyProp { let fully_moved = fully_moved_locals(&ssa, body); debug!(?fully_moved); - let mut storage_to_remove = BitSet::new_empty(fully_moved.domain_size()); + let mut storage_to_remove = DenseBitSet::new_empty(fully_moved.domain_size()); for (local, &head) in ssa.copy_classes().iter_enumerated() { if local != head { storage_to_remove.insert(head); @@ -68,8 +68,8 @@ impl<'tcx> crate::MirPass<'tcx> for CopyProp { /// This means that replacing it by a copy of `_a` if ok, since this copy happens before `_c` is /// moved, and therefore that `_d` is moved. 
#[instrument(level = "trace", skip(ssa, body))] -fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> BitSet<Local> { - let mut fully_moved = BitSet::new_filled(body.local_decls.len()); +fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> DenseBitSet<Local> { + let mut fully_moved = DenseBitSet::new_filled(body.local_decls.len()); for (_, rvalue, _) in ssa.assignments(body) { let (Rvalue::Use(Operand::Copy(place) | Operand::Move(place)) @@ -96,9 +96,9 @@ fn fully_moved_locals(ssa: &SsaLocals, body: &Body<'_>) -> BitSet<Local> { /// Utility to help performing substitution of `*pattern` by `target`. struct Replacer<'a, 'tcx> { tcx: TyCtxt<'tcx>, - fully_moved: BitSet<Local>, - storage_to_remove: BitSet<Local>, - borrowed_locals: &'a BitSet<Local>, + fully_moved: DenseBitSet<Local>, + storage_to_remove: DenseBitSet<Local>, + borrowed_locals: &'a DenseBitSet<Local>, copy_classes: &'a IndexSlice<Local, Local>, } diff --git a/compiler/rustc_mir_transform/src/coroutine.rs b/compiler/rustc_mir_transform/src/coroutine.rs index f6536d78761..a3715b5d485 100644 --- a/compiler/rustc_mir_transform/src/coroutine.rs +++ b/compiler/rustc_mir_transform/src/coroutine.rs @@ -60,7 +60,7 @@ use rustc_errors::pluralize; use rustc_hir as hir; use rustc_hir::lang_items::LangItem; use rustc_hir::{CoroutineDesugaring, CoroutineKind}; -use rustc_index::bit_set::{BitMatrix, BitSet, GrowableBitSet}; +use rustc_index::bit_set::{BitMatrix, DenseBitSet, GrowableBitSet}; use rustc_index::{Idx, IndexVec}; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -185,13 +185,13 @@ struct TransformVisitor<'tcx> { remap: IndexVec<Local, Option<(Ty<'tcx>, VariantIdx, FieldIdx)>>, // A map from a suspension point in a block to the locals which have live storage at that point - storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>, + storage_liveness: IndexVec<BasicBlock, Option<DenseBitSet<Local>>>, // A list of suspension points, generated during the transform suspension_points: Vec<SuspensionPoint<'tcx>>, // The set of locals that have no `StorageLive`/`StorageDead` annotations. - always_live_locals: BitSet<Local>, + always_live_locals: DenseBitSet<Local>, // The original RETURN_PLACE local old_ret_local: Local, @@ -633,7 +633,7 @@ struct LivenessInfo { saved_locals: CoroutineSavedLocals, /// The set of saved locals live at each suspension point. - live_locals_at_suspension_points: Vec<BitSet<CoroutineSavedLocal>>, + live_locals_at_suspension_points: Vec<DenseBitSet<CoroutineSavedLocal>>, /// Parallel vec to the above with SourceInfo for each yield terminator. source_info_at_suspension_points: Vec<SourceInfo>, @@ -645,7 +645,7 @@ struct LivenessInfo { /// For every suspending block, the locals which are storage-live across /// that suspension point. - storage_liveness: IndexVec<BasicBlock, Option<BitSet<Local>>>, + storage_liveness: IndexVec<BasicBlock, Option<DenseBitSet<Local>>>, } /// Computes which locals have to be stored in the state-machine for the @@ -659,7 +659,7 @@ struct LivenessInfo { fn locals_live_across_suspend_points<'tcx>( tcx: TyCtxt<'tcx>, body: &Body<'tcx>, - always_live_locals: &BitSet<Local>, + always_live_locals: &DenseBitSet<Local>, movable: bool, ) -> LivenessInfo { // Calculate when MIR locals have live storage. 
This gives us an upper bound of their @@ -688,7 +688,7 @@ fn locals_live_across_suspend_points<'tcx>( let mut storage_liveness_map = IndexVec::from_elem(None, &body.basic_blocks); let mut live_locals_at_suspension_points = Vec::new(); let mut source_info_at_suspension_points = Vec::new(); - let mut live_locals_at_any_suspension_point = BitSet::new_empty(body.local_decls.len()); + let mut live_locals_at_any_suspension_point = DenseBitSet::new_empty(body.local_decls.len()); for (block, data) in body.basic_blocks.iter_enumerated() { if let TerminatorKind::Yield { .. } = data.terminator().kind { @@ -768,7 +768,7 @@ fn locals_live_across_suspend_points<'tcx>( /// `CoroutineSavedLocal` is indexed in terms of the elements in this set; /// i.e. `CoroutineSavedLocal::new(1)` corresponds to the second local /// included in this set. -struct CoroutineSavedLocals(BitSet<Local>); +struct CoroutineSavedLocals(DenseBitSet<Local>); impl CoroutineSavedLocals { /// Returns an iterator over each `CoroutineSavedLocal` along with the `Local` it corresponds @@ -777,11 +777,11 @@ impl CoroutineSavedLocals { self.iter().enumerate().map(|(i, l)| (CoroutineSavedLocal::from(i), l)) } - /// Transforms a `BitSet<Local>` that contains only locals saved across yield points to the - /// equivalent `BitSet<CoroutineSavedLocal>`. - fn renumber_bitset(&self, input: &BitSet<Local>) -> BitSet<CoroutineSavedLocal> { + /// Transforms a `DenseBitSet<Local>` that contains only locals saved across yield points to the + /// equivalent `DenseBitSet<CoroutineSavedLocal>`. + fn renumber_bitset(&self, input: &DenseBitSet<Local>) -> DenseBitSet<CoroutineSavedLocal> { assert!(self.superset(input), "{:?} not a superset of {:?}", self.0, input); - let mut out = BitSet::new_empty(self.count()); + let mut out = DenseBitSet::new_empty(self.count()); for (saved_local, local) in self.iter_enumerated() { if input.contains(local) { out.insert(saved_local); @@ -801,7 +801,7 @@ impl CoroutineSavedLocals { } impl ops::Deref for CoroutineSavedLocals { - type Target = BitSet<Local>; + type Target = DenseBitSet<Local>; fn deref(&self) -> &Self::Target { &self.0 @@ -815,7 +815,7 @@ impl ops::Deref for CoroutineSavedLocals { fn compute_storage_conflicts<'mir, 'tcx>( body: &'mir Body<'tcx>, saved_locals: &'mir CoroutineSavedLocals, - always_live_locals: BitSet<Local>, + always_live_locals: DenseBitSet<Local>, mut requires_storage: Results<'tcx, MaybeRequiresStorage<'mir, 'tcx>>, ) -> BitMatrix<CoroutineSavedLocal, CoroutineSavedLocal> { assert_eq!(body.local_decls.len(), saved_locals.domain_size()); @@ -833,7 +833,7 @@ fn compute_storage_conflicts<'mir, 'tcx>( body, saved_locals, local_conflicts: BitMatrix::from_row_n(&ineligible_locals, body.local_decls.len()), - eligible_storage_live: BitSet::new_empty(body.local_decls.len()), + eligible_storage_live: DenseBitSet::new_empty(body.local_decls.len()), }; requires_storage.visit_reachable_with(body, &mut visitor); @@ -871,7 +871,7 @@ struct StorageConflictVisitor<'a, 'tcx> { // benchmarks for coroutines. local_conflicts: BitMatrix<Local, Local>, // We keep this bitset as a buffer to avoid reallocating memory. 
- eligible_storage_live: BitSet<Local>, + eligible_storage_live: DenseBitSet<Local>, } impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>> @@ -880,7 +880,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>> fn visit_after_early_statement_effect( &mut self, _results: &mut Results<'tcx, MaybeRequiresStorage<'a, 'tcx>>, - state: &BitSet<Local>, + state: &DenseBitSet<Local>, _statement: &'a Statement<'tcx>, loc: Location, ) { @@ -890,7 +890,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>> fn visit_after_early_terminator_effect( &mut self, _results: &mut Results<'tcx, MaybeRequiresStorage<'a, 'tcx>>, - state: &BitSet<Local>, + state: &DenseBitSet<Local>, _terminator: &'a Terminator<'tcx>, loc: Location, ) { @@ -899,7 +899,7 @@ impl<'a, 'tcx> ResultsVisitor<'a, 'tcx, MaybeRequiresStorage<'a, 'tcx>> } impl StorageConflictVisitor<'_, '_> { - fn apply_state(&mut self, state: &BitSet<Local>, loc: Location) { + fn apply_state(&mut self, state: &DenseBitSet<Local>, loc: Location) { // Ignore unreachable blocks. if let TerminatorKind::Unreachable = self.body.basic_blocks[loc.block].terminator().kind { return; @@ -924,7 +924,7 @@ fn compute_layout<'tcx>( ) -> ( IndexVec<Local, Option<(Ty<'tcx>, VariantIdx, FieldIdx)>>, CoroutineLayout<'tcx>, - IndexVec<BasicBlock, Option<BitSet<Local>>>, + IndexVec<BasicBlock, Option<DenseBitSet<Local>>>, ) { let LivenessInfo { saved_locals, diff --git a/compiler/rustc_mir_transform/src/coverage/counters.rs b/compiler/rustc_mir_transform/src/coverage/counters.rs index 9e80f1f1c4a..8d397f63cc7 100644 --- a/compiler/rustc_mir_transform/src/coverage/counters.rs +++ b/compiler/rustc_mir_transform/src/coverage/counters.rs @@ -1,117 +1,181 @@ use std::cmp::Ordering; -use std::fmt::{self, Debug}; +use either::Either; +use itertools::Itertools; use rustc_data_structures::captures::Captures; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_data_structures::graph::DirectedGraph; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::coverage::{CounterId, CovTerm, Expression, ExpressionId, Op}; -use tracing::{debug, debug_span, instrument}; -use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph, ReadyFirstTraversal}; +use crate::coverage::counters::balanced_flow::BalancedFlowGraph; +use crate::coverage::counters::iter_nodes::IterNodes; +use crate::coverage::counters::node_flow::{CounterTerm, MergedNodeFlowGraph, NodeCounters}; +use crate::coverage::graph::{BasicCoverageBlock, CoverageGraph}; -#[cfg(test)] -mod tests; +mod balanced_flow; +mod iter_nodes; +mod node_flow; +mod union_find; -/// The coverage counter or counter expression associated with a particular -/// BCB node or BCB edge. -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] -enum BcbCounter { - Counter { id: CounterId }, - Expression { id: ExpressionId }, +/// Ensures that each BCB node needing a counter has one, by creating physical +/// counters or counter expressions for nodes as required. +pub(super) fn make_bcb_counters( + graph: &CoverageGraph, + bcb_needs_counter: &DenseBitSet<BasicCoverageBlock>, +) -> CoverageCounters { + // Create the derived graphs that are necessary for subsequent steps. 
+ let balanced_graph = BalancedFlowGraph::for_graph(graph, |n| !graph[n].is_out_summable); + let merged_graph = MergedNodeFlowGraph::for_balanced_graph(&balanced_graph); + + // Use those graphs to determine which nodes get physical counters, and how + // to compute the execution counts of other nodes from those counters. + let nodes = make_node_counter_priority_list(graph, balanced_graph); + let node_counters = merged_graph.make_node_counters(&nodes); + + // Convert the counters into a form suitable for embedding into MIR. + transcribe_counters(&node_counters, bcb_needs_counter) } -impl BcbCounter { - fn as_term(&self) -> CovTerm { - match *self { - BcbCounter::Counter { id, .. } => CovTerm::Counter(id), - BcbCounter::Expression { id, .. } => CovTerm::Expression(id), - } - } +/// Arranges the nodes in `balanced_graph` into a list, such that earlier nodes +/// take priority in being given a counter expression instead of a physical counter. +fn make_node_counter_priority_list( + graph: &CoverageGraph, + balanced_graph: BalancedFlowGraph<&CoverageGraph>, +) -> Vec<BasicCoverageBlock> { + // A "reloop" node has exactly one out-edge, which jumps back to the top + // of an enclosing loop. Reloop nodes are typically visited more times + // than loop-exit nodes, so try to avoid giving them physical counters. + let is_reloop_node = IndexVec::from_fn_n( + |node| match graph.successors[node].as_slice() { + &[succ] => graph.dominates(succ, node), + _ => false, + }, + graph.num_nodes(), + ); + + let mut nodes = balanced_graph.iter_nodes().rev().collect::<Vec<_>>(); + // The first node is the sink, which must not get a physical counter. + assert_eq!(nodes[0], balanced_graph.sink); + // Sort the real nodes, such that earlier (lesser) nodes take priority + // in being given a counter expression instead of a physical counter. + nodes[1..].sort_by(|&a, &b| { + // Start with a dummy `Equal` to make the actual tests line up nicely. + Ordering::Equal + // Prefer a physical counter for return/yield nodes. + .then_with(|| Ord::cmp(&graph[a].is_out_summable, &graph[b].is_out_summable)) + // Prefer an expression for reloop nodes (see definition above). + .then_with(|| Ord::cmp(&is_reloop_node[a], &is_reloop_node[b]).reverse()) + // Otherwise, prefer a physical counter for dominating nodes. + .then_with(|| graph.cmp_in_dominator_order(a, b).reverse()) + }); + nodes } -impl Debug for BcbCounter { - fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Self::Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()), - Self::Expression { id } => write!(fmt, "Expression({:?})", id.index()), - } - } -} +// Converts node counters into a form suitable for embedding into MIR. +fn transcribe_counters( + old: &NodeCounters<BasicCoverageBlock>, + bcb_needs_counter: &DenseBitSet<BasicCoverageBlock>, +) -> CoverageCounters { + let mut new = CoverageCounters::with_num_bcbs(bcb_needs_counter.domain_size()); + + for bcb in bcb_needs_counter.iter() { + // Our counter-creation algorithm doesn't guarantee that a counter + // expression starts or ends with a positive term, so partition the + // counters into "positive" and "negative" lists for easier handling. + let (mut pos, mut neg): (Vec<_>, Vec<_>) = + old.counter_expr(bcb).iter().partition_map(|&CounterTerm { node, op }| match op { + Op::Add => Either::Left(node), + Op::Subtract => Either::Right(node), + }); + + if pos.is_empty() { + // If we somehow end up with no positive terms, fall back to + // creating a physical counter. 
There's no known way for this + // to happen, but we can avoid an ICE if it does. + debug_assert!(false, "{bcb:?} has no positive counter terms"); + pos = vec![bcb]; + neg = vec![]; + } + + // These intermediate sorts are not strictly necessary, but were helpful + // in reducing churn when switching to the current counter-creation scheme. + // They also help to slightly decrease the overall size of the expression + // table, due to more subexpressions being shared. + pos.sort(); + neg.sort(); + + let mut new_counters_for_sites = |sites: Vec<BasicCoverageBlock>| { + sites.into_iter().map(|node| new.ensure_phys_counter(node)).collect::<Vec<_>>() + }; + let mut pos = new_counters_for_sites(pos); + let mut neg = new_counters_for_sites(neg); -#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] -struct BcbExpression { - lhs: BcbCounter, - op: Op, - rhs: BcbCounter, -} + // These sorts are also not strictly necessary; see above. + pos.sort(); + neg.sort(); + + let pos_counter = new.make_sum(&pos).expect("`pos` should not be empty"); + let new_counter = new.make_subtracted_sum(pos_counter, &neg); + new.set_node_counter(bcb, new_counter); + } -/// Enum representing either a node or an edge in the coverage graph. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub(super) enum Site { - Node { bcb: BasicCoverageBlock }, - Edge { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock }, + new } /// Generates and stores coverage counter and coverage expression information -/// associated with nodes/edges in the BCB graph. +/// associated with nodes in the coverage graph. pub(super) struct CoverageCounters { /// List of places where a counter-increment statement should be injected /// into MIR, each with its corresponding counter ID. - counter_increment_sites: IndexVec<CounterId, Site>, + phys_counter_for_node: FxIndexMap<BasicCoverageBlock, CounterId>, + next_counter_id: CounterId, /// Coverage counters/expressions that are associated with individual BCBs. - node_counters: IndexVec<BasicCoverageBlock, Option<BcbCounter>>, + node_counters: IndexVec<BasicCoverageBlock, Option<CovTerm>>, /// Table of expression data, associating each expression ID with its /// corresponding operator (+ or -) and its LHS/RHS operands. - expressions: IndexVec<ExpressionId, BcbExpression>, + expressions: IndexVec<ExpressionId, Expression>, /// Remember expressions that have already been created (or simplified), /// so that we don't create unnecessary duplicates. - expressions_memo: FxHashMap<BcbExpression, BcbCounter>, + expressions_memo: FxHashMap<Expression, CovTerm>, } impl CoverageCounters { - /// Ensures that each BCB node needing a counter has one, by creating physical - /// counters or counter expressions for nodes and edges as required. - pub(super) fn make_bcb_counters( - graph: &CoverageGraph, - bcb_needs_counter: &BitSet<BasicCoverageBlock>, - ) -> Self { - let mut builder = CountersBuilder::new(graph, bcb_needs_counter); - builder.make_bcb_counters(); - - builder.into_coverage_counters() - } - fn with_num_bcbs(num_bcbs: usize) -> Self { Self { - counter_increment_sites: IndexVec::new(), + phys_counter_for_node: FxIndexMap::default(), + next_counter_id: CounterId::ZERO, node_counters: IndexVec::from_elem_n(None, num_bcbs), expressions: IndexVec::new(), expressions_memo: FxHashMap::default(), } } - /// Creates a new physical counter for a BCB node or edge. 
- fn make_phys_counter(&mut self, site: Site) -> BcbCounter { - let id = self.counter_increment_sites.push(site); - BcbCounter::Counter { id } + /// Returns the physical counter for the given node, creating it if necessary. + fn ensure_phys_counter(&mut self, bcb: BasicCoverageBlock) -> CovTerm { + let id = *self.phys_counter_for_node.entry(bcb).or_insert_with(|| { + let id = self.next_counter_id; + self.next_counter_id = id + 1; + id + }); + CovTerm::Counter(id) } - fn make_expression(&mut self, lhs: BcbCounter, op: Op, rhs: BcbCounter) -> BcbCounter { - let new_expr = BcbExpression { lhs, op, rhs }; - *self.expressions_memo.entry(new_expr).or_insert_with(|| { + fn make_expression(&mut self, lhs: CovTerm, op: Op, rhs: CovTerm) -> CovTerm { + let new_expr = Expression { lhs, op, rhs }; + *self.expressions_memo.entry(new_expr.clone()).or_insert_with(|| { let id = self.expressions.push(new_expr); - BcbCounter::Expression { id } + CovTerm::Expression(id) }) } /// Creates a counter that is the sum of the given counters. /// /// Returns `None` if the given list of counters was empty. - fn make_sum(&mut self, counters: &[BcbCounter]) -> Option<BcbCounter> { + fn make_sum(&mut self, counters: &[CovTerm]) -> Option<CovTerm> { counters .iter() .copied() @@ -119,16 +183,18 @@ impl CoverageCounters { } /// Creates a counter whose value is `lhs - SUM(rhs)`. - fn make_subtracted_sum(&mut self, lhs: BcbCounter, rhs: &[BcbCounter]) -> BcbCounter { + fn make_subtracted_sum(&mut self, lhs: CovTerm, rhs: &[CovTerm]) -> CovTerm { let Some(rhs_sum) = self.make_sum(rhs) else { return lhs }; self.make_expression(lhs, Op::Subtract, rhs_sum) } pub(super) fn num_counters(&self) -> usize { - self.counter_increment_sites.len() + let num_counters = self.phys_counter_for_node.len(); + assert_eq!(num_counters, self.next_counter_id.as_usize()); + num_counters } - fn set_node_counter(&mut self, bcb: BasicCoverageBlock, counter: BcbCounter) -> BcbCounter { + fn set_node_counter(&mut self, bcb: BasicCoverageBlock, counter: CovTerm) -> CovTerm { let existing = self.node_counters[bcb].replace(counter); assert!( existing.is_none(), @@ -138,16 +204,16 @@ impl CoverageCounters { } pub(super) fn term_for_bcb(&self, bcb: BasicCoverageBlock) -> Option<CovTerm> { - self.node_counters[bcb].map(|counter| counter.as_term()) + self.node_counters[bcb] } - /// Returns an iterator over all the nodes/edges in the coverage graph that + /// Returns an iterator over all the nodes in the coverage graph that /// should have a counter-increment statement injected into MIR, along with /// each site's corresponding counter ID. pub(super) fn counter_increment_sites( &self, - ) -> impl Iterator<Item = (CounterId, Site)> + Captures<'_> { - self.counter_increment_sites.iter_enumerated().map(|(id, &site)| (id, site)) + ) -> impl Iterator<Item = (CounterId, BasicCoverageBlock)> + Captures<'_> { + self.phys_counter_for_node.iter().map(|(&site, &id)| (id, site)) } /// Returns an iterator over the subset of BCB nodes that have been associated @@ -157,432 +223,13 @@ impl CoverageCounters { ) -> impl Iterator<Item = (BasicCoverageBlock, ExpressionId)> + Captures<'_> { self.node_counters.iter_enumerated().filter_map(|(bcb, &counter)| match counter { // Yield the BCB along with its associated expression ID. - Some(BcbCounter::Expression { id }) => Some((bcb, id)), + Some(CovTerm::Expression(id)) => Some((bcb, id)), // This BCB is associated with a counter or nothing, so skip it. - Some(BcbCounter::Counter { .. }) | None => None, + Some(CovTerm::Counter { .. 
} | CovTerm::Zero) | None => None, }) } pub(super) fn into_expressions(self) -> IndexVec<ExpressionId, Expression> { - let old_len = self.expressions.len(); - let expressions = self - .expressions - .into_iter() - .map(|BcbExpression { lhs, op, rhs }| Expression { - lhs: lhs.as_term(), - op, - rhs: rhs.as_term(), - }) - .collect::<IndexVec<ExpressionId, _>>(); - - // Expression IDs are indexes into this vector, so make sure we didn't - // accidentally invalidate them by changing its length. - assert_eq!(old_len, expressions.len()); - expressions - } -} - -/// Symbolic representation of the coverage counter to be used for a particular -/// node or edge in the coverage graph. The same site counter can be used for -/// multiple sites, if they have been determined to have the same count. -#[derive(Clone, Copy, Debug)] -enum SiteCounter { - /// A physical counter at some node/edge. - Phys { site: Site }, - /// A counter expression for a node that takes the sum of all its in-edge - /// counters. - NodeSumExpr { bcb: BasicCoverageBlock }, - /// A counter expression for an edge that takes the counter of its source - /// node, and subtracts the counters of all its sibling out-edges. - EdgeDiffExpr { from_bcb: BasicCoverageBlock, to_bcb: BasicCoverageBlock }, -} - -/// Yields the graph successors of `from_bcb` that aren't `to_bcb`. This is -/// used when creating a counter expression for [`SiteCounter::EdgeDiffExpr`]. -/// -/// For example, in this diagram the sibling out-edge targets of edge `AC` are -/// the nodes `B` and `D`. -/// -/// ```text -/// A -/// / | \ -/// B C D -/// ``` -fn sibling_out_edge_targets( - graph: &CoverageGraph, - from_bcb: BasicCoverageBlock, - to_bcb: BasicCoverageBlock, -) -> impl Iterator<Item = BasicCoverageBlock> + Captures<'_> { - graph.successors[from_bcb].iter().copied().filter(move |&t| t != to_bcb) -} - -/// Helper struct that allows counter creation to inspect the BCB graph, and -/// the set of nodes that need counters. -struct CountersBuilder<'a> { - graph: &'a CoverageGraph, - bcb_needs_counter: &'a BitSet<BasicCoverageBlock>, - - site_counters: FxHashMap<Site, SiteCounter>, -} - -impl<'a> CountersBuilder<'a> { - fn new(graph: &'a CoverageGraph, bcb_needs_counter: &'a BitSet<BasicCoverageBlock>) -> Self { - assert_eq!(graph.num_nodes(), bcb_needs_counter.domain_size()); - Self { graph, bcb_needs_counter, site_counters: FxHashMap::default() } - } - - fn make_bcb_counters(&mut self) { - debug!("make_bcb_counters(): adding a counter or expression to each BasicCoverageBlock"); - - // Traverse the coverage graph, ensuring that every node that needs a - // coverage counter has one. - for bcb in ReadyFirstTraversal::new(self.graph) { - let _span = debug_span!("traversal", ?bcb).entered(); - if self.bcb_needs_counter.contains(bcb) { - self.make_node_counter_and_out_edge_counters(bcb); - } - } + self.expressions } - - /// Make sure the given node has a node counter, and then make sure each of - /// its out-edges has an edge counter (if appropriate). - #[instrument(level = "debug", skip(self))] - fn make_node_counter_and_out_edge_counters(&mut self, from_bcb: BasicCoverageBlock) { - // First, ensure that this node has a counter of some kind. - // We might also use that counter to compute one of the out-edge counters. - self.get_or_make_node_counter(from_bcb); - - // If this node's out-edges won't sum to the node's counter, - // then there's no reason to create edge counters here. 
- if !self.graph[from_bcb].is_out_summable { - return; - } - - // When choosing which out-edge should be given a counter expression, ignore edges that - // already have counters, or could use the existing counter of their target node. - let out_edge_has_counter = |to_bcb| { - if self.site_counters.contains_key(&Site::Edge { from_bcb, to_bcb }) { - return true; - } - self.graph.sole_predecessor(to_bcb) == Some(from_bcb) - && self.site_counters.contains_key(&Site::Node { bcb: to_bcb }) - }; - - // Determine the set of out-edges that could benefit from being given an expression. - let candidate_successors = self.graph.successors[from_bcb] - .iter() - .copied() - .filter(|&to_bcb| !out_edge_has_counter(to_bcb)) - .collect::<Vec<_>>(); - debug!(?candidate_successors); - - // If there are out-edges without counters, choose one to be given an expression - // (computed from this node and the other out-edges) instead of a physical counter. - let Some(to_bcb) = self.choose_out_edge_for_expression(from_bcb, &candidate_successors) - else { - return; - }; - - // For each out-edge other than the one that was chosen to get an expression, - // ensure that it has a counter (existing counter/expression or a new counter). - for target in sibling_out_edge_targets(self.graph, from_bcb, to_bcb) { - self.get_or_make_edge_counter(from_bcb, target); - } - - // Now create an expression for the chosen edge, by taking the counter - // for its source node and subtracting the sum of its sibling out-edges. - let counter = SiteCounter::EdgeDiffExpr { from_bcb, to_bcb }; - self.site_counters.insert(Site::Edge { from_bcb, to_bcb }, counter); - } - - #[instrument(level = "debug", skip(self))] - fn get_or_make_node_counter(&mut self, bcb: BasicCoverageBlock) -> SiteCounter { - // If the BCB already has a counter, return it. - if let Some(&counter) = self.site_counters.get(&Site::Node { bcb }) { - debug!("{bcb:?} already has a counter: {counter:?}"); - return counter; - } - - let counter = self.make_node_counter_inner(bcb); - self.site_counters.insert(Site::Node { bcb }, counter); - counter - } - - fn make_node_counter_inner(&mut self, bcb: BasicCoverageBlock) -> SiteCounter { - // If the node's sole in-edge already has a counter, use that. - if let Some(sole_pred) = self.graph.sole_predecessor(bcb) - && let Some(&edge_counter) = - self.site_counters.get(&Site::Edge { from_bcb: sole_pred, to_bcb: bcb }) - { - return edge_counter; - } - - let predecessors = self.graph.predecessors[bcb].as_slice(); - - // Handle cases where we can't compute a node's count from its in-edges: - // - START_BCB has no in-edges, so taking the sum would panic (or be wrong). - // - For nodes with one in-edge, or that directly loop to themselves, - // trying to get the in-edge counts would require this node's counter, - // leading to infinite recursion. - if predecessors.len() <= 1 || predecessors.contains(&bcb) { - debug!(?bcb, ?predecessors, "node has <=1 predecessors or is its own predecessor"); - let counter = SiteCounter::Phys { site: Site::Node { bcb } }; - debug!(?bcb, ?counter, "node gets a physical counter"); - return counter; - } - - // A BCB with multiple incoming edges can compute its count by ensuring that counters - // exist for each of those edges, and then adding them up to get a total count. 
- for &from_bcb in predecessors { - self.get_or_make_edge_counter(from_bcb, bcb); - } - let sum_of_in_edges = SiteCounter::NodeSumExpr { bcb }; - - debug!("{bcb:?} gets a new counter (sum of predecessor counters): {sum_of_in_edges:?}"); - sum_of_in_edges - } - - #[instrument(level = "debug", skip(self))] - fn get_or_make_edge_counter( - &mut self, - from_bcb: BasicCoverageBlock, - to_bcb: BasicCoverageBlock, - ) -> SiteCounter { - // If the edge already has a counter, return it. - if let Some(&counter) = self.site_counters.get(&Site::Edge { from_bcb, to_bcb }) { - debug!("Edge {from_bcb:?}->{to_bcb:?} already has a counter: {counter:?}"); - return counter; - } - - let counter = self.make_edge_counter_inner(from_bcb, to_bcb); - self.site_counters.insert(Site::Edge { from_bcb, to_bcb }, counter); - counter - } - - fn make_edge_counter_inner( - &mut self, - from_bcb: BasicCoverageBlock, - to_bcb: BasicCoverageBlock, - ) -> SiteCounter { - // If the target node has exactly one in-edge (i.e. this one), then just - // use the node's counter, since it will have the same value. - if let Some(sole_pred) = self.graph.sole_predecessor(to_bcb) { - assert_eq!(sole_pred, from_bcb); - // This call must take care not to invoke `get_or_make_edge` for - // this edge, since that would result in infinite recursion! - return self.get_or_make_node_counter(to_bcb); - } - - // If the source node has exactly one out-edge (i.e. this one) and would have - // the same execution count as that edge, then just use the node's counter. - if let Some(simple_succ) = self.graph.simple_successor(from_bcb) { - assert_eq!(simple_succ, to_bcb); - return self.get_or_make_node_counter(from_bcb); - } - - // Make a new counter to count this edge. - let counter = SiteCounter::Phys { site: Site::Edge { from_bcb, to_bcb } }; - debug!(?from_bcb, ?to_bcb, ?counter, "edge gets a physical counter"); - counter - } - - /// Given a set of candidate out-edges (represented by their successor node), - /// choose one to be given a counter expression instead of a physical counter. - fn choose_out_edge_for_expression( - &self, - from_bcb: BasicCoverageBlock, - candidate_successors: &[BasicCoverageBlock], - ) -> Option<BasicCoverageBlock> { - // Try to find a candidate that leads back to the top of a loop, - // because reloop edges tend to be executed more times than loop-exit edges. - if let Some(reloop_target) = self.find_good_reloop_edge(from_bcb, &candidate_successors) { - debug!("Selecting reloop target {reloop_target:?} to get an expression"); - return Some(reloop_target); - } - - // We couldn't identify a "good" edge, so just choose an arbitrary one. - let arbitrary_target = candidate_successors.first().copied()?; - debug!(?arbitrary_target, "selecting arbitrary out-edge to get an expression"); - Some(arbitrary_target) - } - - /// Given a set of candidate out-edges (represented by their successor node), - /// tries to find one that leads back to the top of a loop. - /// - /// Reloop edges are good candidates for counter expressions, because they - /// will tend to be executed more times than a loop-exit edge, so it's nice - /// for them to be able to avoid a physical counter increment. - fn find_good_reloop_edge( - &self, - from_bcb: BasicCoverageBlock, - candidate_successors: &[BasicCoverageBlock], - ) -> Option<BasicCoverageBlock> { - // If there are no candidates, avoid iterating over the loop stack. - if candidate_successors.is_empty() { - return None; - } - - // Consider each loop on the current traversal context stack, top-down. 
- for loop_header_node in self.graph.loop_headers_containing(from_bcb) { - // Try to find a candidate edge that doesn't exit this loop. - for &target_bcb in candidate_successors { - // An edge is a reloop edge if its target dominates any BCB that has - // an edge back to the loop header. (Otherwise it's an exit edge.) - let is_reloop_edge = self - .graph - .reloop_predecessors(loop_header_node) - .any(|reloop_bcb| self.graph.dominates(target_bcb, reloop_bcb)); - if is_reloop_edge { - // We found a good out-edge to be given an expression. - return Some(target_bcb); - } - } - - // All of the candidate edges exit this loop, so keep looking - // for a good reloop edge for one of the outer loops. - } - - None - } - - fn into_coverage_counters(self) -> CoverageCounters { - Transcriber::new(&self).transcribe_counters() - } -} - -/// Helper struct for converting `CountersBuilder` into a final `CoverageCounters`. -struct Transcriber<'a> { - old: &'a CountersBuilder<'a>, - new: CoverageCounters, - phys_counter_for_site: FxHashMap<Site, BcbCounter>, -} - -impl<'a> Transcriber<'a> { - fn new(old: &'a CountersBuilder<'a>) -> Self { - Self { - old, - new: CoverageCounters::with_num_bcbs(old.graph.num_nodes()), - phys_counter_for_site: FxHashMap::default(), - } - } - - fn transcribe_counters(mut self) -> CoverageCounters { - for bcb in self.old.bcb_needs_counter.iter() { - let site = Site::Node { bcb }; - let site_counter = self.site_counter(site); - - // Resolve the site counter into flat lists of nodes/edges whose - // physical counts contribute to the counter for this node. - // Distinguish between counts that will be added vs subtracted. - let mut pos = vec![]; - let mut neg = vec![]; - self.push_resolved_sites(site_counter, &mut pos, &mut neg); - - // Simplify by cancelling out sites that appear on both sides. - let (mut pos, mut neg) = sort_and_cancel(pos, neg); - - if pos.is_empty() { - // If we somehow end up with no positive terms after cancellation, - // fall back to creating a physical counter. There's no known way - // for this to happen, but it's hard to confidently rule it out. - debug_assert!(false, "{site:?} has no positive counter terms"); - pos = vec![Some(site)]; - neg = vec![]; - } - - let mut new_counters_for_sites = |sites: Vec<Option<Site>>| { - sites - .into_iter() - .filter_map(|id| try { self.ensure_phys_counter(id?) }) - .collect::<Vec<_>>() - }; - let mut pos = new_counters_for_sites(pos); - let mut neg = new_counters_for_sites(neg); - - pos.sort(); - neg.sort(); - - let pos_counter = self.new.make_sum(&pos).expect("`pos` should not be empty"); - let new_counter = self.new.make_subtracted_sum(pos_counter, &neg); - self.new.set_node_counter(bcb, new_counter); - } - - self.new - } - - fn site_counter(&self, site: Site) -> SiteCounter { - self.old.site_counters.get(&site).copied().unwrap_or_else(|| { - // We should have already created all necessary site counters. - // But if we somehow didn't, avoid crashing in release builds, - // and just use an extra physical counter instead. - debug_assert!(false, "{site:?} should have a counter"); - SiteCounter::Phys { site } - }) - } - - fn ensure_phys_counter(&mut self, site: Site) -> BcbCounter { - *self.phys_counter_for_site.entry(site).or_insert_with(|| self.new.make_phys_counter(site)) - } - - /// Resolves the given counter into flat lists of nodes/edges, whose counters - /// will then be added and subtracted to form a counter expression. 
- fn push_resolved_sites(&self, counter: SiteCounter, pos: &mut Vec<Site>, neg: &mut Vec<Site>) { - match counter { - SiteCounter::Phys { site } => pos.push(site), - SiteCounter::NodeSumExpr { bcb } => { - for &from_bcb in &self.old.graph.predecessors[bcb] { - let edge_counter = self.site_counter(Site::Edge { from_bcb, to_bcb: bcb }); - self.push_resolved_sites(edge_counter, pos, neg); - } - } - SiteCounter::EdgeDiffExpr { from_bcb, to_bcb } => { - // First, add the count for `from_bcb`. - let node_counter = self.site_counter(Site::Node { bcb: from_bcb }); - self.push_resolved_sites(node_counter, pos, neg); - - // Then subtract the counts for the other out-edges. - for target in sibling_out_edge_targets(self.old.graph, from_bcb, to_bcb) { - let edge_counter = self.site_counter(Site::Edge { from_bcb, to_bcb: target }); - // Swap `neg` and `pos` so that the counter is subtracted. - self.push_resolved_sites(edge_counter, neg, pos); - } - } - } - } -} - -/// Given two lists: -/// - Sorts each list. -/// - Converts each list to `Vec<Option<T>>`. -/// - Scans for values that appear in both lists, and cancels them out by -/// replacing matching pairs of values with `None`. -fn sort_and_cancel<T: Ord>(mut pos: Vec<T>, mut neg: Vec<T>) -> (Vec<Option<T>>, Vec<Option<T>>) { - pos.sort(); - neg.sort(); - - // Convert to `Vec<Option<T>>`. If `T` has a niche, this should be zero-cost. - let mut pos = pos.into_iter().map(Some).collect::<Vec<_>>(); - let mut neg = neg.into_iter().map(Some).collect::<Vec<_>>(); - - // Scan through the lists using two cursors. When either cursor reaches the - // end of its list, there can be no more equal pairs, so stop. - let mut p = 0; - let mut n = 0; - while p < pos.len() && n < neg.len() { - // If the values are equal, remove them and advance both cursors. - // Otherwise, advance whichever cursor points to the lesser value. - // (Choosing which cursor to advance relies on both lists being sorted.) - match pos[p].cmp(&neg[n]) { - Ordering::Less => p += 1, - Ordering::Equal => { - pos[p] = None; - neg[n] = None; - p += 1; - n += 1; - } - Ordering::Greater => n += 1, - } - } - - (pos, neg) } diff --git a/compiler/rustc_mir_transform/src/coverage/counters/balanced_flow.rs b/compiler/rustc_mir_transform/src/coverage/counters/balanced_flow.rs new file mode 100644 index 00000000000..c108f96a564 --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/balanced_flow.rs @@ -0,0 +1,133 @@ +//! A control-flow graph can be said to have “balanced flow” if the flow +//! (execution count) of each node is equal to the sum of its in-edge flows, +//! and also equal to the sum of its out-edge flows. +//! +//! Control-flow graphs typically have one or more nodes that don't satisfy the +//! balanced-flow property, e.g.: +//! - The start node has out-edges, but no in-edges. +//! - Return nodes have in-edges, but no out-edges. +//! - `Yield` nodes can have an out-flow that is less than their in-flow. +//! - Inescapable loops cause the in-flow/out-flow relationship to break down. +//! +//! Balanced-flow graphs are nevertheless useful for analysis, so this module +//! provides a wrapper type ([`BalancedFlowGraph`]) that imposes balanced flow +//! on an underlying graph. This is done by non-destructively adding synthetic +//! nodes and edges as necessary. 
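As a rough standalone illustration of the construction described in the module comment above (plain `usize` node indices and `Vec` adjacency lists instead of the rustc graph traits, and without the reverse-DFS handling of inescapable loops), sink insertion can be sketched like this; `balance` and `force_sink_edge` are illustrative names, not items from this patch:

// Toy sketch of the balanced-flow idea: append one synthetic sink node,
// give every exit node (and every node the caller flags) an edge to it,
// then loop the sink back to the start node so in-flow == out-flow everywhere.
fn balance(
    successors: &[Vec<usize>],
    start: usize,
    force_sink_edge: impl Fn(usize) -> bool,
) -> Vec<Vec<usize>> {
    let sink = successors.len(); // the sink is one past the last real node
    let mut out: Vec<Vec<usize>> = successors.to_vec();

    for node in 0..successors.len() {
        if force_sink_edge(node) || successors[node].is_empty() {
            out[node].push(sink);
        }
    }

    // The sink's only out-edge returns to the start node.
    out.push(vec![start]);
    out
}

fn main() {
    // 0 -> 1 -> 2, where node 2 returns and has no successors.
    let g = vec![vec![1], vec![2], vec![]];
    let balanced = balance(&g, 0, |_| false);
    assert_eq!(balanced[2], vec![3]); // the return node now feeds the sink
    assert_eq!(balanced[3], vec![0]); // the sink loops back to the start
    println!("{balanced:?}");
}

The `BalancedFlowGraph::for_graph` constructor below does the same thing generically over the rustc graph traits, and additionally adds sink edges for nodes that could not otherwise reach the sink because they sit in an inescapable loop.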
+ +use rustc_data_structures::graph; +use rustc_data_structures::graph::iterate::DepthFirstSearch; +use rustc_data_structures::graph::reversed::ReversedGraph; +use rustc_index::Idx; +use rustc_index::bit_set::DenseBitSet; + +use crate::coverage::counters::iter_nodes::IterNodes; + +/// A view of an underlying graph that has been augmented to have “balanced flow”. +/// This means that the flow (execution count) of each node is equal to the +/// sum of its in-edge flows, and also equal to the sum of its out-edge flows. +/// +/// To achieve this, a synthetic "sink" node is non-destructively added to the +/// graph, with synthetic in-edges from these nodes: +/// - Any node that has no out-edges. +/// - Any node that explicitly requires a sink edge, as indicated by a +/// caller-supplied `force_sink_edge` function. +/// - Any node that would otherwise be unable to reach the sink, because it is +/// part of an inescapable loop. +/// +/// To make the graph fully balanced, there is also a synthetic edge from the +/// sink node back to the start node. +/// +/// --- +/// The benefit of having a balanced-flow graph is that it can be subsequently +/// transformed in ways that are guaranteed to preserve balanced flow +/// (e.g. merging nodes together), which is useful for discovering relationships +/// between the node flows of different nodes in the graph. +pub(crate) struct BalancedFlowGraph<G: graph::DirectedGraph> { + graph: G, + sink_edge_nodes: DenseBitSet<G::Node>, + pub(crate) sink: G::Node, +} + +impl<G: graph::DirectedGraph> BalancedFlowGraph<G> { + /// Creates a balanced view of an underlying graph, by adding a synthetic + /// sink node that has in-edges from nodes that need or request such an edge, + /// and a single out-edge to the start node. + /// + /// Assumes that all nodes in the underlying graph are reachable from the + /// start node. + pub(crate) fn for_graph(graph: G, force_sink_edge: impl Fn(G::Node) -> bool) -> Self + where + G: graph::ControlFlowGraph, + { + let mut sink_edge_nodes = DenseBitSet::new_empty(graph.num_nodes()); + let mut dfs = DepthFirstSearch::new(ReversedGraph::new(&graph)); + + // First, determine the set of nodes that explicitly request or require + // an out-edge to the sink. + for node in graph.iter_nodes() { + if force_sink_edge(node) || graph.successors(node).next().is_none() { + sink_edge_nodes.insert(node); + dfs.push_start_node(node); + } + } + + // Next, find all nodes that are currently not reverse-reachable from + // `sink_edge_nodes`, and add them to the set as well. + dfs.complete_search(); + sink_edge_nodes.union_not(dfs.visited_set()); + + // The sink node is 1 higher than the highest real node. + let sink = G::Node::new(graph.num_nodes()); + + BalancedFlowGraph { graph, sink_edge_nodes, sink } + } +} + +impl<G> graph::DirectedGraph for BalancedFlowGraph<G> +where + G: graph::DirectedGraph, +{ + type Node = G::Node; + + /// Returns the number of nodes in this balanced-flow graph, which is 1 + /// more than the number of nodes in the underlying graph, to account for + /// the synthetic sink node. + fn num_nodes(&self) -> usize { + // The sink node's index is already the size of the underlying graph, + // so just add 1 to that instead. 
+ self.sink.index() + 1 + } +} + +impl<G> graph::StartNode for BalancedFlowGraph<G> +where + G: graph::StartNode, +{ + fn start_node(&self) -> Self::Node { + self.graph.start_node() + } +} + +impl<G> graph::Successors for BalancedFlowGraph<G> +where + G: graph::StartNode + graph::Successors, +{ + fn successors(&self, node: Self::Node) -> impl Iterator<Item = Self::Node> { + let real_edges; + let sink_edge; + + if node == self.sink { + // The sink node has no real out-edges, and one synthetic out-edge + // to the start node. + real_edges = None; + sink_edge = Some(self.graph.start_node()); + } else { + // Real nodes have their real out-edges, and possibly one synthetic + // out-edge to the sink node. + real_edges = Some(self.graph.successors(node)); + sink_edge = self.sink_edge_nodes.contains(node).then_some(self.sink); + } + + real_edges.into_iter().flatten().chain(sink_edge) + } +} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/iter_nodes.rs b/compiler/rustc_mir_transform/src/coverage/counters/iter_nodes.rs new file mode 100644 index 00000000000..9d87f7af1b0 --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/iter_nodes.rs @@ -0,0 +1,16 @@ +use rustc_data_structures::graph; +use rustc_index::Idx; + +pub(crate) trait IterNodes: graph::DirectedGraph { + /// Iterates over all nodes of a graph in ascending numeric order. + /// Assumes that nodes are densely numbered, i.e. every index in + /// `0..num_nodes` is a valid node. + /// + /// FIXME: Can this just be part of [`graph::DirectedGraph`]? + fn iter_nodes( + &self, + ) -> impl Iterator<Item = Self::Node> + DoubleEndedIterator + ExactSizeIterator { + (0..self.num_nodes()).map(<Self::Node as Idx>::new) + } +} +impl<G: graph::DirectedGraph> IterNodes for G {} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs new file mode 100644 index 00000000000..610498c6c0e --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/node_flow.rs @@ -0,0 +1,306 @@ +//! For each node in a control-flow graph, determines whether that node should +//! have a physical counter, or a counter expression that is derived from the +//! physical counters of other nodes. +//! +//! Based on the algorithm given in +//! "Optimal measurement points for program frequency counts" +//! (Knuth & Stevenson, 1973). + +use rustc_data_structures::graph; +use rustc_index::bit_set::DenseBitSet; +use rustc_index::{Idx, IndexVec}; +use rustc_middle::mir::coverage::Op; +use smallvec::SmallVec; + +use crate::coverage::counters::iter_nodes::IterNodes; +use crate::coverage::counters::union_find::{FrozenUnionFind, UnionFind}; + +#[cfg(test)] +mod tests; + +/// View of some underlying graph, in which each node's successors have been +/// merged into a single "supernode". +/// +/// The resulting supernodes have no obvious meaning on their own. +/// However, merging successor nodes means that a node's out-edges can all +/// be combined into a single out-edge, whose flow is the same as the flow +/// (execution count) of its corresponding node in the original graph. +/// +/// With all node flows now in the original graph now represented as edge flows +/// in the merged graph, it becomes possible to analyze the original node flows +/// using techniques for analyzing edge flows. 
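A minimal sketch of the successor-merging idea described above (illustrative only; `TinyUnionFind` and the toy graph are invented for this example, while the patch itself uses the `UnionFind`/`FrozenUnionFind` types added further down):

    // Merge each node's successors into a single "supernode" using a tiny
    // union-find over plain usize node indices.
    struct TinyUnionFind {
        parent: Vec<usize>,
    }

    impl TinyUnionFind {
        fn new(n: usize) -> Self {
            Self { parent: (0..n).collect() }
        }
        fn find(&mut self, x: usize) -> usize {
            if self.parent[x] != x {
                let root = self.find(self.parent[x]);
                self.parent[x] = root; // path compression
            }
            self.parent[x]
        }
        fn unify(&mut self, a: usize, b: usize) -> usize {
            let (ra, rb) = (self.find(a), self.find(b));
            self.parent[rb] = ra;
            ra
        }
    }

    fn main() {
        // successors[n] lists the out-edges of node n in a balanced graph.
        let successors = vec![vec![1, 2], vec![3], vec![3], vec![0]];
        let mut sets = TinyUnionFind::new(successors.len());

        // For each node, merge all of its successors into one supernode.
        for succs in &successors {
            succs.windows(2).for_each(|w| {
                sets.unify(w[0], w[1]);
            });
        }

        // Nodes 1 and 2 are both successors of node 0, so they share a root.
        assert_eq!(sets.find(1), sets.find(2));
    }
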
+#[derive(Debug)] +pub(crate) struct MergedNodeFlowGraph<Node: Idx> { + /// Maps each node to the supernode that contains it, indicated by some + /// arbitrary "root" node that is part of that supernode. + supernodes: FrozenUnionFind<Node>, + /// For each node, stores the single supernode that all of its successors + /// have been merged into. + /// + /// (Note that each node in a supernode can potentially have a _different_ + /// successor supernode from its peers.) + succ_supernodes: IndexVec<Node, Node>, +} + +impl<Node: Idx> MergedNodeFlowGraph<Node> { + /// Creates a "merged" view of an underlying graph. + /// + /// The given graph is assumed to have [“balanced flow”](balanced-flow), + /// though it does not necessarily have to be a `BalancedFlowGraph`. + /// + /// [balanced-flow]: `crate::coverage::counters::balanced_flow::BalancedFlowGraph`. + pub(crate) fn for_balanced_graph<G>(graph: G) -> Self + where + G: graph::DirectedGraph<Node = Node> + graph::Successors, + { + let mut supernodes = UnionFind::<G::Node>::new(graph.num_nodes()); + + // For each node, merge its successors into a single supernode, and + // arbitrarily choose one of those successors to represent all of them. + let successors = graph + .iter_nodes() + .map(|node| { + graph + .successors(node) + .reduce(|a, b| supernodes.unify(a, b)) + .expect("each node in a balanced graph must have at least one out-edge") + }) + .collect::<IndexVec<G::Node, G::Node>>(); + + // Now that unification is complete, freeze the supernode forest, + // and resolve each arbitrarily-chosen successor to its canonical root. + // (This avoids having to explicitly resolve them later.) + let supernodes = supernodes.freeze(); + let succ_supernodes = successors.into_iter().map(|succ| supernodes.find(succ)).collect(); + + Self { supernodes, succ_supernodes } + } + + fn num_nodes(&self) -> usize { + self.succ_supernodes.len() + } + + fn is_supernode(&self, node: Node) -> bool { + self.supernodes.find(node) == node + } + + /// Using the information in this merged graph, together with a given + /// permutation of all nodes in the graph, to create physical counters and + /// counter expressions for each node in the underlying graph. + /// + /// The given list must contain exactly one copy of each node in the + /// underlying balanced-flow graph. The order of nodes is used as a hint to + /// influence counter allocation: + /// - Earlier nodes are more likely to receive counter expressions. + /// - Later nodes are more likely to receive physical counters. + pub(crate) fn make_node_counters(&self, all_nodes_permutation: &[Node]) -> NodeCounters<Node> { + let mut builder = SpantreeBuilder::new(self); + + for &node in all_nodes_permutation { + builder.visit_node(node); + } + + NodeCounters { counter_exprs: builder.finish() } + } +} + +/// End result of allocating physical counters and counter expressions for the +/// nodes of a graph. +#[derive(Debug)] +pub(crate) struct NodeCounters<Node: Idx> { + counter_exprs: IndexVec<Node, CounterExprVec<Node>>, +} + +impl<Node: Idx> NodeCounters<Node> { + /// For the given node, returns the finished list of terms that represent + /// its physical counter or counter expression. Always non-empty. + /// + /// If a node was given a physical counter, its "expression" will contain + /// that counter as its sole element. 
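A minimal sketch of how such a counter expression could be evaluated once physical counter values are known (illustrative only; `Term` and `Sign` are stand-ins for `CounterTerm` and `Op`, and the counts are invented to mirror the "[1]: +c0 +c2 -c4" expression in the unit test further down):

    #[derive(Clone, Copy)]
    enum Sign {
        Add,
        Subtract,
    }

    struct Term {
        node: usize,
        sign: Sign,
    }

    // A node's count is a signed sum of other nodes' physical counters.
    fn evaluate(expr: &[Term], physical_counts: &[u64]) -> i64 {
        expr.iter().fold(0i64, |acc, term| {
            let value = physical_counts[term.node] as i64;
            match term.sign {
                Sign::Add => acc + value,
                Sign::Subtract => acc - value,
            }
        })
    }

    fn main() {
        // Shape of "[1]: +c0 +c2 -c4": node 1's count is c0 + c2 - c4.
        let expr = [
            Term { node: 0, sign: Sign::Add },
            Term { node: 2, sign: Sign::Add },
            Term { node: 4, sign: Sign::Subtract },
        ];
        let physical_counts = [10, 0, 7, 3, 2];
        assert_eq!(evaluate(&expr, &physical_counts), 10 + 7 - 2);
    }
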
+ pub(crate) fn counter_expr(&self, this: Node) -> &[CounterTerm<Node>] { + self.counter_exprs[this].as_slice() + } +} + +#[derive(Debug)] +struct SpantreeEdge<Node> { + /// If true, this edge in the spantree has been reversed an odd number of + /// times, so all physical counters added to its node's counter expression + /// need to be negated. + is_reversed: bool, + /// Each spantree edge is "claimed" by the (regular) node that caused it to + /// be created. When a node with a physical counter traverses this edge, + /// that counter is added to the claiming node's counter expression. + claiming_node: Node, + /// Supernode at the other end of this spantree edge. Transitively points + /// to the "root" of this supernode's spantree component. + span_parent: Node, +} + +/// Part of a node's counter expression, which is a sum of counter terms. +#[derive(Debug)] +pub(crate) struct CounterTerm<Node> { + /// Whether to add or subtract the value of the node's physical counter. + pub(crate) op: Op, + /// The node whose physical counter is represented by this term. + pub(crate) node: Node, +} + +/// Stores the list of counter terms that make up a node's counter expression. +type CounterExprVec<Node> = SmallVec<[CounterTerm<Node>; 2]>; + +#[derive(Debug)] +struct SpantreeBuilder<'a, Node: Idx> { + graph: &'a MergedNodeFlowGraph<Node>, + is_unvisited: DenseBitSet<Node>, + /// Links supernodes to each other, gradually forming a spanning tree of + /// the merged-flow graph. + /// + /// A supernode without a span edge is the root of its component of the + /// spantree. Nodes that aren't supernodes cannot have a spantree edge. + span_edges: IndexVec<Node, Option<SpantreeEdge<Node>>>, + /// Shared path buffer recycled by all calls to `yank_to_spantree_root`. + yank_buffer: Vec<Node>, + /// An in-progress counter expression for each node. Each expression is + /// initially empty, and will be filled in as relevant nodes are visited. + counter_exprs: IndexVec<Node, CounterExprVec<Node>>, +} + +impl<'a, Node: Idx> SpantreeBuilder<'a, Node> { + fn new(graph: &'a MergedNodeFlowGraph<Node>) -> Self { + let num_nodes = graph.num_nodes(); + Self { + graph, + is_unvisited: DenseBitSet::new_filled(num_nodes), + span_edges: IndexVec::from_fn_n(|_| None, num_nodes), + yank_buffer: vec![], + counter_exprs: IndexVec::from_fn_n(|_| SmallVec::new(), num_nodes), + } + } + + /// Given a supernode, finds the supernode that is the "root" of its + /// spantree component. Two nodes that have the same spantree root are + /// connected in the spantree. + fn spantree_root(&self, this: Node) -> Node { + debug_assert!(self.graph.is_supernode(this)); + + match self.span_edges[this] { + None => this, + Some(SpantreeEdge { span_parent, .. }) => self.spantree_root(span_parent), + } + } + + /// Rotates edges in the spantree so that `this` is the root of its + /// spantree component. + fn yank_to_spantree_root(&mut self, this: Node) { + debug_assert!(self.graph.is_supernode(this)); + + // The rotation is done iteratively, by first traversing from `this` to + // its root and storing the path in a buffer, and then traversing the + // path buffer backwards to reverse all the edges. + + // Recycle the same path buffer for all calls to this method. + let path_buf = &mut self.yank_buffer; + path_buf.clear(); + path_buf.push(this); + + // Traverse the spantree until we reach a supernode that has no + // span-parent, which must be the root. + let mut curr = this; + while let &Some(SpantreeEdge { span_parent, .. 
}) = &self.span_edges[curr] { + path_buf.push(span_parent); + curr = span_parent; + } + + // For each spantree edge `a -> b` in the path that was just traversed, + // reverse it to become `a <- b`, while preserving `claiming_node`. + for &[a, b] in path_buf.array_windows::<2>().rev() { + let SpantreeEdge { is_reversed, claiming_node, span_parent } = self.span_edges[a] + .take() + .expect("all nodes in the path (except the last) have a `span_parent`"); + debug_assert_eq!(span_parent, b); + debug_assert!(self.span_edges[b].is_none()); + self.span_edges[b] = + Some(SpantreeEdge { is_reversed: !is_reversed, claiming_node, span_parent: a }); + } + + // The result of the rotation is that `this` is now a spantree root. + debug_assert!(self.span_edges[this].is_none()); + } + + /// Must be called exactly once for each node in the balanced-flow graph. + fn visit_node(&mut self, this: Node) { + // Assert that this node was unvisited, and mark it visited. + assert!(self.is_unvisited.remove(this), "node has already been visited: {this:?}"); + + // Get the supernode containing `this`, and make it the root of its + // component of the spantree. + let this_supernode = self.graph.supernodes.find(this); + self.yank_to_spantree_root(this_supernode); + + // Get the supernode containing all of this's successors. + let succ_supernode = self.graph.succ_supernodes[this]; + debug_assert!(self.graph.is_supernode(succ_supernode)); + + // If two supernodes are already connected in the spantree, they will + // have the same spantree root. (Each supernode is connected to itself.) + if this_supernode != self.spantree_root(succ_supernode) { + // Adding this node's flow edge to the spantree would cause two + // previously-disconnected supernodes to become connected, so add + // it. That spantree-edge is now "claimed" by this node. + // + // Claiming a spantree-edge means that this node will get a counter + // expression instead of a physical counter. That expression is + // currently empty, but will be built incrementally as the other + // nodes are visited. + self.span_edges[this_supernode] = Some(SpantreeEdge { + is_reversed: false, + claiming_node: this, + span_parent: succ_supernode, + }); + } else { + // This node's flow edge would join two supernodes that are already + // connected in the spantree (or are the same supernode). That would + // create a cycle in the spantree, so don't add an edge. + // + // Instead, create a physical counter for this node, and add that + // counter to all expressions on the path from `succ_supernode` to + // `this_supernode`. + + // Instead of setting `this.measure = true` as in the original paper, + // we just add the node's ID to its own "expression". + self.counter_exprs[this].push(CounterTerm { node: this, op: Op::Add }); + + // Walk the spantree from `this.successor` back to `this`. For each + // spantree edge along the way, add this node's physical counter to + // the counter expression of the node that claimed the spantree edge. + let mut curr = succ_supernode; + while curr != this_supernode { + let &SpantreeEdge { is_reversed, claiming_node, span_parent } = + self.span_edges[curr].as_ref().unwrap(); + let op = if is_reversed { Op::Subtract } else { Op::Add }; + self.counter_exprs[claiming_node].push(CounterTerm { node: this, op }); + + curr = span_parent; + } + } + } + + /// Asserts that all nodes have been visited, and returns the computed + /// counter expressions (made up of physical counters) for each node. 
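A minimal sketch of the edge-reversal step performed by `yank_to_spantree_root`, reduced to a parent array of `(is_reversed, span_parent)` pairs over plain `usize` nodes (illustrative only; it drops the `claiming_node` bookkeeping that the real code preserves):

    fn yank_to_root(span_edges: &mut Vec<Option<(bool, usize)>>, this: usize) {
        // Collect the path from `this` up to the current root.
        let mut path = vec![this];
        let mut curr = this;
        while let Some((_, parent)) = span_edges[curr] {
            path.push(parent);
            curr = parent;
        }

        // Reverse each edge `a -> b` on that path into `a <- b`, flipping its
        // reversal flag so later sign bookkeeping stays correct.
        for w in (0..path.len() - 1).rev() {
            let (a, b) = (path[w], path[w + 1]);
            let (is_reversed, _) = span_edges[a].take().expect("edge on path");
            span_edges[b] = Some((!is_reversed, a));
        }
    }

    fn main() {
        // Spantree: 0 -> 1 -> 2, with node 2 as the current root.
        let mut span_edges = vec![Some((false, 1)), Some((false, 2)), None];
        yank_to_root(&mut span_edges, 0);
        assert_eq!(span_edges[0], None); // 0 is now the root
        assert_eq!(span_edges[1], Some((true, 0)));
        assert_eq!(span_edges[2], Some((true, 1)));
    }
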
+ fn finish(self) -> IndexVec<Node, CounterExprVec<Node>> { + let Self { graph, is_unvisited, span_edges, yank_buffer: _, counter_exprs } = self; + assert!(is_unvisited.is_empty(), "some nodes were never visited: {is_unvisited:?}"); + debug_assert!( + span_edges + .iter_enumerated() + .all(|(node, span_edge)| { span_edge.is_some() <= graph.is_supernode(node) }), + "only supernodes can have a span edge", + ); + debug_assert!( + counter_exprs.iter().all(|expr| !expr.is_empty()), + "after visiting all nodes, every node should have a non-empty expression", + ); + counter_exprs + } +} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/node_flow/tests.rs b/compiler/rustc_mir_transform/src/coverage/counters/node_flow/tests.rs new file mode 100644 index 00000000000..9e7f754523d --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/node_flow/tests.rs @@ -0,0 +1,64 @@ +use itertools::Itertools; +use rustc_data_structures::graph; +use rustc_data_structures::graph::vec_graph::VecGraph; +use rustc_index::Idx; +use rustc_middle::mir::coverage::Op; + +use super::{CounterTerm, MergedNodeFlowGraph, NodeCounters}; + +fn merged_node_flow_graph<G: graph::Successors>(graph: G) -> MergedNodeFlowGraph<G::Node> { + MergedNodeFlowGraph::for_balanced_graph(graph) +} + +fn make_graph<Node: Idx + Ord>(num_nodes: usize, edge_pairs: Vec<(Node, Node)>) -> VecGraph<Node> { + VecGraph::new(num_nodes, edge_pairs) +} + +/// Example used in "Optimal Measurement Points for Program Frequency Counts" +/// (Knuth & Stevenson, 1973), but with 0-based node IDs. +#[test] +fn example_driver() { + let graph = make_graph::<u32>(5, vec![ + (0, 1), + (0, 3), + (1, 0), + (1, 2), + (2, 1), + (2, 4), + (3, 3), + (3, 4), + (4, 0), + ]); + + let merged = merged_node_flow_graph(&graph); + let counters = merged.make_node_counters(&[3, 1, 2, 0, 4]); + + assert_eq!(format_counter_expressions(&counters), &[ + // (comment to force vertical formatting for clarity) + "[0]: +c0", + "[1]: +c0 +c2 -c4", + "[2]: +c2", + "[3]: +c3", + "[4]: +c4", + ]); +} + +fn format_counter_expressions<Node: Idx>(counters: &NodeCounters<Node>) -> Vec<String> { + let format_item = |&CounterTerm { node, op }| { + let op = match op { + Op::Subtract => '-', + Op::Add => '+', + }; + format!("{op}c{node:?}") + }; + + counters + .counter_exprs + .indices() + .map(|node| { + let mut expr = counters.counter_expr(node).iter().collect::<Vec<_>>(); + expr.sort_by_key(|item| item.node.index()); + format!("[{node:?}]: {}", expr.into_iter().map(format_item).join(" ")) + }) + .collect() +} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/tests.rs b/compiler/rustc_mir_transform/src/coverage/counters/tests.rs deleted file mode 100644 index 794d4358f82..00000000000 --- a/compiler/rustc_mir_transform/src/coverage/counters/tests.rs +++ /dev/null @@ -1,41 +0,0 @@ -use std::fmt::Debug; - -use super::sort_and_cancel; - -fn flatten<T>(input: Vec<Option<T>>) -> Vec<T> { - input.into_iter().flatten().collect() -} - -fn sort_and_cancel_and_flatten<T: Clone + Ord>(pos: Vec<T>, neg: Vec<T>) -> (Vec<T>, Vec<T>) { - let (pos_actual, neg_actual) = sort_and_cancel(pos, neg); - (flatten(pos_actual), flatten(neg_actual)) -} - -#[track_caller] -fn check_test_case<T: Clone + Debug + Ord>( - pos: Vec<T>, - neg: Vec<T>, - pos_expected: Vec<T>, - neg_expected: Vec<T>, -) { - eprintln!("pos = {pos:?}; neg = {neg:?}"); - let output = sort_and_cancel_and_flatten(pos, neg); - assert_eq!(output, (pos_expected, neg_expected)); -} - -#[test] -fn cancellation() { - let 
cases: &[(Vec<u32>, Vec<u32>, Vec<u32>, Vec<u32>)] = &[ - (vec![], vec![], vec![], vec![]), - (vec![4, 2, 1, 5, 3], vec![], vec![1, 2, 3, 4, 5], vec![]), - (vec![5, 5, 5, 5, 5], vec![5], vec![5, 5, 5, 5], vec![]), - (vec![1, 1, 2, 2, 3, 3], vec![1, 2, 3], vec![1, 2, 3], vec![]), - (vec![1, 1, 2, 2, 3, 3], vec![2, 4, 2], vec![1, 1, 3, 3], vec![4]), - ]; - - for (pos, neg, pos_expected, neg_expected) in cases { - check_test_case(pos.to_vec(), neg.to_vec(), pos_expected.to_vec(), neg_expected.to_vec()); - // Same test case, but with its inputs flipped and its outputs flipped. - check_test_case(neg.to_vec(), pos.to_vec(), neg_expected.to_vec(), pos_expected.to_vec()); - } -} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs b/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs new file mode 100644 index 00000000000..2da4f5f5fce --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/union_find.rs @@ -0,0 +1,116 @@ +use std::cmp::Ordering; +use std::mem; + +use rustc_index::{Idx, IndexVec}; + +#[cfg(test)] +mod tests; + +/// Simple implementation of a union-find data structure, i.e. a disjoint-set +/// forest. +#[derive(Debug)] +pub(crate) struct UnionFind<Key: Idx> { + table: IndexVec<Key, UnionFindEntry<Key>>, +} + +#[derive(Debug)] +struct UnionFindEntry<Key> { + /// Transitively points towards the "root" of the set containing this key. + /// + /// Invariant: A root key is its own parent. + parent: Key, + /// When merging two "root" keys, their ranks determine which key becomes + /// the new root, to prevent the parent tree from becoming unnecessarily + /// tall. See [`UnionFind::unify`] for details. + rank: u32, +} + +impl<Key: Idx> UnionFind<Key> { + /// Creates a new disjoint-set forest containing the keys `0..num_keys`. + /// Initially, every key is part of its own one-element set. + pub(crate) fn new(num_keys: usize) -> Self { + // Initially, every key is the root of its own set, so its parent is itself. + Self { table: IndexVec::from_fn_n(|key| UnionFindEntry { parent: key, rank: 0 }, num_keys) } + } + + /// Returns the "root" key of the disjoint-set containing the given key. + /// If two keys have the same root, they belong to the same set. + /// + /// Also updates internal data structures to make subsequent `find` + /// operations faster. + pub(crate) fn find(&mut self, key: Key) -> Key { + // Loop until we find a key that is its own parent. + let mut curr = key; + while let parent = self.table[curr].parent + && curr != parent + { + // Perform "path compression" by peeking one layer ahead, and + // setting the current key's parent to that value. + // (This works even when `parent` is the root of its set, because + // of the invariant that a root is its own parent.) + let parent_parent = self.table[parent].parent; + self.table[curr].parent = parent_parent; + + // Advance by one step and continue. + curr = parent; + } + curr + } + + /// Merges the set containing `a` and the set containing `b` into one set. + /// + /// Returns the common root of both keys, after the merge. + pub(crate) fn unify(&mut self, a: Key, b: Key) -> Key { + let mut a = self.find(a); + let mut b = self.find(b); + + // If both keys have the same root, they're already in the same set, + // so there's nothing more to do. + if a == b { + return a; + }; + + // Ensure that `a` has strictly greater rank, swapping if necessary. 
+ // If both keys have the same rank, increment the rank of `a` so that + // future unifications will also prefer `a`, leading to flatter trees. + match Ord::cmp(&self.table[a].rank, &self.table[b].rank) { + Ordering::Less => mem::swap(&mut a, &mut b), + Ordering::Equal => self.table[a].rank += 1, + Ordering::Greater => {} + } + + debug_assert!(self.table[a].rank > self.table[b].rank); + debug_assert_eq!(self.table[b].parent, b); + + // Make `a` the parent of `b`. + self.table[b].parent = a; + + a + } + + /// Creates a snapshot of this disjoint-set forest that can no longer be + /// mutated, but can be queried without mutation. + pub(crate) fn freeze(&mut self) -> FrozenUnionFind<Key> { + // Just resolve each key to its actual root. + let roots = self.table.indices().map(|key| self.find(key)).collect(); + FrozenUnionFind { roots } + } +} + +/// Snapshot of a disjoint-set forest that can no longer be mutated, but can be +/// queried in O(1) time without mutation. +/// +/// This is really just a wrapper around a direct mapping from keys to roots, +/// but with a [`Self::find`] method that resembles [`UnionFind::find`]. +#[derive(Debug)] +pub(crate) struct FrozenUnionFind<Key: Idx> { + roots: IndexVec<Key, Key>, +} + +impl<Key: Idx> FrozenUnionFind<Key> { + /// Returns the "root" key of the disjoint-set containing the given key. + /// If two keys have the same root, they belong to the same set. + pub(crate) fn find(&self, key: Key) -> Key { + self.roots[key] + } +} diff --git a/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs b/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs new file mode 100644 index 00000000000..34a4e4f8e6e --- /dev/null +++ b/compiler/rustc_mir_transform/src/coverage/counters/union_find/tests.rs @@ -0,0 +1,32 @@ +use super::UnionFind; + +#[test] +fn empty() { + let mut sets = UnionFind::<u32>::new(10); + + for i in 1..10 { + assert_eq!(sets.find(i), i); + } +} + +#[test] +fn transitive() { + let mut sets = UnionFind::<u32>::new(10); + + sets.unify(3, 7); + sets.unify(4, 2); + + assert_eq!(sets.find(7), sets.find(3)); + assert_eq!(sets.find(2), sets.find(4)); + assert_ne!(sets.find(3), sets.find(4)); + + sets.unify(7, 4); + + assert_eq!(sets.find(7), sets.find(3)); + assert_eq!(sets.find(2), sets.find(4)); + assert_eq!(sets.find(3), sets.find(4)); + + for i in [0, 1, 5, 6, 8, 9] { + assert_eq!(sets.find(i), i); + } +} diff --git a/compiler/rustc_mir_transform/src/coverage/graph.rs b/compiler/rustc_mir_transform/src/coverage/graph.rs index ad6774fccd6..25dc7f31227 100644 --- a/compiler/rustc_mir_transform/src/coverage/graph.rs +++ b/compiler/rustc_mir_transform/src/coverage/graph.rs @@ -1,14 +1,13 @@ use std::cmp::Ordering; -use std::collections::VecDeque; use std::ops::{Index, IndexMut}; -use std::{iter, mem, slice}; +use std::{mem, slice}; use rustc_data_structures::captures::Captures; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::graph::dominators::Dominators; use rustc_data_structures::graph::{self, DirectedGraph, StartNode}; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::{self, BasicBlock, Terminator, TerminatorKind}; use tracing::debug; @@ -27,7 +26,7 @@ pub(crate) struct CoverageGraph { /// their relative order is consistent but arbitrary. dominator_order_rank: IndexVec<BasicCoverageBlock, u32>, /// A loop header is a node that dominates one or more of its predecessors. 
- is_loop_header: BitSet<BasicCoverageBlock>, + is_loop_header: DenseBitSet<BasicCoverageBlock>, /// For each node, the loop header node of its nearest enclosing loop. /// This forms a linked list that can be traversed to find all enclosing loops. enclosing_loop_header: IndexVec<BasicCoverageBlock, Option<BasicCoverageBlock>>, @@ -72,7 +71,7 @@ impl CoverageGraph { predecessors, dominators: None, dominator_order_rank: IndexVec::from_elem_n(0, num_nodes), - is_loop_header: BitSet::new_empty(num_nodes), + is_loop_header: DenseBitSet::new_empty(num_nodes), enclosing_loop_header: IndexVec::from_elem_n(None, num_nodes), }; assert_eq!(num_nodes, this.num_nodes()); @@ -211,54 +210,6 @@ impl CoverageGraph { self.dominator_order_rank[a].cmp(&self.dominator_order_rank[b]) } - /// Returns the source of this node's sole in-edge, if it has exactly one. - /// That edge can be assumed to have the same execution count as the node - /// itself (in the absence of panics). - pub(crate) fn sole_predecessor( - &self, - to_bcb: BasicCoverageBlock, - ) -> Option<BasicCoverageBlock> { - // Unlike `simple_successor`, there is no need for extra checks here. - if let &[from_bcb] = self.predecessors[to_bcb].as_slice() { Some(from_bcb) } else { None } - } - - /// Returns the target of this node's sole out-edge, if it has exactly - /// one, but only if that edge can be assumed to have the same execution - /// count as the node itself (in the absence of panics). - pub(crate) fn simple_successor( - &self, - from_bcb: BasicCoverageBlock, - ) -> Option<BasicCoverageBlock> { - // If a node's count is the sum of its out-edges, and it has exactly - // one out-edge, then that edge has the same count as the node. - if self.bcbs[from_bcb].is_out_summable - && let &[to_bcb] = self.successors[from_bcb].as_slice() - { - Some(to_bcb) - } else { - None - } - } - - /// For each loop that contains the given node, yields the "loop header" - /// node representing that loop, from innermost to outermost. If the given - /// node is itself a loop header, it is yielded first. - pub(crate) fn loop_headers_containing( - &self, - bcb: BasicCoverageBlock, - ) -> impl Iterator<Item = BasicCoverageBlock> + Captures<'_> { - let self_if_loop_header = self.is_loop_header.contains(bcb).then_some(bcb).into_iter(); - - let mut curr = Some(bcb); - let strictly_enclosing = iter::from_fn(move || { - let enclosing = self.enclosing_loop_header[curr?]; - curr = enclosing; - enclosing - }); - - self_if_loop_header.chain(strictly_enclosing) - } - /// For the given node, yields the subset of its predecessor nodes that /// it dominates. If that subset is non-empty, the node is a "loop header", /// and each of those predecessors represents an in-edge that jumps back to @@ -489,126 +440,3 @@ impl<'a, 'tcx> graph::Successors for CoverageRelevantSubgraph<'a, 'tcx> { self.coverage_successors(bb).into_iter() } } - -/// State of a node in the coverage graph during ready-first traversal. -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] -enum ReadyState { - /// This node has not yet been added to the fallback queue or ready queue. - Unqueued, - /// This node is currently in the fallback queue. - InFallbackQueue, - /// This node's predecessors have all been visited, so it is in the ready queue. - /// (It might also have a stale entry in the fallback queue.) - InReadyQueue, - /// This node has been visited. - /// (It might also have a stale entry in the fallback queue.) 
- Visited, -} - -/// Iterator that visits nodes in the coverage graph, in an order that always -/// prefers "ready" nodes whose predecessors have already been visited. -pub(crate) struct ReadyFirstTraversal<'a> { - graph: &'a CoverageGraph, - - /// For each node, the number of its predecessor nodes that haven't been visited yet. - n_unvisited_preds: IndexVec<BasicCoverageBlock, u32>, - /// Indicates whether a node has been visited, or which queue it is in. - state: IndexVec<BasicCoverageBlock, ReadyState>, - - /// Holds unvisited nodes whose predecessors have all been visited. - ready_queue: VecDeque<BasicCoverageBlock>, - /// Holds unvisited nodes with some unvisited predecessors. - /// Also contains stale entries for nodes that were upgraded to ready. - fallback_queue: VecDeque<BasicCoverageBlock>, -} - -impl<'a> ReadyFirstTraversal<'a> { - pub(crate) fn new(graph: &'a CoverageGraph) -> Self { - let num_nodes = graph.num_nodes(); - - let n_unvisited_preds = - IndexVec::from_fn_n(|node| graph.predecessors[node].len() as u32, num_nodes); - let mut state = IndexVec::from_elem_n(ReadyState::Unqueued, num_nodes); - - // We know from coverage graph construction that the start node is the - // only node with no predecessors. - debug_assert!( - n_unvisited_preds.iter_enumerated().all(|(node, &n)| (node == START_BCB) == (n == 0)) - ); - let ready_queue = VecDeque::from(vec![START_BCB]); - state[START_BCB] = ReadyState::InReadyQueue; - - Self { graph, state, n_unvisited_preds, ready_queue, fallback_queue: VecDeque::new() } - } - - /// Returns the next node from the ready queue, or else the next unvisited - /// node from the fallback queue. - fn next_inner(&mut self) -> Option<BasicCoverageBlock> { - // Always prefer to yield a ready node if possible. - if let Some(node) = self.ready_queue.pop_front() { - assert_eq!(self.state[node], ReadyState::InReadyQueue); - return Some(node); - } - - while let Some(node) = self.fallback_queue.pop_front() { - match self.state[node] { - // This entry in the fallback queue is not stale, so yield it. - ReadyState::InFallbackQueue => return Some(node), - // This node was added to the fallback queue, but later became - // ready and was visited via the ready queue. Ignore it here. - ReadyState::Visited => {} - // Unqueued nodes can't be in the fallback queue, by definition. - // We know that the ready queue is empty at this point. - ReadyState::Unqueued | ReadyState::InReadyQueue => unreachable!( - "unexpected state for {node:?} in the fallback queue: {:?}", - self.state[node] - ), - } - } - - None - } - - fn mark_visited_and_enqueue_successors(&mut self, node: BasicCoverageBlock) { - assert!(self.state[node] < ReadyState::Visited); - self.state[node] = ReadyState::Visited; - - // For each of this node's successors, decrease the successor's - // "unvisited predecessors" count, and enqueue it if appropriate. - for &succ in &self.graph.successors[node] { - let is_unqueued = match self.state[succ] { - ReadyState::Unqueued => true, - ReadyState::InFallbackQueue => false, - ReadyState::InReadyQueue => { - unreachable!("nodes in the ready queue have no unvisited predecessors") - } - // The successor was already visited via one of its other predecessors. - ReadyState::Visited => continue, - }; - - self.n_unvisited_preds[succ] -= 1; - if self.n_unvisited_preds[succ] == 0 { - // This node's predecessors have all been visited, so add it to - // the ready queue. If it's already in the fallback queue, that - // fallback entry will be ignored later. 
- self.state[succ] = ReadyState::InReadyQueue; - self.ready_queue.push_back(succ); - } else if is_unqueued { - // This node has unvisited predecessors, so add it to the - // fallback queue in case we run out of ready nodes later. - self.state[succ] = ReadyState::InFallbackQueue; - self.fallback_queue.push_back(succ); - } - } - } -} - -impl<'a> Iterator for ReadyFirstTraversal<'a> { - type Item = BasicCoverageBlock; - - fn next(&mut self) -> Option<Self::Item> { - let node = self.next_inner()?; - self.mark_visited_and_enqueue_successors(node); - Some(node) - } -} diff --git a/compiler/rustc_mir_transform/src/coverage/mappings.rs b/compiler/rustc_mir_transform/src/coverage/mappings.rs index 4185b3f4d4d..8d0d92dc367 100644 --- a/compiler/rustc_mir_transform/src/coverage/mappings.rs +++ b/compiler/rustc_mir_transform/src/coverage/mappings.rs @@ -3,7 +3,7 @@ use std::collections::BTreeSet; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::graph::DirectedGraph; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::coverage::{ BlockMarkerId, BranchSpan, ConditionId, ConditionInfo, CoverageInfoHi, CoverageKind, }; @@ -128,7 +128,7 @@ pub(super) fn extract_all_mapping_info_from_mir<'tcx>( } impl ExtractedMappings { - pub(super) fn all_bcbs_with_counter_mappings(&self) -> BitSet<BasicCoverageBlock> { + pub(super) fn all_bcbs_with_counter_mappings(&self) -> DenseBitSet<BasicCoverageBlock> { // Fully destructure self to make sure we don't miss any fields that have mappings. let Self { num_bcbs, @@ -140,7 +140,7 @@ impl ExtractedMappings { } = self; // Identify which BCBs have one or more mappings. - let mut bcbs_with_counter_mappings = BitSet::new_empty(*num_bcbs); + let mut bcbs_with_counter_mappings = DenseBitSet::new_empty(*num_bcbs); let mut insert = |bcb| { bcbs_with_counter_mappings.insert(bcb); }; @@ -172,8 +172,8 @@ impl ExtractedMappings { } /// Returns the set of BCBs that have one or more `Code` mappings. 
- pub(super) fn bcbs_with_ordinary_code_mappings(&self) -> BitSet<BasicCoverageBlock> { - let mut bcbs = BitSet::new_empty(self.num_bcbs); + pub(super) fn bcbs_with_ordinary_code_mappings(&self) -> DenseBitSet<BasicCoverageBlock> { + let mut bcbs = DenseBitSet::new_empty(self.num_bcbs); for &CodeMapping { span: _, bcb } in &self.code_mappings { bcbs.insert(bcb); } @@ -367,9 +367,8 @@ fn calc_test_vectors_index(conditions: &mut Vec<MCDCBranch>) -> usize { }) .collect::<FxIndexMap<_, _>>(); - let mut queue = std::collections::VecDeque::from_iter( - next_conditions.swap_remove(&ConditionId::START).into_iter(), - ); + let mut queue = + std::collections::VecDeque::from_iter(next_conditions.swap_remove(&ConditionId::START)); num_paths_stats[ConditionId::START] = 1; let mut decision_end_nodes = Vec::new(); while let Some(branch) = queue.pop_front() { diff --git a/compiler/rustc_mir_transform/src/coverage/mod.rs b/compiler/rustc_mir_transform/src/coverage/mod.rs index 57956448414..19568735df7 100644 --- a/compiler/rustc_mir_transform/src/coverage/mod.rs +++ b/compiler/rustc_mir_transform/src/coverage/mod.rs @@ -15,16 +15,13 @@ use rustc_middle::hir::nested_filter; use rustc_middle::mir::coverage::{ CoverageKind, DecisionInfo, FunctionCoverageInfo, Mapping, MappingKind, }; -use rustc_middle::mir::{ - self, BasicBlock, BasicBlockData, SourceInfo, Statement, StatementKind, Terminator, - TerminatorKind, -}; +use rustc_middle::mir::{self, BasicBlock, Statement, StatementKind, TerminatorKind}; use rustc_middle::ty::TyCtxt; use rustc_span::Span; use rustc_span::def_id::LocalDefId; use tracing::{debug, debug_span, trace}; -use crate::coverage::counters::{CoverageCounters, Site}; +use crate::coverage::counters::CoverageCounters; use crate::coverage::graph::CoverageGraph; use crate::coverage::mappings::ExtractedMappings; @@ -92,8 +89,7 @@ fn instrument_function_for_coverage<'tcx>(tcx: TyCtxt<'tcx>, mir_body: &mut mir: return; } - let coverage_counters = - CoverageCounters::make_bcb_counters(&graph, &bcbs_with_counter_mappings); + let coverage_counters = counters::make_bcb_counters(&graph, &bcbs_with_counter_mappings); let mappings = create_mappings(&extracted_mappings, &coverage_counters); if mappings.is_empty() { @@ -242,27 +238,8 @@ fn inject_coverage_statements<'tcx>( coverage_counters: &CoverageCounters, ) { // Inject counter-increment statements into MIR. - for (id, site) in coverage_counters.counter_increment_sites() { - // Determine the block to inject a counter-increment statement into. - // For BCB nodes this is just their first block, but for edges we need - // to create a new block between the two BCBs, and inject into that. - let target_bb = match site { - Site::Node { bcb } => graph[bcb].leader_bb(), - Site::Edge { from_bcb, to_bcb } => { - // Create a new block between the last block of `from_bcb` and - // the first block of `to_bcb`. 
- let from_bb = graph[from_bcb].last_bb(); - let to_bb = graph[to_bcb].leader_bb(); - - let new_bb = inject_edge_counter_basic_block(mir_body, from_bb, to_bb); - debug!( - "Edge {from_bcb:?} (last {from_bb:?}) -> {to_bcb:?} (leader {to_bb:?}) \ - requires a new MIR BasicBlock {new_bb:?} for counter increment {id:?}", - ); - new_bb - } - }; - + for (id, bcb) in coverage_counters.counter_increment_sites() { + let target_bb = graph[bcb].leader_bb(); inject_statement(mir_body, CoverageKind::CounterIncrement { id }, target_bb); } @@ -335,31 +312,6 @@ fn inject_mcdc_statements<'tcx>( } } -/// Given two basic blocks that have a control-flow edge between them, creates -/// and returns a new block that sits between those blocks. -fn inject_edge_counter_basic_block( - mir_body: &mut mir::Body<'_>, - from_bb: BasicBlock, - to_bb: BasicBlock, -) -> BasicBlock { - let span = mir_body[from_bb].terminator().source_info.span.shrink_to_hi(); - let new_bb = mir_body.basic_blocks_mut().push(BasicBlockData { - statements: vec![], // counter will be injected here - terminator: Some(Terminator { - source_info: SourceInfo::outermost(span), - kind: TerminatorKind::Goto { target: to_bb }, - }), - is_cleanup: false, - }); - let edge_ref = mir_body[from_bb] - .terminator_mut() - .successors_mut() - .find(|successor| **successor == to_bb) - .expect("from_bb should have a successor for to_bb"); - *edge_ref = new_bb; - new_bb -} - fn inject_statement(mir_body: &mut mir::Body<'_>, counter_kind: CoverageKind, bb: BasicBlock) { debug!(" injecting statement {counter_kind:?} for {bb:?}"); let data = &mut mir_body[bb]; diff --git a/compiler/rustc_mir_transform/src/coverage/query.rs b/compiler/rustc_mir_transform/src/coverage/query.rs index edaec3c7965..3e7cf8541c2 100644 --- a/compiler/rustc_mir_transform/src/coverage/query.rs +++ b/compiler/rustc_mir_transform/src/coverage/query.rs @@ -1,5 +1,5 @@ use rustc_data_structures::captures::Captures; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrFlags; use rustc_middle::mir::coverage::{ CounterId, CovTerm, CoverageIdsInfo, CoverageKind, Expression, ExpressionId, @@ -92,13 +92,13 @@ fn coverage_ids_info<'tcx>( let Some(fn_cov_info) = mir_body.function_coverage_info.as_deref() else { return CoverageIdsInfo { - counters_seen: BitSet::new_empty(0), - zero_expressions: BitSet::new_empty(0), + counters_seen: DenseBitSet::new_empty(0), + zero_expressions: DenseBitSet::new_empty(0), }; }; - let mut counters_seen = BitSet::new_empty(fn_cov_info.num_counters); - let mut expressions_seen = BitSet::new_filled(fn_cov_info.expressions.len()); + let mut counters_seen = DenseBitSet::new_empty(fn_cov_info.num_counters); + let mut expressions_seen = DenseBitSet::new_filled(fn_cov_info.expressions.len()); // For each expression ID that is directly used by one or more mappings, // mark it as not-yet-seen. This indicates that we expect to see a @@ -148,23 +148,23 @@ fn is_inlined(body: &Body<'_>, statement: &Statement<'_>) -> bool { scope_data.inlined.is_some() || scope_data.inlined_parent_scope.is_some() } -/// Identify expressions that will always have a value of zero, and note -/// their IDs in a `BitSet`. Mappings that refer to a zero expression -/// can instead become mappings to a constant zero value. +/// Identify expressions that will always have a value of zero, and note their +/// IDs in a `DenseBitSet`. Mappings that refer to a zero expression can instead +/// become mappings to a constant zero value. 
/// /// This function mainly exists to preserve the simplifications that were /// already being performed by the Rust-side expression renumbering, so that /// the resulting coverage mappings don't get worse. fn identify_zero_expressions( fn_cov_info: &FunctionCoverageInfo, - counters_seen: &BitSet<CounterId>, - expressions_seen: &BitSet<ExpressionId>, -) -> BitSet<ExpressionId> { + counters_seen: &DenseBitSet<CounterId>, + expressions_seen: &DenseBitSet<ExpressionId>, +) -> DenseBitSet<ExpressionId> { // The set of expressions that either were optimized out entirely, or // have zero as both of their operands, and will therefore always have // a value of zero. Other expressions that refer to these as operands // can have those operands replaced with `CovTerm::Zero`. - let mut zero_expressions = BitSet::new_empty(fn_cov_info.expressions.len()); + let mut zero_expressions = DenseBitSet::new_empty(fn_cov_info.expressions.len()); // Simplify a copy of each expression based on lower-numbered expressions, // and then update the set of always-zero expressions if necessary. @@ -228,8 +228,8 @@ fn identify_zero_expressions( /// into account knowledge of which counters are unused and which expressions /// are always zero. fn is_zero_term( - counters_seen: &BitSet<CounterId>, - zero_expressions: &BitSet<ExpressionId>, + counters_seen: &DenseBitSet<CounterId>, + zero_expressions: &DenseBitSet<ExpressionId>, term: CovTerm, ) -> bool { match term { diff --git a/compiler/rustc_mir_transform/src/cross_crate_inline.rs b/compiler/rustc_mir_transform/src/cross_crate_inline.rs index e1f1dd83f0d..8fce856687c 100644 --- a/compiler/rustc_mir_transform/src/cross_crate_inline.rs +++ b/compiler/rustc_mir_transform/src/cross_crate_inline.rs @@ -46,7 +46,7 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { // #[inline(never)] to force code generation. match codegen_fn_attrs.inline { InlineAttr::Never => return false, - InlineAttr::Hint | InlineAttr::Always => return true, + InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. } => return true, _ => {} } @@ -69,8 +69,9 @@ fn cross_crate_inlinable(tcx: TyCtxt<'_>, def_id: LocalDefId) -> bool { // Don't do any inference if codegen optimizations are disabled and also MIR inlining is not // enabled. This ensures that we do inference even if someone only passes -Zinline-mir, // which is less confusing than having to also enable -Copt-level=1. - if matches!(tcx.sess.opts.optimize, OptLevel::No) && !pm::should_run_pass(tcx, &inline::Inline) - { + let inliner_will_run = pm::should_run_pass(tcx, &inline::Inline) + || inline::ForceInline::should_run_pass_for_callee(tcx, def_id.to_def_id()); + if matches!(tcx.sess.opts.optimize, OptLevel::No) && !inliner_will_run { return false; } diff --git a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs index cc44114782c..51af77778af 100644 --- a/compiler/rustc_mir_transform/src/dataflow_const_prop.rs +++ b/compiler/rustc_mir_transform/src/dataflow_const_prop.rs @@ -408,6 +408,18 @@ impl<'a, 'tcx> ConstAnalysis<'a, 'tcx> { state: &mut State<FlatSet<Scalar>>, ) -> ValueOrPlace<FlatSet<Scalar>> { let val = match rvalue { + Rvalue::Len(place) => { + let place_ty = place.ty(self.local_decls, self.tcx); + if let ty::Array(_, len) = place_ty.ty.kind() { + Const::Ty(self.tcx.types.usize, *len) + .try_eval_scalar(self.tcx, self.typing_env) + .map_or(FlatSet::Top, FlatSet::Elem) + } else if let [ProjectionElem::Deref] = place.projection[..] 
{ + state.get_len(place.local.into(), &self.map) + } else { + FlatSet::Top + } + } Rvalue::Cast(CastKind::IntToInt | CastKind::IntToFloat, operand, ty) => { let Ok(layout) = self.tcx.layout_of(self.typing_env.as_query_input(*ty)) else { return ValueOrPlace::Value(FlatSet::Top); diff --git a/compiler/rustc_mir_transform/src/dead_store_elimination.rs b/compiler/rustc_mir_transform/src/dead_store_elimination.rs index 0c75cdadc92..434e921d439 100644 --- a/compiler/rustc_mir_transform/src/dead_store_elimination.rs +++ b/compiler/rustc_mir_transform/src/dead_store_elimination.rs @@ -26,8 +26,8 @@ use crate::util::is_within_packed; /// Performs the optimization on the body /// -/// The `borrowed` set must be a `BitSet` of all the locals that are ever borrowed in this body. It -/// can be generated via the [`borrowed_locals`] function. +/// The `borrowed` set must be a `DenseBitSet` of all the locals that are ever borrowed in this +/// body. It can be generated via the [`borrowed_locals`] function. fn eliminate<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let borrowed_locals = borrowed_locals(body); diff --git a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs index 67b215c7c9d..049f13ce96d 100644 --- a/compiler/rustc_mir_transform/src/deduce_param_attrs.rs +++ b/compiler/rustc_mir_transform/src/deduce_param_attrs.rs @@ -6,7 +6,7 @@ //! dependent crates can use them. use rustc_hir::def_id::LocalDefId; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::{NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{Body, Location, Operand, Place, RETURN_PLACE, Terminator, TerminatorKind}; use rustc_middle::ty::{self, DeducedParamAttrs, Ty, TyCtxt}; @@ -18,13 +18,13 @@ struct DeduceReadOnly { /// Each bit is indexed by argument number, starting at zero (so 0 corresponds to local decl /// 1). The bit is true if the argument may have been mutated or false if we know it hasn't /// been up to the point we're at. - mutable_args: BitSet<usize>, + mutable_args: DenseBitSet<usize>, } impl DeduceReadOnly { /// Returns a new DeduceReadOnly instance. fn new(arg_count: usize) -> Self { - Self { mutable_args: BitSet::new_empty(arg_count) } + Self { mutable_args: DenseBitSet::new_empty(arg_count) } } } diff --git a/compiler/rustc_mir_transform/src/dest_prop.rs b/compiler/rustc_mir_transform/src/dest_prop.rs index e99bee6a01f..b4f9f1f08ef 100644 --- a/compiler/rustc_mir_transform/src/dest_prop.rs +++ b/compiler/rustc_mir_transform/src/dest_prop.rs @@ -132,7 +132,7 @@ //! [attempt 3]: https://github.com/rust-lang/rust/pull/72632 use rustc_data_structures::fx::{FxIndexMap, IndexEntry, IndexOccupiedEntry}; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::interval::SparseIntervalMatrix; use rustc_middle::bug; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; @@ -204,7 +204,8 @@ impl<'tcx> crate::MirPass<'tcx> for DestinationPropagation { // Because we only filter once per round, it is unsound to use a local for more than // one merge operation within a single round of optimizations. We store here which ones // we have already used. - let mut merged_locals: BitSet<Local> = BitSet::new_empty(body.local_decls.len()); + let mut merged_locals: DenseBitSet<Local> = + DenseBitSet::new_empty(body.local_decls.len()); // This is the set of merges we will apply this round. It is a subset of the candidates. 
let mut merges = FxIndexMap::default(); @@ -274,7 +275,7 @@ fn apply_merges<'tcx>( body: &mut Body<'tcx>, tcx: TyCtxt<'tcx>, merges: FxIndexMap<Local, Local>, - merged_locals: BitSet<Local>, + merged_locals: DenseBitSet<Local>, ) { let mut merger = Merger { tcx, merges, merged_locals }; merger.visit_body_preserves_cfg(body); @@ -283,7 +284,7 @@ fn apply_merges<'tcx>( struct Merger<'tcx> { tcx: TyCtxt<'tcx>, merges: FxIndexMap<Local, Local>, - merged_locals: BitSet<Local>, + merged_locals: DenseBitSet<Local>, } impl<'tcx> MutVisitor<'tcx> for Merger<'tcx> { @@ -351,7 +352,7 @@ impl Candidates { /// Collects the candidates for merging. /// /// This is responsible for enforcing the first and third bullet point. - fn reset_and_find<'tcx>(&mut self, body: &Body<'tcx>, borrowed: &BitSet<Local>) { + fn reset_and_find<'tcx>(&mut self, body: &Body<'tcx>, borrowed: &DenseBitSet<Local>) { self.c.clear(); self.reverse.clear(); let mut visitor = FindAssignments { body, candidates: &mut self.c, borrowed }; @@ -574,6 +575,7 @@ impl WriteInfo { | Rvalue::NullaryOp(_, _) | Rvalue::Ref(_, _, _) | Rvalue::RawPtr(_, _) + | Rvalue::Len(_) | Rvalue::Discriminant(_) | Rvalue::CopyForDeref(_) => {} } @@ -735,7 +737,7 @@ fn places_to_candidate_pair<'tcx>( struct FindAssignments<'a, 'tcx> { body: &'a Body<'tcx>, candidates: &'a mut FxIndexMap<Local, Vec<Local>>, - borrowed: &'a BitSet<Local>, + borrowed: &'a DenseBitSet<Local>, } impl<'tcx> Visitor<'tcx> for FindAssignments<'_, 'tcx> { diff --git a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs index b909dfa1320..d6ecadbfe29 100644 --- a/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs +++ b/compiler/rustc_mir_transform/src/elaborate_box_derefs.rs @@ -29,13 +29,8 @@ fn build_ptr_tys<'tcx>( pub(super) fn build_projection<'tcx>( unique_ty: Ty<'tcx>, nonnull_ty: Ty<'tcx>, - ptr_ty: Ty<'tcx>, -) -> [PlaceElem<'tcx>; 3] { - [ - PlaceElem::Field(FieldIdx::ZERO, unique_ty), - PlaceElem::Field(FieldIdx::ZERO, nonnull_ty), - PlaceElem::Field(FieldIdx::ZERO, ptr_ty), - ] +) -> [PlaceElem<'tcx>; 2] { + [PlaceElem::Field(FieldIdx::ZERO, unique_ty), PlaceElem::Field(FieldIdx::ZERO, nonnull_ty)] } struct ElaborateBoxDerefVisitor<'a, 'tcx> { @@ -75,10 +70,14 @@ impl<'a, 'tcx> MutVisitor<'tcx> for ElaborateBoxDerefVisitor<'a, 'tcx> { self.patch.add_assign( location, Place::from(ptr_local), - Rvalue::Use(Operand::Copy( - Place::from(place.local) - .project_deeper(&build_projection(unique_ty, nonnull_ty, ptr_ty), tcx), - )), + Rvalue::Cast( + CastKind::Transmute, + Operand::Copy( + Place::from(place.local) + .project_deeper(&build_projection(unique_ty, nonnull_ty), tcx), + ), + ptr_ty, + ), ); place.local = ptr_local; @@ -133,8 +132,10 @@ impl<'tcx> crate::MirPass<'tcx> for ElaborateBoxDerefs { let (unique_ty, nonnull_ty, ptr_ty) = build_ptr_tys(tcx, boxed_ty, unique_did, nonnull_did); - new_projections - .extend_from_slice(&build_projection(unique_ty, nonnull_ty, ptr_ty)); + new_projections.extend_from_slice(&build_projection(unique_ty, nonnull_ty)); + // While we can't project into `NonNull<_>` in a basic block + // due to MCP#807, this is debug info where it's fine. + new_projections.push(PlaceElem::Field(FieldIdx::ZERO, ptr_ty)); new_projections.push(PlaceElem::Deref); } else if let Some(new_projections) = new_projections.as_mut() { // Keep building up our projections list once we've started it. 
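A minimal sketch of the layout fact the `elaborate_box_derefs` change above relies on (illustrative only, ordinary user-level Rust rather than the MIR the pass builds): `NonNull<T>` is a transparent wrapper around a raw pointer, so transmuting it yields the same pointer value that the removed third field projection used to produce; per the comment in that hunk, MCP#807 only restricts the projection form inside MIR basic blocks.

    use std::ptr::NonNull;

    fn main() {
        let mut value = 42_i32;
        let nonnull = NonNull::new(&mut value as *mut i32).expect("non-null");

        // NonNull<i32> is #[repr(transparent)] over a raw pointer, so this
        // transmute just reinterprets the same pointer bits.
        let raw: *mut i32 = unsafe { std::mem::transmute(nonnull) };

        assert_eq!(raw, nonnull.as_ptr());
        unsafe { assert_eq!(*raw, 42) };
    }
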
diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index 3ebc9113725..988f1a25561 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -2,7 +2,7 @@ use std::fmt; use rustc_abi::{FieldIdx, VariantIdx}; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; use rustc_middle::ty::{self, TyCtxt}; @@ -96,10 +96,10 @@ impl<'tcx> crate::MirPass<'tcx> for ElaborateDrops { fn compute_dead_unwinds<'a, 'tcx>( body: &'a Body<'tcx>, flow_inits: &mut ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, -) -> BitSet<BasicBlock> { +) -> DenseBitSet<BasicBlock> { // We only need to do this pass once, because unwind edges can only // reach cleanup blocks, which can't have unwind edges themselves. - let mut dead_unwinds = BitSet::new_empty(body.basic_blocks.len()); + let mut dead_unwinds = DenseBitSet::new_empty(body.basic_blocks.len()); for (bb, bb_data) in body.basic_blocks.iter_enumerated() { let TerminatorKind::Drop { place, unwind: UnwindAction::Cleanup(_), .. } = bb_data.terminator().kind diff --git a/compiler/rustc_mir_transform/src/errors.rs b/compiler/rustc_mir_transform/src/errors.rs index 2d9eeddea2e..a2fd46043ca 100644 --- a/compiler/rustc_mir_transform/src/errors.rs +++ b/compiler/rustc_mir_transform/src/errors.rs @@ -4,12 +4,34 @@ use rustc_macros::{Diagnostic, LintDiagnostic, Subdiagnostic}; use rustc_middle::mir::AssertKind; use rustc_middle::ty::TyCtxt; use rustc_session::lint::{self, Lint}; -use rustc_span::Span; use rustc_span::def_id::DefId; +use rustc_span::{Span, Symbol}; use crate::fluent_generated as fluent; #[derive(LintDiagnostic)] +#[diag(mir_transform_unconditional_recursion)] +#[help] +pub(crate) struct UnconditionalRecursion { + #[label] + pub(crate) span: Span, + #[label(mir_transform_unconditional_recursion_call_site_label)] + pub(crate) call_sites: Vec<Span>, +} + +#[derive(Diagnostic)] +#[diag(mir_transform_force_inline_attr)] +#[note] +pub(crate) struct InvalidForceInline { + #[primary_span] + pub attr_span: Span, + #[label(mir_transform_callee)] + pub callee_span: Span, + pub callee: String, + pub reason: &'static str, +} + +#[derive(LintDiagnostic)] pub(crate) enum ConstMutate { #[diag(mir_transform_const_modify)] #[note] @@ -142,3 +164,29 @@ pub(crate) struct MustNotSuspendReason { #[note(mir_transform_note2)] #[help] pub(crate) struct UndefinedTransmute; + +#[derive(Diagnostic)] +#[diag(mir_transform_force_inline)] +#[note] +pub(crate) struct ForceInlineFailure { + #[label(mir_transform_caller)] + pub caller_span: Span, + #[label(mir_transform_callee)] + pub callee_span: Span, + #[label(mir_transform_attr)] + pub attr_span: Span, + #[primary_span] + #[label(mir_transform_call)] + pub call_span: Span, + pub callee: String, + pub caller: String, + pub reason: &'static str, + #[subdiagnostic] + pub justification: Option<ForceInlineJustification>, +} + +#[derive(Subdiagnostic)] +#[note(mir_transform_force_inline_justification)] +pub(crate) struct ForceInlineJustification { + pub sym: Symbol, +} diff --git a/compiler/rustc_mir_transform/src/gvn.rs b/compiler/rustc_mir_transform/src/gvn.rs index 283ed94b615..cb03b422d9e 100644 --- a/compiler/rustc_mir_transform/src/gvn.rs +++ b/compiler/rustc_mir_transform/src/gvn.rs @@ -94,7 +94,7 @@ use rustc_const_eval::interpret::{ use rustc_data_structures::fx::FxIndexSet; use 
rustc_data_structures::graph::dominators::Dominators; use rustc_hir::def::DefKind; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexVec, newtype_index}; use rustc_middle::bug; use rustc_middle::mir::interpret::GlobalAlloc; @@ -223,6 +223,8 @@ enum Value<'tcx> { Projection(VnIndex, ProjectionElem<VnIndex, Ty<'tcx>>), /// Discriminant of the given value. Discriminant(VnIndex), + /// Length of an array or slice. + Len(VnIndex), // Operations. NullaryOp(NullOp<'tcx>, Ty<'tcx>), @@ -256,7 +258,7 @@ struct VnState<'body, 'tcx> { feature_unsized_locals: bool, ssa: &'body SsaLocals, dominators: Dominators<BasicBlock>, - reused_locals: BitSet<Local>, + reused_locals: DenseBitSet<Local>, } impl<'body, 'tcx> VnState<'body, 'tcx> { @@ -287,7 +289,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { feature_unsized_locals: tcx.features().unsized_locals(), ssa, dominators, - reused_locals: BitSet::new_empty(local_decls.len()), + reused_locals: DenseBitSet::new_empty(local_decls.len()), } } @@ -511,6 +513,13 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { self.ecx.discriminant_for_variant(base.layout.ty, variant).discard_err()?; discr_value.into() } + Len(slice) => { + let slice = self.evaluated[slice].as_ref()?; + let usize_layout = self.ecx.layout_of(self.tcx.types.usize).unwrap(); + let len = slice.len(&self.ecx).discard_err()?; + let imm = ImmTy::from_uint(len, usize_layout); + imm.into() + } NullaryOp(null_op, ty) => { let layout = self.ecx.layout_of(ty).ok()?; if let NullOp::SizeOf | NullOp::AlignOf = null_op @@ -854,6 +863,7 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { } // Operations. + Rvalue::Len(ref mut place) => return self.simplify_len(place, location), Rvalue::Cast(ref mut kind, ref mut value, to) => { return self.simplify_cast(kind, value, to, location); } @@ -1366,63 +1376,155 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { return self.new_opaque(); } - let mut was_updated = false; + let mut was_ever_updated = false; + loop { + let mut was_updated_this_iteration = false; + + // Transmuting between raw pointers is just a pointer cast so long as + // they have the same metadata type (like `*const i32` <=> `*mut u64` + // or `*mut [i32]` <=> `*const [u64]`), including the common special + // case of `*const T` <=> `*mut T`. + if let Transmute = kind + && from.is_unsafe_ptr() + && to.is_unsafe_ptr() + && self.pointers_have_same_metadata(from, to) + { + *kind = PtrToPtr; + was_updated_this_iteration = true; + } - // If that cast just casts away the metadata again, - if let PtrToPtr = kind - && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. }, _, fields) = - self.get(value) - && let ty::RawPtr(to_pointee, _) = to.kind() - && to_pointee.is_sized(self.tcx, self.typing_env()) - { - from = *data_pointer_ty; - value = fields[0]; - was_updated = true; - if *data_pointer_ty == to { - return Some(fields[0]); + // If a cast just casts away the metadata again, then we can get it by + // casting the original thin pointer passed to `from_raw_parts` + if let PtrToPtr = kind + && let Value::Aggregate(AggregateTy::RawPtr { data_pointer_ty, .. 
}, _, fields) = + self.get(value) + && let ty::RawPtr(to_pointee, _) = to.kind() + && to_pointee.is_sized(self.tcx, self.typing_env()) + { + from = *data_pointer_ty; + value = fields[0]; + was_updated_this_iteration = true; + if *data_pointer_ty == to { + return Some(fields[0]); + } } - } - // PtrToPtr-then-PtrToPtr can skip the intermediate step - if let PtrToPtr = kind - && let Value::Cast { kind: inner_kind, value: inner_value, from: inner_from, to: _ } = - *self.get(value) - && let PtrToPtr = inner_kind - { - from = inner_from; - value = inner_value; - was_updated = true; - if inner_from == to { - return Some(inner_value); + // Aggregate-then-Transmute can just transmute the original field value, + // so long as the bytes of a value from only from a single field. + if let Transmute = kind + && let Value::Aggregate(_aggregate_ty, variant_idx, field_values) = self.get(value) + && let Some((field_idx, field_ty)) = + self.value_is_all_in_one_field(from, *variant_idx) + { + from = field_ty; + value = field_values[field_idx.as_usize()]; + was_updated_this_iteration = true; + if field_ty == to { + return Some(value); + } } - } - // PtrToPtr-then-Transmute can just transmute the original, so long as the - // PtrToPtr didn't change metadata (and thus the size of the pointer) - if let Transmute = kind - && let Value::Cast { - kind: PtrToPtr, + // Various cast-then-cast cases can be simplified. + if let Value::Cast { + kind: inner_kind, value: inner_value, from: inner_from, to: inner_to, } = *self.get(value) - && self.pointers_have_same_metadata(inner_from, inner_to) - { - from = inner_from; - value = inner_value; - was_updated = true; - if inner_from == to { - return Some(inner_value); + { + let new_kind = match (inner_kind, *kind) { + // Even if there's a narrowing cast in here that's fine, because + // things like `*mut [i32] -> *mut i32 -> *const i32` and + // `*mut [i32] -> *const [i32] -> *const i32` can skip the middle in MIR. + (PtrToPtr, PtrToPtr) => Some(PtrToPtr), + // PtrToPtr-then-Transmute is fine so long as the pointer cast is identity: + // `*const T -> *mut T -> NonNull<T>` is fine, but we need to check for narrowing + // to skip things like `*const [i32] -> *const i32 -> NonNull<T>`. + (PtrToPtr, Transmute) + if self.pointers_have_same_metadata(inner_from, inner_to) => + { + Some(Transmute) + } + // Similarly, for Transmute-then-PtrToPtr. Note that we need to check different + // variables for their metadata, and thus this can't merge with the previous arm. + (Transmute, PtrToPtr) if self.pointers_have_same_metadata(from, to) => { + Some(Transmute) + } + // If would be legal to always do this, but we don't want to hide information + // from the backend that it'd otherwise be able to use for optimizations. 
+ (Transmute, Transmute) + if !self.type_may_have_niche_of_interest_to_backend(inner_to) => + { + Some(Transmute) + } + _ => None, + }; + if let Some(new_kind) = new_kind { + *kind = new_kind; + from = inner_from; + value = inner_value; + was_updated_this_iteration = true; + if inner_from == to { + return Some(inner_value); + } + } + } + + if was_updated_this_iteration { + was_ever_updated = true; + } else { + break; } } - if was_updated && let Some(op) = self.try_as_operand(value, location) { + if was_ever_updated && let Some(op) = self.try_as_operand(value, location) { *operand = op; } Some(self.insert(Value::Cast { kind: *kind, value, from, to })) } + fn simplify_len(&mut self, place: &mut Place<'tcx>, location: Location) -> Option<VnIndex> { + // Trivial case: we are fetching a statically known length. + let place_ty = place.ty(self.local_decls, self.tcx).ty; + if let ty::Array(_, len) = place_ty.kind() { + return self.insert_constant(Const::from_ty_const( + *len, + self.tcx.types.usize, + self.tcx, + )); + } + + let mut inner = self.simplify_place_value(place, location)?; + + // The length information is stored in the wide pointer. + // Reborrowing copies length information from one pointer to the other. + while let Value::Address { place: borrowed, .. } = self.get(inner) + && let [PlaceElem::Deref] = borrowed.projection[..] + && let Some(borrowed) = self.locals[borrowed.local] + { + inner = borrowed; + } + + // We have an unsizing cast, which assigns the length to wide pointer metadata. + if let Value::Cast { kind, from, to, .. } = self.get(inner) + && let CastKind::PointerCoercion(ty::adjustment::PointerCoercion::Unsize, _) = kind + && let Some(from) = from.builtin_deref(true) + && let ty::Array(_, len) = from.kind() + && let Some(to) = to.builtin_deref(true) + && let ty::Slice(..) = to.kind() + { + return self.insert_constant(Const::from_ty_const( + *len, + self.tcx.types.usize, + self.tcx, + )); + } + + // Fallback: a symbolic `Len`. + Some(self.insert(Value::Len(inner))) + } + fn pointers_have_same_metadata(&self, left_ptr_ty: Ty<'tcx>, right_ptr_ty: Ty<'tcx>) -> bool { let left_meta_ty = left_ptr_ty.pointee_metadata_ty_or_projection(self.tcx); let right_meta_ty = right_ptr_ty.pointee_metadata_ty_or_projection(self.tcx); @@ -1438,6 +1540,54 @@ impl<'body, 'tcx> VnState<'body, 'tcx> { false } } + + /// Returns `false` if we know for sure that this type has no interesting niche, + /// and thus we can skip transmuting through it without worrying. + /// + /// The backend will emit `assume`s when transmuting between types with niches, + /// so we want to preserve `i32 -> char -> u32` so that that data is around, + /// but it's fine to skip whole-range-is-value steps like `A -> u32 -> B`. + fn type_may_have_niche_of_interest_to_backend(&self, ty: Ty<'tcx>) -> bool { + let Ok(layout) = self.ecx.layout_of(ty) else { + // If it's too generic or something, then assume it might be interesting later. + return true; + }; + + match layout.backend_repr { + BackendRepr::Uninhabited => true, + BackendRepr::Scalar(a) => !a.is_always_valid(&self.ecx), + BackendRepr::ScalarPair(a, b) => { + !a.is_always_valid(&self.ecx) || !b.is_always_valid(&self.ecx) + } + BackendRepr::Vector { .. } | BackendRepr::Memory { .. 
} => false, + } + } + + fn value_is_all_in_one_field( + &self, + ty: Ty<'tcx>, + variant: VariantIdx, + ) -> Option<(FieldIdx, Ty<'tcx>)> { + if let Ok(layout) = self.ecx.layout_of(ty) + && let abi::Variants::Single { index } = layout.variants + && index == variant + && let Some((field_idx, field_layout)) = layout.non_1zst_field(&self.ecx) + && layout.size == field_layout.size + { + // We needed to check the variant to avoid trying to read the tag + // field from an enum where no fields have variants, since that tag + // field isn't in the `Aggregate` from which we're getting values. + Some((FieldIdx::from_usize(field_idx), field_layout.ty)) + } else if let ty::Adt(adt, args) = ty.kind() + && adt.is_struct() + && adt.repr().transparent() + && let [single_field] = adt.non_enum_variant().fields.raw.as_slice() + { + Some((FieldIdx::ZERO, single_field.ty(self.tcx, args))) + } else { + None + } + } } fn op_to_prop_const<'tcx>( @@ -1615,7 +1765,7 @@ impl<'tcx> MutVisitor<'tcx> for VnState<'_, 'tcx> { struct StorageRemover<'tcx> { tcx: TyCtxt<'tcx>, - reused_locals: BitSet<Local>, + reused_locals: DenseBitSet<Local>, } impl<'tcx> MutVisitor<'tcx> for StorageRemover<'tcx> { diff --git a/compiler/rustc_mir_transform/src/impossible_predicates.rs b/compiler/rustc_mir_transform/src/impossible_predicates.rs new file mode 100644 index 00000000000..ba8389bbe2f --- /dev/null +++ b/compiler/rustc_mir_transform/src/impossible_predicates.rs @@ -0,0 +1,56 @@ +//! Check if it's even possible to satisfy the 'where' clauses +//! for this item. +//! +//! It's possible to `#!feature(trivial_bounds)]` to write +//! a function with impossible to satisfy clauses, e.g.: +//! `fn foo() where String: Copy {}`. +//! +//! We don't usually need to worry about this kind of case, +//! since we would get a compilation error if the user tried +//! to call it. However, since we optimize even without any +//! calls to the function, we need to make sure that it even +//! makes sense to try to evaluate the body. +//! +//! If there are unsatisfiable where clauses, then all bets are +//! off, and we just give up. +//! +//! We manually filter the predicates, skipping anything that's not +//! "global". We are in a potentially generic context +//! (e.g. we are evaluating a function without instantiating generic +//! parameters, so this filtering serves two purposes: +//! +//! 1. We skip evaluating any predicates that we would +//! never be able prove are unsatisfiable (e.g. `<T as Foo>` +//! 2. We avoid trying to normalize predicates involving generic +//! parameters (e.g. `<T as Foo>::MyItem`). This can confuse +//! the normalization code (leading to cycle errors), since +//! it's usually never invoked in this way. + +use rustc_middle::mir::{Body, START_BLOCK, TerminatorKind}; +use rustc_middle::ty::{TyCtxt, TypeVisitableExt}; +use rustc_trait_selection::traits; +use tracing::trace; + +use crate::pass_manager::MirPass; + +pub(crate) struct ImpossiblePredicates; + +impl<'tcx> MirPass<'tcx> for ImpossiblePredicates { + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let predicates = tcx + .predicates_of(body.source.def_id()) + .predicates + .iter() + .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None }); + if traits::impossible_predicates(tcx, traits::elaborate(tcx, predicates).collect()) { + trace!("found unsatisfiable predicates for {:?}", body.source); + // Clear the body to only contain a single `unreachable` statement. 
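// An illustrative, nightly-only crate (hypothetical, not taken from this patch)
// showing the situation the module docs above describe: with `trivial_bounds`
// the item below type-checks even though its bound can never hold, so this pass
// replaces its body with the single `Unreachable` terminator built just below.
#![feature(trivial_bounds)]
#![allow(trivial_bounds)]

// Never callable: `String` is not `Copy`, so no caller can satisfy the bound,
// and there is no point in running MIR optimizations over the body.
fn impossible()
where
    String: Copy,
{
}

fn main() {}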
+ let bbs = body.basic_blocks.as_mut(); + bbs.raw.truncate(1); + bbs[START_BLOCK].statements.clear(); + bbs[START_BLOCK].terminator_mut().kind = TerminatorKind::Unreachable; + body.var_debug_info.clear(); + body.local_decls.raw.truncate(body.arg_count + 1); + } + } +} diff --git a/compiler/rustc_mir_transform/src/inline.rs b/compiler/rustc_mir_transform/src/inline.rs index 339acbad6b9..2052e28325c 100644 --- a/compiler/rustc_mir_transform/src/inline.rs +++ b/compiler/rustc_mir_transform/src/inline.rs @@ -8,31 +8,26 @@ use rustc_attr_parsing::InlineAttr; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_index::Idx; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; -use rustc_middle::middle::codegen_fn_attrs::{CodegenFnAttrFlags, CodegenFnAttrs}; +use rustc_middle::middle::codegen_fn_attrs::CodegenFnAttrs; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; use rustc_middle::ty::{self, Instance, InstanceKind, Ty, TyCtxt, TypeFlags, TypeVisitableExt}; use rustc_session::config::{DebugInfo, OptLevel}; use rustc_span::source_map::Spanned; -use rustc_span::sym; use tracing::{debug, instrument, trace, trace_span}; use crate::cost_checker::CostChecker; use crate::deref_separator::deref_finder; use crate::simplify::simplify_cfg; -use crate::util; use crate::validate::validate_types; +use crate::{check_inline, util}; pub(crate) mod cycle; const TOP_DOWN_DEPTH_LIMIT: usize = 5; -// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden -// by custom rustc drivers, running all the steps by themselves. See #114628. -pub struct Inline; - #[derive(Clone, Debug)] struct CallSite<'tcx> { callee: Instance<'tcx>, @@ -41,14 +36,12 @@ struct CallSite<'tcx> { source_info: SourceInfo, } +// Made public so that `mir_drops_elaborated_and_const_checked` can be overridden +// by custom rustc drivers, running all the steps by themselves. See #114628. +pub struct Inline; + impl<'tcx> crate::MirPass<'tcx> for Inline { fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - // FIXME(#127234): Coverage instrumentation currently doesn't handle inlined - // MIR correctly when Modified Condition/Decision Coverage is enabled. - if sess.instrument_coverage_mcdc() { - return false; - } - if let Some(enabled) = sess.opts.unstable_opts.inline_mir { return enabled; } @@ -67,7 +60,7 @@ impl<'tcx> crate::MirPass<'tcx> for Inline { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let span = trace_span!("inline", body = %tcx.def_path_str(body.source.def_id())); let _guard = span.enter(); - if inline(tcx, body) { + if inline::<NormalInliner<'tcx>>(tcx, body) { debug!("running simplify cfg on {:?}", body.source); simplify_cfg(body); deref_finder(tcx, body); @@ -75,47 +68,83 @@ impl<'tcx> crate::MirPass<'tcx> for Inline { } } -fn inline<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool { - let def_id = body.source.def_id().expect_local(); +pub struct ForceInline; - // Only do inlining into fn bodies. - if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() { - return false; +impl ForceInline { + pub fn should_run_pass_for_callee<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> bool { + matches!(tcx.codegen_fn_attrs(def_id).inline, InlineAttr::Force { .. 
}) } - if body.source.promoted.is_some() { - return false; +} + +impl<'tcx> crate::MirPass<'tcx> for ForceInline { + fn is_enabled(&self, _: &rustc_session::Session) -> bool { + true } - // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation, - // which can create a cycle, even when no attempt is made to inline the function in the other - // direction. - if body.coroutine.is_some() { - return false; + + fn can_be_overridden(&self) -> bool { + false + } + + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { + let span = trace_span!("force_inline", body = %tcx.def_path_str(body.source.def_id())); + let _guard = span.enter(); + if inline::<ForceInliner<'tcx>>(tcx, body) { + debug!("running simplify cfg on {:?}", body.source); + simplify_cfg(body); + deref_finder(tcx, body); + } } +} - let typing_env = body.typing_env(tcx); - let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id); +trait Inliner<'tcx> { + fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self; - let mut this = Inliner { - tcx, - typing_env, - codegen_fn_attrs, - history: Vec::new(), - changed: false, - caller_is_inline_forwarder: matches!( - codegen_fn_attrs.inline, - InlineAttr::Hint | InlineAttr::Always - ) && body_is_forwarder(body), - }; - let blocks = START_BLOCK..body.basic_blocks.next_index(); - this.process_blocks(body, blocks); - this.changed + fn tcx(&self) -> TyCtxt<'tcx>; + fn typing_env(&self) -> ty::TypingEnv<'tcx>; + fn history(&self) -> &[DefId]; + fn caller_def_id(&self) -> DefId; + + /// Has the caller body been changed? + fn changed(self) -> bool; + + /// Should inlining happen for a given callee? + fn should_inline_for_callee(&self, def_id: DefId) -> bool; + + fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool; + + /// Returns inlining decision that is based on the examination of callee MIR body. + /// Assumes that codegen attributes have been checked for compatibility already. + fn check_callee_mir_body( + &self, + callsite: &CallSite<'tcx>, + callee_body: &Body<'tcx>, + callee_attrs: &CodegenFnAttrs, + ) -> Result<(), &'static str>; + + // How many callsites in a body are we allowed to inline? We need to limit this in order + // to prevent super-linear growth in MIR size. + fn inline_limit_for_block(&self) -> Option<usize>; + + /// Called when inlining succeeds. + fn on_inline_success( + &mut self, + callsite: &CallSite<'tcx>, + caller_body: &mut Body<'tcx>, + new_blocks: std::ops::Range<BasicBlock>, + ); + + /// Called when inlining failed or was not performed. + fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str); + + /// Called when the inline limit for a body is reached. + fn on_inline_limit_reached(&self) -> bool; } -struct Inliner<'tcx> { +struct ForceInliner<'tcx> { tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, - /// Caller codegen attributes. - codegen_fn_attrs: &'tcx CodegenFnAttrs, + /// `DefId` of caller. + def_id: DefId, /// Stack of inlined instances. /// We only check the `DefId` and not the args because we want to /// avoid inlining cases of polymorphic recursion. @@ -124,366 +153,203 @@ struct Inliner<'tcx> { history: Vec<DefId>, /// Indicates that the caller body has been modified. changed: bool, - /// Indicates that the caller is #[inline] and just calls another function, - /// and thus we can inline less into it as it'll be inlined itself. 
- caller_is_inline_forwarder: bool, } -impl<'tcx> Inliner<'tcx> { - fn process_blocks(&mut self, caller_body: &mut Body<'tcx>, blocks: Range<BasicBlock>) { - // How many callsites in this body are we allowed to inline? We need to limit this in order - // to prevent super-linear growth in MIR size - let inline_limit = match self.history.len() { - 0 => usize::MAX, - 1..=TOP_DOWN_DEPTH_LIMIT => 1, - _ => return, - }; - let mut inlined_count = 0; - for bb in blocks { - let bb_data = &caller_body[bb]; - if bb_data.is_cleanup { - continue; - } - - let Some(callsite) = self.resolve_callsite(caller_body, bb, bb_data) else { - continue; - }; - - let span = trace_span!("process_blocks", %callsite.callee, ?bb); - let _guard = span.enter(); - - match self.try_inlining(caller_body, &callsite) { - Err(reason) => { - debug!("not-inlined {} [{}]", callsite.callee, reason); - } - Ok(new_blocks) => { - debug!("inlined {}", callsite.callee); - self.changed = true; - - self.history.push(callsite.callee.def_id()); - self.process_blocks(caller_body, new_blocks); - self.history.pop(); - - inlined_count += 1; - if inlined_count == inline_limit { - debug!("inline count reached"); - return; - } - } - } - } +impl<'tcx> Inliner<'tcx> for ForceInliner<'tcx> { + fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self { + Self { tcx, typing_env: body.typing_env(tcx), def_id, history: Vec::new(), changed: false } } - /// Attempts to inline a callsite into the caller body. When successful returns basic blocks - /// containing the inlined body. Otherwise returns an error describing why inlining didn't take - /// place. - fn try_inlining( - &self, - caller_body: &mut Body<'tcx>, - callsite: &CallSite<'tcx>, - ) -> Result<std::ops::Range<BasicBlock>, &'static str> { - self.check_mir_is_available(caller_body, callsite.callee)?; - - let callee_attrs = self.tcx.codegen_fn_attrs(callsite.callee.def_id()); - let cross_crate_inlinable = self.tcx.cross_crate_inlinable(callsite.callee.def_id()); - self.check_codegen_attributes(callsite, callee_attrs, cross_crate_inlinable)?; - - // Intrinsic fallback bodies are automatically made cross-crate inlineable, - // but at this stage we don't know whether codegen knows the intrinsic, - // so just conservatively don't inline it. This also ensures that we do not - // accidentally inline the body of an intrinsic that *must* be overridden. - if self.tcx.has_attr(callsite.callee.def_id(), sym::rustc_intrinsic) { - return Err("Callee is an intrinsic, do not inline fallback bodies"); - } - - let terminator = caller_body[callsite.block].terminator.as_ref().unwrap(); - let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() }; - let destination_ty = destination.ty(&caller_body.local_decls, self.tcx).ty; - for arg in args { - if !arg.node.ty(&caller_body.local_decls, self.tcx).is_sized(self.tcx, self.typing_env) - { - // We do not allow inlining functions with unsized params. Inlining these functions - // could create unsized locals, which are unsound and being phased out. 
- return Err("Call has unsized argument"); - } - } - - let callee_body = try_instance_mir(self.tcx, callsite.callee.def)?; - self.check_mir_body(callsite, callee_body, callee_attrs, cross_crate_inlinable)?; - - let Ok(callee_body) = callsite.callee.try_instantiate_mir_and_normalize_erasing_regions( - self.tcx, - self.typing_env, - ty::EarlyBinder::bind(callee_body.clone()), - ) else { - return Err("failed to normalize callee body"); - }; - - // Normally, this shouldn't be required, but trait normalization failure can create a - // validation ICE. - if !validate_types(self.tcx, self.typing_env, &callee_body, &caller_body).is_empty() { - return Err("failed to validate callee body"); - } - - // Check call signature compatibility. - // Normally, this shouldn't be required, but trait normalization failure can create a - // validation ICE. - let output_type = callee_body.return_ty(); - if !util::sub_types(self.tcx, self.typing_env, output_type, destination_ty) { - trace!(?output_type, ?destination_ty); - return Err("failed to normalize return type"); - } - if callsite.fn_sig.abi() == ExternAbi::RustCall { - // FIXME: Don't inline user-written `extern "rust-call"` functions, - // since this is generally perf-negative on rustc, and we hope that - // LLVM will inline these functions instead. - if callee_body.spread_arg.is_some() { - return Err("do not inline user-written rust-call functions"); - } + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } - let (self_arg, arg_tuple) = match &args[..] { - [arg_tuple] => (None, arg_tuple), - [self_arg, arg_tuple] => (Some(self_arg), arg_tuple), - _ => bug!("Expected `rust-call` to have 1 or 2 args"), - }; + fn typing_env(&self) -> ty::TypingEnv<'tcx> { + self.typing_env + } - let self_arg_ty = - self_arg.map(|self_arg| self_arg.node.ty(&caller_body.local_decls, self.tcx)); + fn history(&self) -> &[DefId] { + &self.history + } - let arg_tuple_ty = arg_tuple.node.ty(&caller_body.local_decls, self.tcx); - let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else { - bug!("Closure arguments are not passed as a tuple"); - }; + fn caller_def_id(&self) -> DefId { + self.def_id + } - for (arg_ty, input) in - self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter()) - { - let input_type = callee_body.local_decls[input].ty; - if !util::sub_types(self.tcx, self.typing_env, input_type, arg_ty) { - trace!(?arg_ty, ?input_type); - return Err("failed to normalize tuple argument type"); - } - } - } else { - for (arg, input) in args.iter().zip(callee_body.args_iter()) { - let input_type = callee_body.local_decls[input].ty; - let arg_ty = arg.node.ty(&caller_body.local_decls, self.tcx); - if !util::sub_types(self.tcx, self.typing_env, input_type, arg_ty) { - trace!(?arg_ty, ?input_type); - return Err("failed to normalize argument type"); - } - } - } + fn changed(self) -> bool { + self.changed + } - let old_blocks = caller_body.basic_blocks.next_index(); - self.inline_call(caller_body, callsite, callee_body); - let new_blocks = old_blocks..caller_body.basic_blocks.next_index(); + fn should_inline_for_callee(&self, def_id: DefId) -> bool { + ForceInline::should_run_pass_for_callee(self.tcx(), def_id) + } - Ok(new_blocks) + fn check_caller_mir_body(&self, _: &Body<'tcx>) -> bool { + true } - fn check_mir_is_available( + #[instrument(level = "debug", skip(self, callee_body))] + fn check_callee_mir_body( &self, - caller_body: &Body<'tcx>, - callee: Instance<'tcx>, + _: &CallSite<'tcx>, + callee_body: &Body<'tcx>, + callee_attrs: &CodegenFnAttrs, ) -> Result<(), 
&'static str> { - let caller_def_id = caller_body.source.def_id(); - let callee_def_id = callee.def_id(); - if callee_def_id == caller_def_id { - return Err("self-recursion"); - } - - match callee.def { - InstanceKind::Item(_) => { - // If there is no MIR available (either because it was not in metadata or - // because it has no MIR because it's an extern function), then the inliner - // won't cause cycles on this. - if !self.tcx.is_mir_available(callee_def_id) { - return Err("item MIR unavailable"); - } - } - // These have no own callable MIR. - InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => { - return Err("instance without MIR (intrinsic / virtual)"); - } - - // FIXME(#127030): `ConstParamHasTy` has bad interactions with - // the drop shim builder, which does not evaluate predicates in - // the correct param-env for types being dropped. Stall resolving - // the MIR for this instance until all of its const params are - // substituted. - InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => { - return Err("still needs substitution"); - } - - // This cannot result in an immediate cycle since the callee MIR is a shim, which does - // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we - // do not need to catch this here, we can wait until the inliner decides to continue - // inlining a second time. - InstanceKind::VTableShim(_) - | InstanceKind::ReifyShim(..) - | InstanceKind::FnPtrShim(..) - | InstanceKind::ClosureOnceShim { .. } - | InstanceKind::ConstructCoroutineInClosureShim { .. } - | InstanceKind::DropGlue(..) - | InstanceKind::CloneShim(..) - | InstanceKind::ThreadLocalShim(..) - | InstanceKind::FnPtrAddrShim(..) - | InstanceKind::AsyncDropGlueCtorShim(..) => return Ok(()), + if callee_body.tainted_by_errors.is_some() { + return Err("body has errors"); } - if self.tcx.is_constructor(callee_def_id) { - trace!("constructors always have MIR"); - // Constructor functions cannot cause a query cycle. - return Ok(()); - } - - if callee_def_id.is_local() { - // If we know for sure that the function we're calling will itself try to - // call us, then we avoid inlining that function. - if self.tcx.mir_callgraph_reachable((callee, caller_def_id.expect_local())) { - return Err("caller might be reachable from callee (query cycle avoidance)"); - } - - Ok(()) + let caller_attrs = self.tcx().codegen_fn_attrs(self.caller_def_id()); + if callee_attrs.instruction_set != caller_attrs.instruction_set + && callee_body + .basic_blocks + .iter() + .any(|bb| matches!(bb.terminator().kind, TerminatorKind::InlineAsm { .. })) + { + // During the attribute checking stage we allow a callee with no + // instruction_set assigned to count as compatible with a function that does + // assign one. However, during this stage we require an exact match when any + // inline-asm is detected. LLVM will still possibly do an inline later on + // if the no-attribute function ends up with the same instruction set anyway. + Err("cannot move inline-asm across instruction sets") } else { - // This cannot result in an immediate cycle since the callee MIR is from another crate - // and is already optimized. Any subsequent inlining may cause cycles, but we do - // not need to catch this here, we can wait until the inliner decides to continue - // inlining a second time. 
- trace!("functions from other crates always have MIR"); Ok(()) } } - fn resolve_callsite( - &self, - caller_body: &Body<'tcx>, - bb: BasicBlock, - bb_data: &BasicBlockData<'tcx>, - ) -> Option<CallSite<'tcx>> { - // Only consider direct calls to functions - let terminator = bb_data.terminator(); - - // FIXME(explicit_tail_calls): figure out if we can inline tail calls - if let TerminatorKind::Call { ref func, fn_span, .. } = terminator.kind { - let func_ty = func.ty(caller_body, self.tcx); - if let ty::FnDef(def_id, args) = *func_ty.kind() { - // To resolve an instance its args have to be fully normalized. - let args = self.tcx.try_normalize_erasing_regions(self.typing_env, args).ok()?; - let callee = Instance::try_resolve(self.tcx, self.typing_env, def_id, args) - .ok() - .flatten()?; - - if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def { - return None; - } - - if self.history.contains(&callee.def_id()) { - return None; - } + fn inline_limit_for_block(&self) -> Option<usize> { + Some(usize::MAX) + } - let fn_sig = self.tcx.fn_sig(def_id).instantiate(self.tcx, args); + fn on_inline_success( + &mut self, + callsite: &CallSite<'tcx>, + caller_body: &mut Body<'tcx>, + new_blocks: std::ops::Range<BasicBlock>, + ) { + self.changed = true; - // Additionally, check that the body that we're inlining actually agrees - // with the ABI of the trait that the item comes from. - if let InstanceKind::Item(instance_def_id) = callee.def - && self.tcx.def_kind(instance_def_id) == DefKind::AssocFn - && let instance_fn_sig = self.tcx.fn_sig(instance_def_id).skip_binder() - && instance_fn_sig.abi() != fn_sig.abi() - { - return None; - } + self.history.push(callsite.callee.def_id()); + process_blocks(self, caller_body, new_blocks); + self.history.pop(); + } - let source_info = SourceInfo { span: fn_span, ..terminator.source_info }; + fn on_inline_failure(&self, callsite: &CallSite<'tcx>, reason: &'static str) { + let tcx = self.tcx(); + let InlineAttr::Force { attr_span, reason: justification } = + tcx.codegen_fn_attrs(callsite.callee.def_id()).inline + else { + bug!("called on item without required inlining"); + }; - return Some(CallSite { callee, fn_sig, block: bb, source_info }); - } - } + let call_span = callsite.source_info.span; + tcx.dcx().emit_err(crate::errors::ForceInlineFailure { + call_span, + attr_span, + caller_span: tcx.def_span(self.def_id), + caller: tcx.def_path_str(self.def_id), + callee_span: tcx.def_span(callsite.callee.def_id()), + callee: tcx.def_path_str(callsite.callee.def_id()), + reason, + justification: justification.map(|sym| crate::errors::ForceInlineJustification { sym }), + }); + } - None + fn on_inline_limit_reached(&self) -> bool { + false } +} - /// Returns an error if inlining is not possible based on codegen attributes alone. A success - /// indicates that inlining decision should be based on other criteria. - fn check_codegen_attributes( - &self, - callsite: &CallSite<'tcx>, - callee_attrs: &CodegenFnAttrs, - cross_crate_inlinable: bool, - ) -> Result<(), &'static str> { - if self.tcx.has_attr(callsite.callee.def_id(), sym::rustc_no_mir_inline) { - return Err("#[rustc_no_mir_inline]"); - } +struct NormalInliner<'tcx> { + tcx: TyCtxt<'tcx>, + typing_env: ty::TypingEnv<'tcx>, + /// `DefId` of caller. + def_id: DefId, + /// Stack of inlined instances. + /// We only check the `DefId` and not the args because we want to + /// avoid inlining cases of polymorphic recursion. 
+ /// The number of `DefId`s is finite, so checking history is enough + /// to ensure that we do not loop endlessly while inlining. + history: Vec<DefId>, + /// Indicates that the caller body has been modified. + changed: bool, + /// Indicates that the caller is #[inline] and just calls another function, + /// and thus we can inline less into it as it'll be inlined itself. + caller_is_inline_forwarder: bool, +} - if let InlineAttr::Never = callee_attrs.inline { - return Err("never inline hint"); +impl<'tcx> Inliner<'tcx> for NormalInliner<'tcx> { + fn new(tcx: TyCtxt<'tcx>, def_id: DefId, body: &Body<'tcx>) -> Self { + let typing_env = body.typing_env(tcx); + let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id); + + Self { + tcx, + typing_env, + def_id, + history: Vec::new(), + changed: false, + caller_is_inline_forwarder: matches!( + codegen_fn_attrs.inline, + InlineAttr::Hint | InlineAttr::Always | InlineAttr::Force { .. } + ) && body_is_forwarder(body), } + } - // Reachability pass defines which functions are eligible for inlining. Generally inlining - // other functions is incorrect because they could reference symbols that aren't exported. - let is_generic = callsite.callee.args.non_erasable_generics().next().is_some(); - if !is_generic && !cross_crate_inlinable { - return Err("not exported"); - } + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } - if callsite.fn_sig.c_variadic() { - return Err("C variadic"); - } + fn caller_def_id(&self) -> DefId { + self.def_id + } - if callee_attrs.flags.contains(CodegenFnAttrFlags::COLD) { - return Err("cold"); - } + fn typing_env(&self) -> ty::TypingEnv<'tcx> { + self.typing_env + } - if callee_attrs.no_sanitize != self.codegen_fn_attrs.no_sanitize { - return Err("incompatible sanitizer set"); - } + fn history(&self) -> &[DefId] { + &self.history + } - // Two functions are compatible if the callee has no attribute (meaning - // that it's codegen agnostic), or sets an attribute that is identical - // to this function's attribute. - if callee_attrs.instruction_set.is_some() - && callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set - { - return Err("incompatible instruction set"); - } + fn changed(self) -> bool { + self.changed + } + + fn should_inline_for_callee(&self, _: DefId) -> bool { + true + } - let callee_feature_names = callee_attrs.target_features.iter().map(|f| f.name); - let this_feature_names = self.codegen_fn_attrs.target_features.iter().map(|f| f.name); - if callee_feature_names.ne(this_feature_names) { - // In general it is not correct to inline a callee with target features that are a - // subset of the caller. This is because the callee might contain calls, and the ABI of - // those calls depends on the target features of the surrounding function. By moving a - // `Call` terminator from one MIR body to another with more target features, we might - // change the ABI of that call! - return Err("incompatible target features"); + fn check_caller_mir_body(&self, body: &Body<'tcx>) -> bool { + // Avoid inlining into coroutines, since their `optimized_mir` is used for layout computation, + // which can create a cycle, even when no attempt is made to inline the function in the other + // direction. + if body.coroutine.is_some() { + return false; } - Ok(()) + true } - /// Returns inlining decision that is based on the examination of callee MIR body. - /// Assumes that codegen attributes have been checked for compatibility already. 
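// An illustrative "inline forwarder" in the sense of `caller_is_inline_forwarder`
// above (hypothetical functions, not from this patch): an `#[inline]` function
// whose body just calls something else. The inliner applies the smaller
// `inline_mir_forwarder_threshold` used below when inlining *into* such a
// caller, since the forwarder is expected to be inlined into its own callers.
#[inline]
fn forwarder(x: u32) -> u32 {
    implementation(x)
}

#[inline(never)]
fn implementation(x: u32) -> u32 {
    x + 1
}

fn main() {
    assert_eq!(forwarder(41), 42);
}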
#[instrument(level = "debug", skip(self, callee_body))] - fn check_mir_body( + fn check_callee_mir_body( &self, callsite: &CallSite<'tcx>, callee_body: &Body<'tcx>, callee_attrs: &CodegenFnAttrs, - cross_crate_inlinable: bool, ) -> Result<(), &'static str> { - let tcx = self.tcx; + let tcx = self.tcx(); if let Some(_) = callee_body.tainted_by_errors { - return Err("Body is tainted"); + return Err("body has errors"); } let mut threshold = if self.caller_is_inline_forwarder { - self.tcx.sess.opts.unstable_opts.inline_mir_forwarder_threshold.unwrap_or(30) - } else if cross_crate_inlinable { - self.tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100) + tcx.sess.opts.unstable_opts.inline_mir_forwarder_threshold.unwrap_or(30) + } else if tcx.cross_crate_inlinable(callsite.callee.def_id()) { + tcx.sess.opts.unstable_opts.inline_mir_hint_threshold.unwrap_or(100) } else { - self.tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50) + tcx.sess.opts.unstable_opts.inline_mir_threshold.unwrap_or(50) }; // Give a bonus functions with a small number of blocks, @@ -497,13 +363,13 @@ impl<'tcx> Inliner<'tcx> { // FIXME: Give a bonus to functions with only a single caller let mut checker = - CostChecker::new(self.tcx, self.typing_env, Some(callsite.callee), callee_body); + CostChecker::new(tcx, self.typing_env(), Some(callsite.callee), callee_body); checker.add_function_level_costs(); // Traverse the MIR manually so we can account for the effects of inlining on the CFG. let mut work_list = vec![START_BLOCK]; - let mut visited = BitSet::new_empty(callee_body.basic_blocks.len()); + let mut visited = DenseBitSet::new_empty(callee_body.basic_blocks.len()); while let Some(bb) = work_list.pop() { if !visited.insert(bb.index()) { continue; @@ -513,20 +379,20 @@ impl<'tcx> Inliner<'tcx> { checker.visit_basic_block_data(bb, blk); let term = blk.terminator(); + let caller_attrs = tcx.codegen_fn_attrs(self.caller_def_id()); if let TerminatorKind::Drop { ref place, target, unwind, replace: _ } = term.kind { work_list.push(target); // If the place doesn't actually need dropping, treat it like a regular goto. - let ty = callsite.callee.instantiate_mir( - self.tcx, - ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty), - ); - if ty.needs_drop(tcx, self.typing_env) + let ty = callsite + .callee + .instantiate_mir(tcx, ty::EarlyBinder::bind(&place.ty(callee_body, tcx).ty)); + if ty.needs_drop(tcx, self.typing_env()) && let UnwindAction::Cleanup(unwind) = unwind { work_list.push(unwind); } - } else if callee_attrs.instruction_set != self.codegen_fn_attrs.instruction_set + } else if callee_attrs.instruction_set != caller_attrs.instruction_set && matches!(term.kind, TerminatorKind::InlineAsm { .. }) { // During the attribute checking stage we allow a callee with no @@ -534,7 +400,7 @@ impl<'tcx> Inliner<'tcx> { // assign one. However, during this stage we require an exact match when any // inline-asm is detected. LLVM will still possibly do an inline later on // if the no-attribute function ends up with the same instruction set anyway. - return Err("Cannot move inline-asm across instruction sets"); + return Err("cannot move inline-asm across instruction sets"); } else if let TerminatorKind::TailCall { .. 
} = term.kind { // FIXME(explicit_tail_calls): figure out how exactly functions containing tail // calls can be inlined (and if they even should) @@ -558,321 +424,688 @@ impl<'tcx> Inliner<'tcx> { } } - fn inline_call( - &self, - caller_body: &mut Body<'tcx>, + fn inline_limit_for_block(&self) -> Option<usize> { + match self.history.len() { + 0 => Some(usize::MAX), + 1..=TOP_DOWN_DEPTH_LIMIT => Some(1), + _ => None, + } + } + + fn on_inline_success( + &mut self, callsite: &CallSite<'tcx>, - mut callee_body: Body<'tcx>, + caller_body: &mut Body<'tcx>, + new_blocks: std::ops::Range<BasicBlock>, ) { - let terminator = caller_body[callsite.block].terminator.take().unwrap(); - let TerminatorKind::Call { func, args, destination, unwind, target, .. } = terminator.kind - else { - bug!("unexpected terminator kind {:?}", terminator.kind); - }; + self.changed = true; - let return_block = if let Some(block) = target { - // Prepare a new block for code that should execute when call returns. We don't use - // target block directly since it might have other predecessors. - let data = BasicBlockData::new( - Some(Terminator { - source_info: terminator.source_info, - kind: TerminatorKind::Goto { target: block }, - }), - caller_body[block].is_cleanup, - ); - Some(caller_body.basic_blocks_mut().push(data)) - } else { - None + self.history.push(callsite.callee.def_id()); + process_blocks(self, caller_body, new_blocks); + self.history.pop(); + } + + fn on_inline_limit_reached(&self) -> bool { + true + } + + fn on_inline_failure(&self, _: &CallSite<'tcx>, _: &'static str) {} +} + +fn inline<'tcx, T: Inliner<'tcx>>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) -> bool { + let def_id = body.source.def_id(); + + // Only do inlining into fn bodies. + if !tcx.hir().body_owner_kind(def_id).is_fn_or_closure() { + return false; + } + + let mut inliner = T::new(tcx, def_id, body); + if !inliner.check_caller_mir_body(body) { + return false; + } + + let blocks = START_BLOCK..body.basic_blocks.next_index(); + process_blocks(&mut inliner, body, blocks); + inliner.changed() +} + +fn process_blocks<'tcx, I: Inliner<'tcx>>( + inliner: &mut I, + caller_body: &mut Body<'tcx>, + blocks: Range<BasicBlock>, +) { + let Some(inline_limit) = inliner.inline_limit_for_block() else { return }; + let mut inlined_count = 0; + for bb in blocks { + let bb_data = &caller_body[bb]; + if bb_data.is_cleanup { + continue; + } + + let Some(callsite) = resolve_callsite(inliner, caller_body, bb, bb_data) else { + continue; }; - // If the call is something like `a[*i] = f(i)`, where - // `i : &mut usize`, then just duplicating the `a[*i]` - // Place could result in two different locations if `f` - // writes to `i`. To prevent this we need to create a temporary - // borrow of the place and pass the destination as `*temp` instead. 
- fn dest_needs_borrow(place: Place<'_>) -> bool { - for elem in place.projection.iter() { - match elem { - ProjectionElem::Deref | ProjectionElem::Index(_) => return true, - _ => {} + let span = trace_span!("process_blocks", %callsite.callee, ?bb); + let _guard = span.enter(); + + match try_inlining(inliner, caller_body, &callsite) { + Err(reason) => { + debug!("not-inlined {} [{}]", callsite.callee, reason); + inliner.on_inline_failure(&callsite, reason); + } + Ok(new_blocks) => { + debug!("inlined {}", callsite.callee); + inliner.on_inline_success(&callsite, caller_body, new_blocks); + + inlined_count += 1; + if inlined_count == inline_limit { + if inliner.on_inline_limit_reached() { + return; + } } } + } + } +} + +fn resolve_callsite<'tcx, I: Inliner<'tcx>>( + inliner: &I, + caller_body: &Body<'tcx>, + bb: BasicBlock, + bb_data: &BasicBlockData<'tcx>, +) -> Option<CallSite<'tcx>> { + let tcx = inliner.tcx(); + // Only consider direct calls to functions + let terminator = bb_data.terminator(); + + // FIXME(explicit_tail_calls): figure out if we can inline tail calls + if let TerminatorKind::Call { ref func, fn_span, .. } = terminator.kind { + let func_ty = func.ty(caller_body, tcx); + if let ty::FnDef(def_id, args) = *func_ty.kind() { + if !inliner.should_inline_for_callee(def_id) { + debug!("not enabled"); + return None; + } + + // To resolve an instance its args have to be fully normalized. + let args = tcx.try_normalize_erasing_regions(inliner.typing_env(), args).ok()?; + let callee = + Instance::try_resolve(tcx, inliner.typing_env(), def_id, args).ok().flatten()?; + + if let InstanceKind::Virtual(..) | InstanceKind::Intrinsic(_) = callee.def { + return None; + } + + if inliner.history().contains(&callee.def_id()) { + return None; + } - false + let fn_sig = tcx.fn_sig(def_id).instantiate(tcx, args); + + // Additionally, check that the body that we're inlining actually agrees + // with the ABI of the trait that the item comes from. + if let InstanceKind::Item(instance_def_id) = callee.def + && tcx.def_kind(instance_def_id) == DefKind::AssocFn + && let instance_fn_sig = tcx.fn_sig(instance_def_id).skip_binder() + && instance_fn_sig.abi() != fn_sig.abi() + { + return None; + } + + let source_info = SourceInfo { span: fn_span, ..terminator.source_info }; + + return Some(CallSite { callee, fn_sig, block: bb, source_info }); } + } - let dest = if dest_needs_borrow(destination) { - trace!("creating temp for return destination"); - let dest = Rvalue::Ref( - self.tcx.lifetimes.re_erased, - BorrowKind::Mut { kind: MutBorrowKind::Default }, - destination, - ); - let dest_ty = dest.ty(caller_body, self.tcx); - let temp = - Place::from(self.new_call_temp(caller_body, callsite, dest_ty, return_block)); - caller_body[callsite.block].statements.push(Statement { - source_info: callsite.source_info, - kind: StatementKind::Assign(Box::new((temp, dest))), - }); - self.tcx.mk_place_deref(temp) - } else { - destination - }; + None +} - // Always create a local to hold the destination, as `RETURN_PLACE` may appear - // where a full `Place` is not allowed. - let (remap_destination, destination_local) = if let Some(d) = dest.as_local() { - (false, d) - } else { - ( - true, - self.new_call_temp( - caller_body, - callsite, - destination.ty(caller_body, self.tcx).ty, - return_block, - ), - ) - }; +/// Attempts to inline a callsite into the caller body. When successful returns basic blocks +/// containing the inlined body. Otherwise returns an error describing why inlining didn't take +/// place. 
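// A stand-alone sketch (simplified, with a hypothetical helper name) of the
// depth-based budget that `NormalInliner::inline_limit_for_block` returns above:
// the outermost body may inline any number of callsites, bodies reached through
// up to TOP_DOWN_DEPTH_LIMIT levels of inlining may inline one callsite each,
// and anything deeper gets no budget at all.
const TOP_DOWN_DEPTH_LIMIT: usize = 5;

fn inline_limit_for_depth(history_len: usize) -> Option<usize> {
    match history_len {
        0 => Some(usize::MAX),
        1..=TOP_DOWN_DEPTH_LIMIT => Some(1),
        _ => None,
    }
}

fn main() {
    assert_eq!(inline_limit_for_depth(0), Some(usize::MAX));
    assert_eq!(inline_limit_for_depth(3), Some(1));
    assert_eq!(inline_limit_for_depth(9), None);
}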
+fn try_inlining<'tcx, I: Inliner<'tcx>>( + inliner: &I, + caller_body: &mut Body<'tcx>, + callsite: &CallSite<'tcx>, +) -> Result<std::ops::Range<BasicBlock>, &'static str> { + let tcx = inliner.tcx(); + check_mir_is_available(inliner, caller_body, callsite.callee)?; + + let callee_attrs = tcx.codegen_fn_attrs(callsite.callee.def_id()); + check_inline::is_inline_valid_on_fn(tcx, callsite.callee.def_id())?; + check_codegen_attributes(inliner, callsite, callee_attrs)?; + + let terminator = caller_body[callsite.block].terminator.as_ref().unwrap(); + let TerminatorKind::Call { args, destination, .. } = &terminator.kind else { bug!() }; + let destination_ty = destination.ty(&caller_body.local_decls, tcx).ty; + for arg in args { + if !arg.node.ty(&caller_body.local_decls, tcx).is_sized(tcx, inliner.typing_env()) { + // We do not allow inlining functions with unsized params. Inlining these functions + // could create unsized locals, which are unsound and being phased out. + return Err("call has unsized argument"); + } + } - // Copy the arguments if needed. - let args = self.make_call_args(args, callsite, caller_body, &callee_body, return_block); + let callee_body = try_instance_mir(tcx, callsite.callee.def)?; + check_inline::is_inline_valid_on_body(tcx, callee_body)?; + inliner.check_callee_mir_body(callsite, callee_body, callee_attrs)?; - let mut integrator = Integrator { - args: &args, - new_locals: Local::new(caller_body.local_decls.len()).., - new_scopes: SourceScope::new(caller_body.source_scopes.len()).., - new_blocks: BasicBlock::new(caller_body.basic_blocks.len()).., - destination: destination_local, - callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(), - callsite, - cleanup_block: unwind, - in_cleanup_block: false, - return_block, - tcx: self.tcx, - always_live_locals: BitSet::new_filled(callee_body.local_decls.len()), + let Ok(callee_body) = callsite.callee.try_instantiate_mir_and_normalize_erasing_regions( + tcx, + inliner.typing_env(), + ty::EarlyBinder::bind(callee_body.clone()), + ) else { + debug!("failed to normalize callee body"); + return Err("implementation limitation"); + }; + + // Normally, this shouldn't be required, but trait normalization failure can create a + // validation ICE. + if !validate_types(tcx, inliner.typing_env(), &callee_body, &caller_body).is_empty() { + debug!("failed to validate callee body"); + return Err("implementation limitation"); + } + + // Check call signature compatibility. + // Normally, this shouldn't be required, but trait normalization failure can create a + // validation ICE. + let output_type = callee_body.return_ty(); + if !util::sub_types(tcx, inliner.typing_env(), output_type, destination_ty) { + trace!(?output_type, ?destination_ty); + debug!("failed to normalize return type"); + return Err("implementation limitation"); + } + if callsite.fn_sig.abi() == ExternAbi::RustCall { + // FIXME: Don't inline user-written `extern "rust-call"` functions, + // since this is generally perf-negative on rustc, and we hope that + // LLVM will inline these functions instead. + if callee_body.spread_arg.is_some() { + return Err("user-written rust-call functions"); + } + + let (self_arg, arg_tuple) = match &args[..] { + [arg_tuple] => (None, arg_tuple), + [self_arg, arg_tuple] => (Some(self_arg), arg_tuple), + _ => bug!("Expected `rust-call` to have 1 or 2 args"), }; - // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones - // (or existing ones, in a few special cases) in the caller. 
- integrator.visit_body(&mut callee_body); + let self_arg_ty = self_arg.map(|self_arg| self_arg.node.ty(&caller_body.local_decls, tcx)); - // If there are any locals without storage markers, give them storage only for the - // duration of the call. - for local in callee_body.vars_and_temps_iter() { - if integrator.always_live_locals.contains(local) { - let new_local = integrator.map_local(local); - caller_body[callsite.block].statements.push(Statement { - source_info: callsite.source_info, - kind: StatementKind::StorageLive(new_local), - }); + let arg_tuple_ty = arg_tuple.node.ty(&caller_body.local_decls, tcx); + let ty::Tuple(arg_tuple_tys) = *arg_tuple_ty.kind() else { + bug!("Closure arguments are not passed as a tuple"); + }; + + for (arg_ty, input) in + self_arg_ty.into_iter().chain(arg_tuple_tys).zip(callee_body.args_iter()) + { + let input_type = callee_body.local_decls[input].ty; + if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) { + trace!(?arg_ty, ?input_type); + debug!("failed to normalize tuple argument type"); + return Err("implementation limitation"); } } - if let Some(block) = return_block { - // To avoid repeated O(n) insert, push any new statements to the end and rotate - // the slice once. - let mut n = 0; - if remap_destination { - caller_body[block].statements.push(Statement { - source_info: callsite.source_info, - kind: StatementKind::Assign(Box::new(( - dest, - Rvalue::Use(Operand::Move(destination_local.into())), - ))), - }); - n += 1; + } else { + for (arg, input) in args.iter().zip(callee_body.args_iter()) { + let input_type = callee_body.local_decls[input].ty; + let arg_ty = arg.node.ty(&caller_body.local_decls, tcx); + if !util::sub_types(tcx, inliner.typing_env(), input_type, arg_ty) { + trace!(?arg_ty, ?input_type); + debug!("failed to normalize argument type"); + return Err("implementation limitation"); } - for local in callee_body.vars_and_temps_iter().rev() { - if integrator.always_live_locals.contains(local) { - let new_local = integrator.map_local(local); - caller_body[block].statements.push(Statement { - source_info: callsite.source_info, - kind: StatementKind::StorageDead(new_local), - }); - n += 1; - } + } + } + + let old_blocks = caller_body.basic_blocks.next_index(); + inline_call(inliner, caller_body, callsite, callee_body); + let new_blocks = old_blocks..caller_body.basic_blocks.next_index(); + + Ok(new_blocks) +} + +fn check_mir_is_available<'tcx, I: Inliner<'tcx>>( + inliner: &I, + caller_body: &Body<'tcx>, + callee: Instance<'tcx>, +) -> Result<(), &'static str> { + let caller_def_id = caller_body.source.def_id(); + let callee_def_id = callee.def_id(); + if callee_def_id == caller_def_id { + return Err("self-recursion"); + } + + match callee.def { + InstanceKind::Item(_) => { + // If there is no MIR available (either because it was not in metadata or + // because it has no MIR because it's an extern function), then the inliner + // won't cause cycles on this. + if !inliner.tcx().is_mir_available(callee_def_id) { + debug!("item MIR unavailable"); + return Err("implementation limitation"); } - caller_body[block].statements.rotate_right(n); + } + // These have no own callable MIR. + InstanceKind::Intrinsic(_) | InstanceKind::Virtual(..) => { + debug!("instance without MIR (intrinsic / virtual)"); + return Err("implementation limitation"); } - // Insert all of the (mapped) parts of the callee body into the caller. 
- caller_body.local_decls.extend(callee_body.drain_vars_and_temps()); - caller_body.source_scopes.append(&mut callee_body.source_scopes); - if self - .tcx - .sess - .opts - .unstable_opts - .inline_mir_preserve_debug - .unwrap_or(self.tcx.sess.opts.debuginfo != DebugInfo::None) - { - // Note that we need to preserve these in the standard library so that - // people working on rust can build with or without debuginfo while - // still getting consistent results from the mir-opt tests. - caller_body.var_debug_info.append(&mut callee_body.var_debug_info); + // FIXME(#127030): `ConstParamHasTy` has bad interactions with + // the drop shim builder, which does not evaluate predicates in + // the correct param-env for types being dropped. Stall resolving + // the MIR for this instance until all of its const params are + // substituted. + InstanceKind::DropGlue(_, Some(ty)) if ty.has_type_flags(TypeFlags::HAS_CT_PARAM) => { + debug!("still needs substitution"); + return Err("implementation limitation"); } - caller_body.basic_blocks_mut().append(callee_body.basic_blocks_mut()); - caller_body[callsite.block].terminator = Some(Terminator { - source_info: callsite.source_info, - kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) }, - }); + // This cannot result in an immediate cycle since the callee MIR is a shim, which does + // not get any optimizations run on it. Any subsequent inlining may cause cycles, but we + // do not need to catch this here, we can wait until the inliner decides to continue + // inlining a second time. + InstanceKind::VTableShim(_) + | InstanceKind::ReifyShim(..) + | InstanceKind::FnPtrShim(..) + | InstanceKind::ClosureOnceShim { .. } + | InstanceKind::ConstructCoroutineInClosureShim { .. } + | InstanceKind::DropGlue(..) + | InstanceKind::CloneShim(..) + | InstanceKind::ThreadLocalShim(..) + | InstanceKind::FnPtrAddrShim(..) + | InstanceKind::AsyncDropGlueCtorShim(..) => return Ok(()), + } - // Copy required constants from the callee_body into the caller_body. Although we are only - // pushing unevaluated consts to `required_consts`, here they may have been evaluated - // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again. - caller_body.required_consts.as_mut().unwrap().extend( - callee_body.required_consts().into_iter().filter(|ct| ct.const_.is_required_const()), - ); - // Now that we incorporated the callee's `required_consts`, we can remove the callee from - // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does - // some extra work here to save the monomorphization collector work later. It helps a lot, - // since monomorphization can avoid a lot of work when the "mentioned items" are similar to - // the actually used items. By doing this we can entirely avoid visiting the callee! - // We need to reconstruct the `required_item` for the callee so that we can find and - // remove it. - let callee_item = MentionedItem::Fn(func.ty(caller_body, self.tcx)); - let caller_mentioned_items = caller_body.mentioned_items.as_mut().unwrap(); - if let Some(idx) = caller_mentioned_items.iter().position(|item| item.node == callee_item) { - // We found the callee, so remove it and add its items instead. - caller_mentioned_items.remove(idx); - caller_mentioned_items.extend(callee_body.mentioned_items()); - } else { - // If we can't find the callee, there's no point in adding its items. 
Probably it - // already got removed by being inlined elsewhere in the same function, so we already - // took its items. + if inliner.tcx().is_constructor(callee_def_id) { + trace!("constructors always have MIR"); + // Constructor functions cannot cause a query cycle. + return Ok(()); + } + + if callee_def_id.is_local() + && !inliner + .tcx() + .is_lang_item(inliner.tcx().parent(caller_def_id), rustc_hir::LangItem::FnOnce) + { + // If we know for sure that the function we're calling will itself try to + // call us, then we avoid inlining that function. + if inliner.tcx().mir_callgraph_reachable((callee, caller_def_id.expect_local())) { + debug!("query cycle avoidance"); + return Err("caller might be reachable from callee"); } + + Ok(()) + } else { + // This cannot result in an immediate cycle since the callee MIR is from another crate + // and is already optimized. Any subsequent inlining may cause cycles, but we do + // not need to catch this here, we can wait until the inliner decides to continue + // inlining a second time. + trace!("functions from other crates always have MIR"); + Ok(()) } +} - fn make_call_args( - &self, - args: Box<[Spanned<Operand<'tcx>>]>, - callsite: &CallSite<'tcx>, - caller_body: &mut Body<'tcx>, - callee_body: &Body<'tcx>, - return_block: Option<BasicBlock>, - ) -> Box<[Local]> { - let tcx = self.tcx; - - // There is a bit of a mismatch between the *caller* of a closure and the *callee*. - // The caller provides the arguments wrapped up in a tuple: - // - // tuple_tmp = (a, b, c) - // Fn::call(closure_ref, tuple_tmp) - // - // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`) - // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has - // the job of unpacking this tuple. But here, we are codegen. =) So we want to create - // a vector like - // - // [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2] - // - // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient - // if we "spill" that into *another* temporary, so that we can map the argument - // variable in the callee MIR directly to an argument variable on our side. - // So we introduce temporaries like: - // - // tmp0 = tuple_tmp.0 - // tmp1 = tuple_tmp.1 - // tmp2 = tuple_tmp.2 - // - // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`. - if callsite.fn_sig.abi() == ExternAbi::RustCall && callee_body.spread_arg.is_none() { - // FIXME(edition_2024): switch back to a normal method call. - let mut args = <_>::into_iter(args); - let self_ = self.create_temp_if_necessary( - args.next().unwrap().node, - callsite, - caller_body, - return_block, - ); - let tuple = self.create_temp_if_necessary( - args.next().unwrap().node, - callsite, - caller_body, - return_block, - ); - assert!(args.next().is_none()); - - let tuple = Place::from(tuple); - let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else { - bug!("Closure arguments are not passed as a tuple"); - }; +/// Returns an error if inlining is not possible based on codegen attributes alone. A success +/// indicates that inlining decision should be based on other criteria. +fn check_codegen_attributes<'tcx, I: Inliner<'tcx>>( + inliner: &I, + callsite: &CallSite<'tcx>, + callee_attrs: &CodegenFnAttrs, +) -> Result<(), &'static str> { + let tcx = inliner.tcx(); + if let InlineAttr::Never = callee_attrs.inline { + return Err("never inline attribute"); + } - // The `closure_ref` in our example above. 
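// A surface-level illustration (hypothetical closure, not from this patch) of
// the "rust-call" mismatch described in the comment above: the caller passes the
// closure arguments packed into one tuple, while the closure body receives them
// as separate parameters, which is why the inliner spills each tuple field into
// its own temporary before splicing the callee in.
fn main() {
    let offset = 10;
    let add = |a: i32, b: i32| a + b + offset;
    // Conceptually this call lowers to `Fn::call(&add, (1, 2))`: one tuple
    // argument on the caller side, two parameters inside the closure body.
    assert_eq!(add(1, 2), 13);
}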
- let closure_ref_arg = iter::once(self_); + // Reachability pass defines which functions are eligible for inlining. Generally inlining + // other functions is incorrect because they could reference symbols that aren't exported. + let is_generic = callsite.callee.args.non_erasable_generics().next().is_some(); + if !is_generic && !tcx.cross_crate_inlinable(callsite.callee.def_id()) { + return Err("not exported"); + } - // The `tmp0`, `tmp1`, and `tmp2` in our example above. - let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| { - // This is e.g., `tuple_tmp.0` in our example above. - let tuple_field = Operand::Move(tcx.mk_place_field(tuple, FieldIdx::new(i), ty)); + let codegen_fn_attrs = tcx.codegen_fn_attrs(inliner.caller_def_id()); + if callee_attrs.no_sanitize != codegen_fn_attrs.no_sanitize { + return Err("incompatible sanitizer set"); + } - // Spill to a local to make e.g., `tmp0`. - self.create_temp_if_necessary(tuple_field, callsite, caller_body, return_block) - }); + // Two functions are compatible if the callee has no attribute (meaning + // that it's codegen agnostic), or sets an attribute that is identical + // to this function's attribute. + if callee_attrs.instruction_set.is_some() + && callee_attrs.instruction_set != codegen_fn_attrs.instruction_set + { + return Err("incompatible instruction set"); + } - closure_ref_arg.chain(tuple_tmp_args).collect() - } else { - // FIXME(edition_2024): switch back to a normal method call. - <_>::into_iter(args) - .map(|a| self.create_temp_if_necessary(a.node, callsite, caller_body, return_block)) - .collect() - } + let callee_feature_names = callee_attrs.target_features.iter().map(|f| f.name); + let this_feature_names = codegen_fn_attrs.target_features.iter().map(|f| f.name); + if callee_feature_names.ne(this_feature_names) { + // In general it is not correct to inline a callee with target features that are a + // subset of the caller. This is because the callee might contain calls, and the ABI of + // those calls depends on the target features of the surrounding function. By moving a + // `Call` terminator from one MIR body to another with more target features, we might + // change the ABI of that call! + return Err("incompatible target features"); } - /// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh - /// temporary `T` and an instruction `T = arg`, and returns `T`. - fn create_temp_if_necessary( - &self, - arg: Operand<'tcx>, - callsite: &CallSite<'tcx>, - caller_body: &mut Body<'tcx>, - return_block: Option<BasicBlock>, - ) -> Local { - // Reuse the operand if it is a moved temporary. - if let Operand::Move(place) = &arg - && let Some(local) = place.as_local() - && caller_body.local_kind(local) == LocalKind::Temp - { - return local; + Ok(()) +} + +fn inline_call<'tcx, I: Inliner<'tcx>>( + inliner: &I, + caller_body: &mut Body<'tcx>, + callsite: &CallSite<'tcx>, + mut callee_body: Body<'tcx>, +) { + let tcx = inliner.tcx(); + let terminator = caller_body[callsite.block].terminator.take().unwrap(); + let TerminatorKind::Call { func, args, destination, unwind, target, .. } = terminator.kind + else { + bug!("unexpected terminator kind {:?}", terminator.kind); + }; + + let return_block = if let Some(block) = target { + // Prepare a new block for code that should execute when call returns. We don't use + // target block directly since it might have other predecessors. 
+ let data = BasicBlockData::new( + Some(Terminator { + source_info: terminator.source_info, + kind: TerminatorKind::Goto { target: block }, + }), + caller_body[block].is_cleanup, + ); + Some(caller_body.basic_blocks_mut().push(data)) + } else { + None + }; + + // If the call is something like `a[*i] = f(i)`, where + // `i : &mut usize`, then just duplicating the `a[*i]` + // Place could result in two different locations if `f` + // writes to `i`. To prevent this we need to create a temporary + // borrow of the place and pass the destination as `*temp` instead. + fn dest_needs_borrow(place: Place<'_>) -> bool { + for elem in place.projection.iter() { + match elem { + ProjectionElem::Deref | ProjectionElem::Index(_) => return true, + _ => {} + } } - // Otherwise, create a temporary for the argument. - trace!("creating temp for argument {:?}", arg); - let arg_ty = arg.ty(caller_body, self.tcx); - let local = self.new_call_temp(caller_body, callsite, arg_ty, return_block); - caller_body[callsite.block].statements.push(Statement { - source_info: callsite.source_info, - kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))), - }); - local + false } - /// Introduces a new temporary into the caller body that is live for the duration of the call. - fn new_call_temp( - &self, - caller_body: &mut Body<'tcx>, - callsite: &CallSite<'tcx>, - ty: Ty<'tcx>, - return_block: Option<BasicBlock>, - ) -> Local { - let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span)); - + let dest = if dest_needs_borrow(destination) { + trace!("creating temp for return destination"); + let dest = Rvalue::Ref( + tcx.lifetimes.re_erased, + BorrowKind::Mut { kind: MutBorrowKind::Default }, + destination, + ); + let dest_ty = dest.ty(caller_body, tcx); + let temp = Place::from(new_call_temp(caller_body, callsite, dest_ty, return_block)); caller_body[callsite.block].statements.push(Statement { source_info: callsite.source_info, - kind: StatementKind::StorageLive(local), + kind: StatementKind::Assign(Box::new((temp, dest))), }); + tcx.mk_place_deref(temp) + } else { + destination + }; + + // Always create a local to hold the destination, as `RETURN_PLACE` may appear + // where a full `Place` is not allowed. + let (remap_destination, destination_local) = if let Some(d) = dest.as_local() { + (false, d) + } else { + ( + true, + new_call_temp(caller_body, callsite, destination.ty(caller_body, tcx).ty, return_block), + ) + }; - if let Some(block) = return_block { - caller_body[block].statements.insert(0, Statement { + // Copy the arguments if needed. + let args = make_call_args(inliner, args, callsite, caller_body, &callee_body, return_block); + + let mut integrator = Integrator { + args: &args, + new_locals: Local::new(caller_body.local_decls.len()).., + new_scopes: SourceScope::new(caller_body.source_scopes.len()).., + new_blocks: BasicBlock::new(caller_body.basic_blocks.len()).., + destination: destination_local, + callsite_scope: caller_body.source_scopes[callsite.source_info.scope].clone(), + callsite, + cleanup_block: unwind, + in_cleanup_block: false, + return_block, + tcx, + always_live_locals: DenseBitSet::new_filled(callee_body.local_decls.len()), + }; + + // Map all `Local`s, `SourceScope`s and `BasicBlock`s to new ones + // (or existing ones, in a few special cases) in the caller. + integrator.visit_body(&mut callee_body); + + // If there are any locals without storage markers, give them storage only for the + // duration of the call. 
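
// Source-level sketch (not part of this patch; `f` is invented) of the aliasing
// hazard `dest_needs_borrow` describes above: `f` mutates `*i`, so the place
// `a[*i]` names a different element before and after the call. Borrowing the
// destination once and writing through `*temp` pins the location down, which is
// what the inliner's temporary models.
fn f(i: &mut usize) -> u32 {
    *i += 1;
    42
}

fn main() {
    let mut a = [0u32; 4];
    let mut i = 0usize;

    // "Borrow the destination first", as the inliner does with its `temp`.
    let slot: &mut u32 = &mut a[i];
    *slot = f(&mut i);

    assert_eq!(a[0], 42); // written to the element selected before the call
    assert_eq!(i, 1);     // even though `f` changed `i` in the meantime
}
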
+ for local in callee_body.vars_and_temps_iter() { + if integrator.always_live_locals.contains(local) { + let new_local = integrator.map_local(local); + caller_body[callsite.block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageLive(new_local), + }); + } + } + if let Some(block) = return_block { + // To avoid repeated O(n) insert, push any new statements to the end and rotate + // the slice once. + let mut n = 0; + if remap_destination { + caller_body[block].statements.push(Statement { source_info: callsite.source_info, - kind: StatementKind::StorageDead(local), + kind: StatementKind::Assign(Box::new(( + dest, + Rvalue::Use(Operand::Move(destination_local.into())), + ))), }); + n += 1; + } + for local in callee_body.vars_and_temps_iter().rev() { + if integrator.always_live_locals.contains(local) { + let new_local = integrator.map_local(local); + caller_body[block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageDead(new_local), + }); + n += 1; + } } + caller_body[block].statements.rotate_right(n); + } - local + // Insert all of the (mapped) parts of the callee body into the caller. + caller_body.local_decls.extend(callee_body.drain_vars_and_temps()); + caller_body.source_scopes.append(&mut callee_body.source_scopes); + if tcx + .sess + .opts + .unstable_opts + .inline_mir_preserve_debug + .unwrap_or(tcx.sess.opts.debuginfo != DebugInfo::None) + { + // Note that we need to preserve these in the standard library so that + // people working on rust can build with or without debuginfo while + // still getting consistent results from the mir-opt tests. + caller_body.var_debug_info.append(&mut callee_body.var_debug_info); } + caller_body.basic_blocks_mut().append(callee_body.basic_blocks_mut()); + + caller_body[callsite.block].terminator = Some(Terminator { + source_info: callsite.source_info, + kind: TerminatorKind::Goto { target: integrator.map_block(START_BLOCK) }, + }); + + // Copy required constants from the callee_body into the caller_body. Although we are only + // pushing unevaluated consts to `required_consts`, here they may have been evaluated + // because we are calling `instantiate_and_normalize_erasing_regions` -- so we filter again. + caller_body.required_consts.as_mut().unwrap().extend( + callee_body.required_consts().into_iter().filter(|ct| ct.const_.is_required_const()), + ); + // Now that we incorporated the callee's `required_consts`, we can remove the callee from + // `mentioned_items` -- but we have to take their `mentioned_items` in return. This does + // some extra work here to save the monomorphization collector work later. It helps a lot, + // since monomorphization can avoid a lot of work when the "mentioned items" are similar to + // the actually used items. By doing this we can entirely avoid visiting the callee! + // We need to reconstruct the `required_item` for the callee so that we can find and + // remove it. + let callee_item = MentionedItem::Fn(func.ty(caller_body, tcx)); + let caller_mentioned_items = caller_body.mentioned_items.as_mut().unwrap(); + if let Some(idx) = caller_mentioned_items.iter().position(|item| item.node == callee_item) { + // We found the callee, so remove it and add its items instead. + caller_mentioned_items.remove(idx); + caller_mentioned_items.extend(callee_body.mentioned_items()); + } else { + // If we can't find the callee, there's no point in adding its items. 
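
// Standalone sketch (not part of this patch; the helper name is invented) of
// the push-then-rotate idiom used above: appending the `n` new statements and
// rotating right once gives the same result as `n` front inserts, but in a
// single O(len + n) pass instead of `n` separate O(len) shifts.
fn prepend_by_rotation<T>(v: &mut Vec<T>, new_items: Vec<T>) {
    let n = new_items.len();
    v.extend(new_items);
    v.rotate_right(n);
}

fn main() {
    let mut statements = vec!["old0", "old1", "old2"];
    prepend_by_rotation(&mut statements, vec!["new0", "new1"]);
    assert_eq!(statements, ["new0", "new1", "old0", "old1", "old2"]);
}
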
Probably it + // already got removed by being inlined elsewhere in the same function, so we already + // took its items. + } +} + +fn make_call_args<'tcx, I: Inliner<'tcx>>( + inliner: &I, + args: Box<[Spanned<Operand<'tcx>>]>, + callsite: &CallSite<'tcx>, + caller_body: &mut Body<'tcx>, + callee_body: &Body<'tcx>, + return_block: Option<BasicBlock>, +) -> Box<[Local]> { + let tcx = inliner.tcx(); + + // There is a bit of a mismatch between the *caller* of a closure and the *callee*. + // The caller provides the arguments wrapped up in a tuple: + // + // tuple_tmp = (a, b, c) + // Fn::call(closure_ref, tuple_tmp) + // + // meanwhile the closure body expects the arguments (here, `a`, `b`, and `c`) + // as distinct arguments. (This is the "rust-call" ABI hack.) Normally, codegen has + // the job of unpacking this tuple. But here, we are codegen. =) So we want to create + // a vector like + // + // [closure_ref, tuple_tmp.0, tuple_tmp.1, tuple_tmp.2] + // + // Except for one tiny wrinkle: we don't actually want `tuple_tmp.0`. It's more convenient + // if we "spill" that into *another* temporary, so that we can map the argument + // variable in the callee MIR directly to an argument variable on our side. + // So we introduce temporaries like: + // + // tmp0 = tuple_tmp.0 + // tmp1 = tuple_tmp.1 + // tmp2 = tuple_tmp.2 + // + // and the vector is `[closure_ref, tmp0, tmp1, tmp2]`. + if callsite.fn_sig.abi() == ExternAbi::RustCall && callee_body.spread_arg.is_none() { + // FIXME(edition_2024): switch back to a normal method call. + let mut args = <_>::into_iter(args); + let self_ = create_temp_if_necessary( + inliner, + args.next().unwrap().node, + callsite, + caller_body, + return_block, + ); + let tuple = create_temp_if_necessary( + inliner, + args.next().unwrap().node, + callsite, + caller_body, + return_block, + ); + assert!(args.next().is_none()); + + let tuple = Place::from(tuple); + let ty::Tuple(tuple_tys) = tuple.ty(caller_body, tcx).ty.kind() else { + bug!("Closure arguments are not passed as a tuple"); + }; + + // The `closure_ref` in our example above. + let closure_ref_arg = iter::once(self_); + + // The `tmp0`, `tmp1`, and `tmp2` in our example above. + let tuple_tmp_args = tuple_tys.iter().enumerate().map(|(i, ty)| { + // This is e.g., `tuple_tmp.0` in our example above. + let tuple_field = Operand::Move(tcx.mk_place_field(tuple, FieldIdx::new(i), ty)); + + // Spill to a local to make e.g., `tmp0`. + create_temp_if_necessary(inliner, tuple_field, callsite, caller_body, return_block) + }); + + closure_ref_arg.chain(tuple_tmp_args).collect() + } else { + // FIXME(edition_2024): switch back to a normal method call. + <_>::into_iter(args) + .map(|a| create_temp_if_necessary(inliner, a.node, callsite, caller_body, return_block)) + .collect() + } +} + +/// If `arg` is already a temporary, returns it. Otherwise, introduces a fresh temporary `T` and an +/// instruction `T = arg`, and returns `T`. +fn create_temp_if_necessary<'tcx, I: Inliner<'tcx>>( + inliner: &I, + arg: Operand<'tcx>, + callsite: &CallSite<'tcx>, + caller_body: &mut Body<'tcx>, + return_block: Option<BasicBlock>, +) -> Local { + // Reuse the operand if it is a moved temporary. + if let Operand::Move(place) = &arg + && let Some(local) = place.as_local() + && caller_body.local_kind(local) == LocalKind::Temp + { + return local; + } + + // Otherwise, create a temporary for the argument. 
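
// Nightly-only sketch (not part of this patch) of the "rust-call" mismatch the
// comment above describes: through the `Fn` trait the caller hands the closure
// a single tuple, while the closure body itself sees the arguments unpacked.
// Requires `#![feature(fn_traits)]`.
#![feature(fn_traits)]

fn main() {
    let add = |a: i32, b: i32, c: i32| a + b + c;
    // What the generic/trait call path produces: one tuple argument...
    let via_trait = std::ops::Fn::call(&add, (1, 2, 3));
    // ...versus what the closure body expects: three separate arguments.
    assert_eq!(via_trait, add(1, 2, 3));
}
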
+ trace!("creating temp for argument {:?}", arg); + let arg_ty = arg.ty(caller_body, inliner.tcx()); + let local = new_call_temp(caller_body, callsite, arg_ty, return_block); + caller_body[callsite.block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::Assign(Box::new((Place::from(local), Rvalue::Use(arg)))), + }); + local +} + +/// Introduces a new temporary into the caller body that is live for the duration of the call. +fn new_call_temp<'tcx>( + caller_body: &mut Body<'tcx>, + callsite: &CallSite<'tcx>, + ty: Ty<'tcx>, + return_block: Option<BasicBlock>, +) -> Local { + let local = caller_body.local_decls.push(LocalDecl::new(ty, callsite.source_info.span)); + + caller_body[callsite.block].statements.push(Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageLive(local), + }); + + if let Some(block) = return_block { + caller_body[block].statements.insert(0, Statement { + source_info: callsite.source_info, + kind: StatementKind::StorageDead(local), + }); + } + + local } /** @@ -894,7 +1127,7 @@ struct Integrator<'a, 'tcx> { in_cleanup_block: bool, return_block: Option<BasicBlock>, tcx: TyCtxt<'tcx>, - always_live_locals: BitSet<Local>, + always_live_locals: DenseBitSet<Local>, } impl Integrator<'_, '_> { diff --git a/compiler/rustc_mir_transform/src/instsimplify.rs b/compiler/rustc_mir_transform/src/instsimplify.rs index 1a65affe812..5a36519e6a3 100644 --- a/compiler/rustc_mir_transform/src/instsimplify.rs +++ b/compiler/rustc_mir_transform/src/instsimplify.rs @@ -46,8 +46,11 @@ impl<'tcx> crate::MirPass<'tcx> for InstSimplify { } ctx.simplify_bool_cmp(rvalue); ctx.simplify_ref_deref(rvalue); + ctx.simplify_len(rvalue); ctx.simplify_ptr_aggregate(rvalue); ctx.simplify_cast(rvalue); + ctx.simplify_repeated_aggregate(rvalue); + ctx.simplify_repeat_once(rvalue); } _ => {} } @@ -68,6 +71,35 @@ struct InstSimplifyContext<'a, 'tcx> { } impl<'tcx> InstSimplifyContext<'_, 'tcx> { + /// Transform aggregates like [0, 0, 0, 0, 0] into [0; 5]. + /// GVN can also do this optimization, but GVN is only run at mir-opt-level 2 so having this in + /// InstSimplify helps unoptimized builds. + fn simplify_repeated_aggregate(&self, rvalue: &mut Rvalue<'tcx>) { + let Rvalue::Aggregate(box AggregateKind::Array(_), fields) = rvalue else { + return; + }; + if fields.len() < 5 { + return; + } + let first = &fields[rustc_abi::FieldIdx::ZERO]; + let Operand::Constant(first) = first else { + return; + }; + let Ok(first_val) = first.const_.eval(self.tcx, self.typing_env, first.span) else { + return; + }; + if fields.iter().all(|field| { + let Operand::Constant(field) = field else { + return false; + }; + let field = field.const_.eval(self.tcx, self.typing_env, field.span); + field == Ok(first_val) + }) { + let len = ty::Const::from_target_usize(self.tcx, fields.len().try_into().unwrap()); + *rvalue = Rvalue::Repeat(Operand::Constant(first.clone()), len); + } + } + /// Transform boolean comparisons into logical operations. fn simplify_bool_cmp(&self, rvalue: &mut Rvalue<'tcx>) { match rvalue { @@ -130,6 +162,18 @@ impl<'tcx> InstSimplifyContext<'_, 'tcx> { } } + /// Transform `Len([_; N])` ==> `N`. 
+ fn simplify_len(&self, rvalue: &mut Rvalue<'tcx>) { + if let Rvalue::Len(ref place) = *rvalue { + let place_ty = place.ty(self.local_decls, self.tcx).ty; + if let ty::Array(_, len) = *place_ty.kind() { + let const_ = Const::from_ty_const(len, self.tcx.types.usize, self.tcx); + let constant = ConstOperand { span: DUMMY_SP, const_, user_ty: None }; + *rvalue = Rvalue::Use(Operand::Constant(Box::new(constant))); + } + } + } + /// Transform `Aggregate(RawPtr, [p, ()])` ==> `Cast(PtrToPtr, p)`. fn simplify_ptr_aggregate(&self, rvalue: &mut Rvalue<'tcx>) { if let Rvalue::Aggregate(box AggregateKind::RawPtr(pointee_ty, mutability), fields) = rvalue @@ -173,33 +217,22 @@ impl<'tcx> InstSimplifyContext<'_, 'tcx> { *kind = CastKind::IntToInt; return; } - - // Transmuting a transparent struct/union to a field's type is a projection - if let ty::Adt(adt_def, args) = operand_ty.kind() - && adt_def.repr().transparent() - && (adt_def.is_struct() || adt_def.is_union()) - && let Some(place) = operand.place() - { - let variant = adt_def.non_enum_variant(); - for (i, field) in variant.fields.iter_enumerated() { - let field_ty = field.ty(self.tcx, args); - if field_ty == *cast_ty { - let place = place - .project_deeper(&[ProjectionElem::Field(i, *cast_ty)], self.tcx); - let operand = if operand.is_move() { - Operand::Move(place) - } else { - Operand::Copy(place) - }; - *rvalue = Rvalue::Use(operand); - return; - } - } - } } } } + /// Simplify `[x; 1]` to just `[x]`. + fn simplify_repeat_once(&self, rvalue: &mut Rvalue<'tcx>) { + if let Rvalue::Repeat(operand, count) = rvalue + && let Some(1) = count.try_to_target_usize(self.tcx) + { + *rvalue = Rvalue::Aggregate( + Box::new(AggregateKind::Array(operand.ty(self.local_decls, self.tcx))), + [operand.clone()].into(), + ); + } + } + fn simplify_primitive_clone( &self, terminator: &mut Terminator<'tcx>, diff --git a/compiler/rustc_mir_transform/src/jump_threading.rs b/compiler/rustc_mir_transform/src/jump_threading.rs index 8feb90ff7a0..c73a03489c5 100644 --- a/compiler/rustc_mir_transform/src/jump_threading.rs +++ b/compiler/rustc_mir_transform/src/jump_threading.rs @@ -40,7 +40,7 @@ use rustc_const_eval::const_eval::DummyMachine; use rustc_const_eval::interpret::{ImmTy, Immediate, InterpCx, OpTy, Projectable}; use rustc_data_structures::fx::FxHashSet; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::mir::interpret::Scalar; use rustc_middle::mir::visit::Visitor; @@ -121,7 +121,7 @@ struct TOFinder<'a, 'tcx> { ecx: InterpCx<'tcx, DummyMachine>, body: &'a Body<'tcx>, map: Map<'tcx>, - loop_headers: BitSet<BasicBlock>, + loop_headers: DenseBitSet<BasicBlock>, /// We use an arena to avoid cloning the slices when cloning `state`. arena: &'a DroplessArena, opportunities: Vec<ThreadingOpportunity>, @@ -832,8 +832,8 @@ enum Update { /// at least a predecessor which it dominates. This definition is only correct for reducible CFGs. /// But if the CFG is already irreducible, there is no point in trying much harder. /// is already irreducible. -fn loop_headers(body: &Body<'_>) -> BitSet<BasicBlock> { - let mut loop_headers = BitSet::new_empty(body.basic_blocks.len()); +fn loop_headers(body: &Body<'_>) -> DenseBitSet<BasicBlock> { + let mut loop_headers = DenseBitSet::new_empty(body.basic_blocks.len()); let dominators = body.basic_blocks.dominators(); // Only visit reachable blocks. 
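
// From-scratch sketch (not part of this patch; `dominates` is assumed to come
// from a separate dominator computation) of the loop-header rule documented
// above: a block is a loop header iff it dominates one of its own
// predecessors, i.e. some edge into it is a back edge.
fn loop_headers(
    successors: &[Vec<usize>],
    dominates: impl Fn(usize, usize) -> bool,
) -> Vec<bool> {
    let mut is_header = vec![false; successors.len()];
    for (block, succs) in successors.iter().enumerate() {
        for &target in succs {
            // `block -> target` is a back edge iff `target` dominates `block`.
            if dominates(target, block) {
                is_header[target] = true;
            }
        }
    }
    is_header
}

fn main() {
    // CFG: 0 -> 1, 1 -> 2, 2 -> {1, 3}. Block 1 heads the 1-2 loop.
    let successors = vec![vec![1], vec![2], vec![1, 3], vec![]];
    let dominates = |a: usize, b: usize| {
        a == b || a == 0 || (a == 1 && (b == 2 || b == 3)) || (a == 2 && b == 3)
    };
    assert_eq!(loop_headers(&successors, dominates), vec![false, true, false, false]);
}
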
for (bb, bbdata) in traversal::preorder(body) { diff --git a/compiler/rustc_mir_transform/src/known_panics_lint.rs b/compiler/rustc_mir_transform/src/known_panics_lint.rs index f1705d0c831..f4ac5c6aa80 100644 --- a/compiler/rustc_mir_transform/src/known_panics_lint.rs +++ b/compiler/rustc_mir_transform/src/known_panics_lint.rs @@ -13,7 +13,7 @@ use rustc_data_structures::fx::FxHashSet; use rustc_hir::HirId; use rustc_hir::def::DefKind; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::mir::visit::{MutatingUseContext, NonMutatingUseContext, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -67,7 +67,7 @@ struct ConstPropagator<'mir, 'tcx> { tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, worklist: Vec<BasicBlock>, - visited_blocks: BitSet<BasicBlock>, + visited_blocks: DenseBitSet<BasicBlock>, locals: IndexVec<Local, Value<'tcx>>, body: &'mir Body<'tcx>, written_only_inside_own_block_locals: FxHashSet<Local>, @@ -190,7 +190,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { tcx, typing_env, worklist: vec![START_BLOCK], - visited_blocks: BitSet::new_empty(body.basic_blocks.len()), + visited_blocks: DenseBitSet::new_empty(body.basic_blocks.len()), locals: IndexVec::from_elem_n(Value::Uninit, body.local_decls.len()), body, can_const_prop, @@ -440,6 +440,7 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { | Rvalue::Use(..) | Rvalue::CopyForDeref(..) | Rvalue::Repeat(..) + | Rvalue::Len(..) | Rvalue::Cast(..) | Rvalue::ShallowInitBox(..) | Rvalue::Discriminant(..) @@ -599,6 +600,20 @@ impl<'mir, 'tcx> ConstPropagator<'mir, 'tcx> { return None; } + Len(place) => { + let len = if let ty::Array(_, n) = place.ty(self.local_decls(), self.tcx).ty.kind() + { + n.try_to_target_usize(self.tcx)? + } else { + match self.get_const(place)? { + Value::Immediate(src) => src.len(&self.ecx).discard_err()?, + Value::Aggregate { fields, .. } => fields.len() as u64, + Value::Uninit => return None, + } + }; + ImmTy::from_scalar(Scalar::from_target_usize(len, self), layout).into() + } + Ref(..) | RawPtr(..) => return None, NullaryOp(ref null_op, ty) => { @@ -852,7 +867,7 @@ enum ConstPropMode { struct CanConstProp { can_const_prop: IndexVec<Local, ConstPropMode>, // False at the beginning. Once set, no more assignments are allowed to that local. 
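
// Standalone sketch (not part of this patch) of what `KnownPanicsLint` is
// after: when constant propagation can prove an operation must panic, it is
// reported at compile time. The new `Len` arm above lets array lengths take
// part in that reasoning.
fn main() {
    let a = [10u8, 20, 30];
    let i = 1usize;
    assert_eq!(a[i], 20);

    // A provably out-of-range constant index, e.g.
    //     let j = 3usize;
    //     let _ = a[j];
    // is flagged during compilation ("this operation will panic at runtime")
    // instead of only panicking when executed.
}
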
- found_assignment: BitSet<Local>, + found_assignment: DenseBitSet<Local>, } impl CanConstProp { @@ -864,7 +879,7 @@ impl CanConstProp { ) -> IndexVec<Local, ConstPropMode> { let mut cpv = CanConstProp { can_const_prop: IndexVec::from_elem(ConstPropMode::FullConstProp, &body.local_decls), - found_assignment: BitSet::new_empty(body.local_decls.len()), + found_assignment: DenseBitSet::new_empty(body.local_decls.len()), }; for (local, val) in cpv.can_const_prop.iter_enumerated_mut() { let ty = body.local_decls[local].ty; diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index e1fba9be5bb..d1bacf1f598 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -1,4 +1,5 @@ // tidy-alphabetical-start +#![feature(array_windows)] #![feature(assert_matches)] #![feature(box_patterns)] #![feature(const_type_name)] @@ -34,8 +35,7 @@ use rustc_middle::util::Providers; use rustc_middle::{bug, query, span_bug}; use rustc_span::source_map::Spanned; use rustc_span::{DUMMY_SP, sym}; -use rustc_trait_selection::traits; -use tracing::{debug, trace}; +use tracing::debug; #[macro_use] mod pass_manager; @@ -114,6 +114,8 @@ declare_passes! { mod add_moves_for_packed_drops : AddMovesForPackedDrops; mod add_retag : AddRetag; mod add_subtyping_projections : Subtyper; + mod check_inline : CheckForceInline; + mod check_call_recursion : CheckCallRecursion, CheckDropRecursion; mod check_alignment : CheckAlignment; mod check_const_item_mutation : CheckConstItemMutation; mod check_packed_ref : CheckPackedRef; @@ -141,7 +143,8 @@ declare_passes! { mod gvn : GVN; // Made public so that `mir_drops_elaborated_and_const_checked` can be overridden // by custom rustc drivers, running all the steps by themselves. See #114628. - pub mod inline : Inline; + pub mod inline : Inline, ForceInline; + mod impossible_predicates : ImpossiblePredicates; mod instsimplify : InstSimplify { BeforeInline, AfterSimplifyCfg }; mod jump_threading : JumpThreading; mod known_panics_lint : KnownPanicsLint; @@ -374,6 +377,8 @@ fn mir_built(tcx: TyCtxt<'_>, def: LocalDefId) -> &Steal<Body<'_>> { &mut body, &[ // MIR-level lints. + &Lint(check_inline::CheckForceInline), + &Lint(check_call_recursion::CheckCallRecursion), &Lint(check_packed_ref::CheckPackedRef), &Lint(check_const_item_mutation::CheckConstItemMutation), &Lint(function_item_references::FunctionItemReferences), @@ -488,7 +493,9 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & let is_fn_like = tcx.def_kind(def).is_fn_like(); if is_fn_like { // Do not compute the mir call graph without said call graph actually being used. - if pm::should_run_pass(tcx, &inline::Inline) { + if pm::should_run_pass(tcx, &inline::Inline) + || inline::ForceInline::should_run_pass_for_callee(tcx, def.to_def_id()) + { tcx.ensure_with_value().mir_inliner_callees(ty::InstanceKind::Item(def.to_def_id())); } } @@ -500,56 +507,8 @@ fn mir_drops_elaborated_and_const_checked(tcx: TyCtxt<'_>, def: LocalDefId) -> & body.tainted_by_errors = Some(error_reported); } - // Check if it's even possible to satisfy the 'where' clauses - // for this item. - // - // This branch will never be taken for any normal function. 
- // However, it's possible to `#!feature(trivial_bounds)]` to write - // a function with impossible to satisfy clauses, e.g.: - // `fn foo() where String: Copy {}` - // - // We don't usually need to worry about this kind of case, - // since we would get a compilation error if the user tried - // to call it. However, since we optimize even without any - // calls to the function, we need to make sure that it even - // makes sense to try to evaluate the body. - // - // If there are unsatisfiable where clauses, then all bets are - // off, and we just give up. - // - // We manually filter the predicates, skipping anything that's not - // "global". We are in a potentially generic context - // (e.g. we are evaluating a function without instantiating generic - // parameters, so this filtering serves two purposes: - // - // 1. We skip evaluating any predicates that we would - // never be able prove are unsatisfiable (e.g. `<T as Foo>` - // 2. We avoid trying to normalize predicates involving generic - // parameters (e.g. `<T as Foo>::MyItem`). This can confuse - // the normalization code (leading to cycle errors), since - // it's usually never invoked in this way. - let predicates = tcx - .predicates_of(body.source.def_id()) - .predicates - .iter() - .filter_map(|(p, _)| if p.is_global() { Some(*p) } else { None }); - if traits::impossible_predicates(tcx, traits::elaborate(tcx, predicates).collect()) { - trace!("found unsatisfiable predicates for {:?}", body.source); - // Clear the body to only contain a single `unreachable` statement. - let bbs = body.basic_blocks.as_mut(); - bbs.raw.truncate(1); - bbs[START_BLOCK].statements.clear(); - bbs[START_BLOCK].terminator_mut().kind = TerminatorKind::Unreachable; - body.var_debug_info.clear(); - body.local_decls.raw.truncate(body.arg_count + 1); - } - run_analysis_to_runtime_passes(tcx, &mut body); - // Now that drop elaboration has been performed, we can check for - // unconditional drop recursion. - rustc_mir_build::lints::check_drop_recursion(tcx, &body); - tcx.alloc_steal_mir(body) } @@ -591,6 +550,7 @@ pub fn run_analysis_to_runtime_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<' /// After this series of passes, no lifetime analysis based on borrowing can be done. fn run_analysis_cleanup_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let passes: &[&dyn MirPass<'tcx>] = &[ + &impossible_predicates::ImpossiblePredicates, &cleanup_post_borrowck::CleanupPostBorrowck, &remove_noop_landing_pads::RemoveNoopLandingPads, &simplify::SimplifyCfg::PostAnalysis, @@ -610,6 +570,8 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // Calling this after `PostAnalysisNormalize` ensures that we don't deal with opaque types. &add_subtyping_projections::Subtyper, &elaborate_drops::ElaborateDrops, + // Needs to happen after drop elaboration. + &Lint(check_call_recursion::CheckDropRecursion), // This will remove extraneous landing pads which are no longer // necessary as well as forcing any call in a non-unwinding // function calling a possibly-unwinding function to abort the process. @@ -664,6 +626,8 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // Perform instsimplify before inline to eliminate some trivial calls (like clone // shims). &instsimplify::InstSimplify::BeforeInline, + // Perform inlining of `#[rustc_force_inline]`-annotated callees. + &inline::ForceInline, // Perform inlining, which may add a lot of code. 
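
// Nightly-only sketch (not part of this patch; the function name is invented)
// of the situation the removed comment block above describes, now handled by
// the dedicated `ImpossiblePredicates` pass: with `trivial_bounds`, a function
// may carry a where-clause no caller can ever satisfy, so its body is dead and
// is replaced with a single `Unreachable` terminator instead of being
// optimized.
#![feature(trivial_bounds)]
#![allow(trivial_bounds)]

#[allow(dead_code)]
fn never_callable() -> u32
where
    String: Copy,
{
    // The bound is assumed inside the body, but no call site can prove it,
    // so this code can never run.
    let s = String::from("dead");
    s.len() as u32
}

fn main() {
    // `never_callable()` cannot be called here: `String: Copy` does not hold.
}
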
&inline::Inline, // Code from other crates may have storage markers, so this needs to happen after diff --git a/compiler/rustc_mir_transform/src/lint.rs b/compiler/rustc_mir_transform/src/lint.rs index 29e762af8de..f472c7cb493 100644 --- a/compiler/rustc_mir_transform/src/lint.rs +++ b/compiler/rustc_mir_transform/src/lint.rs @@ -5,7 +5,7 @@ use std::borrow::Cow; use rustc_data_structures::fx::FxHashSet; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::visit::{PlaceContext, Visitor}; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; @@ -43,7 +43,7 @@ struct Lint<'a, 'tcx> { when: String, body: &'a Body<'tcx>, is_fn_like: bool, - always_live_locals: &'a BitSet<Local>, + always_live_locals: &'a DenseBitSet<Local>, maybe_storage_live: ResultsCursor<'a, 'tcx, MaybeStorageLive<'a>>, maybe_storage_dead: ResultsCursor<'a, 'tcx, MaybeStorageDead<'a>>, places: FxHashSet<PlaceRef<'tcx>>, diff --git a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs index e5a183bc75c..50d10883d2c 100644 --- a/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs +++ b/compiler/rustc_mir_transform/src/lint_tail_expr_drop_order.rs @@ -351,6 +351,11 @@ pub(crate) fn run_lint<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &Body< { return; } + + // FIXME(typing_env): This should be able to reveal the opaques local to the + // body using the typeck results. + let typing_env = ty::TypingEnv::non_body_analysis(tcx, def_id); + // ## About BIDs in blocks ## // Track the set of blocks that contain a backwards-incompatible drop (BID) // and, for each block, the vector of locations. @@ -358,7 +363,7 @@ pub(crate) fn run_lint<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId, body: &Body< // We group them per-block because they tend to scheduled in the same drop ladder block. let mut bid_per_block = IndexMap::default(); let mut bid_places = UnordSet::new(); - let typing_env = ty::TypingEnv::post_analysis(tcx, def_id); + let mut ty_dropped_components = UnordMap::default(); for (block, data) in body.basic_blocks.iter_enumerated() { for (statement_index, stmt) in data.statements.iter().enumerate() { @@ -686,7 +691,7 @@ impl Subdiagnostic for LocalLabel<'_> { for dtor in self.destructors { dtor.add_to_diag_with(diag, f); } - let msg = f(diag, crate::fluent_generated::mir_transform_label_local_epilogue.into()); + let msg = f(diag, crate::fluent_generated::mir_transform_label_local_epilogue); diag.span_label(self.span, msg); } } diff --git a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs index a9227524ce5..6dfa14d6b52 100644 --- a/compiler/rustc_mir_transform/src/multiple_return_terminators.rs +++ b/compiler/rustc_mir_transform/src/multiple_return_terminators.rs @@ -1,7 +1,7 @@ //! This pass removes jumps to basic blocks containing only a return, and replaces them with a //! return instead. 
-use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; @@ -16,7 +16,7 @@ impl<'tcx> crate::MirPass<'tcx> for MultipleReturnTerminators { fn run_pass(&self, _: TyCtxt<'tcx>, body: &mut Body<'tcx>) { // find basic blocks with no statement and a return terminator - let mut bbs_simple_returns = BitSet::new_empty(body.basic_blocks.len()); + let mut bbs_simple_returns = DenseBitSet::new_empty(body.basic_blocks.len()); let bbs = body.basic_blocks_mut(); for idx in bbs.indices() { if bbs[idx].statements.is_empty() diff --git a/compiler/rustc_mir_transform/src/nrvo.rs b/compiler/rustc_mir_transform/src/nrvo.rs index cd026ed6806..35872de3852 100644 --- a/compiler/rustc_mir_transform/src/nrvo.rs +++ b/compiler/rustc_mir_transform/src/nrvo.rs @@ -1,7 +1,7 @@ //! See the docs for [`RenameReturnPlace`]. use rustc_hir::Mutability; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::mir::visit::{MutVisitor, NonUseContext, PlaceContext, Visitor}; use rustc_middle::mir::{self, BasicBlock, Local, Location}; @@ -116,7 +116,7 @@ fn local_eligible_for_nrvo(body: &mir::Body<'_>) -> Option<Local> { fn find_local_assigned_to_return_place(start: BasicBlock, body: &mir::Body<'_>) -> Option<Local> { let mut block = start; - let mut seen = BitSet::new_empty(body.basic_blocks.len()); + let mut seen = DenseBitSet::new_empty(body.basic_blocks.len()); // Iterate as long as `block` has exactly one predecessor that we have not yet visited. while seen.insert(block) { diff --git a/compiler/rustc_mir_transform/src/pass_manager.rs b/compiler/rustc_mir_transform/src/pass_manager.rs index 8a45ce0762d..c3f0a989ce1 100644 --- a/compiler/rustc_mir_transform/src/pass_manager.rs +++ b/compiler/rustc_mir_transform/src/pass_manager.rs @@ -79,6 +79,12 @@ pub(super) trait MirPass<'tcx> { true } + /// Returns `true` if this pass can be overridden by `-Zenable-mir-passes`. This should be + /// true for basically every pass other than those that are necessary for correctness. + fn can_be_overridden(&self) -> bool { + true + } + fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>); fn is_mir_dump_enabled(&self) -> bool { @@ -176,6 +182,10 @@ where { let name = pass.name(); + if !pass.can_be_overridden() { + return pass.is_enabled(tcx.sess); + } + let overridden_passes = &tcx.sess.opts.unstable_opts.mir_enable_passes; let overridden = overridden_passes.iter().rev().find(|(s, _)| s == &*name).map(|(_name, polarity)| { diff --git a/compiler/rustc_mir_transform/src/prettify.rs b/compiler/rustc_mir_transform/src/prettify.rs index 937c207776b..51abd4da86e 100644 --- a/compiler/rustc_mir_transform/src/prettify.rs +++ b/compiler/rustc_mir_transform/src/prettify.rs @@ -4,7 +4,7 @@ //! (`-Zmir-enable-passes=+ReorderBasicBlocks,+ReorderLocals`) //! to make the MIR easier to read for humans. 
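
// Standalone sketch (not part of this patch; names invented) of the
// `-Zmir-enable-passes` override lookup that the new `can_be_overridden` hook
// above opts out of: the flag may name a pass several times, and the last
// `+name`/`-name` occurrence wins, hence the reverse search.
fn is_enabled(overrides: &[(&str, bool)], pass_name: &str, default: bool) -> bool {
    overrides
        .iter()
        .rev()
        .find(|(name, _)| *name == pass_name)
        .map(|&(_, polarity)| polarity)
        .unwrap_or(default)
}

fn main() {
    // Models `-Zmir-enable-passes=+GVN,-GVN`: the later `-GVN` wins.
    let overrides = [("GVN", true), ("GVN", false)];
    assert!(!is_enabled(&overrides, "GVN", true));
    assert!(is_enabled(&overrides, "Inline", true));
}
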
-use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -51,8 +51,10 @@ impl<'tcx> crate::MirPass<'tcx> for ReorderLocals { } fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - let mut finder = - LocalFinder { map: IndexVec::new(), seen: BitSet::new_empty(body.local_decls.len()) }; + let mut finder = LocalFinder { + map: IndexVec::new(), + seen: DenseBitSet::new_empty(body.local_decls.len()), + }; // We can't reorder the return place or the arguments for local in (0..=body.arg_count).map(Local::from_usize) { @@ -113,7 +115,7 @@ impl<'tcx> MutVisitor<'tcx> for BasicBlockUpdater<'tcx> { struct LocalFinder { map: IndexVec<Local, Local>, - seen: BitSet<Local>, + seen: DenseBitSet<Local>, } impl LocalFinder { diff --git a/compiler/rustc_mir_transform/src/promote_consts.rs b/compiler/rustc_mir_transform/src/promote_consts.rs index 7451f419304..6be95b1f0f1 100644 --- a/compiler/rustc_mir_transform/src/promote_consts.rs +++ b/compiler/rustc_mir_transform/src/promote_consts.rs @@ -430,7 +430,9 @@ impl<'tcx> Validator<'_, 'tcx> { self.validate_operand(op)? } - Rvalue::Discriminant(place) => self.validate_place(place.as_ref())?, + Rvalue::Discriminant(place) | Rvalue::Len(place) => { + self.validate_place(place.as_ref())? + } Rvalue::ThreadLocalRef(_) => return Err(Unpromotable), diff --git a/compiler/rustc_mir_transform/src/ref_prop.rs b/compiler/rustc_mir_transform/src/ref_prop.rs index 96bcdfa6fac..95b05f94270 100644 --- a/compiler/rustc_mir_transform/src/ref_prop.rs +++ b/compiler/rustc_mir_transform/src/ref_prop.rs @@ -2,7 +2,7 @@ use std::borrow::Cow; use rustc_data_structures::fx::FxHashSet; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::mir::visit::*; use rustc_middle::mir::*; @@ -132,7 +132,7 @@ fn compute_replacement<'tcx>( let mut targets = IndexVec::from_elem(Value::Unknown, &body.local_decls); // Set of locals for which we will remove their storage statement. This is useful for // reborrowed references. - let mut storage_to_remove = BitSet::new_empty(body.local_decls.len()); + let mut storage_to_remove = DenseBitSet::new_empty(body.local_decls.len()); let fully_replacable_locals = fully_replacable_locals(ssa); @@ -324,8 +324,8 @@ fn compute_replacement<'tcx>( /// /// We consider a local to be replacable iff it's only used in a `Deref` projection `*_local` or /// non-use position (like storage statements and debuginfo). -fn fully_replacable_locals(ssa: &SsaLocals) -> BitSet<Local> { - let mut replacable = BitSet::new_empty(ssa.num_locals()); +fn fully_replacable_locals(ssa: &SsaLocals) -> DenseBitSet<Local> { + let mut replacable = DenseBitSet::new_empty(ssa.num_locals()); // First pass: for each local, whether its uses can be fully replaced. 
for local in ssa.locals() { @@ -344,7 +344,7 @@ fn fully_replacable_locals(ssa: &SsaLocals) -> BitSet<Local> { struct Replacer<'tcx> { tcx: TyCtxt<'tcx>, targets: IndexVec<Local, Value<'tcx>>, - storage_to_remove: BitSet<Local>, + storage_to_remove: DenseBitSet<Local>, allowed_replacements: FxHashSet<(Local, Location)>, any_replacement: bool, } diff --git a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs index fd49e956f43..76a3edfe0be 100644 --- a/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs +++ b/compiler/rustc_mir_transform/src/remove_noop_landing_pads.rs @@ -1,4 +1,4 @@ -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::*; use rustc_middle::ty::TyCtxt; @@ -40,7 +40,7 @@ impl<'tcx> crate::MirPass<'tcx> for RemoveNoopLandingPads { let mut jumps_folded = 0; let mut landing_pads_removed = 0; - let mut nop_landing_pads = BitSet::new_empty(body.basic_blocks.len()); + let mut nop_landing_pads = DenseBitSet::new_empty(body.basic_blocks.len()); // This is a post-order traversal, so that if A post-dominates B // then A will be visited before B. @@ -81,7 +81,7 @@ impl RemoveNoopLandingPads { &self, bb: BasicBlock, body: &Body<'_>, - nop_landing_pads: &BitSet<BasicBlock>, + nop_landing_pads: &DenseBitSet<BasicBlock>, ) -> bool { for stmt in &body[bb].statements { match &stmt.kind { diff --git a/compiler/rustc_mir_transform/src/remove_zsts.rs b/compiler/rustc_mir_transform/src/remove_zsts.rs index 64e183bcbc0..55e5701bd0a 100644 --- a/compiler/rustc_mir_transform/src/remove_zsts.rs +++ b/compiler/rustc_mir_transform/src/remove_zsts.rs @@ -36,31 +36,39 @@ struct Replacer<'a, 'tcx> { } /// A cheap, approximate check to avoid unnecessary `layout_of` calls. -fn maybe_zst(ty: Ty<'_>) -> bool { +/// +/// `Some(true)` is definitely ZST; `Some(false)` is definitely *not* ZST. +/// +/// `None` may or may not be, and must check `layout_of` to be sure. +fn trivially_zst<'tcx>(ty: Ty<'tcx>, tcx: TyCtxt<'tcx>) -> Option<bool> { match ty.kind() { - // maybe ZST (could be more precise) - ty::Adt(..) - | ty::Array(..) - | ty::Closure(..) - | ty::CoroutineClosure(..) - | ty::Tuple(..) - | ty::Alias(ty::Opaque, ..) => true, // definitely ZST - ty::FnDef(..) | ty::Never => true, - // unreachable or can't be ZST - _ => false, + ty::FnDef(..) | ty::Never => Some(true), + ty::Tuple(fields) if fields.is_empty() => Some(true), + ty::Array(_ty, len) if let Some(0) = len.try_to_target_usize(tcx) => Some(true), + // clearly not ZST + ty::Bool + | ty::Char + | ty::Int(..) + | ty::Uint(..) + | ty::Float(..) + | ty::RawPtr(..) + | ty::Ref(..) + | ty::FnPtr(..) 
=> Some(false), + // check `layout_of` to see (including unreachable things we won't actually see) + _ => None, } } impl<'tcx> Replacer<'_, 'tcx> { fn known_to_be_zst(&self, ty: Ty<'tcx>) -> bool { - if !maybe_zst(ty) { - return false; + if let Some(is_zst) = trivially_zst(ty, self.tcx) { + is_zst + } else { + self.tcx + .layout_of(self.typing_env.as_query_input(ty)) + .is_ok_and(|layout| layout.is_zst()) } - let Ok(layout) = self.tcx.layout_of(self.typing_env.as_query_input(ty)) else { - return false; - }; - layout.is_zst() } fn make_zst(&self, ty: Ty<'tcx>) -> ConstOperand<'tcx> { diff --git a/compiler/rustc_mir_transform/src/shim.rs b/compiler/rustc_mir_transform/src/shim.rs index 722da3c420d..4648ec33c93 100644 --- a/compiler/rustc_mir_transform/src/shim.rs +++ b/compiler/rustc_mir_transform/src/shim.rs @@ -20,7 +20,7 @@ use rustc_span::{DUMMY_SP, Span}; use tracing::{debug, instrument}; use crate::{ - abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, + abort_unwinding_calls, add_call_guards, add_moves_for_packed_drops, deref_separator, inline, instsimplify, mentioned_items, pass_manager as pm, remove_noop_landing_pads, simplify, }; @@ -155,6 +155,8 @@ fn make_shim<'tcx>(tcx: TyCtxt<'tcx>, instance: ty::InstanceKind<'tcx>) -> Body< &remove_noop_landing_pads::RemoveNoopLandingPads, &simplify::SimplifyCfg::MakeShim, &instsimplify::InstSimplify::BeforeInline, + // Perform inlining of `#[rustc_force_inline]`-annotated callees. + &inline::ForceInline, &abort_unwinding_calls::AbortUnwindingCalls, &add_call_guards::CriticalCallEdges, ], diff --git a/compiler/rustc_mir_transform/src/single_use_consts.rs b/compiler/rustc_mir_transform/src/single_use_consts.rs index 277a33c0311..10b3c0ae94f 100644 --- a/compiler/rustc_mir_transform/src/single_use_consts.rs +++ b/compiler/rustc_mir_transform/src/single_use_consts.rs @@ -1,5 +1,5 @@ use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::mir::visit::{MutVisitor, PlaceContext, Visitor}; use rustc_middle::mir::*; @@ -28,9 +28,9 @@ impl<'tcx> crate::MirPass<'tcx> for SingleUseConsts { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let mut finder = SingleUseConstsFinder { - ineligible_locals: BitSet::new_empty(body.local_decls.len()), + ineligible_locals: DenseBitSet::new_empty(body.local_decls.len()), locations: IndexVec::from_elem(LocationPair::new(), &body.local_decls), - locals_in_debug_info: BitSet::new_empty(body.local_decls.len()), + locals_in_debug_info: DenseBitSet::new_empty(body.local_decls.len()), }; finder.ineligible_locals.insert_range(..=Local::from_usize(body.arg_count)); @@ -96,9 +96,9 @@ impl LocationPair { } struct SingleUseConstsFinder { - ineligible_locals: BitSet<Local>, + ineligible_locals: DenseBitSet<Local>, locations: IndexVec<Local, LocationPair>, - locals_in_debug_info: BitSet<Local>, + locals_in_debug_info: DenseBitSet<Local>, } impl<'tcx> Visitor<'tcx> for SingleUseConstsFinder { diff --git a/compiler/rustc_mir_transform/src/sroa.rs b/compiler/rustc_mir_transform/src/sroa.rs index 52b9ec1e0a3..d54ea3feab6 100644 --- a/compiler/rustc_mir_transform/src/sroa.rs +++ b/compiler/rustc_mir_transform/src/sroa.rs @@ -2,7 +2,7 @@ use rustc_abi::{FIRST_VARIANT, FieldIdx}; use rustc_data_structures::flat_map_in_place::FlatMapInPlace; use rustc_hir::LangItem; use rustc_index::IndexVec; -use rustc_index::bit_set::{BitSet, GrowableBitSet}; +use rustc_index::bit_set::{DenseBitSet, GrowableBitSet}; use 
rustc_middle::bug; use rustc_middle::mir::patch::MirPatch; use rustc_middle::mir::visit::*; @@ -60,9 +60,9 @@ impl<'tcx> crate::MirPass<'tcx> for ScalarReplacementOfAggregates { fn escaping_locals<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, - excluded: &BitSet<Local>, + excluded: &DenseBitSet<Local>, body: &Body<'tcx>, -) -> BitSet<Local> { +) -> DenseBitSet<Local> { let is_excluded_ty = |ty: Ty<'tcx>| { if ty.is_union() || ty.is_enum() { return true; @@ -97,7 +97,7 @@ fn escaping_locals<'tcx>( false }; - let mut set = BitSet::new_empty(body.local_decls.len()); + let mut set = DenseBitSet::new_empty(body.local_decls.len()); set.insert_range(RETURN_PLACE..=Local::from_usize(body.arg_count)); for (local, decl) in body.local_decls().iter_enumerated() { if excluded.contains(local) || is_excluded_ty(decl.ty) { @@ -109,7 +109,7 @@ fn escaping_locals<'tcx>( return visitor.set; struct EscapeVisitor { - set: BitSet<Local>, + set: DenseBitSet<Local>, } impl<'tcx> Visitor<'tcx> for EscapeVisitor { @@ -198,7 +198,7 @@ fn compute_flattening<'tcx>( tcx: TyCtxt<'tcx>, typing_env: ty::TypingEnv<'tcx>, body: &mut Body<'tcx>, - escaping: BitSet<Local>, + escaping: DenseBitSet<Local>, ) -> ReplacementMap<'tcx> { let mut fragments = IndexVec::from_elem(None, &body.local_decls); @@ -226,8 +226,8 @@ fn replace_flattened_locals<'tcx>( tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>, replacements: ReplacementMap<'tcx>, -) -> BitSet<Local> { - let mut all_dead_locals = BitSet::new_empty(replacements.fragments.len()); +) -> DenseBitSet<Local> { + let mut all_dead_locals = DenseBitSet::new_empty(replacements.fragments.len()); for (local, replacements) in replacements.fragments.iter_enumerated() { if replacements.is_some() { all_dead_locals.insert(local); @@ -267,7 +267,7 @@ struct ReplacementVisitor<'tcx, 'll> { /// Work to do. replacements: &'ll ReplacementMap<'tcx>, /// This is used to check that we are not leaving references to replaced locals behind. - all_dead_locals: BitSet<Local>, + all_dead_locals: DenseBitSet<Local>, patch: MirPatch<'tcx>, } diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs index 5653aef0aae..a24b3b2e80f 100644 --- a/compiler/rustc_mir_transform/src/ssa.rs +++ b/compiler/rustc_mir_transform/src/ssa.rs @@ -7,7 +7,7 @@ //! of a `Freeze` local. Those can still be considered to be SSA. use rustc_data_structures::graph::dominators::Dominators; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::bug; use rustc_middle::middle::resolve_bound_vars::Set1; @@ -29,7 +29,7 @@ pub(super) struct SsaLocals { /// We ignore non-uses (Storage statements, debuginfo). direct_uses: IndexVec<Local, u32>, /// Set of SSA locals that are immutably borrowed. - borrowed_locals: BitSet<Local>, + borrowed_locals: DenseBitSet<Local>, } pub(super) enum AssignedValue<'a, 'tcx> { @@ -50,7 +50,7 @@ impl SsaLocals { let dominators = body.basic_blocks.dominators(); let direct_uses = IndexVec::from_elem(0, &body.local_decls); - let borrowed_locals = BitSet::new_empty(body.local_decls.len()); + let borrowed_locals = DenseBitSet::new_empty(body.local_decls.len()); let mut visitor = SsaVisitor { body, assignments, @@ -202,12 +202,12 @@ impl SsaLocals { } /// Set of SSA locals that are immutably borrowed. 
- pub(super) fn borrowed_locals(&self) -> &BitSet<Local> { + pub(super) fn borrowed_locals(&self) -> &DenseBitSet<Local> { &self.borrowed_locals } /// Make a property uniform on a copy equivalence class by removing elements. - pub(super) fn meet_copy_equivalence(&self, property: &mut BitSet<Local>) { + pub(super) fn meet_copy_equivalence(&self, property: &mut DenseBitSet<Local>) { // Consolidate to have a local iff all its copies are. // // `copy_classes` defines equivalence classes between locals. The `local`s that recursively @@ -241,7 +241,7 @@ struct SsaVisitor<'a, 'tcx> { assignment_order: Vec<Local>, direct_uses: IndexVec<Local, u32>, // Track locals that are immutably borrowed, so we can check their type is `Freeze` later. - borrowed_locals: BitSet<Local>, + borrowed_locals: DenseBitSet<Local>, } impl SsaVisitor<'_, '_> { @@ -396,7 +396,7 @@ pub(crate) struct StorageLiveLocals { impl StorageLiveLocals { pub(crate) fn new( body: &Body<'_>, - always_storage_live_locals: &BitSet<Local>, + always_storage_live_locals: &DenseBitSet<Local>, ) -> StorageLiveLocals { let mut storage_live = IndexVec::from_elem(Set1::Empty, &body.local_decls); for local in always_storage_live_locals.iter() { diff --git a/compiler/rustc_mir_transform/src/validate.rs b/compiler/rustc_mir_transform/src/validate.rs index a670da94fcc..b62e34ac08d 100644 --- a/compiler/rustc_mir_transform/src/validate.rs +++ b/compiler/rustc_mir_transform/src/validate.rs @@ -1,10 +1,11 @@ //! Validates the MIR to ensure that invariants are upheld. use rustc_abi::{ExternAbi, FIRST_VARIANT, Size}; +use rustc_attr_parsing::InlineAttr; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_hir::LangItem; use rustc_index::IndexVec; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_infer::infer::TyCtxtInferExt; use rustc_infer::traits::{Obligation, ObligationCause}; use rustc_middle::mir::coverage::CoverageKind; @@ -97,7 +98,7 @@ struct CfgChecker<'a, 'tcx> { body: &'a Body<'tcx>, tcx: TyCtxt<'tcx>, unwind_edge_count: usize, - reachable_blocks: BitSet<BasicBlock>, + reachable_blocks: DenseBitSet<BasicBlock>, value_cache: FxHashSet<u128>, // If `false`, then the MIR must not contain `UnwindAction::Continue` or // `TerminatorKind::Resume`. @@ -366,7 +367,8 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { self.check_edge(location, *target, EdgeKind::Normal); self.check_unwind_edge(location, *unwind); } - TerminatorKind::Call { args, .. } | TerminatorKind::TailCall { args, .. } => { + TerminatorKind::Call { func, args, .. } + | TerminatorKind::TailCall { func, args, .. } => { // FIXME(explicit_tail_calls): refactor this & add tail-call specific checks if let TerminatorKind::Call { target, unwind, destination, .. } = terminator.kind { if let Some(target) = target { @@ -419,6 +421,13 @@ impl<'a, 'tcx> Visitor<'tcx> for CfgChecker<'a, 'tcx> { } } } + + if let ty::FnDef(did, ..) = func.ty(&self.body.local_decls, self.tcx).kind() + && self.body.phase >= MirPhase::Runtime(RuntimePhase::Optimized) + && matches!(self.tcx.codegen_fn_attrs(did).inline, InlineAttr::Force { .. }) + { + self.fail(location, "`#[rustc_force_inline]`-annotated function not inlined"); + } } TerminatorKind::Assert { target, unwind, .. } => { self.check_edge(location, *target, EdgeKind::Normal); @@ -1009,6 +1018,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { } } Rvalue::Ref(..) 
=> {} + Rvalue::Len(p) => { + let pty = p.ty(&self.body.local_decls, self.tcx).ty; + check_kinds!( + pty, + "Cannot compute length of non-array type {:?}", + ty::Array(..) | ty::Slice(..) + ); + } Rvalue::BinaryOp(op, vals) => { use BinOp::*; let a = vals.0.ty(&self.body.local_decls, self.tcx); @@ -1107,6 +1124,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { ); } UnOp::PtrMetadata => { + if !matches!(self.body.phase, MirPhase::Runtime(_)) { + // It would probably be fine to support this in earlier phases, but at + // the time of writing it's only ever introduced from intrinsic + // lowering or other runtime-phase optimization passes, so earlier + // things can just `bug!` on it. + self.fail(location, "PtrMetadata should be in runtime MIR only"); + } + check_kinds!( a, "Cannot PtrMetadata non-pointer non-reference type {:?}", diff --git a/compiler/rustc_monomorphize/src/collector.rs b/compiler/rustc_monomorphize/src/collector.rs index 00aae03704f..bb603df1129 100644 --- a/compiler/rustc_monomorphize/src/collector.rs +++ b/compiler/rustc_monomorphize/src/collector.rs @@ -207,6 +207,7 @@ use std::path::PathBuf; +use rustc_attr_parsing::InlineAttr; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::sync::{LRef, MTLock, par_for_each_in}; use rustc_data_structures::unord::{UnordMap, UnordSet}; @@ -224,8 +225,8 @@ use rustc_middle::ty::adjustment::{CustomCoerceUnsized, PointerCoercion}; use rustc_middle::ty::layout::ValidityRequirement; use rustc_middle::ty::print::{shrunk_instance_name, with_no_trimmed_paths}; use rustc_middle::ty::{ - self, GenericArgs, GenericParamDefKind, Instance, InstanceKind, Ty, TyCtxt, TypeFoldable, - TypeVisitableExt, VtblEntry, + self, GenericArgs, GenericParamDefKind, Instance, InstanceKind, Interner, Ty, TyCtxt, + TypeFoldable, TypeVisitableExt, VtblEntry, }; use rustc_middle::util::Providers; use rustc_middle::{bug, span_bug}; @@ -959,6 +960,14 @@ fn should_codegen_locally<'tcx>(tcx: TyCtxtAt<'tcx>, instance: Instance<'tcx>) - return false; } + if tcx.def_kind(def_id).has_codegen_attrs() + && matches!(tcx.codegen_fn_attrs(def_id).inline, InlineAttr::Force { .. }) + { + // `#[rustc_force_inline]` items should never be codegened. This should be caught by + // the MIR validator. + tcx.delay_bug("attempt to codegen `#[rustc_force_inline]` item"); + } + if def_id.is_local() { // Local items cannot be referred to locally without monomorphizing them locally. return true; @@ -1368,6 +1377,10 @@ fn collect_roots(tcx: TyCtxt<'_>, mode: MonoItemCollectionStrategy) -> Vec<MonoI collector.process_impl_item(id); } + for id in crate_items.nested_bodies() { + collector.process_nested_body(id); + } + collector.push_extra_entry_roots(); } @@ -1394,20 +1407,37 @@ impl<'v> RootCollector<'_, 'v> { match self.tcx.def_kind(id.owner_id) { DefKind::Enum | DefKind::Struct | DefKind::Union => { if self.strategy == MonoItemCollectionStrategy::Eager - && self.tcx.generics_of(id.owner_id).is_empty() + && !self.tcx.generics_of(id.owner_id).requires_monomorphization(self.tcx) { debug!("RootCollector: ADT drop-glue for `{id:?}`",); + let id_args = + ty::GenericArgs::for_item(self.tcx, id.owner_id.to_def_id(), |param, _| { + match param.kind { + GenericParamDefKind::Lifetime => { + self.tcx.lifetimes.re_erased.into() + } + GenericParamDefKind::Type { .. } + | GenericParamDefKind::Const { .. 
} => { + unreachable!( + "`own_requires_monomorphization` check means that \ + we should have no type/const params" + ) + } + } + }); // This type is impossible to instantiate, so we should not try to // generate a `drop_in_place` instance for it. if self.tcx.instantiate_and_check_impossible_predicates(( id.owner_id.to_def_id(), - ty::List::empty(), + id_args, )) { return; } - let ty = self.tcx.type_of(id.owner_id.to_def_id()).no_bound_vars().unwrap(); + let ty = + self.tcx.type_of(id.owner_id.to_def_id()).instantiate(self.tcx, id_args); + assert!(!ty.has_non_region_param()); visit_drop_use(self.tcx, ty, true, DUMMY_SP, self.output); } } @@ -1450,10 +1480,39 @@ impl<'v> RootCollector<'_, 'v> { } } + fn process_nested_body(&mut self, def_id: LocalDefId) { + match self.tcx.def_kind(def_id) { + DefKind::Closure => { + if self.strategy == MonoItemCollectionStrategy::Eager + && !self + .tcx + .generics_of(self.tcx.typeck_root_def_id(def_id.to_def_id())) + .requires_monomorphization(self.tcx) + { + let instance = match *self.tcx.type_of(def_id).instantiate_identity().kind() { + ty::Closure(def_id, args) + | ty::Coroutine(def_id, args) + | ty::CoroutineClosure(def_id, args) => { + Instance::new(def_id, self.tcx.erase_regions(args)) + } + _ => unreachable!(), + }; + let mono_item = create_fn_mono_item(self.tcx, instance, DUMMY_SP); + if mono_item.node.is_instantiable(self.tcx) { + self.output.push(mono_item); + } + } + } + _ => {} + } + } + fn is_root(&self, def_id: LocalDefId) -> bool { !self.tcx.generics_of(def_id).requires_monomorphization(self.tcx) && match self.strategy { - MonoItemCollectionStrategy::Eager => true, + MonoItemCollectionStrategy::Eager => { + !matches!(self.tcx.codegen_fn_attrs(def_id).inline, InlineAttr::Force { .. }) + } MonoItemCollectionStrategy::Lazy => { self.entry_fn.and_then(|(id, _)| id.as_local()) == Some(def_id) || self.tcx.is_reachable_non_generic(def_id) diff --git a/compiler/rustc_next_trait_solver/Cargo.toml b/compiler/rustc_next_trait_solver/Cargo.toml index 451c215566b..f9168112216 100644 --- a/compiler/rustc_next_trait_solver/Cargo.toml +++ b/compiler/rustc_next_trait_solver/Cargo.toml @@ -13,6 +13,7 @@ rustc_macros = { path = "../rustc_macros", optional = true } rustc_serialize = { path = "../rustc_serialize", optional = true } rustc_type_ir = { path = "../rustc_type_ir", default-features = false } rustc_type_ir_macros = { path = "../rustc_type_ir_macros" } +smallvec = "1.8.1" tracing = "0.1" # tidy-alphabetical-end diff --git a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs index 7da4f5e0107..3c5d9b95e77 100644 --- a/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs +++ b/compiler/rustc_next_trait_solver/src/solve/assembly/structural_traits.rs @@ -712,6 +712,8 @@ pub(in crate::solve) fn extract_fn_def_from_const_callable<I: Interner>( } } +// NOTE: Keep this in sync with `evaluate_host_effect_for_destruct_goal` in +// the old solver, for as long as that exists. 
pub(in crate::solve) fn const_conditions_for_destruct<I: Interner>( cx: I, self_ty: I::Ty, diff --git a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs index d68fca60829..4faa243c02a 100644 --- a/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs +++ b/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs @@ -8,6 +8,7 @@ use rustc_type_ir::lang_items::TraitSolverLangItem; use rustc_type_ir::solve::CanonicalResponse; use rustc_type_ir::visit::TypeVisitableExt as _; use rustc_type_ir::{self as ty, Interner, TraitPredicate, TypingMode, Upcast as _, elaborate}; +use smallvec::SmallVec; use tracing::{instrument, trace}; use crate::delegate::SolverDelegate; @@ -225,7 +226,7 @@ where } ecx.probe_and_evaluate_goal_for_constituent_tys( - CandidateSource::BuiltinImpl(BuiltinImplSource::Misc), + CandidateSource::BuiltinImpl(BuiltinImplSource::Trivial), goal, structural_traits::instantiate_constituent_tys_for_sized_trait, ) @@ -741,12 +742,14 @@ where a_data.principal(), )); } else if let Some(a_principal) = a_data.principal() { - for new_a_principal in - elaborate::supertraits(self.cx(), a_principal.with_self_ty(cx, a_ty)).skip(1) + for (idx, new_a_principal) in + elaborate::supertraits(self.cx(), a_principal.with_self_ty(cx, a_ty)) + .enumerate() + .skip(1) { responses.extend(self.consider_builtin_upcast_to_principal( goal, - CandidateSource::BuiltinImpl(BuiltinImplSource::TraitUpcasting), + CandidateSource::BuiltinImpl(BuiltinImplSource::TraitUpcasting(idx)), a_data, a_region, b_data, @@ -1192,7 +1195,30 @@ where }; } - // FIXME: prefer trivial builtin impls + // We prefer trivial builtin candidates, i.e. builtin impls without any + // nested requirements, over all others. This is a fix for #53123 and + // prevents where-bounds from accidentally extending the lifetime of a + // variable. + if candidates + .iter() + .any(|c| matches!(c.source, CandidateSource::BuiltinImpl(BuiltinImplSource::Trivial))) + { + let trivial_builtin_impls: SmallVec<[_; 1]> = candidates + .iter() + .filter(|c| { + matches!(c.source, CandidateSource::BuiltinImpl(BuiltinImplSource::Trivial)) + }) + .map(|c| c.result) + .collect(); + // There should only ever be a single trivial builtin candidate + // as they would otherwise overlap. + assert_eq!(trivial_builtin_impls.len(), 1); + return if let Some(response) = self.try_merge_responses(&trivial_builtin_impls) { + Ok((response, Some(TraitGoalProvenVia::Misc))) + } else { + Ok((self.bail_with_ambiguity(&trivial_builtin_impls), None)) + }; + } // If there are non-global where-bounds, prefer where-bounds // (including global ones) over everything else. diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index 2637ea268c8..10756be6afb 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -655,9 +655,9 @@ impl<'a> Parser<'a> { fn check_keyword_case(&mut self, exp: ExpKeywordPair, case: Case) -> bool { if self.check_keyword(exp) { true - // Do an ASCII case-insensitive match, because all keywords are ASCII. } else if case == Case::Insensitive && let Some((ident, IdentIsRaw::No)) = self.token.ident() + // Do an ASCII case-insensitive match, because all keywords are ASCII. 
&& ident.as_str().eq_ignore_ascii_case(exp.kw.as_str()) { true @@ -689,7 +689,8 @@ impl<'a> Parser<'a> { true } else if case == Case::Insensitive && let Some((ident, IdentIsRaw::No)) = self.token.ident() - && ident.as_str().to_lowercase() == exp.kw.as_str().to_lowercase() + // Do an ASCII case-insensitive match, because all keywords are ASCII. + && ident.as_str().eq_ignore_ascii_case(exp.kw.as_str()) { self.dcx().emit_err(errors::KwBadCase { span: ident.span, kw: exp.kw.as_str() }); self.bump(); diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs index 5ad3da2196f..64bcb1a5a36 100644 --- a/compiler/rustc_parse/src/parser/pat.rs +++ b/compiler/rustc_parse/src/parser/pat.rs @@ -656,14 +656,14 @@ impl<'a> Parser<'a> { fn visit_pat(&mut self, p: &'a Pat) -> Self::Result { match &p.kind { // Base expression - PatKind::Err(_) | PatKind::Lit(_) => { + PatKind::Err(_) | PatKind::Expr(_) => { self.maybe_add_suggestions_then_emit(p.span, p.span, false) } // Sub-patterns // FIXME: this doesn't work with recursive subpats (`&mut &mut <err>`) PatKind::Box(subpat) | PatKind::Ref(subpat, _) - if matches!(subpat.kind, PatKind::Err(_) | PatKind::Lit(_)) => + if matches!(subpat.kind, PatKind::Err(_) | PatKind::Expr(_)) => { self.maybe_add_suggestions_then_emit(subpat.span, p.span, false) } @@ -766,7 +766,7 @@ impl<'a> Parser<'a> { if let Some(re) = self.parse_range_end() { self.parse_pat_range_begin_with(const_expr, re)? } else { - PatKind::Lit(const_expr) + PatKind::Expr(const_expr) } } else if self.is_builtin() { self.parse_pat_builtin()? @@ -833,7 +833,7 @@ impl<'a> Parser<'a> { .struct_span_err(self_.token.span, msg) .with_span_label(self_.token.span, format!("expected {expected}")) }); - PatKind::Lit(self.mk_expr(lo, ExprKind::Lit(lit))) + PatKind::Expr(self.mk_expr(lo, ExprKind::Lit(lit))) } else { // Try to parse everything else as literal with optional minus match self.parse_literal_maybe_minus() { @@ -845,7 +845,7 @@ impl<'a> Parser<'a> { match self.parse_range_end() { Some(form) => self.parse_pat_range_begin_with(begin, form)?, - None => PatKind::Lit(begin), + None => PatKind::Expr(begin), } } Err(err) => return self.fatal_unexpected_non_pat(err, expected), @@ -989,7 +989,7 @@ impl<'a> Parser<'a> { match &pat.kind { // recover ranges with parentheses around the `(start)..` - PatKind::Lit(begin) + PatKind::Expr(begin) if self.may_recover() && let Some(form) = self.parse_range_end() => { diff --git a/compiler/rustc_parse/src/parser/tests.rs b/compiler/rustc_parse/src/parser/tests.rs index 3f8d66c2c95..655ab822359 100644 --- a/compiler/rustc_parse/src/parser/tests.rs +++ b/compiler/rustc_parse/src/parser/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use std::assert_matches::assert_matches; use std::io::prelude::*; @@ -2366,8 +2366,7 @@ fn string_to_tts_1() { token::Ident(sym::i32, IdentIsRaw::No), sp(8, 11), ), - ]) - .into(), + ]), ), TokenTree::Delimited( DelimSpan::from_pair(sp(13, 14), sp(18, 19)), @@ -2383,8 +2382,7 @@ fn string_to_tts_1() { ), // `Alone` because the `;` is followed by whitespace. 
TokenTree::token_alone(token::Semi, sp(16, 17)), - ]) - .into(), + ]), ), ]); diff --git a/compiler/rustc_parse/src/parser/tokenstream/tests.rs b/compiler/rustc_parse/src/parser/tokenstream/tests.rs index 037b5b1a9de..aac75323ff3 100644 --- a/compiler/rustc_parse/src/parser/tokenstream/tests.rs +++ b/compiler/rustc_parse/src/parser/tokenstream/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use rustc_ast::token::{self, IdentIsRaw}; use rustc_ast::tokenstream::{TokenStream, TokenTree}; diff --git a/compiler/rustc_passes/messages.ftl b/compiler/rustc_passes/messages.ftl index f39bea2a56f..3ed600a717f 100644 --- a/compiler/rustc_passes/messages.ftl +++ b/compiler/rustc_passes/messages.ftl @@ -656,6 +656,14 @@ passes_rustc_allow_const_fn_unstable = passes_rustc_dirty_clean = attribute requires -Z query-dep-graph to be enabled +passes_rustc_force_inline = + attribute should be applied to a function + .label = not a function definition + +passes_rustc_force_inline_coro = + attribute cannot be applied to a `async`, `gen` or `async gen` function + .label = `async`, `gen` or `async gen` function + passes_rustc_layout_scalar_valid_range_arg = expected exactly one integer literal argument @@ -791,6 +799,9 @@ passes_unused_assign = value assigned to `{$name}` is never read passes_unused_assign_passed = value passed to `{$name}` is never read .help = maybe it is overwritten before being read? +passes_unused_assign_suggestion = + you might have meant to mutate the pointed at value being passed in, instead of changing the reference in the local binding + passes_unused_capture_maybe_capture_ref = value captured by `{$name}` is never read .help = did you mean to capture by reference instead? diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index 7656ca86c18..1b2b8ac5dd9 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -247,7 +247,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { self.check_coroutine(attr, target); } [sym::linkage, ..] => self.check_linkage(attr, span, target), - [sym::rustc_pub_transparent, ..] => self.check_rustc_pub_transparent( attr.span, span, attrs), + [sym::rustc_pub_transparent, ..] => self.check_rustc_pub_transparent(attr.span, span, attrs), [ // ok sym::allow @@ -332,6 +332,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { self.check_repr(attrs, span, target, item, hir_id); self.check_used(attrs, target, span); + self.check_rustc_force_inline(hir_id, attrs, span, target); } fn inline_attr_str_error_with_macro_def(&self, hir_id: HirId, attr: &Attribute, sym: &str) { @@ -2480,6 +2481,45 @@ impl<'tcx> CheckAttrVisitor<'tcx> { } } + fn check_rustc_force_inline( + &self, + hir_id: HirId, + attrs: &[Attribute], + span: Span, + target: Target, + ) { + let force_inline_attr = attrs.iter().find(|attr| attr.has_name(sym::rustc_force_inline)); + match (target, force_inline_attr) { + (Target::Closure, None) => { + let is_coro = matches!( + self.tcx.hir().expect_expr(hir_id).kind, + hir::ExprKind::Closure(hir::Closure { + kind: hir::ClosureKind::Coroutine(..) + | hir::ClosureKind::CoroutineClosure(..), + .. 
+ }) + ); + let parent_did = self.tcx.hir().get_parent_item(hir_id).to_def_id(); + let parent_span = self.tcx.def_span(parent_did); + let parent_force_inline_attr = + self.tcx.get_attr(parent_did, sym::rustc_force_inline); + if let Some(attr) = parent_force_inline_attr + && is_coro + { + self.dcx().emit_err(errors::RustcForceInlineCoro { + attr_span: attr.span, + span: parent_span, + }); + } + } + (Target::Fn, _) => (), + (_, Some(attr)) => { + self.dcx().emit_err(errors::RustcForceInline { attr_span: attr.span, span }); + } + (_, None) => (), + } + } + /// Checks if `#[autodiff]` is applied to an item other than a function item. fn check_autodiff(&self, _hir_id: HirId, _attr: &Attribute, span: Span, target: Target) { debug!("check_autodiff"); diff --git a/compiler/rustc_passes/src/diagnostic_items.rs b/compiler/rustc_passes/src/diagnostic_items.rs index 7b02aecdfae..323b414cca0 100644 --- a/compiler/rustc_passes/src/diagnostic_items.rs +++ b/compiler/rustc_passes/src/diagnostic_items.rs @@ -79,8 +79,14 @@ fn all_diagnostic_items(tcx: TyCtxt<'_>, (): ()) -> DiagnosticItems { // Initialize the collector. let mut items = DiagnosticItems::default(); - // Collect diagnostic items in other crates. - for &cnum in tcx.crates(()).iter().chain(std::iter::once(&LOCAL_CRATE)) { + // Collect diagnostic items in visible crates. + for cnum in tcx + .crates(()) + .iter() + .copied() + .filter(|cnum| tcx.is_user_visible_dep(*cnum)) + .chain(std::iter::once(LOCAL_CRATE)) + { for (&name, &def_id) in &tcx.diagnostic_items(cnum).name_to_id { collect_item(tcx, &mut items, name, def_id); } diff --git a/compiler/rustc_passes/src/errors.rs b/compiler/rustc_passes/src/errors.rs index d95fa5db0ce..c3043ac60aa 100644 --- a/compiler/rustc_passes/src/errors.rs +++ b/compiler/rustc_passes/src/errors.rs @@ -689,6 +689,24 @@ pub(crate) struct RustcPubTransparent { } #[derive(Diagnostic)] +#[diag(passes_rustc_force_inline)] +pub(crate) struct RustcForceInline { + #[primary_span] + pub attr_span: Span, + #[label] + pub span: Span, +} + +#[derive(Diagnostic)] +#[diag(passes_rustc_force_inline_coro)] +pub(crate) struct RustcForceInlineCoro { + #[primary_span] + pub attr_span: Span, + #[label] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(passes_link_ordinal)] pub(crate) struct LinkOrdinal { #[primary_span] @@ -1769,9 +1787,26 @@ pub(crate) struct IneffectiveUnstableImpl; #[derive(LintDiagnostic)] #[diag(passes_unused_assign)] -#[help] pub(crate) struct UnusedAssign { pub name: String, + #[subdiagnostic] + pub suggestion: Option<UnusedAssignSuggestion>, + #[help] + pub help: bool, +} + +#[derive(Subdiagnostic)] +#[multipart_suggestion(passes_unused_assign_suggestion, applicability = "maybe-incorrect")] +pub(crate) struct UnusedAssignSuggestion { + pub pre: &'static str, + #[suggestion_part(code = "{pre}mut ")] + pub ty_span: Span, + #[suggestion_part(code = "")] + pub ty_ref_span: Span, + #[suggestion_part(code = "*")] + pub ident_span: Span, + #[suggestion_part(code = "")] + pub expr_ref_span: Span, } #[derive(LintDiagnostic)] diff --git a/compiler/rustc_passes/src/input_stats.rs b/compiler/rustc_passes/src/input_stats.rs index f9cb8c9b927..6617cf2f723 100644 --- a/compiler/rustc_passes/src/input_stats.rs +++ b/compiler/rustc_passes/src/input_stats.rs @@ -304,7 +304,8 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { Box, Deref, Ref, - Lit, + Expr, + Guard, Range, Slice, Err @@ -586,7 +587,7 @@ impl<'v> ast_visit::Visitor<'v> for StatCollector<'v> { Box, Deref, Ref, - Lit, + Expr, Range, Slice, Rest, diff 
--git a/compiler/rustc_passes/src/liveness.rs b/compiler/rustc_passes/src/liveness.rs
index b85a987c641..426899a4d5c 100644
--- a/compiler/rustc_passes/src/liveness.rs
+++ b/compiler/rustc_passes/src/liveness.rs
@@ -1360,7 +1360,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Liveness<'a, 'tcx> {
 fn visit_local(&mut self, local: &'tcx hir::LetStmt<'tcx>) {
 self.check_unused_vars_in_pat(local.pat, None, None, |spans, hir_id, ln, var| {
 if local.init.is_some() {
- self.warn_about_dead_assign(spans, hir_id, ln, var);
+ self.warn_about_dead_assign(spans, hir_id, ln, var, None);
 }
 });
@@ -1460,7 +1460,8 @@ impl<'tcx> Liveness<'_, 'tcx> {
 // as being used.
 let ln = self.live_node(expr.hir_id, expr.span);
 let var = self.variable(var_hid, expr.span);
- self.warn_about_dead_assign(vec![expr.span], expr.hir_id, ln, var);
+ let sugg = self.annotate_mut_binding_to_immutable_binding(var_hid, expr);
+ self.warn_about_dead_assign(vec![expr.span], expr.hir_id, ln, var, sugg);
 }
 }
 _ => {
@@ -1585,6 +1586,70 @@ impl<'tcx> Liveness<'_, 'tcx> {
 }
 }
+ /// Detect the following case
+ ///
+ /// ```text
+ /// fn change_object(mut a: &Ty) {
+ ///     let value = Ty::new();
+ ///     a = &value;
+ /// }
+ /// ```
+ ///
+ /// where the user likely meant to modify the value behind the reference, using `a` as an out
+ /// parameter, instead of mutating the local binding. When encountering this we suggest:
+ ///
+ /// ```text
+ /// fn change_object(a: &'_ mut Ty) {
+ ///     let value = Ty::new();
+ ///     *a = value;
+ /// }
+ /// ```
+ fn annotate_mut_binding_to_immutable_binding(
+ &self,
+ var_hid: HirId,
+ expr: &'tcx Expr<'tcx>,
+ ) -> Option<errors::UnusedAssignSuggestion> {
+ if let hir::Node::Expr(parent) = self.ir.tcx.parent_hir_node(expr.hir_id)
+ && let hir::ExprKind::Assign(_, rhs, _) = parent.kind
+ && let hir::ExprKind::AddrOf(borrow_kind, _mut, inner) = rhs.kind
+ && let hir::BorrowKind::Ref = borrow_kind
+ && let hir::Node::Pat(pat) = self.ir.tcx.hir_node(var_hid)
+ && let hir::Node::Param(hir::Param { ty_span, ..
}) = + self.ir.tcx.parent_hir_node(pat.hir_id) + && let item_id = self.ir.tcx.hir().get_parent_item(pat.hir_id) + && let item = self.ir.tcx.hir_owner_node(item_id) + && let Some(fn_decl) = item.fn_decl() + && let hir::PatKind::Binding(hir::BindingMode::MUT, _hir_id, ident, _) = pat.kind + && let Some((ty_span, pre)) = fn_decl + .inputs + .iter() + .filter_map(|ty| { + if ty.span == *ty_span + && let hir::TyKind::Ref(lt, mut_ty) = ty.kind + { + // `&'name Ty` -> `&'name mut Ty` or `&Ty` -> `&mut Ty` + Some(( + mut_ty.ty.span.shrink_to_lo(), + if lt.ident.span.lo() == lt.ident.span.hi() { "" } else { " " }, + )) + } else { + None + } + }) + .next() + { + Some(errors::UnusedAssignSuggestion { + ty_span, + pre, + ty_ref_span: pat.span.until(ident.span), + ident_span: expr.span.shrink_to_lo(), + expr_ref_span: rhs.span.until(inner.span), + }) + } else { + None + } + } + #[instrument(skip(self), level = "INFO")] fn report_unused( &self, @@ -1738,15 +1803,23 @@ impl<'tcx> Liveness<'_, 'tcx> { suggs } - fn warn_about_dead_assign(&self, spans: Vec<Span>, hir_id: HirId, ln: LiveNode, var: Variable) { + fn warn_about_dead_assign( + &self, + spans: Vec<Span>, + hir_id: HirId, + ln: LiveNode, + var: Variable, + suggestion: Option<errors::UnusedAssignSuggestion>, + ) { if !self.live_on_exit(ln, var) && let Some(name) = self.should_warn(var) { + let help = suggestion.is_none(); self.ir.tcx.emit_node_span_lint( lint::builtin::UNUSED_ASSIGNMENTS, hir_id, spans, - errors::UnusedAssign { name }, + errors::UnusedAssign { name, suggestion, help }, ); } } diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs index 30f9e698521..60734122e63 100644 --- a/compiler/rustc_passes/src/stability.rs +++ b/compiler/rustc_passes/src/stability.rs @@ -5,8 +5,8 @@ use std::mem::replace; use std::num::NonZero; use rustc_attr_parsing::{ - self as attr, ConstStability, DeprecatedSince, Stability, StabilityLevel, StableSince, - UnstableReason, VERSION_PLACEHOLDER, + self as attr, AllowedThroughUnstableModules, ConstStability, DeprecatedSince, Stability, + StabilityLevel, StableSince, UnstableReason, VERSION_PLACEHOLDER, }; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::unord::{ExtendUnord, UnordMap, UnordSet}; @@ -20,11 +20,16 @@ use rustc_hir::{FieldDef, Item, ItemKind, TraitRef, Ty, TyKind, Variant}; use rustc_middle::hir::nested_filter; use rustc_middle::middle::lib_features::{FeatureStability, LibFeatures}; use rustc_middle::middle::privacy::EffectiveVisibilities; -use rustc_middle::middle::stability::{AllowUnstable, DeprecationEntry, Index}; +use rustc_middle::middle::stability::{ + AllowUnstable, Deprecated, DeprecationEntry, EvalResult, Index, +}; use rustc_middle::query::Providers; use rustc_middle::ty::TyCtxt; +use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_session::lint; -use rustc_session::lint::builtin::{INEFFECTIVE_UNSTABLE_TRAIT_IMPL, USELESS_DEPRECATED}; +use rustc_session::lint::builtin::{ + DEPRECATED, INEFFECTIVE_UNSTABLE_TRAIT_IMPL, USELESS_DEPRECATED, +}; use rustc_span::{Span, Symbol, sym}; use tracing::{debug, info}; @@ -593,9 +598,11 @@ impl<'tcx> MissingStabilityAnnotations<'tcx> { } fn check_missing_const_stability(&self, def_id: LocalDefId, span: Span) { - let is_const = self.tcx.is_const_fn(def_id.to_def_id()); + let is_const = self.tcx.is_const_fn(def_id.to_def_id()) + || (self.tcx.def_kind(def_id.to_def_id()) == DefKind::Trait + && self.tcx.is_const_trait(def_id.to_def_id())); - // Reachable const fn must have a stability 
attribute. + // Reachable const fn/trait must have a stability attribute. if is_const && self.effective_visibilities.is_reachable(def_id) && self.tcx.lookup_const_stability(def_id).is_none() @@ -772,7 +779,13 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { // For implementations of traits, check the stability of each item // individually as it's possible to have a stable trait with unstable // items. - hir::ItemKind::Impl(hir::Impl { of_trait: Some(ref t), self_ty, items, .. }) => { + hir::ItemKind::Impl(hir::Impl { + of_trait: Some(ref t), + self_ty, + items, + constness, + .. + }) => { let features = self.tcx.features(); if features.staged_api() { let attrs = self.tcx.hir().attrs(item.hir_id()); @@ -814,6 +827,16 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { } } + match constness { + rustc_hir::Constness::Const => { + if let Some(def_id) = t.trait_def_id() { + // FIXME(const_trait_impl): Improve the span here. + self.tcx.check_const_stability(def_id, t.path.span, t.path.span); + } + } + rustc_hir::Constness::NotConst => {} + } + for impl_item_ref in *items { let impl_item = self.tcx.associated_item(impl_item_ref.id.owner_id); @@ -829,6 +852,18 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { intravisit::walk_item(self, item); } + fn visit_poly_trait_ref(&mut self, t: &'tcx hir::PolyTraitRef<'tcx>) { + match t.modifiers.constness { + hir::BoundConstness::Always(span) | hir::BoundConstness::Maybe(span) => { + if let Some(def_id) = t.trait_ref.trait_def_id() { + self.tcx.check_const_stability(def_id, t.trait_ref.path.span, span); + } + } + hir::BoundConstness::Never => {} + } + intravisit::walk_poly_trait_ref(self, t); + } + fn visit_path(&mut self, path: &hir::Path<'tcx>, id: hir::HirId) { if let Some(def_id) = path.res.opt_def_id() { let method_span = path.segments.last().map(|s| s.ident.span); @@ -844,42 +879,95 @@ impl<'tcx> Visitor<'tcx> for Checker<'tcx> { }, ); - let is_allowed_through_unstable_modules = |def_id| { - self.tcx.lookup_stability(def_id).is_some_and(|stab| match stab.level { - StabilityLevel::Stable { allowed_through_unstable_modules, .. } => { - allowed_through_unstable_modules + if item_is_allowed { + // The item itself is allowed; check whether the path there is also allowed. + let is_allowed_through_unstable_modules: Option<AllowedThroughUnstableModules> = + self.tcx.lookup_stability(def_id).and_then(|stab| match stab.level { + StabilityLevel::Stable { allowed_through_unstable_modules, .. } => { + allowed_through_unstable_modules + } + _ => None, + }); + + if is_allowed_through_unstable_modules.is_none() { + // Check parent modules stability as well if the item the path refers to is itself + // stable. We only emit warnings for unstable path segments if the item is stable + // or allowed because stability is often inherited, so the most common case is that + // both the segments and the item are unstable behind the same feature flag. 
+ //
+ // We check here rather than in `visit_path_segment` to prevent visiting the last
+ // path segment twice
+ //
+ // We include special cases via #[rustc_allowed_through_unstable_modules] for items
+ // that were accidentally stabilized through unstable paths before this check was
+ // added, such as `core::intrinsics::transmute`
+ let parents = path.segments.iter().rev().skip(1);
+ for path_segment in parents {
+ if let Some(def_id) = path_segment.res.opt_def_id() {
+ // use `None` for id to prevent deprecation check
+ self.tcx.check_stability_allow_unstable(
+ def_id,
+ None,
+ path.span,
+ None,
+ if is_unstable_reexport(self.tcx, id) {
+ AllowUnstable::Yes
+ } else {
+ AllowUnstable::No
+ },
+ );
+ }
 }
- _ => false,
- })
- };
-
- if item_is_allowed && !is_allowed_through_unstable_modules(def_id) {
- // Check parent modules stability as well if the item the path refers to is itself
- // stable. We only emit warnings for unstable path segments if the item is stable
- // or allowed because stability is often inherited, so the most common case is that
- // both the segments and the item are unstable behind the same feature flag.
- //
- // We check here rather than in `visit_path_segment` to prevent visiting the last
- // path segment twice
- //
- // We include special cases via #[rustc_allowed_through_unstable_modules] for items
- // that were accidentally stabilized through unstable paths before this check was
- // added, such as `core::intrinsics::transmute`
- let parents = path.segments.iter().rev().skip(1);
- for path_segment in parents {
- if let Some(def_id) = path_segment.res.opt_def_id() {
- // use `None` for id to prevent deprecation check
- self.tcx.check_stability_allow_unstable(
- def_id,
- None,
- path.span,
- None,
- if is_unstable_reexport(self.tcx, id) {
- AllowUnstable::Yes
- } else {
- AllowUnstable::No
- },
- );
+ } else if let Some(AllowedThroughUnstableModules::WithDeprecation(deprecation)) =
+ is_allowed_through_unstable_modules
+ {
+ // Similar to above, but we cannot use `check_stability_allow_unstable` as that would
+ // immediately show the stability error. We just want to know the result and display
+ // our own kind of error.
+ let parents = path.segments.iter().rev().skip(1);
+ for path_segment in parents {
+ if let Some(def_id) = path_segment.res.opt_def_id() {
+ // use `None` for id to prevent deprecation check
+ let eval_result = self.tcx.eval_stability_allow_unstable(
+ def_id,
+ None,
+ path.span,
+ None,
+ if is_unstable_reexport(self.tcx, id) {
+ AllowUnstable::Yes
+ } else {
+ AllowUnstable::No
+ },
+ );
+ let is_allowed = matches!(eval_result, EvalResult::Allow);
+ if !is_allowed {
+ // Calculating message for lint involves calling `self.def_path_str`,
+ // which will by default invoke the expensive `visible_parent_map` query.
+ // Skip all that work if the lint is allowed anyway.
+ if self.tcx.lint_level_at_node(DEPRECATED, id).0
+ == lint::Level::Allow
+ {
+ return;
+ }
+ // Show a deprecation message.
+ let def_path = + with_no_trimmed_paths!(self.tcx.def_path_str(def_id)); + let def_kind = self.tcx.def_descr(def_id); + let diag = Deprecated { + sub: None, + kind: def_kind.to_owned(), + path: def_path, + note: Some(deprecation), + since_kind: lint::DeprecatedSinceKind::InEffect, + }; + self.tcx.emit_node_span_lint( + DEPRECATED, + id, + method_span.unwrap_or(path.span), + diag, + ); + } + } } } } diff --git a/compiler/rustc_pattern_analysis/src/constructor.rs b/compiler/rustc_pattern_analysis/src/constructor.rs index 8fce4266345..4ce868f014f 100644 --- a/compiler/rustc_pattern_analysis/src/constructor.rs +++ b/compiler/rustc_pattern_analysis/src/constructor.rs @@ -182,7 +182,7 @@ use std::iter::once; use rustc_apfloat::ieee::{DoubleS, HalfS, IeeeFloat, QuadS, SingleS}; use rustc_index::IndexVec; -use rustc_index::bit_set::{BitSet, GrowableBitSet}; +use rustc_index::bit_set::{DenseBitSet, GrowableBitSet}; use smallvec::SmallVec; use self::Constructor::*; @@ -1072,7 +1072,7 @@ impl<Cx: PatCx> ConstructorSet<Cx> { } } ConstructorSet::Variants { variants, non_exhaustive } => { - let mut seen_set = BitSet::new_empty(variants.len()); + let mut seen_set = DenseBitSet::new_empty(variants.len()); for idx in seen.iter().filter_map(|c| c.as_variant()) { seen_set.insert(idx); } diff --git a/compiler/rustc_pattern_analysis/src/usefulness.rs b/compiler/rustc_pattern_analysis/src/usefulness.rs index 99261eaa95c..cc09cd491af 100644 --- a/compiler/rustc_pattern_analysis/src/usefulness.rs +++ b/compiler/rustc_pattern_analysis/src/usefulness.rs @@ -712,7 +712,7 @@ use std::fmt; #[cfg(feature = "rustc")] use rustc_data_structures::stack::ensure_sufficient_stack; use rustc_hash::{FxHashMap, FxHashSet}; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use smallvec::{SmallVec, smallvec}; use tracing::{debug, instrument}; @@ -1129,7 +1129,7 @@ struct MatrixRow<'p, Cx: PatCx> { /// ``` /// Here the `(true, true)` case is irrelevant. Since we skip it, we will not detect that row 0 /// intersects rows 1 and 2. - intersects_at_least: BitSet<usize>, + intersects_at_least: DenseBitSet<usize>, /// Whether the head pattern is a branch (see definition of "branch pattern" at /// [`BranchPatUsefulness`]) head_is_branch: bool, @@ -1142,7 +1142,7 @@ impl<'p, Cx: PatCx> MatrixRow<'p, Cx> { parent_row: arm_id, is_under_guard: arm.has_guard, useful: false, - intersects_at_least: BitSet::new_empty(0), // Initialized in `Matrix::push`. + intersects_at_least: DenseBitSet::new_empty(0), // Initialized in `Matrix::push`. // This pattern is a branch because it comes from a match arm. head_is_branch: true, } @@ -1171,7 +1171,7 @@ impl<'p, Cx: PatCx> MatrixRow<'p, Cx> { parent_row, is_under_guard: self.is_under_guard, useful: false, - intersects_at_least: BitSet::new_empty(0), // Initialized in `Matrix::push`. + intersects_at_least: DenseBitSet::new_empty(0), // Initialized in `Matrix::push`. head_is_branch: is_or_pat, }) } @@ -1191,7 +1191,7 @@ impl<'p, Cx: PatCx> MatrixRow<'p, Cx> { parent_row, is_under_guard: self.is_under_guard, useful: false, - intersects_at_least: BitSet::new_empty(0), // Initialized in `Matrix::push`. + intersects_at_least: DenseBitSet::new_empty(0), // Initialized in `Matrix::push`. head_is_branch: false, }) } @@ -1230,7 +1230,7 @@ struct Matrix<'p, Cx: PatCx> { impl<'p, Cx: PatCx> Matrix<'p, Cx> { /// Pushes a new row to the matrix. Internal method, prefer [`Matrix::new`]. 
fn push(&mut self, mut row: MatrixRow<'p, Cx>) { - row.intersects_at_least = BitSet::new_empty(self.rows.len()); + row.intersects_at_least = DenseBitSet::new_empty(self.rows.len()); self.rows.push(row); } @@ -1824,7 +1824,7 @@ pub struct UsefulnessReport<'p, Cx: PatCx> { pub non_exhaustiveness_witnesses: Vec<WitnessPat<Cx>>, /// For each arm, a set of indices of arms above it that have non-empty intersection, i.e. there /// is a value matched by both arms. This may miss real intersections. - pub arm_intersections: Vec<BitSet<usize>>, + pub arm_intersections: Vec<DenseBitSet<usize>>, } /// Computes whether a match is exhaustive and which of its arms are useful. diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index 1b12af62ea5..46ec538735a 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs @@ -27,7 +27,7 @@ use rustc_query_system::query::{ QueryCache, QueryConfig, QueryContext, QueryJobId, QueryMap, QuerySideEffects, QueryStackFrame, force_query, }; -use rustc_query_system::{LayoutOfDepth, QueryOverflow}; +use rustc_query_system::{QueryOverflow, QueryOverflowNote}; use rustc_serialize::{Decodable, Encodable}; use rustc_session::Limit; use rustc_span::def_id::LOCAL_CRATE; @@ -153,14 +153,7 @@ impl QueryContext for QueryCtxt<'_> { } fn depth_limit_error(self, job: QueryJobId) { - let mut span = None; - let mut layout_of_depth = None; - if let Some((info, depth)) = - job.try_find_layout_root(self.collect_active_jobs(), dep_kinds::layout_of) - { - span = Some(info.job.span); - layout_of_depth = Some(LayoutOfDepth { desc: info.query.description, depth }); - } + let (info, depth) = job.find_dep_kind_root(self.collect_active_jobs()); let suggested_limit = match self.recursion_limit() { Limit(0) => Limit(2), @@ -168,8 +161,8 @@ impl QueryContext for QueryCtxt<'_> { }; self.sess.dcx().emit_fatal(QueryOverflow { - span, - layout_of_depth, + span: info.job.span, + note: QueryOverflowNote { desc: info.query.description, depth }, suggested_limit, crate_name: self.crate_name(LOCAL_CRATE), }); diff --git a/compiler/rustc_query_system/messages.ftl b/compiler/rustc_query_system/messages.ftl index d7ab7557511..f48dc60afa0 100644 --- a/compiler/rustc_query_system/messages.ftl +++ b/compiler/rustc_query_system/messages.ftl @@ -21,7 +21,7 @@ query_system_increment_compilation = internal compiler error: encountered increm query_system_increment_compilation_note1 = Please follow the instructions below to create a bug report with the provided information query_system_increment_compilation_note2 = See <https://github.com/rust-lang/rust/issues/84970> for more information -query_system_layout_of_depth = query depth increased by {$depth} when {$desc} +query_system_overflow_note = query depth increased by {$depth} when {$desc} query_system_query_overflow = queries overflow the depth limit! 
.help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`) diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 4b47ce8389c..fa095b10884 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -376,25 +376,8 @@ impl<D: Deps> DepGraphData<D> { }; let dcx = cx.dep_context(); - let hashing_timer = dcx.profiler().incr_result_hashing(); - let current_fingerprint = - hash_result.map(|f| dcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, &result))); - - // Intern the new `DepNode`. - let (dep_node_index, prev_and_color) = - self.current.intern_node(&self.previous, key, edges, current_fingerprint); - - hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); - - if let Some((prev_index, color)) = prev_and_color { - debug_assert!( - self.colors.get(prev_index).is_none(), - "DepGraph::with_task() - Duplicate DepNodeColor \ - insertion for {key:?}" - ); - - self.colors.insert(prev_index, color); - } + let dep_node_index = + self.hash_result_and_intern_node(dcx, key, edges, &result, hash_result); (result, dep_node_index) } @@ -462,6 +445,38 @@ impl<D: Deps> DepGraphData<D> { (result, dep_node_index) } + + /// Intern the new `DepNode` with the dependencies up-to-now. + fn hash_result_and_intern_node<Ctxt: DepContext<Deps = D>, R>( + &self, + cx: &Ctxt, + node: DepNode, + edges: EdgesVec, + result: &R, + hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, + ) -> DepNodeIndex { + let hashing_timer = cx.profiler().incr_result_hashing(); + let current_fingerprint = hash_result.map(|hash_result| { + cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result)) + }); + + // Intern the new `DepNode` with the dependencies up-to-now. + let (dep_node_index, prev_and_color) = + self.current.intern_node(&self.previous, node, edges, current_fingerprint); + + hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); + + if let Some((prev_index, color)) = prev_and_color { + debug_assert!( + self.colors.get(prev_index).is_none(), + "DepGraph::with_task() - Duplicate DepNodeColor insertion for {node:?}", + ); + + self.colors.insert(prev_index, color); + } + + dep_node_index + } } impl<D: Deps> DepGraph<D> { @@ -536,11 +551,10 @@ impl<D: Deps> DepGraph<D> { /// FIXME: If the code is changed enough for this node to be marked before requiring the /// caller's node, we suppose that those changes will be enough to mark this node red and /// force a recomputation using the "normal" way. - pub fn with_feed_task<Ctxt: DepContext<Deps = D>, A: Debug, R: Debug>( + pub fn with_feed_task<Ctxt: DepContext<Deps = D>, R: Debug>( &self, node: DepNode, cx: Ctxt, - key: A, result: &R, hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, ) -> DepNodeIndex { @@ -588,27 +602,7 @@ impl<D: Deps> DepGraph<D> { } }); - let hashing_timer = cx.profiler().incr_result_hashing(); - let current_fingerprint = hash_result.map(|hash_result| { - cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result)) - }); - - // Intern the new `DepNode` with the dependencies up-to-now. 
- let (dep_node_index, prev_and_color) = - data.current.intern_node(&data.previous, node, edges, current_fingerprint); - - hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); - - if let Some((prev_index, color)) = prev_and_color { - debug_assert!( - data.colors.get(prev_index).is_none(), - "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}", - ); - - data.colors.insert(prev_index, color); - } - - dep_node_index + data.hash_result_and_intern_node(&cx, node, edges, result, hash_result) } else { // Incremental compilation is turned off. We just execute the task // without tracking. We still provide a dep-node index that uniquely diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs index 860f2e66915..5108ecaeea3 100644 --- a/compiler/rustc_query_system/src/error.rs +++ b/compiler/rustc_query_system/src/error.rs @@ -82,16 +82,16 @@ pub(crate) struct IncrementCompilation { #[diag(query_system_query_overflow)] pub struct QueryOverflow { #[primary_span] - pub span: Option<Span>, + pub span: Span, #[subdiagnostic] - pub layout_of_depth: Option<LayoutOfDepth>, + pub note: QueryOverflowNote, pub suggested_limit: Limit, pub crate_name: Symbol, } #[derive(Subdiagnostic)] -#[note(query_system_layout_of_depth)] -pub struct LayoutOfDepth { +#[note(query_system_overflow_note)] +pub struct QueryOverflowNote { pub desc: String, pub depth: usize, } diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs index a85e8a55a21..ee984095ad8 100644 --- a/compiler/rustc_query_system/src/lib.rs +++ b/compiler/rustc_query_system/src/lib.rs @@ -16,7 +16,7 @@ pub mod ich; pub mod query; mod values; -pub use error::{HandleCycleError, LayoutOfDepth, QueryOverflow}; +pub use error::{HandleCycleError, QueryOverflow, QueryOverflowNote}; pub use values::Value; rustc_fluent_macro::fluent_messages! { "../messages.ftl" } diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index 2a7d759ab35..3e179c61f39 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -15,7 +15,7 @@ use rustc_span::{DUMMY_SP, Span}; use crate::dep_graph::DepContext; use crate::error::CycleStack; use crate::query::plumbing::CycleError; -use crate::query::{DepKind, QueryContext, QueryStackFrame}; +use crate::query::{QueryContext, QueryStackFrame}; /// Represents a span and a query key. 
#[derive(Clone, Debug)] @@ -136,20 +136,18 @@ impl QueryJobId { #[cold] #[inline(never)] - pub fn try_find_layout_root( - &self, - query_map: QueryMap, - layout_of_kind: DepKind, - ) -> Option<(QueryJobInfo, usize)> { - let mut last_layout = None; - let mut current_id = Some(*self); - let mut depth = 0; + pub fn find_dep_kind_root(&self, query_map: QueryMap) -> (QueryJobInfo, usize) { + let mut depth = 1; + let info = query_map.get(&self).unwrap(); + let dep_kind = info.query.dep_kind; + let mut current_id = info.job.parent; + let mut last_layout = (info.clone(), depth); while let Some(id) = current_id { let info = query_map.get(&id).unwrap(); - if info.query.dep_kind == layout_of_kind { + if info.query.dep_kind == dep_kind { depth += 1; - last_layout = Some((info.clone(), depth)); + last_layout = (info.clone(), depth); } current_id = info.job.parent; } diff --git a/compiler/rustc_resolve/src/diagnostics.rs b/compiler/rustc_resolve/src/diagnostics.rs index 9795299ed6d..dc26d4de57a 100644 --- a/compiler/rustc_resolve/src/diagnostics.rs +++ b/compiler/rustc_resolve/src/diagnostics.rs @@ -1183,7 +1183,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { let in_module_is_extern = !in_module.def_id().is_local(); in_module.for_each_child(self, |this, ident, ns, name_binding| { // avoid non-importable candidates - if !name_binding.is_importable() { + if !name_binding.is_importable() + // FIXME(import_trait_associated_functions): remove this when `import_trait_associated_functions` is stable + || name_binding.is_assoc_const_or_fn() + && !this.tcx.features().import_trait_associated_functions() + { return; } diff --git a/compiler/rustc_resolve/src/imports.rs b/compiler/rustc_resolve/src/imports.rs index 5b1d8d622bd..cad45d3c293 100644 --- a/compiler/rustc_resolve/src/imports.rs +++ b/compiler/rustc_resolve/src/imports.rs @@ -17,9 +17,10 @@ use rustc_session::lint::builtin::{ AMBIGUOUS_GLOB_REEXPORTS, HIDDEN_GLOB_REEXPORTS, PUB_USE_OF_PRIVATE_EXTERN_CRATE, REDUNDANT_IMPORTS, UNUSED_IMPORTS, }; +use rustc_session::parse::feature_err; use rustc_span::edit_distance::find_best_match_for_name; use rustc_span::hygiene::LocalExpnId; -use rustc_span::{Ident, Span, Symbol, kw}; +use rustc_span::{Ident, Span, Symbol, kw, sym}; use smallvec::SmallVec; use tracing::debug; @@ -829,6 +830,17 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // Don't update the resolution, because it was never added. Err(Determined) if target.name == kw::Underscore => {} Ok(binding) if binding.is_importable() => { + if binding.is_assoc_const_or_fn() + && !this.tcx.features().import_trait_associated_functions() + { + feature_err( + this.tcx.sess, + sym::import_trait_associated_functions, + import.span, + "`use` associated items of traits is unstable", + ) + .emit(); + } let imported_binding = this.import(binding, import); target_bindings[ns].set(Some(imported_binding)); this.define(parent, target, ns, imported_binding); diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 7324d3fe786..1a0292ebbde 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -3960,7 +3960,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { match res { Res::SelfCtor(_) // See #70549. | Res::Def( - DefKind::Ctor(_, CtorKind::Const) | DefKind::Const | DefKind::ConstParam, + DefKind::Ctor(_, CtorKind::Const) | DefKind::Const | DefKind::AssocConst | DefKind::ConstParam, _, ) if is_syntactic_ambiguity => { // Disambiguate in favor of a unit struct/variant or constant pattern. 
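// For illustration only, not lines from the commit: a minimal stable-Rust sketch of the
// disambiguation rule that the `DefKind::AssocConst` arms above extend to associated
// consts, which become importable by bare name under the unstable
// `import_trait_associated_functions` feature. The names below are illustrative.

const MAX: u32 = 10;

fn describe(n: u32) -> &'static str {
    match n {
        // `MAX` resolves to the constant above, so this arm is a constant pattern that
        // only matches `10`; it does not create a fresh binding shadowing the constant.
        MAX => "at the limit",
        _ => "not at the limit",
    }
}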
@@ -3969,7 +3969,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { } Some(res) } - Res::Def(DefKind::Ctor(..) | DefKind::Const | DefKind::Static { .. }, _) => { + Res::Def(DefKind::Ctor(..) | DefKind::Const | DefKind::AssocConst | DefKind::Static { .. }, _) => { // This is unambiguously a fresh binding, either syntactically // (e.g., `IDENT @ PAT` or `ref IDENT`) or because `IDENT` resolves // to something unusable as a pattern (e.g., constructor function), @@ -4005,7 +4005,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { ); None } - Res::Def(DefKind::Fn, _) | Res::Local(..) | Res::Err => { + Res::Def(DefKind::Fn | DefKind::AssocFn, _) | Res::Local(..) | Res::Err => { // These entities are explicitly allowed to be shadowed by fresh bindings. None } @@ -5019,12 +5019,13 @@ struct ItemInfoCollector<'a, 'ra, 'tcx> { } impl ItemInfoCollector<'_, '_, '_> { - fn collect_fn_info(&mut self, sig: &FnSig, id: NodeId) { + fn collect_fn_info(&mut self, sig: &FnSig, id: NodeId, attrs: &[Attribute]) { let sig = DelegationFnSig { header: sig.header, param_count: sig.decl.inputs.len(), has_self: sig.decl.has_self(), c_variadic: sig.decl.c_variadic(), + target_feature: attrs.iter().any(|attr| attr.has_name(sym::target_feature)), }; self.r.delegation_fn_sigs.insert(self.r.local_def_id(id), sig); } @@ -5043,7 +5044,7 @@ impl<'ast> Visitor<'ast> for ItemInfoCollector<'_, '_, '_> { | ItemKind::Trait(box Trait { ref generics, .. }) | ItemKind::TraitAlias(ref generics, _) => { if let ItemKind::Fn(box Fn { ref sig, .. }) = &item.kind { - self.collect_fn_info(sig, item.id); + self.collect_fn_info(sig, item.id, &item.attrs); } let def_id = self.r.local_def_id(item.id); @@ -5076,7 +5077,7 @@ impl<'ast> Visitor<'ast> for ItemInfoCollector<'_, '_, '_> { fn visit_assoc_item(&mut self, item: &'ast AssocItem, ctxt: AssocCtxt) { if let AssocItemKind::Fn(box Fn { ref sig, .. }) = &item.kind { - self.collect_fn_info(sig, item.id); + self.collect_fn_info(sig, item.id, &item.attrs); } visit::walk_assoc_item(self, item, ctxt); } diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 6ee02e9f47f..2936d722aa7 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -1130,7 +1130,9 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { let None = following_seg else { return }; for rib in self.ribs[ValueNS].iter().rev() { for (def_id, spans) in &rib.patterns_with_skipped_bindings { - if let Some(fields) = self.r.field_idents(*def_id) { + if let DefKind::Struct | DefKind::Variant = self.r.tcx.def_kind(*def_id) + && let Some(fields) = self.r.field_idents(*def_id) + { for field in fields { if field.name == segment.ident.name { if spans.iter().all(|(_, had_error)| had_error.is_err()) { @@ -3155,7 +3157,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { } } - #[cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] + #[allow(rustc::symbol_intern_string_literal)] let existing_name = match &in_scope_lifetimes[..] 
{ [] => Symbol::intern("'a"), [(existing, _)] => existing.name, diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index cc9ed566eda..8e457e68eec 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -920,10 +920,13 @@ impl<'ra> NameBindingData<'ra> { } fn is_importable(&self) -> bool { - !matches!( - self.res(), - Res::Def(DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy, _) - ) + !matches!(self.res(), Res::Def(DefKind::AssocTy, _)) + } + + // FIXME(import_trait_associated_functions): associate `const` or `fn` are not importable unless + // the feature `import_trait_associated_functions` is enable + fn is_assoc_const_or_fn(&self) -> bool { + matches!(self.res(), Res::Def(DefKind::AssocConst | DefKind::AssocFn, _)) } fn macro_kind(&self) -> Option<MacroKind> { diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index b24e343c58d..25e35fead7e 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -1031,6 +1031,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { is_soft, span, soft_handler, + stability::UnstableKind::Regular, ); } } diff --git a/compiler/rustc_resolve/src/rustdoc.rs b/compiler/rustc_resolve/src/rustdoc.rs index 84e43d0e016..7998596c59e 100644 --- a/compiler/rustc_resolve/src/rustdoc.rs +++ b/compiler/rustc_resolve/src/rustdoc.rs @@ -347,7 +347,7 @@ pub fn strip_generics_from_path(path_str: &str) -> Result<Box<str>, MalformedGen /// Returns whether the first doc-comment is an inner attribute. /// -//// If there are no doc-comments, return true. +/// If there are no doc-comments, return true. /// FIXME(#78591): Support both inner and outer attributes on the same item. pub fn inner_docs(attrs: &[impl AttributeExt]) -> bool { attrs diff --git a/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/encode.rs b/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/encode.rs index 895259d52a7..09648e28df4 100644 --- a/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/encode.rs +++ b/compiler/rustc_sanitizers/src/cfi/typeid/itanium_cxx_abi/encode.rs @@ -448,10 +448,9 @@ pub(crate) fn encode_ty<'tcx>( if let Some(cfi_encoding) = tcx.get_attr(def_id, sym::cfi_encoding) { // Use user-defined CFI encoding for type if let Some(value_str) = cfi_encoding.value_str() { - let value_str = value_str.to_string(); - let str = value_str.trim(); - if !str.is_empty() { - s.push_str(str); + let value_str = value_str.as_str().trim(); + if !value_str.is_empty() { + s.push_str(value_str); // Don't compress user-defined builtin types (see // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-builtin and // https://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling-compression). @@ -459,7 +458,7 @@ pub(crate) fn encode_ty<'tcx>( "v", "w", "b", "c", "a", "h", "s", "t", "i", "j", "l", "m", "x", "y", "n", "o", "f", "d", "e", "g", "z", "Dh", ]; - if !builtin_types.contains(&str) { + if !builtin_types.contains(&value_str) { compress(dict, DictKey::Ty(ty, TyQ::None), &mut s); } } else { diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 5c36c986490..21afb7df7cb 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -132,13 +132,6 @@ pub enum LtoCli { } /// The different settings that the `-C instrument-coverage` flag can have. 
-/// -/// Coverage instrumentation now supports combining `-C instrument-coverage` -/// with compiler and linker optimization (enabled with `-O` or `-C opt-level=1` -/// and higher). Nevertheless, there are many variables, depending on options -/// selected, code structure, and enabled attributes. If errors are encountered, -/// either while compiling or when generating `llvm-cov show` reports, consider -/// lowering the optimization level, or including/excluding `-C link-dead-code`. #[derive(Clone, Copy, PartialEq, Hash, Debug)] pub enum InstrumentCoverage { /// `-C instrument-coverage=no` (or `off`, `false` etc.) @@ -2892,6 +2885,7 @@ pub(crate) mod dep_tracking { use std::num::NonZero; use std::path::PathBuf; + use rustc_abi::Align; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::stable_hasher::Hash64; use rustc_errors::LanguageIdentifier; @@ -3012,6 +3006,7 @@ pub(crate) mod dep_tracking { InliningThreshold, FunctionReturn, WasmCAbi, + Align, ); impl<T1, T2> DepTrackingHash for (T1, T2) diff --git a/compiler/rustc_session/src/options.rs b/compiler/rustc_session/src/options.rs index c1fba4c513d..63aaa3abc8e 100644 --- a/compiler/rustc_session/src/options.rs +++ b/compiler/rustc_session/src/options.rs @@ -4,6 +4,7 @@ use std::num::{IntErrorKind, NonZero}; use std::path::PathBuf; use std::str; +use rustc_abi::Align; use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::profiling::TimePassesFormat; use rustc_data_structures::stable_hasher::Hash64; @@ -482,6 +483,7 @@ mod desc { pub(crate) const parse_wasm_c_abi: &str = "`legacy` or `spec`"; pub(crate) const parse_mir_include_spans: &str = "either a boolean (`yes`, `no`, `on`, `off`, etc), or `nll` (default: `nll`)"; + pub(crate) const parse_align: &str = "a number that is a power of 2 between 1 and 2^29"; } pub mod parse { @@ -1561,6 +1563,21 @@ pub mod parse { true } + + pub(crate) fn parse_align(slot: &mut Option<Align>, v: Option<&str>) -> bool { + let mut bytes = 0u64; + if !parse_number(&mut bytes, v) { + return false; + } + + let Ok(align) = Align::from_bytes(bytes) else { + return false; + }; + + *slot = Some(align); + + true + } } options! { @@ -1621,7 +1638,7 @@ options! { "extra arguments to append to the linker invocation (space separated)"), #[rustc_lint_opt_deny_field_access("use `Session::link_dead_code` instead of this field")] link_dead_code: Option<bool> = (None, parse_opt_bool, [TRACKED], - "keep dead code at link time (useful for code coverage) (default: no)"), + "try to generate and link dead code (default: no)"), link_self_contained: LinkSelfContained = (LinkSelfContained::default(), parse_link_self_contained, [UNTRACKED], "control whether to link Rust provided C objects/libraries or rely \ on a C toolchain or linker installed in the system"), @@ -1921,6 +1938,8 @@ options! { "gather metadata statistics (default: no)"), metrics_dir: Option<PathBuf> = (None, parse_opt_pathbuf, [UNTRACKED], "the directory metrics emitted by rustc are dumped into (implicitly enables default set of metrics)"), + min_function_alignment: Option<Align> = (None, parse_align, [TRACKED], + "align all functions to at least this many bytes. 
Must be a power of 2"), mir_emit_retag: bool = (false, parse_bool, [TRACKED], "emit Retagging MIR statements, interpreted e.g., by miri; implies -Zmir-opt-level=0 \ (default: no)"), diff --git a/compiler/rustc_smir/src/rustc_internal/internal.rs b/compiler/rustc_smir/src/rustc_internal/internal.rs index c465367b6b9..3bc896dd7ef 100644 --- a/compiler/rustc_smir/src/rustc_internal/internal.rs +++ b/compiler/rustc_smir/src/rustc_internal/internal.rs @@ -472,6 +472,7 @@ impl RustcInternal for Abi { Abi::PtxKernel => rustc_abi::ExternAbi::PtxKernel, Abi::Msp430Interrupt => rustc_abi::ExternAbi::Msp430Interrupt, Abi::X86Interrupt => rustc_abi::ExternAbi::X86Interrupt, + Abi::GpuKernel => rustc_abi::ExternAbi::GpuKernel, Abi::EfiApi => rustc_abi::ExternAbi::EfiApi, Abi::AvrInterrupt => rustc_abi::ExternAbi::AvrInterrupt, Abi::AvrNonBlockingInterrupt => rustc_abi::ExternAbi::AvrNonBlockingInterrupt, diff --git a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs index b39a15a8633..4a03ff4beae 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/abi.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/abi.rs @@ -113,6 +113,7 @@ impl<'tcx> Stable<'tcx> for callconv::Conv { Conv::X86VectorCall => CallConvention::X86VectorCall, Conv::X86_64SysV => CallConvention::X86_64SysV, Conv::X86_64Win64 => CallConvention::X86_64Win64, + Conv::GpuKernel => CallConvention::GpuKernel, Conv::AvrInterrupt => CallConvention::AvrInterrupt, Conv::AvrNonBlockingInterrupt => CallConvention::AvrNonBlockingInterrupt, Conv::RiscvInterrupt { .. } => CallConvention::RiscvInterrupt, diff --git a/compiler/rustc_smir/src/rustc_smir/convert/mir.rs b/compiler/rustc_smir/src/rustc_smir/convert/mir.rs index de933952c6a..a5a17b4b573 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/mir.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/mir.rs @@ -181,6 +181,7 @@ impl<'tcx> Stable<'tcx> for mir::Rvalue<'tcx> { RawPtr(mutability, place) => { stable_mir::mir::Rvalue::AddressOf(mutability.stable(tables), place.stable(tables)) } + Len(place) => stable_mir::mir::Rvalue::Len(place.stable(tables)), Cast(cast_kind, op, ty) => stable_mir::mir::Rvalue::Cast( cast_kind.stable(tables), op.stable(tables), diff --git a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs index e15dad78c69..a7e122639ea 100644 --- a/compiler/rustc_smir/src/rustc_smir/convert/ty.rs +++ b/compiler/rustc_smir/src/rustc_smir/convert/ty.rs @@ -911,6 +911,7 @@ impl<'tcx> Stable<'tcx> for rustc_abi::ExternAbi { ExternAbi::Win64 { unwind } => Abi::Win64 { unwind }, ExternAbi::SysV64 { unwind } => Abi::SysV64 { unwind }, ExternAbi::PtxKernel => Abi::PtxKernel, + ExternAbi::GpuKernel => Abi::GpuKernel, ExternAbi::Msp430Interrupt => Abi::Msp430Interrupt, ExternAbi::X86Interrupt => Abi::X86Interrupt, ExternAbi::EfiApi => Abi::EfiApi, diff --git a/compiler/rustc_span/src/analyze_source_file.rs b/compiler/rustc_span/src/analyze_source_file.rs index 28ce883daee..fba20566580 100644 --- a/compiler/rustc_span/src/analyze_source_file.rs +++ b/compiler/rustc_span/src/analyze_source_file.rs @@ -29,6 +29,7 @@ pub(crate) fn analyze_source_file(src: &str) -> (Vec<RelativeBytePos>, Vec<Multi (lines, multi_byte_chars) } +#[cfg(bootstrap)] cfg_match! { cfg(any(target_arch = "x86", target_arch = "x86_64")) => { fn analyze_source_file_dispatch( @@ -185,6 +186,165 @@ cfg_match! { } } } + +#[cfg(not(bootstrap))] +cfg_match! 
{ + any(target_arch = "x86", target_arch = "x86_64") => { + fn analyze_source_file_dispatch( + src: &str, + lines: &mut Vec<RelativeBytePos>, + multi_byte_chars: &mut Vec<MultiByteChar>, + ) { + if is_x86_feature_detected!("sse2") { + unsafe { + analyze_source_file_sse2(src, lines, multi_byte_chars); + } + } else { + analyze_source_file_generic( + src, + src.len(), + RelativeBytePos::from_u32(0), + lines, + multi_byte_chars, + ); + } + } + + /// Checks 16 byte chunks of text at a time. If the chunk contains + /// something other than printable ASCII characters and newlines, the + /// function falls back to the generic implementation. Otherwise it uses + /// SSE2 intrinsics to quickly find all newlines. + #[target_feature(enable = "sse2")] + unsafe fn analyze_source_file_sse2( + src: &str, + lines: &mut Vec<RelativeBytePos>, + multi_byte_chars: &mut Vec<MultiByteChar>, + ) { + #[cfg(target_arch = "x86")] + use std::arch::x86::*; + #[cfg(target_arch = "x86_64")] + use std::arch::x86_64::*; + + const CHUNK_SIZE: usize = 16; + + let src_bytes = src.as_bytes(); + + let chunk_count = src.len() / CHUNK_SIZE; + + // This variable keeps track of where we should start decoding a + // chunk. If a multi-byte character spans across chunk boundaries, + // we need to skip that part in the next chunk because we already + // handled it. + let mut intra_chunk_offset = 0; + + for chunk_index in 0..chunk_count { + let ptr = src_bytes.as_ptr() as *const __m128i; + // We don't know if the pointer is aligned to 16 bytes, so we + // use `loadu`, which supports unaligned loading. + let chunk = unsafe { _mm_loadu_si128(ptr.add(chunk_index)) }; + + // For character in the chunk, see if its byte value is < 0, which + // indicates that it's part of a UTF-8 char. + let multibyte_test = unsafe { _mm_cmplt_epi8(chunk, _mm_set1_epi8(0)) }; + // Create a bit mask from the comparison results. + let multibyte_mask = unsafe { _mm_movemask_epi8(multibyte_test) }; + + // If the bit mask is all zero, we only have ASCII chars here: + if multibyte_mask == 0 { + assert!(intra_chunk_offset == 0); + + // Check if there are any control characters in the chunk. All + // control characters that we can encounter at this point have a + // byte value less than 32 or ... + let control_char_test0 = unsafe { _mm_cmplt_epi8(chunk, _mm_set1_epi8(32)) }; + let control_char_mask0 = unsafe { _mm_movemask_epi8(control_char_test0) }; + + // ... it's the ASCII 'DEL' character with a value of 127. + let control_char_test1 = unsafe { _mm_cmpeq_epi8(chunk, _mm_set1_epi8(127)) }; + let control_char_mask1 = unsafe { _mm_movemask_epi8(control_char_test1) }; + + let control_char_mask = control_char_mask0 | control_char_mask1; + + if control_char_mask != 0 { + // Check for newlines in the chunk + let newlines_test = unsafe { _mm_cmpeq_epi8(chunk, _mm_set1_epi8(b'\n' as i8)) }; + let newlines_mask = unsafe { _mm_movemask_epi8(newlines_test) }; + + if control_char_mask == newlines_mask { + // All control characters are newlines, record them + let mut newlines_mask = 0xFFFF0000 | newlines_mask as u32; + let output_offset = RelativeBytePos::from_usize(chunk_index * CHUNK_SIZE + 1); + + loop { + let index = newlines_mask.trailing_zeros(); + + if index >= CHUNK_SIZE as u32 { + // We have arrived at the end of the chunk. + break; + } + + lines.push(RelativeBytePos(index) + output_offset); + + // Clear the bit, so we can find the next one. + newlines_mask &= (!1) << index; + } + + // We are done for this chunk. 
All control characters were + // newlines and we took care of those. + continue; + } else { + // Some of the control characters are not newlines, + // fall through to the slow path below. + } + } else { + // No control characters, nothing to record for this chunk + continue; + } + } + + // The slow path. + // There are control chars in here, fallback to generic decoding. + let scan_start = chunk_index * CHUNK_SIZE + intra_chunk_offset; + intra_chunk_offset = analyze_source_file_generic( + &src[scan_start..], + CHUNK_SIZE - intra_chunk_offset, + RelativeBytePos::from_usize(scan_start), + lines, + multi_byte_chars, + ); + } + + // There might still be a tail left to analyze + let tail_start = chunk_count * CHUNK_SIZE + intra_chunk_offset; + if tail_start < src.len() { + analyze_source_file_generic( + &src[tail_start..], + src.len() - tail_start, + RelativeBytePos::from_usize(tail_start), + lines, + multi_byte_chars, + ); + } + } + } + _ => { + // The target (or compiler version) does not support SSE2 ... + fn analyze_source_file_dispatch( + src: &str, + lines: &mut Vec<RelativeBytePos>, + multi_byte_chars: &mut Vec<MultiByteChar>, + ) { + analyze_source_file_generic( + src, + src.len(), + RelativeBytePos::from_u32(0), + lines, + multi_byte_chars, + ); + } + } +} + // `scan_len` determines the number of bytes in `src` to scan. Note that the // function can read past `scan_len` if a multi-byte character start within the // range but extends past it. The overflow is returned by the function. diff --git a/compiler/rustc_span/src/edit_distance/tests.rs b/compiler/rustc_span/src/edit_distance/tests.rs index 9540f934d7e..8372856c0d3 100644 --- a/compiler/rustc_span/src/edit_distance/tests.rs +++ b/compiler/rustc_span/src/edit_distance/tests.rs @@ -1,4 +1,4 @@ -#![cfg_attr(not(bootstrap), allow(rustc::symbol_intern_string_literal))] +#![allow(rustc::symbol_intern_string_literal)] use super::*; diff --git a/compiler/rustc_span/src/hygiene.rs b/compiler/rustc_span/src/hygiene.rs index 3cdae437b7d..a5826137181 100644 --- a/compiler/rustc_span/src/hygiene.rs +++ b/compiler/rustc_span/src/hygiene.rs @@ -1163,9 +1163,6 @@ pub enum DesugaringKind { WhileLoop, /// `async Fn()` bound modifier BoundModifier, - /// Marks a `&raw const *_1` needed as part of getting the length of a mutable - /// slice for the bounds check, so that MIRI's retag handling can recognize it. - IndexBoundsCheckReborrow, } impl DesugaringKind { @@ -1182,7 +1179,6 @@ impl DesugaringKind { DesugaringKind::ForLoop => "`for` loop", DesugaringKind::WhileLoop => "`while` loop", DesugaringKind::BoundModifier => "trait bound modifier", - DesugaringKind::IndexBoundsCheckReborrow => "slice indexing", } } } diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index d5c2a337b4c..51cfbf59471 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -67,7 +67,7 @@ mod span_encoding; pub use span_encoding::{DUMMY_SP, Span}; pub mod symbol; -pub use symbol::{Ident, MacroRulesNormalizedIdent, Symbol, kw, sym}; +pub use symbol::{Ident, MacroRulesNormalizedIdent, STDLIB_STABLE_CRATES, Symbol, kw, sym}; mod analyze_source_file; pub mod fatal_error; diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 0dcf38e3493..f5ce5dbc9d6 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -379,6 +379,7 @@ symbols! 
{ abi_avr_interrupt, abi_c_cmse_nonsecure_call, abi_efiapi, + abi_gpu_kernel, abi_msp430_interrupt, abi_ptx, abi_riscv_interrupt, @@ -1095,6 +1096,7 @@ symbols! { import, import_name_type, import_shadowing, + import_trait_associated_functions, imported_main, in_band_lifetimes, include, @@ -1285,6 +1287,7 @@ symbols! { mir_drop, mir_field, mir_goto, + mir_len, mir_make_place, mir_move, mir_offset, @@ -1731,6 +1734,7 @@ symbols! { rustc_error, rustc_evaluate_where_clauses, rustc_expected_cgu_reuse, + rustc_force_inline, rustc_has_incoherent_inherent_impls, rustc_hidden_type_of_opaques, rustc_if_this_changed, @@ -2240,6 +2244,10 @@ symbols! { } } +/// Symbols for crates that are part of the stable standard library: `std`, `core`, `alloc`, and +/// `proc_macro`. +pub const STDLIB_STABLE_CRATES: &[Symbol] = &[sym::std, sym::core, sym::alloc, sym::proc_macro]; + #[derive(Copy, Clone, Eq, HashStable_Generic, Encodable, Decodable)] pub struct Ident { pub name: Symbol, diff --git a/compiler/rustc_symbol_mangling/src/legacy.rs b/compiler/rustc_symbol_mangling/src/legacy.rs index 0d6d8488a23..879f3fac21f 100644 --- a/compiler/rustc_symbol_mangling/src/legacy.rs +++ b/compiler/rustc_symbol_mangling/src/legacy.rs @@ -19,15 +19,15 @@ pub(super) fn mangle<'tcx>( let def_id = instance.def_id(); // We want to compute the "type" of this item. Unfortunately, some - // kinds of items (e.g., closures) don't have an entry in the - // item-type array. So walk back up the find the closest parent - // that DOES have an entry. + // kinds of items (e.g., synthetic static allocations from const eval) + // don't have a proper implementation for the `type_of` query. So walk + // back up the find the closest parent that DOES have a type. let mut ty_def_id = def_id; let instance_ty; loop { let key = tcx.def_key(ty_def_id); match key.disambiguated_data.data { - DefPathData::TypeNs(_) | DefPathData::ValueNs(_) => { + DefPathData::TypeNs(_) | DefPathData::ValueNs(_) | DefPathData::Closure => { instance_ty = tcx.type_of(ty_def_id).instantiate_identity(); debug!(?instance_ty); break; @@ -383,14 +383,47 @@ impl<'tcx> Printer<'tcx> for SymbolPrinter<'tcx> { &mut self, impl_def_id: DefId, args: &'tcx [GenericArg<'tcx>], - mut self_ty: Ty<'tcx>, - mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { - let mut typing_env = ty::TypingEnv::post_analysis(self.tcx, impl_def_id); - if !args.is_empty() { - typing_env.param_env = - ty::EarlyBinder::bind(typing_env.param_env).instantiate(self.tcx, args); - } + let self_ty = self.tcx.type_of(impl_def_id); + let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id); + let generics = self.tcx.generics_of(impl_def_id); + // We have two cases to worry about here: + // 1. We're printing a nested item inside of an impl item, like an inner + // function inside of a method. Due to the way that def path printing works, + // we'll render this something like `<Ty as Trait>::method::inner_fn` + // but we have no substs for this impl since it's not really inheriting + // generics from the outer item. We need to use the identity substs, and + // to normalize we need to use the correct param-env too. + // 2. We're mangling an item with identity substs. This seems to only happen + // when generating coverage, since we try to generate coverage for unused + // items too, and if something isn't monomorphized then we necessarily don't + // have anything to substitute the instance with. 
+ // NOTE: We don't support mangling partially substituted but still polymorphic + // instances, like `impl<A> Tr<A> for ()` where `A` is substituted w/ `(T,)`. + let (typing_env, mut self_ty, mut impl_trait_ref) = if generics.count() > args.len() + || &args[..generics.count()] + == self + .tcx + .erase_regions(ty::GenericArgs::identity_for_item(self.tcx, impl_def_id)) + .as_slice() + { + ( + ty::TypingEnv::post_analysis(self.tcx, impl_def_id), + self_ty.instantiate_identity(), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate_identity()), + ) + } else { + assert!( + !args.has_non_region_param(), + "should not be mangling partially substituted \ + polymorphic instance: {impl_def_id:?} {args:?}" + ); + ( + ty::TypingEnv::fully_monomorphized(), + self_ty.instantiate(self.tcx, args), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate(self.tcx, args)), + ) + }; match &mut impl_trait_ref { Some(impl_trait_ref) => { @@ -403,7 +436,7 @@ impl<'tcx> Printer<'tcx> for SymbolPrinter<'tcx> { } } - self.default_print_impl_path(impl_def_id, args, self_ty, impl_trait_ref) + self.default_print_impl_path(impl_def_id, self_ty, impl_trait_ref) } } diff --git a/compiler/rustc_symbol_mangling/src/v0.rs b/compiler/rustc_symbol_mangling/src/v0.rs index 0ca47eba5e8..4ddf530a00d 100644 --- a/compiler/rustc_symbol_mangling/src/v0.rs +++ b/compiler/rustc_symbol_mangling/src/v0.rs @@ -14,8 +14,8 @@ use rustc_middle::bug; use rustc_middle::ty::layout::IntegerExt; use rustc_middle::ty::print::{Print, PrintError, Printer}; use rustc_middle::ty::{ - self, EarlyBinder, FloatTy, GenericArg, GenericArgKind, Instance, IntTy, ReifyReason, Ty, - TyCtxt, TypeVisitable, TypeVisitableExt, UintTy, + self, FloatTy, GenericArg, GenericArgKind, Instance, IntTy, ReifyReason, Ty, TyCtxt, + TypeVisitable, TypeVisitableExt, UintTy, }; use rustc_span::kw; @@ -227,17 +227,50 @@ impl<'tcx> Printer<'tcx> for SymbolMangler<'tcx> { &mut self, impl_def_id: DefId, args: &'tcx [GenericArg<'tcx>], - mut self_ty: Ty<'tcx>, - mut impl_trait_ref: Option<ty::TraitRef<'tcx>>, ) -> Result<(), PrintError> { let key = self.tcx.def_key(impl_def_id); let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id }; - let mut typing_env = ty::TypingEnv::post_analysis(self.tcx, impl_def_id); - if !args.is_empty() { - typing_env.param_env = - EarlyBinder::bind(typing_env.param_env).instantiate(self.tcx, args); - } + let self_ty = self.tcx.type_of(impl_def_id); + let impl_trait_ref = self.tcx.impl_trait_ref(impl_def_id); + let generics = self.tcx.generics_of(impl_def_id); + // We have two cases to worry about here: + // 1. We're printing a nested item inside of an impl item, like an inner + // function inside of a method. Due to the way that def path printing works, + // we'll render this something like `<Ty as Trait>::method::inner_fn` + // but we have no substs for this impl since it's not really inheriting + // generics from the outer item. We need to use the identity substs, and + // to normalize we need to use the correct param-env too. + // 2. We're mangling an item with identity substs. This seems to only happen + // when generating coverage, since we try to generate coverage for unused + // items too, and if something isn't monomorphized then we necessarily don't + // have anything to substitute the instance with. + // NOTE: We don't support mangling partially substituted but still polymorphic + // instances, like `impl<A> Tr<A> for ()` where `A` is substituted w/ `(T,)`. 
+ let (typing_env, mut self_ty, mut impl_trait_ref) = if generics.count() > args.len() + || &args[..generics.count()] + == self + .tcx + .erase_regions(ty::GenericArgs::identity_for_item(self.tcx, impl_def_id)) + .as_slice() + { + ( + ty::TypingEnv::post_analysis(self.tcx, impl_def_id), + self_ty.instantiate_identity(), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate_identity()), + ) + } else { + assert!( + !args.has_non_region_param(), + "should not be mangling partially substituted \ + polymorphic instance: {impl_def_id:?} {args:?}" + ); + ( + ty::TypingEnv::fully_monomorphized(), + self_ty.instantiate(self.tcx, args), + impl_trait_ref.map(|impl_trait_ref| impl_trait_ref.instantiate(self.tcx, args)), + ) + }; match &mut impl_trait_ref { Some(impl_trait_ref) => { diff --git a/compiler/rustc_target/src/callconv/mod.rs b/compiler/rustc_target/src/callconv/mod.rs index 746e8173807..41b78d9121d 100644 --- a/compiler/rustc_target/src/callconv/mod.rs +++ b/compiler/rustc_target/src/callconv/mod.rs @@ -1,7 +1,7 @@ use std::str::FromStr; use std::{fmt, iter}; -pub use rustc_abi::{Reg, RegKind}; +pub use rustc_abi::{ExternAbi, Reg, RegKind}; use rustc_macros::HashStable_Generic; use rustc_span::Symbol; @@ -9,8 +9,7 @@ use crate::abi::{ self, AddressSpace, Align, BackendRepr, HasDataLayout, Pointer, Size, TyAbiInterface, TyAndLayout, }; -use crate::spec::abi::Abi as SpecAbi; -use crate::spec::{self, HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi}; +use crate::spec::{HasTargetSpec, HasWasmCAbiOpt, HasX86AbiOpt, WasmCAbi}; mod aarch64; mod amdgpu; @@ -547,6 +546,8 @@ pub enum Conv { PtxKernel, + GpuKernel, + X86Fastcall, X86Intr, X86Stdcall, @@ -627,20 +628,20 @@ impl<'a, Ty: fmt::Display> fmt::Debug for FnAbi<'a, Ty> { #[derive(Copy, Clone, Debug, HashStable_Generic)] pub enum AdjustForForeignAbiError { /// Target architecture doesn't support "foreign" (i.e. non-Rust) ABIs. - Unsupported { arch: Symbol, abi: spec::abi::Abi }, + Unsupported { arch: Symbol, abi: ExternAbi }, } impl<'a, Ty> FnAbi<'a, Ty> { pub fn adjust_for_foreign_abi<C>( &mut self, cx: &C, - abi: spec::abi::Abi, + abi: ExternAbi, ) -> Result<(), AdjustForForeignAbiError> where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout + HasTargetSpec + HasWasmCAbiOpt + HasX86AbiOpt, { - if abi == spec::abi::Abi::X86Interrupt { + if abi == ExternAbi::X86Interrupt { if let Some(arg) = self.args.first_mut() { arg.pass_by_stack_offset(None); } @@ -651,12 +652,10 @@ impl<'a, Ty> FnAbi<'a, Ty> { match &spec.arch[..] { "x86" => { let (flavor, regparm) = match abi { - spec::abi::Abi::Fastcall { .. } | spec::abi::Abi::Vectorcall { .. } => { + ExternAbi::Fastcall { .. } | ExternAbi::Vectorcall { .. } => { (x86::Flavor::FastcallOrVectorcall, None) } - spec::abi::Abi::C { .. } - | spec::abi::Abi::Cdecl { .. } - | spec::abi::Abi::Stdcall { .. } => { + ExternAbi::C { .. } | ExternAbi::Cdecl { .. } | ExternAbi::Stdcall { .. } => { (x86::Flavor::General, cx.x86_abi_opt().regparm) } _ => (x86::Flavor::General, None), @@ -666,8 +665,10 @@ impl<'a, Ty> FnAbi<'a, Ty> { x86::compute_abi_info(cx, self, opts); } "x86_64" => match abi { - spec::abi::Abi::SysV64 { .. } => x86_64::compute_abi_info(cx, self), - spec::abi::Abi::Win64 { .. } => x86_win64::compute_abi_info(cx, self), + ExternAbi::SysV64 { .. } => x86_64::compute_abi_info(cx, self), + ExternAbi::Win64 { .. } | ExternAbi::Vectorcall { .. 
} => { + x86_win64::compute_abi_info(cx, self) + } _ => { if cx.target_spec().is_like_windows { x86_win64::compute_abi_info(cx, self) @@ -701,7 +702,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { "sparc" => sparc::compute_abi_info(cx, self), "sparc64" => sparc64::compute_abi_info(cx, self), "nvptx64" => { - if cx.target_spec().adjust_abi(abi, self.c_variadic) == spec::abi::Abi::PtxKernel { + if cx.target_spec().adjust_abi(abi, self.c_variadic) == ExternAbi::PtxKernel { nvptx64::compute_ptx_kernel_abi_info(cx, self) } else { nvptx64::compute_abi_info(self) @@ -730,7 +731,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { Ok(()) } - pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: SpecAbi) + pub fn adjust_for_rust_abi<C>(&mut self, cx: &C, abi: ExternAbi) where Ty: TyAbiInterface<'a, C> + Copy, C: HasDataLayout + HasTargetSpec, @@ -821,7 +822,7 @@ impl<'a, Ty> FnAbi<'a, Ty> { // that's how we connect up to LLVM and it's unstable // anyway, we control all calls to it in libstd. BackendRepr::Vector { .. } - if abi != SpecAbi::RustIntrinsic && spec.simd_types_indirect => + if abi != ExternAbi::RustIntrinsic && spec.simd_types_indirect => { arg.make_indirect(); continue; @@ -866,6 +867,7 @@ impl FromStr for Conv { "X86VectorCall" => Ok(Conv::X86VectorCall), "X86_64SysV" => Ok(Conv::X86_64SysV), "X86_64Win64" => Ok(Conv::X86_64Win64), + "GpuKernel" => Ok(Conv::GpuKernel), "AvrInterrupt" => Ok(Conv::AvrInterrupt), "AvrNonBlockingInterrupt" => Ok(Conv::AvrNonBlockingInterrupt), "RiscvInterrupt(machine)" => { diff --git a/compiler/rustc_target/src/callconv/wasm.rs b/compiler/rustc_target/src/callconv/wasm.rs index 3c4cd76a754..d01b59cbb03 100644 --- a/compiler/rustc_target/src/callconv/wasm.rs +++ b/compiler/rustc_target/src/callconv/wasm.rs @@ -1,3 +1,5 @@ +use rustc_abi::{BackendRepr, Float, Integer, Primitive}; + use crate::abi::call::{ArgAbi, FnAbi}; use crate::abi::{HasDataLayout, TyAbiInterface}; @@ -27,6 +29,16 @@ where if ret.layout.is_aggregate() && !unwrap_trivial_aggregate(cx, ret) { ret.make_indirect(); } + + // `long double`, `__int128_t` and `__uint128_t` use an indirect return + if let BackendRepr::Scalar(scalar) = ret.layout.backend_repr { + match scalar.primitive() { + Primitive::Int(Integer::I128, _) | Primitive::Float(Float::F128) => { + ret.make_indirect(); + } + _ => {} + } + } } fn classify_arg<'a, Ty, C>(cx: &C, arg: &mut ArgAbi<'a, Ty>) diff --git a/compiler/rustc_target/src/callconv/x86_win64.rs b/compiler/rustc_target/src/callconv/x86_win64.rs index 83d94cb11ba..0944bda2687 100644 --- a/compiler/rustc_target/src/callconv/x86_win64.rs +++ b/compiler/rustc_target/src/callconv/x86_win64.rs @@ -5,7 +5,7 @@ use crate::spec::HasTargetSpec; // Win64 ABI: https://docs.microsoft.com/en-us/cpp/build/parameter-passing -pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) { +pub(crate) fn compute_abi_info<Ty>(_cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<'_, Ty>) { let fixup = |a: &mut ArgAbi<'_, Ty>| { match a.layout.backend_repr { BackendRepr::Uninhabited | BackendRepr::Memory { sized: false } => {} @@ -40,16 +40,18 @@ pub(crate) fn compute_abi_info<Ty>(cx: &impl HasTargetSpec, fn_abi: &mut FnAbi<' fixup(&mut fn_abi.ret); } for arg in fn_abi.args.iter_mut() { - if arg.is_ignore() { - // x86_64-pc-windows-gnu doesn't ignore ZSTs. 
- if cx.target_spec().os == "windows" - && cx.target_spec().env == "gnu" - && arg.layout.is_zst() - { - arg.make_indirect_from_ignore(); - } + if arg.is_ignore() && arg.layout.is_zst() { + // Windows ABIs do not talk about ZST since such types do not exist in MSVC. + // In that sense we can do whatever we want here, and maybe we should throw an error + // (but of course that would be a massive breaking change now). + // We try to match clang and gcc (which allow ZST is their windows-gnu targets), so we + // pass ZST via pointer indirection. + arg.make_indirect_from_ignore(); continue; } fixup(arg); } + // FIXME: We should likely also do something about ZST return types, similar to above. + // However, that's non-trivial due to `()`. + // See <https://github.com/rust-lang/unsafe-code-guidelines/issues/552>. } diff --git a/compiler/rustc_target/src/json.rs b/compiler/rustc_target/src/json.rs index b09d8d724ef..961cce5d6b9 100644 --- a/compiler/rustc_target/src/json.rs +++ b/compiler/rustc_target/src/json.rs @@ -113,6 +113,7 @@ impl ToJson for crate::abi::call::Conv { Self::X86VectorCall => "X86VectorCall", Self::X86_64SysV => "X86_64SysV", Self::X86_64Win64 => "X86_64Win64", + Self::GpuKernel => "GpuKernel", Self::AvrInterrupt => "AvrInterrupt", Self::AvrNonBlockingInterrupt => "AvrNonBlockingInterrupt", Self::RiscvInterrupt { kind } => { diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 0dc1d795a8e..1f2df7f0168 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -1812,9 +1812,11 @@ supported_targets! { ("aarch64-unknown-illumos", aarch64_unknown_illumos), ("x86_64-pc-windows-gnu", x86_64_pc_windows_gnu), + ("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu), + ("x86_64-win7-windows-gnu", x86_64_win7_windows_gnu), ("i686-pc-windows-gnu", i686_pc_windows_gnu), ("i686-uwp-windows-gnu", i686_uwp_windows_gnu), - ("x86_64-uwp-windows-gnu", x86_64_uwp_windows_gnu), + ("i686-win7-windows-gnu", i686_win7_windows_gnu), ("aarch64-pc-windows-gnullvm", aarch64_pc_windows_gnullvm), ("i686-pc-windows-gnullvm", i686_pc_windows_gnullvm), @@ -2813,12 +2815,17 @@ impl Target { Abi::EfiApi if self.arch == "x86_64" => Abi::Win64 { unwind: false }, Abi::EfiApi => Abi::C { unwind: false }, - // See commentary in `is_abi_supported`. - Abi::Stdcall { .. } | Abi::Thiscall { .. } if self.arch == "x86" => abi, - Abi::Stdcall { unwind } | Abi::Thiscall { unwind } => Abi::C { unwind }, - Abi::Fastcall { .. } if self.arch == "x86" => abi, + // See commentary in `is_abi_supported`: we map these ABIs to "C" when they do not make sense. + Abi::Stdcall { .. } | Abi::Thiscall { .. } | Abi::Fastcall { .. } + if self.arch == "x86" => + { + abi + } Abi::Vectorcall { .. } if ["x86", "x86_64"].contains(&&self.arch[..]) => abi, - Abi::Fastcall { unwind } | Abi::Vectorcall { unwind } => Abi::C { unwind }, + Abi::Stdcall { unwind } + | Abi::Thiscall { unwind } + | Abi::Fastcall { unwind } + | Abi::Vectorcall { unwind } => Abi::C { unwind }, // The Windows x64 calling convention we use for `extern "Rust"` // <https://learn.microsoft.com/en-us/cpp/build/x64-software-conventions#register-volatility-and-preservation> @@ -2853,6 +2860,7 @@ impl Target { } Win64 { .. } | SysV64 { .. 
} => self.arch == "x86_64", PtxKernel => self.arch == "nvptx64", + GpuKernel => ["amdgpu", "nvptx64"].contains(&&self.arch[..]), Msp430Interrupt => self.arch == "msp430", RiscvInterruptM | RiscvInterruptS => ["riscv32", "riscv64"].contains(&&self.arch[..]), AvrInterrupt | AvrNonBlockingInterrupt => self.arch == "avr", diff --git a/compiler/rustc_target/src/spec/targets/aarch64_wrs_vxworks.rs b/compiler/rustc_target/src/spec/targets/aarch64_wrs_vxworks.rs index d5e78d03076..ac53cbaecce 100644 --- a/compiler/rustc_target/src/spec/targets/aarch64_wrs_vxworks.rs +++ b/compiler/rustc_target/src/spec/targets/aarch64_wrs_vxworks.rs @@ -13,7 +13,7 @@ pub(crate) fn target() -> Target { data_layout: "e-m:e-p270:32:32-p271:32:32-p272:64:64-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32".into(), arch: "aarch64".into(), options: TargetOptions { - features: "+v8a".into(), + features: "+v8a,+reserve-x18".into(), max_atomic_width: Some(128), stack_probes: StackProbeType::Inline, ..base::vxworks::opts() diff --git a/compiler/rustc_target/src/spec/targets/i686_win7_windows_gnu.rs b/compiler/rustc_target/src/spec/targets/i686_win7_windows_gnu.rs new file mode 100644 index 00000000000..086a799a68c --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/i686_win7_windows_gnu.rs @@ -0,0 +1,35 @@ +use crate::spec::{Cc, FramePointer, LinkerFlavor, Lld, Target, base}; + +pub(crate) fn target() -> Target { + let mut base = base::windows_gnu::opts(); + base.vendor = "win7".into(); + base.cpu = "pentium4".into(); + base.max_atomic_width = Some(64); + base.frame_pointer = FramePointer::Always; // Required for backtraces + base.linker = Some("i686-w64-mingw32-gcc".into()); + + // Mark all dynamic libraries and executables as compatible with the larger 4GiB address + // space available to x86 Windows binaries on x86_64. 
+ base.add_pre_link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), &[ + "-m", + "i386pe", + "--large-address-aware", + ]); + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-Wl,--large-address-aware"]); + + Target { + llvm_target: "i686-pc-windows-gnu".into(), + metadata: crate::spec::TargetMetadata { + description: Some("32-bit MinGW (Windows 7+)".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 32, + data_layout: "e-m:x-p:32:32-p270:32:32-p271:32:32-p272:64:64-\ + i64:64-i128:128-f80:32-n8:16:32-a:0:32-S32" + .into(), + arch: "x86".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_gnu.rs b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_gnu.rs new file mode 100644 index 00000000000..d40df5a3e7d --- /dev/null +++ b/compiler/rustc_target/src/spec/targets/x86_64_win7_windows_gnu.rs @@ -0,0 +1,32 @@ +use crate::spec::{Cc, LinkerFlavor, Lld, Target, base}; + +pub(crate) fn target() -> Target { + let mut base = base::windows_gnu::opts(); + base.vendor = "win7".into(); + base.cpu = "x86-64".into(); + base.plt_by_default = false; + // Use high-entropy 64 bit address space for ASLR + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::No, Lld::No), &[ + "-m", + "i386pep", + "--high-entropy-va", + ]); + base.add_pre_link_args(LinkerFlavor::Gnu(Cc::Yes, Lld::No), &["-m64", "-Wl,--high-entropy-va"]); + base.max_atomic_width = Some(64); + base.linker = Some("x86_64-w64-mingw32-gcc".into()); + + Target { + llvm_target: "x86_64-pc-windows-gnu".into(), + metadata: crate::spec::TargetMetadata { + description: Some("64-bit MinGW (Windows 7+)".into()), + tier: Some(3), + host_tools: Some(false), + std: Some(true), + }, + pointer_width: 64, + data_layout: + "e-m:w-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128".into(), + arch: "x86_64".into(), + options: base, + } +} diff --git a/compiler/rustc_target/src/target_features.rs b/compiler/rustc_target/src/target_features.rs index f594d20f928..9fd07c8634a 100644 --- a/compiler/rustc_target/src/target_features.rs +++ b/compiler/rustc_target/src/target_features.rs @@ -148,6 +148,11 @@ const ARM_FEATURES: &[(&str, Stability, ImpliedFeatures)] = &[ ("neon", Unstable(sym::arm_target_feature), &["vfp3"]), ("rclass", Unstable(sym::arm_target_feature), &[]), ("sha2", Unstable(sym::arm_target_feature), &["neon"]), + // This can be *disabled* on non-`hf` targets to enable the use + // of hardfloats while keeping the softfloat ABI. + // FIXME before stabilization: Should we expose this as a `hard-float` target feature instead of + // matching the odd negative feature LLVM uses? + ("soft-float", Unstable(sym::arm_target_feature), &[]), // This is needed for inline assembly, but shouldn't be stabilized as-is // since it should be enabled per-function using #[instruction_set], not // #[target_feature]. @@ -790,6 +795,9 @@ impl Target { match self.llvm_floatabi.unwrap() { FloatAbi::Soft => { // Nothing special required, will use soft-float ABI throughout. + // We can even allow `-soft-float` here; in fact that is useful as it lets + // people use FPU instructions with a softfloat ABI (corresponds to + // `-mfloat-abi=softfp` in GCC/clang). 
NOTHING } FloatAbi::Hard => { diff --git a/compiler/rustc_trait_selection/messages.ftl b/compiler/rustc_trait_selection/messages.ftl index 1ab89ecde7a..b82bb27eb79 100644 --- a/compiler/rustc_trait_selection/messages.ftl +++ b/compiler/rustc_trait_selection/messages.ftl @@ -251,7 +251,9 @@ trait_selection_no_value_in_rustc_on_unimplemented = this attribute must have a trait_selection_nothing = {""} -trait_selection_oc_cant_coerce = cannot coerce intrinsics to function pointers +trait_selection_oc_cant_coerce_force_inline = + cannot coerce functions which must be inlined to function pointers +trait_selection_oc_cant_coerce_intrinsic = cannot coerce intrinsics to function pointers trait_selection_oc_closure_selfref = closure/coroutine type that references itself trait_selection_oc_const_compat = const not compatible with trait trait_selection_oc_fn_lang_correct_type = {$lang_item_name -> diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs index d279590d45a..9778299eb19 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs @@ -620,6 +620,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { }) => { let then_span = self.find_block_span_from_hir_id(then_id); let else_span = self.find_block_span_from_hir_id(else_id); + if let hir::Node::Expr(e) = self.tcx.hir_node(else_id) + && let hir::ExprKind::If(_cond, _then, None) = e.kind + && else_ty.is_unit() + { + // Account for `let x = if a { 1 } else if b { 2 };` + err.note("`if` expressions without `else` evaluate to `()`"); + err.note("consider adding an `else` block that evaluates to the expected type"); + } err.span_label(then_span, "expected because of this"); if let Some(sp) = outer_span { err.span_label(sp, "`if` and `else` have incompatible types"); @@ -824,9 +832,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn cmp_fn_sig( &self, sig1: &ty::PolyFnSig<'tcx>, - fn_def1: Option<(DefId, &'tcx [ty::GenericArg<'tcx>])>, + fn_def1: Option<(DefId, Option<&'tcx [ty::GenericArg<'tcx>]>)>, sig2: &ty::PolyFnSig<'tcx>, - fn_def2: Option<(DefId, &'tcx [ty::GenericArg<'tcx>])>, + fn_def2: Option<(DefId, Option<&'tcx [ty::GenericArg<'tcx>]>)>, ) -> (DiagStyledString, DiagStyledString) { let sig1 = &(self.normalize_fn_sig)(*sig1); let sig2 = &(self.normalize_fn_sig)(*sig2); @@ -850,8 +858,20 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^^ - values.0.push(sig1.safety.prefix_str(), sig1.safety != sig2.safety); - values.1.push(sig2.safety.prefix_str(), sig1.safety != sig2.safety); + let safety = |fn_def, sig: ty::FnSig<'_>| match fn_def { + None => sig.safety.prefix_str(), + Some((did, _)) => { + if self.tcx.codegen_fn_attrs(did).safe_target_features { + "#[target_features] " + } else { + sig.safety.prefix_str() + } + } + }; + let safety1 = safety(fn_def1, sig1); + let safety2 = safety(fn_def2, sig2); + values.0.push(safety1, safety1 != safety2); + values.1.push(safety2, safety1 != safety2); // unsafe extern "C" for<'a> fn(&'a T) -> &'a T // ^^^^^^^^^^ @@ -932,23 +952,23 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { (values.1).0.extend(x2.0); } - let fmt = |(did, args)| format!(" {{{}}}", self.tcx.def_path_str_with_args(did, args)); + let fmt = |did, args| format!(" {{{}}}", self.tcx.def_path_str_with_args(did, args)); match (fn_def1, fn_def2) { - (None, None) => {} - (Some(fn_def1), Some(fn_def2)) => { - let path1 = fmt(fn_def1); - let path2 = 
fmt(fn_def2); + (Some((fn_def1, Some(fn_args1))), Some((fn_def2, Some(fn_args2)))) => { + let path1 = fmt(fn_def1, fn_args1); + let path2 = fmt(fn_def2, fn_args2); let same_path = path1 == path2; values.0.push(path1, !same_path); values.1.push(path2, !same_path); } - (Some(fn_def1), None) => { - values.0.push_highlighted(fmt(fn_def1)); + (Some((fn_def1, Some(fn_args1))), None) => { + values.0.push_highlighted(fmt(fn_def1, fn_args1)); } - (None, Some(fn_def2)) => { - values.1.push_highlighted(fmt(fn_def2)); + (None, Some((fn_def2, Some(fn_args2)))) => { + values.1.push_highlighted(fmt(fn_def2, fn_args2)); } + _ => {} } values @@ -1339,17 +1359,22 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { (ty::FnDef(did1, args1), ty::FnDef(did2, args2)) => { let sig1 = self.tcx.fn_sig(*did1).instantiate(self.tcx, args1); let sig2 = self.tcx.fn_sig(*did2).instantiate(self.tcx, args2); - self.cmp_fn_sig(&sig1, Some((*did1, args1)), &sig2, Some((*did2, args2))) + self.cmp_fn_sig( + &sig1, + Some((*did1, Some(args1))), + &sig2, + Some((*did2, Some(args2))), + ) } (ty::FnDef(did1, args1), ty::FnPtr(sig_tys2, hdr2)) => { let sig1 = self.tcx.fn_sig(*did1).instantiate(self.tcx, args1); - self.cmp_fn_sig(&sig1, Some((*did1, args1)), &sig_tys2.with(*hdr2), None) + self.cmp_fn_sig(&sig1, Some((*did1, Some(args1))), &sig_tys2.with(*hdr2), None) } (ty::FnPtr(sig_tys1, hdr1), ty::FnDef(did2, args2)) => { let sig2 = self.tcx.fn_sig(*did2).instantiate(self.tcx, args2); - self.cmp_fn_sig(&sig_tys1.with(*hdr1), None, &sig2, Some((*did2, args2))) + self.cmp_fn_sig(&sig_tys1.with(*hdr1), None, &sig2, Some((*did2, Some(args2)))) } (ty::FnPtr(sig_tys1, hdr1), ty::FnPtr(sig_tys2, hdr2)) => { @@ -1531,7 +1556,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { (false, Mismatch::Fixed("existential projection")) } }; - let Some(vals) = self.values_str(values) else { + let Some(vals) = self.values_str(values, cause) else { // Derived error. Cancel the emitter. // NOTE(eddyb) this was `.cancel()`, but `diag` // is borrowed, so we can't fully defuse it. @@ -1956,7 +1981,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { }) | ObligationCauseCode::BlockTailExpression(.., source)) = code && let hir::MatchSource::TryDesugar(_) = source - && let Some((expected_ty, found_ty, _)) = self.values_str(trace.values) + && let Some((expected_ty, found_ty, _)) = self.values_str(trace.values, &trace.cause) { suggestions.push(TypeErrorAdditionalDiags::TryCannotConvert { found: found_ty.content(), @@ -2085,6 +2110,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn values_str( &self, values: ValuePairs<'tcx>, + cause: &ObligationCause<'tcx>, ) -> Option<(DiagStyledString, DiagStyledString, Option<PathBuf>)> { match values { ValuePairs::Regions(exp_found) => self.expected_found_str(exp_found), @@ -2109,7 +2135,19 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if exp_found.references_error() { return None; } - let (exp, fnd) = self.cmp_fn_sig(&exp_found.expected, None, &exp_found.found, None); + let (fn_def1, fn_def2) = if let ObligationCauseCode::CompareImplItem { + impl_item_def_id, + trait_item_def_id, + .. 
+ } = *cause.code() + { + (Some((trait_item_def_id, None)), Some((impl_item_def_id.to_def_id(), None))) + } else { + (None, None) + }; + + let (exp, fnd) = + self.cmp_fn_sig(&exp_found.expected, fn_def1, &exp_found.found, fn_def2); Some((exp, fnd, None)) } } @@ -2294,7 +2332,7 @@ impl<'tcx> ObligationCause<'tcx> { { FailureCode::Error0644 } - TypeError::IntrinsicCast => FailureCode::Error0308, + TypeError::IntrinsicCast | TypeError::ForceInlineCast => FailureCode::Error0308, _ => FailureCode::Error0308, }, } @@ -2360,8 +2398,11 @@ impl<'tcx> ObligationCause<'tcx> { { ObligationCauseFailureCode::ClosureSelfref { span } } + TypeError::ForceInlineCast => { + ObligationCauseFailureCode::CantCoerceForceInline { span, subdiags } + } TypeError::IntrinsicCast => { - ObligationCauseFailureCode::CantCoerce { span, subdiags } + ObligationCauseFailureCode::CantCoerceIntrinsic { span, subdiags } } _ => ObligationCauseFailureCode::Generic { span, subdiags }, }, diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs index 2cfccc57c97..1dd09fe7aaf 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/note_and_explain.rs @@ -461,9 +461,11 @@ impl<T> Trait<T> for X { (ty::FnPtr(_, hdr), ty::FnDef(def_id, _)) | (ty::FnDef(def_id, _), ty::FnPtr(_, hdr)) => { if tcx.fn_sig(def_id).skip_binder().safety() < hdr.safety { - diag.note( + if !tcx.codegen_fn_attrs(def_id).safe_target_features { + diag.note( "unsafe functions cannot be coerced into safe function pointers", - ); + ); + } } } (ty::Adt(_, _), ty::Adt(def, args)) diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs index 98b5fb2052f..3acca47025c 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/region.rs @@ -221,7 +221,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { infer::Subtype(ref trace) => RegionOriginNote::WithRequirement { span: trace.cause.span, requirement: ObligationCauseAsDiagArg(trace.cause.clone()), - expected_found: self.values_str(trace.values).map(|(e, f, _)| (e, f)), + expected_found: self.values_str(trace.values, &trace.cause).map(|(e, f, _)| (e, f)), } .add_to_diag(err), infer::Reborrow(span) => { @@ -946,8 +946,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if let infer::Subtype(ref sup_trace) = sup_origin && let infer::Subtype(ref sub_trace) = sub_origin - && let Some((sup_expected, sup_found, _)) = self.values_str(sup_trace.values) - && let Some((sub_expected, sub_found, _)) = self.values_str(sub_trace.values) + && let Some((sup_expected, sup_found, _)) = + self.values_str(sup_trace.values, &sup_trace.cause) + && let Some((sub_expected, sub_found, _)) = + self.values_str(sub_trace.values, &sup_trace.cause) && sub_expected == sup_expected && sub_found == sup_found { diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs index fc0de13aeab..b4d294a70c0 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/ambiguity.rs @@ -172,14 +172,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let bound_predicate = predicate.kind(); let mut err = match 
bound_predicate.skip_binder() { ty::PredicateKind::Clause(ty::ClauseKind::Trait(data)) => { - let trait_ref = bound_predicate.rebind(data.trait_ref); - debug!(?trait_ref); + let trait_pred = bound_predicate.rebind(data); + debug!(?trait_pred); if let Err(e) = predicate.error_reported() { return e; } - if let Err(guar) = self.tcx.ensure().coherent_trait(trait_ref.def_id()) { + if let Err(guar) = self.tcx.ensure().coherent_trait(trait_pred.def_id()) { // Avoid bogus "type annotations needed `Foo: Bar`" errors on `impl Bar for Foo` in case // other `Foo` impls are incoherent. return guar; @@ -200,13 +200,13 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // avoid inundating the user with unnecessary errors, but we now // check upstream for type errors and don't add the obligations to // begin with in those cases. - if self.tcx.is_lang_item(trait_ref.def_id(), LangItem::Sized) { + if self.tcx.is_lang_item(trait_pred.def_id(), LangItem::Sized) { match self.tainted_by_errors() { None => { let err = self.emit_inference_failure_err( obligation.cause.body_id, span, - trait_ref.self_ty().skip_binder().into(), + trait_pred.self_ty().skip_binder().into(), TypeAnnotationNeeded::E0282, false, ); @@ -251,10 +251,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let mut ambiguities = compute_applicable_impls_for_diagnostics( self.infcx, - &obligation.with(self.tcx, trait_ref), + &obligation.with(self.tcx, trait_pred), ); - let has_non_region_infer = - trait_ref.skip_binder().args.types().any(|t| !t.is_ty_or_numeric_infer()); + let has_non_region_infer = trait_pred + .skip_binder() + .trait_ref + .args + .types() + .any(|t| !t.is_ty_or_numeric_infer()); // It doesn't make sense to talk about applicable impls if there are more than a // handful of them. If there are a lot of them, but only a few of them have no type // params, we only show those, as they are more likely to be useful/intended. @@ -294,7 +298,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if impl_candidates.len() < 40 { self.report_similar_impl_candidates( impl_candidates.as_slice(), - trait_ref, + trait_pred, obligation.cause.body_id, &mut err, false, @@ -306,7 +310,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if let ObligationCauseCode::WhereClause(def_id, _) | ObligationCauseCode::WhereClauseInExpr(def_id, ..) = *obligation.cause.code() { - self.suggest_fully_qualified_path(&mut err, def_id, span, trait_ref.def_id()); + self.suggest_fully_qualified_path(&mut err, def_id, span, trait_pred.def_id()); } if let Some(ty::GenericArgKind::Type(_)) = arg.map(|arg| arg.unpack()) diff --git a/compiler/rustc_middle/src/util/call_kind.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs index 0e395331687..1c3e570b676 100644 --- a/compiler/rustc_middle/src/util/call_kind.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/call_kind.rs @@ -2,12 +2,14 @@ //! as well as errors when attempting to call a non-const function in a const //! context. 
+use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; use rustc_hir::{LangItem, lang_items}; +use rustc_middle::ty::{AssocItemContainer, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv}; use rustc_span::{DesugaringKind, Ident, Span, sym}; use tracing::debug; -use crate::ty::{AssocItemContainer, GenericArgsRef, Instance, Ty, TyCtxt, TypingEnv}; +use crate::traits::specialization_graph; #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum CallDesugaringKind { @@ -55,7 +57,7 @@ pub enum CallKind<'tcx> { DerefCoercion { /// The `Span` of the `Target` associated type /// in the `Deref` impl we are using. - deref_target: Span, + deref_target_span: Option<Span>, /// The type `T::Deref` we are dereferencing to deref_target_ty: Ty<'tcx>, self_ty: Ty<'tcx>, @@ -89,61 +91,65 @@ pub fn call_kind<'tcx>( None }; - let is_deref = !from_hir_call && tcx.is_diagnostic_item(sym::deref_method, method_did); - // Check for a 'special' use of 'self' - // an FnOnce call, an operator (e.g. `<<`), or a // deref coercion. - let kind = if let Some(trait_id) = fn_call { - Some(CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_args.type_at(0) }) + if let Some(trait_id) = fn_call { + return CallKind::FnCall { fn_trait_id: trait_id, self_ty: method_args.type_at(0) }; } else if let Some(trait_id) = operator { - Some(CallKind::Operator { self_arg, trait_id, self_ty: method_args.type_at(0) }) - } else if is_deref { - let deref_target = tcx.get_diagnostic_item(sym::deref_target).and_then(|deref_target| { - Instance::try_resolve(tcx, typing_env, deref_target, method_args).transpose() - }); - if let Some(Ok(instance)) = deref_target { - let deref_target_ty = instance.ty(tcx, typing_env); - Some(CallKind::DerefCoercion { - deref_target: tcx.def_span(instance.def_id()), - deref_target_ty, - self_ty: method_args.type_at(0), - }) + return CallKind::Operator { self_arg, trait_id, self_ty: method_args.type_at(0) }; + } else if !from_hir_call && tcx.is_diagnostic_item(sym::deref_method, method_did) { + let deref_target_def_id = + tcx.get_diagnostic_item(sym::deref_target).expect("deref method but no deref target"); + let deref_target_ty = tcx.normalize_erasing_regions( + typing_env, + Ty::new_projection(tcx, deref_target_def_id, method_args), + ); + let deref_target_span = if let Ok(Some(instance)) = + Instance::try_resolve(tcx, typing_env, method_did, method_args) + && let instance_parent_def_id = tcx.parent(instance.def_id()) + && matches!(tcx.def_kind(instance_parent_def_id), DefKind::Impl { .. 
}) + && let Ok(instance) = + specialization_graph::assoc_def(tcx, instance_parent_def_id, deref_target_def_id) + && instance.is_final() + { + Some(tcx.def_span(instance.item.def_id)) + } else { + None + }; + return CallKind::DerefCoercion { + deref_target_ty, + deref_target_span, + self_ty: method_args.type_at(0), + }; + } + + // This isn't a 'special' use of `self` + debug!(?method_did, ?fn_call_span); + let desugaring = if tcx.is_lang_item(method_did, LangItem::IntoIterIntoIter) + && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop) + { + Some((CallDesugaringKind::ForLoopIntoIter, method_args.type_at(0))) + } else if tcx.is_lang_item(method_did, LangItem::IteratorNext) + && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop) + { + Some((CallDesugaringKind::ForLoopNext, method_args.type_at(0))) + } else if fn_call_span.desugaring_kind() == Some(DesugaringKind::QuestionMark) { + if tcx.is_lang_item(method_did, LangItem::TryTraitBranch) { + Some((CallDesugaringKind::QuestionBranch, method_args.type_at(0))) + } else if tcx.is_lang_item(method_did, LangItem::TryTraitFromResidual) { + Some((CallDesugaringKind::QuestionFromResidual, method_args.type_at(0))) } else { None } + } else if tcx.is_lang_item(method_did, LangItem::TryTraitFromOutput) + && fn_call_span.desugaring_kind() == Some(DesugaringKind::TryBlock) + { + Some((CallDesugaringKind::TryBlockFromOutput, method_args.type_at(0))) + } else if fn_call_span.is_desugaring(DesugaringKind::Await) { + Some((CallDesugaringKind::Await, method_args.type_at(0))) } else { None }; - - kind.unwrap_or_else(|| { - // This isn't a 'special' use of `self` - debug!(?method_did, ?fn_call_span); - let desugaring = if tcx.is_lang_item(method_did, LangItem::IntoIterIntoIter) - && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop) - { - Some((CallDesugaringKind::ForLoopIntoIter, method_args.type_at(0))) - } else if tcx.is_lang_item(method_did, LangItem::IteratorNext) - && fn_call_span.desugaring_kind() == Some(DesugaringKind::ForLoop) - { - Some((CallDesugaringKind::ForLoopNext, method_args.type_at(0))) - } else if fn_call_span.desugaring_kind() == Some(DesugaringKind::QuestionMark) { - if tcx.is_lang_item(method_did, LangItem::TryTraitBranch) { - Some((CallDesugaringKind::QuestionBranch, method_args.type_at(0))) - } else if tcx.is_lang_item(method_did, LangItem::TryTraitFromResidual) { - Some((CallDesugaringKind::QuestionFromResidual, method_args.type_at(0))) - } else { - None - } - } else if tcx.is_lang_item(method_did, LangItem::TryTraitFromOutput) - && fn_call_span.desugaring_kind() == Some(DesugaringKind::TryBlock) - { - Some((CallDesugaringKind::TryBlockFromOutput, method_args.type_at(0))) - } else if fn_call_span.is_desugaring(DesugaringKind::Await) { - Some((CallDesugaringKind::Await, method_args.type_at(0))) - } else { - None - }; - CallKind::Normal { self_arg, desugaring, method_did, method_args } - }) + CallKind::Normal { self_arg, desugaring, method_did, method_args } } diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs index 7ba87e180d0..c40ba330845 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/fulfillment_errors.rs @@ -23,11 +23,9 @@ use rustc_middle::ty::print::{ FmtPrinter, Print, PrintPolyTraitPredicateExt, PrintTraitPredicateExt as _, PrintTraitRefExt as _, 
with_forced_trimmed_paths, }; -use rustc_middle::ty::{ - self, ToPolyTraitRef, TraitRef, Ty, TyCtxt, TypeFoldable, TypeVisitableExt, Upcast, -}; +use rustc_middle::ty::{self, TraitRef, Ty, TyCtxt, TypeFoldable, TypeVisitableExt, Upcast}; use rustc_middle::{bug, span_bug}; -use rustc_span::{BytePos, DUMMY_SP, Span, Symbol, sym}; +use rustc_span::{BytePos, DUMMY_SP, STDLIB_STABLE_CRATES, Span, Symbol, sym}; use tracing::{debug, instrument}; use super::on_unimplemented::{AppendConstMessage, OnUnimplementedNote}; @@ -155,12 +153,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { (leaf_trait_predicate, &obligation) }; - let main_trait_ref = main_trait_predicate.to_poly_trait_ref(); - let leaf_trait_ref = leaf_trait_predicate.to_poly_trait_ref(); - if let Some(guar) = self.emit_specialized_closure_kind_error( &obligation, - leaf_trait_ref, + leaf_trait_predicate, ) { return guar; } @@ -202,14 +197,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } = self.on_unimplemented_note(main_trait_predicate, main_obligation, &mut long_ty_file); let have_alt_message = message.is_some() || label.is_some(); - let is_try_conversion = self.is_try_conversion(span, main_trait_ref.def_id()); + let is_try_conversion = self.is_try_conversion(span, main_trait_predicate.def_id()); let is_unsize = - self.tcx.is_lang_item(leaf_trait_ref.def_id(), LangItem::Unsize); + self.tcx.is_lang_item(leaf_trait_predicate.def_id(), LangItem::Unsize); let (message, notes, append_const_msg) = if is_try_conversion { ( Some(format!( "`?` couldn't convert the error to `{}`", - main_trait_ref.skip_binder().self_ty(), + main_trait_predicate.skip_binder().self_ty(), )), vec![ "the question mark operation (`?`) implicitly performs a \ @@ -230,12 +225,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { post_message, ); - let (err_msg, safe_transmute_explanation) = if self.tcx.is_lang_item(main_trait_ref.def_id(), LangItem::TransmuteTrait) + let (err_msg, safe_transmute_explanation) = if self.tcx.is_lang_item(main_trait_predicate.def_id(), LangItem::TransmuteTrait) { // Recompute the safe transmute reason and use that for the error reporting match self.get_safe_transmute_error_and_reason( obligation.clone(), - main_trait_ref, + main_trait_predicate, span, ) { GetSafeTransmuteErrorAndReason::Silent => { @@ -266,7 +261,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } let mut suggested = false; if is_try_conversion { - suggested = self.try_conversion_context(&obligation, main_trait_ref.skip_binder(), &mut err); + suggested = self.try_conversion_context(&obligation, main_trait_predicate, &mut err); } if is_try_conversion && let Some(ret_span) = self.return_type_span(&obligation) { @@ -274,12 +269,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { ret_span, format!( "expected `{}` because of this", - main_trait_ref.skip_binder().self_ty() + main_trait_predicate.skip_binder().self_ty() ), ); } - if tcx.is_lang_item(leaf_trait_ref.def_id(), LangItem::Tuple) { + if tcx.is_lang_item(leaf_trait_predicate.def_id(), LangItem::Tuple) { self.add_tuple_trait_message( obligation.cause.code().peel_derives(), &mut err, @@ -319,7 +314,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // If it has a custom `#[rustc_on_unimplemented]` // error message, let's display it as the label! 
err.span_label(span, s); - if !matches!(leaf_trait_ref.skip_binder().self_ty().kind(), ty::Param(_)) { + if !matches!(leaf_trait_predicate.skip_binder().self_ty().kind(), ty::Param(_)) { // When the self type is a type param We don't need to "the trait // `std::marker::Sized` is not implemented for `T`" as we will point // at the type param with a label to suggest constraining it. @@ -339,7 +334,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if let ObligationCauseCode::Coercion { source, target } = *obligation.cause.code().peel_derives() { - if self.tcx.is_lang_item(leaf_trait_ref.def_id(), LangItem::Sized) { + if self.tcx.is_lang_item(leaf_trait_predicate.def_id(), LangItem::Sized) { self.suggest_borrowing_for_object_cast( &mut err, root_obligation, @@ -368,7 +363,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { err.span_label(tcx.def_span(body), s); } - self.suggest_floating_point_literal(&obligation, &mut err, leaf_trait_ref); + self.suggest_floating_point_literal(&obligation, &mut err, leaf_trait_predicate); self.suggest_dereferencing_index(&obligation, &mut err, leaf_trait_predicate); suggested |= self.suggest_dereferences(&obligation, &mut err, leaf_trait_predicate); suggested |= self.suggest_fn_call(&obligation, &mut err, leaf_trait_predicate); @@ -376,7 +371,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { suggested = if let &[cand] = &impl_candidates[..] { let cand = cand.trait_ref; if let (ty::FnPtr(..), ty::FnDef(..)) = - (cand.self_ty().kind(), main_trait_ref.self_ty().skip_binder().kind()) + (cand.self_ty().kind(), main_trait_predicate.self_ty().skip_binder().kind()) { // Wrap method receivers and `&`-references in parens let suggestion = if self.tcx.sess.source_map().span_look_ahead(span, ".", Some(50)).is_some() { @@ -423,11 +418,11 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { span, leaf_trait_predicate, ); - self.note_version_mismatch(&mut err, leaf_trait_ref); + self.note_version_mismatch(&mut err, leaf_trait_predicate); self.suggest_remove_await(&obligation, &mut err); self.suggest_derive(&obligation, &mut err, leaf_trait_predicate); - if tcx.is_lang_item(leaf_trait_ref.def_id(), LangItem::Try) { + if tcx.is_lang_item(leaf_trait_predicate.def_id(), LangItem::Try) { self.suggest_await_before_try( &mut err, &obligation, @@ -455,9 +450,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { ); } - let is_fn_trait = tcx.is_fn_trait(leaf_trait_ref.def_id()); + let is_fn_trait = tcx.is_fn_trait(leaf_trait_predicate.def_id()); let is_target_feature_fn = if let ty::FnDef(def_id, _) = - *leaf_trait_ref.skip_binder().self_ty().kind() + *leaf_trait_predicate.skip_binder().self_ty().kind() { !self.tcx.codegen_fn_attrs(def_id).target_features.is_empty() } else { @@ -509,7 +504,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } self.explain_hrtb_projection(&mut err, leaf_trait_predicate, obligation.param_env, &obligation.cause); - self.suggest_desugaring_async_fn_in_trait(&mut err, main_trait_ref); + self.suggest_desugaring_async_fn_in_trait(&mut err, main_trait_predicate); // Return early if the trait is Debug or Display and the invocation // originates within a standard library macro, because the output @@ -520,14 +515,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { match obligation.cause.span.ctxt().outer_expn_data().macro_def_id { Some(macro_def_id) => { let crate_name = tcx.crate_name(macro_def_id.krate); - crate_name == sym::std || crate_name == sym::core + STDLIB_STABLE_CRATES.contains(&crate_name) } None => false, }; if in_std_macro && matches!( - self.tcx.get_diagnostic_name(leaf_trait_ref.def_id()), + 
self.tcx.get_diagnostic_name(leaf_trait_predicate.def_id()), Some(sym::Debug | sym::Display) ) { @@ -785,21 +780,22 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn emit_specialized_closure_kind_error( &self, obligation: &PredicateObligation<'tcx>, - mut trait_ref: ty::PolyTraitRef<'tcx>, + mut trait_pred: ty::PolyTraitPredicate<'tcx>, ) -> Option<ErrorGuaranteed> { // If `AsyncFnKindHelper` is not implemented, that means that the closure kind // doesn't extend the goal kind. This is worth reporting, but we can only do so // if we actually know which closure this goal comes from, so look at the cause // to see if we can extract that information. - if self.tcx.is_lang_item(trait_ref.def_id(), LangItem::AsyncFnKindHelper) - && let Some(found_kind) = trait_ref.skip_binder().args.type_at(0).to_opt_closure_kind() + if self.tcx.is_lang_item(trait_pred.def_id(), LangItem::AsyncFnKindHelper) + && let Some(found_kind) = + trait_pred.skip_binder().trait_ref.args.type_at(0).to_opt_closure_kind() && let Some(expected_kind) = - trait_ref.skip_binder().args.type_at(1).to_opt_closure_kind() + trait_pred.skip_binder().trait_ref.args.type_at(1).to_opt_closure_kind() && !found_kind.extends(expected_kind) { if let Some((_, Some(parent))) = obligation.cause.code().parent_with_predicate() { // If we have a derived obligation, then the parent will be a `AsyncFn*` goal. - trait_ref = parent.to_poly_trait_ref(); + trait_pred = parent; } else if let &ObligationCauseCode::FunctionArg { arg_hir_id, .. } = obligation.cause.code() && let Some(typeck_results) = &self.typeck_results @@ -820,9 +816,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } } - let self_ty = trait_ref.self_ty().skip_binder(); + let self_ty = trait_pred.self_ty().skip_binder(); - if let Some(expected_kind) = self.tcx.fn_trait_kind_from_def_id(trait_ref.def_id()) { + if let Some(expected_kind) = self.tcx.fn_trait_kind_from_def_id(trait_pred.def_id()) { let (closure_def_id, found_args, by_ref_captures) = match *self_ty.kind() { ty::Closure(def_id, args) => { (def_id, args.as_closure().sig().map_bound(|sig| sig.inputs()[0]), None) @@ -837,7 +833,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { _ => return None, }; - let expected_args = trait_ref.map_bound(|trait_ref| trait_ref.args.type_at(1)); + let expected_args = + trait_pred.map_bound(|trait_pred| trait_pred.trait_ref.args.type_at(1)); // Verify that the arguments are compatible. If the signature is // mismatched, then we have a totally different error to report. 
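The `emit_specialized_closure_kind_error` changes above hinge on the `found_kind.extends(expected_kind)` check: a closure kind satisfies a goal only if it is at least as capable, so `Fn` satisfies `FnMut` and `FnOnce` bounds, `FnMut` satisfies `FnOnce`, but not the reverse. The following standalone sketch illustrates that hierarchy check; `ClosureKind` and `extends` here are local stand-in definitions for illustration only, not rustc's internal `ty::ClosureKind`.

```rust
/// Local stand-in for the closure-kind lattice consulted by the diagnostic
/// above; in the patch this role is played by `found_kind.extends(expected_kind)`.
#[derive(Clone, Copy, PartialEq, Eq, Debug)]
enum ClosureKind {
    Fn,
    FnMut,
    FnOnce,
}

impl ClosureKind {
    /// A closure of kind `self` can be used where a bound of kind `goal` is
    /// expected only if `self` is at least as capable: `Fn` implies `FnMut`
    /// and `FnOnce`, and `FnMut` implies `FnOnce`.
    fn extends(self, goal: ClosureKind) -> bool {
        use ClosureKind::*;
        matches!(
            (self, goal),
            (Fn, _) | (FnMut, FnMut | FnOnce) | (FnOnce, FnOnce)
        )
    }
}

fn main() {
    // A closure that mutates its captures is `FnMut`: acceptable for an
    // `FnOnce` bound, rejected for an `Fn` bound.
    assert!(ClosureKind::FnMut.extends(ClosureKind::FnOnce));
    assert!(!ClosureKind::FnMut.extends(ClosureKind::Fn));
}
```

When the check fails, the patched code additionally walks the obligation cause (via `parent_with_predicate` or the `FunctionArg` code) to point the error at the originating closure expression; the sketch above models only the kind-compatibility test itself.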
@@ -909,7 +906,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn try_conversion_context( &self, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::TraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, err: &mut Diag<'_>, ) -> bool { let span = obligation.cause.span; @@ -953,8 +950,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if !self.tcx.is_diagnostic_item(sym::FromResidual, y.def_id()) { return false; } - let self_ty = trait_ref.self_ty(); - let found_ty = trait_ref.args.get(1).and_then(|a| a.as_type()); + let self_ty = trait_pred.skip_binder().self_ty(); + let found_ty = trait_pred.skip_binder().trait_ref.args.get(1).and_then(|a| a.as_type()); let mut prev_ty = self.resolve_vars_if_possible( typeck.expr_ty_adjusted_opt(expr).unwrap_or(Ty::new_misc_error(self.tcx)), @@ -1223,18 +1220,18 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { goal: ty::TraitPredicate<'tcx>, assumption: ty::PolyTraitPredicate<'tcx>, ) -> bool { + // Fast path if goal.polarity != assumption.polarity() { return false; } - let trait_goal = goal.trait_ref; let trait_assumption = self.instantiate_binder_with_fresh_vars( DUMMY_SP, infer::BoundRegionConversionTime::HigherRankedType, - assumption.to_poly_trait_ref(), + assumption, ); - self.can_eq(ty::ParamEnv::empty(), trait_goal, trait_assumption) + self.can_eq(ty::ParamEnv::empty(), goal.trait_ref, trait_assumption.trait_ref) } fn can_match_projection( @@ -1682,7 +1679,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { pub(super) fn report_similar_impl_candidates( &self, impl_candidates: &[ImplCandidate<'tcx>], - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, body_def_id: LocalDefId, err: &mut Diag<'_>, other: bool, @@ -1727,7 +1724,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // We'll check for the case where the reason for the mismatch is that the trait comes from // one crate version and the type comes from another crate version, even though they both // are from the same crate. - let trait_def_id = trait_ref.def_id(); + let trait_def_id = trait_pred.def_id(); let trait_name = self.tcx.item_name(trait_def_id); let crate_name = self.tcx.crate_name(trait_def_id.krate); if let Some(other_trait_def_id) = self.tcx.all_traits().find(|def_id| { @@ -1739,7 +1736,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // different crate `DefId`. We highlight the traits. 
let found_type = - if let ty::Adt(def, _) = trait_ref.self_ty().skip_binder().peel_refs().kind() { + if let ty::Adt(def, _) = trait_pred.self_ty().skip_binder().peel_refs().kind() { Some(def.did()) } else { None @@ -1836,7 +1833,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { if self.probe(|_| { let ocx = ObligationCtxt::new(self); - self.enter_forall(trait_ref, |obligation_trait_ref| { + self.enter_forall(trait_pred, |obligation_trait_ref| { let impl_args = self.fresh_args_for_item(DUMMY_SP, single.impl_def_id); let impl_trait_ref = ocx.normalize( &ObligationCause::dummy(), @@ -1864,7 +1861,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let mut terrs = vec![]; for (obligation_arg, impl_arg) in - std::iter::zip(obligation_trait_ref.args, impl_trait_ref.args) + std::iter::zip(obligation_trait_ref.trait_ref.args, impl_trait_ref.args) { if (obligation_arg, impl_arg).references_error() { return false; @@ -1906,8 +1903,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } let traits = self.cmp_traits( - obligation_trait_ref.def_id, - &obligation_trait_ref.args[1..], + obligation_trait_ref.def_id(), + &obligation_trait_ref.trait_ref.args[1..], impl_trait_ref.def_id, &impl_trait_ref.args[1..], ); @@ -1991,7 +1988,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } if let &[cand] = &candidates[..] { let (desc, mention_castable) = - match (cand.self_ty().kind(), trait_ref.self_ty().skip_binder().kind()) { + match (cand.self_ty().kind(), trait_pred.self_ty().skip_binder().kind()) { (ty::FnPtr(..), ty::FnDef(..)) => { (" implemented for fn pointer `", ", cast using `as`") } @@ -2055,7 +2052,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { .filter(|cand| !self.tcx.do_not_recommend_impl(cand.impl_def_id)) .collect::<Vec<_>>(); - let def_id = trait_ref.def_id(); + let def_id = trait_pred.def_id(); if impl_candidates.is_empty() { if self.tcx.trait_is_auto(def_id) || self.tcx.lang_items().iter().any(|(_, id)| id == def_id) @@ -2132,11 +2129,10 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { && !self.tcx.trait_is_auto(def_id) && !self.tcx.lang_items().iter().any(|(_, id)| id == def_id) { - let trait_ref = trait_pred.to_poly_trait_ref(); let impl_candidates = self.find_similar_impl_candidates(trait_pred); self.report_similar_impl_candidates( &impl_candidates, - trait_ref, + trait_pred, body_def_id, err, true, @@ -2173,12 +2169,16 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { /// If the `Self` type of the unsatisfied trait `trait_ref` implements a trait /// with the same path as `trait_ref`, a help message about /// a probable version mismatch is added to `err` - fn note_version_mismatch(&self, err: &mut Diag<'_>, trait_ref: ty::PolyTraitRef<'tcx>) -> bool { + fn note_version_mismatch( + &self, + err: &mut Diag<'_>, + trait_pred: ty::PolyTraitPredicate<'tcx>, + ) -> bool { let get_trait_impls = |trait_def_id| { let mut trait_impls = vec![]; self.tcx.for_each_relevant_impl( trait_def_id, - trait_ref.skip_binder().self_ty(), + trait_pred.skip_binder().self_ty(), |impl_def_id| { trait_impls.push(impl_def_id); }, @@ -2186,11 +2186,11 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { trait_impls }; - let required_trait_path = self.tcx.def_path_str(trait_ref.def_id()); + let required_trait_path = self.tcx.def_path_str(trait_pred.def_id()); let traits_with_same_path: UnordSet<_> = self .tcx - .all_traits() - .filter(|trait_def_id| *trait_def_id != trait_ref.def_id()) + .visible_traits() + .filter(|trait_def_id| *trait_def_id != trait_pred.def_id()) .map(|trait_def_id| (self.tcx.def_path_str(trait_def_id), trait_def_id)) .filter(|(p, _)| *p == 
required_trait_path) .collect(); @@ -2374,7 +2374,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn get_safe_transmute_error_and_reason( &self, obligation: PredicateObligation<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, span: Span, ) -> GetSafeTransmuteErrorAndReason { use rustc_transmute::Answer; @@ -2386,19 +2386,19 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } // Erase regions because layout code doesn't particularly care about regions. - let trait_ref = - self.tcx.erase_regions(self.tcx.instantiate_bound_regions_with_erased(trait_ref)); + let trait_pred = + self.tcx.erase_regions(self.tcx.instantiate_bound_regions_with_erased(trait_pred)); let src_and_dst = rustc_transmute::Types { - dst: trait_ref.args.type_at(0), - src: trait_ref.args.type_at(1), + dst: trait_pred.trait_ref.args.type_at(0), + src: trait_pred.trait_ref.args.type_at(1), }; let ocx = ObligationCtxt::new(self); let Ok(assume) = ocx.structurally_normalize_const( &obligation.cause, obligation.param_env, - trait_ref.args.const_at(2), + trait_pred.trait_ref.args.const_at(2), ) else { self.dcx().span_delayed_bug( span, @@ -2417,8 +2417,8 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { return GetSafeTransmuteErrorAndReason::Silent; }; - let dst = trait_ref.args.type_at(0); - let src = trait_ref.args.type_at(1); + let dst = trait_pred.trait_ref.args.type_at(0); + let src = trait_pred.trait_ref.args.type_at(1); let err_msg = format!("`{src}` cannot be safely transmuted into `{dst}`"); match rustc_transmute::TransmuteTypeEnv::new(self.infcx).is_transmutable( @@ -2566,12 +2566,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { trait_predicate.skip_binder().polarity, ) { - self.add_help_message_for_fn_trait( - trait_predicate.to_poly_trait_ref(), - err, - implemented_kind, - params, - ); + self.add_help_message_for_fn_trait(trait_predicate, err, implemented_kind, params); } else if !trait_predicate.has_non_region_infer() && self.predicate_can_apply(obligation.param_env, trait_predicate) { @@ -2606,7 +2601,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { let impl_candidates = self.find_similar_impl_candidates(trait_predicate); if !self.report_similar_impl_candidates( &impl_candidates, - trait_predicate.to_poly_trait_ref(), + trait_predicate, body_def_id, err, true, @@ -2623,7 +2618,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { self.suggest_convert_to_slice( err, obligation, - trait_predicate.to_poly_trait_ref(), + trait_predicate, impl_candidates.as_slice(), span, ); @@ -2634,7 +2629,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { fn add_help_message_for_fn_trait( &self, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, err: &mut Diag<'_>, implemented_kind: ty::ClosureKind, params: ty::Binder<'tcx, Ty<'tcx>>, @@ -2647,12 +2642,12 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // to implement. 
let selected_kind = self .tcx - .fn_trait_kind_from_def_id(trait_ref.def_id()) + .fn_trait_kind_from_def_id(trait_pred.def_id()) .expect("expected to map DefId to ClosureKind"); if !implemented_kind.extends(selected_kind) { err.note(format!( "`{}` implements `{}`, but it must implement `{}`, which is more general", - trait_ref.skip_binder().self_ty(), + trait_pred.skip_binder().self_ty(), implemented_kind, selected_kind )); @@ -2660,7 +2655,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // Note any argument mismatches let given_ty = params.skip_binder(); - let expected_ty = trait_ref.skip_binder().args.type_at(1); + let expected_ty = trait_pred.skip_binder().trait_ref.args.type_at(1); if let ty::Tuple(given) = given_ty.kind() && let ty::Tuple(expected) = expected_ty.kind() { diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs index b108a9352a5..cd4f77bb4cf 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/mod.rs @@ -1,4 +1,5 @@ pub mod ambiguity; +pub mod call_kind; mod fulfillment_errors; pub mod on_unimplemented; mod overflow; diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/on_unimplemented.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/on_unimplemented.rs index 51efe39a7bc..2d932e36470 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/on_unimplemented.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/on_unimplemented.rs @@ -10,7 +10,7 @@ use rustc_hir::{AttrArgs, AttrKind, Attribute}; use rustc_macros::LintDiagnostic; use rustc_middle::bug; use rustc_middle::ty::print::PrintTraitRefExt as _; -use rustc_middle::ty::{self, GenericArgsRef, GenericParamDefKind, ToPolyTraitRef, TyCtxt}; +use rustc_middle::ty::{self, GenericArgsRef, GenericParamDefKind, TyCtxt}; use rustc_parse_format::{ParseMode, Parser, Piece, Position}; use rustc_session::lint::builtin::UNKNOWN_OR_MALFORMED_DIAGNOSTIC_ATTRIBUTES; use rustc_span::{Span, Symbol, kw, sym}; @@ -42,18 +42,18 @@ static ALLOWED_FORMAT_SYMBOLS: &[Symbol] = &[ impl<'tcx> TypeErrCtxt<'_, 'tcx> { fn impl_similar_to( &self, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, obligation: &PredicateObligation<'tcx>, ) -> Option<(DefId, GenericArgsRef<'tcx>)> { let tcx = self.tcx; let param_env = obligation.param_env; - self.enter_forall(trait_ref, |trait_ref| { - let trait_self_ty = trait_ref.self_ty(); + self.enter_forall(trait_pred, |trait_pred| { + let trait_self_ty = trait_pred.self_ty(); let mut self_match_impls = vec![]; let mut fuzzy_match_impls = vec![]; - self.tcx.for_each_relevant_impl(trait_ref.def_id, trait_self_ty, |def_id| { + self.tcx.for_each_relevant_impl(trait_pred.def_id(), trait_self_ty, |def_id| { let impl_args = self.fresh_args_for_item(obligation.cause.span, def_id); let impl_trait_ref = tcx.impl_trait_ref(def_id).unwrap().instantiate(tcx, impl_args); @@ -64,7 +64,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { self_match_impls.push((def_id, impl_args)); if iter::zip( - trait_ref.args.types().skip(1), + trait_pred.trait_ref.args.types().skip(1), impl_trait_ref.args.types().skip(1), ) .all(|(u, v)| self.fuzzy_match_tys(u, v, false).is_some()) @@ -117,7 +117,7 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { } let (def_id, args) = self - .impl_similar_to(trait_pred.to_poly_trait_ref(), obligation) + .impl_similar_to(trait_pred, obligation) .unwrap_or_else(|| 
(trait_pred.def_id(), trait_pred.skip_binder().trait_ref.args)); let trait_pred = trait_pred.skip_binder(); @@ -205,9 +205,15 @@ impl<'tcx> TypeErrCtxt<'_, 'tcx> { if self_ty.is_fn() { let fn_sig = self_ty.fn_sig(self.tcx); - let shortname = match fn_sig.safety() { - hir::Safety::Safe => "fn", - hir::Safety::Unsafe => "unsafe fn", + let shortname = if let ty::FnDef(def_id, _) = self_ty.kind() + && self.tcx.codegen_fn_attrs(def_id).safe_target_features + { + "#[target_feature] fn" + } else { + match fn_sig.safety() { + hir::Safety::Safe => "fn", + hir::Safety::Unsafe => "unsafe fn", + } }; flags.push((sym::_Self, Some(shortname.to_owned()))); } diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs index 9d85ca1dd4d..c2e73b732d3 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs @@ -32,9 +32,9 @@ use rustc_middle::ty::print::{ with_forced_trimmed_paths, with_no_trimmed_paths, }; use rustc_middle::ty::{ - self, AdtKind, GenericArgs, InferTy, IsSuggestable, ToPolyTraitRef, Ty, TyCtxt, TypeFoldable, - TypeFolder, TypeSuperFoldable, TypeVisitableExt, TypeckResults, Upcast, - suggest_arbitrary_trait_bound, suggest_constraining_type_param, + self, AdtKind, GenericArgs, InferTy, IsSuggestable, Ty, TyCtxt, TypeFoldable, TypeFolder, + TypeSuperFoldable, TypeVisitableExt, TypeckResults, Upcast, suggest_arbitrary_trait_bound, + suggest_constraining_type_param, }; use rustc_middle::{bug, span_bug}; use rustc_span::def_id::LocalDefId; @@ -218,15 +218,15 @@ pub fn suggest_restriction<'tcx, G: EmissionGuarantee>( (_, None) => predicate_constraint(hir_generics, trait_pred.upcast(tcx)), (None, Some((ident, []))) => ( ident.span.shrink_to_hi(), - format!(": {}", trait_pred.to_poly_trait_ref().print_trait_sugared()), + format!(": {}", trait_pred.print_modifiers_and_trait_path()), ), (_, Some((_, [.., bounds]))) => ( bounds.span().shrink_to_hi(), - format!(" + {}", trait_pred.to_poly_trait_ref().print_trait_sugared()), + format!(" + {}", trait_pred.print_modifiers_and_trait_path()), ), (Some(_), Some((_, []))) => ( hir_generics.span.shrink_to_hi(), - format!(": {}", trait_pred.to_poly_trait_ref().print_trait_sugared()), + format!(": {}", trait_pred.print_modifiers_and_trait_path()), ), }; @@ -3729,7 +3729,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { &self, obligation: &PredicateObligation<'tcx>, err: &mut Diag<'_>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { let rhs_span = match obligation.cause.code() { ObligationCauseCode::BinOp { rhs_span: Some(span), rhs_is_lit, .. 
} if *rhs_is_lit => { @@ -3737,8 +3737,9 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } _ => return, }; - if let ty::Float(_) = trait_ref.skip_binder().self_ty().kind() - && let ty::Infer(InferTy::IntVar(_)) = trait_ref.skip_binder().args.type_at(1).kind() + if let ty::Float(_) = trait_pred.skip_binder().self_ty().kind() + && let ty::Infer(InferTy::IntVar(_)) = + trait_pred.skip_binder().trait_ref.args.type_at(1).kind() { err.span_suggestion_verbose( rhs_span.shrink_to_hi(), @@ -4448,7 +4449,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { &self, err: &mut Diag<'_>, obligation: &PredicateObligation<'tcx>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, candidate_impls: &[ImplCandidate<'tcx>], span: Span, ) { @@ -4464,7 +4465,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { // 1. `[T; _]` (array of T) // 2. `&[T; _]` (reference to array of T) // 3. `&mut [T; _]` (mutable reference to array of T) - let (element_ty, mut mutability) = match *trait_ref.skip_binder().self_ty().kind() { + let (element_ty, mut mutability) = match *trait_pred.skip_binder().self_ty().kind() { ty::Array(element_ty, _) => (element_ty, None), ty::Ref(_, pointee_ty, mutability) => match *pointee_ty.kind() { @@ -4620,14 +4621,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { pub(super) fn suggest_desugaring_async_fn_in_trait( &self, err: &mut Diag<'_>, - trait_ref: ty::PolyTraitRef<'tcx>, + trait_pred: ty::PolyTraitPredicate<'tcx>, ) { // Don't suggest if RTN is active -- we should prefer a where-clause bound instead. if self.tcx.features().return_type_notation() { return; } - let trait_def_id = trait_ref.def_id(); + let trait_def_id = trait_pred.def_id(); // Only suggest specifying auto traits if !self.tcx.trait_is_auto(trait_def_id) { @@ -4635,7 +4636,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } // Look for an RPITIT - let ty::Alias(ty::Projection, alias_ty) = trait_ref.self_ty().skip_binder().kind() else { + let ty::Alias(ty::Projection, alias_ty) = trait_pred.self_ty().skip_binder().kind() else { return; }; let Some(ty::ImplTraitInTraitData::Trait { fn_def_id, opaque_def_id }) = diff --git a/compiler/rustc_trait_selection/src/errors.rs b/compiler/rustc_trait_selection/src/errors.rs index a8fddff4e4a..53a4e5031c6 100644 --- a/compiler/rustc_trait_selection/src/errors.rs +++ b/compiler/rustc_trait_selection/src/errors.rs @@ -1729,8 +1729,15 @@ pub enum ObligationCauseFailureCode { #[primary_span] span: Span, }, - #[diag(trait_selection_oc_cant_coerce, code = E0308)] - CantCoerce { + #[diag(trait_selection_oc_cant_coerce_force_inline, code = E0308)] + CantCoerceForceInline { + #[primary_span] + span: Span, + #[subdiagnostic] + subdiags: Vec<TypeErrorAdditionalDiags>, + }, + #[diag(trait_selection_oc_cant_coerce_intrinsic, code = E0308)] + CantCoerceIntrinsic { #[primary_span] span: Span, #[subdiagnostic] diff --git a/compiler/rustc_trait_selection/src/infer.rs b/compiler/rustc_trait_selection/src/infer.rs index ee708564a80..f373706b296 100644 --- a/compiler/rustc_trait_selection/src/infer.rs +++ b/compiler/rustc_trait_selection/src/infer.rs @@ -47,6 +47,12 @@ impl<'tcx> InferCtxt<'tcx> { traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, copy_def_id) } + fn type_is_clone_modulo_regions(&self, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool { + let ty = self.resolve_vars_if_possible(ty); + let clone_def_id = self.tcx.require_lang_item(LangItem::Clone, None); + traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, clone_def_id) + } + fn 
type_is_sized_modulo_regions(&self, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool { let lang_item = self.tcx.require_lang_item(LangItem::Sized, None); traits::type_known_to_meet_bound_modulo_regions(self, param_env, ty, lang_item) diff --git a/compiler/rustc_trait_selection/src/solve/fulfill.rs b/compiler/rustc_trait_selection/src/solve/fulfill.rs index 7db0f2bb5a7..4498beff4ea 100644 --- a/compiler/rustc_trait_selection/src/solve/fulfill.rs +++ b/compiler/rustc_trait_selection/src/solve/fulfill.rs @@ -264,9 +264,10 @@ fn fulfillment_error_for_no_solution<'tcx>( infcx.tcx.type_of(uv.def).instantiate(infcx.tcx, uv.args) } ty::ConstKind::Param(param_ct) => param_ct.find_ty_from_env(obligation.param_env), - _ => span_bug!( + ty::ConstKind::Value(ty, _) => ty, + kind => span_bug!( obligation.cause.span, - "ConstArgHasWrongType failed but we don't know how to compute type" + "ConstArgHasWrongType failed but we don't know how to compute type for {kind:?}" ), }; FulfillmentErrorCode::Select(SelectionError::ConstArgHasWrongType { diff --git a/compiler/rustc_trait_selection/src/solve/select.rs b/compiler/rustc_trait_selection/src/solve/select.rs index 1661852903c..b0b6274907d 100644 --- a/compiler/rustc_trait_selection/src/solve/select.rs +++ b/compiler/rustc_trait_selection/src/solve/select.rs @@ -117,6 +117,10 @@ fn candidate_should_be_dropped_in_favor_of<'tcx>( CandidateSource::BuiltinImpl(BuiltinImplSource::Object(a)), CandidateSource::BuiltinImpl(BuiltinImplSource::Object(b)), ) => a >= b, + ( + CandidateSource::BuiltinImpl(BuiltinImplSource::TraitUpcasting(a)), + CandidateSource::BuiltinImpl(BuiltinImplSource::TraitUpcasting(b)), + ) => a >= b, // Prefer dyn candidates over non-dyn candidates. This is necessary to // handle the unsoundness between `impl<T: ?Sized> Any for T` and `dyn Any: Any`. ( diff --git a/compiler/rustc_trait_selection/src/traits/coherence.rs b/compiler/rustc_trait_selection/src/traits/coherence.rs index 971d3a81102..e27143f1396 100644 --- a/compiler/rustc_trait_selection/src/traits/coherence.rs +++ b/compiler/rustc_trait_selection/src/traits/coherence.rs @@ -6,7 +6,7 @@ use std::fmt::Debug; -use rustc_data_structures::fx::FxIndexSet; +use rustc_data_structures::fx::{FxHashSet, FxIndexSet}; use rustc_errors::{Diag, EmissionGuarantee}; use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; @@ -116,28 +116,39 @@ pub fn overlapping_impls( return None; } - let _overlap_with_bad_diagnostics = overlap( - tcx, - TrackAmbiguityCauses::No, - skip_leak_check, - impl1_def_id, - impl2_def_id, - overlap_mode, - )?; - - // In the case where we detect an error, run the check again, but - // this time tracking intercrate ambiguity causes for better - // diagnostics. (These take time and can lead to false errors.) - let overlap = overlap( - tcx, - TrackAmbiguityCauses::Yes, - skip_leak_check, - impl1_def_id, - impl2_def_id, - overlap_mode, - ) - .unwrap(); - Some(overlap) + if tcx.next_trait_solver_in_coherence() { + overlap( + tcx, + TrackAmbiguityCauses::Yes, + skip_leak_check, + impl1_def_id, + impl2_def_id, + overlap_mode, + ) + } else { + let _overlap_with_bad_diagnostics = overlap( + tcx, + TrackAmbiguityCauses::No, + skip_leak_check, + impl1_def_id, + impl2_def_id, + overlap_mode, + )?; + + // In the case where we detect an error, run the check again, but + // this time tracking intercrate ambiguity causes for better + // diagnostics. (These take time and can lead to false errors.) 
+ let overlap = overlap( + tcx, + TrackAmbiguityCauses::Yes, + skip_leak_check, + impl1_def_id, + impl2_def_id, + overlap_mode, + ) + .unwrap(); + Some(overlap) + } } fn fresh_impl_header<'tcx>(infcx: &InferCtxt<'tcx>, impl_def_id: DefId) -> ty::ImplHeader<'tcx> { @@ -615,6 +626,7 @@ fn compute_intercrate_ambiguity_causes<'tcx>( } struct AmbiguityCausesVisitor<'a, 'tcx> { + cache: FxHashSet<Goal<'tcx, ty::Predicate<'tcx>>>, causes: &'a mut FxIndexSet<IntercrateAmbiguityCause<'tcx>>, } @@ -624,6 +636,10 @@ impl<'a, 'tcx> ProofTreeVisitor<'tcx> for AmbiguityCausesVisitor<'a, 'tcx> { } fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) { + if !self.cache.insert(goal.goal()) { + return; + } + let infcx = goal.infcx(); for cand in goal.candidates() { cand.visit_nested_in_probe(self); @@ -748,5 +764,10 @@ fn search_ambiguity_causes<'tcx>( goal: Goal<'tcx, ty::Predicate<'tcx>>, causes: &mut FxIndexSet<IntercrateAmbiguityCause<'tcx>>, ) { - infcx.probe(|_| infcx.visit_proof_tree(goal, &mut AmbiguityCausesVisitor { causes })); + infcx.probe(|_| { + infcx.visit_proof_tree(goal, &mut AmbiguityCausesVisitor { + cache: Default::default(), + causes, + }) + }); } diff --git a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs index 15eb5d74cbf..446f9eaa348 100644 --- a/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs +++ b/compiler/rustc_trait_selection/src/traits/const_evaluatable.rs @@ -79,7 +79,7 @@ pub fn is_const_evaluatable<'tcx>( Err( EvaluateConstErr::EvaluationFailure(e) | EvaluateConstErr::InvalidConstParamTy(e), - ) => Err(NotConstEvaluatable::Error(e.into())), + ) => Err(NotConstEvaluatable::Error(e)), Ok(_) => Ok(()), } } @@ -140,7 +140,7 @@ pub fn is_const_evaluatable<'tcx>( } Err( EvaluateConstErr::EvaluationFailure(e) | EvaluateConstErr::InvalidConstParamTy(e), - ) => Err(NotConstEvaluatable::Error(e.into())), + ) => Err(NotConstEvaluatable::Error(e)), Ok(_) => Ok(()), } } diff --git a/compiler/rustc_trait_selection/src/traits/effects.rs b/compiler/rustc_trait_selection/src/traits/effects.rs index 0ac24eb54e7..b32909efe0b 100644 --- a/compiler/rustc_trait_selection/src/traits/effects.rs +++ b/compiler/rustc_trait_selection/src/traits/effects.rs @@ -1,4 +1,4 @@ -use rustc_hir as hir; +use rustc_hir::{self as hir, LangItem}; use rustc_infer::infer::{BoundRegionConversionTime, DefineOpaqueTypes}; use rustc_infer::traits::{ ImplDerivedHostCause, ImplSource, Obligation, ObligationCauseCode, PredicateObligation, @@ -48,6 +48,12 @@ pub fn evaluate_host_effect_obligation<'tcx>( Err(EvaluationFailure::NoSolution) => {} } + match evaluate_host_effect_from_builtin_impls(selcx, obligation) { + Ok(result) => return Ok(result), + Err(EvaluationFailure::Ambiguous) => return Err(EvaluationFailure::Ambiguous), + Err(EvaluationFailure::NoSolution) => {} + } + match evaluate_host_effect_from_selection_candiate(selcx, obligation) { Ok(result) => return Ok(result), Err(EvaluationFailure::Ambiguous) => return Err(EvaluationFailure::Ambiguous), @@ -228,6 +234,104 @@ fn evaluate_host_effect_from_item_bounds<'tcx>( } } +fn evaluate_host_effect_from_builtin_impls<'tcx>( + selcx: &mut SelectionContext<'_, 'tcx>, + obligation: &HostEffectObligation<'tcx>, +) -> Result<ThinVec<PredicateObligation<'tcx>>, EvaluationFailure> { + match selcx.tcx().as_lang_item(obligation.predicate.def_id()) { + Some(LangItem::Destruct) => evaluate_host_effect_for_destruct_goal(selcx, obligation), + _ => Err(EvaluationFailure::NoSolution), + } 
+} + +// NOTE: Keep this in sync with `const_conditions_for_destruct` in the new solver. +fn evaluate_host_effect_for_destruct_goal<'tcx>( + selcx: &mut SelectionContext<'_, 'tcx>, + obligation: &HostEffectObligation<'tcx>, +) -> Result<ThinVec<PredicateObligation<'tcx>>, EvaluationFailure> { + let tcx = selcx.tcx(); + let destruct_def_id = tcx.require_lang_item(LangItem::Destruct, None); + let self_ty = obligation.predicate.self_ty(); + + let const_conditions = match *self_ty.kind() { + // An ADT is `~const Destruct` only if all of the fields are, + // *and* if there is a `Drop` impl, that `Drop` impl is also `~const`. + ty::Adt(adt_def, args) => { + let mut const_conditions: ThinVec<_> = adt_def + .all_fields() + .map(|field| ty::TraitRef::new(tcx, destruct_def_id, [field.ty(tcx, args)])) + .collect(); + match adt_def.destructor(tcx).map(|dtor| dtor.constness) { + // `Drop` impl exists, but it's not const. Type cannot be `~const Destruct`. + Some(hir::Constness::NotConst) => return Err(EvaluationFailure::NoSolution), + // `Drop` impl exists, and it's const. Require `Ty: ~const Drop` to hold. + Some(hir::Constness::Const) => { + let drop_def_id = tcx.require_lang_item(LangItem::Drop, None); + let drop_trait_ref = ty::TraitRef::new(tcx, drop_def_id, [self_ty]); + const_conditions.push(drop_trait_ref); + } + // No `Drop` impl, no need to require anything else. + None => {} + } + const_conditions + } + + ty::Array(ty, _) | ty::Pat(ty, _) | ty::Slice(ty) => { + thin_vec![ty::TraitRef::new(tcx, destruct_def_id, [ty])] + } + + ty::Tuple(tys) => { + tys.iter().map(|field_ty| ty::TraitRef::new(tcx, destruct_def_id, [field_ty])).collect() + } + + // Trivially implement `~const Destruct` + ty::Bool + | ty::Char + | ty::Int(..) + | ty::Uint(..) + | ty::Float(..) + | ty::Str + | ty::RawPtr(..) + | ty::Ref(..) + | ty::FnDef(..) + | ty::FnPtr(..) + | ty::Never + | ty::Infer(ty::InferTy::FloatVar(_) | ty::InferTy::IntVar(_)) + | ty::Error(_) => thin_vec![], + + // Coroutines and closures could implement `~const Drop`, + // but they don't really need to right now. + ty::Closure(_, _) + | ty::CoroutineClosure(_, _) + | ty::Coroutine(_, _) + | ty::CoroutineWitness(_, _) => return Err(EvaluationFailure::NoSolution), + + // FIXME(unsafe_binders): Unsafe binders could implement `~const Drop` + // if their inner type implements it. + ty::UnsafeBinder(_) => return Err(EvaluationFailure::NoSolution), + + ty::Dynamic(..) | ty::Param(_) | ty::Alias(..) | ty::Placeholder(_) | ty::Foreign(_) => { + return Err(EvaluationFailure::NoSolution); + } + + ty::Bound(..) 
+ | ty::Infer(ty::TyVar(_) | ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => { + panic!("unexpected type `{self_ty:?}`") + } + }; + + Ok(const_conditions + .into_iter() + .map(|trait_ref| { + obligation.with( + tcx, + ty::Binder::dummy(trait_ref) + .to_host_effect_clause(tcx, obligation.predicate.constness), + ) + }) + .collect()) +} + fn evaluate_host_effect_from_selection_candiate<'tcx>( selcx: &mut SelectionContext<'_, 'tcx>, obligation: &HostEffectObligation<'tcx>, diff --git a/compiler/rustc_trait_selection/src/traits/mod.rs b/compiler/rustc_trait_selection/src/traits/mod.rs index 1dcd0d0dfb8..da16a742099 100644 --- a/compiler/rustc_trait_selection/src/traits/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/mod.rs @@ -714,9 +714,18 @@ pub fn impossible_predicates<'tcx>(tcx: TyCtxt<'tcx>, predicates: Vec<ty::Clause } let errors = ocx.select_all_or_error(); - let result = !errors.is_empty(); - debug!("impossible_predicates = {:?}", result); - result + if !errors.is_empty() { + return true; + } + + // Leak check for any higher-ranked trait mismatches. + // We only need to do this in the old solver, since the new solver already + // leak-checks. + if !infcx.next_trait_solver() && infcx.leak_check(ty::UniverseIndex::ROOT, None).is_err() { + return true; + } + + false } fn instantiate_and_check_impossible_predicates<'tcx>( diff --git a/compiler/rustc_trait_selection/src/traits/project.rs b/compiler/rustc_trait_selection/src/traits/project.rs index 69b7d5cff1e..d59cf88875e 100644 --- a/compiler/rustc_trait_selection/src/traits/project.rs +++ b/compiler/rustc_trait_selection/src/traits/project.rs @@ -991,7 +991,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( Err(ErrorGuaranteed { .. }) => true, } } - ImplSource::Builtin(BuiltinImplSource::Misc, _) => { + ImplSource::Builtin(BuiltinImplSource::Misc | BuiltinImplSource::Trivial, _) => { // While a builtin impl may be known to exist, the associated type may not yet // be known. Any type with multiple potential associated types is therefore // not eligible. @@ -1148,7 +1148,9 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( // If returned by `struct_tail` this is the empty tuple. | ty::Tuple(..) // Integers and floats are always Sized, and so have unit type metadata. - | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) => true, + | ty::Infer(ty::InferTy::IntVar(_) | ty::InferTy::FloatVar(..)) + // This happens if we reach the recursion limit when finding the struct tail. + | ty::Error(..) => true, // We normalize from `Wrapper<Tail>::Metadata` to `Tail::Metadata` if able. // Otherwise, type parameters, opaques, and unnormalized projections have @@ -1179,8 +1181,7 @@ fn assemble_candidates_from_impls<'cx, 'tcx>( | ty::Alias(..) | ty::Bound(..) | ty::Placeholder(..) - | ty::Infer(..) - | ty::Error(_) => { + | ty::Infer(..) 
=> { if tail.has_infer_types() { candidate_set.mark_ambiguous(); } @@ -1295,7 +1296,7 @@ fn confirm_select_candidate<'cx, 'tcx>( ) -> Progress<'tcx> { match impl_source { ImplSource::UserDefined(data) => confirm_impl_candidate(selcx, obligation, data), - ImplSource::Builtin(BuiltinImplSource::Misc, data) => { + ImplSource::Builtin(BuiltinImplSource::Misc | BuiltinImplSource::Trivial, data) => { let tcx = selcx.tcx(); let trait_def_id = obligation.predicate.trait_def_id(tcx); if tcx.is_lang_item(trait_def_id, LangItem::Coroutine) { diff --git a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs index 968dc631e50..b370f802052 100644 --- a/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs +++ b/compiler/rustc_trait_selection/src/traits/select/candidate_assembly.rs @@ -16,7 +16,7 @@ use rustc_infer::traits::{ Obligation, ObligationCause, PolyTraitObligation, PredicateObligations, SelectionError, }; use rustc_middle::ty::fast_reject::DeepRejectCtxt; -use rustc_middle::ty::{self, ToPolyTraitRef, Ty, TypeVisitableExt, TypingMode}; +use rustc_middle::ty::{self, Ty, TypeVisitableExt, TypingMode}; use rustc_middle::{bug, span_bug}; use rustc_type_ir::Interner; use tracing::{debug, instrument, trace}; @@ -186,10 +186,11 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { } selcx.infcx.probe(|_| { + // We checked the polarity already match selcx.match_normalize_trait_ref( obligation, placeholder_trait_predicate.trait_ref, - bound.to_poly_trait_ref(), + bound.map_bound(|pred| pred.trait_ref), ) { Ok(None) => { candidates.vec.push(ProjectionCandidate(idx)); diff --git a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs index 3619d16cde2..729ae3f2c2a 100644 --- a/compiler/rustc_trait_selection/src/traits/select/confirmation.rs +++ b/compiler/rustc_trait_selection/src/traits/select/confirmation.rs @@ -16,7 +16,7 @@ use rustc_hir::lang_items::LangItem; use rustc_infer::infer::{DefineOpaqueTypes, HigherRankedType, InferOk}; use rustc_infer::traits::ObligationCauseCode; use rustc_middle::traits::{BuiltinImplSource, SignatureMismatchData}; -use rustc_middle::ty::{self, GenericArgsRef, ToPolyTraitRef, Ty, TyCtxt, Upcast}; +use rustc_middle::ty::{self, GenericArgsRef, Ty, TyCtxt, Upcast}; use rustc_middle::{bug, span_bug}; use rustc_span::def_id::DefId; use rustc_type_ir::elaborate; @@ -458,8 +458,9 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { ensure_sufficient_stack(|| { let cause = obligation.derived_cause(ObligationCauseCode::BuiltinDerived); - let poly_trait_ref = obligation.predicate.to_poly_trait_ref(); - let trait_ref = self.infcx.enter_forall_and_leak_universe(poly_trait_ref); + assert_eq!(obligation.predicate.polarity(), ty::PredicatePolarity::Positive); + let trait_ref = + self.infcx.enter_forall_and_leak_universe(obligation.predicate).trait_ref; let trait_obligations = self.impl_or_trait_obligations( &cause, obligation.recursion_depth + 1, @@ -1090,7 +1091,7 @@ impl<'cx, 'tcx> SelectionContext<'cx, 'tcx> { )? 
.expect("did not expect ambiguity during confirmation"); - Ok(ImplSource::Builtin(BuiltinImplSource::TraitUpcasting, nested)) + Ok(ImplSource::Builtin(BuiltinImplSource::TraitUpcasting(idx), nested)) } fn confirm_builtin_unsize_candidate( diff --git a/compiler/rustc_trait_selection/src/traits/select/mod.rs b/compiler/rustc_trait_selection/src/traits/select/mod.rs index 9e7da5eb368..5581ea46882 100644 --- a/compiler/rustc_trait_selection/src/traits/select/mod.rs +++ b/compiler/rustc_trait_selection/src/traits/select/mod.rs @@ -1895,6 +1895,18 @@ impl<'tcx> SelectionContext<'_, 'tcx> { Some(None) => {} None => return None, } + // Same for upcasting. + let upcast_bound = candidates + .iter() + .filter_map(|c| { + if let TraitUpcastingUnsizeCandidate(i) = c.candidate { Some(i) } else { None } + }) + .try_reduce(|c1, c2| if has_non_region_infer { None } else { Some(c1.min(c2)) }); + match upcast_bound { + Some(Some(index)) => return Some(TraitUpcastingUnsizeCandidate(index)), + Some(None) => {} + None => return None, + } // Finally, handle overlapping user-written impls. let impls = candidates.iter().filter_map(|c| { diff --git a/compiler/rustc_ty_utils/src/abi.rs b/compiler/rustc_ty_utils/src/abi.rs index b63534880d1..c528179ae0e 100644 --- a/compiler/rustc_ty_utils/src/abi.rs +++ b/compiler/rustc_ty_utils/src/abi.rs @@ -293,6 +293,7 @@ fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: ExternAbi, c_variadic: bool) -> Conv PtxKernel => Conv::PtxKernel, Msp430Interrupt => Conv::Msp430Intr, X86Interrupt => Conv::X86Intr, + GpuKernel => Conv::GpuKernel, AvrInterrupt => Conv::AvrInterrupt, AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt, RiscvInterruptM => Conv::RiscvInterrupt { kind: RiscvInterruptKind::Machine }, diff --git a/compiler/rustc_ty_utils/src/consts.rs b/compiler/rustc_ty_utils/src/consts.rs index 637e239a570..51a7c976f60 100644 --- a/compiler/rustc_ty_utils/src/consts.rs +++ b/compiler/rustc_ty_utils/src/consts.rs @@ -4,7 +4,7 @@ use rustc_abi::{FIRST_VARIANT, VariantIdx}; use rustc_errors::ErrorGuaranteed; use rustc_hir::def::DefKind; use rustc_hir::def_id::LocalDefId; -use rustc_middle::mir::interpret::{LitToConstError, LitToConstInput}; +use rustc_middle::mir::interpret::LitToConstInput; use rustc_middle::query::Providers; use rustc_middle::thir::visit; use rustc_middle::thir::visit::Visitor; @@ -118,13 +118,7 @@ fn recurse_build<'tcx>( } &ExprKind::Literal { lit, neg } => { let sp = node.span; - match tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) { - Ok(c) => c, - Err(LitToConstError::Reported(guar)) => ty::Const::new_error(tcx, guar), - Err(LitToConstError::TypeError) => { - bug!("encountered type error in lit_to_const") - } - } + tcx.at(sp).lit_to_const(LitToConstInput { lit: &lit.node, ty: node.ty, neg }) } &ExprKind::NonHirLiteral { lit, user_ty: _ } => { let val = ty::ValTree::from_scalar_int(lit); diff --git a/compiler/rustc_ty_utils/src/instance.rs b/compiler/rustc_ty_utils/src/instance.rs index fc76a86f797..d5e1937efaa 100644 --- a/compiler/rustc_ty_utils/src/instance.rs +++ b/compiler/rustc_ty_utils/src/instance.rs @@ -248,7 +248,7 @@ fn resolve_associated_item<'tcx>( }) } } - traits::ImplSource::Builtin(BuiltinImplSource::Misc, _) => { + traits::ImplSource::Builtin(BuiltinImplSource::Misc | BuiltinImplSource::Trivial, _) => { if tcx.is_lang_item(trait_ref.def_id, LangItem::Clone) { // FIXME(eddyb) use lang items for methods instead of names. 
let name = tcx.item_name(trait_item_id); diff --git a/compiler/rustc_ty_utils/src/layout.rs b/compiler/rustc_ty_utils/src/layout.rs index 9f138cf1275..17be0bd0ab9 100644 --- a/compiler/rustc_ty_utils/src/layout.rs +++ b/compiler/rustc_ty_utils/src/layout.rs @@ -9,7 +9,7 @@ use rustc_abi::{ HasDataLayout, Layout, LayoutCalculatorError, LayoutData, Niche, ReprOptions, Scalar, Size, StructKind, TagEncoding, VariantIdx, Variants, WrappingRange, }; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_index::{IndexSlice, IndexVec}; use rustc_middle::bug; use rustc_middle::mir::{CoroutineLayout, CoroutineSavedLocal}; @@ -105,21 +105,27 @@ fn map_error<'tcx>( // See `tests/ui/layout/trivial-bounds-sized.rs` for an example. assert!(field.layout.is_unsized(), "invalid layout error {err:#?}"); if !field.ty.is_sized(cx.tcx(), cx.typing_env) { - cx.tcx().dcx().delayed_bug(format!( + let guar = cx.tcx().dcx().delayed_bug(format!( "encountered unexpected unsized field in layout of {ty:?}: {field:#?}" )); + LayoutError::ReferencesError(guar) + } else { + LayoutError::Unknown(ty) } - LayoutError::Unknown(ty) } LayoutCalculatorError::EmptyUnion => { // This is always a compile error. - cx.tcx().dcx().delayed_bug(format!("computed layout of empty union: {ty:?}")); - LayoutError::Unknown(ty) + let guar = + cx.tcx().dcx().delayed_bug(format!("computed layout of empty union: {ty:?}")); + LayoutError::ReferencesError(guar) } LayoutCalculatorError::ReprConflict => { // packed enums are the only known trigger of this, but others might arise - cx.tcx().dcx().delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}")); - LayoutError::Unknown(ty) + let guar = cx + .tcx() + .dcx() + .delayed_bug(format!("computed impossible repr (packed enum?): {ty:?}")); + LayoutError::ReferencesError(guar) } }; error(cx, err) @@ -347,6 +353,7 @@ fn layout_of_uncached<'tcx>( size, max_repr_align: None, unadjusted_abi_align: element.align.abi, + randomization_seed: element.randomization_seed.wrapping_add(count), }) } ty::Slice(element) => { @@ -360,6 +367,8 @@ fn layout_of_uncached<'tcx>( size: Size::ZERO, max_repr_align: None, unadjusted_abi_align: element.align.abi, + // adding a randomly chosen value to distinguish slices + randomization_seed: element.randomization_seed.wrapping_add(0x2dcba99c39784102), }) } ty::Str => tcx.mk_layout(LayoutData { @@ -371,6 +380,8 @@ fn layout_of_uncached<'tcx>( size: Size::ZERO, max_repr_align: None, unadjusted_abi_align: dl.i8_align.abi, + // another random value + randomization_seed: 0xc1325f37d127be22, }), // Odd unit types. @@ -427,8 +438,10 @@ fn layout_of_uncached<'tcx>( ty::Adt(def, args) if def.repr().simd() => { if !def.is_struct() { // Should have yielded E0517 by now. 
- tcx.dcx().delayed_bug("#[repr(simd)] was applied to an ADT that is not a struct"); - return Err(error(cx, LayoutError::Unknown(ty))); + let guar = tcx + .dcx() + .delayed_bug("#[repr(simd)] was applied to an ADT that is not a struct"); + return Err(error(cx, LayoutError::ReferencesError(guar))); } let fields = &def.non_enum_variant().fields; @@ -454,10 +467,10 @@ fn layout_of_uncached<'tcx>( // (should be caught by typeck) for fi in fields { if fi.ty(tcx, args) != f0_ty { - tcx.dcx().delayed_bug( + let guar = tcx.dcx().delayed_bug( "#[repr(simd)] was applied to an ADT with heterogeneous field type", ); - return Err(error(cx, LayoutError::Unknown(ty))); + return Err(error(cx, LayoutError::ReferencesError(guar))); } } @@ -542,6 +555,7 @@ fn layout_of_uncached<'tcx>( align, max_repr_align: None, unadjusted_abi_align: align.abi, + randomization_seed: e_ly.randomization_seed.wrapping_add(e_len), }) } @@ -561,11 +575,11 @@ fn layout_of_uncached<'tcx>( if def.is_union() { if def.repr().pack.is_some() && def.repr().align.is_some() { - tcx.dcx().span_delayed_bug( + let guar = tcx.dcx().span_delayed_bug( tcx.def_span(def.did()), "union cannot be packed and aligned", ); - return Err(error(cx, LayoutError::Unknown(ty))); + return Err(error(cx, LayoutError::ReferencesError(guar))); } return Ok(tcx.mk_layout( @@ -718,7 +732,7 @@ enum SavedLocalEligibility { /// Compute the eligibility and assignment of each local. fn coroutine_saved_local_eligibility( info: &CoroutineLayout<'_>, -) -> (BitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) { +) -> (DenseBitSet<CoroutineSavedLocal>, IndexVec<CoroutineSavedLocal, SavedLocalEligibility>) { use SavedLocalEligibility::*; let mut assignments: IndexVec<CoroutineSavedLocal, SavedLocalEligibility> = @@ -726,7 +740,7 @@ fn coroutine_saved_local_eligibility( // The saved locals not eligible for overlap. These will get // "promoted" to the prefix of our coroutine. - let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); + let mut ineligible_locals = DenseBitSet::new_empty(info.field_tys.len()); // Figure out which of our saved locals are fields in only // one variant. The rest are deemed ineligible for overlap. @@ -786,7 +800,7 @@ fn coroutine_saved_local_eligibility( // lay them out with the other locals in the prefix and eliminate // unnecessary padding bytes. 
{ - let mut used_variants = BitSet::new_empty(info.variant_fields.len()); + let mut used_variants = DenseBitSet::new_empty(info.variant_fields.len()); for assignment in &assignments { if let Assigned(idx) = assignment { used_variants.insert(*idx); @@ -999,6 +1013,9 @@ fn coroutine_layout<'tcx>( BackendRepr::Memory { sized: true } }; + // this is similar to how ReprOptions populates its field_shuffle_seed + let def_hash = tcx.def_path_hash(def_id).0.to_smaller_hash().as_u64(); + let layout = tcx.mk_layout(LayoutData { variants: Variants::Multiple { tag, @@ -1019,6 +1036,7 @@ fn coroutine_layout<'tcx>( align, max_repr_align: None, unadjusted_abi_align: align.abi, + randomization_seed: def_hash, }); debug!("coroutine layout ({:?}): {:#?}", ty, layout); Ok(layout) diff --git a/compiler/rustc_ty_utils/src/representability.rs b/compiler/rustc_ty_utils/src/representability.rs index 0ffb7f62496..98b1550e1a3 100644 --- a/compiler/rustc_ty_utils/src/representability.rs +++ b/compiler/rustc_ty_utils/src/representability.rs @@ -1,5 +1,5 @@ use rustc_hir::def::DefKind; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::query::Providers; use rustc_middle::ty::{self, Representability, Ty, TyCtxt}; @@ -83,10 +83,10 @@ fn representability_adt_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Representab Representability::Representable } -fn params_in_repr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> BitSet<u32> { +fn params_in_repr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> DenseBitSet<u32> { let adt_def = tcx.adt_def(def_id); let generics = tcx.generics_of(def_id); - let mut params_in_repr = BitSet::new_empty(generics.own_params.len()); + let mut params_in_repr = DenseBitSet::new_empty(generics.own_params.len()); for variant in adt_def.variants() { for field in variant.fields.iter() { params_in_repr_ty( @@ -99,7 +99,7 @@ fn params_in_repr(tcx: TyCtxt<'_>, def_id: LocalDefId) -> BitSet<u32> { params_in_repr } -fn params_in_repr_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, params_in_repr: &mut BitSet<u32>) { +fn params_in_repr_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>, params_in_repr: &mut DenseBitSet<u32>) { match *ty.kind() { ty::Adt(adt, args) => { let inner_params_in_repr = tcx.params_in_repr(adt.did()); diff --git a/compiler/rustc_ty_utils/src/ty.rs b/compiler/rustc_ty_utils/src/ty.rs index 7eed32e3a33..8ed45b4e541 100644 --- a/compiler/rustc_ty_utils/src/ty.rs +++ b/compiler/rustc_ty_utils/src/ty.rs @@ -2,7 +2,7 @@ use rustc_data_structures::fx::FxHashSet; use rustc_hir as hir; use rustc_hir::LangItem; use rustc_hir::def::DefKind; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use rustc_middle::bug; use rustc_middle::query::Providers; use rustc_middle::ty::fold::fold_regions; @@ -317,7 +317,7 @@ fn asyncness(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::Asyncness { }) } -fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> BitSet<u32> { +fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> DenseBitSet<u32> { let def = tcx.adt_def(def_id); let num_params = tcx.generics_of(def_id).count(); @@ -338,10 +338,10 @@ fn unsizing_params_for_adt<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> BitSet<u32 // The last field of the structure has to exist and contain type/const parameters. 
let Some((tail_field, prefix_fields)) = def.non_enum_variant().fields.raw.split_last() else { - return BitSet::new_empty(num_params); + return DenseBitSet::new_empty(num_params); }; - let mut unsizing_params = BitSet::new_empty(num_params); + let mut unsizing_params = DenseBitSet::new_empty(num_params); for arg in tcx.type_of(tail_field.did).instantiate_identity().walk() { if let Some(i) = maybe_unsizing_param_idx(arg) { unsizing_params.insert(i); diff --git a/compiler/rustc_type_ir/src/binder.rs b/compiler/rustc_type_ir/src/binder.rs index cb59bc608c2..0d0092ea1aa 100644 --- a/compiler/rustc_type_ir/src/binder.rs +++ b/compiler/rustc_type_ir/src/binder.rs @@ -804,7 +804,7 @@ impl<'a, I: Interner> ArgFolder<'a, I> { #[inline(never)] fn region_param_out_of_range(&self, ebr: I::EarlyParamRegion, r: I::Region) -> ! { panic!( - "const parameter `{:?}` ({:?}/{}) out of range when instantiating args={:?}", + "region parameter `{:?}` ({:?}/{}) out of range when instantiating args={:?}", ebr, r, ebr.index(), diff --git a/compiler/rustc_type_ir/src/error.rs b/compiler/rustc_type_ir/src/error.rs index 55671b84dbc..68b11489ae7 100644 --- a/compiler/rustc_type_ir/src/error.rs +++ b/compiler/rustc_type_ir/src/error.rs @@ -51,6 +51,9 @@ pub enum TypeError<I: Interner> { ConstMismatch(ExpectedFound<I::Const>), IntrinsicCast, + /// `#[rustc_force_inline]` functions must be inlined and must not be codegened independently, + /// so casting to a function pointer must be prohibited. + ForceInlineCast, /// Safe `#[target_feature]` functions are not assignable to safe function pointers. TargetFeatureCast(I::DefId), } @@ -83,6 +86,7 @@ impl<I: Interner> TypeError<I> { | ProjectionMismatched(_) | ExistentialMismatch(_) | ConstMismatch(_) + | ForceInlineCast | IntrinsicCast => true, } } diff --git a/compiler/rustc_type_ir/src/interner.rs b/compiler/rustc_type_ir/src/interner.rs index 025ec7ae896..4fec606a831 100644 --- a/compiler/rustc_type_ir/src/interner.rs +++ b/compiler/rustc_type_ir/src/interner.rs @@ -3,7 +3,7 @@ use std::hash::Hash; use std::ops::Deref; use rustc_ast_ir::Movability; -use rustc_index::bit_set::BitSet; +use rustc_index::bit_set::DenseBitSet; use smallvec::SmallVec; use crate::fold::TypeFoldable; @@ -282,7 +282,7 @@ pub trait Interner: fn coroutine_is_gen(self, coroutine_def_id: Self::DefId) -> bool; fn coroutine_is_async_gen(self, coroutine_def_id: Self::DefId) -> bool; - type UnsizingParams: Deref<Target = BitSet<u32>>; + type UnsizingParams: Deref<Target = DenseBitSet<u32>>; fn unsizing_params_for_adt(self, adt_def_id: Self::DefId) -> Self::UnsizingParams; fn find_const_ty_from_env( diff --git a/compiler/rustc_type_ir/src/solve/mod.rs b/compiler/rustc_type_ir/src/solve/mod.rs index 1ae904d50e0..c06004d4d0f 100644 --- a/compiler/rustc_type_ir/src/solve/mod.rs +++ b/compiler/rustc_type_ir/src/solve/mod.rs @@ -169,6 +169,9 @@ pub enum CandidateSource<I: Interner> { #[derive(Clone, Copy, Hash, PartialEq, Eq, Debug)] #[cfg_attr(feature = "nightly", derive(HashStable_NoContext, TyEncodable, TyDecodable))] pub enum BuiltinImplSource { + /// A built-in impl that is considered trivial, without any nested requirements. They + /// are preferred over where-clauses, and we want to track them explicitly. + Trivial, /// Some built-in impl we don't need to differentiate. This should be used /// unless more specific information is necessary. Misc, @@ -177,8 +180,9 @@ pub enum BuiltinImplSource { /// A built-in implementation of `Upcast` for trait objects to other trait objects. 
/// /// This can be removed when `feature(dyn_upcasting)` is stabilized, since we only - /// use it to detect when upcasting traits in hir typeck. - TraitUpcasting, + /// use it to detect when upcasting traits in hir typeck. The index is only used + /// for winnowing. + TraitUpcasting(usize), /// Unsizing a tuple like `(A, B, ..., X)` to `(A, B, ..., Y)` if `X` unsizes to `Y`. /// /// This can be removed when `feature(tuple_unsizing)` is stabilized, since we only diff --git a/compiler/stable_mir/src/abi.rs b/compiler/stable_mir/src/abi.rs index 17e6a852022..861b6692b53 100644 --- a/compiler/stable_mir/src/abi.rs +++ b/compiler/stable_mir/src/abi.rs @@ -442,6 +442,8 @@ pub enum CallConvention { PtxKernel, + GpuKernel, + X86Fastcall, X86Intr, X86Stdcall, diff --git a/compiler/stable_mir/src/ty.rs b/compiler/stable_mir/src/ty.rs index d7eb435e13f..3434597e7b0 100644 --- a/compiler/stable_mir/src/ty.rs +++ b/compiler/stable_mir/src/ty.rs @@ -1077,6 +1077,7 @@ pub enum Abi { PtxKernel, Msp430Interrupt, X86Interrupt, + GpuKernel, EfiApi, AvrInterrupt, AvrNonBlockingInterrupt, |
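The built-in `~const Destruct` evaluation added above in `rustc_trait_selection/src/traits/effects.rs` boils down to a structural rule: an ADT qualifies only if all of its fields do and any user-written `Drop` impl is itself `const` (in which case `Ty: ~const Drop` is additionally required), arrays, slices, and tuples defer to their element and field types, and most primitives qualify trivially. What follows is a self-contained toy model of that rule, for orientation only; the names (`TypeDesc`, `DropConstness`, `is_const_destruct`) are invented here and are not part of the patch or of rustc.

// Toy model of the `~const Destruct` rule from the effects.rs hunk above.
// This is a sketch under simplified assumptions, not compiler code.
enum DropConstness {
    Const,
    NotConst,
}

struct TypeDesc {
    // whether each field/element type is itself const-destructible
    fields_const_destruct: Vec<bool>,
    // `None` means the type has no user-written `Drop` impl
    drop_impl: Option<DropConstness>,
}

fn is_const_destruct(ty: &TypeDesc) -> bool {
    match ty.drop_impl {
        // a non-const `Drop` impl rules the type out immediately
        Some(DropConstness::NotConst) => return false,
        // a const `Drop` impl is allowed; in rustc this additionally
        // requires `Ty: ~const Drop` to hold, which the toy model skips
        Some(DropConstness::Const) | None => {}
    }
    // every field/element must itself be const-destructible
    ty.fields_const_destruct.iter().copied().all(|f| f)
}

fn main() {
    let plain = TypeDesc { fields_const_destruct: vec![true, true], drop_impl: None };
    let const_drop = TypeDesc { fields_const_destruct: vec![true], drop_impl: Some(DropConstness::Const) };
    let non_const_drop = TypeDesc { fields_const_destruct: vec![true], drop_impl: Some(DropConstness::NotConst) };
    assert!(is_const_destruct(&plain));
    assert!(is_const_destruct(&const_drop));
    assert!(!is_const_destruct(&non_const_drop));
}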
