| author | bors <bors@rust-lang.org> | 2023-09-30 21:33:44 +0000 |
|---|---|---|
| committer | bors <bors@rust-lang.org> | 2023-09-30 21:33:44 +0000 |
| commit | ea4a98369aacc19e750b0959c069026a2c28816c | |
| tree | 47bfc3c4897d7ea9396e7d3382ad0a9521cb869e /compiler | |
| parent | a4561491876e1ceffd177652b94e4db093e5b555 | |
| parent | 119113114cfe77ccec1fd69d6b6fd42422181978 | |
Auto merge of #3097 - RalfJung:rustup, r=RalfJung
Rustup
Diffstat (limited to 'compiler')
64 files changed, 971 insertions, 847 deletions
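One user-visible change rolled up in this sync is the acceptance of `impl_trait_projections` (moved from `active.rs` to `accepted.rs` in the diff below), which also retires `check_opaque_for_inheriting_lifetimes` and switches the E0760 doc example from `compile_fail` to `ignore`. A minimal sketch of the pattern this accepts — an `async fn` returning `Self` under a parent impl lifetime — follows; the diff only shows the opening lines of the doc example, so the function body and `main` here are illustrative assumptions, not the exact doctest.

```rust
// Sketch only (edition 2018 or later): returning `Self` from an `async fn`
// inside an impl that carries a parent lifetime used to be rejected with E0760
// unless `#![feature(impl_trait_projections)]` was enabled; with the feature
// accepted in this diff, plain stable syntax suffices.
#[allow(dead_code)]
struct S<'a>(&'a i32);

impl<'a> S<'a> {
    async fn new(i: &'a i32) -> Self {
        // `Self` here mentions the parent lifetime `'a`, which is exactly the
        // case the old check used to flag. Body is an assumption for illustration.
        S(i)
    }
}

fn main() {
    let x = 22;
    // Constructing the future is enough to exercise the signature; no executor
    // is needed for this sketch.
    let _fut = S::new(&x);
    println!("constructed future for S::new");
}
```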
diff --git a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs index 2ea399789b9..27072a60f65 100644 --- a/compiler/rustc_borrowck/src/diagnostics/region_errors.rs +++ b/compiler/rustc_borrowck/src/diagnostics/region_errors.rs @@ -245,7 +245,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { let Trait(PolyTraitRef { trait_ref, span: trait_span, .. }, _) = bound else { return; }; diag.span_note( *trait_span, - format!("due to current limitations in the borrow checker, this implies a `'static` lifetime") + "due to current limitations in the borrow checker, this implies a `'static` lifetime" ); let Some(generics_fn) = hir.get_generics(self.body.source.def_id().expect_local()) else { return; }; let Def(_, trait_res_defid) = trait_ref.path.res else { return; }; @@ -277,7 +277,7 @@ impl<'a, 'tcx> MirBorrowckCtxt<'a, 'tcx> { if suggestions.len() > 0 { suggestions.dedup(); diag.multipart_suggestion_verbose( - format!("consider restricting the type parameter to the `'static` lifetime"), + "consider restricting the type parameter to the `'static` lifetime", suggestions, Applicability::MaybeIncorrect, ); diff --git a/compiler/rustc_codegen_cranelift/src/base.rs b/compiler/rustc_codegen_cranelift/src/base.rs index 9b5a6b89191..6d55fdc3074 100644 --- a/compiler/rustc_codegen_cranelift/src/base.rs +++ b/compiler/rustc_codegen_cranelift/src/base.rs @@ -875,7 +875,7 @@ pub(crate) fn codegen_place<'tcx>( PlaceElem::Deref => { cplace = cplace.place_deref(fx); } - PlaceElem::OpaqueCast(ty) => cplace = cplace.place_opaque_cast(fx, ty), + PlaceElem::OpaqueCast(ty) => bug!("encountered OpaqueCast({ty}) in codegen"), PlaceElem::Field(field, _ty) => { cplace = cplace.place_field(fx, field); } diff --git a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs index c64a4008996..6efbe149863 100644 --- a/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs +++ b/compiler/rustc_codegen_cranelift/src/intrinsics/simd.rs @@ -21,7 +21,7 @@ fn report_simd_type_validation_error( pub(super) fn codegen_simd_intrinsic_call<'tcx>( fx: &mut FunctionCx<'_, '_, 'tcx>, intrinsic: Symbol, - _args: GenericArgsRef<'tcx>, + generic_args: GenericArgsRef<'tcx>, args: &[mir::Operand<'tcx>], ret: CPlace<'tcx>, target: BasicBlock, @@ -117,6 +117,54 @@ pub(super) fn codegen_simd_intrinsic_call<'tcx>( }); } + // simd_shuffle_generic<T, U, const I: &[u32]>(x: T, y: T) -> U + sym::simd_shuffle_generic => { + let [x, y] = args else { + bug!("wrong number of args for intrinsic {intrinsic}"); + }; + let x = codegen_operand(fx, x); + let y = codegen_operand(fx, y); + + if !x.layout().ty.is_simd() { + report_simd_type_validation_error(fx, intrinsic, span, x.layout().ty); + return; + } + + let idx = generic_args[2] + .expect_const() + .eval(fx.tcx, ty::ParamEnv::reveal_all(), Some(span)) + .unwrap() + .unwrap_branch(); + + assert_eq!(x.layout(), y.layout()); + let layout = x.layout(); + + let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx); + let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx); + + assert_eq!(lane_ty, ret_lane_ty); + assert_eq!(idx.len() as u64, ret_lane_count); + + let total_len = lane_count * 2; + + let indexes = + idx.iter().map(|idx| idx.unwrap_leaf().try_to_u16().unwrap()).collect::<Vec<u16>>(); + + for &idx in &indexes { + assert!(u64::from(idx) < total_len, "idx {} out of range 0..{}", idx, total_len); + } + + for (out_idx, in_idx) in 
indexes.into_iter().enumerate() { + let in_lane = if u64::from(in_idx) < lane_count { + x.value_lane(fx, in_idx.into()) + } else { + y.value_lane(fx, u64::from(in_idx) - lane_count) + }; + let out_lane = ret.place_lane(fx, u64::try_from(out_idx).unwrap()); + out_lane.write_cvalue(fx, in_lane); + } + } + // simd_shuffle<T, I, U>(x: T, y: T, idx: I) -> U sym::simd_shuffle => { let (x, y, idx) = match args { diff --git a/compiler/rustc_codegen_cranelift/src/value_and_place.rs b/compiler/rustc_codegen_cranelift/src/value_and_place.rs index ff95141ce90..d4273c0b593 100644 --- a/compiler/rustc_codegen_cranelift/src/value_and_place.rs +++ b/compiler/rustc_codegen_cranelift/src/value_and_place.rs @@ -674,14 +674,6 @@ impl<'tcx> CPlace<'tcx> { } } - pub(crate) fn place_opaque_cast( - self, - fx: &mut FunctionCx<'_, '_, 'tcx>, - ty: Ty<'tcx>, - ) -> CPlace<'tcx> { - CPlace { inner: self.inner, layout: fx.layout_of(ty) } - } - pub(crate) fn place_field( self, fx: &mut FunctionCx<'_, '_, 'tcx>, diff --git a/compiler/rustc_codegen_llvm/messages.ftl b/compiler/rustc_codegen_llvm/messages.ftl index ddaff36f24b..c0cfe39f1e0 100644 --- a/compiler/rustc_codegen_llvm/messages.ftl +++ b/compiler/rustc_codegen_llvm/messages.ftl @@ -37,6 +37,8 @@ codegen_llvm_lto_disallowed = lto can only be run for executables, cdylibs and s codegen_llvm_lto_dylib = lto cannot be used for `dylib` crate type without `-Zdylib-lto` +codegen_llvm_lto_proc_macro = lto cannot be used for `proc-macro` crate type without `-Zdylib-lto` + codegen_llvm_missing_features = add the missing features in a `target_feature` attribute diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index ba263296bb4..cb5acf79135 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -2,7 +2,7 @@ use crate::back::write::{ self, bitcode_section_name, save_temp_bitcode, CodegenDiagnosticsStage, DiagnosticHandlers, }; use crate::errors::{ - DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, + DynamicLinkingWithLTO, LlvmError, LtoBitcodeFromRlib, LtoDisallowed, LtoDylib, LtoProcMacro, }; use crate::llvm::{self, build_string}; use crate::{LlvmCodegenBackend, ModuleLlvm}; @@ -36,8 +36,12 @@ pub const THIN_LTO_KEYS_INCR_COMP_FILE_NAME: &str = "thin-lto-past-keys.bin"; pub fn crate_type_allows_lto(crate_type: CrateType) -> bool { match crate_type { - CrateType::Executable | CrateType::Dylib | CrateType::Staticlib | CrateType::Cdylib => true, - CrateType::Rlib | CrateType::ProcMacro => false, + CrateType::Executable + | CrateType::Dylib + | CrateType::Staticlib + | CrateType::Cdylib + | CrateType::ProcMacro => true, + CrateType::Rlib => false, } } @@ -87,6 +91,11 @@ fn prepare_lto( diag_handler.emit_err(LtoDylib); return Err(FatalError); } + } else if *crate_type == CrateType::ProcMacro { + if !cgcx.opts.unstable_opts.dylib_lto { + diag_handler.emit_err(LtoProcMacro); + return Err(FatalError); + } } } diff --git a/compiler/rustc_codegen_llvm/src/errors.rs b/compiler/rustc_codegen_llvm/src/errors.rs index 264c273ba30..665d195790c 100644 --- a/compiler/rustc_codegen_llvm/src/errors.rs +++ b/compiler/rustc_codegen_llvm/src/errors.rs @@ -139,6 +139,10 @@ pub(crate) struct LtoDisallowed; pub(crate) struct LtoDylib; #[derive(Diagnostic)] +#[diag(codegen_llvm_lto_proc_macro)] +pub(crate) struct LtoProcMacro; + +#[derive(Diagnostic)] #[diag(codegen_llvm_lto_bitcode_from_rlib)] pub(crate) struct LtoBitcodeFromRlib { pub llvm_err: String, diff 
--git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index 9289c37d763..a97b803fc64 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -15,7 +15,7 @@ use rustc_codegen_ssa::mir::place::PlaceRef; use rustc_codegen_ssa::traits::*; use rustc_hir as hir; use rustc_middle::ty::layout::{FnAbiOf, HasTyCtxt, LayoutOf}; -use rustc_middle::ty::{self, Ty}; +use rustc_middle::ty::{self, GenericArgsRef, Ty}; use rustc_middle::{bug, span_bug}; use rustc_span::{sym, symbol::kw, Span, Symbol}; use rustc_target::abi::{self, Align, HasDataLayout, Primitive}; @@ -376,7 +376,9 @@ impl<'ll, 'tcx> IntrinsicCallMethods<'tcx> for Builder<'_, 'll, 'tcx> { } _ if name.as_str().starts_with("simd_") => { - match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { + match generic_simd_intrinsic( + self, name, callee_ty, fn_args, args, ret_ty, llret_ty, span, + ) { Ok(llval) => llval, Err(()) => return, } @@ -911,6 +913,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, name: Symbol, callee_ty: Ty<'tcx>, + fn_args: GenericArgsRef<'tcx>, args: &[OperandRef<'tcx, &'ll Value>], ret_ty: Ty<'tcx>, llret_ty: &'ll Type, @@ -1030,6 +1033,56 @@ fn generic_simd_intrinsic<'ll, 'tcx>( )); } + if name == sym::simd_shuffle_generic { + let idx = fn_args[2] + .expect_const() + .eval(tcx, ty::ParamEnv::reveal_all(), Some(span)) + .unwrap() + .unwrap_branch(); + let n = idx.len() as u64; + + require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); + let (out_len, out_ty) = ret_ty.simd_size_and_type(bx.tcx()); + require!( + out_len == n, + InvalidMonomorphization::ReturnLength { span, name, in_len: n, ret_ty, out_len } + ); + require!( + in_elem == out_ty, + InvalidMonomorphization::ReturnElement { span, name, in_elem, in_ty, ret_ty, out_ty } + ); + + let total_len = in_len * 2; + + let indices: Option<Vec<_>> = idx + .iter() + .enumerate() + .map(|(arg_idx, val)| { + let idx = val.unwrap_leaf().try_to_i32().unwrap(); + if idx >= i32::try_from(total_len).unwrap() { + bx.sess().emit_err(InvalidMonomorphization::ShuffleIndexOutOfBounds { + span, + name, + arg_idx: arg_idx as u64, + total_len: total_len.into(), + }); + None + } else { + Some(bx.const_i32(idx)) + } + }) + .collect(); + let Some(indices) = indices else { + return Ok(bx.const_null(llret_ty)); + }; + + return Ok(bx.shuffle_vector( + args[0].immediate(), + args[1].immediate(), + bx.const_vector(&indices), + )); + } + if name == sym::simd_shuffle { // Make sure this is actually an array, since typeck only checks the length-suffixed // version of this intrinsic. diff --git a/compiler/rustc_codegen_ssa/messages.ftl b/compiler/rustc_codegen_ssa/messages.ftl index d0a078505d2..6f7d7482aea 100644 --- a/compiler/rustc_codegen_ssa/messages.ftl +++ b/compiler/rustc_codegen_ssa/messages.ftl @@ -46,8 +46,6 @@ codegen_ssa_ignoring_output = ignoring -o because multiple .{$extension} files w codegen_ssa_illegal_link_ordinal_format = illegal ordinal format in `link_ordinal` .note = an unsuffixed integer value, e.g., `1`, is expected -codegen_ssa_incompatible_linking_modifiers = link modifiers combination `+bundle,+whole-archive` is unstable when generating rlibs - codegen_ssa_insufficient_vs_code_product = VS Code is a different product, and is not sufficient. 
codegen_ssa_invalid_link_ordinal_nargs = incorrect number of arguments to `#[link_ordinal]` diff --git a/compiler/rustc_codegen_ssa/src/back/link.rs b/compiler/rustc_codegen_ssa/src/back/link.rs index a7ac728c59b..c4a0f6291e7 100644 --- a/compiler/rustc_codegen_ssa/src/back/link.rs +++ b/compiler/rustc_codegen_ssa/src/back/link.rs @@ -365,15 +365,9 @@ fn link_rlib<'a>( // loaded from the libraries found here and then encode that into the // metadata of the rlib we're generating somehow. for lib in codegen_results.crate_info.used_libraries.iter() { - let NativeLibKind::Static { bundle: None | Some(true), whole_archive } = lib.kind else { + let NativeLibKind::Static { bundle: None | Some(true), .. } = lib.kind else { continue; }; - if whole_archive == Some(true) - && flavor == RlibFlavor::Normal - && !codegen_results.crate_info.feature_packed_bundled_libs - { - sess.emit_err(errors::IncompatibleLinkingModifiers); - } if flavor == RlibFlavor::Normal && let Some(filename) = lib.filename { let path = find_native_static_library(filename.as_str(), true, &lib_search_paths, sess); let src = read(path).map_err(|e| sess.emit_fatal(errors::ReadFileError {message: e }))?; diff --git a/compiler/rustc_codegen_ssa/src/base.rs b/compiler/rustc_codegen_ssa/src/base.rs index 6c51dffedbf..1e4ea73a172 100644 --- a/compiler/rustc_codegen_ssa/src/base.rs +++ b/compiler/rustc_codegen_ssa/src/base.rs @@ -181,7 +181,7 @@ pub fn unsized_info<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>( old_info } } - (_, &ty::Dynamic(ref data, _, _)) => meth::get_vtable(cx, source, data.principal()), + (_, ty::Dynamic(data, _, _)) => meth::get_vtable(cx, source, data.principal()), _ => bug!("unsized_info: invalid unsizing {:?} -> {:?}", source, target), } } @@ -857,7 +857,6 @@ impl CrateInfo { dependency_formats: tcx.dependency_formats(()).clone(), windows_subsystem, natvis_debugger_visualizers: Default::default(), - feature_packed_bundled_libs: tcx.features().packed_bundled_libs, }; let crates = tcx.crates(()); diff --git a/compiler/rustc_codegen_ssa/src/errors.rs b/compiler/rustc_codegen_ssa/src/errors.rs index bfd572a2eea..14311ec08fd 100644 --- a/compiler/rustc_codegen_ssa/src/errors.rs +++ b/compiler/rustc_codegen_ssa/src/errors.rs @@ -108,10 +108,6 @@ pub struct CreateTempDir { } #[derive(Diagnostic)] -#[diag(codegen_ssa_incompatible_linking_modifiers)] -pub struct IncompatibleLinkingModifiers; - -#[derive(Diagnostic)] #[diag(codegen_ssa_add_native_library)] pub struct AddNativeLibrary { pub library_path: PathBuf, diff --git a/compiler/rustc_codegen_ssa/src/lib.rs b/compiler/rustc_codegen_ssa/src/lib.rs index 7ebaca9b0bc..f6186a290f8 100644 --- a/compiler/rustc_codegen_ssa/src/lib.rs +++ b/compiler/rustc_codegen_ssa/src/lib.rs @@ -164,7 +164,6 @@ pub struct CrateInfo { pub dependency_formats: Lrc<Dependencies>, pub windows_subsystem: Option<String>, pub natvis_debugger_visualizers: BTreeSet<DebuggerVisualizerFile>, - pub feature_packed_bundled_libs: bool, // unstable feature flag. 
} #[derive(Encodable, Decodable)] diff --git a/compiler/rustc_codegen_ssa/src/mir/place.rs b/compiler/rustc_codegen_ssa/src/mir/place.rs index a9ecbdc5f35..f775711f870 100644 --- a/compiler/rustc_codegen_ssa/src/mir/place.rs +++ b/compiler/rustc_codegen_ssa/src/mir/place.rs @@ -463,7 +463,9 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { mir::ProjectionElem::Field(ref field, _) => { cg_base.project_field(bx, field.index()) } - mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty), + mir::ProjectionElem::OpaqueCast(ty) => { + bug!("encountered OpaqueCast({ty}) in codegen") + } mir::ProjectionElem::Index(index) => { let index = &mir::Operand::Copy(mir::Place::from(index)); let index = self.codegen_operand(bx, index); diff --git a/compiler/rustc_const_eval/src/interpret/projection.rs b/compiler/rustc_const_eval/src/interpret/projection.rs index 6c720ac4a57..f462c13816e 100644 --- a/compiler/rustc_const_eval/src/interpret/projection.rs +++ b/compiler/rustc_const_eval/src/interpret/projection.rs @@ -316,7 +316,9 @@ where { use rustc_middle::mir::ProjectionElem::*; Ok(match proj_elem { - OpaqueCast(ty) => base.transmute(self.layout_of(ty)?, self)?, + OpaqueCast(ty) => { + span_bug!(self.cur_span(), "OpaqueCast({ty}) encountered after borrowck") + } Field(field, _) => self.project_field(base, field.index())?, Downcast(_, variant) => self.project_downcast(base, variant)?, Deref => self.deref_pointer(&base.to_op(self)?)?.into(), diff --git a/compiler/rustc_const_eval/src/transform/validate.rs b/compiler/rustc_const_eval/src/transform/validate.rs index 0d8733070a4..18b22882e7d 100644 --- a/compiler/rustc_const_eval/src/transform/validate.rs +++ b/compiler/rustc_const_eval/src/transform/validate.rs @@ -633,6 +633,14 @@ impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> { location: Location, ) { match elem { + ProjectionElem::OpaqueCast(ty) + if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) => + { + self.fail( + location, + format!("explicit opaque type cast to `{ty}` after `RevealAll`"), + ) + } ProjectionElem::Index(index) => { let index_ty = self.body.local_decls[index].ty; if index_ty != self.tcx.types.usize { diff --git a/compiler/rustc_error_codes/src/error_codes/E0760.md b/compiler/rustc_error_codes/src/error_codes/E0760.md index 85e5faada22..9c4739f0df0 100644 --- a/compiler/rustc_error_codes/src/error_codes/E0760.md +++ b/compiler/rustc_error_codes/src/error_codes/E0760.md @@ -5,7 +5,7 @@ or `Self` that references lifetimes from a parent scope. Erroneous code example: -```compile_fail,edition2018 +```ignore,edition2018 struct S<'a>(&'a i32); impl<'a> S<'a> { diff --git a/compiler/rustc_feature/src/accepted.rs b/compiler/rustc_feature/src/accepted.rs index afcf30d0b29..32d8380abd3 100644 --- a/compiler/rustc_feature/src/accepted.rs +++ b/compiler/rustc_feature/src/accepted.rs @@ -197,6 +197,8 @@ declare_features! ( /// + `impl<I:Iterator> Iterator for &mut Iterator` /// + `impl Debug for Foo<'_>` (accepted, impl_header_lifetime_elision, "1.31.0", Some(15872), None), + /// Allows referencing `Self` and projections in impl-trait. + (accepted, impl_trait_projections, "CURRENT_RUSTC_VERSION", Some(103532), None), /// Allows using `a..=b` and `..=b` as inclusive range syntaxes. (accepted, inclusive_range_syntax, "1.26.0", Some(28237), None), /// Allows inferring outlives requirements (RFC 2093). @@ -267,6 +269,8 @@ declare_features! ( (accepted, non_modrs_mods, "1.30.0", Some(44660), None), /// Allows the use of or-patterns (e.g., `0 | 1`). 
(accepted, or_patterns, "1.53.0", Some(54883), None), + /// Allows using `+bundle,+whole-archive` link modifiers with native libs. + (accepted, packed_bundled_libs, "CURRENT_RUSTC_VERSION", Some(108081), None), /// Allows annotating functions conforming to `fn(&PanicInfo) -> !` with `#[panic_handler]`. /// This defines the behavior of panics. (accepted, panic_handler, "1.30.0", Some(44489), None), diff --git a/compiler/rustc_feature/src/active.rs b/compiler/rustc_feature/src/active.rs index 94afb6edcf6..a02c04ecd3e 100644 --- a/compiler/rustc_feature/src/active.rs +++ b/compiler/rustc_feature/src/active.rs @@ -241,8 +241,6 @@ declare_features! ( (active, linkage, "1.0.0", Some(29603), None), /// Allows declaring with `#![needs_panic_runtime]` that a panic runtime is needed. (internal, needs_panic_runtime, "1.10.0", Some(32837), None), - /// Allows using `+bundled,+whole-archive` native libs. - (active, packed_bundled_libs, "1.69.0", Some(108081), None), /// Allows using the `#![panic_runtime]` attribute. (internal, panic_runtime, "1.10.0", Some(32837), None), /// Allows `extern "platform-intrinsic" { ... }`. @@ -472,8 +470,6 @@ declare_features! ( (active, impl_trait_in_assoc_type, "1.70.0", Some(63063), None), /// Allows `impl Trait` as output type in `Fn` traits in return position of functions. (active, impl_trait_in_fn_trait_return, "1.64.0", Some(99697), None), - /// Allows referencing `Self` and projections in impl-trait. - (active, impl_trait_projections, "1.67.0", Some(103532), None), /// Allows using imported `main` function (active, imported_main, "1.53.0", Some(28937), None), /// Allows associated types in inherent impls. diff --git a/compiler/rustc_hir_analysis/messages.ftl b/compiler/rustc_hir_analysis/messages.ftl index 2a68d745c76..1c926533a27 100644 --- a/compiler/rustc_hir_analysis/messages.ftl +++ b/compiler/rustc_hir_analysis/messages.ftl @@ -38,6 +38,17 @@ hir_analysis_cast_thin_pointer_to_fat_pointer = cannot cast thin pointer `{$expr hir_analysis_closure_implicit_hrtb = implicit types in closure signatures are forbidden when `for<...>` is present .label = `for<...>` is here +hir_analysis_coerce_unsized_may = the trait `{$trait_name}` may only be implemented for a coercion between structures + +hir_analysis_coerce_unsized_multi = implementing the trait `CoerceUnsized` requires multiple coercions + .note = `CoerceUnsized` may only be implemented for a coercion between structures with one field being coerced + .coercions_note = currently, {$number} fields need coercions: {$coercions} + .label = requires multiple coercions + +hir_analysis_coercion_between_struct_same_note = expected coercion between the same definition; expected `{$source_path}`, found `{$target_path}` + +hir_analysis_coercion_between_struct_single_note = expected a single field to be coerced, none found + hir_analysis_const_bound_for_non_const_trait = ~const can only be applied to `#[const_trait]` traits @@ -61,6 +72,15 @@ hir_analysis_copy_impl_on_type_with_dtor = the trait `Copy` cannot be implemented for this type; the type has a destructor .label = `Copy` not allowed on types with destructors +hir_analysis_dispatch_from_dyn_multi = implementing the `DispatchFromDyn` trait requires multiple coercions + .note = the trait `DispatchFromDyn` may only be implemented for a coercion between structures with a single field being coerced + .coercions_note = currently, {$number} fields need coercions: {$coercions} + +hir_analysis_dispatch_from_dyn_repr = structs implementing `DispatchFromDyn` may not have 
`#[repr(packed)]` or `#[repr(C)]` + +hir_analysis_dispatch_from_dyn_zst = the trait `DispatchFromDyn` may only be implemented for structs containing the field being coerced, ZST fields with 1 byte alignment, and nothing else + .note = extra field `{$name}` of type `{$ty}` is not allowed + hir_analysis_drop_impl_negative = negative `Drop` impls are not supported hir_analysis_drop_impl_on_wrong_item = @@ -232,6 +252,8 @@ hir_analysis_pass_to_variadic_function = can't pass `{$ty}` to variadic function hir_analysis_placeholder_not_allowed_item_signatures = the placeholder `_` is not allowed within types on item signatures for {$kind} .label = not allowed in type signatures +hir_analysis_requires_note = the `{$trait_name}` impl for `{$ty}` requires that `{$error_predicate}` + hir_analysis_return_type_notation_conflicting_bound = ambiguous associated function `{$assoc_name}` for `{$ty_name}` .note = `{$assoc_name}` is declared in two supertraits: `{$first_bound}` and `{$second_bound}` @@ -299,6 +321,9 @@ hir_analysis_too_large_static = extern static is too large for the current archi hir_analysis_track_caller_on_main = `main` function is not allowed to be `#[track_caller]` .suggestion = remove this annotation +hir_analysis_trait_cannot_impl_for_ty = the trait `{$trait_name}` cannot be implemented for this type + .label = this field does not implement `{$trait_name}` + hir_analysis_trait_object_declared_with_no_traits = at least one trait is required for an object type .alias_span = this alias does not contain a trait diff --git a/compiler/rustc_hir_analysis/src/check/check.rs b/compiler/rustc_hir_analysis/src/check/check.rs index eb1f48d23ff..44e1bdb8370 100644 --- a/compiler/rustc_hir_analysis/src/check/check.rs +++ b/compiler/rustc_hir_analysis/src/check/check.rs @@ -5,18 +5,15 @@ use super::compare_impl_item::check_type_bounds; use super::compare_impl_item::{compare_impl_method, compare_impl_ty}; use super::*; use rustc_attr as attr; -use rustc_errors::{Applicability, ErrorGuaranteed, MultiSpan}; +use rustc_errors::{ErrorGuaranteed, MultiSpan}; use rustc_hir as hir; -use rustc_hir::def::{CtorKind, DefKind, Res}; +use rustc_hir::def::{CtorKind, DefKind}; use rustc_hir::def_id::{DefId, LocalDefId, LocalModDefId}; -use rustc_hir::intravisit::Visitor; -use rustc_hir::{ItemKind, Node, PathSegment}; -use rustc_infer::infer::opaque_types::ConstrainOpaqueTypeRegionVisitor; +use rustc_hir::Node; use rustc_infer::infer::outlives::env::OutlivesEnvironment; use rustc_infer::infer::{RegionVariableOrigin, TyCtxtInferExt}; use rustc_infer::traits::{Obligation, TraitEngineExt as _}; use rustc_lint_defs::builtin::REPR_TRANSPARENT_EXTERNAL_PRIVATE_FIELDS; -use rustc_middle::hir::nested_filter; use rustc_middle::middle::stability::EvalResult; use rustc_middle::traits::{DefiningAnchor, ObligationCauseCode}; use rustc_middle::ty::fold::BottomUpFolder; @@ -218,9 +215,6 @@ fn check_opaque(tcx: TyCtxt<'_>, id: hir::ItemId) { let args = GenericArgs::identity_for_item(tcx, item.owner_id); let span = tcx.def_span(item.owner_id.def_id); - if !tcx.features().impl_trait_projections { - check_opaque_for_inheriting_lifetimes(tcx, item.owner_id.def_id, span); - } if tcx.type_of(item.owner_id.def_id).instantiate_identity().references_error() { return; } @@ -231,129 +225,6 @@ fn check_opaque(tcx: TyCtxt<'_>, id: hir::ItemId) { let _ = check_opaque_meets_bounds(tcx, item.owner_id.def_id, span, &origin); } -/// Checks that an opaque type does not use `Self` or `T::Foo` projections that would result -/// in "inheriting 
lifetimes". -#[instrument(level = "debug", skip(tcx, span))] -pub(super) fn check_opaque_for_inheriting_lifetimes( - tcx: TyCtxt<'_>, - def_id: LocalDefId, - span: Span, -) { - let item = tcx.hir().expect_item(def_id); - debug!(?item, ?span); - - struct ProhibitOpaqueVisitor<'tcx> { - tcx: TyCtxt<'tcx>, - opaque_identity_ty: Ty<'tcx>, - parent_count: u32, - references_parent_regions: bool, - selftys: Vec<(Span, Option<String>)>, - } - - impl<'tcx> ty::visit::TypeVisitor<TyCtxt<'tcx>> for ProhibitOpaqueVisitor<'tcx> { - type BreakTy = Ty<'tcx>; - - fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { - debug!(?t, "root_visit_ty"); - if t == self.opaque_identity_ty { - ControlFlow::Continue(()) - } else { - t.visit_with(&mut ConstrainOpaqueTypeRegionVisitor { - tcx: self.tcx, - op: |region| { - if let ty::ReEarlyBound(ty::EarlyBoundRegion { index, .. }) = *region - && index < self.parent_count - { - self.references_parent_regions= true; - } - }, - }); - if self.references_parent_regions { - ControlFlow::Break(t) - } else { - ControlFlow::Continue(()) - } - } - } - } - - impl<'tcx> Visitor<'tcx> for ProhibitOpaqueVisitor<'tcx> { - type NestedFilter = nested_filter::OnlyBodies; - - fn nested_visit_map(&mut self) -> Self::Map { - self.tcx.hir() - } - - fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) { - match arg.kind { - hir::TyKind::Path(hir::QPath::Resolved(None, path)) => match &path.segments { - [PathSegment { res: Res::SelfTyParam { .. }, .. }] => { - let impl_ty_name = None; - self.selftys.push((path.span, impl_ty_name)); - } - [PathSegment { res: Res::SelfTyAlias { alias_to: def_id, .. }, .. }] => { - let impl_ty_name = Some(self.tcx.def_path_str(*def_id)); - self.selftys.push((path.span, impl_ty_name)); - } - _ => {} - }, - _ => {} - } - hir::intravisit::walk_ty(self, arg); - } - } - - if let ItemKind::OpaqueTy(&hir::OpaqueTy { - origin: hir::OpaqueTyOrigin::AsyncFn(..) | hir::OpaqueTyOrigin::FnReturn(..), - .. - }) = item.kind - { - let args = GenericArgs::identity_for_item(tcx, def_id); - let opaque_identity_ty = Ty::new_opaque(tcx, def_id.to_def_id(), args); - let mut visitor = ProhibitOpaqueVisitor { - opaque_identity_ty, - parent_count: tcx.generics_of(def_id).parent_count as u32, - references_parent_regions: false, - tcx, - selftys: vec![], - }; - let prohibit_opaque = tcx - .explicit_item_bounds(def_id) - .instantiate_identity_iter_copied() - .try_for_each(|(predicate, _)| predicate.visit_with(&mut visitor)); - - if let Some(ty) = prohibit_opaque.break_value() { - visitor.visit_item(&item); - let is_async = match item.kind { - ItemKind::OpaqueTy(hir::OpaqueTy { origin, .. }) => { - matches!(origin, hir::OpaqueTyOrigin::AsyncFn(..)) - } - _ => unreachable!(), - }; - - let mut err = feature_err( - &tcx.sess.parse_sess, - sym::impl_trait_projections, - span, - format!( - "`{}` return type cannot contain a projection or `Self` that references \ - lifetimes from a parent scope", - if is_async { "async fn" } else { "impl Trait" }, - ), - ); - for (span, name) in visitor.selftys { - err.span_suggestion( - span, - "consider spelling out the type instead", - name.unwrap_or_else(|| format!("{ty:?}")), - Applicability::MaybeIncorrect, - ); - } - err.emit(); - } - } -} - /// Checks that an opaque type does not contain cycles. 
pub(super) fn check_opaque_for_cycles<'tcx>( tcx: TyCtxt<'tcx>, diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 3c0d977917f..c61719c1fd2 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -20,6 +20,7 @@ fn equate_intrinsic_type<'tcx>( it: &hir::ForeignItem<'_>, n_tps: usize, n_lts: usize, + n_cts: usize, sig: ty::PolyFnSig<'tcx>, ) { let (own_counts, span) = match &it.kind { @@ -51,7 +52,7 @@ fn equate_intrinsic_type<'tcx>( if gen_count_ok(own_counts.lifetimes, n_lts, "lifetime") && gen_count_ok(own_counts.types, n_tps, "type") - && gen_count_ok(own_counts.consts, 0, "const") + && gen_count_ok(own_counts.consts, n_cts, "const") { let it_def_id = it.owner_id.def_id; check_function_signature( @@ -489,7 +490,7 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) { }; let sig = tcx.mk_fn_sig(inputs, output, false, unsafety, Abi::RustIntrinsic); let sig = ty::Binder::bind_with_vars(sig, bound_vars); - equate_intrinsic_type(tcx, it, n_tps, n_lts, sig) + equate_intrinsic_type(tcx, it, n_tps, n_lts, 0, sig) } /// Type-check `extern "platform-intrinsic" { ... }` functions. @@ -501,9 +502,9 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) let name = it.ident.name; - let (n_tps, inputs, output) = match name { + let (n_tps, n_cts, inputs, output) = match name { sym::simd_eq | sym::simd_ne | sym::simd_lt | sym::simd_le | sym::simd_gt | sym::simd_ge => { - (2, vec![param(0), param(0)], param(1)) + (2, 0, vec![param(0), param(0)], param(1)) } sym::simd_add | sym::simd_sub @@ -519,8 +520,8 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) | sym::simd_fmax | sym::simd_fpow | sym::simd_saturating_add - | sym::simd_saturating_sub => (1, vec![param(0), param(0)], param(0)), - sym::simd_arith_offset => (2, vec![param(0), param(1)], param(0)), + | sym::simd_saturating_sub => (1, 0, vec![param(0), param(0)], param(0)), + sym::simd_arith_offset => (2, 0, vec![param(0), param(1)], param(0)), sym::simd_neg | sym::simd_bswap | sym::simd_bitreverse @@ -538,25 +539,25 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) | sym::simd_ceil | sym::simd_floor | sym::simd_round - | sym::simd_trunc => (1, vec![param(0)], param(0)), - sym::simd_fpowi => (1, vec![param(0), tcx.types.i32], param(0)), - sym::simd_fma => (1, vec![param(0), param(0), param(0)], param(0)), - sym::simd_gather => (3, vec![param(0), param(1), param(2)], param(0)), - sym::simd_scatter => (3, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)), - sym::simd_insert => (2, vec![param(0), tcx.types.u32, param(1)], param(0)), - sym::simd_extract => (2, vec![param(0), tcx.types.u32], param(1)), + | sym::simd_trunc => (1, 0, vec![param(0)], param(0)), + sym::simd_fpowi => (1, 0, vec![param(0), tcx.types.i32], param(0)), + sym::simd_fma => (1, 0, vec![param(0), param(0), param(0)], param(0)), + sym::simd_gather => (3, 0, vec![param(0), param(1), param(2)], param(0)), + sym::simd_scatter => (3, 0, vec![param(0), param(1), param(2)], Ty::new_unit(tcx)), + sym::simd_insert => (2, 0, vec![param(0), tcx.types.u32, param(1)], param(0)), + sym::simd_extract => (2, 0, vec![param(0), tcx.types.u32], param(1)), sym::simd_cast | sym::simd_as | sym::simd_cast_ptr | sym::simd_expose_addr - | sym::simd_from_exposed_addr => (2, vec![param(0)], param(1)), - sym::simd_bitmask => (2, vec![param(0)], param(1)), 
+ | sym::simd_from_exposed_addr => (2, 0, vec![param(0)], param(1)), + sym::simd_bitmask => (2, 0, vec![param(0)], param(1)), sym::simd_select | sym::simd_select_bitmask => { - (2, vec![param(0), param(1), param(1)], param(1)) + (2, 0, vec![param(0), param(1), param(1)], param(1)) } - sym::simd_reduce_all | sym::simd_reduce_any => (1, vec![param(0)], tcx.types.bool), + sym::simd_reduce_all | sym::simd_reduce_any => (1, 0, vec![param(0)], tcx.types.bool), sym::simd_reduce_add_ordered | sym::simd_reduce_mul_ordered => { - (2, vec![param(0), param(1)], param(1)) + (2, 0, vec![param(0), param(1)], param(1)) } sym::simd_reduce_add_unordered | sym::simd_reduce_mul_unordered @@ -566,8 +567,9 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) | sym::simd_reduce_min | sym::simd_reduce_max | sym::simd_reduce_min_nanless - | sym::simd_reduce_max_nanless => (2, vec![param(0)], param(1)), - sym::simd_shuffle => (3, vec![param(0), param(0), param(1)], param(2)), + | sym::simd_reduce_max_nanless => (2, 0, vec![param(0)], param(1)), + sym::simd_shuffle => (3, 0, vec![param(0), param(0), param(1)], param(2)), + sym::simd_shuffle_generic => (2, 1, vec![param(0), param(0)], param(1)), _ => { let msg = format!("unrecognized platform-specific intrinsic function: `{name}`"); tcx.sess.struct_span_err(it.span, msg).emit(); @@ -577,5 +579,5 @@ pub fn check_platform_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) let sig = tcx.mk_fn_sig(inputs, output, false, hir::Unsafety::Unsafe, Abi::PlatformIntrinsic); let sig = ty::Binder::dummy(sig); - equate_intrinsic_type(tcx, it, n_tps, 0, sig) + equate_intrinsic_type(tcx, it, n_tps, 0, n_cts, sig) } diff --git a/compiler/rustc_hir_analysis/src/check/mod.rs b/compiler/rustc_hir_analysis/src/check/mod.rs index 88c98fa979e..5fa65f33c76 100644 --- a/compiler/rustc_hir_analysis/src/check/mod.rs +++ b/compiler/rustc_hir_analysis/src/check/mod.rs @@ -329,41 +329,52 @@ fn bounds_from_generic_predicates<'tcx>( _ => {} } } - let generics = if types.is_empty() { - "".to_string() - } else { - format!( - "<{}>", - types - .keys() - .filter_map(|t| match t.kind() { - ty::Param(_) => Some(t.to_string()), - // Avoid suggesting the following: - // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {} - _ => None, - }) - .collect::<Vec<_>>() - .join(", ") - ) - }; + let mut where_clauses = vec![]; + let mut types_str = vec![]; for (ty, bounds) in types { - where_clauses - .extend(bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound)))); - } - for projection in &projections { - let p = projection.skip_binder(); - // FIXME: this is not currently supported syntax, we should be looking at the `types` and - // insert the associated types where they correspond, but for now let's be "lazy" and - // propose this instead of the following valid resugaring: - // `T: Trait, Trait::Assoc = K` → `T: Trait<Assoc = K>` - where_clauses.push(format!("{} = {}", tcx.def_path_str(p.projection_ty.def_id), p.term)); + if let ty::Param(_) = ty.kind() { + let mut bounds_str = vec![]; + for bound in bounds { + let mut projections_str = vec![]; + for projection in &projections { + let p = projection.skip_binder(); + let alias_ty = p.projection_ty; + if bound == tcx.parent(alias_ty.def_id) && alias_ty.self_ty() == ty { + let name = tcx.item_name(alias_ty.def_id); + projections_str.push(format!("{} = {}", name, p.term)); + } + } + let bound_def_path = tcx.def_path_str(bound); + if projections_str.is_empty() { + 
where_clauses.push(format!("{}: {}", ty, bound_def_path)); + } else { + bounds_str.push(format!("{}<{}>", bound_def_path, projections_str.join(", "))); + } + } + if bounds_str.is_empty() { + types_str.push(ty.to_string()); + } else { + types_str.push(format!("{}: {}", ty, bounds_str.join(" + "))); + } + } else { + // Avoid suggesting the following: + // fn foo<T, <T as Trait>::Bar>(_: T) where T: Trait, <T as Trait>::Bar: Other {} + where_clauses.extend( + bounds.into_iter().map(|bound| format!("{}: {}", ty, tcx.def_path_str(bound))), + ); + } } + + let generics = + if types_str.is_empty() { "".to_string() } else { format!("<{}>", types_str.join(", ")) }; + let where_clauses = if where_clauses.is_empty() { - String::new() + "".to_string() } else { format!(" where {}", where_clauses.join(", ")) }; + (generics, where_clauses) } diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index b7b162ce27b..c4fdffb0261 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -24,6 +24,9 @@ use rustc_span::symbol::{sym, Ident, Symbol}; use rustc_span::{Span, DUMMY_SP}; use rustc_target::spec::abi::Abi; use rustc_trait_selection::traits::error_reporting::TypeErrCtxtExt; +use rustc_trait_selection::traits::misc::{ + type_allowed_to_implement_const_param_ty, ConstParamTyImplementationError, +}; use rustc_trait_selection::traits::outlives_bounds::InferCtxtExt as _; use rustc_trait_selection::traits::query::evaluate_obligation::InferCtxtExt as _; use rustc_trait_selection::traits::{ @@ -865,43 +868,65 @@ fn check_param_wf(tcx: TyCtxt<'_>, param: &hir::GenericParam<'_>) { ); }); } else { - let err_ty_str; - let mut is_ptr = true; - - let err = match ty.kind() { + let diag = match ty.kind() { ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Error(_) => None, - ty::FnPtr(_) => Some("function pointers"), - ty::RawPtr(_) => Some("raw pointers"), - _ => { - is_ptr = false; - err_ty_str = format!("`{ty}`"); - Some(err_ty_str.as_str()) - } + ty::FnPtr(_) => Some(tcx.sess.struct_span_err( + hir_ty.span, + "using function pointers as const generic parameters is forbidden", + )), + ty::RawPtr(_) => Some(tcx.sess.struct_span_err( + hir_ty.span, + "using raw pointers as const generic parameters is forbidden", + )), + _ => Some(tcx.sess.struct_span_err( + hir_ty.span, + format!("`{}` is forbidden as the type of a const generic parameter", ty), + )), }; - if let Some(unsupported_type) = err { - if is_ptr { - tcx.sess.span_err( - hir_ty.span, - format!( - "using {unsupported_type} as const generic parameters is forbidden", - ), - ); - } else { - let mut err = tcx.sess.struct_span_err( - hir_ty.span, - format!( - "{unsupported_type} is forbidden as the type of a const generic parameter", - ), - ); - err.note("the only supported types are integers, `bool` and `char`"); - if tcx.sess.is_nightly_build() { - err.help( - "more complex types are supported with `#![feature(adt_const_params)]`", - ); + if let Some(mut diag) = diag { + diag.note("the only supported types are integers, `bool` and `char`"); + + let cause = ObligationCause::misc(hir_ty.span, param.def_id); + let may_suggest_feature = match type_allowed_to_implement_const_param_ty( + tcx, + tcx.param_env(param.def_id), + ty, + cause, + ) { + // Can never implement `ConstParamTy`, don't suggest anything. + Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => false, + // May be able to implement `ConstParamTy`. 
Only emit the feature help + // if the type is local, since the user may be able to fix the local type. + Err(ConstParamTyImplementationError::InfrigingFields(..)) => { + fn ty_is_local(ty: Ty<'_>) -> bool { + match ty.kind() { + ty::Adt(adt_def, ..) => adt_def.did().is_local(), + // Arrays and slices use the inner type's `ConstParamTy`. + ty::Array(ty, ..) => ty_is_local(*ty), + ty::Slice(ty) => ty_is_local(*ty), + // `&` references use the inner type's `ConstParamTy`. + // `&mut` are not supported. + ty::Ref(_, ty, ast::Mutability::Not) => ty_is_local(*ty), + // Say that a tuple is local if any of its components are local. + // This is not strictly correct, but it's likely that the user can fix the local component. + ty::Tuple(tys) => tys.iter().any(|ty| ty_is_local(ty)), + _ => false, + } + } + + ty_is_local(ty) } - err.emit(); + // Implments `ConstParamTy`, suggest adding the feature to enable. + Ok(..) => true, + }; + if may_suggest_feature && tcx.sess.is_nightly_build() { + diag.help( + "add `#![feature(adt_const_params)]` to the crate attributes to enable more complex and user defined types", + ); } + + diag.emit(); } } } diff --git a/compiler/rustc_hir_analysis/src/coherence/builtin.rs b/compiler/rustc_hir_analysis/src/coherence/builtin.rs index 94f3e8706fc..be70acfc35d 100644 --- a/compiler/rustc_hir_analysis/src/coherence/builtin.rs +++ b/compiler/rustc_hir_analysis/src/coherence/builtin.rs @@ -1,11 +1,10 @@ //! Check properties that are required by built-in traits and set //! up data structures required by type-checking/codegen. -use crate::errors::{ - ConstParamTyImplOnNonAdt, CopyImplOnNonAdt, CopyImplOnTypeWithDtor, DropImplOnWrongItem, -}; +use crate::errors; + use rustc_data_structures::fx::FxHashSet; -use rustc_errors::{struct_span_err, ErrorGuaranteed, MultiSpan}; +use rustc_errors::{ErrorGuaranteed, MultiSpan}; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; use rustc_hir::lang_items::LangItem; @@ -65,7 +64,7 @@ fn visit_implementation_of_drop(tcx: TyCtxt<'_>, impl_did: LocalDefId) { let impl_ = tcx.hir().expect_item(impl_did).expect_impl(); - tcx.sess.emit_err(DropImplOnWrongItem { span: impl_.self_ty.span }); + tcx.sess.emit_err(errors::DropImplOnWrongItem { span: impl_.self_ty.span }); } fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) { @@ -91,10 +90,10 @@ fn visit_implementation_of_copy(tcx: TyCtxt<'_>, impl_did: LocalDefId) { infringing_fields_error(tcx, fields, LangItem::Copy, impl_did, span); } Err(CopyImplementationError::NotAnAdt) => { - tcx.sess.emit_err(CopyImplOnNonAdt { span }); + tcx.sess.emit_err(errors::CopyImplOnNonAdt { span }); } Err(CopyImplementationError::HasDestructor) => { - tcx.sess.emit_err(CopyImplOnTypeWithDtor { span }); + tcx.sess.emit_err(errors::CopyImplOnTypeWithDtor { span }); } } } @@ -117,7 +116,7 @@ fn visit_implementation_of_const_param_ty(tcx: TyCtxt<'_>, impl_did: LocalDefId) infringing_fields_error(tcx, fields, LangItem::ConstParamTy, impl_did, span); } Err(ConstParamTyImplementationError::NotAnAdtOrBuiltinAllowed) => { - tcx.sess.emit_err(ConstParamTyImplOnNonAdt { span }); + tcx.sess.emit_err(errors::ConstParamTyImplOnNonAdt { span }); } } } @@ -152,8 +151,6 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef let param_env = tcx.param_env(impl_did); - let create_err = |msg: &str| struct_span_err!(tcx.sess, span, E0378, "{}", msg); - let infcx = tcx.infer_ctxt().build(); let cause = ObligationCause::misc(span, impl_did); @@ -176,22 +173,19 @@ fn 
visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef let source_path = tcx.def_path_str(def_a.did()); let target_path = tcx.def_path_str(def_b.did()); - create_err(&format!( - "the trait `DispatchFromDyn` may only be implemented \ - for a coercion between structures with the same \ - definition; expected `{source_path}`, found `{target_path}`", - )) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynCoercion { + span, + trait_name: "DispatchFromDyn", + note: true, + source_path, + target_path, + }); return; } if def_a.repr().c() || def_a.repr().packed() { - create_err( - "structs implementing `DispatchFromDyn` may not have \ - `#[repr(packed)]` or `#[repr(C)]`", - ) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynRepr { span }); } let fields = &def_a.non_enum_variant().fields; @@ -213,16 +207,11 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef infcx.at(&cause, param_env).eq(DefineOpaqueTypes::No, ty_a, ty_b) { if ok.obligations.is_empty() { - create_err( - "the trait `DispatchFromDyn` may only be implemented \ - for structs containing the field being coerced, \ - ZST fields with 1 byte alignment, and nothing else", - ) - .note(format!( - "extra field `{}` of type `{}` is not allowed", - field.name, ty_a, - )) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynZST { + span, + name: field.name, + ty: ty_a, + }); return false; } @@ -233,36 +222,29 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef .collect::<Vec<_>>(); if coerced_fields.is_empty() { - create_err( - "the trait `DispatchFromDyn` may only be implemented \ - for a coercion between structures with a single field \ - being coerced, none found", - ) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynSingle { + span, + trait_name: "DispatchFromDyn", + note: true, + }); } else if coerced_fields.len() > 1 { - create_err("implementing the `DispatchFromDyn` trait requires multiple coercions") - .note( - "the trait `DispatchFromDyn` may only be implemented \ - for a coercion between structures with a single field \ - being coerced", - ) - .note(format!( - "currently, {} fields need coercions: {}", - coerced_fields.len(), - coerced_fields - .iter() - .map(|field| { - format!( - "`{}` (`{}` to `{}`)", - field.name, - field.ty(tcx, args_a), - field.ty(tcx, args_b), - ) - }) - .collect::<Vec<_>>() - .join(", ") - )) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynMulti { + span, + coercions_note: true, + number: coerced_fields.len(), + coercions: coerced_fields + .iter() + .map(|field| { + format!( + "`{}` (`{}` to `{}`)", + field.name, + field.ty(tcx, args_a), + field.ty(tcx, args_b), + ) + }) + .collect::<Vec<_>>() + .join(", "), + }); } else { let ocx = ObligationCtxt::new(&infcx); for field in coerced_fields { @@ -288,11 +270,7 @@ fn visit_implementation_of_dispatch_from_dyn(tcx: TyCtxt<'_>, impl_did: LocalDef } } _ => { - create_err( - "the trait `DispatchFromDyn` may only be implemented \ - for a coercion between structures", - ) - .emit(); + tcx.sess.emit_err(errors::CoerceUnsizedMay { span, trait_name: "DispatchFromDyn" }); } } } @@ -359,17 +337,13 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe if def_a != def_b { let source_path = tcx.def_path_str(def_a.did()); let target_path = tcx.def_path_str(def_b.did()); - struct_span_err!( - tcx.sess, + tcx.sess.emit_err(errors::DispatchFromDynSame { span, - E0377, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between 
structures with the same \ - definition; expected `{}`, found `{}`", + trait_name: "CoerceUnsized", + note: true, source_path, - target_path - ) - .emit(); + target_path, + }); return err_info; } @@ -445,15 +419,11 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe .collect::<Vec<_>>(); if diff_fields.is_empty() { - struct_span_err!( - tcx.sess, + tcx.sess.emit_err(errors::CoerceUnsizedOneField { span, - E0374, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures with one field \ - being coerced, none found" - ) - .emit(); + trait_name: "CoerceUnsized", + note: true, + }); return err_info; } else if diff_fields.len() > 1 { let item = tcx.hir().expect_item(impl_did); @@ -463,29 +433,17 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe tcx.def_span(impl_did) }; - struct_span_err!( - tcx.sess, + tcx.sess.emit_err(errors::CoerceUnsizedMulti { span, - E0375, - "implementing the trait \ - `CoerceUnsized` requires multiple \ - coercions" - ) - .note( - "`CoerceUnsized` may only be implemented for \ - a coercion between structures with one field being coerced", - ) - .note(format!( - "currently, {} fields need coercions: {}", - diff_fields.len(), - diff_fields + coercions_note: true, + number: diff_fields.len(), + coercions: diff_fields .iter() - .map(|&(i, a, b)| { format!("`{}` (`{}` to `{}`)", fields[i].name, a, b) }) + .map(|&(i, a, b)| format!("`{}` (`{}` to `{}`)", fields[i].name, a, b)) .collect::<Vec<_>>() - .join(", ") - )) - .span_label(span, "requires multiple coercions") - .emit(); + .join(", "), + }); + return err_info; } @@ -495,14 +453,7 @@ pub fn coerce_unsized_info<'tcx>(tcx: TyCtxt<'tcx>, impl_did: LocalDefId) -> Coe } _ => { - struct_span_err!( - tcx.sess, - span, - E0376, - "the trait `CoerceUnsized` may only be implemented \ - for a coercion between structures" - ) - .emit(); + tcx.sess.emit_err(errors::DispatchFromDynStruct { span, trait_name: "CoerceUnsized" }); return err_info; } }; @@ -540,13 +491,6 @@ fn infringing_fields_error( let trait_name = tcx.def_path_str(trait_did); - let mut err = struct_span_err!( - tcx.sess, - impl_span, - E0204, - "the trait `{trait_name}` cannot be implemented for this type" - ); - // We'll try to suggest constraining type parameters to fulfill the requirements of // their `Copy` implementation. let mut errors: BTreeMap<_, Vec<_>> = Default::default(); @@ -554,14 +498,15 @@ fn infringing_fields_error( let mut seen_tys = FxHashSet::default(); + let mut label_spans = Vec::new(); + for (field, ty, reason) in fields { // Only report an error once per type. 
if !seen_tys.insert(ty) { continue; } - let field_span = tcx.def_span(field.did); - err.span_label(field_span, format!("this field does not implement `{trait_name}`")); + label_spans.push(tcx.def_span(field.did)); match reason { InfringingFieldsReason::Fulfill(fulfillment_errors) => { @@ -625,13 +570,24 @@ fn infringing_fields_error( } } } + let mut notes = Vec::new(); for ((ty, error_predicate), spans) in errors { let span: MultiSpan = spans.into(); - err.span_note( + notes.push(errors::ImplForTyRequires { span, - format!("the `{trait_name}` impl for `{ty}` requires that `{error_predicate}`"), - ); + error_predicate, + trait_name: trait_name.clone(), + ty, + }); } + + let mut err = tcx.sess.create_err(errors::TraitCannotImplForTy { + span: impl_span, + trait_name, + label_spans, + notes, + }); + suggest_constraining_type_params( tcx, tcx.hir().get_generics(impl_did).expect("impls always have generics"), diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index cd37221ae6f..221df4e36b2 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -1374,7 +1374,7 @@ fn impl_trait_ref( // make astconv happy. let mut path_segments = ast_trait_ref.path.segments.to_vec(); let last_segment = path_segments.len() - 1; - let mut args = path_segments[last_segment].args().clone(); + let mut args = *path_segments[last_segment].args(); let last_arg = args.args.len() - 1; assert!(matches!(args.args[last_arg], hir::GenericArg::Const(anon_const) if tcx.has_attr(anon_const.value.def_id, sym::rustc_host))); args.args = &args.args[..args.args.len() - 1]; diff --git a/compiler/rustc_hir_analysis/src/errors.rs b/compiler/rustc_hir_analysis/src/errors.rs index 4705e40988b..0efe82b20ee 100644 --- a/compiler/rustc_hir_analysis/src/errors.rs +++ b/compiler/rustc_hir_analysis/src/errors.rs @@ -964,6 +964,25 @@ pub struct InherentTyOutside { } #[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0378")] +pub struct DispatchFromDynCoercion<'a> { + #[primary_span] + pub span: Span, + pub trait_name: &'a str, + #[note(hir_analysis_coercion_between_struct_same_note)] + pub note: bool, + pub source_path: String, + pub target_path: String, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_dispatch_from_dyn_repr, code = "E0378")] +pub struct DispatchFromDynRepr { + #[primary_span] + pub span: Span, +} + +#[derive(Diagnostic)] #[diag(hir_analysis_inherent_ty_outside_relevant, code = "E0390")] #[help] pub struct InherentTyOutsideRelevant { @@ -1025,3 +1044,108 @@ pub struct InherentNominal { #[label] pub span: Span, } + +#[derive(Diagnostic)] +#[diag(hir_analysis_dispatch_from_dyn_zst, code = "E0378")] +#[note] +pub struct DispatchFromDynZST<'a> { + #[primary_span] + pub span: Span, + pub name: Symbol, + pub ty: Ty<'a>, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0378")] +pub struct DispatchFromDynSingle<'a> { + #[primary_span] + pub span: Span, + pub trait_name: &'a str, + #[note(hir_analysis_coercion_between_struct_single_note)] + pub note: bool, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_dispatch_from_dyn_multi, code = "E0378")] +#[note] +pub struct DispatchFromDynMulti { + #[primary_span] + pub span: Span, + #[note(hir_analysis_coercions_note)] + pub coercions_note: bool, + pub number: usize, + pub coercions: String, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0376")] +pub struct DispatchFromDynStruct<'a> { + #[primary_span] + pub span: 
Span, + pub trait_name: &'a str, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0377")] +pub struct DispatchFromDynSame<'a> { + #[primary_span] + pub span: Span, + pub trait_name: &'a str, + #[note(hir_analysis_coercion_between_struct_same_note)] + pub note: bool, + pub source_path: String, + pub target_path: String, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0374")] +pub struct CoerceUnsizedOneField<'a> { + #[primary_span] + pub span: Span, + pub trait_name: &'a str, + #[note(hir_analysis_coercion_between_struct_single_note)] + pub note: bool, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_multi, code = "E0375")] +#[note] +pub struct CoerceUnsizedMulti { + #[primary_span] + #[label] + pub span: Span, + #[note(hir_analysis_coercions_note)] + pub coercions_note: bool, + pub number: usize, + pub coercions: String, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_coerce_unsized_may, code = "E0378")] +pub struct CoerceUnsizedMay<'a> { + #[primary_span] + pub span: Span, + pub trait_name: &'a str, +} + +#[derive(Diagnostic)] +#[diag(hir_analysis_trait_cannot_impl_for_ty, code = "E0204")] +pub struct TraitCannotImplForTy { + #[primary_span] + pub span: Span, + pub trait_name: String, + #[label] + pub label_spans: Vec<Span>, + #[subdiagnostic] + pub notes: Vec<ImplForTyRequires>, +} + +#[derive(Subdiagnostic)] +#[note(hir_analysis_requires_note)] +pub struct ImplForTyRequires { + #[primary_span] + pub span: MultiSpan, + pub error_predicate: String, + pub trait_name: String, + pub ty: String, +} diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs index 256a4bf9449..d97691369c9 100644 --- a/compiler/rustc_hir_typeck/src/demand.rs +++ b/compiler/rustc_hir_typeck/src/demand.rs @@ -644,7 +644,7 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { if self.can_eq(self.param_env, ty, expected) { err.span_label( ex.span, - format!("expected because of this `break`"), + "expected because of this `break`", ); exit = true; } diff --git a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs index b34900da83b..5c3beee284f 100644 --- a/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs +++ b/compiler/rustc_infer/src/infer/error_reporting/note_and_explain.rs @@ -621,7 +621,7 @@ fn foo(&self) -> Self::T { String::new() } { diag.span_label( item.span, - format!("associated type is `default` and may be overridden"), + "associated type is `default` and may be overridden", ); return true; } diff --git a/compiler/rustc_lint/src/context.rs b/compiler/rustc_lint/src/context.rs index 460d54739a2..3c5cde4309b 100644 --- a/compiler/rustc_lint/src/context.rs +++ b/compiler/rustc_lint/src/context.rs @@ -1317,6 +1317,40 @@ impl<'tcx> LateContext<'tcx> { } /// If the given expression is a local binding, find the initializer expression. + /// If that initializer expression is another local binding, find its initializer again. + /// + /// This process repeats as long as possible (but usually no more than once). + /// Type-check adjustments are not taken in account in this function. 
+ /// + /// Examples: + /// ``` + /// let abc = 1; + /// let def = abc + 2; + /// // ^^^^^^^ output + /// let def = def; + /// dbg!(def); + /// // ^^^ input + /// ``` + pub fn expr_or_init<'a>(&self, mut expr: &'a hir::Expr<'tcx>) -> &'a hir::Expr<'tcx> { + expr = expr.peel_blocks(); + + while let hir::ExprKind::Path(ref qpath) = expr.kind + && let Some(parent_node) = match self.qpath_res(qpath, expr.hir_id) { + Res::Local(hir_id) => self.tcx.hir().find_parent(hir_id), + _ => None, + } + && let Some(init) = match parent_node { + hir::Node::Expr(expr) => Some(expr), + hir::Node::Local(hir::Local { init, .. }) => *init, + _ => None + } + { + expr = init.peel_blocks(); + } + expr + } + + /// If the given expression is a local binding, find the initializer expression. /// If that initializer expression is another local or **outside** (`const`/`static`) /// binding, find its initializer again. /// @@ -1338,7 +1372,10 @@ impl<'tcx> LateContext<'tcx> { /// dbg!(def); /// // ^^^ input /// ``` - pub fn expr_or_init<'a>(&self, mut expr: &'a hir::Expr<'tcx>) -> &'a hir::Expr<'tcx> { + pub fn expr_or_init_with_outside_body<'a>( + &self, + mut expr: &'a hir::Expr<'tcx>, + ) -> &'a hir::Expr<'tcx> { expr = expr.peel_blocks(); while let hir::ExprKind::Path(ref qpath) = expr.kind diff --git a/compiler/rustc_lint/src/foreign_modules.rs b/compiler/rustc_lint/src/foreign_modules.rs index 7b291d558e0..e1df69bdaf2 100644 --- a/compiler/rustc_lint/src/foreign_modules.rs +++ b/compiler/rustc_lint/src/foreign_modules.rs @@ -5,19 +5,18 @@ use rustc_hir::def::DefKind; use rustc_middle::query::Providers; use rustc_middle::ty::layout::LayoutError; use rustc_middle::ty::{self, Instance, Ty, TyCtxt}; -use rustc_session::lint::{lint_array, LintArray}; use rustc_span::{sym, Span, Symbol}; use rustc_target::abi::FIRST_VARIANT; use crate::lints::{BuiltinClashingExtern, BuiltinClashingExternSub}; -use crate::types; +use crate::{types, LintVec}; pub(crate) fn provide(providers: &mut Providers) { *providers = Providers { clashing_extern_declarations, ..*providers }; } -pub(crate) fn get_lints() -> LintArray { - lint_array!(CLASHING_EXTERN_DECLARATIONS) +pub(crate) fn get_lints() -> LintVec { + vec![CLASHING_EXTERN_DECLARATIONS] } fn clashing_extern_declarations(tcx: TyCtxt<'_>, (): ()) { diff --git a/compiler/rustc_lint/src/invalid_from_utf8.rs b/compiler/rustc_lint/src/invalid_from_utf8.rs index 1841e7c85a8..e398059ade9 100644 --- a/compiler/rustc_lint/src/invalid_from_utf8.rs +++ b/compiler/rustc_lint/src/invalid_from_utf8.rs @@ -84,9 +84,9 @@ impl<'tcx> LateLintPass<'tcx> for InvalidFromUtf8 { ) }; - let mut init = cx.expr_or_init(arg); + let mut init = cx.expr_or_init_with_outside_body(arg); while let ExprKind::AddrOf(.., inner) = init.kind { - init = cx.expr_or_init(inner); + init = cx.expr_or_init_with_outside_body(inner); } match init.kind { ExprKind::Lit(Spanned { node: lit, .. }) => { diff --git a/compiler/rustc_lint/src/lib.rs b/compiler/rustc_lint/src/lib.rs index 284560465d6..72c103f2d4a 100644 --- a/compiler/rustc_lint/src/lib.rs +++ b/compiler/rustc_lint/src/lib.rs @@ -130,7 +130,7 @@ pub use late::{check_crate, late_lint_mod, unerased_lint_store}; pub use passes::{EarlyLintPass, LateLintPass}; pub use rustc_session::lint::Level::{self, *}; pub use rustc_session::lint::{BufferedEarlyLint, FutureIncompatibleInfo, Lint, LintId}; -pub use rustc_session::lint::{LintArray, LintPass}; +pub use rustc_session::lint::{LintPass, LintVec}; fluent_messages! 
{ "../messages.ftl" } @@ -196,7 +196,7 @@ late_lint_methods!( BoxPointers: BoxPointers, PathStatements: PathStatements, LetUnderscore: LetUnderscore, - InvalidReferenceCasting: InvalidReferenceCasting::default(), + InvalidReferenceCasting: InvalidReferenceCasting, // Depends on referenced function signatures in expressions UnusedResults: UnusedResults, NonUpperCaseGlobals: NonUpperCaseGlobals, diff --git a/compiler/rustc_lint/src/noop_method_call.rs b/compiler/rustc_lint/src/noop_method_call.rs index bc0b9d6d818..cfbca6efbfa 100644 --- a/compiler/rustc_lint/src/noop_method_call.rs +++ b/compiler/rustc_lint/src/noop_method_call.rs @@ -98,6 +98,12 @@ impl<'tcx> LateLintPass<'tcx> for NoopMethodCall { let Ok(Some(i)) = ty::Instance::resolve(cx.tcx, cx.param_env, did, args) else { return }; // (Re)check that it implements the noop diagnostic. let Some(name) = cx.tcx.get_diagnostic_name(i.def_id()) else { return }; + if !matches!( + name, + sym::noop_method_borrow | sym::noop_method_clone | sym::noop_method_deref + ) { + return; + } let receiver_ty = cx.typeck_results().expr_ty(receiver); let expr_ty = cx.typeck_results().expr_ty_adjusted(expr); diff --git a/compiler/rustc_lint/src/passes.rs b/compiler/rustc_lint/src/passes.rs index 7d2a9102640..508f3e1ec31 100644 --- a/compiler/rustc_lint/src/passes.rs +++ b/compiler/rustc_lint/src/passes.rs @@ -111,7 +111,7 @@ macro_rules! declare_combined_late_lint_pass { } } - $v fn get_lints() -> $crate::LintArray { + $v fn get_lints() -> $crate::LintVec { let mut lints = Vec::new(); $(lints.extend_from_slice(&$pass::get_lints());)* lints @@ -226,7 +226,7 @@ macro_rules! declare_combined_early_lint_pass { } } - $v fn get_lints() -> $crate::LintArray { + $v fn get_lints() -> $crate::LintVec { let mut lints = Vec::new(); $(lints.extend_from_slice(&$pass::get_lints());)* lints diff --git a/compiler/rustc_lint/src/reference_casting.rs b/compiler/rustc_lint/src/reference_casting.rs index d540f473ae8..39def599be8 100644 --- a/compiler/rustc_lint/src/reference_casting.rs +++ b/compiler/rustc_lint/src/reference_casting.rs @@ -1,8 +1,7 @@ use rustc_ast::Mutability; -use rustc_data_structures::fx::FxHashMap; -use rustc_hir::{def::Res, Expr, ExprKind, HirId, Local, QPath, StmtKind, UnOp}; +use rustc_hir::{Expr, ExprKind, UnOp}; use rustc_middle::ty::{self, TypeAndMut}; -use rustc_span::{sym, Span}; +use rustc_span::sym; use crate::{lints::InvalidReferenceCastingDiag, LateContext, LateLintPass, LintContext}; @@ -34,38 +33,18 @@ declare_lint! { "casts of `&T` to `&mut T` without interior mutability" } -#[derive(Default)] -pub struct InvalidReferenceCasting { - casted: FxHashMap<HirId, Span>, -} - -impl_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]); +declare_lint_pass!(InvalidReferenceCasting => [INVALID_REFERENCE_CASTING]); impl<'tcx> LateLintPass<'tcx> for InvalidReferenceCasting { - fn check_stmt(&mut self, cx: &LateContext<'tcx>, stmt: &'tcx rustc_hir::Stmt<'tcx>) { - let StmtKind::Local(local) = stmt.kind else { - return; - }; - let Local { init: Some(init), els: None, .. 
} = local else { - return; - }; - - if is_cast_from_const_to_mut(cx, init) { - self.casted.insert(local.pat.hir_id, init.span); - } - } - fn check_expr(&mut self, cx: &LateContext<'tcx>, expr: &'tcx Expr<'tcx>) { let Some((is_assignment, e)) = is_operation_we_care_about(cx, expr) else { return; }; - let orig_cast = if is_cast_from_const_to_mut(cx, e) { - None - } else if let ExprKind::Path(QPath::Resolved(_, path)) = e.kind - && let Res::Local(hir_id) = &path.res - && let Some(orig_cast) = self.casted.get(hir_id) { - Some(*orig_cast) + let init = cx.expr_or_init(e); + + let orig_cast = if is_cast_from_const_to_mut(cx, init) { + if init.span != e.span { Some(init.span) } else { None } } else { return; }; @@ -125,99 +104,51 @@ fn is_operation_we_care_about<'tcx>( deref_assign_or_addr_of(e).or_else(|| ptr_write(cx, e)) } -fn is_cast_from_const_to_mut<'tcx>(cx: &LateContext<'tcx>, e: &'tcx Expr<'tcx>) -> bool { - let e = e.peel_blocks(); +fn is_cast_from_const_to_mut<'tcx>(cx: &LateContext<'tcx>, orig_expr: &'tcx Expr<'tcx>) -> bool { + let mut need_check_freeze = false; + let mut e = orig_expr; - fn from_casts<'tcx>( - cx: &LateContext<'tcx>, - e: &'tcx Expr<'tcx>, - need_check_freeze: &mut bool, - ) -> Option<&'tcx Expr<'tcx>> { - // <expr> as *mut ... - let mut e = if let ExprKind::Cast(e, t) = e.kind - && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(t.hir_id).kind() { - e - // <expr>.cast_mut() + let end_ty = cx.typeck_results().node_type(orig_expr.hir_id); + + // Bail out early if the end type is **not** a mutable pointer. + if !matches!(end_ty.kind(), ty::RawPtr(TypeAndMut { ty: _, mutbl: Mutability::Mut })) { + return false; + } + + loop { + e = e.peel_blocks(); + // <expr> as ... + e = if let ExprKind::Cast(expr, _) = e.kind { + expr + // <expr>.cast(), <expr>.cast_mut() or <expr>.cast_const() } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id) - && cx.tcx.is_diagnostic_item(sym::ptr_cast_mut, def_id) { + && matches!( + cx.tcx.get_diagnostic_name(def_id), + Some(sym::ptr_cast | sym::const_ptr_cast | sym::ptr_cast_mut | sym::ptr_cast_const) + ) + { expr - // UnsafeCell::raw_get(<expr>) + // ptr::from_ref(<expr>), UnsafeCell::raw_get(<expr>) or mem::transmute<_, _>(<expr>) } else if let ExprKind::Call(path, [arg]) = e.kind && let ExprKind::Path(ref qpath) = path.kind && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() - && cx.tcx.is_diagnostic_item(sym::unsafe_cell_raw_get, def_id) + && matches!( + cx.tcx.get_diagnostic_name(def_id), + Some(sym::ptr_from_ref | sym::unsafe_cell_raw_get | sym::transmute) + ) { - *need_check_freeze = true; + if cx.tcx.is_diagnostic_item(sym::unsafe_cell_raw_get, def_id) { + need_check_freeze = true; + } arg } else { - return None; + break; }; - - let mut had_at_least_one_cast = false; - loop { - e = e.peel_blocks(); - // <expr> as *mut/const ... 
or <expr> as <uint> - e = if let ExprKind::Cast(expr, t) = e.kind - && matches!(cx.typeck_results().node_type(t.hir_id).kind(), ty::RawPtr(_) | ty::Uint(_)) { - had_at_least_one_cast = true; - expr - // <expr>.cast(), <expr>.cast_mut() or <expr>.cast_const() - } else if let ExprKind::MethodCall(_, expr, [], _) = e.kind - && let Some(def_id) = cx.typeck_results().type_dependent_def_id(e.hir_id) - && matches!( - cx.tcx.get_diagnostic_name(def_id), - Some(sym::ptr_cast | sym::const_ptr_cast | sym::ptr_cast_mut | sym::ptr_cast_const) - ) - { - had_at_least_one_cast = true; - expr - // ptr::from_ref(<expr>) or UnsafeCell::raw_get(<expr>) - } else if let ExprKind::Call(path, [arg]) = e.kind - && let ExprKind::Path(ref qpath) = path.kind - && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() - && matches!( - cx.tcx.get_diagnostic_name(def_id), - Some(sym::ptr_from_ref | sym::unsafe_cell_raw_get) - ) - { - if cx.tcx.is_diagnostic_item(sym::unsafe_cell_raw_get, def_id) { - *need_check_freeze = true; - } - return Some(arg); - } else if had_at_least_one_cast { - return Some(e); - } else { - return None; - }; - } - } - - fn from_transmute<'tcx>( - cx: &LateContext<'tcx>, - e: &'tcx Expr<'tcx>, - ) -> Option<&'tcx Expr<'tcx>> { - // mem::transmute::<_, *mut _>(<expr>) - if let ExprKind::Call(path, [arg]) = e.kind - && let ExprKind::Path(ref qpath) = path.kind - && let Some(def_id) = cx.qpath_res(qpath, path.hir_id).opt_def_id() - && cx.tcx.is_diagnostic_item(sym::transmute, def_id) - && let ty::RawPtr(TypeAndMut { mutbl: Mutability::Mut, .. }) = cx.typeck_results().node_type(e.hir_id).kind() { - Some(arg) - } else { - None - } } - let mut need_check_freeze = false; - let Some(e) = from_casts(cx, e, &mut need_check_freeze).or_else(|| from_transmute(cx, e)) - else { - return false; - }; - - let e = e.peel_blocks(); - let node_type = cx.typeck_results().node_type(e.hir_id); - if let ty::Ref(_, inner_ty, Mutability::Not) = node_type.kind() { + let start_ty = cx.typeck_results().node_type(e.hir_id); + if let ty::Ref(_, inner_ty, Mutability::Not) = start_ty.kind() { // If an UnsafeCell method is involved we need to additionaly check the // inner type for the presence of the Freeze trait (ie does NOT contain // an UnsafeCell), since in that case we would incorrectly lint on valid casts. diff --git a/compiler/rustc_lint_defs/src/lib.rs b/compiler/rustc_lint_defs/src/lib.rs index 84e7ecb0b88..7ba589c3b5a 100644 --- a/compiler/rustc_lint_defs/src/lib.rs +++ b/compiler/rustc_lint_defs/src/lib.rs @@ -785,16 +785,7 @@ macro_rules! declare_tool_lint { ); } -/// Declares a static `LintArray` and return it as an expression. -#[macro_export] -macro_rules! lint_array { - ($( $lint:expr ),* ,) => { lint_array!( $($lint),* ) }; - ($( $lint:expr ),*) => {{ - vec![$($lint),*] - }} -} - -pub type LintArray = Vec<&'static Lint>; +pub type LintVec = Vec<&'static Lint>; pub trait LintPass { fn name(&self) -> &'static str; @@ -808,7 +799,7 @@ macro_rules! 
impl_lint_pass { fn name(&self) -> &'static str { stringify!($ty) } } impl $ty { - pub fn get_lints() -> $crate::LintArray { $crate::lint_array!($($lint),*) } + pub fn get_lints() -> $crate::LintVec { vec![$($lint),*] } } }; } diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs index cd770c395e4..3ecd5b9cd34 100644 --- a/compiler/rustc_middle/src/mir/basic_blocks.rs +++ b/compiler/rustc_middle/src/mir/basic_blocks.rs @@ -63,11 +63,14 @@ impl<'tcx> BasicBlocks<'tcx> { } /// Returns basic blocks in a reverse postorder. + /// + /// See [`traversal::reverse_postorder`]'s docs to learn what is preorder traversal. + /// + /// [`traversal::reverse_postorder`]: crate::mir::traversal::reverse_postorder #[inline] pub fn reverse_postorder(&self) -> &[BasicBlock] { self.cache.reverse_postorder.get_or_init(|| { - let mut rpo: Vec<_> = - Postorder::new(&self.basic_blocks, START_BLOCK).map(|(bb, _)| bb).collect(); + let mut rpo: Vec<_> = Postorder::new(&self.basic_blocks, START_BLOCK).collect(); rpo.reverse(); rpo }) diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 349b32c10fb..76567c3f6b0 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -1146,10 +1146,10 @@ fn post_fmt_projection(projection: &[PlaceElem<'_>], fmt: &mut Formatter<'_>) -> ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => { write!(fmt, "[-{offset:?} of {min_length:?}]")?; } - ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => { + ProjectionElem::Subslice { from, to: 0, from_end: true } => { write!(fmt, "[{from:?}:]")?; } - ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => { + ProjectionElem::Subslice { from: 0, to, from_end: true } => { write!(fmt, "[:-{to:?}]")?; } ProjectionElem::Subslice { from, to, from_end: true } => { diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index 201926fee3e..55f895f73b4 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -139,6 +139,7 @@ pub enum RuntimePhase { /// * [`TerminatorKind::Yield`] /// * [`TerminatorKind::GeneratorDrop`] /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array` + /// * [`PlaceElem::OpaqueCast`] /// /// And the following variants are allowed: /// * [`StatementKind::Retag`] diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs index ec16a8470c4..a1ff8410eac 100644 --- a/compiler/rustc_middle/src/mir/traversal.rs +++ b/compiler/rustc_middle/src/mir/traversal.rs @@ -41,6 +41,12 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> { } } +/// Preorder traversal of a graph. +/// +/// This function creates an iterator over the `Body`'s basic blocks, that +/// returns basic blocks in a preorder. +/// +/// See [`Preorder`]'s docs to learn what is preorder traversal. pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> { Preorder::new(body, START_BLOCK) } @@ -178,7 +184,7 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { // When we yield `C` and call `traverse_successor`, we push `B` to the stack, but // since we've already visited `E`, that child isn't added to the stack. 
The last // two iterations yield `B` and finally `A` for a final traversal of [E, D, C, B, A] - while let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() && let Some(bb) = iter.next_back() { + while let Some(bb) = self.visit_stack.last_mut().and_then(|(_, iter)| iter.next_back()) { if self.visited.insert(bb) { if let Some(term) = &self.basic_blocks[bb].terminator { self.visit_stack.push((bb, term.successors())); @@ -188,16 +194,14 @@ impl<'a, 'tcx> Postorder<'a, 'tcx> { } } -impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { - type Item = (BasicBlock, &'a BasicBlockData<'tcx>); +impl<'tcx> Iterator for Postorder<'_, 'tcx> { + type Item = BasicBlock; - fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { - let next = self.visit_stack.pop(); - if next.is_some() { - self.traverse_successor(); - } + fn next(&mut self) -> Option<BasicBlock> { + let (bb, _) = self.visit_stack.pop()?; + self.traverse_successor(); - next.map(|(bb, _)| (bb, &self.basic_blocks[bb])) + Some(bb) } fn size_hint(&self) -> (usize, Option<usize>) { @@ -215,10 +219,14 @@ impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> { } } -/// Creates an iterator over the `Body`'s basic blocks, that: +/// Postorder traversal of a graph. +/// +/// This function creates an iterator over the `Body`'s basic blocks, that: /// - returns basic blocks in a postorder, /// - traverses the `BasicBlocks` CFG cache's reverse postorder backwards, and does not cache the /// postorder itself. +/// +/// See [`Postorder`]'s docs to learn what is postorder traversal. pub fn postorder<'a, 'tcx>( body: &'a Body<'tcx>, ) -> impl Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> + ExactSizeIterator + DoubleEndedIterator @@ -226,7 +234,28 @@ pub fn postorder<'a, 'tcx>( reverse_postorder(body).rev() } -/// Reverse postorder traversal of a graph +/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular +/// order. +/// +/// This is clearer than writing `preorder` in cases where the order doesn't matter. +pub fn reachable<'a, 'tcx>( + body: &'a Body<'tcx>, +) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> { + preorder(body) +} + +/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`. +pub fn reachable_as_bitset(body: &Body<'_>) -> BitSet<BasicBlock> { + let mut iter = preorder(body); + iter.by_ref().for_each(drop); + iter.visited +} + +/// Reverse postorder traversal of a graph. +/// +/// This function creates an iterator over the `Body`'s basic blocks, that: +/// - returns basic blocks in a reverse postorder, +/// - makes use of the `BasicBlocks` CFG cache's reverse postorder. /// /// Reverse postorder is the reverse order of a postorder traversal. /// This is different to a preorder traversal and represents a natural @@ -246,65 +275,6 @@ pub fn postorder<'a, 'tcx>( /// A reverse postorder traversal of this graph is either `A B C D` or `A C B D` /// Note that for a graph containing no loops (i.e., A DAG), this is equivalent to /// a topological sort. -/// -/// Construction of a `ReversePostorder` traversal requires doing a full -/// postorder traversal of the graph, therefore this traversal should be -/// constructed as few times as possible. 
Use the `reset` method to be able -/// to re-use the traversal -#[derive(Clone)] -pub struct ReversePostorder<'a, 'tcx> { - body: &'a Body<'tcx>, - blocks: Vec<BasicBlock>, - idx: usize, -} - -impl<'a, 'tcx> ReversePostorder<'a, 'tcx> { - pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> { - let blocks: Vec<_> = Postorder::new(&body.basic_blocks, root).map(|(bb, _)| bb).collect(); - let len = blocks.len(); - ReversePostorder { body, blocks, idx: len } - } -} - -impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> { - type Item = (BasicBlock, &'a BasicBlockData<'tcx>); - - fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> { - if self.idx == 0 { - return None; - } - self.idx -= 1; - - self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb])) - } - - fn size_hint(&self) -> (usize, Option<usize>) { - (self.idx, Some(self.idx)) - } -} - -impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {} - -/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular -/// order. -/// -/// This is clearer than writing `preorder` in cases where the order doesn't matter. -pub fn reachable<'a, 'tcx>( - body: &'a Body<'tcx>, -) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> { - preorder(body) -} - -/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`. -pub fn reachable_as_bitset(body: &Body<'_>) -> BitSet<BasicBlock> { - let mut iter = preorder(body); - (&mut iter).for_each(drop); - iter.visited -} - -/// Creates an iterator over the `Body`'s basic blocks, that: -/// - returns basic blocks in a reverse postorder, -/// - makes use of the `BasicBlocks` CFG cache's reverse postorder. pub fn reverse_postorder<'a, 'tcx>( body: &'a Body<'tcx>, ) -> impl Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> + ExactSizeIterator + DoubleEndedIterator diff --git a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs index 95dced644e1..d440ca31926 100644 --- a/compiler/rustc_mir_build/src/thir/pattern/check_match.rs +++ b/compiler/rustc_mir_build/src/thir/pattern/check_match.rs @@ -740,9 +740,7 @@ fn non_exhaustive_match<'p, 'tcx>( )); } } else if ty == cx.tcx.types.str_ { - err.note(format!( - "`&str` cannot be matched exhaustively, so a wildcard `_` is necessary", - )); + err.note("`&str` cannot be matched exhaustively, so a wildcard `_` is necessary"); } else if cx.is_foreign_non_exhaustive_enum(ty) { err.note(format!("`{ty}` is marked as non-exhaustive, so a wildcard `_` is necessary to match exhaustively")); } diff --git a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs index e24685cb90f..c9991e499b3 100644 --- a/compiler/rustc_mir_dataflow/src/elaborate_drops.rs +++ b/compiler/rustc_mir_dataflow/src/elaborate_drops.rs @@ -194,6 +194,7 @@ where D: DropElaborator<'b, 'tcx>, 'tcx: 'b, { + #[instrument(level = "trace", skip(self), ret)] fn place_ty(&self, place: Place<'tcx>) -> Ty<'tcx> { place.ty(self.elaborator.body(), self.tcx()).ty } @@ -220,11 +221,9 @@ where // // FIXME: I think we should just control the flags externally, // and then we do not need this machinery. 
+ #[instrument(level = "debug")] pub fn elaborate_drop(&mut self, bb: BasicBlock) { - debug!("elaborate_drop({:?}, {:?})", bb, self); - let style = self.elaborator.drop_style(self.path, DropFlagMode::Deep); - debug!("elaborate_drop({:?}, {:?}): live - {:?}", bb, self, style); - match style { + match self.elaborator.drop_style(self.path, DropFlagMode::Deep) { DropStyle::Dead => { self.elaborator .patch() diff --git a/compiler/rustc_mir_transform/src/coverage/spans.rs b/compiler/rustc_mir_transform/src/coverage/spans.rs index 5b24fa10bea..767f8e9f4fa 100644 --- a/compiler/rustc_mir_transform/src/coverage/spans.rs +++ b/compiler/rustc_mir_transform/src/coverage/spans.rs @@ -763,7 +763,7 @@ pub(super) fn filtered_statement_span(statement: &Statement<'_>) -> Option<Span> // and `_1` is the `Place` for `somenum`. // // If and when the Issue is resolved, remove this special case match pattern: - StatementKind::FakeRead(box (cause, _)) if cause == FakeReadCause::ForGuardBinding => None, + StatementKind::FakeRead(box (FakeReadCause::ForGuardBinding, _)) => None, // Retain spans from all other statements StatementKind::FakeRead(box (_, _)) // Not including `ForGuardBinding` diff --git a/compiler/rustc_mir_transform/src/elaborate_drops.rs b/compiler/rustc_mir_transform/src/elaborate_drops.rs index 6a89d067275..b62d7da2a4c 100644 --- a/compiler/rustc_mir_transform/src/elaborate_drops.rs +++ b/compiler/rustc_mir_transform/src/elaborate_drops.rs @@ -170,6 +170,7 @@ impl<'a, 'tcx> DropElaborator<'a, 'tcx> for Elaborator<'a, '_, 'tcx> { self.ctxt.param_env() } + #[instrument(level = "debug", skip(self), ret)] fn drop_style(&self, path: Self::Path, mode: DropFlagMode) -> DropStyle { let ((maybe_live, maybe_dead), multipart) = match mode { DropFlagMode::Shallow => (self.ctxt.init_data.maybe_live_dead(path), false), diff --git a/compiler/rustc_mir_transform/src/large_enums.rs b/compiler/rustc_mir_transform/src/large_enums.rs index 4eee45f8d00..886ff760481 100644 --- a/compiler/rustc_mir_transform/src/large_enums.rs +++ b/compiler/rustc_mir_transform/src/large_enums.rs @@ -54,11 +54,8 @@ impl EnumSizeOpt { let layout = tcx.layout_of(param_env.and(ty)).ok()?; let variants = match &layout.variants { Variants::Single { .. } => return None, - Variants::Multiple { tag_encoding, .. } - if matches!(tag_encoding, TagEncoding::Niche { .. }) => - { - return None; - } + Variants::Multiple { tag_encoding: TagEncoding::Niche { .. }, .. } => return None, + Variants::Multiple { variants, .. } if variants.len() <= 1 => return None, Variants::Multiple { variants, .. } => variants, }; diff --git a/compiler/rustc_mir_transform/src/lib.rs b/compiler/rustc_mir_transform/src/lib.rs index 9e4bc456d51..754f2ee8376 100644 --- a/compiler/rustc_mir_transform/src/lib.rs +++ b/compiler/rustc_mir_transform/src/lib.rs @@ -480,6 +480,7 @@ fn run_runtime_lowering_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { let passes: &[&dyn MirPass<'tcx>] = &[ // These next passes must be executed together &add_call_guards::CriticalCallEdges, + &reveal_all::RevealAll, // has to be done before drop elaboration, since we need to drop opaque types, too. 
&elaborate_drops::ElaborateDrops, // This will remove extraneous landing pads which are no longer // necessary as well as well as forcing any call in a non-unwinding @@ -526,7 +527,6 @@ fn run_optimization_passes<'tcx>(tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { body, &[ &check_alignment::CheckAlignment, - &reveal_all::RevealAll, // has to be done before inlining, since inlined code is in RevealAll mode. &lower_slice_len::LowerSliceLenCalls, // has to be done before inlining, otherwise actual call will be almost always inlined. Also simple, so can just do first &unreachable_prop::UnreachablePropagation, &uninhabited_enum_branching::UninhabitedEnumBranching, diff --git a/compiler/rustc_mir_transform/src/reveal_all.rs b/compiler/rustc_mir_transform/src/reveal_all.rs index 065793348e4..55f1eac6f84 100644 --- a/compiler/rustc_mir_transform/src/reveal_all.rs +++ b/compiler/rustc_mir_transform/src/reveal_all.rs @@ -8,16 +8,7 @@ use rustc_middle::ty::{self, Ty, TyCtxt}; pub struct RevealAll; impl<'tcx> MirPass<'tcx> for RevealAll { - fn is_enabled(&self, sess: &rustc_session::Session) -> bool { - sess.mir_opt_level() >= 3 || super::inline::Inline.is_enabled(sess) - } - fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) { - // Do not apply this transformation to generators. - if body.generator.is_some() { - return; - } - let param_env = tcx.param_env_reveal_all_normalized(body.source.def_id()); RevealAllVisitor { tcx, param_env }.visit_body_preserves_cfg(body); } @@ -35,6 +26,29 @@ impl<'tcx> MutVisitor<'tcx> for RevealAllVisitor<'tcx> { } #[inline] + fn visit_place( + &mut self, + place: &mut Place<'tcx>, + _context: PlaceContext, + _location: Location, + ) { + // Performance optimization: don't reintern if there is no `OpaqueCast` to remove. + if place.projection.iter().all(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) { + return; + } + // `OpaqueCast` projections are only needed if there are opaque types on which projections are performed. + // After the `RevealAll` pass, all opaque types are replaced with their hidden types, so we don't need these + // projections anymore. + place.projection = self.tcx.mk_place_elems( + &place + .projection + .into_iter() + .filter(|elem| !matches!(elem, ProjectionElem::OpaqueCast(_))) + .collect::<Vec<_>>(), + ); + } + + #[inline] fn visit_constant(&mut self, constant: &mut ConstOperand<'tcx>, _: Location) { // We have to use `try_normalize_erasing_regions` here, since it's // possible that we visit impossible-to-satisfy where clauses here, diff --git a/compiler/rustc_mir_transform/src/ssa.rs b/compiler/rustc_mir_transform/src/ssa.rs index 3a675752fba..af9514ed6bb 100644 --- a/compiler/rustc_mir_transform/src/ssa.rs +++ b/compiler/rustc_mir_transform/src/ssa.rs @@ -78,14 +78,10 @@ impl SsaLocals { visitor.assignments[local] = Set1::One(LocationExtended::Arg); } - if body.basic_blocks.len() > 2 { - for (bb, data) in traversal::reverse_postorder(body) { - visitor.visit_basic_block_data(bb, data); - } - } else { - for (bb, data) in body.basic_blocks.iter_enumerated() { - visitor.visit_basic_block_data(bb, data); - } + // For SSA assignments, a RPO visit will see the assignment before it sees any use. + // We only visit reachable nodes: computing `dominates` on an unreachable node ICEs. 
+ for (bb, data) in traversal::reverse_postorder(body) { + visitor.visit_basic_block_data(bb, data); } for var_debug_info in &body.var_debug_info { diff --git a/compiler/rustc_parse/messages.ftl b/compiler/rustc_parse/messages.ftl index 2c4bc7bb568..05b6c406206 100644 --- a/compiler/rustc_parse/messages.ftl +++ b/compiler/rustc_parse/messages.ftl @@ -509,7 +509,7 @@ parse_maybe_fn_typo_with_impl = you might have meant to write `impl` instead of parse_maybe_recover_from_bad_qpath_stage_2 = missing angle brackets in associated item path - .suggestion = try: `{$ty}` + .suggestion = types that don't start with an identifier need to be surrounded with angle brackets in qualified paths parse_maybe_recover_from_bad_type_plus = expected a path on the left-hand side of `+`, not `{$ty}` diff --git a/compiler/rustc_parse/src/errors.rs b/compiler/rustc_parse/src/errors.rs index 5d3ec683552..7c75e440aaa 100644 --- a/compiler/rustc_parse/src/errors.rs +++ b/compiler/rustc_parse/src/errors.rs @@ -59,9 +59,18 @@ pub(crate) enum BadTypePlusSub { #[diag(parse_maybe_recover_from_bad_qpath_stage_2)] pub(crate) struct BadQPathStage2 { #[primary_span] - #[suggestion(code = "", applicability = "maybe-incorrect")] pub span: Span, - pub ty: String, + #[subdiagnostic] + pub wrap: WrapType, +} + +#[derive(Subdiagnostic)] +#[multipart_suggestion(parse_suggestion, applicability = "machine-applicable")] +pub(crate) struct WrapType { + #[suggestion_part(code = "<")] + pub lo: Span, + #[suggestion_part(code = ">")] + pub hi: Span, } #[derive(Diagnostic)] diff --git a/compiler/rustc_parse/src/parser/diagnostics.rs b/compiler/rustc_parse/src/parser/diagnostics.rs index 6c8ef34063f..06b1b1523ed 100644 --- a/compiler/rustc_parse/src/parser/diagnostics.rs +++ b/compiler/rustc_parse/src/parser/diagnostics.rs @@ -16,7 +16,7 @@ use crate::errors::{ StructLiteralBodyWithoutPath, StructLiteralBodyWithoutPathSugg, StructLiteralNeedingParens, StructLiteralNeedingParensSugg, SuggAddMissingLetStmt, SuggEscapeIdentifier, SuggRemoveComma, TernaryOperator, UnexpectedConstInGenericParam, UnexpectedConstParamDeclaration, - UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead, + UnexpectedConstParamDeclarationSugg, UnmatchedAngleBrackets, UseEqInstead, WrapType, }; use crate::fluent_generated as fluent; @@ -1589,10 +1589,9 @@ impl<'a> Parser<'a> { self.parse_path_segments(&mut path.segments, T::PATH_STYLE, None)?; path.span = ty_span.to(self.prev_token.span); - let ty_str = self.span_to_snippet(ty_span).unwrap_or_else(|_| pprust::ty_to_string(&ty)); self.sess.emit_err(BadQPathStage2 { - span: path.span, - ty: format!("<{}>::{}", ty_str, pprust::path_to_string(&path)), + span: ty_span, + wrap: WrapType { lo: ty_span.shrink_to_lo(), hi: ty_span.shrink_to_hi() }, }); let path_span = ty_span.shrink_to_hi(); // Use an empty path since `position == 0`. 
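The parser change just above drops the old snippet-echoing suggestion in favor of a machine-applicable multipart suggestion that wraps the offending type in angle brackets. For context, here is a minimal sketch of the surface syntax the new message ("types that don't start with an identifier need to be surrounded with angle brackets in qualified paths") points users toward; the array type and the `IntoIterator` projection are chosen for illustration only and are not taken from the diff:

```rust
// In type position, a plain path may only start with an identifier. For any
// other type (arrays, slices, tuples, ...), the type must be wrapped in angle
// brackets to form a qualified path. Written without the brackets, e.g.
// `[u8; 4]::Item`, this is roughly the shape of path the recovery above
// handles.
type FourBytesItem = <[u8; 4] as IntoIterator>::Item; // resolves to `u8`

fn main() {
    let x: FourBytesItem = 7;
    assert_eq!(x, 7u8);
}
```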
diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index c3c43346ed8..15ec727e4c9 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -4140,6 +4140,12 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { }); } + fn resolve_expr_field(&mut self, f: &'ast ExprField, e: &'ast Expr) { + self.resolve_expr(&f.expr, Some(e)); + self.visit_ident(f.ident); + walk_list!(self, visit_attribute, f.attrs.iter()); + } + fn resolve_expr(&mut self, expr: &'ast Expr, parent: Option<&'ast Expr>) { // First, record candidate traits for this expression if it could // result in the invocation of a method call. @@ -4155,7 +4161,19 @@ impl<'a: 'ast, 'b, 'ast, 'tcx> LateResolutionVisitor<'a, 'b, 'ast, 'tcx> { ExprKind::Struct(ref se) => { self.smart_resolve_path(expr.id, &se.qself, &se.path, PathSource::Struct); - visit::walk_expr(self, expr); + // This is the same as `visit::walk_expr(self, expr);`, but we want to pass the + // parent in for accurate suggestions when encountering `Foo { bar }` that should + // have been `Foo { bar: self.bar }`. + if let Some(qself) = &se.qself { + self.visit_ty(&qself.ty); + } + self.visit_path(&se.path, expr.id); + walk_list!(self, resolve_expr_field, &se.fields, expr); + match &se.rest { + StructRest::Base(expr) => self.visit_expr(expr), + StructRest::Rest(_span) => {} + StructRest::None => {} + } } ExprKind::Break(Some(label), _) | ExprKind::Continue(Some(label)) => { diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index 64440a6c04e..bc5f8a37b0f 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -41,7 +41,7 @@ type Res = def::Res<ast::NodeId>; /// A field or associated item from self type suggested in case of resolution failure. enum AssocSuggestion { - Field, + Field(Span), MethodWithSelf { called: bool }, AssocFn { called: bool }, AssocType, @@ -51,7 +51,7 @@ enum AssocSuggestion { impl AssocSuggestion { fn action(&self) -> &'static str { match self { - AssocSuggestion::Field => "use the available field", + AssocSuggestion::Field(_) => "use the available field", AssocSuggestion::MethodWithSelf { called: true } => { "call the method with the fully-qualified path" } @@ -186,7 +186,7 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { fallback_label: format!("not a {expected}"), span, span_label: match res { - Res::Def(kind, def_id) if kind == DefKind::TyParam => { + Res::Def(DefKind::TyParam, def_id) => { Some((self.r.def_span(def_id), "found this type parameter")) } _ => None, @@ -215,7 +215,8 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { } } else { let mut span_label = None; - let item_span = path.last().unwrap().ident.span; + let item_ident = path.last().unwrap().ident; + let item_span = item_ident.span; let (mod_prefix, mod_str, module, suggestion) = if path.len() == 1 { debug!(?self.diagnostic_metadata.current_impl_items); debug!(?self.diagnostic_metadata.current_function); @@ -231,9 +232,35 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { }) { let sp = item_span.shrink_to_lo(); + + // Account for `Foo { field }` when suggesting `self.field` so we result on + // `Foo { field: self.field }`. + let field = match source { + PathSource::Expr(Some(Expr { kind: ExprKind::Struct(expr), .. 
})) => { + expr.fields.iter().find(|f| f.ident == item_ident) + } + _ => None, + }; + let pre = if let Some(field) = field && field.is_shorthand { + format!("{item_ident}: ") + } else { + String::new() + }; + // Ensure we provide a structured suggestion for an assoc fn only for + // expressions that are actually a fn call. + let is_call = match field { + Some(ast::ExprField { expr, .. }) => { + matches!(expr.kind, ExprKind::Call(..)) + } + _ => matches!( + source, + PathSource::Expr(Some(Expr { kind: ExprKind::Call(..), ..})), + ), + }; + match &item.kind { AssocItemKind::Fn(fn_) - if !sig.decl.has_self() && fn_.sig.decl.has_self() => { + if (!sig.decl.has_self() || !is_call) && fn_.sig.decl.has_self() => { // Ensure that we only suggest `self.` if `self` is available, // you can't call `fn foo(&self)` from `fn bar()` (#115992). // We also want to mention that the method exists. @@ -243,20 +270,28 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { )); None } + AssocItemKind::Fn(fn_) + if !fn_.sig.decl.has_self() && !is_call => { + span_label = Some(( + item.ident.span, + "an associated function by that name is available on `Self` here", + )); + None + } AssocItemKind::Fn(fn_) if fn_.sig.decl.has_self() => Some(( sp, "consider using the method on `Self`", - "self.".to_string(), + format!("{pre}self."), )), AssocItemKind::Fn(_) => Some(( sp, "consider using the associated function on `Self`", - "Self::".to_string(), + format!("{pre}Self::"), )), AssocItemKind::Const(..) => Some(( sp, "consider using the associated constant on `Self`", - "Self::".to_string(), + format!("{pre}Self::"), )), _ => None } @@ -621,17 +656,30 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { self.lookup_assoc_candidate(ident, ns, is_expected, source.is_call()) { let self_is_available = self.self_value_is_available(path[0].ident.span); + // Account for `Foo { field }` when suggesting `self.field` so we result on + // `Foo { field: self.field }`. + let pre = match source { + PathSource::Expr(Some(Expr { kind: ExprKind::Struct(expr), .. })) + if expr + .fields + .iter() + .any(|f| f.ident == path[0].ident && f.is_shorthand) => + { + format!("{path_str}: ") + } + _ => String::new(), + }; match candidate { - AssocSuggestion::Field => { + AssocSuggestion::Field(field_span) => { if self_is_available { - err.span_suggestion( - span, + err.span_suggestion_verbose( + span.shrink_to_lo(), "you might have meant to use the available field", - format!("self.{path_str}"), + format!("{pre}self."), Applicability::MachineApplicable, ); } else { - err.span_label(span, "a field by this name exists in `Self`"); + err.span_label(field_span, "a field by that name exists in `Self`"); } } AssocSuggestion::MethodWithSelf { called } if self_is_available => { @@ -640,10 +688,10 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { } else { "you might have meant to refer to the method" }; - err.span_suggestion( - span, + err.span_suggestion_verbose( + span.shrink_to_lo(), msg, - format!("self.{path_str}"), + "self.".to_string(), Applicability::MachineApplicable, ); } @@ -651,10 +699,10 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { | AssocSuggestion::AssocFn { .. 
} | AssocSuggestion::AssocConst | AssocSuggestion::AssocType => { - err.span_suggestion( - span, + err.span_suggestion_verbose( + span.shrink_to_lo(), format!("you might have meant to {}", candidate.action()), - format!("Self::{path_str}"), + "Self::".to_string(), Applicability::MachineApplicable, ); } @@ -1667,11 +1715,11 @@ impl<'a: 'ast, 'ast, 'tcx> LateResolutionVisitor<'a, '_, 'ast, 'tcx> { resolution.full_res() { if let Some(field_ids) = self.r.field_def_ids(did) { - if field_ids + if let Some(field_id) = field_ids .iter() - .any(|&field_id| ident.name == self.r.tcx.item_name(field_id)) + .find(|&&field_id| ident.name == self.r.tcx.item_name(field_id)) { - return Some(AssocSuggestion::Field); + return Some(AssocSuggestion::Field(self.r.def_span(*field_id))); } } } diff --git a/compiler/rustc_smir/src/rustc_internal/mod.rs b/compiler/rustc_smir/src/rustc_internal/mod.rs index 441aafd1257..1a9dea99f64 100644 --- a/compiler/rustc_smir/src/rustc_internal/mod.rs +++ b/compiler/rustc_smir/src/rustc_internal/mod.rs @@ -86,6 +86,10 @@ impl<'tcx> Tables<'tcx> { stable_mir::ty::ImplDef(self.create_def_id(did)) } + pub fn region_def(&mut self, did: DefId) -> stable_mir::ty::RegionDef { + stable_mir::ty::RegionDef(self.create_def_id(did)) + } + pub fn prov(&mut self, aid: AllocId) -> stable_mir::ty::Prov { stable_mir::ty::Prov(self.create_alloc_id(aid)) } diff --git a/compiler/rustc_smir/src/rustc_smir/mod.rs b/compiler/rustc_smir/src/rustc_smir/mod.rs index 5ff17613b4e..c6c97ce35e8 100644 --- a/compiler/rustc_smir/src/rustc_smir/mod.rs +++ b/compiler/rustc_smir/src/rustc_smir/mod.rs @@ -7,7 +7,8 @@ //! //! For now, we are developing everything inside `rustc`, thus, we keep this module private. -use hir::def::DefKind; +use crate::rustc_smir::hir::def::DefKind; +use crate::rustc_smir::stable_mir::ty::{BoundRegion, EarlyBoundRegion, Region}; use rustc_hir as hir; use rustc_middle::mir; use rustc_middle::mir::interpret::{alloc_range, AllocId}; @@ -1500,9 +1501,39 @@ impl<'tcx> Stable<'tcx> for ty::ImplPolarity { impl<'tcx> Stable<'tcx> for ty::Region<'tcx> { type T = stable_mir::ty::Region; - fn stable(&self, _: &mut Tables<'tcx>) -> Self::T { - // FIXME: add a real implementation of stable regions - opaque(self) + fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T { + Region { kind: self.kind().stable(tables) } + } +} + +impl<'tcx> Stable<'tcx> for ty::RegionKind<'tcx> { + type T = stable_mir::ty::RegionKind; + + fn stable(&self, tables: &mut Tables<'tcx>) -> Self::T { + use stable_mir::ty::RegionKind; + match self { + ty::ReEarlyBound(early_reg) => RegionKind::ReEarlyBound(EarlyBoundRegion { + def_id: tables.region_def(early_reg.def_id), + index: early_reg.index, + name: early_reg.name.to_string(), + }), + ty::ReLateBound(db_index, bound_reg) => RegionKind::ReLateBound( + db_index.as_u32(), + BoundRegion { var: bound_reg.var.as_u32(), kind: bound_reg.kind.stable(tables) }, + ), + ty::ReStatic => RegionKind::ReStatic, + ty::RePlaceholder(place_holder) => { + RegionKind::RePlaceholder(stable_mir::ty::Placeholder { + universe: place_holder.universe.as_u32(), + bound: BoundRegion { + var: place_holder.bound.var.as_u32(), + kind: place_holder.bound.kind.stable(tables), + }, + }) + } + ty::ReErased => RegionKind::ReErased, + _ => unreachable!("{self:?}"), + } } } diff --git a/compiler/rustc_span/src/lib.rs b/compiler/rustc_span/src/lib.rs index c58fdbcb5e1..772e09291a1 100644 --- a/compiler/rustc_span/src/lib.rs +++ b/compiler/rustc_span/src/lib.rs @@ -1753,7 +1753,7 @@ impl SourceFile { // is 
recorded. let diff = match self.normalized_pos.binary_search_by(|np| np.pos.cmp(&pos)) { Ok(i) => self.normalized_pos[i].diff, - Err(i) if i == 0 => 0, + Err(0) => 0, Err(i) => self.normalized_pos[i - 1].diff, }; @@ -1775,7 +1775,7 @@ impl SourceFile { .binary_search_by(|np| (np.pos.0 + np.diff).cmp(&(self.start_pos.0 + offset))) { Ok(i) => self.normalized_pos[i].diff, - Err(i) if i == 0 => 0, + Err(0) => 0, Err(i) => self.normalized_pos[i - 1].diff, }; diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index 382754be2ca..4f46256626f 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -1465,6 +1465,7 @@ symbols! { simd_shl, simd_shr, simd_shuffle, + simd_shuffle_generic, simd_sub, simd_trunc, simd_xor, diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs b/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs deleted file mode 100644 index be48447e27c..00000000000 --- a/compiler/rustc_trait_selection/src/solve/search_graph/cache.rs +++ /dev/null @@ -1,102 +0,0 @@ -//! This module both handles the global cache which stores "finished" goals, -//! and the provisional cache which contains partially computed goals. -//! -//! The provisional cache is necessary when dealing with coinductive cycles. -//! -//! For more information about the provisional cache and coinduction in general, -//! check out the relevant section of the rustc-dev-guide. -//! -//! FIXME(@lcnr): Write that section, feel free to ping me if you need help here -//! before then or if I still haven't done that before January 2023. -use super::StackDepth; -use rustc_data_structures::fx::FxHashMap; -use rustc_index::IndexVec; -use rustc_middle::traits::solve::{CanonicalInput, QueryResult}; - -rustc_index::newtype_index! { - pub struct EntryIndex {} -} - -#[derive(Debug, Clone)] -pub(super) struct ProvisionalEntry<'tcx> { - /// In case we have a coinductive cycle, this is the - /// the current provisional result of this goal. - /// - /// This starts out as `None` for all goals and gets to some - /// when the goal gets popped from the stack or we rerun evaluation - /// for this goal to reach a fixpoint. - pub(super) response: Option<QueryResult<'tcx>>, - /// In case of a cycle, the position of deepest stack entry involved - /// in that cycle. This is monotonically decreasing in the stack as all - /// elements between the current stack element in the deepest stack entry - /// involved have to also be involved in that cycle. - /// - /// We can only move entries to the global cache once we're complete done - /// with the cycle. If this entry has not been involved in a cycle, - /// this is just its own depth. - pub(super) depth: StackDepth, - - /// The goal for this entry. Should always be equal to the corresponding goal - /// in the lookup table. - pub(super) input: CanonicalInput<'tcx>, -} - -pub(super) struct ProvisionalCache<'tcx> { - pub(super) entries: IndexVec<EntryIndex, ProvisionalEntry<'tcx>>, - // FIXME: This is only used to quickly check whether a given goal - // is in the cache. We should experiment with using something like - // `SsoHashSet` here because in most cases there are only a few entries. 
- pub(super) lookup_table: FxHashMap<CanonicalInput<'tcx>, EntryIndex>, -} - -impl<'tcx> ProvisionalCache<'tcx> { - pub(super) fn empty() -> ProvisionalCache<'tcx> { - ProvisionalCache { entries: Default::default(), lookup_table: Default::default() } - } - - pub(super) fn is_empty(&self) -> bool { - self.entries.is_empty() && self.lookup_table.is_empty() - } - - /// Adds a dependency from the current leaf to `target` in the cache - /// to prevent us from moving any goals which depend on the current leaf - /// to the global cache while we're still computing `target`. - /// - /// Its important to note that `target` may already be part of a different cycle. - /// In this case we have to ensure that we also depend on all other goals - /// in the existing cycle in addition to the potentially direct cycle with `target`. - pub(super) fn add_dependency_of_leaf_on(&mut self, target: EntryIndex) { - let depth = self.entries[target].depth; - for provisional_entry in &mut self.entries.raw[target.index()..] { - // The depth of `target` is the position of the deepest goal in the stack - // on which `target` depends. That goal is the `root` of this cycle. - // - // Any entry which was added after `target` is either on the stack itself - // at which point its depth is definitely at least as high as the depth of - // `root`. If it's not on the stack itself it has to depend on a goal - // between `root` and `leaf`. If it were to depend on a goal deeper in the - // stack than `root`, then `root` would also depend on that goal, at which - // point `root` wouldn't be the root anymore. - debug_assert!(provisional_entry.depth >= depth); - provisional_entry.depth = depth; - } - - // We only update entries which were added after `target` as no other - // entry should have a higher depth. - // - // Any entry which previously had a higher depth than target has to - // be between `target` and `root`. Because of this we would have updated - // its depth when calling `add_dependency_of_leaf_on(root)` for `target`. - if cfg!(debug_assertions) { - self.entries.iter().all(|e| e.depth <= depth); - } - } - - pub(super) fn depth(&self, entry_index: EntryIndex) -> StackDepth { - self.entries[entry_index].depth - } - - pub(super) fn provisional_result(&self, entry_index: EntryIndex) -> Option<QueryResult<'tcx>> { - self.entries[entry_index].response - } -} diff --git a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs index 728d0fc1ae7..33513f6bd43 100644 --- a/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs +++ b/compiler/rustc_trait_selection/src/solve/search_graph/mod.rs @@ -1,10 +1,7 @@ -mod cache; - -use self::cache::ProvisionalEntry; use super::inspect; use super::inspect::ProofTreeBuilder; use super::SolverMode; -use cache::ProvisionalCache; +use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::fx::FxHashSet; use rustc_index::Idx; use rustc_index::IndexVec; @@ -27,8 +24,14 @@ struct StackEntry<'tcx> { // The maximum depth reached by this stack entry, only up-to date // for the top of the stack and lazily updated for the rest. reached_depth: StackDepth, + // In case of a cycle, the depth of the root. + cycle_root_depth: StackDepth, + encountered_overflow: bool, has_been_used: bool, + /// Starts out as `None` and gets set when rerunning this + /// goal in case we encounter a cycle. + provisional_result: Option<QueryResult<'tcx>>, /// We put only the root goal of a coinductive cycle into the global cache. 
/// @@ -47,7 +50,7 @@ pub(super) struct SearchGraph<'tcx> { /// /// An element is *deeper* in the stack if its index is *lower*. stack: IndexVec<StackDepth, StackEntry<'tcx>>, - provisional_cache: ProvisionalCache<'tcx>, + stack_entries: FxHashMap<CanonicalInput<'tcx>, StackDepth>, } impl<'tcx> SearchGraph<'tcx> { @@ -56,7 +59,7 @@ impl<'tcx> SearchGraph<'tcx> { mode, local_overflow_limit: tcx.recursion_limit().0.checked_ilog2().unwrap_or(0) as usize, stack: Default::default(), - provisional_cache: ProvisionalCache::empty(), + stack_entries: Default::default(), } } @@ -85,6 +88,7 @@ impl<'tcx> SearchGraph<'tcx> { /// would cause us to not track overflow and recursion depth correctly. fn pop_stack(&mut self) -> StackEntry<'tcx> { let elem = self.stack.pop().unwrap(); + assert!(self.stack_entries.remove(&elem.input).is_some()); if let Some(last) = self.stack.raw.last_mut() { last.reached_depth = last.reached_depth.max(elem.reached_depth); last.encountered_overflow |= elem.encountered_overflow; @@ -104,22 +108,17 @@ impl<'tcx> SearchGraph<'tcx> { } pub(super) fn is_empty(&self) -> bool { - self.stack.is_empty() && self.provisional_cache.is_empty() + self.stack.is_empty() } /// Whether we're currently in a cycle. This should only be used /// for debug assertions. pub(super) fn in_cycle(&self) -> bool { if let Some(stack_depth) = self.stack.last_index() { - // Either the current goal on the stack is the root of a cycle... - if self.stack[stack_depth].has_been_used { - return true; - } - - // ...or it depends on a goal with a lower depth. - let current_goal = self.stack[stack_depth].input; - let entry_index = self.provisional_cache.lookup_table[¤t_goal]; - self.provisional_cache.entries[entry_index].depth != stack_depth + // Either the current goal on the stack is the root of a cycle + // or it depends on a goal with a lower depth. + self.stack[stack_depth].has_been_used + || self.stack[stack_depth].cycle_root_depth != stack_depth } else { false } @@ -211,9 +210,8 @@ impl<'tcx> SearchGraph<'tcx> { } } - // Look at the provisional cache to detect cycles. - let cache = &mut self.provisional_cache; - match cache.lookup_table.entry(input) { + // Check whether we're in a cycle. + match self.stack_entries.entry(input) { // No entry, we push this goal on the stack and try to prove it. Entry::Vacant(v) => { let depth = self.stack.next_index(); @@ -221,14 +219,14 @@ impl<'tcx> SearchGraph<'tcx> { input, available_depth, reached_depth: depth, + cycle_root_depth: depth, encountered_overflow: false, has_been_used: false, + provisional_result: None, cycle_participants: Default::default(), }; assert_eq!(self.stack.push(entry), depth); - let entry_index = - cache.entries.push(ProvisionalEntry { response: None, depth, input }); - v.insert(entry_index); + v.insert(depth); } // We have a nested goal which relies on a goal `root` deeper in the stack. // @@ -239,41 +237,50 @@ impl<'tcx> SearchGraph<'tcx> { // // Finally we can return either the provisional response for that goal if we have a // coinductive cycle or an ambiguous result if the cycle is inductive. 
- Entry::Occupied(entry_index) => { + Entry::Occupied(entry) => { inspect.goal_evaluation_kind(inspect::WipCanonicalGoalEvaluationKind::CacheHit( CacheHit::Provisional, )); - let entry_index = *entry_index.get(); - let stack_depth = cache.depth(entry_index); + let stack_depth = *entry.get(); debug!("encountered cycle with depth {stack_depth:?}"); - - cache.add_dependency_of_leaf_on(entry_index); - let mut iter = self.stack.iter_mut(); - let root = iter.nth(stack_depth.as_usize()).unwrap(); - for e in iter { - root.cycle_participants.insert(e.input); + // We start by updating the root depth of all cycle participants, and + // add all cycle participants to the root. + let root_depth = self.stack[stack_depth].cycle_root_depth; + let (prev, participants) = self.stack.raw.split_at_mut(stack_depth.as_usize() + 1); + let root = &mut prev[root_depth.as_usize()]; + for entry in participants { + debug_assert!(entry.cycle_root_depth >= root_depth); + entry.cycle_root_depth = root_depth; + root.cycle_participants.insert(entry.input); + // FIXME(@lcnr): I believe that this line is needed as we could + // otherwise access a cache entry for the root of a cycle while + // computing the result for a cycle participant. This can result + // in unstable results due to incompleteness. + // + // However, a test for this would be an even more complex version of + // tests/ui/traits/new-solver/coinduction/incompleteness-unstable-result.rs. + // I did not bother to write such a test and we have no regression test + // for this. It would be good to have such a test :) + #[allow(rustc::potential_query_instability)] + root.cycle_participants.extend(entry.cycle_participants.drain()); } - // If we're in a cycle, we have to retry proving the current goal - // until we reach a fixpoint. + // If we're in a cycle, we have to retry proving the cycle head + // until we reach a fixpoint. It is not enough to simply retry the + // `root` goal of this cycle. + // + // See tests/ui/traits/new-solver/cycles/fixpoint-rerun-all-cycle-heads.rs + // for an example. self.stack[stack_depth].has_been_used = true; - return if let Some(result) = cache.provisional_result(entry_index) { + return if let Some(result) = self.stack[stack_depth].provisional_result { result } else { - // If we don't have a provisional result yet, the goal has to - // still be on the stack. - let mut goal_on_stack = false; - let mut is_coinductive = true; - for entry in self.stack.raw[stack_depth.index()..] + // If we don't have a provisional result yet we're in the first iteration, + // so we start with no constraints. + let is_coinductive = self.stack.raw[stack_depth.index()..] .iter() - .skip_while(|entry| entry.input != input) - { - goal_on_stack = true; - is_coinductive &= entry.input.value.goal.predicate.is_coinductive(tcx); - } - debug_assert!(goal_on_stack); - + .all(|entry| entry.input.value.goal.predicate.is_coinductive(tcx)); if is_coinductive { Self::response_no_constraints(tcx, input, Certainty::Yes) } else { @@ -294,40 +301,25 @@ impl<'tcx> SearchGraph<'tcx> { // of the previous iteration is equal to the final result, at which // point we are done. for _ in 0..self.local_overflow_limit() { - let response = prove_goal(self, inspect); + let result = prove_goal(self, inspect); // Check whether the current goal is the root of a cycle and whether // we have to rerun because its provisional result differed from the // final result. - // - // Also update the response for this goal stored in the provisional - // cache. 
            let stack_entry = self.pop_stack();
            debug_assert_eq!(stack_entry.input, input);
-            let cache = &mut self.provisional_cache;
-            let provisional_entry_index =
-                *cache.lookup_table.get(&stack_entry.input).unwrap();
-            let provisional_entry = &mut cache.entries[provisional_entry_index];
            if stack_entry.has_been_used
-                && provisional_entry.response.map_or(true, |r| r != response)
+                && stack_entry.provisional_result.map_or(true, |r| r != result)
            {
-                // If so, update the provisional result for this goal and remove
-                // all entries whose result depends on this goal from the provisional
-                // cache...
-                //
-                // That's not completely correct, as a nested goal can also only
-                // depend on a goal which is lower in the stack so it doesn't
-                // actually depend on the current goal. This should be fairly
-                // rare and is hopefully not relevant for performance.
-                provisional_entry.response = Some(response);
-                #[allow(rustc::potential_query_instability)]
-                cache.lookup_table.retain(|_key, index| *index <= provisional_entry_index);
-                cache.entries.truncate(provisional_entry_index.index() + 1);
-
-                // ...and finally push our goal back on the stack and reevaluate it.
-                self.stack.push(StackEntry { has_been_used: false, ..stack_entry });
+                // If so, update its provisional result and reevaluate it.
+                let depth = self.stack.push(StackEntry {
+                    has_been_used: false,
+                    provisional_result: Some(result),
+                    ..stack_entry
+                });
+                assert_eq!(self.stack_entries.insert(input, depth), None);
            } else {
-                return (stack_entry, response);
+                return (stack_entry, result);
            }
        }

@@ -343,17 +335,7 @@ impl<'tcx> SearchGraph<'tcx> {
        //
        // It is not possible for any nested goal to depend on something deeper on the
        // stack, as this would have also updated the depth of the current goal.
-        let cache = &mut self.provisional_cache;
-        let provisional_entry_index = *cache.lookup_table.get(&input).unwrap();
-        let provisional_entry = &mut cache.entries[provisional_entry_index];
-        let depth = provisional_entry.depth;
-        if depth == self.stack.next_index() {
-            for (i, entry) in cache.entries.drain_enumerated(provisional_entry_index.index()..) {
-                let actual_index = cache.lookup_table.remove(&entry.input);
-                debug_assert_eq!(Some(i), actual_index);
-                debug_assert!(entry.depth == depth);
-            }
-
+        if final_entry.cycle_root_depth == self.stack.next_index() {
            // When encountering a cycle, both inductive and coinductive, we only
            // move the root into the global cache. We also store all other cycle
            // participants involved.
@@ -371,8 +353,6 @@ impl<'tcx> SearchGraph<'tcx> {
                dep_node,
                result,
            )
-        } else {
-            provisional_entry.response = Some(result);
        }

        result
diff --git a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
index 2a333a4f0e3..2a586f810d6 100644
--- a/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
+++ b/compiler/rustc_trait_selection/src/traits/error_reporting/mod.rs
@@ -3211,7 +3211,7 @@ impl<'tcx> InferCtxtPrivExt<'tcx> for TypeErrCtxt<'_, 'tcx> {
    ) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
        let name = match self.tcx.opaque_type_origin(def_id.expect_local()) {
            hir::OpaqueTyOrigin::FnReturn(_) | hir::OpaqueTyOrigin::AsyncFn(_) => {
-                format!("opaque type")
+                "opaque type".to_string()
            }
            hir::OpaqueTyOrigin::TyAlias { .. } => {
                format!("`{}`", self.tcx.def_path_debug_str(def_id))
diff --git a/compiler/stable_mir/src/fold.rs b/compiler/stable_mir/src/fold.rs
index 16ae62311aa..6471b2c2a3a 100644
--- a/compiler/stable_mir/src/fold.rs
+++ b/compiler/stable_mir/src/fold.rs
@@ -4,17 +4,20 @@ use crate::Opaque;

use super::ty::{
    Allocation, Binder, Const, ConstDef, ConstantKind, ExistentialPredicate, FnSig, GenericArgKind,
-    GenericArgs, Promoted, RigidTy, TermKind, Ty, TyKind, UnevaluatedConst,
+    GenericArgs, Promoted, Region, RigidTy, TermKind, Ty, TyKind, UnevaluatedConst,
};

pub trait Folder: Sized {
    type Break;
-    fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
+    fn fold_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
        ty.super_fold(self)
    }
    fn fold_const(&mut self, c: &Const) -> ControlFlow<Self::Break, Const> {
        c.super_fold(self)
    }
+    fn fold_reg(&mut self, reg: &Region) -> ControlFlow<Self::Break, Region> {
+        reg.super_fold(self)
+    }
}

pub trait Foldable: Sized + Clone {
@@ -26,7 +29,7 @@ pub trait Foldable: Sized + Clone {

impl Foldable for Ty {
    fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
-        folder.visit_ty(self)
+        folder.fold_ty(self)
    }
    fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
        let mut kind = self.kind();
@@ -81,7 +84,7 @@ impl Foldable for UnevaluatedConst {

impl Foldable for ConstDef {
    fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
-        ControlFlow::Continue(self.clone())
+        ControlFlow::Continue(*self)
    }
}

@@ -96,7 +99,7 @@ impl<T: Foldable> Foldable for Option<T> {

impl Foldable for Promoted {
    fn super_fold<V: Folder>(&self, _folder: &mut V) -> ControlFlow<V::Break, Self> {
-        ControlFlow::Continue(self.clone())
+        ControlFlow::Continue(*self)
    }
}

@@ -106,6 +109,15 @@ impl Foldable for GenericArgs {
    }
}

+impl Foldable for Region {
+    fn fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
+        folder.fold_reg(self)
+    }
+    fn super_fold<V: Folder>(&self, _: &mut V) -> ControlFlow<V::Break, Self> {
+        ControlFlow::Continue(self.clone())
+    }
+}
+
impl Foldable for GenericArgKind {
    fn super_fold<V: Folder>(&self, folder: &mut V) -> ControlFlow<V::Break, Self> {
        let mut this = self.clone();
@@ -136,7 +148,10 @@ impl Foldable for RigidTy {
            }
            RigidTy::Slice(inner) => *inner = inner.fold(folder)?,
            RigidTy::RawPtr(ty, _) => *ty = ty.fold(folder)?,
-            RigidTy::Ref(_, ty, _) => *ty = ty.fold(folder)?,
+            RigidTy::Ref(reg, ty, _) => {
+                *reg = reg.fold(folder)?;
+                *ty = ty.fold(folder)?
+            }
            RigidTy::FnDef(_, args) => *args = args.fold(folder)?,
            RigidTy::FnPtr(sig) => *sig = sig.fold(folder)?,
            RigidTy::Closure(_, args) => *args = args.fold(folder)?,
@@ -214,7 +229,7 @@ pub enum Never {}

impl Folder for GenericArgs {
    type Break = Never;
-    fn visit_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
+    fn fold_ty(&mut self, ty: &Ty) -> ControlFlow<Self::Break, Ty> {
        ControlFlow::Continue(match ty.kind() {
            TyKind::Param(p) => self[p],
            _ => *ty,
diff --git a/compiler/stable_mir/src/ty.rs b/compiler/stable_mir/src/ty.rs
index 82007e30683..6029e3c11ad 100644
--- a/compiler/stable_mir/src/ty.rs
+++ b/compiler/stable_mir/src/ty.rs
@@ -1,7 +1,7 @@
use super::{
    mir::Safety,
    mir::{Body, Mutability},
-    with, AllocId, DefId,
+    with, AllocId, DefId, Symbol,
};
use crate::Opaque;
use std::fmt::{self, Debug, Formatter};
@@ -34,7 +34,46 @@ pub struct Const {
}

type Ident = Opaque;
-pub type Region = Opaque;
+
+#[derive(Debug, Clone)]
+pub struct Region {
+    pub kind: RegionKind,
+}
+
+#[derive(Debug, Clone)]
+pub enum RegionKind {
+    ReEarlyBound(EarlyBoundRegion),
+    ReLateBound(DebruijnIndex, BoundRegion),
+    ReStatic,
+    RePlaceholder(Placeholder<BoundRegion>),
+    ReErased,
+}
+
+pub(crate) type DebruijnIndex = u32;
+
+#[derive(Debug, Clone)]
+pub struct EarlyBoundRegion {
+    pub def_id: RegionDef,
+    pub index: u32,
+    pub name: Symbol,
+}
+
+pub(crate) type BoundVar = u32;
+
+#[derive(Debug, Clone)]
+pub struct BoundRegion {
+    pub var: BoundVar,
+    pub kind: BoundRegionKind,
+}
+
+pub(crate) type UniverseIndex = u32;
+
+#[derive(Debug, Clone)]
+pub struct Placeholder<T> {
+    pub universe: UniverseIndex,
+    pub bound: T,
+}
+
#[derive(Clone, Copy, PartialEq, Eq)]
pub struct Span(pub usize);
@@ -152,6 +191,9 @@ pub struct ConstDef(pub DefId);
#[derive(Clone, PartialEq, Eq, Debug)]
pub struct ImplDef(pub DefId);

+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct RegionDef(pub DefId);
+
#[derive(Clone, Debug)]
pub struct GenericArgs(pub Vec<GenericArgKind>);
diff --git a/compiler/stable_mir/src/visitor.rs b/compiler/stable_mir/src/visitor.rs
index 9c3b4cd994a..96100958138 100644
--- a/compiler/stable_mir/src/visitor.rs
+++ b/compiler/stable_mir/src/visitor.rs
@@ -4,7 +4,7 @@ use crate::Opaque;

use super::ty::{
    Allocation, Binder, Const, ConstDef, ExistentialPredicate, FnSig, GenericArgKind, GenericArgs,
-    Promoted, RigidTy, TermKind, Ty, UnevaluatedConst,
+    Promoted, Region, RigidTy, TermKind, Ty, UnevaluatedConst,
};

pub trait Visitor: Sized {
@@ -15,6 +15,9 @@ pub trait Visitor: Sized {
    fn visit_const(&mut self, c: &Const) -> ControlFlow<Self::Break> {
        c.super_visit(self)
    }
+    fn visit_reg(&mut self, reg: &Region) -> ControlFlow<Self::Break> {
+        reg.super_visit(self)
+    }
}

pub trait Visitable {
@@ -101,6 +104,16 @@ impl Visitable for GenericArgs {
    }
}

+impl Visitable for Region {
+    fn visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
+        visitor.visit_reg(self)
+    }
+
+    fn super_visit<V: Visitor>(&self, _: &mut V) -> ControlFlow<V::Break> {
+        ControlFlow::Continue(())
+    }
+}
+
impl Visitable for GenericArgKind {
    fn super_visit<V: Visitor>(&self, visitor: &mut V) -> ControlFlow<V::Break> {
        match self {
@@ -128,7 +141,10 @@ impl Visitable for RigidTy {
            }
            RigidTy::Slice(inner) => inner.visit(visitor),
            RigidTy::RawPtr(ty, _) => ty.visit(visitor),
-            RigidTy::Ref(_, ty, _) => ty.visit(visitor),
+            RigidTy::Ref(reg, ty, _) => {
+                reg.visit(visitor);
+                ty.visit(visitor)
+            }
            RigidTy::FnDef(_, args) => args.visit(visitor),
            RigidTy::FnPtr(sig) => sig.visit(visitor),
            RigidTy::Closure(_, args) => args.visit(visitor),
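
For downstream users of `stable_mir`, the practical effect of the `ty.rs`, `fold.rs`, and `visitor.rs` changes above is that regions are now concrete, visitable values rather than `Opaque`. Below is a minimal sketch of how a consumer might hook into the new `visit_reg` method; it assumes the `ty` and `visitor` modules are reachable at these paths and that `Ty` implements `Visitable` as elsewhere in the file, and the `StaticRegionCounter`/`count_static_regions` names are made up for illustration:

```rust
use std::ops::ControlFlow;

use stable_mir::ty::{Region, RegionKind, Ty};
use stable_mir::visitor::{Visitable, Visitor};

/// Hypothetical visitor that counts the `'static` regions reachable from a type.
struct StaticRegionCounter {
    count: usize,
}

impl Visitor for StaticRegionCounter {
    type Break = ();

    // New hook added in this diff: called for every `Region`, e.g. the region
    // component of a `RigidTy::Ref`, which the updated `Visitable` impl for
    // `RigidTy` now forwards to.
    fn visit_reg(&mut self, reg: &Region) -> ControlFlow<Self::Break> {
        if matches!(reg.kind, RegionKind::ReStatic) {
            self.count += 1;
        }
        reg.super_visit(self)
    }
}

fn count_static_regions(ty: &Ty) -> usize {
    let mut counter = StaticRegionCounter { count: 0 };
    // `visit` walks the type and dispatches to `visit_reg` for each region.
    let _ = ty.visit(&mut counter);
    counter.count
}
```

The `Folder` side is symmetric: `fold_reg` has a default body that calls `super_fold`, so existing folders keep compiling and only need to override it when they actually rewrite regions.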

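Stepping back from the mechanical diff, the search-graph hunks near the top of this section replace the provisional-cache bookkeeping with a provisional result stored directly on the stack entry. The following is a self-contained sketch of that scheme only, with made-up `Goal`/`GoalResult` types and none of the real solver's provisional or global caches, purely to illustrate the "reevaluate while the provisional result keeps changing" loop:

```rust
/// Toy stand-ins for the solver's goal and result types.
type Goal = &'static str;
type GoalResult = bool;

struct StackEntry {
    goal: Goal,
    /// Result handed to nested goals that hit this entry in a cycle.
    provisional_result: Option<GoalResult>,
    /// Set when a nested goal actually used the provisional result.
    has_been_used: bool,
}

struct SearchGraph {
    stack: Vec<StackEntry>,
}

impl SearchGraph {
    fn evaluate(
        &mut self,
        goal: Goal,
        compute: &dyn Fn(&mut SearchGraph, Goal) -> GoalResult,
    ) -> GoalResult {
        // A goal that is already on the stack means we hit a cycle: answer
        // with its provisional result, starting from a pessimistic `false`.
        if let Some(entry) = self.stack.iter_mut().find(|e| e.goal == goal) {
            entry.has_been_used = true;
            return entry.provisional_result.unwrap_or(false);
        }

        self.stack.push(StackEntry { goal, provisional_result: None, has_been_used: false });
        loop {
            // Explicit reborrow so `compute` can recursively call `evaluate`.
            let result = compute(&mut *self, goal);
            let entry = self.stack.last_mut().unwrap();
            // Mirrors the rewritten check above: reevaluate only if this goal
            // was used as a cycle head and its result changed.
            if entry.has_been_used && entry.provisional_result.map_or(true, |r| r != result) {
                entry.provisional_result = Some(result);
                entry.has_been_used = false;
            } else {
                self.stack.pop();
                return result;
            }
        }
    }
}
```

The real code additionally tracks `cycle_root_depth` so that, as the second hunk's comment says, only the root of a cycle is moved into the global cache together with the other cycle participants.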