246 files changed, 4583 insertions, 1697 deletions
| diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4559246ce42..81fb39cdc56 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -46,7 +46,7 @@ jobs: # If you want to modify CI jobs, take a look at src/ci/github-actions/jobs.yml. calculate_matrix: name: Calculate job matrix - runs-on: ubuntu-24.04 + runs-on: ubuntu-24.04-arm outputs: jobs: ${{ steps.jobs.outputs.jobs }} run_type: ${{ steps.jobs.outputs.run_type }} diff --git a/compiler/rustc_ast_lowering/src/index.rs b/compiler/rustc_ast_lowering/src/index.rs index 26c0e7e5f82..956cb580d10 100644 --- a/compiler/rustc_ast_lowering/src/index.rs +++ b/compiler/rustc_ast_lowering/src/index.rs @@ -164,7 +164,7 @@ impl<'a, 'hir> Visitor<'hir> for NodeCollector<'a, 'hir> { fn visit_item(&mut self, i: &'hir Item<'hir>) { debug_assert_eq!(i.owner_id, self.owner); self.with_parent(i.hir_id(), |this| { - if let ItemKind::Struct(_, struct_def, _) = &i.kind { + if let ItemKind::Struct(_, _, struct_def) = &i.kind { // If this is a tuple or unit-like struct, register the constructor. if let Some(ctor_hir_id) = struct_def.ctor_hir_id() { this.insert(i.span, ctor_hir_id, Node::Ctor(struct_def)); diff --git a/compiler/rustc_ast_lowering/src/item.rs b/compiler/rustc_ast_lowering/src/item.rs index 7f7d45790ee..d3aacaa15a8 100644 --- a/compiler/rustc_ast_lowering/src/item.rs +++ b/compiler/rustc_ast_lowering/src/item.rs @@ -180,7 +180,7 @@ impl<'hir> LoweringContext<'_, 'hir> { let (ty, body_id) = self.lower_const_item(t, span, e.as_deref(), ImplTraitPosition::StaticTy); self.lower_define_opaque(hir_id, define_opaque); - hir::ItemKind::Static(ident, ty, *m, body_id) + hir::ItemKind::Static(*m, ident, ty, body_id) } ItemKind::Const(box ast::ConstItem { ident, @@ -200,7 +200,7 @@ impl<'hir> LoweringContext<'_, 'hir> { }, ); self.lower_define_opaque(hir_id, &define_opaque); - hir::ItemKind::Const(ident, ty, generics, body_id) + hir::ItemKind::Const(ident, generics, ty, body_id) } ItemKind::Fn(box Fn { sig: FnSig { decl, header, span: fn_sig_span }, @@ -304,7 +304,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ), }, ); - hir::ItemKind::TyAlias(ident, ty, generics) + hir::ItemKind::TyAlias(ident, generics, ty) } ItemKind::Enum(ident, generics, enum_definition) => { let ident = self.lower_ident(*ident); @@ -318,7 +318,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ) }, ); - hir::ItemKind::Enum(ident, hir::EnumDef { variants }, generics) + hir::ItemKind::Enum(ident, generics, hir::EnumDef { variants }) } ItemKind::Struct(ident, generics, struct_def) => { let ident = self.lower_ident(*ident); @@ -328,7 +328,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ImplTraitContext::Disallowed(ImplTraitPosition::Generic), |this| this.lower_variant_data(hir_id, struct_def), ); - hir::ItemKind::Struct(ident, struct_def, generics) + hir::ItemKind::Struct(ident, generics, struct_def) } ItemKind::Union(ident, generics, vdata) => { let ident = self.lower_ident(*ident); @@ -338,7 +338,7 @@ impl<'hir> LoweringContext<'_, 'hir> { ImplTraitContext::Disallowed(ImplTraitPosition::Generic), |this| this.lower_variant_data(hir_id, vdata), ); - hir::ItemKind::Union(ident, vdata, generics) + hir::ItemKind::Union(ident, generics, vdata) } ItemKind::Impl(box Impl { safety, @@ -467,8 +467,8 @@ impl<'hir> LoweringContext<'_, 'hir> { ItemKind::Delegation(box delegation) => { let delegation_results = self.lower_delegation(delegation, id, false); hir::ItemKind::Fn { - ident: delegation_results.ident, sig: delegation_results.sig, + ident: delegation_results.ident, 
generics: delegation_results.generics, body: delegation_results.body_id, has_body: true, diff --git a/compiler/rustc_borrowck/src/lib.rs b/compiler/rustc_borrowck/src/lib.rs index 676cb618b72..2bd0ffd143b 100644 --- a/compiler/rustc_borrowck/src/lib.rs +++ b/compiler/rustc_borrowck/src/lib.rs @@ -40,9 +40,7 @@ use rustc_middle::ty::{ self, ParamEnv, RegionVid, Ty, TyCtxt, TypeFoldable, TypeVisitable, TypingMode, fold_regions, }; use rustc_middle::{bug, span_bug}; -use rustc_mir_dataflow::impls::{ - EverInitializedPlaces, MaybeInitializedPlaces, MaybeUninitializedPlaces, -}; +use rustc_mir_dataflow::impls::{EverInitializedPlaces, MaybeUninitializedPlaces}; use rustc_mir_dataflow::move_paths::{ InitIndex, InitLocation, LookupResult, MoveData, MovePathIndex, }; @@ -324,10 +322,6 @@ fn do_mir_borrowck<'tcx>( let move_data = MoveData::gather_moves(body, tcx, |_| true); - let flow_inits = MaybeInitializedPlaces::new(tcx, body, &move_data) - .iterate_to_fixpoint(tcx, body, Some("borrowck")) - .into_results_cursor(body); - let locals_are_invalidated_at_exit = tcx.hir_body_owner_kind(def).is_fn_or_closure(); let borrow_set = BorrowSet::build(tcx, body, locals_are_invalidated_at_exit, &move_data); @@ -346,7 +340,6 @@ fn do_mir_borrowck<'tcx>( body, &promoted, &location_table, - flow_inits, &move_data, &borrow_set, consumer_options, diff --git a/compiler/rustc_borrowck/src/nll.rs b/compiler/rustc_borrowck/src/nll.rs index fe899bb054f..8664e99cae3 100644 --- a/compiler/rustc_borrowck/src/nll.rs +++ b/compiler/rustc_borrowck/src/nll.rs @@ -11,8 +11,6 @@ use rustc_middle::mir::pretty::{PrettyPrintMirOptions, dump_mir_with_options}; use rustc_middle::mir::{Body, PassWhere, Promoted, create_dump_file, dump_enabled, dump_mir}; use rustc_middle::ty::print::with_no_trimmed_paths; use rustc_middle::ty::{self, TyCtxt}; -use rustc_mir_dataflow::ResultsCursor; -use rustc_mir_dataflow::impls::MaybeInitializedPlaces; use rustc_mir_dataflow::move_paths::MoveData; use rustc_mir_dataflow::points::DenseLocationMap; use rustc_session::config::MirIncludeSpans; @@ -75,14 +73,13 @@ pub(crate) fn replace_regions_in_mir<'tcx>( /// Computes the (non-lexical) regions from the input MIR. /// /// This may result in errors being reported. -pub(crate) fn compute_regions<'a, 'tcx>( +pub(crate) fn compute_regions<'tcx>( root_cx: &mut BorrowCheckRootCtxt<'tcx>, infcx: &BorrowckInferCtxt<'tcx>, universal_regions: UniversalRegions<'tcx>, body: &Body<'tcx>, promoted: &IndexSlice<Promoted, Body<'tcx>>, location_table: &PoloniusLocationTable, - flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, borrow_set: &BorrowSet<'tcx>, consumer_options: Option<ConsumerOptions>, @@ -112,7 +109,6 @@ pub(crate) fn compute_regions<'a, 'tcx>( location_table, borrow_set, &mut polonius_facts, - flow_inits, move_data, Rc::clone(&location_map), ); diff --git a/compiler/rustc_borrowck/src/region_infer/mod.rs b/compiler/rustc_borrowck/src/region_infer/mod.rs index b4ff3d66f3d..aa584713593 100644 --- a/compiler/rustc_borrowck/src/region_infer/mod.rs +++ b/compiler/rustc_borrowck/src/region_infer/mod.rs @@ -1,3 +1,4 @@ +use std::cell::OnceCell; use std::collections::VecDeque; use std::rc::Rc; @@ -197,8 +198,8 @@ pub struct RegionInferenceContext<'tcx> { /// Reverse of the SCC constraint graph -- i.e., an edge `A -> B` exists if /// `B: A`. This is used to compute the universal regions that are required - /// to outlive a given SCC. Computed lazily. 
- rev_scc_graph: Option<ReverseSccGraph>, + /// to outlive a given SCC. + rev_scc_graph: OnceCell<ReverseSccGraph>, /// The "R0 member of [R1..Rn]" constraints, indexed by SCC. member_constraints: Rc<MemberConstraintSet<'tcx, ConstraintSccIndex>>, @@ -502,7 +503,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { constraint_graph, constraint_sccs, scc_annotations, - rev_scc_graph: None, + rev_scc_graph: OnceCell::new(), member_constraints, member_constraints_applied: Vec::new(), universe_causes, @@ -809,9 +810,6 @@ impl<'tcx> RegionInferenceContext<'tcx> { member_constraint_index: NllMemberConstraintIndex, choice_regions: &[ty::RegionVid], ) { - // Lazily compute the reverse graph, we'll need it later. - self.compute_reverse_scc_graph(); - // Create a mutable vector of the options. We'll try to winnow // them down. let mut choice_regions: Vec<ty::RegionVid> = choice_regions.to_vec(); @@ -849,7 +847,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { // R0`). Therefore, we need only keep an option `O` if `UB: O` // for all UB. let universal_region_relations = &self.universal_region_relations; - for ub in self.rev_scc_graph.as_ref().unwrap().upper_bounds(scc) { + for ub in self.reverse_scc_graph().upper_bounds(scc) { debug!(?ub); choice_regions.retain(|&o_r| universal_region_relations.outlives(ub, o_r)); } diff --git a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs index 25cbd579ea1..f0d72085c40 100644 --- a/compiler/rustc_borrowck/src/region_infer/opaque_types.rs +++ b/compiler/rustc_borrowck/src/region_infer/opaque_types.rs @@ -215,9 +215,7 @@ impl<'tcx> RegionInferenceContext<'tcx> { // FIXME: We could probably compute the LUB if there is one. let scc = self.constraint_sccs.scc(vid); let upper_bounds: Vec<_> = self - .rev_scc_graph - .as_ref() - .unwrap() + .reverse_scc_graph() .upper_bounds(scc) .filter_map(|vid| self.definitions[vid].external_name) .filter(|r| !r.is_static()) diff --git a/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs b/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs index 8e04791461b..604265f8940 100644 --- a/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs +++ b/compiler/rustc_borrowck/src/region_infer/reverse_sccs.rs @@ -59,13 +59,10 @@ impl ReverseSccGraph { } impl RegionInferenceContext<'_> { - /// Compute the reverse SCC-based constraint graph (lazily). - pub(super) fn compute_reverse_scc_graph(&mut self) { - if self.rev_scc_graph.is_some() { - return; - } - - self.rev_scc_graph = - Some(ReverseSccGraph::compute(&self.constraint_sccs, self.universal_regions())); + /// Return the reverse graph of the region SCCs, initialising it if needed. 
+ pub(super) fn reverse_scc_graph(&self) -> &ReverseSccGraph { + self.rev_scc_graph.get_or_init(|| { + ReverseSccGraph::compute(&self.constraint_sccs, self.universal_regions()) + }) } } diff --git a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs index b7a21cf48c8..ca1b850f766 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/mod.rs @@ -5,8 +5,6 @@ use rustc_middle::mir::{Body, Local, Location, SourceInfo}; use rustc_middle::span_bug; use rustc_middle::ty::relate::Relate; use rustc_middle::ty::{GenericArgsRef, Region, RegionVid, Ty, TyCtxt, TypeVisitable}; -use rustc_mir_dataflow::ResultsCursor; -use rustc_mir_dataflow::impls::MaybeInitializedPlaces; use rustc_mir_dataflow::move_paths::MoveData; use rustc_mir_dataflow::points::DenseLocationMap; use tracing::debug; @@ -28,10 +26,9 @@ mod trace; /// /// N.B., this computation requires normalization; therefore, it must be /// performed before -pub(super) fn generate<'a, 'tcx>( +pub(super) fn generate<'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, location_map: &DenseLocationMap, - flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, ) { debug!("liveness::generate"); @@ -58,7 +55,7 @@ pub(super) fn generate<'a, 'tcx>( let (relevant_live_locals, boring_locals) = compute_relevant_live_locals(typeck.tcx(), &free_regions, typeck.body); - trace::trace(typeck, location_map, flow_inits, move_data, relevant_live_locals, boring_locals); + trace::trace(typeck, location_map, move_data, relevant_live_locals, boring_locals); // Mark regions that should be live where they appear within rvalues or within a call: like // args, regions, and types. diff --git a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs index 512288a0f7d..5d30fa71e92 100644 --- a/compiler/rustc_borrowck/src/type_check/liveness/trace.rs +++ b/compiler/rustc_borrowck/src/type_check/liveness/trace.rs @@ -7,10 +7,10 @@ use rustc_middle::mir::{BasicBlock, Body, ConstraintCategory, HasLocalDecls, Loc use rustc_middle::traits::query::DropckOutlivesResult; use rustc_middle::ty::relate::Relate; use rustc_middle::ty::{Ty, TyCtxt, TypeVisitable, TypeVisitableExt}; -use rustc_mir_dataflow::ResultsCursor; use rustc_mir_dataflow::impls::MaybeInitializedPlaces; use rustc_mir_dataflow::move_paths::{HasMoveData, MoveData, MovePathIndex}; use rustc_mir_dataflow::points::{DenseLocationMap, PointIndex}; +use rustc_mir_dataflow::{Analysis, ResultsCursor}; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span}; use rustc_trait_selection::error_reporting::InferCtxtErrorExt; use rustc_trait_selection::traits::ObligationCtxt; @@ -37,10 +37,9 @@ use crate::type_check::{NormalizeLocation, TypeChecker}; /// DROP-LIVE set are to the liveness sets for regions found in the /// `dropck_outlives` result of the variable's type (in particular, /// this respects `#[may_dangle]` annotations). 
-pub(super) fn trace<'a, 'tcx>( +pub(super) fn trace<'tcx>( typeck: &mut TypeChecker<'_, 'tcx>, location_map: &DenseLocationMap, - flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, relevant_live_locals: Vec<Local>, boring_locals: Vec<Local>, @@ -48,7 +47,7 @@ pub(super) fn trace<'a, 'tcx>( let local_use_map = &LocalUseMap::build(&relevant_live_locals, location_map, typeck.body); let cx = LivenessContext { typeck, - flow_inits, + flow_inits: None, location_map, local_use_map, move_data, @@ -65,7 +64,7 @@ pub(super) fn trace<'a, 'tcx>( } /// Contextual state for the type-liveness coroutine. -struct LivenessContext<'a, 'typeck, 'b, 'tcx> { +struct LivenessContext<'a, 'typeck, 'tcx> { /// Current type-checker, giving us our inference context etc. /// /// This also stores the body we're currently analyzing. @@ -81,8 +80,8 @@ struct LivenessContext<'a, 'typeck, 'b, 'tcx> { drop_data: FxIndexMap<Ty<'tcx>, DropData<'tcx>>, /// Results of dataflow tracking which variables (and paths) have been - /// initialized. - flow_inits: ResultsCursor<'b, 'tcx, MaybeInitializedPlaces<'b, 'tcx>>, + /// initialized. Computed lazily when needed by drop-liveness. + flow_inits: Option<ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>>, /// Index indicating where each variable is assigned, used, or /// dropped. @@ -94,8 +93,8 @@ struct DropData<'tcx> { region_constraint_data: Option<&'tcx QueryRegionConstraints<'tcx>>, } -struct LivenessResults<'a, 'typeck, 'b, 'tcx> { - cx: LivenessContext<'a, 'typeck, 'b, 'tcx>, +struct LivenessResults<'a, 'typeck, 'tcx> { + cx: LivenessContext<'a, 'typeck, 'tcx>, /// Set of points that define the current local. defs: DenseBitSet<PointIndex>, @@ -116,8 +115,8 @@ struct LivenessResults<'a, 'typeck, 'b, 'tcx> { stack: Vec<PointIndex>, } -impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { - fn new(cx: LivenessContext<'a, 'typeck, 'b, 'tcx>) -> Self { +impl<'a, 'typeck, 'tcx> LivenessResults<'a, 'typeck, 'tcx> { + fn new(cx: LivenessContext<'a, 'typeck, 'tcx>) -> Self { let num_points = cx.location_map.num_points(); LivenessResults { cx, @@ -459,20 +458,56 @@ impl<'a, 'typeck, 'b, 'tcx> LivenessResults<'a, 'typeck, 'b, 'tcx> { } } -impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { +impl<'a, 'typeck, 'tcx> LivenessContext<'a, 'typeck, 'tcx> { + /// Computes the `MaybeInitializedPlaces` dataflow analysis if it hasn't been done already. + /// + /// In practice, the results of this dataflow analysis are rarely needed but can be expensive to + /// compute on big functions, so we compute them lazily as a fast path when: + /// - there are relevant live locals + /// - there are drop points for these relevant live locals. + /// + /// This happens as part of the drop-liveness computation: it's the only place checking for + /// maybe-initializedness of `MovePathIndex`es. + fn flow_inits(&mut self) -> &mut ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>> { + self.flow_inits.get_or_insert_with(|| { + let tcx = self.typeck.tcx(); + let body = self.typeck.body; + // FIXME: reduce the `MaybeInitializedPlaces` domain to the useful `MovePath`s. + // + // This dataflow analysis computes maybe-initializedness of all move paths, which + // explains why it can be expensive on big functions. But this data is only used in + // drop-liveness. Therefore, most of the move paths computed here are ultimately unused, + // even if the results are computed lazily and "no relevant live locals with drop + // points" is the common case. 
+ // + // So we only need the ones for 1) relevant live locals 2) that have drop points. That's + // a much, much smaller domain: in our benchmarks, when it's not zero (the most likely + // case), there are a few dozens compared to e.g. thousands or tens of thousands of + // locals and move paths. + let flow_inits = MaybeInitializedPlaces::new(tcx, body, self.move_data) + .iterate_to_fixpoint(tcx, body, Some("borrowck")) + .into_results_cursor(body); + flow_inits + }) + } +} + +impl<'tcx> LivenessContext<'_, '_, 'tcx> { fn body(&self) -> &Body<'tcx> { self.typeck.body } + /// Returns `true` if the local variable (or some part of it) is initialized at the current /// cursor position. Callers should call one of the `seek` methods immediately before to point /// the cursor to the desired location. - fn initialized_at_curr_loc(&self, mpi: MovePathIndex) -> bool { - let state = self.flow_inits.get(); + fn initialized_at_curr_loc(&mut self, mpi: MovePathIndex) -> bool { + let flow_inits = self.flow_inits(); + let state = flow_inits.get(); if state.contains(mpi) { return true; } - let move_paths = &self.flow_inits.analysis().move_data().move_paths; + let move_paths = &flow_inits.analysis().move_data().move_paths; move_paths[mpi].find_descendant(move_paths, |mpi| state.contains(mpi)).is_some() } @@ -481,7 +516,8 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { /// DROP of some local variable will have an effect -- note that /// drops, as they may unwind, are always terminators. fn initialized_at_terminator(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool { - self.flow_inits.seek_before_primary_effect(self.body().terminator_loc(block)); + let terminator_location = self.body().terminator_loc(block); + self.flow_inits().seek_before_primary_effect(terminator_location); self.initialized_at_curr_loc(mpi) } @@ -491,7 +527,8 @@ impl<'tcx> LivenessContext<'_, '_, '_, 'tcx> { /// **Warning:** Does not account for the result of `Call` /// instructions. 
fn initialized_at_exit(&mut self, block: BasicBlock, mpi: MovePathIndex) -> bool { - self.flow_inits.seek_after_primary_effect(self.body().terminator_loc(block)); + let terminator_location = self.body().terminator_loc(block); + self.flow_inits().seek_after_primary_effect(terminator_location); self.initialized_at_curr_loc(mpi) } diff --git a/compiler/rustc_borrowck/src/type_check/mod.rs b/compiler/rustc_borrowck/src/type_check/mod.rs index 3c95ebf9ffa..4f5baeff7c3 100644 --- a/compiler/rustc_borrowck/src/type_check/mod.rs +++ b/compiler/rustc_borrowck/src/type_check/mod.rs @@ -30,8 +30,6 @@ use rustc_middle::ty::{ TypeVisitableExt, UserArgs, UserTypeAnnotationIndex, fold_regions, }; use rustc_middle::{bug, span_bug}; -use rustc_mir_dataflow::ResultsCursor; -use rustc_mir_dataflow::impls::MaybeInitializedPlaces; use rustc_mir_dataflow::move_paths::MoveData; use rustc_mir_dataflow::points::DenseLocationMap; use rustc_span::def_id::CRATE_DEF_ID; @@ -97,10 +95,9 @@ mod relate_tys; /// - `location_table` -- for datalog polonius, the map between `Location`s and `RichLocation`s /// - `borrow_set` -- information about borrows occurring in `body` /// - `polonius_facts` -- when using Polonius, this is the generated set of Polonius facts -/// - `flow_inits` -- results of a maybe-init dataflow analysis /// - `move_data` -- move-data constructed when performing the maybe-init dataflow analysis /// - `location_map` -- map between MIR `Location` and `PointIndex` -pub(crate) fn type_check<'a, 'tcx>( +pub(crate) fn type_check<'tcx>( root_cx: &mut BorrowCheckRootCtxt<'tcx>, infcx: &BorrowckInferCtxt<'tcx>, body: &Body<'tcx>, @@ -109,7 +106,6 @@ pub(crate) fn type_check<'a, 'tcx>( location_table: &PoloniusLocationTable, borrow_set: &BorrowSet<'tcx>, polonius_facts: &mut Option<PoloniusFacts>, - flow_inits: ResultsCursor<'a, 'tcx, MaybeInitializedPlaces<'a, 'tcx>>, move_data: &MoveData<'tcx>, location_map: Rc<DenseLocationMap>, ) -> MirTypeckResults<'tcx> { @@ -167,7 +163,7 @@ pub(crate) fn type_check<'a, 'tcx>( typeck.equate_inputs_and_outputs(&normalized_inputs_and_output); typeck.check_signature_annotation(); - liveness::generate(&mut typeck, &location_map, flow_inits, move_data); + liveness::generate(&mut typeck, &location_map, move_data); let opaque_type_values = opaque_types::take_opaques_and_register_member_constraints(&mut typeck); diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs index ff1ae2d9d79..73be25ba92b 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/mod.rs @@ -23,7 +23,7 @@ use rustc_codegen_ssa::traits::{ use rustc_middle::bug; #[cfg(feature = "master")] use rustc_middle::ty::layout::FnAbiOf; -use rustc_middle::ty::layout::{HasTypingEnv, LayoutOf}; +use rustc_middle::ty::layout::LayoutOf; use rustc_middle::ty::{self, Instance, Ty}; use rustc_span::{Span, Symbol, sym}; use rustc_target::callconv::{ArgAbi, PassMode}; @@ -205,21 +205,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc span: Span, ) -> Result<(), Instance<'tcx>> { let tcx = self.tcx; - let callee_ty = instance.ty(tcx, self.typing_env()); - let (def_id, fn_args) = match *callee_ty.kind() { - ty::FnDef(def_id, fn_args) => (def_id, fn_args), - _ => bug!("expected fn item type, found {}", callee_ty), - }; - - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - 
let name = tcx.item_name(def_id); + let name = tcx.item_name(instance.def_id()); let name_str = name.as_str(); - - let llret_ty = self.layout_of(ret_ty).gcc_type(self); + let fn_args = instance.args; let simple = get_simple_intrinsic(self, name); let simple_func = get_simple_function(self, name); @@ -320,8 +309,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc | sym::rotate_right | sym::saturating_add | sym::saturating_sub => { - let ty = arg_tys[0]; - match int_type_width_signed(ty, self) { + match int_type_width_signed(args[0].layout.ty, self) { Some((width, signed)) => match name { sym::ctlz | sym::cttz => { let func = self.current_func.borrow().expect("func"); @@ -400,7 +388,7 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, name, - ty, + ty: args[0].layout.ty, }); return Ok(()); } @@ -492,7 +480,14 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc } _ if name_str.starts_with("simd_") => { - match generic_simd_intrinsic(self, name, callee_ty, args, ret_ty, llret_ty, span) { + match generic_simd_intrinsic( + self, + name, + args, + result.layout.ty, + result.layout.gcc_type(self), + span, + ) { Ok(value) => value, Err(()) => return Ok(()), } @@ -503,13 +498,10 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc }; if result.layout.ty.is_bool() { - OperandRef::from_immediate_or_packed_pair(self, value, result.layout) - .val - .store(self, result); + let val = self.from_immediate(value); + self.store_to_place(val, result.val); } else if !result.layout.ty.is_unit() { - let ptr_llty = self.type_ptr_to(result.layout.gcc_type(self)); - let ptr = self.pointercast(result.val.llval, ptr_llty); - self.store(value, ptr, result.val.align); + self.store_to_place(value, result.val); } Ok(()) } diff --git a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs index b897d079249..82ef0d0b13a 100644 --- a/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs +++ b/compiler/rustc_codegen_gcc/src/intrinsic/simd.rs @@ -28,7 +28,6 @@ use crate::context::CodegenCx; pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( bx: &mut Builder<'a, 'gcc, 'tcx>, name: Symbol, - callee_ty: Ty<'tcx>, args: &[OperandRef<'tcx, RValue<'gcc>>], ret_ty: Ty<'tcx>, llret_ty: Type<'gcc>, @@ -54,24 +53,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( }; } - let tcx = bx.tcx(); - let sig = tcx.normalize_erasing_late_bound_regions( - ty::TypingEnv::fully_monomorphized(), - callee_ty.fn_sig(tcx), - ); - let arg_tys = sig.inputs(); - if name == sym::simd_select_bitmask { require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } + args[1].layout.ty, + InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty } ); - let (len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx()); let expected_int_bits = (len.max(8) - 1).next_power_of_two(); let expected_bytes = len / 8 + ((len % 8 > 0) as u64); - let mask_ty = arg_tys[0]; + let mask_ty = args[0].layout.ty; let mut mask = match *mask_ty.kind() { ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), @@ -121,8 +113,11 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( } // every intrinsic below takes a SIMD vector as its first argument 
- require_simd!(arg_tys[0], InvalidMonomorphization::SimdInput { span, name, ty: arg_tys[0] }); - let in_ty = arg_tys[0]; + require_simd!( + args[0].layout.ty, + InvalidMonomorphization::SimdInput { span, name, ty: args[0].layout.ty } + ); + let in_ty = args[0].layout.ty; let comparison = match name { sym::simd_eq => Some(BinOp::Eq), @@ -134,7 +129,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( _ => None, }; - let (in_len, in_elem) = arg_tys[0].simd_size_and_type(bx.tcx()); + let (in_len, in_elem) = args[0].layout.ty.simd_size_and_type(bx.tcx()); if let Some(cmp_op) = comparison { require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); @@ -401,13 +396,13 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( #[cfg(feature = "master")] if name == sym::simd_insert || name == sym::simd_insert_dyn { require!( - in_elem == arg_tys[2], + in_elem == args[2].layout.ty, InvalidMonomorphization::InsertedType { span, name, in_elem, in_ty, - out_ty: arg_tys[2] + out_ty: args[2].layout.ty } ); @@ -439,10 +434,10 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( let m_elem_ty = in_elem; let m_len = in_len; require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdArgument { span, name, ty: arg_tys[1] } + args[1].layout.ty, + InvalidMonomorphization::SimdArgument { span, name, ty: args[1].layout.ty } ); - let (v_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (v_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx()); require!( m_len == v_len, InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } @@ -911,18 +906,18 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // All types must be simd vector types require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } + args[1].layout.ty, + InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty } ); require_simd!( - arg_tys[2], - InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } + args[2].layout.ty, + InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty } ); require_simd!(ret_ty, InvalidMonomorphization::SimdReturn { span, name, ty: ret_ty }); // Of the same length: - let (out_len, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (out_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (out_len, _) = args[1].layout.ty.simd_size_and_type(bx.tcx()); + let (out_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx()); require!( in_len == out_len, InvalidMonomorphization::SecondArgumentLength { @@ -930,7 +925,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[1], + arg_ty: args[1].layout.ty, out_len } ); @@ -941,7 +936,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[2], + arg_ty: args[2].layout.ty, out_len: out_len2 } ); @@ -970,8 +965,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first argument - let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); - let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx()); + let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx()); let (pointer_count, underlying_ty) = match *element_ty1.kind() { ty::RawPtr(p_ty, _) if p_ty == in_elem => { (ptr_count(element_ty1), non_ptr(element_ty1)) @@ 
-983,7 +978,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( span, name, expected_element: element_ty1, - second_arg: arg_tys[1], + second_arg: args[1].layout.ty, in_elem, in_ty, mutability: ExpectedPointerMutability::Not, @@ -998,7 +993,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // The element type of the third argument must be an integer type of any width: // TODO: also support unsigned integers. - let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx()); match *element_ty2.kind() { ty::Int(_) => (), _ => { @@ -1030,17 +1025,17 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // All types must be simd vector types require_simd!(in_ty, InvalidMonomorphization::SimdFirst { span, name, ty: in_ty }); require_simd!( - arg_tys[1], - InvalidMonomorphization::SimdSecond { span, name, ty: arg_tys[1] } + args[1].layout.ty, + InvalidMonomorphization::SimdSecond { span, name, ty: args[1].layout.ty } ); require_simd!( - arg_tys[2], - InvalidMonomorphization::SimdThird { span, name, ty: arg_tys[2] } + args[2].layout.ty, + InvalidMonomorphization::SimdThird { span, name, ty: args[2].layout.ty } ); // Of the same length: - let (element_len1, _) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (element_len2, _) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (element_len1, _) = args[1].layout.ty.simd_size_and_type(bx.tcx()); + let (element_len2, _) = args[2].layout.ty.simd_size_and_type(bx.tcx()); require!( in_len == element_len1, InvalidMonomorphization::SecondArgumentLength { @@ -1048,7 +1043,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[1], + arg_ty: args[1].layout.ty, out_len: element_len1 } ); @@ -1059,7 +1054,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[2], + arg_ty: args[2].layout.ty, out_len: element_len2 } ); @@ -1082,9 +1077,9 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first argument - let (_, element_ty0) = arg_tys[0].simd_size_and_type(bx.tcx()); - let (_, element_ty1) = arg_tys[1].simd_size_and_type(bx.tcx()); - let (_, element_ty2) = arg_tys[2].simd_size_and_type(bx.tcx()); + let (_, element_ty0) = args[0].layout.ty.simd_size_and_type(bx.tcx()); + let (_, element_ty1) = args[1].layout.ty.simd_size_and_type(bx.tcx()); + let (_, element_ty2) = args[2].layout.ty.simd_size_and_type(bx.tcx()); let (pointer_count, underlying_ty) = match *element_ty1.kind() { ty::RawPtr(p_ty, mutbl) if p_ty == in_elem && mutbl == hir::Mutability::Mut => { (ptr_count(element_ty1), non_ptr(element_ty1)) @@ -1096,7 +1091,7 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( span, name, expected_element: element_ty1, - second_arg: arg_tys[1], + second_arg: args[1].layout.ty, in_elem, in_ty, mutability: ExpectedPointerMutability::Mut, @@ -1194,8 +1189,8 @@ pub fn generic_simd_intrinsic<'a, 'gcc, 'tcx>( return_error!(InvalidMonomorphization::ExpectedVectorElementType { span, name, - expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1, - vector_type: arg_tys[0], + expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1, + vector_type: args[0].layout.ty, }); } }; diff --git a/compiler/rustc_codegen_llvm/src/builder.rs b/compiler/rustc_codegen_llvm/src/builder.rs index fcb55a04635..167678c2ff1 100644 --- a/compiler/rustc_codegen_llvm/src/builder.rs +++ b/compiler/rustc_codegen_llvm/src/builder.rs @@ -1452,9 
+1452,15 @@ impl<'a, 'll, 'tcx> BuilderMethods<'a, 'tcx> for Builder<'a, 'll, 'tcx> { impl<'ll> StaticBuilderMethods for Builder<'_, 'll, '_> { fn get_static(&mut self, def_id: DefId) -> &'ll Value { // Forward to the `get_static` method of `CodegenCx` - let s = self.cx().get_static(def_id); - // Cast to default address space if globals are in a different addrspace - self.cx().const_pointercast(s, self.type_ptr()) + let global = self.cx().get_static(def_id); + if self.cx().tcx.is_thread_local_static(def_id) { + let pointer = self.call_intrinsic("llvm.threadlocal.address", &[global]); + // Cast to default address space if globals are in a different addrspace + self.pointercast(pointer, self.type_ptr()) + } else { + // Cast to default address space if globals are in a different addrspace + self.cx().const_pointercast(global, self.type_ptr()) + } } } diff --git a/compiler/rustc_codegen_llvm/src/context.rs b/compiler/rustc_codegen_llvm/src/context.rs index 8cc2cb9c333..8d6e1d8941b 100644 --- a/compiler/rustc_codegen_llvm/src/context.rs +++ b/compiler/rustc_codegen_llvm/src/context.rs @@ -1243,6 +1243,7 @@ impl<'ll> CodegenCx<'ll, '_> { } ifn!("llvm.ptrmask", fn(ptr, t_isize) -> ptr); + ifn!("llvm.threadlocal.address", fn(ptr) -> ptr); None } diff --git a/compiler/rustc_codegen_llvm/src/intrinsic.rs b/compiler/rustc_codegen_llvm/src/intrinsic.rs index e8629aeebb9..989752eb78e 100644 --- a/compiler/rustc_codegen_llvm/src/intrinsic.rs +++ b/compiler/rustc_codegen_llvm/src/intrinsic.rs @@ -169,19 +169,9 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { span: Span, ) -> Result<(), ty::Instance<'tcx>> { let tcx = self.tcx; - let callee_ty = instance.ty(tcx, self.typing_env()); - let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else { - bug!("expected fn item type, found {}", callee_ty); - }; - - let sig = callee_ty.fn_sig(tcx); - let sig = tcx.normalize_erasing_late_bound_regions(self.typing_env(), sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = tcx.item_name(def_id); - - let llret_ty = self.layout_of(ret_ty).llvm_type(self); + let name = tcx.item_name(instance.def_id()); + let fn_args = instance.args; let simple = get_simple_intrinsic(self, name); let llval = match name { @@ -265,22 +255,22 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { BackendRepr::Scalar(scalar) => { match scalar.primitive() { Primitive::Int(..) => { - if self.cx().size_of(ret_ty).bytes() < 4 { + if self.cx().size_of(result.layout.ty).bytes() < 4 { // `va_arg` should not be called on an integer type // less than 4 bytes in length. If it is, promote // the integer to an `i32` and truncate the result // back to the smaller type. let promoted_result = emit_va_arg(self, args[0], tcx.types.i32); - self.trunc(promoted_result, llret_ty) + self.trunc(promoted_result, result.layout.llvm_type(self)) } else { - emit_va_arg(self, args[0], ret_ty) + emit_va_arg(self, args[0], result.layout.ty) } } Primitive::Float(Float::F16) => { bug!("the va_arg intrinsic does not work with `f16`") } Primitive::Float(Float::F64) | Primitive::Pointer(_) => { - emit_va_arg(self, args[0], ret_ty) + emit_va_arg(self, args[0], result.layout.ty) } // `va_arg` should never be used with the return type f32. 
Primitive::Float(Float::F32) => { @@ -384,7 +374,7 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { | sym::rotate_right | sym::saturating_add | sym::saturating_sub => { - let ty = arg_tys[0]; + let ty = args[0].layout.ty; if !ty.is_integral() { tcx.dcx().emit_err(InvalidMonomorphization::BasicIntegerType { span, @@ -403,26 +393,26 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { &[args[0].immediate(), y], ); - self.intcast(ret, llret_ty, false) + self.intcast(ret, result.layout.llvm_type(self), false) } sym::ctlz_nonzero => { let y = self.const_bool(true); let llvm_name = &format!("llvm.ctlz.i{width}"); let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]); - self.intcast(ret, llret_ty, false) + self.intcast(ret, result.layout.llvm_type(self), false) } sym::cttz_nonzero => { let y = self.const_bool(true); let llvm_name = &format!("llvm.cttz.i{width}"); let ret = self.call_intrinsic(llvm_name, &[args[0].immediate(), y]); - self.intcast(ret, llret_ty, false) + self.intcast(ret, result.layout.llvm_type(self), false) } sym::ctpop => { let ret = self.call_intrinsic( &format!("llvm.ctpop.i{width}"), &[args[0].immediate()], ); - self.intcast(ret, llret_ty, false) + self.intcast(ret, result.layout.llvm_type(self), false) } sym::bswap => { if width == 8 { @@ -554,16 +544,16 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { // Unpack non-power-of-2 #[repr(packed, simd)] arguments. // This gives them the expected layout of a regular #[repr(simd)] vector. let mut loaded_args = Vec::new(); - for (ty, arg) in arg_tys.iter().zip(args) { + for arg in args { loaded_args.push( // #[repr(packed, simd)] vectors are passed like arrays (as references, // with reduced alignment and no padding) rather than as immediates. // We can use a vector load to fix the layout and turn the argument // into an immediate. - if ty.is_simd() + if arg.layout.ty.is_simd() && let OperandValue::Ref(place) = arg.val { - let (size, elem_ty) = ty.simd_size_and_type(self.tcx()); + let (size, elem_ty) = arg.layout.ty.simd_size_and_type(self.tcx()); let elem_ll_ty = match elem_ty.kind() { ty::Float(f) => self.type_float_from_ty(*f), ty::Int(i) => self.type_int_from_ty(*i), @@ -580,10 +570,10 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { ); } - let llret_ty = if ret_ty.is_simd() - && let BackendRepr::Memory { .. } = self.layout_of(ret_ty).layout.backend_repr + let llret_ty = if result.layout.ty.is_simd() + && let BackendRepr::Memory { .. 
} = result.layout.backend_repr { - let (size, elem_ty) = ret_ty.simd_size_and_type(self.tcx()); + let (size, elem_ty) = result.layout.ty.simd_size_and_type(self.tcx()); let elem_ll_ty = match elem_ty.kind() { ty::Float(f) => self.type_float_from_ty(*f), ty::Int(i) => self.type_int_from_ty(*i), @@ -593,16 +583,15 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; self.type_vector(elem_ll_ty, size) } else { - llret_ty + result.layout.llvm_type(self) }; match generic_simd_intrinsic( self, name, - callee_ty, fn_args, &loaded_args, - ret_ty, + result.layout.ty, llret_ty, span, ) { @@ -621,9 +610,8 @@ impl<'ll, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'_, 'll, 'tcx> { }; if result.layout.ty.is_bool() { - OperandRef::from_immediate_or_packed_pair(self, llval, result.layout) - .val - .store(self, result); + let val = self.from_immediate(llval); + self.store_to_place(val, result.val); } else if !result.layout.ty.is_unit() { self.store_to_place(llval, result.val); } @@ -1151,7 +1139,6 @@ fn get_rust_try_fn<'a, 'll, 'tcx>( fn generic_simd_intrinsic<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, name: Symbol, - callee_ty: Ty<'tcx>, fn_args: GenericArgsRef<'tcx>, args: &[OperandRef<'tcx, &'ll Value>], ret_ty: Ty<'tcx>, @@ -1222,26 +1209,22 @@ fn generic_simd_intrinsic<'ll, 'tcx>( bx.trunc(i_xn_msb, bx.type_vector(bx.type_i1(), in_len)) } - let tcx = bx.tcx(); - let sig = tcx.normalize_erasing_late_bound_regions(bx.typing_env(), callee_ty.fn_sig(tcx)); - let arg_tys = sig.inputs(); - // Sanity-check: all vector arguments must be immediates. if cfg!(debug_assertions) { - for (ty, arg) in arg_tys.iter().zip(args) { - if ty.is_simd() { + for arg in args { + if arg.layout.ty.is_simd() { assert_matches!(arg.val, OperandValue::Immediate(_)); } } } if name == sym::simd_select_bitmask { - let (len, _) = require_simd!(arg_tys[1], SimdArgument); + let (len, _) = require_simd!(args[1].layout.ty, SimdArgument); let expected_int_bits = len.max(8).next_power_of_two(); let expected_bytes = len.div_ceil(8); - let mask_ty = arg_tys[0]; + let mask_ty = args[0].layout.ty; let mask = match mask_ty.kind() { ty::Int(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), ty::Uint(i) if i.bit_width() == Some(expected_int_bits) => args[0].immediate(), @@ -1275,8 +1258,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( } // every intrinsic below takes a SIMD vector as its first argument - let (in_len, in_elem) = require_simd!(arg_tys[0], SimdInput); - let in_ty = arg_tys[0]; + let (in_len, in_elem) = require_simd!(args[0].layout.ty, SimdInput); + let in_ty = args[0].layout.ty; let comparison = match name { sym::simd_eq => Some(BinOp::Eq), @@ -1407,13 +1390,13 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_insert || name == sym::simd_insert_dyn { require!( - in_elem == arg_tys[2], + in_elem == args[2].layout.ty, InvalidMonomorphization::InsertedType { span, name, in_elem, in_ty, - out_ty: arg_tys[2] + out_ty: args[2].layout.ty } ); @@ -1464,7 +1447,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( if name == sym::simd_select { let m_elem_ty = in_elem; let m_len = in_len; - let (v_len, _) = require_simd!(arg_tys[1], SimdArgument); + let (v_len, _) = require_simd!(args[1].layout.ty, SimdArgument); require!( m_len == v_len, InvalidMonomorphization::MismatchedLengths { span, name, m_len, v_len } @@ -1665,9 +1648,9 @@ fn generic_simd_intrinsic<'ll, 'tcx>( // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first 
argument let (_, element_ty0) = require_simd!(in_ty, SimdFirst); - let (out_len, element_ty1) = require_simd!(arg_tys[1], SimdSecond); + let (out_len, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond); // The element type of the third argument must be a signed integer type of any width: - let (out_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird); + let (out_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird); require_simd!(ret_ty, SimdReturn); // Of the same length: @@ -1678,7 +1661,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[1], + arg_ty: args[1].layout.ty, out_len } ); @@ -1689,7 +1672,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[2], + arg_ty: args[2].layout.ty, out_len: out_len2 } ); @@ -1709,7 +1692,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( span, name, expected_element: element_ty1, - second_arg: arg_tys[1], + second_arg: args[1].layout.ty, in_elem, in_ty, mutability: ExpectedPointerMutability::Not, @@ -1770,10 +1753,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let (mask_len, mask_elem) = (in_len, in_elem); // The second argument must be a pointer matching the element type - let pointer_ty = arg_tys[1]; + let pointer_ty = args[1].layout.ty; // The last argument is a passthrough vector providing values for disabled lanes - let values_ty = arg_tys[2]; + let values_ty = args[2].layout.ty; let (values_len, values_elem) = require_simd!(values_ty, SimdThird); require_simd!(ret_ty, SimdReturn); @@ -1861,10 +1844,10 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let (mask_len, mask_elem) = (in_len, in_elem); // The second argument must be a pointer matching the element type - let pointer_ty = arg_tys[1]; + let pointer_ty = args[1].layout.ty; // The last argument specifies the values to store to memory - let values_ty = arg_tys[2]; + let values_ty = args[2].layout.ty; let (values_len, values_elem) = require_simd!(values_ty, SimdThird); // Of the same length: @@ -1944,8 +1927,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( // The second argument must be a simd vector with an element type that's a pointer // to the element type of the first argument let (_, element_ty0) = require_simd!(in_ty, SimdFirst); - let (element_len1, element_ty1) = require_simd!(arg_tys[1], SimdSecond); - let (element_len2, element_ty2) = require_simd!(arg_tys[2], SimdThird); + let (element_len1, element_ty1) = require_simd!(args[1].layout.ty, SimdSecond); + let (element_len2, element_ty2) = require_simd!(args[2].layout.ty, SimdThird); // Of the same length: require!( @@ -1955,7 +1938,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[1], + arg_ty: args[1].layout.ty, out_len: element_len1 } ); @@ -1966,7 +1949,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( name, in_len, in_ty, - arg_ty: arg_tys[2], + arg_ty: args[2].layout.ty, out_len: element_len2 } ); @@ -1981,7 +1964,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( span, name, expected_element: element_ty1, - second_arg: arg_tys[1], + second_arg: args[1].layout.ty, in_elem, in_ty, mutability: ExpectedPointerMutability::Mut, @@ -2503,7 +2486,7 @@ fn generic_simd_intrinsic<'ll, 'tcx>( let ptrs = args[0].immediate(); // The second argument must be a ptr-sized integer. // (We don't care about the signedness, this is wrapping anyway.) 
- let (_offsets_len, offsets_elem) = arg_tys[1].simd_size_and_type(bx.tcx()); + let (_offsets_len, offsets_elem) = args[1].layout.ty.simd_size_and_type(bx.tcx()); if !matches!(offsets_elem.kind(), ty::Int(ty::IntTy::Isize) | ty::Uint(ty::UintTy::Usize)) { span_bug!( span, @@ -2527,8 +2510,8 @@ fn generic_simd_intrinsic<'ll, 'tcx>( return_error!(InvalidMonomorphization::ExpectedVectorElementType { span, name, - expected_element: arg_tys[0].simd_size_and_type(bx.tcx()).1, - vector_type: arg_tys[0] + expected_element: args[0].layout.ty.simd_size_and_type(bx.tcx()).1, + vector_type: args[0].layout.ty }); } }; diff --git a/compiler/rustc_codegen_llvm/src/va_arg.rs b/compiler/rustc_codegen_llvm/src/va_arg.rs index 8eedb5392b5..236568590be 100644 --- a/compiler/rustc_codegen_llvm/src/va_arg.rs +++ b/compiler/rustc_codegen_llvm/src/va_arg.rs @@ -40,6 +40,7 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( align: Align, slot_size: Align, allow_higher_align: bool, + force_right_adjust: bool, ) -> (&'ll Value, Align) { let va_list_ty = bx.type_ptr(); let va_list_addr = list.immediate(); @@ -57,7 +58,10 @@ fn emit_direct_ptr_va_arg<'ll, 'tcx>( let next = bx.inbounds_ptradd(addr, full_direct_size); bx.store(next, va_list_addr, bx.tcx().data_layout.pointer_align.abi); - if size.bytes() < slot_size.bytes() && bx.tcx().sess.target.endian == Endian::Big { + if size.bytes() < slot_size.bytes() + && bx.tcx().sess.target.endian == Endian::Big + && force_right_adjust + { let adjusted_size = bx.cx().const_i32((slot_size.bytes() - size.bytes()) as i32); let adjusted = bx.inbounds_ptradd(addr, adjusted_size); (adjusted, addr_align) @@ -81,6 +85,11 @@ enum AllowHigherAlign { Yes, } +enum ForceRightAdjust { + No, + Yes, +} + fn emit_ptr_va_arg<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, @@ -88,9 +97,11 @@ fn emit_ptr_va_arg<'ll, 'tcx>( pass_mode: PassMode, slot_size: SlotSize, allow_higher_align: AllowHigherAlign, + force_right_adjust: ForceRightAdjust, ) -> &'ll Value { let indirect = matches!(pass_mode, PassMode::Indirect); let allow_higher_align = matches!(allow_higher_align, AllowHigherAlign::Yes); + let force_right_adjust = matches!(force_right_adjust, ForceRightAdjust::Yes); let slot_size = Align::from_bytes(slot_size as u64).unwrap(); let layout = bx.cx.layout_of(target_ty); @@ -103,8 +114,15 @@ fn emit_ptr_va_arg<'ll, 'tcx>( } else { (layout.llvm_type(bx.cx), layout.size, layout.align) }; - let (addr, addr_align) = - emit_direct_ptr_va_arg(bx, list, size, align.abi, slot_size, allow_higher_align); + let (addr, addr_align) = emit_direct_ptr_va_arg( + bx, + list, + size, + align.abi, + slot_size, + allow_higher_align, + force_right_adjust, + ); if indirect { let tmp_ret = bx.load(llty, addr, addr_align); bx.load(bx.cx.layout_of(target_ty).llvm_type(bx.cx), tmp_ret, align.abi) @@ -208,6 +226,7 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( PassMode::Direct, SlotSize::Bytes8, AllowHigherAlign::Yes, + ForceRightAdjust::No, ); bx.br(end); @@ -218,6 +237,150 @@ fn emit_aapcs_va_arg<'ll, 'tcx>( val } +fn emit_powerpc_va_arg<'ll, 'tcx>( + bx: &mut Builder<'_, 'll, 'tcx>, + list: OperandRef<'tcx, &'ll Value>, + target_ty: Ty<'tcx>, +) -> &'ll Value { + let dl = bx.cx.data_layout(); + + // struct __va_list_tag { + // unsigned char gpr; + // unsigned char fpr; + // unsigned short reserved; + // void *overflow_arg_area; + // void *reg_save_area; + // }; + let va_list_addr = list.immediate(); + + // Peel off any newtype wrappers. 
+ let layout = { + let mut layout = bx.cx.layout_of(target_ty); + + while let Some((_, inner)) = layout.non_1zst_field(bx.cx) { + layout = inner; + } + + layout + }; + + // Rust does not currently support any powerpc softfloat targets. + let target = &bx.cx.tcx.sess.target; + let is_soft_float_abi = target.abi == "softfloat"; + assert!(!is_soft_float_abi); + + // All instances of VaArgSafe are passed directly. + let is_indirect = false; + + let (is_i64, is_int, is_f64) = match layout.layout.backend_repr() { + BackendRepr::Scalar(scalar) => match scalar.primitive() { + rustc_abi::Primitive::Int(integer, _) => (integer.size().bits() == 64, true, false), + rustc_abi::Primitive::Float(float) => (false, false, float.size().bits() == 64), + rustc_abi::Primitive::Pointer(_) => (false, true, false), + }, + _ => unreachable!("all instances of VaArgSafe are represented as scalars"), + }; + + let num_regs_addr = if is_int || is_soft_float_abi { + va_list_addr // gpr + } else { + bx.inbounds_ptradd(va_list_addr, bx.const_usize(1)) // fpr + }; + + let mut num_regs = bx.load(bx.type_i8(), num_regs_addr, dl.i8_align.abi); + + // "Align" the register count when the type is passed as `i64`. + if is_i64 || (is_f64 && is_soft_float_abi) { + num_regs = bx.add(num_regs, bx.const_u8(1)); + num_regs = bx.and(num_regs, bx.const_u8(0b1111_1110)); + } + + let max_regs = 8u8; + let use_regs = bx.icmp(IntPredicate::IntULT, num_regs, bx.const_u8(max_regs)); + + let in_reg = bx.append_sibling_block("va_arg.in_reg"); + let in_mem = bx.append_sibling_block("va_arg.in_mem"); + let end = bx.append_sibling_block("va_arg.end"); + + bx.cond_br(use_regs, in_reg, in_mem); + + let reg_addr = { + bx.switch_to_block(in_reg); + + let reg_safe_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2 + 4)); + let mut reg_addr = bx.load(bx.type_ptr(), reg_safe_area_ptr, dl.pointer_align.abi); + + // Floating-point registers start after the general-purpose registers. + if !is_int && !is_soft_float_abi { + reg_addr = bx.inbounds_ptradd(reg_addr, bx.cx.const_usize(32)) + } + + // Get the address of the saved value by scaling the number of + // registers we've used by the number of. + let reg_size = if is_int || is_soft_float_abi { 4 } else { 8 }; + let reg_offset = bx.mul(num_regs, bx.cx().const_u8(reg_size)); + let reg_addr = bx.inbounds_ptradd(reg_addr, reg_offset); + + // Increase the used-register count. + let reg_incr = if is_i64 || (is_f64 && is_soft_float_abi) { 2 } else { 1 }; + let new_num_regs = bx.add(num_regs, bx.cx.const_u8(reg_incr)); + bx.store(new_num_regs, num_regs_addr, dl.i8_align.abi); + + bx.br(end); + + reg_addr + }; + + let mem_addr = { + bx.switch_to_block(in_mem); + + bx.store(bx.const_u8(max_regs), num_regs_addr, dl.i8_align.abi); + + // Everything in the overflow area is rounded up to a size of at least 4. + let overflow_area_align = Align::from_bytes(4).unwrap(); + + let size = if !is_indirect { + layout.layout.size.align_to(overflow_area_align) + } else { + dl.pointer_size + }; + + let overflow_area_ptr = bx.inbounds_ptradd(va_list_addr, bx.cx.const_usize(1 + 1 + 2)); + let mut overflow_area = bx.load(bx.type_ptr(), overflow_area_ptr, dl.pointer_align.abi); + + // Round up address of argument to alignment + if layout.layout.align.abi > overflow_area_align { + overflow_area = round_pointer_up_to_alignment( + bx, + overflow_area, + layout.layout.align.abi, + bx.type_ptr(), + ); + } + + let mem_addr = overflow_area; + + // Increase the overflow area. 
+ overflow_area = bx.inbounds_ptradd(overflow_area, bx.const_usize(size.bytes())); + bx.store(overflow_area, overflow_area_ptr, dl.pointer_align.abi); + + bx.br(end); + + mem_addr + }; + + // Return the appropriate result. + bx.switch_to_block(end); + let val_addr = bx.phi(bx.type_ptr(), &[reg_addr, mem_addr], &[in_reg, in_mem]); + let val_type = layout.llvm_type(bx); + let val_addr = if is_indirect { + bx.load(bx.cx.type_ptr(), val_addr, dl.pointer_align.abi) + } else { + val_addr + }; + bx.load(val_type, val_addr, layout.align.abi) +} + fn emit_s390x_va_arg<'ll, 'tcx>( bx: &mut Builder<'_, 'll, 'tcx>, list: OperandRef<'tcx, &'ll Value>, @@ -728,6 +891,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>( PassMode::Direct, SlotSize::Bytes4, if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes }, + ForceRightAdjust::No, ), "aarch64" | "arm64ec" if target.is_like_windows || target.is_like_darwin => { emit_ptr_va_arg( @@ -737,10 +901,24 @@ pub(super) fn emit_va_arg<'ll, 'tcx>( PassMode::Direct, SlotSize::Bytes8, if target.is_like_windows { AllowHigherAlign::No } else { AllowHigherAlign::Yes }, + ForceRightAdjust::No, ) } "aarch64" => emit_aapcs_va_arg(bx, addr, target_ty), "s390x" => emit_s390x_va_arg(bx, addr, target_ty), + "powerpc" => emit_powerpc_va_arg(bx, addr, target_ty), + "powerpc64" | "powerpc64le" => emit_ptr_va_arg( + bx, + addr, + target_ty, + PassMode::Direct, + SlotSize::Bytes8, + AllowHigherAlign::Yes, + match &*target.arch { + "powerpc64" => ForceRightAdjust::Yes, + _ => ForceRightAdjust::No, + }, + ), // Windows x86_64 "x86_64" if target.is_like_windows => { let target_ty_size = bx.cx.size_of(target_ty).bytes(); @@ -755,6 +933,7 @@ pub(super) fn emit_va_arg<'ll, 'tcx>( }, SlotSize::Bytes8, AllowHigherAlign::No, + ForceRightAdjust::No, ) } // This includes `target.is_like_darwin`, which on x86_64 targets is like sysv64. diff --git a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs index a79d67bb6cd..8c6f52084c2 100644 --- a/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs +++ b/compiler/rustc_codegen_ssa/src/mir/intrinsic.rs @@ -1,7 +1,7 @@ use rustc_abi::WrappingRange; +use rustc_middle::bug; use rustc_middle::mir::SourceInfo; use rustc_middle::ty::{self, Ty, TyCtxt}; -use rustc_middle::{bug, span_bug}; use rustc_session::config::OptLevel; use rustc_span::sym; @@ -60,18 +60,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { source_info: SourceInfo, ) -> Result<(), ty::Instance<'tcx>> { let span = source_info.span; - let callee_ty = instance.ty(bx.tcx(), bx.typing_env()); - let ty::FnDef(def_id, fn_args) = *callee_ty.kind() else { - span_bug!(span, "expected fn item type, found {}", callee_ty); - }; - - let sig = callee_ty.fn_sig(bx.tcx()); - let sig = bx.tcx().normalize_erasing_late_bound_regions(bx.typing_env(), sig); - let arg_tys = sig.inputs(); - let ret_ty = sig.output(); - let name = bx.tcx().item_name(def_id); + let name = bx.tcx().item_name(instance.def_id()); let name_str = name.as_str(); + let fn_args = instance.args; // If we're swapping something that's *not* an `OperandValue::Ref`, // then we can do it directly and avoid the alloca. 
@@ -97,13 +89,10 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } - let llret_ty = bx.backend_type(bx.layout_of(ret_ty)); - let ret_llval = |bx: &mut Bx, llval| { if result.layout.ty.is_bool() { - OperandRef::from_immediate_or_packed_pair(bx, llval, result.layout) - .val - .store(bx, result); + let val = bx.from_immediate(llval); + bx.store_to_place(val, result.val); } else if !result.layout.ty.is_unit() { bx.store_to_place(llval, result.val); } @@ -143,7 +132,11 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { sym::vtable_align => ty::COMMON_VTABLE_ENTRIES_ALIGN, _ => bug!(), }; - let value = meth::VirtualIndex::from_index(idx).get_usize(bx, vtable, callee_ty); + let value = meth::VirtualIndex::from_index(idx).get_usize( + bx, + vtable, + instance.ty(bx.tcx(), bx.typing_env()), + ); match name { // Size is always <= isize::MAX. sym::vtable_size => { @@ -164,7 +157,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { | sym::type_name | sym::variant_count => { let value = bx.tcx().const_eval_instance(bx.typing_env(), instance, span).unwrap(); - OperandRef::from_const(bx, value, ret_ty).immediate_or_packed_pair(bx) + OperandRef::from_const(bx, value, result.layout.ty).immediate_or_packed_pair(bx) } sym::arith_offset => { let ty = fn_args.type_at(0); @@ -248,7 +241,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.or_disjoint(a, b) } sym::exact_div => { - let ty = arg_tys[0]; + let ty = args[0].layout.ty; match int_type_width_signed(ty, bx.tcx()) { Some((_width, signed)) => { if signed { @@ -268,7 +261,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { } } sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => { - match float_type_width(arg_tys[0]) { + match float_type_width(args[0].layout.ty) { Some(_width) => match name { sym::fadd_fast => bx.fadd_fast(args[0].immediate(), args[1].immediate()), sym::fsub_fast => bx.fsub_fast(args[0].immediate(), args[1].immediate()), @@ -281,7 +274,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType { span, name, - ty: arg_tys[0], + ty: args[0].layout.ty, }); return Ok(()); } @@ -291,7 +284,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { | sym::fsub_algebraic | sym::fmul_algebraic | sym::fdiv_algebraic - | sym::frem_algebraic => match float_type_width(arg_tys[0]) { + | sym::frem_algebraic => match float_type_width(args[0].layout.ty) { Some(_width) => match name { sym::fadd_algebraic => { bx.fadd_algebraic(args[0].immediate(), args[1].immediate()) @@ -314,31 +307,32 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> { bx.tcx().dcx().emit_err(InvalidMonomorphization::BasicFloatType { span, name, - ty: arg_tys[0], + ty: args[0].layout.ty, }); return Ok(()); } }, sym::float_to_int_unchecked => { - if float_type_width(arg_tys[0]).is_none() { + if float_type_width(args[0].layout.ty).is_none() { bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked { span, - ty: arg_tys[0], + ty: args[0].layout.ty, }); return Ok(()); } - let Some((_width, signed)) = int_type_width_signed(ret_ty, bx.tcx()) else { + let Some((_width, signed)) = int_type_width_signed(result.layout.ty, bx.tcx()) + else { bx.tcx().dcx().emit_err(InvalidMonomorphization::FloatToIntUnchecked { span, - ty: ret_ty, + ty: result.layout.ty, }); return Ok(()); }; if signed { - 
bx.fptosi(args[0].immediate(), llret_ty) + bx.fptosi(args[0].immediate(), bx.backend_type(result.layout)) } else { - bx.fptoui(args[0].immediate(), llret_ty) + bx.fptoui(args[0].immediate(), bx.backend_type(result.layout)) } } diff --git a/compiler/rustc_const_eval/src/interpret/intrinsics.rs b/compiler/rustc_const_eval/src/interpret/intrinsics.rs index bf4152d4b8c..64467a90136 100644 --- a/compiler/rustc_const_eval/src/interpret/intrinsics.rs +++ b/compiler/rustc_const_eval/src/interpret/intrinsics.rs @@ -518,6 +518,103 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { sym::fabsf64 => self.float_abs_intrinsic::<Double>(args, dest)?, sym::fabsf128 => self.float_abs_intrinsic::<Quad>(args, dest)?, + sym::floorf16 => self.float_round_intrinsic::<Half>( + args, + dest, + rustc_apfloat::Round::TowardNegative, + )?, + sym::floorf32 => self.float_round_intrinsic::<Single>( + args, + dest, + rustc_apfloat::Round::TowardNegative, + )?, + sym::floorf64 => self.float_round_intrinsic::<Double>( + args, + dest, + rustc_apfloat::Round::TowardNegative, + )?, + sym::floorf128 => self.float_round_intrinsic::<Quad>( + args, + dest, + rustc_apfloat::Round::TowardNegative, + )?, + + sym::ceilf16 => self.float_round_intrinsic::<Half>( + args, + dest, + rustc_apfloat::Round::TowardPositive, + )?, + sym::ceilf32 => self.float_round_intrinsic::<Single>( + args, + dest, + rustc_apfloat::Round::TowardPositive, + )?, + sym::ceilf64 => self.float_round_intrinsic::<Double>( + args, + dest, + rustc_apfloat::Round::TowardPositive, + )?, + sym::ceilf128 => self.float_round_intrinsic::<Quad>( + args, + dest, + rustc_apfloat::Round::TowardPositive, + )?, + + sym::truncf16 => { + self.float_round_intrinsic::<Half>(args, dest, rustc_apfloat::Round::TowardZero)? + } + sym::truncf32 => { + self.float_round_intrinsic::<Single>(args, dest, rustc_apfloat::Round::TowardZero)? + } + sym::truncf64 => { + self.float_round_intrinsic::<Double>(args, dest, rustc_apfloat::Round::TowardZero)? + } + sym::truncf128 => { + self.float_round_intrinsic::<Quad>(args, dest, rustc_apfloat::Round::TowardZero)? + } + + sym::roundf16 => self.float_round_intrinsic::<Half>( + args, + dest, + rustc_apfloat::Round::NearestTiesToAway, + )?, + sym::roundf32 => self.float_round_intrinsic::<Single>( + args, + dest, + rustc_apfloat::Round::NearestTiesToAway, + )?, + sym::roundf64 => self.float_round_intrinsic::<Double>( + args, + dest, + rustc_apfloat::Round::NearestTiesToAway, + )?, + sym::roundf128 => self.float_round_intrinsic::<Quad>( + args, + dest, + rustc_apfloat::Round::NearestTiesToAway, + )?, + + sym::round_ties_even_f16 => self.float_round_intrinsic::<Half>( + args, + dest, + rustc_apfloat::Round::NearestTiesToEven, + )?, + sym::round_ties_even_f32 => self.float_round_intrinsic::<Single>( + args, + dest, + rustc_apfloat::Round::NearestTiesToEven, + )?, + sym::round_ties_even_f64 => self.float_round_intrinsic::<Double>( + args, + dest, + rustc_apfloat::Round::NearestTiesToEven, + )?, + sym::round_ties_even_f128 => self.float_round_intrinsic::<Quad>( + args, + dest, + rustc_apfloat::Round::NearestTiesToEven, + )?, + // Unsupported intrinsic: skip the return_to_block below. 
_ => return interp_ok(false), } @@ -900,4 +997,20 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> { self.write_scalar(x.abs(), dest)?; interp_ok(()) } + + fn float_round_intrinsic<F>( + &mut self, + args: &[OpTy<'tcx, M::Provenance>], + dest: &PlaceTy<'tcx, M::Provenance>, + mode: rustc_apfloat::Round, + ) -> InterpResult<'tcx, ()> + where + F: rustc_apfloat::Float + rustc_apfloat::FloatConvert<F> + Into<Scalar<M::Provenance>>, + { + let x: F = self.read_scalar(&args[0])?.to_float()?; + let res = x.round_to_integral(mode).value; + let res = self.adjust_nan(res, &[x]); + self.write_scalar(res, dest)?; + interp_ok(()) + } } diff --git a/compiler/rustc_driver_impl/src/lib.rs b/compiler/rustc_driver_impl/src/lib.rs index 056c476d5e1..54a331a4904 100644 --- a/compiler/rustc_driver_impl/src/lib.rs +++ b/compiler/rustc_driver_impl/src/lib.rs @@ -12,7 +12,6 @@ #![feature(decl_macro)] #![feature(panic_backtrace_config)] #![feature(panic_update_hook)] -#![feature(result_flattening)] #![feature(rustdoc_internals)] #![feature(try_blocks)] // tidy-alphabetical-end diff --git a/compiler/rustc_hir/src/hir.rs b/compiler/rustc_hir/src/hir.rs index f63ab303689..b4fcc16c09c 100644 --- a/compiler/rustc_hir/src/hir.rs +++ b/compiler/rustc_hir/src/hir.rs @@ -4106,11 +4106,11 @@ impl<'hir> Item<'hir> { expect_use, (&'hir UsePath<'hir>, UseKind), ItemKind::Use(p, uk), (p, *uk); - expect_static, (Ident, &'hir Ty<'hir>, Mutability, BodyId), - ItemKind::Static(ident, ty, mutbl, body), (*ident, ty, *mutbl, *body); + expect_static, (Mutability, Ident, &'hir Ty<'hir>, BodyId), + ItemKind::Static(mutbl, ident, ty, body), (*mutbl, *ident, ty, *body); - expect_const, (Ident, &'hir Ty<'hir>, &'hir Generics<'hir>, BodyId), - ItemKind::Const(ident, ty, generics, body), (*ident, ty, generics, *body); + expect_const, (Ident, &'hir Generics<'hir>, &'hir Ty<'hir>, BodyId), + ItemKind::Const(ident, generics, ty, body), (*ident, generics, ty, *body); expect_fn, (Ident, &FnSig<'hir>, &'hir Generics<'hir>, BodyId), ItemKind::Fn { ident, sig, generics, body, .. }, (*ident, sig, generics, *body); @@ -4125,17 +4125,17 @@ impl<'hir> Item<'hir> { expect_global_asm, &'hir InlineAsm<'hir>, ItemKind::GlobalAsm { asm, .. }, asm; - expect_ty_alias, (Ident, &'hir Ty<'hir>, &'hir Generics<'hir>), - ItemKind::TyAlias(ident, ty, generics), (*ident, ty, generics); + expect_ty_alias, (Ident, &'hir Generics<'hir>, &'hir Ty<'hir>), + ItemKind::TyAlias(ident, generics, ty), (*ident, generics, ty); - expect_enum, (Ident, &EnumDef<'hir>, &'hir Generics<'hir>), - ItemKind::Enum(ident, def, generics), (*ident, def, generics); + expect_enum, (Ident, &'hir Generics<'hir>, &EnumDef<'hir>), + ItemKind::Enum(ident, generics, def), (*ident, generics, def); - expect_struct, (Ident, &VariantData<'hir>, &'hir Generics<'hir>), - ItemKind::Struct(ident, data, generics), (*ident, data, generics); + expect_struct, (Ident, &'hir Generics<'hir>, &VariantData<'hir>), + ItemKind::Struct(ident, generics, data), (*ident, generics, data); - expect_union, (Ident, &VariantData<'hir>, &'hir Generics<'hir>), - ItemKind::Union(ident, data, generics), (*ident, data, generics); + expect_union, (Ident, &'hir Generics<'hir>, &VariantData<'hir>), + ItemKind::Union(ident, generics, data), (*ident, generics, data); expect_trait, ( @@ -4278,13 +4278,13 @@ pub enum ItemKind<'hir> { Use(&'hir UsePath<'hir>, UseKind), /// A `static` item. - Static(Ident, &'hir Ty<'hir>, Mutability, BodyId), + Static(Mutability, Ident, &'hir Ty<'hir>, BodyId), /// A `const` item. 
- Const(Ident, &'hir Ty<'hir>, &'hir Generics<'hir>, BodyId), + Const(Ident, &'hir Generics<'hir>, &'hir Ty<'hir>, BodyId), /// A function declaration. Fn { - ident: Ident, sig: FnSig<'hir>, + ident: Ident, generics: &'hir Generics<'hir>, body: BodyId, /// Whether this function actually has a body. @@ -4309,13 +4309,13 @@ pub enum ItemKind<'hir> { fake_body: BodyId, }, /// A type alias, e.g., `type Foo = Bar<u8>`. - TyAlias(Ident, &'hir Ty<'hir>, &'hir Generics<'hir>), + TyAlias(Ident, &'hir Generics<'hir>, &'hir Ty<'hir>), /// An enum definition, e.g., `enum Foo<A, B> { C<A>, D<B> }`. - Enum(Ident, EnumDef<'hir>, &'hir Generics<'hir>), + Enum(Ident, &'hir Generics<'hir>, EnumDef<'hir>), /// A struct definition, e.g., `struct Foo<A> {x: A}`. - Struct(Ident, VariantData<'hir>, &'hir Generics<'hir>), + Struct(Ident, &'hir Generics<'hir>, VariantData<'hir>), /// A union definition, e.g., `union Foo<A, B> {x: A, y: B}`. - Union(Ident, VariantData<'hir>, &'hir Generics<'hir>), + Union(Ident, &'hir Generics<'hir>, VariantData<'hir>), /// A trait definition. Trait(IsAuto, Safety, Ident, &'hir Generics<'hir>, GenericBounds<'hir>, &'hir [TraitItemRef]), /// A trait alias. @@ -4352,7 +4352,7 @@ impl ItemKind<'_> { match *self { ItemKind::ExternCrate(_, ident) | ItemKind::Use(_, UseKind::Single(ident)) - | ItemKind::Static(ident, ..) + | ItemKind::Static(_, ident, ..) | ItemKind::Const(ident, ..) | ItemKind::Fn { ident, .. } | ItemKind::Macro(ident, ..) @@ -4374,11 +4374,11 @@ impl ItemKind<'_> { pub fn generics(&self) -> Option<&Generics<'_>> { Some(match self { ItemKind::Fn { generics, .. } - | ItemKind::TyAlias(_, _, generics) - | ItemKind::Const(_, _, generics, _) - | ItemKind::Enum(_, _, generics) - | ItemKind::Struct(_, _, generics) - | ItemKind::Union(_, _, generics) + | ItemKind::TyAlias(_, generics, _) + | ItemKind::Const(_, generics, _, _) + | ItemKind::Enum(_, generics, _) + | ItemKind::Struct(_, generics, _) + | ItemKind::Union(_, generics, _) | ItemKind::Trait(_, _, _, generics, _, _) | ItemKind::TraitAlias(_, generics, _) | ItemKind::Impl(Impl { generics, .. }) => generics, @@ -4802,9 +4802,9 @@ impl<'hir> Node<'hir> { pub fn ty(self) -> Option<&'hir Ty<'hir>> { match self { Node::Item(it) => match it.kind { - ItemKind::TyAlias(_, ty, _) - | ItemKind::Static(_, ty, _, _) - | ItemKind::Const(_, ty, _, _) => Some(ty), + ItemKind::TyAlias(_, _, ty) + | ItemKind::Static(_, _, ty, _) + | ItemKind::Const(_, _, ty, _) => Some(ty), ItemKind::Impl(impl_item) => Some(&impl_item.self_ty), _ => None, }, @@ -4824,7 +4824,7 @@ impl<'hir> Node<'hir> { pub fn alias_ty(self) -> Option<&'hir Ty<'hir>> { match self { - Node::Item(Item { kind: ItemKind::TyAlias(_, ty, _), .. }) => Some(ty), + Node::Item(Item { kind: ItemKind::TyAlias(_, _, ty), .. 
}) => Some(ty), _ => None, } } diff --git a/compiler/rustc_hir/src/intravisit.rs b/compiler/rustc_hir/src/intravisit.rs index a60de4b1fc3..1fd44e44b9c 100644 --- a/compiler/rustc_hir/src/intravisit.rs +++ b/compiler/rustc_hir/src/intravisit.rs @@ -545,15 +545,15 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: UseKind::Glob | UseKind::ListStem => {} } } - ItemKind::Static(ident, ref typ, _, body) => { + ItemKind::Static(_, ident, ref typ, body) => { try_visit!(visitor.visit_ident(ident)); try_visit!(visitor.visit_ty_unambig(typ)); try_visit!(visitor.visit_nested_body(body)); } - ItemKind::Const(ident, ref typ, ref generics, body) => { + ItemKind::Const(ident, ref generics, ref typ, body) => { try_visit!(visitor.visit_ident(ident)); - try_visit!(visitor.visit_ty_unambig(typ)); try_visit!(visitor.visit_generics(generics)); + try_visit!(visitor.visit_ty_unambig(typ)); try_visit!(visitor.visit_nested_body(body)); } ItemKind::Fn { ident, sig, generics, body: body_id, .. } => { @@ -583,12 +583,12 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: // typeck results set correctly. try_visit!(visitor.visit_nested_body(fake_body)); } - ItemKind::TyAlias(ident, ref ty, ref generics) => { + ItemKind::TyAlias(ident, ref generics, ref ty) => { try_visit!(visitor.visit_ident(ident)); - try_visit!(visitor.visit_ty_unambig(ty)); try_visit!(visitor.visit_generics(generics)); + try_visit!(visitor.visit_ty_unambig(ty)); } - ItemKind::Enum(ident, ref enum_definition, ref generics) => { + ItemKind::Enum(ident, ref generics, ref enum_definition) => { try_visit!(visitor.visit_ident(ident)); try_visit!(visitor.visit_generics(generics)); try_visit!(visitor.visit_enum_def(enum_definition)); @@ -609,8 +609,8 @@ pub fn walk_item<'v, V: Visitor<'v>>(visitor: &mut V, item: &'v Item<'v>) -> V:: try_visit!(visitor.visit_ty_unambig(self_ty)); walk_list!(visitor, visit_impl_item_ref, *items); } - ItemKind::Struct(ident, ref struct_definition, ref generics) - | ItemKind::Union(ident, ref struct_definition, ref generics) => { + ItemKind::Struct(ident, ref generics, ref struct_definition) + | ItemKind::Union(ident, ref generics, ref struct_definition) => { try_visit!(visitor.visit_ident(ident)); try_visit!(visitor.visit_generics(generics)); try_visit!(visitor.visit_variant_data(struct_definition)); diff --git a/compiler/rustc_hir_analysis/src/check/intrinsic.rs b/compiler/rustc_hir_analysis/src/check/intrinsic.rs index 54bb3ac4113..09610a2f3ec 100644 --- a/compiler/rustc_hir_analysis/src/check/intrinsic.rs +++ b/compiler/rustc_hir_analysis/src/check/intrinsic.rs @@ -265,6 +265,7 @@ pub(crate) fn check_intrinsic_type( vec![Ty::new_imm_ptr(tcx, param(0)), tcx.types.isize], Ty::new_imm_ptr(tcx, param(0)), ), + sym::slice_get_unchecked => (3, 0, vec![param(1), tcx.types.usize], param(0)), sym::ptr_mask => ( 1, 0, diff --git a/compiler/rustc_hir_analysis/src/check/wfcheck.rs b/compiler/rustc_hir_analysis/src/check/wfcheck.rs index f85ff5a6f4b..b764b714fe1 100644 --- a/compiler/rustc_hir_analysis/src/check/wfcheck.rs +++ b/compiler/rustc_hir_analysis/src/check/wfcheck.rs @@ -297,32 +297,30 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) -> Result<() hir::ItemKind::Fn { ident, sig, .. } => { check_item_fn(tcx, def_id, ident, item.span, sig.decl) } - hir::ItemKind::Static(_, ty, ..) => { + hir::ItemKind::Static(_, _, ty, _) => { check_static_item(tcx, def_id, ty.span, UnsizedHandling::Forbid) } - hir::ItemKind::Const(_, ty, ..) 
=> check_const_item(tcx, def_id, ty.span, item.span), - hir::ItemKind::Struct(_, _, hir_generics) => { + hir::ItemKind::Const(_, _, ty, _) => check_const_item(tcx, def_id, ty.span, item.span), + hir::ItemKind::Struct(_, generics, _) => { let res = check_type_defn(tcx, item, false); - check_variances_for_type_defn(tcx, item, hir_generics); + check_variances_for_type_defn(tcx, item, generics); res } - hir::ItemKind::Union(_, _, hir_generics) => { + hir::ItemKind::Union(_, generics, _) => { let res = check_type_defn(tcx, item, true); - check_variances_for_type_defn(tcx, item, hir_generics); + check_variances_for_type_defn(tcx, item, generics); res } - hir::ItemKind::Enum(_, _, hir_generics) => { + hir::ItemKind::Enum(_, generics, _) => { let res = check_type_defn(tcx, item, true); - check_variances_for_type_defn(tcx, item, hir_generics); + check_variances_for_type_defn(tcx, item, generics); res } hir::ItemKind::Trait(..) => check_trait(tcx, item), hir::ItemKind::TraitAlias(..) => check_trait(tcx, item), // `ForeignItem`s are handled separately. hir::ItemKind::ForeignMod { .. } => Ok(()), - hir::ItemKind::TyAlias(_, hir_ty, hir_generics) - if tcx.type_alias_is_lazy(item.owner_id) => - { + hir::ItemKind::TyAlias(_, generics, hir_ty) if tcx.type_alias_is_lazy(item.owner_id) => { let res = enter_wf_checking_ctxt(tcx, item.span, def_id, |wfcx| { let ty = tcx.type_of(def_id).instantiate_identity(); let item_ty = @@ -335,7 +333,7 @@ fn check_item<'tcx>(tcx: TyCtxt<'tcx>, item: &'tcx hir::Item<'tcx>) -> Result<() check_where_clauses(wfcx, item.span, def_id); Ok(()) }); - check_variances_for_type_defn(tcx, item, hir_generics); + check_variances_for_type_defn(tcx, item, generics); res } _ => Ok(()), diff --git a/compiler/rustc_hir_analysis/src/collect.rs b/compiler/rustc_hir_analysis/src/collect.rs index 8a2edd843f2..a649e7d67af 100644 --- a/compiler/rustc_hir_analysis/src/collect.rs +++ b/compiler/rustc_hir_analysis/src/collect.rs @@ -248,13 +248,13 @@ fn reject_placeholder_type_signatures_in_item<'tcx>( item: &'tcx hir::Item<'tcx>, ) { let (generics, suggest) = match &item.kind { - hir::ItemKind::Union(_, _, generics) - | hir::ItemKind::Enum(_, _, generics) + hir::ItemKind::Union(_, generics, _) + | hir::ItemKind::Enum(_, generics, _) | hir::ItemKind::TraitAlias(_, generics, _) | hir::ItemKind::Trait(_, _, _, generics, ..) | hir::ItemKind::Impl(hir::Impl { generics, .. }) - | hir::ItemKind::Struct(_, _, generics) => (generics, true), - hir::ItemKind::TyAlias(_, _, generics) => (generics, false), + | hir::ItemKind::Struct(_, generics, _) => (generics, true), + hir::ItemKind::TyAlias(_, generics, _) => (generics, false), // `static`, `fn` and `const` are handled elsewhere to suggest appropriate type. 
_ => return, }; @@ -470,9 +470,9 @@ impl<'tcx> HirTyLowerer<'tcx> for ItemCtxt<'tcx> { .tcx .hir_expect_item(self.tcx.hir_get_parent_item(self.hir_id()).def_id); match &item.kind { - hir::ItemKind::Enum(_, _, generics) - | hir::ItemKind::Struct(_, _, generics) - | hir::ItemKind::Union(_, _, generics) => { + hir::ItemKind::Enum(_, generics, _) + | hir::ItemKind::Struct(_, generics, _) + | hir::ItemKind::Union(_, generics, _) => { let lt_name = get_new_lifetime_name(self.tcx, poly_trait_ref, generics); let (lt_sp, sugg) = match generics.params { [] => (generics.span, format!("<{lt_name}>")), @@ -740,7 +740,7 @@ fn lower_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) { tcx.at(it.span).explicit_super_predicates_of(def_id); tcx.ensure_ok().predicates_of(def_id); } - hir::ItemKind::Struct(_, struct_def, _) | hir::ItemKind::Union(_, struct_def, _) => { + hir::ItemKind::Struct(_, _, struct_def) | hir::ItemKind::Union(_, _, struct_def) => { tcx.ensure_ok().generics_of(def_id); tcx.ensure_ok().type_of(def_id); tcx.ensure_ok().predicates_of(def_id); @@ -762,7 +762,7 @@ fn lower_item(tcx: TyCtxt<'_>, item_id: hir::ItemId) { tcx.ensure_ok().predicates_of(def_id); } - hir::ItemKind::Static(_, ty, ..) | hir::ItemKind::Const(_, ty, ..) => { + hir::ItemKind::Static(_, _, ty, _) | hir::ItemKind::Const(_, _, ty, _) => { tcx.ensure_ok().generics_of(def_id); tcx.ensure_ok().type_of(def_id); tcx.ensure_ok().predicates_of(def_id); @@ -1093,7 +1093,7 @@ fn adt_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::AdtDef<'_> { let repr = tcx.repr_options_of_def(def_id); let (kind, variants) = match &item.kind { - ItemKind::Enum(_, def, _) => { + ItemKind::Enum(_, _, def) => { let mut distance_from_explicit = 0; let variants = def .variants @@ -1121,7 +1121,7 @@ fn adt_def(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::AdtDef<'_> { (AdtKind::Enum, variants) } - ItemKind::Struct(ident, def, _) | ItemKind::Union(ident, def, _) => { + ItemKind::Struct(ident, _, def) | ItemKind::Union(ident, _, def) => { let adt_kind = match item.kind { ItemKind::Struct(..) => AdtKind::Struct, _ => AdtKind::Union, diff --git a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs index 709446d09cd..d45f0475e99 100644 --- a/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs +++ b/compiler/rustc_hir_analysis/src/collect/resolve_bound_vars.rs @@ -634,11 +634,11 @@ impl<'a, 'tcx> Visitor<'tcx> for BoundVarContext<'a, 'tcx> { // These sorts of items have no lifetime parameters at all. intravisit::walk_item(self, item); } - hir::ItemKind::TyAlias(_, _, generics) - | hir::ItemKind::Const(_, _, generics, _) - | hir::ItemKind::Enum(_, _, generics) - | hir::ItemKind::Struct(_, _, generics) - | hir::ItemKind::Union(_, _, generics) + hir::ItemKind::TyAlias(_, generics, _) + | hir::ItemKind::Const(_, generics, _, _) + | hir::ItemKind::Enum(_, generics, _) + | hir::ItemKind::Struct(_, generics, _) + | hir::ItemKind::Union(_, generics, _) | hir::ItemKind::Trait(_, _, _, generics, ..) | hir::ItemKind::TraitAlias(_, generics, ..) | hir::ItemKind::Impl(&hir::Impl { generics, .. 
}) => { diff --git a/compiler/rustc_hir_analysis/src/collect/type_of.rs b/compiler/rustc_hir_analysis/src/collect/type_of.rs index c20b14df770..141d96b57e5 100644 --- a/compiler/rustc_hir_analysis/src/collect/type_of.rs +++ b/compiler/rustc_hir_analysis/src/collect/type_of.rs @@ -206,7 +206,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_ }, Node::Item(item) => match item.kind { - ItemKind::Static(ident, ty, .., body_id) => { + ItemKind::Static(_, ident, ty, body_id) => { if ty.is_suggestable_infer_ty() { infer_placeholder_type( icx.lowerer(), @@ -220,7 +220,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_ icx.lower_ty(ty) } } - ItemKind::Const(ident, ty, _, body_id) => { + ItemKind::Const(ident, _, ty, body_id) => { if ty.is_suggestable_infer_ty() { infer_placeholder_type( icx.lowerer(), @@ -234,7 +234,7 @@ pub(super) fn type_of(tcx: TyCtxt<'_>, def_id: LocalDefId) -> ty::EarlyBinder<'_ icx.lower_ty(ty) } } - ItemKind::TyAlias(_, self_ty, _) => icx.lower_ty(self_ty), + ItemKind::TyAlias(_, _, self_ty) => icx.lower_ty(self_ty), ItemKind::Impl(hir::Impl { self_ty, .. }) => match self_ty.find_self_aliases() { spans if spans.len() > 0 => { let guar = tcx @@ -532,5 +532,5 @@ pub(crate) fn type_alias_is_lazy<'tcx>(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> } } } - HasTait.visit_ty_unambig(tcx.hir_expect_item(def_id).expect_ty_alias().1).is_break() + HasTait.visit_ty_unambig(tcx.hir_expect_item(def_id).expect_ty_alias().2).is_break() } diff --git a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs index 1f4692b19f1..9abae33ffdb 100644 --- a/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs +++ b/compiler/rustc_hir_analysis/src/hir_ty_lowering/lint.rs @@ -141,7 +141,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { let generics = match tcx.hir_node_by_def_id(parent_item) { hir::Node::Item(hir::Item { - kind: hir::ItemKind::Struct(_, variant, generics), + kind: hir::ItemKind::Struct(_, generics, variant), .. }) => { if !variant.fields().iter().any(|field| field.hir_id == parent_hir_id) { @@ -149,7 +149,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { } generics } - hir::Node::Item(hir::Item { kind: hir::ItemKind::Enum(_, def, generics), .. }) => { + hir::Node::Item(hir::Item { kind: hir::ItemKind::Enum(_, generics, def), .. }) => { if !def .variants .iter() @@ -269,7 +269,7 @@ impl<'tcx> dyn HirTyLowerer<'tcx> + '_ { hir::Node::Field(field) => { // Enums can't have unsized fields, fields can only have an unsized tail field. if let hir::Node::Item(hir::Item { - kind: hir::ItemKind::Struct(_, variant, _), .. + kind: hir::ItemKind::Struct(_, _, variant), .. 
}) = tcx.parent_hir_node(field.hir_id) && variant .fields() diff --git a/compiler/rustc_hir_analysis/src/hir_wf_check.rs b/compiler/rustc_hir_analysis/src/hir_wf_check.rs index 64c1a78bd1c..4633f3951a7 100644 --- a/compiler/rustc_hir_analysis/src/hir_wf_check.rs +++ b/compiler/rustc_hir_analysis/src/hir_wf_check.rs @@ -145,9 +145,9 @@ fn diagnostic_hir_wf_check<'tcx>( ref item => bug!("Unexpected TraitItem {:?}", item), }, hir::Node::Item(item) => match item.kind { - hir::ItemKind::TyAlias(_, ty, _) - | hir::ItemKind::Static(_, ty, _, _) - | hir::ItemKind::Const(_, ty, _, _) => vec![ty], + hir::ItemKind::TyAlias(_, _, ty) + | hir::ItemKind::Static(_, _, ty, _) + | hir::ItemKind::Const(_, _, ty, _) => vec![ty], hir::ItemKind::Impl(impl_) => match &impl_.of_trait { Some(t) => t .path diff --git a/compiler/rustc_hir_pretty/src/lib.rs b/compiler/rustc_hir_pretty/src/lib.rs index 04f9c831b0a..b23b3125c59 100644 --- a/compiler/rustc_hir_pretty/src/lib.rs +++ b/compiler/rustc_hir_pretty/src/lib.rs @@ -477,10 +477,10 @@ impl<'a> State<'a> { hir::ForeignItemKind::Fn(sig, arg_idents, generics) => { let (cb, ib) = self.head(""); self.print_fn( - sig.decl, sig.header, Some(item.ident.name), generics, + sig.decl, arg_idents, None, ); @@ -593,7 +593,7 @@ impl<'a> State<'a> { self.end(ib); self.end(cb); } - hir::ItemKind::Static(ident, ty, m, expr) => { + hir::ItemKind::Static(m, ident, ty, expr) => { let (cb, ib) = self.head("static"); if m.is_mut() { self.word_space("mut"); @@ -609,7 +609,7 @@ impl<'a> State<'a> { self.word(";"); self.end(cb); } - hir::ItemKind::Const(ident, ty, generics, expr) => { + hir::ItemKind::Const(ident, generics, ty, expr) => { let (cb, ib) = self.head("const"); self.print_ident(ident); self.print_generic_params(generics.params); @@ -626,7 +626,7 @@ impl<'a> State<'a> { } hir::ItemKind::Fn { ident, sig, generics, body, .. 
} => { let (cb, ib) = self.head(""); - self.print_fn(sig.decl, sig.header, Some(ident.name), generics, &[], Some(body)); + self.print_fn(sig.header, Some(ident.name), generics, sig.decl, &[], Some(body)); self.word(" "); self.end(ib); self.end(cb); @@ -660,7 +660,7 @@ impl<'a> State<'a> { self.end(cb); self.end(ib); } - hir::ItemKind::TyAlias(ident, ty, generics) => { + hir::ItemKind::TyAlias(ident, generics, ty) => { let (cb, ib) = self.head("type"); self.print_ident(ident); self.print_generic_params(generics.params); @@ -673,16 +673,16 @@ impl<'a> State<'a> { self.word(";"); self.end(cb); } - hir::ItemKind::Enum(ident, ref enum_definition, params) => { - self.print_enum_def(enum_definition, params, ident.name, item.span); + hir::ItemKind::Enum(ident, generics, ref enum_def) => { + self.print_enum_def(ident.name, generics, enum_def, item.span); } - hir::ItemKind::Struct(ident, ref struct_def, generics) => { + hir::ItemKind::Struct(ident, generics, ref struct_def) => { let (cb, ib) = self.head("struct"); - self.print_struct(struct_def, generics, ident.name, item.span, true, cb, ib); + self.print_struct(ident.name, generics, struct_def, item.span, true, cb, ib); } - hir::ItemKind::Union(ident, ref struct_def, generics) => { + hir::ItemKind::Union(ident, generics, ref struct_def) => { let (cb, ib) = self.head("union"); - self.print_struct(struct_def, generics, ident.name, item.span, true, cb, ib); + self.print_struct(ident.name, generics, struct_def, item.span, true, cb, ib); } hir::ItemKind::Impl(&hir::Impl { constness, @@ -791,9 +791,9 @@ impl<'a> State<'a> { fn print_enum_def( &mut self, - enum_definition: &hir::EnumDef<'_>, - generics: &hir::Generics<'_>, name: Symbol, + generics: &hir::Generics<'_>, + enum_def: &hir::EnumDef<'_>, span: rustc_span::Span, ) { let (cb, ib) = self.head("enum"); @@ -801,7 +801,7 @@ impl<'a> State<'a> { self.print_generic_params(generics.params); self.print_where_clause(generics); self.space(); - self.print_variants(enum_definition.variants, span, cb, ib); + self.print_variants(enum_def.variants, span, cb, ib); } fn print_variants( @@ -834,9 +834,9 @@ impl<'a> State<'a> { fn print_struct( &mut self, - struct_def: &hir::VariantData<'_>, - generics: &hir::Generics<'_>, name: Symbol, + generics: &hir::Generics<'_>, + struct_def: &hir::VariantData<'_>, span: rustc_span::Span, print_finalizer: bool, cb: BoxMarker, @@ -886,7 +886,7 @@ impl<'a> State<'a> { pub fn print_variant(&mut self, v: &hir::Variant<'_>) { let (cb, ib) = self.head(""); let generics = hir::Generics::empty(); - self.print_struct(&v.data, generics, v.ident.name, v.span, false, cb, ib); + self.print_struct(v.ident.name, generics, &v.data, v.span, false, cb, ib); if let Some(ref d) = v.disr_expr { self.space(); self.word_space("="); @@ -902,7 +902,7 @@ impl<'a> State<'a> { arg_idents: &[Option<Ident>], body_id: Option<hir::BodyId>, ) { - self.print_fn(m.decl, m.header, Some(ident.name), generics, arg_idents, body_id); + self.print_fn(m.header, Some(ident.name), generics, m.decl, arg_idents, body_id); } fn print_trait_item(&mut self, ti: &hir::TraitItem<'_>) { @@ -2141,10 +2141,10 @@ impl<'a> State<'a> { fn print_fn( &mut self, - decl: &hir::FnDecl<'_>, header: hir::FnHeader, name: Option<Symbol>, generics: &hir::Generics<'_>, + decl: &hir::FnDecl<'_>, arg_idents: &[Option<Ident>], body_id: Option<hir::BodyId>, ) { @@ -2483,7 +2483,6 @@ impl<'a> State<'a> { self.print_formal_generic_params(generic_params); let generics = hir::Generics::empty(); self.print_fn( - decl, hir::FnHeader { safety: 
safety.into(), abi, @@ -2492,6 +2491,7 @@ impl<'a> State<'a> { }, name, generics, + decl, arg_idents, None, ); diff --git a/compiler/rustc_hir_typeck/src/cast.rs b/compiler/rustc_hir_typeck/src/cast.rs index c044c4f7c37..e144a6ab599 100644 --- a/compiler/rustc_hir_typeck/src/cast.rs +++ b/compiler/rustc_hir_typeck/src/cast.rs @@ -408,6 +408,16 @@ impl<'a, 'tcx> CastCheck<'tcx> { self.expr_ty, fcx.ty_to_string(self.cast_ty) ); + + if let Ok(snippet) = fcx.tcx.sess.source_map().span_to_snippet(self.expr_span) + && matches!(self.expr.kind, ExprKind::AddrOf(..)) + { + err.note(format!( + "casting reference expression `{}` because `&` binds tighter than `as`", + snippet + )); + } + let mut sugg = None; let mut sugg_mutref = false; if let ty::Ref(reg, cast_ty, mutbl) = *self.cast_ty.kind() { diff --git a/compiler/rustc_hir_typeck/src/demand.rs b/compiler/rustc_hir_typeck/src/demand.rs index 8182851a015..152c88ad92a 100644 --- a/compiler/rustc_hir_typeck/src/demand.rs +++ b/compiler/rustc_hir_typeck/src/demand.rs @@ -722,8 +722,8 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { )) => { if let Some(hir::Node::Item(hir::Item { kind: - hir::ItemKind::Static(ident, ty, ..) - | hir::ItemKind::Const(ident, ty, ..), + hir::ItemKind::Static(_, ident, ty, _) + | hir::ItemKind::Const(ident, _, ty, _), .. })) = self.tcx.hir_get_if_local(*def_id) { diff --git a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs index 43b662ca453..1c3bc338d85 100644 --- a/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs +++ b/compiler/rustc_hir_typeck/src/fn_ctxt/suggestions.rs @@ -2713,6 +2713,31 @@ impl<'a, 'tcx> FnCtxt<'a, 'tcx> { )); } + // Don't try to suggest ref/deref on an `if` expression, because: + // - The `if` could be part of a desugared `if else` statement, + // which would create impossible suggestions such as `if ... { ... } else &if { ... } else { ... }`. + // - In general the suggestions it creates such as `&if ... { ... } else { ... }` are not very helpful. + // We try to generate a suggestion such as `if ... { &... } else { &... }` instead. + if let hir::ExprKind::If(_c, then, els) = expr.kind { + // The `then` of a `Expr::If` always contains a block, and that block may have a final expression that we can borrow + // If the block does not have a final expression, it will return () and we do not make a suggestion to borrow that. + let ExprKind::Block(then, _) = then.kind else { return None }; + let Some(then) = then.expr else { return None }; + let (mut suggs, help, app, verbose, mutref) = + self.suggest_deref_or_ref(then, checked_ty, expected)?; + + // If there is no `else`, the return type of this `if` will be (), so suggesting to change the `then` block is useless + let els_expr = match els?.kind { + ExprKind::Block(block, _) => block.expr?, + _ => els?, + }; + let (else_suggs, ..) 
= + self.suggest_deref_or_ref(els_expr, checked_ty, expected)?; + suggs.extend(else_suggs); + + return Some((suggs, help, app, verbose, mutref)); + } + if let Some((sugg, msg)) = self.can_use_as_ref(expr) { return Some(( sugg, diff --git a/compiler/rustc_interface/src/passes.rs b/compiler/rustc_interface/src/passes.rs index 75d92ae7a2e..5bc7559d29a 100644 --- a/compiler/rustc_interface/src/passes.rs +++ b/compiler/rustc_interface/src/passes.rs @@ -1012,10 +1012,6 @@ fn run_required_analyses(tcx: TyCtxt<'_>) { { tcx.ensure_ok().mir_drops_elaborated_and_const_checked(def_id); } - }); - }); - sess.time("coroutine_obligations", || { - tcx.par_hir_body_owners(|def_id| { if tcx.is_coroutine(def_id.to_def_id()) { tcx.ensure_ok().mir_coroutine_witnesses(def_id); let _ = tcx.ensure_ok().check_coroutine_obligations( diff --git a/compiler/rustc_lint/src/builtin.rs b/compiler/rustc_lint/src/builtin.rs index 95e31e4af1e..47b80135bae 100644 --- a/compiler/rustc_lint/src/builtin.rs +++ b/compiler/rustc_lint/src/builtin.rs @@ -545,22 +545,22 @@ impl<'tcx> LateLintPass<'tcx> for MissingCopyImplementations { return; } let (def, ty) = match item.kind { - hir::ItemKind::Struct(_, _, ast_generics) => { - if !ast_generics.params.is_empty() { + hir::ItemKind::Struct(_, generics, _) => { + if !generics.params.is_empty() { return; } let def = cx.tcx.adt_def(item.owner_id); (def, Ty::new_adt(cx.tcx, def, ty::List::empty())) } - hir::ItemKind::Union(_, _, ast_generics) => { - if !ast_generics.params.is_empty() { + hir::ItemKind::Union(_, generics, _) => { + if !generics.params.is_empty() { return; } let def = cx.tcx.adt_def(item.owner_id); (def, Ty::new_adt(cx.tcx, def, ty::List::empty())) } - hir::ItemKind::Enum(_, _, ast_generics) => { - if !ast_generics.params.is_empty() { + hir::ItemKind::Enum(_, generics, _) => { + if !generics.params.is_empty() { return; } let def = cx.tcx.adt_def(item.owner_id); @@ -1422,7 +1422,7 @@ impl TypeAliasBounds { impl<'tcx> LateLintPass<'tcx> for TypeAliasBounds { fn check_item(&mut self, cx: &LateContext<'_>, item: &hir::Item<'_>) { - let hir::ItemKind::TyAlias(_, hir_ty, generics) = item.kind else { return }; + let hir::ItemKind::TyAlias(_, generics, hir_ty) = item.kind else { return }; // There must not be a where clause. 
if generics.predicates.is_empty() { @@ -2125,9 +2125,9 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { use rustc_middle::middle::resolve_bound_vars::ResolvedArg; let def_id = item.owner_id.def_id; - if let hir::ItemKind::Struct(_, _, hir_generics) - | hir::ItemKind::Enum(_, _, hir_generics) - | hir::ItemKind::Union(_, _, hir_generics) = item.kind + if let hir::ItemKind::Struct(_, generics, _) + | hir::ItemKind::Enum(_, generics, _) + | hir::ItemKind::Union(_, generics, _) = item.kind { let inferred_outlives = cx.tcx.inferred_outlives_of(def_id); if inferred_outlives.is_empty() { @@ -2135,7 +2135,7 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { } let ty_generics = cx.tcx.generics_of(def_id); - let num_where_predicates = hir_generics + let num_where_predicates = generics .predicates .iter() .filter(|predicate| predicate.kind.in_where_clause()) @@ -2145,7 +2145,7 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { let mut lint_spans = Vec::new(); let mut where_lint_spans = Vec::new(); let mut dropped_where_predicate_count = 0; - for (i, where_predicate) in hir_generics.predicates.iter().enumerate() { + for (i, where_predicate) in generics.predicates.iter().enumerate() { let (relevant_lifetimes, bounds, predicate_span, in_where_clause) = match where_predicate.kind { hir::WherePredicateKind::RegionPredicate(predicate) => { @@ -2228,7 +2228,7 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { } else if i + 1 < num_where_predicates { // If all the bounds on a predicate were inferable and there are // further predicates, we want to eat the trailing comma. - let next_predicate_span = hir_generics.predicates[i + 1].span; + let next_predicate_span = generics.predicates[i + 1].span; if next_predicate_span.from_expansion() { where_lint_spans.push(predicate_span); } else { @@ -2237,7 +2237,7 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { } } else { // Eat the optional trailing comma after the last predicate. - let where_span = hir_generics.where_clause_span; + let where_span = generics.where_clause_span; if where_span.from_expansion() { where_lint_spans.push(predicate_span); } else { @@ -2255,18 +2255,18 @@ impl<'tcx> LateLintPass<'tcx> for ExplicitOutlivesRequirements { // If all predicates in where clause are inferable, drop the entire clause // (including the `where`) - if hir_generics.has_where_clause_predicates + if generics.has_where_clause_predicates && dropped_where_predicate_count == num_where_predicates { - let where_span = hir_generics.where_clause_span; + let where_span = generics.where_clause_span; // Extend the where clause back to the closing `>` of the // generics, except for tuple struct, which have the `where` // after the fields of the struct. 
let full_where_span = - if let hir::ItemKind::Struct(_, hir::VariantData::Tuple(..), _) = item.kind { + if let hir::ItemKind::Struct(_, _, hir::VariantData::Tuple(..)) = item.kind { where_span } else { - hir_generics.span.shrink_to_hi().to(where_span) + generics.span.shrink_to_hi().to(where_span) }; // Due to macro expansions, the `full_where_span` might not actually contain all diff --git a/compiler/rustc_lint/src/default_could_be_derived.rs b/compiler/rustc_lint/src/default_could_be_derived.rs index 78d129642dc..7734f441df2 100644 --- a/compiler/rustc_lint/src/default_could_be_derived.rs +++ b/compiler/rustc_lint/src/default_could_be_derived.rs @@ -95,8 +95,8 @@ impl<'tcx> LateLintPass<'tcx> for DefaultCouldBeDerived { kind: hir::ItemKind::Struct( _, - hir::VariantData::Struct { fields, recovered: _ }, _generics, + hir::VariantData::Struct { fields, recovered: _ }, ), .. })) => fields.iter().map(|f| (f.ident.name, f)).collect::<FxHashMap<_, _>>(), diff --git a/compiler/rustc_lint/src/non_local_def.rs b/compiler/rustc_lint/src/non_local_def.rs index 9ed11d9cc82..b877f909fc0 100644 --- a/compiler/rustc_lint/src/non_local_def.rs +++ b/compiler/rustc_lint/src/non_local_def.rs @@ -183,7 +183,7 @@ impl<'tcx> LateLintPass<'tcx> for NonLocalDefinitions { && parent_opt_item_name != Some(kw::Underscore) && let Some(parent) = parent.as_local() && let Node::Item(item) = cx.tcx.hir_node_by_def_id(parent) - && let ItemKind::Const(ident, ty, _, _) = item.kind + && let ItemKind::Const(ident, _, ty, _) = item.kind && let TyKind::Tup(&[]) = ty.kind { Some(ident.span) diff --git a/compiler/rustc_lint/src/nonstandard_style.rs b/compiler/rustc_lint/src/nonstandard_style.rs index 048d377b78f..31c18074466 100644 --- a/compiler/rustc_lint/src/nonstandard_style.rs +++ b/compiler/rustc_lint/src/nonstandard_style.rs @@ -513,7 +513,7 @@ impl<'tcx> LateLintPass<'tcx> for NonUpperCaseGlobals { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { let attrs = cx.tcx.hir_attrs(it.hir_id()); match it.kind { - hir::ItemKind::Static(ident, ..) + hir::ItemKind::Static(_, ident, ..) if !ast::attr::contains_name(attrs, sym::no_mangle) => { NonUpperCaseGlobals::check_upper_case(cx, "static variable", &ident); diff --git a/compiler/rustc_lint/src/types.rs b/compiler/rustc_lint/src/types.rs index af134622d38..77dc6335113 100644 --- a/compiler/rustc_lint/src/types.rs +++ b/compiler/rustc_lint/src/types.rs @@ -1710,7 +1710,7 @@ impl ImproperCTypesDefinitions { && cx.tcx.sess.target.os == "aix" && !adt_def.all_fields().next().is_none() { - let struct_variant_data = item.expect_struct().1; + let struct_variant_data = item.expect_struct().2; for field_def in struct_variant_data.fields().iter().skip(1) { // Struct fields (after the first field) are checked for the // power alignment rule, as fields after the first are likely @@ -1735,9 +1735,9 @@ impl ImproperCTypesDefinitions { impl<'tcx> LateLintPass<'tcx> for ImproperCTypesDefinitions { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx hir::Item<'tcx>) { match item.kind { - hir::ItemKind::Static(_, ty, ..) - | hir::ItemKind::Const(_, ty, ..) - | hir::ItemKind::TyAlias(_, ty, ..) 
=> { + hir::ItemKind::Static(_, _, ty, _) + | hir::ItemKind::Const(_, _, ty, _) + | hir::ItemKind::TyAlias(_, _, ty) => { self.check_ty_maybe_containing_foreign_fnptr( cx, ty, @@ -1804,7 +1804,7 @@ declare_lint_pass!(VariantSizeDifferences => [VARIANT_SIZE_DIFFERENCES]); impl<'tcx> LateLintPass<'tcx> for VariantSizeDifferences { fn check_item(&mut self, cx: &LateContext<'_>, it: &hir::Item<'_>) { - if let hir::ItemKind::Enum(_, ref enum_definition, _) = it.kind { + if let hir::ItemKind::Enum(_, _, ref enum_definition) = it.kind { let t = cx.tcx.type_of(it.owner_id).instantiate_identity(); let ty = cx.tcx.erase_regions(t); let Ok(layout) = cx.layout_of(ty) else { return }; diff --git a/compiler/rustc_metadata/src/creader.rs b/compiler/rustc_metadata/src/creader.rs index 802d75241de..de19c10bc57 100644 --- a/compiler/rustc_metadata/src/creader.rs +++ b/compiler/rustc_metadata/src/creader.rs @@ -21,6 +21,7 @@ use rustc_hir::def_id::{CrateNum, LOCAL_CRATE, LocalDefId, StableCrateId}; use rustc_hir::definitions::Definitions; use rustc_index::IndexVec; use rustc_middle::bug; +use rustc_middle::ty::data_structures::IndexSet; use rustc_middle::ty::{TyCtxt, TyCtxtFeed}; use rustc_proc_macro::bridge::client::ProcMacro; use rustc_session::config::{ @@ -281,7 +282,7 @@ impl CStore { .filter_map(|(cnum, data)| data.as_deref_mut().map(|data| (cnum, data))) } - fn push_dependencies_in_postorder(&self, deps: &mut Vec<CrateNum>, cnum: CrateNum) { + fn push_dependencies_in_postorder(&self, deps: &mut IndexSet<CrateNum>, cnum: CrateNum) { if !deps.contains(&cnum) { let data = self.get_crate_data(cnum); for dep in data.dependencies() { @@ -290,12 +291,12 @@ impl CStore { } } - deps.push(cnum); + deps.insert(cnum); } } - pub(crate) fn crate_dependencies_in_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> { - let mut deps = Vec::new(); + pub(crate) fn crate_dependencies_in_postorder(&self, cnum: CrateNum) -> IndexSet<CrateNum> { + let mut deps = IndexSet::default(); if cnum == LOCAL_CRATE { for (cnum, _) in self.iter_crate_data() { self.push_dependencies_in_postorder(&mut deps, cnum); @@ -306,10 +307,11 @@ impl CStore { deps } - fn crate_dependencies_in_reverse_postorder(&self, cnum: CrateNum) -> Vec<CrateNum> { - let mut deps = self.crate_dependencies_in_postorder(cnum); - deps.reverse(); - deps + fn crate_dependencies_in_reverse_postorder( + &self, + cnum: CrateNum, + ) -> impl Iterator<Item = CrateNum> { + self.crate_dependencies_in_postorder(cnum).into_iter().rev() } pub(crate) fn injected_panic_runtime(&self) -> Option<CrateNum> { diff --git a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs index f40a2374bea..375928c2245 100644 --- a/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs +++ b/compiler/rustc_metadata/src/rmeta/decoder/cstore_impl.rs @@ -549,8 +549,9 @@ pub(in crate::rmeta) fn provide(providers: &mut Providers) { has_global_allocator: |tcx, LocalCrate| CStore::from_tcx(tcx).has_global_allocator(), has_alloc_error_handler: |tcx, LocalCrate| CStore::from_tcx(tcx).has_alloc_error_handler(), postorder_cnums: |tcx, ()| { - tcx.arena - .alloc_slice(&CStore::from_tcx(tcx).crate_dependencies_in_postorder(LOCAL_CRATE)) + tcx.arena.alloc_from_iter( + CStore::from_tcx(tcx).crate_dependencies_in_postorder(LOCAL_CRATE).into_iter(), + ) }, crates: |tcx, ()| { // The list of loaded crates is now frozen in query cache, diff --git a/compiler/rustc_middle/src/hir/map.rs b/compiler/rustc_middle/src/hir/map.rs index 9c8f1c9eccf..3de97c8c0d9 
100644 --- a/compiler/rustc_middle/src/hir/map.rs +++ b/compiler/rustc_middle/src/hir/map.rs @@ -920,7 +920,7 @@ impl<'tcx> TyCtxt<'tcx> { }) => until_within(*outer_span, generics.where_clause_span), // Constants and Statics. Node::Item(Item { - kind: ItemKind::Const(_, ty, ..) | ItemKind::Static(_, ty, ..), + kind: ItemKind::Const(_, _, ty, _) | ItemKind::Static(_, _, ty, _), span: outer_span, .. }) diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 77b9becba57..404674c359e 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -1882,10 +1882,8 @@ impl<'tcx> Ty<'tcx> { // Needs normalization or revealing to determine, so no is the safe answer. ty::Alias(..) => false, - ty::Param(..) | ty::Placeholder(..) | ty::Infer(..) | ty::Error(..) => false, - - ty::Bound(..) => { - bug!("`is_trivially_pure_clone_copy` applied to unexpected type: {:?}", self); + ty::Param(..) | ty::Placeholder(..) | ty::Bound(..) | ty::Infer(..) | ty::Error(..) => { + false } } } diff --git a/compiler/rustc_mir_build/src/builder/mod.rs b/compiler/rustc_mir_build/src/builder/mod.rs index 127b191e335..3d5f6f4cf45 100644 --- a/compiler/rustc_mir_build/src/builder/mod.rs +++ b/compiler/rustc_mir_build/src/builder/mod.rs @@ -558,7 +558,7 @@ fn construct_const<'a, 'tcx>( // Figure out what primary body this item has. let (span, const_ty_span) = match tcx.hir_node(hir_id) { Node::Item(hir::Item { - kind: hir::ItemKind::Static(_, ty, _, _) | hir::ItemKind::Const(_, ty, _, _), + kind: hir::ItemKind::Static(_, _, ty, _) | hir::ItemKind::Const(_, _, ty, _), span, .. }) diff --git a/compiler/rustc_mir_transform/src/lower_intrinsics.rs b/compiler/rustc_mir_transform/src/lower_intrinsics.rs index 9c21bcfc0d2..52f4c39c09b 100644 --- a/compiler/rustc_mir_transform/src/lower_intrinsics.rs +++ b/compiler/rustc_mir_transform/src/lower_intrinsics.rs @@ -262,6 +262,52 @@ impl<'tcx> crate::MirPass<'tcx> for LowerIntrinsics { }); terminator.kind = TerminatorKind::Goto { target }; } + sym::slice_get_unchecked => { + let target = target.unwrap(); + let Ok([ptrish, index]) = take_array(args) else { + span_bug!( + terminator.source_info.span, + "Wrong number of arguments for {intrinsic:?}", + ); + }; + + let place = ptrish.node.place().unwrap(); + assert!(!place.is_indirect()); + let updated_place = place.project_deeper( + &[ + ProjectionElem::Deref, + ProjectionElem::Index( + index.node.place().unwrap().as_local().unwrap(), + ), + ], + tcx, + ); + + let ret_ty = generic_args.type_at(0); + let rvalue = match *ret_ty.kind() { + ty::RawPtr(_, Mutability::Not) => { + Rvalue::RawPtr(RawPtrKind::Const, updated_place) + } + ty::RawPtr(_, Mutability::Mut) => { + Rvalue::RawPtr(RawPtrKind::Mut, updated_place) + } + ty::Ref(region, _, Mutability::Not) => { + Rvalue::Ref(region, BorrowKind::Shared, updated_place) + } + ty::Ref(region, _, Mutability::Mut) => Rvalue::Ref( + region, + BorrowKind::Mut { kind: MutBorrowKind::Default }, + updated_place, + ), + _ => bug!("Unknown return type {ret_ty:?}"), + }; + + block.statements.push(Statement { + source_info: terminator.source_info, + kind: StatementKind::Assign(Box::new((*destination, rvalue))), + }); + terminator.kind = TerminatorKind::Goto { target }; + } sym::transmute | sym::transmute_unchecked => { let dst_ty = destination.ty(local_decls, tcx).ty; let Ok([arg]) = take_array(args) else { diff --git a/compiler/rustc_mir_transform/src/match_branches.rs b/compiler/rustc_mir_transform/src/match_branches.rs index 
8c0c3096899..5e511f1a418 100644 --- a/compiler/rustc_mir_transform/src/match_branches.rs +++ b/compiler/rustc_mir_transform/src/match_branches.rs @@ -284,12 +284,14 @@ fn can_cast( let v = match src_layout.ty.kind() { ty::Uint(_) => from_scalar.to_uint(src_layout.size), ty::Int(_) => from_scalar.to_int(src_layout.size) as u128, - _ => unreachable!("invalid int"), + // We can also transform the values of other integer representations (such as char), + // although this may not be practical in real-world scenarios. + _ => return false, }; let size = match *cast_ty.kind() { ty::Int(t) => Integer::from_int_ty(&tcx, t).size(), ty::Uint(t) => Integer::from_uint_ty(&tcx, t).size(), - _ => unreachable!("invalid int"), + _ => return false, }; let v = size.truncate(v); let cast_scalar = ScalarInt::try_from_uint(v, size).unwrap(); diff --git a/compiler/rustc_next_trait_solver/src/delegate.rs b/compiler/rustc_next_trait_solver/src/delegate.rs index 32dc85b3e6a..7b932010d49 100644 --- a/compiler/rustc_next_trait_solver/src/delegate.rs +++ b/compiler/rustc_next_trait_solver/src/delegate.rs @@ -3,8 +3,6 @@ use std::ops::Deref; use rustc_type_ir::solve::{Certainty, Goal, NoSolution}; use rustc_type_ir::{self as ty, InferCtxtLike, Interner, TypeFoldable}; -use crate::solve::HasChanged; - pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized { type Infcx: InferCtxtLike<Interner = Self::Interner>; type Interner: Interner; @@ -23,7 +21,7 @@ pub trait SolverDelegate: Deref<Target = Self::Infcx> + Sized { &self, goal: Goal<Self::Interner, <Self::Interner as Interner>::Predicate>, span: <Self::Interner as Interner>::Span, - ) -> Option<HasChanged>; + ) -> Option<Certainty>; fn fresh_var_for_kind_with_span( &self, diff --git a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs index b1a842e04af..38d7ff576a5 100644 --- a/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs +++ b/compiler/rustc_next_trait_solver/src/solve/eval_ctxt/mod.rs @@ -671,10 +671,13 @@ where // If this loop did not result in any progress, what's our final certainty. 
let mut unchanged_certainty = Some(Certainty::Yes); for (source, goal, stalled_on) in mem::take(&mut self.nested_goals) { - if let Some(has_changed) = self.delegate.compute_goal_fast_path(goal, self.origin_span) - { - if matches!(has_changed, HasChanged::Yes) { - unchanged_certainty = None; + if let Some(certainty) = self.delegate.compute_goal_fast_path(goal, self.origin_span) { + match certainty { + Certainty::Yes => {} + Certainty::Maybe(_) => { + self.nested_goals.push((source, goal, None)); + unchanged_certainty = unchanged_certainty.map(|c| c.and(certainty)); + } } continue; } diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 2a7910a6af4..1a44f4af8a6 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -3311,26 +3311,44 @@ impl<'a> Parser<'a> { let sm = this.psess.source_map(); if let Ok(expr_lines) = sm.span_to_lines(expr_span) && let Ok(arm_start_lines) = sm.span_to_lines(arm_start_span) - && arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col && expr_lines.lines.len() == 2 { - // We check whether there's any trailing code in the parse span, - // if there isn't, we very likely have the following: - // - // X | &Y => "y" - // | -- - missing comma - // | | - // | arrow_span - // X | &X => "x" - // | - ^^ self.token.span - // | | - // | parsed until here as `"y" & X` - err.span_suggestion_short( - arm_start_span.shrink_to_hi(), - "missing a comma here to end this `match` arm", - ",", - Applicability::MachineApplicable, - ); + if arm_start_lines.lines[0].end_col == expr_lines.lines[0].end_col { + // We check whether there's any trailing code in the parse span, + // if there isn't, we very likely have the following: + // + // X | &Y => "y" + // | -- - missing comma + // | | + // | arrow_span + // X | &X => "x" + // | - ^^ self.token.span + // | | + // | parsed until here as `"y" & X` + err.span_suggestion_short( + arm_start_span.shrink_to_hi(), + "missing a comma here to end this `match` arm", + ",", + Applicability::MachineApplicable, + ); + } else if arm_start_lines.lines[0].end_col + rustc_span::CharPos(1) + == expr_lines.lines[0].end_col + { + // similar to the above, but we may typo a `.` or `/` at the end of the line + let comma_span = arm_start_span + .shrink_to_hi() + .with_hi(arm_start_span.hi() + rustc_span::BytePos(1)); + if let Ok(res) = sm.span_to_snippet(comma_span) + && (res == "." || res == "/") + { + err.span_suggestion_short( + comma_span, + "you might have meant to write a `,` to end this `match` arm", + ",", + Applicability::MachineApplicable, + ); + } + } } } else { err.span_label( diff --git a/compiler/rustc_passes/src/check_attr.rs b/compiler/rustc_passes/src/check_attr.rs index df8b5a6b181..0777d442b59 100644 --- a/compiler/rustc_passes/src/check_attr.rs +++ b/compiler/rustc_passes/src/check_attr.rs @@ -802,7 +802,7 @@ impl<'tcx> CheckAttrVisitor<'tcx> { match target { Target::Struct => { if let Some(ItemLike::Item(hir::Item { - kind: hir::ItemKind::Struct(_, hir::VariantData::Struct { fields, .. }, _), + kind: hir::ItemKind::Struct(_, _, hir::VariantData::Struct { fields, .. }), .. 
})) = item && !fields.is_empty() @@ -1130,12 +1130,12 @@ impl<'tcx> CheckAttrVisitor<'tcx> { return; }; match item.kind { - ItemKind::Enum(_, _, generics) | ItemKind::Struct(_, _, generics) + ItemKind::Enum(_, generics, _) | ItemKind::Struct(_, generics, _) if generics.params.len() != 0 => {} ItemKind::Trait(_, _, _, generics, _, items) if generics.params.len() != 0 || items.iter().any(|item| matches!(item.kind, AssocItemKind::Type)) => {} - ItemKind::TyAlias(_, _, generics) if generics.params.len() != 0 => {} + ItemKind::TyAlias(_, generics, _) if generics.params.len() != 0 => {} _ => { self.dcx().emit_err(errors::DocSearchUnboxInvalid { span: meta.span() }); } @@ -2792,7 +2792,7 @@ impl<'tcx> Visitor<'tcx> for CheckAttrVisitor<'tcx> { } fn is_c_like_enum(item: &Item<'_>) -> bool { - if let ItemKind::Enum(_, ref def, _) = item.kind { + if let ItemKind::Enum(_, _, ref def) = item.kind { for variant in def.variants { match variant.data { hir::VariantData::Unit(..) => { /* continue */ } diff --git a/compiler/rustc_passes/src/dead.rs b/compiler/rustc_passes/src/dead.rs index f83c7471770..6b82252f32c 100644 --- a/compiler/rustc_passes/src/dead.rs +++ b/compiler/rustc_passes/src/dead.rs @@ -736,7 +736,7 @@ fn check_item<'tcx>( match tcx.def_kind(id.owner_id) { DefKind::Enum => { let item = tcx.hir_item(id); - if let hir::ItemKind::Enum(_, ref enum_def, _) = item.kind { + if let hir::ItemKind::Enum(_, _, ref enum_def) = item.kind { if let Some(comes_from_allow) = allow_dead_code { worklist.extend( enum_def.variants.iter().map(|variant| (variant.def_id, comes_from_allow)), @@ -783,7 +783,7 @@ fn check_item<'tcx>( } DefKind::Struct => { let item = tcx.hir_item(id); - if let hir::ItemKind::Struct(_, ref variant_data, _) = item.kind + if let hir::ItemKind::Struct(_, _, ref variant_data) = item.kind && let Some(ctor_def_id) = variant_data.ctor_def_id() { struct_constructors.insert(ctor_def_id, item.owner_id.def_id); @@ -1055,7 +1055,7 @@ impl<'tcx> DeadVisitor<'tcx> { let tuple_fields = if let Some(parent_id) = parent_item && let node = tcx.hir_node_by_def_id(parent_id) && let hir::Node::Item(hir::Item { - kind: hir::ItemKind::Struct(_, hir::VariantData::Tuple(fields, _, _), _), + kind: hir::ItemKind::Struct(_, _, hir::VariantData::Tuple(fields, _, _)), .. }) = node { diff --git a/compiler/rustc_passes/src/input_stats.rs b/compiler/rustc_passes/src/input_stats.rs index 71815448172..6852153a2e7 100644 --- a/compiler/rustc_passes/src/input_stats.rs +++ b/compiler/rustc_passes/src/input_stats.rs @@ -426,10 +426,16 @@ impl<'v> hir_visit::Visitor<'v> for StatCollector<'v> { hir_visit::walk_fn(self, fk, fd, b, id) } - fn visit_use(&mut self, p: &'v hir::UsePath<'v>, hir_id: HirId) { + fn visit_use(&mut self, p: &'v hir::UsePath<'v>, _hir_id: HirId) { // This is `visit_use`, but the type is `Path` so record it that way. self.record("Path", None, p); - hir_visit::walk_use(self, p, hir_id) + // Don't call `hir_visit::walk_use(self, p, hir_id)`: it calls + // `visit_path` up to three times, once for each namespace result in + // `p.res`, by building temporary `Path`s that are not part of the real + // HIR, which causes `p` to be double- or triple-counted. Instead just + // walk the path internals (i.e. the segments) directly. 
+ let hir::Path { span: _, res: _, segments } = *p; + ast_visit::walk_list!(self, visit_path_segment, segments); } fn visit_trait_item(&mut self, ti: &'v hir::TraitItem<'v>) { diff --git a/compiler/rustc_passes/src/stability.rs b/compiler/rustc_passes/src/stability.rs index 9884386d68f..45e26c8999a 100644 --- a/compiler/rustc_passes/src/stability.rs +++ b/compiler/rustc_passes/src/stability.rs @@ -430,7 +430,7 @@ impl<'a, 'tcx> Visitor<'tcx> for Annotator<'a, 'tcx> { kind = AnnotationKind::DeprecationProhibited; const_stab_inherit = InheritConstStability::Yes; } - hir::ItemKind::Struct(_, ref sd, _) => { + hir::ItemKind::Struct(_, _, ref sd) => { if let Some(ctor_def_id) = sd.ctor_def_id() { self.annotate( ctor_def_id, diff --git a/compiler/rustc_privacy/src/lib.rs b/compiler/rustc_privacy/src/lib.rs index e2dfaec61b3..963f4c77d80 100644 --- a/compiler/rustc_privacy/src/lib.rs +++ b/compiler/rustc_privacy/src/lib.rs @@ -599,8 +599,8 @@ impl<'tcx> EmbargoVisitor<'tcx> { DefKind::Struct | DefKind::Union => { // While structs and unions have type privacy, their fields do not. let item = self.tcx.hir_expect_item(def_id); - if let hir::ItemKind::Struct(_, ref struct_def, _) - | hir::ItemKind::Union(_, ref struct_def, _) = item.kind + if let hir::ItemKind::Struct(_, _, ref struct_def) + | hir::ItemKind::Union(_, _, ref struct_def) = item.kind { for field in struct_def.fields() { let field_vis = self.tcx.local_visibility(field.def_id); @@ -725,7 +725,7 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> { } } } - hir::ItemKind::Enum(_, ref def, _) => { + hir::ItemKind::Enum(_, _, ref def) => { if let Some(item_ev) = item_ev { self.reach(item.owner_id.def_id, item_ev).generics().predicates(); } @@ -763,8 +763,8 @@ impl<'tcx> Visitor<'tcx> for EmbargoVisitor<'tcx> { } } } - hir::ItemKind::Struct(_, ref struct_def, _) - | hir::ItemKind::Union(_, ref struct_def, _) => { + hir::ItemKind::Struct(_, _, ref struct_def) + | hir::ItemKind::Union(_, _, ref struct_def) => { if let Some(item_ev) = item_ev { self.reach(item.owner_id.def_id, item_ev).generics().predicates(); for field in struct_def.fields() { @@ -868,7 +868,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TestReachabilityVisitor<'a, 'tcx> { self.effective_visibility_diagnostic(item.owner_id.def_id); match item.kind { - hir::ItemKind::Enum(_, ref def, _) => { + hir::ItemKind::Enum(_, _, ref def) => { for variant in def.variants.iter() { self.effective_visibility_diagnostic(variant.def_id); if let Some(ctor_def_id) = variant.data.ctor_def_id() { @@ -879,7 +879,7 @@ impl<'a, 'tcx> Visitor<'tcx> for TestReachabilityVisitor<'a, 'tcx> { } } } - hir::ItemKind::Struct(_, ref def, _) | hir::ItemKind::Union(_, ref def, _) => { + hir::ItemKind::Struct(_, _, ref def) | hir::ItemKind::Union(_, _, ref def) => { if let Some(ctor_def_id) = def.ctor_def_id() { self.effective_visibility_diagnostic(ctor_def_id); } @@ -1651,7 +1651,7 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'_, 'tcx> { } DefKind::Enum => { let item = tcx.hir_item(id); - if let hir::ItemKind::Enum(_, ref def, _) = item.kind { + if let hir::ItemKind::Enum(_, _, ref def) = item.kind { self.check_unnameable(item.owner_id.def_id, effective_vis); self.check(item.owner_id.def_id, item_visibility, effective_vis) @@ -1689,8 +1689,8 @@ impl<'tcx> PrivateItemsInPublicInterfacesChecker<'_, 'tcx> { // Subitems of structs and unions have their own publicity. 
DefKind::Struct | DefKind::Union => { let item = tcx.hir_item(id); - if let hir::ItemKind::Struct(_, ref struct_def, _) - | hir::ItemKind::Union(_, ref struct_def, _) = item.kind + if let hir::ItemKind::Struct(_, _, ref struct_def) + | hir::ItemKind::Union(_, _, ref struct_def) = item.kind { self.check_unnameable(item.owner_id.def_id, effective_vis); self.check(item.owner_id.def_id, item_visibility, effective_vis) diff --git a/compiler/rustc_resolve/src/late.rs b/compiler/rustc_resolve/src/late.rs index 4cfa079e49b..744e99c86e1 100644 --- a/compiler/rustc_resolve/src/late.rs +++ b/compiler/rustc_resolve/src/late.rs @@ -415,7 +415,7 @@ pub(crate) enum AliasPossibility { } #[derive(Copy, Clone, Debug)] -pub(crate) enum PathSource<'a> { +pub(crate) enum PathSource<'a, 'c> { /// Type paths `Path`. Type, /// Trait paths in bounds or impls. @@ -429,7 +429,10 @@ pub(crate) enum PathSource<'a> { /// Paths in tuple struct patterns `Path(..)`. TupleStruct(Span, &'a [Span]), /// `m::A::B` in `<T as m::A>::B::C`. - TraitItem(Namespace), + /// + /// Second field holds the "cause" of this one, i.e. the context within + /// which the trait item is resolved. Used for diagnostics. + TraitItem(Namespace, &'c PathSource<'a, 'c>), /// Paths in delegation item Delegation, /// An arg in a `use<'a, N>` precise-capturing bound. @@ -440,7 +443,7 @@ pub(crate) enum PathSource<'a> { DefineOpaques, } -impl<'a> PathSource<'a> { +impl<'a> PathSource<'a, '_> { fn namespace(self) -> Namespace { match self { PathSource::Type @@ -452,7 +455,7 @@ impl<'a> PathSource<'a> { | PathSource::TupleStruct(..) | PathSource::Delegation | PathSource::ReturnTypeNotation => ValueNS, - PathSource::TraitItem(ns) => ns, + PathSource::TraitItem(ns, _) => ns, PathSource::PreciseCapturingArg(ns) => ns, } } @@ -480,8 +483,9 @@ impl<'a> PathSource<'a> { PathSource::Trait(_) => "trait", PathSource::Pat => "unit struct, unit variant or constant", PathSource::Struct => "struct, variant or union type", - PathSource::TupleStruct(..) => "tuple struct or tuple variant", - PathSource::TraitItem(ns) => match ns { + PathSource::TraitItem(ValueNS, PathSource::TupleStruct(..)) + | PathSource::TupleStruct(..) => "tuple struct or tuple variant", + PathSource::TraitItem(ns, _) => match ns { TypeNS => "associated type", ValueNS => "method or associated constant", MacroNS => bug!("associated macro"), @@ -585,7 +589,7 @@ impl<'a> PathSource<'a> { ) | Res::SelfTyParam { .. } | Res::SelfTyAlias { .. 
} ), - PathSource::TraitItem(ns) => match res { + PathSource::TraitItem(ns, _) => match res { Res::Def(DefKind::AssocConst | DefKind::AssocFn, _) if ns == ValueNS => true, Res::Def(DefKind::AssocTy, _) if ns == TypeNS => true, _ => false, @@ -934,8 +938,7 @@ impl<'ra: 'ast, 'ast, 'tcx> Visitor<'ast> for LateResolutionVisitor<'_, 'ast, 'r ) } TyKind::UnsafeBinder(unsafe_binder) => { - // FIXME(unsafe_binder): Better span - let span = ty.span; + let span = ty.span.shrink_to_lo().to(unsafe_binder.inner_ty.span.shrink_to_lo()); self.with_generic_param_rib( &unsafe_binder.generic_params, RibKind::Normal, @@ -2008,7 +2011,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { &mut self, partial_res: PartialRes, path: &[Segment], - source: PathSource<'_>, + source: PathSource<'_, '_>, path_span: Span, ) { let proj_start = path.len() - partial_res.unresolved_segments(); @@ -4207,7 +4210,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { id: NodeId, qself: &Option<P<QSelf>>, path: &Path, - source: PathSource<'ast>, + source: PathSource<'ast, '_>, ) { self.smart_resolve_path_fragment( qself, @@ -4224,7 +4227,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { &mut self, qself: &Option<P<QSelf>>, path: &[Segment], - source: PathSource<'ast>, + source: PathSource<'ast, '_>, finalize: Finalize, record_partial_res: RecordPartialRes, parent_qself: Option<&QSelf>, @@ -4405,6 +4408,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { path_span, source.defer_to_typeck(), finalize, + source, ) { Ok(Some(partial_res)) if let Some(res) = partial_res.full_res() => { // if we also have an associated type that matches the ident, stash a suggestion @@ -4527,12 +4531,13 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { span: Span, defer_to_typeck: bool, finalize: Finalize, + source: PathSource<'ast, '_>, ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> { let mut fin_res = None; for (i, &ns) in [primary_ns, TypeNS, ValueNS].iter().enumerate() { if i == 0 || ns != primary_ns { - match self.resolve_qpath(qself, path, ns, finalize)? { + match self.resolve_qpath(qself, path, ns, finalize, source)? { Some(partial_res) if partial_res.unresolved_segments() == 0 || defer_to_typeck => { @@ -4569,6 +4574,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { path: &[Segment], ns: Namespace, finalize: Finalize, + source: PathSource<'ast, '_>, ) -> Result<Option<PartialRes>, Spanned<ResolutionError<'ra>>> { debug!( "resolve_qpath(qself={:?}, path={:?}, ns={:?}, finalize={:?})", @@ -4616,7 +4622,7 @@ impl<'a, 'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'a, 'ast, 'ra, 'tcx> { let partial_res = self.smart_resolve_path_fragment( &None, &path[..=qself.position], - PathSource::TraitItem(ns), + PathSource::TraitItem(ns, &source), Finalize::with_root_span(finalize.node_id, finalize.path_span, qself.path_span), RecordPartialRes::No, Some(&qself), diff --git a/compiler/rustc_resolve/src/late/diagnostics.rs b/compiler/rustc_resolve/src/late/diagnostics.rs index ca25cdc9563..97a45fcf233 100644 --- a/compiler/rustc_resolve/src/late/diagnostics.rs +++ b/compiler/rustc_resolve/src/late/diagnostics.rs @@ -175,7 +175,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, path: &[Segment], span: Span, - source: PathSource<'_>, + source: PathSource<'_, '_>, res: Option<Res>, ) -> BaseError { // Make the base error. 
@@ -421,7 +421,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { path: &[Segment], following_seg: Option<&Segment>, span: Span, - source: PathSource<'_>, + source: PathSource<'_, '_>, res: Option<Res>, qself: Option<&QSelf>, ) -> (Diag<'tcx>, Vec<ImportSuggestion>) { @@ -539,12 +539,12 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { path: &[Segment], following_seg: Option<&Segment>, span: Span, - source: PathSource<'_>, + source: PathSource<'_, '_>, res: Option<Res>, qself: Option<&QSelf>, ) { if let Some(Res::Def(DefKind::AssocFn, _)) = res - && let PathSource::TraitItem(TypeNS) = source + && let PathSource::TraitItem(TypeNS, _) = source && let None = following_seg && let Some(qself) = qself && let TyKind::Path(None, ty_path) = &qself.ty.kind @@ -650,7 +650,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn try_lookup_name_relaxed( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -940,7 +940,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_trait_and_bounds( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, res: Option<Res>, span: Span, base_error: &BaseError, @@ -1017,7 +1017,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_typo( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -1063,7 +1063,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_shadowed( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], following_seg: Option<&Segment>, span: Span, @@ -1096,7 +1096,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn err_code_special_cases( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], span: Span, ) { @@ -1141,7 +1141,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_self_ty( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], span: Span, ) -> bool { @@ -1164,7 +1164,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_self_value( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], span: Span, ) -> bool { @@ -1332,7 +1332,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_swapping_misplaced_self_ty_and_trait( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, res: Option<Res>, span: Span, ) { @@ -1361,7 +1361,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, res: Option<Res>, - source: PathSource<'_>, + source: PathSource<'_, '_>, ) { let PathSource::TupleStruct(_, _) = source else { return }; let Some(Res::Def(DefKind::Fn, _)) = res else { return }; @@ -1373,7 +1373,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, res: Option<Res>, - source: PathSource<'_>, + source: PathSource<'_, '_>, span: Span, ) { let PathSource::Trait(_) = source else { return }; @@ -1422,7 +1422,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { 
fn suggest_pattern_match_with_let( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, span: Span, ) -> bool { if let PathSource::Expr(_) = source @@ -1448,10 +1448,10 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn get_single_associated_item( &mut self, path: &[Segment], - source: &PathSource<'_>, + source: &PathSource<'_, '_>, filter_fn: &impl Fn(Res) -> bool, ) -> Option<TypoSuggestion> { - if let crate::PathSource::TraitItem(_) = source { + if let crate::PathSource::TraitItem(_, _) = source { let mod_path = &path[..path.len() - 1]; if let PathResult::Module(ModuleOrUniformRoot::Module(module)) = self.resolve_path(mod_path, None, None) @@ -1556,7 +1556,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { /// Check if the source is call expression and the first argument is `self`. If true, /// return the span of whole call and the span for all arguments expect the first one (`self`). - fn call_has_self_arg(&self, source: PathSource<'_>) -> Option<(Span, Option<Span>)> { + fn call_has_self_arg(&self, source: PathSource<'_, '_>) -> Option<(Span, Option<Span>)> { let mut has_self_arg = None; if let PathSource::Expr(Some(parent)) = source && let ExprKind::Call(_, args) = &parent.kind @@ -1614,7 +1614,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { &mut self, err: &mut Diag<'_>, span: Span, - source: PathSource<'_>, + source: PathSource<'_, '_>, path: &[Segment], res: Res, path_str: &str, @@ -1666,7 +1666,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { } }; - let find_span = |source: &PathSource<'_>, err: &mut Diag<'_>| { + let find_span = |source: &PathSource<'_, '_>, err: &mut Diag<'_>| { match source { PathSource::Expr(Some(Expr { span, kind: ExprKind::Call(_, _), .. })) | PathSource::TupleStruct(span, _) => { @@ -2050,8 +2050,86 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { err.span_label(span, fallback_label.to_string()); err.note("can't use `Self` as a constructor, you must use the implemented struct"); } - (Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), _) if ns == ValueNS => { + ( + Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), + PathSource::TraitItem(ValueNS, PathSource::TupleStruct(whole, args)), + ) => { + err.note("can't use a type alias as tuple pattern"); + + let mut suggestion = Vec::new(); + + if let &&[first, ..] = args + && let &&[.., last] = args + { + suggestion.extend([ + // "0: " has to be included here so that the fix is machine applicable. + // + // If this would only add " { " and then the code below add "0: ", + // rustfix would crash, because end of this suggestion is the same as start + // of the suggestion below. Thus, we have to merge these... + (span.between(first), " { 0: ".to_owned()), + (last.between(whole.shrink_to_hi()), " }".to_owned()), + ]); + + suggestion.extend( + args.iter() + .enumerate() + .skip(1) // See above + .map(|(index, &arg)| (arg.shrink_to_lo(), format!("{index}: "))), + ) + } else { + suggestion.push((span.between(whole.shrink_to_hi()), " {}".to_owned())); + } + + err.multipart_suggestion( + "use struct pattern instead", + suggestion, + Applicability::MachineApplicable, + ); + } + ( + Res::Def(DefKind::TyAlias | DefKind::AssocTy, _), + PathSource::TraitItem( + ValueNS, + PathSource::Expr(Some(ast::Expr { + span: whole, + kind: ast::ExprKind::Call(_, args), + .. 
+ })), + ), + ) => { err.note("can't use a type alias as a constructor"); + + let mut suggestion = Vec::new(); + + if let [first, ..] = &**args + && let [.., last] = &**args + { + suggestion.extend([ + // "0: " has to be included here so that the fix is machine applicable. + // + // If this would only add " { " and then the code below add "0: ", + // rustfix would crash, because end of this suggestion is the same as start + // of the suggestion below. Thus, we have to merge these... + (span.between(first.span), " { 0: ".to_owned()), + (last.span.between(whole.shrink_to_hi()), " }".to_owned()), + ]); + + suggestion.extend( + args.iter() + .enumerate() + .skip(1) // See above + .map(|(index, arg)| (arg.span.shrink_to_lo(), format!("{index}: "))), + ) + } else { + suggestion.push((span.between(whole.shrink_to_hi()), " {}".to_owned())); + } + + err.multipart_suggestion( + "use struct expression instead", + suggestion, + Applicability::MachineApplicable, + ); } _ => return false, } @@ -2621,7 +2699,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { fn suggest_using_enum_variant( &mut self, err: &mut Diag<'_>, - source: PathSource<'_>, + source: PathSource<'_, '_>, def_id: DefId, span: Span, ) { @@ -2799,7 +2877,7 @@ impl<'ast, 'ra: 'ast, 'tcx> LateResolutionVisitor<'_, 'ast, 'ra, 'tcx> { pub(crate) fn suggest_adding_generic_parameter( &self, path: &[Segment], - source: PathSource<'_>, + source: PathSource<'_, '_>, ) -> Option<(Span, &'static str, String, Applicability)> { let (ident, span) = match path { [segment] diff --git a/compiler/rustc_resolve/src/rustdoc.rs b/compiler/rustc_resolve/src/rustdoc.rs index 01bb1324645..fa839d2748d 100644 --- a/compiler/rustc_resolve/src/rustdoc.rs +++ b/compiler/rustc_resolve/src/rustdoc.rs @@ -12,10 +12,14 @@ use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::unord::UnordSet; use rustc_middle::ty::TyCtxt; use rustc_span::def_id::DefId; +use rustc_span::source_map::SourceMap; use rustc_span::{DUMMY_SP, InnerSpan, Span, Symbol, sym}; use thin_vec::ThinVec; use tracing::{debug, trace}; +#[cfg(test)] +mod tests; + #[derive(Clone, Copy, PartialEq, Eq, Debug)] pub enum DocFragmentKind { /// A doc fragment created from a `///` or `//!` doc comment. @@ -532,9 +536,19 @@ pub fn source_span_for_markdown_range( md_range: &Range<usize>, fragments: &[DocFragment], ) -> Option<Span> { + let map = tcx.sess.source_map(); + source_span_for_markdown_range_inner(map, markdown, md_range, fragments) +} + +// inner function used for unit testing +pub fn source_span_for_markdown_range_inner( + map: &SourceMap, + markdown: &str, + md_range: &Range<usize>, + fragments: &[DocFragment], +) -> Option<Span> { use rustc_span::BytePos; - let map = tcx.sess.source_map(); if let &[fragment] = &fragments && fragment.kind == DocFragmentKind::RawDoc && let Ok(snippet) = map.span_to_snippet(fragment.span) @@ -570,7 +584,13 @@ pub fn source_span_for_markdown_range( { // If there is either a match in a previous fragment, or // multiple matches in this fragment, there is ambiguity. - if match_data.is_none() && !snippet[match_start + 1..].contains(pat) { + // the snippet cannot be zero-sized, because it matches + // the pattern, which is checked to not be zero sized. + if match_data.is_none() + && !snippet.as_bytes()[match_start + 1..] + .windows(pat.len()) + .any(|s| s == pat.as_bytes()) + { match_data = Some((i, match_start)); } else { // Heirustic produced ambiguity, return nothing. 
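The changed ambiguity check above switches from `str::contains` on `&snippet[match_start + 1..]` to a byte-wise `windows` search, because `match_start + 1` can land inside a multi-byte character and slicing a `&str` there would panic; the new unit tests in the following file exercise exactly that with a 3-byte `⚠` in a raw doc attribute. A standalone sketch of the same idea, with hypothetical helper names:

```rust
// Returns true if `pat` occurs again in `s` after byte offset `start`.
// Searching the raw bytes sidesteps the char-boundary requirement that a
// `&s[start + 1..]` slice would impose; `pat` must be non-empty, matching the
// zero-size check mentioned in the patched comment.
fn occurs_again_after(s: &str, pat: &str, start: usize) -> bool {
    assert!(!pat.is_empty());
    s.as_bytes()[start + 1..]
        .windows(pat.len())
        .any(|window| window == pat.as_bytes())
}

fn main() {
    let s = "⚠a⚠";
    assert!(occurs_again_after(s, "⚠", 0));  // found again at byte offset 4
    assert!(!occurs_again_after(s, "a", 3)); // `a` does not repeat
    // By contrast, `&s[1..]` would panic: byte 1 is inside the first `⚠`.
}
```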
diff --git a/compiler/rustc_resolve/src/rustdoc/tests.rs b/compiler/rustc_resolve/src/rustdoc/tests.rs new file mode 100644 index 00000000000..221ac907e7c --- /dev/null +++ b/compiler/rustc_resolve/src/rustdoc/tests.rs @@ -0,0 +1,50 @@ +use std::path::PathBuf; + +use rustc_span::source_map::{FilePathMapping, SourceMap}; +use rustc_span::symbol::sym; +use rustc_span::{BytePos, Span}; + +use super::{DocFragment, DocFragmentKind, source_span_for_markdown_range_inner}; + +#[test] +fn single_backtick() { + let sm = SourceMap::new(FilePathMapping::empty()); + sm.new_source_file(PathBuf::from("foo.rs").into(), r#"#[doc = "`"] fn foo() {}"#.to_string()); + let span = source_span_for_markdown_range_inner( + &sm, + "`", + &(0..1), + &[DocFragment { + span: Span::with_root_ctxt(BytePos(8), BytePos(11)), + item_id: None, + kind: DocFragmentKind::RawDoc, + doc: sym::empty, // unused placeholder + indent: 0, + }], + ) + .unwrap(); + assert_eq!(span.lo(), BytePos(9)); + assert_eq!(span.hi(), BytePos(10)); +} + +#[test] +fn utf8() { + // regression test for https://github.com/rust-lang/rust/issues/141665 + let sm = SourceMap::new(FilePathMapping::empty()); + sm.new_source_file(PathBuf::from("foo.rs").into(), r#"#[doc = "⚠"] fn foo() {}"#.to_string()); + let span = source_span_for_markdown_range_inner( + &sm, + "⚠", + &(0..3), + &[DocFragment { + span: Span::with_root_ctxt(BytePos(8), BytePos(14)), + item_id: None, + kind: DocFragmentKind::RawDoc, + doc: sym::empty, // unused placeholder + indent: 0, + }], + ) + .unwrap(); + assert_eq!(span.lo(), BytePos(9)); + assert_eq!(span.hi(), BytePos(12)); +} diff --git a/compiler/rustc_session/src/filesearch.rs b/compiler/rustc_session/src/filesearch.rs index 92f1bd8ab73..0e711890e07 100644 --- a/compiler/rustc_session/src/filesearch.rs +++ b/compiler/rustc_session/src/filesearch.rs @@ -172,6 +172,11 @@ fn current_dll_path() -> Result<PathBuf, String> { Ok(OsString::from_wide(&filename).into()) } +#[cfg(target_os = "wasi")] +fn current_dll_path() -> Result<PathBuf, String> { + Err("current_dll_path is not supported on WASI".to_string()) +} + pub fn sysroot_candidates() -> SmallVec<[PathBuf; 2]> { let target = crate::config::host_tuple(); let mut sysroot_candidates: SmallVec<[PathBuf; 2]> = smallvec![get_or_default_sysroot()]; diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index ae94169b01d..970faf2997c 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -2000,6 +2000,7 @@ symbols! { slice, slice_from_raw_parts, slice_from_raw_parts_mut, + slice_get_unchecked, slice_into_vec, slice_iter, slice_len_fn, diff --git a/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_gnullvm.rs b/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_gnullvm.rs index a8b133d19bb..eee668cc67e 100644 --- a/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_gnullvm.rs +++ b/compiler/rustc_target/src/spec/targets/aarch64_pc_windows_gnullvm.rs @@ -1,4 +1,4 @@ -use crate::spec::{Target, TargetMetadata, base}; +use crate::spec::{FramePointer, Target, TargetMetadata, base}; pub(crate) fn target() -> Target { let mut base = base::windows_gnullvm::opts(); @@ -6,6 +6,12 @@ base.features = "+v8a,+neon,+fp-armv8".into(); base.linker = Some("aarch64-w64-mingw32-clang".into()); + // Microsoft recommends enabling frame pointers on Arm64 Windows.
+ // From https://learn.microsoft.com/en-us/cpp/build/arm64-windows-abi-conventions?view=msvc-170#integer-registers + // "The frame pointer (x29) is required for compatibility with fast stack walking used by ETW + // and other services. It must point to the previous {x29, x30} pair on the stack." + base.frame_pointer = FramePointer::NonLeaf; + Target { llvm_target: "aarch64-pc-windows-gnu".into(), metadata: TargetMetadata { diff --git a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs index aeadb32ac2b..8e2137da655 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/infer/mod.rs @@ -2026,7 +2026,7 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { } LetVisitor { span }.visit_body(body).break_value() } - hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(_, ty, _, _), .. }) => { + hir::Node::Item(hir::Item { kind: hir::ItemKind::Const(_, _, ty, _), .. }) => { Some(&ty.peel_refs().kind) } _ => None, diff --git a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs index 6863857f9ec..c4f1f7d712a 100644 --- a/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs +++ b/compiler/rustc_trait_selection/src/error_reporting/traits/suggestions.rs @@ -351,14 +351,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { hir::Node::Item(hir::Item { kind: - hir::ItemKind::Struct(_, _, generics) - | hir::ItemKind::Enum(_, _, generics) - | hir::ItemKind::Union(_, _, generics) + hir::ItemKind::Struct(_, generics, _) + | hir::ItemKind::Enum(_, generics, _) + | hir::ItemKind::Union(_, generics, _) | hir::ItemKind::Trait(_, _, _, generics, ..) | hir::ItemKind::Impl(hir::Impl { generics, .. }) | hir::ItemKind::Fn { generics, .. } - | hir::ItemKind::TyAlias(_, _, generics) - | hir::ItemKind::Const(_, _, generics, _) + | hir::ItemKind::TyAlias(_, generics, _) + | hir::ItemKind::Const(_, generics, _, _) | hir::ItemKind::TraitAlias(_, generics, _), .. }) @@ -411,14 +411,14 @@ impl<'a, 'tcx> TypeErrCtxt<'a, 'tcx> { hir::Node::Item(hir::Item { kind: - hir::ItemKind::Struct(_, _, generics) - | hir::ItemKind::Enum(_, _, generics) - | hir::ItemKind::Union(_, _, generics) + hir::ItemKind::Struct(_, generics, _) + | hir::ItemKind::Enum(_, generics, _) + | hir::ItemKind::Union(_, generics, _) | hir::ItemKind::Trait(_, _, _, generics, ..) | hir::ItemKind::Impl(hir::Impl { generics, .. }) | hir::ItemKind::Fn { generics, .. } - | hir::ItemKind::TyAlias(_, _, generics) - | hir::ItemKind::Const(_, _, generics, _) + | hir::ItemKind::TyAlias(_, generics, _) + | hir::ItemKind::Const(_, generics, _, _) | hir::ItemKind::TraitAlias(_, generics, _), .. 
}) if !param_ty => { diff --git a/compiler/rustc_trait_selection/src/solve/delegate.rs b/compiler/rustc_trait_selection/src/solve/delegate.rs index 038cdc1a564..e92e37b8738 100644 --- a/compiler/rustc_trait_selection/src/solve/delegate.rs +++ b/compiler/rustc_trait_selection/src/solve/delegate.rs @@ -11,8 +11,9 @@ use rustc_infer::infer::{InferCtxt, RegionVariableOrigin, SubregionOrigin, TyCtx use rustc_infer::traits::solve::Goal; use rustc_middle::traits::query::NoSolution; use rustc_middle::traits::solve::Certainty; -use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable, TypeVisitableExt as _, TypingMode}; -use rustc_next_trait_solver::solve::HasChanged; +use rustc_middle::ty::{ + self, Ty, TyCtxt, TypeFlags, TypeFoldable, TypeVisitableExt as _, TypingMode, +}; use rustc_span::{DUMMY_SP, ErrorGuaranteed, Span}; use crate::traits::{EvaluateConstErr, ObligationCause, specialization_graph}; @@ -61,11 +62,41 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< &self, goal: Goal<'tcx, ty::Predicate<'tcx>>, span: Span, - ) -> Option<HasChanged> { + ) -> Option<Certainty> { + if let Some(trait_pred) = goal.predicate.as_trait_clause() { + if trait_pred.polarity() == ty::PredicatePolarity::Positive { + match self.0.tcx.as_lang_item(trait_pred.def_id()) { + Some(LangItem::Sized) + if self + .resolve_vars_if_possible(trait_pred.self_ty().skip_binder()) + .is_trivially_sized(self.0.tcx) => + { + return Some(Certainty::Yes); + } + Some(LangItem::Copy | LangItem::Clone) => { + let self_ty = + self.resolve_vars_if_possible(trait_pred.self_ty().skip_binder()); + // Unlike `Sized` traits, which always prefer the built-in impl, + // `Copy`/`Clone` may be shadowed by a param-env candidate which + // could force a lifetime error or guide inference. While that's + // not generally desirable, it is observable, so for now let's + // ignore this fast path for types that have regions or infer. + if !self_ty + .has_type_flags(TypeFlags::HAS_FREE_REGIONS | TypeFlags::HAS_INFER) + && self_ty.is_trivially_pure_clone_copy() + { + return Some(Certainty::Yes); + } + } + _ => {} + } + } + } + let pred = goal.predicate.kind(); match pred.no_bound_vars()? 
{ ty::PredicateKind::DynCompatible(def_id) if self.0.tcx.is_dyn_compatible(def_id) => { - Some(HasChanged::No) + Some(Certainty::Yes) } ty::PredicateKind::Clause(ty::ClauseKind::RegionOutlives(outlives)) => { self.0.sub_regions( @@ -73,7 +104,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< outlives.1, outlives.0, ); - Some(HasChanged::No) + Some(Certainty::Yes) } ty::PredicateKind::Clause(ty::ClauseKind::TypeOutlives(outlives)) => { self.0.register_type_outlives_constraint( @@ -82,22 +113,7 @@ impl<'tcx> rustc_next_trait_solver::delegate::SolverDelegate for SolverDelegate< &ObligationCause::dummy_with_span(span), ); - Some(HasChanged::No) - } - ty::PredicateKind::Clause(ty::ClauseKind::Trait(trait_pred)) => { - match self.0.tcx.as_lang_item(trait_pred.def_id()) { - Some(LangItem::Sized) - if trait_pred.self_ty().is_trivially_sized(self.0.tcx) => - { - Some(HasChanged::No) - } - Some(LangItem::Copy | LangItem::Clone) - if trait_pred.self_ty().is_trivially_pure_clone_copy() => - { - Some(HasChanged::No) - } - _ => None, - } + Some(Certainty::Yes) } _ => None, } diff --git a/compiler/rustc_trait_selection/src/solve/fulfill.rs b/compiler/rustc_trait_selection/src/solve/fulfill.rs index d273703a9b1..ed99c678a4d 100644 --- a/compiler/rustc_trait_selection/src/solve/fulfill.rs +++ b/compiler/rustc_trait_selection/src/solve/fulfill.rs @@ -195,10 +195,15 @@ where let goal = obligation.as_goal(); let delegate = <&SolverDelegate<'tcx>>::from(infcx); - if let Some(fast_path_has_changed) = + if let Some(certainty) = delegate.compute_goal_fast_path(goal, obligation.cause.span) { - any_changed |= matches!(fast_path_has_changed, HasChanged::Yes); + match certainty { + Certainty::Yes => {} + Certainty::Maybe(_) => { + self.obligations.register(obligation, None); + } + } continue; } diff --git a/library/Cargo.lock b/library/Cargo.lock index 02018057ed5..0c75977ee79 100644 --- a/library/Cargo.lock +++ b/library/Cargo.lock @@ -61,9 +61,9 @@ dependencies = [ [[package]] name = "compiler_builtins" -version = "0.1.159" +version = "0.1.160" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "448068da8f2326b2a0472353cb401dd8795a89c007ef30fff90f50706e862e72" +checksum = "6376049cfa92c0aa8b9ac95fae22184b981c658208d4ed8a1dc553cd83612895" dependencies = [ "cc", "rustc-std-workspace-core", diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml index 9d0d957226d..365c9dc00df 100644 --- a/library/alloc/Cargo.toml +++ b/library/alloc/Cargo.toml @@ -16,7 +16,7 @@ bench = false [dependencies] core = { path = "../core", public = true } -compiler_builtins = { version = "=0.1.159", features = ['rustc-dep-of-std'] } +compiler_builtins = { version = "=0.1.160", features = ['rustc-dep-of-std'] } [features] compiler-builtins-mem = ['compiler_builtins/mem'] diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs index ea81645aa64..52b98291ff9 100644 --- a/library/alloc/src/collections/btree/map.rs +++ b/library/alloc/src/collections/btree/map.rs @@ -1151,7 +1151,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { K: Ord, F: FnMut(&K, &mut V) -> bool, { - self.extract_if(|k, v| !f(k, v)).for_each(drop); + self.extract_if(.., |k, v| !f(k, v)).for_each(drop); } /// Moves all elements from `other` into `self`, leaving `other` empty. 
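The `retain` body above is the first call site updated to the new signature: `BTreeMap::extract_if` (defined in full in the next hunk, with `BTreeSet` following later) now takes a range as its first argument, and `..` restores the old visit-everything behaviour. A minimal nightly-only usage sketch of the new shape, behind the unstable `btree_extract_if` feature gate noted in the attributes:

```rust
#![feature(btree_extract_if)]
use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();

    // Only keys in 2..6 are visited; of those, the predicate removes the even ones.
    let extracted: Vec<_> = map.extract_if(2..6, |k, _v| k % 2 == 0).collect();
    assert_eq!(extracted, [(2, 2), (4, 4)]);

    // Keys outside the range are untouched, as are odd keys inside it.
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), [0, 1, 3, 5, 6, 7]);
}
```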
@@ -1397,7 +1397,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { } } - /// Creates an iterator that visits all elements (key-value pairs) in + /// Creates an iterator that visits elements (key-value pairs) in the specified range in /// ascending key order and uses a closure to determine if an element /// should be removed. /// @@ -1423,33 +1423,42 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { /// use std::collections::BTreeMap; /// /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect(); - /// let evens: BTreeMap<_, _> = map.extract_if(|k, _v| k % 2 == 0).collect(); + /// let evens: BTreeMap<_, _> = map.extract_if(.., |k, _v| k % 2 == 0).collect(); /// let odds = map; /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), [0, 2, 4, 6]); /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), [1, 3, 5, 7]); + /// + /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect(); + /// let low: BTreeMap<_, _> = map.extract_if(0..4, |_k, _v| true).collect(); + /// let high = map; + /// assert_eq!(low.keys().copied().collect::<Vec<_>>(), [0, 1, 2, 3]); + /// assert_eq!(high.keys().copied().collect::<Vec<_>>(), [4, 5, 6, 7]); /// ``` #[unstable(feature = "btree_extract_if", issue = "70530")] - pub fn extract_if<F>(&mut self, pred: F) -> ExtractIf<'_, K, V, F, A> + pub fn extract_if<F, R>(&mut self, range: R, pred: F) -> ExtractIf<'_, K, V, R, F, A> where K: Ord, + R: RangeBounds<K>, F: FnMut(&K, &mut V) -> bool, { - let (inner, alloc) = self.extract_if_inner(); + let (inner, alloc) = self.extract_if_inner(range); ExtractIf { pred, inner, alloc } } - pub(super) fn extract_if_inner(&mut self) -> (ExtractIfInner<'_, K, V>, A) + pub(super) fn extract_if_inner<R>(&mut self, range: R) -> (ExtractIfInner<'_, K, V, R>, A) where K: Ord, + R: RangeBounds<K>, { if let Some(root) = self.root.as_mut() { let (root, dormant_root) = DormantMutRef::new(root); - let front = root.borrow_mut().first_leaf_edge(); + let first = root.borrow_mut().lower_bound(SearchBound::from_range(range.start_bound())); ( ExtractIfInner { length: &mut self.length, dormant_root: Some(dormant_root), - cur_leaf_edge: Some(front), + cur_leaf_edge: Some(first), + range, }, (*self.alloc).clone(), ) @@ -1459,6 +1468,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> { length: &mut self.length, dormant_root: None, cur_leaf_edge: None, + range, }, (*self.alloc).clone(), ) @@ -1917,18 +1927,19 @@ pub struct ExtractIf< 'a, K, V, + R, F, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global, > { pred: F, - inner: ExtractIfInner<'a, K, V>, + inner: ExtractIfInner<'a, K, V, R>, /// The BTreeMap will outlive this IntoIter so we don't care about drop order for `alloc`. alloc: A, } /// Most of the implementation of ExtractIf are generic over the type /// of the predicate, thus also serving for BTreeSet::ExtractIf. -pub(super) struct ExtractIfInner<'a, K, V> { +pub(super) struct ExtractIfInner<'a, K, V, R> { /// Reference to the length field in the borrowed map, updated live. length: &'a mut usize, /// Buried reference to the root field in the borrowed map. @@ -1938,10 +1949,13 @@ pub(super) struct ExtractIfInner<'a, K, V> { /// Empty if the map has no root, if iteration went beyond the last leaf edge, /// or if a panic occurred in the predicate. cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>, + /// Range over which iteration was requested. 
We don't need the left side, but we + /// can't extract the right side without requiring K: Clone. + range: R, } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<K, V, F, A> fmt::Debug for ExtractIf<'_, K, V, F, A> +impl<K, V, R, F, A> fmt::Debug for ExtractIf<'_, K, V, R, F, A> where K: fmt::Debug, V: fmt::Debug, @@ -1953,8 +1967,10 @@ where } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<K, V, F, A: Allocator + Clone> Iterator for ExtractIf<'_, K, V, F, A> +impl<K, V, R, F, A: Allocator + Clone> Iterator for ExtractIf<'_, K, V, R, F, A> where + K: PartialOrd, + R: RangeBounds<K>, F: FnMut(&K, &mut V) -> bool, { type Item = (K, V); @@ -1968,7 +1984,7 @@ where } } -impl<'a, K, V> ExtractIfInner<'a, K, V> { +impl<'a, K, V, R> ExtractIfInner<'a, K, V, R> { /// Allow Debug implementations to predict the next element. pub(super) fn peek(&self) -> Option<(&K, &V)> { let edge = self.cur_leaf_edge.as_ref()?; @@ -1978,10 +1994,22 @@ impl<'a, K, V> ExtractIfInner<'a, K, V> { /// Implementation of a typical `ExtractIf::next` method, given the predicate. pub(super) fn next<F, A: Allocator + Clone>(&mut self, pred: &mut F, alloc: A) -> Option<(K, V)> where + K: PartialOrd, + R: RangeBounds<K>, F: FnMut(&K, &mut V) -> bool, { while let Ok(mut kv) = self.cur_leaf_edge.take()?.next_kv() { let (k, v) = kv.kv_mut(); + + // On creation, we navigated directly to the left bound, so we need only check the + // right bound here to decide whether to stop. + match self.range.end_bound() { + Bound::Included(ref end) if (*k).le(end) => (), + Bound::Excluded(ref end) if (*k).lt(end) => (), + Bound::Unbounded => (), + _ => return None, + } + if pred(k, v) { *self.length -= 1; let (kv, pos) = kv.remove_kv_tracking( @@ -2013,7 +2041,13 @@ impl<'a, K, V> ExtractIfInner<'a, K, V> { } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<K, V, F> FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} +impl<K, V, R, F> FusedIterator for ExtractIf<'_, K, V, R, F> +where + K: PartialOrd, + R: RangeBounds<K>, + F: FnMut(&K, &mut V) -> bool, +{ +} #[stable(feature = "btree_range", since = "1.17.0")] impl<'a, K, V> Iterator for Range<'a, K, V> { diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs index 5975134382e..79879d31d3d 100644 --- a/library/alloc/src/collections/btree/map/tests.rs +++ b/library/alloc/src/collections/btree/map/tests.rs @@ -944,7 +944,7 @@ mod test_extract_if { #[test] fn empty() { let mut map: BTreeMap<i32, i32> = BTreeMap::new(); - map.extract_if(|_, _| unreachable!("there's nothing to decide on")).for_each(drop); + map.extract_if(.., |_, _| unreachable!("there's nothing to decide on")).for_each(drop); assert_eq!(map.height(), None); map.check(); } @@ -954,7 +954,7 @@ mod test_extract_if { fn consumed_keeping_all() { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - assert!(map.extract_if(|_, _| false).eq(iter::empty())); + assert!(map.extract_if(.., |_, _| false).eq(iter::empty())); map.check(); } @@ -963,18 +963,42 @@ mod test_extract_if { fn consumed_removing_all() { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs.clone()); - assert!(map.extract_if(|_, _| true).eq(pairs)); + assert!(map.extract_if(.., |_, _| true).eq(pairs)); assert!(map.is_empty()); map.check(); } + #[test] + fn consumed_removing_some() { + let pairs = (0..3).map(|i| (i, i)); + let map = BTreeMap::from_iter(pairs); + for x in 0..3 { + for y in 0..3 
{ + let mut map = map.clone(); + assert!(map.extract_if(x..y, |_, _| true).eq((x..y).map(|i| (i, i)))); + for i in 0..3 { + assert_ne!(map.contains_key(&i), (x..y).contains(&i)); + } + } + } + for x in 0..3 { + for y in 0..2 { + let mut map = map.clone(); + assert!(map.extract_if(x..=y, |_, _| true).eq((x..=y).map(|i| (i, i)))); + for i in 0..3 { + assert_ne!(map.contains_key(&i), (x..=y).contains(&i)); + } + } + } + } + // Explicitly consumes the iterator and modifies values through it. #[test] fn mutating_and_keeping() { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); assert!( - map.extract_if(|_, v| { + map.extract_if(.., |_, v| { *v += 6; false }) @@ -991,7 +1015,7 @@ mod test_extract_if { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); assert!( - map.extract_if(|_, v| { + map.extract_if(.., |_, v| { *v += 6; true }) @@ -1005,7 +1029,7 @@ mod test_extract_if { fn underfull_keeping_all() { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| false).for_each(drop); + map.extract_if(.., |_, _| false).for_each(drop); assert!(map.keys().copied().eq(0..3)); map.check(); } @@ -1015,7 +1039,7 @@ mod test_extract_if { let pairs = (0..3).map(|i| (i, i)); for doomed in 0..3 { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i == doomed).for_each(drop); + map.extract_if(.., |i, _| *i == doomed).for_each(drop); assert_eq!(map.len(), 2); map.check(); } @@ -1026,7 +1050,7 @@ mod test_extract_if { let pairs = (0..3).map(|i| (i, i)); for sacred in 0..3 { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i != sacred).for_each(drop); + map.extract_if(.., |i, _| *i != sacred).for_each(drop); assert!(map.keys().copied().eq(sacred..=sacred)); map.check(); } @@ -1036,7 +1060,7 @@ mod test_extract_if { fn underfull_removing_all() { let pairs = (0..3).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| true).for_each(drop); + map.extract_if(.., |_, _| true).for_each(drop); assert!(map.is_empty()); map.check(); } @@ -1045,7 +1069,7 @@ mod test_extract_if { fn height_0_keeping_all() { let pairs = (0..node::CAPACITY).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| false).for_each(drop); + map.extract_if(.., |_, _| false).for_each(drop); assert!(map.keys().copied().eq(0..node::CAPACITY)); map.check(); } @@ -1055,7 +1079,7 @@ mod test_extract_if { let pairs = (0..node::CAPACITY).map(|i| (i, i)); for doomed in 0..node::CAPACITY { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i == doomed).for_each(drop); + map.extract_if(.., |i, _| *i == doomed).for_each(drop); assert_eq!(map.len(), node::CAPACITY - 1); map.check(); } @@ -1066,7 +1090,7 @@ mod test_extract_if { let pairs = (0..node::CAPACITY).map(|i| (i, i)); for sacred in 0..node::CAPACITY { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i != sacred).for_each(drop); + map.extract_if(.., |i, _| *i != sacred).for_each(drop); assert!(map.keys().copied().eq(sacred..=sacred)); map.check(); } @@ -1076,7 +1100,7 @@ mod test_extract_if { fn height_0_removing_all() { let pairs = (0..node::CAPACITY).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| true).for_each(drop); + map.extract_if(.., |_, _| true).for_each(drop); assert!(map.is_empty()); map.check(); } @@ -1084,7 +1108,7 @@ mod test_extract_if { #[test] fn height_0_keeping_half() { let mut map = 
BTreeMap::from_iter((0..16).map(|i| (i, i))); - assert_eq!(map.extract_if(|i, _| *i % 2 == 0).count(), 8); + assert_eq!(map.extract_if(.., |i, _| *i % 2 == 0).count(), 8); assert_eq!(map.len(), 8); map.check(); } @@ -1093,7 +1117,7 @@ mod test_extract_if { fn height_1_removing_all() { let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| true).for_each(drop); + map.extract_if(.., |_, _| true).for_each(drop); assert!(map.is_empty()); map.check(); } @@ -1103,7 +1127,7 @@ mod test_extract_if { let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i)); for doomed in 0..MIN_INSERTS_HEIGHT_1 { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i == doomed).for_each(drop); + map.extract_if(.., |i, _| *i == doomed).for_each(drop); assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1); map.check(); } @@ -1114,7 +1138,7 @@ mod test_extract_if { let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i)); for sacred in 0..MIN_INSERTS_HEIGHT_1 { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i != sacred).for_each(drop); + map.extract_if(.., |i, _| *i != sacred).for_each(drop); assert!(map.keys().copied().eq(sacred..=sacred)); map.check(); } @@ -1125,7 +1149,7 @@ mod test_extract_if { let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)); for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i == doomed).for_each(drop); + map.extract_if(.., |i, _| *i == doomed).for_each(drop); assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1); map.check(); } @@ -1136,7 +1160,7 @@ mod test_extract_if { let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)); for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) { let mut map = BTreeMap::from_iter(pairs.clone()); - map.extract_if(|i, _| *i != sacred).for_each(drop); + map.extract_if(.., |i, _| *i != sacred).for_each(drop); assert!(map.keys().copied().eq(sacred..=sacred)); map.check(); } @@ -1146,7 +1170,7 @@ mod test_extract_if { fn height_2_removing_all() { let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)); let mut map = BTreeMap::from_iter(pairs); - map.extract_if(|_, _| true).for_each(drop); + map.extract_if(.., |_, _| true).for_each(drop); assert!(map.is_empty()); map.check(); } @@ -1162,7 +1186,7 @@ mod test_extract_if { map.insert(b.spawn(Panic::InDrop), ()); map.insert(c.spawn(Panic::Never), ()); - catch_unwind(move || map.extract_if(|dummy, _| dummy.query(true)).for_each(drop)) + catch_unwind(move || map.extract_if(.., |dummy, _| dummy.query(true)).for_each(drop)) .unwrap_err(); assert_eq!(a.queried(), 1); @@ -1185,7 +1209,7 @@ mod test_extract_if { map.insert(c.spawn(Panic::InQuery), ()); catch_unwind(AssertUnwindSafe(|| { - map.extract_if(|dummy, _| dummy.query(true)).for_each(drop) + map.extract_if(.., |dummy, _| dummy.query(true)).for_each(drop) })) .unwrap_err(); @@ -1214,7 +1238,7 @@ mod test_extract_if { map.insert(c.spawn(Panic::InQuery), ()); { - let mut it = map.extract_if(|dummy, _| dummy.query(true)); + let mut it = map.extract_if(.., |dummy, _| dummy.query(true)); catch_unwind(AssertUnwindSafe(|| while it.next().is_some() {})).unwrap_err(); // Iterator behavior after a panic is explicitly unspecified, // so this is just the current implementation: @@ -1658,7 +1682,7 @@ fn assert_sync() { } fn extract_if<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ { - v.extract_if(|_, _| false) + v.extract_if(.., |_, _| false) } fn iter<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ 
{ @@ -1727,7 +1751,7 @@ fn assert_send() { } fn extract_if<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ { - v.extract_if(|_, _| false) + v.extract_if(.., |_, _| false) } fn iter<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ { diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs index 343934680b8..780bd8b0dd1 100644 --- a/library/alloc/src/collections/btree/set.rs +++ b/library/alloc/src/collections/btree/set.rs @@ -1109,7 +1109,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { T: Ord, F: FnMut(&T) -> bool, { - self.extract_if(|v| !f(v)).for_each(drop); + self.extract_if(.., |v| !f(v)).for_each(drop); } /// Moves all elements from `other` into `self`, leaving `other` empty. @@ -1187,7 +1187,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { BTreeSet { map: self.map.split_off(value) } } - /// Creates an iterator that visits all elements in ascending order and + /// Creates an iterator that visits elements in the specified range in ascending order and /// uses a closure to determine if an element should be removed. /// /// If the closure returns `true`, the element is removed from the set and @@ -1208,18 +1208,25 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> { /// use std::collections::BTreeSet; /// /// let mut set: BTreeSet<i32> = (0..8).collect(); - /// let evens: BTreeSet<_> = set.extract_if(|v| v % 2 == 0).collect(); + /// let evens: BTreeSet<_> = set.extract_if(.., |v| v % 2 == 0).collect(); /// let odds = set; /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2, 4, 6]); /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7]); + /// + /// let mut map: BTreeSet<i32> = (0..8).collect(); + /// let low: BTreeSet<_> = map.extract_if(0..4, |_v| true).collect(); + /// let high = map; + /// assert_eq!(low.into_iter().collect::<Vec<_>>(), [0, 1, 2, 3]); + /// assert_eq!(high.into_iter().collect::<Vec<_>>(), [4, 5, 6, 7]); /// ``` #[unstable(feature = "btree_extract_if", issue = "70530")] - pub fn extract_if<'a, F>(&'a mut self, pred: F) -> ExtractIf<'a, T, F, A> + pub fn extract_if<'a, F, R>(&'a mut self, range: R, pred: F) -> ExtractIf<'a, T, R, F, A> where T: Ord, + R: RangeBounds<T>, F: 'a + FnMut(&T) -> bool, { - let (inner, alloc) = self.map.extract_if_inner(); + let (inner, alloc) = self.map.extract_if_inner(range); ExtractIf { pred, inner, alloc } } @@ -1554,17 +1561,18 @@ impl<'a, T, A: Allocator + Clone> IntoIterator for &'a BTreeSet<T, A> { pub struct ExtractIf< 'a, T, + R, F, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global, > { pred: F, - inner: super::map::ExtractIfInner<'a, T, SetValZST>, + inner: super::map::ExtractIfInner<'a, T, SetValZST, R>, /// The BTreeMap will outlive this IntoIter so we don't care about drop order for `alloc`. 
alloc: A, } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<T, F, A> fmt::Debug for ExtractIf<'_, T, F, A> +impl<T, R, F, A> fmt::Debug for ExtractIf<'_, T, R, F, A> where T: fmt::Debug, A: Allocator + Clone, @@ -1577,8 +1585,10 @@ where } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<'a, T, F, A: Allocator + Clone> Iterator for ExtractIf<'_, T, F, A> +impl<'a, T, R, F, A: Allocator + Clone> Iterator for ExtractIf<'_, T, R, F, A> where + T: PartialOrd, + R: RangeBounds<T>, F: 'a + FnMut(&T) -> bool, { type Item = T; @@ -1595,7 +1605,13 @@ where } #[unstable(feature = "btree_extract_if", issue = "70530")] -impl<T, F, A: Allocator + Clone> FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&T) -> bool {} +impl<T, R, F, A: Allocator + Clone> FusedIterator for ExtractIf<'_, T, R, F, A> +where + T: PartialOrd, + R: RangeBounds<T>, + F: FnMut(&T) -> bool, +{ +} #[stable(feature = "rust1", since = "1.0.0")] impl<T: Ord, A: Allocator + Clone> Extend<T> for BTreeSet<T, A> { diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs index d538ef707eb..85c9a98c461 100644 --- a/library/alloc/src/collections/btree/set/tests.rs +++ b/library/alloc/src/collections/btree/set/tests.rs @@ -368,8 +368,8 @@ fn test_extract_if() { let mut x = BTreeSet::from([1]); let mut y = BTreeSet::from([1]); - x.extract_if(|_| true).for_each(drop); - y.extract_if(|_| false).for_each(drop); + x.extract_if(.., |_| true).for_each(drop); + y.extract_if(.., |_| false).for_each(drop); assert_eq!(x.len(), 0); assert_eq!(y.len(), 1); } @@ -385,7 +385,7 @@ fn test_extract_if_drop_panic_leak() { set.insert(b.spawn(Panic::InDrop)); set.insert(c.spawn(Panic::Never)); - catch_unwind(move || set.extract_if(|dummy| dummy.query(true)).for_each(drop)).ok(); + catch_unwind(move || set.extract_if(.., |dummy| dummy.query(true)).for_each(drop)).ok(); assert_eq!(a.queried(), 1); assert_eq!(b.queried(), 1); @@ -406,7 +406,7 @@ fn test_extract_if_pred_panic_leak() { set.insert(b.spawn(Panic::InQuery)); set.insert(c.spawn(Panic::InQuery)); - catch_unwind(AssertUnwindSafe(|| set.extract_if(|dummy| dummy.query(true)).for_each(drop))) + catch_unwind(AssertUnwindSafe(|| set.extract_if(.., |dummy| dummy.query(true)).for_each(drop))) .ok(); assert_eq!(a.queried(), 1); @@ -605,7 +605,7 @@ fn assert_sync() { } fn extract_if<T: Sync + Ord>(v: &mut BTreeSet<T>) -> impl Sync + '_ { - v.extract_if(|_| false) + v.extract_if(.., |_| false) } fn difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ { @@ -644,7 +644,7 @@ fn assert_send() { } fn extract_if<T: Send + Ord>(v: &mut BTreeSet<T>) -> impl Send + '_ { - v.extract_if(|_| false) + v.extract_if(.., |_| false) } fn difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ { diff --git a/library/alloctests/benches/btree/map.rs b/library/alloctests/benches/btree/map.rs index 20f02dc3a96..778065fd965 100644 --- a/library/alloctests/benches/btree/map.rs +++ b/library/alloctests/benches/btree/map.rs @@ -386,7 +386,7 @@ pub fn clone_slim_100_and_clear(b: &mut Bencher) { #[bench] pub fn clone_slim_100_and_drain_all(b: &mut Bencher) { let src = slim_map(100); - b.iter(|| src.clone().extract_if(|_, _| true).count()) + b.iter(|| src.clone().extract_if(.., |_, _| true).count()) } #[bench] @@ -394,7 +394,7 @@ pub fn clone_slim_100_and_drain_half(b: &mut Bencher) { let src = slim_map(100); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 100 / 2); + 
assert_eq!(map.extract_if(.., |i, _| i % 2 == 0).count(), 100 / 2); assert_eq!(map.len(), 100 / 2); }) } @@ -457,7 +457,7 @@ pub fn clone_slim_10k_and_clear(b: &mut Bencher) { #[bench] pub fn clone_slim_10k_and_drain_all(b: &mut Bencher) { let src = slim_map(10_000); - b.iter(|| src.clone().extract_if(|_, _| true).count()) + b.iter(|| src.clone().extract_if(.., |_, _| true).count()) } #[bench] @@ -465,7 +465,7 @@ pub fn clone_slim_10k_and_drain_half(b: &mut Bencher) { let src = slim_map(10_000); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 10_000 / 2); + assert_eq!(map.extract_if(.., |i, _| i % 2 == 0).count(), 10_000 / 2); assert_eq!(map.len(), 10_000 / 2); }) } @@ -528,7 +528,7 @@ pub fn clone_fat_val_100_and_clear(b: &mut Bencher) { #[bench] pub fn clone_fat_val_100_and_drain_all(b: &mut Bencher) { let src = fat_val_map(100); - b.iter(|| src.clone().extract_if(|_, _| true).count()) + b.iter(|| src.clone().extract_if(.., |_, _| true).count()) } #[bench] @@ -536,7 +536,7 @@ pub fn clone_fat_val_100_and_drain_half(b: &mut Bencher) { let src = fat_val_map(100); b.iter(|| { let mut map = src.clone(); - assert_eq!(map.extract_if(|i, _| i % 2 == 0).count(), 100 / 2); + assert_eq!(map.extract_if(.., |i, _| i % 2 == 0).count(), 100 / 2); assert_eq!(map.len(), 100 / 2); }) } diff --git a/library/alloctests/benches/btree/set.rs b/library/alloctests/benches/btree/set.rs index 5aa395b4d52..027c86a89a5 100644 --- a/library/alloctests/benches/btree/set.rs +++ b/library/alloctests/benches/btree/set.rs @@ -69,7 +69,7 @@ pub fn clone_100_and_clear(b: &mut Bencher) { #[bench] pub fn clone_100_and_drain_all(b: &mut Bencher) { let src = slim_set(100); - b.iter(|| src.clone().extract_if(|_| true).count()) + b.iter(|| src.clone().extract_if(.., |_| true).count()) } #[bench] @@ -77,7 +77,7 @@ pub fn clone_100_and_drain_half(b: &mut Bencher) { let src = slim_set(100); b.iter(|| { let mut set = src.clone(); - assert_eq!(set.extract_if(|i| i % 2 == 0).count(), 100 / 2); + assert_eq!(set.extract_if(.., |i| i % 2 == 0).count(), 100 / 2); assert_eq!(set.len(), 100 / 2); }) } @@ -140,7 +140,7 @@ pub fn clone_10k_and_clear(b: &mut Bencher) { #[bench] pub fn clone_10k_and_drain_all(b: &mut Bencher) { let src = slim_set(10_000); - b.iter(|| src.clone().extract_if(|_| true).count()) + b.iter(|| src.clone().extract_if(.., |_| true).count()) } #[bench] @@ -148,7 +148,7 @@ pub fn clone_10k_and_drain_half(b: &mut Bencher) { let src = slim_set(10_000); b.iter(|| { let mut set = src.clone(); - assert_eq!(set.extract_if(|i| i % 2 == 0).count(), 10_000 / 2); + assert_eq!(set.extract_if(.., |i| i % 2 == 0).count(), 10_000 / 2); assert_eq!(set.len(), 10_000 / 2); }) } diff --git a/library/alloctests/tests/autotraits.rs b/library/alloctests/tests/autotraits.rs index 6b82deeac8a..ad0a1038596 100644 --- a/library/alloctests/tests/autotraits.rs +++ b/library/alloctests/tests/autotraits.rs @@ -1,3 +1,5 @@ +use std::ops::Range; + fn require_sync<T: Sync>(_: T) {} fn require_send_sync<T: Send + Sync>(_: T) {} @@ -55,7 +57,13 @@ fn test_btree_map() { require_send_sync(async { let _v = None::< - alloc::collections::btree_map::ExtractIf<'_, &u32, &u32, fn(&&u32, &mut &u32) -> bool>, + alloc::collections::btree_map::ExtractIf< + '_, + &u32, + &u32, + Range<u32>, + fn(&&u32, &mut &u32) -> bool, + >, >; async {}.await; }); @@ -144,7 +152,9 @@ fn test_btree_set() { }); require_send_sync(async { - let _v = None::<alloc::collections::btree_set::ExtractIf<'_, &u32, fn(&&u32) -> bool>>; + let _v = 
None::< + alloc::collections::btree_set::ExtractIf<'_, &u32, Range<u32>, fn(&&u32) -> bool>, + >; async {}.await; }); diff --git a/library/core/src/clone.rs b/library/core/src/clone.rs index c237ac84cf4..2c0662c9629 100644 --- a/library/core/src/clone.rs +++ b/library/core/src/clone.rs @@ -38,7 +38,16 @@ mod uninit; -/// A common trait for the ability to explicitly duplicate an object. +/// A common trait that allows explicit creation of a duplicate value. +/// +/// Calling [`clone`] always produces a new value. +/// However, for types that are references to other data (such as smart pointers or references), +/// the new value may still point to the same underlying data, rather than duplicating it. +/// See [`Clone::clone`] for more details. +/// +/// This distinction is especially important when using `#[derive(Clone)]` on structs containing +/// smart pointers like `Arc<Mutex<T>>` - the cloned struct will share mutable state with the +/// original. /// /// Differs from [`Copy`] in that [`Copy`] is implicit and an inexpensive bit-wise copy, while /// `Clone` is always explicit and may or may not be expensive. In order to enforce @@ -147,7 +156,16 @@ mod uninit; #[rustc_diagnostic_item = "Clone"] #[rustc_trivial_field_reads] pub trait Clone: Sized { - /// Returns a copy of the value. + /// Returns a duplicate of the value. + /// + /// Note that what "duplicate" means varies by type: + /// - For most types, this creates a deep, independent copy + /// - For reference types like `&T`, this creates another reference to the same value + /// - For smart pointers like [`Arc`] or [`Rc`], this increments the reference count + /// but still points to the same underlying data + /// + /// [`Arc`]: ../../std/sync/struct.Arc.html + /// [`Rc`]: ../../std/rc/struct.Rc.html /// /// # Examples /// @@ -157,6 +175,23 @@ pub trait Clone: Sized { /// /// assert_eq!("Hello", hello.clone()); /// ``` + /// + /// Example with a reference-counted type: + /// + /// ``` + /// use std::sync::{Arc, Mutex}; + /// + /// let data = Arc::new(Mutex::new(vec![1, 2, 3])); + /// let data_clone = data.clone(); // Creates another Arc pointing to the same Mutex + /// + /// { + /// let mut lock = data.lock().unwrap(); + /// lock.push(4); + /// } + /// + /// // Changes are visible through the clone because they share the same underlying data + /// assert_eq!(*data_clone.lock().unwrap(), vec![1, 2, 3, 4]); + /// ``` #[stable(feature = "rust1", since = "1.0.0")] #[must_use = "cloning is often expensive and is not expected to have side effects"] // Clone::clone is special because the compiler generates MIR to implement it for some types. diff --git a/library/core/src/ffi/c_str.rs b/library/core/src/ffi/c_str.rs index bb2bf128be1..595cc1fe025 100644 --- a/library/core/src/ffi/c_str.rs +++ b/library/core/src/ffi/c_str.rs @@ -207,7 +207,7 @@ impl CStr { /// * `ptr` must be [valid] for reads of bytes up to and including the nul terminator. /// This means in particular: /// - /// * The entire memory range of this `CStr` must be contained within a single allocated object! + /// * The entire memory range of this `CStr` must be contained within a single allocation! /// * `ptr` must be non-null even for a zero-length cstr. /// /// * The memory referenced by the returned `CStr` must not be mutated for diff --git a/library/core/src/intrinsics/bounds.rs b/library/core/src/intrinsics/bounds.rs new file mode 100644 index 00000000000..046e191212c --- /dev/null +++ b/library/core/src/intrinsics/bounds.rs @@ -0,0 +1,39 @@ +//! 
Various traits used to restrict intrinsics to not-completely-wrong types. + +/// Types with a built-in dereference operator in runtime MIR, +/// aka references and raw pointers. +/// +/// # Safety +/// Must actually *be* such a type. +pub unsafe trait BuiltinDeref: Sized { + type Pointee: ?Sized; +} + +unsafe impl<T: ?Sized> BuiltinDeref for &mut T { + type Pointee = T; +} +unsafe impl<T: ?Sized> BuiltinDeref for &T { + type Pointee = T; +} +unsafe impl<T: ?Sized> BuiltinDeref for *mut T { + type Pointee = T; +} +unsafe impl<T: ?Sized> BuiltinDeref for *const T { + type Pointee = T; +} + +pub trait ChangePointee<U: ?Sized>: BuiltinDeref { + type Output; +} +impl<'a, T: ?Sized + 'a, U: ?Sized + 'a> ChangePointee<U> for &'a mut T { + type Output = &'a mut U; +} +impl<'a, T: ?Sized + 'a, U: ?Sized + 'a> ChangePointee<U> for &'a T { + type Output = &'a U; +} +impl<T: ?Sized, U: ?Sized> ChangePointee<U> for *mut T { + type Output = *mut U; +} +impl<T: ?Sized, U: ?Sized> ChangePointee<U> for *const T { + type Output = *const U; +} diff --git a/library/core/src/intrinsics/mod.rs b/library/core/src/intrinsics/mod.rs index f89baef76f0..d147cf889cc 100644 --- a/library/core/src/intrinsics/mod.rs +++ b/library/core/src/intrinsics/mod.rs @@ -53,6 +53,7 @@ use crate::marker::{ConstParamTy, DiscriminantKind, Tuple}; use crate::ptr; +mod bounds; pub mod fallback; pub mod mir; pub mod simd; @@ -1722,7 +1723,7 @@ pub const fn needs_drop<T: ?Sized>() -> bool; /// # Safety /// /// If the computed offset is non-zero, then both the starting and resulting pointer must be -/// either in bounds or at the end of an allocated object. If either pointer is out +/// either in bounds or at the end of an allocation. If either pointer is out /// of bounds or arithmetic overflow occurs then this operation is undefined behavior. /// /// The stabilized version of this intrinsic is [`pointer::offset`]. @@ -1730,7 +1731,7 @@ pub const fn needs_drop<T: ?Sized>() -> bool; #[rustc_intrinsic_const_stable_indirect] #[rustc_nounwind] #[rustc_intrinsic] -pub const unsafe fn offset<Ptr, Delta>(dst: Ptr, offset: Delta) -> Ptr; +pub const unsafe fn offset<Ptr: bounds::BuiltinDeref, Delta>(dst: Ptr, offset: Delta) -> Ptr; /// Calculates the offset from a pointer, potentially wrapping. /// @@ -1751,6 +1752,33 @@ pub const unsafe fn offset<Ptr, Delta>(dst: Ptr, offset: Delta) -> Ptr; #[rustc_intrinsic] pub const unsafe fn arith_offset<T>(dst: *const T, offset: isize) -> *const T; +/// Projects to the `index`-th element of `slice_ptr`, as the same kind of pointer +/// as the slice was provided -- so `&mut [T] → &mut T`, `&[T] → &T`, +/// `*mut [T] → *mut T`, or `*const [T] → *const T` -- without a bounds check. +/// +/// This is exposed via `<usize as SliceIndex>::get(_unchecked)(_mut)`, +/// and isn't intended to be used elsewhere. +/// +/// Expands in MIR to `{&, &mut, &raw const, &raw mut} (*slice_ptr)[index]`, +/// depending on the types involved, so no backend support is needed. 
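As the paragraph above notes, the new intrinsic is only reachable through the `SliceIndex` implementations (its `# Safety` list continues just below). At the source level, the behaviour it backs is ordinary unchecked slice indexing; a short sketch of that stable surface, not of the intrinsic itself:

```rust
fn main() {
    let xs = [10u8, 20, 30];
    // SAFETY: 1 < xs.len(), so the unchecked access is in bounds.
    let second = unsafe { xs.get_unchecked(1) };
    assert_eq!(*second, 20);

    let mut ys = [1u8, 2, 3];
    // SAFETY: 2 < ys.len(); the mutable flavour goes through the same
    // `SliceIndex` machinery.
    unsafe { *ys.get_unchecked_mut(2) = 9 };
    assert_eq!(ys, [1, 2, 9]);
}
```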
+/// +/// # Safety +/// +/// - `index < PtrMetadata(slice_ptr)`, so the indexing is in-bounds for the slice +/// - the resulting offsetting is in-bounds of the allocated object, which is +/// always the case for references, but needs to be upheld manually for pointers +#[cfg(not(bootstrap))] +#[rustc_nounwind] +#[rustc_intrinsic] +pub const unsafe fn slice_get_unchecked< + ItemPtr: bounds::ChangePointee<[T], Pointee = T, Output = SlicePtr>, + SlicePtr, + T, +>( + slice_ptr: SlicePtr, + index: usize, +) -> ItemPtr; + /// Masks out bits of the pointer according to a mask. /// /// Note that, unlike most intrinsics, this is safe to call; @@ -2212,28 +2240,28 @@ pub unsafe fn fmuladdf128(a: f128, b: f128, c: f128) -> f128; /// [`f16::floor`](../../std/primitive.f16.html#method.floor) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn floorf16(x: f16) -> f16; +pub const unsafe fn floorf16(x: f16) -> f16; /// Returns the largest integer less than or equal to an `f32`. /// /// The stabilized version of this intrinsic is /// [`f32::floor`](../../std/primitive.f32.html#method.floor) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn floorf32(x: f32) -> f32; +pub const unsafe fn floorf32(x: f32) -> f32; /// Returns the largest integer less than or equal to an `f64`. /// /// The stabilized version of this intrinsic is /// [`f64::floor`](../../std/primitive.f64.html#method.floor) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn floorf64(x: f64) -> f64; +pub const unsafe fn floorf64(x: f64) -> f64; /// Returns the largest integer less than or equal to an `f128`. /// /// The stabilized version of this intrinsic is /// [`f128::floor`](../../std/primitive.f128.html#method.floor) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn floorf128(x: f128) -> f128; +pub const unsafe fn floorf128(x: f128) -> f128; /// Returns the smallest integer greater than or equal to an `f16`. /// @@ -2241,28 +2269,28 @@ pub unsafe fn floorf128(x: f128) -> f128; /// [`f16::ceil`](../../std/primitive.f16.html#method.ceil) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn ceilf16(x: f16) -> f16; +pub const unsafe fn ceilf16(x: f16) -> f16; /// Returns the smallest integer greater than or equal to an `f32`. /// /// The stabilized version of this intrinsic is /// [`f32::ceil`](../../std/primitive.f32.html#method.ceil) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn ceilf32(x: f32) -> f32; +pub const unsafe fn ceilf32(x: f32) -> f32; /// Returns the smallest integer greater than or equal to an `f64`. /// /// The stabilized version of this intrinsic is /// [`f64::ceil`](../../std/primitive.f64.html#method.ceil) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn ceilf64(x: f64) -> f64; +pub const unsafe fn ceilf64(x: f64) -> f64; /// Returns the smallest integer greater than or equal to an `f128`. /// /// The stabilized version of this intrinsic is /// [`f128::ceil`](../../std/primitive.f128.html#method.ceil) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn ceilf128(x: f128) -> f128; +pub const unsafe fn ceilf128(x: f128) -> f128; /// Returns the integer part of an `f16`. /// @@ -2270,28 +2298,28 @@ pub unsafe fn ceilf128(x: f128) -> f128; /// [`f16::trunc`](../../std/primitive.f16.html#method.trunc) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn truncf16(x: f16) -> f16; +pub const unsafe fn truncf16(x: f16) -> f16; /// Returns the integer part of an `f32`. 
/// /// The stabilized version of this intrinsic is /// [`f32::trunc`](../../std/primitive.f32.html#method.trunc) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn truncf32(x: f32) -> f32; +pub const unsafe fn truncf32(x: f32) -> f32; /// Returns the integer part of an `f64`. /// /// The stabilized version of this intrinsic is /// [`f64::trunc`](../../std/primitive.f64.html#method.trunc) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn truncf64(x: f64) -> f64; +pub const unsafe fn truncf64(x: f64) -> f64; /// Returns the integer part of an `f128`. /// /// The stabilized version of this intrinsic is /// [`f128::trunc`](../../std/primitive.f128.html#method.trunc) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn truncf128(x: f128) -> f128; +pub const unsafe fn truncf128(x: f128) -> f128; /// Returns the nearest integer to an `f16`. Rounds half-way cases to the number with an even /// least significant digit. @@ -2300,7 +2328,7 @@ pub unsafe fn truncf128(x: f128) -> f128; /// [`f16::round_ties_even`](../../std/primitive.f16.html#method.round_ties_even) #[rustc_intrinsic] #[rustc_nounwind] -pub fn round_ties_even_f16(x: f16) -> f16; +pub const fn round_ties_even_f16(x: f16) -> f16; /// Returns the nearest integer to an `f32`. Rounds half-way cases to the number with an even /// least significant digit. @@ -2309,7 +2337,7 @@ pub fn round_ties_even_f16(x: f16) -> f16; /// [`f32::round_ties_even`](../../std/primitive.f32.html#method.round_ties_even) #[rustc_intrinsic] #[rustc_nounwind] -pub fn round_ties_even_f32(x: f32) -> f32; +pub const fn round_ties_even_f32(x: f32) -> f32; /// Returns the nearest integer to an `f64`. Rounds half-way cases to the number with an even /// least significant digit. @@ -2318,7 +2346,7 @@ pub fn round_ties_even_f32(x: f32) -> f32; /// [`f64::round_ties_even`](../../std/primitive.f64.html#method.round_ties_even) #[rustc_intrinsic] #[rustc_nounwind] -pub fn round_ties_even_f64(x: f64) -> f64; +pub const fn round_ties_even_f64(x: f64) -> f64; /// Returns the nearest integer to an `f128`. Rounds half-way cases to the number with an even /// least significant digit. @@ -2327,7 +2355,7 @@ pub fn round_ties_even_f64(x: f64) -> f64; /// [`f128::round_ties_even`](../../std/primitive.f128.html#method.round_ties_even) #[rustc_intrinsic] #[rustc_nounwind] -pub fn round_ties_even_f128(x: f128) -> f128; +pub const fn round_ties_even_f128(x: f128) -> f128; /// Returns the nearest integer to an `f16`. Rounds half-way cases away from zero. /// @@ -2335,28 +2363,28 @@ pub fn round_ties_even_f128(x: f128) -> f128; /// [`f16::round`](../../std/primitive.f16.html#method.round) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn roundf16(x: f16) -> f16; +pub const unsafe fn roundf16(x: f16) -> f16; /// Returns the nearest integer to an `f32`. Rounds half-way cases away from zero. /// /// The stabilized version of this intrinsic is /// [`f32::round`](../../std/primitive.f32.html#method.round) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn roundf32(x: f32) -> f32; +pub const unsafe fn roundf32(x: f32) -> f32; /// Returns the nearest integer to an `f64`. Rounds half-way cases away from zero. /// /// The stabilized version of this intrinsic is /// [`f64::round`](../../std/primitive.f64.html#method.round) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn roundf64(x: f64) -> f64; +pub const unsafe fn roundf64(x: f64) -> f64; /// Returns the nearest integer to an `f128`. Rounds half-way cases away from zero. 
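Making these intrinsics `const` is what lets the public rounding helpers later in this diff become `const fn`; a minimal sketch of the user-facing effect, assuming the unstable `core_float_math` and `const_float_round_methods` gates used by this change:

```rust
#![feature(core_float_math)]
#![feature(const_float_round_methods)]

use core::f64::math;

// Evaluated entirely at compile time once the intrinsics are `const`.
const FLOOR: f64 = math::floor(2.9);
const TRUNC: f64 = math::trunc(-2.9);
const HALF_EVEN: f64 = math::round_ties_even(2.5);

fn main() {
    assert_eq!(FLOOR, 2.0);
    assert_eq!(TRUNC, -2.0);
    assert_eq!(HALF_EVEN, 2.0);
}
```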
/// /// The stabilized version of this intrinsic is /// [`f128::round`](../../std/primitive.f128.html#method.round) #[rustc_intrinsic] #[rustc_nounwind] -pub unsafe fn roundf128(x: f128) -> f128; +pub const unsafe fn roundf128(x: f128) -> f128; /// Float addition that allows optimizations based on algebraic rules. /// May assume inputs are finite. @@ -3575,18 +3603,9 @@ pub const fn type_id<T: ?Sized + 'static>() -> u128; #[unstable(feature = "core_intrinsics", issue = "none")] #[rustc_intrinsic_const_stable_indirect] #[rustc_intrinsic] -pub const fn aggregate_raw_ptr<P: AggregateRawPtr<D, Metadata = M>, D, M>(data: D, meta: M) -> P; - -#[unstable(feature = "core_intrinsics", issue = "none")] -pub trait AggregateRawPtr<D> { - type Metadata: Copy; -} -impl<P: ?Sized, T: ptr::Thin> AggregateRawPtr<*const T> for *const P { - type Metadata = <P as ptr::Pointee>::Metadata; -} -impl<P: ?Sized, T: ptr::Thin> AggregateRawPtr<*mut T> for *mut P { - type Metadata = <P as ptr::Pointee>::Metadata; -} +pub const fn aggregate_raw_ptr<P: bounds::BuiltinDeref, D, M>(data: D, meta: M) -> P +where + <P as bounds::BuiltinDeref>::Pointee: ptr::Pointee<Metadata = M>; /// Lowers in MIR to `Rvalue::UnaryOp` with `UnOp::PtrMetadata`. /// diff --git a/library/core/src/num/f128.rs b/library/core/src/num/f128.rs index 0c2c4155d66..6b9b2ba8689 100644 --- a/library/core/src/num/f128.rs +++ b/library/core/src/num/f128.rs @@ -1447,8 +1447,10 @@ impl f128 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn floor(self) -> f128 { + pub const fn floor(self) -> f128 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::floorf128(self) } } @@ -1477,8 +1479,10 @@ impl f128 { #[doc(alias = "ceiling")] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn ceil(self) -> f128 { + pub const fn ceil(self) -> f128 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::ceilf128(self) } } @@ -1513,8 +1517,10 @@ impl f128 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round(self) -> f128 { + pub const fn round(self) -> f128 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::roundf128(self) } } @@ -1547,8 +1553,10 @@ impl f128 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round_ties_even(self) -> f128 { + pub const fn round_ties_even(self) -> f128 { intrinsics::round_ties_even_f128(self) } @@ -1579,8 +1587,10 @@ impl f128 { #[doc(alias = "truncate")] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", 
issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn trunc(self) -> f128 { + pub const fn trunc(self) -> f128 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::truncf128(self) } } @@ -1610,8 +1620,10 @@ impl f128 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f128", issue = "116909")] + #[rustc_const_unstable(feature = "f128", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn fract(self) -> f128 { + pub const fn fract(self) -> f128 { self - self.trunc() } diff --git a/library/core/src/num/f16.rs b/library/core/src/num/f16.rs index 1a859f2277f..eb7af993c60 100644 --- a/library/core/src/num/f16.rs +++ b/library/core/src/num/f16.rs @@ -1423,8 +1423,10 @@ impl f16 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn floor(self) -> f16 { + pub const fn floor(self) -> f16 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::floorf16(self) } } @@ -1453,8 +1455,10 @@ impl f16 { #[doc(alias = "ceiling")] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn ceil(self) -> f16 { + pub const fn ceil(self) -> f16 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::ceilf16(self) } } @@ -1489,8 +1493,10 @@ impl f16 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round(self) -> f16 { + pub const fn round(self) -> f16 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::roundf16(self) } } @@ -1523,8 +1529,10 @@ impl f16 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round_ties_even(self) -> f16 { + pub const fn round_ties_even(self) -> f16 { intrinsics::round_ties_even_f16(self) } @@ -1555,8 +1563,10 @@ impl f16 { #[doc(alias = "truncate")] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn trunc(self) -> f16 { + pub const fn trunc(self) -> f16 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::truncf16(self) } } @@ -1586,8 
+1596,10 @@ impl f16 { #[inline] #[rustc_allow_incoherent_impl] #[unstable(feature = "f16", issue = "116909")] + #[rustc_const_unstable(feature = "f16", issue = "116909")] + // #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn fract(self) -> f16 { + pub const fn fract(self) -> f16 { self - self.trunc() } diff --git a/library/core/src/num/f32.rs b/library/core/src/num/f32.rs index 6636054a659..bf923d4070a 100644 --- a/library/core/src/num/f32.rs +++ b/library/core/src/num/f32.rs @@ -1591,8 +1591,9 @@ pub mod math { /// [`f32::floor`]: ../../../std/primitive.f32.html#method.floor #[inline] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn floor(x: f32) -> f32 { + pub const fn floor(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::floorf32(x) } } @@ -1621,7 +1622,8 @@ pub mod math { #[doc(alias = "ceiling")] #[must_use = "method returns a new number and does not mutate the original value"] #[unstable(feature = "core_float_math", issue = "137578")] - pub fn ceil(x: f32) -> f32 { + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + pub const fn ceil(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::ceilf32(x) } } @@ -1655,7 +1657,8 @@ pub mod math { #[inline] #[unstable(feature = "core_float_math", issue = "137578")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round(x: f32) -> f32 { + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + pub const fn round(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::roundf32(x) } } @@ -1688,7 +1691,8 @@ pub mod math { #[inline] #[unstable(feature = "core_float_math", issue = "137578")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round_ties_even(x: f32) -> f32 { + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + pub const fn round_ties_even(x: f32) -> f32 { intrinsics::round_ties_even_f32(x) } @@ -1718,7 +1722,8 @@ pub mod math { #[doc(alias = "truncate")] #[must_use = "method returns a new number and does not mutate the original value"] #[unstable(feature = "core_float_math", issue = "137578")] - pub fn trunc(x: f32) -> f32 { + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] + pub const fn trunc(x: f32) -> f32 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::truncf32(x) } } @@ -1747,8 +1752,9 @@ pub mod math { /// [`f32::fract`]: ../../../std/primitive.f32.html#method.fract #[inline] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn fract(x: f32) -> f32 { + pub const fn fract(x: f32) -> f32 { x - trunc(x) } diff --git a/library/core/src/num/f64.rs b/library/core/src/num/f64.rs index 8fbf2cffbaf..0a63ed828ae 100644 --- a/library/core/src/num/f64.rs +++ b/library/core/src/num/f64.rs @@ -1589,8 +1589,9 @@ pub mod math { /// [`f64::floor`]: ../../../std/primitive.f64.html#method.floor #[inline] #[unstable(feature = "core_float_math", issue = 
"137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn floor(x: f64) -> f64 { + pub const fn floor(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::floorf64(x) } } @@ -1618,8 +1619,9 @@ pub mod math { #[inline] #[doc(alias = "ceiling")] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn ceil(x: f64) -> f64 { + pub const fn ceil(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::ceilf64(x) } } @@ -1652,8 +1654,9 @@ pub mod math { /// [`f64::round`]: ../../../std/primitive.f64.html#method.round #[inline] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round(x: f64) -> f64 { + pub const fn round(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::roundf64(x) } } @@ -1685,8 +1688,9 @@ pub mod math { /// [`f64::round_ties_even`]: ../../../std/primitive.f64.html#method.round_ties_even #[inline] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn round_ties_even(x: f64) -> f64 { + pub const fn round_ties_even(x: f64) -> f64 { intrinsics::round_ties_even_f64(x) } @@ -1715,8 +1719,9 @@ pub mod math { #[inline] #[doc(alias = "truncate")] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn trunc(x: f64) -> f64 { + pub const fn trunc(x: f64) -> f64 { // SAFETY: intrinsic with no preconditions unsafe { intrinsics::truncf64(x) } } @@ -1745,8 +1750,9 @@ pub mod math { /// [`f64::fract`]: ../../../std/primitive.f64.html#method.fract #[inline] #[unstable(feature = "core_float_math", issue = "137578")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[must_use = "method returns a new number and does not mutate the original value"] - pub fn fract(x: f64) -> f64 { + pub const fn fract(x: f64) -> f64 { x - trunc(x) } diff --git a/library/core/src/num/int_macros.rs b/library/core/src/num/int_macros.rs index f320a194271..65560f63c18 100644 --- a/library/core/src/num/int_macros.rs +++ b/library/core/src/num/int_macros.rs @@ -4,7 +4,7 @@ macro_rules! int_impl { ActualT = $ActualT:ident, UnsignedT = $UnsignedT:ty, - // There are all for use *only* in doc comments. + // These are all for use *only* in doc comments. // As such, they're all passed as literals -- passing them as a string // literal is fine if they need to be multiple code tokens. // In non-comments, use the associated constants rather than these. @@ -1018,6 +1018,110 @@ macro_rules! int_impl { if b { overflow_panic::div() } else { a } } + /// Checked integer division without remainder. Computes `self / rhs`, + /// returning `None` if `rhs == 0`, the division results in overflow, + /// or `self % rhs != 0`. 
+ /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_div)] + #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).checked_exact_div(-1), Some(", stringify!($Max), "));")] + #[doc = concat!("assert_eq!((-5", stringify!($SelfT), ").checked_exact_div(2), None);")] + #[doc = concat!("assert_eq!(", stringify!($SelfT), "::MIN.checked_exact_div(-1), None);")] + #[doc = concat!("assert_eq!((1", stringify!($SelfT), ").checked_exact_div(0), None);")] + /// ``` + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn checked_exact_div(self, rhs: Self) -> Option<Self> { + if intrinsics::unlikely(rhs == 0 || ((self == Self::MIN) && (rhs == -1))) { + None + } else { + // SAFETY: division by zero and overflow are checked above + unsafe { + if intrinsics::unlikely(intrinsics::unchecked_rem(self, rhs) != 0) { + None + } else { + Some(intrinsics::exact_div(self, rhs)) + } + } + } + } + + /// Checked integer division without remainder. Computes `self / rhs`. + /// + /// # Panics + /// + /// This function will panic if `rhs == 0`, the division results in overflow, + /// or `self % rhs != 0`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_div)] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".exact_div(2), 32);")] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".exact_div(32), 2);")] + #[doc = concat!("assert_eq!((", stringify!($SelfT), "::MIN + 1).exact_div(-1), ", stringify!($Max), ");")] + /// ``` + /// + /// ```should_panic + /// #![feature(exact_div)] + #[doc = concat!("let _ = 65", stringify!($SelfT), ".exact_div(2);")] + /// ``` + /// ```should_panic + /// #![feature(exact_div)] + #[doc = concat!("let _ = ", stringify!($SelfT), "::MIN.exact_div(-1);")] + /// ``` + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn exact_div(self, rhs: Self) -> Self { + match self.checked_exact_div(rhs) { + Some(x) => x, + None => panic!("Failed to divide without remainder"), + } + } + + /// Unchecked integer division without remainder. Computes `self / rhs`. + /// + /// # Safety + /// + /// This results in undefined behavior when `rhs == 0`, `self % rhs != 0`, or + #[doc = concat!("`self == ", stringify!($SelfT), "::MIN && rhs == -1`,")] + /// i.e. when [`checked_exact_div`](Self::checked_exact_div) would return `None`. + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const unsafe fn unchecked_exact_div(self, rhs: Self) -> Self { + assert_unsafe_precondition!( + check_language_ub, + concat!(stringify!($SelfT), "::unchecked_exact_div cannot overflow, divide by zero, or leave a remainder"), + ( + lhs: $SelfT = self, + rhs: $SelfT = rhs, + ) => rhs != 0 && lhs % rhs == 0 && (lhs != <$SelfT>::MIN || rhs != -1), + ); + // SAFETY: Same precondition + unsafe { intrinsics::exact_div(self, rhs) } + } + /// Checked integer remainder. Computes `self % rhs`, returning `None` if /// `rhs == 0` or the division results in overflow. 
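A short illustrative sketch of the signed behaviour documented above, contrasting truncating `checked_div` with the new `checked_exact_div` (assuming the unstable `exact_div` feature):

```rust
#![feature(exact_div)]

fn main() {
    // Ordinary checked division truncates; exact division refuses a remainder.
    assert_eq!(64_i32.checked_div(6), Some(10));
    assert_eq!(64_i32.checked_exact_div(6), None);
    assert_eq!(64_i32.checked_exact_div(8), Some(8));

    // The only overflowing case, MIN / -1, also yields None.
    assert_eq!(i32::MIN.checked_exact_div(-1), None);
}
```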
/// diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index 10597854ff8..5f82e6af86b 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs @@ -1110,6 +1110,108 @@ macro_rules! uint_impl { self / rhs } + /// Checked integer division without remainder. Computes `self / rhs`, + /// returning `None` if `rhs == 0` or `self % rhs != 0`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_div)] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".checked_exact_div(2), Some(32));")] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".checked_exact_div(32), Some(2));")] + #[doc = concat!("assert_eq!(65", stringify!($SelfT), ".checked_exact_div(2), None);")] + #[doc = concat!("assert_eq!(1", stringify!($SelfT), ".checked_exact_div(0), None);")] + /// ``` + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn checked_exact_div(self, rhs: Self) -> Option<Self> { + if intrinsics::unlikely(rhs == 0) { + None + } else { + // SAFETY: division by zero is checked above + unsafe { + if intrinsics::unlikely(intrinsics::unchecked_rem(self, rhs) != 0) { + None + } else { + Some(intrinsics::exact_div(self, rhs)) + } + } + } + } + + /// Checked integer division without remainder. Computes `self / rhs`. + /// + /// # Panics + /// + /// This function will panic if `rhs == 0` or `self % rhs != 0`. + /// + /// # Examples + /// + /// Basic usage: + /// + /// ``` + /// #![feature(exact_div)] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".exact_div(2), 32);")] + #[doc = concat!("assert_eq!(64", stringify!($SelfT), ".exact_div(32), 2);")] + /// ``` + /// + /// ```should_panic + /// #![feature(exact_div)] + #[doc = concat!("let _ = 65", stringify!($SelfT), ".exact_div(2);")] + /// ``` + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const fn exact_div(self, rhs: Self) -> Self { + match self.checked_exact_div(rhs) { + Some(x) => x, + None => panic!("Failed to divide without remainder"), + } + } + + /// Unchecked integer division without remainder. Computes `self / rhs`. + /// + /// # Safety + /// + /// This results in undefined behavior when `rhs == 0` or `self % rhs != 0`, + /// i.e. when [`checked_exact_div`](Self::checked_exact_div) would return `None`. + #[unstable( + feature = "exact_div", + issue = "139911", + )] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline] + pub const unsafe fn unchecked_exact_div(self, rhs: Self) -> Self { + assert_unsafe_precondition!( + check_language_ub, + concat!(stringify!($SelfT), "::unchecked_exact_div cannot divide by zero or leave a remainder"), + ( + lhs: $SelfT = self, + rhs: $SelfT = rhs, + ) => rhs > 0 && lhs % rhs == 0, + ); + // SAFETY: Same precondition + unsafe { intrinsics::exact_div(self, rhs) } + } + /// Checked integer remainder. Computes `self % rhs`, returning `None` /// if `rhs == 0`. 
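A hedged sketch of one way the unchecked variant could be used when divisibility is guaranteed by construction; the `halve_even` helper is purely illustrative and not part of this change:

```rust
#![feature(exact_div)]

/// Halves a value after forcing it to be even.
fn halve_even(x: u32) -> u32 {
    let even = x & !1; // clear the low bit, so `even % 2 == 0`
    // SAFETY: the divisor is non-zero and `even` is a multiple of 2,
    // so `checked_exact_div` would return `Some`.
    unsafe { even.unchecked_exact_div(2) }
}

fn main() {
    assert_eq!(halve_even(9), 4);
    assert_eq!(halve_even(8), 4);
}
```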
/// diff --git a/library/core/src/primitive_docs.rs b/library/core/src/primitive_docs.rs index 17c4b488361..10b11613f90 100644 --- a/library/core/src/primitive_docs.rs +++ b/library/core/src/primitive_docs.rs @@ -1623,7 +1623,7 @@ mod prim_usize {} /// * if `size_of_val(t) > 0`, then `t` is dereferenceable for `size_of_val(t)` many bytes /// /// If `t` points at address `a`, being "dereferenceable" for N bytes means that the memory range -/// `[a, a + N)` is all contained within a single [allocated object]. +/// `[a, a + N)` is all contained within a single [allocation]. /// /// For instance, this means that unsafe code in a safe function may assume these invariants are /// ensured of arguments passed by the caller, and it may assume that these invariants are ensured @@ -1639,7 +1639,7 @@ mod prim_usize {} /// may be unsound or become unsound in future versions of Rust depending on how this question is /// decided. /// -/// [allocated object]: ptr#allocated-object +/// [allocation]: ptr#allocation #[stable(feature = "rust1", since = "1.0.0")] mod prim_ref {} diff --git a/library/core/src/ptr/const_ptr.rs b/library/core/src/ptr/const_ptr.rs index 31267bfb1f9..a1dab23ea7b 100644 --- a/library/core/src/ptr/const_ptr.rs +++ b/library/core/src/ptr/const_ptr.rs @@ -397,35 +397,7 @@ impl<T: ?Sized> *const T { if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) } } - /// Adds a signed offset to a pointer. - /// - /// `count` is in units of T; e.g., a `count` of 3 represents a pointer - /// offset of `3 * size_of::<T>()` bytes. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is Undefined Behavior: - /// - /// * The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without - /// "wrapping around"), must fit in an `isize`. - /// - /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge - /// of the address space. Note that "range" here refers to a half-open range as usual in Rust, - /// i.e., `self..result` for non-negative offsets and `result..self` for negative offsets. - /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. - /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always - /// safe. - /// - /// Consider using [`wrapping_offset`] instead if these constraints are - /// difficult to satisfy. The only advantage of this method is that it - /// enables more aggressive compiler optimizations. - /// - /// [`wrapping_offset`]: #method.wrapping_offset - /// [allocated object]: crate::ptr#allocated-object + #[doc = include_str!("./docs/offset.md")] /// /// # Examples /// @@ -510,17 +482,17 @@ impl<T: ?Sized> *const T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to + /// The resulting pointer "remembers" the [allocation] that `self` points to /// (this is called "[Provenance](ptr/index.html#provenance)"). - /// The pointer must not be used to read or write other allocated objects. 
+ /// The pointer must not be used to read or write other allocations. /// /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`offset`], this method basically delays the requirement of staying within the - /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object + /// same allocation: [`offset`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -528,10 +500,10 @@ impl<T: ?Sized> *const T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other - /// words, leaving the allocated object and then re-entering it later is permitted. + /// words, leaving the allocation and then re-entering it later is permitted. /// /// [`offset`]: #method.offset - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -644,7 +616,7 @@ impl<T: ?Sized> *const T { /// * `self` and `origin` must either /// /// * point to the same address, or - /// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocated object], and the memory range between + /// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocation], and the memory range between /// the two pointers must be in bounds of that object. (See below for an example.) /// /// * The distance between the pointers, in bytes, must be an exact multiple @@ -652,10 +624,10 @@ impl<T: ?Sized> *const T { /// /// As a consequence, the absolute distance between the pointers, in bytes, computed on /// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is - /// implied by the in-bounds requirement, and the fact that no allocated object can be larger + /// implied by the in-bounds requirement, and the fact that no allocation can be larger /// than `isize::MAX` bytes. /// - /// The requirement for pointers to be derived from the same allocated object is primarily + /// The requirement for pointers to be derived from the same allocation is primarily /// needed for `const`-compatibility: the distance between pointers into *different* allocated /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between @@ -664,7 +636,7 @@ impl<T: ?Sized> *const T { // FIXME: recommend `addr()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics /// @@ -905,38 +877,7 @@ impl<T: ?Sized> *const T { } } - /// Adds an unsigned offset to a pointer. - /// - /// This can only move the pointer forward (or not move it). 
If you need to move forward or - /// backward depending on the value, then you might want [`offset`](#method.offset) instead - /// which takes a signed offset. - /// - /// `count` is in units of T; e.g., a `count` of 3 represents a pointer - /// offset of `3 * size_of::<T>()` bytes. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is Undefined Behavior: - /// - /// * The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without - /// "wrapping around"), must fit in an `isize`. - /// - /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge - /// of the address space. - /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. - /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always - /// safe. - /// - /// Consider using [`wrapping_add`] instead if these constraints are - /// difficult to satisfy. The only advantage of this method is that it - /// enables more aggressive compiler optimizations. - /// - /// [`wrapping_add`]: #method.wrapping_add - /// [allocated object]: crate::ptr#allocated-object + #[doc = include_str!("./docs/add.md")] /// /// # Examples /// @@ -1028,12 +969,12 @@ impl<T: ?Sized> *const T { /// "wrapping around"), must fit in an `isize`. /// /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge + /// [allocation], and the entire memory range between `self` and the result must be in + /// bounds of that allocation. In particular, this range must not "wrap around" the edge /// of the address space. /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. + /// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset + /// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always /// safe. /// @@ -1042,7 +983,7 @@ impl<T: ?Sized> *const T { /// enables more aggressive compiler optimizations. /// /// [`wrapping_sub`]: #method.wrapping_sub - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1132,16 +1073,16 @@ impl<T: ?Sized> *const T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not - /// be used to read or write other allocated objects. + /// The resulting pointer "remembers" the [allocation] that `self` points to; it must not + /// be used to read or write other allocations. 
/// /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`add`], this method basically delays the requirement of staying within the - /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object + /// same allocation: [`add`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -1149,10 +1090,10 @@ impl<T: ?Sized> *const T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the - /// allocated object and then re-entering it later is permitted. + /// allocation and then re-entering it later is permitted. /// /// [`add`]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1211,16 +1152,16 @@ impl<T: ?Sized> *const T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not - /// be used to read or write other allocated objects. + /// The resulting pointer "remembers" the [allocation] that `self` points to; it must not + /// be used to read or write other allocations. /// /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`sub`], this method basically delays the requirement of staying within the - /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object + /// same allocation: [`sub`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -1228,10 +1169,10 @@ impl<T: ?Sized> *const T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the - /// allocated object and then re-entering it later is permitted. + /// allocation and then re-entering it later is permitted. 
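A minimal sketch of the leave-and-re-enter behaviour described above; only the pointer that is finally dereferenced has to be back inside the allocation:

```rust
fn main() {
    let data = [1u8, 2, 3, 4];
    let p = data.as_ptr();

    // Going far out of bounds with `wrapping_add` is safe by itself...
    let far = p.wrapping_add(1_000);
    // ...as long as the pointer is back inside the allocation when dereferenced.
    let back = far.wrapping_sub(1_000 - 2);
    assert_eq!(unsafe { *back }, 3);
}
```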
/// /// [`sub`]: #method.sub - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1623,8 +1564,8 @@ impl<T> *const [T] { /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes, /// and it must be properly aligned. This means in particular: /// - /// * The entire memory range of this slice must be contained within a single [allocated object]! - /// Slices can never span across multiple allocated objects. + /// * The entire memory range of this slice must be contained within a single [allocation]! + /// Slices can never span across multiple allocations. /// /// * The pointer must be aligned even for zero-length slices. One /// reason for this is that enum layout optimizations may rely on references @@ -1645,7 +1586,7 @@ impl<T> *const [T] { /// See also [`slice::from_raw_parts`][]. /// /// [valid]: crate::ptr#safety - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics during const evaluation /// diff --git a/library/core/src/ptr/docs/add.md b/library/core/src/ptr/docs/add.md new file mode 100644 index 00000000000..ae7c7785684 --- /dev/null +++ b/library/core/src/ptr/docs/add.md @@ -0,0 +1,32 @@ +Adds an unsigned offset to a pointer. + +This can only move the pointer forward (or not move it). If you need to move forward or +backward depending on the value, then you might want [`offset`](#method.offset) instead +which takes a signed offset. + +`count` is in units of T; e.g., a `count` of 3 represents a pointer +offset of `3 * size_of::<T>()` bytes. + +# Safety + +If any of the following conditions are violated, the result is Undefined Behavior: + +* The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without +"wrapping around"), must fit in an `isize`. + +* If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some +[allocation], and the entire memory range between `self` and the result must be in +bounds of that allocation. In particular, this range must not "wrap around" the edge +of the address space. + +Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset +stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. +This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always +safe. + +Consider using [`wrapping_add`] instead if these constraints are +difficult to satisfy. The only advantage of this method is that it +enables more aggressive compiler optimizations. + +[`wrapping_add`]: #method.wrapping_add +[allocation]: crate::ptr#allocation diff --git a/library/core/src/ptr/docs/offset.md b/library/core/src/ptr/docs/offset.md new file mode 100644 index 00000000000..f2e335a79a5 --- /dev/null +++ b/library/core/src/ptr/docs/offset.md @@ -0,0 +1,29 @@ +Adds a signed offset to a pointer. + +`count` is in units of T; e.g., a `count` of 3 represents a pointer +offset of `3 * size_of::<T>()` bytes. + +# Safety + +If any of the following conditions are violated, the result is Undefined Behavior: + +* The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without +"wrapping around"), must fit in an `isize`. + +* If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some +[allocation], and the entire memory range between `self` and the result must be in +bounds of that allocation. 
In particular, this range must not "wrap around" the edge +of the address space. Note that "range" here refers to a half-open range as usual in Rust, +i.e., `self..result` for non-negative offsets and `result..self` for negative offsets. + +Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset +stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. +This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always +safe. + +Consider using [`wrapping_offset`] instead if these constraints are +difficult to satisfy. The only advantage of this method is that it +enables more aggressive compiler optimizations. + +[`wrapping_offset`]: #method.wrapping_offset +[allocation]: crate::ptr#allocation diff --git a/library/core/src/ptr/mod.rs b/library/core/src/ptr/mod.rs index 99c4211cea8..81bf6778b05 100644 --- a/library/core/src/ptr/mod.rs +++ b/library/core/src/ptr/mod.rs @@ -19,10 +19,10 @@ //! pointer. The following points are only concerned with non-zero-sized accesses. //! * A [null] pointer is *never* valid. //! * For a pointer to be valid, it is necessary, but not always sufficient, that the pointer be -//! *dereferenceable*. The [provenance] of the pointer is used to determine which [allocated -//! object] it is derived from; a pointer is dereferenceable if the memory range of the given size -//! starting at the pointer is entirely contained within the bounds of that allocated object. Note -//! that in Rust, every (stack-allocated) variable is considered a separate allocated object. +//! *dereferenceable*. The [provenance] of the pointer is used to determine which [allocation] +//! it is derived from; a pointer is dereferenceable if the memory range of the given size +//! starting at the pointer is entirely contained within the bounds of that allocation. Note +//! that in Rust, every (stack-allocated) variable is considered a separate allocation. //! * All accesses performed by functions in this module are *non-atomic* in the sense //! of [atomic operations] used to synchronize between threads. This means it is //! undefined behavior to perform two concurrent accesses to the same location from different @@ -30,7 +30,7 @@ //! includes [`read_volatile`] and [`write_volatile`]: Volatile accesses cannot //! be used for inter-thread synchronization. //! * The result of casting a reference to a pointer is valid for as long as the -//! underlying object is live and no reference (just raw pointers) is used to +//! underlying allocation is live and no reference (just raw pointers) is used to //! access the same memory. That is, reference and pointer accesses cannot be //! interleaved. //! @@ -95,24 +95,26 @@ //! //! [valid value]: ../../reference/behavior-considered-undefined.html#invalid-values //! -//! ## Allocated object +//! ## Allocation //! -//! An *allocated object* is a subset of program memory which is addressable +//! <a id="allocated-object"></a> <!-- keep old URLs working --> +//! +//! An *allocation* is a subset of program memory which is addressable //! from Rust, and within which pointer arithmetic is possible. Examples of -//! allocated objects include heap allocations, stack-allocated variables, +//! allocations include heap allocations, stack-allocated variables, //! statics, and consts. The safety preconditions of some Rust operations - //! such as `offset` and field projections (`expr.field`) - are defined in -//! terms of the allocated objects on which they operate. +//! 
terms of the allocations on which they operate. //! -//! An allocated object has a base address, a size, and a set of memory -//! addresses. It is possible for an allocated object to have zero size, but -//! such an allocated object will still have a base address. The base address -//! of an allocated object is not necessarily unique. While it is currently the -//! case that an allocated object always has a set of memory addresses which is +//! An allocation has a base address, a size, and a set of memory +//! addresses. It is possible for an allocation to have zero size, but +//! such an allocation will still have a base address. The base address +//! of an allocation is not necessarily unique. While it is currently the +//! case that an allocation always has a set of memory addresses which is //! fully contiguous (i.e., has no "holes"), there is no guarantee that this //! will not change in the future. //! -//! For any allocated object with `base` address, `size`, and a set of +//! For any allocation with `base` address, `size`, and a set of //! `addresses`, the following are guaranteed: //! - For all addresses `a` in `addresses`, `a` is in the range `base .. (base + //! size)` (note that this requires `a < base + size`, not `a <= base + size`) @@ -122,11 +124,11 @@ //! - `size <= isize::MAX` //! //! As a consequence of these guarantees, given any address `a` within the set -//! of addresses of an allocated object: +//! of addresses of an allocation: //! - It is guaranteed that `a - base` does not overflow `isize` //! - It is guaranteed that `a - base` is non-negative //! - It is guaranteed that, given `o = a - base` (i.e., the offset of `a` within -//! the allocated object), `base + o` will not wrap around the address space (in +//! the allocation), `base + o` will not wrap around the address space (in //! other words, will not overflow `usize`) //! //! [`null()`]: null @@ -138,8 +140,8 @@ //! and the freed memory gets reallocated before your read/write (in fact this is the //! worst-case scenario, UAFs would be much less concerning if this didn't happen!). //! As another example, consider that [`wrapping_offset`] is documented to "remember" -//! the allocated object that the original pointer points to, even if it is offset far -//! outside the memory range occupied by that allocated object. +//! the allocation that the original pointer points to, even if it is offset far +//! outside the memory range occupied by that allocation. //! To rationalize claims like this, pointers need to somehow be *more* than just their addresses: //! they must have **provenance**. //! @@ -159,12 +161,12 @@ //! writes. Note that this can interact with the other components, e.g. a pointer might permit //! mutation only for a subset of addresses, or only for a subset of its maximal timespan. //! -//! When an [allocated object] is created, it has a unique Original Pointer. For alloc +//! When an [allocation] is created, it has a unique Original Pointer. For alloc //! APIs this is literally the pointer the call returns, and for local variables and statics, //! this is the name of the variable/static. (This is mildly overloading the term "pointer" //! for the sake of brevity/exposition.) //! -//! The Original Pointer for an allocated object has provenance that constrains the *spatial* +//! The Original Pointer for an allocation has provenance that constrains the *spatial* //! permissions of this pointer to the memory range of the allocation, and the *temporal* //! permissions to the lifetime of the allocation. 
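A small sketch of the allocation guarantees listed above: a pointer derived from the same Original Pointer stays within the allocation, and its distance from the base is a non-negative element count that fits in `isize`:

```rust
fn main() {
    let buf = [0u32; 8];
    let base = buf.as_ptr();
    let inside = base.wrapping_add(5);

    // SAFETY: both pointers are derived from the same allocation (`buf`)
    // and the range between them is in bounds of it.
    let elems = unsafe { inside.offset_from(base) };
    assert_eq!(elems, 5);
}
```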
Provenance is implicitly inherited by all //! pointers transitively derived from the Original Pointer through operations like [`offset`], @@ -192,10 +194,10 @@ //! provenance since they access an empty range of memory. //! //! * It is undefined behavior to [`offset`] a pointer across a memory range that is not contained -//! in the allocated object it is derived from, or to [`offset_from`] two pointers not derived -//! from the same allocated object. Provenance is used to say what exactly "derived from" even +//! in the allocation it is derived from, or to [`offset_from`] two pointers not derived +//! from the same allocation. Provenance is used to say what exactly "derived from" even //! means: the lineage of a pointer is traced back to the Original Pointer it descends from, and -//! that identifies the relevant allocated object. In particular, it's always UB to offset a +//! that identifies the relevant allocation. In particular, it's always UB to offset a //! pointer derived from something that is now deallocated, except if the offset is 0. //! //! But it *is* still sound to: @@ -216,7 +218,7 @@ //! * Compare arbitrary pointers by address. Pointer comparison ignores provenance and addresses //! *are* just integers, so there is always a coherent answer, even if the pointers are dangling //! or from different provenances. Note that if you get "lucky" and notice that a pointer at the -//! end of one allocated object is the "same" address as the start of another allocated object, +//! end of one allocation is the "same" address as the start of another allocation, //! anything you do with that fact is *probably* going to be gibberish. The scope of that //! gibberish is kept under control by the fact that the two pointers *still* aren't allowed to //! access the other's allocation (bytes), because they still have different provenance. @@ -369,7 +371,7 @@ //! integer-to-pointer casts. //! //! [aliasing]: ../../nomicon/aliasing.html -//! [allocated object]: #allocated-object +//! [allocation]: #allocation //! [provenance]: #provenance //! [book]: ../../book/ch19-01-unsafe-rust.html#dereferencing-a-raw-pointer //! [ub]: ../../reference/behavior-considered-undefined.html @@ -1289,7 +1291,7 @@ pub const unsafe fn swap<T>(x: *mut T, y: *mut T) { // SAFETY: the caller must guarantee that `x` and `y` are // valid for writes and properly aligned. `tmp` cannot be // overlapping either `x` or `y` because `tmp` was just allocated - // on the stack as a separate allocated object. + // on the stack as a separate allocation. unsafe { copy_nonoverlapping(x, tmp.as_mut_ptr(), 1); copy(y, x, 1); // `x` and `y` may overlap @@ -1409,7 +1411,7 @@ pub const unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) { // Going though a slice here helps codegen know the size fits in `isize` let slice = slice_from_raw_parts_mut(x, count); // SAFETY: This is all readable from the pointer, meaning it's one - // allocated object, and thus cannot be more than isize::MAX bytes. + // allocation, and thus cannot be more than isize::MAX bytes. let bytes = unsafe { mem::size_of_val_raw::<[T]>(slice) }; if let Some(bytes) = NonZero::new(bytes) { // SAFETY: These are the same ranges, just expressed in a different @@ -1563,7 +1565,7 @@ pub const unsafe fn replace<T>(dst: *mut T, src: T) -> T { // SAFETY: the caller must guarantee that `dst` is valid to be // cast to a mutable reference (valid for writes, aligned, initialized), // and cannot overlap `src` since `dst` must point to a distinct - // allocated object. 
+ // allocation. unsafe { ub_checks::assert_unsafe_precondition!( check_language_ub, @@ -1810,7 +1812,7 @@ pub const unsafe fn read_unaligned<T>(src: *const T) -> T { let mut tmp = MaybeUninit::<T>::uninit(); // SAFETY: the caller must guarantee that `src` is valid for reads. // `src` cannot overlap `tmp` because `tmp` was just allocated on - // the stack as a separate allocated object. + // the stack as a separate allocation. // // Also, since we just wrote a valid value into `tmp`, it is guaranteed // to be properly initialized. diff --git a/library/core/src/ptr/mut_ptr.rs b/library/core/src/ptr/mut_ptr.rs index b1b3379d741..968f033bf59 100644 --- a/library/core/src/ptr/mut_ptr.rs +++ b/library/core/src/ptr/mut_ptr.rs @@ -394,34 +394,7 @@ impl<T: ?Sized> *mut T { if self.is_null() { None } else { Some(unsafe { &*(self as *const MaybeUninit<T>) }) } } - /// Adds a signed offset to a pointer. - /// - /// `count` is in units of T; e.g., a `count` of 3 represents a pointer - /// offset of `3 * size_of::<T>()` bytes. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is Undefined Behavior: - /// - /// * The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without - /// "wrapping around"), must fit in an `isize`. - /// - /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge - /// of the address space. - /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. - /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always - /// safe. - /// - /// Consider using [`wrapping_offset`] instead if these constraints are - /// difficult to satisfy. The only advantage of this method is that it - /// enables more aggressive compiler optimizations. - /// - /// [`wrapping_offset`]: #method.wrapping_offset - /// [allocated object]: crate::ptr#allocated-object + #[doc = include_str!("./docs/offset.md")] /// /// # Examples /// @@ -475,7 +448,7 @@ impl<T: ?Sized> *mut T { // SAFETY: the caller must uphold the safety contract for `offset`. // The obtained pointer is valid for writes since the caller must - // guarantee that it points to the same allocated object as `self`. + // guarantee that it points to the same allocation as `self`. unsafe { intrinsics::offset(self, count) } } @@ -508,17 +481,17 @@ impl<T: ?Sized> *mut T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to + /// The resulting pointer "remembers" the [allocation] that `self` points to /// (this is called "[Provenance](ptr/index.html#provenance)"). - /// The pointer must not be used to read or write other allocated objects. + /// The pointer must not be used to read or write other allocations. 
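An analogous sketch for the `*mut T` version of `wrapping_offset`, writing through the pointer once it is back in bounds of its original allocation:

```rust
fn main() {
    let mut data = [0u8; 4];
    let p: *mut u8 = data.as_mut_ptr();

    // The intermediate pointer may sit outside the allocation;
    // only the finally-dereferenced pointer has to be in bounds.
    let q = p.wrapping_offset(8).wrapping_offset(-7);
    // SAFETY: `q` is `p + 1`, inside `data`.
    unsafe { *q = 9 };
    assert_eq!(data, [0, 9, 0, 0]);
}
```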
/// /// In other words, `let z = x.wrapping_offset((y as isize) - (x as isize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`offset`], this method basically delays the requirement of staying within the - /// same allocated object: [`offset`] is immediate Undefined Behavior when crossing object + /// same allocation: [`offset`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_offset` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`offset`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -526,10 +499,10 @@ impl<T: ?Sized> *mut T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_offset(o).wrapping_offset(o.wrapping_neg())` is always the same as `x`. In other - /// words, leaving the allocated object and then re-entering it later is permitted. + /// words, leaving the allocation and then re-entering it later is permitted. /// /// [`offset`]: #method.offset - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -818,7 +791,7 @@ impl<T: ?Sized> *mut T { /// * `self` and `origin` must either /// /// * point to the same address, or - /// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocated object], and the memory range between + /// * both be [derived from][crate::ptr#provenance] a pointer to the same [allocation], and the memory range between /// the two pointers must be in bounds of that object. (See below for an example.) /// /// * The distance between the pointers, in bytes, must be an exact multiple @@ -826,10 +799,10 @@ impl<T: ?Sized> *mut T { /// /// As a consequence, the absolute distance between the pointers, in bytes, computed on /// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is - /// implied by the in-bounds requirement, and the fact that no allocated object can be larger + /// implied by the in-bounds requirement, and the fact that no allocation can be larger /// than `isize::MAX` bytes. /// - /// The requirement for pointers to be derived from the same allocated object is primarily + /// The requirement for pointers to be derived from the same allocation is primarily /// needed for `const`-compatibility: the distance between pointers into *different* allocated /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between @@ -838,7 +811,7 @@ impl<T: ?Sized> *mut T { // FIXME: recommend `addr()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics /// @@ -996,44 +969,13 @@ impl<T: ?Sized> *mut T { unsafe { (self as *const T).byte_offset_from_unsigned(origin) } } - /// Adds an unsigned offset to a pointer. - /// - /// This can only move the pointer forward (or not move it). 
If you need to move forward or - /// backward depending on the value, then you might want [`offset`](#method.offset) instead - /// which takes a signed offset. - /// - /// `count` is in units of T; e.g., a `count` of 3 represents a pointer - /// offset of `3 * size_of::<T>()` bytes. - /// - /// # Safety - /// - /// If any of the following conditions are violated, the result is Undefined Behavior: - /// - /// * The offset in bytes, `count * size_of::<T>()`, computed on mathematical integers (without - /// "wrapping around"), must fit in an `isize`. - /// - /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge - /// of the address space. - /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. - /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always - /// safe. - /// - /// Consider using [`wrapping_add`] instead if these constraints are - /// difficult to satisfy. The only advantage of this method is that it - /// enables more aggressive compiler optimizations. - /// - /// [`wrapping_add`]: #method.wrapping_add - /// [allocated object]: crate::ptr#allocated-object + #[doc = include_str!("./docs/add.md")] /// /// # Examples /// /// ``` - /// let s: &str = "123"; - /// let ptr: *const u8 = s.as_ptr(); + /// let mut s: String = "123".to_string(); + /// let ptr: *mut u8 = s.as_mut_ptr(); /// /// unsafe { /// assert_eq!('2', *ptr.add(1) as char); @@ -1119,12 +1061,12 @@ impl<T: ?Sized> *mut T { /// "wrapping around"), must fit in an `isize`. /// /// * If the computed offset is non-zero, then `self` must be [derived from][crate::ptr#provenance] a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge + /// [allocation], and the entire memory range between `self` and the result must be in + /// bounds of that allocation. In particular, this range must not "wrap around" the edge /// of the address space. /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. + /// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset + /// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always /// safe. /// @@ -1133,7 +1075,7 @@ impl<T: ?Sized> *mut T { /// enables more aggressive compiler optimizations. /// /// [`wrapping_sub`]: #method.wrapping_sub - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1223,16 +1165,16 @@ impl<T: ?Sized> *mut T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not - /// be used to read or write other allocated objects. 
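Editorial note: the `add` documentation above claims that `vec.as_ptr().add(vec.len())` is always safe because the one-past-the-end pointer stays in bounds of the allocation. A minimal check of that claim, not part of the patch:

```rust
fn main() {
    let v = vec![1u8, 2, 3];

    // SAFETY: the one-past-the-end pointer is still in bounds of `v`'s
    // allocation, so the computed offset cannot overflow `isize`.
    let end = unsafe { v.as_ptr().add(v.len()) };

    assert_eq!(end as usize - v.as_ptr() as usize, v.len());
}
```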
+ /// The resulting pointer "remembers" the [allocation] that `self` points to; it must not + /// be used to read or write other allocations. /// /// In other words, `let z = x.wrapping_add((y as usize) - (x as usize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`add`], this method basically delays the requirement of staying within the - /// same allocated object: [`add`] is immediate Undefined Behavior when crossing object + /// same allocation: [`add`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_add` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`add`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -1240,10 +1182,10 @@ impl<T: ?Sized> *mut T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the - /// allocated object and then re-entering it later is permitted. + /// allocation and then re-entering it later is permitted. /// /// [`add`]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1299,16 +1241,16 @@ impl<T: ?Sized> *mut T { /// /// This operation itself is always safe, but using the resulting pointer is not. /// - /// The resulting pointer "remembers" the [allocated object] that `self` points to; it must not - /// be used to read or write other allocated objects. + /// The resulting pointer "remembers" the [allocation] that `self` points to; it must not + /// be used to read or write other allocations. /// /// In other words, `let z = x.wrapping_sub((x as usize) - (y as usize))` does *not* make `z` /// the same as `y` even if we assume `T` has size `1` and there is no overflow: `z` is still /// attached to the object `x` is attached to, and dereferencing it is Undefined Behavior unless - /// `x` and `y` point into the same allocated object. + /// `x` and `y` point into the same allocation. /// /// Compared to [`sub`], this method basically delays the requirement of staying within the - /// same allocated object: [`sub`] is immediate Undefined Behavior when crossing object + /// same allocation: [`sub`] is immediate Undefined Behavior when crossing object /// boundaries; `wrapping_sub` produces a pointer but still leads to Undefined Behavior if a /// pointer is dereferenced when it is out-of-bounds of the object it is attached to. [`sub`] /// can be optimized better and is thus preferable in performance-sensitive code. @@ -1316,10 +1258,10 @@ impl<T: ?Sized> *mut T { /// The delayed check only considers the value of the pointer that was dereferenced, not the /// intermediate values used during the computation of the final result. For example, /// `x.wrapping_add(o).wrapping_sub(o)` is always the same as `x`. In other words, leaving the - /// allocated object and then re-entering it later is permitted. + /// allocation and then re-entering it later is permitted. 
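Editorial note: the `wrapping_add` contract touched above is typically exercised by stepping a raw pointer toward a one-past-the-end sentinel. A minimal sketch (not from the diff):

```rust
fn main() {
    let data = [1u16, 2, 3, 4, 5];
    let mut ptr = data.as_ptr();
    let end = ptr.wrapping_add(data.len()); // one past the end: fine to compute

    let mut sum = 0;
    while ptr != end {
        // SAFETY: `ptr` stays within `data` until it reaches `end`.
        sum += unsafe { *ptr };
        ptr = ptr.wrapping_add(1);
    }
    assert_eq!(sum, 15);
}
```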
/// /// [`sub`]: #method.sub - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -1828,7 +1770,7 @@ impl<T> *mut [T] { /// /// # Safety /// - /// `mid` must be [in-bounds] of the underlying [allocated object]. + /// `mid` must be [in-bounds] of the underlying [allocation]. /// Which means `self` must be dereferenceable and span a single allocation /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these /// requirements is *[undefined behavior]* even if the resulting pointers are not used. @@ -1839,7 +1781,7 @@ impl<T> *mut [T] { /// /// [`split_at_mut_unchecked`]: #method.split_at_mut_unchecked /// [in-bounds]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html /// /// # Examples @@ -1874,13 +1816,14 @@ impl<T> *mut [T] { /// /// # Safety /// - /// `mid` must be [in-bounds] of the underlying [allocated object]. + /// `mid` must be [in-bounds] of the underlying [allocation]. /// Which means `self` must be dereferenceable and span a single allocation /// that is at least `mid * size_of::<T>()` bytes long. Not upholding these /// requirements is *[undefined behavior]* even if the resulting pointers are not used. /// /// [in-bounds]: #method.add /// [out-of-bounds index]: #method.add + /// [allocation]: crate::ptr#allocation /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html /// /// # Examples @@ -1980,8 +1923,8 @@ impl<T> *mut [T] { /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes, /// and it must be properly aligned. This means in particular: /// - /// * The entire memory range of this slice must be contained within a single [allocated object]! - /// Slices can never span across multiple allocated objects. + /// * The entire memory range of this slice must be contained within a single [allocation]! + /// Slices can never span across multiple allocations. /// /// * The pointer must be aligned even for zero-length slices. One /// reason for this is that enum layout optimizations may rely on references @@ -2002,7 +1945,7 @@ impl<T> *mut [T] { /// See also [`slice::from_raw_parts`][]. /// /// [valid]: crate::ptr#safety - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics during const evaluation /// @@ -2038,8 +1981,8 @@ impl<T> *mut [T] { /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()` /// many bytes, and it must be properly aligned. This means in particular: /// - /// * The entire memory range of this slice must be contained within a single [allocated object]! - /// Slices can never span across multiple allocated objects. + /// * The entire memory range of this slice must be contained within a single [allocation]! + /// Slices can never span across multiple allocations. /// /// * The pointer must be aligned even for zero-length slices. One /// reason for this is that enum layout optimizations may rely on references @@ -2060,7 +2003,7 @@ impl<T> *mut [T] { /// See also [`slice::from_raw_parts_mut`][]. 
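Editorial note: the single-allocation requirement spelled out above for raw slice pointers can be seen with `ptr::slice_from_raw_parts_mut`, assuming the whole range comes from one local array. Illustrative only, not part of the patch:

```rust
use std::ptr;

fn main() {
    let mut buf = [0u32; 4];
    let raw: *mut [u32] = ptr::slice_from_raw_parts_mut(buf.as_mut_ptr(), buf.len());

    // The entire range lives in the single allocation behind `buf`, so it is
    // sound to use the raw slice pointer for reads and writes here.
    assert_eq!(raw.len(), 4);
    unsafe {
        (*raw)[0] = 7;
    }
    assert_eq!(buf[0], 7);
}
```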
/// /// [valid]: crate::ptr#safety - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics during const evaluation /// diff --git a/library/core/src/ptr/non_null.rs b/library/core/src/ptr/non_null.rs index 226b5229f17..91b8d1bf9a7 100644 --- a/library/core/src/ptr/non_null.rs +++ b/library/core/src/ptr/non_null.rs @@ -530,16 +530,16 @@ impl<T: ?Sized> NonNull<T> { /// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`. /// /// * If the computed offset is non-zero, then `self` must be derived from a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge + /// [allocation], and the entire memory range between `self` and the result must be in + /// bounds of that allocation. In particular, this range must not "wrap around" the edge /// of the address space. /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. + /// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset + /// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always /// safe. /// - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -606,16 +606,16 @@ impl<T: ?Sized> NonNull<T> { /// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`. /// /// * If the computed offset is non-zero, then `self` must be derived from a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge + /// [allocation], and the entire memory range between `self` and the result must be in + /// bounds of that allocation. In particular, this range must not "wrap around" the edge /// of the address space. /// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. + /// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset + /// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always /// safe. /// - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -683,16 +683,16 @@ impl<T: ?Sized> NonNull<T> { /// * The computed offset, `count * size_of::<T>()` bytes, must not overflow `isize`. /// /// * If the computed offset is non-zero, then `self` must be derived from a pointer to some - /// [allocated object], and the entire memory range between `self` and the result must be in - /// bounds of that allocated object. In particular, this range must not "wrap around" the edge + /// [allocation], and the entire memory range between `self` and the result must be in + /// bounds of that allocation. In particular, this range must not "wrap around" the edge /// of the address space. 
/// - /// Allocated objects can never be larger than `isize::MAX` bytes, so if the computed offset - /// stays in bounds of the allocated object, it is guaranteed to satisfy the first requirement. + /// Allocations can never be larger than `isize::MAX` bytes, so if the computed offset + /// stays in bounds of the allocation, it is guaranteed to satisfy the first requirement. /// This implies, for instance, that `vec.as_ptr().add(vec.len())` (for `vec: Vec<T>`) is always /// safe. /// - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Examples /// @@ -775,7 +775,7 @@ impl<T: ?Sized> NonNull<T> { /// * `self` and `origin` must either /// /// * point to the same address, or - /// * both be *derived from* a pointer to the same [allocated object], and the memory range between + /// * both be *derived from* a pointer to the same [allocation], and the memory range between /// the two pointers must be in bounds of that object. (See below for an example.) /// /// * The distance between the pointers, in bytes, must be an exact multiple @@ -783,10 +783,10 @@ impl<T: ?Sized> NonNull<T> { /// /// As a consequence, the absolute distance between the pointers, in bytes, computed on /// mathematical integers (without "wrapping around"), cannot overflow an `isize`. This is - /// implied by the in-bounds requirement, and the fact that no allocated object can be larger + /// implied by the in-bounds requirement, and the fact that no allocation can be larger /// than `isize::MAX` bytes. /// - /// The requirement for pointers to be derived from the same allocated object is primarily + /// The requirement for pointers to be derived from the same allocation is primarily /// needed for `const`-compatibility: the distance between pointers into *different* allocated /// objects is not known at compile-time. However, the requirement also exists at /// runtime and may be exploited by optimizations. If you wish to compute the difference between @@ -795,7 +795,7 @@ impl<T: ?Sized> NonNull<T> { // FIXME: recommend `addr()` instead of `as usize` once that is stable. /// /// [`add`]: #method.add - /// [allocated object]: crate::ptr#allocated-object + /// [allocation]: crate::ptr#allocation /// /// # Panics /// @@ -1475,8 +1475,8 @@ impl<T> NonNull<[T]> { /// * The pointer must be [valid] for reads for `ptr.len() * size_of::<T>()` many bytes, /// and it must be properly aligned. This means in particular: /// - /// * The entire memory range of this slice must be contained within a single allocated object! - /// Slices can never span across multiple allocated objects. + /// * The entire memory range of this slice must be contained within a single allocation! + /// Slices can never span across multiple allocations. /// /// * The pointer must be aligned even for zero-length slices. One /// reason for this is that enum layout optimizations may rely on references @@ -1520,8 +1520,8 @@ impl<T> NonNull<[T]> { /// * The pointer must be [valid] for reads and writes for `ptr.len() * size_of::<T>()` /// many bytes, and it must be properly aligned. This means in particular: /// - /// * The entire memory range of this slice must be contained within a single allocated object! - /// Slices can never span across multiple allocated objects. + /// * The entire memory range of this slice must be contained within a single allocation! + /// Slices can never span across multiple allocations. /// /// * The pointer must be aligned even for zero-length slices. 
One /// reason for this is that enum layout optimizations may rely on references diff --git a/library/core/src/result.rs b/library/core/src/result.rs index ef2da5e8fbf..23e32c2e0f0 100644 --- a/library/core/src/result.rs +++ b/library/core/src/result.rs @@ -1722,7 +1722,6 @@ impl<T, E> Result<Result<T, E>, E> { /// # Examples /// /// ``` - /// #![feature(result_flattening)] /// let x: Result<Result<&'static str, u32>, u32> = Ok(Ok("hello")); /// assert_eq!(Ok("hello"), x.flatten()); /// @@ -1736,14 +1735,14 @@ impl<T, E> Result<Result<T, E>, E> { /// Flattening only removes one level of nesting at a time: /// /// ``` - /// #![feature(result_flattening)] /// let x: Result<Result<Result<&'static str, u32>, u32>, u32> = Ok(Ok(Ok("hello"))); /// assert_eq!(Ok(Ok("hello")), x.flatten()); /// assert_eq!(Ok("hello"), x.flatten().flatten()); /// ``` #[inline] - #[unstable(feature = "result_flattening", issue = "70142")] - #[rustc_const_unstable(feature = "result_flattening", issue = "70142")] + #[stable(feature = "result_flattening", since = "CURRENT_RUSTC_VERSION")] + #[rustc_allow_const_fn_unstable(const_precise_live_drops)] + #[rustc_const_stable(feature = "result_flattening", since = "CURRENT_RUSTC_VERSION")] pub const fn flatten(self) -> Result<T, E> { // FIXME(const-hack): could be written with `and_then` match self { diff --git a/library/core/src/slice/index.rs b/library/core/src/slice/index.rs index b18d316c73a..69160a911b2 100644 --- a/library/core/src/slice/index.rs +++ b/library/core/src/slice/index.rs @@ -1,5 +1,7 @@ //! Indexing implementations for `[T]`. +#[cfg(not(bootstrap))] +use crate::intrinsics::slice_get_unchecked; use crate::panic::const_panic; use crate::ub_checks::assert_unsafe_precondition; use crate::{ops, range}; @@ -83,6 +85,7 @@ const fn slice_end_index_overflow_fail() -> ! { // Both the safe and unsafe public methods share these helpers, // which use intrinsics directly to get *no* extra checks. +#[cfg(bootstrap)] #[inline(always)] const unsafe fn get_noubcheck<T>(ptr: *const [T], index: usize) -> *const T { let ptr = ptr as *const T; @@ -90,6 +93,7 @@ const unsafe fn get_noubcheck<T>(ptr: *const [T], index: usize) -> *const T { unsafe { crate::intrinsics::offset(ptr, index) } } +#[cfg(bootstrap)] #[inline(always)] const unsafe fn get_mut_noubcheck<T>(ptr: *mut [T], index: usize) -> *mut T { let ptr = ptr as *mut T; @@ -103,8 +107,9 @@ const unsafe fn get_offset_len_noubcheck<T>( offset: usize, len: usize, ) -> *const [T] { + let ptr = ptr as *const T; // SAFETY: The caller already checked these preconditions - let ptr = unsafe { get_noubcheck(ptr, offset) }; + let ptr = unsafe { crate::intrinsics::offset(ptr, offset) }; crate::intrinsics::aggregate_raw_ptr(ptr, len) } @@ -114,8 +119,9 @@ const unsafe fn get_offset_len_mut_noubcheck<T>( offset: usize, len: usize, ) -> *mut [T] { + let ptr = ptr as *mut T; // SAFETY: The caller already checked these preconditions - let ptr = unsafe { get_mut_noubcheck(ptr, offset) }; + let ptr = unsafe { crate::intrinsics::offset(ptr, offset) }; crate::intrinsics::aggregate_raw_ptr(ptr, len) } @@ -224,15 +230,35 @@ unsafe impl<T> SliceIndex<[T]> for usize { #[inline] fn get(self, slice: &[T]) -> Option<&T> { - // SAFETY: `self` is checked to be in bounds. - if self < slice.len() { unsafe { Some(&*get_noubcheck(slice, self)) } } else { None } + if self < slice.len() { + #[cfg(bootstrap)] + // SAFETY: `self` is checked to be in bounds. 
+ unsafe { + Some(&*get_noubcheck(slice, self)) + } + #[cfg(not(bootstrap))] + // SAFETY: `self` is checked to be in bounds. + unsafe { + Some(slice_get_unchecked(slice, self)) + } + } else { + None + } } #[inline] fn get_mut(self, slice: &mut [T]) -> Option<&mut T> { if self < slice.len() { + #[cfg(bootstrap)] + // SAFETY: `self` is checked to be in bounds. + unsafe { + Some(&mut *get_mut_noubcheck(slice, self)) + } + #[cfg(not(bootstrap))] // SAFETY: `self` is checked to be in bounds. - unsafe { Some(&mut *get_mut_noubcheck(slice, self)) } + unsafe { + Some(slice_get_unchecked(slice, self)) + } } else { None } @@ -254,7 +280,14 @@ unsafe impl<T> SliceIndex<[T]> for usize { // Use intrinsics::assume instead of hint::assert_unchecked so that we don't check the // precondition of this function twice. crate::intrinsics::assume(self < slice.len()); - get_noubcheck(slice, self) + #[cfg(bootstrap)] + { + get_noubcheck(slice, self) + } + #[cfg(not(bootstrap))] + { + slice_get_unchecked(slice, self) + } } } @@ -267,7 +300,16 @@ unsafe impl<T> SliceIndex<[T]> for usize { (this: usize = self, len: usize = slice.len()) => this < len ); // SAFETY: see comments for `get_unchecked` above. - unsafe { get_mut_noubcheck(slice, self) } + unsafe { + #[cfg(bootstrap)] + { + get_mut_noubcheck(slice, self) + } + #[cfg(not(bootstrap))] + { + slice_get_unchecked(slice, self) + } + } } #[inline] diff --git a/library/core/src/slice/raw.rs b/library/core/src/slice/raw.rs index 40da69c1562..eba2f89a169 100644 --- a/library/core/src/slice/raw.rs +++ b/library/core/src/slice/raw.rs @@ -14,8 +14,8 @@ use crate::{array, ptr, ub_checks}; /// * `data` must be non-null, [valid] for reads for `len * size_of::<T>()` many bytes, /// and it must be properly aligned. This means in particular: /// -/// * The entire memory range of this slice must be contained within a single allocated object! -/// Slices can never span across multiple allocated objects. See [below](#incorrect-usage) +/// * The entire memory range of this slice must be contained within a single allocation! +/// Slices can never span across multiple allocations. See [below](#incorrect-usage) /// for an example incorrectly not taking this into account. /// * `data` must be non-null and aligned even for zero-length slices or slices of ZSTs. One /// reason for this is that enum layout optimizations may rely on references @@ -65,14 +65,14 @@ use crate::{array, ptr, ub_checks}; /// assert_eq!(fst_end, snd_start, "Slices must be contiguous!"); /// unsafe { /// // The assertion above ensures `fst` and `snd` are contiguous, but they might -/// // still be contained within _different allocated objects_, in which case +/// // still be contained within _different allocations_, in which case /// // creating this slice is undefined behavior. /// slice::from_raw_parts(fst.as_ptr(), fst.len() + snd.len()) /// } /// } /// /// fn main() { -/// // `a` and `b` are different allocated objects... +/// // `a` and `b` are different allocations... /// let a = 42; /// let b = 27; /// // ... which may nevertheless be laid out contiguously in memory: | a | b | @@ -150,8 +150,8 @@ pub const unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] /// * `data` must be non-null, [valid] for both reads and writes for `len * size_of::<T>()` many bytes, /// and it must be properly aligned. This means in particular: /// -/// * The entire memory range of this slice must be contained within a single allocated object! -/// Slices can never span across multiple allocated objects. 
+/// * The entire memory range of this slice must be contained within a single allocation! +/// Slices can never span across multiple allocations. /// * `data` must be non-null and aligned even for zero-length slices or slices of ZSTs. One /// reason for this is that enum layout optimizations may rely on references /// (including slices of any length) being aligned and non-null to distinguish @@ -228,8 +228,8 @@ pub const fn from_mut<T>(s: &mut T) -> &mut [T] { /// the last element, such that the offset from the end to the start pointer is /// the length of the slice. /// -/// * The entire memory range of this slice must be contained within a single allocated object! -/// Slices can never span across multiple allocated objects. +/// * The entire memory range of this slice must be contained within a single allocation! +/// Slices can never span across multiple allocations. /// /// * The range must contain `N` consecutive properly initialized values of type `T`. /// @@ -298,8 +298,8 @@ pub const unsafe fn from_ptr_range<'a, T>(range: Range<*const T>) -> &'a [T] { /// the last element, such that the offset from the end to the start pointer is /// the length of the slice. /// -/// * The entire memory range of this slice must be contained within a single allocated object! -/// Slices can never span across multiple allocated objects. +/// * The entire memory range of this slice must be contained within a single allocation! +/// Slices can never span across multiple allocations. /// /// * The range must contain `N` consecutive properly initialized values of type `T`. /// diff --git a/library/core/src/str/converts.rs b/library/core/src/str/converts.rs index 058628797ea..6da9dce2d87 100644 --- a/library/core/src/str/converts.rs +++ b/library/core/src/str/converts.rs @@ -6,6 +6,8 @@ use crate::{mem, ptr}; /// Converts a slice of bytes to a string slice. /// +/// This is an alias to [`str::from_utf8`]. +/// /// A string slice ([`&str`]) is made of bytes ([`u8`]), and a byte slice /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts between /// the two. Not all byte slices are valid string slices, however: [`&str`] requires @@ -97,6 +99,8 @@ pub const fn from_utf8(v: &[u8]) -> Result<&str, Utf8Error> { /// Converts a mutable slice of bytes to a mutable string slice. /// +/// This is an alias to [`str::from_utf8_mut`]. +/// /// # Examples /// /// Basic usage: @@ -142,6 +146,8 @@ pub const fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> { /// Converts a slice of bytes to a string slice without checking /// that the string contains valid UTF-8. /// +/// This is an alias to [`str::from_utf8_unchecked`]. +/// /// See the safe version, [`from_utf8`], for more information. /// /// # Safety @@ -178,6 +184,8 @@ pub const unsafe fn from_utf8_unchecked(v: &[u8]) -> &str { /// Converts a slice of bytes to a string slice without checking /// that the string contains valid UTF-8; mutable version. /// +/// This is an alias to [`str::from_utf8_unchecked_mut`]. +/// /// See the immutable version, [`from_utf8_unchecked()`] for documentation and safety requirements. 
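Editorial note: the new "alias" sentences added above point the free functions at the inherent `str::from_utf8` family. Assuming a toolchain where those inherent associated functions are stable, both spellings behave identically; a hedged sketch:

```rust
fn main() {
    let bytes = [72u8, 105];

    // Free function in `std::str`...
    let a = std::str::from_utf8(&bytes).unwrap();
    // ...and the inherent associated function the docs now call it an alias of.
    let b = str::from_utf8(&bytes).unwrap();

    assert_eq!(a, "Hi");
    assert_eq!(a, b);
}
```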
/// /// # Examples diff --git a/library/core/src/str/iter.rs b/library/core/src/str/iter.rs index 49c581f352e..425c4eaee28 100644 --- a/library/core/src/str/iter.rs +++ b/library/core/src/str/iter.rs @@ -656,7 +656,7 @@ impl<'a, P: Pattern> SplitInternal<'a, P> { None } - #[inline(always)] + #[inline] fn next(&mut self) -> Option<&'a str> { if self.finished { return None; diff --git a/library/core/src/str/pattern.rs b/library/core/src/str/pattern.rs index e8189a2187b..bcbbb11c83b 100644 --- a/library/core/src/str/pattern.rs +++ b/library/core/src/str/pattern.rs @@ -429,23 +429,8 @@ unsafe impl<'a> Searcher<'a> for CharSearcher<'a> { SearchStep::Done } } - #[inline(always)] + #[inline] fn next_match(&mut self) -> Option<(usize, usize)> { - if self.utf8_size == 1 { - return match self - .haystack - .as_bytes() - .get(self.finger..self.finger_back)? - .iter() - .position(|x| *x == self.utf8_encoded[0]) - { - Some(x) => { - self.finger += x + 1; - Some((self.finger - 1, self.finger)) - } - None => None, - }; - } loop { // get the haystack after the last character found let bytes = self.haystack.as_bytes().get(self.finger..self.finger_back)?; @@ -513,21 +498,6 @@ unsafe impl<'a> ReverseSearcher<'a> for CharSearcher<'a> { } #[inline] fn next_match_back(&mut self) -> Option<(usize, usize)> { - if self.utf8_size == 1 { - return match self - .haystack - .get(self.finger..self.finger_back)? - .as_bytes() - .iter() - .rposition(|&x| x == self.utf8_encoded[0]) - { - Some(x) => { - self.finger_back = self.finger + x; - Some((self.finger_back, self.finger_back + 1)) - } - None => None, - }; - } let haystack = self.haystack.as_bytes(); loop { // get the haystack up to but not including the last character searched diff --git a/library/coretests/tests/lib.rs b/library/coretests/tests/lib.rs index 693b14ef762..dd7cdd79c0d 100644 --- a/library/coretests/tests/lib.rs +++ b/library/coretests/tests/lib.rs @@ -16,6 +16,7 @@ #![feature(char_max_len)] #![feature(clone_to_uninit)] #![feature(const_eval_select)] +#![feature(const_float_round_methods)] #![feature(const_trait_impl)] #![feature(core_float_math)] #![feature(core_intrinsics)] diff --git a/library/coretests/tests/num/mod.rs b/library/coretests/tests/num/mod.rs index 1212d36a1b1..fa05bbdd9b7 100644 --- a/library/coretests/tests/num/mod.rs +++ b/library/coretests/tests/num/mod.rs @@ -731,6 +731,9 @@ assume_usize_width! { } } +// FIXME(141726): there is a lot of duplication between the following tests and +// the tests in `coretests/tests/floats/f*.rs` +// See issue https://github.com/rust-lang/rust/issues/141726 for more details. macro_rules! test_float { ($modname: ident, $fassert: ident, $fty: ty) => { mod $modname { @@ -947,6 +950,117 @@ macro_rules! 
test_float { assert!(<$fty>::INFINITY.div_euclid(<$fty>::NAN).is_nan()); assert!(<$fty>::NAN.div_euclid(<$fty>::INFINITY).is_nan()); } + #[test] + #[cfg(not(bootstrap))] + fn floor() { + $fassert!((0.0 as $fty).floor(), 0.0); + $fassert!((0.0 as $fty).floor().is_sign_positive()); + $fassert!((-0.0 as $fty).floor(), -0.0); + $fassert!((-0.0 as $fty).floor().is_sign_negative()); + $fassert!((0.5 as $fty).floor(), 0.0); + $fassert!((-0.5 as $fty).floor(), -1.0); + $fassert!((1.5 as $fty).floor(), 1.0); + $fassert!(<$fty>::MAX.floor(), <$fty>::MAX); + $fassert!(<$fty>::MIN.floor(), <$fty>::MIN); + $fassert!(<$fty>::MIN_POSITIVE.floor(), 0.0); + $fassert!((-<$fty>::MIN_POSITIVE).floor(), -1.0); + $fassert!(<$fty>::NAN.floor().is_nan()); + $fassert!(<$fty>::INFINITY.floor(), <$fty>::INFINITY); + $fassert!(<$fty>::NEG_INFINITY.floor(), <$fty>::NEG_INFINITY); + } + #[test] + #[cfg(not(bootstrap))] + fn ceil() { + $fassert!((0.0 as $fty).ceil(), 0.0); + $fassert!((0.0 as $fty).ceil().is_sign_positive()); + $fassert!((-0.0 as $fty).ceil(), 0.0); + $fassert!((-0.0 as $fty).ceil().is_sign_negative()); + $fassert!((0.5 as $fty).ceil(), 1.0); + $fassert!((-0.5 as $fty).ceil(), 0.0); + $fassert!(<$fty>::MAX.ceil(), <$fty>::MAX); + $fassert!(<$fty>::MIN.ceil(), <$fty>::MIN); + $fassert!(<$fty>::MIN_POSITIVE.ceil(), 1.0); + $fassert!((-<$fty>::MIN_POSITIVE).ceil(), 0.0); + $fassert!(<$fty>::NAN.ceil().is_nan()); + $fassert!(<$fty>::INFINITY.ceil(), <$fty>::INFINITY); + $fassert!(<$fty>::NEG_INFINITY.ceil(), <$fty>::NEG_INFINITY); + } + #[test] + #[cfg(not(bootstrap))] + fn round() { + $fassert!((0.0 as $fty).round(), 0.0); + $fassert!((0.0 as $fty).round().is_sign_positive()); + $fassert!((-0.0 as $fty).round(), -0.0); + $fassert!((-0.0 as $fty).round().is_sign_negative()); + $fassert!((0.5 as $fty).round(), 1.0); + $fassert!((-0.5 as $fty).round(), -1.0); + $fassert!(<$fty>::MAX.round(), <$fty>::MAX); + $fassert!(<$fty>::MIN.round(), <$fty>::MIN); + $fassert!(<$fty>::MIN_POSITIVE.round(), 0.0); + $fassert!((-<$fty>::MIN_POSITIVE).round(), 0.0); + $fassert!(<$fty>::NAN.round().is_nan()); + $fassert!(<$fty>::INFINITY.round(), <$fty>::INFINITY); + $fassert!(<$fty>::NEG_INFINITY.round(), <$fty>::NEG_INFINITY); + } + #[test] + #[cfg(not(bootstrap))] + fn round_ties_even() { + $fassert!((0.0 as $fty).round_ties_even(), 0.0); + $fassert!((0.0 as $fty).round_ties_even().is_sign_positive()); + $fassert!((-0.0 as $fty).round_ties_even(), -0.0); + $fassert!((-0.0 as $fty).round_ties_even().is_sign_negative()); + $fassert!((0.5 as $fty).round_ties_even(), 0.0); + $fassert!((0.5 as $fty).round_ties_even().is_sign_positive()); + $fassert!((-0.5 as $fty).round_ties_even(), -0.0); + $fassert!((-0.5 as $fty).round_ties_even().is_sign_negative()); + $fassert!(<$fty>::MAX.round_ties_even(), <$fty>::MAX); + $fassert!(<$fty>::MIN.round_ties_even(), <$fty>::MIN); + $fassert!(<$fty>::MIN_POSITIVE.round_ties_even(), 0.0); + $fassert!((-<$fty>::MIN_POSITIVE).round_ties_even(), 0.0); + $fassert!(<$fty>::NAN.round_ties_even().is_nan()); + $fassert!(<$fty>::INFINITY.round_ties_even(), <$fty>::INFINITY); + $fassert!(<$fty>::NEG_INFINITY.round_ties_even(), <$fty>::NEG_INFINITY); + } + #[test] + #[cfg(not(bootstrap))] + fn trunc() { + $fassert!((0.0 as $fty).trunc(), 0.0); + $fassert!((0.0 as $fty).trunc().is_sign_positive()); + $fassert!((-0.0 as $fty).trunc(), -0.0); + $fassert!((-0.0 as $fty).trunc().is_sign_negative()); + $fassert!((0.5 as $fty).trunc(), 0.0); + $fassert!((0.5 as $fty).trunc().is_sign_positive()); + $fassert!((-0.5 
as $fty).trunc(), -0.0); + $fassert!((-0.5 as $fty).trunc().is_sign_negative()); + $fassert!(<$fty>::MAX.trunc(), <$fty>::MAX); + $fassert!(<$fty>::MIN.trunc(), <$fty>::MIN); + $fassert!(<$fty>::MIN_POSITIVE.trunc(), 0.0); + $fassert!((-<$fty>::MIN_POSITIVE).trunc(), 0.0); + $fassert!(<$fty>::NAN.trunc().is_nan()); + $fassert!(<$fty>::INFINITY.trunc(), <$fty>::INFINITY); + $fassert!(<$fty>::NEG_INFINITY.trunc(), <$fty>::NEG_INFINITY); + } + #[test] + #[cfg(not(bootstrap))] + fn fract() { + $fassert!((0.0 as $fty).fract(), 0.0); + $fassert!((0.0 as $fty).fract().is_sign_positive()); + $fassert!((-0.0 as $fty).fract(), 0.0); + $fassert!((-0.0 as $fty).fract().is_sign_positive()); + $fassert!((0.5 as $fty).fract(), 0.5); + $fassert!((0.5 as $fty).fract().is_sign_positive()); + $fassert!((-0.5 as $fty).fract(), -0.5); + $fassert!((-0.5 as $fty).fract().is_sign_negative()); + $fassert!(<$fty>::MAX.fract(), 0.0); + $fassert!(<$fty>::MIN.fract(), 0.0); + $fassert!(<$fty>::MIN_POSITIVE.fract(), <$fty>::MIN_POSITIVE); + $fassert!(<$fty>::MIN_POSITIVE.fract().is_sign_positive()); + $fassert!((-<$fty>::MIN_POSITIVE).fract(), -<$fty>::MIN_POSITIVE); + $fassert!((-<$fty>::MIN_POSITIVE).fract().is_sign_negative()); + $fassert!(<$fty>::NAN.fract().is_nan()); + $fassert!(<$fty>::INFINITY.fract().is_nan()); + $fassert!(<$fty>::NEG_INFINITY.fract().is_nan()); + } } }; } diff --git a/library/std/Cargo.toml b/library/std/Cargo.toml index 4ff4895ecde..31371f06b38 100644 --- a/library/std/Cargo.toml +++ b/library/std/Cargo.toml @@ -18,7 +18,7 @@ cfg-if = { version = "1.0", features = ['rustc-dep-of-std'] } panic_unwind = { path = "../panic_unwind", optional = true } panic_abort = { path = "../panic_abort" } core = { path = "../core", public = true } -compiler_builtins = { version = "=0.1.159" } +compiler_builtins = { version = "=0.1.160" } unwind = { path = "../unwind" } hashbrown = { version = "0.15", default-features = false, features = [ 'rustc-dep-of-std', diff --git a/library/std/src/fs.rs b/library/std/src/fs.rs index 8ed5800e9d0..711efc7d011 100644 --- a/library/std/src/fs.rs +++ b/library/std/src/fs.rs @@ -2915,17 +2915,28 @@ pub fn remove_dir<P: AsRef<Path>>(path: P) -> io::Result<()> { /// /// # Platform-specific behavior /// -/// This function currently corresponds to `openat`, `fdopendir`, `unlinkat` and `lstat` functions -/// on Unix (except for REDOX) and the `CreateFileW`, `GetFileInformationByHandleEx`, -/// `SetFileInformationByHandle`, and `NtCreateFile` functions on Windows. Note that, this -/// [may change in the future][changes]. +/// These implementation details [may change in the future][changes]. +/// +/// - "Unix-like": By default, this function currently corresponds to +/// `openat`, `fdopendir`, `unlinkat` and `lstat` +/// on Unix-family platforms, except where noted otherwise. +/// - "Windows": This function currently corresponds to `CreateFileW`, +/// `GetFileInformationByHandleEx`, `SetFileInformationByHandle`, and `NtCreateFile`. +/// +/// ## Time-of-check to time-of-use (TOCTOU) race conditions +/// On a few platforms there is no way to remove a directory's contents without following symlinks +/// unless you perform a check and then operate on paths based on that directory. +/// This allows concurrently-running code to replace the directory with a symlink after the check, +/// causing a removal to instead operate on a path based on the symlink. This is a TOCTOU race. 
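Editorial note: for context on the reworded `remove_dir_all` documentation above, the function deletes a directory tree recursively, and the new text states on which platforms that recursion is protected against the symlink TOCTOU race. A usage sketch with a hypothetical `scratch` path, not part of the patch:

```rust
use std::fs;
use std::path::Path;

fn main() -> std::io::Result<()> {
    // Build a small tree, then remove it recursively. Per the docs above, the
    // removal defends against symlink TOCTOU races except on Miri and Redox.
    fs::create_dir_all("scratch/nested")?;
    fs::write("scratch/nested/file.txt", b"hi")?;
    fs::remove_dir_all("scratch")?;

    assert!(!Path::new("scratch").exists());
    Ok(())
}
```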
+/// By default, `fs::remove_dir_all` protects against a symlink TOCTOU race on all platforms +/// except the following. It should not be used in security-sensitive contexts on these platforms: +/// - Miri: Even when emulating targets where the underlying implementation will protect against +/// TOCTOU races, Miri will not do so. +/// - Redox OS: This function does not protect against TOCTOU races, as Redox does not implement +/// the required platform support to do so. /// /// [changes]: io#platform-specific-behavior /// -/// On REDOX, as well as when running in Miri for any target, this function is not protected against -/// time-of-check to time-of-use (TOCTOU) race conditions, and should not be used in -/// security-sensitive code on those platforms. All other platforms are protected. -/// /// # Errors /// /// See [`fs::remove_file`] and [`fs::remove_dir`]. diff --git a/library/std/src/lib.rs b/library/std/src/lib.rs index a3f0f3cc55a..74a34339860 100644 --- a/library/std/src/lib.rs +++ b/library/std/src/lib.rs @@ -335,6 +335,7 @@ #![feature(bstr_internals)] #![feature(char_internals)] #![feature(clone_to_uninit)] +#![feature(const_float_round_methods)] #![feature(core_intrinsics)] #![feature(core_io_borrowed_buf)] #![feature(duration_constants)] diff --git a/library/std/src/num/f32.rs b/library/std/src/num/f32.rs index 5210e75ec45..b7f6529ac40 100644 --- a/library/std/src/num/f32.rs +++ b/library/std/src/num/f32.rs @@ -44,8 +44,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn floor(self) -> f32 { + pub const fn floor(self) -> f32 { core::f32::math::floor(self) } @@ -66,8 +67,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn ceil(self) -> f32 { + pub const fn ceil(self) -> f32 { core::f32::math::ceil(self) } @@ -94,8 +96,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn round(self) -> f32 { + pub const fn round(self) -> f32 { core::f32::math::round(self) } @@ -120,8 +123,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "round_ties_even", since = "1.77.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn round_ties_even(self) -> f32 { + pub const fn round_ties_even(self) -> f32 { core::f32::math::round_ties_even(self) } @@ -145,8 +149,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn trunc(self) -> f32 { + pub const fn trunc(self) -> f32 { core::f32::math::trunc(self) } @@ -168,8 +173,9 @@ impl f32 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", 
since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn fract(self) -> f32 { + pub const fn fract(self) -> f32 { core::f32::math::fract(self) } diff --git a/library/std/src/num/f64.rs b/library/std/src/num/f64.rs index f837800d663..75e35a8db33 100644 --- a/library/std/src/num/f64.rs +++ b/library/std/src/num/f64.rs @@ -44,8 +44,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn floor(self) -> f64 { + pub const fn floor(self) -> f64 { core::f64::math::floor(self) } @@ -66,8 +67,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn ceil(self) -> f64 { + pub const fn ceil(self) -> f64 { core::f64::math::ceil(self) } @@ -94,8 +96,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn round(self) -> f64 { + pub const fn round(self) -> f64 { core::f64::math::round(self) } @@ -120,8 +123,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "round_ties_even", since = "1.77.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn round_ties_even(self) -> f64 { + pub const fn round_ties_even(self) -> f64 { core::f64::math::round_ties_even(self) } @@ -145,8 +149,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn trunc(self) -> f64 { + pub const fn trunc(self) -> f64 { core::f64::math::trunc(self) } @@ -168,8 +173,9 @@ impl f64 { #[rustc_allow_incoherent_impl] #[must_use = "method returns a new number and does not mutate the original value"] #[stable(feature = "rust1", since = "1.0.0")] + #[rustc_const_unstable(feature = "const_float_round_methods", issue = "141555")] #[inline] - pub fn fract(self) -> f64 { + pub const fn fract(self) -> f64 { core::f64::math::fract(self) } diff --git a/library/std/src/sys/thread_local/native/lazy.rs b/library/std/src/sys/thread_local/native/lazy.rs index 51294285ba0..b556dd9aa25 100644 --- a/library/std/src/sys/thread_local/native/lazy.rs +++ b/library/std/src/sys/thread_local/native/lazy.rs @@ -1,9 +1,9 @@ -use crate::cell::UnsafeCell; -use crate::hint::unreachable_unchecked; +use crate::cell::{Cell, UnsafeCell}; +use crate::mem::MaybeUninit; use crate::ptr; use crate::sys::thread_local::{abort_on_dtor_unwind, destructors}; -pub unsafe trait DestroyedState: Sized { +pub unsafe trait DestroyedState: Sized + Copy { fn register_dtor<T>(s: &Storage<T, Self>); } @@ -19,15 +19,17 @@ unsafe impl DestroyedState for () { } } -enum State<T, D> { - Initial, - Alive(T), +#[derive(Copy, Clone)] +enum State<D> { + Uninitialized, + Alive, Destroyed(D), } 
#[allow(missing_debug_implementations)] pub struct Storage<T, D> { - state: UnsafeCell<State<T, D>>, + state: Cell<State<D>>, + value: UnsafeCell<MaybeUninit<T>>, } impl<T, D> Storage<T, D> @@ -35,7 +37,10 @@ where D: DestroyedState, { pub const fn new() -> Storage<T, D> { - Storage { state: UnsafeCell::new(State::Initial) } + Storage { + state: Cell::new(State::Uninitialized), + value: UnsafeCell::new(MaybeUninit::uninit()), + } } /// Gets a pointer to the TLS value, potentially initializing it with the @@ -49,35 +54,49 @@ where /// The `self` reference must remain valid until the TLS destructor is run. #[inline] pub unsafe fn get_or_init(&self, i: Option<&mut Option<T>>, f: impl FnOnce() -> T) -> *const T { - let state = unsafe { &*self.state.get() }; - match state { - State::Alive(v) => v, - State::Destroyed(_) => ptr::null(), - State::Initial => unsafe { self.initialize(i, f) }, + if let State::Alive = self.state.get() { + self.value.get().cast() + } else { + unsafe { self.get_or_init_slow(i, f) } } } + /// # Safety + /// The `self` reference must remain valid until the TLS destructor is run. #[cold] - unsafe fn initialize(&self, i: Option<&mut Option<T>>, f: impl FnOnce() -> T) -> *const T { - // Perform initialization + unsafe fn get_or_init_slow( + &self, + i: Option<&mut Option<T>>, + f: impl FnOnce() -> T, + ) -> *const T { + match self.state.get() { + State::Uninitialized => {} + State::Alive => return self.value.get().cast(), + State::Destroyed(_) => return ptr::null(), + } let v = i.and_then(Option::take).unwrap_or_else(f); - let old = unsafe { self.state.get().replace(State::Alive(v)) }; - match old { + // SAFETY: we cannot be inside a `LocalKey::with` scope, as the initializer + // has already returned and the next scope only starts after we return + // the pointer. Therefore, there can be no references to the old value, + // even if it was initialized. Thus because we are !Sync we have exclusive + // access to self.value and may replace it. + let mut old_value = unsafe { self.value.get().replace(MaybeUninit::new(v)) }; + match self.state.replace(State::Alive) { // If the variable is not being recursively initialized, register // the destructor. This might be a noop if the value does not need // destruction. - State::Initial => D::register_dtor(self), - // Else, drop the old value. This might be changed to a panic. - val => drop(val), - } + State::Uninitialized => D::register_dtor(self), - // SAFETY: the state was just set to `Alive` - unsafe { - let State::Alive(v) = &*self.state.get() else { unreachable_unchecked() }; - v + // Recursive initialization, we only need to drop the old value + // as we've already registered the destructor. + State::Alive => unsafe { old_value.assume_init_drop() }, + + State::Destroyed(_) => unreachable!(), } + + self.value.get().cast() } } @@ -92,9 +111,12 @@ unsafe extern "C" fn destroy<T>(ptr: *mut u8) { // Print a nice abort message if a panic occurs. abort_on_dtor_unwind(|| { let storage = unsafe { &*(ptr as *const Storage<T, ()>) }; - // Update the state before running the destructor as it may attempt to - // access the variable. - let val = unsafe { storage.state.get().replace(State::Destroyed(())) }; - drop(val); + if let State::Alive = storage.state.replace(State::Destroyed(())) { + // SAFETY: we ensured the state was Alive so the value was initialized. 
+ // We also updated the state to Destroyed to prevent the destructor + // from accessing the thread-local variable, as this would violate + // the exclusive access provided by &mut T in Drop::drop. + unsafe { (*storage.value.get()).assume_init_drop() } + } }) } diff --git a/rust-bors.toml b/rust-bors.toml index f27eb239367..fbfaa980f05 100644 --- a/rust-bors.toml +++ b/rust-bors.toml @@ -1 +1,2 @@ -timeout = 14400 +# 6 hours timeout for CI builds +timeout = 21600 diff --git a/src/bootstrap/bootstrap.py b/src/bootstrap/bootstrap.py index 42ad14a81d0..c60c6b8db64 100644 --- a/src/bootstrap/bootstrap.py +++ b/src/bootstrap/bootstrap.py @@ -1118,7 +1118,6 @@ class RustBuild(object): if "RUSTFLAGS_BOOTSTRAP" in env: env["RUSTFLAGS"] += " " + env["RUSTFLAGS_BOOTSTRAP"] - env["PATH"] = os.path.join(self.bin_root(), "bin") + os.pathsep + env["PATH"] if not os.path.isfile(self.cargo()): raise Exception("no cargo executable found at `{}`".format(self.cargo())) args = [ diff --git a/src/bootstrap/src/core/build_steps/compile.rs b/src/bootstrap/src/core/build_steps/compile.rs index 0c18ebedb37..a782d0f6b1a 100644 --- a/src/bootstrap/src/core/build_steps/compile.rs +++ b/src/bootstrap/src/core/build_steps/compile.rs @@ -1413,7 +1413,7 @@ fn rustc_llvm_env(builder: &Builder<'_>, cargo: &mut Cargo, target: TargetSelect cargo.env("LLVM_LINKER_FLAGS", llvm_linker_flags); } - // Building with a static libstdc++ is only supported on linux right now, + // Building with a static libstdc++ is only supported on Linux and windows-gnu* right now, // not for MSVC or macOS if builder.config.llvm_static_stdcpp && !target.contains("freebsd") @@ -1421,12 +1421,14 @@ fn rustc_llvm_env(builder: &Builder<'_>, cargo: &mut Cargo, target: TargetSelect && !target.contains("apple") && !target.contains("solaris") { + let libstdcxx_name = + if target.contains("windows-gnullvm") { "libc++.a" } else { "libstdc++.a" }; let file = compiler_file( builder, &builder.cxx(target).unwrap(), target, CLang::Cxx, - "libstdc++.a", + libstdcxx_name, ); cargo.env("LLVM_STATIC_STDCPP", file); } diff --git a/src/bootstrap/src/core/build_steps/llvm.rs b/src/bootstrap/src/core/build_steps/llvm.rs index 9ce3abbcf2c..20a4d1a1515 100644 --- a/src/bootstrap/src/core/build_steps/llvm.rs +++ b/src/bootstrap/src/core/build_steps/llvm.rs @@ -285,7 +285,8 @@ impl Step for Llvm { LlvmBuildStatus::ShouldBuild(m) => m, }; - if builder.llvm_link_shared() && target.is_windows() { + if builder.llvm_link_shared() && target.is_windows() && !target.ends_with("windows-gnullvm") + { panic!("shared linking to LLVM is not currently supported on {}", target.triple); } diff --git a/src/bootstrap/src/core/build_steps/run.rs b/src/bootstrap/src/core/build_steps/run.rs index eeba7780c65..6ef1b13abcd 100644 --- a/src/bootstrap/src/core/build_steps/run.rs +++ b/src/bootstrap/src/core/build_steps/run.rs @@ -5,7 +5,6 @@ use std::path::PathBuf; -use crate::Mode; use crate::core::build_steps::dist::distdir; use crate::core::build_steps::test; use crate::core::build_steps::tool::{self, SourceType, Tool}; @@ -14,6 +13,7 @@ use crate::core::builder::{Builder, Kind, RunConfig, ShouldRun, Step}; use crate::core::config::TargetSelection; use crate::core::config::flags::get_completion; use crate::utils::exec::command; +use crate::{Mode, t}; #[derive(Debug, PartialOrd, Ord, Clone, Hash, PartialEq, Eq)] pub struct BuildManifest; @@ -253,6 +253,7 @@ impl Step for GenerateCopyright { cmd.env("SRC_DIR", &builder.src); cmd.env("VENDOR_DIR", &vendored_sources); cmd.env("CARGO", 
&builder.initial_cargo); + cmd.env("CARGO_HOME", t!(home::cargo_home())); // it is important that generate-copyright runs from the root of the // source tree, because it uses relative paths cmd.current_dir(&builder.src); diff --git a/src/bootstrap/src/core/build_steps/test.rs b/src/bootstrap/src/core/build_steps/test.rs index b0eee8765ef..22ab3e56a9c 100644 --- a/src/bootstrap/src/core/build_steps/test.rs +++ b/src/bootstrap/src/core/build_steps/test.rs @@ -2964,7 +2964,14 @@ impl Step for Distcheck { run.builder.ensure(Distcheck); } - /// Runs "distcheck", a 'make check' from a tarball + /// Runs `distcheck`, which is a collection of smoke tests: + /// + /// - Run `make check` from an unpacked dist tarball to make sure we can at the minimum run + /// check steps from those sources. + /// - Check that selected dist components (`rust-src` only at the moment) at least have expected + /// directory shape and crate manifests that cargo can generate a lockfile from. + /// + /// FIXME(#136822): dist components are under-tested. fn run(self, builder: &Builder<'_>) { builder.info("Distcheck"); let dir = builder.tempdir().join("distcheck"); diff --git a/src/bootstrap/src/core/build_steps/tool.rs b/src/bootstrap/src/core/build_steps/tool.rs index f5f9a3ec4ab..173b3ff0816 100644 --- a/src/bootstrap/src/core/build_steps/tool.rs +++ b/src/bootstrap/src/core/build_steps/tool.rs @@ -1197,9 +1197,9 @@ fn run_tool_build_step( artifact_kind: ToolArtifactKind::Binary, }); - // FIXME: This should just be an if-let-chain, but those are unstable. - if let Some(add_bins_to_sysroot) = - add_bins_to_sysroot.filter(|bins| !bins.is_empty() && target_compiler.stage > 0) + if let Some(add_bins_to_sysroot) = add_bins_to_sysroot + && !add_bins_to_sysroot.is_empty() + && target_compiler.stage > 0 { let bindir = builder.sysroot(target_compiler).join("bin"); t!(fs::create_dir_all(&bindir)); diff --git a/src/bootstrap/src/core/builder/cargo.rs b/src/bootstrap/src/core/builder/cargo.rs index bac1288fb7f..1e9af68a92d 100644 --- a/src/bootstrap/src/core/builder/cargo.rs +++ b/src/bootstrap/src/core/builder/cargo.rs @@ -1004,7 +1004,12 @@ impl Builder<'_> { // efficient initial-exec TLS model. This doesn't work with `dlopen`, // so we can't use it by default in general, but we can use it for tools // and our own internal libraries. - if !mode.must_support_dlopen() && !target.triple.starts_with("powerpc-") { + // + // Cygwin only supports emutls. + if !mode.must_support_dlopen() + && !target.triple.starts_with("powerpc-") + && !target.triple.contains("cygwin") + { cargo.env("RUSTC_TLS_MODEL_INITIAL_EXEC", "1"); } diff --git a/src/build_helper/src/lib.rs b/src/build_helper/src/lib.rs index 7e580db48aa..1f5cf723641 100644 --- a/src/build_helper/src/lib.rs +++ b/src/build_helper/src/lib.rs @@ -29,4 +29,5 @@ pub const RUSTC_PGO_CRATES: &[&str] = &[ "tuple-stress", "diesel-2.2.10", "bitmaps-3.2.1", + "serde-1.0.219-new-solver", ]; diff --git a/src/ci/docker/host-x86_64/x86_64-gnu-distcheck/Dockerfile b/src/ci/docker/host-x86_64/x86_64-gnu-distcheck/Dockerfile index 2217e6ee704..98fd31a22e9 100644 --- a/src/ci/docker/host-x86_64/x86_64-gnu-distcheck/Dockerfile +++ b/src/ci/docker/host-x86_64/x86_64-gnu-distcheck/Dockerfile @@ -1,3 +1,15 @@ +# Runs `distcheck`, which is a collection of smoke tests: +# +# - Run `make check` from an unpacked dist tarball to make sure we can at the +# minimum run check steps from those sources. 
+# - Check that selected dist components at least have expected directory shape +# and crate manifests that cargo can generate a lockfile from. +# +# Refer to `src/bootstrap/src/core/build_steps/test.rs` `Distcheck::run` for +# specifics. +# +# FIXME(#136822): dist components are generally under-tested. + FROM ubuntu:22.04 ARG DEBIAN_FRONTEND=noninteractive diff --git a/src/ci/docker/host-x86_64/x86_64-gnu-tools/checktools.sh b/src/ci/docker/host-x86_64/x86_64-gnu-tools/checktools.sh index 9222710b843..62e0451814b 100755 --- a/src/ci/docker/host-x86_64/x86_64-gnu-tools/checktools.sh +++ b/src/ci/docker/host-x86_64/x86_64-gnu-tools/checktools.sh @@ -53,8 +53,8 @@ MIRIFLAGS="-Zmiri-force-intrinsic-fallback --cfg force_intrinsic_fallback -O -Zm case $HOST_TARGET in x86_64-unknown-linux-gnu) # Only this branch runs in PR CI. - # Fully test all main OSes, including a 32bit target. - python3 "$X_PY" test --stage 2 src/tools/miri src/tools/miri/cargo-miri --target x86_64-apple-darwin + # Fully test all main OSes, and all main architectures. + python3 "$X_PY" test --stage 2 src/tools/miri src/tools/miri/cargo-miri --target aarch64-apple-darwin python3 "$X_PY" test --stage 2 src/tools/miri src/tools/miri/cargo-miri --target i686-pc-windows-msvc # Only run "pass" tests for the remaining targets, which is quite a bit faster. python3 "$X_PY" test --stage 2 src/tools/miri --target x86_64-pc-windows-gnu --test-args pass @@ -69,7 +69,7 @@ case $HOST_TARGET in #FIXME: Re-enable this once CI issues are fixed # See <https://github.com/rust-lang/rust/issues/127883> # For now, these tests are moved to `x86_64-msvc-ext2` in `src/ci/github-actions/jobs.yml`. - #python3 "$X_PY" test --stage 2 src/tools/miri --target aarch64-apple-darwin --test-args pass + #python3 "$X_PY" test --stage 2 src/tools/miri --target x86_64-apple-darwin --test-args pass ;; *) echo "FATAL: unexpected host $HOST_TARGET" diff --git a/src/ci/github-actions/jobs.yml b/src/ci/github-actions/jobs.yml index ba17c2b407c..65859067a63 100644 --- a/src/ci/github-actions/jobs.yml +++ b/src/ci/github-actions/jobs.yml @@ -535,11 +535,13 @@ auto: - name: x86_64-msvc-ext2 env: SCRIPT: > - python x.py test --stage 2 src/tools/miri --target aarch64-apple-darwin --test-args pass && + python x.py test --stage 2 src/tools/miri --target x86_64-apple-darwin --test-args pass && python x.py test --stage 2 src/tools/miri --target x86_64-pc-windows-gnu --test-args pass && python x.py miri --stage 2 library/core --test-args notest && python x.py miri --stage 2 library/alloc --test-args notest && python x.py miri --stage 2 library/std --test-args notest + # The last 3 lines smoke-test `x.py miri`. This doesn't run any actual tests (that would take + # too long), but it ensures that the crates build properly when tested with Miri. 
RUST_CONFIGURE_ARGS: --build=x86_64-pc-windows-msvc --enable-lld <<: *job-windows diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 0fbffc7808d..7e8e087c3a2 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs @@ -1749,7 +1749,7 @@ fn maybe_expand_private_type_alias<'tcx>( } else { return None; }; - let hir::ItemKind::TyAlias(_, ty, generics) = alias else { return None }; + let hir::ItemKind::TyAlias(_, generics, ty) = alias else { return None }; let final_seg = &path.segments.last().expect("segments were empty"); let mut args = DefIdMap::default(); @@ -2803,21 +2803,21 @@ fn clean_maybe_renamed_item<'tcx>( let mut name = get_name(cx, item, renamed).unwrap(); let kind = match item.kind { - ItemKind::Static(_, ty, mutability, body_id) => StaticItem(Static { + ItemKind::Static(mutability, _, ty, body_id) => StaticItem(Static { type_: Box::new(clean_ty(ty, cx)), mutability, expr: Some(body_id), }), - ItemKind::Const(_, ty, generics, body_id) => ConstantItem(Box::new(Constant { + ItemKind::Const(_, generics, ty, body_id) => ConstantItem(Box::new(Constant { generics: clean_generics(generics, cx), type_: clean_ty(ty, cx), kind: ConstantKind::Local { body: body_id, def_id }, })), - ItemKind::TyAlias(_, hir_ty, generics) => { + ItemKind::TyAlias(_, generics, ty) => { *cx.current_type_aliases.entry(def_id).or_insert(0) += 1; - let rustdoc_ty = clean_ty(hir_ty, cx); + let rustdoc_ty = clean_ty(ty, cx); let type_ = - clean_middle_ty(ty::Binder::dummy(lower_ty(cx.tcx, hir_ty)), cx, None, None); + clean_middle_ty(ty::Binder::dummy(lower_ty(cx.tcx, ty)), cx, None, None); let generics = clean_generics(generics, cx); if let Some(count) = cx.current_type_aliases.get_mut(&def_id) { *count -= 1; @@ -2846,7 +2846,7 @@ fn clean_maybe_renamed_item<'tcx>( )); return ret; } - ItemKind::Enum(_, def, generics) => EnumItem(Enum { + ItemKind::Enum(_, generics, def) => EnumItem(Enum { variants: def.variants.iter().map(|v| clean_variant(v, cx)).collect(), generics: clean_generics(generics, cx), }), @@ -2854,11 +2854,11 @@ fn clean_maybe_renamed_item<'tcx>( generics: clean_generics(generics, cx), bounds: bounds.iter().filter_map(|x| clean_generic_bound(x, cx)).collect(), }), - ItemKind::Union(_, variant_data, generics) => UnionItem(Union { + ItemKind::Union(_, generics, variant_data) => UnionItem(Union { generics: clean_generics(generics, cx), fields: variant_data.fields().iter().map(|x| clean_field(x, cx)).collect(), }), - ItemKind::Struct(_, variant_data, generics) => StructItem(Struct { + ItemKind::Struct(_, generics, variant_data) => StructItem(Struct { ctor_kind: variant_data.ctor_kind(), generics: clean_generics(generics, cx), fields: variant_data.fields().iter().map(|x| clean_field(x, cx)).collect(), diff --git a/src/librustdoc/passes/calculate_doc_coverage.rs b/src/librustdoc/passes/calculate_doc_coverage.rs index c9f0baaaa4c..66d8b667a4c 100644 --- a/src/librustdoc/passes/calculate_doc_coverage.rs +++ b/src/librustdoc/passes/calculate_doc_coverage.rs @@ -241,7 +241,7 @@ impl DocVisitor<'_> for CoverageCalculator<'_, '_> { data: hir::VariantData::Tuple(_, _, _), .. }) | hir::Node::Item(hir::Item { - kind: hir::ItemKind::Struct(_, hir::VariantData::Tuple(_, _, _), _), + kind: hir::ItemKind::Struct(_, _, hir::VariantData::Tuple(_, _, _)), .. 
}) ) diff --git a/src/tools/cargo b/src/tools/cargo -Subproject 68db37499f2de8acef704c73d9031be6fbcbaee +Subproject 64a12460708cf146e16cc61f28aba5dc2463bbb diff --git a/src/tools/clippy/clippy_lints/src/arbitrary_source_item_ordering.rs b/src/tools/clippy/clippy_lints/src/arbitrary_source_item_ordering.rs index 5c1c85d3918..59a0c7c8868 100644 --- a/src/tools/clippy/clippy_lints/src/arbitrary_source_item_ordering.rs +++ b/src/tools/clippy/clippy_lints/src/arbitrary_source_item_ordering.rs @@ -272,7 +272,7 @@ impl<'tcx> LateLintPass<'tcx> for ArbitrarySourceItemOrdering { return; } match &item.kind { - ItemKind::Enum(_, enum_def, _generics) if self.enable_ordering_for_enum => { + ItemKind::Enum(_, _generics, enum_def) if self.enable_ordering_for_enum => { let mut cur_v: Option<&Variant<'_>> = None; for variant in enum_def.variants { if variant.span.in_external_macro(cx.sess().source_map()) { @@ -288,7 +288,7 @@ impl<'tcx> LateLintPass<'tcx> for ArbitrarySourceItemOrdering { cur_v = Some(variant); } }, - ItemKind::Struct(_, VariantData::Struct { fields, .. }, _generics) if self.enable_ordering_for_struct => { + ItemKind::Struct(_, _generics, VariantData::Struct { fields, .. }) if self.enable_ordering_for_struct => { let mut cur_f: Option<&FieldDef<'_>> = None; for field in *fields { if field.span.in_external_macro(cx.sess().source_map()) { diff --git a/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs b/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs index 8c12364883c..4414aebbf9a 100644 --- a/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs +++ b/src/tools/clippy/clippy_lints/src/empty_with_brackets.rs @@ -92,7 +92,7 @@ impl_lint_pass!(EmptyWithBrackets => [EMPTY_STRUCTS_WITH_BRACKETS, EMPTY_ENUM_VA impl LateLintPass<'_> for EmptyWithBrackets { fn check_item(&mut self, cx: &LateContext<'_>, item: &Item<'_>) { - if let ItemKind::Struct(ident, var_data, _) = &item.kind + if let ItemKind::Struct(ident, _, var_data) = &item.kind && !item.span.from_expansion() && has_brackets(var_data) && let span_after_ident = item.span.with_lo(ident.span.hi()) diff --git a/src/tools/clippy/clippy_lints/src/enum_clike.rs b/src/tools/clippy/clippy_lints/src/enum_clike.rs index ec81294624e..098571a5351 100644 --- a/src/tools/clippy/clippy_lints/src/enum_clike.rs +++ b/src/tools/clippy/clippy_lints/src/enum_clike.rs @@ -38,7 +38,7 @@ impl<'tcx> LateLintPass<'tcx> for UnportableVariant { if cx.tcx.data_layout.pointer_size.bits() != 64 { return; } - if let ItemKind::Enum(_, def, _) = &item.kind { + if let ItemKind::Enum(_, _, def) = &item.kind { for var in def.variants { if let Some(anon_const) = &var.disr_expr { let def_id = cx.tcx.hir_body_owner_def_id(anon_const.body); diff --git a/src/tools/clippy/clippy_lints/src/excessive_bools.rs b/src/tools/clippy/clippy_lints/src/excessive_bools.rs index 38d115b878c..686dc5c3c4f 100644 --- a/src/tools/clippy/clippy_lints/src/excessive_bools.rs +++ b/src/tools/clippy/clippy_lints/src/excessive_bools.rs @@ -127,7 +127,7 @@ fn check_fn_decl(cx: &LateContext<'_>, decl: &FnDecl<'_>, sp: Span, max: u64) { impl<'tcx> LateLintPass<'tcx> for ExcessiveBools { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'tcx>) { - if let ItemKind::Struct(_, variant_data, _) = &item.kind + if let ItemKind::Struct(_, _, variant_data) = &item.kind && variant_data.fields().len() as u64 > self.max_struct_bools && has_n_bools( variant_data.fields().iter().map(|field| field.ty), diff --git a/src/tools/clippy/clippy_lints/src/exhaustive_items.rs 
b/src/tools/clippy/clippy_lints/src/exhaustive_items.rs index 5a74e97c97c..1fb0e4d24d0 100644 --- a/src/tools/clippy/clippy_lints/src/exhaustive_items.rs +++ b/src/tools/clippy/clippy_lints/src/exhaustive_items.rs @@ -76,7 +76,7 @@ impl LateLintPass<'_> for ExhaustiveItems { "exported enums should not be exhaustive", [].as_slice(), ), - ItemKind::Struct(_, v, ..) => ( + ItemKind::Struct(_, _, v) => ( EXHAUSTIVE_STRUCTS, "exported structs should not be exhaustive", v.fields(), diff --git a/src/tools/clippy/clippy_lints/src/functions/result.rs b/src/tools/clippy/clippy_lints/src/functions/result.rs index 00ce4cfcc52..bb98ae82611 100644 --- a/src/tools/clippy/clippy_lints/src/functions/result.rs +++ b/src/tools/clippy/clippy_lints/src/functions/result.rs @@ -103,7 +103,7 @@ fn check_result_large_err<'tcx>(cx: &LateContext<'tcx>, err_ty: Ty<'tcx>, hir_ty .did() .as_local() && let hir::Node::Item(item) = cx.tcx.hir_node_by_def_id(local_def_id) - && let hir::ItemKind::Enum(_, ref def, _) = item.kind + && let hir::ItemKind::Enum(_, _, ref def) = item.kind { let variants_size = AdtVariantInfo::new(cx, *adt, subst); if let Some((first_variant, variants)) = variants_size.split_first() diff --git a/src/tools/clippy/clippy_lints/src/item_name_repetitions.rs b/src/tools/clippy/clippy_lints/src/item_name_repetitions.rs index 3d4dcd02070..9c91cf68085 100644 --- a/src/tools/clippy/clippy_lints/src/item_name_repetitions.rs +++ b/src/tools/clippy/clippy_lints/src/item_name_repetitions.rs @@ -535,10 +535,10 @@ impl LateLintPass<'_> for ItemNameRepetitions { if span_is_local(item.span) { match item.kind { - ItemKind::Enum(_, def, _) => { + ItemKind::Enum(_, _, def) => { self.check_variants(cx, item, &def); }, - ItemKind::Struct(_, VariantData::Struct { fields, .. }, _) => { + ItemKind::Struct(_, _, VariantData::Struct { fields, .. }) => { self.check_fields(cx, item, fields); }, _ => (), diff --git a/src/tools/clippy/clippy_lints/src/large_const_arrays.rs b/src/tools/clippy/clippy_lints/src/large_const_arrays.rs index 394005e9912..cee8ca1261e 100644 --- a/src/tools/clippy/clippy_lints/src/large_const_arrays.rs +++ b/src/tools/clippy/clippy_lints/src/large_const_arrays.rs @@ -48,7 +48,7 @@ impl_lint_pass!(LargeConstArrays => [LARGE_CONST_ARRAYS]); impl<'tcx> LateLintPass<'tcx> for LargeConstArrays { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) { - if let ItemKind::Const(ident, _, generics, _) = &item.kind + if let ItemKind::Const(ident, generics, _, _) = &item.kind // Since static items may not have generics, skip generic const items. // FIXME(generic_const_items): I don't think checking `generics.hwcp` suffices as it // doesn't account for empty where-clauses that only consist of keyword `where` IINM. 
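
Note (editorial, not part of the diff): the librustdoc and clippy hunks above and below all adapt to the same `hir::ItemKind` reordering, in which each item variant now carries its name first, then its generics, then its payload, and `Static` leads with its mutability. The following is a minimal sketch of how a rustc-private tool (clippy-style) would destructure the new layout; the helper name is hypothetical and the code assumes the unstable, rustc-internal `rustc_hir` API in the state this diff leaves it.

use rustc_hir::{Item, ItemKind};

// Illustrative helper, not part of this diff: summarizes an item using the
// post-change field order (ident, generics, payload; mutability first for statics).
fn item_summary(item: &Item<'_>) -> String {
    match &item.kind {
        ItemKind::Struct(ident, _generics, variant_data) =>
            format!("struct {ident} with {} fields", variant_data.fields().len()),
        ItemKind::Enum(ident, _generics, enum_def) =>
            format!("enum {ident} with {} variants", enum_def.variants.len()),
        ItemKind::Union(ident, _generics, variant_data) =>
            format!("union {ident} with {} fields", variant_data.fields().len()),
        ItemKind::TyAlias(ident, _generics, _ty) => format!("type alias {ident}"),
        ItemKind::Const(ident, _generics, _ty, _body) => format!("const {ident}"),
        ItemKind::Static(_mutability, ident, _ty, _body) => format!("static {ident}"),
        _ => "other item".to_string(),
    }
}

The reordering puts the generics in the same tuple position across `Struct`, `Enum`, `Union`, `TyAlias` and `Const`, which is why most call sites touched in this diff only needed their tuple patterns permuted rather than any logic changes.
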
diff --git a/src/tools/clippy/clippy_lints/src/large_enum_variant.rs b/src/tools/clippy/clippy_lints/src/large_enum_variant.rs index d08efa0ec9c..e85d779b488 100644 --- a/src/tools/clippy/clippy_lints/src/large_enum_variant.rs +++ b/src/tools/clippy/clippy_lints/src/large_enum_variant.rs @@ -73,7 +73,7 @@ impl_lint_pass!(LargeEnumVariant => [LARGE_ENUM_VARIANT]); impl<'tcx> LateLintPass<'tcx> for LargeEnumVariant { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &Item<'tcx>) { - if let ItemKind::Enum(ident, ref def, _) = item.kind + if let ItemKind::Enum(ident, _, ref def) = item.kind && let ty = cx.tcx.type_of(item.owner_id).instantiate_identity() && let ty::Adt(adt, subst) = ty.kind() && adt.variants().len() > 1 diff --git a/src/tools/clippy/clippy_lints/src/manual_non_exhaustive.rs b/src/tools/clippy/clippy_lints/src/manual_non_exhaustive.rs index 067b92cd46e..3562b1ff5cc 100644 --- a/src/tools/clippy/clippy_lints/src/manual_non_exhaustive.rs +++ b/src/tools/clippy/clippy_lints/src/manual_non_exhaustive.rs @@ -87,7 +87,7 @@ impl<'tcx> LateLintPass<'tcx> for ManualNonExhaustive { } match item.kind { - ItemKind::Enum(_, def, _) if def.variants.len() > 1 => { + ItemKind::Enum(_, _, def) if def.variants.len() > 1 => { let iter = def.variants.iter().filter_map(|v| { (matches!(v.data, VariantData::Unit(_, _)) && is_doc_hidden(cx.tcx.hir_attrs(v.hir_id))) .then_some((v.def_id, v.span)) @@ -98,7 +98,7 @@ impl<'tcx> LateLintPass<'tcx> for ManualNonExhaustive { self.potential_enums.push((item.owner_id.def_id, id, item.span, span)); } }, - ItemKind::Struct(_, variant_data, _) => { + ItemKind::Struct(_, _, variant_data) => { let fields = variant_data.fields(); let private_fields = fields .iter() diff --git a/src/tools/clippy/clippy_lints/src/missing_fields_in_debug.rs b/src/tools/clippy/clippy_lints/src/missing_fields_in_debug.rs index be7dd74fd62..d4d33029dbd 100644 --- a/src/tools/clippy/clippy_lints/src/missing_fields_in_debug.rs +++ b/src/tools/clippy/clippy_lints/src/missing_fields_in_debug.rs @@ -225,7 +225,7 @@ impl<'tcx> LateLintPass<'tcx> for MissingFieldsInDebug { && let typeck_results = cx.tcx.typeck_body(*body_id) && should_lint(cx, typeck_results, block) // we intentionally only lint structs, see lint description - && let ItemKind::Struct(_, data, _) = &self_item.kind + && let ItemKind::Struct(_, _, data) = &self_item.kind { check_struct(cx, typeck_results, block, self_ty, item, data); } diff --git a/src/tools/clippy/clippy_lints/src/non_std_lazy_statics.rs b/src/tools/clippy/clippy_lints/src/non_std_lazy_statics.rs index f66b9519317..abee3c44c5a 100644 --- a/src/tools/clippy/clippy_lints/src/non_std_lazy_statics.rs +++ b/src/tools/clippy/clippy_lints/src/non_std_lazy_statics.rs @@ -187,7 +187,7 @@ struct LazyInfo { impl LazyInfo { fn from_item(cx: &LateContext<'_>, item: &Item<'_>) -> Option<Self> { // Check if item is a `once_cell:sync::Lazy` static. 
- if let ItemKind::Static(_, ty, _, body_id) = item.kind + if let ItemKind::Static(_, _, ty, body_id) = item.kind && let Some(path_def_id) = path_def_id(cx, ty) && let hir::TyKind::Path(hir::QPath::Resolved(_, path)) = ty.kind && paths::ONCE_CELL_SYNC_LAZY.matches(cx, path_def_id) diff --git a/src/tools/clippy/clippy_lints/src/pub_underscore_fields.rs b/src/tools/clippy/clippy_lints/src/pub_underscore_fields.rs index e4a9bf7a848..66c59cb70d3 100644 --- a/src/tools/clippy/clippy_lints/src/pub_underscore_fields.rs +++ b/src/tools/clippy/clippy_lints/src/pub_underscore_fields.rs @@ -58,7 +58,7 @@ impl PubUnderscoreFields { impl<'tcx> LateLintPass<'tcx> for PubUnderscoreFields { fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) { // This lint only pertains to structs. - let ItemKind::Struct(_, variant_data, _) = &item.kind else { + let ItemKind::Struct(_, _, variant_data) = &item.kind else { return; }; diff --git a/src/tools/clippy/clippy_lints/src/trailing_empty_array.rs b/src/tools/clippy/clippy_lints/src/trailing_empty_array.rs index 20bf3a0bff1..75a82770af0 100644 --- a/src/tools/clippy/clippy_lints/src/trailing_empty_array.rs +++ b/src/tools/clippy/clippy_lints/src/trailing_empty_array.rs @@ -57,7 +57,7 @@ impl<'tcx> LateLintPass<'tcx> for TrailingEmptyArray { } fn is_struct_with_trailing_zero_sized_array<'tcx>(cx: &LateContext<'tcx>, item: &Item<'tcx>) -> bool { - if let ItemKind::Struct(_, data, _) = &item.kind + if let ItemKind::Struct(_, _, data) = &item.kind && let Some(last_field) = data.fields().last() && let field_ty = cx.tcx.normalize_erasing_regions( cx.typing_env(), diff --git a/src/tools/clippy/clippy_lints/src/types/mod.rs b/src/tools/clippy/clippy_lints/src/types/mod.rs index c1c7cc51656..515be5adeed 100644 --- a/src/tools/clippy/clippy_lints/src/types/mod.rs +++ b/src/tools/clippy/clippy_lints/src/types/mod.rs @@ -447,7 +447,7 @@ impl<'tcx> LateLintPass<'tcx> for Types { let is_exported = cx.effective_visibilities.is_exported(item.owner_id.def_id); match item.kind { - ItemKind::Static(_, ty, _, _) | ItemKind::Const(_, ty, _, _) => self.check_ty( + ItemKind::Static(_, _, ty, _) | ItemKind::Const(_, _, ty, _) => self.check_ty( cx, ty, CheckTyContext { diff --git a/src/tools/clippy/clippy_lints/src/upper_case_acronyms.rs b/src/tools/clippy/clippy_lints/src/upper_case_acronyms.rs index 8922478e718..02281b9e922 100644 --- a/src/tools/clippy/clippy_lints/src/upper_case_acronyms.rs +++ b/src/tools/clippy/clippy_lints/src/upper_case_acronyms.rs @@ -134,7 +134,7 @@ impl LateLintPass<'_> for UpperCaseAcronyms { ItemKind::TyAlias(ident, ..) | ItemKind::Struct(ident, ..) | ItemKind::Trait(_, _, ident, ..) => { check_ident(cx, &ident, it.hir_id(), self.upper_case_acronyms_aggressive); }, - ItemKind::Enum(ident, ref enumdef, _) => { + ItemKind::Enum(ident, _, ref enumdef) => { check_ident(cx, &ident, it.hir_id(), self.upper_case_acronyms_aggressive); // check enum variants separately because again we only want to lint on private enums and // the fn check_variant does not know about the vis of the enum of its variants diff --git a/src/tools/clippy/clippy_utils/src/check_proc_macro.rs b/src/tools/clippy/clippy_utils/src/check_proc_macro.rs index 004c840c331..407e92d88fb 100644 --- a/src/tools/clippy/clippy_utils/src/check_proc_macro.rs +++ b/src/tools/clippy/clippy_utils/src/check_proc_macro.rs @@ -249,7 +249,7 @@ fn item_search_pat(item: &Item<'_>) -> (Pat, Pat) { ItemKind::ForeignMod { .. } => (Pat::Str("extern"), Pat::Str("}")), ItemKind::TyAlias(..) 
=> (Pat::Str("type"), Pat::Str(";")), ItemKind::Enum(..) => (Pat::Str("enum"), Pat::Str("}")), - ItemKind::Struct(_, VariantData::Struct { .. }, _) => (Pat::Str("struct"), Pat::Str("}")), + ItemKind::Struct(_, _, VariantData::Struct { .. }) => (Pat::Str("struct"), Pat::Str("}")), ItemKind::Struct(..) => (Pat::Str("struct"), Pat::Str(";")), ItemKind::Union(..) => (Pat::Str("union"), Pat::Str("}")), ItemKind::Trait(_, Safety::Unsafe, ..) diff --git a/src/tools/clippy/clippy_utils/src/lib.rs b/src/tools/clippy/clippy_utils/src/lib.rs index 8716ee48c88..2020f3d6b5b 100644 --- a/src/tools/clippy/clippy_utils/src/lib.rs +++ b/src/tools/clippy/clippy_utils/src/lib.rs @@ -2362,7 +2362,7 @@ fn with_test_item_names(tcx: TyCtxt<'_>, module: LocalModDefId, f: impl FnOnce(& for id in tcx.hir_module_free_items(module) { if matches!(tcx.def_kind(id.owner_id), DefKind::Const) && let item = tcx.hir_item(id) - && let ItemKind::Const(ident, ty, _generics, _body) = item.kind + && let ItemKind::Const(ident, _generics, ty, _body) = item.kind && let TyKind::Path(QPath::Resolved(_, path)) = ty.kind // We could also check for the type name `test::TestDescAndFn` && let Res::Def(DefKind::Struct, _) = path.res diff --git a/src/tools/clippy/tests/ui/map_flatten.rs b/src/tools/clippy/tests/ui/map_flatten.rs index d7e9c9d9900..0970da8039a 100644 --- a/src/tools/clippy/tests/ui/map_flatten.rs +++ b/src/tools/clippy/tests/ui/map_flatten.rs @@ -1,5 +1,5 @@ #![warn(clippy::map_flatten)] -#![feature(result_flattening)] + //@no-rustfix // issue #8506, multi-line #[rustfmt::skip] diff --git a/src/tools/clippy/tests/ui/map_flatten_fixable.fixed b/src/tools/clippy/tests/ui/map_flatten_fixable.fixed index f8379ed23c5..6d8a27d3018 100644 --- a/src/tools/clippy/tests/ui/map_flatten_fixable.fixed +++ b/src/tools/clippy/tests/ui/map_flatten_fixable.fixed @@ -1,4 +1,3 @@ -#![feature(result_flattening)] #![allow( clippy::let_underscore_untyped, clippy::missing_docs_in_private_items, diff --git a/src/tools/clippy/tests/ui/map_flatten_fixable.rs b/src/tools/clippy/tests/ui/map_flatten_fixable.rs index 040a9ca85f6..845e3a79ae2 100644 --- a/src/tools/clippy/tests/ui/map_flatten_fixable.rs +++ b/src/tools/clippy/tests/ui/map_flatten_fixable.rs @@ -1,4 +1,3 @@ -#![feature(result_flattening)] #![allow( clippy::let_underscore_untyped, clippy::missing_docs_in_private_items, diff --git a/src/tools/clippy/tests/ui/map_flatten_fixable.stderr b/src/tools/clippy/tests/ui/map_flatten_fixable.stderr index fe68eb7e4ab..05d4d9a6ad8 100644 --- a/src/tools/clippy/tests/ui/map_flatten_fixable.stderr +++ b/src/tools/clippy/tests/ui/map_flatten_fixable.stderr @@ -1,5 +1,5 @@ error: called `map(..).flatten()` on `Iterator` - --> tests/ui/map_flatten_fixable.rs:17:47 + --> tests/ui/map_flatten_fixable.rs:16:47 | LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().collect(); | ^^^^^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `filter_map` and remove the `.flatten()`: `filter_map(option_id)` @@ -8,43 +8,43 @@ LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id).flatten().coll = help: to override `-D warnings` add `#[allow(clippy::map_flatten)]` error: called `map(..).flatten()` on `Iterator` - --> tests/ui/map_flatten_fixable.rs:19:47 + --> tests/ui/map_flatten_fixable.rs:18:47 | LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_ref).flatten().collect(); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `filter_map` and remove the `.flatten()`: `filter_map(option_id_ref)` error: called `map(..).flatten()` 
on `Iterator` - --> tests/ui/map_flatten_fixable.rs:21:47 + --> tests/ui/map_flatten_fixable.rs:20:47 | LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(option_id_closure).flatten().collect(); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `filter_map` and remove the `.flatten()`: `filter_map(option_id_closure)` error: called `map(..).flatten()` on `Iterator` - --> tests/ui/map_flatten_fixable.rs:23:47 + --> tests/ui/map_flatten_fixable.rs:22:47 | LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| x.checked_add(1)).flatten().collect(); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `filter_map` and remove the `.flatten()`: `filter_map(|x| x.checked_add(1))` error: called `map(..).flatten()` on `Iterator` - --> tests/ui/map_flatten_fixable.rs:27:47 + --> tests/ui/map_flatten_fixable.rs:26:47 | LL | let _: Vec<_> = vec![5_i8; 6].into_iter().map(|x| 0..x).flatten().collect(); | ^^^^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `flat_map` and remove the `.flatten()`: `flat_map(|x| 0..x)` error: called `map(..).flatten()` on `Option` - --> tests/ui/map_flatten_fixable.rs:31:40 + --> tests/ui/map_flatten_fixable.rs:30:40 | LL | let _: Option<_> = (Some(Some(1))).map(|x| x).flatten(); | ^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `and_then` and remove the `.flatten()`: `and_then(|x| x)` error: called `map(..).flatten()` on `Result` - --> tests/ui/map_flatten_fixable.rs:35:42 + --> tests/ui/map_flatten_fixable.rs:34:42 | LL | let _: Result<_, &str> = (Ok(Ok(1))).map(|x| x).flatten(); | ^^^^^^^^^^^^^^^^^^^^ help: try replacing `map` with `and_then` and remove the `.flatten()`: `and_then(|x| x)` error: called `map(..).flatten()` on `Iterator` - --> tests/ui/map_flatten_fixable.rs:45:10 + --> tests/ui/map_flatten_fixable.rs:44:10 | LL | .map(|n| match n { | __________^ @@ -74,7 +74,7 @@ LL ~ }); | error: called `map(..).flatten()` on `Option` - --> tests/ui/map_flatten_fixable.rs:66:10 + --> tests/ui/map_flatten_fixable.rs:65:10 | LL | .map(|_| { | __________^ diff --git a/src/tools/generate-copyright/src/cargo_metadata.rs b/src/tools/generate-copyright/src/cargo_metadata.rs index b717bd53eb1..3fae26bda47 100644 --- a/src/tools/generate-copyright/src/cargo_metadata.rs +++ b/src/tools/generate-copyright/src/cargo_metadata.rs @@ -46,11 +46,12 @@ pub struct PackageMetadata { /// covered it already. pub fn get_metadata_and_notices( cargo: &Path, + cargo_home_path: &Path, vendor_path: &Path, root_path: &Path, manifest_paths: &[PathBuf], ) -> Result<BTreeMap<Package, PackageMetadata>, Error> { - let mut output = get_metadata(cargo, root_path, manifest_paths)?; + let mut output = get_metadata(cargo, cargo_home_path, root_path, manifest_paths)?; // Now for each dependency we found, go and grab any important looking files for (package, metadata) in output.iter_mut() { @@ -66,6 +67,7 @@ pub fn get_metadata_and_notices( /// assume `reuse` has covered it already. 
pub fn get_metadata( cargo: &Path, + cargo_home_path: &Path, root_path: &Path, manifest_paths: &[PathBuf], ) -> Result<BTreeMap<Package, PackageMetadata>, Error> { @@ -81,8 +83,11 @@ pub fn get_metadata( .manifest_path(manifest_path) .exec()?; for package in metadata.packages { - let manifest_path = package.manifest_path.as_path(); - if manifest_path.starts_with(root_path) { + let package_manifest_path = package.manifest_path.as_path(); + + if package_manifest_path.starts_with(root_path) + && !package_manifest_path.starts_with(cargo_home_path) + { // it's an in-tree dependency and reuse covers it continue; } diff --git a/src/tools/generate-copyright/src/main.rs b/src/tools/generate-copyright/src/main.rs index d6ed7261b7c..5497db1f5f3 100644 --- a/src/tools/generate-copyright/src/main.rs +++ b/src/tools/generate-copyright/src/main.rs @@ -15,6 +15,7 @@ mod cargo_metadata; /// /// Run `x.py run generate-copyright` fn main() -> Result<(), Error> { + let cargo_home = env_path("CARGO_HOME")?; let dest_file = env_path("DEST")?; let libstd_dest_file = env_path("DEST_LIBSTD")?; let src_dir = env_path("SRC_DIR")?; @@ -39,11 +40,17 @@ fn main() -> Result<(), Error> { .collect::<Vec<_>>(); // Scan Cargo dependencies - let mut collected_cargo_metadata = - cargo_metadata::get_metadata_and_notices(&cargo, &vendor_dir, &src_dir, &cargo_manifests)?; + let mut collected_cargo_metadata = cargo_metadata::get_metadata_and_notices( + &cargo, + &cargo_home, + &vendor_dir, + &src_dir, + &cargo_manifests, + )?; let library_collected_cargo_metadata = cargo_metadata::get_metadata_and_notices( &cargo, + &cargo_home, &vendor_dir, &src_dir, &library_manifests, diff --git a/src/tools/miri/cargo-miri/src/phases.rs b/src/tools/miri/cargo-miri/src/phases.rs index 4857f62cd3a..a5e019a8ea9 100644 --- a/src/tools/miri/cargo-miri/src/phases.rs +++ b/src/tools/miri/cargo-miri/src/phases.rs @@ -90,7 +90,7 @@ pub fn phase_cargo_miri(mut args: impl Iterator<Item = String>) { "`cargo miri` supports the following subcommands: `run`, `test`, `nextest`, `clean`, and `setup`." ), }; - let verbose = num_arg_flag("-v"); + let verbose = num_arg_flag("-v") + num_arg_flag("--verbose"); let quiet = has_arg_flag("-q") || has_arg_flag("--quiet"); // Determine the involved architectures. diff --git a/src/tools/miri/ci/ci.sh b/src/tools/miri/ci/ci.sh index 9ae15739dcb..8941af681a4 100755 --- a/src/tools/miri/ci/ci.sh +++ b/src/tools/miri/ci/ci.sh @@ -142,12 +142,12 @@ case $HOST_TARGET in # Host GC_STRESS=1 MIR_OPT=1 MANY_SEEDS=64 TEST_BENCH=1 CARGO_MIRI_ENV=1 run_tests # Extra tier 1 - # With reduced many-seed count to avoid spending too much time on that. - # (All OSes and ABIs are run with 64 seeds at least once though via the macOS runner.) 
- MANY_SEEDS=16 TEST_TARGET=i686-unknown-linux-gnu run_tests - MANY_SEEDS=16 TEST_TARGET=aarch64-unknown-linux-gnu run_tests - MANY_SEEDS=16 TEST_TARGET=x86_64-apple-darwin run_tests - MANY_SEEDS=16 TEST_TARGET=x86_64-pc-windows-gnu run_tests + MANY_SEEDS=64 TEST_TARGET=i686-unknown-linux-gnu run_tests + MANY_SEEDS=64 TEST_TARGET=aarch64-unknown-linux-gnu run_tests + MANY_SEEDS=64 TEST_TARGET=x86_64-apple-darwin run_tests + MANY_SEEDS=64 TEST_TARGET=x86_64-pc-windows-gnu run_tests + # Extra tier 1 candidate + MANY_SEEDS=64 TEST_TARGET=aarch64-pc-windows-msvc run_tests ;; aarch64-apple-darwin) # Host @@ -156,7 +156,8 @@ case $HOST_TARGET in MANY_SEEDS=64 TEST_TARGET=i686-pc-windows-gnu run_tests MANY_SEEDS=64 TEST_TARGET=x86_64-pc-windows-msvc CARGO_MIRI_ENV=1 run_tests # Extra tier 2 - MANY_SEEDS=16 TEST_TARGET=arm-unknown-linux-gnueabi run_tests + MANY_SEEDS=16 TEST_TARGET=arm-unknown-linux-gnueabi run_tests # 32bit ARM + MANY_SEEDS=16 TEST_TARGET=aarch64-pc-windows-gnullvm run_tests # gnullvm ABI MANY_SEEDS=16 TEST_TARGET=s390x-unknown-linux-gnu run_tests # big-endian architecture of choice # Not officially supported tier 2 MANY_SEEDS=16 TEST_TARGET=mips-unknown-linux-gnu run_tests # a 32bit big-endian target, and also a target without 64bit atomics @@ -178,7 +179,7 @@ case $HOST_TARGET in # Host # Without GC_STRESS and with reduced many-seeds count as this is the slowest runner. # (The macOS runner checks windows-msvc with full many-seeds count.) - MIR_OPT=1 MANY_SEEDS=16 TEST_BENCH=1 run_tests + MIR_OPT=1 MANY_SEEDS=64 TEST_BENCH=1 run_tests # Extra tier 1 # We really want to ensure a Linux target works on a Windows host, # and a 64bit target works on a 32bit host. diff --git a/src/tools/miri/miri-script/src/commands.rs b/src/tools/miri/miri-script/src/commands.rs index 3b7b159aeab..86362145d47 100644 --- a/src/tools/miri/miri-script/src/commands.rs +++ b/src/tools/miri/miri-script/src/commands.rs @@ -1,4 +1,4 @@ -use std::collections::HashMap; +use std::collections::BTreeMap; use std::ffi::{OsStr, OsString}; use std::fmt::Write as _; use std::fs::{self, File}; @@ -404,7 +404,28 @@ impl Command { // We want to forward the host stdin so apparently we cannot use `cmd!`. let mut cmd = process::Command::new("git"); cmd.arg("rebase").arg(&base).arg("--interactive"); - cmd.env("GIT_SEQUENCE_EDITOR", env::current_exe()?); + let current_exe = { + if cfg!(windows) { + // Apparently git-for-Windows gets confused by backslashes if we just use + // `current_exe()` here. So replace them by forward slashes if this is not a "magic" + // path starting with "\\". This is clearly a git bug but we work around it here. + // Also see <https://github.com/rust-lang/miri/issues/4340>. + let bin = env::current_exe()?; + match bin.into_os_string().into_string() { + Err(not_utf8) => not_utf8.into(), // :shrug: + Ok(str) => { + if str.starts_with(r"\\") { + str.into() // don't touch these magic paths, they must use backslashes + } else { + str.replace('\\', "/").into() + } + } + } + } else { + env::current_exe()? + } + }; + cmd.env("GIT_SEQUENCE_EDITOR", current_exe); cmd.env("MIRI_SCRIPT_IS_GIT_SEQUENCE_EDITOR", "1"); cmd.current_dir(sh.current_dir()); let result = cmd.status()?; @@ -489,7 +510,9 @@ impl Command { sh.read_dir(benches_dir)? .into_iter() .filter(|path| path.is_dir()) - .map(|path| path.into_os_string().into_string().unwrap()) + // Only keep the basename: that matches the usage with a manual bench list, + // and it ensure the path concatenations below work as intended. 
+ .map(|path| path.file_name().unwrap().to_owned().into_string().unwrap()) .collect() } else { benches.into_iter().collect() @@ -530,14 +553,16 @@ impl Command { stddev: f64, } - let gather_results = || -> Result<HashMap<&str, BenchResult>> { + let gather_results = || -> Result<BTreeMap<&str, BenchResult>> { let baseline_temp_dir = results_json_dir.unwrap(); - let mut results = HashMap::new(); + let mut results = BTreeMap::new(); for bench in &benches { - let result = File::open(path!(baseline_temp_dir / format!("{bench}.bench.json")))?; - let mut result: serde_json::Value = - serde_json::from_reader(BufReader::new(result))?; - let result: BenchResult = serde_json::from_value(result["results"][0].take())?; + let result = File::open(path!(baseline_temp_dir / format!("{bench}.bench.json"))) + .context("failed to read hyperfine JSON")?; + let mut result: serde_json::Value = serde_json::from_reader(BufReader::new(result)) + .context("failed to parse hyperfine JSON")?; + let result: BenchResult = serde_json::from_value(result["results"][0].take()) + .context("failed to interpret hyperfine JSON")?; results.insert(bench as &str, result); } Ok(results) @@ -549,15 +574,15 @@ impl Command { serde_json::to_writer_pretty(BufWriter::new(baseline), &results)?; } else if let Some(baseline_file) = load_baseline { let new_results = gather_results()?; - let baseline_results: HashMap<String, BenchResult> = { + let baseline_results: BTreeMap<String, BenchResult> = { let f = File::open(baseline_file)?; serde_json::from_reader(BufReader::new(f))? }; println!( "Comparison with baseline (relative speed, lower is better for the new results):" ); - for (bench, new_result) in new_results.iter() { - let Some(baseline_result) = baseline_results.get(*bench) else { continue }; + for (bench, new_result) in new_results { + let Some(baseline_result) = baseline_results.get(bench) else { continue }; // Compare results (inspired by hyperfine) let ratio = new_result.mean / baseline_result.mean; diff --git a/src/tools/miri/rust-version b/src/tools/miri/rust-version index 46989695302..553d410b2bc 100644 --- a/src/tools/miri/rust-version +++ b/src/tools/miri/rust-version @@ -1 +1 @@ -2b96ddca1272960623e41829439df8dae82d20af +337c11e5932275e7d450c1f2e26f289f0ddfa717 diff --git a/src/tools/miri/src/alloc_bytes.rs b/src/tools/miri/src/alloc/alloc_bytes.rs index 2bac2659ec0..2a253952b27 100644 --- a/src/tools/miri/src/alloc_bytes.rs +++ b/src/tools/miri/src/alloc/alloc_bytes.rs @@ -1,12 +1,23 @@ use std::alloc::Layout; use std::borrow::Cow; use std::{alloc, slice}; +#[cfg(target_os = "linux")] +use std::{cell::RefCell, rc::Rc}; use rustc_abi::{Align, Size}; use rustc_middle::mir::interpret::AllocBytes; +#[cfg(target_os = "linux")] +use crate::alloc::isolated_alloc::IsolatedAlloc; use crate::helpers::ToU64 as _; +#[derive(Clone, Debug)] +pub enum MiriAllocParams { + Global, + #[cfg(target_os = "linux")] + Isolated(Rc<RefCell<IsolatedAlloc>>), +} + /// Allocation bytes that explicitly handle the layout of the data they're storing. /// This is necessary to interface with native code that accesses the program store in Miri. #[derive(Debug)] @@ -18,13 +29,16 @@ pub struct MiriAllocBytes { /// * If `self.layout.size() == 0`, then `self.ptr` was allocated with the equivalent layout with size 1. /// * Otherwise, `self.ptr` points to memory allocated with `self.layout`. 
ptr: *mut u8, + /// Whether this instance of `MiriAllocBytes` had its allocation created by calling `alloc::alloc()` + /// (`Global`) or the discrete allocator (`Isolated`) + params: MiriAllocParams, } impl Clone for MiriAllocBytes { fn clone(&self) -> Self { let bytes: Cow<'_, [u8]> = Cow::Borrowed(self); let align = Align::from_bytes(self.layout.align().to_u64()).unwrap(); - MiriAllocBytes::from_bytes(bytes, align, ()) + MiriAllocBytes::from_bytes(bytes, align, self.params.clone()) } } @@ -37,8 +51,16 @@ impl Drop for MiriAllocBytes { } else { self.layout }; + // SAFETY: Invariant, `self.ptr` points to memory allocated with `self.layout`. - unsafe { alloc::dealloc(self.ptr, alloc_layout) } + unsafe { + match self.params.clone() { + MiriAllocParams::Global => alloc::dealloc(self.ptr, alloc_layout), + #[cfg(target_os = "linux")] + MiriAllocParams::Isolated(alloc) => + alloc.borrow_mut().dealloc(self.ptr, alloc_layout), + } + } } } @@ -67,7 +89,8 @@ impl MiriAllocBytes { fn alloc_with( size: u64, align: u64, - alloc_fn: impl FnOnce(Layout) -> *mut u8, + params: MiriAllocParams, + alloc_fn: impl FnOnce(Layout, &MiriAllocParams) -> *mut u8, ) -> Result<MiriAllocBytes, ()> { let size = usize::try_from(size).map_err(|_| ())?; let align = usize::try_from(align).map_err(|_| ())?; @@ -75,27 +98,36 @@ impl MiriAllocBytes { // When size is 0 we allocate 1 byte anyway, to ensure each allocation has a unique address. let alloc_layout = if size == 0 { Layout::from_size_align(1, align).unwrap() } else { layout }; - let ptr = alloc_fn(alloc_layout); + let ptr = alloc_fn(alloc_layout, ¶ms); if ptr.is_null() { Err(()) } else { // SAFETY: All `MiriAllocBytes` invariants are fulfilled. - Ok(Self { ptr, layout }) + Ok(Self { ptr, layout, params }) } } } impl AllocBytes for MiriAllocBytes { - /// Placeholder! - type AllocParams = (); + type AllocParams = MiriAllocParams; - fn from_bytes<'a>(slice: impl Into<Cow<'a, [u8]>>, align: Align, _params: ()) -> Self { + fn from_bytes<'a>( + slice: impl Into<Cow<'a, [u8]>>, + align: Align, + params: MiriAllocParams, + ) -> Self { let slice = slice.into(); let size = slice.len(); let align = align.bytes(); // SAFETY: `alloc_fn` will only be used with `size != 0`. - let alloc_fn = |layout| unsafe { alloc::alloc(layout) }; - let alloc_bytes = MiriAllocBytes::alloc_with(size.to_u64(), align, alloc_fn) + let alloc_fn = |layout, params: &MiriAllocParams| unsafe { + match params { + MiriAllocParams::Global => alloc::alloc(layout), + #[cfg(target_os = "linux")] + MiriAllocParams::Isolated(alloc) => alloc.borrow_mut().alloc(layout), + } + }; + let alloc_bytes = MiriAllocBytes::alloc_with(size.to_u64(), align, params, alloc_fn) .unwrap_or_else(|()| { panic!("Miri ran out of memory: cannot create allocation of {size} bytes") }); @@ -105,12 +137,18 @@ impl AllocBytes for MiriAllocBytes { alloc_bytes } - fn zeroed(size: Size, align: Align, _params: ()) -> Option<Self> { + fn zeroed(size: Size, align: Align, params: MiriAllocParams) -> Option<Self> { let size = size.bytes(); let align = align.bytes(); // SAFETY: `alloc_fn` will only be used with `size != 0`. 
- let alloc_fn = |layout| unsafe { alloc::alloc_zeroed(layout) }; - MiriAllocBytes::alloc_with(size, align, alloc_fn).ok() + let alloc_fn = |layout, params: &MiriAllocParams| unsafe { + match params { + MiriAllocParams::Global => alloc::alloc_zeroed(layout), + #[cfg(target_os = "linux")] + MiriAllocParams::Isolated(alloc) => alloc.borrow_mut().alloc_zeroed(layout), + } + }; + MiriAllocBytes::alloc_with(size, align, params, alloc_fn).ok() } fn as_mut_ptr(&mut self) -> *mut u8 { diff --git a/src/tools/miri/src/alloc/isolated_alloc.rs b/src/tools/miri/src/alloc/isolated_alloc.rs new file mode 100644 index 00000000000..7b74d171373 --- /dev/null +++ b/src/tools/miri/src/alloc/isolated_alloc.rs @@ -0,0 +1,389 @@ +use std::alloc::{self, Layout}; + +use rustc_index::bit_set::DenseBitSet; + +/// How many bytes of memory each bit in the bitset represents. +const COMPRESSION_FACTOR: usize = 4; + +/// A dedicated allocator for interpreter memory contents, ensuring they are stored on dedicated +/// pages (not mixed with Miri's own memory). This is used in native-lib mode. +#[derive(Debug)] +pub struct IsolatedAlloc { + /// Pointers to page-aligned memory that has been claimed by the allocator. + /// Every pointer here must point to a page-sized allocation claimed via + /// the global allocator. These pointers are used for "small" allocations. + page_ptrs: Vec<*mut u8>, + /// Metadata about which bytes have been allocated on each page. The length + /// of this vector must be the same as that of `page_ptrs`, and the domain + /// size of the bitset must be exactly `page_size / COMPRESSION_FACTOR`. + /// + /// Conceptually, each bit of the bitset represents the allocation status of + /// one n-byte chunk on the corresponding element of `page_ptrs`. Thus, + /// indexing into it should be done with a value one-nth of the corresponding + /// offset on the matching `page_ptrs` element (n = `COMPRESSION_FACTOR`). + page_infos: Vec<DenseBitSet<usize>>, + /// Pointers to multiple-page-sized allocations. These must also be page-aligned, + /// with their size stored as the second element of the vector. + huge_ptrs: Vec<(*mut u8, usize)>, + /// The host (not emulated) page size. + page_size: usize, +} + +impl IsolatedAlloc { + /// Creates an empty allocator. + pub fn new() -> Self { + Self { + page_ptrs: Vec::new(), + huge_ptrs: Vec::new(), + page_infos: Vec::new(), + // SAFETY: `sysconf(_SC_PAGESIZE)` is always safe to call at runtime + // See https://www.man7.org/linux/man-pages/man3/sysconf.3.html + page_size: unsafe { libc::sysconf(libc::_SC_PAGESIZE).try_into().unwrap() }, + } + } + + /// For simplicity, we serve small allocations in multiples of COMPRESSION_FACTOR + /// bytes with at least that alignment. + #[inline] + fn normalized_layout(layout: Layout) -> Layout { + let align = + if layout.align() < COMPRESSION_FACTOR { COMPRESSION_FACTOR } else { layout.align() }; + let size = layout.size().next_multiple_of(COMPRESSION_FACTOR); + Layout::from_size_align(size, align).unwrap() + } + + /// Returns the layout used to allocate the pages that hold small allocations. + #[inline] + fn page_layout(&self) -> Layout { + Layout::from_size_align(self.page_size, self.page_size).unwrap() + } + + /// If the allocation is greater than a page, then round to the nearest page #. 
+ #[inline] + fn huge_normalized_layout(layout: Layout, page_size: usize) -> Layout { + // Allocate in page-sized chunks + let size = layout.size().next_multiple_of(page_size); + // And make sure the align is at least one page + let align = std::cmp::max(layout.align(), page_size); + Layout::from_size_align(size, align).unwrap() + } + + /// Determined whether a given normalized (size, align) should be sent to + /// `alloc_huge` / `dealloc_huge`. + #[inline] + fn is_huge_alloc(&self, layout: &Layout) -> bool { + layout.align() > self.page_size / 2 || layout.size() >= self.page_size / 2 + } + + /// Allocates memory as described in `Layout`. This memory should be deallocated + /// by calling `dealloc` on this same allocator. + /// + /// SAFETY: See `alloc::alloc()` + pub unsafe fn alloc(&mut self, layout: Layout) -> *mut u8 { + // SAFETY: Upheld by caller + unsafe { self.allocate(layout, false) } + } + + /// Same as `alloc`, but zeroes out the memory. + /// + /// SAFETY: See `alloc::alloc_zeroed()` + pub unsafe fn alloc_zeroed(&mut self, layout: Layout) -> *mut u8 { + // SAFETY: Upheld by caller + unsafe { self.allocate(layout, true) } + } + + /// Abstracts over the logic of `alloc_zeroed` vs `alloc`, as determined by + /// the `zeroed` argument. + /// + /// SAFETY: See `alloc::alloc()`, with the added restriction that `page_size` + /// corresponds to the host pagesize. + unsafe fn allocate(&mut self, layout: Layout, zeroed: bool) -> *mut u8 { + let layout = IsolatedAlloc::normalized_layout(layout); + if self.is_huge_alloc(&layout) { + // SAFETY: Validity of `layout` upheld by caller; we checked that + // the size and alignment are appropriate for being a huge alloc + unsafe { self.alloc_huge(layout, zeroed) } + } else { + for (&mut page, pinfo) in std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) { + // SAFETY: The value in `self.page_size` is used to allocate + // `page`, with page alignment + if let Some(ptr) = + unsafe { Self::alloc_small(self.page_size, layout, page, pinfo, zeroed) } + { + return ptr; + } + } + + // We get here only if there's no space in our existing pages + let page_size = self.page_size; + // Add another page and allocate from it; this cannot fail since the + // new page is empty and we already asserted it fits into a page + let (page, pinfo) = self.add_page(); + + // SAFETY: See comment on `alloc_from_page` above + unsafe { Self::alloc_small(page_size, layout, page, pinfo, zeroed).unwrap() } + } + } + + /// Used internally by `allocate` to abstract over some logic. + /// + /// SAFETY: `page` must be a page-aligned pointer to an allocated page, + /// where the allocation is (at least) `page_size` bytes. + unsafe fn alloc_small( + page_size: usize, + layout: Layout, + page: *mut u8, + pinfo: &mut DenseBitSet<usize>, + zeroed: bool, + ) -> Option<*mut u8> { + // Check every alignment-sized block and see if there exists a `size` + // chunk of empty space i.e. forall idx . !pinfo.contains(idx / n) + for offset in (0..page_size).step_by(layout.align()) { + let offset_pinfo = offset / COMPRESSION_FACTOR; + let size_pinfo = layout.size() / COMPRESSION_FACTOR; + // DenseBitSet::contains() panics if the index is out of bounds + if pinfo.domain_size() < offset_pinfo + size_pinfo { + break; + } + // FIXME: is there a more efficient way to check whether the entire range is unset + // in the bitset? 
+ let range_avail = !(offset_pinfo..offset_pinfo + size_pinfo).any(|i| pinfo.contains(i)); + if range_avail { + pinfo.insert_range(offset_pinfo..offset_pinfo + size_pinfo); + // SAFETY: We checked the available bytes after `idx` in the call + // to `domain_size` above and asserted there are at least `idx + + // layout.size()` bytes available and unallocated after it. + // `page` must point to the start of the page, so adding `idx` + // is safe per the above. + unsafe { + let ptr = page.add(offset); + if zeroed { + // Only write the bytes we were specifically asked to + // zero out, even if we allocated more + ptr.write_bytes(0, layout.size()); + } + return Some(ptr); + } + } + } + None + } + + /// Expands the available memory pool by adding one page. + fn add_page(&mut self) -> (*mut u8, &mut DenseBitSet<usize>) { + // SAFETY: The system page size, which is the layout size, cannot be 0 + let page_ptr = unsafe { alloc::alloc(self.page_layout()) }; + // `page_infos` has to have one bit for each `COMPRESSION_FACTOR`-sized chunk of bytes in the page. + assert!(self.page_size % COMPRESSION_FACTOR == 0); + self.page_infos.push(DenseBitSet::new_empty(self.page_size / COMPRESSION_FACTOR)); + self.page_ptrs.push(page_ptr); + (page_ptr, self.page_infos.last_mut().unwrap()) + } + + /// Allocates in multiples of one page on the host system. + /// + /// SAFETY: Same as `alloc()`. + unsafe fn alloc_huge(&mut self, layout: Layout, zeroed: bool) -> *mut u8 { + let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size); + // SAFETY: Upheld by caller + let ret = + unsafe { if zeroed { alloc::alloc_zeroed(layout) } else { alloc::alloc(layout) } }; + self.huge_ptrs.push((ret, layout.size())); + ret + } + + /// Deallocates a pointer from this allocator. + /// + /// SAFETY: This pointer must have been allocated by calling `alloc()` (or + /// `alloc_zeroed()`) with the same layout as the one passed on this same + /// `IsolatedAlloc`. + pub unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) { + let layout = IsolatedAlloc::normalized_layout(layout); + + if self.is_huge_alloc(&layout) { + // SAFETY: Partly upheld by caller, and we checked that the size + // and align, meaning this must have been allocated via `alloc_huge` + unsafe { + self.dealloc_huge(ptr, layout); + } + } else { + // SAFETY: It's not a huge allocation, therefore it is a small one. + let idx = unsafe { self.dealloc_small(ptr, layout) }; + + // This may have been the last allocation on this page. If so, free the entire page. + // FIXME: this can lead to threshold effects, we should probably add some form + // of hysteresis. + if self.page_infos[idx].is_empty() { + self.page_infos.remove(idx); + let page_ptr = self.page_ptrs.remove(idx); + // SAFETY: We checked that there are no outstanding allocations + // from us pointing to this page, and we know it was allocated + // with this layout + unsafe { + alloc::dealloc(page_ptr, self.page_layout()); + } + } + } + } + + /// Returns the index of the page that this was deallocated from + /// + /// SAFETY: the pointer must have been allocated with `alloc_small`. + unsafe fn dealloc_small(&mut self, ptr: *mut u8, layout: Layout) -> usize { + // Offset of the pointer in the current page + let offset = ptr.addr() % self.page_size; + // And then the page's base address + let page_addr = ptr.addr() - offset; + + // Find the page this allocation belongs to. + // This could be made faster if the list was sorted -- the allocator isn't fully optimized at the moment. 
+ let pinfo = std::iter::zip(&mut self.page_ptrs, &mut self.page_infos) + .enumerate() + .find(|(_, (page, _))| page.addr() == page_addr); + let Some((idx_of_pinfo, (_, pinfo))) = pinfo else { + panic!("Freeing in an unallocated page: {ptr:?}\nHolding pages {:?}", self.page_ptrs) + }; + // Mark this range as available in the page. + let ptr_idx_pinfo = offset / COMPRESSION_FACTOR; + let size_pinfo = layout.size() / COMPRESSION_FACTOR; + for idx in ptr_idx_pinfo..ptr_idx_pinfo + size_pinfo { + pinfo.remove(idx); + } + idx_of_pinfo + } + + /// SAFETY: Same as `dealloc()` with the added requirement that `layout` + /// must ask for a size larger than the host pagesize. + unsafe fn dealloc_huge(&mut self, ptr: *mut u8, layout: Layout) { + let layout = IsolatedAlloc::huge_normalized_layout(layout, self.page_size); + // Find the pointer matching in address with the one we got + let idx = self + .huge_ptrs + .iter() + .position(|pg| ptr.addr() == pg.0.addr()) + .expect("Freeing unallocated pages"); + // And kick it from the list + self.huge_ptrs.remove(idx); + // SAFETY: Caller ensures validity of the layout + unsafe { + alloc::dealloc(ptr, layout); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Helper function to assert that all bytes from `ptr` to `ptr.add(layout.size())` + /// are zeroes. + /// + /// SAFETY: `ptr` must have been allocated with `layout`. + unsafe fn assert_zeroes(ptr: *mut u8, layout: Layout) { + // SAFETY: Caller ensures this is valid + unsafe { + for ofs in 0..layout.size() { + assert_eq!(0, ptr.add(ofs).read()); + } + } + } + + /// Check that small (sub-pagesize) allocations are properly zeroed out. + #[test] + fn small_zeroes() { + let mut alloc = IsolatedAlloc::new(); + // 256 should be less than the pagesize on *any* system + let layout = Layout::from_size_align(256, 32).unwrap(); + // SAFETY: layout size is the constant above, not 0 + let ptr = unsafe { alloc.alloc_zeroed(layout) }; + // SAFETY: `ptr` was just allocated with `layout` + unsafe { + assert_zeroes(ptr, layout); + alloc.dealloc(ptr, layout); + } + } + + /// Check that huge (> 1 page) allocations are properly zeroed out also. + #[test] + fn huge_zeroes() { + let mut alloc = IsolatedAlloc::new(); + // 16k is about as big as pages get e.g. on macos aarch64 + let layout = Layout::from_size_align(16 * 1024, 128).unwrap(); + // SAFETY: layout size is the constant above, not 0 + let ptr = unsafe { alloc.alloc_zeroed(layout) }; + // SAFETY: `ptr` was just allocated with `layout` + unsafe { + assert_zeroes(ptr, layout); + alloc.dealloc(ptr, layout); + } + } + + /// Check that repeatedly reallocating the same memory will still zero out + /// everything properly + #[test] + fn repeated_allocs() { + let mut alloc = IsolatedAlloc::new(); + // Try both sub-pagesize allocs and those larger than / equal to a page + for sz in (1..=(16 * 1024)).step_by(128) { + let layout = Layout::from_size_align(sz, 1).unwrap(); + // SAFETY: all sizes in the range above are nonzero as we start from 1 + let ptr = unsafe { alloc.alloc_zeroed(layout) }; + // SAFETY: `ptr` was just allocated with `layout`, which was used + // to bound the access size + unsafe { + assert_zeroes(ptr, layout); + ptr.write_bytes(255, sz); + alloc.dealloc(ptr, layout); + } + } + } + + /// Checks that allocations of different sizes do not overlap, then for memory + /// leaks that might have occurred. 
+ #[test] + fn check_leaks_and_overlaps() { + let mut alloc = IsolatedAlloc::new(); + + // Some random sizes and aligns + let mut sizes = vec![32; 10]; + sizes.append(&mut vec![15; 4]); + sizes.append(&mut vec![256; 12]); + // Give it some multi-page ones too + sizes.append(&mut vec![32 * 1024; 4]); + + // Matching aligns for the sizes + let mut aligns = vec![16; 12]; + aligns.append(&mut vec![256; 2]); + aligns.append(&mut vec![64; 12]); + aligns.append(&mut vec![4096; 4]); + + // Make sure we didn't mess up in the test itself! + assert_eq!(sizes.len(), aligns.len()); + + // Aggregate the sizes and aligns into a vec of layouts, then allocate them + let layouts: Vec<_> = std::iter::zip(sizes, aligns) + .map(|(sz, al)| Layout::from_size_align(sz, al).unwrap()) + .collect(); + // SAFETY: all sizes specified in `sizes` are nonzero + let ptrs: Vec<_> = + layouts.iter().map(|layout| unsafe { alloc.alloc_zeroed(*layout) }).collect(); + + for (&ptr, &layout) in std::iter::zip(&ptrs, &layouts) { + // We requested zeroed allocations, so check that that's true + // Then write to the end of the current size, so if the allocs + // overlap (or the zeroing is wrong) then `assert_zeroes` will panic. + // Also check that the alignment we asked for was respected + assert_eq!(ptr.addr().strict_rem(layout.align()), 0); + // SAFETY: each `ptr` was allocated with its corresponding `layout`, + // which is used to bound the access size + unsafe { + assert_zeroes(ptr, layout); + ptr.write_bytes(255, layout.size()); + alloc.dealloc(ptr, layout); + } + } + + // And then verify that no memory was leaked after all that + assert!(alloc.page_ptrs.is_empty() && alloc.huge_ptrs.is_empty()); + } +} diff --git a/src/tools/miri/src/alloc/mod.rs b/src/tools/miri/src/alloc/mod.rs new file mode 100644 index 00000000000..3be885920d2 --- /dev/null +++ b/src/tools/miri/src/alloc/mod.rs @@ -0,0 +1,5 @@ +mod alloc_bytes; +#[cfg(target_os = "linux")] +pub mod isolated_alloc; + +pub use self::alloc_bytes::{MiriAllocBytes, MiriAllocParams}; diff --git a/src/tools/miri/src/alloc_addresses/mod.rs b/src/tools/miri/src/alloc_addresses/mod.rs index d2977a55e46..12a320b9676 100644 --- a/src/tools/miri/src/alloc_addresses/mod.rs +++ b/src/tools/miri/src/alloc_addresses/mod.rs @@ -135,11 +135,12 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { if this.machine.native_lib.is_some() { // In native lib mode, we use the "real" address of the bytes for this allocation. // This ensures the interpreted program and native code have the same view of memory. + let params = this.machine.get_default_alloc_params(); let base_ptr = match info.kind { AllocKind::LiveData => { if memory_kind == MiriMemoryKind::Global.into() { // For new global allocations, we always pre-allocate the memory to be able use the machine address directly. - let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align, ()) + let prepared_bytes = MiriAllocBytes::zeroed(info.size, info.align, params) .unwrap_or_else(|| { panic!("Miri ran out of memory: cannot create allocation of {size:?} bytes", size = info.size) }); @@ -158,8 +159,11 @@ trait EvalContextExtPriv<'tcx>: crate::MiriInterpCxExt<'tcx> { } AllocKind::Function | AllocKind::VTable => { // Allocate some dummy memory to get a unique address for this function/vtable. 
- let alloc_bytes = - MiriAllocBytes::from_bytes(&[0u8; 1], Align::from_bytes(1).unwrap(), ()); + let alloc_bytes = MiriAllocBytes::from_bytes( + &[0u8; 1], + Align::from_bytes(1).unwrap(), + params, + ); let ptr = alloc_bytes.as_ptr(); // Leak the underlying memory to ensure it remains unique. std::mem::forget(alloc_bytes); @@ -429,7 +433,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { prepared_alloc_bytes.copy_from_slice(bytes); interp_ok(prepared_alloc_bytes) } else { - interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align, ())) + let params = this.machine.get_default_alloc_params(); + interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align, params)) } } diff --git a/src/tools/miri/src/bin/miri.rs b/src/tools/miri/src/bin/miri.rs index 7098ef5130d..0121472d330 100644 --- a/src/tools/miri/src/bin/miri.rs +++ b/src/tools/miri/src/bin/miri.rs @@ -281,7 +281,9 @@ impl rustc_driver::Callbacks for MiriBeRustCompilerCalls { } let codegen_fn_attrs = tcx.codegen_fn_attrs(local_def_id); if codegen_fn_attrs.contains_extern_indicator() - || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::USED_COMPILER) + || codegen_fn_attrs + .flags + .contains(CodegenFnAttrFlags::USED_COMPILER) || codegen_fn_attrs.flags.contains(CodegenFnAttrFlags::USED_LINKER) { Some(( diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/diagnostics.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/diagnostics.rs index f5a0013047a..7b4c533cfae 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/diagnostics.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/diagnostics.rs @@ -504,7 +504,7 @@ impl DisplayFmt { if let Some(perm) = perm { format!( "{ac}{st}", - ac = if perm.is_initialized() { self.accessed.yes } else { self.accessed.no }, + ac = if perm.is_accessed() { self.accessed.yes } else { self.accessed.no }, st = perm.permission().short_name(), ) } else { diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs index f3e32e75f2f..411ae89da90 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/mod.rs @@ -3,6 +3,8 @@ use rustc_middle::mir::{Mutability, RetagKind}; use rustc_middle::ty::layout::HasTypingEnv; use rustc_middle::ty::{self, Ty}; +use self::foreign_access_skipping::IdempotentForeignAccess; +use self::tree::LocationState; use crate::borrow_tracker::{GlobalState, GlobalStateInner, ProtectorKind}; use crate::concurrency::data_race::NaReadType; use crate::*; @@ -95,7 +97,7 @@ impl<'tcx> Tree { /// A tag just lost its protector. /// /// This emits a special kind of access that is only applied - /// to initialized locations, as a protection against other + /// to accessed locations, as a protection against other /// tags not having been made aware of the existence of this /// protector. pub fn release_protector( @@ -113,16 +115,19 @@ impl<'tcx> Tree { /// Policy for a new borrow. #[derive(Debug, Clone, Copy)] -struct NewPermission { - /// Which permission should the pointer start with. - initial_state: Permission, +pub struct NewPermission { + /// Permission for the frozen part of the range. + freeze_perm: Permission, + /// Whether a read access should be performed on the frozen part on a retag. + freeze_access: bool, + /// Permission for the non-frozen part of the range. + nonfreeze_perm: Permission, + /// Whether a read access should be performed on the non-frozen + /// part on a retag. 
+ nonfreeze_access: bool, /// Whether this pointer is part of the arguments of a function call. /// `protector` is `Some(_)` for all pointers marked `noalias`. protector: Option<ProtectorKind>, - /// Whether a read should be performed on a retag. This should be `false` - /// for `Cell` because this could cause data races when using thread-safe - /// data types like `Mutex<T>`. - initial_read: bool, } impl<'tcx> NewPermission { @@ -133,27 +138,42 @@ impl<'tcx> NewPermission { kind: RetagKind, cx: &crate::MiriInterpCx<'tcx>, ) -> Option<Self> { - let ty_is_freeze = pointee.is_freeze(*cx.tcx, cx.typing_env()); let ty_is_unpin = pointee.is_unpin(*cx.tcx, cx.typing_env()); let is_protected = kind == RetagKind::FnEntry; - // As demonstrated by `tests/fail/tree_borrows/reservedim_spurious_write.rs`, - // interior mutability and protectors interact poorly. - // To eliminate the case of Protected Reserved IM we override interior mutability - // in the case of a protected reference: protected references are always considered - // "freeze" in their reservation phase. - let (initial_state, initial_read) = match mutability { + let protector = is_protected.then_some(ProtectorKind::StrongProtector); + + Some(match mutability { Mutability::Mut if ty_is_unpin => - (Permission::new_reserved(ty_is_freeze, is_protected), true), - Mutability::Not if ty_is_freeze => (Permission::new_frozen(), true), - Mutability::Not if !ty_is_freeze => (Permission::new_cell(), false), - // Raw pointers never enter this function so they are not handled. - // However raw pointers are not the only pointers that take the parent - // tag, this also happens for `!Unpin` `&mut`s, which are excluded above. + NewPermission { + freeze_perm: Permission::new_reserved( + /* ty_is_freeze */ true, + is_protected, + ), + freeze_access: true, + nonfreeze_perm: Permission::new_reserved( + /* ty_is_freeze */ false, + is_protected, + ), + // If we have a mutable reference, then the non-frozen part will + // have state `ReservedIM` or `Reserved`, which can have an initial read access + // performed on it because you cannot have multiple mutable borrows. + nonfreeze_access: true, + protector, + }, + Mutability::Not => + NewPermission { + freeze_perm: Permission::new_frozen(), + freeze_access: true, + nonfreeze_perm: Permission::new_cell(), + // If it is a shared reference, then the non-frozen + // part will have state `Cell`, which should not have an initial access, + // as this can cause data races when using thread-safe data types like + // `Mutex<T>`. + nonfreeze_access: false, + protector, + }, _ => return None, - }; - - let protector = is_protected.then_some(ProtectorKind::StrongProtector); - Some(Self { initial_state, protector, initial_read }) + }) } /// Compute permission for `Box`-like type (`Box` always, and also `Unique` if enabled). @@ -168,13 +188,17 @@ impl<'tcx> NewPermission { pointee.is_unpin(*cx.tcx, cx.typing_env()).then_some(()).map(|()| { // Regular `Unpin` box, give it `noalias` but only a weak protector // because it is valid to deallocate it within the function. 
- let ty_is_freeze = pointee.is_freeze(*cx.tcx, cx.typing_env()); - let protected = kind == RetagKind::FnEntry; - let initial_state = Permission::new_reserved(ty_is_freeze, protected); - Self { - initial_state, - protector: protected.then_some(ProtectorKind::WeakProtector), - initial_read: true, + let is_protected = kind == RetagKind::FnEntry; + let protector = is_protected.then_some(ProtectorKind::WeakProtector); + NewPermission { + freeze_perm: Permission::new_reserved(/* ty_is_freeze */ true, is_protected), + freeze_access: true, + nonfreeze_perm: Permission::new_reserved( + /* ty_is_freeze */ false, + is_protected, + ), + nonfreeze_access: true, + protector, } }) } @@ -194,8 +218,6 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { new_tag: BorTag, ) -> InterpResult<'tcx, Option<Provenance>> { let this = self.eval_context_mut(); - // Make sure the new permission makes sense as the initial permission of a fresh tag. - assert!(new_perm.initial_state.is_initial()); // Ensure we bail out if the pointer goes out-of-bounds (see miri#1050). this.check_ptr_access(place.ptr(), ptr_size, CheckInAllocMsg::Dereferenceable)?; @@ -206,7 +228,13 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let global = this.machine.borrow_tracker.as_ref().unwrap().borrow(); let ty = place.layout.ty; if global.tracked_pointer_tags.contains(&new_tag) { - let kind_str = format!("initial state {} (pointee type {ty})", new_perm.initial_state); + let ty_is_freeze = ty.is_freeze(*this.tcx, this.typing_env()); + let kind_str = + if ty_is_freeze { + format!("initial state {} (pointee type {ty})", new_perm.freeze_perm) + } else { + format!("initial state {}/{} outside/inside UnsafeCell (pointee type {ty})", new_perm.freeze_perm, new_perm.nonfreeze_perm) + }; this.emit_diagnostic(NonHaltingDiagnostic::CreatedPointerTag( new_tag.inner(), Some(kind_str), @@ -285,43 +313,103 @@ trait EvalContextPrivExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let span = this.machine.current_span(); let alloc_extra = this.get_alloc_extra(alloc_id)?; - let range = alloc_range(base_offset, ptr_size); let mut tree_borrows = alloc_extra.borrow_tracker_tb().borrow_mut(); - // All reborrows incur a (possibly zero-sized) read access to the parent - if new_perm.initial_read { - tree_borrows.perform_access( - orig_tag, - Some((range, AccessKind::Read, diagnostics::AccessCause::Reborrow)), - this.machine.borrow_tracker.as_ref().unwrap(), - alloc_id, - this.machine.current_span(), - )?; - } + // Store initial permissions and their corresponding range. + let mut perms_map: RangeMap<LocationState> = RangeMap::new( + ptr_size, + LocationState::new_accessed(Permission::new_disabled(), IdempotentForeignAccess::None), // this will be overwritten + ); + // Keep track of whether the node has any part that allows for interior mutability. + // FIXME: This misses `PhantomData<UnsafeCell<T>>` which could be considered a marker + // for requesting interior mutability. + let mut has_unsafe_cell = false; + + // When adding a new node, the SIFA of its parents needs to be updated, potentially across + // the entire memory range. For the parts that are being accessed below, the access itself + // trivially takes care of that. However, we have to do some more work to also deal with + // the parts that are not being accessed. Specifically what we do is that we + // call `update_last_accessed_after_retag` on the SIFA of the permission set for the part of + // memory outside `perm_map` -- so that part is definitely taken care of. 
The remaining concern + // is the part of memory that is in the range of `perms_map`, but not accessed below. + // There we have two cases: + // * If we do have an `UnsafeCell` (`has_unsafe_cell` becomes true), then the non-accessed part + // uses `nonfreeze_perm`, so the `nonfreeze_perm` initialized parts are also fine. We enforce + // the `freeze_perm` parts to be accessed, and thus everything is taken care of. + // * If there is no `UnsafeCell`, then `freeze_perm` is used everywhere (both inside and outside the initial range), + // and we update everything to have the `freeze_perm`'s SIFA, so there are no issues. (And this assert below is not + // actually needed in this case). + assert!(new_perm.freeze_access); + + let protected = new_perm.protector.is_some(); + this.visit_freeze_sensitive(place, ptr_size, |range, frozen| { + has_unsafe_cell = has_unsafe_cell || !frozen; + + // We are only ever `Frozen` inside the frozen bits. + let (perm, access) = if frozen { + (new_perm.freeze_perm, new_perm.freeze_access) + } else { + (new_perm.nonfreeze_perm, new_perm.nonfreeze_access) + }; + + // Store initial permissions. + for (_loc_range, loc) in perms_map.iter_mut(range.start, range.size) { + let sifa = perm.strongest_idempotent_foreign_access(protected); + // NOTE: Currently, `access` is false if and only if `perm` is Cell, so this `if` + // doesn't change whether any code is UB or not. We could just always use + // `new_accessed` and everything would stay the same. But that seems conceptually + // odd, so we keep the initial "accessed" bit of the `LocationState` in sync with whether + // a read access is performed below. + if access { + *loc = LocationState::new_accessed(perm, sifa); + } else { + *loc = LocationState::new_non_accessed(perm, sifa); + } + } + + // Some reborrows incur a read access to the parent. + if access { + // Adjust range to be relative to allocation start (rather than to `place`). + let mut range_in_alloc = range; + range_in_alloc.start += base_offset; + + tree_borrows.perform_access( + orig_tag, + Some((range_in_alloc, AccessKind::Read, diagnostics::AccessCause::Reborrow)), + this.machine.borrow_tracker.as_ref().unwrap(), + alloc_id, + this.machine.current_span(), + )?; + + // Also inform the data race model (but only if any bytes are actually affected). + if range.size.bytes() > 0 { + if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() { + data_race.read( + alloc_id, + range_in_alloc, + NaReadType::Retag, + Some(place.layout.ty), + &this.machine, + )? + } + } + } + interp_ok(()) + })?; + + // Record the parent-child pair in the tree. + tree_borrows.new_child( + base_offset, + orig_tag, + new_tag, - new_perm.initial_state, - range, + perms_map, + // Allow lazily writing to surrounding data if we found an `UnsafeCell`. + if has_unsafe_cell { new_perm.nonfreeze_perm } else { new_perm.freeze_perm }, + protected, span, - new_perm.protector.is_some(), )?; drop(tree_borrows); - // Also inform the data race model (but only if any bytes are actually affected).
- if range.size.bytes() > 0 && new_perm.initial_read { - if let Some(data_race) = alloc_extra.data_race.as_vclocks_ref() { - data_race.read( - alloc_id, - range, - NaReadType::Retag, - Some(place.layout.ty), - &this.machine, - )?; - } - } - interp_ok(Some(Provenance::Concrete { alloc_id, tag: new_tag })) } @@ -508,15 +596,21 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { fn tb_protect_place(&mut self, place: &MPlaceTy<'tcx>) -> InterpResult<'tcx, MPlaceTy<'tcx>> { let this = self.eval_context_mut(); - // Note: if we were to inline `new_reserved` below we would find out that - // `ty_is_freeze` is eventually unused because it appears in a `ty_is_freeze || true`. - // We are nevertheless including it here for clarity. - let ty_is_freeze = place.layout.ty.is_freeze(*this.tcx, this.typing_env()); // Retag it. With protection! That is the entire point. let new_perm = NewPermission { - initial_state: Permission::new_reserved(ty_is_freeze, /* protected */ true), + // Note: If we are creating a protected Reserved, which can + // never be ReservedIM, the value of the `ty_is_freeze` + // argument doesn't matter + // (`ty_is_freeze || true` in `new_reserved` will always be `true`). + freeze_perm: Permission::new_reserved( + /* ty_is_freeze */ true, /* protected */ true, + ), + freeze_access: true, + nonfreeze_perm: Permission::new_reserved( + /* ty_is_freeze */ false, /* protected */ true, + ), + nonfreeze_access: true, protector: Some(ProtectorKind::StrongProtector), - initial_read: true, }; this.tb_retag_place(place, new_perm) } diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs index 087f6fc3f24..38863ca0734 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/perms.rs @@ -94,6 +94,7 @@ impl PermissionPriv { } /// Reject `ReservedIM` that cannot exist in the presence of a protector. + #[cfg(test)] fn compatible_with_protector(&self) -> bool { // FIXME(TB-Cell): It is unclear what to do here. // `Cell` will occur with a protector but won't provide the guarantees @@ -253,10 +254,6 @@ impl Permission { pub fn is_disabled(&self) -> bool { self.inner == Disabled } - /// Check if `self` is the post-child-write state of a pointer (is `Active`). - pub fn is_active(&self) -> bool { - self.inner == Active - } /// Check if `self` is the never-allow-writes-again state of a pointer (is `Frozen`). pub fn is_frozen(&self) -> bool { self.inner == Frozen @@ -289,6 +286,11 @@ impl Permission { /// is a protector is relevant because being protected takes priority over being /// interior mutable) pub fn new_reserved(ty_is_freeze: bool, protected: bool) -> Self { + // As demonstrated by `tests/fail/tree_borrows/reservedim_spurious_write.rs`, + // interior mutability and protectors interact poorly. + // To eliminate the case of Protected Reserved IM we override interior mutability + // in the case of a protected reference: protected references are always considered + // "freeze" in their reservation phase. if ty_is_freeze || protected { Self::new_reserved_frz() } else { Self::new_reserved_im() } } @@ -309,6 +311,7 @@ impl Permission { } /// Reject `ReservedIM` that cannot exist in the presence of a protector. 
+ #[cfg(test)] pub fn compatible_with_protector(&self) -> bool { self.inner.compatible_with_protector() } @@ -393,11 +396,6 @@ impl PermTransition { self.from <= self.to } - pub fn from(from: Permission, to: Permission) -> Option<Self> { - let t = Self { from: from.inner, to: to.inner }; - t.is_possible().then_some(t) - } - pub fn is_noop(self) -> bool { self.from == self.to } @@ -407,11 +405,6 @@ impl PermTransition { (starting_point.inner == self.from).then_some(Permission { inner: self.to }) } - /// Extract starting point of a transition - pub fn started(self) -> Permission { - Permission { inner: self.from } - } - /// Determines if this transition would disable the permission. pub fn produces_disabled(self) -> bool { self.to == Disabled diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs index 47ccaadbb9e..48e4a19e263 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/tree.rs @@ -10,6 +10,7 @@ //! and the relative position of the access; //! - idempotency properties asserted in `perms.rs` (for optimizations) +use std::ops::Range; use std::{fmt, mem}; use rustc_abi::Size; @@ -32,18 +33,18 @@ mod tests; /// Data for a single *location*. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(super) struct LocationState { - /// A location is initialized when it is child-accessed for the first time (and the initial + /// A location is "accessed" when it is child-accessed for the first time (and the initial /// retag initializes the location for the range covered by the type), and it then stays - /// initialized forever. - /// For initialized locations, "permission" is the current permission. However, for - /// uninitialized locations, we still need to track the "future initial permission": this will + /// accessed forever. + /// For accessed locations, "permission" is the current permission. However, for + /// non-accessed locations, we still need to track the "future initial permission": this will /// start out to be `default_initial_perm`, but foreign accesses need to be taken into account. /// Crucially however, while transitions to `Disabled` would usually be UB if this location is - /// protected, that is *not* the case for uninitialized locations. Instead we just have a latent + /// protected, that is *not* the case for non-accessed locations. Instead we just have a latent /// "future initial permission" of `Disabled`, causing UB only if an access is ever actually /// performed. - /// Note that the tree root is also always initialized, as if the allocation was a write access. - initialized: bool, + /// Note that the tree root is also always accessed, as if the allocation was a write access. + accessed: bool, /// This pointer's current permission / future initial permission. permission: Permission, /// See `foreign_access_skipping.rs`. @@ -58,30 +59,30 @@ impl LocationState { /// to any foreign access yet. /// The permission is not allowed to be `Active`. 
/// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs` - fn new_uninit(permission: Permission, sifa: IdempotentForeignAccess) -> Self { + pub fn new_non_accessed(permission: Permission, sifa: IdempotentForeignAccess) -> Self { assert!(permission.is_initial() || permission.is_disabled()); - Self { permission, initialized: false, idempotent_foreign_access: sifa } + Self { permission, accessed: false, idempotent_foreign_access: sifa } } /// Constructs a new initial state. It has not yet been subjected /// to any foreign access. However, it is already marked as having been accessed. /// `sifa` is the (strongest) idempotent foreign access, see `foreign_access_skipping.rs` - fn new_init(permission: Permission, sifa: IdempotentForeignAccess) -> Self { - Self { permission, initialized: true, idempotent_foreign_access: sifa } + pub fn new_accessed(permission: Permission, sifa: IdempotentForeignAccess) -> Self { + Self { permission, accessed: true, idempotent_foreign_access: sifa } } - /// Check if the location has been initialized, i.e. if it has + /// Check if the location has been accessed, i.e. if it has /// ever been accessed through a child pointer. - pub fn is_initialized(&self) -> bool { - self.initialized + pub fn is_accessed(&self) -> bool { + self.accessed } /// Check if the state can exist as the initial permission of a pointer. /// - /// Do not confuse with `is_initialized`, the two are almost orthogonal - /// as apart from `Active` which is not initial and must be initialized, + /// Do not confuse with `is_accessed`, the two are almost orthogonal + /// as apart from `Active` which is not initial and must be accessed, /// any other permission can have an arbitrary combination of being - /// initial/initialized. + /// initial/accessed. /// FIXME: when the corresponding `assert` in `tree_borrows/mod.rs` finally /// passes and can be uncommented, remove this `#[allow(dead_code)]`. #[cfg_attr(not(test), allow(dead_code))] @@ -95,8 +96,8 @@ impl LocationState { /// Apply the effect of an access to one location, including /// - applying `Permission::perform_access` to the inner `Permission`, - /// - emitting protector UB if the location is initialized, - /// - updating the initialized status (child accesses produce initialized locations). + /// - emitting protector UB if the location is accessed, + /// - updating the accessed status (child accesses produce accessed locations). fn perform_access( &mut self, access_kind: AccessKind, @@ -106,14 +107,14 @@ impl LocationState { let old_perm = self.permission; let transition = Permission::perform_access(access_kind, rel_pos, old_perm, protected) .ok_or(TransitionError::ChildAccessForbidden(old_perm))?; - self.initialized |= !rel_pos.is_foreign(); + self.accessed |= !rel_pos.is_foreign(); self.permission = transition.applied(old_perm).unwrap(); - // Why do only initialized locations cause protector errors? + // Why do only accessed locations cause protector errors? // Consider two mutable references `x`, `y` into disjoint parts of // the same allocation. A priori, these may actually both be used to // access the entire allocation, as long as only reads occur. However, // a write to `y` needs to somehow record that `x` can no longer be used - // on that location at all. For these uninitialized locations (i.e., locations + // on that location at all. 
For these non-accessed locations (i.e., locations // that haven't been accessed with `x` yet), we track the "future initial state": // it defaults to whatever the initial state of the tag is, // but the access to `y` moves that "future initial state" of `x` to `Disabled`. @@ -121,8 +122,8 @@ impl LocationState { // So clearly protectors shouldn't fire for such "future initial state" transitions. // // See the test `two_mut_protected_same_alloc` in `tests/pass/tree_borrows/tree-borrows.rs` - // for an example of safe code that would be UB if we forgot to check `self.initialized`. - if protected && self.initialized && transition.produces_disabled() { + // for an example of safe code that would be UB if we forgot to check `self.accessed`. + if protected && self.accessed && transition.produces_disabled() { return Err(TransitionError::ProtectedDisabled(old_perm)); } Ok(transition) @@ -157,11 +158,11 @@ impl LocationState { self.idempotent_foreign_access.can_skip_foreign_access(happening_now); if self.permission.is_disabled() { // A foreign access to a `Disabled` tag will have almost no observable effect. - // It's a theorem that `Disabled` node have no protected initialized children, + // It's a theorem that `Disabled` node have no protected accessed children, // and so this foreign access will never trigger any protector. - // (Intuition: You're either protected initialized, and thus can't become Disabled - // or you're already Disabled protected, but not initialized, and then can't - // become initialized since that requires a child access, which Disabled blocks.) + // (Intuition: You're either protected accessed, and thus can't become Disabled + // or you're already Disabled protected, but not accessed, and then can't + // become accessed since that requires a child access, which Disabled blocks.) // Further, the children will never be able to read or write again, since they // have a `Disabled` parent. So this only affects diagnostics, such that the // blocking write will still be identified directly, just at a different tag. @@ -217,7 +218,7 @@ impl LocationState { impl fmt::Display for LocationState { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.permission)?; - if !self.initialized { + if !self.accessed { write!(f, "?")?; } Ok(()) @@ -598,12 +599,15 @@ impl Tree { let rperms = { let mut perms = UniValMap::default(); // We manually set it to `Active` on all in-bounds positions. - // We also ensure that it is initialized, so that no `Active` but - // not yet initialized nodes exist. Essentially, we pretend there + // We also ensure that it is accessed, so that no `Active` but + // not yet accessed nodes exist. Essentially, we pretend there // was a write that initialized these to `Active`. perms.insert( root_idx, - LocationState::new_init(Permission::new_active(), IdempotentForeignAccess::None), + LocationState::new_accessed( + Permission::new_active(), + IdempotentForeignAccess::None, + ), ); RangeMap::new(size, perms) }; @@ -612,20 +616,32 @@ impl Tree { } impl<'tcx> Tree { - /// Insert a new tag in the tree - pub fn new_child( + /// Insert a new tag in the tree. + /// + /// `initial_perms` defines the initial permissions for the part of memory + /// that is already considered "initialized" immediately. The ranges in this + /// map are relative to `base_offset`. + /// `default_perm` defines the initial permission for the rest of the allocation. 
+ /// + /// For all non-accessed locations in the RangeMap (those that haven't had an + /// implicit read), their SIFA must be weaker than or as weak as the SIFA of + /// `default_perm`. + pub(super) fn new_child( &mut self, + base_offset: Size, parent_tag: BorTag, new_tag: BorTag, - default_initial_perm: Permission, - reborrow_range: AllocRange, + initial_perms: RangeMap<LocationState>, + default_perm: Permission, + protected: bool, span: Span, - prot: bool, ) -> InterpResult<'tcx> { - assert!(!self.tag_mapping.contains_key(&new_tag)); let idx = self.tag_mapping.insert(new_tag); let parent_idx = self.tag_mapping.get(&parent_tag).unwrap(); - let strongest_idempotent = default_initial_perm.strongest_idempotent_foreign_access(prot); + assert!(default_perm.is_initial()); + + let default_strongest_idempotent = + default_perm.strongest_idempotent_foreign_access(protected); // Create the node self.nodes.insert( idx, @@ -633,25 +649,36 @@ impl<'tcx> Tree { tag: new_tag, parent: Some(parent_idx), children: SmallVec::default(), - default_initial_perm, - default_initial_idempotent_foreign_access: strongest_idempotent, - debug_info: NodeDebugInfo::new(new_tag, default_initial_perm, span), + default_initial_perm: default_perm, + default_initial_idempotent_foreign_access: default_strongest_idempotent, + debug_info: NodeDebugInfo::new(new_tag, default_perm, span), }, ); // Register new_tag as a child of parent_tag self.nodes.get_mut(parent_idx).unwrap().children.push(idx); - // Initialize perms - let perm = LocationState::new_init(default_initial_perm, strongest_idempotent); - for (_perms_range, perms) in self.rperms.iter_mut(reborrow_range.start, reborrow_range.size) + + for (Range { start, end }, &perm) in + initial_perms.iter(Size::from_bytes(0), initial_perms.size()) { - perms.insert(idx, perm); + assert!(perm.is_initial()); + for (_perms_range, perms) in self + .rperms + .iter_mut(Size::from_bytes(start) + base_offset, Size::from_bytes(end - start)) + { + assert!( + default_strongest_idempotent + >= perm.permission.strongest_idempotent_foreign_access(protected) + ); + perms.insert(idx, perm); + } } // Inserting the new perms might have broken the SIFA invariant (see `foreign_access_skipping.rs`). // We now weaken the recorded SIFA for our parents, until the invariant is restored. // We could weaken them all to `LocalAccess`, but it is more efficient to compute the SIFA // for the new permission statically, and use that. - self.update_last_accessed_after_retag(parent_idx, strongest_idempotent); + // See the comment in `tb_reborrow` for why it is correct to use the SIFA of `default_uninit_perm`. + self.update_last_accessed_after_retag(parent_idx, default_strongest_idempotent); interp_ok(()) } @@ -758,14 +785,14 @@ impl<'tcx> Tree { /// /// If `access_range_and_kind` is `None`, this is interpreted as the special /// access that is applied on protector release: - /// - the access will be applied only to initialized locations of the allocation, + /// - the access will be applied only to accessed locations of the allocation, /// - it will not be visible to children, /// - it will be recorded as a `FnExit` diagnostic access /// - and it will be a read except if the location is `Active`, i.e. has been written to, /// in which case it will be a write. 
/// /// `LocationState::perform_access` will take care of raising transition - /// errors and updating the `initialized` status of each location, + /// errors and updating the `accessed` status of each location, /// this traversal adds to that: /// - inserting into the map locations that do not exist yet, /// - trimming the traversal, @@ -858,7 +885,7 @@ impl<'tcx> Tree { } } else { // This is a special access through the entire allocation. - // It actually only affects `initialized` locations, so we need + // It actually only affects `accessed` locations, so we need // to filter on those before initiating the traversal. // // In addition this implicit access should not be visible to children, @@ -868,10 +895,10 @@ impl<'tcx> Tree { // why this is important. for (perms_range, perms) in self.rperms.iter_mut_all() { let idx = self.tag_mapping.get(&tag).unwrap(); - // Only visit initialized permissions + // Only visit accessed permissions if let Some(p) = perms.get(idx) && let Some(access_kind) = p.permission.protector_end_access() - && p.initialized + && p.accessed { let access_cause = diagnostics::AccessCause::FnExit(access_kind); TreeVisitor { nodes: &mut self.nodes, tag_mapping: &self.tag_mapping, perms } @@ -1035,7 +1062,7 @@ impl Tree { impl Node { pub fn default_location_state(&self) -> LocationState { - LocationState::new_uninit( + LocationState::new_non_accessed( self.default_initial_perm, self.default_initial_idempotent_foreign_access, ) @@ -1073,15 +1100,4 @@ impl AccessRelatedness { pub fn is_foreign(self) -> bool { matches!(self, AccessRelatedness::AncestorAccess | AccessRelatedness::CousinAccess) } - - /// Given the AccessRelatedness for the parent node, compute the AccessRelatedness - /// for the child node. This function assumes that we propagate away from the initial - /// access. - pub fn for_child(self) -> Self { - use AccessRelatedness::*; - match self { - AncestorAccess | This => AncestorAccess, - StrictChildAccess | CousinAccess => CousinAccess, - } - } } diff --git a/src/tools/miri/src/borrow_tracker/tree_borrows/tree/tests.rs b/src/tools/miri/src/borrow_tracker/tree_borrows/tree/tests.rs index dbfa9807e3b..bb3fc2d80b3 100644 --- a/src/tools/miri/src/borrow_tracker/tree_borrows/tree/tests.rs +++ b/src/tools/miri/src/borrow_tracker/tree_borrows/tree/tests.rs @@ -9,10 +9,10 @@ use crate::borrow_tracker::tree_borrows::exhaustive::{Exhaustive, precondition}; impl Exhaustive for LocationState { fn exhaustive() -> Box<dyn Iterator<Item = Self>> { // We keep `latest_foreign_access` at `None` as that's just a cache. - Box::new(<(Permission, bool)>::exhaustive().map(|(permission, initialized)| { + Box::new(<(Permission, bool)>::exhaustive().map(|(permission, accessed)| { Self { permission, - initialized, + accessed, idempotent_foreign_access: IdempotentForeignAccess::default(), } })) @@ -76,8 +76,8 @@ fn as_protected(b: bool) -> &'static str { if b { " (protected)" } else { "" } } -fn as_lazy_or_init(b: bool) -> &'static str { - if b { "initialized" } else { "lazy" } +fn as_lazy_or_accessed(b: bool) -> &'static str { + if b { "accessed" } else { "lazy" } } /// Test that tree compacting (as performed by the GC) is sound. 
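(Illustrative aside, not part of the diff: the "accessed"/"non-accessed" distinction that the tree.rs changes above rename is exactly what keeps code like the following sketch free of protector errors. It is modeled on the `two_mut_protected_same_alloc` test referenced in the comments; each protected `&mut` argument is only ever child-accessed on its own byte, so the write through the other reference hits a non-accessed location and must not trigger protector UB.)

fn write_both(x: &mut u8, y: &mut u8) {
    // For `y`'s tag this is a foreign write to a byte that `y` never
    // child-accessed; its "future initial permission" there becomes Disabled,
    // but no protector error fires because that location is non-accessed.
    *x = 1;
    // Symmetric situation for `x`'s tag.
    *y = 2;
}

fn main() {
    let mut pair = (0u8, 0u8);
    // Two protected, disjoint mutable borrows into the same 2-byte allocation.
    write_both(&mut pair.0, &mut pair.1);
}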
@@ -106,7 +106,7 @@ fn tree_compacting_is_sound() { as_foreign_or_child(rel), kind, parent.permission(), - as_lazy_or_init(child.is_initialized()), + as_lazy_or_accessed(child.is_accessed()), child.permission(), as_protected(child_protected), np.permission(), @@ -122,7 +122,7 @@ fn tree_compacting_is_sound() { as_foreign_or_child(rel), kind, parent.permission(), - as_lazy_or_init(child.is_initialized()), + as_lazy_or_accessed(child.is_accessed()), child.permission(), as_protected(child_protected), nc.permission() @@ -435,19 +435,19 @@ mod spurious_read { Ok(Self { x, y, ..self }) } - /// Perform a read on the given pointer if its state is `initialized`. + /// Perform a read on the given pointer if its state is `accessed`. /// Must be called just after reborrowing a pointer, and just after /// removing a protector. - fn read_if_initialized(self, ptr: PtrSelector) -> Result<Self, ()> { - let initialized = match ptr { - PtrSelector::X => self.x.state.initialized, - PtrSelector::Y => self.y.state.initialized, + fn read_if_accessed(self, ptr: PtrSelector) -> Result<Self, ()> { + let accessed = match ptr { + PtrSelector::X => self.x.state.accessed, + PtrSelector::Y => self.y.state.accessed, PtrSelector::Other => panic!( - "the `initialized` status of `PtrSelector::Other` is unknown, do not pass it to `read_if_initialized`" + "the `accessed` status of `PtrSelector::Other` is unknown, do not pass it to `read_if_accessed`" ), }; - if initialized { + if accessed { self.perform_test_access(&TestAccess { ptr, kind: AccessKind::Read }) } else { Ok(self) @@ -457,13 +457,13 @@ mod spurious_read { /// Remove the protector of `x`, including the implicit read on function exit. fn end_protector_x(self) -> Result<Self, ()> { let x = self.x.end_protector(); - Self { x, ..self }.read_if_initialized(PtrSelector::X) + Self { x, ..self }.read_if_accessed(PtrSelector::X) } /// Remove the protector of `y`, including the implicit read on function exit. fn end_protector_y(self) -> Result<Self, ()> { let y = self.y.end_protector(); - Self { y, ..self }.read_if_initialized(PtrSelector::Y) + Self { y, ..self }.read_if_accessed(PtrSelector::Y) } fn retag_y(self, new_y: LocStateProt) -> Result<Self, ()> { @@ -473,7 +473,7 @@ mod spurious_read { } // `xy_rel` changes to "mutually foreign" now: `y` can no longer be a parent of `x`. Self { y: new_y, xy_rel: RelPosXY::MutuallyForeign, ..self } - .read_if_initialized(PtrSelector::Y) + .read_if_accessed(PtrSelector::Y) } fn perform_test_event<RetX, RetY>(self, evt: &TestEvent<RetX, RetY>) -> Result<Self, ()> { @@ -602,14 +602,14 @@ mod spurious_read { xy_rel: RelPosXY::MutuallyForeign, x: LocStateProt { // For the tests, the strongest idempotent foreign access does not matter, so we use `Default::default` - state: LocationState::new_init( + state: LocationState::new_accessed( Permission::new_frozen(), IdempotentForeignAccess::default(), ), prot: true, }, y: LocStateProt { - state: LocationState::new_uninit( + state: LocationState::new_non_accessed( Permission::new_reserved(/* freeze */ true, /* protected */ true), IdempotentForeignAccess::default(), ), @@ -650,8 +650,8 @@ mod spurious_read { for xy_rel in RelPosXY::exhaustive() { for (x_retag_perm, y_current_perm) in <(LocationState, LocationState)>::exhaustive() { - // We can only do spurious reads for initialized locations anyway. - precondition!(x_retag_perm.initialized); + // We can only do spurious reads for accessed locations anyway. 
+ precondition!(x_retag_perm.accessed); // And `x` just got retagged, so it must be initial. precondition!(x_retag_perm.permission.is_initial()); // As stated earlier, `x` is always protected in the patterns we consider here. @@ -696,7 +696,7 @@ mod spurious_read { fn initial_state(&self) -> Result<LocStateProtPair, ()> { let (x, y) = self.retag_permissions(); let state = LocStateProtPair { xy_rel: self.xy_rel, x, y }; - state.read_if_initialized(PtrSelector::X) + state.read_if_accessed(PtrSelector::X) } } diff --git a/src/tools/miri/src/concurrency/thread.rs b/src/tools/miri/src/concurrency/thread.rs index 15f15572c93..ba1436b77b8 100644 --- a/src/tools/miri/src/concurrency/thread.rs +++ b/src/tools/miri/src/concurrency/thread.rs @@ -897,12 +897,17 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { if tcx.is_foreign_item(def_id) { throw_unsup_format!("foreign thread-local statics are not supported"); } + let params = this.machine.get_default_alloc_params(); let alloc = this.ctfe_query(|tcx| tcx.eval_static_initializer(def_id))?; // We make a full copy of this allocation. let mut alloc = alloc.inner().adjust_from_tcx( &this.tcx, |bytes, align| { - interp_ok(MiriAllocBytes::from_bytes(std::borrow::Cow::Borrowed(bytes), align, ())) + interp_ok(MiriAllocBytes::from_bytes( + std::borrow::Cow::Borrowed(bytes), + align, + params, + )) }, |ptr| this.global_root_pointer(ptr), )?; diff --git a/src/tools/miri/src/diagnostics.rs b/src/tools/miri/src/diagnostics.rs index 10570a37e5d..1728a9cfd6d 100644 --- a/src/tools/miri/src/diagnostics.rs +++ b/src/tools/miri/src/diagnostics.rs @@ -255,8 +255,7 @@ pub fn report_error<'tcx>( ], UnsupportedForeignItem(_) => { vec![ - note!("if this is a basic API commonly used on this target, please report an issue with Miri"), - note!("however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases"), + note!("this means the program tried to do something Miri does not support; it does not indicate a bug in the program"), ] } StackedBorrowsUb { help, history, .. 
} => { diff --git a/src/tools/miri/src/intrinsics/mod.rs b/src/tools/miri/src/intrinsics/mod.rs index 581005bc9a1..a4882a20148 100644 --- a/src/tools/miri/src/intrinsics/mod.rs +++ b/src/tools/miri/src/intrinsics/mod.rs @@ -159,67 +159,6 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.write_scalar(Scalar::from_bool(branch), dest)?; } - "floorf16" | "ceilf16" | "truncf16" | "roundf16" | "round_ties_even_f16" => { - let [f] = check_intrinsic_arg_count(args)?; - let f = this.read_scalar(f)?.to_f16()?; - let mode = match intrinsic_name { - "floorf16" => Round::TowardNegative, - "ceilf16" => Round::TowardPositive, - "truncf16" => Round::TowardZero, - "roundf16" => Round::NearestTiesToAway, - "round_ties_even_f16" => Round::NearestTiesToEven, - _ => bug!(), - }; - let res = f.round_to_integral(mode).value; - let res = this.adjust_nan(res, &[f]); - this.write_scalar(res, dest)?; - } - "floorf32" | "ceilf32" | "truncf32" | "roundf32" | "round_ties_even_f32" => { - let [f] = check_intrinsic_arg_count(args)?; - let f = this.read_scalar(f)?.to_f32()?; - let mode = match intrinsic_name { - "floorf32" => Round::TowardNegative, - "ceilf32" => Round::TowardPositive, - "truncf32" => Round::TowardZero, - "roundf32" => Round::NearestTiesToAway, - "round_ties_even_f32" => Round::NearestTiesToEven, - _ => bug!(), - }; - let res = f.round_to_integral(mode).value; - let res = this.adjust_nan(res, &[f]); - this.write_scalar(res, dest)?; - } - "floorf64" | "ceilf64" | "truncf64" | "roundf64" | "round_ties_even_f64" => { - let [f] = check_intrinsic_arg_count(args)?; - let f = this.read_scalar(f)?.to_f64()?; - let mode = match intrinsic_name { - "floorf64" => Round::TowardNegative, - "ceilf64" => Round::TowardPositive, - "truncf64" => Round::TowardZero, - "roundf64" => Round::NearestTiesToAway, - "round_ties_even_f64" => Round::NearestTiesToEven, - _ => bug!(), - }; - let res = f.round_to_integral(mode).value; - let res = this.adjust_nan(res, &[f]); - this.write_scalar(res, dest)?; - } - "floorf128" | "ceilf128" | "truncf128" | "roundf128" | "round_ties_even_f128" => { - let [f] = check_intrinsic_arg_count(args)?; - let f = this.read_scalar(f)?.to_f128()?; - let mode = match intrinsic_name { - "floorf128" => Round::TowardNegative, - "ceilf128" => Round::TowardPositive, - "truncf128" => Round::TowardZero, - "roundf128" => Round::NearestTiesToAway, - "round_ties_even_f128" => Round::NearestTiesToEven, - _ => bug!(), - }; - let res = f.round_to_integral(mode).value; - let res = this.adjust_nan(res, &[f]); - this.write_scalar(res, dest)?; - } - "sqrtf32" => { let [f] = check_intrinsic_arg_count(args)?; let f = this.read_scalar(f)?.to_f32()?; diff --git a/src/tools/miri/src/lib.rs b/src/tools/miri/src/lib.rs index a3d2b8c874e..51ec19af52a 100644 --- a/src/tools/miri/src/lib.rs +++ b/src/tools/miri/src/lib.rs @@ -12,6 +12,7 @@ #![feature(nonzero_ops)] #![feature(strict_overflow_ops)] #![feature(pointer_is_aligned_to)] +#![feature(ptr_metadata)] #![feature(unqualified_local_imports)] #![feature(derive_coerce_pointee)] #![feature(arbitrary_self_types)] @@ -70,8 +71,8 @@ extern crate rustc_target; #[allow(unused_extern_crates)] extern crate rustc_driver; +mod alloc; mod alloc_addresses; -mod alloc_bytes; mod borrow_tracker; mod clock; mod concurrency; @@ -106,8 +107,8 @@ pub type OpTy<'tcx> = interpret::OpTy<'tcx, machine::Provenance>; pub type PlaceTy<'tcx> = interpret::PlaceTy<'tcx, machine::Provenance>; pub type MPlaceTy<'tcx> = interpret::MPlaceTy<'tcx, machine::Provenance>; +pub use 
crate::alloc::MiriAllocBytes; pub use crate::alloc_addresses::{EvalContextExt as _, ProvenanceMode}; -pub use crate::alloc_bytes::MiriAllocBytes; pub use crate::borrow_tracker::stacked_borrows::{ EvalContextExt as _, Item, Permission, Stack, Stacks, }; diff --git a/src/tools/miri/src/machine.rs b/src/tools/miri/src/machine.rs index e7a2cb25159..15b3653d7ae 100644 --- a/src/tools/miri/src/machine.rs +++ b/src/tools/miri/src/machine.rs @@ -532,6 +532,10 @@ pub struct MiriMachine<'tcx> { /// Needs to be queried by ptr_to_int, hence needs interior mutability. pub(crate) rng: RefCell<StdRng>, + /// The allocator used for the machine's `AllocBytes` in native-libs mode. + #[cfg(target_os = "linux")] + pub(crate) allocator: Option<Rc<RefCell<crate::alloc::isolated_alloc::IsolatedAlloc>>>, + /// The allocation IDs to report when they are being allocated /// (helps for debugging memory leaks and use after free bugs). tracked_alloc_ids: FxHashSet<AllocId>, @@ -715,6 +719,10 @@ impl<'tcx> MiriMachine<'tcx> { local_crates, extern_statics: FxHashMap::default(), rng: RefCell::new(rng), + #[cfg(target_os = "linux")] + allocator: if config.native_lib.is_some() { + Some(Rc::new(RefCell::new(crate::alloc::isolated_alloc::IsolatedAlloc::new()))) + } else { None }, tracked_alloc_ids: config.tracked_alloc_ids.clone(), track_alloc_accesses: config.track_alloc_accesses, check_alignment: config.check_alignment, @@ -917,6 +925,8 @@ impl VisitProvenance for MiriMachine<'_> { backtrace_style: _, local_crates: _, rng: _, + #[cfg(target_os = "linux")] + allocator: _, tracked_alloc_ids: _, track_alloc_accesses: _, check_alignment: _, @@ -1637,7 +1647,7 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { fn before_stack_pop(ecx: &mut InterpCx<'tcx, Self>) -> InterpResult<'tcx> { let frame = ecx.frame(); // We want this *before* the return value copy, because the return place itself is protected - // until we do `end_call` here. + // until we do `on_stack_pop` here, and we need to un-protect it to copy the return value. if ecx.machine.borrow_tracker.is_some() { ecx.on_stack_pop(frame)?; } @@ -1804,8 +1814,17 @@ impl<'tcx> Machine<'tcx> for MiriMachine<'tcx> { Cow::Borrowed(ecx.machine.union_data_ranges.entry(ty).or_insert_with(compute_range)) } - /// Placeholder! - fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams { () } + fn get_default_alloc_params(&self) -> <Self::Bytes as AllocBytes>::AllocParams { + use crate::alloc::MiriAllocParams; + + #[cfg(target_os = "linux")] + match &self.allocator { + Some(alloc) => MiriAllocParams::Isolated(alloc.clone()), + None => MiriAllocParams::Global, + } + #[cfg(not(target_os = "linux"))] + MiriAllocParams::Global + } } /// Trait for callbacks handling asynchronous machine operations. diff --git a/src/tools/miri/src/range_map.rs b/src/tools/miri/src/range_map.rs index 2c2484cd0bc..29a5a8537a4 100644 --- a/src/tools/miri/src/range_map.rs +++ b/src/tools/miri/src/range_map.rs @@ -31,6 +31,11 @@ impl<T> RangeMap<T> { RangeMap { v } } + pub fn size(&self) -> Size { + let size = self.v.last().map(|x| x.range.end).unwrap_or(0); + Size::from_bytes(size) + } + /// Finds the index containing the given offset. fn find_offset(&self, offset: u64) -> usize { self.v @@ -71,10 +76,7 @@ impl<T> RangeMap<T> { }; // The first offset that is not included any more. 
let end = offset + len; - assert!( - end <= self.v.last().unwrap().range.end, - "iterating beyond the bounds of this RangeMap" - ); + assert!(end <= self.size().bytes(), "iterating beyond the bounds of this RangeMap"); slice .iter() .take_while(move |elem| elem.range.start < end) @@ -327,4 +329,16 @@ mod tests { let map = RangeMap::<i32>::new(Size::from_bytes(20), -1); let _ = map.iter(Size::from_bytes(11), Size::from_bytes(11)); } + + #[test] + fn empty_map_iter() { + let map = RangeMap::<i32>::new(Size::from_bytes(0), -1); + let _ = map.iter(Size::from_bytes(0), Size::from_bytes(0)); + } + + #[test] + fn empty_map_iter_mut() { + let mut map = RangeMap::<i32>::new(Size::from_bytes(0), -1); + let _ = map.iter_mut(Size::from_bytes(0), Size::from_bytes(0)); + } } diff --git a/src/tools/miri/src/shims/files.rs b/src/tools/miri/src/shims/files.rs index 31142431247..606d1ffbea6 100644 --- a/src/tools/miri/src/shims/files.rs +++ b/src/tools/miri/src/shims/files.rs @@ -202,6 +202,20 @@ pub trait FileDescription: std::fmt::Debug + FileDescriptionExt { fn as_unix<'tcx>(&self, _ecx: &MiriInterpCx<'tcx>) -> &dyn UnixFileDescription { panic!("Not a unix file descriptor: {}", self.name()); } + + /// Implementation of fcntl(F_GETFL) for this FD. + fn get_flags<'tcx>(&self, _ecx: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Scalar> { + throw_unsup_format!("fcntl: {} is not supported for F_GETFL", self.name()); + } + + /// Implementation of fcntl(F_SETFL) for this FD. + fn set_flags<'tcx>( + &self, + _flag: i32, + _ecx: &mut MiriInterpCx<'tcx>, + ) -> InterpResult<'tcx, Scalar> { + throw_unsup_format!("fcntl: {} is not supported for F_SETFL", self.name()); + } } impl FileDescription for io::Stdin { diff --git a/src/tools/miri/src/shims/unix/fd.rs b/src/tools/miri/src/shims/unix/fd.rs index 156814a26fa..71102d9f2f3 100644 --- a/src/tools/miri/src/shims/unix/fd.rs +++ b/src/tools/miri/src/shims/unix/fd.rs @@ -141,6 +141,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let f_getfd = this.eval_libc_i32("F_GETFD"); let f_dupfd = this.eval_libc_i32("F_DUPFD"); let f_dupfd_cloexec = this.eval_libc_i32("F_DUPFD_CLOEXEC"); + let f_getfl = this.eval_libc_i32("F_GETFL"); + let f_setfl = this.eval_libc_i32("F_SETFL"); // We only support getting the flags for a descriptor. match cmd { @@ -175,6 +177,25 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { this.set_last_error_and_return_i32(LibcError("EBADF")) } } + cmd if cmd == f_getfl => { + // Check if this is a valid open file descriptor. + let Some(fd) = this.machine.fds.get(fd_num) else { + return this.set_last_error_and_return_i32(LibcError("EBADF")); + }; + + fd.get_flags(this) + } + cmd if cmd == f_setfl => { + // Check if this is a valid open file descriptor. + let Some(fd) = this.machine.fds.get(fd_num) else { + return this.set_last_error_and_return_i32(LibcError("EBADF")); + }; + + let [flag] = check_min_vararg_count("fcntl(fd, F_SETFL, ...)", varargs)?; + let flag = this.read_scalar(flag)?.to_i32()?; + + fd.set_flags(flag, this) + } cmd if this.tcx.sess.target.os == "macos" && cmd == this.eval_libc_i32("F_FULLFSYNC") => { diff --git a/src/tools/miri/src/shims/unix/unnamed_socket.rs b/src/tools/miri/src/shims/unix/unnamed_socket.rs index 135d8f6bee7..817ddd7954d 100644 --- a/src/tools/miri/src/shims/unix/unnamed_socket.rs +++ b/src/tools/miri/src/shims/unix/unnamed_socket.rs @@ -20,6 +20,16 @@ use crate::*; /// be configured in the real system. 
const MAX_SOCKETPAIR_BUFFER_CAPACITY: usize = 212992; +#[derive(Debug, PartialEq)] +enum AnonSocketType { + // Either end of the socketpair fd. + Socketpair, + // Read end of the pipe. + PipeRead, + // Write end of the pipe. + PipeWrite, +} + /// One end of a pair of connected unnamed sockets. #[derive(Debug)] struct AnonSocket { @@ -40,7 +50,10 @@ struct AnonSocket { /// A list of thread ids blocked because the buffer was full. /// Once another thread reads some bytes, these threads will be unblocked. blocked_write_tid: RefCell<Vec<ThreadId>>, - is_nonblock: bool, + /// Whether this fd is non-blocking or not. + is_nonblock: Cell<bool>, + // Differentiate between different AnonSocket fd types. + fd_type: AnonSocketType, } #[derive(Debug)] @@ -63,7 +76,10 @@ impl AnonSocket { impl FileDescription for AnonSocket { fn name(&self) -> &'static str { - "socketpair" + match self.fd_type { + AnonSocketType::Socketpair => "socketpair", + AnonSocketType::PipeRead | AnonSocketType::PipeWrite => "pipe", + } } fn close<'tcx>( @@ -110,6 +126,66 @@ impl FileDescription for AnonSocket { fn as_unix<'tcx>(&self, _ecx: &MiriInterpCx<'tcx>) -> &dyn UnixFileDescription { self } + + fn get_flags<'tcx>(&self, ecx: &mut MiriInterpCx<'tcx>) -> InterpResult<'tcx, Scalar> { + let mut flags = 0; + + // Get flag for file access mode. + // The flag for both socketpair and pipe will remain the same even when the peer + // fd is closed, so we need to look at the original type of this socket, not at whether + // the peer socket still exists. + match self.fd_type { + AnonSocketType::Socketpair => { + flags |= ecx.eval_libc_i32("O_RDWR"); + } + AnonSocketType::PipeRead => { + flags |= ecx.eval_libc_i32("O_RDONLY"); + } + AnonSocketType::PipeWrite => { + flags |= ecx.eval_libc_i32("O_WRONLY"); + } + } + + // Get flag for blocking status. + if self.is_nonblock.get() { + flags |= ecx.eval_libc_i32("O_NONBLOCK"); + } + + interp_ok(Scalar::from_i32(flags)) + } + + fn set_flags<'tcx>( + &self, + mut flag: i32, + ecx: &mut MiriInterpCx<'tcx>, + ) -> InterpResult<'tcx, Scalar> { + // FIXME: File creation flags should be ignored. + + let o_nonblock = ecx.eval_libc_i32("O_NONBLOCK"); + let o_rdonly = ecx.eval_libc_i32("O_RDONLY"); + let o_wronly = ecx.eval_libc_i32("O_WRONLY"); + let o_rdwr = ecx.eval_libc_i32("O_RDWR"); + + // O_NONBLOCK flag can be set / unset by user. + if flag & o_nonblock == o_nonblock { + self.is_nonblock.set(true); + flag &= !o_nonblock; + } else { + self.is_nonblock.set(false); + } + + // Ignore all file access mode flags. + flag &= !(o_rdonly | o_wronly | o_rdwr); + + // Throw error if there is any unsupported flag. + if flag != 0 { + throw_unsup_format!( + "fcntl: only O_NONBLOCK is supported for F_SETFL on socketpairs and pipes" + ) + } + + interp_ok(Scalar::from_i32(0)) + } } /// Write to AnonSocket based on the space available and return the written byte size. @@ -141,7 +217,7 @@ fn anonsocket_write<'tcx>( // Let's see if we can write. let available_space = MAX_SOCKETPAIR_BUFFER_CAPACITY.strict_sub(writebuf.borrow().buf.len()); if available_space == 0 { - if self_ref.is_nonblock { + if self_ref.is_nonblock.get() { // Non-blocking socketpair with a full buffer. return finish.call(ecx, Err(ErrorKind::WouldBlock.into())); } else { @@ -223,7 +299,7 @@ fn anonsocket_read<'tcx>( // Socketpair with no peer and empty buffer. // 0 bytes successfully read indicates end-of-file. 
return finish.call(ecx, Ok(0)); - } else if self_ref.is_nonblock { + } else if self_ref.is_nonblock.get() { // Non-blocking socketpair with writer and empty buffer. // https://linux.die.net/man/2/read // EAGAIN or EWOULDBLOCK can be returned for socket, @@ -407,7 +483,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { peer_lost_data: Cell::new(false), blocked_read_tid: RefCell::new(Vec::new()), blocked_write_tid: RefCell::new(Vec::new()), - is_nonblock: is_sock_nonblock, + is_nonblock: Cell::new(is_sock_nonblock), + fd_type: AnonSocketType::Socketpair, }); let fd1 = fds.new_ref(AnonSocket { readbuf: Some(RefCell::new(Buffer::new())), @@ -415,7 +492,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { peer_lost_data: Cell::new(false), blocked_read_tid: RefCell::new(Vec::new()), blocked_write_tid: RefCell::new(Vec::new()), - is_nonblock: is_sock_nonblock, + is_nonblock: Cell::new(is_sock_nonblock), + fd_type: AnonSocketType::Socketpair, }); // Make the file descriptions point to each other. @@ -475,7 +553,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { peer_lost_data: Cell::new(false), blocked_read_tid: RefCell::new(Vec::new()), blocked_write_tid: RefCell::new(Vec::new()), - is_nonblock, + is_nonblock: Cell::new(is_nonblock), + fd_type: AnonSocketType::PipeRead, }); let fd1 = fds.new_ref(AnonSocket { readbuf: None, @@ -483,7 +562,8 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { peer_lost_data: Cell::new(false), blocked_read_tid: RefCell::new(Vec::new()), blocked_write_tid: RefCell::new(Vec::new()), - is_nonblock, + is_nonblock: Cell::new(is_nonblock), + fd_type: AnonSocketType::PipeWrite, }); // Make the file descriptions point to each other. diff --git a/src/tools/miri/src/shims/windows/foreign_items.rs b/src/tools/miri/src/shims/windows/foreign_items.rs index d822dd07fcd..98099e07b2e 100644 --- a/src/tools/miri/src/shims/windows/foreign_items.rs +++ b/src/tools/miri/src/shims/windows/foreign_items.rs @@ -572,6 +572,14 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let ret = this.WaitForSingleObject(handle, timeout)?; this.write_scalar(ret, dest)?; } + "GetCurrentProcess" => { + let [] = this.check_shim(abi, sys_conv, link_name, args)?; + + this.write_scalar( + Handle::Pseudo(PseudoHandle::CurrentProcess).to_scalar(this), + dest, + )?; + } "GetCurrentThread" => { let [] = this.check_shim(abi, sys_conv, link_name, args)?; @@ -693,6 +701,20 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { let res = this.GetStdHandle(which)?; this.write_scalar(res, dest)?; } + "DuplicateHandle" => { + let [src_proc, src_handle, target_proc, target_handle, access, inherit, options] = + this.check_shim(abi, sys_conv, link_name, args)?; + let res = this.DuplicateHandle( + src_proc, + src_handle, + target_proc, + target_handle, + access, + inherit, + options, + )?; + this.write_scalar(res, dest)?; + } "CloseHandle" => { let [handle] = this.check_shim(abi, sys_conv, link_name, args)?; diff --git a/src/tools/miri/src/shims/windows/handle.rs b/src/tools/miri/src/shims/windows/handle.rs index 5c04271fac5..1e30bf25ed9 100644 --- a/src/tools/miri/src/shims/windows/handle.rs +++ b/src/tools/miri/src/shims/windows/handle.rs @@ -9,6 +9,7 @@ use crate::*; #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)] pub enum PseudoHandle { CurrentThread, + CurrentProcess, } /// Miri representation of a Windows `HANDLE` @@ -23,16 +24,19 @@ pub enum Handle { impl PseudoHandle { const CURRENT_THREAD_VALUE: u32 = 0; + const 
CURRENT_PROCESS_VALUE: u32 = 1; fn value(self) -> u32 { match self { Self::CurrentThread => Self::CURRENT_THREAD_VALUE, + Self::CurrentProcess => Self::CURRENT_PROCESS_VALUE, } } fn from_value(value: u32) -> Option<Self> { match value { Self::CURRENT_THREAD_VALUE => Some(Self::CurrentThread), + Self::CURRENT_PROCESS_VALUE => Some(Self::CurrentProcess), _ => None, } } @@ -244,6 +248,76 @@ pub trait EvalContextExt<'tcx>: crate::MiriInterpCxExt<'tcx> { interp_ok(handle.to_scalar(this)) } + fn DuplicateHandle( + &mut self, + src_proc: &OpTy<'tcx>, // HANDLE + src_handle: &OpTy<'tcx>, // HANDLE + target_proc: &OpTy<'tcx>, // HANDLE + target_handle: &OpTy<'tcx>, // LPHANDLE + desired_access: &OpTy<'tcx>, // DWORD + inherit: &OpTy<'tcx>, // BOOL + options: &OpTy<'tcx>, // DWORD + ) -> InterpResult<'tcx, Scalar> { + // ^ Returns BOOL (i32 on Windows) + let this = self.eval_context_mut(); + + let src_proc = this.read_handle(src_proc, "DuplicateHandle")?; + let src_handle = this.read_handle(src_handle, "DuplicateHandle")?; + let target_proc = this.read_handle(target_proc, "DuplicateHandle")?; + let target_handle_ptr = this.read_pointer(target_handle)?; + // Since we only support DUPLICATE_SAME_ACCESS, this value is ignored, but should be valid + let _ = this.read_scalar(desired_access)?.to_u32()?; + // We don't support the CreateProcess API, so inheritable or not means nothing. + // If we ever add CreateProcess support, this will need to be implemented. + let _ = this.read_scalar(inherit)?; + let options = this.read_scalar(options)?; + + if src_proc != Handle::Pseudo(PseudoHandle::CurrentProcess) { + throw_unsup_format!( + "`DuplicateHandle` `hSourceProcessHandle` parameter is not the current process, which is unsupported" + ); + } + + if target_proc != Handle::Pseudo(PseudoHandle::CurrentProcess) { + throw_unsup_format!( + "`DuplicateHandle` `hTargetProcessHandle` parameter is not the current process, which is unsupported" + ); + } + + if this.ptr_is_null(target_handle_ptr)? { + throw_unsup_format!( + "`DuplicateHandle` `lpTargetHandle` parameter is null, which is unsupported" + ); + } + + if options != this.eval_windows("c", "DUPLICATE_SAME_ACCESS") { + throw_unsup_format!( + "`DuplicateHandle` `dwOptions` parameter is not `DUPLICATE_SAME_ACCESS`, which is unsupported" + ); + } + + let new_handle = match src_handle { + Handle::File(old_fd_num) => { + let Some(fd) = this.machine.fds.get(old_fd_num) else { + this.invalid_handle("DuplicateHandle")?
+ }; + Handle::File(this.machine.fds.insert(fd)) + } + Handle::Thread(_) => { + throw_unsup_format!( + "`DuplicateHandle` called on a thread handle, which is unsupported" + ); + } + Handle::Pseudo(pseudo) => Handle::Pseudo(pseudo), + Handle::Null | Handle::Invalid => this.invalid_handle("DuplicateHandle")?, + }; + + let target_place = this.deref_pointer_as(target_handle, this.machine.layouts.usize)?; + this.write_scalar(new_handle.to_scalar(this), &target_place)?; + + interp_ok(this.eval_windows("c", "TRUE")) + } + fn CloseHandle(&mut self, handle_op: &OpTy<'tcx>) -> InterpResult<'tcx, Scalar> { let this = self.eval_context_mut(); diff --git a/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.rs b/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.rs new file mode 100644 index 00000000000..eef32136a0a --- /dev/null +++ b/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.rs @@ -0,0 +1,20 @@ +//@ignore-target: windows # Sockets/pipes are not implemented yet +//~^ ERROR: deadlock: the evaluated program deadlocked +//@compile-flags: -Zmiri-deterministic-concurrency +use std::thread; + +/// If an O_NONBLOCK flag is set while the fd is blocking, that fd will not be woken up. +fn main() { + let mut fds = [-1, -1]; + let res = unsafe { libc::pipe(fds.as_mut_ptr()) }; + assert_eq!(res, 0); + let mut buf: [u8; 5] = [0; 5]; + let _thread1 = thread::spawn(move || { + // Add O_NONBLOCK flag while pipe is still block on read. + let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }; + assert_eq!(res, 0); + }); + // Main thread will block on read. + let _res = unsafe { libc::read(fds[0], buf.as_mut_ptr().cast(), buf.len() as libc::size_t) }; + //~^ ERROR: deadlock: the evaluated program deadlocked +} diff --git a/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.stderr b/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.stderr new file mode 100644 index 00000000000..9ca5598abae --- /dev/null +++ b/src/tools/miri/tests/fail-dep/libc/fcntl_fsetfl_while_blocking.stderr @@ -0,0 +1,19 @@ +error: deadlock: the evaluated program deadlocked + | + = note: the evaluated program deadlocked + = note: (no span available) + = note: BACKTRACE on thread `unnamed-ID`: + +error: deadlock: the evaluated program deadlocked + --> tests/fail-dep/libc/fcntl_fsetfl_while_blocking.rs:LL:CC + | +LL | let _res = unsafe { libc::read(fds[0], buf.as_mut_ptr().cast(), buf.len() as libc::size_t) }; + | ^ the evaluated program deadlocked + | + = note: BACKTRACE: + = note: inside `main` at tests/fail-dep/libc/fcntl_fsetfl_while_blocking.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 2 previous errors + diff --git a/src/tools/miri/tests/fail-dep/libc/unsupported_incomplete_function.stderr b/src/tools/miri/tests/fail-dep/libc/unsupported_incomplete_function.stderr index a92a97cef3b..52a93ab263d 100644 --- a/src/tools/miri/tests/fail-dep/libc/unsupported_incomplete_function.stderr +++ b/src/tools/miri/tests/fail-dep/libc/unsupported_incomplete_function.stderr @@ -4,8 +4,7 @@ error: unsupported operation: can't call foreign function `signal` on $OS LL | libc::signal(libc::SIGPIPE, libc::SIG_IGN); | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ can't call foreign function `signal` on $OS | - = help: if this is a basic API commonly used on this target, please report an issue with Miri - = help: however, note that Miri does not aim to support every FFI function out there; for 
instance, we will not support APIs for things such as GUIs, scripting languages, or databases + = help: this means the program tried to do something Miri does not support; it does not indicate a bug in the program = note: BACKTRACE: = note: inside `main` at tests/fail-dep/libc/unsupported_incomplete_function.rs:LL:CC diff --git a/src/tools/miri/tests/fail/alloc/no_global_allocator.stderr b/src/tools/miri/tests/fail/alloc/no_global_allocator.stderr index 541af64b894..e80a3646714 100644 --- a/src/tools/miri/tests/fail/alloc/no_global_allocator.stderr +++ b/src/tools/miri/tests/fail/alloc/no_global_allocator.stderr @@ -4,8 +4,7 @@ error: unsupported operation: can't call foreign function `__rust_alloc` on $OS LL | __rust_alloc(1, 1); | ^^^^^^^^^^^^^^^^^^ can't call foreign function `__rust_alloc` on $OS | - = help: if this is a basic API commonly used on this target, please report an issue with Miri - = help: however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases + = help: this means the program tried to do something Miri does not support; it does not indicate a bug in the program = note: BACKTRACE: = note: inside `miri_start` at tests/fail/alloc/no_global_allocator.rs:LL:CC diff --git a/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.rs b/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.rs new file mode 100644 index 00000000000..ff797877682 --- /dev/null +++ b/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.rs @@ -0,0 +1,33 @@ +//! A version of `cell_inside_struct` that dumps the tree so that we can see what is happening. +//@compile-flags: -Zmiri-tree-borrows +#[path = "../../utils/mod.rs"] +#[macro_use] +mod utils; + +use std::cell::Cell; + +struct Foo { + field1: u32, + field2: Cell<u32>, +} + +pub fn main() { + let root = Foo { field1: 42, field2: Cell::new(88) }; + unsafe { + let a = &root; + + name!(a as *const Foo, "a"); + + let a: *const Foo = a as *const Foo; + let a: *mut Foo = a as *mut Foo; + + let alloc_id = alloc_id!(a); + print_state!(alloc_id); + + // Writing to `field2`, which is interior mutable, should be allowed. + (*a).field2.set(10); + + // Writing to `field1`, which is frozen, should not be allowed. + (*a).field1 = 88; //~ ERROR: /write access through .* is forbidden/ + } +} diff --git a/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.stderr b/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.stderr new file mode 100644 index 00000000000..717f1419452 --- /dev/null +++ b/src/tools/miri/tests/fail/tree_borrows/cell-inside-struct.stderr @@ -0,0 +1,26 @@ +────────────────────────────────────────────────── +Warning: this tree is indicative only. Some tags may have been hidden. +0.. 4.. 
8 +| Act | Act | └─┬──<TAG=root of the allocation> +| Frz |?Cel | └────<TAG=a> +────────────────────────────────────────────────── +error: Undefined Behavior: write access through <TAG> (a) at ALLOC[0x0] is forbidden + --> tests/fail/tree_borrows/cell-inside-struct.rs:LL:CC + | +LL | (*a).field1 = 88; + | ^^^^^^^^^^^^^^^^ write access through <TAG> (a) at ALLOC[0x0] is forbidden + | + = help: this indicates a potential bug in the program: it performed an invalid operation, but the Tree Borrows rules it violated are still experimental + = help: the accessed tag <TAG> (a) has state Frozen which forbids this child write access +help: the accessed tag <TAG> was created here, in the initial state Cell + --> tests/fail/tree_borrows/cell-inside-struct.rs:LL:CC + | +LL | let a = &root; + | ^^^^^ + = note: BACKTRACE (of the first span): + = note: inside `main` at tests/fail/tree_borrows/cell-inside-struct.rs:LL:CC + +note: some details are omitted, run with `MIRIFLAGS=-Zmiri-backtrace=full` for a verbose backtrace + +error: aborting due to 1 previous error + diff --git a/src/tools/miri/tests/fail/unsupported_foreign_function.stderr b/src/tools/miri/tests/fail/unsupported_foreign_function.stderr index 4fe45b0868a..bbfc5c31256 100644 --- a/src/tools/miri/tests/fail/unsupported_foreign_function.stderr +++ b/src/tools/miri/tests/fail/unsupported_foreign_function.stderr @@ -4,8 +4,7 @@ error: unsupported operation: can't call foreign function `foo` on $OS LL | foo(); | ^^^^^ can't call foreign function `foo` on $OS | - = help: if this is a basic API commonly used on this target, please report an issue with Miri - = help: however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases + = help: this means the program tried to do something Miri does not support; it does not indicate a bug in the program = note: BACKTRACE: = note: inside `main` at tests/fail/unsupported_foreign_function.rs:LL:CC diff --git a/src/tools/miri/tests/many-seeds/reentrant-lock.rs b/src/tools/miri/tests/many-seeds/reentrant-lock.rs index 8a363179a9c..4c2dc463f48 100644 --- a/src/tools/miri/tests/many-seeds/reentrant-lock.rs +++ b/src/tools/miri/tests/many-seeds/reentrant-lock.rs @@ -1,6 +1,6 @@ #![feature(reentrant_lock)] //! This is a regression test for -//! <https://rust-lang.zulipchat.com/#narrow/channel/269128-miri/topic/reentrant.20lock.20failure.20on.20musl>. +//! <https://rust-lang.zulipchat.com/#narrow/channel/269128-miri/topic/reentrant.20lock.20failure.20on.20mips>. 
use std::cell::Cell; use std::sync::ReentrantLock; diff --git a/src/tools/miri/tests/native-lib/fail/function_not_in_so.stderr b/src/tools/miri/tests/native-lib/fail/function_not_in_so.stderr index bf1cfd573b8..b663fd41457 100644 --- a/src/tools/miri/tests/native-lib/fail/function_not_in_so.stderr +++ b/src/tools/miri/tests/native-lib/fail/function_not_in_so.stderr @@ -4,8 +4,7 @@ error: unsupported operation: can't call foreign function `foo` on $OS LL | foo(); | ^^^^^ can't call foreign function `foo` on $OS | - = help: if this is a basic API commonly used on this target, please report an issue with Miri - = help: however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases + = help: this means the program tried to do something Miri does not support; it does not indicate a bug in the program = note: BACKTRACE: = note: inside `main` at tests/native-lib/fail/function_not_in_so.rs:LL:CC diff --git a/src/tools/miri/tests/native-lib/fail/private_function.stderr b/src/tools/miri/tests/native-lib/fail/private_function.stderr index 2cfc062212b..03681240015 100644 --- a/src/tools/miri/tests/native-lib/fail/private_function.stderr +++ b/src/tools/miri/tests/native-lib/fail/private_function.stderr @@ -4,8 +4,7 @@ error: unsupported operation: can't call foreign function `not_exported` on $OS LL | not_exported(); | ^^^^^^^^^^^^^^ can't call foreign function `not_exported` on $OS | - = help: if this is a basic API commonly used on this target, please report an issue with Miri - = help: however, note that Miri does not aim to support every FFI function out there; for instance, we will not support APIs for things such as GUIs, scripting languages, or databases + = help: this means the program tried to do something Miri does not support; it does not indicate a bug in the program = note: BACKTRACE: = note: inside `main` at tests/native-lib/fail/private_function.rs:LL:CC diff --git a/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs b/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs index 05f6c870c3d..bc755af864c 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-pipe.rs @@ -15,6 +15,8 @@ fn main() { ))] // `pipe2` only exists in some specific os. test_pipe2(); + test_pipe_setfl_getfl(); + test_pipe_fcntl_threaded(); } fn test_pipe() { @@ -127,3 +129,68 @@ fn test_pipe2() { let res = unsafe { libc::pipe2(fds.as_mut_ptr(), libc::O_NONBLOCK) }; assert_eq!(res, 0); } + +/// Basic test for pipe fcntl's F_SETFL and F_GETFL flag. +fn test_pipe_setfl_getfl() { + // Initialise pipe fds. + let mut fds = [-1, -1]; + let res = unsafe { libc::pipe(fds.as_mut_ptr()) }; + assert_eq!(res, 0); + + // Both sides should either have O_RONLY or O_WRONLY. + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDONLY); + let res = unsafe { libc::fcntl(fds[1], libc::F_GETFL) }; + assert_eq!(res, libc::O_WRONLY); + + // Add the O_NONBLOCK flag with F_SETFL. + let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }; + assert_eq!(res, 0); + + // Test if the O_NONBLOCK flag is successfully added. + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDONLY | libc::O_NONBLOCK); + + // The other side remains unchanged. + let res = unsafe { libc::fcntl(fds[1], libc::F_GETFL) }; + assert_eq!(res, libc::O_WRONLY); + + // Test if O_NONBLOCK flag can be unset. 
+ let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, 0) }; + assert_eq!(res, 0); + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDONLY); +} + +/// Test the behaviour of F_SETFL/F_GETFL when a fd is blocking. +/// The expected execution is: +/// 1. Main thread blocks on fds[0] `read`. +/// 2. Thread 1 sets O_NONBLOCK flag on fds[0], +/// checks the value of F_GETFL, +/// then writes to fds[1] to unblock main thread's `read`. +fn test_pipe_fcntl_threaded() { + let mut fds = [-1, -1]; + let res = unsafe { libc::pipe(fds.as_mut_ptr()) }; + assert_eq!(res, 0); + let mut buf: [u8; 5] = [0; 5]; + let thread1 = thread::spawn(move || { + // Add O_NONBLOCK flag while pipe is still blocked on read. + let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }; + assert_eq!(res, 0); + + // Check the new flag value while the main thread is still blocked on fds[0]. + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_NONBLOCK); + + // The write below will unblock the `read` in main thread: even though + // the socket is now "non-blocking", the shim needs to deal correctly + // with threads that were blocked before the socket was made non-blocking. + let data = "abcde".as_bytes().as_ptr(); + let res = unsafe { libc::write(fds[1], data as *const libc::c_void, 5) }; + assert_eq!(res, 5); + }); + // The `read` below will block. + let res = unsafe { libc::read(fds[0], buf.as_mut_ptr().cast(), buf.len() as libc::size_t) }; + thread1.join().unwrap(); + assert_eq!(res, 5); +} diff --git a/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs b/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs index 9e48410f704..c36f6b11224 100644 --- a/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs +++ b/src/tools/miri/tests/pass-dep/libc/libc-socketpair.rs @@ -12,6 +12,7 @@ fn main() { test_race(); test_blocking_read(); test_blocking_write(); + test_socketpair_setfl_getfl(); } fn test_socketpair() { @@ -182,3 +183,35 @@ fn test_blocking_write() { thread1.join().unwrap(); thread2.join().unwrap(); } + +/// Basic test for socketpair fcntl's F_SETFL and F_GETFL flag. +fn test_socketpair_setfl_getfl() { + // Initialise socketpair fds. + let mut fds = [-1, -1]; + let res = unsafe { libc::socketpair(libc::AF_UNIX, libc::SOCK_STREAM, 0, fds.as_mut_ptr()) }; + assert_eq!(res, 0); + + // Test if both sides have O_RDWR. + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDWR); + let res = unsafe { libc::fcntl(fds[1], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDWR); + + // Add the O_NONBLOCK flag with F_SETFL. + let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, libc::O_NONBLOCK) }; + assert_eq!(res, 0); + + // Test if the O_NONBLOCK flag is successfully added. + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDWR | libc::O_NONBLOCK); + + // The other side remains unchanged. + let res = unsafe { libc::fcntl(fds[1], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDWR); + + // Test if O_NONBLOCK flag can be unset. 
+ let res = unsafe { libc::fcntl(fds[0], libc::F_SETFL, 0) }; + assert_eq!(res, 0); + let res = unsafe { libc::fcntl(fds[0], libc::F_GETFL) }; + assert_eq!(res, libc::O_RDWR); +} diff --git a/src/tools/miri/tests/pass-dep/tokio/file-io.rs b/src/tools/miri/tests/pass-dep/tokio/file-io.rs index 6e88b907f5d..067753203bb 100644 --- a/src/tools/miri/tests/pass-dep/tokio/file-io.rs +++ b/src/tools/miri/tests/pass-dep/tokio/file-io.rs @@ -20,7 +20,11 @@ async fn test_create_and_write() -> io::Result<()> { let mut file = File::create(&path).await?; // Write 10 bytes to the file. - file.write(b"some bytes").await?; + file.write_all(b"some bytes").await?; + // For tokio's file I/O, `await` does not have its usual semantics of waiting until the + // operation is completed, so we have to wait some more to make sure the write is completed. + file.flush().await?; + // Check that 10 bytes have been written. assert_eq!(file.metadata().await.unwrap().len(), 10); remove_file(&path).unwrap(); @@ -31,10 +35,10 @@ async fn test_create_and_read() -> io::Result<()> { let bytes = b"more bytes"; let path = utils::prepare_with_content("foo.txt", bytes); let mut file = OpenOptions::new().read(true).open(&path).await.unwrap(); - let mut buffer = [0u8; 10]; + let mut buffer = vec![]; // Read the whole file. - file.read(&mut buffer[..]).await?; + file.read_to_end(&mut buffer).await?; assert_eq!(&buffer, b"more bytes"); remove_file(&path).unwrap(); diff --git a/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs b/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs index c76e7f2eebd..6a625e597df 100644 --- a/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs +++ b/src/tools/miri/tests/pass/both_borrows/basic_aliasing_model.rs @@ -1,6 +1,7 @@ //@revisions: stack tree //@[tree]compile-flags: -Zmiri-tree-borrows #![feature(allocator_api)] +use std::cell::Cell; use std::ptr; // Test various aliasing-model-related things. @@ -22,6 +23,7 @@ fn main() { not_unpin_not_protected(); write_does_not_invalidate_all_aliases(); box_into_raw_allows_interior_mutable_alias(); + cell_inside_struct() } // Make sure that reading from an `&mut` does, like reborrowing to `&`, @@ -259,7 +261,7 @@ fn write_does_not_invalidate_all_aliases() { fn box_into_raw_allows_interior_mutable_alias() { unsafe { - let b = Box::new(std::cell::Cell::new(42)); + let b = Box::new(Cell::new(42)); let raw = Box::into_raw(b); let c = &*raw; let d = raw.cast::<i32>(); // bypassing `Cell` -- only okay in Miri tests @@ -269,3 +271,19 @@ fn box_into_raw_allows_interior_mutable_alias() { drop(Box::from_raw(raw)); } } + +fn cell_inside_struct() { + struct Foo { + field1: u32, + field2: Cell<u32>, + } + + let mut root = Foo { field1: 42, field2: Cell::new(88) }; + let a = &mut root; + + // Writing to `field2`, which is interior mutable, should be allowed. + (*a).field2.set(10); + + // Writing to `field1`, which is reserved, should also be allowed. + (*a).field1 = 88; +} diff --git a/src/tools/miri/tests/pass/btreemap.rs b/src/tools/miri/tests/pass/btreemap.rs index 1213f81a6f1..1d65e69bf72 100644 --- a/src/tools/miri/tests/pass/btreemap.rs +++ b/src/tools/miri/tests/pass/btreemap.rs @@ -50,7 +50,7 @@ pub fn main() { test_all_refs(&mut 13, b.values_mut()); // Test forgetting the extractor. 
- let mut d = b.extract_if(|_, i| *i < 30); + let mut d = b.extract_if(.., |_, i| *i < 30); d.next().unwrap(); mem::forget(d); } diff --git a/src/tools/miri/tests/pass/shims/fs.rs b/src/tools/miri/tests/pass/shims/fs.rs index 315637ff7ec..87df43ca7e5 100644 --- a/src/tools/miri/tests/pass/shims/fs.rs +++ b/src/tools/miri/tests/pass/shims/fs.rs @@ -23,9 +23,9 @@ fn main() { test_seek(); test_errors(); test_from_raw_os_error(); + test_file_clone(); // Windows file handling is very incomplete. if cfg!(not(windows)) { - test_file_clone(); test_file_set_len(); test_file_sync(); test_rename(); diff --git a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr index 75a30c9a083..e09aed2cf5d 100644 --- a/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr +++ b/src/tools/miri/tests/pass/tree_borrows/cell-alternate-writes.stderr @@ -3,8 +3,8 @@ Warning: this tree is indicative only. Some tags may have been hidden. 0.. 1 | Act | └─┬──<TAG=root of the allocation> | ReIM| └─┬──<TAG=data> -| Cel | ├────<TAG=x> -| Cel | └────<TAG=y> +|?Cel | ├────<TAG=x> +|?Cel | └────<TAG=y> ────────────────────────────────────────────────── ────────────────────────────────────────────────── Warning: this tree is indicative only. Some tags may have been hidden. diff --git a/src/tools/miri/tests/pass/tree_borrows/cell-lazy-write-to-surrounding.rs b/src/tools/miri/tests/pass/tree_borrows/cell-lazy-write-to-surrounding.rs new file mode 100644 index 00000000000..abe08f2cd22 --- /dev/null +++ b/src/tools/miri/tests/pass/tree_borrows/cell-lazy-write-to-surrounding.rs @@ -0,0 +1,22 @@ +//@compile-flags: -Zmiri-tree-borrows + +use std::cell::Cell; + +fn foo(x: &Cell<i32>) { + unsafe { + let ptr = x as *const Cell<i32> as *mut Cell<i32> as *mut i32; + ptr.offset(1).write(0); + } +} + +fn main() { + let arr = [Cell::new(1), Cell::new(1)]; + foo(&arr[0]); + + let pair = (Cell::new(1), 1); + // TODO: Ideally, this would result in UB since the second element + // in `pair` is Frozen. We would need some way to express a + // "shared reference with permission to access surrounding + // interior mutable data". 
+ foo(&pair.0); +} diff --git a/tests/codegen/thread-local.rs b/tests/codegen/thread-local.rs index 9ce34473b91..41df8c9be1b 100644 --- a/tests/codegen/thread-local.rs +++ b/tests/codegen/thread-local.rs @@ -14,13 +14,14 @@ use std::cell::Cell; thread_local!(static A: Cell<u32> = const { Cell::new(1) }); -// CHECK: [[TLS_AUX:@.+]] = external thread_local local_unnamed_addr global i64 -// CHECK: [[TLS:@.+]] = internal thread_local unnamed_addr global +// CHECK: [[TLS_AUX:@.+]] = external thread_local{{.*}} global i64 +// CHECK: [[TLS:@.+]] = internal thread_local{{.*}} global // CHECK-LABEL: @get #[no_mangle] fn get() -> u32 { - // CHECK: [[RET_0:%.+]] = load i32, {{.*}}[[TLS]]{{.*}} + // CHECK: [[PTR:%.+]] = tail call {{.*}} ptr @llvm.threadlocal.address.p0(ptr [[TLS]]) + // CHECK-NEXT: [[RET_0:%.+]] = load i32, ptr [[PTR]] // CHECK-NEXT: ret i32 [[RET_0]] A.with(|a| a.get()) } @@ -28,7 +29,8 @@ fn get() -> u32 { // CHECK-LABEL: @set #[no_mangle] fn set(v: u32) { - // CHECK: store i32 %0, {{.*}}[[TLS]]{{.*}} + // CHECK: [[PTR:%.+]] = tail call {{.*}} ptr @llvm.threadlocal.address.p0(ptr [[TLS]]) + // CHECK-NEXT: store i32 %0, ptr [[PTR]] // CHECK-NEXT: ret void A.with(|a| a.set(v)) } @@ -36,7 +38,8 @@ fn set(v: u32) { // CHECK-LABEL: @get_aux #[no_mangle] fn get_aux() -> u64 { - // CHECK: [[RET_1:%.+]] = load i64, {{.*}}[[TLS_AUX]] + // CHECK: [[PTR:%.+]] = tail call {{.*}} ptr @llvm.threadlocal.address.p0(ptr [[TLS_AUX]]) + // CHECK-NEXT: [[RET_1:%.+]] = load i64, ptr [[PTR]] // CHECK-NEXT: ret i64 [[RET_1]] aux::A.with(|a| a.get()) } @@ -44,7 +47,8 @@ fn get_aux() -> u64 { // CHECK-LABEL: @set_aux #[no_mangle] fn set_aux(v: u64) { - // CHECK: store i64 %0, {{.*}}[[TLS_AUX]] + // CHECK: [[PTR:%.+]] = tail call {{.*}} ptr @llvm.threadlocal.address.p0(ptr [[TLS_AUX]]) + // CHECK-NEXT: store i64 %0, ptr [[PTR]] // CHECK-NEXT: ret void aux::A.with(|a| a.set(v)) } diff --git a/tests/mir-opt/dataflow.main.maybe_init.borrowck.dot b/tests/mir-opt/dataflow.main.maybe_init.borrowck.dot deleted file mode 100644 index 7c7d8921fb3..00000000000 --- a/tests/mir-opt/dataflow.main.maybe_init.borrowck.dot +++ /dev/null @@ -1,6 +0,0 @@ -digraph graph_for_def_id_0_3 { - graph[fontname="Courier, monospace"]; - node[fontname="Courier, monospace"]; - edge[fontname="Courier, monospace"]; - bb_0[label=<<table border="1" cellborder="1" cellspacing="0" cellpadding="3" sides="rb"><tr><td colspan="3" sides="tl">bb0</td></tr><tr><td colspan="2" bgcolor="#a0a0a0" sides="tl">MIR</td><td bgcolor="#a0a0a0" sides="tl">STATE</td></tr><tr><td valign="bottom" sides="tl" align="right"></td><td valign="bottom" sides="tl" align="left">(on start)</td><td colspan="1" valign="bottom" sides="tl" align="left">{}</td></tr><tr><td valign="top" sides="tl" bgcolor="#f0f0f0" align="right">0</td><td valign="top" sides="tl" bgcolor="#f0f0f0" align="left">_0 = const ()</td><td valign="top" sides="tl" bgcolor="#f0f0f0" align="left"><font color="darkgreen">+_0</font></td></tr><tr><td valign="top" sides="tl" align="right">T</td><td valign="top" sides="tl" align="left">return</td><td valign="top" sides="tl" align="left"></td></tr><tr><td valign="bottom" sides="tl" bgcolor="#f0f0f0" align="right"></td><td valign="bottom" sides="tl" bgcolor="#f0f0f0" align="left">(on end)</td><td colspan="1" valign="bottom" sides="tl" bgcolor="#f0f0f0" align="left">{_0}</td></tr></table>>][shape="none"]; -} diff --git a/tests/mir-opt/dataflow.main.maybe_uninit.borrowck.dot b/tests/mir-opt/dataflow.main.maybe_uninit.borrowck.dot new file mode 100644 index 
00000000000..258404b8da7 --- /dev/null +++ b/tests/mir-opt/dataflow.main.maybe_uninit.borrowck.dot @@ -0,0 +1,6 @@ +digraph graph_for_def_id_0_3 { + graph[fontname="Courier, monospace"]; + node[fontname="Courier, monospace"]; + edge[fontname="Courier, monospace"]; + bb_0[label=<<table border="1" cellborder="1" cellspacing="0" cellpadding="3" sides="rb"><tr><td colspan="3" sides="tl">bb0</td></tr><tr><td colspan="2" bgcolor="#a0a0a0" sides="tl">MIR</td><td bgcolor="#a0a0a0" sides="tl">STATE</td></tr><tr><td valign="bottom" sides="tl" align="right"></td><td valign="bottom" sides="tl" align="left">(on start)</td><td colspan="1" valign="bottom" sides="tl" align="left">{_0}</td></tr><tr><td valign="top" sides="tl" bgcolor="#f0f0f0" align="right">0</td><td valign="top" sides="tl" bgcolor="#f0f0f0" align="left">_0 = const ()</td><td valign="top" sides="tl" bgcolor="#f0f0f0" align="left"><font color="red">-_0</font></td></tr><tr><td valign="top" sides="tl" align="right">T</td><td valign="top" sides="tl" align="left">return</td><td valign="top" sides="tl" align="left"></td></tr><tr><td valign="bottom" sides="tl" bgcolor="#f0f0f0" align="right"></td><td valign="bottom" sides="tl" bgcolor="#f0f0f0" align="left">(on end)</td><td colspan="1" valign="bottom" sides="tl" bgcolor="#f0f0f0" align="left">{}</td></tr></table>>][shape="none"]; +} diff --git a/tests/mir-opt/dataflow.rs b/tests/mir-opt/dataflow.rs index 3a28f5d47b9..5ed3da4c531 100644 --- a/tests/mir-opt/dataflow.rs +++ b/tests/mir-opt/dataflow.rs @@ -2,5 +2,5 @@ // Test graphviz dataflow output //@ compile-flags: -Z dump-mir=main -Z dump-mir-dataflow -// EMIT_MIR dataflow.main.maybe_init.borrowck.dot +// EMIT_MIR dataflow.main.maybe_uninit.borrowck.dot fn main() {} diff --git a/tests/mir-opt/lower_intrinsics.rs b/tests/mir-opt/lower_intrinsics.rs index 4e000b05a4b..1e4f2024194 100644 --- a/tests/mir-opt/lower_intrinsics.rs +++ b/tests/mir-opt/lower_intrinsics.rs @@ -263,3 +263,24 @@ pub fn get_metadata(a: *const i32, b: *const [u8], c: *const dyn std::fmt::Debug let _usize = ptr_metadata(b); let _vtable = ptr_metadata(c); } + +// EMIT_MIR lower_intrinsics.slice_get.LowerIntrinsics.diff +pub unsafe fn slice_get<'a, 'b>( + r: &'a [i8], + rm: &'b mut [i16], + p: *const [i32], + pm: *mut [i64], + i: usize, +) -> (&'a i8, &'b mut i16, *const i32, *mut i64) { + use std::intrinsics::slice_get_unchecked; + // CHECK: = &(*_{{[0-9]+}})[_{{[0-9]+}}] + // CHECK: = &mut (*_{{[0-9]+}})[_{{[0-9]+}}] + // CHECK: = &raw const (*_{{[0-9]+}})[_{{[0-9]+}}] + // CHECK: = &raw mut (*_{{[0-9]+}})[_{{[0-9]+}}] + ( + slice_get_unchecked(r, i), + slice_get_unchecked(rm, i), + slice_get_unchecked(p, i), + slice_get_unchecked(pm, i), + ) +} diff --git a/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-abort.diff b/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-abort.diff new file mode 100644 index 00000000000..d18bdc43168 --- /dev/null +++ b/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-abort.diff @@ -0,0 +1,85 @@ +- // MIR for `slice_get` before LowerIntrinsics ++ // MIR for `slice_get` after LowerIntrinsics + + fn slice_get(_1: &[i8], _2: &mut [i16], _3: *const [i32], _4: *mut [i64], _5: usize) -> (&i8, &mut i16, *const i32, *mut i64) { + debug r => _1; + debug rm => _2; + debug p => _3; + debug pm => _4; + debug i => _5; + let mut _0: (&i8, &mut i16, *const i32, *mut i64); + let mut _6: &i8; + let mut _7: &[i8]; + let mut _8: usize; + let mut _9: &mut i16; + let mut _10: &mut [i16]; + let mut _11: usize; + let mut _12: 
*const i32; + let mut _13: *const [i32]; + let mut _14: usize; + let mut _15: *mut i64; + let mut _16: *mut [i64]; + let mut _17: usize; + + bb0: { + StorageLive(_6); + StorageLive(_7); + _7 = copy _1; + StorageLive(_8); + _8 = copy _5; +- _6 = slice_get_unchecked::<&i8, &[i8], i8>(move _7, move _8) -> [return: bb1, unwind unreachable]; ++ _6 = &(*_7)[_8]; ++ goto -> bb1; + } + + bb1: { + StorageDead(_8); + StorageDead(_7); + StorageLive(_9); + StorageLive(_10); + _10 = move _2; + StorageLive(_11); + _11 = copy _5; +- _9 = slice_get_unchecked::<&mut i16, &mut [i16], i16>(move _10, move _11) -> [return: bb2, unwind unreachable]; ++ _9 = &mut (*_10)[_11]; ++ goto -> bb2; + } + + bb2: { + StorageDead(_11); + StorageDead(_10); + StorageLive(_12); + StorageLive(_13); + _13 = copy _3; + StorageLive(_14); + _14 = copy _5; +- _12 = slice_get_unchecked::<*const i32, *const [i32], i32>(move _13, move _14) -> [return: bb3, unwind unreachable]; ++ _12 = &raw const (*_13)[_14]; ++ goto -> bb3; + } + + bb3: { + StorageDead(_14); + StorageDead(_13); + StorageLive(_15); + StorageLive(_16); + _16 = copy _4; + StorageLive(_17); + _17 = copy _5; +- _15 = slice_get_unchecked::<*mut i64, *mut [i64], i64>(move _16, move _17) -> [return: bb4, unwind unreachable]; ++ _15 = &raw mut (*_16)[_17]; ++ goto -> bb4; + } + + bb4: { + StorageDead(_17); + StorageDead(_16); + _0 = (move _6, move _9, move _12, move _15); + StorageDead(_15); + StorageDead(_12); + StorageDead(_9); + StorageDead(_6); + return; + } + } + diff --git a/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-unwind.diff b/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-unwind.diff new file mode 100644 index 00000000000..d18bdc43168 --- /dev/null +++ b/tests/mir-opt/lower_intrinsics.slice_get.LowerIntrinsics.panic-unwind.diff @@ -0,0 +1,85 @@ +- // MIR for `slice_get` before LowerIntrinsics ++ // MIR for `slice_get` after LowerIntrinsics + + fn slice_get(_1: &[i8], _2: &mut [i16], _3: *const [i32], _4: *mut [i64], _5: usize) -> (&i8, &mut i16, *const i32, *mut i64) { + debug r => _1; + debug rm => _2; + debug p => _3; + debug pm => _4; + debug i => _5; + let mut _0: (&i8, &mut i16, *const i32, *mut i64); + let mut _6: &i8; + let mut _7: &[i8]; + let mut _8: usize; + let mut _9: &mut i16; + let mut _10: &mut [i16]; + let mut _11: usize; + let mut _12: *const i32; + let mut _13: *const [i32]; + let mut _14: usize; + let mut _15: *mut i64; + let mut _16: *mut [i64]; + let mut _17: usize; + + bb0: { + StorageLive(_6); + StorageLive(_7); + _7 = copy _1; + StorageLive(_8); + _8 = copy _5; +- _6 = slice_get_unchecked::<&i8, &[i8], i8>(move _7, move _8) -> [return: bb1, unwind unreachable]; ++ _6 = &(*_7)[_8]; ++ goto -> bb1; + } + + bb1: { + StorageDead(_8); + StorageDead(_7); + StorageLive(_9); + StorageLive(_10); + _10 = move _2; + StorageLive(_11); + _11 = copy _5; +- _9 = slice_get_unchecked::<&mut i16, &mut [i16], i16>(move _10, move _11) -> [return: bb2, unwind unreachable]; ++ _9 = &mut (*_10)[_11]; ++ goto -> bb2; + } + + bb2: { + StorageDead(_11); + StorageDead(_10); + StorageLive(_12); + StorageLive(_13); + _13 = copy _3; + StorageLive(_14); + _14 = copy _5; +- _12 = slice_get_unchecked::<*const i32, *const [i32], i32>(move _13, move _14) -> [return: bb3, unwind unreachable]; ++ _12 = &raw const (*_13)[_14]; ++ goto -> bb3; + } + + bb3: { + StorageDead(_14); + StorageDead(_13); + StorageLive(_15); + StorageLive(_16); + _16 = copy _4; + StorageLive(_17); + _17 = copy _5; +- _15 = slice_get_unchecked::<*mut i64, *mut [i64], 
i64>(move _16, move _17) -> [return: bb4, unwind unreachable]; ++ _15 = &raw mut (*_16)[_17]; ++ goto -> bb4; + } + + bb4: { + StorageDead(_17); + StorageDead(_16); + _0 = (move _6, move _9, move _12, move _15); + StorageDead(_15); + StorageDead(_12); + StorageDead(_9); + StorageDead(_6); + return; + } + } + diff --git a/tests/mir-opt/matches_reduce_branches.match_non_int_failed.MatchBranchSimplification.diff b/tests/mir-opt/matches_reduce_branches.match_non_int_failed.MatchBranchSimplification.diff new file mode 100644 index 00000000000..81e900a34c0 --- /dev/null +++ b/tests/mir-opt/matches_reduce_branches.match_non_int_failed.MatchBranchSimplification.diff @@ -0,0 +1,29 @@ +- // MIR for `match_non_int_failed` before MatchBranchSimplification ++ // MIR for `match_non_int_failed` after MatchBranchSimplification + + fn match_non_int_failed(_1: char) -> u8 { + let mut _0: u8; + + bb0: { + switchInt(copy _1) -> [97: bb1, 98: bb2, otherwise: bb3]; + } + + bb1: { + _0 = const 97_u8; + goto -> bb4; + } + + bb2: { + _0 = const 98_u8; + goto -> bb4; + } + + bb3: { + unreachable; + } + + bb4: { + return; + } + } + diff --git a/tests/mir-opt/matches_reduce_branches.rs b/tests/mir-opt/matches_reduce_branches.rs index 00131b0116d..89ef3bfb308 100644 --- a/tests/mir-opt/matches_reduce_branches.rs +++ b/tests/mir-opt/matches_reduce_branches.rs @@ -627,6 +627,37 @@ fn match_i128_u128(i: EnumAi128) -> u128 { } } +// EMIT_MIR matches_reduce_branches.match_non_int_failed.MatchBranchSimplification.diff +#[custom_mir(dialect = "runtime")] +fn match_non_int_failed(i: char) -> u8 { + // CHECK-LABEL: fn match_non_int_failed( + // CHECK: switchInt + // CHECK: return + mir! { + { + match i { + 'a' => bb1, + 'b' => bb2, + _ => unreachable_bb, + } + } + bb1 = { + RET = 97; + Goto(ret) + } + bb2 = { + RET = 98; + Goto(ret) + } + unreachable_bb = { + Unreachable() + } + ret = { + Return() + } + } +} + fn main() { let _ = foo(None); let _ = foo(Some(())); @@ -664,4 +695,5 @@ fn main() { let _ = match_i128_u128(EnumAi128::A); let _ = my_is_some(None); + let _ = match_non_int_failed('a'); } diff --git a/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-abort.mir index ec67193bc79..d1b1e3d7dd7 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-abort.mir @@ -8,19 +8,11 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> { scope 2 (inlined <usize as SliceIndex<[u32]>>::get_mut) { let mut _3: usize; let mut _4: bool; - let mut _5: *mut [u32]; - let mut _7: *mut u32; - let mut _8: &mut u32; - scope 3 (inlined core::slice::index::get_mut_noubcheck::<u32>) { - let _6: *mut u32; - scope 4 { - } - } + let mut _5: &mut u32; } } bb0: { - StorageLive(_8); StorageLive(_4); StorageLive(_3); _3 = PtrMetadata(copy _1); @@ -36,23 +28,15 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> { bb2: { StorageDead(_3); - StorageLive(_7); StorageLive(_5); - _5 = &raw mut (*_1); - StorageLive(_6); - _6 = copy _5 as *mut u32 (PtrToPtr); - _7 = Offset(copy _6, copy _2); - StorageDead(_6); + _5 = &mut (*_1)[_2]; + _0 = Option::<&mut u32>::Some(move _5); StorageDead(_5); - _8 = &mut (*_7); - _0 = Option::<&mut u32>::Some(copy _8); - StorageDead(_7); goto -> bb3; } bb3: { StorageDead(_4); - StorageDead(_8); return; } } diff --git 
a/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-unwind.mir index ec67193bc79..d1b1e3d7dd7 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_get_mut_usize.PreCodegen.after.panic-unwind.mir @@ -8,19 +8,11 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> { scope 2 (inlined <usize as SliceIndex<[u32]>>::get_mut) { let mut _3: usize; let mut _4: bool; - let mut _5: *mut [u32]; - let mut _7: *mut u32; - let mut _8: &mut u32; - scope 3 (inlined core::slice::index::get_mut_noubcheck::<u32>) { - let _6: *mut u32; - scope 4 { - } - } + let mut _5: &mut u32; } } bb0: { - StorageLive(_8); StorageLive(_4); StorageLive(_3); _3 = PtrMetadata(copy _1); @@ -36,23 +28,15 @@ fn slice_get_mut_usize(_1: &mut [u32], _2: usize) -> Option<&mut u32> { bb2: { StorageDead(_3); - StorageLive(_7); StorageLive(_5); - _5 = &raw mut (*_1); - StorageLive(_6); - _6 = copy _5 as *mut u32 (PtrToPtr); - _7 = Offset(copy _6, copy _2); - StorageDead(_6); + _5 = &mut (*_1)[_2]; + _0 = Option::<&mut u32>::Some(move _5); StorageDead(_5); - _8 = &mut (*_7); - _0 = Option::<&mut u32>::Some(copy _8); - StorageDead(_7); goto -> bb3; } bb3: { StorageDead(_4); - StorageDead(_8); return; } } diff --git a/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-abort.mir index 597f02e837a..6fb1637a6e0 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-abort.mir @@ -15,12 +15,10 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) -> let _8: usize; scope 3 { scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) { - let _10: *mut u32; + let _9: *mut u32; scope 7 { - } - scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) { - let _9: *mut u32; - scope 9 { + let _10: *mut u32; + scope 8 { } } } @@ -47,13 +45,13 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) -> bb1: { StorageDead(_6); _8 = SubUnchecked(copy _4, copy _3); - StorageLive(_10); StorageLive(_9); + StorageLive(_10); _9 = copy _5 as *mut u32 (PtrToPtr); _10 = Offset(copy _9, copy _3); - StorageDead(_9); _11 = *mut [u32] from (copy _10, copy _8); StorageDead(_10); + StorageDead(_9); StorageDead(_8); StorageDead(_5); _0 = &mut (*_11); diff --git a/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-unwind.mir index 597f02e837a..6fb1637a6e0 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_get_unchecked_mut_range.PreCodegen.after.panic-unwind.mir @@ -15,12 +15,10 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) -> let _8: usize; scope 3 { scope 6 (inlined core::slice::index::get_offset_len_mut_noubcheck::<u32>) { - let _10: *mut u32; + let _9: *mut u32; scope 7 { - } - scope 8 (inlined core::slice::index::get_mut_noubcheck::<u32>) { - let _9: *mut u32; - scope 9 { + let 
_10: *mut u32; + scope 8 { } } } @@ -47,13 +45,13 @@ fn slice_get_unchecked_mut_range(_1: &mut [u32], _2: std::ops::Range<usize>) -> bb1: { StorageDead(_6); _8 = SubUnchecked(copy _4, copy _3); - StorageLive(_10); StorageLive(_9); + StorageLive(_10); _9 = copy _5 as *mut u32 (PtrToPtr); _10 = Offset(copy _9, copy _3); - StorageDead(_9); _11 = *mut [u32] from (copy _10, copy _8); StorageDead(_10); + StorageDead(_9); StorageDead(_8); StorageDead(_5); _0 = &mut (*_11); diff --git a/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-abort.mir b/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-abort.mir index e34723898e4..ad1ca5dff43 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-abort.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-abort.mir @@ -13,12 +13,10 @@ fn slice_ptr_get_unchecked_range(_1: *const [u32], _2: std::ops::Range<usize>) - let _7: usize; scope 3 { scope 6 (inlined core::slice::index::get_offset_len_noubcheck::<u32>) { - let _9: *const u32; + let _8: *const u32; scope 7 { - } - scope 8 (inlined core::slice::index::get_noubcheck::<u32>) { - let _8: *const u32; - scope 9 { + let _9: *const u32; + scope 8 { } } } @@ -42,13 +40,13 @@ fn slice_ptr_get_unchecked_range(_1: *const [u32], _2: std::ops::Range<usize>) - bb1: { StorageDead(_5); _7 = SubUnchecked(copy _4, copy _3); - StorageLive(_9); StorageLive(_8); + StorageLive(_9); _8 = copy _1 as *const u32 (PtrToPtr); _9 = Offset(copy _8, copy _3); - StorageDead(_8); _0 = *const [u32] from (copy _9, copy _7); StorageDead(_9); + StorageDead(_8); StorageDead(_7); return; } diff --git a/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-unwind.mir b/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-unwind.mir index e34723898e4..ad1ca5dff43 100644 --- a/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-unwind.mir +++ b/tests/mir-opt/pre-codegen/slice_index.slice_ptr_get_unchecked_range.PreCodegen.after.panic-unwind.mir @@ -13,12 +13,10 @@ fn slice_ptr_get_unchecked_range(_1: *const [u32], _2: std::ops::Range<usize>) - let _7: usize; scope 3 { scope 6 (inlined core::slice::index::get_offset_len_noubcheck::<u32>) { - let _9: *const u32; + let _8: *const u32; scope 7 { - } - scope 8 (inlined core::slice::index::get_noubcheck::<u32>) { - let _8: *const u32; - scope 9 { + let _9: *const u32; + scope 8 { } } } @@ -42,13 +40,13 @@ fn slice_ptr_get_unchecked_range(_1: *const [u32], _2: std::ops::Range<usize>) - bb1: { StorageDead(_5); _7 = SubUnchecked(copy _4, copy _3); - StorageLive(_9); StorageLive(_8); + StorageLive(_9); _8 = copy _1 as *const u32 (PtrToPtr); _9 = Offset(copy _8, copy _3); - StorageDead(_8); _0 = *const [u32] from (copy _9, copy _7); StorageDead(_9); + StorageDead(_8); StorageDead(_7); return; } diff --git a/tests/ui/associated-type-bounds/return-type-notation/path-missing.stderr b/tests/ui/associated-type-bounds/return-type-notation/path-missing.stderr index edac09db89d..677fc0e10bb 100644 --- a/tests/ui/associated-type-bounds/return-type-notation/path-missing.stderr +++ b/tests/ui/associated-type-bounds/return-type-notation/path-missing.stderr @@ -8,9 +8,7 @@ error[E0575]: expected method or associated constant, found associated type `A:: --> $DIR/path-missing.rs:12:5 | LL | <T as A>::bad(..): Send, - | 
^^^^^^^^^^^^^^^^^ - | - = note: can't use a type alias as a constructor + | ^^^^^^^^^^^^^^^^^ not a method or associated constant error[E0220]: associated function `method` not found for `T` --> $DIR/path-missing.rs:19:8 diff --git a/tests/ui/associated-types/associated-type-tuple-struct-construction.rs b/tests/ui/associated-types/associated-type-tuple-struct-construction.rs deleted file mode 100644 index d5809ecd55d..00000000000 --- a/tests/ui/associated-types/associated-type-tuple-struct-construction.rs +++ /dev/null @@ -1,24 +0,0 @@ -// Users cannot yet construct structs through associated types -// in both expressions and patterns - -#![feature(more_qualified_paths)] - -fn main() { - let <Foo as A>::Assoc(n) = <Foo as A>::Assoc(2); - //~^ ERROR expected method or associated constant, found associated type - //~| ERROR expected method or associated constant, found associated type - assert!(n == 2); -} - -struct TupleStruct(i8); - -struct Foo; - - -trait A { - type Assoc; -} - -impl A for Foo { - type Assoc = TupleStruct; -} diff --git a/tests/ui/associated-types/associated-type-tuple-struct-construction.stderr b/tests/ui/associated-types/associated-type-tuple-struct-construction.stderr deleted file mode 100644 index bca7deeb512..00000000000 --- a/tests/ui/associated-types/associated-type-tuple-struct-construction.stderr +++ /dev/null @@ -1,19 +0,0 @@ -error[E0575]: expected method or associated constant, found associated type `A::Assoc` - --> $DIR/associated-type-tuple-struct-construction.rs:7:32 - | -LL | let <Foo as A>::Assoc(n) = <Foo as A>::Assoc(2); - | ^^^^^^^^^^^^^^^^^ - | - = note: can't use a type alias as a constructor - -error[E0575]: expected method or associated constant, found associated type `A::Assoc` - --> $DIR/associated-type-tuple-struct-construction.rs:7:9 - | -LL | let <Foo as A>::Assoc(n) = <Foo as A>::Assoc(2); - | ^^^^^^^^^^^^^^^^^ - | - = note: can't use a type alias as a constructor - -error: aborting due to 2 previous errors - -For more information about this error, try `rustc --explain E0575`. diff --git a/tests/ui/associated-types/tuple-struct-expr-pat.fixed b/tests/ui/associated-types/tuple-struct-expr-pat.fixed new file mode 100644 index 00000000000..d6e2385f821 --- /dev/null +++ b/tests/ui/associated-types/tuple-struct-expr-pat.fixed @@ -0,0 +1,48 @@ +// Check that fully qualified syntax can **not** be used in tuple struct expressions (calls) and +// patterns. Both tuple struct expressions and patterns are resolved in value namespace and thus +// can't be resolved through associated *types*. 
+// +//@ run-rustfix + +#![feature(more_qualified_paths)] + +fn main() { + let <T<0> as Trait>::Assoc {} = <T<0> as Trait>::Assoc {}; + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<1> as Trait>::Assoc { 0: _a } = <T<1> as Trait>::Assoc { 0: 0 }; + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<2> as Trait>::Assoc { 0: _a, 1: _b } = <T<2> as Trait>::Assoc { 0: 0, 1: 1 }; + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<3> as Trait>::Assoc { 0: ref _a, 1: ref mut _b, 2: mut _c } = <T<3> as Trait>::Assoc { 0: 0, 1: 1, 2: 2 }; + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type +} + + +struct T<const N: usize>; + +struct T0(); +struct T1(u8); +struct T2(u8, u8); +struct T3(u8, u8, u8); + +trait Trait { + type Assoc; +} + +impl Trait for T<0> { + type Assoc = T0; +} + +impl Trait for T<1> { + type Assoc = T1; +} +impl Trait for T<2> { + type Assoc = T2; +} +impl Trait for T<3> { + type Assoc = T3; +} diff --git a/tests/ui/associated-types/tuple-struct-expr-pat.rs b/tests/ui/associated-types/tuple-struct-expr-pat.rs new file mode 100644 index 00000000000..f27a5fe1753 --- /dev/null +++ b/tests/ui/associated-types/tuple-struct-expr-pat.rs @@ -0,0 +1,48 @@ +// Check that fully qualified syntax can **not** be used in tuple struct expressions (calls) and +// patterns. Both tuple struct expressions and patterns are resolved in value namespace and thus +// can't be resolved through associated *types*. 
+// +//@ run-rustfix + +#![feature(more_qualified_paths)] + +fn main() { + let <T<0> as Trait>::Assoc() = <T<0> as Trait>::Assoc(); + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc(0); + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc(0, 1); + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type + let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc(0, 1, 2); + //~^ error: expected method or associated constant, found associated type + //~| error: expected tuple struct or tuple variant, found associated type +} + + +struct T<const N: usize>; + +struct T0(); +struct T1(u8); +struct T2(u8, u8); +struct T3(u8, u8, u8); + +trait Trait { + type Assoc; +} + +impl Trait for T<0> { + type Assoc = T0; +} + +impl Trait for T<1> { + type Assoc = T1; +} +impl Trait for T<2> { + type Assoc = T2; +} +impl Trait for T<3> { + type Assoc = T3; +} diff --git a/tests/ui/associated-types/tuple-struct-expr-pat.stderr b/tests/ui/associated-types/tuple-struct-expr-pat.stderr new file mode 100644 index 00000000000..135dfcb3447 --- /dev/null +++ b/tests/ui/associated-types/tuple-struct-expr-pat.stderr @@ -0,0 +1,97 @@ +error[E0575]: expected method or associated constant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:10:36 + | +LL | let <T<0> as Trait>::Assoc() = <T<0> as Trait>::Assoc(); + | ^^^^^^^^^^^^^^^^^^^^^^-- help: use struct expression instead: `{}` + | + = note: can't use a type alias as a constructor + +error[E0575]: expected tuple struct or tuple variant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:10:9 + | +LL | let <T<0> as Trait>::Assoc() = <T<0> as Trait>::Assoc(); + | ^^^^^^^^^^^^^^^^^^^^^^-- help: use struct pattern instead: `{}` + | + = note: can't use a type alias as tuple pattern + +error[E0575]: expected method or associated constant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:13:38 + | +LL | let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc(0); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as a constructor +help: use struct expression instead + | +LL - let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc(0); +LL + let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc { 0: 0 }; + | + +error[E0575]: expected tuple struct or tuple variant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:13:9 + | +LL | let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc(0); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as tuple pattern +help: use struct pattern instead + | +LL - let <T<1> as Trait>::Assoc(_a) = <T<1> as Trait>::Assoc(0); +LL + let <T<1> as Trait>::Assoc { 0: _a } = <T<1> as Trait>::Assoc(0); + | + +error[E0575]: expected method or associated constant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:16:42 + | +LL | let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc(0, 1); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as a constructor +help: use struct expression instead + | +LL - let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc(0, 1); 
+LL + let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc { 0: 0, 1: 1 }; + | + +error[E0575]: expected tuple struct or tuple variant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:16:9 + | +LL | let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc(0, 1); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as tuple pattern +help: use struct pattern instead + | +LL - let <T<2> as Trait>::Assoc(_a, _b) = <T<2> as Trait>::Assoc(0, 1); +LL + let <T<2> as Trait>::Assoc { 0: _a, 1: _b } = <T<2> as Trait>::Assoc(0, 1); + | + +error[E0575]: expected method or associated constant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:19:62 + | +LL | let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc(0, 1, 2); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as a constructor +help: use struct expression instead + | +LL - let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc(0, 1, 2); +LL + let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc { 0: 0, 1: 1, 2: 2 }; + | + +error[E0575]: expected tuple struct or tuple variant, found associated type `Trait::Assoc` + --> $DIR/tuple-struct-expr-pat.rs:19:9 + | +LL | let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc(0, 1, 2); + | ^^^^^^^^^^^^^^^^^^^^^^ + | + = note: can't use a type alias as tuple pattern +help: use struct pattern instead + | +LL - let <T<3> as Trait>::Assoc(ref _a, ref mut _b, mut _c) = <T<3> as Trait>::Assoc(0, 1, 2); +LL + let <T<3> as Trait>::Assoc { 0: ref _a, 1: ref mut _b, 2: mut _c } = <T<3> as Trait>::Assoc(0, 1, 2); + | + +error: aborting due to 8 previous errors + +For more information about this error, try `rustc --explain E0575`. diff --git a/tests/ui/autoderef-full-lval.rs b/tests/ui/autoderef-full-lval.rs deleted file mode 100644 index 0fadc5c9827..00000000000 --- a/tests/ui/autoderef-full-lval.rs +++ /dev/null @@ -1,25 +0,0 @@ -struct Clam { - x: Box<isize>, - y: Box<isize>, -} - - - -struct Fish { - a: Box<isize>, -} - -fn main() { - let a: Clam = Clam{ x: Box::new(1), y: Box::new(2) }; - let b: Clam = Clam{ x: Box::new(10), y: Box::new(20) }; - let z: isize = a.x + b.y; - //~^ ERROR cannot add `Box<isize>` to `Box<isize>` - println!("{}", z); - assert_eq!(z, 21); - let forty: Fish = Fish{ a: Box::new(40) }; - let two: Fish = Fish{ a: Box::new(2) }; - let answer: isize = forty.a + two.a; - //~^ ERROR cannot add `Box<isize>` to `Box<isize>` - println!("{}", answer); - assert_eq!(answer, 42); -} diff --git a/tests/ui/autoref-autoderef/autoderef-box-no-add.rs b/tests/ui/autoref-autoderef/autoderef-box-no-add.rs new file mode 100644 index 00000000000..f8085c1ae96 --- /dev/null +++ b/tests/ui/autoref-autoderef/autoderef-box-no-add.rs @@ -0,0 +1,35 @@ +//! Tests that auto-dereferencing does not allow addition of `Box<isize>` values. +//! +//! This test ensures that `Box<isize>` fields in structs (`Clam` and `Fish`) are not +//! automatically dereferenced to `isize` during addition operations, as `Box<isize>` +//! does not implement the `Add` trait. 
+ +struct Clam { + x: Box<isize>, + y: Box<isize>, +} + +struct Fish { + a: Box<isize>, +} + +fn main() { + let a: Clam = Clam { + x: Box::new(1), + y: Box::new(2), + }; + let b: Clam = Clam { + x: Box::new(10), + y: Box::new(20), + }; + let z: isize = a.x + b.y; + //~^ ERROR cannot add `Box<isize>` to `Box<isize>` + println!("{}", z); + assert_eq!(z, 21); + let forty: Fish = Fish { a: Box::new(40) }; + let two: Fish = Fish { a: Box::new(2) }; + let answer: isize = forty.a + two.a; + //~^ ERROR cannot add `Box<isize>` to `Box<isize>` + println!("{}", answer); + assert_eq!(answer, 42); +} diff --git a/tests/ui/autoderef-full-lval.stderr b/tests/ui/autoref-autoderef/autoderef-box-no-add.stderr index d90238a7fb2..20ef3352831 100644 --- a/tests/ui/autoderef-full-lval.stderr +++ b/tests/ui/autoref-autoderef/autoderef-box-no-add.stderr @@ -1,5 +1,5 @@ error[E0369]: cannot add `Box<isize>` to `Box<isize>` - --> $DIR/autoderef-full-lval.rs:15:24 + --> $DIR/autoderef-box-no-add.rs:25:24 | LL | let z: isize = a.x + b.y; | --- ^ --- Box<isize> @@ -13,7 +13,7 @@ note: the foreign item type `Box<isize>` doesn't implement `Add` = note: not implement `Add` error[E0369]: cannot add `Box<isize>` to `Box<isize>` - --> $DIR/autoderef-full-lval.rs:21:33 + --> $DIR/autoderef-box-no-add.rs:31:33 | LL | let answer: isize = forty.a + two.a; | ------- ^ ----- Box<isize> diff --git a/tests/ui/cast/func-pointer-issue-140491.rs b/tests/ui/cast/func-pointer-issue-140491.rs new file mode 100644 index 00000000000..d5d86a66f5a --- /dev/null +++ b/tests/ui/cast/func-pointer-issue-140491.rs @@ -0,0 +1,7 @@ +fn my_fn(event: &Event<'_>) {} + +struct Event<'a>(&'a ()); + +fn main() { + const ptr: &fn(&Event<'_>) = &my_fn as _; //~ ERROR non-primitive cast: `&for<'a, 'b> fn(&'a Event<'b>) {my_fn}` as `&for<'a, 'b> fn(&'a Event<'b>)` [E0605] +} diff --git a/tests/ui/cast/func-pointer-issue-140491.stderr b/tests/ui/cast/func-pointer-issue-140491.stderr new file mode 100644 index 00000000000..e1c07010e69 --- /dev/null +++ b/tests/ui/cast/func-pointer-issue-140491.stderr @@ -0,0 +1,11 @@ +error[E0605]: non-primitive cast: `&for<'a, 'b> fn(&'a Event<'b>) {my_fn}` as `&for<'a, 'b> fn(&'a Event<'b>)` + --> $DIR/func-pointer-issue-140491.rs:6:34 + | +LL | ..._>) = &my_fn as _; + | ^^^^^^^^^^^ an `as` expression can only be used to convert between primitive types or to coerce to a specific trait object + | + = note: casting reference expression `&my_fn` because `&` binds tighter than `as` + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0605`. 
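For context (a sketch, not part of this patch): the E0605 error in the new `func-pointer-issue-140491` test arises because `&` binds tighter than `as`, so the cast applies to the reference `&my_fn` rather than to the function item. A minimal compiling variant, reusing the test's `my_fn` and `Event` names but with a plain `let` binding instead of the `const`, casts the function item to a `fn` pointer first and then takes the reference:

fn my_fn(_event: &Event<'_>) {}

struct Event<'a>(&'a ());

fn main() {
    // Parenthesize so the cast produces a `fn` pointer before `&` is applied.
    let ptr: &fn(&Event<'_>) = &(my_fn as fn(&Event<'_>));
    let _ = ptr;
}

The test itself keeps the un-parenthesized form on purpose, since it exercises the new "`&` binds tighter than `as`" diagnostic note.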
diff --git a/tests/ui/closures/2229_closure_analysis/run_pass/lit-pattern-matching-with-methods.rs b/tests/ui/closures/2229_closure_analysis/run_pass/lit-pattern-matching-with-methods.rs index 7a4d7d9a81e..afb16cf58e8 100644 --- a/tests/ui/closures/2229_closure_analysis/run_pass/lit-pattern-matching-with-methods.rs +++ b/tests/ui/closures/2229_closure_analysis/run_pass/lit-pattern-matching-with-methods.rs @@ -14,14 +14,14 @@ fn main() { map.insert("c", ()); { - let mut it = map.extract_if(|_, _| true); + let mut it = map.extract_if(.., |_, _| true); catch_unwind(AssertUnwindSafe(|| while it.next().is_some() {})).unwrap_err(); let result = catch_unwind(AssertUnwindSafe(|| it.next())); assert!(matches!(result, Ok(None))); } { - let mut it = map.extract_if(|_, _| true); + let mut it = map.extract_if(.., |_, _| true); catch_unwind(AssertUnwindSafe(|| while let Some(_) = it.next() {})).unwrap_err(); let result = catch_unwind(AssertUnwindSafe(|| it.next())); assert!(matches!(result, Ok(None))); diff --git a/tests/ui/coercion/issue-73886.stderr b/tests/ui/coercion/issue-73886.stderr index 0d4c90017cf..a287aa29e11 100644 --- a/tests/ui/coercion/issue-73886.stderr +++ b/tests/ui/coercion/issue-73886.stderr @@ -3,6 +3,8 @@ error[E0605]: non-primitive cast: `&&[i32; 1]` as `&[_]` | LL | let _ = &&[0] as &[_]; | ^^^^^^^^^^^^^ an `as` expression can only be used to convert between primitive types or to coerce to a specific trait object + | + = note: casting reference expression `&&[0]` because `&` binds tighter than `as` error[E0605]: non-primitive cast: `u32` as `Option<_>` --> $DIR/issue-73886.rs:4:13 diff --git a/tests/ui/const-ptr/allowed_slices.rs b/tests/ui/const-ptr/allowed_slices.rs index e5b9966c609..23f63ff5feb 100644 --- a/tests/ui/const-ptr/allowed_slices.rs +++ b/tests/ui/const-ptr/allowed_slices.rs @@ -26,7 +26,7 @@ pub static S5: &[MaybeUninit<u8>] = unsafe { from_raw_parts((&D1) as *const _ as // is valid as [bool; 4], so this is not UB (it's basically a transmute) pub static S6: &[bool] = unsafe { from_raw_parts((&D0) as *const _ as _, 4) }; -// Structs are considered single allocated objects, +// Structs are considered single allocations, // as long as you don't reinterpret padding as initialized // data everything is ok. 
pub static S7: &[u16] = unsafe { diff --git a/tests/ui/delegation/bad-resolve.stderr b/tests/ui/delegation/bad-resolve.stderr index 966387e1d61..fc6811292a6 100644 --- a/tests/ui/delegation/bad-resolve.stderr +++ b/tests/ui/delegation/bad-resolve.stderr @@ -44,9 +44,7 @@ error[E0575]: expected method or associated constant, found associated type `Tra --> $DIR/bad-resolve.rs:27:11 | LL | reuse <F as Trait>::Type; - | ^^^^^^^^^^^^^^^^^^ - | - = note: can't use a type alias as a constructor + | ^^^^^^^^^^^^^^^^^^ not a method or associated constant error[E0576]: cannot find method or associated constant `baz` in trait `Trait` --> $DIR/bad-resolve.rs:30:25 diff --git a/tests/ui/delegation/glob-non-fn.stderr b/tests/ui/delegation/glob-non-fn.stderr index 4b918c53b84..f63c8e88c6f 100644 --- a/tests/ui/delegation/glob-non-fn.stderr +++ b/tests/ui/delegation/glob-non-fn.stderr @@ -38,9 +38,7 @@ error[E0423]: expected function, found associated type `Trait::Type` --> $DIR/glob-non-fn.rs:30:11 | LL | reuse Trait::* { &self.0 } - | ^^^^^ - | - = note: can't use a type alias as a constructor + | ^^^^^ not a function error[E0046]: not all trait items implemented, missing: `CONST`, `Type`, `method` --> $DIR/glob-non-fn.rs:29:1 diff --git a/tests/ui/bare-fn-implements-fn-mut.rs b/tests/ui/functions-closures/bare-fn-implements-fn-mut.rs index 49b31f28f8a..52d5ad3d0f7 100644 --- a/tests/ui/bare-fn-implements-fn-mut.rs +++ b/tests/ui/functions-closures/bare-fn-implements-fn-mut.rs @@ -1,3 +1,7 @@ +//! Tests that bare functions implement the `FnMut` trait. +//! +//! See <https://github.com/rust-lang/rust/issues/15448>. + //@ run-pass fn call_f<F:FnMut()>(mut f: F) { diff --git a/tests/ui/big-literals.rs b/tests/ui/lint/overflowing-literals-valid.rs index d2f447a595c..08aa092ee71 100644 --- a/tests/ui/big-literals.rs +++ b/tests/ui/lint/overflowing-literals-valid.rs @@ -1,5 +1,7 @@ +//! Test that valid large numeric literals do not trigger the `overflowing_literals` lint. + //@ run-pass -// Catch mistakes in the overflowing literals lint. + #![deny(overflowing_literals)] pub fn main() { diff --git a/tests/ui/auto-ref-slice-plus-ref.rs b/tests/ui/methods/vec-autoderef-autoref.rs index 00b279d3226..38c0ba8574b 100644 --- a/tests/ui/auto-ref-slice-plus-ref.rs +++ b/tests/ui/methods/vec-autoderef-autoref.rs @@ -1,8 +1,7 @@ -fn main() { - - // Testing that method lookup does not automatically borrow - // vectors to slices then automatically create a self reference. +//! Test that method resolution does not autoderef `Vec` +//! into a slice or perform additional autorefs. 
+fn main() { let mut a = vec![0]; a.test_mut(); //~ ERROR no method named `test_mut` found a.test(); //~ ERROR no method named `test` found diff --git a/tests/ui/auto-ref-slice-plus-ref.stderr b/tests/ui/methods/vec-autoderef-autoref.stderr index 806c1ee064f..61c3bcc5b3b 100644 --- a/tests/ui/auto-ref-slice-plus-ref.stderr +++ b/tests/ui/methods/vec-autoderef-autoref.stderr @@ -1,12 +1,12 @@ error[E0599]: no method named `test_mut` found for struct `Vec<{integer}>` in the current scope - --> $DIR/auto-ref-slice-plus-ref.rs:7:7 + --> $DIR/vec-autoderef-autoref.rs:6:7 | LL | a.test_mut(); | ^^^^^^^^ | = help: items from traits can only be used if the trait is implemented and in scope note: `MyIter` defines an item `test_mut`, perhaps you need to implement it - --> $DIR/auto-ref-slice-plus-ref.rs:14:1 + --> $DIR/vec-autoderef-autoref.rs:13:1 | LL | trait MyIter { | ^^^^^^^^^^^^ @@ -14,40 +14,40 @@ help: there is a method `get_mut` with a similar name, but with different argume --> $SRC_DIR/core/src/slice/mod.rs:LL:COL error[E0599]: no method named `test` found for struct `Vec<{integer}>` in the current scope - --> $DIR/auto-ref-slice-plus-ref.rs:8:7 + --> $DIR/vec-autoderef-autoref.rs:7:7 | LL | a.test(); | ^^^^ method not found in `Vec<{integer}>` | = help: items from traits can only be used if the trait is implemented and in scope note: `MyIter` defines an item `test`, perhaps you need to implement it - --> $DIR/auto-ref-slice-plus-ref.rs:14:1 + --> $DIR/vec-autoderef-autoref.rs:13:1 | LL | trait MyIter { | ^^^^^^^^^^^^ error[E0599]: no method named `test` found for array `[{integer}; 1]` in the current scope - --> $DIR/auto-ref-slice-plus-ref.rs:10:11 + --> $DIR/vec-autoderef-autoref.rs:9:11 | LL | ([1]).test(); | ^^^^ method not found in `[{integer}; 1]` | = help: items from traits can only be used if the trait is implemented and in scope note: `MyIter` defines an item `test`, perhaps you need to implement it - --> $DIR/auto-ref-slice-plus-ref.rs:14:1 + --> $DIR/vec-autoderef-autoref.rs:13:1 | LL | trait MyIter { | ^^^^^^^^^^^^ error[E0599]: no method named `test` found for reference `&[{integer}; 1]` in the current scope - --> $DIR/auto-ref-slice-plus-ref.rs:11:12 + --> $DIR/vec-autoderef-autoref.rs:10:12 | LL | (&[1]).test(); | ^^^^ method not found in `&[{integer}; 1]` | = help: items from traits can only be used if the trait is implemented and in scope note: `MyIter` defines an item `test`, perhaps you need to implement it - --> $DIR/auto-ref-slice-plus-ref.rs:14:1 + --> $DIR/vec-autoderef-autoref.rs:13:1 | LL | trait MyIter { | ^^^^^^^^^^^^ diff --git a/tests/ui/namespace/namespace-mix.stderr b/tests/ui/namespace/namespace-mix.stderr index 412ea4aba30..200d31cc710 100644 --- a/tests/ui/namespace/namespace-mix.stderr +++ b/tests/ui/namespace/namespace-mix.stderr @@ -7,7 +7,6 @@ LL | pub struct TS(); LL | check(m1::S); | ^^^^^ | - = note: can't use a type alias as a constructor help: a tuple struct with a similar name exists | LL | check(m1::TS); @@ -35,7 +34,6 @@ LL | check(xm1::S); LL | pub struct TS(); | ------------- similarly named tuple struct `TS` defined here | - = note: can't use a type alias as a constructor help: a tuple struct with a similar name exists | LL | check(xm1::TS); @@ -61,7 +59,6 @@ LL | TV(), LL | check(m7::V); | ^^^^^ | - = note: can't use a type alias as a constructor help: a tuple variant with a similar name exists | LL | check(m7::TV); @@ -89,7 +86,6 @@ LL | check(xm7::V); LL | TV(), | -- similarly named tuple variant `TV` defined here | - = note: can't use 
a type alias as a constructor help: a tuple variant with a similar name exists | LL | check(xm7::TV); diff --git a/tests/ui/parser/match-arm-comma-typo-issue-140991.fixed b/tests/ui/parser/match-arm-comma-typo-issue-140991.fixed new file mode 100644 index 00000000000..4d99e4bdc1c --- /dev/null +++ b/tests/ui/parser/match-arm-comma-typo-issue-140991.fixed @@ -0,0 +1,24 @@ +//@ run-rustfix + +pub enum Foo { + X, Y +} + +pub fn typo1(foo: Foo) -> Foo { + use Foo::*; + match foo { + X => Y, + Y => X, //~ ERROR expected one of + } +} + +pub fn typo2(foo: Foo) -> Foo { + use Foo::*; + match foo { + X => Y, + Y => X, //~ ERROR expected one of + } +} + + +fn main() { } diff --git a/tests/ui/parser/match-arm-comma-typo-issue-140991.rs b/tests/ui/parser/match-arm-comma-typo-issue-140991.rs new file mode 100644 index 00000000000..3baf1ff3fa1 --- /dev/null +++ b/tests/ui/parser/match-arm-comma-typo-issue-140991.rs @@ -0,0 +1,24 @@ +//@ run-rustfix + +pub enum Foo { + X, Y +} + +pub fn typo1(foo: Foo) -> Foo { + use Foo::*; + match foo { + X => Y. + Y => X, //~ ERROR expected one of + } +} + +pub fn typo2(foo: Foo) -> Foo { + use Foo::*; + match foo { + X => Y/ + Y => X, //~ ERROR expected one of + } +} + + +fn main() { } diff --git a/tests/ui/parser/match-arm-comma-typo-issue-140991.stderr b/tests/ui/parser/match-arm-comma-typo-issue-140991.stderr new file mode 100644 index 00000000000..19532d14245 --- /dev/null +++ b/tests/ui/parser/match-arm-comma-typo-issue-140991.stderr @@ -0,0 +1,26 @@ +error: expected one of `(`, `,`, `.`, `::`, `?`, `}`, or an operator, found `=>` + --> $DIR/match-arm-comma-typo-issue-140991.rs:11:11 + | +LL | Y => X, + | ^^ expected one of 7 possible tokens + | +help: you might have meant to write a `,` to end this `match` arm + | +LL - X => Y. +LL + X => Y, + | + +error: expected one of `!`, `,`, `.`, `::`, `?`, `{`, `}`, or an operator, found `=>` + --> $DIR/match-arm-comma-typo-issue-140991.rs:19:11 + | +LL | Y => X, + | ^^ expected one of 8 possible tokens + | +help: you might have meant to write a `,` to end this `match` arm + | +LL - X => Y/ +LL + X => Y, + | + +error: aborting due to 2 previous errors + diff --git a/tests/ui/resolve/tuple-struct-alias.stderr b/tests/ui/resolve/tuple-struct-alias.stderr index a739ea43eed..bf026a499b8 100644 --- a/tests/ui/resolve/tuple-struct-alias.stderr +++ b/tests/ui/resolve/tuple-struct-alias.stderr @@ -6,8 +6,6 @@ LL | struct S(u8, u16); ... LL | A(..) => {} | ^ help: a tuple struct with a similar name exists: `S` - | - = note: can't use a type alias as a constructor error[E0423]: expected function, tuple struct or tuple variant, found type alias `A` --> $DIR/tuple-struct-alias.rs:5:13 @@ -16,9 +14,18 @@ LL | struct S(u8, u16); | ------------------ similarly named tuple struct `S` defined here ... 
LL | let s = A(0, 1); - | ^ help: a tuple struct with a similar name exists: `S` + | ^ + | +help: a tuple struct with a similar name exists + | +LL - let s = A(0, 1); +LL + let s = S(0, 1); + | +help: you might have meant to use `:` for type annotation + | +LL - let s = A(0, 1); +LL + let s: A(0, 1); | - = note: can't use a type alias as a constructor error: aborting due to 2 previous errors diff --git a/tests/ui/stats/input-stats.rs b/tests/ui/stats/input-stats.rs index f19a53cc610..e760e2894e3 100644 --- a/tests/ui/stats/input-stats.rs +++ b/tests/ui/stats/input-stats.rs @@ -3,6 +3,10 @@ //@ only-64bit // layout randomization affects the hir stat output //@ needs-deterministic-layouts +// +// Filter out the percentages because a change to a single count can affect +// many or all percentages, which makes the diffs hard to read. +//@ normalize-stderr: "\([0-9 ][0-9]\.[0-9]%\)" -> "(NN.N%)" // Type layouts sometimes change. When that happens, until the next bootstrap // bump occurs, stage1 and stage2 will give different outputs for this test. diff --git a/tests/ui/stats/input-stats.stderr b/tests/ui/stats/input-stats.stderr index b369af62f87..58ab1a72556 100644 --- a/tests/ui/stats/input-stats.stderr +++ b/tests/ui/stats/input-stats.stderr @@ -1,178 +1,178 @@ ast-stats-1 PRE EXPANSION AST STATS ast-stats-1 Name Accumulated Size Count Item Size ast-stats-1 ---------------------------------------------------------------- -ast-stats-1 Crate 40 ( 0.6%) 1 40 -ast-stats-1 GenericArgs 40 ( 0.6%) 1 40 -ast-stats-1 - AngleBracketed 40 ( 0.6%) 1 -ast-stats-1 ExprField 48 ( 0.7%) 1 48 -ast-stats-1 Attribute 64 ( 0.9%) 2 32 -ast-stats-1 - DocComment 32 ( 0.5%) 1 -ast-stats-1 - Normal 32 ( 0.5%) 1 -ast-stats-1 WherePredicate 72 ( 1.1%) 1 72 -ast-stats-1 - BoundPredicate 72 ( 1.1%) 1 -ast-stats-1 ForeignItem 80 ( 1.2%) 1 80 -ast-stats-1 - Fn 80 ( 1.2%) 1 -ast-stats-1 Arm 96 ( 1.4%) 2 48 -ast-stats-1 Local 96 ( 1.4%) 1 96 -ast-stats-1 FnDecl 120 ( 1.8%) 5 24 -ast-stats-1 Param 160 ( 2.4%) 4 40 -ast-stats-1 Stmt 160 ( 2.4%) 5 32 -ast-stats-1 - Let 32 ( 0.5%) 1 -ast-stats-1 - MacCall 32 ( 0.5%) 1 -ast-stats-1 - Expr 96 ( 1.4%) 3 -ast-stats-1 Block 192 ( 2.8%) 6 32 -ast-stats-1 FieldDef 208 ( 3.1%) 2 104 -ast-stats-1 Variant 208 ( 3.1%) 2 104 -ast-stats-1 AssocItem 320 ( 4.7%) 4 80 -ast-stats-1 - Fn 160 ( 2.4%) 2 -ast-stats-1 - Type 160 ( 2.4%) 2 -ast-stats-1 GenericBound 352 ( 5.2%) 4 88 -ast-stats-1 - Trait 352 ( 5.2%) 4 -ast-stats-1 GenericParam 480 ( 7.1%) 5 96 -ast-stats-1 Pat 504 ( 7.5%) 7 72 -ast-stats-1 - Struct 72 ( 1.1%) 1 -ast-stats-1 - Wild 72 ( 1.1%) 1 -ast-stats-1 - Ident 360 ( 5.3%) 5 -ast-stats-1 Expr 576 ( 8.5%) 8 72 -ast-stats-1 - Match 72 ( 1.1%) 1 -ast-stats-1 - Path 72 ( 1.1%) 1 -ast-stats-1 - Struct 72 ( 1.1%) 1 -ast-stats-1 - Lit 144 ( 2.1%) 2 -ast-stats-1 - Block 216 ( 3.2%) 3 -ast-stats-1 PathSegment 744 (11.0%) 31 24 -ast-stats-1 Ty 896 (13.3%) 14 64 -ast-stats-1 - Ptr 64 ( 0.9%) 1 -ast-stats-1 - Ref 64 ( 0.9%) 1 -ast-stats-1 - ImplicitSelf 128 ( 1.9%) 2 -ast-stats-1 - Path 640 ( 9.5%) 10 -ast-stats-1 Item 1_296 (19.2%) 9 144 -ast-stats-1 - Enum 144 ( 2.1%) 1 -ast-stats-1 - ForeignMod 144 ( 2.1%) 1 -ast-stats-1 - Impl 144 ( 2.1%) 1 -ast-stats-1 - Trait 144 ( 2.1%) 1 -ast-stats-1 - Fn 288 ( 4.3%) 2 -ast-stats-1 - Use 432 ( 6.4%) 3 +ast-stats-1 Crate 40 (NN.N%) 1 40 +ast-stats-1 GenericArgs 40 (NN.N%) 1 40 +ast-stats-1 - AngleBracketed 40 (NN.N%) 1 +ast-stats-1 ExprField 48 (NN.N%) 1 48 +ast-stats-1 Attribute 64 (NN.N%) 2 32 +ast-stats-1 - DocComment 32 (NN.N%) 1 +ast-stats-1 - 
Normal 32 (NN.N%) 1 +ast-stats-1 WherePredicate 72 (NN.N%) 1 72 +ast-stats-1 - BoundPredicate 72 (NN.N%) 1 +ast-stats-1 ForeignItem 80 (NN.N%) 1 80 +ast-stats-1 - Fn 80 (NN.N%) 1 +ast-stats-1 Arm 96 (NN.N%) 2 48 +ast-stats-1 Local 96 (NN.N%) 1 96 +ast-stats-1 FnDecl 120 (NN.N%) 5 24 +ast-stats-1 Param 160 (NN.N%) 4 40 +ast-stats-1 Stmt 160 (NN.N%) 5 32 +ast-stats-1 - Let 32 (NN.N%) 1 +ast-stats-1 - MacCall 32 (NN.N%) 1 +ast-stats-1 - Expr 96 (NN.N%) 3 +ast-stats-1 Block 192 (NN.N%) 6 32 +ast-stats-1 FieldDef 208 (NN.N%) 2 104 +ast-stats-1 Variant 208 (NN.N%) 2 104 +ast-stats-1 AssocItem 320 (NN.N%) 4 80 +ast-stats-1 - Fn 160 (NN.N%) 2 +ast-stats-1 - Type 160 (NN.N%) 2 +ast-stats-1 GenericBound 352 (NN.N%) 4 88 +ast-stats-1 - Trait 352 (NN.N%) 4 +ast-stats-1 GenericParam 480 (NN.N%) 5 96 +ast-stats-1 Pat 504 (NN.N%) 7 72 +ast-stats-1 - Struct 72 (NN.N%) 1 +ast-stats-1 - Wild 72 (NN.N%) 1 +ast-stats-1 - Ident 360 (NN.N%) 5 +ast-stats-1 Expr 576 (NN.N%) 8 72 +ast-stats-1 - Match 72 (NN.N%) 1 +ast-stats-1 - Path 72 (NN.N%) 1 +ast-stats-1 - Struct 72 (NN.N%) 1 +ast-stats-1 - Lit 144 (NN.N%) 2 +ast-stats-1 - Block 216 (NN.N%) 3 +ast-stats-1 PathSegment 744 (NN.N%) 31 24 +ast-stats-1 Ty 896 (NN.N%) 14 64 +ast-stats-1 - Ptr 64 (NN.N%) 1 +ast-stats-1 - Ref 64 (NN.N%) 1 +ast-stats-1 - ImplicitSelf 128 (NN.N%) 2 +ast-stats-1 - Path 640 (NN.N%) 10 +ast-stats-1 Item 1_296 (NN.N%) 9 144 +ast-stats-1 - Enum 144 (NN.N%) 1 +ast-stats-1 - ForeignMod 144 (NN.N%) 1 +ast-stats-1 - Impl 144 (NN.N%) 1 +ast-stats-1 - Trait 144 (NN.N%) 1 +ast-stats-1 - Fn 288 (NN.N%) 2 +ast-stats-1 - Use 432 (NN.N%) 3 ast-stats-1 ---------------------------------------------------------------- ast-stats-1 Total 6_752 116 ast-stats-1 ast-stats-2 POST EXPANSION AST STATS ast-stats-2 Name Accumulated Size Count Item Size ast-stats-2 ---------------------------------------------------------------- -ast-stats-2 Crate 40 ( 0.5%) 1 40 -ast-stats-2 GenericArgs 40 ( 0.5%) 1 40 -ast-stats-2 - AngleBracketed 40 ( 0.5%) 1 -ast-stats-2 ExprField 48 ( 0.6%) 1 48 -ast-stats-2 WherePredicate 72 ( 1.0%) 1 72 -ast-stats-2 - BoundPredicate 72 ( 1.0%) 1 -ast-stats-2 ForeignItem 80 ( 1.1%) 1 80 -ast-stats-2 - Fn 80 ( 1.1%) 1 -ast-stats-2 Arm 96 ( 1.3%) 2 48 -ast-stats-2 Local 96 ( 1.3%) 1 96 -ast-stats-2 FnDecl 120 ( 1.6%) 5 24 -ast-stats-2 InlineAsm 120 ( 1.6%) 1 120 -ast-stats-2 Attribute 128 ( 1.7%) 4 32 -ast-stats-2 - DocComment 32 ( 0.4%) 1 -ast-stats-2 - Normal 96 ( 1.3%) 3 -ast-stats-2 Param 160 ( 2.2%) 4 40 -ast-stats-2 Stmt 160 ( 2.2%) 5 32 -ast-stats-2 - Let 32 ( 0.4%) 1 -ast-stats-2 - Semi 32 ( 0.4%) 1 -ast-stats-2 - Expr 96 ( 1.3%) 3 -ast-stats-2 Block 192 ( 2.6%) 6 32 -ast-stats-2 FieldDef 208 ( 2.8%) 2 104 -ast-stats-2 Variant 208 ( 2.8%) 2 104 -ast-stats-2 AssocItem 320 ( 4.3%) 4 80 -ast-stats-2 - Fn 160 ( 2.2%) 2 -ast-stats-2 - Type 160 ( 2.2%) 2 -ast-stats-2 GenericBound 352 ( 4.7%) 4 88 -ast-stats-2 - Trait 352 ( 4.7%) 4 -ast-stats-2 GenericParam 480 ( 6.5%) 5 96 -ast-stats-2 Pat 504 ( 6.8%) 7 72 -ast-stats-2 - Struct 72 ( 1.0%) 1 -ast-stats-2 - Wild 72 ( 1.0%) 1 -ast-stats-2 - Ident 360 ( 4.9%) 5 -ast-stats-2 Expr 648 ( 8.7%) 9 72 -ast-stats-2 - InlineAsm 72 ( 1.0%) 1 -ast-stats-2 - Match 72 ( 1.0%) 1 -ast-stats-2 - Path 72 ( 1.0%) 1 -ast-stats-2 - Struct 72 ( 1.0%) 1 -ast-stats-2 - Lit 144 ( 1.9%) 2 -ast-stats-2 - Block 216 ( 2.9%) 3 -ast-stats-2 PathSegment 864 (11.7%) 36 24 -ast-stats-2 Ty 896 (12.1%) 14 64 -ast-stats-2 - Ptr 64 ( 0.9%) 1 -ast-stats-2 - Ref 64 ( 0.9%) 1 -ast-stats-2 - ImplicitSelf 128 ( 1.7%) 2 -ast-stats-2 - 
Path 640 ( 8.6%) 10 -ast-stats-2 Item 1_584 (21.4%) 11 144 -ast-stats-2 - Enum 144 ( 1.9%) 1 -ast-stats-2 - ExternCrate 144 ( 1.9%) 1 -ast-stats-2 - ForeignMod 144 ( 1.9%) 1 -ast-stats-2 - Impl 144 ( 1.9%) 1 -ast-stats-2 - Trait 144 ( 1.9%) 1 -ast-stats-2 - Fn 288 ( 3.9%) 2 -ast-stats-2 - Use 576 ( 7.8%) 4 +ast-stats-2 Crate 40 (NN.N%) 1 40 +ast-stats-2 GenericArgs 40 (NN.N%) 1 40 +ast-stats-2 - AngleBracketed 40 (NN.N%) 1 +ast-stats-2 ExprField 48 (NN.N%) 1 48 +ast-stats-2 WherePredicate 72 (NN.N%) 1 72 +ast-stats-2 - BoundPredicate 72 (NN.N%) 1 +ast-stats-2 ForeignItem 80 (NN.N%) 1 80 +ast-stats-2 - Fn 80 (NN.N%) 1 +ast-stats-2 Arm 96 (NN.N%) 2 48 +ast-stats-2 Local 96 (NN.N%) 1 96 +ast-stats-2 FnDecl 120 (NN.N%) 5 24 +ast-stats-2 InlineAsm 120 (NN.N%) 1 120 +ast-stats-2 Attribute 128 (NN.N%) 4 32 +ast-stats-2 - DocComment 32 (NN.N%) 1 +ast-stats-2 - Normal 96 (NN.N%) 3 +ast-stats-2 Param 160 (NN.N%) 4 40 +ast-stats-2 Stmt 160 (NN.N%) 5 32 +ast-stats-2 - Let 32 (NN.N%) 1 +ast-stats-2 - Semi 32 (NN.N%) 1 +ast-stats-2 - Expr 96 (NN.N%) 3 +ast-stats-2 Block 192 (NN.N%) 6 32 +ast-stats-2 FieldDef 208 (NN.N%) 2 104 +ast-stats-2 Variant 208 (NN.N%) 2 104 +ast-stats-2 AssocItem 320 (NN.N%) 4 80 +ast-stats-2 - Fn 160 (NN.N%) 2 +ast-stats-2 - Type 160 (NN.N%) 2 +ast-stats-2 GenericBound 352 (NN.N%) 4 88 +ast-stats-2 - Trait 352 (NN.N%) 4 +ast-stats-2 GenericParam 480 (NN.N%) 5 96 +ast-stats-2 Pat 504 (NN.N%) 7 72 +ast-stats-2 - Struct 72 (NN.N%) 1 +ast-stats-2 - Wild 72 (NN.N%) 1 +ast-stats-2 - Ident 360 (NN.N%) 5 +ast-stats-2 Expr 648 (NN.N%) 9 72 +ast-stats-2 - InlineAsm 72 (NN.N%) 1 +ast-stats-2 - Match 72 (NN.N%) 1 +ast-stats-2 - Path 72 (NN.N%) 1 +ast-stats-2 - Struct 72 (NN.N%) 1 +ast-stats-2 - Lit 144 (NN.N%) 2 +ast-stats-2 - Block 216 (NN.N%) 3 +ast-stats-2 PathSegment 864 (NN.N%) 36 24 +ast-stats-2 Ty 896 (NN.N%) 14 64 +ast-stats-2 - Ptr 64 (NN.N%) 1 +ast-stats-2 - Ref 64 (NN.N%) 1 +ast-stats-2 - ImplicitSelf 128 (NN.N%) 2 +ast-stats-2 - Path 640 (NN.N%) 10 +ast-stats-2 Item 1_584 (NN.N%) 11 144 +ast-stats-2 - Enum 144 (NN.N%) 1 +ast-stats-2 - ExternCrate 144 (NN.N%) 1 +ast-stats-2 - ForeignMod 144 (NN.N%) 1 +ast-stats-2 - Impl 144 (NN.N%) 1 +ast-stats-2 - Trait 144 (NN.N%) 1 +ast-stats-2 - Fn 288 (NN.N%) 2 +ast-stats-2 - Use 576 (NN.N%) 4 ast-stats-2 ---------------------------------------------------------------- ast-stats-2 Total 7_416 127 ast-stats-2 hir-stats HIR STATS hir-stats Name Accumulated Size Count Item Size hir-stats ---------------------------------------------------------------- -hir-stats ForeignItemRef 24 ( 0.3%) 1 24 -hir-stats Lifetime 28 ( 0.3%) 1 28 -hir-stats Mod 32 ( 0.4%) 1 32 -hir-stats ExprField 40 ( 0.4%) 1 40 -hir-stats TraitItemRef 56 ( 0.6%) 2 28 -hir-stats GenericArg 64 ( 0.7%) 4 16 -hir-stats - Type 16 ( 0.2%) 1 -hir-stats - Lifetime 48 ( 0.5%) 3 -hir-stats Param 64 ( 0.7%) 2 32 -hir-stats Body 72 ( 0.8%) 3 24 -hir-stats ImplItemRef 72 ( 0.8%) 2 36 -hir-stats InlineAsm 72 ( 0.8%) 1 72 -hir-stats Local 72 ( 0.8%) 1 72 -hir-stats WherePredicate 72 ( 0.8%) 3 24 -hir-stats - BoundPredicate 72 ( 0.8%) 3 -hir-stats Arm 80 ( 0.9%) 2 40 -hir-stats Stmt 96 ( 1.1%) 3 32 -hir-stats - Expr 32 ( 0.4%) 1 -hir-stats - Let 32 ( 0.4%) 1 -hir-stats - Semi 32 ( 0.4%) 1 -hir-stats FnDecl 120 ( 1.3%) 3 40 -hir-stats Attribute 128 ( 1.4%) 4 32 -hir-stats FieldDef 128 ( 1.4%) 2 64 -hir-stats GenericArgs 144 ( 1.6%) 3 48 -hir-stats Variant 144 ( 1.6%) 2 72 -hir-stats GenericBound 256 ( 2.8%) 4 64 -hir-stats - Trait 256 ( 2.8%) 4 -hir-stats Block 288 ( 3.2%) 6 48 -hir-stats Pat 
360 ( 4.0%) 5 72 -hir-stats - Struct 72 ( 0.8%) 1 -hir-stats - Wild 72 ( 0.8%) 1 -hir-stats - Binding 216 ( 2.4%) 3 -hir-stats GenericParam 400 ( 4.5%) 5 80 -hir-stats Generics 560 ( 6.2%) 10 56 -hir-stats Ty 720 ( 8.0%) 15 48 -hir-stats - Ptr 48 ( 0.5%) 1 -hir-stats - Ref 48 ( 0.5%) 1 -hir-stats - Path 624 ( 6.9%) 13 -hir-stats Expr 768 ( 8.5%) 12 64 -hir-stats - InlineAsm 64 ( 0.7%) 1 -hir-stats - Match 64 ( 0.7%) 1 -hir-stats - Path 64 ( 0.7%) 1 -hir-stats - Struct 64 ( 0.7%) 1 -hir-stats - Lit 128 ( 1.4%) 2 -hir-stats - Block 384 ( 4.3%) 6 -hir-stats Item 968 (10.8%) 11 88 -hir-stats - Enum 88 ( 1.0%) 1 -hir-stats - ExternCrate 88 ( 1.0%) 1 -hir-stats - ForeignMod 88 ( 1.0%) 1 -hir-stats - Impl 88 ( 1.0%) 1 -hir-stats - Trait 88 ( 1.0%) 1 -hir-stats - Fn 176 ( 2.0%) 2 -hir-stats - Use 352 ( 3.9%) 4 -hir-stats Path 1_240 (13.8%) 31 40 -hir-stats PathSegment 1_920 (21.4%) 40 48 +hir-stats ForeignItemRef 24 (NN.N%) 1 24 +hir-stats Lifetime 28 (NN.N%) 1 28 +hir-stats Mod 32 (NN.N%) 1 32 +hir-stats ExprField 40 (NN.N%) 1 40 +hir-stats TraitItemRef 56 (NN.N%) 2 28 +hir-stats GenericArg 64 (NN.N%) 4 16 +hir-stats - Type 16 (NN.N%) 1 +hir-stats - Lifetime 48 (NN.N%) 3 +hir-stats Param 64 (NN.N%) 2 32 +hir-stats Body 72 (NN.N%) 3 24 +hir-stats ImplItemRef 72 (NN.N%) 2 36 +hir-stats InlineAsm 72 (NN.N%) 1 72 +hir-stats Local 72 (NN.N%) 1 72 +hir-stats WherePredicate 72 (NN.N%) 3 24 +hir-stats - BoundPredicate 72 (NN.N%) 3 +hir-stats Arm 80 (NN.N%) 2 40 +hir-stats Stmt 96 (NN.N%) 3 32 +hir-stats - Expr 32 (NN.N%) 1 +hir-stats - Let 32 (NN.N%) 1 +hir-stats - Semi 32 (NN.N%) 1 +hir-stats FnDecl 120 (NN.N%) 3 40 +hir-stats Attribute 128 (NN.N%) 4 32 +hir-stats FieldDef 128 (NN.N%) 2 64 +hir-stats GenericArgs 144 (NN.N%) 3 48 +hir-stats Variant 144 (NN.N%) 2 72 +hir-stats GenericBound 256 (NN.N%) 4 64 +hir-stats - Trait 256 (NN.N%) 4 +hir-stats Block 288 (NN.N%) 6 48 +hir-stats Pat 360 (NN.N%) 5 72 +hir-stats - Struct 72 (NN.N%) 1 +hir-stats - Wild 72 (NN.N%) 1 +hir-stats - Binding 216 (NN.N%) 3 +hir-stats GenericParam 400 (NN.N%) 5 80 +hir-stats Generics 560 (NN.N%) 10 56 +hir-stats Ty 720 (NN.N%) 15 48 +hir-stats - Ptr 48 (NN.N%) 1 +hir-stats - Ref 48 (NN.N%) 1 +hir-stats - Path 624 (NN.N%) 13 +hir-stats Expr 768 (NN.N%) 12 64 +hir-stats - InlineAsm 64 (NN.N%) 1 +hir-stats - Match 64 (NN.N%) 1 +hir-stats - Path 64 (NN.N%) 1 +hir-stats - Struct 64 (NN.N%) 1 +hir-stats - Lit 128 (NN.N%) 2 +hir-stats - Block 384 (NN.N%) 6 +hir-stats Item 968 (NN.N%) 11 88 +hir-stats - Enum 88 (NN.N%) 1 +hir-stats - ExternCrate 88 (NN.N%) 1 +hir-stats - ForeignMod 88 (NN.N%) 1 +hir-stats - Impl 88 (NN.N%) 1 +hir-stats - Trait 88 (NN.N%) 1 +hir-stats - Fn 176 (NN.N%) 2 +hir-stats - Use 352 (NN.N%) 4 +hir-stats Path 1_040 (NN.N%) 26 40 +hir-stats PathSegment 1_776 (NN.N%) 37 48 hir-stats ---------------------------------------------------------------- -hir-stats Total 8_988 180 +hir-stats Total 8_644 172 hir-stats diff --git a/tests/ui/bare-static-string.rs b/tests/ui/str/str-static-literal.rs index b71cf38cfe8..61630f0f22b 100644 --- a/tests/ui/bare-static-string.rs +++ b/tests/ui/str/str-static-literal.rs @@ -1,3 +1,5 @@ +//! Check that a bare string literal is typed as a `&'static str` and is usable. 
+ //@ run-pass pub fn main() { diff --git a/tests/ui/threads-sendsync/tls-dont-move-after-init.rs b/tests/ui/threads-sendsync/tls-dont-move-after-init.rs new file mode 100644 index 00000000000..54fcc32e9bd --- /dev/null +++ b/tests/ui/threads-sendsync/tls-dont-move-after-init.rs @@ -0,0 +1,37 @@ +//@ run-pass +//@ needs-threads + +use std::cell::Cell; +use std::thread; + +#[derive(Default)] +struct Foo { + ptr: Cell<*const Foo>, +} + +impl Foo { + fn touch(&self) { + if self.ptr.get().is_null() { + self.ptr.set(self); + } else { + assert!(self.ptr.get() == self); + } + } +} + +impl Drop for Foo { + fn drop(&mut self) { + self.touch(); + } +} + +thread_local!(static FOO: Foo = Foo::default()); + +fn main() { + thread::spawn(|| { + FOO.with(|foo| foo.touch()); + FOO.with(|foo| foo.touch()); + }) + .join() + .unwrap(); +} diff --git a/tests/ui/typeck/consider-borrowing-141810-1.rs b/tests/ui/typeck/consider-borrowing-141810-1.rs new file mode 100644 index 00000000000..94c2d690915 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-1.rs @@ -0,0 +1,9 @@ +fn main() { + let x = if true { + &true + } else if false { //~ ERROR `if` and `else` have incompatible types [E0308] + true //~ HELP consider borrowing here + } else { + true + }; +} diff --git a/tests/ui/typeck/consider-borrowing-141810-1.stderr b/tests/ui/typeck/consider-borrowing-141810-1.stderr new file mode 100644 index 00000000000..9291721ac71 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-1.stderr @@ -0,0 +1,28 @@ +error[E0308]: `if` and `else` have incompatible types + --> $DIR/consider-borrowing-141810-1.rs:4:12 + | +LL | let x = if true { + | ______________- +LL | | &true + | | ----- expected because of this +LL | | } else if false { + | | ____________^ +LL | || true +LL | || } else { +LL | || true +LL | || }; + | || ^ + | ||_____| + | |_____`if` and `else` have incompatible types + | expected `&bool`, found `bool` + | +help: consider borrowing here + | +LL ~ &true +LL | } else { +LL ~ &true + | + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0308`. diff --git a/tests/ui/typeck/consider-borrowing-141810-2.rs b/tests/ui/typeck/consider-borrowing-141810-2.rs new file mode 100644 index 00000000000..e32e689efb7 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-2.rs @@ -0,0 +1,8 @@ +fn main() { + let x = if true { + &() + } else if false { //~ ERROR `if` and `else` have incompatible types [E0308] + } else { + }; + +} diff --git a/tests/ui/typeck/consider-borrowing-141810-2.stderr b/tests/ui/typeck/consider-borrowing-141810-2.stderr new file mode 100644 index 00000000000..dd229897283 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-2.stderr @@ -0,0 +1,19 @@ +error[E0308]: `if` and `else` have incompatible types + --> $DIR/consider-borrowing-141810-2.rs:4:12 + | +LL | let x = if true { + | ______________- +LL | | &() + | | --- expected because of this +LL | | } else if false { + | | ____________^ +LL | || } else { +LL | || }; + | || ^ + | ||_____| + | |_____`if` and `else` have incompatible types + | expected `&()`, found `()` + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0308`. 
diff --git a/tests/ui/typeck/consider-borrowing-141810-3.rs b/tests/ui/typeck/consider-borrowing-141810-3.rs new file mode 100644 index 00000000000..d38828de7c1 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-3.rs @@ -0,0 +1,7 @@ +fn main() { + let x = if true { + &() + } else if false { //~ ERROR `if` and `else` have incompatible types [E0308] + + }; +} diff --git a/tests/ui/typeck/consider-borrowing-141810-3.stderr b/tests/ui/typeck/consider-borrowing-141810-3.stderr new file mode 100644 index 00000000000..0b0c5f191a0 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-3.stderr @@ -0,0 +1,22 @@ +error[E0308]: `if` and `else` have incompatible types + --> $DIR/consider-borrowing-141810-3.rs:4:12 + | +LL | let x = if true { + | ______________- +LL | | &() + | | --- expected because of this +LL | | } else if false { + | | ____________^ +LL | || +LL | || }; + | || ^ + | ||_____| + | |_____`if` and `else` have incompatible types + | expected `&()`, found `()` + | + = note: `if` expressions without `else` evaluate to `()` + = note: consider adding an `else` block that evaluates to the expected type + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0308`. diff --git a/tests/ui/typeck/consider-borrowing-141810-4.rs b/tests/ui/typeck/consider-borrowing-141810-4.rs new file mode 100644 index 00000000000..754af7920a8 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-4.rs @@ -0,0 +1,11 @@ +fn baz(x: &String) {} + +fn bar() { + baz({ + String::from("hi") //~ ERROR mismatched types + }); +} + +fn main() { + bar(); +} diff --git a/tests/ui/typeck/consider-borrowing-141810-4.stderr b/tests/ui/typeck/consider-borrowing-141810-4.stderr new file mode 100644 index 00000000000..80869d4a5d5 --- /dev/null +++ b/tests/ui/typeck/consider-borrowing-141810-4.stderr @@ -0,0 +1,14 @@ +error[E0308]: mismatched types + --> $DIR/consider-borrowing-141810-4.rs:5:9 + | +LL | String::from("hi") + | ^^^^^^^^^^^^^^^^^^ expected `&String`, found `String` + | +help: consider borrowing here + | +LL | &String::from("hi") + | + + +error: aborting due to 1 previous error + +For more information about this error, try `rustc --explain E0308`. diff --git a/tests/ui/ufcs/ufcs-partially-resolved.stderr b/tests/ui/ufcs/ufcs-partially-resolved.stderr index 0a9c190cb35..69d6bd74a73 100644 --- a/tests/ui/ufcs/ufcs-partially-resolved.stderr +++ b/tests/ui/ufcs/ufcs-partially-resolved.stderr @@ -235,8 +235,6 @@ LL | <u8 as Dr>::X; | ^^^^^^^^^^^^- | | | help: an associated function with a similar name exists: `Z` - | - = note: can't use a type alias as a constructor error[E0575]: expected associated type, found associated function `Dr::Z` --> $DIR/ufcs-partially-resolved.rs:54:12 diff --git a/tests/ui/unsafe-binders/unused-lifetimes-2.fixed b/tests/ui/unsafe-binders/unused-lifetimes-2.fixed new file mode 100644 index 00000000000..714a5fdaf03 --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes-2.fixed @@ -0,0 +1,20 @@ +// regression test for #141758 +//@ run-rustfix +//@ check-pass + +#![warn(unused_lifetimes)] +#![allow(incomplete_features, unused_imports, dead_code)] +#![feature(unsafe_binders)] + +use std::unsafe_binder::unwrap_binder; + +#[derive(Copy, Clone)] +pub struct S([usize; 8]); + +// Regression test for <https://github.com/rust-lang/rust/issues/141418>. 
+pub fn by_value(_x: unsafe<'a> &'a S) -> usize { + //~^ WARN lifetime parameter `'b` never used + 0 +} + +fn main() {} diff --git a/tests/ui/unsafe-binders/unused-lifetimes-2.rs b/tests/ui/unsafe-binders/unused-lifetimes-2.rs new file mode 100644 index 00000000000..5b34cf91163 --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes-2.rs @@ -0,0 +1,20 @@ +// regression test for #141758 +//@ run-rustfix +//@ check-pass + +#![warn(unused_lifetimes)] +#![allow(incomplete_features, unused_imports, dead_code)] +#![feature(unsafe_binders)] + +use std::unsafe_binder::unwrap_binder; + +#[derive(Copy, Clone)] +pub struct S([usize; 8]); + +// Regression test for <https://github.com/rust-lang/rust/issues/141418>. +pub fn by_value(_x: unsafe<'a, 'b> &'a S) -> usize { + //~^ WARN lifetime parameter `'b` never used + 0 +} + +fn main() {} diff --git a/tests/ui/unsafe-binders/unused-lifetimes-2.stderr b/tests/ui/unsafe-binders/unused-lifetimes-2.stderr new file mode 100644 index 00000000000..bca8a15d56b --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes-2.stderr @@ -0,0 +1,16 @@ +warning: lifetime parameter `'b` never used + --> $DIR/unused-lifetimes-2.rs:15:32 + | +LL | pub fn by_value(_x: unsafe<'a, 'b> &'a S) -> usize { + | --^^ + | | + | help: elide the unused lifetime + | +note: the lint level is defined here + --> $DIR/unused-lifetimes-2.rs:5:9 + | +LL | #![warn(unused_lifetimes)] + | ^^^^^^^^^^^^^^^^ + +warning: 1 warning emitted + diff --git a/tests/ui/unsafe-binders/unused-lifetimes.fixed b/tests/ui/unsafe-binders/unused-lifetimes.fixed new file mode 100644 index 00000000000..4295b6a848c --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes.fixed @@ -0,0 +1,20 @@ +// regression test for #141758 +//@ run-rustfix +//@ check-pass + +#![warn(unused_lifetimes)] +#![allow(incomplete_features, unused_imports, dead_code)] +#![feature(unsafe_binders)] + +use std::unsafe_binder::unwrap_binder; + +#[derive(Copy, Clone)] +pub struct S([usize; 8]); + +// Regression test for <https://github.com/rust-lang/rust/issues/141418>. +pub fn by_value(_x: S) -> usize { + //~^ WARN lifetime parameter `'a` never used + 0 +} + +fn main() {} diff --git a/tests/ui/unsafe-binders/unused-lifetimes.rs b/tests/ui/unsafe-binders/unused-lifetimes.rs new file mode 100644 index 00000000000..b1382328318 --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes.rs @@ -0,0 +1,20 @@ +// regression test for #141758 +//@ run-rustfix +//@ check-pass + +#![warn(unused_lifetimes)] +#![allow(incomplete_features, unused_imports, dead_code)] +#![feature(unsafe_binders)] + +use std::unsafe_binder::unwrap_binder; + +#[derive(Copy, Clone)] +pub struct S([usize; 8]); + +// Regression test for <https://github.com/rust-lang/rust/issues/141418>. +pub fn by_value(_x: unsafe<'a> S) -> usize { + //~^ WARN lifetime parameter `'a` never used + 0 +} + +fn main() {} diff --git a/tests/ui/unsafe-binders/unused-lifetimes.stderr b/tests/ui/unsafe-binders/unused-lifetimes.stderr new file mode 100644 index 00000000000..d9a5216301f --- /dev/null +++ b/tests/ui/unsafe-binders/unused-lifetimes.stderr @@ -0,0 +1,14 @@ +warning: lifetime parameter `'a` never used + --> $DIR/unused-lifetimes.rs:15:28 + | +LL | pub fn by_value(_x: unsafe<'a> S) -> usize { + | -------^^-- help: elide the unused lifetime + | +note: the lint level is defined here + --> $DIR/unused-lifetimes.rs:5:9 + | +LL | #![warn(unused_lifetimes)] + | ^^^^^^^^^^^^^^^^ + +warning: 1 warning emitted + | 
